[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.101.10.240.x / dhd_pcie.c
1 /*
2 * DHD Bus Module for PCIE
3 *
4 * Copyright (C) 2020, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 *
21 * <<Broadcom-WL-IPTag/Open:>>
22 *
23 * $Id$
24 */
25
26 /** XXX Twiki: [PCIeFullDongleArchitecture] */
27
28 /* include files */
29 #include <typedefs.h>
30 #include <bcmutils.h>
31 #include <bcmrand.h>
32 #include <bcmdevs.h>
33 #include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
34 #include <siutils.h>
35 #include <hndoobr.h>
36 #include <hndsoc.h>
37 #include <hndpmu.h>
38 #include <etd.h>
39 #include <hnd_debug.h>
40 #include <sbchipc.h>
41 #include <sbhndarm.h>
42 #include <hnd_armtrap.h>
43 #if defined(DHD_DEBUG)
44 #include <hnd_cons.h>
45 #endif /* defined(DHD_DEBUG) */
46 #include <dngl_stats.h>
47 #include <pcie_core.h>
48 #include <dhd.h>
49 #include <dhd_bus.h>
50 #include <dhd_flowring.h>
51 #include <dhd_proto.h>
52 #include <dhd_dbg.h>
53 #include <dhd_debug.h>
54 #include <dhd_daemon.h>
55 #include <dhdioctl.h>
56 #include <sdiovar.h>
57 #include <bcmmsgbuf.h>
58 #include <pcicfg.h>
59 #include <dhd_pcie.h>
60 #include <bcmpcie.h>
61 #include <bcmendian.h>
62 #include <bcmstdlib_s.h>
63 #ifdef DHDTCPACK_SUPPRESS
64 #include <dhd_ip.h>
65 #endif /* DHDTCPACK_SUPPRESS */
66 #include <bcmevent.h>
67 #include <dhd_config.h>
68
69 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
70 #include <linux/pm_runtime.h>
71 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
72
73 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
74 #include <debugger.h>
75 #endif /* DEBUGGER || DHD_DSCOPE */
76
77 #if defined(FW_SIGNATURE)
78 #include <dngl_rtlv.h>
79 #include <bcm_fwsign.h>
80 #endif /* FW_SIGNATURE */
81
82 #ifdef DNGL_AXI_ERROR_LOGGING
83 #include <dhd_linux_wq.h>
84 #include <dhd_linux.h>
85 #endif /* DNGL_AXI_ERROR_LOGGING */
86
87 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
88 #include <dhd_linux_priv.h>
89 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
90
91 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
92
93 #define MEMBLOCK 2048 /* Block size used for downloading the dongle image */
94 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
95
96 #define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
97 #define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
98 #define MAX_HP2P_CMPL_RINGS 2u
99
100 /* XXX defines for 4378 */
101 #define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
102 #define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
103 #define ACC_MPU_SHIFT 25
104 #define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
105
106 /* XXX Offset for 4375 work around register */
107 #define REG_WORK_AROUND (0x1e4/sizeof(uint32))
108
109 /* XXX defines for 43602a0 workaround JIRA CRWLARMCR4-53 */
110 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
111 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
112 /* Temporary WAR to fix precommit until the sync issue between trunk & precommit branches is resolved */
113
114 /* CTO Prevention Recovery */
115 #define CTO_TO_CLEAR_WAIT_MS 50
116 #define CTO_TO_CLEAR_WAIT_MAX_CNT 200
117
118 /* FLR setting */
119 #define PCIE_FLR_CAPAB_BIT 28
120 #define PCIE_FUNCTION_LEVEL_RESET_BIT 15
121
122 #define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
123 #define DHD_SSRESET_STATUS_RETRY_DELAY 40u
124 /*
125 * Increase SSReset de-assert time to 8ms,
126 * since re-scan takes longer on 4378B0.
127 */
128 #define DHD_SSRESET_STATUS_RETRIES 200u
129
130 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
131 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
132 (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
133
134 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
135 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
136 (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
137
138 /* Fetch address of a member in the ring_mem structure in dongle memory */
139 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
140 (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
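/*
 * Illustrative sketch (the member name below is assumed for illustration):
 * these helpers compute dongle backplane addresses of structure members, e.g.
 *   DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr)
 * expands to (bus)->shared_addr + OFFSETOF(pciedev_shared_t, console_addr),
 * and the resulting address can be passed to dhdpcie_bus_membytes() to read
 * or write that member in dongle memory.
 */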
141
142 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
143 extern unsigned int system_rev;
144 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
145
146 #ifdef EWP_EDL
147 extern int host_edl_support;
148 #endif
149
150 /* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
151 uint dma_ring_indices = 0;
152 /* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
153 bool h2d_phase = 0;
154 /* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
155 * defined in dhd_linux.c
156 */
157 bool force_trap_bad_h2d_phase = 0;
158
159 int dhd_dongle_ramsize;
160 struct dhd_bus *g_dhd_bus = NULL;
161 #ifdef DNGL_AXI_ERROR_LOGGING
162 static void dhd_log_dump_axi_error(uint8 *axi_err);
163 #endif /* DNGL_AXI_ERROR_LOGGING */
164
165 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
166 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
167 #if defined(DHD_FW_COREDUMP)
168 static int dhdpcie_mem_dump(dhd_bus_t *bus);
169 static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
170 #endif /* DHD_FW_COREDUMP */
171
172 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
173 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
174 const char *name, void *params,
175 uint plen, void *arg, uint len, int val_size);
176 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
177 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
178 uint32 len, uint32 srcdelay, uint32 destdelay,
179 uint32 d11_lpbk, uint32 core_num, uint32 wait);
180 static uint serialized_backplane_access(dhd_bus_t* bus, uint addr, uint size, uint* val, bool read);
181 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
182 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
183 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
184
185 #if defined(FW_SIGNATURE)
186 static int dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write);
187 static int dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus);
188 static int dhdpcie_bus_write_fws_status(dhd_bus_t *bus);
189 static int dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus);
190 static int dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path);
191 static int dhdpcie_download_rtlv_end(dhd_bus_t *bus);
192 static int dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
193 uint32 download_size, const char *signature_fname,
194 const char *bloader_fname, uint32 bloader_download_addr);
195 #endif /* FW_SIGNATURE */
196
197 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
198 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
199 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
200 static int dhdpcie_readshared(dhd_bus_t *bus);
201 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
202 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
203 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
204 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
205 bool dongle_isolation, bool reset_flag);
206 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
207 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
208 static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
209 static void dhd_init_bar1_switch_lock(dhd_bus_t *bus);
210 static void dhd_deinit_bar1_switch_lock(dhd_bus_t *bus);
211 static void dhd_init_pwr_req_lock(dhd_bus_t *bus);
212 static void dhd_deinit_pwr_req_lock(dhd_bus_t *bus);
213 static void dhd_init_bus_lp_state_lock(dhd_bus_t *bus);
214 static void dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus);
215 static void dhd_init_backplane_access_lock(dhd_bus_t *bus);
216 static void dhd_deinit_backplane_access_lock(dhd_bus_t *bus);
217 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
218 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
219 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
220 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
221 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
222 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
223 #ifdef DHD_SUPPORT_64BIT
224 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
225 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
226 #endif /* DHD_SUPPORT_64BIT */
227 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
228 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
229 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
230 static void dhdpcie_fw_trap(dhd_bus_t *bus);
231 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
232 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
233 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
234 #ifdef PCIE_INB_DW
235 static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval,
236 bool d2h, enum dhd_bus_ds_state inbstate);
237 #else
238 static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h);
239 #endif /* PCIE_INB_DW */
240 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
241
242 #ifdef IDLE_TX_FLOW_MGMT
243 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
244 static void dhd_bus_idle_scan(dhd_bus_t *bus);
245 #endif /* IDLE_TX_FLOW_MGMT */
246
247 #ifdef EXYNOS_PCIE_DEBUG
248 extern void exynos_pcie_register_dump(int ch_num);
249 #endif /* EXYNOS_PCIE_DEBUG */
250
251 #if defined(DHD_H2D_LOG_TIME_SYNC)
252 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
253 #endif /* DHD_H2D_LOG_TIME_SYNC */
254
255 #define PCI_VENDOR_ID_BROADCOM 0x14e4
256
257 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
258 #define MAX_D3_ACK_TIMEOUT 100
259 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
260
261 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
262 #if defined(PCIE_INB_DW)
263 static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
264 #endif
265
266 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
267 static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
268
269 static int dhdpcie_init_d11status(struct dhd_bus *bus);
270
271 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
272
273 #define NUM_PATTERNS 2
274 static bool dhd_bus_tcm_test(struct dhd_bus *bus);
275
276 #if defined(FW_SIGNATURE)
277 static int dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
278 #endif
279 static void dhdpcie_pme_stat_clear(dhd_bus_t *bus);
280
281 /* IOVar table */
282 enum {
283 IOV_INTR = 1,
284 IOV_MEMSIZE,
285 IOV_SET_DOWNLOAD_STATE,
286 IOV_SET_DOWNLOAD_INFO,
287 IOV_DEVRESET,
288 IOV_VARS,
289 IOV_MSI_SIM,
290 IOV_PCIE_LPBK,
291 IOV_CC_NVMSHADOW,
292 IOV_RAMSIZE,
293 IOV_RAMSTART,
294 IOV_SLEEP_ALLOWED,
295 IOV_PCIE_DMAXFER,
296 IOV_PCIE_SUSPEND,
297 IOV_DONGLEISOLATION,
298 IOV_LTRSLEEPON_UNLOOAD,
299 IOV_METADATA_DBG,
300 IOV_RX_METADATALEN,
301 IOV_TX_METADATALEN,
302 IOV_TXP_THRESHOLD,
303 IOV_BUZZZ_DUMP,
304 IOV_DUMP_RINGUPD_BLOCK,
305 IOV_DMA_RINGINDICES,
306 IOV_FORCE_FW_TRAP,
307 IOV_DB1_FOR_MB,
308 IOV_FLOW_PRIO_MAP,
309 #ifdef DHD_PCIE_RUNTIMEPM
310 IOV_IDLETIME,
311 #endif /* DHD_PCIE_RUNTIMEPM */
312 IOV_RXBOUND,
313 IOV_TXBOUND,
314 IOV_HANGREPORT,
315 IOV_H2D_MAILBOXDATA,
316 IOV_INFORINGS,
317 IOV_H2D_PHASE,
318 IOV_H2D_ENABLE_TRAP_BADPHASE,
319 IOV_H2D_TXPOST_MAX_ITEM,
320 #if defined(DHD_HTPUT_TUNABLES)
321 IOV_H2D_HTPUT_TXPOST_MAX_ITEM,
322 #endif /* DHD_HTPUT_TUNABLES */
323 IOV_TRAPDATA,
324 IOV_TRAPDATA_RAW,
325 IOV_CTO_PREVENTION,
326 IOV_PCIE_WD_RESET,
327 IOV_DUMP_DONGLE,
328 IOV_HWA_ENABLE,
329 IOV_IDMA_ENABLE,
330 IOV_IFRM_ENABLE,
331 IOV_CLEAR_RING,
332 IOV_DAR_ENABLE,
333 IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
334 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
335 IOV_GDB_SERVER, /**< starts gdb server on given interface */
336 #endif /* DEBUGGER || DHD_DSCOPE */
337 #if defined(GDB_PROXY)
338 IOV_GDB_PROXY_PROBE, /**< gdb proxy support presence check */
339 IOV_GDB_PROXY_STOP_COUNT, /**< gdb proxy firmware stop count */
340 #endif /* GDB_PROXY */
341 IOV_INB_DW_ENABLE,
342 #if defined(PCIE_INB_DW)
343 IOV_DEEP_SLEEP,
344 #endif
345 IOV_CTO_THRESHOLD,
346 IOV_HSCBSIZE, /* get HSCB buffer size */
347 IOV_HP2P_ENABLE,
348 IOV_HP2P_PKT_THRESHOLD,
349 IOV_HP2P_TIME_THRESHOLD,
350 IOV_HP2P_PKT_EXPIRY,
351 IOV_HP2P_TXCPL_MAXITEMS,
352 IOV_HP2P_RXCPL_MAXITEMS,
353 IOV_EXTDTXS_IN_TXCPL,
354 IOV_HOSTRDY_AFTER_INIT,
355 IOV_HP2P_MF_ENABLE,
356 IOV_PCIE_LAST /**< unused IOVAR */
357 };
358
359 const bcm_iovar_t dhdpcie_iovars[] = {
360 {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
361 {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
362 {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
363 {"dwnldinfo", IOV_SET_DOWNLOAD_INFO, 0, 0, IOVT_BUFFER,
364 sizeof(fw_download_info_t) },
365 {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
366 {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
367 {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
368 {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
369 {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
370 {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
371 {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
372 {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
373 {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
374 {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
375 {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
376 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
377 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
378 {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
379 {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
380 {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
381 {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
382 {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
383 {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
384 {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
385 {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
386 #ifdef DHD_PCIE_RUNTIMEPM
387 {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
388 #endif /* DHD_PCIE_RUNTIMEPM */
389 {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
390 {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
391 {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
392 {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
393 {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
394 {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
395 {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
396 IOVT_UINT32, 0 },
397 {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
398 #if defined(DHD_HTPUT_TUNABLES)
399 {"h2d_htput_max_txpost", IOV_H2D_HTPUT_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
400 #endif /* DHD_HTPUT_TUNABLES */
401 {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
402 {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
403 {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
404 {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
405 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
406 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
407 {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
408 {"hwa_enable", IOV_HWA_ENABLE, 0, 0, IOVT_UINT32, 0 },
409 {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
410 {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
411 {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
412 {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
413 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
414 {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
415 #endif /* DEBUGGER || DHD_DSCOPE */
416 #if defined(GDB_PROXY)
417 {"gdb_proxy_probe", IOV_GDB_PROXY_PROBE, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
418 {"gdb_proxy_stop_count", IOV_GDB_PROXY_STOP_COUNT, 0, 0, IOVT_UINT32, 0 },
419 #endif /* GDB_PROXY */
420 {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
421 #if defined(PCIE_INB_DW)
422 {"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32, 0},
423 #endif
424 {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
425 {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
426
427 {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
428 {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
429 {"hp2p_mf_enable", IOV_HP2P_MF_ENABLE, 0, 0, IOVT_UINT32, 0 },
430 {NULL, 0, 0, 0, 0, 0 }
431 };
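/*
 * Dispatch sketch: each entry above is matched by name and handled in
 * dhdpcie_bus_doiovar() on its IOV_* action id. For example, a get of the
 * "memsize" iovar (IOV_MEMSIZE, IOVT_UINT32) is expected to return the dongle
 * RAM size held by the bus; the actual handlers appear later in this file.
 */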
432
433 #define MAX_READ_TIMEOUT 2 * 1000 * 1000
434
435 #ifndef DHD_RXBOUND
436 #define DHD_RXBOUND 64
437 #endif
438 #ifndef DHD_TXBOUND
439 #define DHD_TXBOUND 64
440 #endif
441
442 #define DHD_INFORING_BOUND 32
443 #define DHD_BTLOGRING_BOUND 32
444
445 uint dhd_rxbound = DHD_RXBOUND;
446 uint dhd_txbound = DHD_TXBOUND;
447
448 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
449 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
450 static struct dhd_gdb_bus_ops_s bus_ops = {
451 .read_u16 = dhdpcie_bus_rtcm16,
452 .read_u32 = dhdpcie_bus_rtcm32,
453 .write_u32 = dhdpcie_bus_wtcm32,
454 };
455 #endif /* DEBUGGER || DHD_DSCOPE */
456
457 bool
458 dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
459 {
460 return bus->flr_force_fail;
461 }
462
463 /**
464 * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to
465 * link with the bus driver, in order to look for or await the device.
466 */
467 int
468 dhd_bus_register(void)
469 {
470 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
471
472 return dhdpcie_bus_register();
473 }
474
475 void
476 dhd_bus_unregister(void)
477 {
478 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
479
480 dhdpcie_bus_unregister();
481 return;
482 }
483
484 /** returns a host virtual address */
485 uint32 *
486 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
487 {
488 return (uint32 *)REG_MAP(addr, size);
489 }
490
491 void
492 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
493 {
494 REG_UNMAP(addr);
495 return;
496 }
497
498 /**
499 * Return the H2D doorbell register address.
500 * Use DAR registers instead of the enum register for corerev >= 23 (4347B0).
501 */
502 static INLINE uint
503 dhd_bus_db0_addr_get(struct dhd_bus *bus)
504 {
505 uint addr = PCIH2D_MailBox;
506 uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
507
508 return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
509 }
510
511 static INLINE uint
512 dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
513 {
514 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
515 }
516
517 static INLINE uint
518 dhd_bus_db1_addr_get(struct dhd_bus *bus)
519 {
520 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
521 }
522
523 static INLINE uint
524 dhd_bus_db1_addr_3_get(struct dhd_bus *bus)
525 {
526 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB3_1(bus->sih->buscorerev) : PCIH2D_DB1_3);
527 }
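/*
 * Usage sketch (assumed pattern): ringing an H2D doorbell typically writes a
 * value to the address returned by one of the helpers above via the PCIe core
 * register space, e.g.
 *   si_corereg(bus->sih, bus->sih->buscoreidx,
 *              dhd_bus_db0_addr_get(bus), ~0, value);
 * DAR_ACTIVE() selects between the DAR copy of the doorbell and the legacy
 * enumeration-space register.
 */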
528
529 static void
530 dhd_init_pwr_req_lock(dhd_bus_t *bus)
531 {
532 if (!bus->pwr_req_lock) {
533 bus->pwr_req_lock = osl_spin_lock_init(bus->osh);
534 }
535 }
536
537 static void
538 dhd_deinit_pwr_req_lock(dhd_bus_t *bus)
539 {
540 if (bus->pwr_req_lock) {
541 osl_spin_lock_deinit(bus->osh, bus->pwr_req_lock);
542 bus->pwr_req_lock = NULL;
543 }
544 }
545
546 /*
547 * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
548 */
549 static INLINE void
550 dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable)
551 {
552 if (enable) {
553 si_corereg(bus->sih, bus->sih->buscoreidx, offset,
554 SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
555 SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
556 } else {
557 si_corereg(bus->sih, bus->sih->buscoreidx, offset,
558 SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
559 }
560 }
561
562 static INLINE void
563 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
564 {
565 uint mask;
566
567 /*
568 * If there are multiple de-asserts, just decrement the ref count and return.
569 * Clear the power request only when a single request is pending,
570 * so the initial request is not removed unexpectedly.
571 */
572 if (bus->pwr_req_ref > 1) {
573 bus->pwr_req_ref--;
574 return;
575 }
576
577 ASSERT(bus->pwr_req_ref == 1);
578
579 if (MULTIBP_ENAB(bus->sih)) {
580 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
581 mask = SRPWR_DMN1_ARMBPSD_MASK;
582 } else {
583 mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
584 }
585
586 si_srpwr_request(bus->sih, mask, 0);
587 bus->pwr_req_ref = 0;
588 }
589
590 static INLINE void
591 dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
592 {
593 unsigned long flags = 0;
594
595 DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
596 _dhd_bus_pcie_pwr_req_clear_cmn(bus);
597 DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
598 }
599
600 static INLINE void
601 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
602 {
603 _dhd_bus_pcie_pwr_req_clear_cmn(bus);
604 }
605
606 static INLINE void
607 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
608 {
609 uint mask, val;
610
611 /* If multiple request entries, increment reference and return */
612 if (bus->pwr_req_ref > 0) {
613 bus->pwr_req_ref++;
614 return;
615 }
616
617 ASSERT(bus->pwr_req_ref == 0);
618
619 if (MULTIBP_ENAB(bus->sih)) {
620 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
621 mask = SRPWR_DMN1_ARMBPSD_MASK;
622 val = SRPWR_DMN1_ARMBPSD_MASK;
623 } else {
624 mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
625 val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
626 }
627
628 si_srpwr_request(bus->sih, mask, val);
629
630 bus->pwr_req_ref = 1;
631 }
632
633 static INLINE void
634 dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
635 {
636 unsigned long flags = 0;
637
638 DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
639 _dhd_bus_pcie_pwr_req_cmn(bus);
640 DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
641 }
642
643 static INLINE void
644 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
645 {
646 uint mask, val;
647
648 mask = SRPWR_DMN_ALL_MASK(bus->sih);
649 val = SRPWR_DMN_ALL_MASK(bus->sih);
650
651 si_srpwr_request(bus->sih, mask, val);
652 }
653
654 void
655 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
656 {
657 unsigned long flags = 0;
658
659 /*
660 * A few corerevs need the power domain to be active for FLR.
661 * Return if the pwr req is not applicable for this corerev.
662 */
663 if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) {
664 return;
665 }
666
667 DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
668 _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
669 DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
670 }
671
672 static INLINE void
673 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
674 {
675 uint mask;
676
677 mask = SRPWR_DMN_ALL_MASK(bus->sih);
678
679 si_srpwr_request(bus->sih, mask, 0);
680 }
681
682 void
683 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
684 {
685 unsigned long flags = 0;
686
687 /* return if the pwr clear is not applicable for the corerev */
688 if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) {
689 return;
690 }
691 DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
692 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
693 DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
694 }
695
696 static INLINE void
697 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
698 {
699 _dhd_bus_pcie_pwr_req_cmn(bus);
700 }
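/*
 * Usage sketch: the power-request helpers above are reference counted through
 * bus->pwr_req_ref, so callers pair them around backplane access:
 *   dhd_bus_pcie_pwr_req(bus);
 *   ... access ARM/WL backplane registers ...
 *   dhd_bus_pcie_pwr_req_clear(bus);
 * Nested requests only bump the ref count; the SRPWR request is placed on the
 * first request and removed on the last clear.
 */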
701
702 bool
703 dhdpcie_chip_support_msi(dhd_bus_t *bus)
704 {
705 /* XXX For chips with buscorerev <= 14, intstatus is not
706 * cleared by the firmware. Either the host can read and clear
707 * intstatus for these chips, or MSI can be left disabled entirely.
708 * Option 2 (not enabling MSI) is chosen here.
709 * MSI is also not enabled for hw4 chips.
711 */
712 DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n",
713 __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
714 if (bus->sih->buscorerev <= 14 ||
715 si_chipid(bus->sih) == BCM4389_CHIP_ID ||
716 si_chipid(bus->sih) == BCM4385_CHIP_ID ||
717 si_chipid(bus->sih) == BCM4375_CHIP_ID ||
718 si_chipid(bus->sih) == BCM4376_CHIP_ID ||
719 si_chipid(bus->sih) == BCM4362_CHIP_ID ||
720 si_chipid(bus->sih) == BCM43751_CHIP_ID ||
721 si_chipid(bus->sih) == BCM43752_CHIP_ID ||
722 si_chipid(bus->sih) == BCM4361_CHIP_ID ||
723 si_chipid(bus->sih) == BCM4359_CHIP_ID) {
724 return FALSE;
725 } else {
726 return TRUE;
727 }
728 }
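/*
 * The result of dhdpcie_chip_support_msi() is consumed during bus attach (see
 * dhdpcie_bus_attach() below): when DHD_MSI_SUPPORT is compiled in,
 * bus->d2h_intr_method is set to PCIE_MSI only if both the enable_msi module
 * parameter and this check allow it; otherwise legacy INTx (PCIE_INTX) is used.
 */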
729
730 /**
731 * Called once for each hardware (dongle) instance that this DHD manages.
732 *
733 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
734 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
735 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
736 *
737 * 'tcm' is the *host* virtual address at which tcm is mapped.
738 */
739 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
740 volatile char *regs, volatile char *tcm, void *pci_dev)
741 {
742 dhd_bus_t *bus = NULL;
743 int ret = BCME_OK;
744
745 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
746
747 do {
748 if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
749 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
750 ret = BCME_NORESOURCE;
751 break;
752 }
753
754 bus->regs = regs;
755 bus->tcm = tcm;
756 bus->osh = osh;
757 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
758 bus->dev = (struct pci_dev *)pci_dev;
759
760 dll_init(&bus->flowring_active_list);
761 #ifdef IDLE_TX_FLOW_MGMT
762 bus->active_list_last_process_ts = OSL_SYSUPTIME();
763 #endif /* IDLE_TX_FLOW_MGMT */
764
765 /* Attach pcie shared structure */
766 if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
767 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
768 ret = BCME_NORESOURCE;
769 break;
770 }
771
772 /* dhd_common_init(osh); */
773
774 if (dhdpcie_dongle_attach(bus)) {
775 DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
776 ret = BCME_NOTREADY;
777 break;
778 }
779
780 /* software resources */
781 if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
782 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
783 ret = BCME_NORESOURCE;
784 break;
785 }
786 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
787 bus->dhd->busstate = DHD_BUS_DOWN;
788 bus->dhd->hostrdy_after_init = TRUE;
789 bus->db1_for_mb = TRUE;
790 bus->dhd->hang_report = TRUE;
791 bus->use_mailbox = FALSE;
792 bus->use_d0_inform = FALSE;
793 bus->intr_enabled = FALSE;
794 bus->flr_force_fail = FALSE;
795 /* update the dma indices if set through module parameter. */
796 if (dma_ring_indices != 0) {
797 dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
798 }
799 /* update h2d phase support if set through module parameter */
800 bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
801 /* update force trap on bad phase if set through module parameter */
802 bus->dhd->force_dongletrap_on_bad_h2d_phase =
803 force_trap_bad_h2d_phase ? TRUE : FALSE;
804 #ifdef IDLE_TX_FLOW_MGMT
805 bus->enable_idle_flowring_mgmt = FALSE;
806 #endif /* IDLE_TX_FLOW_MGMT */
807 bus->irq_registered = FALSE;
808
809 #ifdef DHD_MSI_SUPPORT
810 bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
811 PCIE_MSI : PCIE_INTX;
812 #else
813 bus->d2h_intr_method = PCIE_INTX;
814 #endif /* DHD_MSI_SUPPORT */
815
816 DHD_TRACE(("%s: EXIT SUCCESS\n",
817 __FUNCTION__));
818 g_dhd_bus = bus;
819 *bus_ptr = bus;
820 return ret;
821 } while (0);
822
823 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
824 if (bus && bus->pcie_sh) {
825 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
826 }
827
828 if (bus) {
829 MFREE(osh, bus, sizeof(dhd_bus_t));
830 }
831
832 return ret;
833 }
834
835 bool
836 dhd_bus_skip_clm(dhd_pub_t *dhdp)
837 {
838 switch (dhd_bus_chip_id(dhdp)) {
839 case BCM4369_CHIP_ID:
840 return TRUE;
841 default:
842 return FALSE;
843 }
844 }
845
846 uint
847 dhd_bus_chip(struct dhd_bus *bus)
848 {
849 ASSERT(bus->sih != NULL);
850 return bus->sih->chip;
851 }
852
853 uint
854 dhd_bus_chiprev(struct dhd_bus *bus)
855 {
856 ASSERT(bus);
857 ASSERT(bus->sih != NULL);
858 return bus->sih->chiprev;
859 }
860
861 void *
862 dhd_bus_pub(struct dhd_bus *bus)
863 {
864 return bus->dhd;
865 }
866
867 void *
868 dhd_bus_sih(struct dhd_bus *bus)
869 {
870 return (void *)bus->sih;
871 }
872
873 void *
874 dhd_bus_txq(struct dhd_bus *bus)
875 {
876 return &bus->txq;
877 }
878
879 /** Get Chip ID version */
880 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
881 {
882 dhd_bus_t *bus = dhdp->bus;
883 return bus->sih->chip;
884 }
885
886 /** Get Chip Rev ID version */
887 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
888 {
889 dhd_bus_t *bus = dhdp->bus;
890 return bus->sih->chiprev;
891 }
892
893 /** Get Chip Pkg ID version */
894 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
895 {
896 dhd_bus_t *bus = dhdp->bus;
897 return bus->sih->chippkg;
898 }
899
900 /** Conduct Loopback test */
901 int
902 dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
903 {
904 dma_xfer_info_t dmaxfer_lpbk;
905 int ret = BCME_OK;
906
907 #define PCIE_DMAXFER_LPBK_LENGTH 4096
908 memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
909 dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
910 dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
911 dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
912 dmaxfer_lpbk.type = type;
913 dmaxfer_lpbk.should_wait = TRUE;
914
915 ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
916 (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
917 if (ret < 0) {
918 DHD_ERROR(("failed to start PCIe Loopback Test!!! "
919 "Type:%d Reason:%d\n", type, ret));
920 return ret;
921 }
922
923 if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
924 DHD_ERROR(("failed to check PCIe Loopback Test!!! "
925 "Type:%d Status:%d Error code:%d\n", type,
926 dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
927 ret = BCME_ERROR;
928 } else {
929 DHD_ERROR(("successful to check PCIe Loopback Test"
930 " Type:%d\n", type));
931 }
932 #undef PCIE_DMAXFER_LPBK_LENGTH
933
934 return ret;
935 }
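/*
 * Usage sketch (the type value is illustrative only): dhd_bus_dmaxfer_lpbk()
 * builds a dma_xfer_info_t request and issues it through the "pcie_dmaxfer"
 * iovar, e.g.
 *   int err = dhd_bus_dmaxfer_lpbk(dhdp, 0);
 * The meaning of 'type' (e.g. memory-to-memory vs. d11 loopback) is defined
 * alongside dma_xfer_info_t.
 */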
936
937 /* Check if there are DPC scheduling errors */
938 bool
939 dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
940 {
941 dhd_bus_t *bus = dhdp->bus;
942 bool sched_err;
943
944 if (bus->dpc_entry_time < bus->isr_exit_time) {
945 /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
946 sched_err = TRUE;
947 } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
948 /* Kernel doesn't schedule the DPC after DHD tries to reschedule
949 * the DPC due to pending work items to be processed.
950 */
951 sched_err = TRUE;
952 } else {
953 sched_err = FALSE;
954 }
955
956 if (sched_err) {
957 /* print out minimum timestamp info */
958 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
959 " isr_exit_time="SEC_USEC_FMT
960 " dpc_entry_time="SEC_USEC_FMT
961 "\ndpc_exit_time="SEC_USEC_FMT
962 " isr_sched_dpc_time="SEC_USEC_FMT
963 " resched_dpc_time="SEC_USEC_FMT"\n",
964 GET_SEC_USEC(bus->isr_entry_time),
965 GET_SEC_USEC(bus->isr_exit_time),
966 GET_SEC_USEC(bus->dpc_entry_time),
967 GET_SEC_USEC(bus->dpc_exit_time),
968 GET_SEC_USEC(bus->isr_sched_dpc_time),
969 GET_SEC_USEC(bus->resched_dpc_time)));
970 }
971
972 return sched_err;
973 }
974
975 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
976 uint32
977 dhdpcie_bus_intstatus(dhd_bus_t *bus)
978 {
979 uint32 intstatus = 0;
980 uint32 intmask = 0;
981
982 if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
983 DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
984 return intstatus;
985 }
986 /* XXX: check for PCIE Gen2 also */
987 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
988 (bus->sih->buscorerev == 2)) {
989 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
990 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
991 intstatus &= I_MB;
992 } else {
993 /* this is a PCIE core register... not a config register */
994 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
995
996 /* this is a PCIE core register... not a config register */
997 intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
998 /* Is the device removed? intstatus & intmask read as 0xffffffff */
999 if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
1000 DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
1001 DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
1002 __FUNCTION__, intstatus, intmask));
1003 bus->is_linkdown = TRUE;
1004 dhd_pcie_debug_info_dump(bus->dhd);
1005 #ifdef CUSTOMER_HW4_DEBUG
1006 #ifdef SUPPORT_LINKDOWN_RECOVERY
1007 #ifdef CONFIG_ARCH_MSM
1008 bus->no_cfg_restore = 1;
1009 #endif /* CONFIG_ARCH_MSM */
1010 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1011 bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
1012 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1013 copy_hang_info_linkdown(bus->dhd);
1014 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1015 dhd_os_send_hang_message(bus->dhd);
1016 #endif /* CUSTOMER_HW4_DEBUG */
1017 return intstatus;
1018 }
1019
1020 #ifndef DHD_READ_INTSTATUS_IN_DPC
1021 intstatus &= intmask;
1022 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1023
1024 /* XXX: define the mask in a .h file */
1025 /*
1026 * The fourth argument to si_corereg is the "mask" of register fields to update,
1027 * and the fifth is the "value" to write. If we are interested in only a few
1028 * fields of the "mask" bit map, we should not write back everything we read;
1029 * doing so might clear/ack interrupts that have not been handled yet.
1030 */
1031 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
1032 intstatus);
1033
1034 intstatus &= bus->def_intmask;
1035 }
1036
1037 return intstatus;
1038 }
1039
1040 void
1041 dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
1042 {
1043 dhd_bus_t *bus = dhd->bus;
1044 int ret;
1045
1046 /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
1047 */
1048 DHD_DISABLE_RUNTIME_PM(dhd);
1049
1050 /* Sleep for 1 second so that any AXI timeout,
1051 * even when running on the ALP clock, is also captured
1052 */
1053 OSL_SLEEP(1000);
1054
1055 /* Reset the backplane and CTO;
1056 * access through PCIe is then recovered.
1057 */
1058 ret = dhdpcie_cto_error_recovery(bus);
1059 if (!ret) {
1060 /* Waiting for backplane reset */
1061 OSL_SLEEP(10);
1062 /* Dump debug Info */
1063 dhd_prot_debug_info_print(bus->dhd);
1064 /* Dump console buffer */
1065 dhd_bus_dump_console_buffer(bus);
1066 #if defined(DHD_FW_COREDUMP)
1067 /* save core dump or write to a file */
1068 if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
1069 #ifdef DHD_SSSR_DUMP
1070 DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
1071 bus->dhd->collect_sssr = TRUE;
1072 #endif /* DHD_SSSR_DUMP */
1073 bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
1074 dhdpcie_mem_dump(bus);
1075 }
1076 #endif /* DHD_FW_COREDUMP */
1077 }
1078 #ifdef SUPPORT_LINKDOWN_RECOVERY
1079 #ifdef CONFIG_ARCH_MSM
1080 bus->no_cfg_restore = 1;
1081 #endif /* CONFIG_ARCH_MSM */
1082 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1083 bus->is_linkdown = TRUE;
1084 bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
1085 /* Send HANG event */
1086 dhd_os_send_hang_message(bus->dhd);
1087 }
1088
1089 void
1090 dhd_bus_dump_imp_cfg_registers(struct dhd_bus *bus)
1091 {
1092 uint32 status_cmd = dhd_pcie_config_read(bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
1093 uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));
1094 uint32 base_addr0 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR0, sizeof(uint32));
1095 uint32 base_addr1 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR1, sizeof(uint32));
1096 uint32 linkctl = dhd_pcie_config_read(bus, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1097 uint32 l1ssctrl =
1098 dhd_pcie_config_read(bus, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32));
1099 uint32 devctl = dhd_pcie_config_read(bus, PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1100 uint32 devctl2 = dhd_pcie_config_read(bus, PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1101
1102 DHD_ERROR(("status_cmd(0x%x)=0x%x, pmcsr(0x%x)=0x%x "
1103 "base_addr0(0x%x)=0x%x base_addr1(0x%x)=0x%x "
1104 "linkctl(0x%x)=0x%x l1ssctrl(0x%x)=0x%x "
1105 "devctl(0x%x)=0x%x devctl2(0x%x)=0x%x \n",
1106 PCIECFGREG_STATUS_CMD, status_cmd,
1107 PCIE_CFG_PMCSR, pmcsr,
1108 PCIECFGREG_BASEADDR0, base_addr0,
1109 PCIECFGREG_BASEADDR1, base_addr1,
1110 PCIECFGREG_LINK_STATUS_CTRL, linkctl,
1111 PCIECFGREG_PML1_SUB_CTRL1, l1ssctrl,
1112 PCIECFGREG_DEV_STATUS_CTRL, devctl,
1113 PCIECFGGEN_DEV_STATUS_CTRL2, devctl2));
1114 }
1115
1116 /**
1117 * Name: dhdpcie_bus_isr
1118 * Parameters:
1119 * 1: IN int irq -- interrupt vector
1120 * 2: IN void *arg -- handle to private data structure
1121 * Return value:
1122 * Status (TRUE or FALSE)
1123 *
1124 * Description:
1125 * The interrupt service routine checks the status register,
1126 * disables the interrupt, and queues the DPC if mailbox interrupts are raised.
1127 */
1128 int32
1129 dhdpcie_bus_isr(dhd_bus_t *bus)
1130 {
1131 uint32 intstatus = 0;
1132
1133 do {
1134 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1135 /* verify argument */
1136 if (!bus) {
1137 DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
1138 break;
1139 }
1140
1141 if (bus->dhd->dongle_reset) {
1142 DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
1143 break;
1144 }
1145
1146 if (bus->dhd->busstate == DHD_BUS_DOWN) {
1147 DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
1148 break;
1149 }
1150
1151 /* avoid processing of interrupts until msgbuf prot is inited */
1152 if (!bus->intr_enabled) {
1153 DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
1154 break;
1155 }
1156
1157 if (PCIECTO_ENAB(bus)) {
1158 /* read pci_intstatus */
1159 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
1160
1161 if (intstatus == (uint32)-1) {
1162 DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
1163 __FUNCTION__));
1164 dhdpcie_disable_irq_nosync(bus);
1165 break;
1166 }
1167
1168 if (intstatus & PCI_CTO_INT_MASK) {
1169 DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
1170 "intstat=0x%x enab=%d\n", __FUNCTION__,
1171 intstatus, bus->cto_enable));
1172 bus->cto_triggered = 1;
1173 dhd_bus_dump_imp_cfg_registers(bus);
1174 /*
1175 * DAR still accessible
1176 */
1177 dhd_bus_dump_dar_registers(bus);
1178
1179 /* Disable further PCIe interrupts */
1180 dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1181 /* Stop Tx flow */
1182 dhd_bus_stop_queue(bus);
1183
1184 /* Schedule CTO recovery */
1185 dhd_schedule_cto_recovery(bus->dhd);
1186
1187 return TRUE;
1188 }
1189 }
1190
1191 if (bus->d2h_intr_method == PCIE_MSI) {
1192 /* For MSI, as intstatus is cleared by firmware, no need to read */
1193 goto skip_intstatus_read;
1194 }
1195
1196 #ifndef DHD_READ_INTSTATUS_IN_DPC
1197 intstatus = dhdpcie_bus_intstatus(bus);
1198
1199 /* Check if the interrupt is ours or not */
1200 if (intstatus == 0) {
1201 bus->non_ours_irq_count++;
1202 bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
1203 DHD_INFO(("%s : this interrupt is not ours\n", __FUNCTION__));
1204 break;
1205 }
1206
1207 /* save the intstatus */
1208 /* read interrupt status register!! Status bits will be cleared in DPC !! */
1209 bus->intstatus = intstatus;
1210
1211 /* return error for 0xFFFFFFFF */
1212 if (intstatus == (uint32)-1) {
1213 DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
1214 __FUNCTION__, intstatus));
1215 dhdpcie_disable_irq_nosync(bus);
1216 break;
1217 }
1218
1219 skip_intstatus_read:
1220 /* Overall operation:
1221 * - Mask further interrupts
1222 * - Read/ack intstatus
1223 * - Take action based on bits and state
1224 * - Reenable interrupts (as per state)
1225 */
1226
1227 /* Count the interrupt call */
1228 bus->intrcount++;
1229 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1230
1231 bus->ipend = TRUE;
1232
1233 bus->isr_intr_disable_count++;
1234
1235 /* For Linux, macOS etc. (other than NDIS), instead of disabling the
1236 * dongle interrupt by clearing the IntMask, disable the interrupt directly
1237 * on the host side, so that the host will not receive any interrupts at
1238 * all, even though the dongle raises them
1239 */
1240 dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1241
1242 bus->intdis = TRUE;
1243
1244 #if defined(PCIE_ISR_THREAD)
1245
1246 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
1247 DHD_OS_WAKE_LOCK(bus->dhd);
1248 while (dhd_bus_dpc(bus));
1249 DHD_OS_WAKE_UNLOCK(bus->dhd);
1250 #else
1251 bus->dpc_sched = TRUE;
1252 bus->isr_sched_dpc_time = OSL_LOCALTIME_NS();
1253 dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
1254 #endif /* defined(PCIE_ISR_THREAD) */
1255
1256 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
1257 return TRUE;
1258
1259 } while (0);
1260
1261 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
1262 return FALSE;
1263 }
1264
1265 int
1266 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1267 {
1268 uint32 cur_state = 0;
1269 uint32 pm_csr = 0;
1270 osl_t *osh = bus->osh;
1271
1272 pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1273 cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1274
1275 if (cur_state == state) {
1276 DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1277 return BCME_OK;
1278 }
1279
1280 if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1281 return BCME_ERROR;
1282
1283 /* Validate the state transition
1284 * if already in a lower power state, return error
1285 */
1286 if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1287 cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1288 cur_state > state) {
1289 DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1290 return BCME_ERROR;
1291 }
1292
1293 pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1294 pm_csr |= state;
1295
1296 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1297
1298 /* need to wait for the specified mandatory pcie power transition delay time */
1299 if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1300 cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1301 OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1302 else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1303 cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1304 OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1305
1306 /* read back the power state and verify */
1307 pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1308 cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1309 if (cur_state != state) {
1310 DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1311 __FUNCTION__, cur_state));
1312 return BCME_ERROR;
1313 } else {
1314 DHD_ERROR(("%s: power transition to %u success \n",
1315 __FUNCTION__, cur_state));
1316 }
1317
1318 return BCME_OK;
1319 }
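/*
 * Usage sketch: dhdpcie_set_pwr_state() writes the PM control/status register
 * and verifies the transition, so a suspend path could, for example, move the
 * device to D3hot and later back to D0:
 *   dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
 *   ...
 *   dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
 * Requests for states beyond D3hot are rejected with BCME_ERROR.
 */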
1320
1321 int
1322 dhdpcie_config_check(dhd_bus_t *bus)
1323 {
1324 uint32 i, val;
1325 int ret = BCME_ERROR;
1326
1327 for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1328 val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1329 if ((val & 0xFFFF) == VENDOR_BROADCOM) {
1330 ret = BCME_OK;
1331 break;
1332 }
1333 OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1334 }
1335
1336 return ret;
1337 }
1338
1339 int
1340 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1341 {
1342 uint32 i;
1343 osl_t *osh = bus->osh;
1344
1345 if (BCME_OK != dhdpcie_config_check(bus)) {
1346 return BCME_ERROR;
1347 }
1348
1349 for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1350 OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1351 }
1352 OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1353
1354 if (restore_pmcsr)
1355 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1356 sizeof(uint32), bus->saved_config.pmcsr);
1357
1358 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1359 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1360 bus->saved_config.msi_addr0);
1361 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1362 sizeof(uint32), bus->saved_config.msi_addr1);
1363 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1364 sizeof(uint32), bus->saved_config.msi_data);
1365
1366 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1367 sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1368 OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1369 sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1370 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1371 sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1372 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1373 sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1374
1375 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1376 sizeof(uint32), bus->saved_config.l1pm0);
1377 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1378 sizeof(uint32), bus->saved_config.l1pm1);
1379
1380 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1381 bus->saved_config.bar0_win);
1382 dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1383
1384 return BCME_OK;
1385 }
1386
1387 int
1388 dhdpcie_config_save(dhd_bus_t *bus)
1389 {
1390 uint32 i;
1391 osl_t *osh = bus->osh;
1392
1393 if (BCME_OK != dhdpcie_config_check(bus)) {
1394 return BCME_ERROR;
1395 }
1396
1397 for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1398 bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1399 }
1400
1401 bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1402
1403 bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1404 sizeof(uint32));
1405 bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1406 sizeof(uint32));
1407 bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1408 sizeof(uint32));
1409 bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1410 sizeof(uint32));
1411
1412 bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1413 PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1414 bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1415 PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1416 bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1417 PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1418 bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1419 PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1420
1421 bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1422 sizeof(uint32));
1423 bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1424 sizeof(uint32));
1425
1426 bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1427 sizeof(uint32));
1428 bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1429 sizeof(uint32));
1430
1431 return BCME_OK;
1432 }
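/*
 * Usage sketch: dhdpcie_config_save() and dhdpcie_config_restore() are meant
 * to be used as a pair around events that can clobber PCIe config space
 * (e.g. FLR or a link down/up cycle):
 *   dhdpcie_config_save(bus);
 *   ... reset / power-cycle the device ...
 *   dhdpcie_config_restore(bus, TRUE);
 * where restore_pmcsr == TRUE also restores the saved PMCSR value. Both bail
 * out early if the vendor ID cannot be read back via dhdpcie_config_check().
 */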
1433
1434 #ifdef CONFIG_ARCH_EXYNOS
1435 dhd_pub_t *link_recovery = NULL;
1436 #endif /* CONFIG_ARCH_EXYNOS */
1437
1438 static void
1439 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1440 {
1441 uint buscorerev = bus->sih->buscorerev;
1442 bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1443 bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1444 bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1445 bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1446 if (buscorerev < 64) {
1447 bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1448 }
1449 }
1450
1451 static void
1452 dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1453 {
1454 uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
1455 (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1456 pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1457 }
1458 void
1459 dhdpcie_dongle_reset(dhd_bus_t *bus)
1460 {
1461
1462 /* if the pcie link is down, watchdog reset
1463 * should not be done, as it may hang
1464 */
1465 if (bus->is_linkdown) {
1466 return;
1467 }
1468
1469 /* Currently BP reset using CFG reg is done only for android platforms */
1470 #ifdef DHD_USE_BP_RESET_SPROM
1471 /* This is for architectures that do NOT control subsystem reset */
1472 (void)dhd_bus_cfg_sprom_ctrl_bp_reset(bus);
1473 return;
1474 #elif defined(DHD_USE_BP_RESET_SS_CTRL)
1475 /* This is for architectures that support Subsystem Control */
1476 (void)dhd_bus_cfg_ss_ctrl_bp_reset(bus);
1477 return;
1478 #else
1479
1480 /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1481 if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED)
1482 {
1483 /* Legacy chipcommon watchdog reset */
1484 dhdpcie_cc_watchdog_reset(bus);
1485 }
1486 return;
1487 #endif /* DHD_USE_BP_RESET */
1488 }
1489
1490 static bool
1491 is_bmpu_supported(dhd_bus_t *bus)
1492 {
1493 if (BCM4378_CHIP(bus->sih->chip) ||
1494 BCM4376_CHIP(bus->sih->chip) ||
1495 BCM4387_CHIP(bus->sih->chip) ||
1496 BCM4385_CHIP(bus->sih->chip)) {
1497 return TRUE;
1498 }
1499 return FALSE;
1500 }
1501
1502 #define CHIP_COMMON_SCR_DHD_TO_BL_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_DHD_TO_BL)
1503 #define CHIP_COMMON_SCR_BL_TO_DHD_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_BL_TO_DHD)
1504 void
1505 dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
1506 {
1507 volatile uint32 *cr4_regs;
1508 uint val = 0;
1509
1510 if (is_bmpu_supported(bus) == FALSE) {
1511 return;
1512 }
1513
1514 /* reset the dhd_to_bl and bl_to_dhd regs to their default values */
1515 (void)serialized_backplane_access(bus, CHIP_COMMON_SCR_DHD_TO_BL_ADDR(bus->sih),
1516 sizeof(val), &val, FALSE);
1517 (void)serialized_backplane_access(bus, CHIP_COMMON_SCR_BL_TO_DHD_ADDR(bus->sih),
1518 sizeof(val), &val, FALSE);
1519
1520 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
1521 if (cr4_regs == NULL) {
1522 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
1523 return;
1524 }
1525 if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
1526 /* bus mpu is supported */
1527 W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
1528 }
1529 }
1530
1531 static bool
1532 dhdpcie_dongle_attach(dhd_bus_t *bus)
1533 {
1534 osl_t *osh = bus->osh;
1535 volatile void *regsva = (volatile void*)bus->regs;
1536 uint16 devid;
1537 uint32 val;
1538 sbpcieregs_t *sbpcieregs;
1539 bool dongle_reset_needed;
1540 uint16 chipid;
1541
1542 BCM_REFERENCE(chipid);
1543
1544 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1545
1546 /* Configure CTO Prevention functionality */
1547 #if defined(BCMPCIE_CTO_PREVENTION)
1548 chipid = dhd_get_chipid(bus);
1549
1550 if (BCM4349_CHIP(chipid) || BCM4350_CHIP(chipid) || BCM4345_CHIP(chipid)) {
1551 DHD_ERROR(("Disable CTO\n"));
1552 bus->cto_enable = FALSE;
1553 } else {
1554 DHD_ERROR(("Enable CTO\n"));
1555 bus->cto_enable = TRUE;
1556 }
1557 #else
1558 DHD_ERROR(("Disable CTO\n"));
1559 bus->cto_enable = FALSE;
1560 #endif /* BCMPCIE_CTO_PREVENTION */
1561
1562 if (PCIECTO_ENAB(bus)) {
1563 dhdpcie_cto_init(bus, TRUE);
1564 }
1565
1566 #ifdef CONFIG_ARCH_EXYNOS
1567 link_recovery = bus->dhd;
1568 #endif /* CONFIG_ARCH_EXYNOS */
1569
1570 dhd_init_pwr_req_lock(bus);
1571 dhd_init_bus_lp_state_lock(bus);
1572 dhd_init_backplane_access_lock(bus);
1573
1574 bus->alp_only = TRUE;
1575 bus->sih = NULL;
1576
1577 /* Check PCIe bus status by reading the configuration space */
1578 val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1579 if ((val & 0xFFFF) != VENDOR_BROADCOM) {
1580 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1581 goto fail;
1582 }
1583 devid = (val >> 16) & 0xFFFF;
1584 bus->cl_devid = devid;
1585
1586 /* Set bar0 window to si_enum_base */
1587 dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1588
1589 /*
1590 * Check the PCI_SPROM_CONTROL register to prevent invalid address access
1591 * due to switching the address space from PCI_BUS to SI_BUS.
1592 */
1593 val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1594 if (val == 0xffffffff) {
1595 DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1596 goto fail;
1597 }
1598
1599 /* si_attach() will provide an SI handle and scan the backplane */
1600 if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1601 &bus->vars, &bus->varsz))) {
1602 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1603 goto fail;
1604 }
1605
1606 if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1607 /*
1608 * HW JIRA - CRWLPCIEGEN2-672
1609 * The Producer Index feature, which is used by F1, gets reset on F0 FLR;
1610 * fixed in REV68.
1611 */
1612 if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1613 dhdpcie_ssreset_dis_enum_rst(bus);
1614 }
1615
1616 /* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset pwr_req_ref here:
1617 * dhdpcie_bus_release_dongle() --> si_detach()
1618 * dhdpcie_dongle_attach() --> si_attach()
1619 */
1620 bus->pwr_req_ref = 0;
1621 }
1622
1623 if (MULTIBP_ENAB(bus->sih)) {
1624 dhd_bus_pcie_pwr_req_nolock(bus);
1625 }
1626
1627 /* Get info on the ARM and SOCRAM cores... */
1628 /* Should really be qualified by device id */
1629 if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1630 (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1631 (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1632 (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1633 bus->armrev = si_corerev(bus->sih);
1634 bus->coreid = si_coreid(bus->sih);
1635 } else {
1636 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1637 goto fail;
1638 }
1639
1640 /* CA7 requires coherent bits on */
1641 if (bus->coreid == ARMCA7_CORE_ID) {
1642 val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
1643 dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
1644 (val | PCIE_BARCOHERENTACCEN_MASK));
1645 }
1646
1647 /* EFI requirement - stop the driver load if FW is already running.
1648 * This needs to be done here, before pcie_watchdog_reset, because
1649 * pcie_watchdog_reset will put the ARM back into the halt state
1650 */
1651 if (!dhdpcie_is_arm_halted(bus)) {
1652 DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
1653 __FUNCTION__));
1654 goto fail;
1655 }
1656
1657 BCM_REFERENCE(dongle_reset_needed);
1658
1659 /* For in-built drivers the PCIe clock request will be done by the RC,
1660 * so do not issue clkreq from DHD
1661 */
1662 if (dhd_download_fw_on_driverload)
1663 {
1664 /* Enable CLKREQ# */
1665 dhdpcie_clkreq(bus->osh, 1, 1);
1666 }
1667
1668 /* Calculate htclkratio only for QT, for FPGA it is fixed at 30 */
1669
1670 /*
1671 * bus->dhd will be NULL if this is called from dhd_bus_attach, so the reset
1672 * needs to be done without checking the dongle_isolation flag; but if it is
1673 * called via some other path, such as quiesce FLR, then watchdog_reset should
1674 * be called based on the dongle_isolation flag.
1675 */
1676 if (bus->dhd == NULL) {
1677 /* dhd_attach not yet happened, do dongle reset */
1678 #ifdef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1679 dongle_reset_needed = FALSE;
1680 #else
1681 dongle_reset_needed = TRUE;
1682 #endif /* DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1683 } else {
1684 /* Based on the dongle_isolation flag, reset the dongle */
1685 dongle_reset_needed = !(bus->dhd->dongle_isolation);
1686 }
1687
1688 /*
1689 * Reset all the cores on the dongle - similar to rmmod dhd.
1690 * This is required to avoid spurious interrupts to the host and to bring
1691 * the dongle back to a sane state (on host soft-reboot / watchdog-reboot).
1692 */
1693 if (dongle_reset_needed) {
1694 dhdpcie_dongle_reset(bus);
1695 }
1696
1697 /* need to set the force_bt_quiesce flag here
1698 * before calling dhdpcie_dongle_flr_or_pwr_toggle
1699 */
1700 bus->force_bt_quiesce = TRUE;
1701 /*
1702 * For buscorerev 66 and later, F0 FLR should be done independently of F1,
1703 * so BT quiesce is not needed.
1704 */
1705 if (bus->sih->buscorerev >= 66) {
1706 bus->force_bt_quiesce = FALSE;
1707 }
1708
1709 dhdpcie_dongle_flr_or_pwr_toggle(bus);
1710
1711 dhdpcie_bus_mpu_disable(bus);
1712
1713 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1714 sbpcieregs = (sbpcieregs_t*)(bus->regs);
1715
1716 /* WAR where the BAR1 window may not be sized properly */
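	/* The registers below form an indirect config-space access pair: write the config
	 * offset (0x4e0) to configaddr, read configdata, and write the same value back.
	 * Writing the unchanged value back is presumably what forces the BAR1 window
	 * sizing to be re-latched.
	 */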
1717 W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1718 val = R_REG(osh, &sbpcieregs->configdata);
1719 W_REG(osh, &sbpcieregs->configdata, val);
1720
1721 /* if chip uses sysmem instead of tcm, typically ARM CA chips */
1722 if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1723 if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1724 DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1725 goto fail;
1726 }
1727 /* also populate base address */
1728 switch ((uint16)bus->sih->chip) {
1729 case BCM4368_CHIP_ID:
1730 bus->dongle_ram_base = CA7_4368_RAM_BASE;
1731 break;
1732 case BCM4385_CHIP_ID:
1733 bus->dongle_ram_base = CA7_4385_RAM_BASE;
1734 break;
1735 case BCM4388_CHIP_ID:
1736 case BCM4389_CHIP_ID:
1737 bus->dongle_ram_base = CA7_4389_RAM_BASE;
1738 break;
1739 default:
1740 /* also populate base address */
1741 bus->dongle_ram_base = 0x200000;
1742 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1743 __FUNCTION__, bus->dongle_ram_base));
1744 break;
1745 }
1746 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1747 if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1748 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1749 goto fail;
1750 }
1751 } else {
1752 /* cr4 has a different way to find the RAM size from TCM's */
1753 if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1754 DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1755 goto fail;
1756 }
1757 /* also populate base address */
1758 switch ((uint16)bus->sih->chip) {
1759 case BCM4339_CHIP_ID:
1760 case BCM4335_CHIP_ID:
1761 bus->dongle_ram_base = CR4_4335_RAM_BASE;
1762 break;
1763 case BCM4358_CHIP_ID:
1764 case BCM4354_CHIP_ID:
1765 case BCM43567_CHIP_ID:
1766 case BCM43569_CHIP_ID:
1767 case BCM4350_CHIP_ID:
1768 case BCM43570_CHIP_ID:
1769 bus->dongle_ram_base = CR4_4350_RAM_BASE;
1770 break;
1771 case BCM4360_CHIP_ID:
1772 bus->dongle_ram_base = CR4_4360_RAM_BASE;
1773 break;
1774
1775 case BCM4364_CHIP_ID:
1776 bus->dongle_ram_base = CR4_4364_RAM_BASE;
1777 break;
1778
1779 CASE_BCM4345_CHIP:
1780 bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
1781 ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
1782 break;
1783 CASE_BCM43602_CHIP:
1784 bus->dongle_ram_base = CR4_43602_RAM_BASE;
1785 break;
1786 case BCM4349_CHIP_GRPID:
1787 /* RAM base changed from 4349c0 (revid=9) onwards */
1788 bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
1789 CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1790 break;
1791 case BCM4347_CHIP_ID:
1792 case BCM4357_CHIP_ID:
1793 case BCM4361_CHIP_ID:
1794 bus->dongle_ram_base = CR4_4347_RAM_BASE;
1795 break;
1796 case BCM43751_CHIP_ID:
1797 bus->dongle_ram_base = CR4_43751_RAM_BASE;
1798 break;
1799 case BCM43752_CHIP_ID:
1800 bus->dongle_ram_base = CR4_43752_RAM_BASE;
1801 break;
1802 case BCM4376_CHIP_GRPID:
1803 bus->dongle_ram_base = CR4_4376_RAM_BASE;
1804 break;
1805 case BCM4378_CHIP_GRPID:
1806 bus->dongle_ram_base = CR4_4378_RAM_BASE;
1807 break;
1808 case BCM4362_CHIP_ID:
1809 bus->dongle_ram_base = CR4_4362_RAM_BASE;
1810 break;
1811 case BCM4375_CHIP_ID:
1812 case BCM4369_CHIP_ID:
1813 bus->dongle_ram_base = CR4_4369_RAM_BASE;
1814 break;
1815 case BCM4377_CHIP_ID:
1816 bus->dongle_ram_base = CR4_4377_RAM_BASE;
1817 break;
1818 case BCM4387_CHIP_GRPID:
1819 bus->dongle_ram_base = CR4_4387_RAM_BASE;
1820 break;
1821 case BCM4385_CHIP_ID:
1822 bus->dongle_ram_base = CR4_4385_RAM_BASE;
1823 break;
1824 case BCM4389_CHIP_ID:
1825 /* XXX: For corerev 3, use 4387 rambase */
1826 bus->dongle_ram_base = CR4_4387_RAM_BASE;
1827 break;
1828 default:
1829 bus->dongle_ram_base = 0;
1830 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1831 __FUNCTION__, bus->dongle_ram_base));
1832 }
1833 }
1834 bus->ramsize = bus->orig_ramsize;
1835 if (dhd_dongle_ramsize) {
1836 dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_ramsize);
1837 }
1838
1839 if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1840 DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1841 __FUNCTION__, bus->ramsize, bus->ramsize));
1842 goto fail;
1843 }
1844
1845 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1846 bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1847
1848 dhdpcie_bar1_window_switch_enab(bus);
1849
1850 /* Init bar1_switch_lock only after bar1_switch_enab is inited */
1851 dhd_init_bar1_switch_lock(bus);
1852
1853 bus->srmemsize = si_socram_srmem_size(bus->sih);
1854
1855 dhdpcie_bus_intr_init(bus);
1856
1857 /* Set the poll and/or interrupt flags */
1858 bus->intr = (bool)dhd_intr;
1859 if ((bus->poll = (bool)dhd_poll))
1860 bus->pollrate = 1;
1861 #ifdef DHD_DISABLE_ASPM
1862 dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1863 #endif /* DHD_DISABLE_ASPM */
1864 #ifdef PCIE_INB_DW
1865 bus->inb_enabled = TRUE;
1866 #endif /* PCIE_INB_DW */
1867 #if defined(PCIE_INB_DW)
1868 bus->ds_enabled = TRUE;
1869 #endif
1870
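	/* Default HWA, iDMA and IFRM support to enabled at attach time; whether they are
	 * actually used is presumably trimmed later based on what the dongle advertises.
	 */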
1871 bus->hwa_enabled = TRUE;
1872 bus->idma_enabled = TRUE;
1873 bus->ifrm_enabled = TRUE;
1874
1875 dhdpcie_pme_stat_clear(bus);
1876
1877 if (MULTIBP_ENAB(bus->sih)) {
1878 dhd_bus_pcie_pwr_req_clear_nolock(bus);
1879
1880 /*
1881 * One-time clearing of the Common Power Domain, since the HW default is set.
1882 * This needs to be done after FLR because FLR resets the PCIe enum space back to HW defaults
1883 * for 4378B0 (rev 68).
1884 * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
1885 */
1886 si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
1887
1888 /*
1889 * WAR to fix ARM cold boot;
1890 * Assert WL domain in DAR helps but not enum
1891 */
1892 if (bus->sih->buscorerev >= 68) {
1893 dhd_bus_pcie_pwr_req_wl_domain(bus,
1894 DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
1895 }
1896 }
1897
1898 DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1899
1900 return 0;
1901
1902 fail:
1903 /* for EFI even if there is an error, load still succeeds
1904 * so si_detach should not be called here, it is called during unload
1905 */
1906
1907 dhd_deinit_pwr_req_lock(bus);
1908 dhd_deinit_bus_lp_state_lock(bus);
1909 dhd_deinit_backplane_access_lock(bus);
1910
1911 if (bus->sih != NULL) {
1912 if (MULTIBP_ENAB(bus->sih)) {
1913 dhd_bus_pcie_pwr_req_clear_nolock(bus);
1914 }
1915
1916 si_detach(bus->sih);
1917 bus->sih = NULL;
1918 }
1919 DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1920 return -1;
1921 }
1922
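/* Mask/unmask the dongle-to-host mailbox interrupt (I_MB) directly through the PCIIntmask
 * config register. dhdpcie_bus_intr_enable()/_disable() below use these helpers only for the
 * older bus core revs (2, 4 and 6); newer revs use the PCIe mailbox mask core register instead.
 */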
1923 int
1924 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1925 {
1926 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1927 return 0;
1928 }
1929 int
1930 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1931 {
1932 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1933 return 0;
1934 }
1935
1936 /* Non atomic function, caller should hold appropriate lock */
1937 void
1938 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1939 {
1940 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1941 if (bus) {
1942 if (bus->sih && !bus->is_linkdown) {
1943 /* Skip after receiving D3 ACK */
1944 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1945 return;
1946 }
1947 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1948 (bus->sih->buscorerev == 4)) {
1949 dhpcie_bus_unmask_interrupt(bus);
1950 } else {
1951 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1952 bus->def_intmask, bus->def_intmask);
1953 }
1954 }
1955
1956 }
1957
1958 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1959 }
1960
1961 /* Non atomic function, caller should hold appropriate lock */
1962 void
1963 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1964 {
1965 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1966 if (bus && bus->sih && !bus->is_linkdown) {
1967 /* Skip after receiving D3 ACK */
1968 if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
1969 return;
1970 }
1971
1972 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1973 (bus->sih->buscorerev == 4)) {
1974 dhpcie_bus_mask_interrupt(bus);
1975 } else {
1976 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1977 bus->def_intmask, 0);
1978 }
1979 }
1980
1981 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1982 }
1983
1984 /*
1985 * dhdpcie_advertise_bus_cleanup advertises that clean-up is in progress
1986 * to other bus user contexts like Tx, Rx, IOVAR, WD etc. and waits for those contexts
1987 * to exit gracefully. Before marking the bus as busy, all bus usage contexts check
1988 * whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
1989 * they exit right there without marking dhd_bus_busy_state as BUSY.
1990 */
1991 void
1992 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1993 {
1994 unsigned long flags;
1995 int timeleft;
1996
1997 #ifdef DHD_PCIE_RUNTIMEPM
1998 dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
1999 #endif /* DHD_PCIE_RUNTIMEPM */
2000
2001 dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
2002 if (dhdp->dhd_watchdog_ms_backup) {
2003 DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
2004 __FUNCTION__));
2005 dhd_os_wd_timer(dhdp, 0);
2006 }
2007 if (dhdp->busstate != DHD_BUS_DOWN) {
2008 #ifdef DHD_DONGLE_TRAP_IN_DETACH
2009 /*
2010 * On x86 platforms, rmmod/insmod fails because some power
2011 * resources are not held high.
2012 * Hence induce a DB7 trap during detach; in the FW trap handler all
2013 * power resources are held high.
2014 */
2015 if (!dhd_query_bus_erros(dhdp) && dhdp->db7_trap.fw_db7w_trap) {
2016 dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE;
2017 dhdpcie_fw_trap(dhdp->bus);
2018 OSL_DELAY(100 * 1000); // wait 100 msec
2019 dhdp->db7_trap.fw_db7w_trap_inprogress = FALSE;
2020 } else {
2021 DHD_ERROR(("%s: DB7 Not sent!!!\n",
2022 __FUNCTION__));
2023 }
2024 #endif /* DHD_DONGLE_TRAP_IN_DETACH */
2025 DHD_GENERAL_LOCK(dhdp, flags);
2026 dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
2027 DHD_GENERAL_UNLOCK(dhdp, flags);
2028 }
2029
2030 timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
2031 if ((timeleft == 0) || (timeleft == 1)) {
2032 /* XXX This condition ideally should not occur; it means some
2033 * bus usage context is not clearing its usage bit. Print
2034 * dhd_bus_busy_state and crash the host for further debugging.
2035 */
2036 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
2037 __FUNCTION__, dhdp->dhd_bus_busy_state));
2038 ASSERT(0);
2039 }
2040
2041 return;
2042 }
2043
2044 static void
2045 dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp)
2046 {
2047 unsigned long flags;
2048 int timeleft;
2049
2050 DHD_GENERAL_LOCK(dhdp, flags);
2051 dhdp->busstate = DHD_BUS_REMOVE;
2052 DHD_GENERAL_UNLOCK(dhdp, flags);
2053
2054 timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
2055 if ((timeleft == 0) || (timeleft == 1)) {
2056 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
2057 __FUNCTION__, dhdp->dhd_bus_busy_state));
2058 ASSERT(0);
2059 }
2060
2061 return;
2062 }
2063
2064 static void
2065 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
2066 {
2067 unsigned long flags;
2068 DHD_TRACE(("%s Enter\n", __FUNCTION__));
2069
2070 DHD_GENERAL_LOCK(bus->dhd, flags);
2071 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2072 bus->dhd->busstate = DHD_BUS_DOWN;
2073 DHD_GENERAL_UNLOCK(bus->dhd, flags);
2074
2075 #ifdef PCIE_INB_DW
2076 /* De-Initialize the lock to serialize Device Wake Inband activities */
2077 if (bus->inb_lock) {
2078 osl_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
2079 bus->inb_lock = NULL;
2080 }
2081 #endif
2082
2083 dhd_os_sdlock(bus->dhd);
2084
2085 if (bus->sih && !bus->dhd->dongle_isolation) {
2086
2087 dhd_bus_pcie_pwr_req_reload_war(bus);
2088
2089 /* Skip below WARs for Android as insmod fails after rmmod in Brix Android */
2090
2091 /* if the pcie link is down, watchdog reset
2092 * should not be done, as it may hang
2093 */
2094
2095 if (!bus->is_linkdown) {
2096 /* For Non-EFI modular builds, do dongle reset during rmmod */
2097 /* For EFI-DHD this compile flag will be defined.
2098 * In EFI, depending on the BT-over-PCIe mode,
2099 * we either power-toggle or do an F0 FLR
2100 * from dhdpcie_bus_release_dongle(), so there is no need to
2101 * do a dongle reset from here.
2102 */
2103 dhdpcie_dongle_reset(bus);
2104 }
2105
2106 bus->dhd->is_pcie_watchdog_reset = TRUE;
2107 }
2108
2109 dhd_os_sdunlock(bus->dhd);
2110
2111 DHD_TRACE(("%s Exit\n", __FUNCTION__));
2112 }
2113
2114 void
2115 dhd_init_bus_lp_state_lock(dhd_bus_t *bus)
2116 {
2117 if (!bus->bus_lp_state_lock) {
2118 bus->bus_lp_state_lock = osl_spin_lock_init(bus->osh);
2119 }
2120 }
2121
2122 void
2123 dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus)
2124 {
2125 if (bus->bus_lp_state_lock) {
2126 osl_spin_lock_deinit(bus->osh, bus->bus_lp_state_lock);
2127 bus->bus_lp_state_lock = NULL;
2128 }
2129 }
2130
2131 void
2132 dhd_init_backplane_access_lock(dhd_bus_t *bus)
2133 {
2134 if (!bus->backplane_access_lock) {
2135 bus->backplane_access_lock = osl_spin_lock_init(bus->osh);
2136 }
2137 }
2138
2139 void
2140 dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
2141 {
2142 if (bus->backplane_access_lock) {
2143 osl_spin_lock_deinit(bus->osh, bus->backplane_access_lock);
2144 bus->backplane_access_lock = NULL;
2145 }
2146 }
2147
2148 /** Detach and free everything */
2149 void
2150 dhdpcie_bus_release(dhd_bus_t *bus)
2151 {
2152 bool dongle_isolation = FALSE;
2153 osl_t *osh = NULL;
2154
2155 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2156
2157 if (bus) {
2158
2159 osh = bus->osh;
2160 ASSERT(osh);
2161
2162 if (bus->dhd) {
2163 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
2164 debugger_close();
2165 #endif /* DEBUGGER || DHD_DSCOPE */
2166 dhdpcie_advertise_bus_remove(bus->dhd);
2167 dongle_isolation = bus->dhd->dongle_isolation;
2168 bus->dhd->is_pcie_watchdog_reset = FALSE;
2169 dhdpcie_bus_remove_prep(bus);
2170
2171 if (bus->intr) {
2172 dhdpcie_bus_intr_disable(bus);
2173 dhdpcie_free_irq(bus);
2174 }
2175 dhd_deinit_bus_lp_state_lock(bus);
2176 dhd_deinit_bar1_switch_lock(bus);
2177 dhd_deinit_backplane_access_lock(bus);
2178 dhd_deinit_pwr_req_lock(bus);
2179 /**
2180 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to
2181 * access dongle registers.
2182 * dhd_detach will communicate with the dongle to delete flowrings etc.,
2183 * so dhdpcie_bus_release_dongle should be called only after dhd_detach.
2184 */
2185 dhd_detach(bus->dhd);
2186 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
2187 dhd_free(bus->dhd);
2188 bus->dhd = NULL;
2189 }
2190 /* unmap the regs and tcm here!! */
2191 if (bus->regs) {
2192 dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
2193 bus->regs = NULL;
2194 }
2195 if (bus->tcm) {
2196 dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
2197 bus->tcm = NULL;
2198 }
2199
2200 dhdpcie_bus_release_malloc(bus, osh);
2201 /* Detach pcie shared structure */
2202 if (bus->pcie_sh) {
2203 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
2204 }
2205
2206 if (bus->console.buf != NULL) {
2207 MFREE(osh, bus->console.buf, bus->console.bufsize);
2208 }
2209
2210 /* Finally free bus info */
2211 MFREE(osh, bus, sizeof(dhd_bus_t));
2212
2213 g_dhd_bus = NULL;
2214 }
2215
2216 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2217 } /* dhdpcie_bus_release */
2218
2219 void
2220 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
2221 {
2222 DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
2223 bus->dhd, bus->dhd->dongle_reset));
2224
2225 if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
2226 DHD_TRACE(("%s Exit\n", __FUNCTION__));
2227 return;
2228 }
2229
2230 if (bus->is_linkdown) {
2231 DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
2232 return;
2233 }
2234
2235 if (bus->sih) {
2236
2237 /*
2238 * Perform dongle reset only if dongle isolation is not enabled.
2239 * On Android platforms, dongle isolation will be enabled and
2240 * the dongle will be quiesced using a DB7 trap.
2241 */
2242 if (!dongle_isolation &&
2243 bus->dhd && !bus->dhd->is_pcie_watchdog_reset) {
2244 dhdpcie_dongle_reset(bus);
2245 }
2246
2247 /* Only for EFI this will be effective */
2248 dhdpcie_dongle_flr_or_pwr_toggle(bus);
2249
2250 if (bus->ltrsleep_on_unload) {
2251 si_corereg(bus->sih, bus->sih->buscoreidx,
2252 OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
2253 }
2254
2255 if (bus->sih->buscorerev == 13)
2256 pcie_serdes_iddqdisable(bus->osh, bus->sih,
2257 (sbpcieregs_t *) bus->regs);
2258
2259 /* For built-in drivers the PCIe CLKREQ will be handled by the RC,
2260 * so do not do clkreq from DHD
2261 */
2262 if (dhd_download_fw_on_driverload)
2263 {
2264 /* Disable CLKREQ# */
2265 dhdpcie_clkreq(bus->osh, 1, 0);
2266 }
2267
2268 if (bus->sih != NULL) {
2269 si_detach(bus->sih);
2270 bus->sih = NULL;
2271 }
2272 if (bus->vars && bus->varsz)
2273 MFREE(osh, bus->vars, bus->varsz);
2274 bus->vars = NULL;
2275 }
2276
2277 DHD_TRACE(("%s Exit\n", __FUNCTION__));
2278 }
2279
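/** 32 bit config read */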
2280 uint32
2281 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
2282 {
2283 uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
2284 return data;
2285 }
2286
2287 /** 32 bit config write */
2288 void
2289 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
2290 {
2291 OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
2292 }
2293
2294 void
2295 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
2296 {
2297 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
2298 }
2299
2300 void
2301 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
2302 {
2303 int32 min_size = DONGLE_MIN_MEMSIZE;
2304 /* Restrict the memsize to user specified limit */
2305 DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d max accepted %d\n",
2306 mem_size, min_size, (int32)bus->orig_ramsize));
2307 if ((mem_size > min_size) &&
2308 (mem_size < (int32)bus->orig_ramsize)) {
2309 bus->ramsize = mem_size;
2310 } else {
2311 DHD_ERROR(("%s: Invalid mem_size %d\n", __FUNCTION__, mem_size));
2312 }
2313 }
2314
2315 void
2316 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
2317 {
2318 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2319
2320 if (bus->dhd && bus->dhd->dongle_reset)
2321 return;
2322
2323 if (bus->vars && bus->varsz) {
2324 MFREE(osh, bus->vars, bus->varsz);
2325 }
2326
2327 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2328 return;
2329
2330 }
2331
2332 /** Stop bus module: clear pending frames, disable data flow */
2333 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
2334 {
2335 unsigned long flags;
2336
2337 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2338
2339 if (!bus->dhd)
2340 return;
2341
2342 if (bus->dhd->busstate == DHD_BUS_DOWN) {
2343 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
2344 goto done;
2345 }
2346
2347 DHD_STOP_RPM_TIMER(bus->dhd);
2348
2349 DHD_GENERAL_LOCK(bus->dhd, flags);
2350 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2351 bus->dhd->busstate = DHD_BUS_DOWN;
2352 DHD_GENERAL_UNLOCK(bus->dhd, flags);
2353
2354 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2355 atomic_set(&bus->dhd->block_bus, TRUE);
2356 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2357
2358 dhdpcie_bus_intr_disable(bus);
2359
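	/* Ack any pending interrupt status by reading PCIIntstatus from config space and
	 * writing the same value back (the status bits are presumably write-1-to-clear).
	 */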
2360 if (!bus->is_linkdown) {
2361 uint32 status;
2362 status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
2363 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2364 }
2365
2366 if (!dhd_download_fw_on_driverload) {
2367 dhd_dpc_kill(bus->dhd);
2368 }
2369
2370 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2371 pm_runtime_disable(dhd_bus_to_dev(bus));
2372 pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2373 pm_runtime_enable(dhd_bus_to_dev(bus));
2374 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2375
2376 /* Clear rx control and wake any waiters */
2377 /* XXX More important in disconnect, but no context? */
2378 dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
2379 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
2380
2381 done:
2382 return;
2383 }
2384
2385 /**
2386 * Watchdog timer function.
2387 * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
2388 */
2389 bool dhd_bus_watchdog(dhd_pub_t *dhd)
2390 {
2391 unsigned long flags;
2392 dhd_bus_t *bus = dhd->bus;
2393
2394 if (dhd_query_bus_erros(bus->dhd)) {
2395 return FALSE;
2396 }
2397
2398 DHD_GENERAL_LOCK(dhd, flags);
2399 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
2400 DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
2401 DHD_GENERAL_UNLOCK(dhd, flags);
2402 return FALSE;
2403 }
2404 DHD_BUS_BUSY_SET_IN_WD(dhd);
2405 DHD_GENERAL_UNLOCK(dhd, flags);
2406
2407 #ifdef DHD_PCIE_RUNTIMEPM
2408 dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
2409 #endif /* DHD_PCIE_RUNTIMEPM */
2410
2411 /* Poll for console output periodically */
2412 if (dhd->busstate == DHD_BUS_DATA &&
2413 dhd->dhd_console_ms != 0 &&
2414 DHD_CHK_BUS_NOT_IN_LPS(bus)) {
2415 bus->console.count += dhd_watchdog_ms;
2416 if (bus->console.count >= dhd->dhd_console_ms) {
2417 bus->console.count -= dhd->dhd_console_ms;
2418
2419 if (MULTIBP_ENAB(bus->sih)) {
2420 dhd_bus_pcie_pwr_req(bus);
2421 }
2422
2423 /* Make sure backplane clock is on */
2424 if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
2425 if (dhdpcie_bus_readconsole(bus) < 0) {
2426 dhd->dhd_console_ms = 0; /* On error, stop trying */
2427 }
2428 }
2429
2430 if (MULTIBP_ENAB(bus->sih)) {
2431 dhd_bus_pcie_pwr_req_clear(bus);
2432 }
2433 }
2434 }
2435
2436 #ifdef DHD_READ_INTSTATUS_IN_DPC
2437 if (bus->poll) {
2438 bus->ipend = TRUE;
2439 bus->dpc_sched = TRUE;
2440 dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
2441 }
2442 #endif /* DHD_READ_INTSTATUS_IN_DPC */
2443
2444 DHD_GENERAL_LOCK(dhd, flags);
2445 DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
2446 dhd_os_busbusy_wake(dhd);
2447 DHD_GENERAL_UNLOCK(dhd, flags);
2448 #if !defined(DHD_PCIE_RUNTIMEPM) && defined(PCIE_INB_DW)
2449 dhd->bus->inb_dw_deassert_cnt += dhd_watchdog_ms;
2450 if (dhd->bus->inb_dw_deassert_cnt >=
2451 DHD_INB_DW_DEASSERT_MS) {
2452 dhd->bus->inb_dw_deassert_cnt = 0;
2453 /* Inband device wake is deasserted from DPC context after DS_Exit is received,
2454 * but if no d2h interrupt is received at all, the DPC will not be scheduled
2455 * and inband DW will not be deasserted; hence DW is deasserted from the watchdog
2456 * thread every 250 ms.
2457 */
2458 dhd_bus_dw_deassert(dhd);
2459 }
2460 #endif
2461 return TRUE;
2462 } /* dhd_bus_watchdog */
2463
2464 #if defined(SUPPORT_MULTIPLE_REVISION)
2465 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2466 {
2467 uint32 chiprev;
2468 #if defined(SUPPORT_MULTIPLE_CHIPS)
2469 char chipver_tag[20] = "_4358";
2470 #else
2471 char chipver_tag[10] = {0, };
2472 #endif /* SUPPORT_MULTIPLE_CHIPS */
2473
2474 chiprev = dhd_bus_chiprev(bus);
2475 if (chiprev == 0) {
2476 DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2477 strcat(chipver_tag, "_a0");
2478 } else if (chiprev == 1) {
2479 DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2480 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2481 strcat(chipver_tag, "_a1");
2482 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2483 } else if (chiprev == 3) {
2484 DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2485 #if defined(SUPPORT_MULTIPLE_CHIPS)
2486 strcat(chipver_tag, "_a3");
2487 #endif /* SUPPORT_MULTIPLE_CHIPS */
2488 } else {
2489 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2490 }
2491
2492 strcat(fw_path, chipver_tag);
2493
2494 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2495 if (chiprev == 1 || chiprev == 3) {
2496 int ret = dhd_check_module_b85a();
2497 if ((chiprev == 1) && (ret < 0)) {
2498 memset(chipver_tag, 0x00, sizeof(chipver_tag));
2499 strcat(chipver_tag, "_b85");
2500 strcat(chipver_tag, "_a1");
2501 }
2502 }
2503
2504 DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2505 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2506
2507 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2508 if (system_rev >= 10) {
2509 DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
2510 strcat(chipver_tag, "_r10");
2511 }
2512 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2513 strcat(nv_path, chipver_tag);
2514
2515 return 0;
2516 }
2517
2518 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2519 {
2520 uint32 chip_ver;
2521 char chipver_tag[10] = {0, };
2522 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2523 defined(SUPPORT_BCM4359_MIXED_MODULES)
2524 int module_type = -1;
2525 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2526
2527 chip_ver = bus->sih->chiprev;
2528 if (chip_ver == 4) {
2529 DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2530 strncat(chipver_tag, "_b0", strlen("_b0"));
2531 } else if (chip_ver == 5) {
2532 DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2533 strncat(chipver_tag, "_b1", strlen("_b1"));
2534 } else if (chip_ver == 9) {
2535 DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2536 strncat(chipver_tag, "_c0", strlen("_c0"));
2537 } else {
2538 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2539 return -1;
2540 }
2541
2542 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2543 defined(SUPPORT_BCM4359_MIXED_MODULES)
2544 module_type = dhd_check_module_b90();
2545
2546 switch (module_type) {
2547 case BCM4359_MODULE_TYPE_B90B:
2548 strcat(fw_path, chipver_tag);
2549 break;
2550 case BCM4359_MODULE_TYPE_B90S:
2551 default:
2552 /*
2553 * If the .cid.info file does not exist,
2554 * force-load the B90S FW for the initial MFG boot-up.
2555 */
2556 if (chip_ver == 5) {
2557 strncat(fw_path, "_b90s", strlen("_b90s"));
2558 }
2559 strcat(fw_path, chipver_tag);
2560 strcat(nv_path, chipver_tag);
2561 break;
2562 }
2563 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2564 strcat(fw_path, chipver_tag);
2565 strcat(nv_path, chipver_tag);
2566 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2567
2568 return 0;
2569 }
2570
2571 #if defined(USE_CID_CHECK)
2572
2573 #define MAX_EXTENSION 20
2574 #define MODULE_BCM4361_INDEX 3
2575 #define CHIP_REV_A0 1
2576 #define CHIP_REV_A1 2
2577 #define CHIP_REV_B0 3
2578 #define CHIP_REV_B1 4
2579 #define CHIP_REV_B2 5
2580 #define CHIP_REV_C0 6
2581 #define BOARD_TYPE_EPA 0x080f
2582 #define BOARD_TYPE_IPA 0x0827
2583 #define BOARD_TYPE_IPA_OLD 0x081a
2584 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
2585 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
2586 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
2587 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
2588 #define MAX_VID_LEN 8
2589 #define CIS_TUPLE_HDR_LEN 2
2590 #if defined(BCM4375_CHIP)
2591 #define CIS_TUPLE_START_ADDRESS 0x18011120
2592 #define CIS_TUPLE_END_ADDRESS 0x18011177
2593 #elif defined(BCM4389_CHIP_DEF)
2594 /* The 4389A0 CIS tuple start address is different from 4389B0
2595 * because the OTP layout changed from 4389B0 onwards
2596 */
2597 #define CIS_TUPLE_START_ADDRESS 0x1801113C
2598 #define CIS_TUPLE_END_ADDRESS 0x18011193
2599 #define CIS_TUPLE_START_ADDRESS_89B0 0x18011058
2600 #define CIS_TUPLE_END_ADDRESS_89B0 0x180110AF
2601 #else
2602 #define CIS_TUPLE_START_ADDRESS 0x18011110
2603 #define CIS_TUPLE_END_ADDRESS 0x18011167
2604 #endif /* defined(BCM4375_CHIP) */
2605 #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
2606 + 1) / sizeof(uint32))
2607 #define CIS_TUPLE_TAG_START 0x80
2608 #define CIS_TUPLE_TAG_VENDOR 0x81
2609 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
2610 #define CIS_TUPLE_TAG_LENGTH 1
2611 #define NVRAM_FEM_MURATA "_murata"
2612 #define CID_FEM_MURATA "_mur_"
2613
2614 typedef struct cis_tuple_format {
2615 uint8 id;
2616 uint8 len; /* total length of tag and data */
2617 uint8 tag;
2618 uint8 data[1];
2619 } cis_tuple_format_t;
2620
2621 typedef struct {
2622 char cid_ext[MAX_EXTENSION];
2623 char nvram_ext[MAX_EXTENSION];
2624 char fw_ext[MAX_EXTENSION];
2625 } naming_info_t;
2626
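/* Each row maps an OTP/CIS module identifier prefix (cid_ext) to the suffixes appended to the
 * NVRAM and firmware file names (nvram_ext, fw_ext) by the concate_revision_*() helpers below.
 */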
2627 naming_info_t bcm4361_naming_table[] = {
2628 { {""}, {""}, {""} },
2629 { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2630 { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2631 { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2632 { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2633 { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2634 { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2635 { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2636 { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2637 { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2638 { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2639 { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2640 { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2641 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2642 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2643 { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2644 { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2645 { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2646 { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2647 { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2648 { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2649 { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2650 { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2651 { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
2652 { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2653 { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2654 { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2655 { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2656 { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2657 { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2658 { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2659 { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2660 { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} }
2661 };
2662
2663 #define MODULE_BCM4375_INDEX 3
2664
2665 naming_info_t bcm4375_naming_table[] = {
2666 { {""}, {""}, {""} },
2667 { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
2668 { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
2669 { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
2670 { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
2671 { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
2672 { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
2673 { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
2674 { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
2675 { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
2676 { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
2677 { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
2678 { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
2679 { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
2680 { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
2681 { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
2682 { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
2683 { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
2684 { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
2685 { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
2686 };
2687
2688 #define MODULE_BCM4389_INDEX 3
2689
2690 naming_info_t bcm4389_naming_table[] = {
2691 { {""}, {""}, {""} },
2692 { {"e51_es11"}, {"_ES01_semco_a0"}, {"_a0"} },
2693 { {"e51_es12"}, {"_ES02_semco_a0"}, {"_a0"} },
2694 { {"e53_es23"}, {"_ES10_semco_b0"}, {"_b0"} },
2695 { {"1wk_es21"}, {"_1wk_es21_b0"}, {"_b0"} },
2696 { {"1wk_es30"}, {"_1wk_es30_b0"}, {"_b0"} },
2697 { {"1wk_es31"}, {"_1wk_es31_b0"}, {"_b0"} }
2698 };
2699
2700 #if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF)
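/* Return the table row whose cid_ext is a prefix of module_type; row 0 (empty extensions)
 * serves as the fallback when no entry matches.
 */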
2701 static naming_info_t *
2702 dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2703 {
2704 int index_found = 0, i = 0;
2705
2706 if (module_type && strlen(module_type) > 0) {
2707 for (i = 1; i < table_size; i++) {
2708 if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2709 index_found = i;
2710 break;
2711 }
2712 }
2713 }
2714
2715 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2716
2717 return &table[index_found];
2718 }
2719
2720 static naming_info_t *
2721 dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2722 char *cid_info)
2723 {
2724 int index_found = 0, i = 0;
2725 char *ptr;
2726
2727 /* truncate extension: skip the first (MODULE_BCM4361_INDEX - 1) '_'-separated fields of cid_info before matching against the table */
2728 for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2729 ptr = bcmstrstr(ptr, "_");
2730 if (ptr) {
2731 ptr++;
2732 }
2733 }
2734
2735 for (i = 1; i < table_size && ptr; i++) {
2736 if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2737 index_found = i;
2738 break;
2739 }
2740 }
2741
2742 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2743
2744 return &table[index_found];
2745 }
2746
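/* Read the CIS tuple area from the dongle backplane (after poking the OTP control and PMU
 * min-resource-mask registers) and extract the vendor id (VID) and board-type tuples used to
 * identify the module.
 */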
2747 static int
2748 dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
2749 unsigned char *vid, int *vid_length)
2750 {
2751 int boardtype_backplane_addr[] = {
2752 0x18010324, /* OTP Control 1 */
2753 0x18012618, /* PMU min resource mask */
2754 };
2755 int boardtype_backplane_data[] = {
2756 0x00fa0000,
2757 0x0e4fffff /* Keep on ARMHTAVAIL */
2758 };
2759 int int_val = 0, i = 0;
2760 cis_tuple_format_t *tuple;
2761 int totlen, len;
2762 uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2763 uint32 cis_start_addr = CIS_TUPLE_START_ADDRESS;
2764 #ifdef BCM4389_CHIP_DEF
2765 uint chipid = dhd_bus_chip_id(bus->dhd);
2766 uint revid = dhd_bus_chiprev_id(bus->dhd);
2767
2768 if ((BCM4389_CHIP_GRPID == chipid) && (revid == 1)) {
2769 cis_start_addr = CIS_TUPLE_START_ADDRESS_89B0;
2770 }
2771 DHD_INFO(("%s : chipid :%u, revid %u\n", __FUNCTION__, chipid, revid));
2772 #endif /* BCM4389_CHIP_DEF */
2773 for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2774 /* Write new OTP and PMU configuration */
2775 if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2776 &boardtype_backplane_data[i], FALSE) != BCME_OK) {
2777 DHD_ERROR(("invalid size/addr combination\n"));
2778 return BCME_ERROR;
2779 }
2780
2781 if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2782 &int_val, TRUE) != BCME_OK) {
2783 DHD_ERROR(("invalid size/addr combination\n"));
2784 return BCME_ERROR;
2785 }
2786
2787 DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2788 __FUNCTION__, boardtype_backplane_addr[i], int_val));
2789 }
2790
2791 /* read tuple raw data */
2792 for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2793 if (si_backplane_access(bus->sih, cis_start_addr + i * sizeof(uint32),
2794 sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
2795 break;
2796 }
2797 DHD_INFO(("%s: tuple index %d, raw data 0x%08x\n", __FUNCTION__, i, raw_data[i]));
2798 }
2799
2800 totlen = i * sizeof(uint32);
2801 tuple = (cis_tuple_format_t *)raw_data;
2802
2803 /* check the first tuple has tag 'start' */
2804 if (tuple->id != CIS_TUPLE_TAG_START) {
2805 DHD_ERROR(("%s: Can not find the TAG\n", __FUNCTION__));
2806 return BCME_ERROR;
2807 }
2808
2809 *vid_length = *boardtype = 0;
2810
2811 /* find tagged parameter */
2812 while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2813 (*vid_length == 0 || *boardtype == 0)) {
2814 len = tuple->len;
2815
2816 if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2817 (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2818 /* found VID */
2819 memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2820 *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2821 prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2822 }
2823 else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2824 (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2825 /* found boardtype */
2826 *boardtype = (int)tuple->data[0];
2827 prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2828 }
2829
2830 tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2831 totlen -= (len + CIS_TUPLE_HDR_LEN);
2832 }
2833
2834 if (*vid_length <= 0 || *boardtype <= 0) {
2835 DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2836 *vid_length, *boardtype));
2837 return BCME_ERROR;
2838 }
2839
2840 return BCME_OK;
2841
2842 }
2843
2844 static naming_info_t *
2845 dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2846 dhd_bus_t *bus, bool *is_murata_fem)
2847 {
2848 int board_type = 0, chip_rev = 0, vid_length = 0;
2849 unsigned char vid[MAX_VID_LEN];
2850 naming_info_t *info = &table[0];
2851 char *cid_info = NULL;
2852
2853 if (!bus || !bus->sih) {
2854 DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2855 return NULL;
2856 }
2857 chip_rev = bus->sih->chiprev;
2858
2859 if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
2860 != BCME_OK) {
2861 DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2862 return NULL;
2863 }
2864
2865 DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2866
2867 #if defined(BCM4361_CHIP)
2868 /* A0 chipset has exception only */
2869 if (chip_rev == CHIP_REV_A0) {
2870 if (board_type == BOARD_TYPE_EPA) {
2871 info = dhd_find_naming_info(table, table_size,
2872 DEFAULT_CIDINFO_FOR_EPA);
2873 } else if ((board_type == BOARD_TYPE_IPA) ||
2874 (board_type == BOARD_TYPE_IPA_OLD)) {
2875 info = dhd_find_naming_info(table, table_size,
2876 DEFAULT_CIDINFO_FOR_IPA);
2877 }
2878 } else {
2879 cid_info = dhd_get_cid_info(vid, vid_length);
2880 if (cid_info) {
2881 info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2882 if (strstr(cid_info, CID_FEM_MURATA)) {
2883 *is_murata_fem = TRUE;
2884 }
2885 }
2886 }
2887 #else
2888 cid_info = dhd_get_cid_info(vid, vid_length);
2889 if (cid_info) {
2890 info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2891 if (strstr(cid_info, CID_FEM_MURATA)) {
2892 *is_murata_fem = TRUE;
2893 }
2894 }
2895 #endif /* BCM4361_CHIP */
2896
2897 return info;
2898 }
2899 #endif /* BCM4361_CHIP || BCM4375_CHIP || BCM4389_CHIP_DEF */
2900 #endif /* USE_CID_CHECK */
2901
2902 static int
2903 concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2904 {
2905 int ret = BCME_OK;
2906 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2907 char module_type[MAX_VNAME_LEN];
2908 naming_info_t *info = NULL;
2909 bool is_murata_fem = FALSE;
2910
2911 memset(module_type, 0, sizeof(module_type));
2912
2913 if (dhd_check_module_bcm(module_type,
2914 MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2915 info = dhd_find_naming_info(bcm4361_naming_table,
2916 ARRAYSIZE(bcm4361_naming_table), module_type);
2917 } else {
2918 /* in case the .cid.info file does not exist */
2919 info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2920 ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2921 }
2922
2923 if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
2924 is_murata_fem = FALSE;
2925 }
2926
2927 if (info) {
2928 if (is_murata_fem) {
2929 strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2930 }
2931 strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2932 strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2933 } else {
2934 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2935 ret = BCME_ERROR;
2936 }
2937 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2938 char chipver_tag[10] = {0, };
2939
2940 strcat(fw_path, chipver_tag);
2941 strcat(nv_path, chipver_tag);
2942 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2943
2944 return ret;
2945 }
2946
2947 static int
2948 concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
2949 {
2950 int ret = BCME_OK;
2951 #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2952 char module_type[MAX_VNAME_LEN];
2953 naming_info_t *info = NULL;
2954 bool is_murata_fem = FALSE;
2955
2956 memset(module_type, 0, sizeof(module_type));
2957
2958 if (dhd_check_module_bcm(module_type,
2959 MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
2960 info = dhd_find_naming_info(bcm4375_naming_table,
2961 ARRAYSIZE(bcm4375_naming_table), module_type);
2962 } else {
2963 /* in case the .cid.info file does not exist */
2964 info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
2965 ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
2966 }
2967
2968 if (info) {
2969 strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2970 strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2971 } else {
2972 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2973 ret = BCME_ERROR;
2974 }
2975 #else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2976 char chipver_tag[10] = {0, };
2977
2978 strcat(fw_path, chipver_tag);
2979 strcat(nv_path, chipver_tag);
2980 #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2981
2982 return ret;
2983 }
2984
2985 static int
2986 concate_revision_bcm4389(dhd_bus_t *bus, char *fw_path, char *nv_path)
2987 {
2988 int ret = BCME_OK;
2989 #if defined(SUPPORT_BCM4389_MIXED_MODULES) && defined(USE_CID_CHECK)
2990 char module_type[MAX_VNAME_LEN];
2991 naming_info_t *info = NULL;
2992 bool is_murata_fem = FALSE;
2993
2994 memset(module_type, 0, sizeof(module_type));
2995
2996 if (dhd_check_module_bcm(module_type,
2997 MODULE_BCM4389_INDEX, &is_murata_fem) == BCME_OK) {
2998 info = dhd_find_naming_info(bcm4389_naming_table,
2999 ARRAYSIZE(bcm4389_naming_table), module_type);
3000 } else {
3001 /* in case the .cid.info file does not exist */
3002 info = dhd_find_naming_info_by_chip_rev(bcm4389_naming_table,
3003 ARRAYSIZE(bcm4389_naming_table), bus, &is_murata_fem);
3004 }
3005
3006 if (info) {
3007 strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
3008 strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
3009 } else {
3010 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
3011 ret = BCME_ERROR;
3012 }
3013 #else /* SUPPORT_BCM4389_MIXED_MODULES && USE_CID_CHECK */
3014 char chipver_tag[10] = {0, };
3015
3016 strcat(fw_path, chipver_tag);
3017 strcat(nv_path, chipver_tag);
3018 #endif /* SUPPORT_BCM4389_MIXED_MODULES && USE_CID_CHECK */
3019 return ret;
3020 }
3021
3022 int
3023 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
3024 {
3025 int res = 0;
3026
3027 if (!bus || !bus->sih) {
3028 DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
3029 return -1;
3030 }
3031
3032 if (!fw_path || !nv_path) {
3033 DHD_ERROR(("fw_path or nv_path is null.\n"));
3034 return res;
3035 }
3036
3037 switch (si_chipid(bus->sih)) {
3038
3039 case BCM43569_CHIP_ID:
3040 case BCM4358_CHIP_ID:
3041 res = concate_revision_bcm4358(bus, fw_path, nv_path);
3042 break;
3043 case BCM4355_CHIP_ID:
3044 case BCM4359_CHIP_ID:
3045 res = concate_revision_bcm4359(bus, fw_path, nv_path);
3046 break;
3047 case BCM4361_CHIP_ID:
3048 case BCM4347_CHIP_ID:
3049 res = concate_revision_bcm4361(bus, fw_path, nv_path);
3050 break;
3051 case BCM4375_CHIP_ID:
3052 res = concate_revision_bcm4375(bus, fw_path, nv_path);
3053 break;
3054 case BCM4389_CHIP_ID:
3055 res = concate_revision_bcm4389(bus, fw_path, nv_path);
3056 break;
3057 default:
3058 DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
3059 return res;
3060 }
3061
3062 return res;
3063 }
3064 #endif /* SUPPORT_MULTIPLE_REVISION */
3065
3066 uint16
3067 dhd_get_chipid(struct dhd_bus *bus)
3068 {
3069 if (bus && bus->sih) {
3070 return (uint16)si_chipid(bus->sih);
3071 } else if (bus && bus->regs) {
3072 chipcregs_t *cc = (chipcregs_t *)bus->regs;
3073 uint w, chipid;
3074
3075 /* Set bar0 window to si_enum_base */
3076 dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(0));
3077
3078 w = R_REG(bus->osh, &cc->chipid);
3079 chipid = w & CID_ID_MASK;
3080
3081 return (uint16)chipid;
3082 } else {
3083 return 0;
3084 }
3085 }
3086
3087 /**
3088 * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
3089 *
3090 * BCM_REQUEST_FW specific :
3091 * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
3092 * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file.
3093 *
3094 * BCMEMBEDIMAGE specific:
3095 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3096 * file will be used instead.
3097 *
3098 * @return BCME_OK on success
3099 */
3100 int
3101 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
3102 char *pfw_path, char *pnv_path,
3103 char *pclm_path, char *pconf_path)
3104 {
3105 int ret;
3106
3107 bus->fw_path = pfw_path;
3108 bus->nv_path = pnv_path;
3109 bus->dhd->clm_path = pclm_path;
3110 bus->dhd->conf_path = pconf_path;
3111
3112 #if defined(SUPPORT_MULTIPLE_REVISION)
3113 if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
3114 DHD_ERROR(("%s: failed to concatenate revision\n",
3115 __FUNCTION__));
3116 /* Proceed if SUPPORT_MULTIPLE_CHIPS is enabled */
3117 #ifndef SUPPORT_MULTIPLE_CHIPS
3118 return BCME_BADARG;
3119 #endif /* !SUPPORT_MULTIPLE_CHIPS */
3120 }
3121 #endif /* SUPPORT_MULTIPLE_REVISION */
3122
3123 #if defined(DHD_BLOB_EXISTENCE_CHECK)
3124 dhd_set_blob_support(bus->dhd, bus->fw_path);
3125 #endif /* DHD_BLOB_EXISTENCE_CHECK */
3126
3127 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
3128 __FUNCTION__, bus->fw_path, bus->nv_path));
3129 dhdpcie_dump_resource(bus);
3130
3131 ret = dhdpcie_download_firmware(bus, osh);
3132
3133 return ret;
3134 }
3135
3136 void
3137 dhd_set_bus_params(struct dhd_bus *bus)
3138 {
3139 if (bus->dhd->conf->dhd_poll >= 0) {
3140 bus->poll = bus->dhd->conf->dhd_poll;
3141 if (!bus->pollrate)
3142 bus->pollrate = 1;
3143 printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
3144 }
3145 }
3146
3147 /**
3148 * Loads firmware given by 'bus->fw_path' into PCIe dongle.
3149 *
3150 * BCM_REQUEST_FW specific :
3151 * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
3152 * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file.
3153 *
3154 * BCMEMBEDIMAGE specific:
3155 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3156 * file will be used instead.
3157 *
3158 * @return BCME_OK on success
3159 */
3160 static int
3161 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
3162 {
3163 int ret = 0;
3164 #if defined(BCM_REQUEST_FW)
3165 uint chipid = bus->sih->chip;
3166 uint revid = bus->sih->chiprev;
3167 char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
3168 char nv_path[64]; /* path to nvram vars file */
3169 bus->fw_path = fw_path;
3170 bus->nv_path = nv_path;
3171 switch (chipid) {
3172 case BCM43570_CHIP_ID:
3173 bcmstrncat(fw_path, "43570", 5);
3174 switch (revid) {
3175 case 0:
3176 bcmstrncat(fw_path, "a0", 2);
3177 break;
3178 case 2:
3179 bcmstrncat(fw_path, "a2", 2);
3180 break;
3181 default:
3182 DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
3183 revid));
3184 break;
3185 }
3186 break;
3187 default:
3188 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
3189 chipid));
3190 return 0;
3191 }
3192 /* load board specific nvram file */
3193 snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
3194 /* load firmware */
3195 snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
3196 #endif /* BCM_REQUEST_FW */
3197
3198 DHD_OS_WAKE_LOCK(bus->dhd);
3199
3200 dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
3201 dhd_set_bus_params(bus);
3202
3203 ret = _dhdpcie_download_firmware(bus);
3204
3205 DHD_OS_WAKE_UNLOCK(bus->dhd);
3206 return ret;
3207 } /* dhdpcie_download_firmware */
3208
3209 /**
3210 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
3211 * is updated with the event logging partitions within that file as well.
3212 *
3213 * @param pfw_path Path to .bin or .bea file
3214 */
3215 static int
3216 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
3217 {
3218 int bcmerror = BCME_ERROR;
3219 int offset = 0;
3220 int len = 0;
3221 bool store_reset;
3222 char *imgbuf = NULL; /**< XXX a file pointer, contradicting its name and type */
3223 uint8 *memblock = NULL, *memptr = NULL;
3224 uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
3225 int offset_end = bus->ramsize;
3226 uint32 file_size = 0, read_len = 0;
3227
3228 #if defined(DHD_FW_MEM_CORRUPTION)
3229 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
3230 dhd_tcm_test_enable = TRUE;
3231 } else {
3232 dhd_tcm_test_enable = FALSE;
3233 }
3234 #endif /* DHD_FW_MEM_CORRUPTION */
3235 DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
3236 /* TCM check */
3237 if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
3238 DHD_ERROR(("dhd_bus_tcm_test failed\n"));
3239 bcmerror = BCME_ERROR;
3240 goto err;
3241 }
3242 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
3243
3244 /* Should succeed in opening image if it is actually given through registry
3245 * entry or in module param.
3246 */
3247 imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
3248 if (imgbuf == NULL) {
3249 printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
3250 goto err;
3251 }
3252
3253 file_size = dhd_os_get_image_size(imgbuf);
3254 if (!file_size) {
3255 DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
3256 goto err;
3257 }
3258
3259 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
3260 if (memblock == NULL) {
3261 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
3262 bcmerror = BCME_NOMEM;
3263 goto err;
3264 }
3265 if (dhd_msg_level & DHD_TRACE_VAL) {
3266 memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
3267 if (memptr_tmp == NULL) {
3268 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
3269 goto err;
3270 }
3271 }
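	/* Align the working pointer up to the next DHD_SDALIGN boundary; the allocation above
	 * reserved DHD_SDALIGN extra bytes for exactly this adjustment.
	 */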
3272 if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
3273 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
3274 }
3275
3276 /* check if CR4/CA7 */
3277 store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
3278 si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
3279 /* Download image with MEMBLOCK size */
3280 while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
3281 if (len < 0) {
3282 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
3283 bcmerror = BCME_ERROR;
3284 goto err;
3285 }
3286
3287 read_len += len;
3288 if (read_len > file_size) {
3289 DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
3290 " file_size=%u truncating len to %d \n", __FUNCTION__,
3291 len, read_len, file_size, (len - (read_len - file_size))));
3292 len -= (read_len - file_size);
3293 }
3294
3295 /* if address is 0, store the reset instruction to be written in 0 */
3296 if (store_reset) {
3297 ASSERT(offset == 0);
3298 bus->resetinstr = *(((uint32*)memptr));
3299 /* Add start of RAM address to the address given by user */
3300 offset += bus->dongle_ram_base;
3301 offset_end += offset;
3302 store_reset = FALSE;
3303 }
3304
3305 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
3306 if (bcmerror) {
3307 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
3308 __FUNCTION__, bcmerror, MEMBLOCK, offset));
3309 goto err;
3310 }
3311
3312 if (dhd_msg_level & DHD_TRACE_VAL) {
3313 bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
3314 if (bcmerror) {
3315 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
3316 __FUNCTION__, bcmerror, MEMBLOCK, offset));
3317 goto err;
3318 }
3319 if (memcmp(memptr_tmp, memptr, len)) {
3320 DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
3321 goto err;
3322 } else
3323 DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
3324 }
3325 offset += MEMBLOCK;
3326
3327 if (offset >= offset_end) {
3328 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
3329 __FUNCTION__, offset, offset_end));
3330 bcmerror = BCME_ERROR;
3331 goto err;
3332 }
3333
3334 if (read_len >= file_size) {
3335 break;
3336 }
3337 }
3338 err:
3339 if (memblock) {
3340 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
3341 if (dhd_msg_level & DHD_TRACE_VAL) {
3342 if (memptr_tmp)
3343 MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
3344 }
3345 }
3346
3347 if (imgbuf) {
3348 dhd_os_close_image1(bus->dhd, imgbuf);
3349 }
3350
3351 return bcmerror;
3352 } /* dhdpcie_download_code_file */
3353
3354 #ifdef CUSTOMER_HW4_DEBUG
3355 #define MIN_NVRAMVARS_SIZE 128
3356 #endif /* CUSTOMER_HW4_DEBUG */
3357
3358 static int
3359 dhdpcie_download_nvram(struct dhd_bus *bus)
3360 {
3361 int bcmerror = BCME_ERROR;
3362 uint len;
3363 char * memblock = NULL;
3364 char *bufp;
3365 char *pnv_path;
3366 bool nvram_file_exists;
3367 bool nvram_uefi_exists = FALSE;
3368 bool local_alloc = FALSE;
3369 pnv_path = bus->nv_path;
3370
3371 nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
3372
3373 /* First try UEFI */
3374 len = MAX_NVRAMBUF_SIZE;
3375 dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
3376
3377 /* If UEFI empty, then read from file system */
3378 if ((len <= 0) || (memblock == NULL)) {
3379
3380 if (nvram_file_exists) {
3381 len = MAX_NVRAMBUF_SIZE;
3382 dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
3383 if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
3384 goto err;
3385 }
3386 }
3387 else {
3388 /* For SROM OTP no external file or UEFI required */
3389 bcmerror = BCME_OK;
3390 }
3391 } else {
3392 nvram_uefi_exists = TRUE;
3393 }
3394
3395 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
3396
3397 if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
3398 bufp = (char *) memblock;
3399
3400 {
3401 bufp[len] = 0;
3402 if (nvram_uefi_exists || nvram_file_exists) {
3403 len = process_nvram_vars(bufp, len);
3404 }
3405 }
3406
3407 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
3408 #ifdef CUSTOMER_HW4_DEBUG
3409 if (len < MIN_NVRAMVARS_SIZE) {
3410 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
3411 __FUNCTION__));
3412 bcmerror = BCME_ERROR;
3413 goto err;
3414 }
3415 #endif /* CUSTOMER_HW4_DEBUG */
3416
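		/* Pad the NVRAM image up to a 4-byte boundary and append a NUL terminator;
		 * the len + 1 passed to dhdpcie_downloadvars() accounts for that terminator.
		 */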
3417 if (len % 4) {
3418 len += 4 - (len % 4);
3419 }
3420 bufp += len;
3421 *bufp++ = 0;
3422 if (len)
3423 bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
3424 if (bcmerror) {
3425 DHD_ERROR(("%s: error downloading vars: %d\n",
3426 __FUNCTION__, bcmerror));
3427 }
3428 }
3429
3430 err:
3431 if (memblock) {
3432 if (local_alloc) {
3433 MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
3434 } else {
3435 dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
3436 }
3437 }
3438
3439 return bcmerror;
3440 }
3441
3442 /**
3443 * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
3444 *
3445 * BCMEMBEDIMAGE specific:
3446 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3447 * file will be used instead.
3448 *
3449 */
3450 static int
3451 _dhdpcie_download_firmware(struct dhd_bus *bus)
3452 {
3453 int bcmerror = -1;
3454
3455 bool embed = FALSE; /* download embedded firmware */
3456 bool dlok = FALSE; /* download firmware succeeded */
3457
3458 /* Out immediately if no image to download */
3459 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3460 DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
3461 return 0;
3462 }
3463 /* Keep arm in reset */
3464 if (dhdpcie_bus_download_state(bus, TRUE)) {
3465 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
3466 goto err;
3467 }
3468
3469 /* External image takes precedence if specified */
3470 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3471 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
3472 DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
3473 __LINE__));
3474 goto err;
3475 } else {
3476 embed = FALSE;
3477 dlok = TRUE;
3478 }
3479 }
3480
3481 BCM_REFERENCE(embed);
3482 if (!dlok) {
3483 DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
3484 goto err;
3485 }
3486
3487 /* EXAMPLE: nvram_array */
3488 /* If a valid nvram_array is specified as above, it can be passed down to the dongle */
3489 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3490
3491 /* External nvram takes precedence if specified */
3492 if (dhdpcie_download_nvram(bus)) {
3493 DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
3494 goto err;
3495 }
3496
3497 /* Take arm out of reset */
3498 if (dhdpcie_bus_download_state(bus, FALSE)) {
3499 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
3500 goto err;
3501 }
3502
3503 bcmerror = 0;
3504
3505 err:
3506 return bcmerror;
3507 } /* _dhdpcie_download_firmware */
3508
3509 static int
3510 dhdpcie_bus_readconsole(dhd_bus_t *bus)
3511 {
3512 dhd_console_t *c = &bus->console;
3513 uint8 line[CONSOLE_LINE_MAX], ch;
3514 uint32 n, idx, addr;
3515 int rv;
3516 uint readlen = 0;
3517 uint i = 0;
3518
3519 /* Don't do anything until FWREADY updates console address */
3520 if (bus->console_addr == 0)
3521 return -1;
3522
3523 /* Read console log struct */
3524 addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3525
3526 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
3527 return rv;
3528
3529 /* Allocate console buffer (one time only) */
3530 if (c->buf == NULL) {
3531 c->bufsize = ltoh32(c->log.buf_size);
3532 if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
3533 return BCME_NOMEM;
3534 DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
3535 }
3536 idx = ltoh32(c->log.idx);
3537
3538 /* Protect against corrupt value */
3539 if (idx > c->bufsize)
3540 return BCME_ERROR;
3541
3542 /* Skip reading the console buffer if the index pointer has not moved */
3543 if (idx == c->last)
3544 return BCME_OK;
3545
3546 DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
3547 idx, c->last));
3548
3549 	/* Read the console buffer data into a local buffer.
3550 	 * Read only the portion of the buffer that is needed, taking care to
3551 	 * handle wrap-around. The read pointer is 'c->last', the write pointer
3552 	 * is 'idx'.
3553 	 */
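	/* Example of the wrapped case (idx < c->last): with bufsize=0x100, last=0xF0 and
	 * idx=0x20, the oldest data [0xF0, 0x100) is read first, then [0x00, 0x20), giving
	 * readlen = 0x10 + 0x20 = 0x30 bytes in chronological order.
	 */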
3554 addr = ltoh32(c->log.buf);
3555
3556 /* wrap around case - write ptr < read ptr */
3557 if (idx < c->last) {
3558 /* from read ptr to end of buffer */
3559 readlen = c->bufsize - c->last;
3560 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3561 addr + c->last, c->buf, readlen)) < 0) {
3562 DHD_ERROR(("conlog: read error[1] ! \n"));
3563 return rv;
3564 }
3565 /* from beginning of buffer to write ptr */
3566 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3567 addr, c->buf + readlen,
3568 idx)) < 0) {
3569 DHD_ERROR(("conlog: read error[2] ! \n"));
3570 return rv;
3571 }
3572 readlen += idx;
3573 } else {
3574 /* non-wraparound case, write ptr > read ptr */
3575 readlen = (uint)idx - c->last;
3576 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3577 addr + c->last, c->buf, readlen)) < 0) {
3578 DHD_ERROR(("conlog: read error[3] ! \n"));
3579 return rv;
3580 }
3581 }
3582 /* update read ptr */
3583 c->last = idx;
3584
3585 /* now output the read data from the local buffer to the host console */
3586 while (i < readlen) {
3587 for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
3588 ch = c->buf[i];
3589 ++i;
3590 if (ch == '\n')
3591 break;
3592 line[n] = ch;
3593 }
3594
3595 if (n > 0) {
3596 if (line[n - 1] == '\r')
3597 n--;
3598 line[n] = 0;
3599 DHD_FWLOG(("CONSOLE: %s\n", line));
3600 }
3601 }
3602
3603 return BCME_OK;
3604
3605 } /* dhdpcie_bus_readconsole */
3606
3607 void
3608 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3609 {
3610 uint32 n, i;
3611 uint32 addr;
3612 char *console_buffer = NULL;
3613 uint32 console_ptr, console_size, console_index;
3614 uint8 line[CONSOLE_LINE_MAX], ch;
3615 int rv;
3616
3617 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3618
3619 if (bus->is_linkdown) {
3620 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
3621 return;
3622 }
3623
3624 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3625 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3626 (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
3627 goto exit;
3628 }
3629
3630 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3631 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3632 (uint8 *)&console_size, sizeof(console_size))) < 0) {
3633 goto exit;
3634 }
3635
3636 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3637 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3638 (uint8 *)&console_index, sizeof(console_index))) < 0) {
3639 goto exit;
3640 }
3641
3642 console_ptr = ltoh32(console_ptr);
3643 console_size = ltoh32(console_size);
3644 console_index = ltoh32(console_index);
3645
3646 if (console_size > CONSOLE_BUFFER_MAX ||
3647 !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3648 goto exit;
3649 }
3650
3651 if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3652 (uint8 *)console_buffer, console_size)) < 0) {
3653 goto exit;
3654 }
3655
3656 for (i = 0, n = 0; i < console_size; i += n + 1) {
3657 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
3658 ch = console_buffer[(console_index + i + n) % console_size];
3659 if (ch == '\n')
3660 break;
3661 line[n] = ch;
3662 }
3663
3664 if (n > 0) {
3665 if (line[n - 1] == '\r')
3666 n--;
3667 line[n] = 0;
3668 /* Don't use DHD_ERROR macro since we print
3669 * a lot of information quickly. The macro
3670 * will truncate a lot of the printfs
3671 */
3672
3673 DHD_FWLOG(("CONSOLE: %s\n", line));
3674 }
3675 }
3676
3677 exit:
3678 if (console_buffer)
3679 MFREE(bus->dhd->osh, console_buffer, console_size);
3680 return;
3681 }
3682
3683 static void
3684 dhdpcie_schedule_log_dump(dhd_bus_t *bus)
3685 {
3686 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
3687 log_dump_type_t *flush_type;
3688
3689 /* flush_type is freed at do_dhd_log_dump function */
3690 flush_type = MALLOCZ(bus->dhd->osh, sizeof(log_dump_type_t));
3691 if (flush_type) {
3692 *flush_type = DLD_BUF_TYPE_ALL;
3693 dhd_schedule_log_dump(bus->dhd, flush_type);
3694 } else {
3695 		DHD_ERROR(("%s: Failed to malloc flush_type\n", __FUNCTION__));
3696 }
3697 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
3698 }
3699
3700 /**
3701  * Checks whether the dongle has trapped or asserted; if so, reads the trap/assert
3702  * information from dongle memory, logs it and schedules memory/log dump collection.
3702 *
3703 * @return BCME_OK on success
3704 */
3705 static int
3706 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3707 {
3708 int bcmerror = 0;
3709 uint msize = 512;
3710 char *mbuffer = NULL;
3711 uint maxstrlen = 256;
3712 char *str = NULL;
3713 pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3714 struct bcmstrbuf strbuf;
3715 unsigned long flags;
3716 bool dongle_trap_occured = FALSE;
3717
3718 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3719
3720 if (DHD_NOCHECKDIED_ON()) {
3721 return 0;
3722 }
3723
3724 if (data == NULL) {
3725 /*
3726 		 * Called after an rx ctrl timeout; "data" is NULL here, so
3727 		 * allocate memory to trace the trap or assert.
3728 */
3729 size = msize;
3730 mbuffer = data = MALLOC(bus->dhd->osh, msize);
3731
3732 if (mbuffer == NULL) {
3733 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3734 bcmerror = BCME_NOMEM;
3735 goto done2;
3736 }
3737 }
3738
3739 if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3740 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3741 bcmerror = BCME_NOMEM;
3742 goto done2;
3743 }
3744 DHD_GENERAL_LOCK(bus->dhd, flags);
3745 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3746 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3747
3748 if (MULTIBP_ENAB(bus->sih)) {
3749 dhd_bus_pcie_pwr_req(bus);
3750 }
3751 if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3752 goto done1;
3753 }
3754
3755 bcm_binit(&strbuf, data, size);
3756
3757 bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
3758 local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
3759
3760 if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3761 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3762 * (Avoids conflict with real asserts for programmatic parsing of output.)
3763 */
3764 bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3765 }
3766
3767 if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
3768 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3769 * (Avoids conflict with real asserts for programmatic parsing of output.)
3770 */
3771 bcm_bprintf(&strbuf, "No trap%s in dongle",
3772 (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
3773 ?"/assrt" :"");
3774 } else {
3775 if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3776 /* Download assert */
3777 bcm_bprintf(&strbuf, "Dongle assert");
3778 if (bus->pcie_sh->assert_exp_addr != 0) {
3779 str[0] = '\0';
3780 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3781 bus->pcie_sh->assert_exp_addr,
3782 (uint8 *)str, maxstrlen)) < 0) {
3783 goto done1;
3784 }
3785
3786 str[maxstrlen - 1] = '\0';
3787 bcm_bprintf(&strbuf, " expr \"%s\"", str);
3788 }
3789
3790 if (bus->pcie_sh->assert_file_addr != 0) {
3791 str[0] = '\0';
3792 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3793 bus->pcie_sh->assert_file_addr,
3794 (uint8 *)str, maxstrlen)) < 0) {
3795 goto done1;
3796 }
3797
3798 str[maxstrlen - 1] = '\0';
3799 bcm_bprintf(&strbuf, " file \"%s\"", str);
3800 }
3801
3802 bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
3803 }
3804
3805 if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3806 trap_t *tr = &bus->dhd->last_trap_info;
3807 dongle_trap_occured = TRUE;
3808 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3809 bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
3810 bus->dhd->dongle_trap_occured = TRUE;
3811 goto done1;
3812 }
3813 dhd_bus_dump_trap_info(bus, &strbuf);
3814 }
3815 }
3816
3817 if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3818 DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
3819
3820 /* wake up IOCTL wait event */
3821 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3822
3823 dhd_bus_dump_console_buffer(bus);
3824 dhd_prot_debug_info_print(bus->dhd);
3825
3826 #if defined(DHD_FW_COREDUMP)
3827 /* save core dump or write to a file */
3828 if (bus->dhd->memdump_enabled) {
3829 #ifdef DHD_SSSR_DUMP
3830 DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
3831 bus->dhd->collect_sssr = TRUE;
3832 #endif /* DHD_SSSR_DUMP */
3833 #ifdef DHD_SDTC_ETB_DUMP
3834 DHD_ERROR(("%s : Set collect_sdtc as TRUE\n", __FUNCTION__));
3835 bus->dhd->collect_sdtc = TRUE;
3836 #endif /* DHD_SDTC_ETB_DUMP */
3837 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3838 dhdpcie_mem_dump(bus);
3839 }
3840 #endif /* DHD_FW_COREDUMP */
3841
3842 		/* set the dongle_trap_occured flag only after all the memdump,
3843 		 * logdump and sssr dump collection has been scheduled
3844 */
3845 if (dongle_trap_occured) {
3846 bus->dhd->dongle_trap_occured = TRUE;
3847 }
3848
3849 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3850 copy_hang_info_trap(bus->dhd);
3851 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3852
3853 dhd_schedule_reset(bus->dhd);
3854
3855 }
3856
3857 done1:
3858 if (bcmerror) {
3859 		/* dhdpcie_checkdied is invoked only when the dongle has trapped,
3860 		 * after a PCIe link down, etc., so set dongle_trap_occured here so that
3861 		 * the log_dump logic can rely on the single flag dongle_trap_occured.
3862 */
3863 bus->dhd->dongle_trap_occured = TRUE;
3864 dhdpcie_schedule_log_dump(bus);
3865 }
3866 if (MULTIBP_ENAB(bus->sih)) {
3867 dhd_bus_pcie_pwr_req_clear(bus);
3868 }
3869
3870 DHD_GENERAL_LOCK(bus->dhd, flags);
3871 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3872 dhd_os_busbusy_wake(bus->dhd);
3873 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3874 done2:
3875 if (mbuffer)
3876 MFREE(bus->dhd->osh, mbuffer, msize);
3877 if (str)
3878 MFREE(bus->dhd->osh, str, maxstrlen);
3879
3880 return bcmerror;
3881 } /* dhdpcie_checkdied */
3882
3883 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3884 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3885 {
3886 int ret = 0;
3887 int size; /* Full mem size */
3888 int start; /* Start address */
3889 int read_size = 0; /* Read size of each iteration */
3890 uint8 *databuf = buf;
3891
3892 if (bus == NULL) {
3893 return;
3894 }
3895
3896 start = bus->dongle_ram_base;
3897 read_size = 4;
3898 /* check for dead bus */
3899 {
3900 uint test_word = 0;
3901 ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
3902 /* if read error or bus timeout */
3903 if (ret || (test_word == 0xFFFFFFFF)) {
3904 return;
3905 }
3906 }
3907
3908 /* Get full mem size */
3909 size = bus->ramsize;
3910 /* Read mem content */
3911 while (size)
3912 {
3913 read_size = MIN(MEMBLOCK, size);
3914 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3915 return;
3916 }
3917
3918 /* Decrement size and increment start address */
3919 size -= read_size;
3920 start += read_size;
3921 databuf += read_size;
3922 }
3923 bus->dhd->soc_ram = buf;
3924 bus->dhd->soc_ram_length = bus->ramsize;
3925 return;
3926 }
3927
3928 #if defined(DHD_FW_COREDUMP)
3929 static int
3930 dhdpcie_get_mem_dump(dhd_bus_t *bus)
3931 {
3932 int ret = BCME_OK;
3933 int size = 0;
3934 int start = 0;
3935 int read_size = 0; /* Read size of each iteration */
3936 uint8 *p_buf = NULL, *databuf = NULL;
3937 unsigned long flags_bus;
3938
3939 if (!bus) {
3940 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3941 return BCME_ERROR;
3942 }
3943
3944 if (!bus->dhd) {
3945 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
3946 return BCME_ERROR;
3947 }
3948
3949 size = bus->ramsize; /* Full mem size */
3950 start = bus->dongle_ram_base; /* Start address */
3951
3952 /* Get full mem size */
3953 p_buf = dhd_get_fwdump_buf(bus->dhd, size);
3954 if (!p_buf) {
3955 DHD_ERROR(("%s: Out of memory (%d bytes)\n",
3956 __FUNCTION__, size));
3957 return BCME_ERROR;
3958 }
3959
3960 /* Read mem content */
3961 DHD_TRACE_HW4(("Dump dongle memory\n"));
3962 databuf = p_buf;
3963
3964 /* Hold BUS_LP_STATE_LOCK to avoid simultaneous bus access */
3965 DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
3966 while (size > 0) {
3967 read_size = MIN(MEMBLOCK, size);
3968 ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
3969 if (ret) {
3970 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3971 #ifdef DHD_DEBUG_UART
3972 bus->dhd->memdump_success = FALSE;
3973 #endif /* DHD_DEBUG_UART */
3974 break;
3975 }
3976 DHD_TRACE(("."));
3977
3978 /* Decrement size and increment start address */
3979 size -= read_size;
3980 start += read_size;
3981 databuf += read_size;
3982 }
3983 DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
3984
3985 return ret;
3986 }
3987
3988 static int
3989 dhdpcie_mem_dump(dhd_bus_t *bus)
3990 {
3991 dhd_pub_t *dhdp;
3992 int ret;
3993 uint32 dhd_console_ms_prev = 0;
3994
3995 dhdp = bus->dhd;
3996 if (!dhdp) {
3997 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3998 return BCME_ERROR;
3999 }
4000 dhd_console_ms_prev = dhdp->dhd_console_ms;
4001 if (dhd_console_ms_prev) {
4002 		DHD_ERROR(("%s: Disabling console msgs(%u) before mem dump to local buf\n",
4003 __FUNCTION__, dhd_console_ms_prev));
4004 dhdp->dhd_console_ms = 0;
4005 }
4006
4007 #ifdef EXYNOS_PCIE_DEBUG
4008 exynos_pcie_register_dump(1);
4009 #endif /* EXYNOS_PCIE_DEBUG */
4010
4011 #ifdef SUPPORT_LINKDOWN_RECOVERY
4012 if (bus->is_linkdown) {
4013 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
4014 /* panic only for DUMP_MEMFILE_BUGON */
4015 ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
4016 return BCME_ERROR;
4017 }
4018 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4019
4020 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
4021 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
4022 return BCME_ERROR;
4023 }
4024
4025 /* Induce DB7 trap for below non-trap cases */
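	/* A DB7 trap is a host-induced firmware trap: forcing it here halts the firmware
	 * and captures trap state, so the memory dump below is taken from a consistent
	 * image even though no real trap occurred.
	 */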
4026 switch (dhdp->memdump_type) {
4027 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
4028 /* intentional fall through */
4029 case DUMP_TYPE_D3_ACK_TIMEOUT:
4030 /* intentional fall through */
4031 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
4032 /* intentional fall through */
4033 case DUMP_TYPE_PKTID_INVALID:
4034 /* intentional fall through */
4035 case DUMP_TYPE_SCAN_TIMEOUT:
4036 /* intentional fall through */
4037 case DUMP_TYPE_SCAN_BUSY:
4038 /* intentional fall through */
4039 case DUMP_TYPE_BY_LIVELOCK:
4040 /* intentional fall through */
4041 case DUMP_TYPE_IFACE_OP_FAILURE:
4042 /* intentional fall through */
4043 case DUMP_TYPE_PKTID_POOL_DEPLETED:
4044 /* intentional fall through */
4045 case DUMP_TYPE_ESCAN_SYNCID_MISMATCH:
4046 if (dhdp->db7_trap.fw_db7w_trap) {
4047 /* Set fw_db7w_trap_inprogress here and clear from DPC */
4048 dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE;
4049 dhdpcie_fw_trap(dhdp->bus);
4050 OSL_DELAY(100 * 1000); // wait 100 msec
4051 } else {
4052 DHD_ERROR(("%s: DB7 Not supported!!!\n",
4053 __FUNCTION__));
4054 }
4055 break;
4056 default:
4057 break;
4058 }
4059
4060 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4061 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
4062 return BCME_ERROR;
4063 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4064
4065 ret = dhdpcie_get_mem_dump(bus);
4066 if (ret) {
4067 DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
4068 __FUNCTION__, ret));
4069 return ret;
4070 }
4071 #ifdef DHD_DEBUG_UART
4072 bus->dhd->memdump_success = TRUE;
4073 #endif /* DHD_DEBUG_UART */
4074
4075 if (dhd_console_ms_prev) {
4076 		DHD_ERROR(("%s: re-enable console msgs(%u) after collecting memdump to local buf\n",
4077 __FUNCTION__, dhd_console_ms_prev));
4078 dhdp->dhd_console_ms = dhd_console_ms_prev;
4079 }
4080
4081 dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
4082 	/* The buffer (soc_ram) is freed later in dhd_free()/dhd_clear() */
4083
4084 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4085 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
4086 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
4087 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4088
4089 return ret;
4090 }
4091
4092 int
4093 dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
4094 {
4095 if (!dhdp) {
4096 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
4097 return BCME_ERROR;
4098 }
4099
4100 return dhdpcie_get_mem_dump(dhdp->bus);
4101 }
4102
4103 int
4104 dhd_bus_mem_dump(dhd_pub_t *dhdp)
4105 {
4106 dhd_bus_t *bus = dhdp->bus;
4107 int ret = BCME_ERROR;
4108
4109 if (dhdp->busstate == DHD_BUS_DOWN) {
4110 DHD_ERROR(("%s bus is down\n", __FUNCTION__));
4111 return BCME_ERROR;
4112 }
4113
4114 /* Try to resume if already suspended or suspend in progress */
4115 #ifdef DHD_PCIE_RUNTIMEPM
4116 dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
4117 #endif /* DHD_PCIE_RUNTIMEPM */
4118
4119 /* Skip if still in suspended or suspend in progress */
4120 if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
4121 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
4122 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
4123 return BCME_ERROR;
4124 }
4125
4126 DHD_OS_WAKE_LOCK(dhdp);
4127 ret = dhdpcie_mem_dump(bus);
4128 DHD_OS_WAKE_UNLOCK(dhdp);
4129 return ret;
4130 }
4131 #endif /* DHD_FW_COREDUMP */
4132
4133 int
4134 dhd_socram_dump(dhd_bus_t *bus)
4135 {
4136 #if defined(DHD_FW_COREDUMP)
4137 DHD_OS_WAKE_LOCK(bus->dhd);
4138 dhd_bus_mem_dump(bus->dhd);
4139 DHD_OS_WAKE_UNLOCK(bus->dhd);
4140 return 0;
4141 #else
4142 return -1;
4143 #endif
4144 }
4145
4146 /**
4147  * Transfers bytes between host and dongle (read when 'write' is FALSE, write when TRUE) using PIO mode.
4148 * Parameter 'address' is a backplane address.
4149 */
4150 static int
4151 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
4152 {
4153 uint dsize;
4154 int detect_endian_flag = 0x01;
4155 bool little_endian;
4156
4157 if (write && bus->is_linkdown) {
4158 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4159 return BCME_ERROR;
4160 }
4161
4162 if (MULTIBP_ENAB(bus->sih)) {
4163 dhd_bus_pcie_pwr_req(bus);
4164 }
4165 	/* Detect endianness: the first byte of the 0x01 flag is non-zero only on a little-endian host. */
4166 little_endian = *(char *)&detect_endian_flag;
4167
4168 /* In remap mode, adjust address beyond socram and redirect
4169 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
4170 * is not backplane accessible
4171 */
4172
4173 /* Determine initial transfer parameters */
4174 #ifdef DHD_SUPPORT_64BIT
4175 dsize = sizeof(uint64);
4176 #else /* !DHD_SUPPORT_64BIT */
4177 dsize = sizeof(uint32);
4178 #endif /* DHD_SUPPORT_64BIT */
4179
4180 	/* Do the transfer(s): use word-sized accesses where alignment and remaining size allow, else single bytes */
4181 DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
4182 __FUNCTION__, (write ? "write" : "read"), size, address));
4183 if (write) {
4184 while (size) {
4185 #ifdef DHD_SUPPORT_64BIT
4186 if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
4187 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
4188 }
4189 #else /* !DHD_SUPPORT_64BIT */
4190 if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
4191 dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
4192 }
4193 #endif /* DHD_SUPPORT_64BIT */
4194 else {
4195 dsize = sizeof(uint8);
4196 dhdpcie_bus_wtcm8(bus, address, *data);
4197 }
4198
4199 /* Adjust for next transfer (if any) */
4200 if ((size -= dsize)) {
4201 data += dsize;
4202 address += dsize;
4203 }
4204 }
4205 } else {
4206 while (size) {
4207 #ifdef DHD_SUPPORT_64BIT
4208 if (size >= sizeof(uint64) && little_endian && !(address % 8))
4209 {
4210 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
4211 }
4212 #else /* !DHD_SUPPORT_64BIT */
4213 if (size >= sizeof(uint32) && little_endian && !(address % 4))
4214 {
4215 *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
4216 }
4217 #endif /* DHD_SUPPORT_64BIT */
4218 else {
4219 dsize = sizeof(uint8);
4220 *data = dhdpcie_bus_rtcm8(bus, address);
4221 }
4222
4223 /* Adjust for next transfer (if any) */
4224 if ((size -= dsize) > 0) {
4225 data += dsize;
4226 address += dsize;
4227 }
4228 }
4229 }
4230 if (MULTIBP_ENAB(bus->sih)) {
4231 dhd_bus_pcie_pwr_req_clear(bus);
4232 }
4233 return BCME_OK;
4234 } /* dhdpcie_bus_membytes */
4235
4236 /**
4237 * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
4238 * to the (non flow controlled) flow ring.
4239 */
4240 int
4241 BCMFASTPATH(dhd_bus_schedule_queue)(struct dhd_bus *bus, uint16 flow_id, bool txs)
4242 /** XXX function name could be more descriptive, eg use 'tx' and 'flow ring' in name */
4243 {
4244 flow_ring_node_t *flow_ring_node;
4245 int ret = BCME_OK;
4246 #ifdef DHD_LOSSLESS_ROAMING
4247 dhd_pub_t *dhdp = bus->dhd;
4248 #endif
4249
4250 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
4251
4252 /* ASSERT on flow_id */
4253 if (flow_id >= bus->max_submission_rings) {
4254 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
4255 flow_id, bus->max_submission_rings));
4256 return 0;
4257 }
4258
4259 flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
4260
4261 if (flow_ring_node->prot_info == NULL) {
4262 DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
4263 return BCME_NOTREADY;
4264 }
4265
4266 #ifdef DHD_LOSSLESS_ROAMING
4267 if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
4268 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
4269 __FUNCTION__, flow_ring_node->flow_info.tid));
4270 return BCME_OK;
4271 }
4272 #endif /* DHD_LOSSLESS_ROAMING */
4273
4274 {
4275 unsigned long flags;
4276 void *txp = NULL;
4277 flow_queue_t *queue;
4278 #ifdef DHD_LOSSLESS_ROAMING
4279 struct ether_header *eh;
4280 uint8 *pktdata;
4281 #endif /* DHD_LOSSLESS_ROAMING */
4282
4283 queue = &flow_ring_node->queue; /* queue associated with flow ring */
4284
4285 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4286
4287 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
4288 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4289 return BCME_NOTREADY;
4290 }
4291
4292 while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4293 if (bus->dhd->conf->orphan_move <= 1)
4294 PKTORPHAN(txp, bus->dhd->conf->tsq);
4295
4296 /*
4297 * Modifying the packet length caused P2P cert failures.
4298 * Specifically on test cases where a packet of size 52 bytes
4299 * was injected, the sniffer capture showed 62 bytes because of
4300 * which the cert tests failed. So making the below change
4301 * only Router specific.
4302 */
4303
4304 #ifdef DHDTCPACK_SUPPRESS
4305 if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
4306 ret = dhd_tcpack_check_xmit(bus->dhd, txp);
4307 if (ret != BCME_OK) {
4308 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
4309 __FUNCTION__));
4310 }
4311 }
4312 #endif /* DHDTCPACK_SUPPRESS */
4313 #ifdef DHD_LOSSLESS_ROAMING
4314 pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
4315 eh = (struct ether_header *) pktdata;
4316 if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
4317 uint8 prio = (uint8)PKTPRIO(txp);
4318 /* Restore to original priority for 802.1X packet */
4319 if (prio == PRIO_8021D_NC) {
4320 PKTSETPRIO(txp, dhdp->prio_8021x);
4321 }
4322 }
4323 #endif /* DHD_LOSSLESS_ROAMING */
4324 /* Attempt to transfer packet over flow ring */
4325 /* XXX: ifidx is wrong */
4326 ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
4327 if (ret != BCME_OK) { /* may not have resources in flow ring */
4328 				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
4329 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4330 /* reinsert at head */
4331 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
4332 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4333
4334 /* If we are able to requeue back, return success */
4335 return BCME_OK;
4336 }
4337
4338 #ifdef DHD_MEM_STATS
4339 DHD_MEM_STATS_LOCK(bus->dhd->mem_stats_lock, flags);
4340 bus->dhd->txpath_mem += PKTLEN(bus->dhd->osh, txp);
4341 DHD_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
4342 __FUNCTION__, bus->dhd->txpath_mem, PKTLEN(bus->dhd->osh, txp)));
4343 DHD_MEM_STATS_UNLOCK(bus->dhd->mem_stats_lock, flags);
4344 #endif /* DHD_MEM_STATS */
4345 }
4346
4347 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4348 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4349 }
4350
4351 return ret;
4352 } /* dhd_bus_schedule_queue */
4353
4354 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
4355 int
4356 BCMFASTPATH(dhd_bus_txdata)(struct dhd_bus *bus, void *txp, uint8 ifidx)
4357 {
4358 uint16 flowid;
4359 #ifdef IDLE_TX_FLOW_MGMT
4360 uint8 node_status;
4361 #endif /* IDLE_TX_FLOW_MGMT */
4362 flow_queue_t *queue;
4363 flow_ring_node_t *flow_ring_node;
4364 unsigned long flags;
4365 int ret = BCME_OK;
4366 void *txp_pend = NULL;
4367
4368 if (!bus->dhd->flowid_allocator) {
4369 		DHD_ERROR(("%s: Flow ring not initialized yet \n", __FUNCTION__));
4370 goto toss;
4371 }
4372
4373 flowid = DHD_PKT_GET_FLOWID(txp);
4374
4375 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4376
4377 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
4378 __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
4379
4380 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4381 if ((flowid > bus->dhd->max_tx_flowid) ||
4382 #ifdef IDLE_TX_FLOW_MGMT
4383 (!flow_ring_node->active))
4384 #else
4385 (!flow_ring_node->active) ||
4386 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
4387 (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
4388 #endif /* IDLE_TX_FLOW_MGMT */
4389 {
4390 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4391 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
4392 __FUNCTION__, flowid, flow_ring_node->status,
4393 flow_ring_node->active));
4394 ret = BCME_ERROR;
4395 goto toss;
4396 }
4397
4398 #ifdef IDLE_TX_FLOW_MGMT
4399 node_status = flow_ring_node->status;
4400
4401 	/* handle the different status states here */
4402 switch (node_status)
4403 {
4404 case FLOW_RING_STATUS_OPEN:
4405
4406 if (bus->enable_idle_flowring_mgmt) {
4407 /* Move the node to the head of active list */
4408 dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
4409 }
4410 break;
4411
4412 case FLOW_RING_STATUS_SUSPENDED:
4413 DHD_INFO(("Need to Initiate TX Flow resume\n"));
4414 /* Issue resume_ring request */
4415 dhd_bus_flow_ring_resume_request(bus,
4416 flow_ring_node);
4417 break;
4418
4419 case FLOW_RING_STATUS_CREATE_PENDING:
4420 case FLOW_RING_STATUS_RESUME_PENDING:
4421 			/* Don't do anything here */
4422 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4423 node_status));
4424 break;
4425
4426 case FLOW_RING_STATUS_DELETE_PENDING:
4427 default:
4428 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
4429 flowid, node_status));
4430 /* error here!! */
4431 ret = BCME_ERROR;
4432 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4433 goto toss;
4434 }
4435 /* Now queue the packet */
4436 #endif /* IDLE_TX_FLOW_MGMT */
4437
4438 queue = &flow_ring_node->queue; /* queue associated with flow ring */
4439
4440 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
4441 txp_pend = txp;
4442
4443 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4444
4445 if (flow_ring_node->status) {
4446 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
4447 __FUNCTION__, flowid, flow_ring_node->status,
4448 flow_ring_node->active));
4449 if (txp_pend) {
4450 txp = txp_pend;
4451 goto toss;
4452 }
4453 return BCME_OK;
4454 }
4455 ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
4456
4457 /* If we have anything pending, try to push into q */
4458 if (txp_pend) {
4459 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4460
4461 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
4462 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4463 txp = txp_pend;
4464 goto toss;
4465 }
4466
4467 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4468 }
4469
4470 return ret;
4471
4472 toss:
4473 DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
4474 PKTCFREE(bus->dhd->osh, txp, TRUE);
4475 return ret;
4476 } /* dhd_bus_txdata */
4477
4478 void
4479 dhd_bus_stop_queue(struct dhd_bus *bus)
4480 {
4481 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
4482 }
4483
4484 void
4485 dhd_bus_start_queue(struct dhd_bus *bus)
4486 {
4487 /*
4488 	 * The Tx queue was stopped either because of a resource shortage or
4489 	 * because the bus was not in a state to turn on.
4490 	 *
4491 	 * Note that the network interface is restarted only when enough
4492 	 * resources are available; the flag indicating that all the resources
4493 	 * are available must be updated first.
4494 */
4495 if (dhd_prot_check_tx_resource(bus->dhd)) {
4496 DHD_ERROR(("%s: Interface NOT started, previously stopped "
4497 "due to resource shortage\n", __FUNCTION__));
4498 return;
4499 }
4500 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
4501 }
4502
4503 /* Device console input function */
4504 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
4505 {
4506 dhd_bus_t *bus = dhd->bus;
4507 uint32 addr, val;
4508 int rv;
4509 #ifdef PCIE_INB_DW
4510 unsigned long flags = 0;
4511 #endif /* PCIE_INB_DW */
4512
4513 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
4514 if (bus->console_addr == 0)
4515 return BCME_UNSUPPORTED;
4516
4517 /* Don't allow input if dongle is in reset */
4518 if (bus->dhd->dongle_reset) {
4519 return BCME_NOTREADY;
4520 }
4521
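	/* Console input sequence: zero the dongle's cbuf_idx, copy the command into cbuf,
	 * publish its length via vcons_in, then ring H2D_HOST_CONS_INT so the firmware
	 * consumes the command.
	 */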
4522 /* Zero cbuf_index */
4523 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
4524 /* handle difference in definition of hnd_log_t in certain branches */
4525 if (dhd->wlc_ver_major < 14) {
4526 addr -= (uint32)sizeof(uint32);
4527 }
4528 val = htol32(0);
4529 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4530 goto done;
4531
4532 /* Write message into cbuf */
4533 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
4534 /* handle difference in definition of hnd_log_t in certain branches */
4535 if (dhd->wlc_ver_major < 14) {
4536 addr -= sizeof(uint32);
4537 }
4538 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
4539 goto done;
4540
4541 /* Write length into vcons_in */
4542 addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
4543 val = htol32(msglen);
4544 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4545 goto done;
4546
4547 #ifdef PCIE_INB_DW
4548 /* Use a lock to ensure this tx DEVICE_WAKE + tx H2D_HOST_CONS_INT sequence is
4549 * mutually exclusive with the rx D2H_DEV_DS_ENTER_REQ + tx H2D_HOST_DS_ACK sequence.
4550 */
4551 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
4552 #endif /* PCIE_INB_DW */
4553
4554 /* generate an interrupt to dongle to indicate that it needs to process cons command */
4555 dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
4556
4557 #ifdef PCIE_INB_DW
4558 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
4559 #endif /* PCIE_INB_DW */
4560 done:
4561 return rv;
4562 } /* dhd_bus_console_in */
4563
4564 /**
4565 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
4566 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
4567 */
4568 void
4569 BCMFASTPATH(dhd_bus_rx_frame)(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
4570 {
4571 dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
4572 }
4573
4574 /* Acquire/Release bar1_switch_lock only if the chip supports bar1 switching */
4575 #define DHD_BUS_BAR1_SWITCH_LOCK(bus, flags) \
4576 ((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_LOCK((bus)->bar1_switch_lock, flags) : \
4577 BCM_REFERENCE(flags)
4578
4579 #define DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags) \
4580 ((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_UNLOCK((bus)->bar1_switch_lock, flags) : \
4581 BCM_REFERENCE(flags)
4582
4583 /* Init/Deinit bar1_switch_lock only if the chip supports bar1 switching */
4584 static void
4585 dhd_init_bar1_switch_lock(dhd_bus_t *bus)
4586 {
4587 if (bus->bar1_switch_enab && !bus->bar1_switch_lock) {
4588 bus->bar1_switch_lock = osl_spin_lock_init(bus->osh);
4589 }
4590 }
4591
4592 static void
4593 dhd_deinit_bar1_switch_lock(dhd_bus_t *bus)
4594 {
4595 if (bus->bar1_switch_enab && bus->bar1_switch_lock) {
4596 osl_spin_lock_deinit(bus->osh, bus->bar1_switch_lock);
4597 bus->bar1_switch_lock = NULL;
4598 }
4599 }
4600
4601 /*
4602  * The bpwindow for any address is the address rounded down to a multiple of bar1_size.
4603  * For example, if addr=0x938fff and bar1_size is 0x400000, the address falls in the
4604  * window 0x800000-0xbfffff, so the window base to select is 0x800000.
4605  * This is achieved by masking off the low-order (bar1_size - 1) bits of the
4606  * given address.
4607 */
4608 #define DHD_BUS_BAR1_BPWIN(addr, bar1_size) \
4609 (uint32)((addr) & ~((bar1_size) - 1))
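/* Worked example: with bar1_size = 0x400000, DHD_BUS_BAR1_BPWIN(0x938fff, 0x400000)
 * yields 0x800000, and the offset used within the window is
 * 0x938fff - 0x800000 = 0x138fff (see dhdpcie_bus_chkandshift_bpoffset() below).
 */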
4610
4611 /**
4612 * dhdpcie_bar1_window_switch_enab
4613 *
4614 * Check if the chip requires BAR1 window switching based on
4615 * dongle_ram_base, ramsize and mapped bar1_size and sets
4616 * bus->bar1_switch_enab accordingly
4617 * @bus: dhd bus context
4618 *
4619 */
4620 void
4621 dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus)
4622 {
4623 uint32 ramstart = bus->dongle_ram_base;
4624 uint32 ramend = bus->dongle_ram_base + bus->ramsize - 1;
4625 uint32 bpwinstart = DHD_BUS_BAR1_BPWIN(ramstart, bus->bar1_size);
4626 uint32 bpwinend = DHD_BUS_BAR1_BPWIN(ramend, bus->bar1_size);
4627
4628 bus->bar1_switch_enab = FALSE;
4629
4630 /*
4631 * Window switch is needed to access complete BAR1
4632 * if bpwinstart and bpwinend are different
4633 */
4634 if (bpwinstart != bpwinend) {
4635 bus->bar1_switch_enab = TRUE;
4636 }
4637
4638 DHD_ERROR(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
4639 __FUNCTION__, bus->bar1_switch_enab, ramstart, ramend, bus->bar1_size));
4640 }
4641
4642 /**
4643 * dhdpcie_setbar1win
4644 *
4645  * OS-independent function that programs the BAR1 backplane window and records
4646  * the current window position.
4647  *
4648  * @bus: dhd bus context
4649  * @addr: new backplane window address for BAR1
4650 */
4651 static void
4652 dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
4653 {
4654 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, addr);
4655 bus->curr_bar1_win = addr;
4656 }
4657
4658 /**
4659 * dhdpcie_bus_chkandshift_bpoffset
4660 *
4661  * Check whether the provided address is within the current BAR1 window;
4662  * if not, shift the window.
4663 *
4664 * @bus: dhd bus context
4665 * @offset: back plane address that the caller wants to access
4666 *
4667 * Return: new offset for access
4668 */
4669 static ulong
4670 dhdpcie_bus_chkandshift_bpoffset(dhd_bus_t *bus, ulong offset)
4671 {
4672
4673 uint32 bpwin;
4674
4675 if (!bus->bar1_switch_enab) {
4676 return offset;
4677 }
4678
4679 /* Determine BAR1 backplane window using window size
4680 * Window address mask should be ~(size - 1)
4681 */
4682 bpwin = DHD_BUS_BAR1_BPWIN(offset, bus->bar1_size);
4683
4684 if (bpwin != bus->curr_bar1_win) {
4685 DHD_INFO(("%s: move BAR1 window curr_bar1_win=0x%x bpwin=0x%x offset=0x%lx\n",
4686 __FUNCTION__, bus->curr_bar1_win, bpwin, offset));
4687 /* Move BAR1 window */
4688 dhdpcie_setbar1win(bus, bpwin);
4689 }
4690
4691 return offset - bpwin;
4692 }
4693
4694 /** 'offset' is a backplane address */
4695 void
4696 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
4697 {
4698 ulong flags = 0;
4699
4700 if (bus->is_linkdown) {
4701 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4702 return;
4703 }
4704
4705 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4706
4707 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4708
4709 W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
4710
4711 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4712 }
4713
4714 void
4715 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
4716 {
4717 ulong flags = 0;
4718
4719 if (bus->is_linkdown) {
4720 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4721 return;
4722 }
4723
4724 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4725
4726 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4727
4728 W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
4729
4730 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4731 }
4732
4733 void
4734 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
4735 {
4736 ulong flags = 0;
4737
4738 if (bus->is_linkdown) {
4739 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4740 return;
4741 }
4742
4743 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4744
4745 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4746
4747 W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
4748
4749 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4750 }
4751
4752 #ifdef DHD_SUPPORT_64BIT
4753 void
4754 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
4755 {
4756 ulong flags = 0;
4757
4758 if (bus->is_linkdown) {
4759 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4760 return;
4761 }
4762
4763 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4764
4765 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4766
4767 W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
4768
4769 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4770 }
4771 #endif /* DHD_SUPPORT_64BIT */
4772
4773 uint8
4774 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
4775 {
4776 volatile uint8 data;
4777 ulong flags = 0;
4778
4779 if (bus->is_linkdown) {
4780 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4781 data = (uint8)-1;
4782 return data;
4783 }
4784
4785 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4786
4787 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4788
4789 data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
4790
4791 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4792 return data;
4793 }
4794
4795 uint16
4796 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
4797 {
4798 volatile uint16 data;
4799 ulong flags = 0;
4800
4801 if (bus->is_linkdown) {
4802 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4803 data = (uint16)-1;
4804 return data;
4805 }
4806
4807 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4808
4809 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4810
4811 data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
4812
4813 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4814 return data;
4815 }
4816
4817 uint32
4818 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
4819 {
4820 volatile uint32 data;
4821 ulong flags = 0;
4822
4823 if (bus->is_linkdown) {
4824 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4825 data = (uint32)-1;
4826 return data;
4827 }
4828
4829 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4830
4831 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4832
4833 data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
4834
4835 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4836 return data;
4837 }
4838
4839 #ifdef DHD_SUPPORT_64BIT
4840 uint64
4841 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
4842 {
4843 volatile uint64 data;
4844 ulong flags = 0;
4845
4846 if (bus->is_linkdown) {
4847 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4848 data = (uint64)-1;
4849 return data;
4850 }
4851
4852 DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);
4853
4854 offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);
4855
4856 data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
4857
4858 DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
4859 return data;
4860 }
4861 #endif /* DHD_SUPPORT_64BIT */
4862
4863 /** A snippet of dongle memory is shared between host and dongle */
4864 void
4865 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
4866 {
4867 uint64 long_data;
4868 ulong addr; /* dongle address */
4869
4870 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
4871
4872 if (bus->is_linkdown) {
4873 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4874 return;
4875 }
4876
4877 if (MULTIBP_ENAB(bus->sih)) {
4878 dhd_bus_pcie_pwr_req(bus);
4879 }
4880 switch (type) {
4881 case D2H_DMA_SCRATCH_BUF:
4882 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
4883 long_data = HTOL64(*(uint64 *)data);
4884 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4885 if (dhd_msg_level & DHD_INFO_VAL) {
4886 prhex(__FUNCTION__, data, len);
4887 }
4888 break;
4889
4890 case D2H_DMA_SCRATCH_BUF_LEN :
4891 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
4892 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4893 if (dhd_msg_level & DHD_INFO_VAL) {
4894 prhex(__FUNCTION__, data, len);
4895 }
4896 break;
4897
4898 case H2D_DMA_INDX_WR_BUF:
4899 long_data = HTOL64(*(uint64 *)data);
4900 addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
4901 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4902 if (dhd_msg_level & DHD_INFO_VAL) {
4903 prhex(__FUNCTION__, data, len);
4904 }
4905 break;
4906
4907 case H2D_DMA_INDX_RD_BUF:
4908 long_data = HTOL64(*(uint64 *)data);
4909 addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4910 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4911 if (dhd_msg_level & DHD_INFO_VAL) {
4912 prhex(__FUNCTION__, data, len);
4913 }
4914 break;
4915
4916 case D2H_DMA_INDX_WR_BUF:
4917 long_data = HTOL64(*(uint64 *)data);
4918 addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4919 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4920 if (dhd_msg_level & DHD_INFO_VAL) {
4921 prhex(__FUNCTION__, data, len);
4922 }
4923 break;
4924
4925 case D2H_DMA_INDX_RD_BUF:
4926 long_data = HTOL64(*(uint64 *)data);
4927 addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4928 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4929 if (dhd_msg_level & DHD_INFO_VAL) {
4930 prhex(__FUNCTION__, data, len);
4931 }
4932 break;
4933
4934 case H2D_IFRM_INDX_WR_BUF:
4935 long_data = HTOL64(*(uint64 *)data);
4936 addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4937 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4938 if (dhd_msg_level & DHD_INFO_VAL) {
4939 prhex(__FUNCTION__, data, len);
4940 }
4941 break;
4942
4943 case RING_ITEM_LEN :
4944 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4945 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4946 break;
4947
4948 case RING_MAX_ITEMS :
4949 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4950 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4951 break;
4952
4953 case RING_BUF_ADDR :
4954 long_data = HTOL64(*(uint64 *)data);
4955 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4956 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4957 if (dhd_msg_level & DHD_INFO_VAL) {
4958 prhex(__FUNCTION__, data, len);
4959 }
4960 break;
4961
4962 case RING_WR_UPD :
4963 addr = bus->ring_sh[ringid].ring_state_w;
4964 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4965 break;
4966
4967 case RING_RD_UPD :
4968 addr = bus->ring_sh[ringid].ring_state_r;
4969 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4970 break;
4971
4972 case D2H_MB_DATA:
4973 addr = bus->d2h_mb_data_ptr_addr;
4974 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4975 break;
4976
4977 case H2D_MB_DATA:
4978 addr = bus->h2d_mb_data_ptr_addr;
4979 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4980 break;
4981
4982 case HOST_API_VERSION:
4983 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4984 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4985 break;
4986
4987 case DNGL_TO_HOST_TRAP_ADDR:
4988 long_data = HTOL64(*(uint64 *)data);
4989 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4990 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4991 DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
4992 break;
4993
4994 case HOST_SCB_ADDR:
4995 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
4996 #ifdef DHD_SUPPORT_64BIT
4997 dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
4998 #else /* !DHD_SUPPORT_64BIT */
4999 dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
5000 #endif /* DHD_SUPPORT_64BIT */
5001 DHD_INFO(("Wrote host_scb_addr:0x%x\n",
5002 (uint32) HTOL32(*(uint32 *)data)));
5003 break;
5004
5005 default:
5006 break;
5007 }
5008 if (MULTIBP_ENAB(bus->sih)) {
5009 dhd_bus_pcie_pwr_req_clear(bus);
5010 }
5011 } /* dhd_bus_cmn_writeshared */
5012
5013 /** A snippet of dongle memory is shared between host and dongle */
5014 void
5015 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
5016 {
5017 ulong addr; /* dongle address */
5018
5019 if (MULTIBP_ENAB(bus->sih)) {
5020 dhd_bus_pcie_pwr_req(bus);
5021 }
5022 switch (type) {
5023 case RING_WR_UPD :
5024 addr = bus->ring_sh[ringid].ring_state_w;
5025 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
5026 break;
5027
5028 case RING_RD_UPD :
5029 addr = bus->ring_sh[ringid].ring_state_r;
5030 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
5031 break;
5032
5033 case TOTAL_LFRAG_PACKET_CNT :
5034 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
5035 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
5036 break;
5037
5038 case H2D_MB_DATA:
5039 addr = bus->h2d_mb_data_ptr_addr;
5040 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
5041 break;
5042
5043 case D2H_MB_DATA:
5044 addr = bus->d2h_mb_data_ptr_addr;
5045 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
5046 break;
5047
5048 case MAX_HOST_RXBUFS :
5049 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
5050 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
5051 break;
5052
5053 case HOST_SCB_ADDR:
5054 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
5055 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
5056 break;
5057
5058 default :
5059 break;
5060 }
5061 if (MULTIBP_ENAB(bus->sih)) {
5062 dhd_bus_pcie_pwr_req_clear(bus);
5063 }
5064 }
5065
5066 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
5067 {
5068 return ((pciedev_shared_t*)bus->pcie_sh)->flags;
5069 }
5070
5071 void
5072 dhd_bus_clearcounts(dhd_pub_t *dhdp)
5073 {
5074 }
5075
5076 /**
5077 * @param params input buffer, NULL for 'set' operation.
5078 * @param plen length of 'params' buffer, 0 for 'set' operation.
5079 * @param arg output buffer
5080 */
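/* Illustrative caller sketch (hypothetical, assuming an "intr" entry exists in the
 * dhdpcie_iovars table): read the bus 'intr' iovar into a local int.
 *
 *   int use_intr = 0;
 *   (void)dhd_bus_iovar_op(dhdp, "intr", NULL, 0, &use_intr, sizeof(use_intr), FALSE);
 */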
5081 int
5082 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
5083 void *params, uint plen, void *arg, uint len, bool set)
5084 {
5085 dhd_bus_t *bus = dhdp->bus;
5086 const bcm_iovar_t *vi = NULL;
5087 int bcmerror = BCME_UNSUPPORTED;
5088 uint val_size;
5089 uint32 actionid;
5090
5091 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5092
5093 ASSERT(name);
5094 if (!name)
5095 return BCME_BADARG;
5096
5097 /* Get MUST have return space */
5098 ASSERT(set || (arg && len));
5099 if (!(set || (arg && len)))
5100 return BCME_BADARG;
5101
5102 /* Set does NOT take qualifiers */
5103 ASSERT(!set || (!params && !plen));
5104 if (!(!set || (!params && !plen)))
5105 return BCME_BADARG;
5106
5107 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
5108 name, (set ? "set" : "get"), len, plen));
5109
5110 /* Look up var locally; if not found pass to host driver */
5111 if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
5112 goto exit;
5113 }
5114
5115 if (MULTIBP_ENAB(bus->sih)) {
5116 if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
5117 DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
5118 } else {
5119 dhd_bus_pcie_pwr_req(bus);
5120 }
5121 }
5122
5123 /* set up 'params' pointer in case this is a set command so that
5124 * the convenience int and bool code can be common to set and get
5125 */
5126 if (params == NULL) {
5127 params = arg;
5128 plen = len;
5129 }
5130
5131 if (vi->type == IOVT_VOID)
5132 val_size = 0;
5133 else if (vi->type == IOVT_BUFFER)
5134 val_size = len;
5135 else
5136 /* all other types are integer sized */
5137 val_size = sizeof(int);
5138
5139 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
5140 bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
5141
5142 exit:
5143 /* In DEVRESET_QUIESCE/DEVRESET_ON,
5144 	 * this includes a dongle re-attach, which initializes the pwr_req_ref count to 0 and
5145 	 * causes a pwr_req_ref count mismatch (and a hang) in the power request clear function.
5146 * In this case, bypass pwr req clear.
5147 */
5148 if (bcmerror == BCME_DNGL_DEVRESET) {
5149 bcmerror = BCME_OK;
5150 } else {
5151 if (MULTIBP_ENAB(bus->sih)) {
5152 if (vi != NULL) {
5153 if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
5154 DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
5155 } else {
5156 dhd_bus_pcie_pwr_req_clear(bus);
5157 }
5158 }
5159 }
5160 }
5161 return bcmerror;
5162 } /* dhd_bus_iovar_op */
5163
5164 #ifdef BCM_BUZZZ
5165 #include <bcm_buzzz.h>
5166
5167 int
5168 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
5169 const int num_counters)
5170 {
5171 int bytes = 0;
5172 uint32 ctr;
5173 uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
5174 uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
5175
5176 /* Compute elapsed counter values per counter event type */
5177 for (ctr = 0U; ctr < num_counters; ctr++) {
5178 prev[ctr] = core[ctr];
5179 curr[ctr] = *log++;
5180 core[ctr] = curr[ctr]; /* saved for next log */
5181
5182 if (curr[ctr] < prev[ctr])
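		/* The counters are free-running 32-bit values; if the current sample is below
		 * the previous one the counter wrapped, so compute the delta across the wrap.
		 */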
5183 delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
5184 else
5185 delta[ctr] = (curr[ctr] - prev[ctr]);
5186
5187 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
5188 }
5189
5190 return bytes;
5191 }
5192
5193 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
5194 uint32 u32;
5195 uint8 u8[4];
5196 struct {
5197 uint8 cpicnt;
5198 uint8 exccnt;
5199 uint8 sleepcnt;
5200 uint8 lsucnt;
5201 };
5202 } cm3_cnts_t;
5203
5204 int
5205 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
5206 {
5207 int bytes = 0;
5208
5209 uint32 cyccnt, instrcnt;
5210 cm3_cnts_t cm3_cnts;
5211 uint8 foldcnt;
5212
5213 { /* 32bit cyccnt */
5214 uint32 curr, prev, delta;
5215 prev = core[0]; curr = *log++; core[0] = curr;
5216 if (curr < prev)
5217 delta = curr + (~0U - prev);
5218 else
5219 delta = (curr - prev);
5220
5221 bytes += sprintf(p + bytes, "%12u ", delta);
5222 cyccnt = delta;
5223 }
5224
5225 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
5226 int i;
5227 uint8 max8 = ~0;
5228 cm3_cnts_t curr, prev, delta;
5229 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
5230 for (i = 0; i < 4; i++) {
5231 if (curr.u8[i] < prev.u8[i])
5232 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
5233 else
5234 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
5235 bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
5236 }
5237 cm3_cnts.u32 = delta.u32;
5238 }
5239
5240 { /* Extract the foldcnt from arg0 */
5241 uint8 curr, prev, delta, max8 = ~0;
5242 bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
5243 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
5244 if (curr < prev)
5245 delta = curr + (max8 - prev);
5246 else
5247 delta = (curr - prev);
5248 bytes += sprintf(p + bytes, "%4u ", delta);
5249 foldcnt = delta;
5250 }
5251
5252 instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
5253 + cm3_cnts.u8[3]) + foldcnt;
5254 if (instrcnt > 0xFFFFFF00)
5255 bytes += sprintf(p + bytes, "[%10s] ", "~");
5256 else
5257 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
5258 return bytes;
5259 }
5260
5261 int
5262 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
5263 {
5264 int bytes = 0;
5265 bcm_buzzz_arg0_t arg0;
5266 static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
5267
5268 if (buzzz->counters == 6) {
5269 bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
5270 log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
5271 } else {
5272 bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
5273 log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
5274 }
5275
5276 /* Dump the logged arguments using the registered formats */
5277 arg0.u32 = *log++;
5278
5279 switch (arg0.klog.args) {
5280 case 0:
5281 bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
5282 break;
5283 case 1:
5284 {
5285 uint32 arg1 = *log++;
5286 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
5287 break;
5288 }
5289 case 2:
5290 {
5291 uint32 arg1, arg2;
5292 arg1 = *log++; arg2 = *log++;
5293 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
5294 break;
5295 }
5296 case 3:
5297 {
5298 uint32 arg1, arg2, arg3;
5299 arg1 = *log++; arg2 = *log++; arg3 = *log++;
5300 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
5301 break;
5302 }
5303 case 4:
5304 {
5305 uint32 arg1, arg2, arg3, arg4;
5306 arg1 = *log++; arg2 = *log++;
5307 arg3 = *log++; arg4 = *log++;
5308 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
5309 break;
5310 }
5311 default:
5312 			printf("%s: Maximum of four arguments supported\n", __FUNCTION__);
5313 break;
5314 }
5315
5316 bytes += sprintf(p + bytes, "\n");
5317
5318 return bytes;
5319 }
5320
5321 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
5322 {
5323 int i;
5324 uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
5325 void * log;
5326
5327 for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
5328 core[i] = 0;
5329 }
5330
5331 log_sz = buzzz_p->log_sz;
5332
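	/* part1 = entries between the start of the log and the current write cursor; when
	 * the log has wrapped, part2 = entries from the cursor to the end of the buffer
	 * (the older records), which are printed first below.
	 */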
5333 part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
5334
5335 if (buzzz_p->wrap == TRUE) {
5336 part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
5337 total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
5338 } else {
5339 part2 = 0U;
5340 total = buzzz_p->count;
5341 }
5342
5343 if (total == 0U) {
5344 printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
5345 return;
5346 } else {
5347 printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
5348 total, part2, part1);
5349 }
5350
5351 if (part2) { /* with wrap */
5352 log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
5353 while (part2--) { /* from cur to end : part2 */
5354 p[0] = '\0';
5355 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
5356 printf("%s", p);
5357 log = (void*)((size_t)log + buzzz_p->log_sz);
5358 }
5359 }
5360
5361 log = (void*)buffer_p;
5362 while (part1--) {
5363 p[0] = '\0';
5364 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
5365 printf("%s", p);
5366 log = (void*)((size_t)log + buzzz_p->log_sz);
5367 }
5368
5369 printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
5370 }
5371
5372 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
5373 {
5374 bcm_buzzz_t * buzzz_p = NULL;
5375 void * buffer_p = NULL;
5376 char * page_p = NULL;
5377 pciedev_shared_t *sh;
5378 int ret = 0;
5379
5380 if (bus->dhd->busstate != DHD_BUS_DATA) {
5381 return BCME_UNSUPPORTED;
5382 }
5383 if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
5384 printf("%s: Page memory allocation failure\n", __FUNCTION__);
5385 goto done;
5386 }
5387 if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
5388 printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
5389 goto done;
5390 }
5391
5392 ret = dhdpcie_readshared(bus);
5393 if (ret < 0) {
5394 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
5395 goto done;
5396 }
5397
5398 sh = bus->pcie_sh;
5399
5400 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
5401
5402 if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
5403
5404 dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
5405 (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
5406
5407 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
5408 "count<%u> status<%u> wrap<%u>\n"
5409 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
5410 (int)sh->buzz_dbg_ptr,
5411 (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
5412 buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
5413 buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
5414 buzzz_p->buffer_sz, buzzz_p->log_sz);
5415
5416 if (buzzz_p->count == 0) {
5417 printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
5418 goto done;
5419 }
5420
5421 /* Allocate memory for trace buffer and format strings */
5422 buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
5423 if (buffer_p == NULL) {
5424 printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
5425 goto done;
5426 }
5427
5428 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
5429 dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
5430 (uint8 *)buffer_p, buzzz_p->buffer_sz);
5431
5432 /* Process and display the trace using formatted output */
5433
5434 {
5435 int ctr;
5436 for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
5437 printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
5438 }
5439 printf("<code execution point>\n");
5440 }
5441
5442 dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
5443
5444 printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
5445
5446 MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
5447 }
5448
5449 done:
5450
	if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
	/* Free the trace buffer before buzzz_p, since its size is read from buzzz_p */
	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
	if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
5454
5455 return BCME_OK;
5456 }
5457 #endif /* BCM_BUZZZ */
5458
5459 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
5460 ((sih)->buscoretype == PCIE2_CORE_ID))
5461
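/*
 * Re-initialize the F0 enumeration-space registers (function control,
 * IntMask/IntStatus, MSI, power and mailbox interrupt mask/status) to their
 * HW-init values. Used before FLR on chips where FLR does not reset them.
 */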
5462 static void
5463 dhdpcie_enum_reg_init(dhd_bus_t *bus)
5464 {
5465 /* initialize Function control register (clear bit 4) to HW init value */
5466 si_corereg(bus->sih, bus->sih->buscoreidx,
5467 OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
5468 PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
5469
5470 /* clear IntMask */
5471 si_corereg(bus->sih, bus->sih->buscoreidx,
5472 OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
5473 /* clear IntStatus */
5474 si_corereg(bus->sih, bus->sih->buscoreidx,
5475 OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
5476 si_corereg(bus->sih, bus->sih->buscoreidx,
5477 OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
5478
5479 /* clear MSIVector */
5480 si_corereg(bus->sih, bus->sih->buscoreidx,
5481 OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
5482 /* clear MSIIntMask */
5483 si_corereg(bus->sih, bus->sih->buscoreidx,
5484 OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
5485 /* clear MSIIntStatus */
5486 si_corereg(bus->sih, bus->sih->buscoreidx,
5487 OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
5488 si_corereg(bus->sih, bus->sih->buscoreidx,
5489 OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
5490
5491 /* clear PowerIntMask */
5492 si_corereg(bus->sih, bus->sih->buscoreidx,
5493 OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
5494 /* clear PowerIntStatus */
5495 si_corereg(bus->sih, bus->sih->buscoreidx,
5496 OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
5497 si_corereg(bus->sih, bus->sih->buscoreidx,
5498 OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
5499
5500 /* clear MailboxIntMask */
5501 si_corereg(bus->sih, bus->sih->buscoreidx,
5502 OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
5503 /* clear MailboxInt */
5504 si_corereg(bus->sih, bus->sih->buscoreidx,
5505 OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
5506 si_corereg(bus->sih, bus->sih->buscoreidx,
5507 OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
5508 }
5509
5510 int
5511 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
5512 {
5513 uint flr_capab;
5514 uint val;
5515 int retry = 0;
5516
5517 DHD_ERROR(("******** Perform FLR ********\n"));
5518
5519 /* Kernel Panic for 4378Ax during traptest/devreset4 reload case:
5520 * For 4378Ax, enum registers will not be reset with FLR (producer index WAR).
5521 * So, the MailboxIntMask is left as 0xffff during fw boot-up,
5522 * and the fw trap handling during fw boot causes Kernel Panic.
5523 * Jira: SWWLAN-212578: [4378A0 PCIe DVT] :
5524 * Kernel Panic seen in F0 FLR with BT Idle/Traffic/DMA
5525 */
5526 if (bus->sih && PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
5527 if (bus->pcie_mailbox_mask != 0) {
5528 dhdpcie_bus_intr_disable(bus);
5529 }
5530 /* initialize F0 enum registers before FLR for rev66/67 */
5531 dhdpcie_enum_reg_init(bus);
5532 }
5533
5534 /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
5535 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
5536 flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
5537 DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
5538 PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
5539 if (!flr_capab) {
5540 DHD_ERROR(("Chip does not support FLR\n"));
5541 return BCME_UNSUPPORTED;
5542 }
5543
5544 /* Save pcie config space */
5545 DHD_INFO(("Save Pcie Config Space\n"));
5546 DHD_PCIE_CONFIG_SAVE(bus);
5547
5548 /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
5549 DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5550 PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5551 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5552 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5553 val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5554 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5555 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5556
5557 /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
5558 DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
5559
5560 CAN_SLEEP() ? OSL_SLEEP(DHD_FUNCTION_LEVEL_RESET_DELAY) :
5561 OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * USEC_PER_MSEC);
5562
5563 if (force_fail) {
5564 DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5565 PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5566 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5567 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5568 val));
5569 val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
5570 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5571 val));
5572 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5573
5574 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5575 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5576 val));
5577 }
5578
5579 /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5580 DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5581 PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5582 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5583 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5584 val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5585 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5586 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5587
5588 /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5589 DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
5590 "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5591 do {
5592 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5593 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5594 PCIE_CFG_SUBSYSTEM_CONTROL, val));
5595 val = val & (1 << PCIE_SSRESET_STATUS_BIT);
5596 OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
5597 } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
5598
5599 if (val) {
5600 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5601 PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
5602 /* User has to fire the IOVAR again, if force_fail is needed */
5603 if (force_fail) {
5604 bus->flr_force_fail = FALSE;
5605 DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
5606 }
5607 return BCME_DONGLE_DOWN;
5608 }
5609
5610 /* Restore pcie config space */
5611 DHD_INFO(("Restore Pcie Config Space\n"));
5612 DHD_PCIE_CONFIG_RESTORE(bus);
5613
	DHD_ERROR(("******** FLR Succeeded ********\n"));
5615
5616 return BCME_OK;
5617 }
5618
5619 #define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
5620
5621 #define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
5622 #define DHD_BP_RESET_STATUS_RETRIES 50u
5623
5624 #define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT 10
5625 #define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT 12
5626
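/*
 * Backplane reset through PCIE_CFG_SUBSYSTEM_CONTROL: disable ASPM, set the
 * BP_RESET bit and wait for the status bit to assert, then clear BP_RESET and
 * wait for the status bit to de-assert, and finally re-enable ASPM.
 */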
5627 int
5628 dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus)
5629 {
5630 uint val;
5631 int retry = 0;
5632 int ret = BCME_OK;
5633 bool reset_stat_bit;
5634
5635 DHD_ERROR(("******** Perform BP reset ********\n"));
5636
5637 /* Disable ASPM */
5638 DHD_ERROR(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5639 PCIECFGREG_LINK_STATUS_CTRL));
5640 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5641 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5642 val = val & (~PCIE_ASPM_ENAB);
5643 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5644 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5645
5646 /* wait for delay usec */
5647 DHD_ERROR(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5648 OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5649
5650 /* Set bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
5651 DHD_ERROR(("Set PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
5652 " of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5653 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5654 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5655 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
5656 val = val | (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT);
5657 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
5658 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5659
5660 /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is set */
5661 DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
5662 "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is set\n",
5663 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5664 do {
5665 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5666 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5667 PCIE_CFG_SUBSYSTEM_CONTROL, val));
5668 reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT);
5669 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5670 } while (!reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5671
5672 if (!reset_stat_bit) {
5673 DHD_ERROR(("ERROR: reg=0x%x bit %d is not set\n",
5674 PCIE_CFG_SUBSYSTEM_CONTROL,
5675 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT));
5676 ret = BCME_ERROR;
5677 goto aspm_enab;
5678 }
5679
5680 /* Clear bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
5681 DHD_ERROR(("Clear PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
5682 " of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5683 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5684 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5685 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
5686 val = val & ~(1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT);
5687 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
5688 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5689
5690 /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5691 DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
5692 "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is cleared\n",
5693 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5694 do {
5695 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5696 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5697 PCIE_CFG_SUBSYSTEM_CONTROL, val));
5698 reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT);
5699 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5700 } while (reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5701
5702 if (reset_stat_bit) {
5703 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5704 PCIE_CFG_SUBSYSTEM_CONTROL,
5705 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT));
5706 ret = BCME_ERROR;
5707 }
5708
5709 aspm_enab:
5710 /* Enable ASPM */
5711 DHD_ERROR(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5712 PCIECFGREG_LINK_STATUS_CTRL));
5713 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5714 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5715 val = val | (PCIE_ASPM_L1_ENAB);
5716 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5717 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5718
5719 if (ret) {
5720 DHD_ERROR(("******** BP reset Failed ********\n"));
5721 } else {
		DHD_ERROR(("******** BP reset Succeeded ********\n"));
5723 }
5724
5725 return ret;
5726 }
5727
5728 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
5729 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
5730
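/*
 * Backplane reset through PCIECFGREG_SPROM_CTRL: disable ASPM, set the
 * SB_RESET bit and wait for it to self-clear, then wait for the BP reset
 * status bit in the DAR clock control register to clear before re-enabling ASPM.
 */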
5731 int
5732 dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus)
5733 {
5734 uint val;
5735 int retry = 0;
5736 uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
5737 int ret = BCME_OK;
5738 bool cond;
5739
5740 DHD_ERROR(("******** Perform BP reset ********\n"));
5741
5742 /* Disable ASPM */
5743 DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5744 PCIECFGREG_LINK_STATUS_CTRL));
5745 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5746 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5747 val = val & (~PCIE_ASPM_ENAB);
5748 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5749 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5750
5751 /* wait for delay usec */
5752 DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5753 OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5754
5755 /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5756 DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5757 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5758 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5759 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5760 val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5761 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5762 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
5763
	/* Wait till the backplane reset is ASSERTED, i.e.
	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
	 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
	 * otherwise the DAR register will still read the previous stale value.
	 */
5769 DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5770 "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5771 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5772 do {
5773 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5774 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5775 cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5776 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5777 } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5778
5779 if (cond) {
5780 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5781 PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
5782 ret = BCME_ERROR;
5783 goto aspm_enab;
5784 }
5785
5786 /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5787 DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5788 "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5789 PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
5790 do {
5791 val = si_corereg(bus->sih, bus->sih->buscoreidx,
5792 dar_clk_ctrl_status_reg, 0, 0);
5793 DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5794 dar_clk_ctrl_status_reg, val));
5795 cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
5796 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5797 } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5798
5799 if (cond) {
5800 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5801 dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
5802 ret = BCME_ERROR;
5803 }
5804
5805 aspm_enab:
5806 /* Enable ASPM */
5807 DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5808 PCIECFGREG_LINK_STATUS_CTRL));
5809 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5810 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5811 val = val | (PCIE_ASPM_L1_ENAB);
5812 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5813 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5814
	DHD_ERROR(("******** BP reset %s ********\n", ret ? "Failed" : "Succeeded"));
5816
5817 return ret;
5818 }
5819
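/*
 * Turn the WLAN dongle off (flag TRUE) or back on (flag FALSE): tears down or
 * re-creates interrupts, bus resources and protocol state, and stops or starts
 * the host PCIe root complex accordingly.
 */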
5820 int
5821 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
5822 {
5823 dhd_bus_t *bus = dhdp->bus;
5824 int bcmerror = 0;
5825 unsigned long flags;
5826 int retry = POWERUP_MAX_RETRY;
5827
5828 if (flag == TRUE) { /* Turn off WLAN */
5829 /* Removing Power */
5830 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5831
5832 /* wait for other contexts to finish -- if required a call
5833 * to OSL_DELAY for 1s can be added to give other contexts
5834 * a chance to finish
5835 */
5836 dhdpcie_advertise_bus_cleanup(bus->dhd);
5837
5838 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5839 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5840 atomic_set(&bus->dhd->block_bus, TRUE);
5841 dhd_flush_rx_tx_wq(bus->dhd);
5842 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5843
5844 #ifdef BCMPCIE_OOB_HOST_WAKE
5845 /* Clean up any pending host wake IRQ */
5846 dhd_bus_oob_intr_set(bus->dhd, FALSE);
5847 dhd_bus_oob_intr_unregister(bus->dhd);
5848 #endif /* BCMPCIE_OOB_HOST_WAKE */
5849 dhd_os_wd_timer(dhdp, 0);
5850 dhd_bus_stop(bus, TRUE);
5851 if (bus->intr) {
5852 dhdpcie_bus_intr_disable(bus);
5853 dhdpcie_free_irq(bus);
5854 }
5855 dhd_deinit_bus_lp_state_lock(bus);
5856 dhd_deinit_bar1_switch_lock(bus);
5857 dhd_deinit_backplane_access_lock(bus);
5858 dhd_deinit_pwr_req_lock(bus);
5859 dhd_bus_release_dongle(bus);
5860 dhdpcie_bus_free_resource(bus);
5861 bcmerror = dhdpcie_bus_disable_device(bus);
5862 if (bcmerror) {
5863 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5864 __FUNCTION__, bcmerror));
5865 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5866 atomic_set(&bus->dhd->block_bus, FALSE);
5867 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5868 }
			/* Clean up protocol data after the Bus Master Enable bit is
			 * cleared, so that the host can safely unmap DMA and remove the
			 * allocated buffers from the PKTID MAP. Some Application
			 * Processors with a System MMU trigger a kernel panic when they
			 * detect an access to DMA-unmapped memory from a device behind
			 * the System MMU. Since the dongle could still access
			 * DMA-unmapped memory after dhd_prot_reset(), such a panic is
			 * possible. For this reason, dhd_prot_reset() and dhd_clear()
			 * must be called after dhdpcie_bus_disable_device().
			 */
5880 dhd_prot_reset(dhdp);
5881 /* XXX Reset dhd_pub_t instance to initial status
5882 * for built-in type driver
5883 */
5884 dhd_clear(dhdp);
5885
5886 bcmerror = dhdpcie_bus_stop_host_dev(bus);
5887 if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n",
5889 __FUNCTION__, bcmerror));
5890 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5891 atomic_set(&bus->dhd->block_bus, FALSE);
5892 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5893 goto done;
5894 }
5895
5896 DHD_GENERAL_LOCK(bus->dhd, flags);
5897 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5898 bus->dhd->busstate = DHD_BUS_DOWN;
5899 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5900 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5901 atomic_set(&bus->dhd->block_bus, FALSE);
5902 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5903 } else {
5904 if (bus->intr) {
5905 dhdpcie_free_irq(bus);
5906 }
5907 #ifdef BCMPCIE_OOB_HOST_WAKE
5908 /* Clean up any pending host wake IRQ */
5909 dhd_bus_oob_intr_set(bus->dhd, FALSE);
5910 dhd_bus_oob_intr_unregister(bus->dhd);
5911 #endif /* BCMPCIE_OOB_HOST_WAKE */
5912 dhd_dpc_kill(bus->dhd);
5913 if (!bus->no_bus_init) {
5914 dhd_bus_release_dongle(bus);
5915 dhdpcie_bus_free_resource(bus);
5916 bcmerror = dhdpcie_bus_disable_device(bus);
5917 if (bcmerror) {
5918 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5919 __FUNCTION__, bcmerror));
5920 }
5921
				/* Clean up protocol data after the Bus Master Enable bit is
				 * cleared, so that the host can safely unmap DMA and remove
				 * the allocated buffers from the PKTID MAP. Some Application
				 * Processors with a System MMU trigger a kernel panic when
				 * they detect an access to DMA-unmapped memory from a device
				 * behind the System MMU. Since the dongle could still access
				 * DMA-unmapped memory after dhd_prot_reset(), such a panic is
				 * possible. For this reason, dhd_prot_reset() and dhd_clear()
				 * must be called after dhdpcie_bus_disable_device().
				 */
5934 dhd_prot_reset(dhdp);
5935 /* XXX Reset dhd_pub_t instance to initial status
5936 * for built-in type driver
5937 */
5938 dhd_clear(dhdp);
5939 } else {
5940 bus->no_bus_init = FALSE;
5941 }
5942
5943 bcmerror = dhdpcie_bus_stop_host_dev(bus);
5944 if (bcmerror) {
5945 DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n",
5946 __FUNCTION__, bcmerror));
5947 goto done;
5948 }
5949 }
5950
5951 bus->dhd->dongle_reset = TRUE;
5952 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
5953
5954 } else { /* Turn on WLAN */
5955 if (bus->dhd->busstate == DHD_BUS_DOWN) {
5956 /* Powering On */
5957 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5958 /* PCIe RC Turn on */
5959 do {
5960 bcmerror = dhdpcie_bus_start_host_dev(bus);
5961 if (!bcmerror) {
5962 DHD_ERROR(("%s: dhdpcie_bus_start_host_dev OK\n",
5963 __FUNCTION__));
5964 break;
5965 } else {
5966 OSL_SLEEP(10);
5967 }
5968 } while (retry--);
5969
5970 if (bcmerror) {
5971 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5972 __FUNCTION__, bcmerror));
5973 goto done;
5974 }
5975 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5976 dhd_bus_aspm_enable_rc_ep(bus, FALSE);
5977 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5978 bus->is_linkdown = 0;
5979 bus->cto_triggered = 0;
5980 #ifdef SUPPORT_LINKDOWN_RECOVERY
5981 bus->read_shm_fail = FALSE;
5982 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5983 bcmerror = dhdpcie_bus_enable_device(bus);
5984 if (bcmerror) {
5985 DHD_ERROR(("%s: host configuration restore failed: %d\n",
5986 __FUNCTION__, bcmerror));
5987 goto done;
5988 }
5989
5990 bcmerror = dhdpcie_bus_alloc_resource(bus);
5991 if (bcmerror) {
5992 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5993 __FUNCTION__, bcmerror));
5994 goto done;
5995 }
5996
5997 #ifdef FORCE_DONGLE_RESET_IN_DEVRESET_ON
5998 /*
			 * This is enabled on phone platforms to
			 * reset the dongle during Wi-Fi ON
6001 */
6002 dhdpcie_dongle_reset(bus);
6003 #endif /* FORCE_DONGLE_RESET_IN_DEVRESET_ON */
6004
6005 bcmerror = dhdpcie_bus_dongle_attach(bus);
6006 if (bcmerror) {
6007 /*
				 * Since request_irq is done later, CTO will not be detected
				 * until then, so unconditionally dump the cfg and DAR registers.
6010 */
6011 dhd_bus_dump_imp_cfg_registers(bus);
6012 dhd_bus_dump_dar_registers(bus);
6013 /* Check if CTO has happened */
6014 if (PCIECTO_ENAB(bus)) {
6015 /* read pci_intstatus */
6016 uint32 pci_intstatus =
6017 dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
6018 if (pci_intstatus == (uint32)-1) {
6019 DHD_ERROR(("%s : Invalid pci_intstatus(0x%x)\n",
6020 __FUNCTION__, pci_intstatus));
6021 } else if (pci_intstatus & PCI_CTO_INT_MASK) {
6022 DHD_ERROR(("%s: ##### CTO REPORTED BY DONGLE "
6023 "intstat=0x%x enab=%d\n", __FUNCTION__,
6024 pci_intstatus, bus->cto_enable));
6025 }
6026 }
6027 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
6028 __FUNCTION__, bcmerror));
6029 goto done;
6030 }
6031
6032 bcmerror = dhd_bus_request_irq(bus);
6033 if (bcmerror) {
6034 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
6035 __FUNCTION__, bcmerror));
6036 goto done;
6037 }
6038
6039 bus->dhd->dongle_reset = FALSE;
6040
6041 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
6042 dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
6043 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
6044
6045 bcmerror = dhd_bus_start(dhdp);
6046 if (bcmerror) {
6047 DHD_ERROR(("%s: dhd_bus_start: %d\n",
6048 __FUNCTION__, bcmerror));
6049 #ifdef DEBUG_DNGL_INIT_FAIL
6050 #ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
6051 bus->dhd->memdump_enabled = DUMP_MEMFILE;
6052 #else
6053 /* Force panic as HAL will not be inited yet */
6054 bus->dhd->memdump_enabled = DUMP_MEMONLY;
6055 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
6056 if (bus->dhd->memdump_enabled) {
6057 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
6058 dhdpcie_mem_dump(bus);
6059 }
6060 #endif /* DEBUG_DNGL_INIT_FAIL */
6061 goto done;
6062 }
6063
			/* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
6065 if (bus->dhd->dhd_watchdog_ms_backup) {
6066 DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
6067 __FUNCTION__));
6068 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
6069 }
6070 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
6071 } else {
6072 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
6073 goto done;
6074 }
6075 }
6076
6077 done:
6078 if (bcmerror) {
6079 DHD_GENERAL_LOCK(bus->dhd, flags);
6080 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
6081 bus->dhd->busstate = DHD_BUS_DOWN;
6082 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6083 }
6084 return bcmerror;
6085 }
6086
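/*
 * Return the DMA ring-index update configuration as a bitmask:
 * bit 0 = D2H ring update support, bit 1 = H2D ring update support.
 */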
6087 static int
6088 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
6089 {
6090 int h2d_support, d2h_support;
6091
6092 d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
6093 h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
6094 return (d2h_support | (h2d_support << 1));
6095
6096 }
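/*
 * Set the DMA ring-index update configuration from the same bitmask
 * (bit 0 = D2H, bit 1 = H2D). Allowed only while the bus is down,
 * i.e. before firmware download.
 */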
6097 int
6098 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
6099 {
6100 int bcmerror = 0;
6101 /* Can change it only during initialization/FW download */
6102 if (dhd->busstate == DHD_BUS_DOWN) {
6103 if ((int_val > 3) || (int_val < 0)) {
6104 DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
6105 bcmerror = BCME_BADARG;
6106 } else {
6107 dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
6108 dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
6109 dhd->dma_ring_upd_overwrite = TRUE;
6110 }
6111 } else {
6112 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6113 __FUNCTION__));
6114 bcmerror = BCME_NOTDOWN;
6115 }
6116
6117 return bcmerror;
6118
6119 }
6120
/* si_backplane_access() manages a shared resource - the BAR0 mapping - hence
 * its calls shall be serialized. This wrapper function provides such
 * serialization and shall be used everywhere instead of calling
 * si_backplane_access() directly.
 *
 * The Linux DHD driver calls si_backplane_access() from three contexts: tasklet
 * (which may call dhdpcie_sssr_dump()), iovar ("sbreg", "membytes", etc.) and
 * procfs (used by GDB proxy). To avoid race conditions, calls of
 * si_backplane_access() shall be serialized. The presence of a tasklet context
 * implies that serialization shall be based on a spinlock. Hence the Linux
 * implementation of dhd_pcie_backplane_access_[un]lock() is spinlock-based.
 *
 * Other platforms may add their own implementations of
 * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not
 * needed, the implementation may be empty).
 */
6136 */
6137 static uint
6138 serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
6139 {
6140 uint ret;
6141 unsigned long flags;
6142 DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
6143 ret = si_backplane_access(bus->sih, addr, size, val, read);
6144 DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
6145 return ret;
6146 }
6147
6148 /**
6149 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
6150 *
6151 * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
6152 * @param params input buffer
6153 * @param plen length in [bytes] of input buffer 'params'
6154 * @param arg output buffer
6155 * @param len length in [bytes] of output buffer 'arg'
6156 */
6157 static int
6158 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
6159 void *params, uint plen, void *arg, uint len, int val_size)
6160 {
6161 int bcmerror = 0;
6162 int32 int_val = 0;
6163 int32 int_val2 = 0;
6164 int32 int_val3 = 0;
6165 bool bool_val = 0;
6166
6167 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
6168 __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
6169
6170 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
6171 goto exit;
6172
6173 if (plen >= sizeof(int_val))
6174 bcopy(params, &int_val, sizeof(int_val));
6175
6176 if (plen >= sizeof(int_val) * 2)
6177 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
6178
6179 if (plen >= sizeof(int_val) * 3)
6180 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
6181
6182 bool_val = (int_val != 0) ? TRUE : FALSE;
6183
6184 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
6185 if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
6186 actionid == IOV_GVAL(IOV_DEVRESET))) {
6187 bcmerror = BCME_NOTREADY;
6188 goto exit;
6189 }
6190
6191 switch (actionid) {
6192
6193 case IOV_SVAL(IOV_VARS):
6194 bcmerror = dhdpcie_downloadvars(bus, arg, len);
6195 break;
6196 case IOV_SVAL(IOV_PCIE_LPBK):
6197 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
6198 break;
6199
6200 case IOV_SVAL(IOV_PCIE_DMAXFER): {
6201 dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
6202
6203 if (!dmaxfer)
6204 return BCME_BADARG;
6205 if (dmaxfer->version != DHD_DMAXFER_VERSION)
6206 return BCME_VERSION;
6207 if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
6208 return BCME_BADLEN;
6209 }
6210
6211 bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
6212 dmaxfer->src_delay, dmaxfer->dest_delay,
6213 dmaxfer->type, dmaxfer->core_num,
6214 dmaxfer->should_wait);
6215
6216 if (dmaxfer->should_wait && bcmerror >= 0) {
6217 bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
6218 }
6219 break;
6220 }
6221
6222 case IOV_GVAL(IOV_PCIE_DMAXFER): {
6223 dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
6224 if (!dmaxfer)
6225 return BCME_BADARG;
6226 if (dmaxfer->version != DHD_DMAXFER_VERSION)
6227 return BCME_VERSION;
6228 if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
6229 return BCME_BADLEN;
6230 }
6231 bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
6232 break;
6233 }
6234
6235 #ifdef PCIE_INB_DW
6236 case IOV_GVAL(IOV_INB_DW_ENABLE):
6237 int_val = bus->inb_enabled;
6238 bcopy(&int_val, arg, val_size);
6239 break;
6240 case IOV_SVAL(IOV_INB_DW_ENABLE):
6241 bus->inb_enabled = (bool)int_val;
6242 break;
6243 #endif /* PCIE_INB_DW */
6244 #if defined(PCIE_INB_DW)
6245 case IOV_GVAL(IOV_DEEP_SLEEP):
6246 int_val = bus->ds_enabled;
6247 bcopy(&int_val, arg, val_size);
6248 break;
6249
6250 case IOV_SVAL(IOV_DEEP_SLEEP):
6251 if (int_val == 1) {
6252 if (!bus->ds_enabled) {
6253 bus->ds_enabled = TRUE;
6254 /* Deassert */
6255 if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
6256 #ifdef PCIE_INB_DW
6257 if (INBAND_DW_ENAB(bus)) {
6258 int timeleft;
6259 timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
6260 if (timeleft == 0) {
6261 DHD_ERROR(("DS-ENTER timeout\n"));
6262 bus->ds_enabled = FALSE;
6263 break;
6264 }
6265 }
6266 #endif /* PCIE_INB_DW */
6267 }
6268 else {
6269 DHD_ERROR(("%s: Enable Deep Sleep failed !\n",
6270 __FUNCTION__));
6271 bus->ds_enabled = FALSE;
6272 }
6273 } else {
6274 DHD_ERROR(("%s: Deep Sleep already enabled !\n", __FUNCTION__));
6275 }
6276 }
6277 else if (int_val == 0) {
6278 if (bus->ds_enabled) {
6279 bus->calc_ds_exit_latency = TRUE;
6280 /* Assert */
6281 if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK) {
6282 bus->ds_enabled = FALSE;
6283 if (INBAND_DW_ENAB(bus)) {
6284 if (bus->ds_exit_latency != 0) {
6285 DHD_ERROR(("DS-EXIT latency = %llu us\n",
6286 bus->ds_exit_latency));
6287 } else {
6288 DHD_ERROR(("Failed to measure DS-EXIT"
6289 " latency!(Possibly a non"
6290 " waitable context)\n"));
6291 }
6292 }
6293 } else {
6294 DHD_ERROR(("%s: Disable Deep Sleep failed !\n",
6295 __FUNCTION__));
6296 }
6297 bus->calc_ds_exit_latency = FALSE;
6298 } else {
6299 DHD_ERROR(("%s: Deep Sleep already disabled !\n", __FUNCTION__));
6300 }
6301 }
6302 else
6303 DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
6304
6305 break;
6306 #endif
6307 case IOV_GVAL(IOV_PCIE_SUSPEND):
6308 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
6309 bcopy(&int_val, arg, val_size);
6310 break;
6311
6312 case IOV_SVAL(IOV_PCIE_SUSPEND):
6313 if (bool_val) { /* Suspend */
6314 int ret;
6315 unsigned long flags;
6316
6317 /*
6318 * If some other context is busy, wait until they are done,
6319 * before starting suspend
6320 */
6321 ret = dhd_os_busbusy_wait_condition(bus->dhd,
6322 &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
6323 if (ret == 0) {
6324 DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
6325 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
6326 return BCME_BUSY;
6327 }
6328
6329 DHD_GENERAL_LOCK(bus->dhd, flags);
6330 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
6331 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6332 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6333 dhdpcie_bus_suspend(bus, TRUE, TRUE);
6334 #else
6335 dhdpcie_bus_suspend(bus, TRUE);
6336 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6337
6338 DHD_GENERAL_LOCK(bus->dhd, flags);
6339 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
6340 dhd_os_busbusy_wake(bus->dhd);
6341 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6342 } else { /* Resume */
6343 unsigned long flags;
6344 DHD_GENERAL_LOCK(bus->dhd, flags);
6345 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
6346 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6347
6348 dhdpcie_bus_suspend(bus, FALSE);
6349
6350 DHD_GENERAL_LOCK(bus->dhd, flags);
6351 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
6352 dhd_os_busbusy_wake(bus->dhd);
6353 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6354 }
6355 break;
6356
6357 case IOV_GVAL(IOV_MEMSIZE):
6358 int_val = (int32)bus->ramsize;
6359 bcopy(&int_val, arg, val_size);
6360 break;
6361
	/* Debug related. Dumps core registers or one of the dongle memories */
6363 case IOV_GVAL(IOV_DUMP_DONGLE):
6364 {
6365 dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
6366 dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
6367 uint32 *p = ddo->val;
6368 const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
6369
6370 if (plen < sizeof(ddi) || len < sizeof(ddo)) {
6371 bcmerror = BCME_BADARG;
6372 break;
6373 }
6374
6375 switch (ddi.type) {
6376 case DUMP_DONGLE_COREREG:
6377 ddo->n_bytes = 0;
6378
6379 if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
6380 break; // beyond last core: core enumeration ended
6381 }
6382
6383 ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
6384 ddo->address += ddi.offset; // BP address at which this dump starts
6385
6386 ddo->id = si_coreid(bus->sih);
6387 ddo->rev = si_corerev(bus->sih);
6388
6389 while (ddi.offset < max_offset &&
6390 sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
6391 *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
6392 ddi.offset += sizeof(uint32);
6393 ddo->n_bytes += sizeof(uint32);
6394 }
6395 break;
6396 default:
6397 // TODO: implement d11 SHM/TPL dumping
6398 bcmerror = BCME_BADARG;
6399 break;
6400 }
6401 break;
6402 }
6403
6404 /* Debug related. Returns a string with dongle capabilities */
6405 case IOV_GVAL(IOV_DNGL_CAPS):
6406 {
6407 strlcpy(arg, bus->dhd->fw_capabilities, (size_t)len);
6408 break;
6409 }
6410
6411 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
6412 case IOV_SVAL(IOV_GDB_SERVER):
6413 /* debugger_*() functions may sleep, so cannot hold spinlock */
6414 if (int_val > 0) {
6415 debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
6416 } else {
6417 debugger_close();
6418 }
6419 break;
6420 #endif /* DEBUGGER || DHD_DSCOPE */
6421 #if defined(GDB_PROXY)
6422 case IOV_GVAL(IOV_GDB_PROXY_PROBE):
6423 {
6424 dhd_gdb_proxy_probe_data_t ret;
6425 ret.data_len = (uint32)sizeof(ret);
6426 ret.magic = DHD_IOCTL_MAGIC;
6427 ret.flags = 0;
6428 if (bus->gdb_proxy_access_enabled) {
6429 ret.flags |= DHD_GDB_PROXY_PROBE_ACCESS_ENABLED;
6430 if (bus->dhd->busstate < DHD_BUS_LOAD) {
6431 ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING;
6432 } else {
6433 ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING;
6434 }
6435 }
6436 if (bus->gdb_proxy_bootloader_mode) {
6437 ret.flags |= DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE;
6438 }
6439 ret.last_id = bus->gdb_proxy_last_id;
6440 if (plen && int_val) {
6441 bus->gdb_proxy_last_id = (uint32)int_val;
6442 }
6443 if (len >= sizeof(ret)) {
6444 bcopy(&ret, arg, sizeof(ret));
6445 bus->dhd->gdb_proxy_active = TRUE;
6446 } else {
6447 bcmerror = BCME_BADARG;
6448 }
6449 break;
6450 }
6451 case IOV_GVAL(IOV_GDB_PROXY_STOP_COUNT):
6452 int_val = (int32)bus->dhd->gdb_proxy_stop_count;
6453 bcopy(&int_val, arg, sizeof(int_val));
6454 break;
6455 case IOV_SVAL(IOV_GDB_PROXY_STOP_COUNT):
6456 bus->dhd->gdb_proxy_stop_count = (uint32)int_val;
6457 break;
6458 #endif /* GDB_PROXY */
6459
6460 #ifdef BCM_BUZZZ
6461 /* Dump dongle side buzzz trace to console */
6462 case IOV_GVAL(IOV_BUZZZ_DUMP):
6463 bcmerror = dhd_buzzz_dump_dngl(bus);
6464 break;
6465 #endif /* BCM_BUZZZ */
6466
6467 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
6468 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
6469 break;
6470
6471 #if defined(FW_SIGNATURE)
6472 case IOV_SVAL(IOV_SET_DOWNLOAD_INFO):
6473 {
6474 fw_download_info_t *info = (fw_download_info_t*)params;
6475 DHD_INFO(("dwnldinfo: sig=%s fw=%x,%u bl=%s,0x%x\n",
6476 info->fw_signature_fname,
6477 info->fw_start_addr, info->fw_size,
6478 info->bootloader_fname, info->bootloader_start_addr));
6479 bcmerror = dhdpcie_bus_save_download_info(bus,
6480 info->fw_start_addr, info->fw_size, info->fw_signature_fname,
6481 info->bootloader_fname, info->bootloader_start_addr);
6482 break;
6483 }
6484 #endif /* FW_SIGNATURE */
6485
6486 case IOV_GVAL(IOV_RAMSIZE):
6487 int_val = (int32)bus->ramsize;
6488 bcopy(&int_val, arg, val_size);
6489 break;
6490
6491 case IOV_SVAL(IOV_RAMSIZE):
6492 bus->ramsize = int_val;
6493 bus->orig_ramsize = int_val;
6494 break;
6495
6496 case IOV_GVAL(IOV_RAMSTART):
6497 int_val = (int32)bus->dongle_ram_base;
6498 bcopy(&int_val, arg, val_size);
6499 break;
6500
6501 case IOV_GVAL(IOV_CC_NVMSHADOW):
6502 {
6503 struct bcmstrbuf dump_b;
6504
6505 bcm_binit(&dump_b, arg, len);
6506 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
6507 break;
6508 }
6509
6510 case IOV_GVAL(IOV_SLEEP_ALLOWED):
6511 bool_val = bus->sleep_allowed;
6512 bcopy(&bool_val, arg, val_size);
6513 break;
6514
6515 case IOV_SVAL(IOV_SLEEP_ALLOWED):
6516 bus->sleep_allowed = bool_val;
6517 break;
6518
6519 case IOV_GVAL(IOV_DONGLEISOLATION):
6520 int_val = bus->dhd->dongle_isolation;
6521 bcopy(&int_val, arg, val_size);
6522 break;
6523
6524 case IOV_SVAL(IOV_DONGLEISOLATION):
6525 bus->dhd->dongle_isolation = bool_val;
6526 break;
6527
6528 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
6529 int_val = bus->ltrsleep_on_unload;
6530 bcopy(&int_val, arg, val_size);
6531 break;
6532
6533 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
6534 bus->ltrsleep_on_unload = bool_val;
6535 break;
6536
6537 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
6538 {
6539 struct bcmstrbuf dump_b;
6540 bcm_binit(&dump_b, arg, len);
6541 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
6542 break;
6543 }
6544 case IOV_GVAL(IOV_DMA_RINGINDICES):
6545 {
6546 int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
6547 bcopy(&int_val, arg, sizeof(int_val));
6548 break;
6549 }
6550 case IOV_SVAL(IOV_DMA_RINGINDICES):
6551 bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
6552 break;
6553
6554 case IOV_GVAL(IOV_METADATA_DBG):
6555 int_val = dhd_prot_metadata_dbg_get(bus->dhd);
6556 bcopy(&int_val, arg, val_size);
6557 break;
6558 case IOV_SVAL(IOV_METADATA_DBG):
6559 dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
6560 break;
6561
6562 case IOV_GVAL(IOV_RX_METADATALEN):
6563 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
6564 bcopy(&int_val, arg, val_size);
6565 break;
6566
6567 case IOV_SVAL(IOV_RX_METADATALEN):
6568 if (int_val > 64) {
6569 bcmerror = BCME_BUFTOOLONG;
6570 break;
6571 }
6572 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
6573 break;
6574
6575 case IOV_SVAL(IOV_TXP_THRESHOLD):
6576 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
6577 break;
6578
6579 case IOV_GVAL(IOV_TXP_THRESHOLD):
6580 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
6581 bcopy(&int_val, arg, val_size);
6582 break;
6583
6584 case IOV_SVAL(IOV_DB1_FOR_MB):
6585 if (int_val)
6586 bus->db1_for_mb = TRUE;
6587 else
6588 bus->db1_for_mb = FALSE;
6589 break;
6590
6591 case IOV_GVAL(IOV_DB1_FOR_MB):
6592 if (bus->db1_for_mb)
6593 int_val = 1;
6594 else
6595 int_val = 0;
6596 bcopy(&int_val, arg, val_size);
6597 break;
6598
6599 case IOV_GVAL(IOV_TX_METADATALEN):
6600 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
6601 bcopy(&int_val, arg, val_size);
6602 break;
6603
6604 case IOV_SVAL(IOV_TX_METADATALEN):
6605 if (int_val > 64) {
6606 bcmerror = BCME_BUFTOOLONG;
6607 break;
6608 }
6609 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
6610 break;
6611
6612 case IOV_SVAL(IOV_DEVRESET):
6613 {
6614 devreset_info_t *devreset = (devreset_info_t *)arg;
6615
6616 if (!devreset) {
6617 return BCME_BADARG;
6618 }
6619
6620 if (devreset->length == sizeof(devreset_info_t)) {
6621 if (devreset->version != DHD_DEVRESET_VERSION) {
6622 return BCME_VERSION;
6623 }
6624 int_val = devreset->mode;
6625 }
6626
6627 switch (int_val) {
6628 case DHD_BUS_DEVRESET_ON:
6629 bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6630 break;
6631 case DHD_BUS_DEVRESET_OFF:
6632 bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6633 break;
6634 case DHD_BUS_DEVRESET_FLR:
6635 bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
6636 break;
6637 case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
6638 bus->flr_force_fail = TRUE;
6639 break;
6640 default:
6641 DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
6642 break;
6643 }
6644 break;
6645 }
6646 case IOV_SVAL(IOV_FORCE_FW_TRAP):
6647 if (bus->dhd->busstate == DHD_BUS_DATA)
6648 dhdpcie_fw_trap(bus);
6649 else {
6650 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
6651 bcmerror = BCME_NOTUP;
6652 }
6653 break;
6654 case IOV_GVAL(IOV_FLOW_PRIO_MAP):
6655 int_val = bus->dhd->flow_prio_map_type;
6656 bcopy(&int_val, arg, val_size);
6657 break;
6658
6659 case IOV_SVAL(IOV_FLOW_PRIO_MAP):
6660 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
6661 bcopy(&int_val, arg, val_size);
6662 break;
6663
6664 #ifdef DHD_PCIE_RUNTIMEPM
6665 case IOV_GVAL(IOV_IDLETIME):
6666 if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
6667 int_val = bus->idletime;
6668 } else {
6669 int_val = 0;
6670 }
6671 bcopy(&int_val, arg, val_size);
6672 break;
6673
6674 case IOV_SVAL(IOV_IDLETIME):
6675 if (int_val < 0) {
6676 bcmerror = BCME_BADARG;
6677 } else {
6678 bus->idletime = int_val;
6679 if (bus->idletime) {
6680 DHD_ENABLE_RUNTIME_PM(bus->dhd);
6681 } else {
6682 DHD_DISABLE_RUNTIME_PM(bus->dhd);
6683 }
6684 }
6685 break;
6686 #endif /* DHD_PCIE_RUNTIMEPM */
6687
6688 case IOV_GVAL(IOV_TXBOUND):
6689 int_val = (int32)dhd_txbound;
6690 bcopy(&int_val, arg, val_size);
6691 break;
6692
6693 case IOV_SVAL(IOV_TXBOUND):
6694 dhd_txbound = (uint)int_val;
6695 break;
6696
6697 case IOV_SVAL(IOV_H2D_MAILBOXDATA):
6698 dhdpcie_send_mb_data(bus, (uint)int_val);
6699 break;
6700
6701 case IOV_SVAL(IOV_INFORINGS):
6702 dhd_prot_init_info_rings(bus->dhd);
6703 break;
6704
6705 case IOV_SVAL(IOV_H2D_PHASE):
6706 if (bus->dhd->busstate != DHD_BUS_DOWN) {
6707 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6708 __FUNCTION__));
6709 bcmerror = BCME_NOTDOWN;
6710 break;
6711 }
6712 if (int_val)
6713 bus->dhd->h2d_phase_supported = TRUE;
6714 else
6715 bus->dhd->h2d_phase_supported = FALSE;
6716 break;
6717
6718 case IOV_GVAL(IOV_H2D_PHASE):
6719 int_val = (int32) bus->dhd->h2d_phase_supported;
6720 bcopy(&int_val, arg, val_size);
6721 break;
6722
6723 case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6724 if (bus->dhd->busstate != DHD_BUS_DOWN) {
6725 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6726 __FUNCTION__));
6727 bcmerror = BCME_NOTDOWN;
6728 break;
6729 }
6730 if (int_val)
6731 bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
6732 else
6733 bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
6734 break;
6735
6736 case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6737 int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
6738 bcopy(&int_val, arg, val_size);
6739 break;
6740
6741 case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
6742 if (bus->dhd->busstate != DHD_BUS_DOWN) {
6743 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6744 __FUNCTION__));
6745 bcmerror = BCME_NOTDOWN;
6746 break;
6747 }
6748 dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
6749 break;
6750
6751 case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
6752 int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
6753 bcopy(&int_val, arg, val_size);
6754 break;
6755
6756 #if defined(DHD_HTPUT_TUNABLES)
6757 case IOV_SVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM):
6758 if (bus->dhd->busstate != DHD_BUS_DOWN) {
6759 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6760 __FUNCTION__));
6761 bcmerror = BCME_NOTDOWN;
6762 break;
6763 }
6764 dhd_prot_set_h2d_htput_max_txpost(bus->dhd, (uint16)int_val);
6765 break;
6766
6767 case IOV_GVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM):
6768 int_val = dhd_prot_get_h2d_htput_max_txpost(bus->dhd);
6769 bcopy(&int_val, arg, val_size);
6770 break;
6771 #endif /* DHD_HTPUT_TUNABLES */
6772
6773 case IOV_GVAL(IOV_RXBOUND):
6774 int_val = (int32)dhd_rxbound;
6775 bcopy(&int_val, arg, val_size);
6776 break;
6777
6778 case IOV_SVAL(IOV_RXBOUND):
6779 dhd_rxbound = (uint)int_val;
6780 break;
6781
6782 case IOV_GVAL(IOV_TRAPDATA):
6783 {
6784 struct bcmstrbuf dump_b;
6785 bcm_binit(&dump_b, arg, len);
6786 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
6787 break;
6788 }
6789
6790 case IOV_GVAL(IOV_TRAPDATA_RAW):
6791 {
6792 struct bcmstrbuf dump_b;
6793 bcm_binit(&dump_b, arg, len);
6794 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
6795 break;
6796 }
6797
6798 case IOV_SVAL(IOV_HANGREPORT):
6799 bus->dhd->hang_report = bool_val;
6800 DHD_ERROR(("%s: Set hang_report as %d\n",
6801 __FUNCTION__, bus->dhd->hang_report));
6802 break;
6803
6804 case IOV_GVAL(IOV_HANGREPORT):
6805 int_val = (int32)bus->dhd->hang_report;
6806 bcopy(&int_val, arg, val_size);
6807 break;
6808
6809 case IOV_SVAL(IOV_CTO_PREVENTION):
6810 bcmerror = dhdpcie_cto_init(bus, bool_val);
6811 break;
6812
6813 case IOV_GVAL(IOV_CTO_PREVENTION):
6814 if (bus->sih->buscorerev < 19) {
6815 bcmerror = BCME_UNSUPPORTED;
6816 break;
6817 }
6818 int_val = (int32)bus->cto_enable;
6819 bcopy(&int_val, arg, val_size);
6820 break;
6821
6822 case IOV_SVAL(IOV_CTO_THRESHOLD):
6823 {
6824 if (bus->sih->buscorerev < 19) {
6825 bcmerror = BCME_UNSUPPORTED;
6826 break;
6827 }
6828 bus->cto_threshold = (uint32)int_val;
6829 }
6830 break;
6831
6832 case IOV_GVAL(IOV_CTO_THRESHOLD):
6833 if (bus->sih->buscorerev < 19) {
6834 bcmerror = BCME_UNSUPPORTED;
6835 break;
6836 }
6837 if (bus->cto_threshold) {
6838 int_val = (int32)bus->cto_threshold;
6839 } else {
6840 int_val = pcie_cto_to_thresh_default(bus->sih->buscorerev);
6841 }
6842
6843 bcopy(&int_val, arg, val_size);
6844 break;
6845
6846 case IOV_SVAL(IOV_PCIE_WD_RESET):
6847 if (bool_val) {
6848 /* Legacy chipcommon watchdog reset */
6849 dhdpcie_cc_watchdog_reset(bus);
6850 }
6851 break;
6852
6853 case IOV_GVAL(IOV_HWA_ENABLE):
6854 int_val = bus->hwa_enabled;
6855 bcopy(&int_val, arg, val_size);
6856 break;
6857 case IOV_SVAL(IOV_HWA_ENABLE):
6858 bus->hwa_enabled = (bool)int_val;
6859 break;
6860 case IOV_GVAL(IOV_IDMA_ENABLE):
6861 int_val = bus->idma_enabled;
6862 bcopy(&int_val, arg, val_size);
6863 break;
6864 case IOV_SVAL(IOV_IDMA_ENABLE):
6865 bus->idma_enabled = (bool)int_val;
6866 break;
6867 case IOV_GVAL(IOV_IFRM_ENABLE):
6868 int_val = bus->ifrm_enabled;
6869 bcopy(&int_val, arg, val_size);
6870 break;
6871 case IOV_SVAL(IOV_IFRM_ENABLE):
6872 bus->ifrm_enabled = (bool)int_val;
6873 break;
6874 case IOV_GVAL(IOV_CLEAR_RING):
6875 bcopy(&int_val, arg, val_size);
6876 dhd_flow_rings_flush(bus->dhd, 0);
6877 break;
6878 case IOV_GVAL(IOV_DAR_ENABLE):
6879 int_val = bus->dar_enabled;
6880 bcopy(&int_val, arg, val_size);
6881 break;
6882 case IOV_SVAL(IOV_DAR_ENABLE):
6883 bus->dar_enabled = (bool)int_val;
6884 break;
6885 case IOV_GVAL(IOV_HSCBSIZE):
6886 bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
6887 break;
6888
6889 case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
6890 if (bus->dhd->busstate != DHD_BUS_DOWN) {
6891 return BCME_NOTDOWN;
6892 }
6893 if (int_val)
6894 bus->dhd->extdtxs_in_txcpl = TRUE;
6895 else
6896 bus->dhd->extdtxs_in_txcpl = FALSE;
6897 break;
6898
6899 case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
6900 int_val = bus->dhd->extdtxs_in_txcpl;
6901 bcopy(&int_val, arg, val_size);
6902 break;
6903
6904 case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
6905 if (bus->dhd->busstate != DHD_BUS_DOWN) {
6906 return BCME_NOTDOWN;
6907 }
6908 if (int_val)
6909 bus->dhd->hostrdy_after_init = TRUE;
6910 else
6911 bus->dhd->hostrdy_after_init = FALSE;
6912 break;
6913
6914 case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
6915 int_val = bus->dhd->hostrdy_after_init;
6916 bcopy(&int_val, arg, val_size);
6917 break;
6918
6919 default:
6920 bcmerror = BCME_UNSUPPORTED;
6921 break;
6922 }
6923
6924 exit:
6925 return bcmerror;
6926 } /* dhdpcie_bus_doiovar */
6927
/** Requests a dongle loopback (LPBK) transfer of 'len' bytes */
6929 static int
6930 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
6931 {
6932 if (bus->dhd == NULL) {
6933 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6934 return 0;
6935 }
6936 if (bus->dhd->prot == NULL) {
6937 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6938 return 0;
6939 }
6940 if (bus->dhd->busstate != DHD_BUS_DATA) {
		DHD_ERROR(("%s: bus not in a ready state for LPBK\n", __FUNCTION__));
6942 return 0;
6943 }
6944 dhdmsgbuf_lpbk_req(bus->dhd, len);
6945 return 0;
6946 }
6947
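/*
 * Dump the PCIe DAR registers (clock control, power control, intstatus,
 * errlog, erraddr and PCIe mailbox int) for debug. Not supported for
 * buscorerev < 24, and skipped when the link is down.
 */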
6948 void
6949 dhd_bus_dump_dar_registers(struct dhd_bus *bus)
6950 {
6951 uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
6952 dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
6953 uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
6954 dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
6955
6956 if (bus->is_linkdown) {
6957 DHD_ERROR(("%s: link is down\n", __FUNCTION__));
6958 return;
6959 }
6960
6961 if (DAR_PWRREQ(bus)) {
6962 dhd_bus_pcie_pwr_req(bus);
6963 }
6964
6965 dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
6966 dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
6967 dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
6968 dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
6969 dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
6970 dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
6971
6972 if (bus->sih->buscorerev < 24) {
6973 DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
6974 __FUNCTION__, bus->sih->buscorerev));
6975 return;
6976 }
6977
6978 dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
6979 dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
6980 dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
6981 dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
6982 dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
6983 dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
6984
6985 DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
6986 __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
6987 dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
6988
6989 DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
6990 __FUNCTION__, dar_errlog_reg, dar_errlog_val,
6991 dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
6992 }
6993
6994 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6995 void
6996 dhd_bus_hostready(struct dhd_bus *bus)
6997 {
6998 if (!bus->dhd->d2h_hostrdy_supported) {
6999 return;
7000 }
7001
7002 if (bus->is_linkdown) {
7003 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7004 return;
7005 }
7006
7007 DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
7008 dhd_pcie_config_read(bus, PCI_CFG_CMD, sizeof(uint32))));
7009
7010 dhd_bus_dump_dar_registers(bus);
7011
7012 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
7013 bus->hostready_count ++;
7014 DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
7015 }
7016
7017 /* Clear INTSTATUS */
7018 void
7019 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
7020 {
7021 uint32 intstatus = 0;
	/* Skip after receiving D3 ACK */
7023 if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
7024 return;
7025 }
7026 /* XXX: check for PCIE Gen2 also */
7027 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
7028 (bus->sih->buscorerev == 2)) {
7029 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
7030 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
7031 } else {
		/* this is a PCIe core register, not a config register */
7033 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
7034 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
7035 intstatus);
7036 }
7037 }
7038
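/*
 * Suspend (state TRUE) or resume (state FALSE) the PCIe bus. On suspend the
 * host stops the network queues, sends H2D_HOST_D3_INFORM to the dongle and
 * waits for the D3 ACK before the link is allowed to enter D3.
 */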
7039 int
7040 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7041 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
7042 #else
7043 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
7044 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7045 {
7046 int timeleft;
7047 int rc = 0;
7048 unsigned long flags;
7049 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7050 int d3_read_retry = 0;
7051 uint32 d2h_mb_data = 0;
7052 uint32 zero = 0;
7053 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7054
7055 printf("%s: state=%d\n", __FUNCTION__, state);
7056 if (bus->dhd == NULL) {
7057 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
7058 return BCME_ERROR;
7059 }
7060 if (bus->dhd->prot == NULL) {
7061 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
7062 return BCME_ERROR;
7063 }
7064
7065 if (dhd_query_bus_erros(bus->dhd)) {
7066 return BCME_ERROR;
7067 }
7068
7069 DHD_GENERAL_LOCK(bus->dhd, flags);
7070 if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
7071 DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__));
7072 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7073 return BCME_ERROR;
7074 }
7075 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7076 if (bus->dhd->dongle_reset) {
7077 DHD_ERROR(("Dongle is in reset state.\n"));
7078 return -EIO;
7079 }
7080
7081 /* Check whether we are already in the requested state.
7082 * state=TRUE means Suspend
	 * state=FALSE means Resume
7084 */
7085 if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
7086 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
7087 return BCME_OK;
7088 } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
7089 DHD_ERROR(("Bus is already in RESUME state.\n"));
7090 return BCME_OK;
7091 }
7092
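/* Suspend path overview: stop the watchdog and the interface network
* queues, send H2D_HOST_D3_INFORM and wait for the D3 ACK. On success
* the bus is marked DHD_BUS_SUSPEND and dhdpcie_pci_suspend_resume()
* puts the link into D3; if a wakelock is still held the dongle is
* rolled back to D0, and a D3 ACK timeout is treated as an error
* (console dump / memdump / HANG event). The resume branch below
* reverses this sequence.
*/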
7093 if (state) {
7094 int idle_retry = 0;
7095 int active;
7096
7097 if (bus->is_linkdown) {
7098 DHD_ERROR(("%s: PCIe link was down, state=%d\n",
7099 __FUNCTION__, state));
7100 return BCME_ERROR;
7101 }
7102
7103 /* Suspend */
7104 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
7105
7106 bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
7107 if (bus->dhd->dhd_watchdog_ms_backup) {
7108 DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
7109 __FUNCTION__));
7110 dhd_os_wd_timer(bus->dhd, 0);
7111 }
7112
7113 DHD_GENERAL_LOCK(bus->dhd, flags);
7114 if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
7115 DHD_ERROR(("Tx Request is not ended\n"));
7116 bus->dhd->busstate = DHD_BUS_DATA;
7117 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7118 return -EBUSY;
7119 }
7120
7121 bus->last_suspend_start_time = OSL_LOCALTIME_NS();
7122
7123 /* stop all interface network queue. */
7124 dhd_bus_stop_queue(bus);
7125 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7126
7127 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7128 if (byint) {
7129 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7130 /* Clear wait_for_d3_ack before sending D3_INFORM */
7131 bus->wait_for_d3_ack = 0;
7132 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
7133
7134 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
7135 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7136 } else {
7137 /* Clear wait_for_d3_ack before sending D3_INFORM */
7138 bus->wait_for_d3_ack = 0;
7139 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
7140 while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
7141 dhdpcie_handle_mb_data(bus);
7142 usleep_range(1000, 1500);
7143 d3_read_retry++;
7144 }
7145 }
7146 #else
7147 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7148
7149 #ifdef PCIE_INB_DW
7150 /* As D3_INFORM will be sent after De-assert,
7151 * skip sending DS-ACK for DS-REQ.
7152 */
7153 bus->skip_ds_ack = TRUE;
7154 #endif /* PCIE_INB_DW */
7155
7156 #if defined(PCIE_INB_DW)
7157 dhd_bus_set_device_wake(bus, TRUE);
7158 #endif
7159 #ifdef PCIE_INB_DW
7160 /* De-assert at this point for In-band device_wake */
7161 if (INBAND_DW_ENAB(bus)) {
7162 dhd_bus_set_device_wake(bus, FALSE);
7163 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
7164 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
7165 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
7166 }
7167 #endif /* PCIE_INB_DW */
7168 /* Clear wait_for_d3_ack before sending D3_INFORM */
7169 bus->wait_for_d3_ack = 0;
7170 /*
7171 * Send H2D_HOST_D3_INFORM to dongle and set bus->bus_low_power_state
7172 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
7173 * inside atomic context, so that no more DBs will be
7174 * rung after sending D3_INFORM
7175 */
7176 #ifdef PCIE_INB_DW
7177 if (INBAND_DW_ENAB(bus)) {
7178 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
7179 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
7180 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
7181 } else
7182 #endif /* PCIE_INB_DW */
7183 {
7184 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
7185 }
7186
7187 /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
7188
7189 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
7190
7191 #ifdef DHD_RECOVER_TIMEOUT
7192 /* XXX: WAR for missing D3 ACK MB interrupt */
7193 if (bus->wait_for_d3_ack == 0) {
7194 /* If wait_for_d3_ack was not updated because D2H MB was not received */
7195 uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7196 bus->pcie_mailbox_int, 0, 0);
7197 int host_irq_disabled = dhdpcie_irq_disabled(bus);
7198 if ((intstatus) && (intstatus != (uint32)-1) &&
7199 (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
7200 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
7201 " host_irq_disabled=%d\n",
7202 __FUNCTION__, intstatus, host_irq_disabled));
7203 dhd_pcie_intr_count_dump(bus->dhd);
7204 dhd_print_tasklet_status(bus->dhd);
7205 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
7206 !bus->use_mailbox) {
7207 dhd_prot_process_ctrlbuf(bus->dhd);
7208 } else {
7209 dhdpcie_handle_mb_data(bus);
7210 }
7211 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
7212 /* Clear Interrupts */
7213 dhdpcie_bus_clear_intstatus(bus);
7214 }
7215 } /* bus->wait_for_d3_ack was 0 */
7216 #endif /* DHD_RECOVER_TIMEOUT */
7217
7218 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7219 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7220
7221 /* To allow threads that got pre-empted to complete.
7222 */
7223 while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
7224 (idle_retry < MAX_WKLK_IDLE_CHECK)) {
7225 OSL_SLEEP(1);
7226 idle_retry++;
7227 }
7228
7229 if (bus->wait_for_d3_ack) {
7230 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
7231 /* Got D3 Ack. Suspend the bus */
7232 if (active) {
7233 DHD_ERROR(("%s(): Suspend failed because of wakelock, "
7234 "restoring Dongle to D0\n", __FUNCTION__));
7235
7236 if (bus->dhd->dhd_watchdog_ms_backup) {
7237 DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
7238 __FUNCTION__));
7239 dhd_os_wd_timer(bus->dhd,
7240 bus->dhd->dhd_watchdog_ms_backup);
7241 }
7242
7243 /*
7244 * Dongle still thinks that it has to be in D3 state until
7245 * it gets a D0 Inform, but we are backing off from suspend.
7246 * Ensure that Dongle is brought back to D0.
7247 *
7248 * Bringing the Dongle back from the D3 Ack state to D0 is a
7249 * 2 step process: the D0 Inform is sent as a MB interrupt to
7250 * bring it out of the D3 Ack state, and the hostready doorbell
7251 * is rung afterwards. So both of these have to be sent.
7252 */
7253
7254 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
7255 bus->wait_for_d3_ack = 0;
7256
7257 DHD_SET_BUS_NOT_IN_LPS(bus);
7258 #ifdef PCIE_INB_DW
7259 if (INBAND_DW_ENAB(bus)) {
7260 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
7261 /* Since suspend has failed because a wakelock is held,
7262 * update the DS state to DW_DEVICE_HOST_WAKE_WAIT
7263 * so that the host sends the DS-ACK for the DS-REQ.
7264 */
7265 DHD_ERROR(("Suspend failed because a wakelock is held, "
7266 "set inband dw state to DW_DEVICE_HOST_WAKE_WAIT\n"));
7267 dhdpcie_bus_set_pcie_inband_dw_state(bus,
7268 DW_DEVICE_HOST_WAKE_WAIT);
7269 dhd_bus_ds_trace(bus, 0, TRUE,
7270 dhdpcie_bus_get_pcie_inband_dw_state(bus));
7271 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
7272 }
7273 bus->skip_ds_ack = FALSE;
7274 #endif /* PCIE_INB_DW */
7275 /* For Linux, macOS etc. (other than NDIS) enable back the dongle
7276 * interrupts using intmask and host interrupts
7277 * which were disabled in the dhdpcie_bus_isr()->
7278 * dhd_bus_handle_d3_ack().
7279 */
7280 /* Enable back interrupt using Intmask!! */
7281 dhdpcie_bus_intr_enable(bus);
7282 /* Defer enabling host irq after RPM suspend failure */
7283 if (!DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd)) {
7284 /* Enable back interrupt from Host side!! */
7285 if (dhdpcie_irq_disabled(bus)) {
7286 dhdpcie_enable_irq(bus);
7287 bus->resume_intr_enable_count++;
7288 }
7289 }
7290 if (bus->use_d0_inform) {
7291 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7292 dhdpcie_send_mb_data(bus,
7293 (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
7294 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7295 }
7296 /* ring doorbell 1 (hostready) */
7297 dhd_bus_hostready(bus);
7298
7299 DHD_GENERAL_LOCK(bus->dhd, flags);
7300 bus->dhd->busstate = DHD_BUS_DATA;
7301 /* resume all interface network queue. */
7302 dhd_bus_start_queue(bus);
7303 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7304 rc = BCME_ERROR;
7305 } else {
7306 /* Actual Suspend after no wakelock */
7307 /* At this time bus->bus_low_power_state will be
7308 * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
7309 * in dhd_bus_handle_d3_ack()
7310 */
7311 #ifdef PCIE_INB_DW
7312 if (INBAND_DW_ENAB(bus)) {
7313 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
7314 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
7315 DW_DEVICE_HOST_SLEEP_WAIT) {
7316 dhdpcie_bus_set_pcie_inband_dw_state(bus,
7317 DW_DEVICE_HOST_SLEEP);
7318 #ifdef PCIE_INB_DW
7319 dhd_bus_ds_trace(bus, 0, TRUE,
7320 dhdpcie_bus_get_pcie_inband_dw_state(bus));
7321 #else
7322 dhd_bus_ds_trace(bus, 0, TRUE);
7323 #endif /* PCIE_INB_DW */
7324 }
7325 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
7326 }
7327 #endif /* PCIE_INB_DW */
7328 if (bus->use_d0_inform &&
7329 (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
7330 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7331 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
7332 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7333 }
7334
7335 #if defined(BCMPCIE_OOB_HOST_WAKE)
7336 if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
7337 DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
7338 } else {
7339 dhdpcie_oob_intr_set(bus, TRUE);
7340 }
7341 #endif /* BCMPCIE_OOB_HOST_WAKE */
7342
7343 DHD_GENERAL_LOCK(bus->dhd, flags);
7344 /* The Host cannot process interrupts now, so disable them.
7345 * No need to disable the dongle INTR using intmask, as we are
7346 * already disabling INTRs from the DPC context after
7347 * getting D3_ACK in dhd_bus_handle_d3_ack.
7348 * Code may not look symmetric between Suspend and
7349 * Resume paths but this is done to close down the timing window
7350 * between DPC and suspend context and bus->bus_low_power_state
7351 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
7352 */
7353 bus->dhd->d3ackcnt_timeout = 0;
7354 bus->dhd->busstate = DHD_BUS_SUSPEND;
7355 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7356 dhdpcie_dump_resource(bus);
7357 rc = dhdpcie_pci_suspend_resume(bus, state);
7358 if (!rc) {
7359 bus->last_suspend_end_time = OSL_LOCALTIME_NS();
7360 }
7361 }
7362 } else if (timeleft == 0) { /* D3 ACK Timeout */
7363 #ifdef DHD_FW_COREDUMP
7364 uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
7365 #endif /* DHD_FW_COREDUMP */
7366
7367 /* check if the D3 ACK timeout is due to a scheduling issue */
7368 bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
7369 bus->isr_entry_time > bus->last_d3_inform_time &&
7370 dhd_bus_query_dpc_sched_errors(bus->dhd);
7371 bus->dhd->d3ack_timeout_occured = TRUE;
7372 /* The D3 Ack has timed out */
7373 bus->dhd->d3ackcnt_timeout++;
7374 DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
7375 __FUNCTION__, bus->dhd->is_sched_error ?
7376 " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
7377 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7378 /* XXX DHD triggers a Kernel panic if the resume-on-timeout occurs
7379 * due to tasklet or workqueue scheduling problems in the Linux Kernel.
7380 * The customer reports that it is hard to find any clue in the
7381 * host memory dump since the important tasklet or workqueue information
7382 * has already disappeared due to the latency of printing the timestamp
7383 * logs while debugging the scan timeout issue.
7384 * For this reason, the customer requests us to trigger a Kernel Panic
7385 * rather than taking a SOCRAM dump.
7386 */
7387 if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
7388 /* change g_assert_type to trigger Kernel panic */
7389 g_assert_type = 2;
7390 /* use ASSERT() to trigger panic */
7391 ASSERT(0);
7392 }
7393 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7394 DHD_SET_BUS_NOT_IN_LPS(bus);
7395
7396 DHD_GENERAL_LOCK(bus->dhd, flags);
7397 bus->dhd->busstate = DHD_BUS_DATA;
7398 /* resume all interface network queue. */
7399 dhd_bus_start_queue(bus);
7400 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7401 /* XXX : avoid multiple socram dumps from a dongle trap and
7402 * invalid PCIe bus accesses due to PCIe link down
7403 */
7404 if (!bus->dhd->dongle_trap_occured &&
7405 !bus->is_linkdown &&
7406 !bus->cto_triggered) {
7407 uint32 intstatus = 0;
7408
7409 /* Check if PCIe bus status is valid */
7410 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7411 bus->pcie_mailbox_int, 0, 0);
7412 if (intstatus == (uint32)-1) {
7413 /* Invalidate PCIe bus status */
7414 bus->is_linkdown = 1;
7415 }
7416
7417 dhd_bus_dump_console_buffer(bus);
7418 dhd_prot_debug_info_print(bus->dhd);
7419 #ifdef DHD_FW_COREDUMP
7420 if (cur_memdump_mode) {
7421 /* write core dump to file */
7422 bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
7423 dhdpcie_mem_dump(bus);
7424 }
7425 #endif /* DHD_FW_COREDUMP */
7426
7427 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
7428 __FUNCTION__));
7429 #ifdef SUPPORT_LINKDOWN_RECOVERY
7430 #ifdef CONFIG_ARCH_MSM
7431 bus->no_cfg_restore = 1;
7432 #endif /* CONFIG_ARCH_MSM */
7433 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7434 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
7435 }
7436 #if defined(DHD_ERPOM)
7437 dhd_schedule_reset(bus->dhd);
7438 #endif
7439 rc = -ETIMEDOUT;
7440 }
7441 } else {
7442 /* Resume */
7443 DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
7444 bus->last_resume_start_time = OSL_LOCALTIME_NS();
7445
7446 /**
7447 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
7448 * si_backplane_access (the function to read/write the backplane)
7449 * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
7450 * window being accessed is different from the window
7451 * pointed to by second_bar0win.
7452 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
7453 * invalidating second_bar0win after resume updates
7454 * PCIE2_BAR0_CORE2_WIN with the right window.
7455 */
7456 si_invalidate_second_bar0win(bus->sih);
7457 #if defined(BCMPCIE_OOB_HOST_WAKE)
7458 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
7459 #endif /* BCMPCIE_OOB_HOST_WAKE */
7460 #ifdef PCIE_INB_DW
7461 if (INBAND_DW_ENAB(bus)) {
7462 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
7463 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
7464 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
7465 #ifdef PCIE_INB_DW
7466 dhd_bus_ds_trace(bus, 0, TRUE,
7467 dhdpcie_bus_get_pcie_inband_dw_state(bus));
7468 #else
7469 dhd_bus_ds_trace(bus, 0, TRUE);
7470 #endif /* PCIE_INB_DW */
7471 }
7472 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
7473 }
7474 bus->skip_ds_ack = FALSE;
7475 #endif /* PCIE_INB_DW */
7476 rc = dhdpcie_pci_suspend_resume(bus, state);
7477 dhdpcie_dump_resource(bus);
7478
7479 /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
7480 DHD_SET_BUS_NOT_IN_LPS(bus);
7481
7482 if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
7483 if (bus->use_d0_inform) {
7484 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7485 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
7486 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7487 }
7488 /* ring doorbell 1 (hostready) */
7489 dhd_bus_hostready(bus);
7490 }
7491 DHD_GENERAL_LOCK(bus->dhd, flags);
7492 bus->dhd->busstate = DHD_BUS_DATA;
7493 #ifdef DHD_PCIE_RUNTIMEPM
7494 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
7495 bus->bus_wake = 1;
7496 OSL_SMP_WMB();
7497 wake_up(&bus->rpm_queue);
7498 }
7499 #endif /* DHD_PCIE_RUNTIMEPM */
7500 /* resume all interface network queue. */
7501 dhd_bus_start_queue(bus);
7502
7503 /* For Linux, macOS etc. (other than NDIS) enable back the dongle interrupts
7504 * using intmask and host interrupts
7505 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
7506 */
7507 dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
7508 /* Defer enabling host interrupt until RPM resume done */
7509 if (!DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
7510 if (dhdpcie_irq_disabled(bus)) {
7511 dhdpcie_enable_irq(bus);
7512 bus->resume_intr_enable_count++;
7513 }
7514 }
7515
7516 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7517
7518 if (bus->dhd->dhd_watchdog_ms_backup) {
7519 DHD_ERROR(("%s: Enabling wdtick after resume\n",
7520 __FUNCTION__));
7521 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
7522 }
7523
7524 bus->last_resume_end_time = OSL_LOCALTIME_NS();
7525
7526 /* Update TCM rd index for EDL ring */
7527 DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
7528
7529 }
7530 return rc;
7531 }
7532
7533 #define BUS_SUSPEND TRUE
7534 #define BUS_RESUME FALSE
7535 int dhd_bus_suspend(dhd_pub_t *dhd)
7536 {
7537 int ret;
7538 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7539 /* TODO: Check whether the arguments are correct */
7540 ret = dhdpcie_bus_suspend(dhd->bus, TRUE, BUS_SUSPEND);
7541 #else
7542 ret = dhdpcie_bus_suspend(dhd->bus, BUS_SUSPEND);
7543 #endif
7544 return ret;
7545 }
7546
7547 int dhd_bus_resume(dhd_pub_t *dhd, int stage)
7548 {
7549 int ret;
7550 BCM_REFERENCE(stage);
7551
7552 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7553 /* TODO: Check whether the arguments are correct */
7554 ret = dhdpcie_bus_suspend(dhd->bus, FALSE, BUS_RESUME);
7555 #else
7556 ret = dhdpcie_bus_suspend(dhd->bus, BUS_RESUME);
7557 #endif
7558 return ret;
7559 }
7560
7561 uint32
7562 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
7563 {
7564 ASSERT(bus && bus->sih);
7565 if (enable) {
7566 si_corereg(bus->sih, bus->sih->buscoreidx,
7567 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
7568 } else {
7569 si_corereg(bus->sih, bus->sih->buscoreidx,
7570 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
7571 }
7572 return 0;
7573 }
7574
7575 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
7576 uint32
7577 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
7578 {
7579 uint reg_val;
7580
7581 ASSERT(bus && bus->sih);
7582
7583 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
7584 0x1004);
7585 reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
7586 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
7587 reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
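/* e.g. with reg_val 0x00AB1234 and l1_entry_time 0x20 this yields
* (0x00AB1234 & 0xFF80FFFF) | 0x00200000 = 0x00A01234, i.e. only
* bits [22:16] change.
*/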
7588 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
7589 reg_val);
7590
7591 return 0;
7592 }
7593
7594 static uint32
7595 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
7596 {
7597 uint16 chipid = si_chipid(bus->sih);
7598 /*
7599 * XXX: WAR for CRWLDOT11M-3011
7600 * program the DMA descriptor Buffer length as the expected frame length
7601 * + 8 bytes extra for corerev 82 when buffer length % 128 is equal to 4
7602 */
7603 if ((chipid == BCM4375_CHIP_ID ||
7604 chipid == BCM4362_CHIP_ID ||
7605 chipid == BCM4377_CHIP_ID ||
7606 chipid == BCM43751_CHIP_ID ||
7607 chipid == BCM43752_CHIP_ID) &&
7608 (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
7609 len += 8;
7610 }
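/* e.g. on a BCM4375 a requested d11 loopback length of 2048 bytes is
* programmed as 2056 bytes here.
*/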
7611 DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
7612 return len;
7613 }
7614
7615 /** Transfers bytes from host to dongle and to host again using DMA */
7616 static int
7617 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
7618 uint32 len, uint32 srcdelay, uint32 destdelay,
7619 uint32 d11_lpbk, uint32 core_num, uint32 wait)
7620 {
7621 int ret = 0;
7622
7623 if (bus->dhd == NULL) {
7624 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
7625 return BCME_ERROR;
7626 }
7627 if (bus->dhd->prot == NULL) {
7628 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
7629 return BCME_ERROR;
7630 }
7631 if (bus->dhd->busstate != DHD_BUS_DATA) {
7632 DHD_ERROR(("%s: bus is not in a ready state to do LPBK\n", __FUNCTION__));
7633 return BCME_ERROR;
7634 }
7635
7636 if (len < 5 || len > 4194296) {
7637 DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
7638 return BCME_ERROR;
7639 }
7640
7641 len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
7642
7643 bus->dmaxfer_complete = FALSE;
7644 ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
7645 d11_lpbk, core_num);
7646 if (ret != BCME_OK || !wait) {
7647 DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
7648 ret, wait));
7649 } else {
7650 ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
7651 if (ret < 0)
7652 ret = BCME_NOTREADY;
7653 }
7654
7655 return ret;
7656
7657 }
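/* A minimal usage sketch (hypothetical caller): a blocking 2048-byte
* loopback request with no source/destination delay and d11_lpbk/core_num
* left at 0:
*
*	ret = dhdpcie_bus_dmaxfer_req(bus, 2048, 0, 0, 0, 0, TRUE);
*
* With wait set to TRUE the call returns only after dhd_os_dmaxfer_wait()
* observes bus->dmaxfer_complete, or an error/timeout is reported.
*/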
7658
7659 bool
7660 dhd_bus_is_multibp_capable(struct dhd_bus *bus)
7661 {
7662 return MULTIBP_CAP(bus->sih);
7663 }
7664
7665 #define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
7666 #define PCIE_REV_FOR_4378B0 68
7667
7668 static int
7669 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
7670 {
7671 int bcmerror = 0;
7672 volatile uint32 *cr4_regs;
7673 bool do_flr;
7674 bool do_wr_flops = TRUE;
7675
7676 if (!bus->sih) {
7677 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
7678 return BCME_ERROR;
7679 }
7680
7681 do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
7682 (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
7683
7684 /*
7685 * Jira SWWLAN-214966: 4378B0 BToverPCIe: fails to download firmware
7686 * with "insmod dhd.ko firmware_path=rtecdc.bin nvram_path=nvram.txt" format
7687 * CTO is seen during autoload case.
7688 * Need to assert PD1 power req during ARM out of reset.
7689 * And doing FLR after this would conflict as FLR resets PCIe enum space.
7690 */
7691 if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7692 dhd_bus_pcie_pwr_req(bus);
7693 }
7694
7695 /* To enter download state, disable ARM and reset SOCRAM.
7696 * To exit download state, simply reset ARM (default is RAM boot).
7697 */
7698 if (enter) {
7699
7700 /* Make sure BAR1 maps to backplane address 0 */
7701 dhdpcie_setbar1win(bus, 0x00000000);
7702 bus->alp_only = TRUE;
7703 #ifdef GDB_PROXY
7704 bus->gdb_proxy_access_enabled = TRUE;
7705 bus->gdb_proxy_bootloader_mode = FALSE;
7706 #endif /* GDB_PROXY */
7707
7708 /* some chips (e.g. 43602) have two ARM cores, and the CR4 receives the firmware. */
7709 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
7710
7711 if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
7712 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
7713 !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
7714 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
7715 bcmerror = BCME_ERROR;
7716 goto fail;
7717 }
7718
7719 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
7720 /* Halt ARM & remove reset */
7721 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7722 if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
7723 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
7724 bcmerror = BCME_ERROR;
7725 goto fail;
7726 }
7727 si_core_reset(bus->sih, 0, 0);
7728 /* reset last 4 bytes of RAM address, to be used for shared area */
7729 dhdpcie_init_shared_addr(bus);
7730 } else if (cr4_regs == NULL) { /* no CR4 present on chip */
7731 si_core_disable(bus->sih, 0);
7732
7733 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7734 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
7735 bcmerror = BCME_ERROR;
7736 goto fail;
7737 }
7738
7739 si_core_reset(bus->sih, 0, 0);
7740
7741 /* Clear the top bit of memory */
7742 if (bus->ramsize) {
7743 uint32 zeros = 0;
7744 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
7745 (uint8*)&zeros, 4) < 0) {
7746 bcmerror = BCME_ERROR;
7747 goto fail;
7748 }
7749 }
7750 } else {
7751 /* For CR4,
7752 * Halt ARM
7753 * Remove ARM reset
7754 * Read RAM base address [0x18_0000]
7755 * [next] Download firmware
7756 * [done at else] Populate the reset vector
7757 * [done at else] Remove ARM halt
7758 */
7759 /* Halt ARM & remove reset */
7760 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7761 if (BCM43602_CHIP(bus->sih->chip)) {
7762 /* XXX CRWLARMCR4-53 43602a0 HW bug when banks are powered down */
7763 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
7764 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
7765 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
7766 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
7767 }
7768 /* reset last 4 bytes of RAM address, to be used for shared area */
7769 dhdpcie_init_shared_addr(bus);
7770 }
7771 } else {
7772 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
7773 /* write vars */
7774 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
7775 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7776 goto fail;
7777 }
7778 /* write random numbers to sysmem for the purpose of
7779 * randomizing heap address space.
7780 */
7781 if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7782 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7783 __FUNCTION__));
7784 goto fail;
7785 }
7786
7787 #if defined(FW_SIGNATURE)
7788 if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops))
7789 != BCME_OK) {
7790 goto fail;
7791 }
7792 #endif /* FW_SIGNATURE */
7793
7794 if (do_wr_flops) {
7795 /* switch back to arm core again */
7796 if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
7797 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n",
7798 __FUNCTION__));
7799 bcmerror = BCME_ERROR;
7800 goto fail;
7801 }
7802 /* write address 0 with reset instruction */
7803 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7804 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7805 /* now remove reset and halt and continue to run CA7 */
7806 }
7807 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
7808 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7809 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
7810 bcmerror = BCME_ERROR;
7811 goto fail;
7812 }
7813
7814 if (!si_iscoreup(bus->sih)) {
7815 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
7816 bcmerror = BCME_ERROR;
7817 goto fail;
7818 }
7819
7820 /* Enable remap before ARM reset but after vars.
7821 * No backplane access in remap mode
7822 */
7823 if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
7824 !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
7825 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
7826 bcmerror = BCME_ERROR;
7827 goto fail;
7828 }
7829
7830 /* XXX Change standby configuration here if necessary */
7831
7832 if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
7833 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
7834 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
7835 bcmerror = BCME_ERROR;
7836 goto fail;
7837 }
7838 } else {
7839 if (BCM43602_CHIP(bus->sih->chip)) {
7840 /* Firmware crashes on SOCSRAM access when core is in reset */
7841 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7842 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
7843 __FUNCTION__));
7844 bcmerror = BCME_ERROR;
7845 goto fail;
7846 }
7847 si_core_reset(bus->sih, 0, 0);
7848 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
7849 }
7850
7851 /* write vars */
7852 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
7853 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7854 goto fail;
7855 }
7856
7857 /* write a random number rTLV to TCM for the purpose of
7858 * randomizing heap address space.
7859 */
7860 if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7861 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7862 __FUNCTION__));
7863 goto fail;
7864 }
7865
7866 #if defined(FW_SIGNATURE)
7867 if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops))
7868 != BCME_OK) {
7869 goto fail;
7870 }
7871 #endif /* FW_SIGNATURE */
7872 if (do_wr_flops) {
7873 /* switch back to arm core again */
7874 if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
7875 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n",
7876 __FUNCTION__));
7877 bcmerror = BCME_ERROR;
7878 goto fail;
7879 }
7880
7881 /* write address 0 with reset instruction */
7882 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7883 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7884
7885 if (bcmerror == BCME_OK) {
7886 uint32 tmp;
7887
7888 bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
7889 (uint8 *)&tmp, sizeof(tmp));
7890
7891 if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
7892 DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
7893 __FUNCTION__, bus->resetinstr));
7894 DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
7895 __FUNCTION__, tmp));
7896 bcmerror = BCME_ERROR;
7897 goto fail;
7898 }
7899 }
7900 /* now remove reset and halt and continue to run CR4 */
7901 }
7902 }
7903
7904 bus->arm_oor_time = OSL_LOCALTIME_NS();
7905 si_core_reset(bus->sih, 0, 0);
7906
7907 /* Allow HT Clock now that the ARM is running. */
7908 bus->alp_only = FALSE;
7909
7910 bus->dhd->busstate = DHD_BUS_LOAD;
7911 }
7912
7913 fail:
7914 /* Always return to PCIE core */
7915 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
7916
7917 if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7918 dhd_bus_pcie_pwr_req_clear(bus);
7919 }
7920
7921 return bcmerror;
7922 } /* dhdpcie_bus_download_state */
7923
7924 #if defined(FW_SIGNATURE)
7925
7926 static int
7927 dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write)
7928 {
7929 int bcmerror = BCME_OK;
7930
7931 DHD_INFO(("FWSIG: bl=%s,%x fw=%x,%u sig=%s,%x,%u"
7932 " stat=%x,%u ram=%x,%x\n",
7933 bus->bootloader_filename, bus->bootloader_addr,
7934 bus->fw_download_addr, bus->fw_download_len,
7935 bus->fwsig_filename, bus->fwsig_download_addr,
7936 bus->fwsig_download_len,
7937 bus->fwstat_download_addr, bus->fwstat_download_len,
7938 bus->dongle_ram_base, bus->ramtop_addr));
7939
7940 if (bus->fwsig_filename[0] == 0) {
7941 DHD_INFO(("%s: missing signature file\n", __FUNCTION__));
7942 goto exit;
7943 }
7944
7945 /* Write RAM Bootloader to TCM if requested */
7946 if ((bcmerror = dhdpcie_bus_download_ram_bootloader(bus))
7947 != BCME_OK) {
7948 DHD_ERROR(("%s: could not write RAM BL to TCM, err %d\n",
7949 __FUNCTION__, bcmerror));
7950 goto exit;
7951 }
7952
7953 /* Write FW signature rTLV to TCM */
7954 if ((bcmerror = dhdpcie_bus_write_fwsig(bus, bus->fwsig_filename,
7955 NULL))) {
7956 DHD_ERROR(("%s: could not write FWsig to TCM, err %d\n",
7957 __FUNCTION__, bcmerror));
7958 goto exit;
7959 }
7960
7961 /* Write FW signature verification status rTLV to TCM */
7962 if ((bcmerror = dhdpcie_bus_write_fws_status(bus)) != BCME_OK) {
7963 DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
7964 __FUNCTION__, bcmerror));
7965 goto exit;
7966 }
7967
7968 /* Write FW memory map rTLV to TCM */
7969 if ((bcmerror = dhdpcie_bus_write_fws_mem_info(bus)) != BCME_OK) {
7970 DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
7971 __FUNCTION__, bcmerror));
7972 goto exit;
7973 }
7974
7975 /* Write an end-of-TLVs marker to TCM */
7976 if ((bcmerror = dhdpcie_download_rtlv_end(bus)) != BCME_OK) {
7977 DHD_ERROR(("%s: could not write rTLV-end marker to TCM, err %d\n",
7978 __FUNCTION__, bcmerror));
7979 goto exit;
7980 }
7981
7982 /* In case of BL RAM, do write flops */
7983 if (bus->bootloader_filename[0] != 0) {
7984 *do_write = TRUE;
7985 } else {
7986 *do_write = FALSE;
7987 }
7988
7989 exit:
7990 return bcmerror;
7991 }
7992
7993 /* Download a reversed-TLV to the top of dongle RAM without overlapping any existing rTLVs */
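/* The resulting layout at the top of dongle RAM (growing downwards from the
* previous bus->ramtop_addr) is:
*
*	+------------------------------+  <- old bus->ramtop_addr
*	| type     (dngl_rtlv_type_t)  |
*	+------------------------------+
*	| checksum | size   (4 bytes)  |
*	+------------------------------+
*	| value    (dest_size bytes)   |
*	+------------------------------+  <- new bus->ramtop_addr (= dest_addr)
*/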
7994 static int
7995 dhdpcie_download_rtlv(dhd_bus_t *bus, dngl_rtlv_type_t type, dngl_rtlv_len_t len, uint8 *value)
7996 {
7997 int bcmerror = BCME_OK;
7998 #ifdef DHD_DEBUG
7999 uint8 *readback_buf = NULL;
8000 uint32 readback_val = 0;
8001 #endif /* DHD_DEBUG */
8002 uint32 dest_addr = 0; /* dongle RAM dest address */
8003 uint32 dest_size = 0; /* dongle RAM dest size */
8004 uint32 dest_raw_size = 0; /* dest size with added checksum */
8005
8006 /* Calculate the destination dongle RAM address and size */
8007 dest_size = ROUNDUP(len, 4);
8008 dest_addr = bus->ramtop_addr - sizeof(dngl_rtlv_type_t) - sizeof(dngl_rtlv_len_t)
8009 - dest_size;
8010 bus->ramtop_addr = dest_addr;
8011
8012 /* Create the rTLV size field. This consists of 2 16-bit fields:
8013 * The lower 16 bits is the size. The higher 16 bits is a checksum
8014 * consisting of the size with all bits reversed.
8015 * +-------------+-------------+
8016 * | checksum | size |
8017 * +-------------+-------------+
8018 * High 16 bits Low 16 bits
8019 */
8020 dest_raw_size = (~dest_size << 16) | (dest_size & 0x0000FFFF);
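/* e.g. a 20-byte (0x14) payload gives dest_raw_size =
* (~0x14 << 16) | 0x14 = 0xFFEB0014.
*/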
8021
8022 /* Write the value block */
8023 if (dest_size > 0) {
8024 bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr, value, dest_size);
8025 if (bcmerror) {
8026 DHD_ERROR(("%s: error %d on writing %d membytes to 0x%08x\n",
8027 __FUNCTION__, bcmerror, dest_size, dest_addr));
8028 goto exit;
8029 }
8030 }
8031
8032 /* Write the length word */
8033 bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr + dest_size,
8034 (uint8*)&dest_raw_size, sizeof(dngl_rtlv_len_t));
8035
8036 /* Write the type word */
8037 bcmerror = dhdpcie_bus_membytes(bus, TRUE,
8038 dest_addr + dest_size + sizeof(dngl_rtlv_len_t),
8039 (uint8*)&type, sizeof(dngl_rtlv_type_t));
8040
8041 #ifdef DHD_DEBUG
8042 /* Read back and compare the downloaded data */
8043 if (dest_size > 0) {
8044 readback_buf = (uint8*)MALLOC(bus->dhd->osh, dest_size);
8045 if (!readback_buf) {
8046 bcmerror = BCME_NOMEM;
8047 goto exit;
8048 }
8049 memset(readback_buf, 0xaa, dest_size);
8050 bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr, readback_buf, dest_size);
8051 if (bcmerror) {
8052 DHD_ERROR(("%s: readback error %d, %d bytes from 0x%08x\n",
8053 __FUNCTION__, bcmerror, dest_size, dest_addr));
8054 goto exit;
8055 }
8056 if (memcmp(value, readback_buf, dest_size) != 0) {
8057 DHD_ERROR(("%s: Downloaded data mismatch.\n", __FUNCTION__));
8058 bcmerror = BCME_ERROR;
8059 goto exit;
8060 } else {
8061 DHD_ERROR(("Download and compare of TLV 0x%x succeeded"
8062 " (size %u, addr %x).\n", type, dest_size, dest_addr));
8063 }
8064 }
8065
8066 /* Read back and compare the downloaded len field */
8067 bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr + dest_size,
8068 (uint8*)&readback_val, sizeof(dngl_rtlv_len_t));
8069 if (!bcmerror) {
8070 if (readback_val != dest_raw_size) {
8071 bcmerror = BCME_BADLEN;
8072 }
8073 }
8074 if (bcmerror) {
8075 DHD_ERROR(("%s: Downloaded len error %d\n", __FUNCTION__, bcmerror));
8076 goto exit;
8077 }
8078
8079 /* Read back and compare the downloaded type field */
8080 bcmerror = dhdpcie_bus_membytes(bus, FALSE,
8081 dest_addr + dest_size + sizeof(dngl_rtlv_len_t),
8082 (uint8*)&readback_val, sizeof(dngl_rtlv_type_t));
8083 if (!bcmerror) {
8084 if (readback_val != type) {
8085 bcmerror = BCME_BADOPTION;
8086 }
8087 }
8088 if (bcmerror) {
8089 DHD_ERROR(("%s: Downloaded type error %d\n", __FUNCTION__, bcmerror));
8090 goto exit;
8091 }
8092 #endif /* DHD_DEBUG */
8093
8094 bus->ramtop_addr = dest_addr;
8095
8096 exit:
8097 #ifdef DHD_DEBUG
8098 if (readback_buf) {
8099 MFREE(bus->dhd->osh, readback_buf, dest_size);
8100 }
8101 #endif /* DHD_DEBUG */
8102
8103 return bcmerror;
8104 } /* dhdpcie_download_rtlv */
8105
8106 /* Download a reversed-TLV END marker to the top of dongle RAM */
8107 static int
8108 dhdpcie_download_rtlv_end(dhd_bus_t *bus)
8109 {
8110 return dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_END_MARKER, 0, NULL);
8111 }
8112
8113 /* Write the FW signature verification status to dongle memory */
8114 static int
8115 dhdpcie_bus_write_fws_status(dhd_bus_t *bus)
8116 {
8117 bcm_fwsign_verif_status_t vstatus;
8118 int ret;
8119
8120 bzero(&vstatus, sizeof(vstatus));
8121
8122 ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_STATUS, sizeof(vstatus),
8123 (uint8*)&vstatus);
8124 bus->fwstat_download_addr = bus->ramtop_addr;
8125 bus->fwstat_download_len = sizeof(vstatus);
8126
8127 return ret;
8128 } /* dhdpcie_bus_write_fws_status */
8129
8130 /* Write the FW signature verification memory map to dongle memory */
8131 static int
8132 dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus)
8133 {
8134 bcm_fwsign_mem_info_t memmap;
8135 int ret;
8136
8137 bzero(&memmap, sizeof(memmap));
8138 memmap.firmware.start = bus->fw_download_addr;
8139 memmap.firmware.end = memmap.firmware.start + bus->fw_download_len;
8140 memmap.heap.start = ROUNDUP(memmap.firmware.end + BL_HEAP_START_GAP_SIZE, 4);
8141 memmap.heap.end = memmap.heap.start + BL_HEAP_SIZE;
8142 memmap.signature.start = bus->fwsig_download_addr;
8143 memmap.signature.end = memmap.signature.start + bus->fwsig_download_len;
8144 memmap.vstatus.start = bus->fwstat_download_addr;
8145 memmap.vstatus.end = memmap.vstatus.start + bus->fwstat_download_len;
8146 DHD_INFO(("%s: mem_info: fw=%x-%x heap=%x-%x sig=%x-%x vst=%x-%x res=%x\n",
8147 __FUNCTION__,
8148 memmap.firmware.start, memmap.firmware.end,
8149 memmap.heap.start, memmap.heap.end,
8150 memmap.signature.start, memmap.signature.end,
8151 memmap.vstatus.start, memmap.vstatus.end,
8152 memmap.reset_vec.start));
8153
8154 ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_MEM_MAP, sizeof(memmap),
8155 (uint8*)&memmap);
8156 bus->fw_memmap_download_addr = bus->ramtop_addr;
8157 bus->fw_memmap_download_len = sizeof(memmap);
8158
8159 return ret;
8160 } /* dhdpcie_bus_write_fws_mem_info */
8161
8162 /* Download a bootloader image to dongle RAM */
8163 static int
8164 dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus)
8165 {
8166 int ret = BCME_OK;
8167 uint32 dongle_ram_base_save;
8168
8169 DHD_INFO(("download_bloader: %s,0x%x. ramtop=0x%x\n",
8170 bus->bootloader_filename, bus->bootloader_addr, bus->ramtop_addr));
8171 if (bus->bootloader_filename[0] == '\0') {
8172 return ret;
8173 }
8174
8175 /* Save ram base */
8176 dongle_ram_base_save = bus->dongle_ram_base;
8177
8178 /* Set ram base to bootloader download start address */
8179 bus->dongle_ram_base = bus->bootloader_addr;
8180
8181 /* Download the bootloader image to TCM */
8182 ret = dhdpcie_download_code_file(bus, bus->bootloader_filename);
8183
8184 /* Restore ram base */
8185 bus->dongle_ram_base = dongle_ram_base_save;
8186
8187 return ret;
8188 } /* dhdpcie_bus_download_ram_bootloader */
8189
8190 /* Save the FW download address and size */
8191 static int
8192 dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
8193 uint32 download_size, const char *signature_fname,
8194 const char *bloader_fname, uint32 bloader_download_addr)
8195 {
8196 bus->fw_download_len = download_size;
8197 bus->fw_download_addr = download_addr;
8198 strlcpy(bus->fwsig_filename, signature_fname, sizeof(bus->fwsig_filename));
8199 strlcpy(bus->bootloader_filename, bloader_fname, sizeof(bus->bootloader_filename));
8200 bus->bootloader_addr = bloader_download_addr;
8201 #ifdef GDB_PROXY
8202 /* GDB proxy bootloader mode - set if a signature file is specified (i.e.
8203 * a bootloader is used), but no bootloader image is specified (i.e. the ROM
8204 * bootloader is used).
8205 * Bootloader mode is significant only for preattachment debugging
8206 * of chips in which the debug cell can't be initialized before the ARM CPU
8207 * starts.
8208 */
8209 bus->gdb_proxy_bootloader_mode =
8210 (bus->fwsig_filename[0] != 0) && (bus->bootloader_filename[0] == 0);
8211 #endif /* GDB_PROXY */
8212 return BCME_OK;
8213 } /* dhdpcie_bus_save_download_info */
8214
8215 /* Read a small binary file and write it to the specified socram dest address */
8216 static int
8217 dhdpcie_download_sig_file(dhd_bus_t *bus, char *path, uint32 type)
8218 {
8219 int bcmerror = BCME_OK;
8220 void *filep = NULL;
8221 uint8 *srcbuf = NULL;
8222 int srcsize = 0;
8223 int len;
8224 uint32 dest_size = 0; /* dongle RAM dest size */
8225
8226 if (path == NULL || path[0] == '\0') {
8227 DHD_ERROR(("%s: no file\n", __FUNCTION__));
8228 bcmerror = BCME_NOTFOUND;
8229 goto exit;
8230 }
8231
8232 /* Open file, get size */
8233 filep = dhd_os_open_image1(bus->dhd, path);
8234 if (filep == NULL) {
8235 DHD_ERROR(("%s: error opening file %s\n", __FUNCTION__, path));
8236 bcmerror = BCME_NOTFOUND;
8237 goto exit;
8238 }
8239 srcsize = dhd_os_get_image_size(filep);
8240 if (srcsize <= 0 || srcsize > MEMBLOCK) {
8241 DHD_ERROR(("%s: invalid fwsig size %u\n", __FUNCTION__, srcsize));
8242 bcmerror = BCME_BUFTOOSHORT;
8243 goto exit;
8244 }
8245 dest_size = ROUNDUP(srcsize, 4);
8246
8247 /* Allocate src buffer, read in the entire file */
8248 srcbuf = (uint8 *)MALLOCZ(bus->dhd->osh, dest_size);
8249 if (!srcbuf) {
8250 bcmerror = BCME_NOMEM;
8251 goto exit;
8252 }
8253 len = dhd_os_get_image_block(srcbuf, srcsize, filep);
8254 if (len != srcsize) {
8255 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
8256 bcmerror = BCME_BADLEN;
8257 goto exit;
8258 }
8259
8260 /* Write the src buffer as a rTLV to the dongle */
8261 bcmerror = dhdpcie_download_rtlv(bus, type, dest_size, srcbuf);
8262
8263 bus->fwsig_download_addr = bus->ramtop_addr;
8264 bus->fwsig_download_len = dest_size;
8265
8266 exit:
8267 if (filep) {
8268 dhd_os_close_image1(bus->dhd, filep);
8269 }
8270 if (srcbuf) {
8271 MFREE(bus->dhd->osh, srcbuf, dest_size);
8272 }
8273
8274 return bcmerror;
8275 } /* dhdpcie_download_sig_file */
8276
8277 static int
8278 dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path)
8279 {
8280 int bcmerror = BCME_OK;
8281
8282 /* Download the FW signature file to the chip */
8283 bcmerror = dhdpcie_download_sig_file(bus, fwsig_path, DNGL_RTLV_TYPE_FW_SIGNATURE);
8284 if (bcmerror) {
8285 goto exit;
8286 }
8287
8288 exit:
8289 if (bcmerror) {
8290 DHD_ERROR(("%s: error %d\n", __FUNCTION__, bcmerror));
8291 }
8292 return bcmerror;
8293 } /* dhdpcie_bus_write_fwsig */
8294
8295 /* Dump secure firmware status. */
8296 static int
8297 dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
8298 {
8299 bcm_fwsign_verif_status_t status;
8300 bcm_fwsign_mem_info_t meminfo;
8301 int err = BCME_OK;
8302
8303 bzero(&status, sizeof(status));
8304 if (bus->fwstat_download_addr != 0) {
8305 err = dhdpcie_bus_membytes(bus, FALSE, bus->fwstat_download_addr,
8306 (uint8 *)&status, sizeof(status));
8307 if (err != BCME_OK) {
8308 DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
8309 __FUNCTION__, err, sizeof(status), bus->fwstat_download_addr));
8310 return (err);
8311 }
8312 }
8313
8314 bzero(&meminfo, sizeof(meminfo));
8315 if (bus->fw_memmap_download_addr != 0) {
8316 err = dhdpcie_bus_membytes(bus, FALSE, bus->fw_memmap_download_addr,
8317 (uint8 *)&meminfo, sizeof(meminfo));
8318 if (err != BCME_OK) {
8319 DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
8320 __FUNCTION__, err, sizeof(meminfo), bus->fw_memmap_download_addr));
8321 return (err);
8322 }
8323 }
8324
8325 bcm_bprintf(strbuf, "Firmware signing\nSignature: (%08x) len (%d)\n",
8326 bus->fwsig_download_addr, bus->fwsig_download_len);
8327
8328 bcm_bprintf(strbuf,
8329 "Verification status: (%08x)\n"
8330 "\tstatus: %d\n"
8331 "\tstate: %u\n"
8332 "\talloc_bytes: %u\n"
8333 "\tmax_alloc_bytes: %u\n"
8334 "\ttotal_alloc_bytes: %u\n"
8335 "\ttotal_freed_bytes: %u\n"
8336 "\tnum_allocs: %u\n"
8337 "\tmax_allocs: %u\n"
8338 "\tmax_alloc_size: %u\n"
8339 "\talloc_failures: %u\n",
8340 bus->fwstat_download_addr,
8341 status.status,
8342 status.state,
8343 status.alloc_bytes,
8344 status.max_alloc_bytes,
8345 status.total_alloc_bytes,
8346 status.total_freed_bytes,
8347 status.num_allocs,
8348 status.max_allocs,
8349 status.max_alloc_size,
8350 status.alloc_failures);
8351
8352 bcm_bprintf(strbuf,
8353 "Memory info: (%08x)\n"
8354 "\tfw %08x-%08x\n\theap %08x-%08x\n\tsig %08x-%08x\n\tvst %08x-%08x\n",
8355 bus->fw_memmap_download_addr,
8356 meminfo.firmware.start, meminfo.firmware.end,
8357 meminfo.heap.start, meminfo.heap.end,
8358 meminfo.signature.start, meminfo.signature.end,
8359 meminfo.vstatus.start, meminfo.vstatus.end);
8360
8361 return (err);
8362 }
8363 #endif /* FW_SIGNATURE */
8364
8365 /* Write nvram data to the top of dongle RAM, ending with a size in # of 32-bit words */
8366 static int
8367 dhdpcie_bus_write_vars(dhd_bus_t *bus)
8368 {
8369 int bcmerror = 0;
8370 uint32 varsize, phys_size;
8371 uint32 varaddr;
8372 uint8 *vbuffer;
8373 uint32 varsizew;
8374 #ifdef DHD_DEBUG
8375 uint8 *nvram_ularray;
8376 #endif /* DHD_DEBUG */
8377
8378 /* Even if there are no vars to be written, we still need to set the ramsize. */
8379 varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
8380 varaddr = (bus->ramsize - 4) - varsize;
8381
8382 varaddr += bus->dongle_ram_base;
8383 bus->ramtop_addr = varaddr;
8384
8385 if (bus->vars) {
8386
8387 /* XXX In case the controller has trouble with odd bytes... */
8388 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
8389 if (!vbuffer)
8390 return BCME_NOMEM;
8391
8392 bzero(vbuffer, varsize);
8393 bcopy(bus->vars, vbuffer, bus->varsz);
8394 /* Write the vars list */
8395 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
8396
8397 /* Implement read back and verify later */
8398 #ifdef DHD_DEBUG
8399 /* Verify NVRAM bytes */
8400 DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
8401 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
8402 if (!nvram_ularray) {
8403 MFREE(bus->dhd->osh, vbuffer, varsize);
8404 return BCME_NOMEM;
8405 }
8406
8407 /* Upload image to verify downloaded contents. */
8408 memset(nvram_ularray, 0xaa, varsize);
8409
8410 /* Read the vars list to temp buffer for comparison */
8411 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
8412 if (bcmerror) {
8413 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
8414 __FUNCTION__, bcmerror, varsize, varaddr));
8415 }
8416
8417 /* Compare the org NVRAM with the one read from RAM */
8418 if (memcmp(vbuffer, nvram_ularray, varsize)) {
8419 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
8420 prhex("nvram file", vbuffer, varsize);
8421 prhex("downloaded nvram", nvram_ularray, varsize);
8422 MFREE(bus->dhd->osh, nvram_ularray, varsize);
8423 MFREE(bus->dhd->osh, vbuffer, varsize);
8424 return BCME_ERROR;
8425 } else
8426 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
8427 __FUNCTION__));
8428
8429 MFREE(bus->dhd->osh, nvram_ularray, varsize);
8430 #endif /* DHD_DEBUG */
8431
8432 MFREE(bus->dhd->osh, vbuffer, varsize);
8433 }
8434
8435 phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
8436
8437 phys_size += bus->dongle_ram_base;
8438
8439 /* adjust to the user specified RAM */
8440 DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
8441 phys_size, bus->ramsize));
8442 DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
8443 varaddr, varsize));
8444 varsize = ((phys_size - 4) - varaddr);
8445
8446 /*
8447 * Determine the length token:
8448 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
8449 */
8450 if (bcmerror) {
8451 varsizew = 0;
8452 bus->nvram_csm = varsizew;
8453 } else {
8454 varsizew = varsize / 4;
8455 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
8456 bus->nvram_csm = varsizew;
8457 varsizew = htol32(varsizew);
8458 }
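/* e.g. if varsize works out to 2048 bytes (512 words), the length token
* written to the last word of RAM is (~0x200 << 16) | 0x200 = 0xFDFF0200.
*/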
8459
8460 DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
8461
8462 /* Write the length token to the last word */
8463 bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
8464 (uint8*)&varsizew, 4);
8465
8466 return bcmerror;
8467 } /* dhdpcie_bus_write_vars */
8468
8469 int
8470 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
8471 {
8472 int bcmerror = BCME_OK;
8473 #ifdef KEEP_JP_REGREV
8474 /* XXX Needed by customer's request */
8475 char *tmpbuf;
8476 uint tmpidx;
8477 #endif /* KEEP_JP_REGREV */
8478 #ifdef GDB_PROXY
8479 const char nodeadman_record[] = "deadman_to=0";
8480 #endif /* GDB_PROXY */
8481
8482 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8483
8484 if (!len) {
8485 bcmerror = BCME_BUFTOOSHORT;
8486 goto err;
8487 }
8488
8489 /* Free the old ones and replace with passed variables */
8490 if (bus->vars)
8491 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
8492 #ifdef GDB_PROXY
8493 if (bus->dhd->gdb_proxy_nodeadman) {
8494 len += sizeof(nodeadman_record);
8495 }
8496 #endif /* GDB_PROXY */
8497
8498 bus->vars = MALLOC(bus->dhd->osh, len);
8499 bus->varsz = bus->vars ? len : 0;
8500 if (bus->vars == NULL) {
8501 bcmerror = BCME_NOMEM;
8502 goto err;
8503 }
8504
8505 /* Copy the passed variables, which should include the terminating double-null */
8506 bcopy(arg, bus->vars, bus->varsz);
8507 #ifdef GDB_PROXY
8508 if (bus->dhd->gdb_proxy_nodeadman &&
8509 !replace_nvram_variable(bus->vars, bus->varsz, nodeadman_record, NULL))
8510 {
8511 bcmerror = BCME_NOMEM;
8512 goto err;
8513 }
8514 #endif /* GDB_PROXY */
8515
8516 /* Re-Calculate htclkratio only for QT, for FPGA it is fixed at 30 */
8517
8518 #ifdef DHD_USE_SINGLE_NVRAM_FILE
8519 /* XXX Change the default country code only for MFG firmware */
8520 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
8521 char *sp = NULL;
8522 char *ep = NULL;
8523 int i;
8524 char tag[2][8] = {"ccode=", "regrev="};
8525
8526 /* Find ccode and regrev info */
8527 for (i = 0; i < 2; i++) {
8528 sp = strnstr(bus->vars, tag[i], bus->varsz);
8529 if (!sp) {
8530 DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
8531 __FUNCTION__, bus->nv_path));
8532 bcmerror = BCME_ERROR;
8533 goto err;
8534 }
8535 sp = strchr(sp, '=');
8536 ep = strchr(sp, '\0');
8537 /* We assume that the string lengths of both the ccode and
8538 * regrev values do not exceed WLC_CNTRY_BUF_SZ
8539 */
8540 if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
8541 sp++;
8542 while (*sp != '\0') {
8543 DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
8544 __FUNCTION__, tag[i], *sp));
8545 *sp++ = '0';
8546 }
8547 } else {
8548 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
8549 __FUNCTION__, tag[i]));
8550 bcmerror = BCME_ERROR;
8551 goto err;
8552 }
8553 }
8554 }
8555 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
8556
8557 #ifdef KEEP_JP_REGREV
8558 /* XXX Needed by customer's request */
8559 #ifdef DHD_USE_SINGLE_NVRAM_FILE
8560 if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
8561 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
8562 {
8563 char *pos = NULL;
8564 tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
8565 if (tmpbuf == NULL) {
8566 goto err;
8567 }
8568 memcpy(tmpbuf, bus->vars, bus->varsz);
8569 for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
8570 if (tmpbuf[tmpidx] == 0) {
8571 tmpbuf[tmpidx] = '\n';
8572 }
8573 }
8574 bus->dhd->vars_ccode[0] = 0;
8575 bus->dhd->vars_regrev = 0;
8576 if ((pos = strstr(tmpbuf, "ccode"))) {
8577 sscanf(pos, "ccode=%3s\n", bus->dhd->vars_ccode);
8578 }
8579 if ((pos = strstr(tmpbuf, "regrev"))) {
8580 sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
8581 }
8582 MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
8583 }
8584 #endif /* KEEP_JP_REGREV */
8585
8586 err:
8587 return bcmerror;
8588 }
8589
8590 /* loop through the capability list and see if the pcie capability exists */
8591 uint8
8592 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
8593 {
8594 uint8 cap_id;
8595 uint8 cap_ptr = 0;
8596 uint8 byte_val;
8597
8598 /* check for Header type 0 */
8599 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
8600 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
8601 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
8602 goto end;
8603 }
8604
8605 /* check if the capability pointer field exists */
8606 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
8607 if (!(byte_val & PCI_CAPPTR_PRESENT)) {
8608 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
8609 goto end;
8610 }
8611
8612 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
8613 /* check if the capability pointer is 0x00 */
8614 if (cap_ptr == 0x00) {
8615 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
8616 goto end;
8617 }
8618
8619 /* loop through the capability list and see if the pcie capability exists */
8620
8621 cap_id = read_pci_cfg_byte(cap_ptr);
8622
8623 while (cap_id != req_cap_id) {
8624 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
8625 if (cap_ptr == 0x00) break;
8626 cap_id = read_pci_cfg_byte(cap_ptr);
8627 }
8628
8629 end:
8630 return cap_ptr;
8631 }
8632
8633 void
8634 dhdpcie_pme_active(osl_t *osh, bool enable)
8635 {
8636 uint8 cap_ptr;
8637 uint32 pme_csr;
8638
8639 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
8640
8641 if (!cap_ptr) {
8642 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
8643 return;
8644 }
8645
8646 pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
8647 DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
8648
8649 pme_csr |= PME_CSR_PME_STAT;
8650 if (enable) {
8651 pme_csr |= PME_CSR_PME_EN;
8652 } else {
8653 pme_csr &= ~PME_CSR_PME_EN;
8654 }
8655
8656 OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
8657 }
8658
8659 bool
8660 dhdpcie_pme_cap(osl_t *osh)
8661 {
8662 uint8 cap_ptr;
8663 uint32 pme_cap;
8664
8665 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
8666
8667 if (!cap_ptr) {
8668 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
8669 return FALSE;
8670 }
8671
8672 pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
8673
8674 DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
8675
8676 return ((pme_cap & PME_CAP_PM_STATES) != 0);
8677 }
8678
8679 static void
8680 dhdpcie_pme_stat_clear(dhd_bus_t *bus)
8681 {
8682 uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));
8683
8684 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(uint32), pmcsr | PCIE_PMCSR_PMESTAT);
8685 }
8686
8687 uint32
8688 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
8689 {
8690
8691 uint8 pcie_cap;
8692 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
8693 uint32 reg_val;
8694
8695 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
8696
8697 if (!pcie_cap) {
8698 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
8699 return 0;
8700 }
8701
8702 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
8703
8704 /* set operation */
8705 if (mask) {
8706 /* read */
8707 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8708
8709 /* modify */
8710 reg_val &= ~mask;
8711 reg_val |= (mask & val);
8712
8713 /* write */
8714 OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
8715 }
8716 return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8717 }
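/* Passing a zero mask performs a plain read of the Link Control register,
* e.g. a (hypothetical) caller only interested in the current value:
*
*	uint32 lcreg = dhdpcie_lcreg(osh, 0, 0);
*/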
8718
8719 uint8
8720 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
8721 {
8722 uint8 pcie_cap;
8723 uint32 reg_val;
8724 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
8725
8726 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
8727
8728 if (!pcie_cap) {
8729 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
8730 return 0;
8731 }
8732
8733 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
8734
8735 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8736 /* set operation */
8737 if (mask) {
8738 if (val)
8739 reg_val |= PCIE_CLKREQ_ENAB;
8740 else
8741 reg_val &= ~PCIE_CLKREQ_ENAB;
8742 OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
8743 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8744 }
8745 if (reg_val & PCIE_CLKREQ_ENAB)
8746 return 1;
8747 else
8748 return 0;
8749 }
8750
8751 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8752 {
8753 dhd_bus_t *bus;
8754 uint64 current_time = OSL_LOCALTIME_NS();
8755
8756 if (!dhd) {
8757 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
8758 return;
8759 }
8760
8761 bus = dhd->bus;
8762 if (!bus) {
8763 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
8764 return;
8765 }
8766
8767 bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
8768 bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8769 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8770 "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
8771 bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
8772 bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
8773 bus->dpc_return_busdown_count, bus->non_ours_irq_count);
8774 #ifdef BCMPCIE_OOB_HOST_WAKE
8775 bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
8776 " oob_intr_disable_count=%lu\noob_irq_num=%d"
8777 " last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT
8778 " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
8779 " oob_irq_enabled=%d oob_gpio_level=%d\n",
8780 bus->oob_intr_count, bus->oob_intr_enable_count,
8781 bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
8782 GET_SEC_USEC(bus->last_oob_irq_isr_time),
8783 GET_SEC_USEC(bus->last_oob_irq_thr_time),
8784 GET_SEC_USEC(bus->last_oob_irq_enable_time),
8785 GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
8786 dhdpcie_get_oob_irq_level());
8787 #endif /* BCMPCIE_OOB_HOST_WAKE */
8788 bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
8789 " isr_exit_time="SEC_USEC_FMT"\n"
8790 "isr_sched_dpc_time="SEC_USEC_FMT" rpm_sched_dpc_time="SEC_USEC_FMT"\n"
8791 " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
8792 "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
8793 " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
8794 " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
8795 "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
8796 "last_d3_inform_time="SEC_USEC_FMT"\n",
8797 GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
8798 GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->isr_sched_dpc_time),
8799 GET_SEC_USEC(bus->rpm_sched_dpc_time),
8800 GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
8801 GET_SEC_USEC(bus->last_process_ctrlbuf_time),
8802 GET_SEC_USEC(bus->last_process_flowring_time),
8803 GET_SEC_USEC(bus->last_process_txcpl_time),
8804 GET_SEC_USEC(bus->last_process_rxcpl_time),
8805 GET_SEC_USEC(bus->last_process_infocpl_time),
8806 GET_SEC_USEC(bus->last_process_edl_time),
8807 GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
8808 GET_SEC_USEC(bus->last_d3_inform_time));
8809
8810 bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
8811 SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
8812 SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
8813 GET_SEC_USEC(bus->last_suspend_end_time),
8814 GET_SEC_USEC(bus->last_resume_start_time),
8815 GET_SEC_USEC(bus->last_resume_end_time));
8816
8817 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
8818 bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
8819 " logtrace_thread_sem_down_time="SEC_USEC_FMT
8820 "\nlogtrace_thread_flush_time="SEC_USEC_FMT
8821 " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
8822 "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
8823 GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
8824 GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
8825 GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
8826 GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
8827 GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
8828 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
8829 }
8830
8831 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8832 {
8833 uint32 intstatus = 0;
8834 uint32 intmask = 0;
8835 uint32 d2h_db0 = 0;
8836 uint32 d2h_mb_data = 0;
8837
8838 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8839 dhd->bus->pcie_mailbox_int, 0, 0);
8840 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8841 dhd->bus->pcie_mailbox_mask, 0, 0);
8842 d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
8843 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
8844
8845 bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
8846 intstatus, intmask, d2h_db0);
8847 bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
8848 d2h_mb_data, dhd->bus->def_intmask);
8849 }
8850 /** Add bus dump output to a buffer */
8851 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
8852 {
8853 uint16 flowid;
8854 int ix = 0;
8855 flow_ring_node_t *flow_ring_node;
8856 flow_info_t *flow_info;
8857 #ifdef TX_STATUS_LATENCY_STATS
8858 uint8 ifindex;
8859 if_flow_lkup_t *if_flow_lkup;
8860 dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
8861 #endif /* TX_STATUS_LATENCY_STATS */
8862
8863 #if defined(FW_SIGNATURE)
8864 /* Dump secure firmware status. */
8865 if (dhdp->busstate <= DHD_BUS_LOAD) {
8866 dhd_bus_dump_fws(dhdp->bus, strbuf);
8867 }
8868 #endif
8869
8870 if (dhdp->busstate != DHD_BUS_DATA)
8871 return;
8872
8873 #ifdef TX_STATUS_LATENCY_STATS
8874 memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
8875 #endif /* TX_STATUS_LATENCY_STATS */
8876 #ifdef DHD_WAKE_STATUS
8877 bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
8878 bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
8879 dhdp->bus->wake_counts.rcwake);
8880 #ifdef DHD_WAKE_RX_STATUS
8881 bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
8882 dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
8883 dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
8884 bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
8885 dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
8886 dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
8887 bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
8888 dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
8889 dhdp->bus->wake_counts.rx_icmpv6_ns);
8890 #endif /* DHD_WAKE_RX_STATUS */
8891 #ifdef DHD_WAKE_EVENT_STATUS
8892 for (flowid = 0; flowid < WLC_E_LAST; flowid++)
8893 if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
8894 bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
8895 dhdp->bus->wake_counts.rc_event[flowid]);
8896 bcm_bprintf(strbuf, "\n");
8897 #endif /* DHD_WAKE_EVENT_STATUS */
8898 #endif /* DHD_WAKE_STATUS */
8899
8900 dhd_prot_print_info(dhdp, strbuf);
8901 dhd_dump_intr_registers(dhdp, strbuf);
8902 dhd_dump_intr_counters(dhdp, strbuf);
8903 bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
8904 dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
8905 bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
8906 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
8907 bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
8908 dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
8909 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
8910 #if defined(DHD_HTPUT_TUNABLES)
8911 bcm_bprintf(strbuf, "htput_flow_ring_start:%d total_htput:%d client_htput=%d\n",
8912 dhdp->htput_flow_ring_start, HTPUT_TOTAL_FLOW_RINGS, dhdp->htput_client_flow_rings);
8913 #endif /* DHD_HTPUT_TUNABLES */
8914 bcm_bprintf(strbuf,
8915 "%4s %4s %2s %4s %17s %4s %4s %6s %10s %17s %17s %17s %17s %14s %14s %10s ",
8916 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
8917 " Overflows", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
8918 "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
8919
8920 #ifdef TX_STATUS_LATENCY_STATS
8921 /* Average Tx status/Completion Latency in micro secs */
8922 bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
8923 #endif /* TX_STATUS_LATENCY_STATS */
8924
8925 bcm_bprintf(strbuf, "\n");
8926
8927 for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
8928 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
8929 if (!flow_ring_node->active)
8930 continue;
8931
8932 flow_info = &flow_ring_node->flow_info;
8933 bcm_bprintf(strbuf,
8934 "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
8935 flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
8936 MAC2STRDBG(flow_info->da),
8937 DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
8938 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
8939 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
8940 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
8941 dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, TRUE, strbuf,
8942 "%5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d");
8943
8944 #ifdef TX_STATUS_LATENCY_STATS
8945 bcm_bprintf(strbuf, "%16llu %16llu ",
8946 flow_info->num_tx_pkts,
8947 flow_info->num_tx_status ?
8948 DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
8949 flow_info->num_tx_status) : 0);
8950 ifindex = flow_info->ifindex;
8951 ASSERT(ifindex < DHD_MAX_IFS);
8952 if (ifindex < DHD_MAX_IFS) {
8953 if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
8954 if_tx_status_latency[ifindex].cum_tx_status_latency +=
8955 flow_info->cum_tx_status_latency;
8956 } else {
8957 DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
8958 __FUNCTION__, ifindex, flowid));
8959 }
8960 #endif /* TX_STATUS_LATENCY_STATS */
8961 bcm_bprintf(strbuf, "\n");
8962 }
8963
8964 #ifdef TX_STATUS_LATENCY_STATS
8965 bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
8966 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8967 for (ix = 0; ix < DHD_MAX_IFS; ix++) {
8968 if (!if_flow_lkup[ix].status) {
8969 continue;
8970 }
8971 bcm_bprintf(strbuf, "%2d %16llu %16llu\n",
8972 ix,
8973 if_tx_status_latency[ix].num_tx_status ?
8974 DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
8975 if_tx_status_latency[ix].num_tx_status): 0,
8976 if_tx_status_latency[ix].num_tx_status);
8977 }
8978 #endif /* TX_STATUS_LATENCY_STATS */
8979
8980 bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
8981 bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
8982 bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
8983 if (dhdp->d2h_hostrdy_supported) {
8984 bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
8985 }
8986 #ifdef PCIE_INB_DW
8987 /* Inband device wake counters */
8988 if (INBAND_DW_ENAB(dhdp->bus)) {
8989 bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
8990 dhdp->bus->inband_dw_assert_cnt);
8991 bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
8992 dhdp->bus->inband_dw_deassert_cnt);
8993 bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
8994 dhdp->bus->inband_ds_exit_host_cnt);
8995 bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
8996 dhdp->bus->inband_ds_exit_device_cnt);
8997 bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
8998 dhdp->bus->inband_ds_exit_to_cnt);
8999 bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
9000 dhdp->bus->inband_host_sleep_exit_to_cnt);
9001 }
9002 #endif /* PCIE_INB_DW */
9003 bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
9004 dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
9005
9006 bcm_bprintf(strbuf, "\n\nDB7 stats - db7_send_cnt: %d, db7_trap_cnt: %d, "
9007 "max duration: %lld (%lld - %lld), db7_timing_error_cnt: %d\n",
9008 dhdp->db7_trap.debug_db7_send_cnt,
9009 dhdp->db7_trap.debug_db7_trap_cnt,
9010 dhdp->db7_trap.debug_max_db7_dur,
9011 dhdp->db7_trap.debug_max_db7_trap_time,
9012 dhdp->db7_trap.debug_max_db7_send_time,
9013 dhdp->db7_trap.debug_db7_timing_error_cnt);
9014 }
9015
9016 #ifdef DNGL_AXI_ERROR_LOGGING
9017 bool
9018 dhd_axi_sig_match(dhd_pub_t *dhdp)
9019 {
9020 uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
9021
9022 if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
9023 DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
9024 return FALSE;
9025 }
9026
9027 DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
9028 __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
9029 dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
9030 if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
9031 axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
9032 uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
9033 OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
9034 if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
9035 return TRUE;
9036 } else {
9037 DHD_ERROR(("%s: No AXI signature: 0x%x\n",
9038 __FUNCTION__, axi_signature));
9039 return FALSE;
9040 }
9041 } else {
9042 DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
9043 return FALSE;
9044 }
9045 }
9046
9047 void
9048 dhd_axi_error(dhd_pub_t *dhdp)
9049 {
9050 dhd_axi_error_dump_t *axi_err_dump;
9051 uint8 *axi_err_buf = NULL;
9052 uint8 *p_axi_err = NULL;
9053 uint32 axi_logbuf_addr;
9054 uint32 axi_tcm_addr;
9055 int err, size;
9056
9057 /* XXX: On the Dongle side, if an invalid Host Address is generated for a transaction
9058 * it results in SMMU Fault. Now the Host won't respond for the invalid transaction.
9059 * On the Dongle side, after 50msec this results in AXI Slave Error.
9060 * Hence introduce a delay higher than 50msec to ensure AXI Slave error happens and
9061 * the Dongle collects the required information.
9062 */
9063 OSL_DELAY(75000);
9064
9065 axi_logbuf_addr = dhdp->axierror_logbuf_addr;
9066 if (!axi_logbuf_addr) {
9067 DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
9068 goto sched_axi;
9069 }
9070
9071 axi_err_dump = dhdp->axi_err_dump;
9072 if (!axi_err_dump) {
9073 goto sched_axi;
9074 }
9075
9076 if (!dhd_axi_sig_match(dhdp)) {
9077 goto sched_axi;
9078 }
9079
9080 /* Reading AXI error data for SMMU fault */
9081 DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
9082 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
9083 size = sizeof(hnd_ext_trap_axi_error_v1_t);
9084 axi_err_buf = MALLOCZ(dhdp->osh, size);
9085 if (axi_err_buf == NULL) {
9086 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
9087 goto sched_axi;
9088 }
9089
9090 p_axi_err = axi_err_buf;
9091 err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
9092 if (err) {
9093 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
9094 __FUNCTION__, err, size, axi_tcm_addr));
9095 goto sched_axi;
9096 }
9097
9098 /* Dump data to Dmesg */
9099 dhd_log_dump_axi_error(axi_err_buf);
9100 err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
9101 if (err) {
9102 DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
9103 __FUNCTION__, err));
9104 }
9105
9106 sched_axi:
9107 if (axi_err_buf) {
9108 MFREE(dhdp->osh, axi_err_buf, size);
9109 }
9110 dhd_schedule_axi_error_dump(dhdp, NULL);
9111 }
9112
9113 static void
9114 dhd_log_dump_axi_error(uint8 *axi_err)
9115 {
9116 dma_dentry_v1_t dma_dentry;
9117 dma_fifo_v1_t dma_fifo;
9118 int i = 0, j = 0;
9119
9120 if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
9121 hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
9122 DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
9123 DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
9124 DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
9125 DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
9126 __FUNCTION__, axi_err_v1->dma_fifo_valid_count));
9127 DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
9128 __FUNCTION__, axi_err_v1->axi_errorlog_status));
9129 DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
9130 __FUNCTION__, axi_err_v1->axi_errorlog_core));
9131 DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
9132 __FUNCTION__, axi_err_v1->axi_errorlog_hi));
9133 DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
9134 __FUNCTION__, axi_err_v1->axi_errorlog_lo));
9135 DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
9136 __FUNCTION__, axi_err_v1->axi_errorlog_id));
9137
9138 for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
9139 dma_fifo = axi_err_v1->dma_fifo[i];
9140 DHD_ERROR(("%s: valid:%d : 0x%x\n",
9141 __FUNCTION__, i, dma_fifo.valid));
9142 DHD_ERROR(("%s: direction:%d : 0x%x\n",
9143 __FUNCTION__, i, dma_fifo.direction));
9144 DHD_ERROR(("%s: index:%d : 0x%x\n",
9145 __FUNCTION__, i, dma_fifo.index));
9146 DHD_ERROR(("%s: dpa:%d : 0x%x\n",
9147 __FUNCTION__, i, dma_fifo.dpa));
9148 DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
9149 __FUNCTION__, i, dma_fifo.desc_lo));
9150 DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
9151 __FUNCTION__, i, dma_fifo.desc_hi));
9152 DHD_ERROR(("%s: din:%d : 0x%x\n",
9153 __FUNCTION__, i, dma_fifo.din));
9154 DHD_ERROR(("%s: dout:%d : 0x%x\n",
9155 __FUNCTION__, i, dma_fifo.dout));
9156 for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
9157 dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
9158 DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
9159 __FUNCTION__, i, dma_dentry.ctrl1));
9160 DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
9161 __FUNCTION__, i, dma_dentry.ctrl2));
9162 DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
9163 __FUNCTION__, i, dma_dentry.addrlo));
9164 DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
9165 __FUNCTION__, i, dma_dentry.addrhi));
9166 }
9167 }
9168 }
9169 else {
9170 DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
9171 }
9172 }
9173 #endif /* DNGL_AXI_ERROR_LOGGING */
9174
9175 /**
9176 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
9177 * flow queue to their flow ring.
9178 */
9179 static void
9180 dhd_update_txflowrings(dhd_pub_t *dhd)
9181 {
9182 unsigned long flags;
9183 dll_t *item, *next;
9184 flow_ring_node_t *flow_ring_node;
9185 struct dhd_bus *bus = dhd->bus;
9186 int count = 0;
9187
9188 if (dhd_query_bus_erros(dhd)) {
9189 return;
9190 }
9191
9192 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
9193 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9194 for (item = dll_head_p(&bus->flowring_active_list);
9195 (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
9196 item = next, count++) {
9197 if (dhd->hang_was_sent) {
9198 break;
9199 }
9200
9201 if (count > bus->max_tx_flowrings) {
9202 DHD_ERROR(("%s : overflow max flowrings\n", __FUNCTION__));
9203 dhd->hang_reason = HANG_REASON_UNKNOWN;
9204 dhd_os_send_hang_message(dhd);
9205 break;
9206 }
9207
9208 next = dll_next_p(item);
9209 flow_ring_node = dhd_constlist_to_flowring(item);
9210
9211 /* Ensure that flow_ring_node in the list is Not Null */
9212 ASSERT(flow_ring_node != NULL);
9213
9214 /* Ensure that the flowring node has valid contents */
9215 ASSERT(flow_ring_node->prot_info != NULL);
9216
9217 dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
9218 }
9219 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9220 }
9221
9222 /** Mailbox ringbell Function */
9223 static void
9224 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
9225 {
9226 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
9227 (bus->sih->buscorerev == 4)) {
9228 DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
9229 return;
9230 }
9231 if (bus->db1_for_mb) {
9232 /* this is a pcie core register, not the config register */
9233 /* XXX: make sure we are on PCIE */
9234 DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
9235 if (DAR_PWRREQ(bus)) {
9236 dhd_bus_pcie_pwr_req(bus);
9237 }
9238 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
9239 ~0, 0x12345678);
9240 } else {
9241 DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
9242 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
9243 /* XXX CRWLPCIEGEN2-182 requires double write */
9244 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
9245 }
9246 }
9247
9248 /* Upon receiving a mailbox interrupt,
9249 * if H2D_FW_TRAP bit is set in mailbox location
9250 * device traps
9251 */
9252 static void
9253 dhdpcie_fw_trap(dhd_bus_t *bus)
9254 {
9255 DHD_ERROR(("%s: send trap!!!\n", __FUNCTION__));
9256 if (bus->dhd->db7_trap.fw_db7w_trap) {
9257 uint32 addr = dhd_bus_db1_addr_3_get(bus);
9258 bus->dhd->db7_trap.debug_db7_send_time = OSL_LOCALTIME_NS();
9259 bus->dhd->db7_trap.debug_db7_send_cnt++;
9260 si_corereg(bus->sih, bus->sih->buscoreidx, addr, ~0,
9261 bus->dhd->db7_trap.db7_magic_number);
9262 return;
9263 }
9264
9265 /* Send the mailbox data and generate mailbox intr. */
9266 dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
9267 /* For FWs that cannot interpret H2D_FW_TRAP */
9268 (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
9269 }
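
/*
 * Editorial note (hedged): dhdpcie_fw_trap() above has two paths. When the
 * firmware has negotiated DB7 trap support (db7_trap.fw_db7w_trap), the host
 * writes the agreed magic number to the doorbell address returned by
 * dhd_bus_db1_addr_3_get(), which the dongle treats as a forced-trap request.
 * Otherwise the legacy H2D_FW_TRAP mailbox bit is sent, followed by a
 * "bus:disconnect" iovar for firmware that cannot interpret that bit. The
 * resulting D2H_DEV_FWHALT is handled in dhd_bus_handle_mb_data().
 */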
9270
9271 #ifdef PCIE_INB_DW
9272
9273 void
9274 dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
9275 {
9276 /* The DHD_BUS_INB_DW_LOCK must be held before
9277 * calling this function !!
9278 */
9279 if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
9280 DW_DEVICE_DS_DEV_SLEEP_PEND) &&
9281 (bus->host_active_cnt == 0)) {
9282 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
9283 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9284 }
9285 }
9286
9287 int
9288 dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
9289 {
9290 int timeleft;
9291 unsigned long flags;
9292 int ret;
9293
9294 if (!INBAND_DW_ENAB(bus)) {
9295 return BCME_ERROR;
9296 }
9297 if (val) {
9298 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9299
9300 /*
9301 * Reset the Door Bell Timeout value. So that the Watchdog
9302 * doesn't try to Deassert Device Wake, while we are in
9303 * the process of still Asserting the same.
9304 */
9305 dhd_bus_doorbell_timeout_reset(bus);
9306
9307 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
9308 DW_DEVICE_DS_DEV_SLEEP) {
9309 /* Clear wait_for_ds_exit */
9310 bus->wait_for_ds_exit = 0;
9311 if (bus->calc_ds_exit_latency) {
9312 bus->ds_exit_latency = 0;
9313 bus->ds_exit_ts2 = 0;
9314 bus->ds_exit_ts1 = OSL_SYSUPTIME_US();
9315 }
9316 ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
9317 if (ret != BCME_OK) {
9318 DHD_ERROR(("Failed: assert Inband device_wake\n"));
9319 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9320 ret = BCME_ERROR;
9321 goto exit;
9322 }
9323 dhdpcie_bus_set_pcie_inband_dw_state(bus,
9324 DW_DEVICE_DS_DISABLED_WAIT);
9325 bus->inband_dw_assert_cnt++;
9326 } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
9327 DW_DEVICE_DS_DISABLED_WAIT) {
9328 DHD_ERROR(("Inband device wake is already asserted, "
9329 "waiting for DS-Exit\n"));
9330 }
9331 else {
9332 DHD_INFO(("Not in DS SLEEP state \n"));
9333 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9334 ret = BCME_OK;
9335 goto exit;
9336 }
9337
9338 /*
9339 * Since we are going to wait/sleep .. release the lock.
9340 * The Device Wake sanity is still valid, because
9341 * a) If there is another context that comes in and tries
9342 * to assert DS again and if it gets the lock, since
9343 * ds_state would be now != DW_DEVICE_DS_DEV_SLEEP the
9344 * context would return saying Not in DS Sleep.
9345 * b) If there is another context that comes in and tries
9346 * to de-assert DS and gets the lock,
9347 * since the ds_state is != DW_DEVICE_DS_DEV_WAKE
9348 * that context would return too. This can not happen
9349 * since the watchdog is the only context that can
9350 * De-Assert Device Wake and as the first step of
9351 * Asserting the Device Wake, we have pushed out the
9352 * Door Bell Timeout.
9353 *
9354 */
9355
9356 if (!CAN_SLEEP()) {
9357 dhdpcie_bus_set_pcie_inband_dw_state(bus,
9358 DW_DEVICE_DS_DEV_WAKE);
9359 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9360 /* Called from context that cannot sleep */
9361 OSL_DELAY(1000);
9362 } else {
9363 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9364 /* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
9365 timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
9366 if (!bus->wait_for_ds_exit || timeleft == 0) {
9367 DHD_ERROR(("dhd_bus_inb_set_device_wake:DS-EXIT timeout, "
9368 "wait_for_ds_exit : %d\n", bus->wait_for_ds_exit));
9369 bus->inband_ds_exit_to_cnt++;
9370 bus->ds_exit_timeout = 0;
9371 #ifdef DHD_FW_COREDUMP
9372 if (bus->dhd->memdump_enabled) {
9373 /* collect core dump */
9374 DHD_GENERAL_LOCK(bus->dhd, flags);
9375 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(bus->dhd);
9376 DHD_GENERAL_UNLOCK(bus->dhd, flags);
9377 bus->dhd->memdump_type =
9378 DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE;
9379 dhd_bus_mem_dump(bus->dhd);
9380 }
9381 #else
9382 ASSERT(0);
9383 #endif /* DHD_FW_COREDUMP */
9384 ret = BCME_ERROR;
9385 goto exit;
9386 }
9387 }
9388 ret = BCME_OK;
9389 } else {
9390 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9391 if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
9392 DW_DEVICE_DS_DEV_WAKE)) {
9393 ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
9394 if (ret != BCME_OK) {
9395 DHD_ERROR(("Failed: deassert Inband device_wake\n"));
9396 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9397 goto exit;
9398 }
9399 dhdpcie_bus_set_pcie_inband_dw_state(bus,
9400 DW_DEVICE_DS_ACTIVE);
9401 bus->inband_dw_deassert_cnt++;
9402 } else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
9403 DW_DEVICE_DS_DEV_SLEEP_PEND) &&
9404 (bus->host_active_cnt == 0)) {
9405 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
9406 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9407 }
9408
9409 ret = BCME_OK;
9410 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9411 }
9412
9413 exit:
9414 return ret;
9415 }
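
/*
 * Editorial sketch of the inband device-wake flow implemented above (hedged,
 * derived only from the state names and transitions used in this file):
 *
 *	DS_DEV_SLEEP --(host sends DEVICE_WAKE_ASSERT)--> DS_DISABLED_WAIT
 *	DS_DISABLED_WAIT --(dongle sends DS-EXIT note)--> DS_DEV_WAKE
 *	DS_DEV_WAKE --(host sends DEVICE_WAKE_DEASSERT)--> DS_ACTIVE
 *	DS_DEV_SLEEP_PEND --(host_active_cnt == 0)--> DS_DEV_SLEEP (DS-ACK sent)
 *
 * Callers that cannot sleep fall back to a 1 ms OSL_DELAY() instead of
 * waiting on wait_for_ds_exit.
 */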
9416 #endif /* PCIE_INB_DW */
9417 #if defined(PCIE_INB_DW)
9418 void
9419 dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
9420 {
9421 if (dhd_doorbell_timeout) {
9422 #ifdef DHD_PCIE_RUNTIMEPM
9423 if (dhd_runtimepm_ms) {
9424 dhd_timeout_start(&bus->doorbell_timer,
9425 (dhd_doorbell_timeout * 1000) / dhd_runtimepm_ms);
9426 }
9427 #else
9428 uint wd_scale = dhd_watchdog_ms;
9429 if (dhd_watchdog_ms) {
9430 dhd_timeout_start(&bus->doorbell_timer,
9431 (dhd_doorbell_timeout * 1000) / wd_scale);
9432 }
9433 #endif /* DHD_PCIE_RUNTIMEPM */
9434 }
9435 else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) {
9436 dhd_bus_set_device_wake(bus, FALSE);
9437 }
9438 }
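
/*
 * Worked example (editorial, hedged): the doorbell idle timer above is armed
 * with (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms -- e.g. with
 * dhd_doorbell_timeout = 20 and dhd_watchdog_ms = 10 that is 2000 -- or with
 * dhd_runtimepm_ms as the divisor when DHD_PCIE_RUNTIMEPM provides the
 * periodic context. Once dhd_timeout_expired() reports expiry,
 * dhd_bus_dw_deassert() below de-asserts DEVICE_WAKE, provided the bus is
 * idle and in DHD_BUS_DATA state.
 */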
9439
9440 int
9441 dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
9442 {
9443 if (bus->ds_enabled && bus->dhd->ring_attached) {
9444 #ifdef PCIE_INB_DW
9445 if (INBAND_DW_ENAB(bus)) {
9446 return dhd_bus_inb_set_device_wake(bus, val);
9447 }
9448 #endif /* PCIE_INB_DW */
9449 }
9450 return BCME_OK;
9451 }
9452
9453 void
9454 dhd_bus_dw_deassert(dhd_pub_t *dhd)
9455 {
9456 dhd_bus_t *bus = dhd->bus;
9457 unsigned long flags;
9458
9459 /* If we haven't communicated with the device for a while, deassert the Device_Wake GPIO */
9460 if (dhd_doorbell_timeout != 0 && bus->dhd->busstate == DHD_BUS_DATA &&
9461 dhd_timeout_expired(&bus->doorbell_timer)) {
9462 DHD_GENERAL_LOCK(dhd, flags);
9463 if (DHD_BUS_BUSY_CHECK_IDLE(dhd) &&
9464 !DHD_CHECK_CFG_IN_PROGRESS(dhd)) {
9465 DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhd);
9466 DHD_GENERAL_UNLOCK(dhd, flags);
9467 dhd_bus_set_device_wake(bus, FALSE);
9468 DHD_GENERAL_LOCK(dhd, flags);
9469 DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhd);
9470 dhd_os_busbusy_wake(bus->dhd);
9471 DHD_GENERAL_UNLOCK(dhd, flags);
9472 } else {
9473 DHD_GENERAL_UNLOCK(dhd, flags);
9474 }
9475 }
9476
9477 #ifdef PCIE_INB_DW
9478 if (INBAND_DW_ENAB(bus)) {
9479 if (bus->ds_exit_timeout) {
9480 bus->ds_exit_timeout --;
9481 if (bus->ds_exit_timeout == 1) {
9482 DHD_ERROR(("DS-EXIT TIMEOUT\n"));
9483 bus->ds_exit_timeout = 0;
9484 bus->inband_ds_exit_to_cnt++;
9485 }
9486 }
9487 if (bus->host_sleep_exit_timeout) {
9488 bus->host_sleep_exit_timeout --;
9489 if (bus->host_sleep_exit_timeout == 1) {
9490 DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
9491 bus->host_sleep_exit_timeout = 0;
9492 bus->inband_host_sleep_exit_to_cnt++;
9493 }
9494 }
9495 }
9496 #endif /* PCIE_INB_DW */
9497 }
9498 #endif
9499
9500 /** mailbox doorbell ring function */
9501 void
9502 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
9503 {
9504 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9505 if (__DHD_CHK_BUS_IN_LPS(bus)) {
9506 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9507 __FUNCTION__, bus->bus_low_power_state));
9508 return;
9509 }
9510
9511 /* Skip in the case of link down */
9512 if (bus->is_linkdown) {
9513 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9514 return;
9515 }
9516
9517 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
9518 (bus->sih->buscorerev == 4)) {
9519 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
9520 PCIE_INTB, PCIE_INTB);
9521 } else {
9522 /* this is a pcie core register, not the config register */
9523 /* XXX: make sure we are on PCIE */
9524 DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
9525 if (IDMA_ACTIVE(bus->dhd)) {
9526 if (DAR_PWRREQ(bus)) {
9527 dhd_bus_pcie_pwr_req(bus);
9528 }
9529 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
9530 ~0, value);
9531 } else {
9532 if (DAR_PWRREQ(bus)) {
9533 dhd_bus_pcie_pwr_req(bus);
9534 }
9535 si_corereg(bus->sih, bus->sih->buscoreidx,
9536 dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
9537 }
9538 }
9539 }
9540
9541 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
9542 void
9543 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
9544 {
9545 /* this is a pcie core register, not the config register */
9546 /* XXX: make sure we are on PCIE */
9547 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9548 if (__DHD_CHK_BUS_IN_LPS(bus)) {
9549 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9550 __FUNCTION__, bus->bus_low_power_state));
9551 return;
9552 }
9553
9554 /* Skip in the case of link down */
9555 if (bus->is_linkdown) {
9556 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9557 return;
9558 }
9559
9560 DHD_INFO(("writing a door bell 2 to the device\n"));
9561 if (DAR_PWRREQ(bus)) {
9562 dhd_bus_pcie_pwr_req(bus);
9563 }
9564 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
9565 ~0, value);
9566 }
9567
9568 void
9569 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
9570 {
9571 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9572 if (__DHD_CHK_BUS_IN_LPS(bus)) {
9573 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9574 __FUNCTION__, bus->bus_low_power_state));
9575 return;
9576 }
9577
9578 /* Skip in the case of link down */
9579 if (bus->is_linkdown) {
9580 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9581 return;
9582 }
9583
9584 #if defined(PCIE_INB_DW)
9585 if (OOB_DW_ENAB(bus)) {
9586 dhd_bus_set_device_wake(bus, TRUE);
9587 }
9588 dhd_bus_doorbell_timeout_reset(bus);
9589 #endif
9590 if (DAR_PWRREQ(bus)) {
9591 dhd_bus_pcie_pwr_req(bus);
9592 }
9593
9594 #ifdef DHD_DB0TS
9595 if (bus->dhd->db0ts_capable) {
9596 uint64 ts;
9597
9598 ts = local_clock();
9599 do_div(ts, 1000);
9600
9601 value = htol32(ts & 0xFFFFFFFF);
9602 DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
9603 }
9604 #endif /* DHD_DB0TS */
9605 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
9606 }
9607
9608 void
9609 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
9610 {
9611 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9612 if (__DHD_CHK_BUS_IN_LPS(bus)) {
9613 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9614 __FUNCTION__, bus->bus_low_power_state));
9615 return;
9616 }
9617
9618 /* Skip in the case of link down */
9619 if (bus->is_linkdown) {
9620 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9621 return;
9622 }
9623
9624 #if defined(PCIE_INB_DW)
9625 if (devwake) {
9626 if (OOB_DW_ENAB(bus)) {
9627 dhd_bus_set_device_wake(bus, TRUE);
9628 }
9629 }
9630 dhd_bus_doorbell_timeout_reset(bus);
9631 #endif
9632
9633 if (DAR_PWRREQ(bus)) {
9634 dhd_bus_pcie_pwr_req(bus);
9635 }
9636 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
9637 }
9638
9639 static void
9640 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
9641 {
9642 uint32 w;
9643 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9644 if (__DHD_CHK_BUS_IN_LPS(bus)) {
9645 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9646 __FUNCTION__, bus->bus_low_power_state));
9647 return;
9648 }
9649
9650 /* Skip in the case of link down */
9651 if (bus->is_linkdown) {
9652 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9653 return;
9654 }
9655
9656 w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
9657 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
9658 }
9659
9660 dhd_mb_ring_t
9661 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
9662 {
9663 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
9664 (bus->sih->buscorerev == 4)) {
9665 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
9666 bus->pcie_mailbox_int);
9667 if (bus->pcie_mb_intr_addr) {
9668 bus->pcie_mb_intr_osh = si_osh(bus->sih);
9669 return dhd_bus_ringbell_oldpcie;
9670 }
9671 } else {
9672 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
9673 dhd_bus_db0_addr_get(bus));
9674 if (bus->pcie_mb_intr_addr) {
9675 bus->pcie_mb_intr_osh = si_osh(bus->sih);
9676 return dhdpcie_bus_ringbell_fast;
9677 }
9678 }
9679 return dhd_bus_ringbell;
9680 }
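
/*
 * Editorial note (hedged): dhd_bus_get_mbintr_fn() returns a doorbell-ring
 * function pointer of type dhd_mb_ring_t matched to the bus core revision:
 * the "fast" direct-register variant when the doorbell register address can
 * be resolved up front, otherwise the generic si_corereg()-based path. A
 * hypothetical caller (outside this file) might cache and use it like:
 *
 *	dhd_mb_ring_t db_ring = dhd_bus_get_mbintr_fn(bus);
 *	db_ring(bus, 0x12345678);	// ring the H2D doorbell
 */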
9681
9682 dhd_mb_ring_2_t
9683 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
9684 {
9685 bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
9686 dhd_bus_db0_addr_2_get(bus));
9687 if (bus->pcie_mb_intr_2_addr) {
9688 bus->pcie_mb_intr_osh = si_osh(bus->sih);
9689 return dhdpcie_bus_ringbell_2_fast;
9690 }
9691 return dhd_bus_ringbell_2;
9692 }
9693
9694 bool
9695 BCMFASTPATH(dhd_bus_dpc)(struct dhd_bus *bus)
9696 {
9697 bool resched = FALSE; /* Flag indicating resched wanted */
9698 unsigned long flags;
9699
9700 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9701
9702 bus->dpc_entry_time = OSL_LOCALTIME_NS();
9703
9704 /* must be the first activity in this function */
9705 if (dhd_query_bus_erros(bus->dhd)) {
9706 dhdpcie_disable_irq_nosync(bus);
9707 return 0;
9708 }
9709
9710 DHD_GENERAL_LOCK(bus->dhd, flags);
9711 /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
9712 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
9713 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
9714 * and if we return from here, then IOCTL response will never be handled
9715 */
9716 if (bus->dhd->busstate == DHD_BUS_DOWN) {
9717 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
9718 bus->intstatus = 0;
9719 DHD_GENERAL_UNLOCK(bus->dhd, flags);
9720 bus->dpc_return_busdown_count++;
9721 return 0;
9722 }
9723 #ifdef DHD_PCIE_RUNTIMEPM
9724 bus->idlecount = 0;
9725 #endif /* DHD_PCIE_RUNTIMEPM */
9726 DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
9727 DHD_GENERAL_UNLOCK(bus->dhd, flags);
9728
9729 #ifdef DHD_READ_INTSTATUS_IN_DPC
9730 if (bus->ipend) {
9731 bus->ipend = FALSE;
9732 bus->intstatus = dhdpcie_bus_intstatus(bus);
9733 /* Check if the interrupt is ours or not */
9734 if (bus->intstatus == 0) {
9735 goto INTR_ON;
9736 }
9737 bus->intrcount++;
9738 }
9739 #endif /* DHD_READ_INTSTATUS_IN_DPC */
9740
9741 resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
9742 if (!resched) {
9743 bus->intstatus = 0;
9744 #ifdef DHD_READ_INTSTATUS_IN_DPC
9745 INTR_ON:
9746 #endif /* DHD_READ_INTSTATUS_IN_DPC */
9747 bus->dpc_intr_enable_count++;
9748 /* For Linux, Macos etc (other than NDIS) enable back the host interrupts
9749 * which have been disabled in the dhdpcie_bus_isr()
9750 */
9751 if (dhdpcie_irq_disabled(bus)) {
9752 dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
9753 bus->dpc_intr_enable_count++;
9754 }
9755 bus->dpc_exit_time = OSL_LOCALTIME_NS();
9756 } else {
9757 bus->resched_dpc_time = OSL_LOCALTIME_NS();
9758 }
9759
9760 bus->dpc_sched = resched;
9761
9762 DHD_GENERAL_LOCK(bus->dhd, flags);
9763 DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
9764 dhd_os_busbusy_wake(bus->dhd);
9765 DHD_GENERAL_UNLOCK(bus->dhd, flags);
9766
9767 return resched;
9768
9769 }
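
/*
 * Editorial note (hedged): the boolean returned by dhd_bus_dpc() tells the
 * OS-specific DPC/tasklet wrapper whether more work is pending. When TRUE the
 * wrapper is expected to reschedule the DPC without re-enabling the device
 * interrupt; when FALSE the host interrupt is re-enabled here (if the ISR had
 * disabled it) and the bus waits for the next MSI/INTx.
 */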
9770
9771 int
9772 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
9773 {
9774 uint32 cur_h2d_mb_data = 0;
9775
9776 if (bus->is_linkdown) {
9777 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9778 return BCME_ERROR;
9779 }
9780
9781 DHD_INFO(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
9782
9783 #ifdef PCIE_INB_DW
9784 dhd_bus_ds_trace(bus, h2d_mb_data, FALSE, dhdpcie_bus_get_pcie_inband_dw_state(bus));
9785 #else
9786 dhd_bus_ds_trace(bus, h2d_mb_data, FALSE);
9787 #endif /* PCIE_INB_DW */
9788 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
9789 DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
9790 h2d_mb_data));
9791 /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
9792 /* XXX: check the error return value here... */
9793 if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
9794 DHD_ERROR(("failure sending the H2D Mailbox message "
9795 "to firmware\n"));
9796 goto fail;
9797 }
9798 goto done;
9799 }
9800
9801 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
9802
9803 if (cur_h2d_mb_data != 0) {
9804 uint32 i = 0;
9805 DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
9806 /* XXX: start a zero length timer to keep checking this to be zero */
9807 while ((i++ < 100) && cur_h2d_mb_data) {
9808 OSL_DELAY(10);
9809 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
9810 }
9811 if (i >= 100) {
9812 DHD_ERROR(("%s : waited 1ms for the dngl "
9813 "to ack the previous mb transaction\n", __FUNCTION__));
9814 DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
9815 __FUNCTION__, cur_h2d_mb_data));
9816 }
9817 }
9818
9819 dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
9820 dhd_bus_gen_devmb_intr(bus);
9821
9822 done:
9823 if (h2d_mb_data == H2D_HOST_D3_INFORM) {
9824 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
9825 bus->last_d3_inform_time = OSL_LOCALTIME_NS();
9826 bus->d3_inform_cnt++;
9827 }
9828 if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
9829 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
9830 bus->d0_inform_in_use_cnt++;
9831 }
9832 if (h2d_mb_data == H2D_HOST_D0_INFORM) {
9833 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
9834 bus->d0_inform_cnt++;
9835 }
9836 return BCME_OK;
9837 fail:
9838 return BCME_ERROR;
9839 }
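
/*
 * Usage sketch (editorial, hedged): H2D mailbox values are posted one at a
 * time through dhdpcie_send_mb_data(); on PCIE_SHARED_VERSION_6+ firmware
 * (when not forced onto the mailbox path) the value travels as an H2D control
 * message instead of the shared-memory word. Typical calls seen in this file:
 *
 *	dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);	// announce D3 entry
 *	dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);	// ack a deep-sleep request
 */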
9840
9841 static void
9842 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
9843 {
9844 bus->suspend_intr_disable_count++;
9845 /* Disable dongle Interrupts Immediately after D3 */
9846
9847 /* For Linux, Macos etc (other than NDIS), along with disabling the
9848 * dongle interrupt by clearing the IntMask, disable the interrupt
9849 * directly from the host side as well. Also clear the intstatus
9850 * if it is set, to avoid unnecessary interrupts after D3 ACK.
9851 */
9852 dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
9853 dhdpcie_bus_clear_intstatus(bus);
9854 dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
9855
9856 DHD_SET_BUS_LPS_D3_ACKED(bus);
9857 DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
9858
9859 if (bus->dhd->dhd_induce_error == DHD_INDUCE_D3_ACK_TIMEOUT) {
9860 /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
9861 DHD_ERROR(("%s: Due to d3ack induce error forcefully set "
9862 "bus_low_power_state to DHD_BUS_D3_INFORM_SENT\n", __FUNCTION__));
9863 DHD_SET_BUS_LPS_D3_INFORMED(bus);
9864 }
9865 /* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce D3 Ack timeout.
9866 * If flag is set, D3 wake is skipped, which results in to D3 Ack timeout.
9867 */
9868 if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
9869 bus->wait_for_d3_ack = 1;
9870 dhd_os_d3ack_wake(bus->dhd);
9871 } else {
9872 DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
9873 }
9874 }
9875 void
9876 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
9877 {
9878 #ifdef PCIE_INB_DW
9879 unsigned long flags = 0;
9880 #endif /* PCIE_INB_DW */
9881 if (MULTIBP_ENAB(bus->sih)) {
9882 dhd_bus_pcie_pwr_req(bus);
9883 }
9884
9885 DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
9886 #ifdef PCIE_INB_DW
9887 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9888 dhd_bus_ds_trace(bus, d2h_mb_data, TRUE, dhdpcie_bus_get_pcie_inband_dw_state(bus));
9889 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9890 #else
9891 dhd_bus_ds_trace(bus, d2h_mb_data, TRUE);
9892 #endif /* PCIE_INB_DW */
9893
9894 if (d2h_mb_data & D2H_DEV_FWHALT) {
9895 if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
9896 DHD_ERROR(("FW trap has happened, dongle_trap_data 0x%8x\n",
9897 bus->dhd->dongle_trap_data));
9898 }
9899
9900 if (bus->dhd->dongle_trap_data & D2H_DEV_TRAP_HOSTDB) {
9901 uint64 db7_dur;
9902
9903 bus->dhd->db7_trap.debug_db7_trap_time = OSL_LOCALTIME_NS();
9904 bus->dhd->db7_trap.debug_db7_trap_cnt++;
9905 db7_dur = bus->dhd->db7_trap.debug_db7_trap_time -
9906 bus->dhd->db7_trap.debug_db7_send_time;
9907 if (db7_dur > bus->dhd->db7_trap.debug_max_db7_dur) {
9908 bus->dhd->db7_trap.debug_max_db7_send_time =
9909 bus->dhd->db7_trap.debug_db7_send_time;
9910 bus->dhd->db7_trap.debug_max_db7_trap_time =
9911 bus->dhd->db7_trap.debug_db7_trap_time;
9912 }
9913 bus->dhd->db7_trap.debug_max_db7_dur =
9914 MAX(bus->dhd->db7_trap.debug_max_db7_dur, db7_dur);
9915 if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
9916 bus->dhd->db7_trap.debug_db7_timing_error_cnt++;
9917 }
9918 } else {
9919 dhdpcie_checkdied(bus, NULL, 0);
9920 #ifdef SUPPORT_LINKDOWN_RECOVERY
9921 #ifdef CONFIG_ARCH_MSM
9922 bus->no_cfg_restore = 1;
9923 #endif /* CONFIG_ARCH_MSM */
9924 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9925 dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
9926 }
9927 if (bus->dhd->db7_trap.fw_db7w_trap_inprogress) {
9928 bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE;
9929 bus->dhd->dongle_trap_occured = TRUE;
9930 }
9931 goto exit;
9932 }
9933 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
9934 bool ds_acked = FALSE;
9935 BCM_REFERENCE(ds_acked);
9936 if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
9937 DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
9938 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
9939 bus->dhd->busstate = DHD_BUS_DOWN;
9940 goto exit;
9941 }
9942 /* what should we do */
9943 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
9944 #ifdef PCIE_INB_DW
9945 if (INBAND_DW_ENAB(bus)) {
9946 /* As per inband state machine, host should not send DS-ACK
9947 * during suspend or suspend in progress, instead D3 inform will be sent.
9948 */
9949 if (!bus->skip_ds_ack) {
9950 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9951 if (dhdpcie_bus_get_pcie_inband_dw_state(bus)
9952 == DW_DEVICE_DS_ACTIVE) {
9953 dhdpcie_bus_set_pcie_inband_dw_state(bus,
9954 DW_DEVICE_DS_DEV_SLEEP_PEND);
9955 if (bus->host_active_cnt == 0) {
9956 dhdpcie_bus_set_pcie_inband_dw_state(bus,
9957 DW_DEVICE_DS_DEV_SLEEP);
9958 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9959 ds_acked = TRUE;
9960 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP "
9961 "ACK to DNGL\n"));
9962 } else {
9963 DHD_ERROR(("%s: Failed to send DS-ACK, "
9964 "host_active_cnt is %d\n",
9965 __FUNCTION__, bus->host_active_cnt));
9966 }
9967 }
9968 /* Currently DW_DEVICE_HOST_SLEEP_WAIT is set only
9969 * under dhd_bus_suspend() function.
9970 */
9971 else if (dhdpcie_bus_get_pcie_inband_dw_state(bus)
9972 == DW_DEVICE_HOST_SLEEP_WAIT) {
9973 DHD_ERROR(("%s: DS-ACK not sent due to suspend "
9974 "in progress\n", __FUNCTION__));
9975 } else {
9976 DHD_ERROR(("%s: Failed to send DS-ACK, DS state is %d",
9977 __FUNCTION__,
9978 dhdpcie_bus_get_pcie_inband_dw_state(bus)));
9979 }
9980 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9981 dhd_os_ds_enter_wake(bus->dhd);
9982 } else {
9983 DHD_INFO(("%s: Skip DS-ACK due to "
9984 "suspend in progress\n", __FUNCTION__));
9985 }
9986 } else
9987 #endif /* PCIE_INB_DW */
9988 {
9989 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9990 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
9991 }
9992 }
9993 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
9994 #ifdef PCIE_INB_DW
9995 if (INBAND_DW_ENAB(bus)) {
9996 if (bus->calc_ds_exit_latency) {
9997 bus->ds_exit_ts2 = OSL_SYSUPTIME_US();
9998 if (bus->ds_exit_ts2 > bus->ds_exit_ts1 &&
9999 bus->ds_exit_ts1 != 0)
10000 bus->ds_exit_latency = bus->ds_exit_ts2 - bus->ds_exit_ts1;
10001 else
10002 bus->ds_exit_latency = 0;
10003 }
10004 }
10005 #endif /* PCIE_INB_DW */
10006 /* what should we do */
10007 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
10008 #ifdef PCIE_INB_DW
10009 if (INBAND_DW_ENAB(bus)) {
10010 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
10011 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
10012 DW_DEVICE_DS_DISABLED_WAIT) {
10013 /* wake up only if someone is waiting in
10014 * DW_DEVICE_DS_DISABLED_WAIT state
10015 * in this case the waiter will change the state
10016 * to DW_DEVICE_DS_DEV_WAKE
10017 */
10018 bus->inband_ds_exit_host_cnt++;
10019 /* To synchronize with the previous memory operations call wmb() */
10020 OSL_SMP_WMB();
10021 bus->wait_for_ds_exit = 1;
10022 /* Call another wmb() to make sure before waking up the
10023 * other event value gets updated.
10024 */
10025 OSL_SMP_WMB();
10026 dhdpcie_bus_set_pcie_inband_dw_state(bus,
10027 DW_DEVICE_DS_DEV_WAKE);
10028 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
10029 dhd_os_ds_exit_wake(bus->dhd);
10030 } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
10031 DW_DEVICE_DS_DEV_SLEEP) {
10032 DHD_INFO(("recvd unsolicited DS-EXIT from dongle in DEV_SLEEP\n"));
10033 /*
10034 * unsolicited state change to DW_DEVICE_DS_DEV_WAKE if
10035 * D2H_DEV_DS_EXIT_NOTE received in DW_DEVICE_DS_DEV_SLEEP state.
10036 * This is needed when the dongle is woken by external events like
10037 * WOW, ping, etc.
10038 */
10039 bus->inband_ds_exit_device_cnt++;
10040 dhdpcie_bus_set_pcie_inband_dw_state(bus,
10041 DW_DEVICE_DS_DEV_WAKE);
10042 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
10043 } else {
10044 DHD_INFO(("D2H_MB_DATA: not in DS_DISABLED_WAIT/DS_DEV_SLEEP\n"));
10045 bus->inband_ds_exit_host_cnt++;
10046 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
10047 }
10048 dhd_bus_set_device_wake(bus, FALSE);
10049 }
10050 #endif /* PCIE_INB_DW */
10051 }
10052 if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
10053 /* what should we do */
10054 DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
10055 #ifdef PCIE_INB_DW
10056 if (INBAND_DW_ENAB(bus)) {
10057 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
10058 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
10059 DW_DEVICE_HOST_WAKE_WAIT) {
10060 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
10061 }
10062 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
10063 }
10064 #endif /* PCIE_INB_DW */
10065 }
10066 if (d2h_mb_data & D2H_DEV_D3_ACK) {
10067 /* what should we do */
10068 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
10069 if (!bus->wait_for_d3_ack) {
10070 #if defined(DHD_HANG_SEND_UP_TEST)
10071 if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
10072 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
10073 } else {
10074 dhd_bus_handle_d3_ack(bus);
10075 }
10076 #else /* DHD_HANG_SEND_UP_TEST */
10077 dhd_bus_handle_d3_ack(bus);
10078 #endif /* DHD_HANG_SEND_UP_TEST */
10079 }
10080 }
10081
10082 exit:
10083 if (MULTIBP_ENAB(bus->sih)) {
10084 dhd_bus_pcie_pwr_req_clear(bus);
10085 }
10086 }
10087
10088 static void
10089 dhdpcie_handle_mb_data(dhd_bus_t *bus)
10090 {
10091 uint32 d2h_mb_data = 0;
10092 uint32 zero = 0;
10093
10094 if (MULTIBP_ENAB(bus->sih)) {
10095 dhd_bus_pcie_pwr_req(bus);
10096 }
10097
10098 if (bus->is_linkdown) {
10099 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
10100 return;
10101 }
10102
10103 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
10104 if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
10105 DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
10106 __FUNCTION__, d2h_mb_data));
10107 goto exit;
10108 }
10109
10110 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
10111
10112 DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
10113 if (d2h_mb_data & D2H_DEV_FWHALT) {
10114 DHD_ERROR(("FW trap has happened\n"));
10115 dhdpcie_checkdied(bus, NULL, 0);
10116 /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
10117 goto exit;
10118 }
10119 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
10120 /* what should we do */
10121 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
10122 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
10123 DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
10124 }
10125 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
10126 /* what should we do */
10127 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
10128 }
10129 if (d2h_mb_data & D2H_DEV_D3_ACK) {
10130 /* what should we do */
10131 DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
10132 if (!bus->wait_for_d3_ack) {
10133 #if defined(DHD_HANG_SEND_UP_TEST)
10134 if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
10135 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
10136 } else {
10137 dhd_bus_handle_d3_ack(bus);
10138 }
10139 #else /* DHD_HANG_SEND_UP_TEST */
10140 dhd_bus_handle_d3_ack(bus);
10141 #endif /* DHD_HANG_SEND_UP_TEST */
10142 }
10143 }
10144
10145 exit:
10146 if (MULTIBP_ENAB(bus->sih)) {
10147 dhd_bus_pcie_pwr_req_clear(bus);
10148 }
10149 }
10150
10151 static void
10152 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
10153 {
10154 uint32 d2h_mb_data = 0;
10155 uint32 zero = 0;
10156
10157 if (MULTIBP_ENAB(bus->sih)) {
10158 dhd_bus_pcie_pwr_req(bus);
10159 }
10160
10161 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
10162 if (!d2h_mb_data) {
10163 goto exit;
10164 }
10165
10166 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
10167
10168 dhd_bus_handle_mb_data(bus, d2h_mb_data);
10169
10170 exit:
10171 if (MULTIBP_ENAB(bus->sih)) {
10172 dhd_bus_pcie_pwr_req_clear(bus);
10173 }
10174 }
10175
10176 static bool
10177 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
10178 {
10179 bool resched = FALSE;
10180
10181 if (MULTIBP_ENAB(bus->sih)) {
10182 dhd_bus_pcie_pwr_req(bus);
10183 }
10184 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
10185 (bus->sih->buscorerev == 4)) {
10186 /* Msg stream interrupt */
10187 if (intstatus & I_BIT1) {
10188 resched = dhdpci_bus_read_frames(bus);
10189 } else if (intstatus & I_BIT0) {
10190 /* do nothing for Now */
10191 }
10192 } else {
10193 if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
10194 bus->api.handle_mb_data(bus);
10195
10196 /* Do not process any rings after receiving D3_ACK */
10197 if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
10198 DHD_ERROR(("%s: D3 Ack Received. "
10199 "Skip processing rest of ring buffers.\n", __FUNCTION__));
10200 goto exit;
10201 }
10202
10203 /* The fact that we are here implies that dhdpcie_bus_intstatus()
10204 * returned a non-zero status after applying the current mask.
10205 * No further check required; in fact bus->intstatus can be eliminated.
10206 * Both bus->intstatus and bus->intdis are shared between isr and dpc.
10207 */
10208 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
10209 if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
10210 resched = dhdpci_bus_read_frames(bus);
10211 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
10212 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
10213 }
10214 #else
10215 resched = dhdpci_bus_read_frames(bus);
10216 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
10217 }
10218
10219 exit:
10220 if (MULTIBP_ENAB(bus->sih)) {
10221 dhd_bus_pcie_pwr_req_clear(bus);
10222 }
10223 return resched;
10224 }
10225
10226 #if defined(DHD_H2D_LOG_TIME_SYNC)
10227 static void
10228 dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
10229 {
10230 unsigned long time_elapsed;
10231
10232 /* Poll for timeout value periodically */
10233 if ((bus->dhd->busstate == DHD_BUS_DATA) &&
10234 (bus->dhd->dhd_rte_time_sync_ms != 0) &&
10235 DHD_CHK_BUS_NOT_IN_LPS(bus)) {
10236 /*
10237 * XXX OSL_SYSUPTIME_US() overflow should not happen.
10238 * As it is an unsigned 64 bit value 18446744073709551616L,
10239 * which needs 213503982334 days to overflow
10240 */
10241 time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
10242 /* Compare time in milliseconds */
10243 if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
10244 /*
10245 * It's fine if it has crossed the timeout value. No need to adjust the
10246 * elapsed time
10247 */
10248 bus->dhd_rte_time_sync_count += time_elapsed;
10249
10250 /* Schedule deffered work. Work function will send IOVAR. */
10251 dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
10252 }
10253 }
10254 }
10255 #endif /* DHD_H2D_LOG_TIME_SYNC */
10256
10257 static bool
10258 dhdpci_bus_read_frames(dhd_bus_t *bus)
10259 {
10260 bool more = FALSE;
10261
10262 /* First check if there a FW trap */
10263 if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
10264 (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
10265 #ifdef DNGL_AXI_ERROR_LOGGING
10266 if (bus->dhd->axi_error) {
10267 DHD_ERROR(("AXI Error happened\n"));
10268 return FALSE;
10269 }
10270 #endif /* DNGL_AXI_ERROR_LOGGING */
10271 dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
10272 return FALSE;
10273 }
10274
10275 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
10276 dhd_prot_process_ctrlbuf(bus->dhd);
10277 bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
10278
10279 /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
10280 if (DHD_CHK_BUS_IN_LPS(bus)) {
10281 DHD_ERROR(("%s: Bus is in power save state (%d). "
10282 "Skip processing rest of ring buffers.\n",
10283 __FUNCTION__, bus->bus_low_power_state));
10284 return FALSE;
10285 }
10286
10287 /* update the flow ring cpls */
10288 dhd_update_txflowrings(bus->dhd);
10289 bus->last_process_flowring_time = OSL_LOCALTIME_NS();
10290
10291 /* With heavy TX traffic, we could get a lot of TxStatus
10292 * so add bound
10293 */
10294 more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
10295 bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
10296
10297 /* With heavy RX traffic, this routine potentially could spend some time
10298 * processing RX frames without RX bound
10299 */
10300 more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
10301 bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
10302
10303 /* Process info ring completion messages */
10304 #ifdef EWP_EDL
10305 if (!bus->dhd->dongle_edl_support)
10306 #endif
10307 {
10308 more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
10309 bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
10310 }
10311 #ifdef EWP_EDL
10312 else {
10313 more |= dhd_prot_process_msgbuf_edl(bus->dhd);
10314 bus->last_process_edl_time = OSL_LOCALTIME_NS();
10315 }
10316 #endif /* EWP_EDL */
10317
10318 #ifdef IDLE_TX_FLOW_MGMT
10319 if (bus->enable_idle_flowring_mgmt) {
10320 /* Look for idle flow rings */
10321 dhd_bus_check_idle_scan(bus);
10322 }
10323 #endif /* IDLE_TX_FLOW_MGMT */
10324
10325 /* don't talk to the dongle if fw is about to be reloaded */
10326 if (bus->dhd->hang_was_sent) {
10327 more = FALSE;
10328 }
10329
10330 #ifdef SUPPORT_LINKDOWN_RECOVERY
10331 /* XXX : It seems that linkdown has occurred without notification,
10332 * In case read shared memory failed, recovery hang is needed
10333 */
10334 if (bus->read_shm_fail) {
10335 /* Read interrupt state once again to confirm linkdown */
10336 int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
10337 bus->pcie_mailbox_int, 0, 0);
10338 if (intstatus != (uint32)-1) {
10339 DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
10340 #ifdef DHD_FW_COREDUMP
10341 if (bus->dhd->memdump_enabled) {
10342 DHD_OS_WAKE_LOCK(bus->dhd);
10343 bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
10344 dhd_bus_mem_dump(bus->dhd);
10345 DHD_OS_WAKE_UNLOCK(bus->dhd);
10346 }
10347 #endif /* DHD_FW_COREDUMP */
10348 } else {
10349 DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
10350 #ifdef CONFIG_ARCH_MSM
10351 bus->no_cfg_restore = 1;
10352 #endif /* CONFIG_ARCH_MSM */
10353 bus->is_linkdown = 1;
10354 }
10355
10356 /* XXX The dhd_prot_debug_info_print() function *has* to be
10357 * invoked only if the bus->is_linkdown is updated so that
10358 * host doesn't need to read any pcie registers if
10359 * PCIe link is down.
10360 */
10361 dhd_prot_debug_info_print(bus->dhd);
10362 bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
10363 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
10364 copy_hang_info_linkdown(bus->dhd);
10365 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
10366 dhd_os_send_hang_message(bus->dhd);
10367 more = FALSE;
10368 }
10369 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10370 #if defined(DHD_H2D_LOG_TIME_SYNC)
10371 dhdpci_bus_rte_log_time_sync_poll(bus);
10372 #endif /* DHD_H2D_LOG_TIME_SYNC */
10373 return more;
10374 }
10375
10376 bool
10377 dhdpcie_tcm_valid(dhd_bus_t *bus)
10378 {
10379 uint32 addr = 0;
10380 int rv;
10381 uint32 shaddr = 0;
10382 pciedev_shared_t sh;
10383
10384 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
10385
10386 /* Read last word in memory to determine address of pciedev_shared structure */
10387 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
10388
10389 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
10390 (addr > shaddr)) {
10391 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
10392 __FUNCTION__, addr));
10393 return FALSE;
10394 }
10395
10396 /* Read hndrte_shared structure */
10397 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
10398 sizeof(pciedev_shared_t))) < 0) {
10399 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
10400 return FALSE;
10401 }
10402
10403 /* Compare any field in pciedev_shared_t */
10404 if (sh.console_addr != bus->pcie_sh->console_addr) {
10405 DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
10406 return FALSE;
10407 }
10408
10409 return TRUE;
10410 }
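
/*
 * Editorial note (hedged): both dhdpcie_tcm_valid() above and
 * dhdpcie_readshared() below rely on the same convention: the last 32-bit
 * word of dongle RAM (dongle_ram_base + ramsize - 4) holds the TCM address of
 * the pciedev_shared_t structure once firmware has initialized. A value of 0,
 * the value left there by the image download (bus->nvram_csm), or anything
 * outside [dongle_ram_base, shaddr] means the dongle is not (yet) ready.
 */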
10411
10412 static void
10413 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
10414 {
10415 snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
10416 firmware_api_version, host_api_version);
10417 return;
10418 }
10419
10420 static bool
10421 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
10422 {
10423 bool retcode = FALSE;
10424
10425 DHD_INFO(("firmware api revision %d, host api revision %d\n",
10426 firmware_api_version, host_api_version));
10427
10428 switch (firmware_api_version) {
10429 case PCIE_SHARED_VERSION_7:
10430 case PCIE_SHARED_VERSION_6:
10431 case PCIE_SHARED_VERSION_5:
10432 retcode = TRUE;
10433 break;
10434 default:
10435 if (firmware_api_version <= host_api_version)
10436 retcode = TRUE;
10437 }
10438 return retcode;
10439 }
10440
10441 static int
10442 dhdpcie_readshared(dhd_bus_t *bus)
10443 {
10444 uint32 addr = 0;
10445 int rv, dma_indx_wr_buf, dma_indx_rd_buf;
10446 uint32 shaddr = 0;
10447 pciedev_shared_t *sh = bus->pcie_sh;
10448 dhd_timeout_t tmo;
10449 bool idma_en = FALSE;
10450 #if defined(PCIE_INB_DW)
10451 bool d2h_inband_dw = FALSE;
10452 #endif /* defined(PCIE_INB_DW) */
10453 uint32 timeout = MAX_READ_TIMEOUT;
10454 uint32 elapsed;
10455
10456 if (MULTIBP_ENAB(bus->sih)) {
10457 dhd_bus_pcie_pwr_req(bus);
10458 }
10459
10460 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
10461
10462	/* start a timeout of MAX_READ_TIMEOUT within which the dongle must publish the shared-area address */
10463 dhd_timeout_start(&tmo, timeout);
10464
10465 while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
10466 /* Read last word in memory to determine address of pciedev_shared structure */
10467 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
10468 }
10469
10470 if (addr == (uint32)-1) {
10471 DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
10472 #ifdef SUPPORT_LINKDOWN_RECOVERY
10473 #ifdef CONFIG_ARCH_MSM
10474 bus->no_cfg_restore = 1;
10475 #endif /* CONFIG_ARCH_MSM */
10476 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10477 bus->is_linkdown = 1;
10478 return BCME_ERROR;
10479 }
10480
10481 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
10482 (addr > shaddr)) {
10483 elapsed = tmo.elapsed;
10484 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
10485 __FUNCTION__, addr));
10486 DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
10487 #ifdef DEBUG_DNGL_INIT_FAIL
10488	if (addr != (uint32)-1) {	/* skip the memdump if the read returned all 1s (link may be down) */
10489 #ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
10490 bus->dhd->memdump_enabled = DUMP_MEMFILE;
10491 #else
10492 /* Force panic as HAL will not be inited yet */
10493 bus->dhd->memdump_enabled = DUMP_MEMONLY;
10494 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
10495 if (bus->dhd->memdump_enabled) {
10496 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
10497 dhdpcie_mem_dump(bus);
10498 }
10499 }
10500 #endif /* DEBUG_DNGL_INIT_FAIL */
10501 return BCME_ERROR;
10502 } else {
10503 bus->rd_shared_pass_time = OSL_LOCALTIME_NS();
10504 elapsed = tmo.elapsed;
10505 bus->shared_addr = (ulong)addr;
10506 DHD_ERROR(("### Total time ARM OOR to Readshared pass took %llu usec ###\n",
10507 DIV_U64_BY_U32((bus->rd_shared_pass_time - bus->arm_oor_time),
10508 NSEC_PER_USEC)));
10509 DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
10510 "before dongle is ready\n", __FUNCTION__, addr, elapsed));
10511 }
10512
10513 /* Read hndrte_shared structure */
10514 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
10515 sizeof(pciedev_shared_t))) < 0) {
10516 DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
10517 return rv;
10518 }
10519
10520 /* Endianness */
10521 sh->flags = ltoh32(sh->flags);
10522 sh->trap_addr = ltoh32(sh->trap_addr);
10523 sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
10524 sh->assert_file_addr = ltoh32(sh->assert_file_addr);
10525 sh->assert_line = ltoh32(sh->assert_line);
10526 sh->console_addr = ltoh32(sh->console_addr);
10527 sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
10528 sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
10529 sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
10530 sh->flags2 = ltoh32(sh->flags2);
10531
10532 /* load bus console address */
10533 bus->console_addr = sh->console_addr;
10534
10535 /* Read the dma rx offset */
10536 bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
10537 dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
10538
10539 DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
10540
10541 bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
10542 if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
10543 {
10544 DHD_ERROR(("%s: pcie_shared version %d in dhd "
10545 "is older than pciedev_shared version %d in dongle\n",
10546 __FUNCTION__, PCIE_SHARED_VERSION,
10547 bus->api.fw_rev));
10548 return BCME_ERROR;
10549 }
10550 dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
10551
10552 bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
10553 sizeof(uint16) : sizeof(uint32);
10554	DHD_INFO(("%s: Dongle advertises %d size indices\n",
10555 __FUNCTION__, bus->rw_index_sz));
10556
10557 #ifdef IDLE_TX_FLOW_MGMT
10558 if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
10559	DHD_ERROR(("%s: FW supports IdleFlow ring management!\n",
10560 __FUNCTION__));
10561 bus->enable_idle_flowring_mgmt = TRUE;
10562 }
10563 #endif /* IDLE_TX_FLOW_MGMT */
10564
10565 #ifdef PCIE_INB_DW
10566 bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
10567 d2h_inband_dw = bus->dhd->d2h_inband_dw;
10568 #endif /* PCIE_INB_DW */
10569
10570 #if defined(PCIE_INB_DW)
10571 DHD_ERROR(("FW supports Inband dw ? %s\n",
10572 d2h_inband_dw ? "Y":"N"));
10573 #endif /* defined(PCIE_INB_DW) */
10574
10575 if (IDMA_CAPABLE(bus)) {
10576 if (bus->sih->buscorerev == 23) {
10577 #ifdef PCIE_INB_DW
10578 if (bus->dhd->d2h_inband_dw)
10579 {
10580 idma_en = TRUE;
10581 }
10582 #endif /* PCIE_INB_DW */
10583 } else {
10584 idma_en = TRUE;
10585 }
10586 }
10587
10588 /* Read flag2 HWA bit */
10589 bus->dhd->hwa_capable = (sh->flags2 & PCIE_SHARED2_HWA) ? TRUE : FALSE;
10590 DHD_ERROR(("FW supports HWA ? %s\n", bus->dhd->hwa_capable ? "Y":"N"));
10591 bus->hwa_db_index_sz = PCIE_HWA_DB_INDEX_SZ;
10592
10593 if (idma_en) {
10594 bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
10595 bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
10596 }
10597
10598 bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
10599
10600 bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
10601
10602 /* Does the FW support DMA'ing r/w indices */
10603 if (sh->flags & PCIE_SHARED_DMA_INDEX) {
10604 if (!bus->dhd->dma_ring_upd_overwrite) {
10605 {
10606 if (!IFRM_ENAB(bus->dhd)) {
10607 bus->dhd->dma_h2d_ring_upd_support = TRUE;
10608 }
10609 bus->dhd->dma_d2h_ring_upd_support = TRUE;
10610 }
10611 }
10612
10613 if (bus->dhd->dma_d2h_ring_upd_support && bus->dhd->d2h_sync_mode) {
10614 DHD_ERROR(("%s: ERROR COMBO: sync (0x%x) enabled for DMA indices\n",
10615 __FUNCTION__, bus->dhd->d2h_sync_mode));
10616 }
10617
10618 DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
10619 __FUNCTION__,
10620 (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
10621 (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
10622 } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
10623 DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
10624 __FUNCTION__));
10625 return BCME_UNSUPPORTED;
10626 } else {
10627 bus->dhd->dma_h2d_ring_upd_support = FALSE;
10628 bus->dhd->dma_d2h_ring_upd_support = FALSE;
10629 }
10630
10631 /* Does the firmware support fast delete ring? */
10632 if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
10633 DHD_INFO(("%s: Firmware supports fast delete ring\n",
10634 __FUNCTION__));
10635 bus->dhd->fast_delete_ring_support = TRUE;
10636 } else {
10637 DHD_INFO(("%s: Firmware does not support fast delete ring\n",
10638 __FUNCTION__));
10639 bus->dhd->fast_delete_ring_support = FALSE;
10640 }
10641
10642 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
10643 {
10644 ring_info_t ring_info;
10645
10646 /* boundary check */
10647 if ((sh->rings_info_ptr < bus->dongle_ram_base) || (sh->rings_info_ptr > shaddr)) {
10648 DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
10649 __FUNCTION__, sh->rings_info_ptr));
10650 return BCME_ERROR;
10651 }
10652
10653 if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
10654 (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
10655 return rv;
10656
10657 bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
10658 bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
10659
10660 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
10661 bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
10662 bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
10663 bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
10664 bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
10665 bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
10666 bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
10667 }
10668 else {
10669 bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
10670 bus->max_submission_rings = bus->max_tx_flowrings;
10671 bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
10672 bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
10673 bus->api.handle_mb_data = dhdpcie_handle_mb_data;
10674 bus->use_mailbox = TRUE;
10675 }
10676 if (bus->max_completion_rings == 0) {
10677 DHD_ERROR(("dongle completion rings are invalid %d\n",
10678 bus->max_completion_rings));
10679 return BCME_ERROR;
10680 }
10681 if (bus->max_submission_rings == 0) {
10682 DHD_ERROR(("dongle submission rings are invalid %d\n",
10683 bus->max_submission_rings));
10684 return BCME_ERROR;
10685 }
10686 if (bus->max_tx_flowrings == 0) {
10687 DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
10688 return BCME_ERROR;
10689 }
10690
10691 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
10692 * The max_sub_queues is read from FW initialized ring_info
10693 */
10694 if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
10695 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
10696 H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
10697 dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
10698 D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
10699
10700 if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
10701	DHD_ERROR(("%s: Failed to allocate memory for DMA'ing h2d indices. "
10702	"Host will use w/r indices in TCM\n",
10703 __FUNCTION__));
10704 bus->dhd->dma_h2d_ring_upd_support = FALSE;
10705 bus->dhd->idma_enable = FALSE;
10706 }
10707 }
10708
10709 if (bus->dhd->dma_d2h_ring_upd_support) {
10710 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
10711 D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
10712 dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
10713 H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
10714
10715 if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
10716	DHD_ERROR(("%s: Failed to allocate memory for DMA'ing d2h indices. "
10717	"Host will use w/r indices in TCM\n",
10718 __FUNCTION__));
10719 bus->dhd->dma_d2h_ring_upd_support = FALSE;
10720 }
10721 }
10722
10723 if (IFRM_ENAB(bus->dhd)) {
10724 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
10725 H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
10726
10727 if (dma_indx_wr_buf != BCME_OK) {
10728 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
10729 __FUNCTION__));
10730 bus->dhd->ifrm_enable = FALSE;
10731 }
10732 }
10733
10734 /* read ringmem and ringstate ptrs from shared area and store in host variables */
10735 dhd_fillup_ring_sharedptr_info(bus, &ring_info);
10736 if (dhd_msg_level & DHD_INFO_VAL) {
10737 bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
10738 }
10739 DHD_INFO(("%s: ring_info\n", __FUNCTION__));
10740
10741 DHD_ERROR(("%s: max H2D queues %d\n",
10742 __FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
10743
10744	DHD_INFO(("mailbox address\n"));
10745 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
10746 __FUNCTION__, bus->h2d_mb_data_ptr_addr));
10747 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
10748 __FUNCTION__, bus->d2h_mb_data_ptr_addr));
10749 }
10750
10751 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
10752 __FUNCTION__, bus->dhd->d2h_sync_mode));
10753
10754 bus->dhd->d2h_hostrdy_supported =
10755 ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
10756
10757 bus->dhd->ext_trap_data_supported =
10758 ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
10759
10760 if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
10761 bus->dhd->pcie_txs_metadata_enable = 0;
10762
10763 if (sh->flags2 & PCIE_SHARED2_TRAP_ON_HOST_DB7) {
10764 memset(&bus->dhd->db7_trap, 0, sizeof(bus->dhd->db7_trap));
10765 bus->dhd->db7_trap.fw_db7w_trap = 1;
10766 /* add an option to let the user select ?? */
10767 bus->dhd->db7_trap.db7_magic_number = PCIE_DB7_MAGIC_NUMBER_DPC_TRAP;
10768 }
10769
10770 bus->dhd->hscb_enable =
10771 (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
10772
10773 #ifdef EWP_EDL
10774 if (host_edl_support) {
10775 bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
10776 DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
10777 }
10778 #endif /* EWP_EDL */
10779
10780 bus->dhd->debug_buf_dest_support =
10781 (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
10782 DHD_ERROR(("FW supports debug buf dest ? %s \n",
10783 bus->dhd->debug_buf_dest_support ? "Y" : "N"));
10784
10785 #ifdef DHD_DB0TS
10786 bus->dhd->db0ts_capable =
10787 (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
10788 #endif /* DHD_DB0TS */
10789
10790 if (MULTIBP_ENAB(bus->sih)) {
10791 dhd_bus_pcie_pwr_req_clear(bus);
10792
10793 /*
10794 * WAR to fix ARM cold boot;
10795 * De-assert WL domain in DAR
10796 */
10797 if (bus->sih->buscorerev >= 68) {
10798 dhd_bus_pcie_pwr_req_wl_domain(bus,
10799 DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
10800 }
10801 }
10802 return BCME_OK;
10803 } /* dhdpcie_readshared */
10804
10805 /** Read ring mem and ring state ptr info from shared memory area in device memory */
10806 static void
10807 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
10808 {
10809 uint16 i = 0;
10810 uint16 j = 0;
10811 uint32 tcm_memloc;
10812 uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
10813 uint32 h2d_hwa_db_idx_ptr = 0, d2h_hwa_db_idx_ptr = 0;
10814 uint16 max_tx_flowrings = bus->max_tx_flowrings;
10815
10816 /* Ring mem ptr info */
10817	/* Allocated in the order
10818 H2D_MSGRING_CONTROL_SUBMIT 0
10819 H2D_MSGRING_RXPOST_SUBMIT 1
10820 D2H_MSGRING_CONTROL_COMPLETE 2
10821 D2H_MSGRING_TX_COMPLETE 3
10822 D2H_MSGRING_RX_COMPLETE 4
10823 */
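/*
 * Illustrative layout (not authoritative): with rw_index_sz == 2 (16-bit
 * indices) the write-index block pointed to by h2d_w_idx_ptr is a packed
 * array with one entry per ring, so ring i's write index lives at
 * h2d_w_idx_ptr + i * rw_index_sz. The loops below simply walk these
 * arrays and record the per-ring addresses in bus->ring_sh[].
 */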
10824
10825 {
10826 /* ringmemptr holds start of the mem block address space */
10827 tcm_memloc = ltoh32(ring_info->ringmem_ptr);
10828
10829	/* Find out the ringmem ptr for each common ring */
10830 for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
10831 bus->ring_sh[i].ring_mem_addr = tcm_memloc;
10832 /* Update mem block */
10833 tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
10834 DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
10835 i, bus->ring_sh[i].ring_mem_addr));
10836 }
10837 }
10838
10839 /* Ring state mem ptr info */
10840 {
10841 d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
10842 d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
10843 h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
10844 h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
10845
10846 if (HWA_CAPAB(bus->dhd)) {
10847 h2d_hwa_db_idx_ptr = ltoh32(ring_info->h2d_hwa_db_idx_ptr);
10848 d2h_hwa_db_idx_ptr = ltoh32(ring_info->d2h_hwa_db_idx_ptr);
10849 }
10850
10851 /* Store h2d common ring write/read pointers */
10852 for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
10853 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
10854 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
10855
10856 /* update mem block */
10857 h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
10858 h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
10859
10860 DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
10861 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
10862
10863 if (HWA_CAPAB(bus->dhd)) {
10864 /* Read HWA DB index value from TCM */
10865 bus->ring_sh[i].ring_hwa_db_idx =
10866 ltoh16(dhdpcie_bus_rtcm16(bus, h2d_hwa_db_idx_ptr));
10867 /* update mem block */
10868 h2d_hwa_db_idx_ptr += bus->hwa_db_index_sz;
10869 DHD_INFO(("h2d hwa: idx %d hw_db %x \n", i,
10870 bus->ring_sh[i].ring_hwa_db_idx));
10871 }
10872 }
10873
10874 /* Store d2h common ring write/read pointers */
10875 for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
10876 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
10877 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
10878
10879 /* update mem block */
10880 d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
10881 d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
10882
10883 DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
10884 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
10885
10886 if (HWA_CAPAB(bus->dhd)) {
10887 /* Read HWA DB index value from TCM */
10888 bus->ring_sh[i].ring_hwa_db_idx =
10889 ltoh16(dhdpcie_bus_rtcm16(bus, d2h_hwa_db_idx_ptr));
10890 /* update mem block */
10891 d2h_hwa_db_idx_ptr += bus->hwa_db_index_sz;
10892 DHD_INFO(("d2h hwa: idx %d hw_db %x \n", i,
10893 bus->ring_sh[i].ring_hwa_db_idx));
10894 }
10895 }
10896
10897 /* Store txflow ring write/read pointers */
10898 if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
10899 max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
10900 } else {
10901 /* Account for Debug info h2d ring located after the last tx flow ring */
10902 max_tx_flowrings = max_tx_flowrings + 1;
10903 }
10904 for (j = 0; j < max_tx_flowrings; i++, j++)
10905 {
10906 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
10907 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
10908
10909 /* update mem block */
10910 h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
10911 h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
10912
10913 DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
10914 __FUNCTION__, i,
10915 bus->ring_sh[i].ring_state_w,
10916 bus->ring_sh[i].ring_state_r));
10917
10918 if (HWA_CAPAB(bus->dhd)) {
10919 /* Read HWA DB index value from TCM */
10920 bus->ring_sh[i].ring_hwa_db_idx =
10921 ltoh16(dhdpcie_bus_rtcm16(bus, h2d_hwa_db_idx_ptr));
10922 /* update mem block */
10923 h2d_hwa_db_idx_ptr += bus->hwa_db_index_sz;
10924 DHD_INFO(("h2d hwa: idx %d hw_db %x \n", i,
10925 bus->ring_sh[i].ring_hwa_db_idx));
10926 }
10927 }
10928 /* store wr/rd pointers for debug info completion or EDL ring */
10929 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
10930 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
10931 d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
10932 d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
10933 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
10934 bus->ring_sh[i].ring_state_w,
10935 bus->ring_sh[i].ring_state_r));
10936
10937 if (HWA_CAPAB(bus->dhd)) {
10938 /* Read HWA DB index value from TCM */
10939 bus->ring_sh[i].ring_hwa_db_idx =
10940 ltoh16(dhdpcie_bus_rtcm16(bus, d2h_hwa_db_idx_ptr));
10941 /* update mem block */
10942 d2h_hwa_db_idx_ptr += bus->hwa_db_index_sz;
10943 DHD_INFO(("d2h hwa: idx %d hw_db %x \n", i,
10944 bus->ring_sh[i].ring_hwa_db_idx));
10945 }
10946 }
10947 } /* dhd_fillup_ring_sharedptr_info */
10948
10949 /**
10950 * Initialize bus module: prepare for communication with the dongle. Called after downloading
10951 * firmware into the dongle.
10952 */
10953 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
10954 {
10955 dhd_bus_t *bus = dhdp->bus;
10956 int ret = 0;
10957
10958 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10959
10960 ASSERT(bus->dhd);
10961 if (!bus->dhd)
10962 return 0;
10963
10964 dhd_bus_pcie_pwr_req_clear_reload_war(bus);
10965
10966 if (MULTIBP_ENAB(bus->sih)) {
10967 dhd_bus_pcie_pwr_req(bus);
10968 }
10969
10970 /* Configure AER registers to log the TLP header */
10971 dhd_bus_aer_config(bus);
10972
10973 /* Make sure we're talking to the core. */
10974 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
10975 ASSERT(bus->reg != NULL);
10976
10977	/* before opening up the bus for data transfer, check if the shared area is intact */
10978 ret = dhdpcie_readshared(bus);
10979 if (ret < 0) {
10980 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
10981 goto exit;
10982 }
10983
10984 /* Make sure we're talking to the core. */
10985 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
10986 ASSERT(bus->reg != NULL);
10987
10988 /* Set bus state according to enable result */
10989 dhdp->busstate = DHD_BUS_DATA;
10990 DHD_SET_BUS_NOT_IN_LPS(bus);
10991 dhdp->dhd_bus_busy_state = 0;
10992
10993 /* D11 status via PCIe completion header */
10994 if ((ret = dhdpcie_init_d11status(bus)) < 0) {
10995 goto exit;
10996 }
10997
10998 if (!dhd_download_fw_on_driverload)
10999 dhd_dpc_enable(bus->dhd);
11000 /* Enable the interrupt after device is up */
11001 dhdpcie_bus_intr_enable(bus);
11002
11003 DHD_ERROR(("%s: Enabling bus->intr_enabled\n", __FUNCTION__));
11004 bus->intr_enabled = TRUE;
11005
11006 /* XXX These need to change w/API updates */
11007 /* bcmsdh_intr_unmask(bus->sdh); */
11008 #ifdef DHD_PCIE_RUNTIMEPM
11009 bus->idlecount = 0;
11010 bus->idletime = (int32)MAX_IDLE_COUNT;
11011 init_waitqueue_head(&bus->rpm_queue);
11012 mutex_init(&bus->pm_lock);
11013 #else
11014 bus->idletime = 0;
11015 #endif /* DHD_PCIE_RUNTIMEPM */
11016 #ifdef PCIE_INB_DW
11017 bus->skip_ds_ack = FALSE;
11018 /* Initialize the lock to serialize Device Wake Inband activities */
11019 if (!bus->inb_lock) {
11020 bus->inb_lock = osl_spin_lock_init(bus->dhd->osh);
11021 }
11022 #endif
11023
11024 /* XXX Temp errnum workaround: return ok, caller checks bus state */
11025
11026 /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
11027 if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
11028 bus->use_d0_inform = TRUE;
11029 } else {
11030 bus->use_d0_inform = FALSE;
11031 }
11032
11033 bus->hostready_count = 0;
11034
11035 exit:
11036 if (MULTIBP_ENAB(bus->sih)) {
11037 dhd_bus_pcie_pwr_req_clear(bus);
11038 }
11039 return ret;
11040 }
11041
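/*
 * Clear the last word of dongle RAM (the location where the dongle will
 * later publish the address of its pciedev_shared_t structure), so that a
 * stale pointer from a previous firmware load is not mistaken for a valid
 * one by dhdpcie_readshared().
 */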
11042 static void
11043 dhdpcie_init_shared_addr(dhd_bus_t *bus)
11044 {
11045 uint32 addr = 0;
11046 uint32 val = 0;
11047 addr = bus->dongle_ram_base + bus->ramsize - 4;
11048 #ifdef DHD_PCIE_RUNTIMEPM
11049 dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
11050 #endif /* DHD_PCIE_RUNTIMEPM */
11051 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
11052 }
11053
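/*
 * Note: despite the bool return type, this routine keeps the original
 * convention of returning 0 for a supported vendor/device pair and
 * -ENODEV otherwise.
 */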
11054 bool
11055 dhdpcie_chipmatch(uint16 vendor, uint16 device)
11056 {
11057 if (vendor != PCI_VENDOR_ID_BROADCOM) {
11058 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
11059 vendor, device));
11060 return (-ENODEV);
11061 }
11062
11063 switch (device) {
11064 case BCM4345_CHIP_ID:
11065 case BCM43454_CHIP_ID:
11066 case BCM43455_CHIP_ID:
11067 case BCM43457_CHIP_ID:
11068 case BCM43458_CHIP_ID:
11069 case BCM4350_D11AC_ID:
11070 case BCM4350_D11AC2G_ID:
11071 case BCM4350_D11AC5G_ID:
11072 case BCM4350_CHIP_ID:
11073 case BCM4354_D11AC_ID:
11074 case BCM4354_D11AC2G_ID:
11075 case BCM4354_D11AC5G_ID:
11076 case BCM4354_CHIP_ID:
11077 case BCM4356_D11AC_ID:
11078 case BCM4356_D11AC2G_ID:
11079 case BCM4356_D11AC5G_ID:
11080 case BCM4356_CHIP_ID:
11081 case BCM4371_D11AC_ID:
11082 case BCM4371_D11AC2G_ID:
11083 case BCM4371_D11AC5G_ID:
11084 case BCM4371_CHIP_ID:
11085 case BCM4345_D11AC_ID:
11086 case BCM4345_D11AC2G_ID:
11087 case BCM4345_D11AC5G_ID:
11088 case BCM43452_D11AC_ID:
11089 case BCM43452_D11AC2G_ID:
11090 case BCM43452_D11AC5G_ID:
11091 case BCM4335_D11AC_ID:
11092 case BCM4335_D11AC2G_ID:
11093 case BCM4335_D11AC5G_ID:
11094 case BCM4335_CHIP_ID:
11095 case BCM43602_D11AC_ID:
11096 case BCM43602_D11AC2G_ID:
11097 case BCM43602_D11AC5G_ID:
11098 case BCM43602_CHIP_ID:
11099 case BCM43569_D11AC_ID:
11100 case BCM43569_D11AC2G_ID:
11101 case BCM43569_D11AC5G_ID:
11102 case BCM43569_CHIP_ID:
11103 /* XXX: For 4358, BCM4358_CHIP_ID is not checked intentionally as
11104 * this is not a real chip id, but propagated from the OTP.
11105 */
11106 case BCM4358_D11AC_ID:
11107 case BCM4358_D11AC2G_ID:
11108 case BCM4358_D11AC5G_ID:
11109 case BCM4349_D11AC_ID:
11110 case BCM4349_D11AC2G_ID:
11111 case BCM4349_D11AC5G_ID:
11112 case BCM4355_D11AC_ID:
11113 case BCM4355_D11AC2G_ID:
11114 case BCM4355_D11AC5G_ID:
11115 case BCM4355_CHIP_ID:
11116 /* XXX: BCM4359_CHIP_ID is not checked intentionally as this is
11117	 * not a real chip id, but propagated from the OTP.
11118 */
11119 case BCM4359_D11AC_ID:
11120 case BCM4359_D11AC2G_ID:
11121 case BCM4359_D11AC5G_ID:
11122 case BCM43596_D11AC_ID:
11123 case BCM43596_D11AC2G_ID:
11124 case BCM43596_D11AC5G_ID:
11125 case BCM43597_D11AC_ID:
11126 case BCM43597_D11AC2G_ID:
11127 case BCM43597_D11AC5G_ID:
11128 case BCM4364_D11AC_ID:
11129 case BCM4364_D11AC2G_ID:
11130 case BCM4364_D11AC5G_ID:
11131 case BCM4364_CHIP_ID:
11132 case BCM4361_D11AC_ID:
11133 case BCM4361_D11AC2G_ID:
11134 case BCM4361_D11AC5G_ID:
11135 case BCM4361_CHIP_ID:
11136 case BCM4347_D11AC_ID:
11137 case BCM4347_D11AC2G_ID:
11138 case BCM4347_D11AC5G_ID:
11139 case BCM4347_CHIP_ID:
11140 case BCM4369_D11AX_ID:
11141 case BCM4369_D11AX2G_ID:
11142 case BCM4369_D11AX5G_ID:
11143 case BCM4369_CHIP_ID:
11144 case BCM4376_D11AX_ID:
11145 case BCM4376_D11AX2G_ID:
11146 case BCM4376_D11AX5G_ID:
11147 case BCM4376_CHIP_ID:
11148 case BCM4377_M_D11AX_ID:
11149 case BCM4377_D11AX_ID:
11150 case BCM4377_D11AX2G_ID:
11151 case BCM4377_D11AX5G_ID:
11152 case BCM4377_CHIP_ID:
11153 case BCM4378_D11AX_ID:
11154 case BCM4378_D11AX2G_ID:
11155 case BCM4378_D11AX5G_ID:
11156 case BCM4378_CHIP_ID:
11157 case BCM4387_D11AX_ID:
11158 case BCM4387_CHIP_ID:
11159 case BCM4368_D11AC_ID:
11160 case BCM4368_D11AC2G_ID:
11161 case BCM4368_D11AC5G_ID:
11162 case BCM4368_CHIP_ID:
11163 case BCM4362_D11AX_ID:
11164 case BCM4362_D11AX2G_ID:
11165 case BCM4362_D11AX5G_ID:
11166 case BCM4362_CHIP_ID:
11167 case BCM4375_D11AX_ID:
11168 case BCM4375_D11AX2G_ID:
11169 case BCM4375_D11AX5G_ID:
11170 case BCM4375_CHIP_ID:
11171 case BCM43751_D11AX_ID:
11172 case BCM43751_D11AX2G_ID:
11173 case BCM43751_D11AX5G_ID:
11174 case BCM43751_CHIP_ID:
11175 case BCM43752_D11AX_ID:
11176 case BCM43752_D11AX2G_ID:
11177 case BCM43752_D11AX5G_ID:
11178 case BCM43752_CHIP_ID:
11179 case BCM4388_CHIP_ID:
11180 case BCM4388_D11AX_ID:
11181 case BCM4389_CHIP_ID:
11182 case BCM4389_D11AX_ID:
11183 case BCM4385_D11AX_ID:
11184 case BCM4385_CHIP_ID:
11185
11186 return 0;
11187 default:
11188 DHD_ERROR(("%s: Unsupported vendor %x device %x\n",
11189 __FUNCTION__, vendor, device));
11190 return (-ENODEV);
11191 }
11192 } /* dhdpcie_chipmatch */
11193
11194 /**
11195 * Name: dhdpcie_cc_nvmshadow
11196 *
11197 * Description:
11198 * A shadow of OTP/SPROM exists in ChipCommon Region
11199 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
11200 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
11201 * can also be read from ChipCommon Registers.
11202 */
11203 /* XXX So far tested with 4345 and 4350 (Hence the checks in the function.) */
11204 static int
11205 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
11206 {
11207 uint16 dump_offset = 0;
11208 uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
11209
11210 /* Table for 65nm OTP Size (in bits) */
11211 int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
11212
11213 volatile uint16 *nvm_shadow;
11214
11215 uint cur_coreid;
11216 uint chipc_corerev;
11217 chipcregs_t *chipcregs;
11218
11219 /* Save the current core */
11220 cur_coreid = si_coreid(bus->sih);
11221 /* Switch to ChipC */
11222 chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
11223 ASSERT(chipcregs != NULL);
11224
11225 chipc_corerev = si_corerev(bus->sih);
11226
11227 /* Check ChipcommonCore Rev */
11228 if (chipc_corerev < 44) {
11229 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
11230 return BCME_UNSUPPORTED;
11231 }
11232
11233 /* Check ChipID */
11234 if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
11235 ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
11236 ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
11237	DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
11238 "4350/4345/4355/4364 only\n", __FUNCTION__));
11239 return BCME_UNSUPPORTED;
11240 }
11241
11242 /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
11243 if (chipcregs->sromcontrol & SRC_PRESENT) {
11244 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
11245 sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
11246 >> SRC_SIZE_SHIFT))) * 1024;
11247 bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
11248 }
11249
11250 /* XXX Check if OTP exists. 2 possible approaches:
11251 * 1) Check if OtpPresent in SpromCtrl (0x190 in ChipCommon Regs) is set OR
11252 * 2) Check if OtpSize > 0
11253 */
11254 if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
11255 bcm_bprintf(b, "\nOTP Present");
11256
11257 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
11258 == OTPL_WRAP_TYPE_40NM) {
11259 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
11260 /* Chipcommon rev51 is a variation on rev45 and does not support
11261 * the latest OTP configuration.
11262 */
11263 if (chipc_corerev != 51 && chipc_corerev >= 49) {
11264 otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
11265 >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
11266 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
11267 } else {
11268 otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
11269 >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
11270 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
11271 }
11272 } else {
11273 /* This part is untested since newer chips have 40nm OTP */
11274 /* Chipcommon rev51 is a variation on rev45 and does not support
11275 * the latest OTP configuration.
11276 */
11277 if (chipc_corerev != 51 && chipc_corerev >= 49) {
11278 otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
11279 >> OTPL_ROW_SIZE_SHIFT];
11280 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
11281 } else {
11282 otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
11283 >> CC_CAP_OTPSIZE_SHIFT];
11284 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
11285 DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
11286 __FUNCTION__));
11287 }
11288 }
11289 }
11290
11291 /* Chipcommon rev51 is a variation on rev45 and does not support
11292 * the latest OTP configuration.
11293 */
11294 if (chipc_corerev != 51 && chipc_corerev >= 49) {
11295 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
11296 ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
11297 DHD_ERROR(("%s: SPROM and OTP could not be found "
11298 "sromcontrol = %x, otplayout = %x \n",
11299 __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
11300 return BCME_NOTFOUND;
11301 }
11302 } else {
11303 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
11304 ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
11305 DHD_ERROR(("%s: SPROM and OTP could not be found "
11306 "sromcontrol = %x, capablities = %x \n",
11307 __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
11308 return BCME_NOTFOUND;
11309 }
11310 }
11311
11312 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
11313 if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
11314 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
11315
11316 bcm_bprintf(b, "OTP Strap selected.\n"
11317 "\nOTP Shadow in ChipCommon:\n");
11318
11319	dump_size = otp_size / 16; /* 16-bit words */
11320
11321 } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
11322 (chipcregs->sromcontrol & SRC_PRESENT)) {
11323
11324 bcm_bprintf(b, "SPROM Strap selected\n"
11325 "\nSPROM Shadow in ChipCommon:\n");
11326
11327 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
11328 /* dump_size in 16bit words */
11329 dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
11330 } else {
11331 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
11332 __FUNCTION__));
11333 return BCME_NOTFOUND;
11334 }
11335
11336 if (bus->regs == NULL) {
11337 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
11338 return BCME_NOTREADY;
11339 } else {
11340 bcm_bprintf(b, "\n OffSet:");
11341
11342 /* Chipcommon rev51 is a variation on rev45 and does not support
11343 * the latest OTP configuration.
11344 */
11345 if (chipc_corerev != 51 && chipc_corerev >= 49) {
11346 /* Chip common can read only 8kbits,
11347 * for ccrev >= 49 otp size is around 12 kbits so use GCI core
11348 */
11349 nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
11350 } else {
11351 /* Point to the SPROM/OTP shadow in ChipCommon */
11352 nvm_shadow = chipcregs->sromotp;
11353 }
11354
11355 if (nvm_shadow == NULL) {
11356	DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
11357 return BCME_NOTFOUND;
11358 }
11359
11360 /*
11361 * Read 16 bits / iteration.
11362 * dump_size & dump_offset in 16-bit words
11363 */
11364 while (dump_offset < dump_size) {
11365 if (dump_offset % 2 == 0)
11366 /* Print the offset in the shadow space in Bytes */
11367 bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
11368
11369 bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
11370 dump_offset += 0x1;
11371 }
11372 }
11373
11374 /* Switch back to the original core */
11375 si_setcore(bus->sih, cur_coreid, 0);
11376
11377 return BCME_OK;
11378 } /* dhdpcie_cc_nvmshadow */
11379
11380 /** Flow rings are dynamically created and destroyed */
11381 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
11382 {
11383 void *pkt;
11384 flow_queue_t *queue;
11385 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
11386 unsigned long flags;
11387
11388 queue = &flow_ring_node->queue;
11389
11390 #ifdef DHDTCPACK_SUPPRESS
11391	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
11392	 * when a new packet arrives from the network stack.
11393	 */
11394 dhd_tcpack_info_tbl_clean(bus->dhd);
11395 #endif /* DHDTCPACK_SUPPRESS */
11396
11397 /* clean up BUS level info */
11398 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
11399
11400 /* Flush all pending packets in the queue, if any */
11401 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
11402 PKTFREE(bus->dhd->osh, pkt, TRUE);
11403 }
11404 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
11405
11406 /* Reinitialise flowring's queue */
11407 dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
11408 flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
11409 flow_ring_node->active = FALSE;
11410
11411 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11412
11413 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
11414 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11415 dll_delete(&flow_ring_node->list);
11416 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11417
11418 /* Release the flowring object back into the pool */
11419 dhd_prot_flowrings_pool_release(bus->dhd,
11420 flow_ring_node->flowid, flow_ring_node->prot_info);
11421
11422 /* Free the flowid back to the flowid allocator */
11423 dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
11424 flow_ring_node->flowid);
11425 }
11426
11427 /**
11428 * Allocate a Flow ring buffer,
11429 * Init Ring buffer, send Msg to device about flow ring creation
11430 */
11431 int
11432 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
11433 {
11434 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
11435
11436 DHD_INFO(("%s :Flow create\n", __FUNCTION__));
11437
11438 /* Send Msg to device about flow ring creation */
11439 if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
11440 return BCME_NOMEM;
11441
11442 return BCME_OK;
11443 }
11444
11445 /** Handle response from dongle on a 'flow ring create' request */
11446 void
11447 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
11448 {
11449 flow_ring_node_t *flow_ring_node;
11450 unsigned long flags;
11451
11452 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
11453
11454 /* Boundary check of the flowid */
11455 if (flowid > bus->dhd->max_tx_flowid) {
11456 DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
11457 flowid, bus->dhd->max_tx_flowid));
11458 return;
11459 }
11460
11461 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11462 if (!flow_ring_node) {
11463 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
11464 return;
11465 }
11466
11467 ASSERT(flow_ring_node->flowid == flowid);
11468 if (flow_ring_node->flowid != flowid) {
11469 DHD_ERROR(("%s: flowid %d is different from the flowid "
11470 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
11471 flow_ring_node->flowid));
11472 return;
11473 }
11474
11475 if (status != BCME_OK) {
11476 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
11477 __FUNCTION__, status));
11478 /* Call Flow clean up */
11479 dhd_bus_clean_flow_ring(bus, flow_ring_node);
11480 return;
11481 }
11482
11483 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
11484 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
11485 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11486
11487	/* Now add the flow ring node to the active list.
11488	 * Note that this code to add the newly created node to the
11489	 * active list used to live in dhd_flowid_lookup. There, the
11490	 * node was added to the active list before its contents were
11491	 * filled in by dhd_prot_flow_ring_create.
11492	 * If a D2H interrupt arrived after the node was added to the
11493	 * active list but before it was populated with values, the
11494	 * bottom half would call dhd_update_txflowrings, which walks
11495	 * the active flow ring list, picks up the nodes and operates
11496	 * on them. Since dhd_prot_flow_ring_create had not finished
11497	 * yet, the contents of flow_ring_node could still be NULL,
11498	 * leading to crashes.
11499	 * Hence the flow_ring_node is added to the active list only
11500	 * after it is truly created, which is after the create
11501	 * response message is received from the dongle.
11502	 */
11503 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11504 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11505 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11506
11507 dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
11508
11509 return;
11510 }
11511
11512 int
11513 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
11514 {
11515 void * pkt;
11516 flow_queue_t *queue;
11517 flow_ring_node_t *flow_ring_node;
11518 unsigned long flags;
11519
11520 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
11521
11522 flow_ring_node = (flow_ring_node_t *)arg;
11523
11524 #ifdef DHDTCPACK_SUPPRESS
11525	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
11526	 * when a new packet arrives from the network stack.
11527	 */
11528 dhd_tcpack_info_tbl_clean(bus->dhd);
11529 #endif /* DHDTCPACK_SUPPRESS */
11530 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
11531 if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
11532 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11533 DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
11534 return BCME_ERROR;
11535 }
11536 flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
11537
11538 queue = &flow_ring_node->queue; /* queue associated with flow ring */
11539
11540 /* Flush all pending packets in the queue, if any */
11541 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
11542 PKTFREE(bus->dhd->osh, pkt, TRUE);
11543 }
11544 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
11545
11546 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11547
11548 /* Send Msg to device about flow ring deletion */
11549 dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
11550
11551 return BCME_OK;
11552 }
11553
11554 void
11555 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
11556 {
11557 flow_ring_node_t *flow_ring_node;
11558
11559 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
11560
11561 /* Boundary check of the flowid */
11562 if (flowid > bus->dhd->max_tx_flowid) {
11563 DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
11564 flowid, bus->dhd->max_tx_flowid));
11565 return;
11566 }
11567
11568 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11569 if (!flow_ring_node) {
11570 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
11571 return;
11572 }
11573
11574 ASSERT(flow_ring_node->flowid == flowid);
11575 if (flow_ring_node->flowid != flowid) {
11576 DHD_ERROR(("%s: flowid %d is different from the flowid "
11577 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
11578 flow_ring_node->flowid));
11579 return;
11580 }
11581
11582 if (status != BCME_OK) {
11583 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
11584 __FUNCTION__, status));
11585 return;
11586 }
11587 /* Call Flow clean up */
11588 dhd_bus_clean_flow_ring(bus, flow_ring_node);
11589
11590 return;
11591
11592 }
11593
11594 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
11595 {
11596 void *pkt;
11597 flow_queue_t *queue;
11598 flow_ring_node_t *flow_ring_node;
11599 unsigned long flags;
11600
11601 DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
11602
11603 flow_ring_node = (flow_ring_node_t *)arg;
11604
11605 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
11606 queue = &flow_ring_node->queue; /* queue associated with flow ring */
11607 /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
11608 * once flow ring flush response is received for this flowring node.
11609 */
11610 flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
11611
11612 #ifdef DHDTCPACK_SUPPRESS
11613	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
11614	 * when a new packet arrives from the network stack.
11615	 */
11616 dhd_tcpack_info_tbl_clean(bus->dhd);
11617 #endif /* DHDTCPACK_SUPPRESS */
11618
11619 /* Flush all pending packets in the queue, if any */
11620 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
11621 PKTFREE(bus->dhd->osh, pkt, TRUE);
11622 }
11623 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
11624
11625 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11626
11627 /* Send Msg to device about flow ring flush */
11628 dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
11629
11630 return BCME_OK;
11631 }
11632
11633 void
11634 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
11635 {
11636 flow_ring_node_t *flow_ring_node;
11637
11638 if (status != BCME_OK) {
11639 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
11640 __FUNCTION__, status));
11641 return;
11642 }
11643
11644 /* Boundary check of the flowid */
11645 if (flowid > bus->dhd->max_tx_flowid) {
11646 DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
11647 flowid, bus->dhd->max_tx_flowid));
11648 return;
11649 }
11650
11651 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11652 if (!flow_ring_node) {
11653 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
11654 return;
11655 }
11656
11657 ASSERT(flow_ring_node->flowid == flowid);
11658 if (flow_ring_node->flowid != flowid) {
11659 DHD_ERROR(("%s: flowid %d is different from the flowid "
11660 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
11661 flow_ring_node->flowid));
11662 return;
11663 }
11664
11665 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
11666 return;
11667 }
11668
11669 uint32
11670 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
11671 {
11672 return bus->max_submission_rings;
11673 }
11674
11675 /* To be symmetric with SDIO */
11676 void
11677 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
11678 {
11679 return;
11680 }
11681
11682 void
11683 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
11684 {
11685 dhdp->bus->is_linkdown = val;
11686 }
11687
11688 int
11689 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
11690 {
11691 return dhdp->bus->is_linkdown;
11692 }
11693
11694 int
11695 dhd_bus_get_cto(dhd_pub_t *dhdp)
11696 {
11697 return dhdp->bus->cto_triggered;
11698 }
11699
11700 #ifdef IDLE_TX_FLOW_MGMT
11701 /* resume request */
11702 int
11703 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
11704 {
11705 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
11706
11707 DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
11708
11709 flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
11710
11711 /* Send Msg to device about flow ring resume */
11712 dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
11713
11714 return BCME_OK;
11715 }
11716
11717 /* add the node back to active flowring */
11718 void
11719 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
11720 {
11721
11722 flow_ring_node_t *flow_ring_node;
11723
11724 DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
11725
11726 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11727 ASSERT(flow_ring_node->flowid == flowid);
11728
11729 if (status != BCME_OK) {
11730 DHD_ERROR(("%s Error Status = %d \n",
11731 __FUNCTION__, status));
11732 return;
11733 }
11734
11735 DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
11736 __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));
11737
11738 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
11739
11740 dhd_bus_schedule_queue(bus, flowid, FALSE);
11741 return;
11742 }
11743
11744	/* scan the flow rings in the active list for idle timeouts */
11745 void
11746 dhd_bus_check_idle_scan(dhd_bus_t *bus)
11747 {
11748 uint64 time_stamp; /* in millisec */
11749 uint64 diff;
11750
11751 time_stamp = OSL_SYSUPTIME();
11752 diff = time_stamp - bus->active_list_last_process_ts;
11753
11754 if (diff > IDLE_FLOW_LIST_TIMEOUT) {
11755 dhd_bus_idle_scan(bus);
11756 bus->active_list_last_process_ts = OSL_SYSUPTIME();
11757 }
11758
11759 return;
11760 }
11761
11762	/* Scan the active list from the tail (least recently used) and suspend idle flow rings until a non-idle node is found */
11763 void
11764 dhd_bus_idle_scan(dhd_bus_t *bus)
11765 {
11766 dll_t *item, *prev;
11767 flow_ring_node_t *flow_ring_node;
11768 uint64 time_stamp, diff;
11769 unsigned long flags;
11770 uint16 ringid[MAX_SUSPEND_REQ];
11771 uint16 count = 0;
11772
11773 time_stamp = OSL_SYSUPTIME();
11774 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11775
11776 for (item = dll_tail_p(&bus->flowring_active_list);
11777 !dll_end(&bus->flowring_active_list, item); item = prev) {
11778 prev = dll_prev_p(item);
11779
11780 flow_ring_node = dhd_constlist_to_flowring(item);
11781
11782 if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
11783 continue;
11784
11785 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
11786 /* Takes care of deleting zombie rings */
11787 /* delete from the active list */
11788 DHD_INFO(("deleting flow id %u from active list\n",
11789 flow_ring_node->flowid));
11790 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11791 continue;
11792 }
11793
11794 diff = time_stamp - flow_ring_node->last_active_ts;
11795
11796 if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
11797 DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
11798 /* delete from the active list */
11799 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11800 flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
11801 ringid[count] = flow_ring_node->flowid;
11802 count++;
11803 if (count == MAX_SUSPEND_REQ) {
11804 /* create a batch message now!! */
11805 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11806 count = 0;
11807 }
11808
11809 } else {
11810
11811 /* No more scanning, break from here! */
11812 break;
11813 }
11814 }
11815
11816 if (count) {
11817 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11818 }
11819
11820 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11821
11822 return;
11823 }
11824
11825 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11826 {
11827 unsigned long flags;
11828 dll_t* list;
11829
11830 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11831 /* check if the node is already at head, otherwise delete it and prepend */
11832 list = dll_head_p(&bus->flowring_active_list);
11833 if (&flow_ring_node->list != list) {
11834 dll_delete(&flow_ring_node->list);
11835 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11836 }
11837
11838 /* update flow ring timestamp */
11839 flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11840
11841 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11842
11843 return;
11844 }
11845
11846 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11847 {
11848 unsigned long flags;
11849
11850 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11851
11852 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11853 /* update flow ring timestamp */
11854 flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11855
11856 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11857
11858 return;
11859 }
11860 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11861 {
11862 dll_delete(&flow_ring_node->list);
11863 }
11864
11865 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11866 {
11867 unsigned long flags;
11868
11869 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11870
11871 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11872
11873 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11874
11875 return;
11876 }
11877 #endif /* IDLE_TX_FLOW_MGMT */
11878
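/*
 * Thin bus-level wrappers: each of the following routines simply forwards
 * to the corresponding dhdpcie_* host/device management implementation.
 */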
11879 int
11880 dhdpcie_bus_start_host_dev(struct dhd_bus *bus)
11881 {
11882 return dhdpcie_start_host_dev(bus);
11883 }
11884
11885 int
11886 dhdpcie_bus_stop_host_dev(struct dhd_bus *bus)
11887 {
11888 return dhdpcie_stop_host_dev(bus);
11889 }
11890
11891 int
11892 dhdpcie_bus_disable_device(struct dhd_bus *bus)
11893 {
11894 return dhdpcie_disable_device(bus);
11895 }
11896
11897 int
11898 dhdpcie_bus_enable_device(struct dhd_bus *bus)
11899 {
11900 return dhdpcie_enable_device(bus);
11901 }
11902
11903 int
11904 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
11905 {
11906 return dhdpcie_alloc_resource(bus);
11907 }
11908
11909 void
11910 dhdpcie_bus_free_resource(struct dhd_bus *bus)
11911 {
11912 dhdpcie_free_resource(bus);
11913 }
11914
11915 int
11916 dhd_bus_request_irq(struct dhd_bus *bus)
11917 {
11918 return dhdpcie_bus_request_irq(bus);
11919 }
11920
11921 bool
11922 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
11923 {
11924 return dhdpcie_dongle_attach(bus);
11925 }
11926
11927 int
11928 dhd_bus_release_dongle(struct dhd_bus *bus)
11929 {
11930 bool dongle_isolation;
11931 osl_t *osh;
11932
11933 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
11934
11935 if (bus) {
11936 osh = bus->osh;
11937 ASSERT(osh);
11938
11939 if (bus->dhd) {
11940 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
11941 debugger_close();
11942 #endif /* DEBUGGER || DHD_DSCOPE */
11943
11944 dongle_isolation = bus->dhd->dongle_isolation;
11945 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
11946 }
11947 }
11948
11949 return 0;
11950 }
11951
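/*
 * Enable or disable reporting of completion timeouts (CTO) and backplane
 * SERR through the PCI interrupt mask in config space.
 */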
11952 int
11953 dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
11954 {
11955 if (enable) {
11956 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
11957 PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
11958 } else {
11959 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
11960 }
11961 return 0;
11962 }
11963
11964 int
11965 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
11966 {
11967 volatile void *regsva = (volatile void *)bus->regs;
11968 uint32 val;
11969 uint16 chipid = dhd_get_chipid(bus);
11970 uint32 ctoctrl;
11971
11972 bus->cto_enable = enable;
11973
11974 dhdpcie_cto_cfg_init(bus, enable);
11975
11976 if (enable) {
11977 if (bus->cto_threshold == 0) {
11978 if ((chipid == BCM4387_CHIP_ID) ||
11979 (chipid == BCM4388_CHIP_ID) ||
11980 (chipid == BCM4389_CHIP_ID)) {
11981 bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT_REV69;
11982 } else {
11983 bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
11984 }
11985 }
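/*
 * Build the CTO control word: the completion-timeout threshold, the
 * clock-check count and the enable bit, each masked into its field
 * before being written to the ctoctrl core register below.
 */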
11986 val = ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
11987 PCIE_CTO_TO_THRESHHOLD_MASK) |
11988 ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
11989 PCIE_CTO_CLKCHKCNT_MASK) |
11990 PCIE_CTO_ENAB_MASK;
11991
11992 pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, val);
11993 } else {
11994 pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
11995 }
11996
11997 ctoctrl = pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), 0, 0);
11998
11999 DHD_ERROR(("%s: ctoctrl(0x%x) enable/disable %d for chipid(0x%x)\n",
12000 __FUNCTION__, ctoctrl, bus->cto_enable, chipid));
12001
12002 return 0;
12003 }
12004
12005 static int
12006 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
12007 {
12008 uint32 pci_intmask, err_status;
12009 uint8 i = 0;
12010 uint32 val;
12011
12012 pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
12013 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
12014
12015 DHD_OS_WAKE_LOCK(bus->dhd);
12016
12017 DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
12018
12019 /*
12020 * DAR still accessible
12021 */
12022 dhd_bus_dump_dar_registers(bus);
12023
12024 /* reset backplane */
12025 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
12026 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
12027
12028	/* clear the completion timeout error: keep writing PCIE_CTO_ERR_MASK to DAR_ERRLOG until it reads back clear or the retry limit is reached */
12029 while (1) {
12030 err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
12031 DAR_ERRLOG(bus->sih->buscorerev),
12032 0, 0);
12033 if (err_status & PCIE_CTO_ERR_MASK) {
12034 si_corereg(bus->sih, bus->sih->buscoreidx,
12035 DAR_ERRLOG(bus->sih->buscorerev),
12036 ~0, PCIE_CTO_ERR_MASK);
12037 } else {
12038 break;
12039 }
12040 OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
12041 i++;
12042 if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
12043 DHD_ERROR(("cto recovery fail\n"));
12044
12045 DHD_OS_WAKE_UNLOCK(bus->dhd);
12046 return BCME_ERROR;
12047 }
12048 }
12049
12050 /* clear interrupt status */
12051 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
12052
12053 /* Halt ARM & remove reset */
12054	/* TBD : we can add an ARM halt here if needed */
12055
12056 /* reset SPROM_CFG_TO_SB_RST */
12057 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
12058
12059 DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
12060 PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
12061 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
12062
12063 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
12064 DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
12065 PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
12066
12067 DHD_OS_WAKE_UNLOCK(bus->dhd);
12068
12069 return BCME_OK;
12070 }
12071
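/*
 * Set the "disable enumeration reset on subsystem reset" bit in the PCIe
 * subsystem control config register, so a subsequent subsystem reset does
 * not also reset the device's enumeration state.
 */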
12072 void
12073 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
12074 {
12075 uint32 val;
12076
12077 val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
12078 dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
12079 val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
12080 }
12081
12082 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
12083 /*
12084 * XXX: WAR: Update dongle that driver supports sending of d11
12085 * tx_status through unused status field of PCIe completion header
12086 * if dongle also supports the same WAR.
12087 */
12088 static int
12089 dhdpcie_init_d11status(struct dhd_bus *bus)
12090 {
12091 uint32 addr;
12092 uint32 flags2;
12093 int ret = 0;
12094
12095 if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
12096 flags2 = bus->pcie_sh->flags2;
12097 addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
12098 flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
12099 ret = dhdpcie_bus_membytes(bus, TRUE, addr,
12100 (uint8 *)&flags2, sizeof(flags2));
12101 if (ret < 0) {
12102 DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
12103 __FUNCTION__));
12104 return ret;
12105 }
12106 bus->pcie_sh->flags2 = flags2;
12107 bus->dhd->d11_tx_status = TRUE;
12108 }
12109 return ret;
12110 }
12111
12112 #else
12113 static int
12114 dhdpcie_init_d11status(struct dhd_bus *bus)
12115 {
12116 return 0;
12117 }
12118 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
12119
12120 int
12121 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
12122 {
12123 int err = 0;
12124 #ifdef BCMPCIE_OOB_HOST_WAKE
12125 err = dhdpcie_oob_intr_register(dhdp->bus);
12126 #endif /* BCMPCIE_OOB_HOST_WAKE */
12127 return err;
12128 }
12129
12130 void
12131 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
12132 {
12133 #ifdef BCMPCIE_OOB_HOST_WAKE
12134 dhdpcie_oob_intr_unregister(dhdp->bus);
12135 #endif /* BCMPCIE_OOB_HOST_WAKE */
12136 }
12137
12138 void
12139 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
12140 {
12141 #ifdef BCMPCIE_OOB_HOST_WAKE
12142 dhdpcie_oob_intr_set(dhdp->bus, enable);
12143 #endif /* BCMPCIE_OOB_HOST_WAKE */
12144 }
12145
12146 int
12147 dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp)
12148 {
12149 int irq_num = 0;
12150 #ifdef BCMPCIE_OOB_HOST_WAKE
12151 irq_num = dhdpcie_get_oob_irq_num(dhdp->bus);
12152 #endif /* BCMPCIE_OOB_HOST_WAKE */
12153 return irq_num;
12154 }
12155
12156 bool
12157 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
12158 {
12159 return bus->dhd->d2h_hostrdy_supported;
12160 }
12161
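/* Dump backplane registers of the core selected by 'index' (each core occupies
 * a 4KB window above SI_ENUM_BASE), reading 4 bytes at a time from 'first_addr'
 * to 'last_addr'.
 */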
12162 void
12163 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
12164 {
12165 dhd_bus_t *bus = pub->bus;
12166 uint32 coreoffset = index << 12;
12167 uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
12168 uint32 value;
12169
12170 while (first_addr <= last_addr) {
12171 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
12172 if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
12173 DHD_ERROR(("Invalid size/addr combination \n"));
12174 }
12175 DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
12176 first_addr = first_addr + 4;
12177 }
12178 }
12179
12180 bool
12181 dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
12182 {
12183 if (!bus->dhd) {
12184 return FALSE;
12185 } else if (bus->hwa_enabled) {
12186 return bus->dhd->hwa_capable;
12187 } else {
12188 return FALSE;
12189 }
12190 }
12191
12192 bool
12193 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
12194 {
12195 if (!bus->dhd)
12196 return FALSE;
12197 else if (bus->idma_enabled) {
12198 return bus->dhd->idma_enable;
12199 } else {
12200 return FALSE;
12201 }
12202 }
12203
12204 bool
12205 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
12206 {
12207 if (!bus->dhd)
12208 return FALSE;
12209 else if (bus->ifrm_enabled) {
12210 return bus->dhd->ifrm_enable;
12211 } else {
12212 return FALSE;
12213 }
12214 }
12215
12216 bool
12217 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
12218 {
12219 if (!bus->dhd) {
12220 return FALSE;
12221 } else if (bus->dar_enabled) {
12222 return bus->dhd->dar_enable;
12223 } else {
12224 return FALSE;
12225 }
12226 }
12227
12228 void
12229 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
12230 {
12231 DHD_ERROR(("ENABLING DW:%d\n", dw_option));
12232 bus->dw_option = dw_option;
12233 }
12234
12235 #ifdef PCIE_INB_DW
12236 bool
12237 dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
12238 {
12239 if (!bus->dhd)
12240 return FALSE;
12241 if (bus->inb_enabled) {
12242 return bus->dhd->d2h_inband_dw;
12243 } else {
12244 return FALSE;
12245 }
12246 }
12247
12248 void
12249 dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
12250 {
12251 if (!INBAND_DW_ENAB(bus))
12252 return;
12253
12254 DHD_INFO(("%s:%d\n", __FUNCTION__, state));
12255 bus->dhd->ds_state = state;
12256 if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
12257 bus->ds_exit_timeout = 100;
12258 }
12259 if (state == DW_DEVICE_HOST_WAKE_WAIT) {
12260 bus->host_sleep_exit_timeout = 100;
12261 }
12262 if (state == DW_DEVICE_DS_DEV_WAKE) {
12263 bus->ds_exit_timeout = 0;
12264 }
12265 if (state == DW_DEVICE_DS_ACTIVE) {
12266 bus->host_sleep_exit_timeout = 0;
12267 }
12268 }
12269
12270 enum dhd_bus_ds_state
12271 dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
12272 {
12273 if (!INBAND_DW_ENAB(bus))
12274 return DW_DEVICE_DS_INVALID;
12275 return bus->dhd->ds_state;
12276 }
12277 #endif /* PCIE_INB_DW */
12278
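/* Record one deep-sleep (DS) protocol event in the circular ds_trace buffer,
 * together with a timestamp, the direction (D2H or H2D) and, when inband
 * device wake is compiled in, the current inband DS state.
 */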
12279 static void
12280 #ifdef PCIE_INB_DW
12281 dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h, enum dhd_bus_ds_state inbstate)
12282 #else
12283 dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h)
12284 #endif /* PCIE_INB_DW */
12285 {
12286 uint32 cnt = bus->ds_trace_count % MAX_DS_TRACE_SIZE;
12287
12288 bus->ds_trace[cnt].timestamp = OSL_LOCALTIME_NS();
12289 bus->ds_trace[cnt].d2h = d2h;
12290 bus->ds_trace[cnt].dsval = dsval;
12291 #ifdef PCIE_INB_DW
12292 bus->ds_trace[cnt].inbstate = inbstate;
12293 #endif /* PCIE_INB_DW */
12294 bus->ds_trace_count ++;
12295 }
12296
12297 #ifdef PCIE_INB_DW
12298 const char *
12299 dhd_convert_dsval(uint32 val, bool d2h)
12300 {
12301 if (d2h) {
12302 switch (val) {
12303 case D2H_DEV_D3_ACK:
12304 return "D2H_DEV_D3_ACK";
12305 case D2H_DEV_DS_ENTER_REQ:
12306 return "D2H_DEV_DS_ENTER_REQ";
12307 case D2H_DEV_DS_EXIT_NOTE:
12308 return "D2H_DEV_DS_EXIT_NOTE";
12309 case D2H_DEV_FWHALT:
12310 return "D2H_DEV_FWHALT";
12311 case D2HMB_DS_HOST_SLEEP_EXIT_ACK:
12312 return "D2HMB_DS_HOST_SLEEP_EXIT_ACK";
12313 default:
12314 return "INVALID";
12315 }
12316 } else {
12317 switch (val) {
12318 case H2DMB_DS_DEVICE_WAKE_DEASSERT:
12319 return "H2DMB_DS_DEVICE_WAKE_DEASSERT";
12320 case H2DMB_DS_DEVICE_WAKE_ASSERT:
12321 return "H2DMB_DS_DEVICE_WAKE_ASSERT";
12322 case H2D_HOST_D3_INFORM:
12323 return "H2D_HOST_D3_INFORM";
12324 case H2D_HOST_DS_ACK:
12325 return "H2D_HOST_DS_ACK";
12326 case H2D_HOST_DS_NAK:
12327 return "H2D_HOST_DS_NAK";
12328 case H2D_HOST_CONS_INT:
12329 return "H2D_HOST_CONS_INT";
12330 case H2D_FW_TRAP:
12331 return "H2D_FW_TRAP";
12332 default:
12333 return "INVALID";
12334 }
12335 }
12336 }
12337
12338 const char *
12339 dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate)
12340 {
12341 switch (inbstate) {
12342 case DW_DEVICE_DS_DEV_SLEEP:
12343 return "DW_DEVICE_DS_DEV_SLEEP";
12344 break;
12345 case DW_DEVICE_DS_DISABLED_WAIT:
12346 return "DW_DEVICE_DS_DISABLED_WAIT";
12347 break;
12348 case DW_DEVICE_DS_DEV_WAKE:
12349 return "DW_DEVICE_DS_DEV_WAKE";
12350 break;
12351 case DW_DEVICE_DS_ACTIVE:
12352 return "DW_DEVICE_DS_ACTIVE";
12353 break;
12354 case DW_DEVICE_HOST_SLEEP_WAIT:
12355 return "DW_DEVICE_HOST_SLEEP_WAIT";
12356 break;
12357 case DW_DEVICE_HOST_SLEEP:
12358 return "DW_DEVICE_HOST_SLEEP";
12359 break;
12360 case DW_DEVICE_HOST_WAKE_WAIT:
12361 return "DW_DEVICE_HOST_WAKE_WAIT";
12362 break;
12363 case DW_DEVICE_DS_D3_INFORM_WAIT:
12364 return "DW_DEVICE_DS_D3_INFORM_WAIT";
12365 break;
12366 default:
12367 return "INVALID";
12368 }
12369 }
12370 #endif /* PCIE_INB_DW */
12371
12372 void
12373 dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
12374 {
12375 int dumpsz;
12376 int i;
12377
12378 dumpsz = bus->ds_trace_count < MAX_DS_TRACE_SIZE ?
12379 bus->ds_trace_count : MAX_DS_TRACE_SIZE;
12380 if (dumpsz == 0) {
12381 bcm_bprintf(strbuf, "\nEmpty DS TRACE\n");
12382 return;
12383 }
12384 bcm_bprintf(strbuf, "---- DS TRACE ------\n");
12385 #ifdef PCIE_INB_DW
12386 bcm_bprintf(strbuf, "%s\t\t%s\t%-30s\t\t%s\n",
12387 "Timestamp us", "Dir", "Value", "Inband-State");
12388 for (i = 0; i < dumpsz; i ++) {
12389 bcm_bprintf(strbuf, "%llu\t%s\t%-30s\t\t%s\n",
12390 bus->ds_trace[i].timestamp,
12391 bus->ds_trace[i].d2h ? "D2H":"H2D",
12392 dhd_convert_dsval(bus->ds_trace[i].dsval, bus->ds_trace[i].d2h),
12393 dhd_convert_inb_state_names(bus->ds_trace[i].inbstate));
12394 }
12395 #else
12396 bcm_bprintf(strbuf, "Timestamp us\t\tDir\tValue\n");
12397 for (i = 0; i < dumpsz; i ++) {
12398 bcm_bprintf(strbuf, "%llu\t%s\t%d\n",
12399 bus->ds_trace[i].timestamp,
12400 bus->ds_trace[i].d2h ? "D2H":"H2D",
12401 bus->ds_trace[i].dsval);
12402 }
12403 #endif /* PCIE_INB_DW */
12404 bcm_bprintf(strbuf, "--------------------------\n");
12405 }
12406
12407 void
12408 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
12409 {
12410 trap_t *tr = &bus->dhd->last_trap_info;
12411 bcm_bprintf(strbuf,
12412 "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
12413 " lp 0x%x, rpc 0x%x"
12414 "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
12415 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
12416 "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
12417 ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
12418 ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
12419 ltoh32(bus->pcie_sh->trap_addr),
12420 ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
12421 ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
12422 ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
12423 ltoh32(tr->r11), ltoh32(tr->r12));
12424 }
12425
12426 int
12427 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
12428 {
12429 int bcmerror = 0;
12430 struct dhd_bus *bus = dhdp->bus;
12431
12432 if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
12433 DHD_ERROR(("Invalid size/addr combination \n"));
12434 bcmerror = BCME_ERROR;
12435 }
12436
12437 return bcmerror;
12438 }
12439
12440 int
12441 dhd_get_idletime(dhd_pub_t *dhd)
12442 {
12443 return dhd->bus->idletime;
12444 }
12445
12446 bool
12447 dhd_get_rpm_state(dhd_pub_t *dhd)
12448 {
12449 return dhd->bus->rpm_enabled;
12450 }
12451
12452 void
12453 dhd_set_rpm_state(dhd_pub_t *dhd, bool state)
12454 {
12455 DHD_ERROR(("%s: %d\n", __FUNCTION__, state));
12456 dhd->bus->rpm_enabled = state;
12457 }
12458
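/* Single serialized 32-bit backplane register read/write helper used by the
 * SSSR/FIS dump code below; logs the address, value and direction of every
 * access.
 */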
12459 static INLINE void
12460 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
12461 {
12462 OSL_DELAY(1);
12463 if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
12464 DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
12465 } else {
12466 DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
12467 }
12468 return;
12469 }
12470
12471 #ifdef DHD_SSSR_DUMP
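/* Read 'fifo_size' bytes of a MAC save/restore FIFO into 'buf': the base
 * address register is reset to 0 and the data register is then read 4 bytes
 * at a time.
 */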
12472 static int
12473 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
12474 uint addr_reg, uint data_reg)
12475 {
12476 uint addr;
12477 uint val = 0;
12478 int i;
12479
12480 DHD_ERROR(("%s\n", __FUNCTION__));
12481
12482 if (!buf) {
12483 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
12484 return BCME_ERROR;
12485 }
12486
12487 if (!fifo_size) {
12488 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
12489 return BCME_ERROR;
12490 }
12491
12492 /* Set the base address offset to 0 */
12493 addr = addr_reg;
12494 val = 0;
12495 dhd_sbreg_op(dhd, addr, &val, FALSE);
12496
12497 addr = data_reg;
12498 /* Read 4 bytes at once and loop for fifo_size / 4 */
12499 for (i = 0; i < fifo_size / 4; i++) {
12500 if (serialized_backplane_access(dhd->bus, addr,
12501 sizeof(uint), &val, TRUE) != BCME_OK) {
12502 DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
12503 return BCME_ERROR;
12504 }
12505 buf[i] = val;
12506 OSL_DELAY(1);
12507 }
12508 return BCME_OK;
12509 }
12510
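/* Dump the digital/VASIP save-restore memory. Depending on the SSSR register
 * info version the memory is either read directly over the backplane (enabling
 * the VASIP clock first if required) or, when no address register is provided,
 * streamed through the ChipCommon sr_memrw_addr/sr_memrw_data pair
 * (ChipCommon corerev 64/65 only).
 */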
12511 static int
12512 dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
12513 uint addr_reg)
12514 {
12515 uint addr;
12516 uint val = 0;
12517 int i;
12518 si_t *sih = dhd->bus->sih;
12519 bool vasip_enab, dig_mem_check;
12520 uint32 ioctrl_addr = 0;
12521
12522 DHD_ERROR(("%s addr_reg=0x%x size=0x%x\n", __FUNCTION__, addr_reg, fifo_size));
12523
12524 if (!buf) {
12525 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
12526 return BCME_ERROR;
12527 }
12528
12529 if (!fifo_size) {
12530 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
12531 return BCME_ERROR;
12532 }
12533
12534 vasip_enab = FALSE;
12535 dig_mem_check = FALSE;
12536 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12537 switch (dhd->sssr_reg_info->rev2.version) {
12538 case SSSR_REG_INFO_VER_3 :
12539 /* intentional fall through */
12540 case SSSR_REG_INFO_VER_2 :
12541 if ((dhd->sssr_reg_info->rev2.length > OFFSETOF(sssr_reg_info_v2_t,
12542 dig_mem_info)) && dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
12543 dig_mem_check = TRUE;
12544 }
12545 break;
12546 case SSSR_REG_INFO_VER_1 :
12547 if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
12548 vasip_enab = TRUE;
12549 } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
12550 dig_mem_info)) && dhd->sssr_reg_info->rev1.
12551 dig_mem_info.dig_sr_size) {
12552 dig_mem_check = TRUE;
12553 }
12554 ioctrl_addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl;
12555 break;
12556 case SSSR_REG_INFO_VER_0 :
12557 if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
12558 vasip_enab = TRUE;
12559 }
12560 ioctrl_addr = dhd->sssr_reg_info->rev0.vasip_regs.wrapper_regs.ioctrl;
12561 break;
12562 default :
12563 			DHD_ERROR(("invalid sssr_reg_ver\n"));
12564 return BCME_UNSUPPORTED;
12565 }
12566 if (addr_reg) {
12567 DHD_ERROR(("dig_mem_check=%d vasip_enab=%d\n", dig_mem_check, vasip_enab));
12568 if (!vasip_enab && dig_mem_check) {
12569 int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
12570 fifo_size);
12571 if (err != BCME_OK) {
12572 DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
12573 __FUNCTION__));
12574 }
12575 } else {
12576 /* Check if vasip clk is disabled, if yes enable it */
12577 addr = ioctrl_addr;
12578 dhd_sbreg_op(dhd, addr, &val, TRUE);
12579 if (!val) {
12580 val = 1;
12581 dhd_sbreg_op(dhd, addr, &val, FALSE);
12582 }
12583
12584 addr = addr_reg;
12585 /* Read 4 bytes at once and loop for fifo_size / 4 */
12586 for (i = 0; i < fifo_size / 4; i++, addr += 4) {
12587 if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
12588 &val, TRUE) != BCME_OK) {
12589 DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
12590 addr));
12591 return BCME_ERROR;
12592 }
12593 buf[i] = val;
12594 OSL_DELAY(1);
12595 }
12596 }
12597 } else {
12598 uint cur_coreid;
12599 uint chipc_corerev;
12600 chipcregs_t *chipcregs;
12601
12602 /* Save the current core */
12603 cur_coreid = si_coreid(sih);
12604
12605 /* Switch to ChipC */
12606 chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
12607 if (!chipcregs) {
12608 DHD_ERROR(("%s: si_setcore returns NULL for core id %u \n",
12609 __FUNCTION__, CC_CORE_ID));
12610 return BCME_ERROR;
12611 }
12612
12613 chipc_corerev = si_corerev(sih);
12614
12615 if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
12616 W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
12617
12618 /* Read 4 bytes at once and loop for fifo_size / 4 */
12619 for (i = 0; i < fifo_size / 4; i++) {
12620 buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
12621 OSL_DELAY(1);
12622 }
12623 }
12624
12625 /* Switch back to the original core */
12626 si_setcore(sih, cur_coreid, 0);
12627 }
12628
12629 return BCME_OK;
12630 }
12631
12632 #if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
12633 void
12634 dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
12635 uint8 *ext_trap_data, void *event_decode_data)
12636 {
12637 hnd_ext_trap_hdr_t *hdr = NULL;
12638 bcm_tlv_t *tlv;
12639 eventlog_trapdata_info_t *etd_evtlog = NULL;
12640 eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
12641 uint arr_size = 0;
12642 int i = 0;
12643 int err = 0;
12644 uint32 seqnum = 0;
12645
12646 if (!ext_trap_data || !event_decode_data || !dhd)
12647 return;
12648
12649 if (!dhd->concise_dbg_buf)
12650 return;
12651
12652 /* First word is original trap_data, skip */
12653 ext_trap_data += sizeof(uint32);
12654
12655 hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
12656 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
12657 if (tlv) {
12658 uint32 baseaddr = 0;
12659 uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
12660
12661 etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
12662 DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
12663 "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
12664 (etd_evtlog->num_elements),
12665 ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
12666 if (!etd_evtlog->num_elements ||
12667 etd_evtlog->num_elements > MAX_EVENTLOG_BUFFERS) {
12668 DHD_ERROR(("%s: ETD has bad 'num_elements' !\n", __FUNCTION__));
12669 return;
12670 }
12671 if (!etd_evtlog->log_arr_addr) {
12672 DHD_ERROR(("%s: ETD has bad 'log_arr_addr' !\n", __FUNCTION__));
12673 return;
12674 }
12675
12676 arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
12677 evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
12678 if (!evtlog_buf_arr) {
12679 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
12680 return;
12681 }
12682
12683 /* boundary check */
12684 baseaddr = etd_evtlog->log_arr_addr;
12685 if ((baseaddr < dhd->bus->dongle_ram_base) ||
12686 ((baseaddr + arr_size) > endaddr)) {
12687 DHD_ERROR(("%s: Error reading invalid address\n",
12688 __FUNCTION__));
12689 goto err;
12690 }
12691
12692 /* read the eventlog_trap_buf_info_t array from dongle memory */
12693 err = dhdpcie_bus_membytes(dhd->bus, FALSE,
12694 (ulong)(etd_evtlog->log_arr_addr),
12695 (uint8 *)evtlog_buf_arr, arr_size);
12696 if (err != BCME_OK) {
12697 DHD_ERROR(("%s: Error reading event log array from dongle !\n",
12698 __FUNCTION__));
12699 goto err;
12700 }
12701 		/* ntoh is required only for seq_num: for event logs from the info
12702 		 * ring the dongle sends the sequence number in network byte order,
12703 		 * and ETD follows the same convention.
12704 		 */
12705 seqnum = ntoh32(etd_evtlog->seq_num);
12706 memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
12707 for (i = 0; i < (etd_evtlog->num_elements); ++i) {
12708 /* boundary check */
12709 baseaddr = evtlog_buf_arr[i].buf_addr;
12710 if ((baseaddr < dhd->bus->dongle_ram_base) ||
12711 ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
12712 DHD_ERROR(("%s: Error reading invalid address\n",
12713 __FUNCTION__));
12714 goto err;
12715 }
12716 /* read each individual event log buf from dongle memory */
12717 err = dhdpcie_bus_membytes(dhd->bus, FALSE,
12718 ((ulong)evtlog_buf_arr[i].buf_addr),
12719 dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
12720 if (err != BCME_OK) {
12721 DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
12722 __FUNCTION__));
12723 goto err;
12724 }
12725 dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
12726 event_decode_data, (evtlog_buf_arr[i].len),
12727 FALSE, hton32(seqnum));
12728 ++seqnum;
12729 }
12730 err:
12731 MFREE(dhd->osh, evtlog_buf_arr, arr_size);
12732 } else {
12733 DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
12734 }
12735 }
12736 #endif /* BCMPCIE && EWP_ETD_PRSRV_LOGS */
12737
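/* Restore the ChipCommon PowerCtrl value saved by
 * dhdpcie_suspend_chipcommon_powerctrl() if the masked bits are still cleared.
 */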
12738 static uint32
12739 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
12740 {
12741 uint addr;
12742 uint val = 0;
12743 uint powerctrl_mask;
12744
12745 DHD_ERROR(("%s\n", __FUNCTION__));
12746
12747 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12748 switch (dhd->sssr_reg_info->rev2.version) {
12749 case SSSR_REG_INFO_VER_3 :
12750 /* intentional fall through */
12751 case SSSR_REG_INFO_VER_2 :
12752 addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
12753 powerctrl_mask = dhd->sssr_reg_info->rev2.
12754 chipcommon_regs.base_regs.powerctrl_mask;
12755 break;
12756 case SSSR_REG_INFO_VER_1 :
12757 case SSSR_REG_INFO_VER_0 :
12758 addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
12759 powerctrl_mask = dhd->sssr_reg_info->rev1.
12760 chipcommon_regs.base_regs.powerctrl_mask;
12761 break;
12762 default :
12763 			DHD_ERROR(("invalid sssr_reg_ver\n"));
12764 return BCME_UNSUPPORTED;
12765 }
12766
12767 	/* restore the saved PowerCtrl value if bits [11:8] are still cleared */
12768 dhd_sbreg_op(dhd, addr, &val, TRUE);
12769
12770 if (!(val & powerctrl_mask)) {
12771 dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
12772 }
12773 return BCME_OK;
12774 }
12775
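/* Save the current ChipCommon PowerCtrl value and clear the masked
 * power-control bits if they are set; the caller passes the returned value to
 * dhdpcie_resume_chipcommon_powerctrl() to undo this.
 */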
12776 static uint32
12777 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
12778 {
12779 uint addr;
12780 uint val = 0, reg_val = 0;
12781 uint powerctrl_mask;
12782
12783 DHD_ERROR(("%s\n", __FUNCTION__));
12784
12785 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12786 switch (dhd->sssr_reg_info->rev2.version) {
12787 case SSSR_REG_INFO_VER_3 :
12788 /* intentional fall through */
12789 case SSSR_REG_INFO_VER_2 :
12790 addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
12791 powerctrl_mask = dhd->sssr_reg_info->rev2.
12792 chipcommon_regs.base_regs.powerctrl_mask;
12793 break;
12794 case SSSR_REG_INFO_VER_1 :
12795 case SSSR_REG_INFO_VER_0 :
12796 addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
12797 powerctrl_mask = dhd->sssr_reg_info->rev1.
12798 chipcommon_regs.base_regs.powerctrl_mask;
12799 break;
12800 default :
12801 			DHD_ERROR(("invalid sssr_reg_ver\n"));
12802 return BCME_UNSUPPORTED;
12803 }
12804
12805 /* conditionally clear bits [11:8] of PowerCtrl */
12806 dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
12807 if (reg_val & powerctrl_mask) {
12808 val = 0;
12809 dhd_sbreg_op(dhd, addr, &val, FALSE);
12810 }
12811 return reg_val;
12812 }
12813
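/* Quiesce the dongle ahead of the save/restore sequence: clear the ChipCommon
 * interrupt mask, both PMU interrupt masks, the resource/MAC resource request
 * timers and, when a VASIP core is present, its clock enable.
 */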
12814 static int
12815 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
12816 {
12817 uint addr;
12818 uint val;
12819 uint32 cc_intmask, pmuintmask0, pmuintmask1, resreqtimer, macresreqtimer,
12820 macresreqtimer1, vasip_sr_size = 0;
12821
12822 DHD_ERROR(("%s\n", __FUNCTION__));
12823
12824 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12825 switch (dhd->sssr_reg_info->rev2.version) {
12826 case SSSR_REG_INFO_VER_3 :
12827 /* intentional fall through */
12828 case SSSR_REG_INFO_VER_2 :
12829 cc_intmask = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.intmask;
12830 pmuintmask0 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask0;
12831 pmuintmask1 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask1;
12832 resreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.resreqtimer;
12833 macresreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.macresreqtimer;
12834 macresreqtimer1 = dhd->sssr_reg_info->rev2.
12835 pmu_regs.base_regs.macresreqtimer1;
12836 break;
12837 case SSSR_REG_INFO_VER_1 :
12838 case SSSR_REG_INFO_VER_0 :
12839 cc_intmask = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.intmask;
12840 pmuintmask0 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask0;
12841 pmuintmask1 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask1;
12842 resreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.resreqtimer;
12843 macresreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.macresreqtimer;
12844 macresreqtimer1 = dhd->sssr_reg_info->rev1.
12845 pmu_regs.base_regs.macresreqtimer1;
12846 vasip_sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
12847 break;
12848 default :
12849 			DHD_ERROR(("invalid sssr_reg_ver\n"));
12850 return BCME_UNSUPPORTED;
12851 }
12852
12853 /* clear chipcommon intmask */
12854 val = 0x0;
12855 dhd_sbreg_op(dhd, cc_intmask, &val, FALSE);
12856
12857 /* clear PMUIntMask0 */
12858 val = 0x0;
12859 dhd_sbreg_op(dhd, pmuintmask0, &val, FALSE);
12860
12861 /* clear PMUIntMask1 */
12862 val = 0x0;
12863 dhd_sbreg_op(dhd, pmuintmask1, &val, FALSE);
12864
12865 /* clear res_req_timer */
12866 val = 0x0;
12867 dhd_sbreg_op(dhd, resreqtimer, &val, FALSE);
12868
12869 /* clear macresreqtimer */
12870 val = 0x0;
12871 dhd_sbreg_op(dhd, macresreqtimer, &val, FALSE);
12872
12873 /* clear macresreqtimer1 */
12874 val = 0x0;
12875 dhd_sbreg_op(dhd, macresreqtimer1, &val, FALSE);
12876
12877 /* clear VasipClkEn */
12878 if (vasip_sr_size) {
12879 addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl;
12880 val = 0x0;
12881 dhd_sbreg_op(dhd, addr, &val, FALSE);
12882 }
12883
12884 return BCME_OK;
12885 }
12886
12887 static void
12888 dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
12889 {
12890 #define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
12891 #define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
12892 uint trap_data_mask[MAX_NUM_D11CORES] =
12893 {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
12894 int i;
12895 /* Apply only for 4375 chip */
12896 if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
12897 for (i = 0; i < MAX_NUM_D11CORES; i++) {
12898 if (dhd->sssr_d11_outofreset[i] &&
12899 (dhd->dongle_trap_data & trap_data_mask[i])) {
12900 dhd->sssr_d11_outofreset[i] = TRUE;
12901 } else {
12902 dhd->sssr_d11_outofreset[i] = FALSE;
12903 }
12904 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
12905 "trap_data:0x%x-0x%x\n",
12906 __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
12907 dhd->dongle_trap_data, trap_data_mask[i]));
12908 }
12909 }
12910 }
12911
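/* Mark, per D11 slice, whether the core is out of reset (resetctrl bit 0
 * cleared); only cores flagged here are included in the SSSR capture. For
 * 4375 the result is further filtered against the trap data (see the WAR
 * below).
 */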
12912 static int
12913 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
12914 {
12915 int i;
12916 uint addr = 0;
12917 uint val = 0;
12918 uint8 num_d11cores;
12919
12920 DHD_ERROR(("%s\n", __FUNCTION__));
12921
12922 num_d11cores = dhd_d11_slices_num_get(dhd);
12923
12924 for (i = 0; i < num_d11cores; i++) {
12925 /* Check if bit 0 of resetctrl is cleared */
12926 /* SSSR register information structure v0 and
12927 * v1 shares most except dig_mem
12928 */
12929 switch (dhd->sssr_reg_info->rev2.version) {
12930 case SSSR_REG_INFO_VER_3 :
12931 /* intentional fall through */
12932 case SSSR_REG_INFO_VER_2 :
12933 addr = dhd->sssr_reg_info->rev2.
12934 mac_regs[i].wrapper_regs.resetctrl;
12935 break;
12936 case SSSR_REG_INFO_VER_1 :
12937 case SSSR_REG_INFO_VER_0 :
12938 addr = dhd->sssr_reg_info->rev1.
12939 mac_regs[i].wrapper_regs.resetctrl;
12940 break;
12941 default :
12942 				DHD_ERROR(("invalid sssr_reg_ver\n"));
12943 return BCME_UNSUPPORTED;
12944 }
12945 if (!addr) {
12946 DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
12947 __FUNCTION__, i));
12948 continue;
12949 }
12950 dhd_sbreg_op(dhd, addr, &val, TRUE);
12951 if (!(val & 1)) {
12952 dhd->sssr_d11_outofreset[i] = TRUE;
12953 } else {
12954 dhd->sssr_d11_outofreset[i] = FALSE;
12955 }
12956 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
12957 __FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
12958 }
12959 /* XXX Temporary WAR for 4375 to handle AXI errors on bad core
12960 * to not collect SSSR dump for the core whose bit is not set in trap_data.
12961 * It will be reverted once AXI errors are fixed
12962 */
12963 dhdpcie_update_d11_status_from_trapdata(dhd);
12964
12965 return BCME_OK;
12966 }
12967
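/* For every D11 core that is out of reset, clear any outstanding backplane
 * clock request by writing clockcontrolstatus whenever extrsrcreq/itopoobb
 * reads back non-zero.
 */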
12968 static int
12969 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
12970 {
12971 int i;
12972 uint val = 0;
12973 uint8 num_d11cores;
12974 uint32 clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val;
12975
12976 DHD_ERROR(("%s\n", __FUNCTION__));
12977
12978 num_d11cores = dhd_d11_slices_num_get(dhd);
12979
12980 for (i = 0; i < num_d11cores; i++) {
12981 if (dhd->sssr_d11_outofreset[i]) {
12982 /* clear request clk only if itopoobb/extrsrcreqs is non zero */
12983 /* SSSR register information structure v0 and
12984 * v1 shares most except dig_mem
12985 */
12986 switch (dhd->sssr_reg_info->rev2.version) {
12987 case SSSR_REG_INFO_VER_3 :
12988 /* intentional fall through */
12989 case SSSR_REG_INFO_VER_2 :
12990 clockrequeststatus = dhd->sssr_reg_info->rev2.
12991 mac_regs[i].wrapper_regs.extrsrcreq;
12992 clockcontrolstatus = dhd->sssr_reg_info->rev2.
12993 mac_regs[i].base_regs.clockcontrolstatus;
12994 clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
12995 mac_regs[i].base_regs.clockcontrolstatus_val;
12996 break;
12997 case SSSR_REG_INFO_VER_1 :
12998 case SSSR_REG_INFO_VER_0 :
12999 clockrequeststatus = dhd->sssr_reg_info->rev1.
13000 mac_regs[i].wrapper_regs.itopoobb;
13001 clockcontrolstatus = dhd->sssr_reg_info->rev1.
13002 mac_regs[i].base_regs.clockcontrolstatus;
13003 clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
13004 mac_regs[i].base_regs.clockcontrolstatus_val;
13005 break;
13006 default :
13007 					DHD_ERROR(("invalid sssr_reg_ver\n"));
13008 return BCME_UNSUPPORTED;
13009 }
13010 dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
13011 if (val != 0) {
13012 /* clear clockcontrolstatus */
13013 dhd_sbreg_op(dhd, clockcontrolstatus,
13014 &clockcontrolstatus_val, FALSE);
13015 }
13016 }
13017 }
13018 return BCME_OK;
13019 }
13020
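/* Clear the ARM core's outstanding clock request and, on chips with multiple
 * backplanes, halt the CPU (dropping BAR coherent access for CA7) without
 * resetting the core.
 */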
13021 static int
13022 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
13023 {
13024 uint val = 0;
13025 uint cfgval = 0;
13026 uint32 resetctrl, clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val;
13027
13028 DHD_ERROR(("%s\n", __FUNCTION__));
13029
13030 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13031 switch (dhd->sssr_reg_info->rev2.version) {
13032 case SSSR_REG_INFO_VER_3 :
13033 /* intentional fall through */
13034 case SSSR_REG_INFO_VER_2 :
13035 resetctrl = dhd->sssr_reg_info->rev2.
13036 arm_regs.wrapper_regs.resetctrl;
13037 clockrequeststatus = dhd->sssr_reg_info->rev2.
13038 arm_regs.wrapper_regs.extrsrcreq;
13039 clockcontrolstatus = dhd->sssr_reg_info->rev2.
13040 arm_regs.base_regs.clockcontrolstatus;
13041 clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
13042 arm_regs.base_regs.clockcontrolstatus_val;
13043 break;
13044 case SSSR_REG_INFO_VER_1 :
13045 case SSSR_REG_INFO_VER_0 :
13046 resetctrl = dhd->sssr_reg_info->rev1.
13047 arm_regs.wrapper_regs.resetctrl;
13048 clockrequeststatus = dhd->sssr_reg_info->rev1.
13049 arm_regs.wrapper_regs.itopoobb;
13050 clockcontrolstatus = dhd->sssr_reg_info->rev1.
13051 arm_regs.base_regs.clockcontrolstatus;
13052 clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
13053 arm_regs.base_regs.clockcontrolstatus_val;
13054 break;
13055 default :
13056 			DHD_ERROR(("invalid sssr_reg_ver\n"));
13057 return BCME_UNSUPPORTED;
13058 }
13059
13060 /* Check if bit 0 of resetctrl is cleared */
13061 dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
13062 if (!(val & 1)) {
13063 /* clear request clk only if itopoobb/extrsrcreqs is non zero */
13064 dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
13065 if (val != 0) {
13066 /* clear clockcontrolstatus */
13067 dhd_sbreg_op(dhd, clockcontrolstatus, &clockcontrolstatus_val, FALSE);
13068 }
13069
13070 if (MULTIBP_ENAB(dhd->bus->sih)) {
13071 /* Clear coherent bits for CA7 because CPU is halted */
13072 if (dhd->bus->coreid == ARMCA7_CORE_ID) {
13073 cfgval = dhdpcie_bus_cfg_read_dword(dhd->bus,
13074 PCIE_CFG_SUBSYSTEM_CONTROL, 4);
13075 dhdpcie_bus_cfg_write_dword(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
13076 (cfgval & ~PCIE_BARCOHERENTACCEN_MASK));
13077 }
13078
13079 /* Just halt ARM but do not reset the core */
13080 resetctrl &= ~(SI_CORE_SIZE - 1);
13081 resetctrl += OFFSETOF(aidmp_t, ioctrl);
13082
13083 dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
13084 val |= SICF_CPUHALT;
13085 dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
13086 }
13087 }
13088
13089 return BCME_OK;
13090 }
13091
13092 static int
13093 dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
13094 {
13095 uint val = 0;
13096 uint32 resetctrl;
13097
13098 DHD_ERROR(("%s\n", __FUNCTION__));
13099
13100 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13101 switch (dhd->sssr_reg_info->rev2.version) {
13102 case SSSR_REG_INFO_VER_3 :
13103 /* intentional fall through */
13104 case SSSR_REG_INFO_VER_2 :
13105 resetctrl = dhd->sssr_reg_info->rev2.
13106 arm_regs.wrapper_regs.resetctrl;
13107 break;
13108 case SSSR_REG_INFO_VER_1 :
13109 case SSSR_REG_INFO_VER_0 :
13110 resetctrl = dhd->sssr_reg_info->rev1.
13111 arm_regs.wrapper_regs.resetctrl;
13112 break;
13113 default :
13114 			DHD_ERROR(("invalid sssr_reg_ver\n"));
13115 return BCME_UNSUPPORTED;
13116 }
13117
13118 /* Check if bit 0 of resetctrl is cleared */
13119 dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
13120 if (!(val & 1)) {
13121 if (MULTIBP_ENAB(dhd->bus->sih) && (dhd->bus->coreid != ARMCA7_CORE_ID)) {
13122 /* Take ARM out of halt but do not reset core */
13123 resetctrl &= ~(SI_CORE_SIZE - 1);
13124 resetctrl += OFFSETOF(aidmp_t, ioctrl);
13125
13126 dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
13127 val &= ~SICF_CPUHALT;
13128 dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
13129 dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
13130 }
13131 }
13132
13133 return BCME_OK;
13134 }
13135
13136 static int
13137 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
13138 {
13139 uint val = 0;
13140 uint32 clockrequeststatus, clockcontrolstatus_addr, clockcontrolstatus_val;
13141
13142 DHD_ERROR(("%s\n", __FUNCTION__));
13143
13144 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13145 switch (dhd->sssr_reg_info->rev2.version) {
13146 case SSSR_REG_INFO_VER_3 :
13147 /* intentional fall through */
13148 case SSSR_REG_INFO_VER_2 :
13149 clockrequeststatus = dhd->sssr_reg_info->rev2.
13150 pcie_regs.wrapper_regs.extrsrcreq;
13151 clockcontrolstatus_addr = dhd->sssr_reg_info->rev2.
13152 pcie_regs.base_regs.clockcontrolstatus;
13153 clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
13154 pcie_regs.base_regs.clockcontrolstatus_val;
13155 break;
13156 case SSSR_REG_INFO_VER_1 :
13157 case SSSR_REG_INFO_VER_0 :
13158 clockrequeststatus = dhd->sssr_reg_info->rev1.
13159 pcie_regs.wrapper_regs.itopoobb;
13160 clockcontrolstatus_addr = dhd->sssr_reg_info->rev1.
13161 pcie_regs.base_regs.clockcontrolstatus;
13162 clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
13163 pcie_regs.base_regs.clockcontrolstatus_val;
13164 break;
13165 default :
13166 			DHD_ERROR(("invalid sssr_reg_ver\n"));
13167 return BCME_UNSUPPORTED;
13168 }
13169
13170 /* clear request clk only if itopoobb/extrsrcreqs is non zero */
13171 dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
13172 if (val) {
13173 /* clear clockcontrolstatus */
13174 dhd_sbreg_op(dhd, clockcontrolstatus_addr, &clockcontrolstatus_val, FALSE);
13175 }
13176 return BCME_OK;
13177 }
13178
13179 static int
13180 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
13181 {
13182 uint addr;
13183 uint val = 0;
13184
13185 DHD_ERROR(("%s\n", __FUNCTION__));
13186
13187 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13188 switch (dhd->sssr_reg_info->rev2.version) {
13189 case SSSR_REG_INFO_VER_3 :
13190 /* intentional fall through */
13191 case SSSR_REG_INFO_VER_2 :
13192 addr = dhd->sssr_reg_info->rev2.pcie_regs.base_regs.ltrstate;
13193 break;
13194 case SSSR_REG_INFO_VER_1 :
13195 case SSSR_REG_INFO_VER_0 :
13196 addr = dhd->sssr_reg_info->rev1.pcie_regs.base_regs.ltrstate;
13197 break;
13198 default :
13199 			DHD_ERROR(("invalid sssr_reg_ver\n"));
13200 return BCME_UNSUPPORTED;
13201 }
13202
13203 val = LTR_ACTIVE;
13204 dhd_sbreg_op(dhd, addr, &val, FALSE);
13205
13206 val = LTR_SLEEP;
13207 dhd_sbreg_op(dhd, addr, &val, FALSE);
13208
13209 return BCME_OK;
13210 }
13211
13212 static int
13213 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
13214 {
13215 DHD_ERROR(("%s\n", __FUNCTION__));
13216
13217 dhdpcie_arm_clear_clk_req(dhd);
13218
13219 dhdpcie_d11_clear_clk_req(dhd);
13220
13221 dhdpcie_pcie_clear_clk_req(dhd);
13222
13223 return BCME_OK;
13224 }
13225
13226 static int
13227 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
13228 {
13229 int i;
13230 uint val = 0;
13231 uint8 num_d11cores;
13232 uint32 resetctrl_addr, ioctrl_addr, ioctrl_resetseq_val0, ioctrl_resetseq_val1,
13233 ioctrl_resetseq_val2, ioctrl_resetseq_val3, ioctrl_resetseq_val4;
13234
13235 DHD_ERROR(("%s\n", __FUNCTION__));
13236
13237 num_d11cores = dhd_d11_slices_num_get(dhd);
13238
13239 for (i = 0; i < num_d11cores; i++) {
13240 if (dhd->sssr_d11_outofreset[i]) {
13241 /* SSSR register information structure v0 and v1 shares
13242 * most except dig_mem
13243 */
13244 switch (dhd->sssr_reg_info->rev2.version) {
13245 case SSSR_REG_INFO_VER_3 :
13246 /* intentional fall through */
13247 case SSSR_REG_INFO_VER_2 :
13248 resetctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i].
13249 wrapper_regs.resetctrl;
13250 ioctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i].
13251 wrapper_regs.ioctrl;
13252 ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev2.
13253 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
13254 ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev2.
13255 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
13256 ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev2.
13257 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
13258 ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev2.
13259 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
13260 ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev2.
13261 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
13262 break;
13263 case SSSR_REG_INFO_VER_1 :
13264 case SSSR_REG_INFO_VER_0 :
13265 resetctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i].
13266 wrapper_regs.resetctrl;
13267 ioctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i].
13268 wrapper_regs.ioctrl;
13269 ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev1.
13270 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
13271 ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev1.
13272 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
13273 ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev1.
13274 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
13275 ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev1.
13276 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
13277 ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev1.
13278 mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
13279 break;
13280 default :
13281 					DHD_ERROR(("invalid sssr_reg_ver\n"));
13282 return BCME_UNSUPPORTED;
13283 }
13284 /* disable core by setting bit 0 */
13285 val = 1;
13286 dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE);
13287 OSL_DELAY(6000);
13288
13289 dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val0, FALSE);
13290
13291 dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val1, FALSE);
13292
13293 /* enable core by clearing bit 0 */
13294 val = 0;
13295 dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE);
13296
13297 dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val2, FALSE);
13298
13299 dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val3, FALSE);
13300
13301 dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val4, FALSE);
13302 }
13303 }
13304 return BCME_OK;
13305 }
13306
13307 #ifdef DHD_SSSR_DUMP_BEFORE_SR
13308 static int
13309 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
13310 {
13311 int i;
13312 uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
13313 uint8 num_d11cores;
13314
13315 DHD_ERROR(("%s\n", __FUNCTION__));
13316
13317 num_d11cores = dhd_d11_slices_num_get(dhd);
13318
13319 for (i = 0; i < num_d11cores; i++) {
13320 if (dhd->sssr_d11_outofreset[i]) {
13321 sr_size = dhd_sssr_mac_buf_size(dhd, i);
13322 xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
13323 xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
13324 dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
13325 sr_size, xmtaddress, xmtdata);
13326 }
13327 }
13328
13329 dig_buf_size = dhd_sssr_dig_buf_size(dhd);
13330 dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
13331 if (dig_buf_size) {
13332 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
13333 dig_buf_size, dig_buf_addr);
13334 }
13335
13336 return BCME_OK;
13337 }
13338 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
13339
13340 static int
13341 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
13342 {
13343 int i;
13344 uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
13345 uint8 num_d11cores;
13346
13347 DHD_ERROR(("%s\n", __FUNCTION__));
13348
13349 num_d11cores = dhd_d11_slices_num_get(dhd);
13350
13351 for (i = 0; i < num_d11cores; i++) {
13352 if (dhd->sssr_d11_outofreset[i]) {
13353 sr_size = dhd_sssr_mac_buf_size(dhd, i);
13354 xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
13355 xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
13356 dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
13357 sr_size, xmtaddress, xmtdata);
13358 }
13359 }
13360
13361 dig_buf_size = dhd_sssr_dig_buf_size(dhd);
13362 dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
13363
13364 if (dig_buf_size) {
13365 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, dig_buf_size, dig_buf_addr);
13366 }
13367
13368 return BCME_OK;
13369 }
13370
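/*
 * Top-level SSSR dump sequence: optionally capture the MAC/dig memories before
 * SR, quiesce interrupt masks, clock requests and PowerCtrl, send LTR sleep,
 * drop and re-apply the WL power domain request, restore PowerCtrl and the
 * ARM/D11 cores, capture the after-SR contents and finally write out the dump.
 */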
13371 int
13372 dhdpcie_sssr_dump(dhd_pub_t *dhd)
13373 {
13374 uint32 powerctrl_val;
13375
13376 if (!dhd->sssr_inited) {
13377 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
13378 return BCME_ERROR;
13379 }
13380
13381 if (dhd->bus->is_linkdown) {
13382 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
13383 return BCME_ERROR;
13384 }
13385
13386 DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
13387 "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
13388 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
13389 OFFSETOF(chipcregs_t, powerctl), 0, 0),
13390 si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
13391 PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
13392 PMU_REG(dhd->bus->sih, res_state, 0, 0)));
13393
13394 dhdpcie_d11_check_outofreset(dhd);
13395
13396 #ifdef DHD_SSSR_DUMP_BEFORE_SR
13397 DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
13398 if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
13399 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
13400 return BCME_ERROR;
13401 }
13402 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
13403
13404 dhdpcie_clear_intmask_and_timer(dhd);
13405 dhdpcie_clear_clk_req(dhd);
13406 powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
13407 dhdpcie_pcie_send_ltrsleep(dhd);
13408
13409 if (MULTIBP_ENAB(dhd->bus->sih)) {
13410 dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE);
13411 }
13412
13413 /* Wait for some time before Restore */
13414 OSL_DELAY(6000);
13415
13416 DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
13417 "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
13418 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
13419 OFFSETOF(chipcregs_t, powerctl), 0, 0),
13420 si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
13421 PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
13422 PMU_REG(dhd->bus->sih, res_state, 0, 0)));
13423
13424 if (MULTIBP_ENAB(dhd->bus->sih)) {
13425 dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE);
13426 /* Add delay for WL domain to power up */
13427 OSL_DELAY(15000);
13428
13429 DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
13430 "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
13431 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
13432 OFFSETOF(chipcregs_t, powerctl), 0, 0),
13433 si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
13434 PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
13435 PMU_REG(dhd->bus->sih, res_state, 0, 0)));
13436 }
13437
13438 dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
13439 dhdpcie_arm_resume_clk_req(dhd);
13440 dhdpcie_bring_d11_outofreset(dhd);
13441
13442 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
13443 if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
13444 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
13445 return BCME_ERROR;
13446 }
13447 dhd->sssr_dump_collected = TRUE;
13448 dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
13449
13450 return BCME_OK;
13451 }
13452
13453 #define PCIE_CFG_DSTATE_MASK 0x11u
13454
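/* Trigger a FIS capture: force all PMU power switches on, save the PCIe config
 * space, start FIS through the DAR FIS control register, then bring the link
 * back (REG ON/OFF for built-in platforms) and restore config space / D0
 * power state.
 */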
13455 static int
13456 dhdpcie_fis_trigger(dhd_pub_t *dhd)
13457 {
13458 uint32 fis_ctrl_status;
13459 uint32 cfg_status_cmd;
13460 uint32 cfg_pmcsr;
13461
13462 if (!dhd->sssr_inited) {
13463 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
13464 return BCME_ERROR;
13465 }
13466
13467 if (dhd->bus->is_linkdown) {
13468 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
13469 return BCME_ERROR;
13470 }
13471
13472 #ifdef DHD_PCIE_RUNTIMEPM
13473 /* Bring back to D0 */
13474 dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
13475 /* Stop RPM timer so that even INB DW DEASSERT should not happen */
13476 DHD_STOP_RPM_TIMER(dhd);
13477 #endif /* DHD_PCIE_RUNTIMEPM */
13478
13479 /* Set fis_triggered flag to ignore link down callback from RC */
13480 dhd->fis_triggered = TRUE;
13481
13482 /* Set FIS PwrswForceOnAll */
13483 PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_FIS_FORCEON_ALL_MASK, PMU_FIS_FORCEON_ALL_MASK);
13484
13485 fis_ctrl_status = PMU_REG(dhd->bus->sih, fis_ctrl_status, 0, 0);
13486
13487 DHD_ERROR(("%s: fis_ctrl_status=0x%x\n", __FUNCTION__, fis_ctrl_status));
13488
13489 cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
13490 cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
13491 DHD_ERROR(("before save: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13492 PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
13493
13494 DHD_PCIE_CONFIG_SAVE(dhd->bus);
13495
13496 /* Trigger FIS */
13497 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
13498 DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
13499 OSL_DELAY(100 * 1000);
13500
13501 /*
13502 * For android built-in platforms need to perform REG ON/OFF
13503 * to restore pcie link.
13504 * dhd_download_fw_on_driverload will be FALSE for built-in.
13505 */
13506 if (!dhd_download_fw_on_driverload) {
13507 DHD_ERROR(("%s: Toggle REG_ON and restore config space\n", __FUNCTION__));
13508 dhdpcie_bus_stop_host_dev(dhd->bus);
13509 dhd_wifi_platform_set_power(dhd, FALSE);
13510 dhd_wifi_platform_set_power(dhd, TRUE);
13511 dhdpcie_bus_start_host_dev(dhd->bus);
13512 /* Restore inited pcie cfg from pci_load_saved_state */
13513 dhdpcie_bus_enable_device(dhd->bus);
13514 }
13515
13516 cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
13517 cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
13518 DHD_ERROR(("after regon-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13519 PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
13520
13521 /* To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore */
13522 DHD_PCIE_CONFIG_RESTORE(dhd->bus);
13523
13524 cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
13525 cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
13526 DHD_ERROR(("after normal-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13527 PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
13528
13529 /*
13530 * To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore
13531 * in both MSM and LSI RCs
13532 */
13533 if ((cfg_pmcsr & PCIE_CFG_DSTATE_MASK) != 0) {
13534 int ret = dhdpcie_set_master_and_d0_pwrstate(dhd->bus);
13535 if (ret != BCME_OK) {
13536 DHD_ERROR(("%s: Setting D0 failed, ABORT FIS collection\n", __FUNCTION__));
13537 return ret;
13538 }
13539 cfg_status_cmd =
13540 dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
13541 cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
13542 DHD_ERROR(("after force-d0: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13543 PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
13544 }
13545
13546 /* Clear fis_triggered as REG OFF/ON recovered link */
13547 dhd->fis_triggered = FALSE;
13548
13549 return BCME_OK;
13550 }
13551
13552 int
13553 dhd_bus_fis_trigger(dhd_pub_t *dhd)
13554 {
13555 return dhdpcie_fis_trigger(dhd);
13556 }
13557
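/* Reset the HWA block using the wrapper/base register addresses and the reset
 * sequence values advertised in SSSR register info v3 and later; returns
 * BCME_UNSUPPORTED when the info is older or the HWA registers are not set.
 */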
13558 static int
13559 dhdpcie_reset_hwa(dhd_pub_t *dhd)
13560 {
13561 int ret;
13562 sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
13563 sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3;
13564
13565 /* HWA wrapper registers */
13566 uint32 ioctrl, resetctrl;
13567 /* HWA base registers */
13568 uint32 clkenable, clkgatingenable, clkext, clkctlstatus;
13569 uint32 hwa_resetseq_val[SSSR_HWA_RESET_SEQ_STEPS];
13570 int i = 0;
13571
13572 if (sssr_reg_info->version < SSSR_REG_INFO_VER_3) {
13573 DHD_ERROR(("%s: not supported for version:%d\n",
13574 __FUNCTION__, sssr_reg_info->version));
13575 return BCME_UNSUPPORTED;
13576 }
13577
13578 if (sssr_reg_info->hwa_regs.base_regs.clkenable == 0) {
13579 DHD_ERROR(("%s: hwa regs are not set\n", __FUNCTION__));
13580 return BCME_UNSUPPORTED;
13581 }
13582
13583 DHD_ERROR(("%s: version:%d\n", __FUNCTION__, sssr_reg_info->version));
13584
13585 ioctrl = sssr_reg_info->hwa_regs.wrapper_regs.ioctrl;
13586 resetctrl = sssr_reg_info->hwa_regs.wrapper_regs.resetctrl;
13587
13588 clkenable = sssr_reg_info->hwa_regs.base_regs.clkenable;
13589 clkgatingenable = sssr_reg_info->hwa_regs.base_regs.clkgatingenable;
13590 clkext = sssr_reg_info->hwa_regs.base_regs.clkext;
13591 clkctlstatus = sssr_reg_info->hwa_regs.base_regs.clkctlstatus;
13592
13593 ret = memcpy_s(hwa_resetseq_val, sizeof(hwa_resetseq_val),
13594 sssr_reg_info->hwa_regs.hwa_resetseq_val,
13595 sizeof(sssr_reg_info->hwa_regs.hwa_resetseq_val));
13596 if (ret) {
13597 DHD_ERROR(("%s: hwa_resetseq_val memcpy_s failed: %d\n",
13598 __FUNCTION__, ret));
13599 return ret;
13600 }
13601
13602 dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE);
13603 dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE);
13604 dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE);
13605 dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE);
13606
13607 dhd_sbreg_op(dhd, clkenable, &hwa_resetseq_val[i++], FALSE);
13608 dhd_sbreg_op(dhd, clkgatingenable, &hwa_resetseq_val[i++], FALSE);
13609 dhd_sbreg_op(dhd, clkext, &hwa_resetseq_val[i++], FALSE);
13610 dhd_sbreg_op(dhd, clkctlstatus, &hwa_resetseq_val[i++], FALSE);
13611
13612 return BCME_OK;
13613 }
13614
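/* Collect the SSSR contents after a FIS trigger: raise all PMU resources,
 * bring the D11 cores out of reset, clear the FIS-done status, reset HWA and
 * capture the after-SR dump.
 */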
13615 static int
13616 dhdpcie_fis_dump(dhd_pub_t *dhd)
13617 {
13618 int i;
13619 uint8 num_d11cores;
13620
13621 DHD_ERROR(("%s\n", __FUNCTION__));
13622
13623 if (!dhd->sssr_inited) {
13624 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
13625 return BCME_ERROR;
13626 }
13627
13628 if (dhd->bus->is_linkdown) {
13629 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
13630 return BCME_ERROR;
13631 }
13632
13633 /* bring up all pmu resources */
13634 PMU_REG(dhd->bus->sih, min_res_mask, ~0,
13635 PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
13636 OSL_DELAY(10 * 1000);
13637
13638 num_d11cores = dhd_d11_slices_num_get(dhd);
13639
13640 for (i = 0; i < num_d11cores; i++) {
13641 dhd->sssr_d11_outofreset[i] = TRUE;
13642 }
13643
13644 dhdpcie_bring_d11_outofreset(dhd);
13645 OSL_DELAY(6000);
13646
13647 /* clear FIS Done */
13648 PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
13649
13650 if (dhdpcie_reset_hwa(dhd) != BCME_OK) {
13651 DHD_ERROR(("%s: dhdpcie_reset_hwa failed\n", __FUNCTION__));
13652 return BCME_ERROR;
13653 }
13654
13655 dhdpcie_d11_check_outofreset(dhd);
13656
13657 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
13658 if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
13659 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
13660 return BCME_ERROR;
13661 }
13662 dhd->sssr_dump_collected = TRUE;
13663 dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
13664
13665 return BCME_OK;
13666 }
13667
13668 int
13669 dhd_bus_fis_dump(dhd_pub_t *dhd)
13670 {
13671 return dhdpcie_fis_dump(dhd);
13672 }
13673 #endif /* DHD_SSSR_DUMP */
13674
13675 #ifdef DHD_SDTC_ETB_DUMP
13676 int
13677 dhd_bus_get_etb_info(dhd_pub_t *dhd, uint32 etbinfo_addr, etb_info_t *etb_info)
13678 {
13679
13680 int ret = 0;
13681
13682 if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, etbinfo_addr,
13683 (unsigned char *)etb_info, sizeof(*etb_info)))) {
13684 DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
13685 return BCME_ERROR;
13686 }
13687
13688 return BCME_OK;
13689 }
13690
13691 int
13692 dhd_bus_get_sdtc_etb(dhd_pub_t *dhd, uint8 *sdtc_etb_mempool, uint addr, uint read_bytes)
13693 {
13694 int ret = 0;
13695
13696 if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, addr,
13697 (unsigned char *)sdtc_etb_mempool, read_bytes))) {
13698 DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
13699 return BCME_ERROR;
13700 }
13701 return BCME_OK;
13702 }
13703 #endif /* DHD_SDTC_ETB_DUMP */
13704
13705 #ifdef DHD_WAKE_STATUS
13706 wake_counts_t*
13707 dhd_bus_get_wakecount(dhd_pub_t *dhd)
13708 {
13709 return &dhd->bus->wake_counts;
13710 }
13711 int
13712 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
13713 {
13714 return bcmpcie_set_get_wake(dhd->bus, 0);
13715 }
13716 #endif /* DHD_WAKE_STATUS */
13717
13718 /* Writes random number(s) to the TCM. FW upon initialization reads this TCM
13719  * location to fetch the random number, and uses it to randomize heap address space layout.
13720  */
13721 static int
13722 dhdpcie_wrt_rnd(struct dhd_bus *bus)
13723 {
13724 bcm_rand_metadata_t rnd_data;
13725 uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
13726 uint32 count = BCM_ENTROPY_HOST_NBYTES;
13727 int ret = 0;
13728 uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
13729 ((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
13730
13731 memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
13732 rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
13733 rnd_data.count = htol32(count);
13734 /* write the metadata about random number */
13735 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
13736 /* scale back by number of random number counts */
13737 addr -= count;
13738
13739 bus->ramtop_addr = addr;
13740
13741 /* Now write the random number(s) */
13742 ret = dhd_get_random_bytes(rand_buf, count);
13743 if (ret != BCME_OK) {
13744 return ret;
13745 }
13746 dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
13747
13748 bus->next_tlv = addr;
13749
13750 return BCME_OK;
13751 }
13752
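/* Dump interrupt enable/disable counters and the timestamps of recent ISR,
 * DPC, D3-inform and suspend/resume events; useful when debugging interrupt
 * or power-management issues.
 */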
13753 void
13754 dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
13755 {
13756 struct dhd_bus *bus = dhd->bus;
13757 uint64 current_time;
13758
13759 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
13760 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
13761 bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
13762 DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
13763 bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
13764 #ifdef BCMPCIE_OOB_HOST_WAKE
13765 DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
13766 bus->oob_intr_count, bus->oob_intr_enable_count,
13767 bus->oob_intr_disable_count));
13768 DHD_ERROR(("oob_irq_num=%d last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT"\n",
13769 dhdpcie_get_oob_irq_num(bus),
13770 GET_SEC_USEC(bus->last_oob_irq_isr_time),
13771 GET_SEC_USEC(bus->last_oob_irq_thr_time)));
13772 DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
13773 " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
13774 GET_SEC_USEC(bus->last_oob_irq_enable_time),
13775 GET_SEC_USEC(bus->last_oob_irq_disable_time)));
13776 DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
13777 dhdpcie_get_oob_irq_status(bus),
13778 dhdpcie_get_oob_irq_level()));
13779 #endif /* BCMPCIE_OOB_HOST_WAKE */
13780 DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
13781 bus->dpc_return_busdown_count, bus->non_ours_irq_count));
13782
13783 current_time = OSL_LOCALTIME_NS();
13784 DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
13785 GET_SEC_USEC(current_time)));
13786 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
13787 " isr_exit_time="SEC_USEC_FMT"\n",
13788 GET_SEC_USEC(bus->isr_entry_time),
13789 GET_SEC_USEC(bus->isr_exit_time)));
13790 DHD_ERROR(("isr_sched_dpc_time="SEC_USEC_FMT
13791 " rpm_sched_dpc_time="SEC_USEC_FMT
13792 " last_non_ours_irq_time="SEC_USEC_FMT"\n",
13793 GET_SEC_USEC(bus->isr_sched_dpc_time),
13794 GET_SEC_USEC(bus->rpm_sched_dpc_time),
13795 GET_SEC_USEC(bus->last_non_ours_irq_time)));
13796 DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
13797 " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
13798 GET_SEC_USEC(bus->dpc_entry_time),
13799 GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
13800 DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
13801 " last_process_txcpl_time="SEC_USEC_FMT"\n",
13802 GET_SEC_USEC(bus->last_process_flowring_time),
13803 GET_SEC_USEC(bus->last_process_txcpl_time)));
13804 DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
13805 " last_process_infocpl_time="SEC_USEC_FMT
13806 " last_process_edl_time="SEC_USEC_FMT"\n",
13807 GET_SEC_USEC(bus->last_process_rxcpl_time),
13808 GET_SEC_USEC(bus->last_process_infocpl_time),
13809 GET_SEC_USEC(bus->last_process_edl_time)));
13810 DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
13811 " resched_dpc_time="SEC_USEC_FMT"\n",
13812 GET_SEC_USEC(bus->dpc_exit_time),
13813 GET_SEC_USEC(bus->resched_dpc_time)));
13814 DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
13815 GET_SEC_USEC(bus->last_d3_inform_time)));
13816
13817 DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
13818 " last_suspend_end_time="SEC_USEC_FMT"\n",
13819 GET_SEC_USEC(bus->last_suspend_start_time),
13820 GET_SEC_USEC(bus->last_suspend_end_time)));
13821 DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
13822 " last_resume_end_time="SEC_USEC_FMT"\n",
13823 GET_SEC_USEC(bus->last_resume_start_time),
13824 GET_SEC_USEC(bus->last_resume_end_time)));
13825
13826 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
13827 DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
13828 " logtrace_thread_sem_down_time="SEC_USEC_FMT
13829 "\nlogtrace_thread_flush_time="SEC_USEC_FMT
13830 " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
13831 "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
13832 GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
13833 GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
13834 GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
13835 GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
13836 GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
13837 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
13838 }
13839
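/* Bus-layer wrapper: forwards to the PCIe-specific interrupt counter dump above. */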
13840 void
13841 dhd_bus_intr_count_dump(dhd_pub_t *dhd)
13842 {
13843 dhd_pcie_intr_count_dump(dhd);
13844 }
13845
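/*
 * Dumps the AI wrapper registers (OOB select, ITIP OOB, reset status/control)
 * for the PCIe and ARM CR4 cores, the CR4 core registers, and the OOB router
 * interrupt status registers. The CA7 block is compiled out (NOT_YET) because
 * dumping it can trigger a CTO. The caller's core index is saved and restored
 * around the walk.
 */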
13846 int
13847 dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
13848 {
13849 uint32 save_idx, val;
13850 si_t *sih = dhd->bus->sih;
13851 uint32 oob_base, oob_base1;
13852 uint32 wrapper_dump_list[] = {
13853 AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
13854 AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
13855 AI_RESETSTATUS, AI_RESETCTRL,
13856 AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
13857 AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
13858 };
13859 uint32 i;
13860 hndoobr_reg_t *reg;
13861 cr4regs_t *cr4regs;
13862 ca7regs_t *ca7regs;
13863
13864 save_idx = si_coreidx(sih);
13865
13866 DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
13867
13868 if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
13869 for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) {
13870 val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
13871 DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
13872 }
13873 }
13874
13875 if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
13876 DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
13877 for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) {
13878 val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
13879 DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
13880 }
13881 DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
13882 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
13883 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
13884 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
13885 DHD_ERROR(("reg:0x%x val:0x%x\n",
13886 (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
13887 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
13888 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
13889 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
13890 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
13891 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
13892 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
13893 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
13894 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
13895 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
13896 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
13897 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
13898 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
13899 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
13900 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
13901 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
13902 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
13903 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
13904 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
13905 val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
13906 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
13907 }
13908 /* XXX: Dumping the CA7 registers currently causes a CTO, so it is temporarily disabled */
13909 BCM_REFERENCE(ca7regs);
13910 #ifdef NOT_YET
13911 if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
13912 DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
13913 val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
13914 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
13915 val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
13916 DHD_ERROR(("reg:0x%x val:0x%x\n",
13917 (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
13918 val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
13919 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
13920 val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
13921 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
13922 val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
13923 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
13924 val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
13925 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
13926 }
13927 #endif /* NOT_YET */
13928
13929 DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
13930
13931 oob_base = si_oobr_baseaddr(sih, FALSE);
13932 oob_base1 = si_oobr_baseaddr(sih, TRUE);
13933 if (oob_base) {
13934 dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
13935 dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
13936 dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
13937 dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
13938 } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
13939 val = R_REG(dhd->osh, &reg->intstatus[0]);
13940 DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[0], val));
13941 val = R_REG(dhd->osh, &reg->intstatus[1]);
13942 DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[1], val));
13943 val = R_REG(dhd->osh, &reg->intstatus[2]);
13944 DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[2], val));
13945 val = R_REG(dhd->osh, &reg->intstatus[3]);
13946 DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[3], val));
13947 }
13948
13949 if (oob_base1) {
13950 DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
13951
13952 dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
13953 dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
13954 dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
13955 dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
13956 }
13957
13958 si_setcoreidx(dhd->bus->sih, save_idx);
13959
13960 return 0;
13961 }
13962
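/*
 * Reads back the REG_WORK_AROUND register from the chipcommon, ARM CR4 and
 * PCIe cores, plus the PMU min_res_mask, so the state of the applied hardware
 * work-arounds can be inspected from the debug dump.
 */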
13963 static void
13964 dhdpcie_hw_war_regdump(dhd_bus_t *bus)
13965 {
13966 uint32 save_idx, val;
13967 volatile uint32 *reg;
13968
13969 save_idx = si_coreidx(bus->sih);
13970 if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
13971 val = R_REG(bus->osh, reg + REG_WORK_AROUND);
13972 DHD_ERROR(("CC HW_WAR :0x%x\n", val));
13973 }
13974
13975 if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
13976 val = R_REG(bus->osh, reg + REG_WORK_AROUND);
13977 DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
13978 }
13979
13980 if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
13981 val = R_REG(bus->osh, reg + REG_WORK_AROUND);
13982 DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
13983 }
13984 si_setcoreidx(bus->sih, save_idx);
13985
13986 val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
13987 DHD_ERROR(("MINRESMASK :0x%x\n", val));
13988 }
13989
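/*
 * Dumps the host-memory DMA engine registers of the PCIe core at their fixed
 * offsets: HostToDev transmit/receive (0x200-0x234) and DevToHost
 * transmit/receive (0x240-0x274) control, pointer, address and status words.
 * The dump is skipped entirely when the PCIe link is already down, since the
 * reads would typically just return all-ones.
 */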
13990 int
13991 dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
13992 {
13993 if (dhd->bus->is_linkdown) {
13994 DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
13995 "due to PCIe link down ------- \r\n"));
13996 return 0;
13997 }
13998
13999 DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
14000
14001 /* HostToDev */
14002 DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
14003 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
14004 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
14005 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
14006 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
14007 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
14008 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
14009 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
14010 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
14011
14012 DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
14013 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
14014 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
14015 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
14016 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
14017 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
14018 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
14019 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
14020 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
14021
14022 /* DevToHost */
14023 DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
14024 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
14025 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
14026 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
14027 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
14028 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
14029 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
14030 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
14031 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
14032
14033 DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
14034 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
14035 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
14036 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
14037 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
14038 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
14039 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
14040 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
14041 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
14042
14043 return 0;
14044 }
14045
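/*
 * Dumps the PCIe mailbox interrupt status/mask, the D2H doorbell 0 register
 * and the shared-area d2h_mb_data. Returns FALSE as soon as the intstatus,
 * intmask or doorbell read comes back as 0xFFFFFFFF, which typically means
 * the link is down or the device has lost power.
 */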
14046 bool
14047 dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
14048 {
14049 uint32 intstatus = 0;
14050 uint32 intmask = 0;
14051 uint32 d2h_db0 = 0;
14052 uint32 d2h_mb_data = 0;
14053
14054 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
14055 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14056 dhd->bus->pcie_mailbox_int, 0, 0);
14057 if (intstatus == (uint32)-1) {
14058 DHD_ERROR(("intstatus=0x%x \n", intstatus));
14059 return FALSE;
14060 }
14061
14062 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14063 dhd->bus->pcie_mailbox_mask, 0, 0);
14064 if (intmask == (uint32) -1) {
14065 DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
14066 return FALSE;
14067 }
14068
14069 d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14070 PCID2H_MailBox, 0, 0);
14071 if (d2h_db0 == (uint32)-1) {
14072 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
14073 intstatus, intmask, d2h_db0));
14074 return FALSE;
14075 }
14076
14077 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
14078 intstatus, intmask, d2h_db0));
14079 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
14080 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
14081 dhd->bus->def_intmask));
14082
14083 return TRUE;
14084 }
14085
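/*
 * Dumps the root-complex AER uncorrectable error status and, when
 * EXTENDED_PCIE_DEBUG_DUMP is enabled, the four AER header-log words, via
 * dhdpcie_rc_access_cap() on the RC extended capability space.
 */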
14086 void
14087 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
14088 {
14089 DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
14090 DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
14091 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14092 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
14093 #ifdef EXTENDED_PCIE_DEBUG_DUMP
14094 DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
14095 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14096 PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
14097 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14098 PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
14099 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14100 PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
14101 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14102 PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
14103 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
14104 }
14105
14106 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
14107 #define MAX_RC_REG_INFO_VAL 8
14108 #define PCIE_EXTCAP_ERR_HD_SZ 4
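/*
 * Appends RC register snapshots to the hang_info string reported through the
 * vendor HANG event: link control/status, device control/status, AER
 * uncorrectable and correctable error status, two reserved fields, and the
 * four AER header-log words. Each value is formatted as "%08x" followed by
 * HANG_KEY_DEL (header-log words are prefixed with HANG_RAW_DEL instead),
 * and hang_info_cnt is bumped per field until HANG_FIELD_CNT_MAX is reached.
 */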
14109 void
14110 dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written)
14111 {
14112 int i;
14113 int remain_len;
14114
14115 /* dump link control & status */
14116 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
14117 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14118 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
14119 dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
14120 PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
14121 dhd->hang_info_cnt++;
14122 }
14123
14124 /* dump device control & status */
14125 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
14126 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14127 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
14128 dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
14129 PCIE_CAP_DEVCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
14130 dhd->hang_info_cnt++;
14131 }
14132
14133 /* dump uncorrectable error */
14134 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
14135 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14136 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
14137 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14138 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0), HANG_KEY_DEL);
14139 dhd->hang_info_cnt++;
14140 }
14141
14142 /* dump correctable error */
14143 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
14144 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14145 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
14146 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14147 /* XXX: use definition in linux/pcie_regs.h */
14148 PCI_ERR_COR_STATUS, TRUE, FALSE, 0), HANG_KEY_DEL);
14149 dhd->hang_info_cnt++;
14150 }
14151
14152 /* HG05/06 reserved */
14153 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
14154 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14155 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
14156 0, HANG_KEY_DEL);
14157 dhd->hang_info_cnt++;
14158 }
14159
14160 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
14161 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14162 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
14163 0, HANG_KEY_DEL);
14164 dhd->hang_info_cnt++;
14165 }
14166
14167 /* dump error header log in RAW */
14168 for (i = 0; i < PCIE_EXTCAP_ERR_HD_SZ; i++) {
14169 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
14170 *bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len,
14171 "%c%08x", HANG_RAW_DEL, dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14172 PCIE_EXTCAP_ERR_HEADER_LOG_0 + i * PCIE_EXTCAP_ERR_HD_SZ,
14173 TRUE, FALSE, 0));
14174 }
14175 dhd->hang_info_cnt++;
14176 }
14177 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
14178
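/*
 * Top-level PCIe debug dump, typically invoked on a hang or timeout: logs the
 * bus low-power state, host IRQ and tasklet status, interrupt counters, EP
 * resource info, RC and EP config space (including AER and DAR registers when
 * EXTENDED_PCIE_DEBUG_DUMP is set), PCIe core/PHY registers, wrapper registers
 * and DMA engine registers. The config/core stages are skipped once the link
 * is detected down, and a backplane power request is held across the core
 * register reads when MULTIBP_ENAB() applies.
 */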
14179 int
14180 dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
14181 {
14182 int host_irq_disabled;
14183
14184 DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
14185 host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
14186 DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
14187 dhd_print_tasklet_status(dhd);
14188 dhd_pcie_intr_count_dump(dhd);
14189
14190 DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
14191 dhdpcie_dump_resource(dhd->bus);
14192
14193 dhd_pcie_dump_rc_conf_space_cap(dhd);
14194
14195 DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
14196 dhd_debug_get_rc_linkcap(dhd->bus)));
14197 #ifdef CUSTOMER_HW4_DEBUG
14198 if (dhd->bus->is_linkdown) {
14199 DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
14200 "link may be DOWN\n"));
14201 return 0;
14202 }
14203 #endif /* CUSTOMER_HW4_DEBUG */
14204 DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
14205 /* XXX: hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
14206 dhd_bus_dump_imp_cfg_registers(dhd->bus);
14207 #ifdef EXTENDED_PCIE_DEBUG_DUMP
14208 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
14209 dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
14210 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
14211 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
14212 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
14213 dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG1, sizeof(uint32)),
14214 PCI_TLP_HDR_LOG2,
14215 dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG2, sizeof(uint32)),
14216 PCI_TLP_HDR_LOG3,
14217 dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG3, sizeof(uint32)),
14218 PCI_TLP_HDR_LOG4,
14219 dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG4, sizeof(uint32))));
14220 if (dhd->bus->sih->buscorerev >= 24) {
14221 DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
14222 "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
14223 dhd_pcie_config_read(dhd->bus, PCIECFGREG_DEV_STATUS_CTRL,
14224 sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
14225 dhd_pcie_config_read(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL,
14226 sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
14227 dhd_pcie_config_read(dhd->bus, PCIECFGREG_PML1_SUB_CTRL2,
14228 sizeof(uint32))));
14229 dhd_bus_dump_dar_registers(dhd->bus);
14230 }
14231 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
14232
14233 if (dhd->bus->is_linkdown) {
14234 DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
14235 return 0;
14236 }
14237
14238 if (MULTIBP_ENAB(dhd->bus->sih)) {
14239 dhd_bus_pcie_pwr_req(dhd->bus);
14240 }
14241
14242 DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
14243 /* XXX: hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
14244 * CurrentPcieGen2ProgramGuide/pcie_ep.htm
14245 */
14246
14247 DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
14248 "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
14249 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
14250 PCIECFGREG_PHY_DBG_CLKREQ1,
14251 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
14252 PCIECFGREG_PHY_DBG_CLKREQ2,
14253 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
14254 PCIECFGREG_PHY_DBG_CLKREQ3,
14255 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
14256
14257 #ifdef EXTENDED_PCIE_DEBUG_DUMP
14258 if (dhd->bus->sih->buscorerev >= 24) {
14259
14260 DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
14261 "ltssm_hist_2(0x%x)=0x%x "
14262 "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
14263 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
14264 PCIECFGREG_PHY_LTSSM_HIST_1,
14265 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
14266 PCIECFGREG_PHY_LTSSM_HIST_2,
14267 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
14268 PCIECFGREG_PHY_LTSSM_HIST_3,
14269 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
14270
14271 DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
14272 PCIECFGREG_TREFUP,
14273 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
14274 PCIECFGREG_TREFUP_EXT,
14275 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
14276 DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
14277 "Function_Intstatus(0x%x)=0x%x "
14278 "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
14279 "Power_Intmask(0x%x)=0x%x\n",
14280 PCIE_CORE_REG_ERRLOG,
14281 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14282 PCIE_CORE_REG_ERRLOG, 0, 0),
14283 PCIE_CORE_REG_ERR_ADDR,
14284 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14285 PCIE_CORE_REG_ERR_ADDR, 0, 0),
14286 PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
14287 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14288 PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
14289 PCIFunctionIntmask(dhd->bus->sih->buscorerev),
14290 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14291 PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
14292 PCIPowerIntstatus(dhd->bus->sih->buscorerev),
14293 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14294 PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
14295 PCIPowerIntmask(dhd->bus->sih->buscorerev),
14296 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14297 PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
14298 DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
14299 "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
14300 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
14301 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14302 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
14303 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
14304 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14305 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
14306 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
14307 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14308 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
14309 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
14310 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14311 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
14312 DHD_ERROR(("err_code(0x%x)=0x%x\n",
14313 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
14314 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
14315 OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
14316
14317 dhd_pcie_dump_wrapper_regs(dhd);
14318 dhdpcie_hw_war_regdump(dhd->bus);
14319 }
14320 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
14321
14322 dhd_pcie_dma_info_dump(dhd);
14323
14324 if (MULTIBP_ENAB(dhd->bus->sih)) {
14325 dhd_bus_pcie_pwr_req_clear(dhd->bus);
14326 }
14327
14328 return 0;
14329 }
14330
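/* Accessor: reports whether BT quiesce is forced for this bus instance. */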
14331 bool
14332 dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
14333 {
14334 return bus->force_bt_quiesce;
14335 }
14336
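/*
 * Returns the number of D11 (MAC) slices exposed by the dongle:
 * MAX_NUM_D11_CORES_WITH_SCAN when a dedicated scan core is present,
 * MAX_NUM_D11CORES otherwise.
 */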
14337 uint8
14338 dhd_d11_slices_num_get(dhd_pub_t *dhdp)
14339 {
14340 return si_scan_core_present(dhdp->bus->sih) ?
14341 MAX_NUM_D11_CORES_WITH_SCAN : MAX_NUM_D11CORES;
14342 }
14343
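/*
 * Simple dongle TCM (RAM) sanity test: for each pattern in init_val (all-ones,
 * then all-zeros) the full range [dongle_ram_base, dongle_ram_base + ramsize)
 * is written and read back in MEMBLOCK-sized chunks over the backplane, and
 * the first mismatch or membytes error fails the test. Extra patterns (e.g.
 * 0xAA/0x55) could be added by extending init_val together with the
 * NUM_PATTERNS define used to size it.
 */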
14344 static bool
14345 dhd_bus_tcm_test(struct dhd_bus *bus)
14346 {
14347 int ret = 0;
14348 int size; /* Full mem size */
14349 int start; /* Start address */
14350 int read_size = 0; /* Read size of each iteration */
14351 int num = 0;
14352 uint8 *read_buf, *write_buf;
14353 uint8 init_val[NUM_PATTERNS] = {
14354 0xFFu, /* 11111111 */
14355 0x00u, /* 00000000 */
14356 };
14357
14358 if (!bus) {
14359 DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
14360 return FALSE;
14361 }
14362
14363 read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
14364
14365 if (!read_buf) {
14366 DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
14367 return FALSE;
14368 }
14369
14370 write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
14371
14372 if (!write_buf) {
14373 MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
14374 DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
14375 return FALSE;
14376 }
14377
14378 DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
14379 DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
14380
14381 while (num < NUM_PATTERNS) {
14382 start = bus->dongle_ram_base;
14383 /* Get full mem size */
14384 size = bus->ramsize;
14385
14386 memset(write_buf, init_val[num], MEMBLOCK);
14387 while (size > 0) {
14388 read_size = MIN(MEMBLOCK, size);
14389 memset(read_buf, 0, read_size);
14390
14391 /* Write */
14392 if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
14393 DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
14394 MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
14395 MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
14396 return FALSE;
14397 }
14398
14399 /* Read */
14400 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
14401 DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
14402 MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
14403 MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
14404 return FALSE;
14405 }
14406
14407 /* Compare */
14408 if (memcmp(read_buf, write_buf, read_size)) {
14409 DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
14410 __FUNCTION__, start, num));
14411 prhex("Readbuf", read_buf, read_size);
14412 prhex("Writebuf", write_buf, read_size);
14413 MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
14414 MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
14415 return FALSE;
14416 }
14417
14418 /* Decrement size and increment start address */
14419 size -= read_size;
14420 start += read_size;
14421 }
14422 num++;
14423 }
14424
14425 MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
14426 MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
14427
14428 DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
14429 return TRUE;
14430 }
14431
14432 #define PCI_CFG_LINK_SPEED_SHIFT 16
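/*
 * Returns the negotiated PCIe link speed read from host config space: the
 * current-link-speed field sits in the upper (status) half of the link
 * control/status dword, hence the shift by 16 before masking with
 * PCI_LINK_SPEED_MASK. Per the PCIe encoding, 1 = 2.5 GT/s (Gen1),
 * 2 = 5 GT/s (Gen2), 3 = 8 GT/s (Gen3), 4 = 16 GT/s (Gen4).
 */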
14433 int
14434 dhd_get_pcie_linkspeed(dhd_pub_t *dhd)
14435 {
14436 uint32 pcie_lnkst;
14437 uint32 pcie_lnkspeed;
14438 pcie_lnkst = OSL_PCI_READ_CONFIG(dhd->osh, PCIECFGREG_LINK_STATUS_CTRL,
14439 sizeof(pcie_lnkst));
14440
14441 pcie_lnkspeed = (pcie_lnkst >> PCI_CFG_LINK_SPEED_SHIFT) & PCI_LINK_SPEED_MASK;
14442 DHD_INFO(("%s: Link speed: %d\n", __FUNCTION__, pcie_lnkspeed));
14443 return pcie_lnkspeed;
14444 }