1 /*
2 * DHD Bus Module for PCIE
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: dhd_pcie.c 797197 2018-12-29 03:31:21Z $
28 */
29
30 /* include files */
31 #include <typedefs.h>
32 #include <bcmutils.h>
33 #include <bcmdevs.h>
34 #include <siutils.h>
35 #include <hndsoc.h>
36 #include <hndpmu.h>
37 #include <etd.h>
38 #include <hnd_debug.h>
39 #include <sbchipc.h>
40 #include <hnd_armtrap.h>
41 #if defined(DHD_DEBUG)
42 #include <hnd_cons.h>
43 #endif /* defined(DHD_DEBUG) */
44 #include <dngl_stats.h>
45 #include <pcie_core.h>
46 #include <dhd.h>
47 #include <dhd_bus.h>
48 #include <dhd_flowring.h>
49 #include <dhd_proto.h>
50 #include <dhd_dbg.h>
51 #include <dhd_debug.h>
52 #include <dhd_daemon.h>
53 #include <dhdioctl.h>
54 #include <sdiovar.h>
55 #include <bcmmsgbuf.h>
56 #include <pcicfg.h>
57 #include <dhd_pcie.h>
58 #include <bcmpcie.h>
59 #include <bcmendian.h>
60 #ifdef DHDTCPACK_SUPPRESS
61 #include <dhd_ip.h>
62 #endif /* DHDTCPACK_SUPPRESS */
63 #include <bcmevent.h>
64
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #include <linux/pm_runtime.h>
67 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
68
69 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
70 #include <debugger.h>
71 #endif /* DEBUGGER || DHD_DSCOPE */
72
73 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
74
75 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
76 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
77
78 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
79 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
80 /* Temporary WAR to fix precommit till the sync issue between trunk & precommit branch is resolved */
81
82 /* CTO Prevention Recovery */
83 #ifdef BCMQT_HW
84 #define CTO_TO_CLEAR_WAIT_MS 10000
85 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
86 #else
87 #define CTO_TO_CLEAR_WAIT_MS 1000
88 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
89 #endif // endif
90
91 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
92 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
93 (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
94
95 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
96 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
97 (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
98
99 /* Fetch address of a member in the ring_mem structure in dongle memory */
100 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
101 (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
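/*
 * Illustrative use of the address helpers above (a sketch, not driver logic):
 * the computed dongle address is handed to dhdpcie_bus_membytes() to read or
 * write the corresponding field over the TCM window, e.g.
 *
 *	uint32 console_addr;
 *	dhdpcie_bus_membytes(bus, FALSE,
 *		DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr),
 *		(uint8 *)&console_addr, sizeof(console_addr));
 *
 * The member name 'console_addr' is used here only as an example field.
 */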
102
103 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
104 extern unsigned int system_rev;
105 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
106
107 /* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
108 uint dma_ring_indices = 0;
109 /* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
110 bool h2d_phase = 0;
111 /* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
112 * defined in dhd_linux.c
113 */
114 bool force_trap_bad_h2d_phase = 0;
115
116 int dhd_dongle_memsize;
117 int dhd_dongle_ramsize;
118 struct dhd_bus *g_dhd_bus = NULL;
119 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
120 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
121 #if defined(DHD_FW_COREDUMP)
122 static int dhdpcie_mem_dump(dhd_bus_t *bus);
123 #endif /* DHD_FW_COREDUMP */
124
125 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
126 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
127 const char *name, void *params,
128 int plen, void *arg, int len, int val_size);
129 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
130 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
131 uint32 len, uint32 srcdelay, uint32 destdelay,
132 uint32 d11_lpbk, uint32 core_num, uint32 wait);
133 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
134 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
135 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
136 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
137 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
138 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
139 static int dhdpcie_readshared(dhd_bus_t *bus);
140 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
141 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
142 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
143 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
144 bool dongle_isolation, bool reset_flag);
145 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
146 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
147 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
148 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
149 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
150 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
151 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
152 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
153 #ifdef DHD_SUPPORT_64BIT
154 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
155 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
156 #endif /* DHD_SUPPORT_64BIT */
157 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
158 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
159 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
160 static void dhdpcie_fw_trap(dhd_bus_t *bus);
161 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
162 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
163 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
164
165 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
166 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
167 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
168
169 #ifdef IDLE_TX_FLOW_MGMT
170 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
171 static void dhd_bus_idle_scan(dhd_bus_t *bus);
172 #endif /* IDLE_TX_FLOW_MGMT */
173
174 #ifdef EXYNOS_PCIE_DEBUG
175 extern void exynos_pcie_register_dump(int ch_num);
176 #endif /* EXYNOS_PCIE_DEBUG */
177
178 #define PCI_VENDOR_ID_BROADCOM 0x14e4
179
180 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
181 #define MAX_D3_ACK_TIMEOUT 100
182 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
183
184 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
185 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
186 static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
187
188 static int dhdpcie_init_d11status(struct dhd_bus *bus);
189
190 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
191
192 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
193 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
194
195 static int dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus);
196
197 #ifdef DHD_SSSR_DUMP
198 static int dhdpcie_sssr_dump(dhd_pub_t *dhd);
199 #endif /* DHD_SSSR_DUMP */
200
201 /* IOVar table */
202 enum {
203 IOV_INTR = 1,
204 IOV_MEMSIZE,
205 IOV_SET_DOWNLOAD_STATE,
206 IOV_DEVRESET,
207 IOV_VARS,
208 IOV_MSI_SIM,
209 IOV_PCIE_LPBK,
210 IOV_CC_NVMSHADOW,
211 IOV_RAMSIZE,
212 IOV_RAMSTART,
213 IOV_SLEEP_ALLOWED,
214 IOV_PCIE_DMAXFER,
215 IOV_PCIE_SUSPEND,
216 IOV_DONGLEISOLATION,
217 IOV_LTRSLEEPON_UNLOOAD,
218 IOV_METADATA_DBG,
219 IOV_RX_METADATALEN,
220 IOV_TX_METADATALEN,
221 IOV_TXP_THRESHOLD,
222 IOV_BUZZZ_DUMP,
223 IOV_DUMP_RINGUPD_BLOCK,
224 IOV_DMA_RINGINDICES,
225 IOV_FORCE_FW_TRAP,
226 IOV_DB1_FOR_MB,
227 IOV_FLOW_PRIO_MAP,
228 #ifdef DHD_PCIE_RUNTIMEPM
229 IOV_IDLETIME,
230 #endif /* DHD_PCIE_RUNTIMEPM */
231 IOV_RXBOUND,
232 IOV_TXBOUND,
233 IOV_HANGREPORT,
234 IOV_H2D_MAILBOXDATA,
235 IOV_INFORINGS,
236 IOV_H2D_PHASE,
237 IOV_H2D_ENABLE_TRAP_BADPHASE,
238 IOV_H2D_TXPOST_MAX_ITEM,
239 IOV_TRAPDATA,
240 IOV_TRAPDATA_RAW,
241 IOV_CTO_PREVENTION,
242 IOV_PCIE_WD_RESET,
243 IOV_DUMP_DONGLE,
244 IOV_IDMA_ENABLE,
245 IOV_IFRM_ENABLE,
246 IOV_CLEAR_RING,
247 IOV_DAR_ENABLE,
248 IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
249 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
250 IOV_GDB_SERVER, /**< starts gdb server on given interface */
251 #endif /* DEBUGGER || DHD_DSCOPE */
252 IOV_INB_DW_ENABLE,
253 IOV_CTO_THRESHOLD,
254 #ifdef D2H_MINIDUMP
255 IOV_MINIDUMP_OVERRIDE,
256 #endif /* D2H_MINIDUMP */
257 IOV_PCIE_LAST /**< unused IOVAR */
258 };
259
260 const bcm_iovar_t dhdpcie_iovars[] = {
261 {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
262 {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
263 {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
264 {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
265 {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
266 {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
267 {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
268 {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
269 {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
270 {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
271 {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
272 {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 },
273 {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
274 {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
275 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
276 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
277 {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
278 {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
279 {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
280 {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
281 {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
282 {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
283 {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
284 {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
285 #ifdef DHD_PCIE_RUNTIMEPM
286 {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
287 #endif /* DHD_PCIE_RUNTIMEPM */
288 {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
289 {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
290 {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
291 {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
292 {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
293 {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
294 {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
295 IOVT_UINT32, 0 },
296 {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
297 {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
298 {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
299 {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
300 {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
301 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
302 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
303 {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
304 {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
305 {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
306 {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
307 {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
308 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
309 {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
310 #endif /* DEBUGGER || DHD_DSCOPE */
311 {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
312 {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
313 #ifdef D2H_MINIDUMP
314 {"minidump_override", IOV_MINIDUMP_OVERRIDE, 0, 0, IOVT_UINT32, 0 },
315 #endif /* D2H_MINIDUMP */
316 {NULL, 0, 0, 0, 0, 0 }
317 };
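/*
 * Each entry above maps an iovar name to an IOV_xxx code that is dispatched in
 * dhdpcie_bus_doiovar(). As an example, a get of the "memsize" iovar is
 * expected to report the configured dongle RAM size (bus->ramsize); the exact
 * handling lives in the doiovar switch further down in this file.
 */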
318
319 #define MAX_READ_TIMEOUT (5 * 1000 * 1000)
320
321 #ifndef DHD_RXBOUND
322 #define DHD_RXBOUND 64
323 #endif // endif
324 #ifndef DHD_TXBOUND
325 #define DHD_TXBOUND 64
326 #endif // endif
327
328 #define DHD_INFORING_BOUND 32
329 #define DHD_BTLOGRING_BOUND 32
330
331 uint dhd_rxbound = DHD_RXBOUND;
332 uint dhd_txbound = DHD_TXBOUND;
333
334 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
335 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
336 static struct dhd_gdb_bus_ops_s bus_ops = {
337 .read_u16 = dhdpcie_bus_rtcm16,
338 .read_u32 = dhdpcie_bus_rtcm32,
339 .write_u32 = dhdpcie_bus_wtcm32,
340 };
341 #endif /* DEBUGGER || DHD_DSCOPE */
342
343 bool
344 dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
345 {
346 return bus->flr_force_fail;
347 }
348
349 /**
350 * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to
351 * link with the bus driver, in order to look for or await the device.
352 */
353 int
354 dhd_bus_register(void)
355 {
356 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
357
358 return dhdpcie_bus_register();
359 }
360
361 void
362 dhd_bus_unregister(void)
363 {
364 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
365
366 dhdpcie_bus_unregister();
367 return;
368 }
369
370 /** returns a host virtual address */
371 uint32 *
372 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
373 {
374 return (uint32 *)REG_MAP(addr, size);
375 }
376
377 void
378 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
379 {
380 REG_UNMAP(addr);
381 return;
382 }
383
384 /**
385 * return H2D doorbell register address
386 * use DAR registers instead of the enumeration registers for corerev >= 23 (4347B0)
387 */
388 static INLINE uint
389 dhd_bus_db0_addr_get(struct dhd_bus *bus)
390 {
391 uint addr = PCIH2D_MailBox;
392 uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
393
394 return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
395 }
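/*
 * Illustrative doorbell ring (a sketch, following the si_corereg() access
 * pattern used elsewhere in this file): the register offset returned by the
 * helper above is written through the PCIe core, e.g.
 *
 *	si_corereg(bus->sih, bus->sih->buscoreidx,
 *		dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
 *
 * The value written here is arbitrary; the real ring helpers decide what to
 * post in the doorbell.
 */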
396
397 static INLINE uint
398 dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
399 {
400 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
401 }
402
403 static INLINE uint
404 dhd_bus_db1_addr_get(struct dhd_bus *bus)
405 {
406 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
407 }
408
409 static INLINE uint
410 dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
411 {
412 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
413 }
414
415 static INLINE void
416 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
417 {
418 uint mask;
419
420 /*
421 * If multiple de-asserts, decrement ref and return
422 * Clear power request when only one pending
423 * so initial request is not removed unexpectedly
424 */
425 if (bus->pwr_req_ref > 1) {
426 bus->pwr_req_ref--;
427 return;
428 }
429
430 ASSERT(bus->pwr_req_ref == 1);
431
432 if (MULTIBP_ENAB(bus->sih)) {
433 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
434 mask = SRPWR_DMN1_ARMBPSD_MASK;
435 } else {
436 mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
437 }
438
439 si_srpwr_request(bus->sih, mask, 0);
440 bus->pwr_req_ref = 0;
441 }
442
443 static INLINE void
444 dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
445 {
446 unsigned long flags = 0;
447
448 DHD_GENERAL_LOCK(bus->dhd, flags);
449 _dhd_bus_pcie_pwr_req_clear_cmn(bus);
450 DHD_GENERAL_UNLOCK(bus->dhd, flags);
451 }
452
453 static INLINE void
454 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
455 {
456 _dhd_bus_pcie_pwr_req_clear_cmn(bus);
457 }
458
459 static INLINE void
460 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
461 {
462 uint mask, val;
463
464 /* If multiple request entries, increment reference and return */
465 if (bus->pwr_req_ref > 0) {
466 bus->pwr_req_ref++;
467 return;
468 }
469
470 ASSERT(bus->pwr_req_ref == 0);
471
472 if (MULTIBP_ENAB(bus->sih)) {
473 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
474 mask = SRPWR_DMN1_ARMBPSD_MASK;
475 val = SRPWR_DMN1_ARMBPSD_MASK;
476 } else {
477 mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
478 val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
479 }
480
481 si_srpwr_request(bus->sih, mask, val);
482
483 bus->pwr_req_ref = 1;
484 }
485
486 static INLINE void
487 dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
488 {
489 unsigned long flags = 0;
490
491 DHD_GENERAL_LOCK(bus->dhd, flags);
492 _dhd_bus_pcie_pwr_req_cmn(bus);
493 DHD_GENERAL_UNLOCK(bus->dhd, flags);
494 }
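/*
 * The request/clear pair above is reference counted. A typical (illustrative)
 * bracket around backplane register access looks like:
 *
 *	if (MULTIBP_ENAB(bus->sih))
 *		dhd_bus_pcie_pwr_req(bus);
 *	... access ARM/WL backplane registers ...
 *	if (MULTIBP_ENAB(bus->sih))
 *		dhd_bus_pcie_pwr_req_clear(bus);
 *
 * Every request must be balanced by a clear so that pwr_req_ref returns to 0.
 */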
495
496 static INLINE void
497 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
498 {
499 uint mask, val;
500
501 mask = SRPWR_DMN_ALL_MASK;
502 val = SRPWR_DMN_ALL_MASK;
503
504 si_srpwr_request(bus->sih, mask, val);
505 }
506
507 static INLINE void
508 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
509 {
510 unsigned long flags = 0;
511
512 DHD_GENERAL_LOCK(bus->dhd, flags);
513 _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
514 DHD_GENERAL_UNLOCK(bus->dhd, flags);
515 }
516
517 static INLINE void
518 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus *bus)
519 {
520 uint mask;
521
522 mask = SRPWR_DMN3_MACMAIN_MASK | SRPWR_DMN2_MACAUX_MASK;
523
524 si_srpwr_request(bus->sih, mask, 0);
525 }
526
527 static INLINE void
528 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
529 {
530 unsigned long flags = 0;
531
532 DHD_GENERAL_LOCK(bus->dhd, flags);
533 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus);
534 DHD_GENERAL_UNLOCK(bus->dhd, flags);
535 }
536
537 static INLINE void
538 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
539 {
540 _dhd_bus_pcie_pwr_req_cmn(bus);
541 }
542
543 bool
544 dhdpcie_chip_support_msi(dhd_bus_t *bus)
545 {
546 DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
547 __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
548 if (bus->sih->buscorerev <= 14 ||
549 si_chipid(bus->sih) == BCM4375_CHIP_ID ||
550 si_chipid(bus->sih) == BCM4361_CHIP_ID ||
551 si_chipid(bus->sih) == BCM4359_CHIP_ID) {
552 return FALSE;
553 } else {
554 return TRUE;
555 }
556 }
557
558 /**
559 * Called once for each hardware (dongle) instance that this DHD manages.
560 *
561 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
562 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
563 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
564 *
565 * 'tcm' is the *host* virtual address at which tcm is mapped.
566 */
567 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
568 volatile char *regs, volatile char *tcm, void *pci_dev)
569 {
570 dhd_bus_t *bus = NULL;
571 int ret = BCME_OK;
572
573 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
574
575 do {
576 if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
577 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
578 ret = BCME_NORESOURCE;
579 break;
580 }
581
582 bus->regs = regs;
583 bus->tcm = tcm;
584 bus->osh = osh;
585 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
586 bus->dev = (struct pci_dev *)pci_dev;
587
588 dll_init(&bus->flowring_active_list);
589 #ifdef IDLE_TX_FLOW_MGMT
590 bus->active_list_last_process_ts = OSL_SYSUPTIME();
591 #endif /* IDLE_TX_FLOW_MGMT */
592
593 /* Attach pcie shared structure */
594 if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
595 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
596 ret = BCME_NORESOURCE;
597 break;
598 }
599
600 /* dhd_common_init(osh); */
601
602 if (dhdpcie_dongle_attach(bus)) {
603 DHD_ERROR(("%s: dhdpcie_dongle_attach failed\n", __FUNCTION__));
604 ret = BCME_NOTREADY;
605 break;
606 }
607
608 /* software resources */
609 if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
610 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
611 ret = BCME_NORESOURCE;
612 break;
613 }
614 bus->dhd->busstate = DHD_BUS_DOWN;
615 bus->db1_for_mb = TRUE;
616 bus->dhd->hang_report = TRUE;
617 bus->use_mailbox = FALSE;
618 bus->use_d0_inform = FALSE;
619 bus->intr_enabled = FALSE;
620 bus->flr_force_fail = FALSE;
621 /* update the dma indices if set through module parameter. */
622 if (dma_ring_indices != 0) {
623 dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
624 }
625 /* update h2d phase support if set through module parameter */
626 bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
627 /* update force trap on bad phase if set through module parameter */
628 bus->dhd->force_dongletrap_on_bad_h2d_phase =
629 force_trap_bad_h2d_phase ? TRUE : FALSE;
630 #ifdef IDLE_TX_FLOW_MGMT
631 bus->enable_idle_flowring_mgmt = FALSE;
632 #endif /* IDLE_TX_FLOW_MGMT */
633 bus->irq_registered = FALSE;
634
635 #ifdef DHD_MSI_SUPPORT
636 bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
637 PCIE_MSI : PCIE_INTX;
638 #else
639 bus->d2h_intr_method = PCIE_INTX;
640 #endif /* DHD_MSI_SUPPORT */
641
642 DHD_TRACE(("%s: EXIT SUCCESS\n",
643 __FUNCTION__));
644 g_dhd_bus = bus;
645 *bus_ptr = bus;
646 return ret;
647 } while (0);
648
649 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
650
651 if (bus && bus->pcie_sh) {
652 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
653 }
654
655 if (bus) {
656 MFREE(osh, bus, sizeof(dhd_bus_t));
657 }
658
659 return ret;
660 }
661
662 bool
663 dhd_bus_skip_clm(dhd_pub_t *dhdp)
664 {
665 switch (dhd_bus_chip_id(dhdp)) {
666 case BCM4369_CHIP_ID:
667 return TRUE;
668 default:
669 return FALSE;
670 }
671 }
672
673 uint
674 dhd_bus_chip(struct dhd_bus *bus)
675 {
676 ASSERT(bus->sih != NULL);
677 return bus->sih->chip;
678 }
679
680 uint
681 dhd_bus_chiprev(struct dhd_bus *bus)
682 {
683 ASSERT(bus);
684 ASSERT(bus->sih != NULL);
685 return bus->sih->chiprev;
686 }
687
688 void *
689 dhd_bus_pub(struct dhd_bus *bus)
690 {
691 return bus->dhd;
692 }
693
694 void *
695 dhd_bus_sih(struct dhd_bus *bus)
696 {
697 return (void *)bus->sih;
698 }
699
700 void *
701 dhd_bus_txq(struct dhd_bus *bus)
702 {
703 return &bus->txq;
704 }
705
706 /** Get Chip ID version */
707 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
708 {
709 dhd_bus_t *bus = dhdp->bus;
710 return bus->sih->chip;
711 }
712
713 /** Get Chip Rev ID version */
714 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
715 {
716 dhd_bus_t *bus = dhdp->bus;
717 return bus->sih->chiprev;
718 }
719
720 /** Get Chip Pkg ID version */
721 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
722 {
723 dhd_bus_t *bus = dhdp->bus;
724 return bus->sih->chippkg;
725 }
726
727 /* Log the latest DPC schedule time */
728 void
729 dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
730 {
731 dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
732 }
733
734 /* Check if there are DPC scheduling errors */
735 bool
736 dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
737 {
738 dhd_bus_t *bus = dhdp->bus;
739 bool sched_err;
740
741 if (bus->dpc_entry_time < bus->isr_exit_time) {
742 /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
743 sched_err = TRUE;
744 } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
745 /* Kernel doesn't schedule the DPC after DHD tries to reschedule
746 * the DPC due to pending work items to be processed.
747 */
748 sched_err = TRUE;
749 } else {
750 sched_err = FALSE;
751 }
752
753 if (sched_err) {
754 /* print out minimum timestamp info */
755 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
756 " isr_exit_time="SEC_USEC_FMT
757 " dpc_entry_time="SEC_USEC_FMT
758 "\ndpc_exit_time="SEC_USEC_FMT
759 " dpc_sched_time="SEC_USEC_FMT
760 " resched_dpc_time="SEC_USEC_FMT"\n",
761 GET_SEC_USEC(bus->isr_entry_time),
762 GET_SEC_USEC(bus->isr_exit_time),
763 GET_SEC_USEC(bus->dpc_entry_time),
764 GET_SEC_USEC(bus->dpc_exit_time),
765 GET_SEC_USEC(bus->dpc_sched_time),
766 GET_SEC_USEC(bus->resched_dpc_time)));
767 }
768
769 return sched_err;
770 }
771
772 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
773 uint32
774 dhdpcie_bus_intstatus(dhd_bus_t *bus)
775 {
776 uint32 intstatus = 0;
777 uint32 intmask = 0;
778
779 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
780 DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
781 return intstatus;
782 }
783 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
784 (bus->sih->buscorerev == 2)) {
785 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
786 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
787 intstatus &= I_MB;
788 } else {
789 /* this is a PCIE core register..not a config register... */
790 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
791
792 /* this is a PCIE core register..not a config register... */
793 intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
794 /* If the device is removed, intstatus and intmask read as 0xffffffff */
795 if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
796 DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
797 DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
798 __FUNCTION__, intstatus, intmask));
799 bus->is_linkdown = TRUE;
800 dhd_pcie_debug_info_dump(bus->dhd);
801 #ifdef CUSTOMER_HW4_DEBUG
802 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
803 #ifdef SUPPORT_LINKDOWN_RECOVERY
804 #ifdef CONFIG_ARCH_MSM
805 bus->no_cfg_restore = 1;
806 #endif /* CONFIG_ARCH_MSM */
807 #endif /* SUPPORT_LINKDOWN_RECOVERY */
808 bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
809 dhd_os_send_hang_message(bus->dhd);
810 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
811 #endif /* CUSTOMER_HW4_DEBUG */
812 return intstatus;
813 }
814
815 intstatus &= intmask;
816
817 /*
818 * The fourth argument to si_corereg is the "mask" of register fields to update
819 * and the fifth is the "value" to write. If we are interested in only a few
820 * fields of the mask bit map, we should not write back everything we read;
821 * by doing so, we might clear/ack interrupts that have not been handled yet.
822 */
823 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
824 intstatus);
825
826 intstatus &= bus->def_intmask;
827 }
828
829 return intstatus;
830 }
831
832 /**
833 * Name: dhdpcie_bus_isr
834 * Parameters:
835 * 1: IN int irq -- interrupt vector
836 * 2: IN void *arg -- handle to private data structure
837 * Return value:
838 * Status (TRUE or FALSE)
839 *
840 * Description:
841 * The interrupt service routine checks the status register,
842 * disables the interrupt, and queues the DPC if mailbox interrupts are raised.
843 */
844 int32
845 dhdpcie_bus_isr(dhd_bus_t *bus)
846 {
847 uint32 intstatus = 0;
848
849 do {
850 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
851 /* verify argument */
852 if (!bus) {
853 DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
854 break;
855 }
856
857 if (bus->dhd->dongle_reset) {
858 DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
859 break;
860 }
861
862 if (bus->dhd->busstate == DHD_BUS_DOWN) {
863 DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
864 break;
865 }
866
867 /* avoid processing of interrupts until msgbuf prot is inited */
868 if (!bus->intr_enabled) {
869 DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
870 break;
871 }
872
873 if (PCIECTO_ENAB(bus)) {
874 /* read pci_intstatus */
875 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
876
877 if (intstatus & PCI_CTO_INT_MASK) {
878 /* reset the backplane and clear the CTO,
879 * after which access through PCIe is recovered.
880 */
881 dhdpcie_cto_error_recovery(bus);
882 return TRUE;
883 }
884 }
885
886 if (bus->d2h_intr_method == PCIE_MSI) {
887 /* For MSI, as intstatus is cleared by firmware, no need to read */
888 goto skip_intstatus_read;
889 }
890
891 intstatus = dhdpcie_bus_intstatus(bus);
892
893 /* Check if the interrupt is ours or not */
894 if (intstatus == 0) {
895 DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
896 bus->non_ours_irq_count++;
897 bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
898 break;
899 }
900
901 /* save the intstatus */
902 /* read interrupt status register!! Status bits will be cleared in DPC !! */
903 bus->intstatus = intstatus;
904
905 /* return error for 0xFFFFFFFF */
906 if (intstatus == (uint32)-1) {
907 DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
908 __FUNCTION__, intstatus));
909 dhdpcie_disable_irq_nosync(bus);
910 break;
911 }
912
913 skip_intstatus_read:
914 /* Overall operation:
915 * - Mask further interrupts
916 * - Read/ack intstatus
917 * - Take action based on bits and state
918 * - Reenable interrupts (as per state)
919 */
920
921 /* Count the interrupt call */
922 bus->intrcount++;
923
924 bus->ipend = TRUE;
925
926 bus->isr_intr_disable_count++;
927
928 /* For Linux, MacOS, etc. (other than NDIS), instead of disabling the
929 * dongle interrupt by clearing the IntMask, disable the interrupt directly
930 * on the host side, so that the host will not receive any interrupts at
931 * all, even though the dongle keeps raising them
932 */
933 dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
934
935 bus->intdis = TRUE;
936
937 #if defined(PCIE_ISR_THREAD)
938
939 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
940 DHD_OS_WAKE_LOCK(bus->dhd);
941 while (dhd_bus_dpc(bus));
942 DHD_OS_WAKE_UNLOCK(bus->dhd);
943 #else
944 bus->dpc_sched = TRUE;
945 dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
946 #endif /* defined(PCIE_ISR_THREAD) */
947
948 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
949 return TRUE;
950
951 } while (0);
952
953 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
954 return FALSE;
955 }
956
957 int
958 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
959 {
960 uint32 cur_state = 0;
961 uint32 pm_csr = 0;
962 osl_t *osh = bus->osh;
963
964 pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
965 cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
966
967 if (cur_state == state) {
968 DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
969 return BCME_OK;
970 }
971
972 if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
973 return BCME_ERROR;
974
975 /* Validate the state transition
976 * if already in a lower power state, return error
977 */
978 if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
979 cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
980 cur_state > state) {
981 DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
982 return BCME_ERROR;
983 }
984
985 pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
986 pm_csr |= state;
987
988 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
989
990 /* need to wait for the specified mandatory pcie power transition delay time */
991 if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
992 cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
993 OSL_DELAY(DHDPCIE_PM_D3_DELAY);
994 else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
995 cur_state == PCIECFGREG_PM_CSR_STATE_D2)
996 OSL_DELAY(DHDPCIE_PM_D2_DELAY);
997
998 /* read back the power state and verify */
999 pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1000 cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1001 if (cur_state != state) {
1002 DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1003 __FUNCTION__, cur_state));
1004 return BCME_ERROR;
1005 } else {
1006 DHD_ERROR(("%s: power transition to %u success \n",
1007 __FUNCTION__, cur_state));
1008 }
1009
1010 return BCME_OK;
1011 }
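/*
 * Illustrative call (a sketch): moving the device to D3hot on a suspend path
 * would look roughly like
 *
 *	if (dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT) != BCME_OK)
 *		DHD_ERROR(("%s: D3hot transition failed\n", __FUNCTION__));
 *
 * Whether a given suspend path calls this helper directly is platform
 * dependent.
 */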
1012
1013 int
1014 dhdpcie_config_check(dhd_bus_t *bus)
1015 {
1016 uint32 i, val;
1017 int ret = BCME_ERROR;
1018
1019 for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1020 val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1021 if ((val & 0xFFFF) == VENDOR_BROADCOM) {
1022 ret = BCME_OK;
1023 break;
1024 }
1025 OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1026 }
1027
1028 return ret;
1029 }
1030
1031 int
1032 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1033 {
1034 uint32 i;
1035 osl_t *osh = bus->osh;
1036
1037 if (BCME_OK != dhdpcie_config_check(bus)) {
1038 return BCME_ERROR;
1039 }
1040
1041 for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1042 OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1043 }
1044 OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1045
1046 if (restore_pmcsr)
1047 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1048 sizeof(uint32), bus->saved_config.pmcsr);
1049
1050 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1051 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1052 bus->saved_config.msi_addr0);
1053 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1054 sizeof(uint32), bus->saved_config.msi_addr1);
1055 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1056 sizeof(uint32), bus->saved_config.msi_data);
1057
1058 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1059 sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1060 OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1061 sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1062 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1063 sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1064 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1065 sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1066
1067 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1068 sizeof(uint32), bus->saved_config.l1pm0);
1069 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1070 sizeof(uint32), bus->saved_config.l1pm1);
1071
1072 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1073 bus->saved_config.bar0_win);
1074 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, sizeof(uint32),
1075 bus->saved_config.bar1_win);
1076
1077 return BCME_OK;
1078 }
1079
1080 int
1081 dhdpcie_config_save(dhd_bus_t *bus)
1082 {
1083 uint32 i;
1084 osl_t *osh = bus->osh;
1085
1086 if (BCME_OK != dhdpcie_config_check(bus)) {
1087 return BCME_ERROR;
1088 }
1089
1090 for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1091 bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1092 }
1093
1094 bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1095
1096 bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1097 sizeof(uint32));
1098 bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1099 sizeof(uint32));
1100 bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1101 sizeof(uint32));
1102 bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1103 sizeof(uint32));
1104
1105 bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1106 PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1107 bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1108 PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1109 bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1110 PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1111 bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1112 PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1113
1114 bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1115 sizeof(uint32));
1116 bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1117 sizeof(uint32));
1118
1119 bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1120 sizeof(uint32));
1121 bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1122 sizeof(uint32));
1123
1124 return BCME_OK;
1125 }
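/*
 * dhdpcie_config_save() and dhdpcie_config_restore() are meant to be used as a
 * pair around events that can clobber PCIe config space (e.g. link down/up or
 * a deep power cycle). An illustrative sequence:
 *
 *	dhdpcie_config_save(bus);
 *	... link recovery / power cycle ...
 *	dhdpcie_config_restore(bus, FALSE);
 *
 * Passing TRUE for restore_pmcsr additionally rewrites the saved PM_CSR value.
 */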
1126
1127 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1128 dhd_pub_t *link_recovery = NULL;
1129 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1130
1131 static void
1132 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1133 {
1134 uint buscorerev = bus->sih->buscorerev;
1135 bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1136 bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1137 bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1138 bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1139 if (buscorerev < 64) {
1140 bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1141 }
1142 }
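/*
 * def_intmask computed above is the set of D2H mailbox/doorbell bits the host
 * cares about: dhdpcie_bus_intr_enable()/_disable() program it into the PCIe
 * mailbox mask register, and dhdpcie_bus_intstatus() uses it to ack only the
 * bits the DPC will actually handle.
 */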
1143
1144 void
1145 dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
1146 {
1147 uint32 linkctrl_rc, linkctrl_ep;
1148 linkctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1149 FALSE, 0);
1150 linkctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1151 FALSE, 0);
1152 DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__,
1153 (enable ? "ENABLE" : "DISABLE"), linkctrl_rc, linkctrl_ep));
1154 if (enable) {
1155 /* Enable only L1 ASPM (bit 1) first RC then EP */
1156 dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1157 TRUE, (linkctrl_rc | PCIE_ASPM_L1_ENAB));
1158 dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1159 TRUE, (linkctrl_ep | PCIE_ASPM_L1_ENAB));
1160 } else {
1161 /* Disable complete ASPM (bit 1 and bit 0) first EP then RC */
1162 dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1163 TRUE, (linkctrl_ep & (~PCIE_ASPM_ENAB)));
1164 dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1165 TRUE, (linkctrl_rc & (~PCIE_ASPM_ENAB)));
1166 }
1167 linkctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1168 FALSE, 0);
1169 linkctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1170 FALSE, 0);
1171 DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__,
1172 (enable ? "ENABLE" : "DISABLE"), linkctrl_rc, linkctrl_ep));
1173 }
1174
1175 void
1176 dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
1177 {
1178 uint32 l1ssctrl_rc, l1ssctrl_ep;
1179
1180 /* Disable ASPM of RC and EP */
1181 dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1182
1183 /* Extended Capability Reg */
1184 l1ssctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1185 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1186 l1ssctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1187 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1188 DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__,
1189 (enable ? "ENABLE" : "DISABLE"), l1ssctrl_rc, l1ssctrl_ep));
1190 if (enable) {
1191 /* Enable RC then EP */
1192 dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1193 TRUE, TRUE, (l1ssctrl_rc | PCIE_EXT_L1SS_ENAB));
1194 dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1195 TRUE, TRUE, (l1ssctrl_ep | PCIE_EXT_L1SS_ENAB));
1196 } else {
1197 /* Disable EP then RC */
1198 dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1199 TRUE, TRUE, (l1ssctrl_ep & (~PCIE_EXT_L1SS_ENAB)));
1200 dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1201 TRUE, TRUE, (l1ssctrl_rc & (~PCIE_EXT_L1SS_ENAB)));
1202 }
1203 l1ssctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1204 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1205 l1ssctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1206 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1207 DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__,
1208 (enable ? "ENABLE" : "DISABLE"), l1ssctrl_rc, l1ssctrl_ep));
1209
1210 /* Enable ASPM of RC and EP */
1211 dhd_bus_aspm_enable_rc_ep(bus, TRUE);
1212 }
1213
1214 void
1215 dhdpcie_dongle_reset(dhd_bus_t *bus)
1216 {
1217 #ifndef DHD_USE_BP_RESET
1218 uint32 wd_en = 0;
1219 #endif /* !DHD_USE_BP_RESET */
1220
1221 /* if the pcie link is down, watchdog reset
1222 * should not be done, as it may hang
1223 */
1224 if (bus->is_linkdown) {
1225 return;
1226 }
1227
1228 #ifdef DHD_USE_BP_RESET
1229 dhd_bus_perform_bp_reset(bus);
1230 #else
1231 wd_en = (bus->sih->buscorerev == 66) ? WD_SSRESET_PCIE_F0_EN :
1232 (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1233 pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1234 #endif /* DHD_USE_BP_RESET */
1235 }
1236
1237 static bool
1238 dhdpcie_dongle_attach(dhd_bus_t *bus)
1239 {
1240 osl_t *osh = bus->osh;
1241 volatile void *regsva = (volatile void*)bus->regs;
1242 uint16 devid;
1243 uint32 val;
1244 sbpcieregs_t *sbpcieregs;
1245 bool dongle_isolation;
1246
1247 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1248
1249 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1250 link_recovery = bus->dhd;
1251 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1252
1253 bus->alp_only = TRUE;
1254 bus->sih = NULL;
1255
1256 /* Check PCIe bus status by reading the configuration space */
1257 val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1258 if ((val & 0xFFFF) != VENDOR_BROADCOM) {
1259 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1260 goto fail;
1261 }
1262 devid = (val >> 16) & 0xFFFF;
1263 bus->cl_devid = devid;
1264
1265 /* Set bar0 window to si_enum_base */
1266 dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1267
1268 /*
1269 * Check the PCI_SPROM_CONTROL register to prevent invalid address access
1270 * after switching the address space from PCI_BUS to SI_BUS.
1271 */
1272 val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1273 if (val == 0xffffffff) {
1274 DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1275 goto fail;
1276 }
1277
1278 /* si_attach() will provide an SI handle and scan the backplane */
1279 if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1280 &bus->vars, &bus->varsz))) {
1281 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1282 goto fail;
1283 }
1284
1285 if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1286 DHD_ERROR(("Enable CTO\n"));
1287 bus->cto_enable = TRUE;
1288 dhdpcie_cto_init(bus, bus->cto_enable);
1289 /*
1290 * HW JIRA - CRWLPCIEGEN2-672
1291 * Producer Index Feature which is used by F1 gets reset on F0 FLR
1292 * fixed in REV68
1293 */
1294 if (bus->sih->buscorerev == 66) {
1295 dhdpcie_ssreset_dis_enum_rst(bus);
1296 }
1297
1298 /* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset pwr_req_ref:
1299 * dhdpcie_bus_release_dongle() --> si_detach()
1300 * dhdpcie_dongle_attach() --> si_attach()
1301 */
1302 bus->pwr_req_ref = 0;
1303 }
1304
1305 if (MULTIBP_ENAB(bus->sih)) {
1306 dhd_bus_pcie_pwr_req_nolock(bus);
1307 }
1308
1309 /* Olympic EFI requirement - stop driver load if FW is already running.
1310 * This must be done here, before pcie_watchdog_reset, because
1311 * pcie_watchdog_reset will put the ARM back into the halt state
1312 */
1313 if (!dhdpcie_is_arm_halted(bus)) {
1314 DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
1315 __FUNCTION__));
1316 goto fail;
1317 }
1318
1319 BCM_REFERENCE(dongle_isolation);
1320
1321 /* Dongle reset during power on can be invoked in case of module type driver */
1322 if (dhd_download_fw_on_driverload) {
1323 /* Enable CLKREQ# */
1324 dhdpcie_clkreq(bus->osh, 1, 1);
1325
1326 /*
1327 * bus->dhd will be NULL when this is called from dhd_bus_attach, so the reset is
1328 * done without checking the dongle_isolation flag; but when called via some other
1329 * path, such as a quiesce FLR, the watchdog_reset should be issued based on the
1330 * dongle_isolation flag.
1331 */
1332 if (bus->dhd == NULL) {
1333 /* dhd_attach not yet happened, do watchdog reset */
1334 dongle_isolation = FALSE;
1335 } else {
1336 dongle_isolation = bus->dhd->dongle_isolation;
1337 }
1338 /*
1339 * Issue a CC watchdog to reset all the cores on the chip - similar to rmmod dhd.
1340 * This is required to avoid spurious interrupts to the host and to bring the
1341 * dongle back to a sane state (on host soft-reboot / watchdog-reboot).
1342 */
1343 if (dongle_isolation == FALSE) {
1344 dhdpcie_dongle_reset(bus);
1345
1346 }
1347 }
1348
1349 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1350 sbpcieregs = (sbpcieregs_t*)(bus->regs);
1351
1352 /* WAR where the BAR1 window may not be sized properly */
1353 W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1354 val = R_REG(osh, &sbpcieregs->configdata);
1355 W_REG(osh, &sbpcieregs->configdata, val);
1356
1357 /* Get info on the ARM and SOCRAM cores... */
1358 /* Should really be qualified by device id */
1359 if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1360 (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1361 (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1362 (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1363 bus->armrev = si_corerev(bus->sih);
1364 } else {
1365 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1366 goto fail;
1367 }
1368
1369 if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1370 /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1371 * adjusted.
1372 */
1373 if (!bus->ramsize_adjusted) {
1374 if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1375 DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1376 goto fail;
1377 }
1378 switch ((uint16)bus->sih->chip) {
1379 default:
1380 /* also populate base address */
1381 bus->dongle_ram_base = CA7_4365_RAM_BASE;
1382 bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1383 break;
1384 }
1385 }
1386 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1387 if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1388 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1389 goto fail;
1390 }
1391 } else {
1392 /* cr4 has a different way to find the RAM size from TCM's */
1393 if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1394 DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1395 goto fail;
1396 }
1397 /* also populate base address */
1398 switch ((uint16)bus->sih->chip) {
1399 case BCM4339_CHIP_ID:
1400 case BCM4335_CHIP_ID:
1401 bus->dongle_ram_base = CR4_4335_RAM_BASE;
1402 break;
1403 case BCM4358_CHIP_ID:
1404 case BCM4354_CHIP_ID:
1405 case BCM43567_CHIP_ID:
1406 case BCM43569_CHIP_ID:
1407 case BCM4350_CHIP_ID:
1408 case BCM43570_CHIP_ID:
1409 bus->dongle_ram_base = CR4_4350_RAM_BASE;
1410 break;
1411 case BCM4360_CHIP_ID:
1412 bus->dongle_ram_base = CR4_4360_RAM_BASE;
1413 break;
1414
1415 case BCM4364_CHIP_ID:
1416 bus->dongle_ram_base = CR4_4364_RAM_BASE;
1417 break;
1418
1419 CASE_BCM4345_CHIP:
1420 bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
1421 ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
1422 break;
1423 CASE_BCM43602_CHIP:
1424 bus->dongle_ram_base = CR4_43602_RAM_BASE;
1425 break;
1426 case BCM4349_CHIP_GRPID:
1427 /* RAM base changed from 4349c0 (revid=9) onwards */
1428 bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
1429 CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1430 break;
1431 case BCM4347_CHIP_ID:
1432 case BCM4357_CHIP_ID:
1433 case BCM4361_CHIP_ID:
1434 bus->dongle_ram_base = CR4_4347_RAM_BASE;
1435 break;
1436 case BCM4375_CHIP_ID:
1437 case BCM4369_CHIP_ID:
1438 bus->dongle_ram_base = CR4_4369_RAM_BASE;
1439 break;
1440 default:
1441 bus->dongle_ram_base = 0;
1442 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1443 __FUNCTION__, bus->dongle_ram_base));
1444 }
1445 }
1446 bus->ramsize = bus->orig_ramsize;
1447 if (dhd_dongle_memsize)
1448 dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1449
1450 if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1451 DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1452 __FUNCTION__, bus->ramsize, bus->ramsize));
1453 goto fail;
1454 }
1455
1456 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1457 bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1458
1459 bus->srmemsize = si_socram_srmem_size(bus->sih);
1460
1461 dhdpcie_bus_intr_init(bus);
1462
1463 /* Set the poll and/or interrupt flags */
1464 bus->intr = (bool)dhd_intr;
1465 #ifdef DHD_DISABLE_ASPM
1466 dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1467 #endif /* DHD_DISABLE_ASPM */
1468
1469 bus->idma_enabled = TRUE;
1470 bus->ifrm_enabled = TRUE;
1471 DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1472
1473 if (MULTIBP_ENAB(bus->sih)) {
1474 dhd_bus_pcie_pwr_req_clear_nolock(bus);
1475 }
1476
1477 bus->force_bt_quiesce = TRUE;
1478
1479 return 0;
1480
1481 fail:
1482 if (bus->sih != NULL) {
1483 if (MULTIBP_ENAB(bus->sih)) {
1484 dhd_bus_pcie_pwr_req_clear_nolock(bus);
1485 }
1486 /* for EFI even if there is an error, load still succeeds
1487 * so si_detach should not be called here, it is called during unload
1488 */
1489 si_detach(bus->sih);
1490 bus->sih = NULL;
1491 }
1492 DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1493 return -1;
1494 }
1495
1496 int
1497 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1498 {
1499 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1500 return 0;
1501 }
1502 int
1503 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1504 {
1505 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1506 return 0;
1507 }
1508
1509 /* Non atomic function, caller should hold appropriate lock */
1510 void
1511 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1512 {
1513 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1514 if (bus && bus->sih && !bus->is_linkdown) {
1515 /* Skip after receiving D3 ACK */
1516 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1517 return;
1518 }
1519 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1520 (bus->sih->buscorerev == 4)) {
1521 dhpcie_bus_unmask_interrupt(bus);
1522 } else {
1523 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1524 bus->def_intmask, bus->def_intmask);
1525 }
1526 }
1527 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1528 }
1529
1530 /* Non atomic function, caller should hold appropriate lock */
1531 void
1532 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1533 {
1534 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1535 if (bus && bus->sih && !bus->is_linkdown) {
1536 /* Skip after receiving D3 ACK */
1537 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1538 return;
1539 }
1540 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1541 (bus->sih->buscorerev == 4)) {
1542 dhpcie_bus_mask_interrupt(bus);
1543 } else {
1544 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1545 bus->def_intmask, 0);
1546 }
1547 }
1548 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1549 }
1550
1551 /*
1552 * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress
1553 * to other bus user contexts like Tx, Rx, IOVAR, WD, etc. and waits for those contexts
1554 * to exit gracefully. Before marking busstate as busy, all bus usage contexts check
1555 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
1556 * they exit right there without marking dhd_bus_busy_state as BUSY.
1557 */
1558 void
1559 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1560 {
1561 unsigned long flags;
1562 int timeleft;
1563
1564 #ifdef DHD_PCIE_RUNTIMEPM
1565 dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
1566 #endif /* DHD_PCIE_RUNTIMEPM */
1567
1568 dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1569 if (dhdp->dhd_watchdog_ms_backup) {
1570 DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1571 __FUNCTION__));
1572 dhd_os_wd_timer(dhdp, 0);
1573 }
1574 if (dhdp->busstate != DHD_BUS_DOWN) {
1575 DHD_GENERAL_LOCK(dhdp, flags);
1576 dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1577 DHD_GENERAL_UNLOCK(dhdp, flags);
1578 }
1579
1580 timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1581 if ((timeleft == 0) || (timeleft == 1)) {
1582 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1583 __FUNCTION__, dhdp->dhd_bus_busy_state));
1584 ASSERT(0);
1585 }
1586
1587 return;
1588 }
1589
1590 static void
1591 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1592 {
1593 unsigned long flags;
1594 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1595
1596 DHD_GENERAL_LOCK(bus->dhd, flags);
1597 bus->dhd->busstate = DHD_BUS_DOWN;
1598 DHD_GENERAL_UNLOCK(bus->dhd, flags);
1599
1600 dhd_os_sdlock(bus->dhd);
1601
1602 if (bus->sih && !bus->dhd->dongle_isolation) {
1603 if (bus->sih->buscorerev == 66) {
1604 dhd_bus_pcie_pwr_req_reload_war(bus);
1605 }
1606
1607 /* There is an issue on Brix Android where insmod fails after rmmod */
1608
1609 /* if the pcie link is down, watchdog reset
1610 * should not be done, as it may hang
1611 */
1612
1613 if (!bus->is_linkdown) {
1614 dhdpcie_dongle_reset(bus);
1615 }
1616
1617 bus->dhd->is_pcie_watchdog_reset = TRUE;
1618 }
1619
1620 dhd_os_sdunlock(bus->dhd);
1621
1622 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1623 }
1624
1625 void
1626 dhd_init_bus_lock(dhd_bus_t *bus)
1627 {
1628 if (!bus->bus_lock) {
1629 bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
1630 }
1631 }
1632
1633 void
1634 dhd_deinit_bus_lock(dhd_bus_t *bus)
1635 {
1636 if (bus->bus_lock) {
1637 dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
1638 bus->bus_lock = NULL;
1639 }
1640 }
1641
1642 /** Detach and free everything */
1643 void
1644 dhdpcie_bus_release(dhd_bus_t *bus)
1645 {
1646 bool dongle_isolation = FALSE;
1647 osl_t *osh = NULL;
1648 unsigned long flags_bus;
1649
1650 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1651
1652 if (bus) {
1653
1654 osh = bus->osh;
1655 ASSERT(osh);
1656
1657 if (bus->dhd) {
1658 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
1659 debugger_close();
1660 #endif /* DEBUGGER || DHD_DSCOPE */
1661 dhdpcie_advertise_bus_cleanup(bus->dhd);
1662 dongle_isolation = bus->dhd->dongle_isolation;
1663 bus->dhd->is_pcie_watchdog_reset = FALSE;
1664 dhdpcie_bus_remove_prep(bus);
1665
1666 if (bus->intr) {
1667 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
1668 dhdpcie_bus_intr_disable(bus);
1669 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
1670 dhdpcie_free_irq(bus);
1671 }
1672 dhd_deinit_bus_lock(bus);
1673 /**
1674 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to
1675 * access dongle registers.
1676 * dhd_detach communicates with the dongle to delete flowrings etc.,
1677 * so dhdpcie_bus_release_dongle must be called only after dhd_detach.
1678 */
1679 dhd_detach(bus->dhd);
1680 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
1681 dhd_free(bus->dhd);
1682 bus->dhd = NULL;
1683 }
1684 /* unmap the regs and tcm here!! */
1685 if (bus->regs) {
1686 dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
1687 bus->regs = NULL;
1688 }
1689 if (bus->tcm) {
1690 dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
1691 bus->tcm = NULL;
1692 }
1693
1694 dhdpcie_bus_release_malloc(bus, osh);
1695 /* Detach pcie shared structure */
1696 if (bus->pcie_sh) {
1697 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
1698 bus->pcie_sh = NULL;
1699 }
1700
1701 if (bus->console.buf != NULL) {
1702 MFREE(osh, bus->console.buf, bus->console.bufsize);
1703 }
1704
1705 /* Finally free bus info */
1706 MFREE(osh, bus, sizeof(dhd_bus_t));
1707
1708 g_dhd_bus = NULL;
1709 }
1710
1711 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1712 } /* dhdpcie_bus_release */
1713
1714 void
1715 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
1716 {
1717 DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
1718 bus->dhd, bus->dhd->dongle_reset));
1719
1720 if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
1721 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1722 return;
1723 }
1724
1725 if (bus->is_linkdown) {
1726 DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
1727 return;
1728 }
1729
1730 if (bus->sih) {
1731
1732 if (!dongle_isolation &&
1733 (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
1734 dhdpcie_dongle_reset(bus);
1735 }
1736
1737 if (bus->ltrsleep_on_unload) {
1738 si_corereg(bus->sih, bus->sih->buscoreidx,
1739 OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
1740 }
1741
1742 if (bus->sih->buscorerev == 13)
1743 pcie_serdes_iddqdisable(bus->osh, bus->sih,
1744 (sbpcieregs_t *) bus->regs);
1745
1746 if (dhd_download_fw_on_driverload) {
1747 /* Disable CLKREQ# */
1748 dhdpcie_clkreq(bus->osh, 1, 0);
1749 }
1750
1751 if (bus->sih != NULL) {
1752 si_detach(bus->sih);
1753 bus->sih = NULL;
1754 }
1755 if (bus->vars && bus->varsz)
1756 MFREE(osh, bus->vars, bus->varsz);
1757 bus->vars = NULL;
1758 }
1759
1760 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1761 }
1762
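/** PCIe config space read; counterpart of the config write below */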
1763 uint32
1764 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
1765 {
1766 uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
1767 return data;
1768 }
1769
1770 /** 32 bit config write */
1771 void
1772 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
1773 {
1774 OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
1775 }
1776
1777 void
1778 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
1779 {
1780 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
1781 }
1782
1783 void
1784 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
1785 {
1786 int32 min_size = DONGLE_MIN_MEMSIZE;
1787 /* Restrict the memsize to user specified limit */
1788 DHD_ERROR(("user: Restricting the dongle ram size to %d, min accepted %d\n",
1789 dhd_dongle_memsize, min_size));
1790 if ((dhd_dongle_memsize > min_size) &&
1791 (dhd_dongle_memsize < (int32)bus->orig_ramsize))
1792 bus->ramsize = dhd_dongle_memsize;
1793 }
1794
1795 void
1796 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
1797 {
1798 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1799
1800 if (bus->dhd && bus->dhd->dongle_reset)
1801 return;
1802
1803 if (bus->vars && bus->varsz) {
1804 MFREE(osh, bus->vars, bus->varsz);
1805 bus->vars = NULL;
1806 }
1807
1808 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1809 return;
1810
1811 }
1812
1813 /** Stop bus module: clear pending frames, disable data flow */
1814 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
1815 {
1816 unsigned long flags, flags_bus;
1817
1818 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1819
1820 if (!bus->dhd)
1821 return;
1822
1823 if (bus->dhd->busstate == DHD_BUS_DOWN) {
1824 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
1825 goto done;
1826 }
1827
1828 DHD_DISABLE_RUNTIME_PM(bus->dhd);
1829
1830 DHD_GENERAL_LOCK(bus->dhd, flags);
1831 bus->dhd->busstate = DHD_BUS_DOWN;
1832 DHD_GENERAL_UNLOCK(bus->dhd, flags);
1833
1834 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1835 atomic_set(&bus->dhd->block_bus, TRUE);
1836 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1837
1838 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
1839 dhdpcie_bus_intr_disable(bus);
1840 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
1841
1842 if (!bus->is_linkdown) {
1843 uint32 status;
1844 status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
1845 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
1846 }
1847
1848 if (!dhd_download_fw_on_driverload) {
1849 dhd_dpc_kill(bus->dhd);
1850 }
1851
1852 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1853 pm_runtime_disable(dhd_bus_to_dev(bus));
1854 pm_runtime_set_suspended(dhd_bus_to_dev(bus));
1855 pm_runtime_enable(dhd_bus_to_dev(bus));
1856 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1857
1858 /* Clear rx control and wake any waiters */
1859 dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
1860 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
1861
1862 done:
1863 return;
1864 }
1865
1866 /**
1867 * Watchdog timer function.
1868 * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
1869 */
1870 bool dhd_bus_watchdog(dhd_pub_t *dhd)
1871 {
1872 unsigned long flags;
1873 dhd_bus_t *bus = dhd->bus;
1874
1875 DHD_GENERAL_LOCK(dhd, flags);
1876 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
1877 DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
1878 DHD_GENERAL_UNLOCK(dhd, flags);
1879 return FALSE;
1880 }
1881 DHD_BUS_BUSY_SET_IN_WD(dhd);
1882 DHD_GENERAL_UNLOCK(dhd, flags);
1883
1884 #ifdef DHD_PCIE_RUNTIMEPM
1885 dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
1886 #endif /* DHD_PCIE_RUNTIMEPM */
1887
1888 /* Poll for console output periodically */
1889 if (dhd->busstate == DHD_BUS_DATA &&
1890 dhd->dhd_console_ms != 0 &&
1891 bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
1892 bus->console.count += dhd_watchdog_ms;
1893 if (bus->console.count >= dhd->dhd_console_ms) {
1894 bus->console.count -= dhd->dhd_console_ms;
1895
1896 if (MULTIBP_ENAB(bus->sih)) {
1897 dhd_bus_pcie_pwr_req(bus);
1898 }
1899
1900 /* Make sure backplane clock is on */
1901 if (dhdpcie_bus_readconsole(bus) < 0) {
1902 dhd->dhd_console_ms = 0; /* On error, stop trying */
1903 }
1904
1905 if (MULTIBP_ENAB(bus->sih)) {
1906 dhd_bus_pcie_pwr_req_clear(bus);
1907 }
1908 }
1909 }
1910
1911 DHD_GENERAL_LOCK(dhd, flags);
1912 DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
1913 dhd_os_busbusy_wake(dhd);
1914 DHD_GENERAL_UNLOCK(dhd, flags);
1915
1916 return TRUE;
1917 } /* dhd_bus_watchdog */
1918
1919 #if defined(SUPPORT_MULTIPLE_REVISION)
1920 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
1921 defined(SUPPORT_BCM4359_MIXED_MODULES)
1922 #define VENDOR_MURATA "murata"
1923 #define VENDOR_WISOL "wisol"
1924 #define VNAME_DELIM "_"
1925 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
1926
1927 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
1928
1929 #define MAX_EXTENSION 20
1930 #define MODULE_BCM4361_INDEX 3
1931 #define CHIP_REV_A0 1
1932 #define CHIP_REV_A1 2
1933 #define CHIP_REV_B0 3
1934 #define CHIP_REV_B1 4
1935 #define CHIP_REV_B2 5
1936 #define CHIP_REV_C0 6
1937 #define BOARD_TYPE_EPA 0x080f
1938 #define BOARD_TYPE_IPA 0x0827
1939 #define BOARD_TYPE_IPA_OLD 0x081a
1940 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
1941 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
1942 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
1943 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
1944 #define MAX_VID_LEN 8
1945 #define CIS_TUPLE_HDR_LEN 2
1946 #define CIS_TUPLE_START_ADDRESS 0x18011110
1947 #define CIS_TUPLE_END_ADDRESS 0x18011167
1948 #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
1949 + 1) / sizeof(uint32))
1950 #define CIS_TUPLE_TAG_START 0x80
1951 #define CIS_TUPLE_TAG_VENDOR 0x81
1952 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
1953 #define CIS_TUPLE_TAG_LENGTH 1
1954 #define NVRAM_FEM_MURATA "_murata"
1955 #define CID_FEM_MURATA "_mur_"
1956
1957 typedef struct cis_tuple_format {
1958 uint8 id;
1959 uint8 len; /* total length of tag and data */
1960 uint8 tag;
1961 uint8 data[1];
1962 } cis_tuple_format_t;
1963
1964 typedef struct {
1965 char cid_ext[MAX_EXTENSION];
1966 char nvram_ext[MAX_EXTENSION];
1967 char fw_ext[MAX_EXTENSION];
1968 } naming_info_t;
1969
1970 naming_info_t bcm4361_naming_table[] = {
1971 { {""}, {""}, {""} },
1972 { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
1973 { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
1974 { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
1975 { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
1976 { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
1977 { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
1978 { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
1979 { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
1980 { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
1981 { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
1982 { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
1983 { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
1984 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
1985 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
1986 { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
1987 { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
1988 { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
1989 { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
1990 { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
1991 { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
1992 { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
1993 { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
1994 { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
1995 { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
1996 { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
1997 { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
1998 { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
1999 { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2000 { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2001 { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2002 { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2003 { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2004 { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2005 { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2006 };
2007
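/*
 * Looks up the naming table entry whose cid_ext is a prefix of the given module type string;
 * returns the first (empty) entry when no match is found.
 */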
2008 static naming_info_t *
2009 dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2010 {
2011 int index_found = 0, i = 0;
2012
2013 if (module_type && strlen(module_type) > 0) {
2014 for (i = 1; i < table_size; i++) {
2015 if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2016 index_found = i;
2017 break;
2018 }
2019 }
2020 }
2021
2022 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2023
2024 return &table[index_found];
2025 }
2026
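/*
 * Looks up the naming table entry matching a full CID info string: skips the leading
 * underscore-delimited fields up to MODULE_BCM4361_INDEX, then matches the remainder
 * against cid_ext. Returns the first (empty) entry when no match is found.
 */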
2027 static naming_info_t *
2028 dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2029 char *cid_info)
2030 {
2031 int index_found = 0, i = 0;
2032 char *ptr;
2033
2034 /* truncate extension */
2035 for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2036 ptr = bcmstrstr(ptr, "_");
2037 if (ptr) {
2038 ptr++;
2039 }
2040 }
2041
2042 for (i = 1; i < table_size && ptr; i++) {
2043 if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2044 index_found = i;
2045 break;
2046 }
2047 }
2048
2049 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2050
2051 return &table[index_found];
2052 }
2053
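/*
 * Programs the OTP control and PMU min-resource-mask backplane registers, then walks the
 * CIS tuples in OTP to extract the vendor ID and board type used for module identification.
 */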
2054 static int
2055 dhd_parse_board_information_bcm4361(dhd_bus_t *bus, int *boardtype,
2056 unsigned char *vid, int *vid_length)
2057 {
2058 int boardtype_backplane_addr[] = {
2059 0x18010324, /* OTP Control 1 */
2060 0x18012618, /* PMU min resource mask */
2061 };
2062 int boardtype_backplane_data[] = {
2063 0x00fa0000,
2064 0x0e4fffff /* Keep on ARMHTAVAIL */
2065 };
2066 int int_val = 0, i = 0;
2067 cis_tuple_format_t *tuple;
2068 int totlen, len;
2069 uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2070
2071 for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2072 /* Write new OTP and PMU configuration */
2073 if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2074 &boardtype_backplane_data[i], FALSE) != BCME_OK) {
2075 DHD_ERROR(("invalid size/addr combination\n"));
2076 return BCME_ERROR;
2077 }
2078
2079 if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2080 &int_val, TRUE) != BCME_OK) {
2081 DHD_ERROR(("invalid size/addr combination\n"));
2082 return BCME_ERROR;
2083 }
2084
2085 DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2086 __FUNCTION__, boardtype_backplane_addr[i], int_val));
2087 }
2088
2089 /* read tuple raw data */
2090 for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2091 if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2092 sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
2093 break;
2094 }
2095 }
2096
2097 totlen = i * sizeof(uint32);
2098 tuple = (cis_tuple_format_t *)raw_data;
2099
2100 /* check the first tuple has tag 'start' */
2101 if (tuple->id != CIS_TUPLE_TAG_START) {
2102 return BCME_ERROR;
2103 }
2104
2105 *vid_length = *boardtype = 0;
2106
2107 /* find tagged parameter */
2108 while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2109 (*vid_length == 0 || *boardtype == 0)) {
2110 len = tuple->len;
2111
2112 if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2113 (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2114 /* found VID */
2115 memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2116 *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2117 prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2118 }
2119 else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2120 (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2121 /* found boardtype */
2122 *boardtype = (int)tuple->data[0];
2123 prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2124 }
2125
2126 tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2127 totlen -= (len + CIS_TUPLE_HDR_LEN);
2128 }
2129
2130 if (*vid_length <= 0 || *boardtype <= 0) {
2131 DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2132 *vid_length, *boardtype));
2133 return BCME_ERROR;
2134 }
2135
2136 return BCME_OK;
2137
2138 }
2139
2140 static naming_info_t *
2141 dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2142 dhd_bus_t *bus, bool *is_murata_fem)
2143 {
2144 int board_type = 0, chip_rev = 0, vid_length = 0;
2145 unsigned char vid[MAX_VID_LEN];
2146 naming_info_t *info = &table[0];
2147 char *cid_info = NULL;
2148
2149 if (!bus || !bus->sih) {
2150 DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2151 return NULL;
2152 }
2153 chip_rev = bus->sih->chiprev;
2154
2155 if (dhd_parse_board_information_bcm4361(bus, &board_type, vid, &vid_length)
2156 != BCME_OK) {
2157 DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2158 return NULL;
2159 }
2160
2161 DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2162
2163 /* Only the A0 chipset is handled as an exception */
2164 if (chip_rev == CHIP_REV_A0) {
2165 if (board_type == BOARD_TYPE_EPA) {
2166 info = dhd_find_naming_info(table, table_size,
2167 DEFAULT_CIDINFO_FOR_EPA);
2168 } else if ((board_type == BOARD_TYPE_IPA) ||
2169 (board_type == BOARD_TYPE_IPA_OLD)) {
2170 info = dhd_find_naming_info(table, table_size,
2171 DEFAULT_CIDINFO_FOR_IPA);
2172 }
2173 } else {
2174 cid_info = dhd_get_cid_info(vid, vid_length);
2175 if (cid_info) {
2176 info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2177 if (strstr(cid_info, CID_FEM_MURATA)) {
2178 *is_murata_fem = TRUE;
2179 }
2180 }
2181 }
2182
2183 return info;
2184 }
2185 #endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
2186
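/*
 * Appends a 4358 chip-revision tag (e.g. "_a0", "_a1", "_a3") to the firmware path, and a
 * module/board-revision tag to the nvram path, based on the chip revision and module CIS.
 */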
2187 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2188 {
2189 uint32 chiprev;
2190 #if defined(SUPPORT_MULTIPLE_CHIPS)
2191 char chipver_tag[20] = "_4358";
2192 #else
2193 char chipver_tag[10] = {0, };
2194 #endif /* SUPPORT_MULTIPLE_CHIPS */
2195
2196 chiprev = dhd_bus_chiprev(bus);
2197 if (chiprev == 0) {
2198 DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2199 strcat(chipver_tag, "_a0");
2200 } else if (chiprev == 1) {
2201 DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2202 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2203 strcat(chipver_tag, "_a1");
2204 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2205 } else if (chiprev == 3) {
2206 DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2207 #if defined(SUPPORT_MULTIPLE_CHIPS)
2208 strcat(chipver_tag, "_a3");
2209 #endif /* SUPPORT_MULTIPLE_CHIPS */
2210 } else {
2211 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2212 }
2213
2214 strcat(fw_path, chipver_tag);
2215
2216 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2217 if (chiprev == 1 || chiprev == 3) {
2218 int ret = dhd_check_module_b85a();
2219 if ((chiprev == 1) && (ret < 0)) {
2220 memset(chipver_tag, 0x00, sizeof(chipver_tag));
2221 strcat(chipver_tag, "_b85");
2222 strcat(chipver_tag, "_a1");
2223 }
2224 }
2225
2226 DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2227 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2228
2229 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2230 if (system_rev >= 10) {
2231 DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
2232 strcat(chipver_tag, "_r10");
2233 }
2234 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2235 strcat(nv_path, chipver_tag);
2236
2237 return BCME_OK;
2238 }
2239
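/*
 * Appends a 4359 chip-revision tag ("_b0", "_b1" or "_c0") to the firmware and nvram paths;
 * for mixed-module builds the nvram tag may also carry the module vendor name.
 */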
2240 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2241 {
2242 uint32 chip_ver;
2243 char chipver_tag[10] = {0, };
2244 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2245 defined(SUPPORT_BCM4359_MIXED_MODULES)
2246 char chipver_tag_nv[20] = {0, };
2247 int module_type = -1;
2248 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2249
2250 chip_ver = bus->sih->chiprev;
2251 if (chip_ver == 4) {
2252 DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2253 strncat(chipver_tag, "_b0", strlen("_b0"));
2254 } else if (chip_ver == 5) {
2255 DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2256 strncat(chipver_tag, "_b1", strlen("_b1"));
2257 } else if (chip_ver == 9) {
2258 DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2259 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2260 defined(SUPPORT_BCM4359_MIXED_MODULES)
2261 if (dhd_check_module(VENDOR_MURATA)) {
2262 strncat(chipver_tag_nv, VNAME_DELIM, strlen(VNAME_DELIM));
2263 strncat(chipver_tag_nv, VENDOR_MURATA, strlen(VENDOR_MURATA));
2264 } else if (dhd_check_module(VENDOR_WISOL)) {
2265 strncat(chipver_tag_nv, VNAME_DELIM, strlen(VNAME_DELIM));
2266 strncat(chipver_tag_nv, VENDOR_WISOL, strlen(VENDOR_WISOL));
2267 }
2268 /* In case of SEMCO module, no extra vendor string needs to be added */
2269 strncat(chipver_tag_nv, "_c0", strlen("_c0"));
2270 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2271 strncat(chipver_tag, "_c0", strlen("_c0"));
2272 #if defined(CONFIG_WLAN_GRACE) || defined(CONFIG_SEC_GRACEQLTE_PROJECT) || \
2273 defined(CONFIG_SEC_LYKANLTE_PROJECT) || defined(CONFIG_SEC_KELLYLTE_PROJECT)
2274 DHD_ERROR(("----- Adding _plus string -----\n"));
2275 strncat(chipver_tag, "_plus", strlen("_plus"));
2276 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2277 defined(SUPPORT_BCM4359_MIXED_MODULES)
2278 strncat(chipver_tag_nv, "_plus", strlen("_plus"));
2279 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2280 #endif /* CONFIG_WLAN_GRACE || CONFIG_SEC_GRACEQLTE_PROJECT || CONFIG_SEC_LYKANLTE_PROJECT ||
2281 * CONFIG_SEC_KELLYLTE_PROJECT
2282 */
2283 } else {
2284 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2285 return BCME_ERROR;
2286 }
2287
2288 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2289 defined(SUPPORT_BCM4359_MIXED_MODULES)
2290 module_type = dhd_check_module_b90();
2291
2292 switch (module_type) {
2293 case BCM4359_MODULE_TYPE_B90B:
2294 strcat(fw_path, chipver_tag);
2295 break;
2296 case BCM4359_MODULE_TYPE_B90S:
2297 strcat(fw_path, chipver_tag);
2298 if (!(strstr(nv_path, VENDOR_MURATA) || strstr(nv_path, VENDOR_WISOL))) {
2299 strcat(nv_path, chipver_tag_nv);
2300 } else {
2301 strcat(nv_path, chipver_tag);
2302 }
2303 break;
2304 default:
2305 /*
2306 * If the .cid.info file does not exist,
2307 * force loading the B90S FW for the initial MFG boot-up.
2308 */
2309 if (chip_ver == 5) {
2310 strncat(fw_path, "_b90s", strlen("_b90s"));
2311 }
2312 strcat(fw_path, chipver_tag);
2313 strcat(nv_path, chipver_tag);
2314 break;
2315 }
2316 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2317 strcat(fw_path, chipver_tag);
2318 strcat(nv_path, chipver_tag);
2319 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2320
2321 return BCME_OK;
2322 }
2323 static int
2324 concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2325 {
2326 int ret = BCME_OK;
2327 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2328 char module_type[MAX_VNAME_LEN];
2329 naming_info_t *info = NULL;
2330 bool is_murata_fem = FALSE;
2331
2332 memset(module_type, 0, sizeof(module_type));
2333
2334 if (dhd_check_module_bcm4361(module_type,
2335 MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2336 info = dhd_find_naming_info(bcm4361_naming_table,
2337 ARRAYSIZE(bcm4361_naming_table), module_type);
2338 } else {
2339 /* in case the .cid.info file doesn't exist */
2340 info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2341 ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2342 }
2343
2344 if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
2345 is_murata_fem = FALSE;
2346 }
2347
2348 if (info) {
2349 if (is_murata_fem) {
2350 strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2351 }
2352 strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2353 strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2354 } else {
2355 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2356 ret = BCME_ERROR;
2357 }
2358 #else /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
2359 char chipver_tag[10] = {0, };
2360
2361 strcat(fw_path, chipver_tag);
2362 strcat(nv_path, chipver_tag);
2363 #endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
2364
2365 return ret;
2366 }
2367
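/*
 * Dispatches on the chip ID and appends chip/module-revision specific suffixes to the
 * firmware and nvram paths; chips without revision-specific images are left unchanged.
 */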
2368 int
2369 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2370 {
2371 int res = 0;
2372
2373 if (!bus || !bus->sih) {
2374 DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2375 return -1;
2376 }
2377
2378 if (!fw_path || !nv_path) {
2379 DHD_ERROR(("fw_path or nv_path is null.\n"));
2380 return res;
2381 }
2382
2383 switch (si_chipid(bus->sih)) {
2384
2385 case BCM43569_CHIP_ID:
2386 case BCM4358_CHIP_ID:
2387 res = concate_revision_bcm4358(bus, fw_path, nv_path);
2388 break;
2389 case BCM4355_CHIP_ID:
2390 case BCM4359_CHIP_ID:
2391 res = concate_revision_bcm4359(bus, fw_path, nv_path);
2392 break;
2393 case BCM4361_CHIP_ID:
2394 case BCM4347_CHIP_ID:
2395 res = concate_revision_bcm4361(bus, fw_path, nv_path);
2396 break;
2397 default:
2398 DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2399 return res;
2400 }
2401
2402 return res;
2403 }
2404 #endif /* SUPPORT_MULTIPLE_REVISION */
2405
2406 uint16
2407 dhd_get_chipid(dhd_pub_t *dhd)
2408 {
2409 dhd_bus_t *bus = dhd->bus;
2410
2411 if (bus && bus->sih)
2412 return (uint16)si_chipid(bus->sih);
2413 else
2414 return 0;
2415 }
2416
2417 /**
2418 * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2419 *
2420 * BCM_REQUEST_FW specific :
2421 * Given the chip type, determines the file paths within /lib/firmware/brcm/ containing the
2422 * firmware and nvram for that chip. If the download fails, retries the download with a different nvram file.
2423 *
2424 * BCMEMBEDIMAGE specific:
2425 * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
2426 * in the header file will be used instead.
2427 *
2428 * @return BCME_OK on success
2429 */
2430 int
2431 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
2432 char *pfw_path, char *pnv_path)
2433 {
2434 int ret;
2435
2436 bus->fw_path = pfw_path;
2437 bus->nv_path = pnv_path;
2438
2439 #if defined(SUPPORT_MULTIPLE_REVISION)
2440 if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2441 DHD_ERROR(("%s: failed to concatenate revision\n",
2442 __FUNCTION__));
2443 return BCME_BADARG;
2444 }
2445 #endif /* SUPPORT_MULTIPLE_REVISION */
2446
2447 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2448 dhd_set_blob_support(bus->dhd, bus->fw_path);
2449 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2450
2451 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2452 __FUNCTION__, bus->fw_path, bus->nv_path));
2453 dhdpcie_dump_resource(bus);
2454
2455 ret = dhdpcie_download_firmware(bus, osh);
2456
2457 return ret;
2458 }
2459
2460 /**
2461 * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2462 *
2463 * BCM_REQUEST_FW specific :
2464 * Given the chip type, determines the file paths within /lib/firmware/brcm/ containing the
2465 * firmware and nvram for that chip. If the download fails, retries the download with a different nvram file.
2466 *
2467 * BCMEMBEDIMAGE specific:
2468 * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
2469 * in the header file will be used instead.
2470 *
2471 * @return BCME_OK on success
2472 */
2473 static int
2474 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
2475 {
2476 int ret = 0;
2477 #if defined(BCM_REQUEST_FW)
2478 uint chipid = bus->sih->chip;
2479 uint revid = bus->sih->chiprev;
2480 char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
2481 char nv_path[64]; /* path to nvram vars file */
2482 bus->fw_path = fw_path;
2483 bus->nv_path = nv_path;
2484 switch (chipid) {
2485 case BCM43570_CHIP_ID:
2486 bcmstrncat(fw_path, "43570", 5);
2487 switch (revid) {
2488 case 0:
2489 bcmstrncat(fw_path, "a0", 2);
2490 break;
2491 case 2:
2492 bcmstrncat(fw_path, "a2", 2);
2493 break;
2494 default:
2495 DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
2496 revid));
2497 break;
2498 }
2499 break;
2500 default:
2501 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
2502 chipid));
2503 return 0;
2504 }
2505 /* load board specific nvram file */
2506 snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
2507 /* load firmware */
2508 snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
2509 #endif /* BCM_REQUEST_FW */
2510
2511 DHD_OS_WAKE_LOCK(bus->dhd);
2512 ret = _dhdpcie_download_firmware(bus);
2513
2514 DHD_OS_WAKE_UNLOCK(bus->dhd);
2515 return ret;
2516 } /* dhdpcie_download_firmware */
2517
2518 #define DHD_MEMORY_SET_PATTERN 0xAA
2519
2520 /**
2521 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
2522 * is updated with the event logging partitions within that file as well.
2523 *
2524 * @param pfw_path Path to .bin or .bea file
2525 */
2526 static int
2527 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
2528 {
2529 int bcmerror = BCME_ERROR;
2530 int offset = 0;
2531 #if defined(DHD_FW_MEM_CORRUPTION)
2532 uint8 *p_org_fw = NULL;
2533 uint32 org_fw_size = 0;
2534 uint32 fw_write_offset = 0;
2535 #endif /* DHD_FW_MEM_CORRUPTION */
2536 int len = 0;
2537 bool store_reset;
2538 char *imgbuf = NULL;
2539 uint8 *memblock = NULL, *memptr;
2540 int offset_end = bus->ramsize;
2541
2542 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
2543
2544 /* Should succeed in opening image if it is actually given through registry
2545 * entry or in module param.
2546 */
2547 imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
2548 if (imgbuf == NULL) {
2549 goto err;
2550 }
2551
2552 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
2553 if (memblock == NULL) {
2554 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
2555 bcmerror = BCME_NOMEM;
2556 goto err;
2557 }
2558 if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
2559 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
2560 }
2561
2562 #if defined(DHD_FW_MEM_CORRUPTION)
2563 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2564 org_fw_size = dhd_os_get_image_size(imgbuf);
2565 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2566 p_org_fw = (uint8*)DHD_OS_PREALLOC(bus->dhd,
2567 DHD_PREALLOC_MEMDUMP_RAM, org_fw_size);
2568 #else
2569 p_org_fw = (uint8*)VMALLOC(bus->dhd->osh, org_fw_size);
2570 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
2571 if (p_org_fw == NULL) {
2572 DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n",
2573 __FUNCTION__, org_fw_size));
2574 bcmerror = BCME_NOMEM;
2575 goto err;
2576 } else {
2577 memset(p_org_fw, 0, org_fw_size);
2578 }
2579 }
2580 #endif /* DHD_FW_MEM_CORRUPTION */
2581
2582 /* check if CR4/CA7 */
2583 store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
2584 si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
2585 /* Download image with MEMBLOCK size */
2586 while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
2587 if (len < 0) {
2588 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
2589 bcmerror = BCME_ERROR;
2590 goto err;
2591 }
2592 /* if address is 0, store the reset instruction to be written in 0 */
2593 if (store_reset) {
2594 ASSERT(offset == 0);
2595 bus->resetinstr = *(((uint32*)memptr));
2596 /* Add start of RAM address to the address given by user */
2597 offset += bus->dongle_ram_base;
2598 offset_end += offset;
2599 store_reset = FALSE;
2600 }
2601
2602 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
2603 if (bcmerror) {
2604 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
2605 __FUNCTION__, bcmerror, MEMBLOCK, offset));
2606 goto err;
2607 }
2608 offset += MEMBLOCK;
2609 #if defined(DHD_FW_MEM_CORRUPTION)
2610 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2611 memcpy((p_org_fw + fw_write_offset), memptr, len);
2612 fw_write_offset += len;
2613 }
2614 #endif /* DHD_FW_MEM_CORRUPTION */
2615
2616 if (offset >= offset_end) {
2617 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
2618 __FUNCTION__, offset, offset_end));
2619 bcmerror = BCME_ERROR;
2620 goto err;
2621 }
2622 }
2623 #ifdef DHD_FW_MEM_CORRUPTION
2624 /* Read and compare the downloaded code */
2625 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2626 unsigned char *p_readback_buf = NULL;
2627 uint32 compared_len;
2628 uint32 remaining_len = 0;
2629
2630 compared_len = 0;
2631 p_readback_buf = MALLOC(bus->dhd->osh, MEMBLOCK);
2632 if (p_readback_buf == NULL) {
2633 DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n",
2634 __FUNCTION__, MEMBLOCK));
2635 bcmerror = BCME_NOMEM;
2636 goto compare_err;
2637 }
2638 /* Read image to verify downloaded contents. */
2639 offset = bus->dongle_ram_base;
2640
2641 while (compared_len < org_fw_size) {
2642 memset(p_readback_buf, DHD_MEMORY_SET_PATTERN, MEMBLOCK);
2643 remaining_len = org_fw_size - compared_len;
2644
2645 if (remaining_len >= MEMBLOCK) {
2646 len = MEMBLOCK;
2647 } else {
2648 len = remaining_len;
2649 }
2650 bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
2651 (uint8 *)p_readback_buf, len);
2652 if (bcmerror) {
2653 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2654 __FUNCTION__, bcmerror, MEMBLOCK, offset));
2655 goto compare_err;
2656 }
2657
2658 if (memcmp((p_org_fw + compared_len), p_readback_buf, len) != 0) {
2659 DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n",
2660 __FUNCTION__, compared_len));
2661 bcmerror = BCME_ERROR;
2662 goto compare_err;
2663 }
2664
2665 compared_len += len;
2666 offset += len;
2667 }
2668 DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
2669
2670 compare_err:
2671 if (p_readback_buf) {
2672 MFREE(bus->dhd->osh, p_readback_buf, MEMBLOCK);
2673 }
2674 }
2675 #endif /* DHD_FW_MEM_CORRUPTION */
2676
2677 err:
2678 #if defined(DHD_FW_MEM_CORRUPTION)
2679 if (p_org_fw) {
2680 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2681 DHD_OS_PREFREE(bus->dhd, p_org_fw, org_fw_size);
2682 #else
2683 VMFREE(bus->dhd->osh, p_org_fw, org_fw_size);
2684 #endif // endif
2685 }
2686 #endif /* DHD_FW_MEM_CORRUPTION */
2687 if (memblock) {
2688 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
2689 }
2690
2691 if (imgbuf) {
2692 dhd_os_close_image1(bus->dhd, imgbuf);
2693 }
2694
2695 return bcmerror;
2696 } /* dhdpcie_download_code_file */
2697
2698 #ifdef CUSTOMER_HW4_DEBUG
2699 #define MIN_NVRAMVARS_SIZE 128
2700 #endif /* CUSTOMER_HW4_DEBUG */
2701
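/*
 * Downloads nvram variables to the dongle: tries a UEFI-provided buffer first, then falls back
 * to the file at bus->nv_path, processes the variables and writes them to dongle memory.
 */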
2702 static int
2703 dhdpcie_download_nvram(struct dhd_bus *bus)
2704 {
2705 int bcmerror = BCME_ERROR;
2706 uint len;
2707 char * memblock = NULL;
2708 char *bufp;
2709 char *pnv_path;
2710 bool nvram_file_exists;
2711 bool nvram_uefi_exists = FALSE;
2712 bool local_alloc = FALSE;
2713 pnv_path = bus->nv_path;
2714
2715 nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
2716
2717 /* First try UEFI */
2718 len = MAX_NVRAMBUF_SIZE;
2719 dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
2720
2721 /* If UEFI empty, then read from file system */
2722 if ((len <= 0) || (memblock == NULL)) {
2723
2724 if (nvram_file_exists) {
2725 len = MAX_NVRAMBUF_SIZE;
2726 dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
2727 if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
2728 goto err;
2729 }
2730 }
2731 else {
2732 /* For SROM OTP no external file or UEFI required */
2733 bcmerror = BCME_OK;
2734 }
2735 } else {
2736 nvram_uefi_exists = TRUE;
2737 }
2738
2739 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
2740
2741 if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
2742 bufp = (char *) memblock;
2743
2744 {
2745 bufp[len] = 0;
2746 if (nvram_uefi_exists || nvram_file_exists) {
2747 len = process_nvram_vars(bufp, len);
2748 }
2749 }
2750
2751 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
2752 #ifdef CUSTOMER_HW4_DEBUG
2753 if (len < MIN_NVRAMVARS_SIZE) {
2754 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
2755 __FUNCTION__));
2756 bcmerror = BCME_ERROR;
2757 goto err;
2758 }
2759 #endif /* CUSTOMER_HW4_DEBUG */
2760
2761 if (len % 4) {
2762 len += 4 - (len % 4);
2763 }
2764 bufp += len;
2765 *bufp++ = 0;
2766 if (len)
2767 bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
2768 if (bcmerror) {
2769 DHD_ERROR(("%s: error downloading vars: %d\n",
2770 __FUNCTION__, bcmerror));
2771 }
2772 }
2773
2774 err:
2775 if (memblock) {
2776 if (local_alloc) {
2777 MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
2778 } else {
2779 dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
2780 }
2781 }
2782
2783 return bcmerror;
2784 }
2785
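/*
 * Reads the first 'len' bytes of the firmware image given by bus->fw_path into 'buf', so the
 * caller can search it for embedded ramsize information.
 */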
2786 static int
2787 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
2788 {
2789 int bcmerror = BCME_ERROR;
2790 char *imgbuf = NULL;
2791
2792 if (buf == NULL || len == 0)
2793 goto err;
2794
2795 /* External image takes precedence if specified */
2796 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2797 // opens and seeks to correct file offset:
2798 imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
2799 if (imgbuf == NULL) {
2800 DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
2801 goto err;
2802 }
2803
2804 /* Read it */
2805 if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
2806 DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
2807 goto err;
2808 }
2809
2810 bcmerror = BCME_OK;
2811 }
2812
2813 err:
2814 if (imgbuf)
2815 dhd_os_close_image1(bus->dhd, imgbuf);
2816
2817 return bcmerror;
2818 }
2819
2820 /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares sysmem
2821 * with the BMC, and how much sysmem belongs to the CA7 can be adjusted during dongle compilation.
2822 * So the DHD needs to detect this case and update the dongle RAMSIZE accordingly.
2823 */
2824 static void
2825 dhdpcie_ramsize_adj(struct dhd_bus *bus)
2826 {
2827 int i, search_len = 0;
2828 uint8 *memptr = NULL;
2829 uint8 *ramsizeptr = NULL;
2830 uint ramsizelen;
2831 uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
2832 hnd_ramsize_ptr_t ramsize_info;
2833
2834 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2835
2836 /* Dongle RAMSIZE adjustment has already been done. */
2837 if (bus->ramsize_adjusted) {
2838 return;
2839 }
2840
2841 /* success or failure, we don't want to be here
2842 * more than once.
2843 */
2844 bus->ramsize_adjusted = TRUE;
2845
2846 /* Skip adjustment if the user has restricted the dongle ram size */
2847 if (dhd_dongle_memsize) {
2848 DHD_ERROR(("%s: user restricted dongle ram size to %d.\n", __FUNCTION__,
2849 dhd_dongle_memsize));
2850 return;
2851 }
2852
2853 /* Out immediately if no image to download */
2854 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2855 DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
2856 return;
2857 }
2858
2859 /* Get maximum RAMSIZE info search length */
2860 for (i = 0; ; i++) {
2861 if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2862 break;
2863
2864 if (search_len < (int)ramsize_ptr_ptr[i])
2865 search_len = (int)ramsize_ptr_ptr[i];
2866 }
2867
2868 if (!search_len)
2869 return;
2870
2871 search_len += sizeof(hnd_ramsize_ptr_t);
2872
2873 memptr = MALLOC(bus->dhd->osh, search_len);
2874 if (memptr == NULL) {
2875 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
2876 return;
2877 }
2878
2879 /* External image takes precedence if specified */
2880 if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
2881 goto err;
2882 }
2883 else {
2884 ramsizeptr = memptr;
2885 ramsizelen = search_len;
2886 }
2887
2888 if (ramsizeptr) {
2889 /* Check Magic */
2890 for (i = 0; ; i++) {
2891 if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2892 break;
2893
2894 if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
2895 continue;
2896
2897 memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
2898 sizeof(hnd_ramsize_ptr_t));
2899
2900 if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
2901 bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
2902 bus->ramsize = LTOH32(ramsize_info.ram_size);
2903 DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
2904 bus->ramsize));
2905 break;
2906 }
2907 }
2908 }
2909
2910 err:
2911 if (memptr)
2912 MFREE(bus->dhd->osh, memptr, search_len);
2913
2914 return;
2915 } /* dhdpcie_ramsize_adj */
2916
2917 /**
2918 * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
2919 *
2920 * BCMEMBEDIMAGE specific:
2921 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2922 * file will be used instead.
2923 *
2924 */
2925 static int
2926 _dhdpcie_download_firmware(struct dhd_bus *bus)
2927 {
2928 int bcmerror = -1;
2929
2930 bool embed = FALSE; /* download embedded firmware */
2931 bool dlok = FALSE; /* download firmware succeeded */
2932
2933 /* Out immediately if no image to download */
2934 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2935 DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
2936 return 0;
2937 }
2938 /* Adjust ram size */
2939 dhdpcie_ramsize_adj(bus);
2940
2941 /* Keep arm in reset */
2942 if (dhdpcie_bus_download_state(bus, TRUE)) {
2943 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
2944 goto err;
2945 }
2946
2947 /* External image takes precedence if specified */
2948 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2949 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
2950 DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
2951 __LINE__));
2952 goto err;
2953 } else {
2954 embed = FALSE;
2955 dlok = TRUE;
2956 }
2957 }
2958
2959 BCM_REFERENCE(embed);
2960 if (!dlok) {
2961 DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
2962 goto err;
2963 }
2964
2965 /* EXAMPLE: nvram_array */
2966 /* If a valid nvram_array is specified as above, it can be passed down to the dongle */
2967 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
2968
2969 /* External nvram takes precedence if specified */
2970 if (dhdpcie_download_nvram(bus)) {
2971 DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
2972 goto err;
2973 }
2974
2975 /* Take arm out of reset */
2976 if (dhdpcie_bus_download_state(bus, FALSE)) {
2977 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
2978 goto err;
2979 }
2980
2981 bcmerror = 0;
2982
2983 err:
2984 return bcmerror;
2985 } /* _dhdpcie_download_firmware */
2986
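/*
 * Reads any new data from the dongle console ring buffer in shared memory (handling
 * wrap-around) and prints complete lines to the host log.
 */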
2987 static int
2988 dhdpcie_bus_readconsole(dhd_bus_t *bus)
2989 {
2990 dhd_console_t *c = &bus->console;
2991 uint8 line[CONSOLE_LINE_MAX], ch;
2992 uint32 n, idx, addr;
2993 int rv;
2994 uint readlen = 0;
2995 uint i = 0;
2996
2997 /* Don't do anything until FWREADY updates console address */
2998 if (bus->console_addr == 0)
2999 return -1;
3000
3001 /* Read console log struct */
3002 addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3003
3004 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
3005 return rv;
3006
3007 /* Allocate console buffer (one time only) */
3008 if (c->buf == NULL) {
3009 c->bufsize = ltoh32(c->log.buf_size);
3010 if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
3011 return BCME_NOMEM;
3012 DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
3013 }
3014 idx = ltoh32(c->log.idx);
3015
3016 /* Protect against corrupt value */
3017 if (idx > c->bufsize)
3018 return BCME_ERROR;
3019
3020 /* Skip reading the console buffer if the index pointer has not moved */
3021 if (idx == c->last)
3022 return BCME_OK;
3023
3024 DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
3025 idx, c->last));
3026
3027 /* Read the console buffer data to a local buffer */
3028 /* Optimization: read only the portion of the buffer that is needed, but
3029 * take care to handle wrap-around.
3030 */
3031 addr = ltoh32(c->log.buf);
3032
3033 /* wrap around case - write ptr < read ptr */
3034 if (idx < c->last) {
3035 /* from read ptr to end of buffer */
3036 readlen = c->bufsize - c->last;
3037 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3038 addr + c->last, c->buf, readlen)) < 0) {
3039 DHD_ERROR(("conlog: read error[1] ! \n"));
3040 return rv;
3041 }
3042 /* from beginning of buffer to write ptr */
3043 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3044 addr, c->buf + readlen,
3045 idx)) < 0) {
3046 DHD_ERROR(("conlog: read error[2] ! \n"));
3047 return rv;
3048 }
3049 readlen += idx;
3050 } else {
3051 /* non-wraparound case, write ptr > read ptr */
3052 readlen = (uint)idx - c->last;
3053 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3054 addr + c->last, c->buf, readlen)) < 0) {
3055 DHD_ERROR(("conlog: read error[3] ! \n"));
3056 return rv;
3057 }
3058 }
3059 /* update read ptr */
3060 c->last = idx;
3061
3062 /* now output the read data from the local buffer to the host console */
3063 while (i < readlen) {
3064 for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
3065 ch = c->buf[i];
3066 ++i;
3067 if (ch == '\n')
3068 break;
3069 line[n] = ch;
3070 }
3071
3072 if (n > 0) {
3073 if (line[n - 1] == '\r')
3074 n--;
3075 line[n] = 0;
3076 DHD_FWLOG(("CONSOLE: %s\n", line));
3077 }
3078 }
3079
3080 return BCME_OK;
3081
3082 } /* dhdpcie_bus_readconsole */
3083
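/*
 * Dumps the entire dongle console ring buffer to the host log, starting from the current
 * write index so the output appears in chronological order.
 */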
3084 void
3085 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3086 {
3087 uint32 n, i;
3088 uint32 addr;
3089 char *console_buffer = NULL;
3090 uint32 console_ptr, console_size, console_index;
3091 uint8 line[CONSOLE_LINE_MAX], ch;
3092 int rv;
3093
3094 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3095
3096 if (bus->is_linkdown) {
3097 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
3098 return;
3099 }
3100
3101 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3102 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3103 (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
3104 goto exit;
3105 }
3106
3107 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3108 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3109 (uint8 *)&console_size, sizeof(console_size))) < 0) {
3110 goto exit;
3111 }
3112
3113 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3114 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3115 (uint8 *)&console_index, sizeof(console_index))) < 0) {
3116 goto exit;
3117 }
3118
3119 console_ptr = ltoh32(console_ptr);
3120 console_size = ltoh32(console_size);
3121 console_index = ltoh32(console_index);
3122
3123 if (console_size > CONSOLE_BUFFER_MAX ||
3124 !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3125 goto exit;
3126 }
3127
3128 if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3129 (uint8 *)console_buffer, console_size)) < 0) {
3130 goto exit;
3131 }
3132
3133 for (i = 0, n = 0; i < console_size; i += n + 1) {
3134 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
3135 ch = console_buffer[(console_index + i + n) % console_size];
3136 if (ch == '\n')
3137 break;
3138 line[n] = ch;
3139 }
3140
3141 if (n > 0) {
3142 if (line[n - 1] == '\r')
3143 n--;
3144 line[n] = 0;
3145 /* Don't use DHD_ERROR macro since we print
3146 * a lot of information quickly. The macro
3147 * will truncate a lot of the printfs
3148 */
3149
3150 DHD_FWLOG(("CONSOLE: %s\n", line));
3151 }
3152 }
3153
3154 exit:
3155 if (console_buffer)
3156 MFREE(bus->dhd->osh, console_buffer, console_size);
3157 return;
3158 }
3159
3160 /**
3161 * Checks whether the dongle has trapped or asserted; if so, collects the assert/trap information
3162 * from dongle shared memory, dumps the console buffer and schedules a memory dump.
3163 * @return BCME_OK on success
3164 */
3165 static int
3166 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3167 {
3168 int bcmerror = 0;
3169 uint msize = 512;
3170 char *mbuffer = NULL;
3171 uint maxstrlen = 256;
3172 char *str = NULL;
3173 pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3174 struct bcmstrbuf strbuf;
3175 unsigned long flags;
3176 bool dongle_trap_occured = FALSE;
3177
3178 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3179
3180 if (DHD_NOCHECKDIED_ON()) {
3181 return 0;
3182 }
3183
3184 if (data == NULL) {
3185 /*
3186 * Called after a rx ctrl timeout. "data" is NULL.
3187 * allocate memory to trace the trap or assert.
3188 */
3189 size = msize;
3190 mbuffer = data = MALLOC(bus->dhd->osh, msize);
3191
3192 if (mbuffer == NULL) {
3193 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3194 bcmerror = BCME_NOMEM;
3195 goto done;
3196 }
3197 }
3198
3199 if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3200 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3201 bcmerror = BCME_NOMEM;
3202 goto done;
3203 }
3204 DHD_GENERAL_LOCK(bus->dhd, flags);
3205 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3206 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3207
3208 if (MULTIBP_ENAB(bus->sih)) {
3209 dhd_bus_pcie_pwr_req(bus);
3210 }
3211 if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3212 goto done;
3213 }
3214
3215 bcm_binit(&strbuf, data, size);
3216
3217 bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
3218 local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
3219
3220 if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3221 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3222 * (Avoids conflict with real asserts for programmatic parsing of output.)
3223 */
3224 bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3225 }
3226
3227 if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
3228 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3229 * (Avoids conflict with real asserts for programmatic parsing of output.)
3230 */
3231 bcm_bprintf(&strbuf, "No trap%s in dongle",
3232 (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
3233 ?"/assrt" :"");
3234 } else {
3235 if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3236 /* Download assert */
3237 bcm_bprintf(&strbuf, "Dongle assert");
3238 if (bus->pcie_sh->assert_exp_addr != 0) {
3239 str[0] = '\0';
3240 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3241 bus->pcie_sh->assert_exp_addr,
3242 (uint8 *)str, maxstrlen)) < 0) {
3243 goto done;
3244 }
3245
3246 str[maxstrlen - 1] = '\0';
3247 bcm_bprintf(&strbuf, " expr \"%s\"", str);
3248 }
3249
3250 if (bus->pcie_sh->assert_file_addr != 0) {
3251 str[0] = '\0';
3252 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3253 bus->pcie_sh->assert_file_addr,
3254 (uint8 *)str, maxstrlen)) < 0) {
3255 goto done;
3256 }
3257
3258 str[maxstrlen - 1] = '\0';
3259 bcm_bprintf(&strbuf, " file \"%s\"", str);
3260 }
3261
3262 bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
3263 }
3264
3265 if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3266 trap_t *tr = &bus->dhd->last_trap_info;
3267 dongle_trap_occured = TRUE;
3268 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3269 bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
3270 bus->dhd->dongle_trap_occured = TRUE;
3271 goto done;
3272 }
3273 dhd_bus_dump_trap_info(bus, &strbuf);
3274 }
3275 }
3276
3277 if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3278 DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
3279
3280 /* wake up IOCTL wait event */
3281 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3282
3283 dhd_bus_dump_console_buffer(bus);
3284 dhd_prot_debug_info_print(bus->dhd);
3285
3286 #if defined(DHD_FW_COREDUMP)
3287 /* save core dump or write to a file */
3288 if (bus->dhd->memdump_enabled) {
3289 #ifdef DHD_SSSR_DUMP
3290 if (bus->dhd->sssr_inited) {
3291 dhdpcie_sssr_dump(bus->dhd);
3292 }
3293 #endif /* DHD_SSSR_DUMP */
3294 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3295 dhdpcie_mem_dump(bus);
3296 }
3297 #endif /* DHD_FW_COREDUMP */
3298
3299 /* set the trap occurred flag only after all the memdump,
3300 * logdump and sssr dump collection has been scheduled
3301 */
3302 if (dongle_trap_occured) {
3303 bus->dhd->dongle_trap_occured = TRUE;
3304 }
3305
3306 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3307 copy_hang_info_trap(bus->dhd);
3308 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3309 dhd_schedule_reset(bus->dhd);
3310
3311 }
3312
3313 DHD_GENERAL_LOCK(bus->dhd, flags);
3314 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3315 dhd_os_busbusy_wake(bus->dhd);
3316 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3317
3318 done:
3319 if (MULTIBP_ENAB(bus->sih)) {
3320 dhd_bus_pcie_pwr_req_clear(bus);
3321 }
3322 if (mbuffer)
3323 MFREE(bus->dhd->osh, mbuffer, msize);
3324 if (str)
3325 MFREE(bus->dhd->osh, str, maxstrlen);
3326
3327 return bcmerror;
3328 } /* dhdpcie_checkdied */
3329
3330 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3331 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3332 {
3333 int ret = 0;
3334 int size; /* Full mem size */
3335 int start; /* Start address */
3336 int read_size = 0; /* Read size of each iteration */
3337 uint8 *databuf = buf;
3338
3339 if (bus == NULL) {
3340 return;
3341 }
3342
3343 start = bus->dongle_ram_base;
3344 read_size = 4;
3345 /* check for dead bus */
3346 {
3347 uint test_word = 0;
3348 ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
3349 /* if read error or bus timeout */
3350 if (ret || (test_word == 0xFFFFFFFF)) {
3351 return;
3352 }
3353 }
3354
3355 /* Get full mem size */
3356 size = bus->ramsize;
3357 /* Read mem content */
3358 while (size)
3359 {
3360 read_size = MIN(MEMBLOCK, size);
3361 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3362 return;
3363 }
3364
3365 /* Decrement size and increment start address */
3366 size -= read_size;
3367 start += read_size;
3368 databuf += read_size;
3369 }
3370 bus->dhd->soc_ram = buf;
3371 bus->dhd->soc_ram_length = bus->ramsize;
3372 return;
3373 }
3374
3375 #if defined(DHD_FW_COREDUMP)
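/*
 * Dumps the full dongle RAM: reads it in MEMBLOCK-sized chunks into the preallocated fwdump
 * buffer and schedules the memdump work. Bails out early if the PCIe link is down (when
 * link-down recovery support is compiled in).
 */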
3376 static int
3377 dhdpcie_mem_dump(dhd_bus_t *bus)
3378 {
3379 int ret = 0;
3380 int size; /* Full mem size */
3381 int start = bus->dongle_ram_base; /* Start address */
3382 int read_size = 0; /* Read size of each iteration */
3383 uint8 *buf = NULL, *databuf = NULL;
3384
3385 #ifdef EXYNOS_PCIE_DEBUG
3386 exynos_pcie_register_dump(1);
3387 #endif /* EXYNOS_PCIE_DEBUG */
3388
3389 #ifdef SUPPORT_LINKDOWN_RECOVERY
3390 if (bus->is_linkdown) {
3391 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
3392 return BCME_ERROR;
3393 }
3394 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3395
3396 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3397 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
3398 return BCME_ERROR;
3399 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3400
3401 /* Get full mem size */
3402 size = bus->ramsize;
3403 buf = dhd_get_fwdump_buf(bus->dhd, size);
3404 if (!buf) {
3405 DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
3406 return BCME_ERROR;
3407 }
3408
3409 /* Read mem content */
3410 DHD_TRACE_HW4(("Dump dongle memory\n"));
3411 databuf = buf;
3412 while (size > 0)
3413 {
3414 read_size = MIN(MEMBLOCK, size);
3415 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
3416 {
3417 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3418 #ifdef DHD_DEBUG_UART
3419 bus->dhd->memdump_success = FALSE;
3420 #endif /* DHD_DEBUG_UART */
3421 return BCME_ERROR;
3422 }
3423 DHD_TRACE(("."));
3424
3425 /* Decrement size and increment start address */
3426 size -= read_size;
3427 start += read_size;
3428 databuf += read_size;
3429 }
3430 #ifdef DHD_DEBUG_UART
3431 bus->dhd->memdump_success = TRUE;
3432 #endif /* DHD_DEBUG_UART */
3433
3434 dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
3435 /* buf, actually soc_ram free handled in dhd_{free,clear} */
3436
3437 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3438 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3439 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3440 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3441
3442 return ret;
3443 }
3444
3445 int
3446 dhd_bus_mem_dump(dhd_pub_t *dhdp)
3447 {
3448 dhd_bus_t *bus = dhdp->bus;
3449 int ret = BCME_ERROR;
3450
3451 if (dhdp->busstate == DHD_BUS_DOWN) {
3452 DHD_ERROR(("%s bus is down\n", __FUNCTION__));
3453 return BCME_ERROR;
3454 }
3455
3456 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
3457 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3458 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3459 return BCME_ERROR;
3460 }
3461
3462 DHD_OS_WAKE_LOCK(dhdp);
3463 ret = dhdpcie_mem_dump(bus);
3464 DHD_OS_WAKE_UNLOCK(dhdp);
3465 return ret;
3466 }
3467 #endif /* DHD_FW_COREDUMP */
3468
3469 int
3470 dhd_socram_dump(dhd_bus_t *bus)
3471 {
3472 #if defined(DHD_FW_COREDUMP)
3473 DHD_OS_WAKE_LOCK(bus->dhd);
3474 dhd_bus_mem_dump(bus->dhd);
3475 DHD_OS_WAKE_UNLOCK(bus->dhd);
3476 return 0;
3477 #else
3478 return -1;
3479 #endif // endif
3480 }
3481
3482 /**
3483 * Transfers bytes between host and dongle using PIO mode; the 'write' flag selects the direction.
3484 * Parameter 'address' is a backplane address.
3485 */
3486 static int
3487 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
3488 {
3489 uint dsize;
3490 int detect_endian_flag = 0x01;
3491 bool little_endian;
3492
3493 if (write && bus->is_linkdown) {
3494 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3495 return BCME_ERROR;
3496 }
3497
3498 if (MULTIBP_ENAB(bus->sih)) {
3499 dhd_bus_pcie_pwr_req(bus);
3500 }
3501 /* Detect endianness. */
3502 little_endian = *(char *)&detect_endian_flag;
3503
3504 /* In remap mode, adjust address beyond socram and redirect
3505 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
3506 * is not backplane accessible
3507 */
3508
3509 /* Determine initial transfer parameters */
3510 #ifdef DHD_SUPPORT_64BIT
3511 dsize = sizeof(uint64);
3512 #else /* !DHD_SUPPORT_64BIT */
3513 dsize = sizeof(uint32);
3514 #endif /* DHD_SUPPORT_64BIT */
3515
3516 /* Do the transfer(s) */
3517 if (write) {
3518 while (size) {
3519 #ifdef DHD_SUPPORT_64BIT
3520 if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
3521 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
3522 }
3523 #else /* !DHD_SUPPORT_64BIT */
3524 if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
3525 dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
3526 }
3527 #endif /* DHD_SUPPORT_64BIT */
3528 else {
3529 dsize = sizeof(uint8);
3530 dhdpcie_bus_wtcm8(bus, address, *data);
3531 }
3532
3533 /* Adjust for next transfer (if any) */
3534 if ((size -= dsize)) {
3535 data += dsize;
3536 address += dsize;
3537 }
3538 }
3539 } else {
3540 while (size) {
3541 #ifdef DHD_SUPPORT_64BIT
3542 if (size >= sizeof(uint64) && little_endian && !(address % 8))
3543 {
3544 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
3545 }
3546 #else /* !DHD_SUPPORT_64BIT */
3547 if (size >= sizeof(uint32) && little_endian && !(address % 4))
3548 {
3549 *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
3550 }
3551 #endif /* DHD_SUPPORT_64BIT */
3552 else {
3553 dsize = sizeof(uint8);
3554 *data = dhdpcie_bus_rtcm8(bus, address);
3555 }
3556
3557 /* Adjust for next transfer (if any) */
3558 if ((size -= dsize) > 0) {
3559 data += dsize;
3560 address += dsize;
3561 }
3562 }
3563 }
3564 if (MULTIBP_ENAB(bus->sih)) {
3565 dhd_bus_pcie_pwr_req_clear(bus);
3566 }
3567 return BCME_OK;
3568 } /* dhdpcie_bus_membytes */
3569
3570 /**
3571 * Transfers transmit (ethernet) packets that were queued in the (flow controlled) flow ring queue
3572 * to the (non flow controlled) flow ring.
3573 */
3574 int BCMFASTPATH
3575 dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
3576 {
3577 flow_ring_node_t *flow_ring_node;
3578 int ret = BCME_OK;
3579 #ifdef DHD_LOSSLESS_ROAMING
3580 dhd_pub_t *dhdp = bus->dhd;
3581 #endif // endif
3582 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
3583
3584 /* ASSERT on flow_id */
3585 if (flow_id >= bus->max_submission_rings) {
3586 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
3587 flow_id, bus->max_submission_rings));
3588 return 0;
3589 }
3590
3591 flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
3592
3593 if (flow_ring_node->prot_info == NULL) {
3594 DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
3595 return BCME_NOTREADY;
3596 }
3597
3598 #ifdef DHD_LOSSLESS_ROAMING
3599 if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
3600 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
3601 __FUNCTION__, flow_ring_node->flow_info.tid));
3602 return BCME_OK;
3603 }
3604 #endif /* DHD_LOSSLESS_ROAMING */
3605
3606 {
3607 unsigned long flags;
3608 void *txp = NULL;
3609 flow_queue_t *queue;
3610 #ifdef DHD_LOSSLESS_ROAMING
3611 struct ether_header *eh;
3612 uint8 *pktdata;
3613 #endif /* DHD_LOSSLESS_ROAMING */
3614
3615 queue = &flow_ring_node->queue; /* queue associated with flow ring */
3616
3617 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3618
3619 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
3620 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3621 return BCME_NOTREADY;
3622 }
3623
3624 while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
3625 PKTORPHAN(txp);
3626
3627 /*
3628 * Modifying the packet length caused P2P certification failures.
3629 * Specifically, in test cases where a 52-byte packet was injected,
3630 * the sniffer capture showed 62 bytes, which made the certification
3631 * tests fail. The change below is therefore made router-specific
3632 * only.
3633 */
3634
3635 #ifdef DHDTCPACK_SUPPRESS
3636 if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
3637 ret = dhd_tcpack_check_xmit(bus->dhd, txp);
3638 if (ret != BCME_OK) {
3639 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
3640 __FUNCTION__));
3641 }
3642 }
3643 #endif /* DHDTCPACK_SUPPRESS */
3644 #ifdef DHD_LOSSLESS_ROAMING
3645 pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
3646 eh = (struct ether_header *) pktdata;
3647 if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
3648 uint8 prio = (uint8)PKTPRIO(txp);
3649 /* Restore to original priority for 802.1X packet */
3650 if (prio == PRIO_8021D_NC) {
3651 PKTSETPRIO(txp, dhdp->prio_8021x);
3652 }
3653 }
3654 #endif /* DHD_LOSSLESS_ROAMING */
3655 /* Attempt to transfer packet over flow ring */
3656 ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
3657 if (ret != BCME_OK) { /* may not have resources in flow ring */
3658 DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
3659 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3660 /* reinsert at head */
3661 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
3662 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3663
3664 /* If we are able to requeue back, return success */
3665 return BCME_OK;
3666 }
3667 }
3668
3669 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3670
3671 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3672 }
3673
3674 return ret;
3675 } /* dhd_bus_schedule_queue */
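/*
 * Note on the drain loop above: packets are dequeued from the flow queue in
 * order and handed to dhd_prot_txdata(). On the first failure (typically no
 * room left in the flow ring) the packet is re-inserted at the head of the
 * queue and BCME_OK is returned, so the packet is retried on a later
 * scheduling pass instead of being dropped.
 */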
3676
3677 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
3678 int BCMFASTPATH
3679 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
3680 {
3681 uint16 flowid;
3682 #ifdef IDLE_TX_FLOW_MGMT
3683 uint8 node_status;
3684 #endif /* IDLE_TX_FLOW_MGMT */
3685 flow_queue_t *queue;
3686 flow_ring_node_t *flow_ring_node;
3687 unsigned long flags;
3688 int ret = BCME_OK;
3689 void *txp_pend = NULL;
3690
3691 if (!bus->dhd->flowid_allocator) {
3692 DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
3693 goto toss;
3694 }
3695
3696 flowid = DHD_PKT_GET_FLOWID(txp);
3697
3698 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
3699
3700 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
3701 __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
3702
3703 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3704 if ((flowid >= bus->dhd->num_flow_rings) ||
3705 #ifdef IDLE_TX_FLOW_MGMT
3706 (!flow_ring_node->active))
3707 #else
3708 (!flow_ring_node->active) ||
3709 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
3710 (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
3711 #endif /* IDLE_TX_FLOW_MGMT */
3712 {
3713 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3714 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
3715 __FUNCTION__, flowid, flow_ring_node->status,
3716 flow_ring_node->active));
3717 ret = BCME_ERROR;
3718 goto toss;
3719 }
3720
3721 #ifdef IDLE_TX_FLOW_MGMT
3722 node_status = flow_ring_node->status;
3723
3724 /* Handle the different flow ring status states here */
3725 switch (node_status)
3726 {
3727 case FLOW_RING_STATUS_OPEN:
3728
3729 if (bus->enable_idle_flowring_mgmt) {
3730 /* Move the node to the head of active list */
3731 dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
3732 }
3733 break;
3734
3735 case FLOW_RING_STATUS_SUSPENDED:
3736 DHD_INFO(("Need to Initiate TX Flow resume\n"));
3737 /* Issue resume_ring request */
3738 dhd_bus_flow_ring_resume_request(bus,
3739 flow_ring_node);
3740 break;
3741
3742 case FLOW_RING_STATUS_CREATE_PENDING:
3743 case FLOW_RING_STATUS_RESUME_PENDING:
3744 /* Don't do anything here */
3745 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
3746 node_status));
3747 break;
3748
3749 case FLOW_RING_STATUS_DELETE_PENDING:
3750 default:
3751 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
3752 flowid, node_status));
3753 /* error here!! */
3754 ret = BCME_ERROR;
3755 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3756 goto toss;
3757 }
3758 /* Now queue the packet */
3759 #endif /* IDLE_TX_FLOW_MGMT */
3760
3761 queue = &flow_ring_node->queue; /* queue associated with flow ring */
3762
3763 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
3764 txp_pend = txp;
3765
3766 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3767
3768 if (flow_ring_node->status) {
3769 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
3770 __FUNCTION__, flowid, flow_ring_node->status,
3771 flow_ring_node->active));
3772 if (txp_pend) {
3773 txp = txp_pend;
3774 goto toss;
3775 }
3776 return BCME_OK;
3777 }
3778 ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
3779
3780 /* If we have anything pending, try to push into q */
3781 if (txp_pend) {
3782 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3783
3784 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
3785 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3786 txp = txp_pend;
3787 goto toss;
3788 }
3789
3790 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3791 }
3792
3793 return ret;
3794
3795 toss:
3796 DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
3797 PKTCFREE(bus->dhd->osh, txp, TRUE);
3798 return ret;
3799 } /* dhd_bus_txdata */
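/*
 * Transmit path summary: dhd_bus_txdata() maps the packet to its flow ring
 * via DHD_PKT_GET_FLOWID(), enqueues it on the per-flow queue under the flow
 * ring lock, and then calls dhd_bus_schedule_queue() to move queued packets
 * into the (non flow controlled) flow ring. Packets that cannot be queued
 * are freed on the 'toss' path.
 */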
3800
3801 void
3802 dhd_bus_stop_queue(struct dhd_bus *bus)
3803 {
3804 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
3805 }
3806
3807 void
3808 dhd_bus_start_queue(struct dhd_bus *bus)
3809 {
3810 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
3811 }
3812
3813 /* Device console input function */
3814 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
3815 {
3816 dhd_bus_t *bus = dhd->bus;
3817 uint32 addr, val;
3818 int rv;
3819 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
3820 if (bus->console_addr == 0)
3821 return BCME_UNSUPPORTED;
3822
3823 /* Don't allow input if dongle is in reset */
3824 if (bus->dhd->dongle_reset) {
3825 return BCME_NOTREADY;
3826 }
3827
3828 /* Zero cbuf_index */
3829 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
3830 val = htol32(0);
3831 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3832 goto done;
3833
3834 /* Write message into cbuf */
3835 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
3836 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
3837 goto done;
3838
3839 /* Write length into vcons_in */
3840 addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
3841 val = htol32(msglen);
3842 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3843 goto done;
3844
3845 /* generate an interrupt to dongle to indicate that it needs to process cons command */
3846 dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
3847 done:
3848 return rv;
3849 } /* dhd_bus_console_in */
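/*
 * Illustrative usage (hypothetical command string): push a console command
 * to the dongle, assuming the firmware was built with console support so
 * that bus->console_addr is non-zero.
 *
 *   char cmd[] = "?";
 *   dhd_bus_console_in(dhdp, (uchar *)cmd, strlen(cmd));
 *
 * The helper zeroes cbuf_idx, copies the command into cbuf, writes the
 * length into vcons_in and then raises H2D_HOST_CONS_INT to the dongle.
 */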
3850
3851 /**
3852 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
3853 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
3854 */
3855 void BCMFASTPATH
3856 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
3857 {
3858 dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
3859 }
3860
3861 /** 'offset' is a backplane address */
3862 void
3863 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
3864 {
3865 if (bus->is_linkdown) {
3866 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3867 return;
3868 } else {
3869 W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
3870 }
3871 }
3872
3873 uint8
3874 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
3875 {
3876 volatile uint8 data;
3877 if (bus->is_linkdown) {
3878 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3879 data = (uint8)-1;
3880 } else {
3881 data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
3882 }
3883 return data;
3884 }
3885
3886 void
3887 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
3888 {
3889 if (bus->is_linkdown) {
3890 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3891 return;
3892 } else {
3893 W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
3894 }
3895 }
3896 void
3897 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
3898 {
3899 if (bus->is_linkdown) {
3900 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3901 return;
3902 } else {
3903 W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
3904 }
3905 }
3906 #ifdef DHD_SUPPORT_64BIT
3907 void
3908 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
3909 {
3910 if (bus->is_linkdown) {
3911 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3912 return;
3913 } else {
3914 W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
3915 }
3916 }
3917 #endif /* DHD_SUPPORT_64BIT */
3918
3919 uint16
3920 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
3921 {
3922 volatile uint16 data;
3923 if (bus->is_linkdown) {
3924 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3925 data = (uint16)-1;
3926 } else {
3927 data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
3928 }
3929 return data;
3930 }
3931
3932 uint32
3933 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
3934 {
3935 volatile uint32 data;
3936 if (bus->is_linkdown) {
3937 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3938 data = (uint32)-1;
3939 } else {
3940 data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
3941 }
3942 return data;
3943 }
3944
3945 #ifdef DHD_SUPPORT_64BIT
3946 uint64
3947 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
3948 {
3949 volatile uint64 data;
3950 if (bus->is_linkdown) {
3951 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3952 data = (uint64)-1;
3953 } else {
3954 data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
3955 }
3956 return data;
3957 }
3958 #endif /* DHD_SUPPORT_64BIT */
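/*
 * Note: the dhdpcie_bus_rtcm*() readers above return an all-ones value
 * ((uintN)-1) when the PCIe link is down, so callers polling shared memory
 * should treat 0xFFFF / 0xFFFFFFFF as a possible link-down indication rather
 * than as valid dongle data.
 */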
3959
3960 /** A snippet of dongle memory is shared between host and dongle */
3961 void
3962 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
3963 {
3964 uint64 long_data;
3965 ulong addr; /* dongle address */
3966
3967 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
3968
3969 if (bus->is_linkdown) {
3970 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3971 return;
3972 }
3973
3974 if (MULTIBP_ENAB(bus->sih)) {
3975 dhd_bus_pcie_pwr_req(bus);
3976 }
3977 switch (type) {
3978 case D2H_DMA_SCRATCH_BUF:
3979 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
3980 long_data = HTOL64(*(uint64 *)data);
3981 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
3982 if (dhd_msg_level & DHD_INFO_VAL) {
3983 prhex(__FUNCTION__, data, len);
3984 }
3985 break;
3986
3987 case D2H_DMA_SCRATCH_BUF_LEN :
3988 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
3989 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
3990 if (dhd_msg_level & DHD_INFO_VAL) {
3991 prhex(__FUNCTION__, data, len);
3992 }
3993 break;
3994
3995 case H2D_DMA_INDX_WR_BUF:
3996 long_data = HTOL64(*(uint64 *)data);
3997 addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
3998 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
3999 if (dhd_msg_level & DHD_INFO_VAL) {
4000 prhex(__FUNCTION__, data, len);
4001 }
4002 break;
4003
4004 case H2D_DMA_INDX_RD_BUF:
4005 long_data = HTOL64(*(uint64 *)data);
4006 addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4007 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4008 if (dhd_msg_level & DHD_INFO_VAL) {
4009 prhex(__FUNCTION__, data, len);
4010 }
4011 break;
4012
4013 case D2H_DMA_INDX_WR_BUF:
4014 long_data = HTOL64(*(uint64 *)data);
4015 addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4016 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4017 if (dhd_msg_level & DHD_INFO_VAL) {
4018 prhex(__FUNCTION__, data, len);
4019 }
4020 break;
4021
4022 case D2H_DMA_INDX_RD_BUF:
4023 long_data = HTOL64(*(uint64 *)data);
4024 addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4025 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4026 if (dhd_msg_level & DHD_INFO_VAL) {
4027 prhex(__FUNCTION__, data, len);
4028 }
4029 break;
4030
4031 case H2D_IFRM_INDX_WR_BUF:
4032 long_data = HTOL64(*(uint64 *)data);
4033 addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4034 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4035 if (dhd_msg_level & DHD_INFO_VAL) {
4036 prhex(__FUNCTION__, data, len);
4037 }
4038 break;
4039
4040 case RING_ITEM_LEN :
4041 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4042 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4043 break;
4044
4045 case RING_MAX_ITEMS :
4046 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4047 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4048 break;
4049
4050 case RING_BUF_ADDR :
4051 long_data = HTOL64(*(uint64 *)data);
4052 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4053 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4054 if (dhd_msg_level & DHD_INFO_VAL) {
4055 prhex(__FUNCTION__, data, len);
4056 }
4057 break;
4058
4059 case RING_WR_UPD :
4060 addr = bus->ring_sh[ringid].ring_state_w;
4061 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4062 break;
4063
4064 case RING_RD_UPD :
4065 addr = bus->ring_sh[ringid].ring_state_r;
4066 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4067 break;
4068
4069 case D2H_MB_DATA:
4070 addr = bus->d2h_mb_data_ptr_addr;
4071 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4072 break;
4073
4074 case H2D_MB_DATA:
4075 addr = bus->h2d_mb_data_ptr_addr;
4076 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4077 break;
4078
4079 case HOST_API_VERSION:
4080 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4081 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4082 break;
4083
4084 case DNGL_TO_HOST_TRAP_ADDR:
4085 long_data = HTOL64(*(uint64 *)data);
4086 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4087 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4088 DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
4089 break;
4090
4091 #ifdef D2H_MINIDUMP
4092 case DNGL_TO_HOST_TRAP_ADDR_LEN:
4093 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, device_trap_debug_buffer_len);
4094 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4095 break;
4096 #endif /* D2H_MINIDUMP */
4097
4098 default:
4099 break;
4100 }
4101 if (MULTIBP_ENAB(bus->sih)) {
4102 dhd_bus_pcie_pwr_req_clear(bus);
4103 }
4104 } /* dhd_bus_cmn_writeshared */
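/*
 * Illustrative usage (hypothetical index value): publish the host's write
 * index for a submission ring into dongle memory.
 *
 *   uint16 w_idx = 10;
 *   dhd_bus_cmn_writeshared(bus, &w_idx, sizeof(w_idx), RING_WR_UPD, ringid);
 *
 * Multi-byte fields such as host addresses are converted to the dongle's
 * little-endian layout with HTOL64()/HTOL32()/HTOL16() before being written.
 */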
4105
4106 /** A snippet of dongle memory is shared between host and dongle */
4107 void
4108 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
4109 {
4110 ulong addr; /* dongle address */
4111
4112 if (MULTIBP_ENAB(bus->sih)) {
4113 dhd_bus_pcie_pwr_req(bus);
4114 }
4115 switch (type) {
4116 case RING_WR_UPD :
4117 addr = bus->ring_sh[ringid].ring_state_w;
4118 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4119 break;
4120
4121 case RING_RD_UPD :
4122 addr = bus->ring_sh[ringid].ring_state_r;
4123 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4124 break;
4125
4126 case TOTAL_LFRAG_PACKET_CNT :
4127 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
4128 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4129 break;
4130
4131 case H2D_MB_DATA:
4132 addr = bus->h2d_mb_data_ptr_addr;
4133 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4134 break;
4135
4136 case D2H_MB_DATA:
4137 addr = bus->d2h_mb_data_ptr_addr;
4138 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4139 break;
4140
4141 case MAX_HOST_RXBUFS :
4142 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
4143 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4144 break;
4145
4146 default :
4147 break;
4148 }
4149 if (MULTIBP_ENAB(bus->sih)) {
4150 dhd_bus_pcie_pwr_req_clear(bus);
4151 }
4152 }
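/*
 * Illustrative usage (hypothetical variable name): fetch the dongle's read
 * index for a ring; values are converted from the dongle's little-endian
 * layout to host byte order with LTOH16()/LTOH32().
 *
 *   uint16 r_idx;
 *   dhd_bus_cmn_readshared(bus, &r_idx, RING_RD_UPD, ringid);
 */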
4153
4154 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
4155 {
4156 return ((pciedev_shared_t*)bus->pcie_sh)->flags;
4157 }
4158
4159 void
4160 dhd_bus_clearcounts(dhd_pub_t *dhdp)
4161 {
4162 }
4163
4164 /**
4165 * @param params input buffer, NULL for 'set' operation.
4166 * @param plen length of 'params' buffer, 0 for 'set' operation.
4167 * @param arg output buffer
4168 */
4169 int
4170 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
4171 void *params, int plen, void *arg, int len, bool set)
4172 {
4173 dhd_bus_t *bus = dhdp->bus;
4174 const bcm_iovar_t *vi = NULL;
4175 int bcmerror = BCME_UNSUPPORTED;
4176 int val_size;
4177 uint32 actionid;
4178
4179 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4180
4181 ASSERT(name);
4182 ASSERT(len >= 0);
4183 if (!name || len < 0)
4184 return BCME_BADARG;
4185
4186 /* Get MUST have return space */
4187 ASSERT(set || (arg && len));
4188 if (!(set || (arg && len)))
4189 return BCME_BADARG;
4190
4191 /* Set does NOT take qualifiers */
4192 ASSERT(!set || (!params && !plen));
4193 if (!(!set || (!params && !plen)))
4194 return BCME_BADARG;
4195
4196 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4197 name, (set ? "set" : "get"), len, plen));
4198
4199 if (MULTIBP_ENAB(bus->sih)) {
4200 dhd_bus_pcie_pwr_req(bus);
4201 }
4202
4203 /* Look up var locally; if not found pass to host driver */
4204 if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
4205 goto exit;
4206 }
4207
4208 /* set up 'params' pointer in case this is a set command so that
4209 * the convenience int and bool code can be common to set and get
4210 */
4211 if (params == NULL) {
4212 params = arg;
4213 plen = len;
4214 }
4215
4216 if (vi->type == IOVT_VOID)
4217 val_size = 0;
4218 else if (vi->type == IOVT_BUFFER)
4219 val_size = len;
4220 else
4221 /* all other types are integer sized */
4222 val_size = sizeof(int);
4223
4224 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4225 bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
4226
4227 exit:
4228 /* In DEVRESET_QUIESCE/DEVRESET_ON,
4229 * the dongle is re-attached, which re-initializes the pwr_req_ref count to 0
4230 * and would cause a pwr_req_ref count mismatch (and a hang) in the pwr req
4231 * clear function. In this case, bypass the pwr req clear.
4232 */
4233 if (bcmerror == BCME_DNGL_DEVRESET) {
4234 bcmerror = BCME_OK;
4235 } else {
4236 if (MULTIBP_ENAB(bus->sih)) {
4237 dhd_bus_pcie_pwr_req_clear(bus);
4238 }
4239 }
4240 return bcmerror;
4241 } /* dhd_bus_iovar_op */
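/*
 * Dispatch note: the iovar name is first looked up in the local
 * dhdpcie_iovars table; if found, the request is forwarded to
 * dhdpcie_bus_doiovar() with an IOV_SVAL()/IOV_GVAL() actionid, otherwise
 * BCME_UNSUPPORTED is returned to the caller.
 */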
4242
4243 #ifdef BCM_BUZZZ
4244 #include <bcm_buzzz.h>
4245
4246 int
4247 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
4248 const int num_counters)
4249 {
4250 int bytes = 0;
4251 uint32 ctr;
4252 uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
4253 uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
4254
4255 /* Compute elapsed counter values per counter event type */
4256 for (ctr = 0U; ctr < num_counters; ctr++) {
4257 prev[ctr] = core[ctr];
4258 curr[ctr] = *log++;
4259 core[ctr] = curr[ctr]; /* saved for next log */
4260
4261 if (curr[ctr] < prev[ctr])
4262 delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
4263 else
4264 delta[ctr] = (curr[ctr] - prev[ctr]);
4265
4266 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
4267 }
4268
4269 return bytes;
4270 }
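/*
 * The per-event counters are treated as free-running 32-bit values; the
 * delta computation above accounts for a counter wrapping around between
 * two consecutive log entries.
 */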
4271
4272 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
4273 uint32 u32;
4274 uint8 u8[4];
4275 struct {
4276 uint8 cpicnt;
4277 uint8 exccnt;
4278 uint8 sleepcnt;
4279 uint8 lsucnt;
4280 };
4281 } cm3_cnts_t;
4282
4283 int
4284 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
4285 {
4286 int bytes = 0;
4287
4288 uint32 cyccnt, instrcnt;
4289 cm3_cnts_t cm3_cnts;
4290 uint8 foldcnt;
4291
4292 { /* 32bit cyccnt */
4293 uint32 curr, prev, delta;
4294 prev = core[0]; curr = *log++; core[0] = curr;
4295 if (curr < prev)
4296 delta = curr + (~0U - prev);
4297 else
4298 delta = (curr - prev);
4299
4300 bytes += sprintf(p + bytes, "%12u ", delta);
4301 cyccnt = delta;
4302 }
4303
4304 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
4305 int i;
4306 uint8 max8 = ~0;
4307 cm3_cnts_t curr, prev, delta;
4308 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
4309 for (i = 0; i < 4; i++) {
4310 if (curr.u8[i] < prev.u8[i])
4311 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
4312 else
4313 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
4314 bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
4315 }
4316 cm3_cnts.u32 = delta.u32;
4317 }
4318
4319 { /* Extract the foldcnt from arg0 */
4320 uint8 curr, prev, delta, max8 = ~0;
4321 bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
4322 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
4323 if (curr < prev)
4324 delta = curr + (max8 - prev);
4325 else
4326 delta = (curr - prev);
4327 bytes += sprintf(p + bytes, "%4u ", delta);
4328 foldcnt = delta;
4329 }
4330
4331 instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
4332 + cm3_cnts.u8[3]) + foldcnt;
4333 if (instrcnt > 0xFFFFFF00)
4334 bytes += sprintf(p + bytes, "[%10s] ", "~");
4335 else
4336 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
4337 return bytes;
4338 }
4339
4340 int
4341 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
4342 {
4343 int bytes = 0;
4344 bcm_buzzz_arg0_t arg0;
4345 static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
4346
4347 if (buzzz->counters == 6) {
4348 bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
4349 log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4350 } else {
4351 bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
4352 log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
4353 }
4354
4355 /* Dump the logged arguments using the registered formats */
4356 arg0.u32 = *log++;
4357
4358 switch (arg0.klog.args) {
4359 case 0:
4360 bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
4361 break;
4362 case 1:
4363 {
4364 uint32 arg1 = *log++;
4365 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
4366 break;
4367 }
4368 case 2:
4369 {
4370 uint32 arg1, arg2;
4371 arg1 = *log++; arg2 = *log++;
4372 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
4373 break;
4374 }
4375 case 3:
4376 {
4377 uint32 arg1, arg2, arg3;
4378 arg1 = *log++; arg2 = *log++; arg3 = *log++;
4379 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
4380 break;
4381 }
4382 case 4:
4383 {
4384 uint32 arg1, arg2, arg3, arg4;
4385 arg1 = *log++; arg2 = *log++;
4386 arg3 = *log++; arg4 = *log++;
4387 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
4388 break;
4389 }
4390 default:
4391 printf("Maximum of four arguments supported\n");
4392 break;
4393 }
4394
4395 bytes += sprintf(p + bytes, "\n");
4396
4397 return bytes;
4398 }
4399
4400 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
4401 {
4402 int i;
4403 uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
4404 void * log;
4405
4406 for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
4407 core[i] = 0;
4408 }
4409
4410 log_sz = buzzz_p->log_sz;
4411
4412 part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
4413
4414 if (buzzz_p->wrap == TRUE) {
4415 part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
4416 total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
4417 } else {
4418 part2 = 0U;
4419 total = buzzz_p->count;
4420 }
4421
4422 if (total == 0U) {
4423 printf("bcm_buzzz_dump total<%u> done\n", total);
4424 return;
4425 } else {
4426 printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
4427 total, part2, part1);
4428 }
4429
4430 if (part2) { /* with wrap */
4431 log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
4432 while (part2--) { /* from cur to end : part2 */
4433 p[0] = '\0';
4434 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4435 printf("%s", p);
4436 log = (void*)((size_t)log + buzzz_p->log_sz);
4437 }
4438 }
4439
4440 log = (void*)buffer_p;
4441 while (part1--) {
4442 p[0] = '\0';
4443 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4444 printf("%s", p);
4445 log = (void*)((size_t)log + buzzz_p->log_sz);
4446 }
4447
4448 printf("bcm_buzzz_dump done.\n");
4449 }
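/*
 * When the trace buffer has wrapped, the log is printed in two parts: first
 * from the current write pointer to the end of the buffer (the oldest
 * entries), then from the start of the buffer up to the write pointer (the
 * most recent entries).
 */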
4450
4451 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
4452 {
4453 bcm_buzzz_t * buzzz_p = NULL;
4454 void * buffer_p = NULL;
4455 char * page_p = NULL;
4456 pciedev_shared_t *sh;
4457 int ret = 0;
4458
4459 if (bus->dhd->busstate != DHD_BUS_DATA) {
4460 return BCME_UNSUPPORTED;
4461 }
4462 if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
4463 printf("Page memory allocation failure\n");
4464 goto done;
4465 }
4466 if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
4467 printf("BCM BUZZZ memory allocation failure\n");
4468 goto done;
4469 }
4470
4471 ret = dhdpcie_readshared(bus);
4472 if (ret < 0) {
4473 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
4474 goto done;
4475 }
4476
4477 sh = bus->pcie_sh;
4478
4479 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
4480
4481 if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
4482
4483 dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
4484 (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
4485
4486 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
4487 "count<%u> status<%u> wrap<%u>\n"
4488 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
4489 (int)sh->buzz_dbg_ptr,
4490 (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
4491 buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
4492 buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
4493 buzzz_p->buffer_sz, buzzz_p->log_sz);
4494
4495 if (buzzz_p->count == 0) {
4496 printf("Empty dongle BUZZZ trace\n\n");
4497 goto done;
4498 }
4499
4500 /* Allocate memory for trace buffer and format strings */
4501 buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
4502 if (buffer_p == NULL) {
4503 printf("Buffer memory allocation failure\n");
4504 goto done;
4505 }
4506
4507 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
4508 dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
4509 (uint8 *)buffer_p, buzzz_p->buffer_sz);
4510
4511 /* Process and display the trace using formatted output */
4512
4513 {
4514 int ctr;
4515 for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
4516 printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
4517 }
4518 printf("<code execution point>\n");
4519 }
4520
4521 dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
4522
4523 printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
4524
4525 MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
4526 }
4527
4528 done:
4529
4530 if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
4531 if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
4532 if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
4533
4534 return BCME_OK;
4535 }
4536 #endif /* BCM_BUZZZ */
4537
4538 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
4539 ((sih)->buscoretype == PCIE2_CORE_ID))
4540
4541 #define PCIE_FLR_CAPAB_BIT 28
4542 #define PCIE_FUNCTION_LEVEL_RESET_BIT 15
4543
4544 /* Change delays only for QT HW; FPGA and silicon use the same delay */
4545 #ifdef BCMQT_HW
4546 #define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
4547 #define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
4548 #else
4549 #define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */
4550 #define DHD_SSRESET_STATUS_RETRY_DELAY 40u
4551 #endif // endif
4552 #define DHD_SSRESET_STATUS_RETRIES 50u
4553
4554 int
4555 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
4556 {
4557 bool flr_capab;
4558 uint val;
4559 int retry = 0;
4560
4561 DHD_ERROR(("******** Perform FLR ********\n"));
4562
4563 /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
4564 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
4565 flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
4566 DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
4567 PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
4568 if (!flr_capab) {
4569 DHD_ERROR(("Chip does not support FLR\n"));
4570 return BCME_UNSUPPORTED;
4571 }
4572
4573 /* Save pcie config space */
4574 DHD_ERROR(("Save Pcie Config Space\n"));
4575 DHD_PCIE_CONFIG_SAVE(bus);
4576
4577 /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
4578 DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4579 PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
4580 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
4581 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4582 val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
4583 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4584 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
4585
4586 /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
4587 DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
4588 OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
4589
4590 if (force_fail) {
4591 DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
4592 PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
4593 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
4594 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
4595 val));
4596 val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
4597 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
4598 val));
4599 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
4600
4601 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
4602 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
4603 val));
4604 }
4605
4606 /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
4607 DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4608 PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
4609 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
4610 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4611 val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
4612 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4613 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
4614
4615 /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
4616 DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
4617 "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
4618 do {
4619 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
4620 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
4621 PCIE_CFG_SUBSYSTEM_CONTROL, val));
4622 val = val & (1 << PCIE_SSRESET_STATUS_BIT);
4623 OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
4624 } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
4625
4626 if (val) {
4627 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4628 PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
4629 /* User has to fire the IOVAR again, if force_fail is needed */
4630 if (force_fail) {
4631 bus->flr_force_fail = FALSE;
4632 DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
4633 }
4634 return BCME_ERROR;
4635 }
4636
4637 /* Restore pcie config space */
4638 DHD_ERROR(("Restore Pcie Config Space\n"));
4639 DHD_PCIE_CONFIG_RESTORE(bus);
4640
4641 DHD_ERROR(("******** FLR Succeeded ********\n"));
4642
4643 return BCME_OK;
4644 }
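/*
 * FLR sequence summary: verify the FLR capability bit in the PCIe device
 * capability register, save PCIe config space, set the initiate-FLR bit in
 * the device control register, wait DHD_FUNCTION_LEVEL_RESET_DELAY msec,
 * clear the bit again, poll the subsystem control register until the reset
 * status bit clears, and finally restore PCIe config space.
 */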
4645
4646 #ifdef DHD_USE_BP_RESET
4647 #define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
4648
4649 #define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
4650 #define DHD_BP_RESET_STATUS_RETRIES 50u
4651
4652 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
4653 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
4654 int
4655 dhd_bus_perform_bp_reset(struct dhd_bus *bus)
4656 {
4657 uint val;
4658 int retry = 0;
4659 uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
4660 int ret = BCME_OK;
4661 bool cond;
4662
4663 DHD_ERROR(("******** Perform BP reset ********\n"));
4664
4665 /* Disable ASPM */
4666 DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
4667 PCIECFGREG_LINK_STATUS_CTRL));
4668 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
4669 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4670 val = val & (~PCIE_ASPM_ENAB);
4671 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4672 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
4673
4674 /* wait for delay usec */
4675 DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
4676 OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
4677
4678 /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
4679 DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
4680 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
4681 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
4682 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
4683 val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
4684 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
4685 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
4686
4687 /* Wait till the backplane reset is ASSERTED, i.e.
4688 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
4689 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
4690 * otherwise the DAR register will still read the previous, stale value.
4691 */
4692 DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
4693 "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
4694 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
4695 do {
4696 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
4697 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
4698 cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
4699 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
4700 } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
4701
4702 if (cond) {
4703 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4704 PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
4705 ret = BCME_ERROR;
4706 goto aspm_enab;
4707 }
4708
4709 /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
4710 DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
4711 "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
4712 PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
4713 do {
4714 val = si_corereg(bus->sih, bus->sih->buscoreidx,
4715 dar_clk_ctrl_status_reg, 0, 0);
4716 DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
4717 dar_clk_ctrl_status_reg, val));
4718 cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
4719 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
4720 } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
4721
4722 if (cond) {
4723 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4724 dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
4725 ret = BCME_ERROR;
4726 }
4727
4728 aspm_enab:
4729 /* Enable ASPM */
4730 DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
4731 PCIECFGREG_LINK_STATUS_CTRL));
4732 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
4733 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4734 val = val | (PCIE_ASPM_L1_ENAB);
4735 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4736 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
4737
4738 DHD_ERROR(("******** BP reset Succeeded ********\n"));
4739
4740 return ret;
4741 }
4742 #endif /* DHD_USE_BP_RESET */
4743
4744 int
4745 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
4746 {
4747 dhd_bus_t *bus = dhdp->bus;
4748 int bcmerror = 0;
4749 unsigned long flags;
4750 unsigned long flags_bus;
4751 #ifdef CONFIG_ARCH_MSM
4752 int retry = POWERUP_MAX_RETRY;
4753 #endif /* CONFIG_ARCH_MSM */
4754
4755 if (flag == TRUE) { /* Turn off WLAN */
4756 /* Removing Power */
4757 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
4758
4759 bus->dhd->up = FALSE;
4760
4761 /* wait for other contexts to finish -- if required a call
4762 * to OSL_DELAY for 1s can be added to give other contexts
4763 * a chance to finish
4764 */
4765 dhdpcie_advertise_bus_cleanup(bus->dhd);
4766
4767 if (bus->dhd->busstate != DHD_BUS_DOWN) {
4768 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4769 atomic_set(&bus->dhd->block_bus, TRUE);
4770 dhd_flush_rx_tx_wq(bus->dhd);
4771 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4772
4773 #ifdef BCMPCIE_OOB_HOST_WAKE
4774 /* Clean up any pending host wake IRQ */
4775 dhd_bus_oob_intr_set(bus->dhd, FALSE);
4776 dhd_bus_oob_intr_unregister(bus->dhd);
4777 #endif /* BCMPCIE_OOB_HOST_WAKE */
4778 dhd_os_wd_timer(dhdp, 0);
4779 dhd_bus_stop(bus, TRUE);
4780 if (bus->intr) {
4781 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
4782 dhdpcie_bus_intr_disable(bus);
4783 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
4784 dhdpcie_free_irq(bus);
4785 }
4786 dhd_deinit_bus_lock(bus);
4787 dhd_bus_release_dongle(bus);
4788 dhdpcie_bus_free_resource(bus);
4789 bcmerror = dhdpcie_bus_disable_device(bus);
4790 if (bcmerror) {
4791 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4792 __FUNCTION__, bcmerror));
4793 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4794 atomic_set(&bus->dhd->block_bus, FALSE);
4795 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4796 }
4797 /* Clean up protocol data after the Bus Master Enable bit is cleared,
4798 * so that the host can safely unmap DMA and remove the allocated buffers
4799 * from the PKTID MAP. On some Application Processors, the System MMU
4800 * triggers a kernel panic when it detects an attempt to access
4801 * DMA-unmapped memory from a device that sits behind the System MMU.
4802 * Such a panic is possible because the dongle may still access
4803 * DMA-unmapped memory after dhd_prot_reset() has been called, if the
4804 * device is not disabled first.
4805 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4806 * must be located after the dhdpcie_bus_disable_device() call.
4807 */
4808 dhd_prot_reset(dhdp);
4809 dhd_clear(dhdp);
4810 #ifdef CONFIG_ARCH_MSM
4811 bcmerror = dhdpcie_bus_clock_stop(bus);
4812 if (bcmerror) {
4813 DHD_ERROR(("%s: host clock stop failed: %d\n",
4814 __FUNCTION__, bcmerror));
4815 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4816 atomic_set(&bus->dhd->block_bus, FALSE);
4817 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4818 goto done;
4819 }
4820 #endif /* CONFIG_ARCH_MSM */
4821 DHD_GENERAL_LOCK(bus->dhd, flags);
4822 bus->dhd->busstate = DHD_BUS_DOWN;
4823 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4824 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4825 atomic_set(&bus->dhd->block_bus, FALSE);
4826 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4827 } else {
4828 if (bus->intr) {
4829 dhdpcie_free_irq(bus);
4830 }
4831 #ifdef BCMPCIE_OOB_HOST_WAKE
4832 /* Clean up any pending host wake IRQ */
4833 dhd_bus_oob_intr_set(bus->dhd, FALSE);
4834 dhd_bus_oob_intr_unregister(bus->dhd);
4835 #endif /* BCMPCIE_OOB_HOST_WAKE */
4836 dhd_dpc_kill(bus->dhd);
4837 if (!bus->no_bus_init) {
4838 dhd_bus_release_dongle(bus);
4839 dhdpcie_bus_free_resource(bus);
4840 bcmerror = dhdpcie_bus_disable_device(bus);
4841 if (bcmerror) {
4842 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4843 __FUNCTION__, bcmerror));
4844 }
4845
4846 /* Clean up protocol data after the Bus Master Enable bit is cleared,
4847 * so that the host can safely unmap DMA and remove the allocated
4848 * buffers from the PKTID MAP. On some Application Processors, the
4849 * System MMU triggers a kernel panic when it detects an attempt to
4850 * access DMA-unmapped memory from a device that sits behind the
4851 * System MMU.
4852 * Such a panic is possible because the dongle may still access
4853 * DMA-unmapped memory after dhd_prot_reset() has been called, if the
4854 * device is not disabled first.
4855 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4856 * must be located after the dhdpcie_bus_disable_device() call.
4857 */
4858 dhd_prot_reset(dhdp);
4859 dhd_clear(dhdp);
4860 } else {
4861 bus->no_bus_init = FALSE;
4862 }
4863 #ifdef CONFIG_ARCH_MSM
4864 bcmerror = dhdpcie_bus_clock_stop(bus);
4865 if (bcmerror) {
4866 DHD_ERROR(("%s: host clock stop failed: %d\n",
4867 __FUNCTION__, bcmerror));
4868 goto done;
4869 }
4870 #endif /* CONFIG_ARCH_MSM */
4871 }
4872
4873 bus->dhd->dongle_reset = TRUE;
4874 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
4875
4876 } else { /* Turn on WLAN */
4877 if (bus->dhd->busstate == DHD_BUS_DOWN) {
4878 /* Powering On */
4879 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
4880 #ifdef CONFIG_ARCH_MSM
4881 while (--retry) {
4882 bcmerror = dhdpcie_bus_clock_start(bus);
4883 if (!bcmerror) {
4884 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
4885 __FUNCTION__));
4886 break;
4887 } else {
4888 OSL_SLEEP(10);
4889 }
4890 }
4891
4892 if (bcmerror && !retry) {
4893 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
4894 __FUNCTION__, bcmerror));
4895 goto done;
4896 }
4897 #endif /* CONFIG_ARCH_MSM */
4898 bus->is_linkdown = 0;
4899 #ifdef SUPPORT_LINKDOWN_RECOVERY
4900 bus->read_shm_fail = FALSE;
4901 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4902 bcmerror = dhdpcie_bus_enable_device(bus);
4903 if (bcmerror) {
4904 DHD_ERROR(("%s: host configuration restore failed: %d\n",
4905 __FUNCTION__, bcmerror));
4906 goto done;
4907 }
4908
4909 bcmerror = dhdpcie_bus_alloc_resource(bus);
4910 if (bcmerror) {
4911 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
4912 __FUNCTION__, bcmerror));
4913 goto done;
4914 }
4915
4916 bcmerror = dhdpcie_bus_dongle_attach(bus);
4917 if (bcmerror) {
4918 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
4919 __FUNCTION__, bcmerror));
4920 goto done;
4921 }
4922
4923 bcmerror = dhd_bus_request_irq(bus);
4924 if (bcmerror) {
4925 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
4926 __FUNCTION__, bcmerror));
4927 goto done;
4928 }
4929
4930 bus->dhd->dongle_reset = FALSE;
4931
4932 bcmerror = dhd_bus_start(dhdp);
4933 if (bcmerror) {
4934 DHD_ERROR(("%s: dhd_bus_start: %d\n",
4935 __FUNCTION__, bcmerror));
4936 goto done;
4937 }
4938
4939 bus->dhd->up = TRUE;
4940 /* Re-enabling watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
4941 if (bus->dhd->dhd_watchdog_ms_backup) {
4942 DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
4943 __FUNCTION__));
4944 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
4945 }
4946 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
4947 } else {
4948 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
4949 goto done;
4950 }
4951 }
4952
4953 done:
4954 if (bcmerror) {
4955 DHD_GENERAL_LOCK(bus->dhd, flags);
4956 bus->dhd->busstate = DHD_BUS_DOWN;
4957 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4958 }
4959 return bcmerror;
4960 }
4961
4962 static int
4963 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
4964 {
4965 int h2d_support, d2h_support;
4966
4967 d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
4968 h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
4969 return (d2h_support | (h2d_support << 1));
4970
4971 }
4972 int
4973 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
4974 {
4975 int bcmerror = 0;
4976 /* Can change it only during initialization/FW download */
4977 if (dhd->busstate == DHD_BUS_DOWN) {
4978 if ((int_val > 3) || (int_val < 0)) {
4979 DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
4980 bcmerror = BCME_BADARG;
4981 } else {
4982 dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
4983 dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
4984 dhd->dma_ring_upd_overwrite = TRUE;
4985 }
4986 } else {
4987 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4988 __FUNCTION__));
4989 bcmerror = BCME_NOTDOWN;
4990 }
4991
4992 return bcmerror;
4993
4994 }
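/*
 * The DMA ring index mode is a 2-bit encoding: bit 0 enables D2H ring index
 * DMA updates, bit 1 enables H2D ring index DMA updates. For example
 * (hypothetical call), enabling both directions before firmware download:
 *
 *   dhdpcie_set_dma_ring_indices(dhd, 3);
 *
 * The setter only accepts a new value while the bus is down; otherwise it
 * returns BCME_NOTDOWN.
 */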
4995 /**
4996 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
4997 *
4998 * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
4999 * @param params input buffer
5000 * @param plen length in [bytes] of input buffer 'params'
5001 * @param arg output buffer
5002 * @param len length in [bytes] of output buffer 'arg'
5003 */
5004 static int
5005 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
5006 void *params, int plen, void *arg, int len, int val_size)
5007 {
5008 int bcmerror = 0;
5009 int32 int_val = 0;
5010 int32 int_val2 = 0;
5011 int32 int_val3 = 0;
5012 bool bool_val = 0;
5013
5014 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
5015 __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
5016
5017 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
5018 goto exit;
5019
5020 if (plen >= (int)sizeof(int_val))
5021 bcopy(params, &int_val, sizeof(int_val));
5022
5023 if (plen >= (int)sizeof(int_val) * 2)
5024 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
5025
5026 if (plen >= (int)sizeof(int_val) * 3)
5027 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
5028
5029 bool_val = (int_val != 0) ? TRUE : FALSE;
5030
5031 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5032 if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
5033 actionid == IOV_GVAL(IOV_DEVRESET))) {
5034 bcmerror = BCME_NOTREADY;
5035 goto exit;
5036 }
5037
5038 switch (actionid) {
5039
5040 case IOV_SVAL(IOV_VARS):
5041 bcmerror = dhdpcie_downloadvars(bus, arg, len);
5042 break;
5043 case IOV_SVAL(IOV_PCIE_LPBK):
5044 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
5045 break;
5046
5047 case IOV_SVAL(IOV_PCIE_DMAXFER): {
5048 int int_val4 = 0;
5049 int wait = 0;
5050 int core_num = 0;
5051 if (plen >= (int)sizeof(int_val) * 4) {
5052 bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
5053 &int_val4, sizeof(int_val4));
5054 }
5055 if (plen >= (int)sizeof(int_val) * 5) {
5056 bcopy((void*)((uintptr)params + 4 * sizeof(int_val)),
5057 &wait, sizeof(wait));
5058 }
5059 if (plen >= (int)sizeof(core_num) * 6) {
5060 bcopy((void*)((uintptr)params + 5 * sizeof(core_num)),
5061 &core_num, sizeof(core_num));
5062 }
5063 bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3,
5064 int_val4, core_num, wait);
5065 if (wait && bcmerror >= 0) {
5066 /* get the status of the dma transfer */
5067 int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd);
5068 bcopy(&int_val4, params, sizeof(int_val));
5069 }
5070 break;
5071 }
5072
5073 case IOV_GVAL(IOV_PCIE_DMAXFER): {
5074 int dma_status = 0;
5075 dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd);
5076 bcopy(&dma_status, arg, val_size);
5077 bcmerror = BCME_OK;
5078 break;
5079 }
5080
5081 case IOV_GVAL(IOV_PCIE_SUSPEND):
5082 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
5083 bcopy(&int_val, arg, val_size);
5084 break;
5085
5086 case IOV_SVAL(IOV_PCIE_SUSPEND):
5087 if (bool_val) { /* Suspend */
5088 int ret;
5089 unsigned long flags;
5090
5091 /*
5092 * If some other context is busy, wait until they are done,
5093 * before starting suspend
5094 */
5095 ret = dhd_os_busbusy_wait_condition(bus->dhd,
5096 &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
5097 if (ret == 0) {
5098 DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
5099 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
5100 return BCME_BUSY;
5101 }
5102
5103 DHD_GENERAL_LOCK(bus->dhd, flags);
5104 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
5105 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5106 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5107 dhdpcie_bus_suspend(bus, TRUE, TRUE);
5108 #else
5109 dhdpcie_bus_suspend(bus, TRUE);
5110 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5111
5112 DHD_GENERAL_LOCK(bus->dhd, flags);
5113 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
5114 dhd_os_busbusy_wake(bus->dhd);
5115 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5116 } else { /* Resume */
5117 unsigned long flags;
5118 DHD_GENERAL_LOCK(bus->dhd, flags);
5119 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
5120 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5121
5122 dhdpcie_bus_suspend(bus, FALSE);
5123
5124 DHD_GENERAL_LOCK(bus->dhd, flags);
5125 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
5126 dhd_os_busbusy_wake(bus->dhd);
5127 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5128 }
5129 break;
5130
5131 case IOV_GVAL(IOV_MEMSIZE):
5132 int_val = (int32)bus->ramsize;
5133 bcopy(&int_val, arg, val_size);
5134 break;
5135
5136 /* Debug related. Dumps core registers or one of the dongle memory */
5137 case IOV_GVAL(IOV_DUMP_DONGLE):
5138 {
5139 dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
5140 dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
5141 uint32 *p = ddo->val;
5142 const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
5143
5144 if (plen < sizeof(ddi) || len < sizeof(ddo)) {
5145 bcmerror = BCME_BADARG;
5146 break;
5147 }
5148
5149 switch (ddi.type) {
5150 case DUMP_DONGLE_COREREG:
5151 ddo->n_bytes = 0;
5152
5153 if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
5154 break; // beyond last core: core enumeration ended
5155 }
5156
5157 ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
5158 ddo->address += ddi.offset; // BP address at which this dump starts
5159
5160 ddo->id = si_coreid(bus->sih);
5161 ddo->rev = si_corerev(bus->sih);
5162
5163 while (ddi.offset < max_offset &&
5164 sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
5165 *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
5166 ddi.offset += sizeof(uint32);
5167 ddo->n_bytes += sizeof(uint32);
5168 }
5169 break;
5170 default:
5171 // TODO: implement d11 SHM/TPL dumping
5172 bcmerror = BCME_BADARG;
5173 break;
5174 }
5175 break;
5176 }
5177
5178 /* Debug related. Returns a string with dongle capabilities */
5179 case IOV_GVAL(IOV_DNGL_CAPS):
5180 {
5181 strncpy(arg, bus->dhd->fw_capabilities,
5182 MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
5183 ((char*)arg)[len - 1] = '\0';
5184 break;
5185 }
5186
5187 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
5188 case IOV_SVAL(IOV_GDB_SERVER):
5189 /* debugger_*() functions may sleep, so cannot hold spinlock */
5190 DHD_PERIM_UNLOCK(bus->dhd);
5191 if (int_val > 0) {
5192 debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
5193 } else {
5194 debugger_close();
5195 }
5196 DHD_PERIM_LOCK(bus->dhd);
5197 break;
5198 #endif /* DEBUGGER || DHD_DSCOPE */
5199
5200 #ifdef BCM_BUZZZ
5201 /* Dump dongle side buzzz trace to console */
5202 case IOV_GVAL(IOV_BUZZZ_DUMP):
5203 bcmerror = dhd_buzzz_dump_dngl(bus);
5204 break;
5205 #endif /* BCM_BUZZZ */
5206
5207 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
5208 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
5209 break;
5210
5211 case IOV_GVAL(IOV_RAMSIZE):
5212 int_val = (int32)bus->ramsize;
5213 bcopy(&int_val, arg, val_size);
5214 break;
5215
5216 case IOV_SVAL(IOV_RAMSIZE):
5217 bus->ramsize = int_val;
5218 bus->orig_ramsize = int_val;
5219 break;
5220
5221 case IOV_GVAL(IOV_RAMSTART):
5222 int_val = (int32)bus->dongle_ram_base;
5223 bcopy(&int_val, arg, val_size);
5224 break;
5225
5226 case IOV_GVAL(IOV_CC_NVMSHADOW):
5227 {
5228 struct bcmstrbuf dump_b;
5229
5230 bcm_binit(&dump_b, arg, len);
5231 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
5232 break;
5233 }
5234
5235 case IOV_GVAL(IOV_SLEEP_ALLOWED):
5236 bool_val = bus->sleep_allowed;
5237 bcopy(&bool_val, arg, val_size);
5238 break;
5239
5240 case IOV_SVAL(IOV_SLEEP_ALLOWED):
5241 bus->sleep_allowed = bool_val;
5242 break;
5243
5244 case IOV_GVAL(IOV_DONGLEISOLATION):
5245 int_val = bus->dhd->dongle_isolation;
5246 bcopy(&int_val, arg, val_size);
5247 break;
5248
5249 case IOV_SVAL(IOV_DONGLEISOLATION):
5250 bus->dhd->dongle_isolation = bool_val;
5251 break;
5252
5253 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
5254 int_val = bus->ltrsleep_on_unload;
5255 bcopy(&int_val, arg, val_size);
5256 break;
5257
5258 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
5259 bus->ltrsleep_on_unload = bool_val;
5260 break;
5261
5262 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
5263 {
5264 struct bcmstrbuf dump_b;
5265 bcm_binit(&dump_b, arg, len);
5266 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
5267 break;
5268 }
5269 case IOV_GVAL(IOV_DMA_RINGINDICES):
5270 {
5271 int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
5272 bcopy(&int_val, arg, sizeof(int_val));
5273 break;
5274 }
5275 case IOV_SVAL(IOV_DMA_RINGINDICES):
5276 bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
5277 break;
5278
5279 case IOV_GVAL(IOV_METADATA_DBG):
5280 int_val = dhd_prot_metadata_dbg_get(bus->dhd);
5281 bcopy(&int_val, arg, val_size);
5282 break;
5283 case IOV_SVAL(IOV_METADATA_DBG):
5284 dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
5285 break;
5286
5287 case IOV_GVAL(IOV_RX_METADATALEN):
5288 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
5289 bcopy(&int_val, arg, val_size);
5290 break;
5291
5292 case IOV_SVAL(IOV_RX_METADATALEN):
5293 if (int_val > 64) {
5294 bcmerror = BCME_BUFTOOLONG;
5295 break;
5296 }
5297 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
5298 break;
5299
5300 case IOV_SVAL(IOV_TXP_THRESHOLD):
5301 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
5302 break;
5303
5304 case IOV_GVAL(IOV_TXP_THRESHOLD):
5305 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
5306 bcopy(&int_val, arg, val_size);
5307 break;
5308
5309 case IOV_SVAL(IOV_DB1_FOR_MB):
5310 if (int_val)
5311 bus->db1_for_mb = TRUE;
5312 else
5313 bus->db1_for_mb = FALSE;
5314 break;
5315
5316 case IOV_GVAL(IOV_DB1_FOR_MB):
5317 if (bus->db1_for_mb)
5318 int_val = 1;
5319 else
5320 int_val = 0;
5321 bcopy(&int_val, arg, val_size);
5322 break;
5323
5324 case IOV_GVAL(IOV_TX_METADATALEN):
5325 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
5326 bcopy(&int_val, arg, val_size);
5327 break;
5328
5329 case IOV_SVAL(IOV_TX_METADATALEN):
5330 if (int_val > 64) {
5331 bcmerror = BCME_BUFTOOLONG;
5332 break;
5333 }
5334 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
5335 break;
5336
5337 case IOV_SVAL(IOV_DEVRESET):
5338 switch (int_val) {
5339 case DHD_BUS_DEVRESET_ON:
5340 bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5341 break;
5342 case DHD_BUS_DEVRESET_OFF:
5343 bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5344 break;
5345 case DHD_BUS_DEVRESET_FLR:
5346 bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
5347 break;
5348 case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
5349 bus->flr_force_fail = TRUE;
5350 break;
5351 default:
5352 DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
5353 break;
5354 }
5355 break;
5356 case IOV_SVAL(IOV_FORCE_FW_TRAP):
5357 if (bus->dhd->busstate == DHD_BUS_DATA)
5358 dhdpcie_fw_trap(bus);
5359 else {
5360 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
5361 bcmerror = BCME_NOTUP;
5362 }
5363 break;
5364 case IOV_GVAL(IOV_FLOW_PRIO_MAP):
5365 int_val = bus->dhd->flow_prio_map_type;
5366 bcopy(&int_val, arg, val_size);
5367 break;
5368
5369 case IOV_SVAL(IOV_FLOW_PRIO_MAP):
5370 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
5371 bcopy(&int_val, arg, val_size);
5372 break;
5373
5374 #ifdef DHD_PCIE_RUNTIMEPM
5375 case IOV_GVAL(IOV_IDLETIME):
5376 int_val = bus->idletime;
5377 bcopy(&int_val, arg, val_size);
5378 break;
5379
5380 case IOV_SVAL(IOV_IDLETIME):
5381 if (int_val < 0) {
5382 bcmerror = BCME_BADARG;
5383 } else {
5384 bus->idletime = int_val;
5385 if (bus->idletime) {
5386 DHD_ENABLE_RUNTIME_PM(bus->dhd);
5387 } else {
5388 DHD_DISABLE_RUNTIME_PM(bus->dhd);
5389 }
5390 }
5391 break;
5392 #endif /* DHD_PCIE_RUNTIMEPM */
5393
5394 case IOV_GVAL(IOV_TXBOUND):
5395 int_val = (int32)dhd_txbound;
5396 bcopy(&int_val, arg, val_size);
5397 break;
5398
5399 case IOV_SVAL(IOV_TXBOUND):
5400 dhd_txbound = (uint)int_val;
5401 break;
5402
5403 case IOV_SVAL(IOV_H2D_MAILBOXDATA):
5404 dhdpcie_send_mb_data(bus, (uint)int_val);
5405 break;
5406
5407 case IOV_SVAL(IOV_INFORINGS):
5408 dhd_prot_init_info_rings(bus->dhd);
5409 break;
5410
5411 case IOV_SVAL(IOV_H2D_PHASE):
5412 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5413 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5414 __FUNCTION__));
5415 bcmerror = BCME_NOTDOWN;
5416 break;
5417 }
5418 if (int_val)
5419 bus->dhd->h2d_phase_supported = TRUE;
5420 else
5421 bus->dhd->h2d_phase_supported = FALSE;
5422 break;
5423
5424 case IOV_GVAL(IOV_H2D_PHASE):
5425 int_val = (int32) bus->dhd->h2d_phase_supported;
5426 bcopy(&int_val, arg, val_size);
5427 break;
5428
5429 case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5430 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5431 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5432 __FUNCTION__));
5433 bcmerror = BCME_NOTDOWN;
5434 break;
5435 }
5436 if (int_val)
5437 bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
5438 else
5439 bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
5440 break;
5441
5442 case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5443 int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
5444 bcopy(&int_val, arg, val_size);
5445 break;
5446
5447 case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
5448 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5449 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5450 __FUNCTION__));
5451 bcmerror = BCME_NOTDOWN;
5452 break;
5453 }
5454 dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
5455 break;
5456
5457 case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
5458 int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
5459 bcopy(&int_val, arg, val_size);
5460 break;
5461
5462 case IOV_GVAL(IOV_RXBOUND):
5463 int_val = (int32)dhd_rxbound;
5464 bcopy(&int_val, arg, val_size);
5465 break;
5466
5467 case IOV_SVAL(IOV_RXBOUND):
5468 dhd_rxbound = (uint)int_val;
5469 break;
5470
5471 case IOV_GVAL(IOV_TRAPDATA):
5472 {
5473 struct bcmstrbuf dump_b;
5474 bcm_binit(&dump_b, arg, len);
5475 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
5476 break;
5477 }
5478
5479 case IOV_GVAL(IOV_TRAPDATA_RAW):
5480 {
5481 struct bcmstrbuf dump_b;
5482 bcm_binit(&dump_b, arg, len);
5483 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
5484 break;
5485 }
5486 case IOV_SVAL(IOV_HANGREPORT):
5487 bus->dhd->hang_report = bool_val;
5488 DHD_ERROR(("%s: Set hang_report as %d\n",
5489 __FUNCTION__, bus->dhd->hang_report));
5490 break;
5491
5492 case IOV_GVAL(IOV_HANGREPORT):
5493 int_val = (int32)bus->dhd->hang_report;
5494 bcopy(&int_val, arg, val_size);
5495 break;
5496
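/*
 * CTO prevention is only supported on PCIe core rev >= 19; on rev 19 it is
 * additionally refused when the link runs at Gen1 speed. The link speed is
 * read back through the configaddr/configdata register pair below.
 */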
5497 case IOV_SVAL(IOV_CTO_PREVENTION):
5498 {
5499 uint32 pcie_lnkst;
5500
5501 if (bus->sih->buscorerev < 19) {
5502 bcmerror = BCME_UNSUPPORTED;
5503 break;
5504 }
5505 si_corereg(bus->sih, bus->sih->buscoreidx,
5506 OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
5507
5508 pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
5509 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
5510
5511 if ((bus->sih->buscorerev == 19) &&
5512 (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
5513 PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
5514 bcmerror = BCME_UNSUPPORTED;
5515 break;
5516 }
5517 bus->cto_enable = bool_val;
5518 dhdpcie_cto_init(bus, bus->cto_enable);
5519 DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
5520 __FUNCTION__, bus->cto_enable));
5521 }
5522 break;
5523
5524 case IOV_GVAL(IOV_CTO_PREVENTION):
5525 if (bus->sih->buscorerev < 19) {
5526 bcmerror = BCME_UNSUPPORTED;
5527 break;
5528 }
5529 int_val = (int32)bus->cto_enable;
5530 bcopy(&int_val, arg, val_size);
5531 break;
5532
5533 case IOV_SVAL(IOV_CTO_THRESHOLD):
5534 {
5535 if (bus->sih->buscorerev < 19) {
5536 bcmerror = BCME_UNSUPPORTED;
5537 break;
5538 }
5539 bus->cto_threshold = (uint32)int_val;
5540 }
5541 break;
5542
5543 case IOV_GVAL(IOV_CTO_THRESHOLD):
5544 if (bus->sih->buscorerev < 19) {
5545 bcmerror = BCME_UNSUPPORTED;
5546 break;
5547 }
5548 if (bus->cto_threshold)
5549 int_val = (int32)bus->cto_threshold;
5550 else
5551 int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
5552
5553 bcopy(&int_val, arg, val_size);
5554 break;
5555
5556 case IOV_SVAL(IOV_PCIE_WD_RESET):
5557 if (bool_val) {
5558 uint32 wd_en = (bus->sih->buscorerev == 66) ? WD_SSRESET_PCIE_F0_EN :
5559 (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
5560 pcie_watchdog_reset(bus->osh, bus->sih,
5561 WD_ENABLE_MASK, wd_en);
5562 }
5563 break;
5564
5565 case IOV_GVAL(IOV_IDMA_ENABLE):
5566 int_val = bus->idma_enabled;
5567 bcopy(&int_val, arg, val_size);
5568 break;
5569 case IOV_SVAL(IOV_IDMA_ENABLE):
5570 bus->idma_enabled = (bool)int_val;
5571 break;
5572 case IOV_GVAL(IOV_IFRM_ENABLE):
5573 int_val = bus->ifrm_enabled;
5574 bcopy(&int_val, arg, val_size);
5575 break;
5576 case IOV_SVAL(IOV_IFRM_ENABLE):
5577 bus->ifrm_enabled = (bool)int_val;
5578 break;
5579 case IOV_GVAL(IOV_CLEAR_RING):
5580 bcopy(&int_val, arg, val_size);
5581 dhd_flow_rings_flush(bus->dhd, 0);
5582 break;
5583 case IOV_GVAL(IOV_DAR_ENABLE):
5584 int_val = bus->dar_enabled;
5585 bcopy(&int_val, arg, val_size);
5586 break;
5587 case IOV_SVAL(IOV_DAR_ENABLE):
5588 bus->dar_enabled = (bool)int_val;
5589 break;
5590 #ifdef D2H_MINIDUMP
5591 case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
5592 int_val = bus->d2h_minidump_override;
5593 bcopy(&int_val, arg, val_size);
5594 break;
5595 case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
5596 /* Can change it only before FW download */
5597 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5598 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5599 __FUNCTION__));
5600 bcmerror = BCME_NOTDOWN;
5601 break;
5602 }
5603 bus->d2h_minidump_override = (bool)int_val;
5604 break;
5605 #endif /* D2H_MINIDUMP */
5606 default:
5607 bcmerror = BCME_UNSUPPORTED;
5608 break;
5609 }
5610
5611 exit:
5612 return bcmerror;
5613 } /* dhdpcie_bus_doiovar */
5614
5615 /** Requests a host-to-dongle loopback of 'len' bytes using pio mode */
5616 static int
5617 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
5618 {
5619 if (bus->dhd == NULL) {
5620 DHD_ERROR(("bus not inited\n"));
5621 return 0;
5622 }
5623 if (bus->dhd->prot == NULL) {
5624 DHD_ERROR(("prot is not inited\n"));
5625 return 0;
5626 }
5627 if (bus->dhd->busstate != DHD_BUS_DATA) {
5628 DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
5629 return 0;
5630 }
5631 dhdmsgbuf_lpbk_req(bus->dhd, len);
5632 return 0;
5633 }
5634
5635 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
5636 void
5637 dhd_bus_hostready(struct dhd_bus *bus)
5638 {
5639 if (!bus->dhd->d2h_hostrdy_supported) {
5640 return;
5641 }
5642
5643 if (bus->is_linkdown) {
5644 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
5645 return;
5646 }
5647
5648 DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
5649 dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
5650 if (DAR_PWRREQ(bus)) {
5651 dhd_bus_pcie_pwr_req(bus);
5652 }
5653 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
5654 bus->hostready_count++;
5655 DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
5656 }
5657
5658 /* Clear INTSTATUS */
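/*
 * Two clearing paths (matching the code below): on old PCIe core revs
 * (2, 4 and 6) the interrupt status lives in PCI config space
 * (PCIIntstatus) and the value just read is written back to acknowledge
 * it; on newer revs the status is a PCIe core register
 * (pcie_mailbox_int) and the pending bits, masked with def_intmask, are
 * written back to clear them.
 */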
5659 void
5660 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
5661 {
5662 uint32 intstatus = 0;
5663 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
5664 (bus->sih->buscorerev == 2)) {
5665 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
5666 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
5667 } else {
5668 /* this is a PCIE core register, not a config register */
5669 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
5670 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
5671 intstatus);
5672 }
5673 }
5674
5675 int
5676 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5677 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
5678 #else
5679 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
5680 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5681 {
5682 int timeleft;
5683 int rc = 0;
5684 unsigned long flags, flags_bus;
5685 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5686 int d3_read_retry = 0;
5687 uint32 d2h_mb_data = 0;
5688 uint32 zero = 0;
5689 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5690
5691 if (bus->dhd == NULL) {
5692 DHD_ERROR(("bus not inited\n"));
5693 return BCME_ERROR;
5694 }
5695 if (bus->dhd->prot == NULL) {
5696 DHD_ERROR(("prot is not inited\n"));
5697 return BCME_ERROR;
5698 }
5699
5700 if (dhd_query_bus_erros(bus->dhd)) {
5701 return BCME_ERROR;
5702 }
5703
5704 DHD_GENERAL_LOCK(bus->dhd, flags);
5705 if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
5706 DHD_ERROR(("not in a ready state\n"));
5707 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5708 return BCME_ERROR;
5709 }
5710 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5711 if (bus->dhd->dongle_reset) {
5712 DHD_ERROR(("Dongle is in reset state.\n"));
5713 return -EIO;
5714 }
5715
5716 /* Check whether we are already in the requested state.
5717 * state=TRUE means Suspend
5718 * state=FALSE means Resume
5719 */
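/*
 * Suspend handshake overview (as implemented below): on suspend the host
 * sends H2D_HOST_D3_INFORM to the dongle, waits for the D3 ACK mailbox
 * data, and only then suspends its side of the link via
 * dhdpcie_pci_suspend_resume(). If a wakelock is still active or the
 * D3 ACK times out, the suspend is backed off and the bus is returned
 * to DHD_BUS_DATA.
 */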
5720 if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
5721 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
5722 return BCME_OK;
5723 } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
5724 DHD_ERROR(("Bus is already in RESUME state.\n"));
5725 return BCME_OK;
5726 }
5727
5728 if (state) {
5729 int idle_retry = 0;
5730 int active;
5731
5732 if (bus->is_linkdown) {
5733 DHD_ERROR(("%s: PCIe link was down, state=%d\n",
5734 __FUNCTION__, state));
5735 return BCME_ERROR;
5736 }
5737
5738 /* Suspend */
5739 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
5740
5741 bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
5742 if (bus->dhd->dhd_watchdog_ms_backup) {
5743 DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
5744 __FUNCTION__));
5745 dhd_os_wd_timer(bus->dhd, 0);
5746 }
5747
5748 DHD_GENERAL_LOCK(bus->dhd, flags);
5749 if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
5750 DHD_ERROR(("Tx Request is not ended\n"));
5751 bus->dhd->busstate = DHD_BUS_DATA;
5752 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5753 return -EBUSY;
5754 }
5755
5756 bus->last_suspend_start_time = OSL_LOCALTIME_NS();
5757
5758 /* stop all interface network queue. */
5759 dhd_bus_stop_queue(bus);
5760 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5761
5762 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5763 if (byint) {
5764 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5765 /* Clear wait_for_d3_ack before sending D3_INFORM */
5766 bus->wait_for_d3_ack = 0;
5767 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
5768
5769 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
5770 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5771 } else {
5772 /* Clear wait_for_d3_ack before sending D3_INFORM */
5773 bus->wait_for_d3_ack = 0;
5774 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
5775 while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
5776 dhdpcie_handle_mb_data(bus);
5777 usleep_range(1000, 1500);
5778 d3_read_retry++;
5779 }
5780 }
5781 #else
5782 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5783
5784 /* Clear wait_for_d3_ack before sending D3_INFORM */
5785 bus->wait_for_d3_ack = 0;
5786 /*
5787 * Send H2D_HOST_D3_INFORM to the dongle and set bus->bus_low_power_state
5788 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
5789 * inside an atomic context, so that no more doorbells will be
5790 * rung after sending D3_INFORM
5791 */
5792 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
5793
5794 /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
5795
5796 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
5797
5798 #ifdef DHD_RECOVER_TIMEOUT
5799 if (bus->wait_for_d3_ack == 0) {
5800 /* If wait_for_d3_ack was not updated because D2H MB was not received */
5801 uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
5802 bus->pcie_mailbox_int, 0, 0);
5803 int host_irq_disabled = dhdpcie_irq_disabled(bus);
5804 if ((intstatus) && (intstatus != (uint32)-1) &&
5805 (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
5806 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
5807 " host_irq_disabled=%d\n",
5808 __FUNCTION__, intstatus, host_irq_disabled));
5809 dhd_pcie_intr_count_dump(bus->dhd);
5810 dhd_print_tasklet_status(bus->dhd);
5811 dhd_prot_process_ctrlbuf(bus->dhd);
5812 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
5813 /* Clear Interrupts */
5814 dhdpcie_bus_clear_intstatus(bus);
5815 }
5816 } /* bus->wait_for_d3_ack was 0 */
5817 #endif /* DHD_RECOVER_TIMEOUT */
5818
5819 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5820 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5821
5822 /* To allow threads that got pre-empted to complete.
5823 */
5824 while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
5825 (idle_retry < MAX_WKLK_IDLE_CHECK)) {
5826 OSL_SLEEP(1);
5827 idle_retry++;
5828 }
5829
5830 if (bus->wait_for_d3_ack) {
5831 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
5832 /* Got D3 Ack. Suspend the bus */
5833 if (active) {
5834 DHD_ERROR(("%s():Suspend failed because of wakelock"
5835 "restoring Dongle to D0\n", __FUNCTION__));
5836
5837 if (bus->dhd->dhd_watchdog_ms_backup) {
5838 DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
5839 __FUNCTION__));
5840 dhd_os_wd_timer(bus->dhd,
5841 bus->dhd->dhd_watchdog_ms_backup);
5842 }
5843
5844 /*
5845 * Dongle still thinks that it has to be in D3 state until
5846 * it gets a D0 Inform, but we are backing off from suspend.
5847 * Ensure that Dongle is brought back to D0.
5848 *
5849 * Bringing the Dongle back from D3 Ack state to D0 state is a
5850 * 2 step process. The Dongle needs a D0 Inform, sent as a MB
5851 * interrupt, to bring it out of D3 Ack state to D0 state, followed
5852 * by the hostready doorbell. So we have to send both of these messages.
5853 */
5854
5855 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
5856 bus->wait_for_d3_ack = 0;
5857
5858 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5859 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
5860 /* Enable back the intmask which was cleared in DPC
5861 * after getting D3_ACK.
5862 */
5863 bus->resume_intr_enable_count++;
5864
5865 /* For Linux, Macos etc (other than NDIS) enable back the dongle
5866 * interrupts using intmask and host interrupts
5867 * which were disabled in the dhdpcie_bus_isr()->
5868 * dhd_bus_handle_d3_ack().
5869 */
5870 /* Enable back interrupt using Intmask!! */
5871 dhdpcie_bus_intr_enable(bus);
5872 /* Enable back interrupt from Host side!! */
5873 dhdpcie_enable_irq(bus);
5874
5875 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5876
5877 if (bus->use_d0_inform) {
5878 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5879 dhdpcie_send_mb_data(bus,
5880 (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
5881 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5882 }
5883 /* ring doorbell 1 (hostready) */
5884 dhd_bus_hostready(bus);
5885
5886 DHD_GENERAL_LOCK(bus->dhd, flags);
5887 bus->dhd->busstate = DHD_BUS_DATA;
5888 /* resume all interface network queue. */
5889 dhd_bus_start_queue(bus);
5890 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5891 rc = BCME_ERROR;
5892 } else {
5893 /* Actual Suspend after no wakelock */
5894 /* At this time bus->bus_low_power_state will be
5895 * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
5896 * in dhd_bus_handle_d3_ack()
5897 */
5898 if (bus->use_d0_inform &&
5899 (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
5900 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5901 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
5902 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5903 }
5904
5905 #if defined(BCMPCIE_OOB_HOST_WAKE)
5906 dhdpcie_oob_intr_set(bus, TRUE);
5907 #endif /* BCMPCIE_OOB_HOST_WAKE */
5908
5909 DHD_GENERAL_LOCK(bus->dhd, flags);
5910 /* The Host cannot process interrupts now, so disable them.
5911 * No need to disable the dongle INTR using intmask, as the INTRs
5912 * are already disabled from the DPC context after
5913 * getting D3_ACK in dhd_bus_handle_d3_ack.
5914 * Code may not look symmetric between Suspend and
5915 * Resume paths but this is done to close down the timing window
5916 * between DPC and suspend context and bus->bus_low_power_state
5917 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
5918 */
5919 bus->dhd->d3ackcnt_timeout = 0;
5920 bus->dhd->busstate = DHD_BUS_SUSPEND;
5921 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5922 DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
5923 "BaseAddress1(0x%x)=0x%x\n", __FUNCTION__,
5924 PCIECFGREG_BASEADDR0,
5925 dhd_pcie_config_read(bus->osh,
5926 PCIECFGREG_BASEADDR0, sizeof(uint32)),
5927 PCIECFGREG_BASEADDR1,
5928 dhd_pcie_config_read(bus->osh,
5929 PCIECFGREG_BASEADDR1, sizeof(uint32))));
5930 dhdpcie_dump_resource(bus);
5931 /* Handle Host Suspend */
5932 rc = dhdpcie_pci_suspend_resume(bus, state);
5933 if (!rc) {
5934 bus->last_suspend_end_time = OSL_LOCALTIME_NS();
5935 }
5936 }
5937 } else if (timeleft == 0) { /* D3 ACK Timeout */
5938 #ifdef DHD_FW_COREDUMP
5939 uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
5940 #endif /* DHD_FW_COREDUMP */
5941
5942 /* check if the D3 ACK timeout is due to a scheduling issue */
5943 bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
5944 bus->isr_entry_time > bus->last_d3_inform_time &&
5945 dhd_bus_query_dpc_sched_errors(bus->dhd);
5946 bus->dhd->d3ack_timeout_occured = TRUE;
5947 /* The D3 Ack has timed out */
5948 bus->dhd->d3ackcnt_timeout++;
5949 DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
5950 __FUNCTION__, bus->dhd->is_sched_error ?
5951 " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
5952 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
5953 if (bus->dhd->is_sched_error && cur_memdump_mode) {
5954 /* change g_assert_type to trigger Kernel panic */
5955 g_assert_type = 2;
5956 /* use ASSERT() to trigger panic */
5957 ASSERT(0);
5958 }
5959 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
5960 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5961 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
5962 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5963 DHD_GENERAL_LOCK(bus->dhd, flags);
5964 bus->dhd->busstate = DHD_BUS_DATA;
5965 /* resume all interface network queue. */
5966 dhd_bus_start_queue(bus);
5967 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5968 if (!bus->dhd->dongle_trap_occured &&
5969 !bus->is_linkdown) {
5970 uint32 intstatus = 0;
5971
5972 /* Check if PCIe bus status is valid */
5973 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
5974 bus->pcie_mailbox_int, 0, 0);
5975 if (intstatus == (uint32)-1) {
5976 /* Invalidate PCIe bus status */
5977 bus->is_linkdown = 1;
5978 }
5979
5980 dhd_bus_dump_console_buffer(bus);
5981 dhd_prot_debug_info_print(bus->dhd);
5982 #ifdef DHD_FW_COREDUMP
5983 if (cur_memdump_mode) {
5984 /* write core dump to file */
5985 bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
5986 dhdpcie_mem_dump(bus);
5987 }
5988 #endif /* DHD_FW_COREDUMP */
5989
5990 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
5991 __FUNCTION__));
5992 #ifdef SUPPORT_LINKDOWN_RECOVERY
5993 #ifdef CONFIG_ARCH_MSM
5994 bus->no_cfg_restore = 1;
5995 #endif /* CONFIG_ARCH_MSM */
5996 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5997 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
5998 }
5999 #if defined(DHD_ERPOM)
6000 dhd_schedule_reset(bus->dhd);
6001 #endif // endif
6002 rc = -ETIMEDOUT;
6003 }
6004 } else {
6005 /* Resume */
6006 DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
6007 bus->last_resume_start_time = OSL_LOCALTIME_NS();
6008
6009 /**
6010 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
6011 * si_backplane_access(function to read/write backplane)
6012 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
6013 * window being accessed is different from the window
6014 * being pointed by second_bar0win.
6015 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
6016 * invalidating second_bar0win after resume updates
6017 * PCIE2_BAR0_CORE2_WIN with right window.
6018 */
6019 si_invalidate_second_bar0win(bus->sih);
6020 #if defined(BCMPCIE_OOB_HOST_WAKE)
6021 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
6022 #endif /* BCMPCIE_OOB_HOST_WAKE */
6023 rc = dhdpcie_pci_suspend_resume(bus, state);
6024 DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
6025 __FUNCTION__, PCIECFGREG_BASEADDR0,
6026 dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
6027 PCIECFGREG_BASEADDR1,
6028 dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
6029 dhdpcie_dump_resource(bus);
6030
6031 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6032 /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
6033 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6034 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6035
6036 if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6037 if (bus->use_d0_inform) {
6038 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6039 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
6040 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6041 }
6042 /* ring doorbell 1 (hostready) */
6043 dhd_bus_hostready(bus);
6044 }
6045 DHD_GENERAL_LOCK(bus->dhd, flags);
6046 bus->dhd->busstate = DHD_BUS_DATA;
6047 #ifdef DHD_PCIE_RUNTIMEPM
6048 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
6049 bus->bus_wake = 1;
6050 OSL_SMP_WMB();
6051 wake_up_interruptible(&bus->rpm_queue);
6052 }
6053 #endif /* DHD_PCIE_RUNTIMEPM */
6054 /* resume all interface network queue. */
6055 dhd_bus_start_queue(bus);
6056
6057 /* TODO: for NDIS also we need to use enable_irq in future */
6058 bus->resume_intr_enable_count++;
6059
6060 /* For Linux, Macos etc (other than NDIS) enable back the dongle interrupts
6061 * using intmask and host interrupts
6062 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
6063 */
6064 dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
6065 dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
6066
6067 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6068
6069 if (bus->dhd->dhd_watchdog_ms_backup) {
6070 DHD_ERROR(("%s: Enabling wdtick after resume\n",
6071 __FUNCTION__));
6072 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
6073 }
6074
6075 bus->last_resume_end_time = OSL_LOCALTIME_NS();
6076
6077 }
6078 return rc;
6079 }
6080
6081 uint32
6082 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
6083 {
6084 ASSERT(bus && bus->sih);
6085 if (enable) {
6086 si_corereg(bus->sih, bus->sih->buscoreidx,
6087 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
6088 } else {
6089 si_corereg(bus->sih, bus->sih->buscoreidx,
6090 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
6091 }
6092 return 0;
6093 }
6094
6095 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
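/*
 * The access below uses the indirect config pair exposed by the PCIe
 * core: program 'configaddr' with the target config offset (0x1004),
 * read the current value from 'configdata', replace bits [22:16] with
 * the requested L1 entry time, and write the result back through
 * 'configdata'.
 */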
6096 uint32
6097 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
6098 {
6099 uint reg_val;
6100
6101 ASSERT(bus && bus->sih);
6102
6103 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
6104 0x1004);
6105 reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
6106 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
6107 reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
6108 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
6109 reg_val);
6110
6111 return 0;
6112 }
6113
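/*
 * Length workaround for d11 loopback: on BCM4375/BCM4377, when the
 * requested loopback is not an M2M (DMA or non-DMA) loopback and the
 * length is congruent to 4 modulo 128, the length is padded by 8 bytes
 * before the transfer is issued.
 */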
6114 static uint32
6115 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
6116 {
6117 uint16 chipid = si_chipid(bus->sih);
6118 if ((chipid == BCM4375_CHIP_ID ||
6119 chipid == BCM4377_CHIP_ID) &&
6120 (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK) &&
6121 (len % 128 == 4)) {
6122 len += 8;
6123 }
6124 DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
6125 return len;
6126 }
6127
6128 /** Transfers bytes from host to dongle and to host again using DMA */
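/*
 * 'len' must be between 5 and 4194296 bytes; 'srcdelay'/'destdelay' are
 * forwarded to the dongle along with the request; when 'wait' is set the
 * call blocks on dhd_os_dmaxfer_wait() until bus->dmaxfer_complete is
 * signalled. Illustrative call (argument values are examples only):
 *   dhdpcie_bus_dmaxfer_req(bus, 1024, 0, 0, 0, 0, TRUE);
 */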
6129 static int
6130 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
6131 uint32 len, uint32 srcdelay, uint32 destdelay,
6132 uint32 d11_lpbk, uint32 core_num, uint32 wait)
6133 {
6134 int ret = 0;
6135
6136 if (bus->dhd == NULL) {
6137 DHD_ERROR(("bus not inited\n"));
6138 return BCME_ERROR;
6139 }
6140 if (bus->dhd->prot == NULL) {
6141 DHD_ERROR(("prot is not inited\n"));
6142 return BCME_ERROR;
6143 }
6144 if (bus->dhd->busstate != DHD_BUS_DATA) {
6145 DHD_ERROR(("bus is not in a ready state for LPBK\n"));
6146 return BCME_ERROR;
6147 }
6148
6149 if (len < 5 || len > 4194296) {
6150 DHD_ERROR(("len is too small or too large\n"));
6151 return BCME_ERROR;
6152 }
6153
6154 len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
6155
6156 bus->dmaxfer_complete = FALSE;
6157 ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
6158 d11_lpbk, core_num);
6159 if (ret != BCME_OK || !wait)
6160 return ret;
6161
6162 ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
6163 if (ret < 0)
6164 ret = BCME_NOTREADY;
6165
6166 return ret;
6167
6168 }
6169
6170 static int
6171 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
6172 {
6173 int bcmerror = 0;
6174 volatile uint32 *cr4_regs;
6175
6176 if (!bus->sih) {
6177 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
6178 return BCME_ERROR;
6179 }
6180 /* To enter download state, disable ARM and reset SOCRAM.
6181 * To exit download state, simply reset ARM (default is RAM boot).
6182 */
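/*
 * Three chip families are handled below when entering download state:
 * ARM CA7 chips (halt the CPU, reset the SYSMEM core and init the
 * shared-area word), chips with no CR4 (disable the ARM core, reset
 * SOCRAM and clear the top word of RAM), and CR4 chips (halt the CPU;
 * 43602 additionally clears two bank PDA registers before the
 * shared-area word is initialised).
 */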
6183 if (enter) {
6184 /* Make sure BAR1 maps to backplane address 0 */
6185 dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
6186 bus->alp_only = TRUE;
6187
6188 /* some chips (e.g. 43602) have two ARM cores, the CR4 receives the firmware. */
6189 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6190
6191 if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6192 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
6193 !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6194 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6195 bcmerror = BCME_ERROR;
6196 goto fail;
6197 }
6198
6199 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6200 /* Halt ARM & remove reset */
6201 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6202 if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
6203 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
6204 bcmerror = BCME_ERROR;
6205 goto fail;
6206 }
6207 si_core_reset(bus->sih, 0, 0);
6208 /* reset the last 4 bytes of RAM address, to be used for the shared area */
6209 dhdpcie_init_shared_addr(bus);
6210 } else if (cr4_regs == NULL) { /* no CR4 present on chip */
6211 si_core_disable(bus->sih, 0);
6212
6213 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6214 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6215 bcmerror = BCME_ERROR;
6216 goto fail;
6217 }
6218
6219 si_core_reset(bus->sih, 0, 0);
6220
6221 /* Clear the top bit of memory */
6222 if (bus->ramsize) {
6223 uint32 zeros = 0;
6224 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
6225 (uint8*)&zeros, 4) < 0) {
6226 bcmerror = BCME_ERROR;
6227 goto fail;
6228 }
6229 }
6230 } else {
6231 /* For CR4,
6232 * Halt ARM
6233 * Remove ARM reset
6234 * Read RAM base address [0x18_0000]
6235 * [next] Download firmware
6236 * [done at else] Populate the reset vector
6237 * [done at else] Remove ARM halt
6238 */
6239 /* Halt ARM & remove reset */
6240 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6241 if (BCM43602_CHIP(bus->sih->chip)) {
6242 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
6243 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6244 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
6245 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6246 }
6247 /* reset the last 4 bytes of RAM address, to be used for the shared area */
6248 dhdpcie_init_shared_addr(bus);
6249 }
6250 } else {
6251 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6252 /* write vars */
6253 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6254 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6255 goto fail;
6256 }
6257 /* write random numbers to sysmem for the purpose of
6258 * randomizing heap address space.
6259 */
6260 if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6261 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6262 __FUNCTION__));
6263 goto fail;
6264 }
6265 /* switch back to arm core again */
6266 if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6267 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
6268 bcmerror = BCME_ERROR;
6269 goto fail;
6270 }
6271 /* write address 0 with reset instruction */
6272 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
6273 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
6274 /* now remove reset and halt and continue to run CA7 */
6275 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
6276 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6277 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6278 bcmerror = BCME_ERROR;
6279 goto fail;
6280 }
6281
6282 if (!si_iscoreup(bus->sih)) {
6283 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
6284 bcmerror = BCME_ERROR;
6285 goto fail;
6286 }
6287
6288 /* Enable remap before ARM reset but after vars.
6289 * No backplane access in remap mode
6290 */
6291 if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
6292 !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
6293 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
6294 bcmerror = BCME_ERROR;
6295 goto fail;
6296 }
6297
6298 if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6299 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
6300 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6301 bcmerror = BCME_ERROR;
6302 goto fail;
6303 }
6304 } else {
6305 if (BCM43602_CHIP(bus->sih->chip)) {
6306 /* Firmware crashes on SOCSRAM access when core is in reset */
6307 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6308 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
6309 __FUNCTION__));
6310 bcmerror = BCME_ERROR;
6311 goto fail;
6312 }
6313 si_core_reset(bus->sih, 0, 0);
6314 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6315 }
6316
6317 /* write vars */
6318 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6319 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6320 goto fail;
6321 }
6322
6323 /* write a random number to TCM for the purpose of
6324 * randomizing heap address space.
6325 */
6326 if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6327 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6328 __FUNCTION__));
6329 goto fail;
6330 }
6331
6332 if ((bcmerror = dhdpcie_wrt_host_whitelist_region(bus)) != BCME_OK) {
6333 DHD_ERROR(("%s: Failed to write Whitelist region to TCM !\n",
6334 __FUNCTION__));
6335 goto fail;
6336 }
6337 /* switch back to arm core again */
6338 if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
6339 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
6340 bcmerror = BCME_ERROR;
6341 goto fail;
6342 }
6343
6344 /* write address 0 with reset instruction */
6345 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
6346 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
6347
6348 if (bcmerror == BCME_OK) {
6349 uint32 tmp;
6350
6351 bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
6352 (uint8 *)&tmp, sizeof(tmp));
6353
6354 if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
6355 DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
6356 __FUNCTION__, bus->resetinstr));
6357 DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
6358 __FUNCTION__, tmp));
6359 bcmerror = BCME_ERROR;
6360 goto fail;
6361 }
6362 }
6363
6364 /* now remove reset and halt and continue to run CR4 */
6365 }
6366
6367 si_core_reset(bus->sih, 0, 0);
6368
6369 /* Allow HT Clock now that the ARM is running. */
6370 bus->alp_only = FALSE;
6371
6372 bus->dhd->busstate = DHD_BUS_LOAD;
6373 }
6374
6375 fail:
6376 /* Always return to PCIE core */
6377 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
6378
6379 return bcmerror;
6380 } /* dhdpcie_bus_download_state */
6381
6382 static int
6383 dhdpcie_bus_write_vars(dhd_bus_t *bus)
6384 {
6385 int bcmerror = 0;
6386 uint32 varsize, phys_size;
6387 uint32 varaddr;
6388 uint8 *vbuffer;
6389 uint32 varsizew;
6390 #ifdef DHD_DEBUG
6391 uint8 *nvram_ularray;
6392 #endif /* DHD_DEBUG */
6393
6394 /* Even if there are no vars to be written, we still need to set the ramsize. */
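/*
 * Memory layout used here: the NVRAM vars are placed immediately below
 * the last word of dongle RAM (varaddr = ramsize - 4 - varsize, offset
 * by dongle_ram_base), and the final word of RAM receives the length
 * token written at the end of this function.
 */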
6395 varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
6396 varaddr = (bus->ramsize - 4) - varsize;
6397
6398 varaddr += bus->dongle_ram_base;
6399
6400 if (bus->vars) {
6401
6402 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
6403 if (!vbuffer)
6404 return BCME_NOMEM;
6405
6406 bzero(vbuffer, varsize);
6407 bcopy(bus->vars, vbuffer, bus->varsz);
6408 /* Write the vars list */
6409 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
6410
6411 /* Implement read back and verify later */
6412 #ifdef DHD_DEBUG
6413 /* Verify NVRAM bytes */
6414 DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
6415 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
6416 if (!nvram_ularray) {
6417 MFREE(bus->dhd->osh, vbuffer, varsize);
6418 return BCME_NOMEM;
6419 }
6420
6421 /* Upload image to verify downloaded contents. */
6422 memset(nvram_ularray, 0xaa, varsize);
6423
6424 /* Read the vars list to temp buffer for comparison */
6425 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
6426 if (bcmerror) {
6427 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
6428 __FUNCTION__, bcmerror, varsize, varaddr));
6429 }
6430
6431 /* Compare the org NVRAM with the one read from RAM */
6432 if (memcmp(vbuffer, nvram_ularray, varsize)) {
6433 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
6434 } else
6435 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
6436 __FUNCTION__));
6437
6438 MFREE(bus->dhd->osh, nvram_ularray, varsize);
6439 #endif /* DHD_DEBUG */
6440
6441 MFREE(bus->dhd->osh, vbuffer, varsize);
6442 }
6443
6444 phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
6445
6446 phys_size += bus->dongle_ram_base;
6447
6448 /* adjust to the user specified RAM */
6449 DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
6450 phys_size, bus->ramsize));
6451 DHD_INFO(("Vars are at %d, orig varsize is %d\n",
6452 varaddr, varsize));
6453 varsize = ((phys_size - 4) - varaddr);
6454
6455 /*
6456 * Determine the length token:
6457 * Varsize, converted to words, in the lower 16 bits; its bitwise complement (a simple checksum) in the upper 16 bits.
6458 */
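/*
 * Worked example: for varsize = 1024 bytes, varsizew = 0x100 words, so
 * the token is (~0x100 << 16) | 0x100 = 0xFEFF0100 (stored
 * little-endian via htol32()).
 */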
6459 if (bcmerror) {
6460 varsizew = 0;
6461 bus->nvram_csm = varsizew;
6462 } else {
6463 varsizew = varsize / 4;
6464 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
6465 bus->nvram_csm = varsizew;
6466 varsizew = htol32(varsizew);
6467 }
6468
6469 DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
6470
6471 /* Write the length token to the last word */
6472 bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
6473 (uint8*)&varsizew, 4);
6474
6475 return bcmerror;
6476 } /* dhdpcie_bus_write_vars */
6477
6478 int
6479 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
6480 {
6481 int bcmerror = BCME_OK;
6482 #if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
6483 char *tmpbuf;
6484 uint tmpidx;
6485 #endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
6486
6487 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6488
6489 /* Basic sanity checks */
6490 if (bus->dhd->up) {
6491 bcmerror = BCME_NOTDOWN;
6492 goto err;
6493 }
6494 if (!len) {
6495 bcmerror = BCME_BUFTOOSHORT;
6496 goto err;
6497 }
6498
6499 /* Free the old ones and replace with passed variables */
6500 if (bus->vars)
6501 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
6502
6503 bus->vars = MALLOC(bus->dhd->osh, len);
6504 bus->varsz = bus->vars ? len : 0;
6505 if (bus->vars == NULL) {
6506 bcmerror = BCME_NOMEM;
6507 goto err;
6508 }
6509
6510 /* Copy the passed variables, which should include the terminating double-null */
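/*
 * The vars blob is a packed list of "name=value" strings, each
 * NUL-terminated, with an extra NUL ending the list, e.g. (illustrative
 * values only) "ccode=KR\0regrev=1\0\0".
 */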
6511 bcopy(arg, bus->vars, bus->varsz);
6512
6513 #ifdef DHD_USE_SINGLE_NVRAM_FILE
6514 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
6515 char *sp = NULL;
6516 char *ep = NULL;
6517 int i;
6518 char tag[2][8] = {"ccode=", "regrev="};
6519
6520 /* Find ccode and regrev info */
6521 for (i = 0; i < 2; i++) {
6522 sp = strnstr(bus->vars, tag[i], bus->varsz);
6523 if (!sp) {
6524 DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
6525 __FUNCTION__, bus->nv_path));
6526 bcmerror = BCME_ERROR;
6527 goto err;
6528 }
6529 sp = strchr(sp, '=');
6530 ep = strchr(sp, '\0');
6531 /* We assume that the string lengths of both the ccode and
6532 * regrev values do not exceed WLC_CNTRY_BUF_SZ
6533 */
6534 if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
6535 sp++;
6536 while (*sp != '\0') {
6537 DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
6538 __FUNCTION__, tag[i], *sp));
6539 *sp++ = '0';
6540 }
6541 } else {
6542 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
6543 __FUNCTION__, tag[i]));
6544 bcmerror = BCME_ERROR;
6545 goto err;
6546 }
6547 }
6548 }
6549 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
6550
6551 #if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
6552 #ifdef DHD_USE_SINGLE_NVRAM_FILE
6553 if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
6554 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
6555 {
6556 char *pos = NULL;
6557 tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
6558 if (tmpbuf == NULL) {
6559 goto err;
6560 }
6561 memcpy(tmpbuf, bus->vars, bus->varsz);
6562 for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
6563 if (tmpbuf[tmpidx] == 0) {
6564 tmpbuf[tmpidx] = '\n';
6565 }
6566 }
6567 bus->dhd->vars_ccode[0] = 0;
6568 bus->dhd->vars_regrev = 0;
6569 if ((pos = strstr(tmpbuf, "ccode"))) {
6570 sscanf(pos, "ccode=%s\n", bus->dhd->vars_ccode);
6571 }
6572 if ((pos = strstr(tmpbuf, "regrev"))) {
6573 sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
6574 }
6575 MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
6576 }
6577 #endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
6578
6579 err:
6580 return bcmerror;
6581 }
6582
6583 /* loop through the PCI capability list and see if the requested capability exists */
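/*
 * Returns the config-space offset of the capability with ID 'req_cap_id',
 * or 0 if the header type is not normal, no capability list is present,
 * or the ID is not found. Typical use, as in the helpers below:
 *   cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
 */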
6584 uint8
6585 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
6586 {
6587 uint8 cap_id;
6588 uint8 cap_ptr = 0;
6589 uint8 byte_val;
6590
6591 /* check for Header type 0 */
6592 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
6593 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
6594 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
6595 goto end;
6596 }
6597
6598 /* check if the capability pointer field exists */
6599 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
6600 if (!(byte_val & PCI_CAPPTR_PRESENT)) {
6601 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
6602 goto end;
6603 }
6604
6605 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
6606 /* check if the capability pointer is 0x00 */
6607 if (cap_ptr == 0x00) {
6608 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
6609 goto end;
6610 }
6611
6612 /* loop through the capability list and see if the requested capability exists */
6613
6614 cap_id = read_pci_cfg_byte(cap_ptr);
6615
6616 while (cap_id != req_cap_id) {
6617 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
6618 if (cap_ptr == 0x00) break;
6619 cap_id = read_pci_cfg_byte(cap_ptr);
6620 }
6621
6622 end:
6623 return cap_ptr;
6624 }
6625
6626 void
6627 dhdpcie_pme_active(osl_t *osh, bool enable)
6628 {
6629 uint8 cap_ptr;
6630 uint32 pme_csr;
6631
6632 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
6633
6634 if (!cap_ptr) {
6635 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
6636 return;
6637 }
6638
6639 pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
6640 DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
6641
6642 pme_csr |= PME_CSR_PME_STAT;
6643 if (enable) {
6644 pme_csr |= PME_CSR_PME_EN;
6645 } else {
6646 pme_csr &= ~PME_CSR_PME_EN;
6647 }
6648
6649 OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
6650 }
6651
6652 bool
6653 dhdpcie_pme_cap(osl_t *osh)
6654 {
6655 uint8 cap_ptr;
6656 uint32 pme_cap;
6657
6658 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
6659
6660 if (!cap_ptr) {
6661 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
6662 return FALSE;
6663 }
6664
6665 pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
6666
6667 DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
6668
6669 return ((pme_cap & PME_CAP_PM_STATES) != 0);
6670 }
6671
6672 uint32
6673 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
6674 {
6675
6676 uint8 pcie_cap;
6677 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
6678 uint32 reg_val;
6679
6680 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
6681
6682 if (!pcie_cap) {
6683 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
6684 return 0;
6685 }
6686
6687 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
6688
6689 /* set operation */
6690 if (mask) {
6691 /* read */
6692 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6693
6694 /* modify */
6695 reg_val &= ~mask;
6696 reg_val |= (mask & val);
6697
6698 /* write */
6699 OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
6700 }
6701 return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6702 }
6703
6704 uint8
6705 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
6706 {
6707 uint8 pcie_cap;
6708 uint32 reg_val;
6709 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
6710
6711 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
6712
6713 if (!pcie_cap) {
6714 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
6715 return 0;
6716 }
6717
6718 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
6719
6720 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6721 /* set operation */
6722 if (mask) {
6723 if (val)
6724 reg_val |= PCIE_CLKREQ_ENAB;
6725 else
6726 reg_val &= ~PCIE_CLKREQ_ENAB;
6727 OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
6728 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6729 }
6730 if (reg_val & PCIE_CLKREQ_ENAB)
6731 return 1;
6732 else
6733 return 0;
6734 }
6735
6736 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
6737 {
6738 dhd_bus_t *bus;
6739 uint64 current_time = OSL_LOCALTIME_NS();
6740
6741 if (!dhd) {
6742 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
6743 return;
6744 }
6745
6746 bus = dhd->bus;
6747 if (!bus) {
6748 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
6749 return;
6750 }
6751
6752 bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
6753 bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
6754 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
6755 "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
6756 bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
6757 bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
6758 bus->dpc_return_busdown_count, bus->non_ours_irq_count);
6759 #ifdef BCMPCIE_OOB_HOST_WAKE
6760 bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
6761 " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n",
6762 bus->oob_intr_count, bus->oob_intr_enable_count,
6763 bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus),
6764 bus->last_oob_irq_time);
6765 #endif /* BCMPCIE_OOB_HOST_WAKE */
6766 bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
6767 " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
6768 " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
6769 "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
6770 " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
6771 " last_process_infocpl_time="SEC_USEC_FMT
6772 "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
6773 "last_d3_inform_time="SEC_USEC_FMT"\n",
6774 GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
6775 GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_entry_time),
6776 GET_SEC_USEC(bus->dpc_sched_time), GET_SEC_USEC(dhd->bus->last_non_ours_irq_time),
6777 GET_SEC_USEC(bus->last_process_ctrlbuf_time),
6778 GET_SEC_USEC(bus->last_process_flowring_time),
6779 GET_SEC_USEC(bus->last_process_txcpl_time),
6780 GET_SEC_USEC(bus->last_process_rxcpl_time),
6781 GET_SEC_USEC(bus->last_process_infocpl_time),
6782 GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
6783 GET_SEC_USEC(bus->last_d3_inform_time));
6784
6785 bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
6786 SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
6787 SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
6788 GET_SEC_USEC(dhd->bus->last_suspend_end_time),
6789 GET_SEC_USEC(dhd->bus->last_resume_start_time),
6790 GET_SEC_USEC(dhd->bus->last_resume_end_time));
6791 }
6792
6793 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
6794 {
6795 uint32 intstatus = 0;
6796 uint32 intmask = 0;
6797 uint32 d2h_db0 = 0;
6798 uint32 d2h_mb_data = 0;
6799
6800 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
6801 dhd->bus->pcie_mailbox_int, 0, 0);
6802 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
6803 dhd->bus->pcie_mailbox_mask, 0, 0);
6804 d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
6805 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
6806
6807 bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
6808 intstatus, intmask, d2h_db0);
6809 bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
6810 d2h_mb_data, dhd->bus->def_intmask);
6811 }
6812 /** Add bus dump output to a buffer */
6813 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
6814 {
6815 uint16 flowid;
6816 int ix = 0;
6817 flow_ring_node_t *flow_ring_node;
6818 flow_info_t *flow_info;
6819 #ifdef TX_STATUS_LATENCY_STATS
6820 uint8 ifindex;
6821 if_flow_lkup_t *if_flow_lkup;
6822 dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
6823 #endif /* TX_STATUS_LATENCY_STATS */
6824
6825 if (dhdp->busstate != DHD_BUS_DATA)
6826 return;
6827
6828 #ifdef TX_STATUS_LATENCY_STATS
6829 memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
6830 #endif /* TX_STATUS_LATENCY_STATS */
6831 #ifdef DHD_WAKE_STATUS
6832 bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
6833 bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
6834 dhdp->bus->wake_counts.rcwake);
6835 #ifdef DHD_WAKE_RX_STATUS
6836 bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n",
6837 dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
6838 dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
6839 bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
6840 dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
6841 dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
6842 bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
6843 dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
6844 dhdp->bus->wake_counts.rx_icmpv6_ns);
6845 #endif /* DHD_WAKE_RX_STATUS */
6846 #ifdef DHD_WAKE_EVENT_STATUS
6847 for (flowid = 0; flowid < WLC_E_LAST; flowid++)
6848 if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
6849 bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
6850 dhdp->bus->wake_counts.rc_event[flowid]);
6851 bcm_bprintf(strbuf, "\n");
6852 #endif /* DHD_WAKE_EVENT_STATUS */
6853 #endif /* DHD_WAKE_STATUS */
6854
6855 dhd_prot_print_info(dhdp, strbuf);
6856 dhd_dump_intr_registers(dhdp, strbuf);
6857 dhd_dump_intr_counters(dhdp, strbuf);
6858 bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
6859 dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
6860 bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
6861 bcm_bprintf(strbuf,
6862 "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
6863 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
6864 "Overflows", "RD", "WR");
6865
6866 #ifdef TX_STATUS_LATENCY_STATS
6867 /* Average Tx status/Completion Latency in micro secs */
6868 bcm_bprintf(strbuf, "%12s", "AvgTxCmpL_Us ");
6869 #endif /* TX_STATUS_LATENCY_STATS */
6870
6871 bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
6872
6873 for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
6874 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
6875 if (!flow_ring_node->active)
6876 continue;
6877
6878 flow_info = &flow_ring_node->flow_info;
6879 bcm_bprintf(strbuf,
6880 "%3d. %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
6881 flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
6882 MAC2STRDBG(flow_info->da),
6883 DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
6884 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
6885 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
6886 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
6887 dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
6888 "%4d %4d ");
6889
6890 #ifdef TX_STATUS_LATENCY_STATS
6891 bcm_bprintf(strbuf, "%12d ",
6892 flow_info->num_tx_status ?
6893 DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
6894 flow_info->num_tx_status) : 0);
6895
6896 ifindex = flow_info->ifindex;
6897 ASSERT(ifindex < DHD_MAX_IFS);
6898 if (ifindex < DHD_MAX_IFS) {
6899 if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
6900 if_tx_status_latency[ifindex].cum_tx_status_latency +=
6901 flow_info->cum_tx_status_latency;
6902 } else {
6903 DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
6904 __FUNCTION__, ifindex, flowid));
6905 }
6906 #endif /* TX_STATUS_LATENCY_STATS */
6907 bcm_bprintf(strbuf,
6908 "%5s %6s %5s\n", "NA", "NA", "NA");
6909 }
6910
6911 #ifdef TX_STATUS_LATENCY_STATS
6912 bcm_bprintf(strbuf, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStats");
6913 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
6914 for (ix = 0; ix < DHD_MAX_IFS; ix++) {
6915 if (!if_flow_lkup[ix].status) {
6916 continue;
6917 }
6918 bcm_bprintf(strbuf, "%2d %16d %16d\n",
6919 ix,
6920 if_tx_status_latency[ix].num_tx_status ?
6921 DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
6922 if_tx_status_latency[ix].num_tx_status): 0,
6923 if_tx_status_latency[ix].num_tx_status);
6924 }
6925 #endif /* TX_STATUS_LATENCY_STATS */
6926 bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
6927 bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
6928 bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
6929 if (dhdp->d2h_hostrdy_supported) {
6930 bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
6931 }
6932 bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
6933 dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
6934 }
6935
6936 /**
6937 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
6938 * flow queue to their flow ring.
6939 */
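/*
 * The walk below holds flowring_list_lock, iterates
 * bus->flowring_active_list and calls dhd_prot_update_txflowring() for
 * every active node, bailing out early if a HANG event was already sent
 * or the device has been removed.
 */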
6940 static void
6941 dhd_update_txflowrings(dhd_pub_t *dhd)
6942 {
6943 unsigned long flags;
6944 dll_t *item, *next;
6945 flow_ring_node_t *flow_ring_node;
6946 struct dhd_bus *bus = dhd->bus;
6947
6948 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
6949 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
6950 for (item = dll_head_p(&bus->flowring_active_list);
6951 (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
6952 item = next) {
6953 if (dhd->hang_was_sent) {
6954 break;
6955 }
6956
6957 next = dll_next_p(item);
6958 flow_ring_node = dhd_constlist_to_flowring(item);
6959
6960 /* Ensure that flow_ring_node in the list is Not Null */
6961 ASSERT(flow_ring_node != NULL);
6962
6963 /* Ensure that the flowring node has valid contents */
6964 ASSERT(flow_ring_node->prot_info != NULL);
6965
6966 dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
6967 }
6968 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
6969 }
6970
6971 /** Mailbox ringbell Function */
6972 static void
6973 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
6974 {
6975 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
6976 (bus->sih->buscorerev == 4)) {
6977 DHD_ERROR(("mailbox communication not supported\n"));
6978 return;
6979 }
6980 if (bus->db1_for_mb) {
6981 /* this is a pcie core register, not the config register */
6982 DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
6983 if (DAR_PWRREQ(bus)) {
6984 dhd_bus_pcie_pwr_req(bus);
6985 }
6986 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
6987 ~0, 0x12345678);
6988 } else {
6989 DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
6990 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
6991 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
6992 }
6993 }
6994
6995 /* Upon receiving a mailbox interrupt,
6996 * if the H2D_FW_TRAP bit is set in the mailbox location,
6997 * the device traps
6998 */
6999 static void
7000 dhdpcie_fw_trap(dhd_bus_t *bus)
7001 {
7002 /* Send the mailbox data and generate mailbox intr. */
7003 dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
7004 /* For FWs that cannot interpret H2D_FW_TRAP */
7005 (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
7006 }
7007
7008 /** mailbox doorbell ring function */
7009 void
7010 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
7011 {
7012 /* Skip after sending D3_INFORM */
7013 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7014 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7015 __FUNCTION__, bus->bus_low_power_state));
7016 return;
7017 }
7018
7019 /* Skip in the case of link down */
7020 if (bus->is_linkdown) {
7021 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7022 return;
7023 }
7024
7025 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7026 (bus->sih->buscorerev == 4)) {
7027 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
7028 PCIE_INTB, PCIE_INTB);
7029 } else {
7030 /* this is a pcie core register, not the config register */
7031 DHD_INFO(("writing a door bell to the device\n"));
7032 if (IDMA_ACTIVE(bus->dhd)) {
7033 if (DAR_PWRREQ(bus)) {
7034 dhd_bus_pcie_pwr_req(bus);
7035 }
7036 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
7037 ~0, value);
7038 } else {
7039 if (DAR_PWRREQ(bus)) {
7040 dhd_bus_pcie_pwr_req(bus);
7041 }
7042 si_corereg(bus->sih, bus->sih->buscoreidx,
7043 dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
7044 }
7045 }
7046 }
7047
7048 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
7049 void
7050 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
7051 {
7052 /* this is a pcie core register, not the config register */
7053 /* Skip after sending D3_INFORM */
7054 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7055 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7056 __FUNCTION__, bus->bus_low_power_state));
7057 return;
7058 }
7059
7060 /* Skip in the case of link down */
7061 if (bus->is_linkdown) {
7062 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7063 return;
7064 }
7065
7066 DHD_INFO(("writing a door bell 2 to the device\n"));
7067 if (DAR_PWRREQ(bus)) {
7068 dhd_bus_pcie_pwr_req(bus);
7069 }
7070 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
7071 ~0, value);
7072 }
7073
7074 void
7075 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
7076 {
7077 /* Skip after sending D3_INFORM */
7078 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7079 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7080 __FUNCTION__, bus->bus_low_power_state));
7081 return;
7082 }
7083
7084 /* Skip in the case of link down */
7085 if (bus->is_linkdown) {
7086 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7087 return;
7088 }
7089
7090 if (DAR_PWRREQ(bus)) {
7091 dhd_bus_pcie_pwr_req(bus);
7092 }
7093 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
7094 }
7095
7096 void
7097 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
7098 {
7099 /* Skip after sending D3_INFORM */
7100 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7101 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7102 __FUNCTION__, bus->bus_low_power_state));
7103 return;
7104 }
7105
7106 /* Skip in the case of link down */
7107 if (bus->is_linkdown) {
7108 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7109 return;
7110 }
7111
7112 if (DAR_PWRREQ(bus)) {
7113 dhd_bus_pcie_pwr_req(bus);
7114 }
7115 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
7116 }
7117
7118 static void
7119 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
7120 {
7121 uint32 w;
7122 /* Skip after sending D3_INFORM */
7123 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7124 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7125 __FUNCTION__, bus->bus_low_power_state));
7126 return;
7127 }
7128
7129 /* Skip in the case of link down */
7130 if (bus->is_linkdown) {
7131 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7132 return;
7133 }
7134
7135 w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
7136 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
7137 }
7138
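/** Select the doorbell ring routine and cache the corresponding doorbell register address;
 * falls back to dhd_bus_ringbell() when the register address cannot be resolved.
 */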
7139 dhd_mb_ring_t
7140 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
7141 {
7142 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7143 (bus->sih->buscorerev == 4)) {
7144 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7145 bus->pcie_mailbox_int);
7146 if (bus->pcie_mb_intr_addr) {
7147 bus->pcie_mb_intr_osh = si_osh(bus->sih);
7148 return dhd_bus_ringbell_oldpcie;
7149 }
7150 } else {
7151 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7152 dhd_bus_db0_addr_get(bus));
7153 if (bus->pcie_mb_intr_addr) {
7154 bus->pcie_mb_intr_osh = si_osh(bus->sih);
7155 return dhdpcie_bus_ringbell_fast;
7156 }
7157 }
7158 return dhd_bus_ringbell;
7159 }
7160
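/** As above, but for the second doorbell used by IDMA/IFRM. */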
7161 dhd_mb_ring_2_t
7162 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
7163 {
7164 bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7165 dhd_bus_db0_addr_2_get(bus));
7166 if (bus->pcie_mb_intr_2_addr) {
7167 bus->pcie_mb_intr_osh = si_osh(bus->sih);
7168 return dhdpcie_bus_ringbell_2_fast;
7169 }
7170 return dhd_bus_ringbell_2;
7171 }
7172
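/** Bus DPC: process pending mailbox interrupt status and ring completions.
 * Returns TRUE when more work remains and the DPC should be rescheduled.
 */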
7173 bool BCMFASTPATH
7174 dhd_bus_dpc(struct dhd_bus *bus)
7175 {
7176 bool resched = FALSE; /* Flag indicating resched wanted */
7177 unsigned long flags;
7178
7179 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7180
7181 bus->dpc_entry_time = OSL_LOCALTIME_NS();
7182
7183 DHD_GENERAL_LOCK(bus->dhd, flags);
7184 /* Check only for DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS,
7185 * to avoid an "IOCTL Resumed On timeout": when an ioctl is waiting for a
7186 * response and rmmod is fired in parallel, the state becomes
7187 * DHD_BUS_DOWN_IN_PROGRESS, and returning here would leave the IOCTL response unhandled
7188 */
7189 if (bus->dhd->busstate == DHD_BUS_DOWN) {
7190 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
7191 bus->intstatus = 0;
7192 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7193 bus->dpc_return_busdown_count++;
7194 return 0;
7195 }
7196 #ifdef DHD_PCIE_RUNTIMEPM
7197 bus->idlecount = 0;
7198 #endif /* DHD_PCIE_RUNTIMEPM */
7199 DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
7200 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7201
7202 resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
7203 if (!resched) {
7204 bus->intstatus = 0;
7205 bus->dpc_intr_enable_count++;
7206 /* For Linux, MacOS etc. (other than NDIS) re-enable the host interrupts
7207 * which were disabled in dhdpcie_bus_isr()
7208 */
7209 dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
7210 bus->dpc_exit_time = OSL_LOCALTIME_NS();
7211 } else {
7212 bus->resched_dpc_time = OSL_LOCALTIME_NS();
7213 }
7214
7215 bus->dpc_sched = resched;
7216
7217 DHD_GENERAL_LOCK(bus->dhd, flags);
7218 DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
7219 dhd_os_busbusy_wake(bus->dhd);
7220 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7221
7222 return resched;
7223
7224 }
7225
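/** Send H2D mailbox data to the dongle. For shared revision >= 6 when the
 * mailbox is not in use, the data is sent as an H2D control message instead.
 */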
7226 int
7227 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
7228 {
7229 uint32 cur_h2d_mb_data = 0;
7230
7231 DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
7232
7233 if (bus->is_linkdown) {
7234 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7235 return BCME_ERROR;
7236 }
7237
7238 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
7239 DHD_INFO(("API rev is >= 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
7240 h2d_mb_data));
7241 /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
7242 {
7243 if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
7244 DHD_ERROR(("failure sending the H2D Mailbox message "
7245 "to firmware\n"));
7246 goto fail;
7247 }
7248 }
7249 goto done;
7250 }
7251
7252 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
7253
7254 if (cur_h2d_mb_data != 0) {
7255 uint32 i = 0;
7256 DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
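/* Poll up to 100 times with a 10us delay (~1ms total) for the dongle to consume the previous mailbox value */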
7257 while ((i++ < 100) && cur_h2d_mb_data) {
7258 OSL_DELAY(10);
7259 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
7260 }
7261 if (i >= 100) {
7262 DHD_ERROR(("%s : waited 1ms for the dngl "
7263 "to ack the previous mb transaction\n", __FUNCTION__));
7264 DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
7265 __FUNCTION__, cur_h2d_mb_data));
7266 }
7267 }
7268
7269 dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
7270 dhd_bus_gen_devmb_intr(bus);
7271
7272 done:
7273 if (h2d_mb_data == H2D_HOST_D3_INFORM) {
7274 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
7275 bus->last_d3_inform_time = OSL_LOCALTIME_NS();
7276 bus->d3_inform_cnt++;
7277 }
7278 if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
7279 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
7280 bus->d0_inform_in_use_cnt++;
7281 }
7282 if (h2d_mb_data == H2D_HOST_D0_INFORM) {
7283 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
7284 bus->d0_inform_cnt++;
7285 }
7286 return BCME_OK;
7287 fail:
7288 return BCME_ERROR;
7289 }
7290
7291 static void
7292 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
7293 {
7294 unsigned long flags_bus;
7295 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7296 bus->suspend_intr_disable_count++;
7297 /* Disable dongle Interrupts Immediately after D3 */
7298
7299 /* For Linux, MacOS etc. (other than NDIS), along with disabling the
7300 * dongle interrupt by clearing the IntMask, directly disable the
7301 * interrupt from the host side as well. Also clear the intstatus,
7302 * if it is set, to avoid unnecessary interrupts after D3 ACK.
7303 */
7304 dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
7305 dhdpcie_bus_clear_intstatus(bus);
7306 dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
7307
7308 /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
7309 bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
7310 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7311 bus->wait_for_d3_ack = 1;
7312 dhd_os_d3ack_wake(bus->dhd);
7313 }
7314 void
7315 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
7316 {
7317 if (MULTIBP_ENAB(bus->sih)) {
7318 dhd_bus_pcie_pwr_req(bus);
7319 }
7320
7321 DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
7322
7323 if (d2h_mb_data & D2H_DEV_FWHALT) {
7324 DHD_ERROR(("FW trap has happened\n"));
7325 dhdpcie_checkdied(bus, NULL, 0);
7326 #ifdef SUPPORT_LINKDOWN_RECOVERY
7327 #ifdef CONFIG_ARCH_MSM
7328 bus->no_cfg_restore = 1;
7329 #endif /* CONFIG_ARCH_MSM */
7330 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7331 dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
7332 goto exit;
7333 }
7334 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
7335 bool ds_acked = FALSE;
7336 BCM_REFERENCE(ds_acked);
7337 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
7338 DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
7339 bus->dhd->busstate = DHD_BUS_DOWN;
7340 goto exit;
7341 }
7342 /* what should we do */
7343 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
7344 {
7345 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
7346 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
7347 }
7348 }
7349 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
7350 /* what should we do */
7351 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
7352 }
7353 if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
7354 /* what should we do */
7355 DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
7356 }
7357 if (d2h_mb_data & D2H_DEV_D3_ACK) {
7358 /* what should we do */
7359 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
7360 if (!bus->wait_for_d3_ack) {
7361 #if defined(DHD_HANG_SEND_UP_TEST)
7362 if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
7363 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
7364 } else {
7365 dhd_bus_handle_d3_ack(bus);
7366 }
7367 #else /* DHD_HANG_SEND_UP_TEST */
7368 dhd_bus_handle_d3_ack(bus);
7369 #endif /* DHD_HANG_SEND_UP_TEST */
7370 }
7371 }
7372
7373 exit:
7374 if (MULTIBP_ENAB(bus->sih)) {
7375 dhd_bus_pcie_pwr_req_clear(bus);
7376 }
7377 }
7378
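/** D2H mailbox handler used with shared revision < 6: read and clear the
 * mailbox word, then handle each event inline.
 */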
7379 static void
7380 dhdpcie_handle_mb_data(dhd_bus_t *bus)
7381 {
7382 uint32 d2h_mb_data = 0;
7383 uint32 zero = 0;
7384
7385 if (MULTIBP_ENAB(bus->sih)) {
7386 dhd_bus_pcie_pwr_req(bus);
7387 }
7388
7389 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
7390 if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
7391 DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
7392 __FUNCTION__, d2h_mb_data));
7393 goto exit;
7394 }
7395
7396 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
7397
7398 DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
7399 if (d2h_mb_data & D2H_DEV_FWHALT) {
7400 DHD_ERROR(("FW trap has happened\n"));
7401 dhdpcie_checkdied(bus, NULL, 0);
7402 /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
7403 goto exit;
7404 }
7405 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
7406 /* what should we do */
7407 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
7408 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
7409 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
7410 }
7411 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
7412 /* what should we do */
7413 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
7414 }
7415 if (d2h_mb_data & D2H_DEV_D3_ACK) {
7416 /* what should we do */
7417 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
7418 if (!bus->wait_for_d3_ack) {
7419 #if defined(DHD_HANG_SEND_UP_TEST)
7420 if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
7421 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
7422 } else {
7423 dhd_bus_handle_d3_ack(bus);
7424 }
7425 #else /* DHD_HANG_SEND_UP_TEST */
7426 dhd_bus_handle_d3_ack(bus);
7427 #endif /* DHD_HANG_SEND_UP_TEST */
7428 }
7429 }
7430
7431 exit:
7432 if (MULTIBP_ENAB(bus->sih)) {
7433 dhd_bus_pcie_pwr_req_clear(bus);
7434 }
7435 }
7436
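/** D2H mailbox handler used with shared revision >= 6: read and clear the
 * mailbox word, then dispatch it to dhd_bus_handle_mb_data().
 */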
7437 static void
7438 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
7439 {
7440 uint32 d2h_mb_data = 0;
7441 uint32 zero = 0;
7442
7443 if (bus->is_linkdown) {
7444 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
7445 return;
7446 }
7447
7448 if (MULTIBP_ENAB(bus->sih)) {
7449 dhd_bus_pcie_pwr_req(bus);
7450 }
7451
7452 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
7453 if (!d2h_mb_data) {
7454 goto exit;
7455 }
7456
7457 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
7458
7459 dhd_bus_handle_mb_data(bus, d2h_mb_data);
7460
7461 exit:
7462 if (MULTIBP_ENAB(bus->sih)) {
7463 dhd_bus_pcie_pwr_req_clear(bus);
7464 }
7465 }
7466
7467 static bool
7468 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
7469 {
7470 bool resched = FALSE;
7471
7472 if (MULTIBP_ENAB(bus->sih)) {
7473 dhd_bus_pcie_pwr_req(bus);
7474 }
7475 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7476 (bus->sih->buscorerev == 4)) {
7477 /* Msg stream interrupt */
7478 if (intstatus & I_BIT1) {
7479 resched = dhdpci_bus_read_frames(bus);
7480 } else if (intstatus & I_BIT0) {
7481 /* do nothing for Now */
7482 }
7483 } else {
7484 if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
7485 bus->api.handle_mb_data(bus);
7486
7487 if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox &&
7488 (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) {
7489 DHD_ERROR(("%s: Bus is in power save state. "
7490 "Skip processing rest of ring buffers.\n", __FUNCTION__));
7491 goto exit;
7492 }
7493
7494 /* Validate intstatus only for INTX case */
7495 if ((bus->d2h_intr_method == PCIE_MSI) ||
7496 ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
7497 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7498 if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
7499 resched = dhdpci_bus_read_frames(bus);
7500 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
7501 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
7502 }
7503 #else
7504 resched = dhdpci_bus_read_frames(bus);
7505 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7506 }
7507 }
7508
7509 exit:
7510 if (MULTIBP_ENAB(bus->sih)) {
7511 dhd_bus_pcie_pwr_req_clear(bus);
7512 }
7513 return resched;
7514 }
7515
7516 static bool
7517 dhdpci_bus_read_frames(dhd_bus_t *bus)
7518 {
7519 bool more = FALSE;
7520
7521 /* First check if there is a FW trap */
7522 if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
7523 (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
7524 dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
7525 return FALSE;
7526 }
7527
7528 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
7529 DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7530
7531 dhd_prot_process_ctrlbuf(bus->dhd);
7532 bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
7533 /* Unlock to give chance for resp to be handled */
7534 DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7535
7536 /* Do not process rest of ring buf once bus enters low power state */
7537 if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) {
7538 DHD_ERROR(("%s: Bus is in power save state. "
7539 "Skip processing rest of ring buffers.\n", __FUNCTION__));
7540 return FALSE;
7541 }
7542
7543 DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7544 /* update the flow ring cpls */
7545 dhd_update_txflowrings(bus->dhd);
7546 bus->last_process_flowring_time = OSL_LOCALTIME_NS();
7547
7548 /* With heavy TX traffic, we could get a lot of TxStatus
7549 * completions, so process a bounded number per pass
7550 */
7551 more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
7552 bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
7553
7554 /* With heavy RX traffic, this routine potentially could spend some time
7555 * processing RX frames without RX bound
7556 */
7557 more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
7558 bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
7559
7560 /* Process info ring completion messages */
7561 more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
7562 bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
7563
7564 #ifdef IDLE_TX_FLOW_MGMT
7565 if (bus->enable_idle_flowring_mgmt) {
7566 /* Look for idle flow rings */
7567 dhd_bus_check_idle_scan(bus);
7568 }
7569 #endif /* IDLE_TX_FLOW_MGMT */
7570
7571 /* don't talk to the dongle if fw is about to be reloaded */
7572 if (bus->dhd->hang_was_sent) {
7573 more = FALSE;
7574 }
7575 DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7576
7577 #ifdef SUPPORT_LINKDOWN_RECOVERY
7578 if (bus->read_shm_fail) {
7579 /* Read interrupt state once again to confirm linkdown */
7580 int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7581 bus->pcie_mailbox_int, 0, 0);
7582 if (intstatus != (uint32)-1) {
7583 DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
7584 #ifdef DHD_FW_COREDUMP
7585 if (bus->dhd->memdump_enabled) {
7586 DHD_OS_WAKE_LOCK(bus->dhd);
7587 bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
7588 dhd_bus_mem_dump(bus->dhd);
7589 DHD_OS_WAKE_UNLOCK(bus->dhd);
7590 }
7591 #endif /* DHD_FW_COREDUMP */
7592 } else {
7593 DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
7594 #ifdef CONFIG_ARCH_MSM
7595 bus->no_cfg_restore = 1;
7596 #endif /* CONFIG_ARCH_MSM */
7597 bus->is_linkdown = 1;
7598 }
7599
7600 dhd_prot_debug_info_print(bus->dhd);
7601 bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
7602 dhd_os_send_hang_message(bus->dhd);
7603 more = FALSE;
7604 }
7605 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7606 return more;
7607 }
7608
7609 bool
7610 dhdpcie_tcm_valid(dhd_bus_t *bus)
7611 {
7612 uint32 addr = 0;
7613 int rv;
7614 uint32 shaddr = 0;
7615 pciedev_shared_t sh;
7616
7617 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
7618
7619 /* Read last word in memory to determine address of pciedev_shared structure */
7620 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
7621
7622 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
7623 (addr > shaddr)) {
7624 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
7625 __FUNCTION__, addr));
7626 return FALSE;
7627 }
7628
7629 /* Read hndrte_shared structure */
7630 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
7631 sizeof(pciedev_shared_t))) < 0) {
7632 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
7633 return FALSE;
7634 }
7635
7636 /* Compare any field in pciedev_shared_t */
7637 if (sh.console_addr != bus->pcie_sh->console_addr) {
7638 DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
7639 return FALSE;
7640 }
7641
7642 return TRUE;
7643 }
7644
7645 static void
7646 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
7647 {
7648 snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
7649 firmware_api_version, host_api_version);
7650 return;
7651 }
7652
7653 static bool
7654 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
7655 {
7656 bool retcode = FALSE;
7657
7658 DHD_INFO(("firmware api revision %d, host api revision %d\n",
7659 firmware_api_version, host_api_version));
7660
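/* Shared-structure revisions 5, 6 and 7 are accepted explicitly; any other
 * firmware revision is accepted only when it does not exceed the host revision.
 */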
7661 switch (firmware_api_version) {
7662 case PCIE_SHARED_VERSION_7:
7663 case PCIE_SHARED_VERSION_6:
7664 case PCIE_SHARED_VERSION_5:
7665 retcode = TRUE;
7666 break;
7667 default:
7668 if (firmware_api_version <= host_api_version)
7669 retcode = TRUE;
7670 }
7671 return retcode;
7672 }
7673
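/** Locate and read the pciedev_shared_t structure published by the dongle in its
 * last RAM word, validate the API revision, and initialize ring/index bookkeeping from it.
 */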
7674 static int
7675 dhdpcie_readshared(dhd_bus_t *bus)
7676 {
7677 uint32 addr = 0;
7678 int rv, dma_indx_wr_buf, dma_indx_rd_buf;
7679 uint32 shaddr = 0;
7680 pciedev_shared_t *sh = bus->pcie_sh;
7681 dhd_timeout_t tmo;
7682 bool idma_en = FALSE;
7683
7684 if (MULTIBP_ENAB(bus->sih)) {
7685 dhd_bus_pcie_pwr_req(bus);
7686 }
7687
7688 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
7689 /* start a timer for 5 seconds */
7690 dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
7691
7692 while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
7693 /* Read last word in memory to determine address of pciedev_shared structure */
7694 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
7695 }
7696
7697 if (addr == (uint32)-1) {
7698 DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
7699 #ifdef SUPPORT_LINKDOWN_RECOVERY
7700 #ifdef CONFIG_ARCH_MSM
7701 bus->no_cfg_restore = 1;
7702 #endif /* CONFIG_ARCH_MSM */
7703 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7704 bus->is_linkdown = 1;
7705 return BCME_ERROR;
7706 }
7707
7708 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
7709 (addr > shaddr)) {
7710 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
7711 __FUNCTION__, addr));
7712 DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
7713 #ifdef DEBUG_DNGL_INIT_FAIL
7714 #ifdef CUSTOMER_HW4_DEBUG
7715 bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
7716 #endif /* CUSTOMER_HW4_DEBUG */
7717 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
7718 dhdpcie_mem_dump(bus);
7719 #endif /* DEBUG_DNGL_INIT_FAIL */
7720 return BCME_ERROR;
7721 } else {
7722 bus->shared_addr = (ulong)addr;
7723 DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
7724 "before dongle is ready\n", addr, tmo.elapsed));
7725 }
7726
7727 /* Read hndrte_shared structure */
7728 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
7729 sizeof(pciedev_shared_t))) < 0) {
7730 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
7731 return rv;
7732 }
7733
7734 /* Endianness */
7735 sh->flags = ltoh32(sh->flags);
7736 sh->trap_addr = ltoh32(sh->trap_addr);
7737 sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
7738 sh->assert_file_addr = ltoh32(sh->assert_file_addr);
7739 sh->assert_line = ltoh32(sh->assert_line);
7740 sh->console_addr = ltoh32(sh->console_addr);
7741 sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
7742 sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
7743 sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
7744 sh->flags2 = ltoh32(sh->flags2);
7745
7746 /* load bus console address */
7747 bus->console_addr = sh->console_addr;
7748
7749 /* Read the dma rx offset */
7750 bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
7751 dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
7752
7753 DHD_INFO(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
7754
7755 bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
7756 if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
7757 {
7758 DHD_ERROR(("%s: pcie_shared version %d in dhd "
7759 "is older than pciedev_shared version %d in dongle\n",
7760 __FUNCTION__, PCIE_SHARED_VERSION,
7761 bus->api.fw_rev));
7762 return BCME_ERROR;
7763 }
7764 dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
7765
7766 bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
7767 sizeof(uint16) : sizeof(uint32);
7768 DHD_INFO(("%s: Dongle advertises %d size indices\n",
7769 __FUNCTION__, bus->rw_index_sz));
7770
7771 #ifdef IDLE_TX_FLOW_MGMT
7772 if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
7773 DHD_ERROR(("%s: FW Supports IdleFlow ring management!\n",
7774 __FUNCTION__));
7775 bus->enable_idle_flowring_mgmt = TRUE;
7776 }
7777 #endif /* IDLE_TX_FLOW_MGMT */
7778
7779 if (IDMA_CAPABLE(bus)) {
7780 if (bus->sih->buscorerev != 23) {
7781 idma_en = TRUE;
7782 }
7783 }
7785
7786 if (idma_en) {
7787 bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
7788 bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
7789 }
7790
7791 bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
7792
7793 bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
7794
7795 /* Does the FW support DMA'ing r/w indices */
7796 if (sh->flags & PCIE_SHARED_DMA_INDEX) {
7797 if (!bus->dhd->dma_ring_upd_overwrite) {
7798 {
7799 if (!IFRM_ENAB(bus->dhd)) {
7800 bus->dhd->dma_h2d_ring_upd_support = TRUE;
7801 }
7802 bus->dhd->dma_d2h_ring_upd_support = TRUE;
7803 }
7804 }
7805
7806 if (bus->dhd->dma_d2h_ring_upd_support)
7807 bus->dhd->d2h_sync_mode = 0;
7808
7809 DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
7810 __FUNCTION__,
7811 (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
7812 (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
7813 } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
7814 DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
7815 __FUNCTION__));
7816 return BCME_UNSUPPORTED;
7817 } else {
7818 bus->dhd->dma_h2d_ring_upd_support = FALSE;
7819 bus->dhd->dma_d2h_ring_upd_support = FALSE;
7820 }
7821
7822 /* Does the firmware support fast delete ring? */
7823 if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
7824 DHD_INFO(("%s: Firmware supports fast delete ring\n",
7825 __FUNCTION__));
7826 bus->dhd->fast_delete_ring_support = TRUE;
7827 } else {
7828 DHD_INFO(("%s: Firmware does not support fast delete ring\n",
7829 __FUNCTION__));
7830 bus->dhd->fast_delete_ring_support = FALSE;
7831 }
7832
7833 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
7834 {
7835 ring_info_t ring_info;
7836
7837 /* boundary check */
7838 if ((sh->rings_info_ptr < bus->dongle_ram_base) || (sh->rings_info_ptr > shaddr)) {
7839 DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
7840 __FUNCTION__, sh->rings_info_ptr));
7841 return BCME_ERROR;
7842 }
7843
7844 if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
7845 (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
7846 return rv;
7847
7848 bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
7849 bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
7850
7851 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
7852 bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
7853 bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
7854 bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
7855 bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
7856 bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
7857 bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
7858 }
7859 else {
7860 bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
7861 bus->max_submission_rings = bus->max_tx_flowrings;
7862 bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
7863 bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
7864 bus->api.handle_mb_data = dhdpcie_handle_mb_data;
7865 bus->use_mailbox = TRUE;
7866 }
7867 if (bus->max_completion_rings == 0) {
7868 DHD_ERROR(("dongle completion rings are invalid %d\n",
7869 bus->max_completion_rings));
7870 return BCME_ERROR;
7871 }
7872 if (bus->max_submission_rings == 0) {
7873 DHD_ERROR(("dongle submission rings are invalid %d\n",
7874 bus->max_submission_rings));
7875 return BCME_ERROR;
7876 }
7877 if (bus->max_tx_flowrings == 0) {
7878 DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
7879 return BCME_ERROR;
7880 }
7881
7882 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
7883 * The max_sub_queues is read from FW initialized ring_info
7884 */
7885 if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
7886 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7887 H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
7888 dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7889 D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
7890
7891 if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
7892 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
7893 "Host will use w/r indices in TCM\n",
7894 __FUNCTION__));
7895 bus->dhd->dma_h2d_ring_upd_support = FALSE;
7896 bus->dhd->idma_enable = FALSE;
7897 }
7898 }
7899
7900 if (bus->dhd->dma_d2h_ring_upd_support) {
7901 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7902 D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
7903 dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7904 H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
7905
7906 if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
7907 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
7908 "Host will use w/r indices in TCM\n",
7909 __FUNCTION__));
7910 bus->dhd->dma_d2h_ring_upd_support = FALSE;
7911 }
7912 }
7913
7914 if (IFRM_ENAB(bus->dhd)) {
7915 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7916 H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
7917
7918 if (dma_indx_wr_buf != BCME_OK) {
7919 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
7920 __FUNCTION__));
7921 bus->dhd->ifrm_enable = FALSE;
7922 }
7923 }
7924
7925 /* read ringmem and ringstate ptrs from shared area and store in host variables */
7926 dhd_fillup_ring_sharedptr_info(bus, &ring_info);
7927 if (dhd_msg_level & DHD_INFO_VAL) {
7928 bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
7929 }
7930 DHD_INFO(("ring_info\n"));
7931
7932 DHD_ERROR(("%s: max H2D queues %d\n",
7933 __FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
7934
7935 DHD_INFO(("mail box address\n"));
7936 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
7937 __FUNCTION__, bus->h2d_mb_data_ptr_addr));
7938 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
7939 __FUNCTION__, bus->d2h_mb_data_ptr_addr));
7940 }
7941
7942 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
7943 __FUNCTION__, bus->dhd->d2h_sync_mode));
7944
7945 bus->dhd->d2h_hostrdy_supported =
7946 ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
7947
7948 bus->dhd->ext_trap_data_supported =
7949 ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
7950
7951 if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
7952 bus->dhd->pcie_txs_metadata_enable = 0;
7953
7954 #ifdef D2H_MINIDUMP
7955 bus->d2h_minidump = (sh->flags2 & PCIE_SHARED2_FW_SMALL_MEMDUMP) ? TRUE : FALSE;
7956 DHD_ERROR(("FW supports minidump ? %s \n", bus->d2h_minidump ? "Y" : "N"));
7957 if (bus->d2h_minidump_override) {
7958 bus->d2h_minidump = FALSE;
7959 }
7960 DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
7961 bus->d2h_minidump, bus->d2h_minidump_override));
7962 #endif /* D2H_MINIDUMP */
7963
7964 if (MULTIBP_ENAB(bus->sih)) {
7965 dhd_bus_pcie_pwr_req_clear(bus);
7966 }
7967 return BCME_OK;
7968 } /* dhdpcie_readshared */
7969
7970 /** Read ring mem and ring state ptr info from shared memory area in device memory */
7971 static void
7972 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
7973 {
7974 uint16 i = 0;
7975 uint16 j = 0;
7976 uint32 tcm_memloc;
7977 uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
7978 uint16 max_tx_flowrings = bus->max_tx_flowrings;
7979
7980 /* Ring mem ptr info */
7981 /* Allocated in the order
7982 H2D_MSGRING_CONTROL_SUBMIT 0
7983 H2D_MSGRING_RXPOST_SUBMIT 1
7984 D2H_MSGRING_CONTROL_COMPLETE 2
7985 D2H_MSGRING_TX_COMPLETE 3
7986 D2H_MSGRING_RX_COMPLETE 4
7987 */
7988
7989 {
7990 /* ringmemptr holds start of the mem block address space */
7991 tcm_memloc = ltoh32(ring_info->ringmem_ptr);
7992
7993 /* Find out ringmem ptr for each ring common ring */
7994 for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
7995 bus->ring_sh[i].ring_mem_addr = tcm_memloc;
7996 /* Update mem block */
7997 tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
7998 DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
7999 i, bus->ring_sh[i].ring_mem_addr));
8000 }
8001 }
8002
8003 /* Ring state mem ptr info */
8004 {
8005 d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
8006 d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
8007 h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
8008 h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
8009
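/* The write/read index arrays are laid out contiguously in dongle memory; each
 * ring consumes rw_index_sz bytes in both arrays, so the pointers below advance
 * by rw_index_sz per ring.
 */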
8010 /* Store h2d common ring write/read pointers */
8011 for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
8012 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
8013 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
8014
8015 /* update mem block */
8016 h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
8017 h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
8018
8019 DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
8020 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8021 }
8022
8023 /* Store d2h common ring write/read pointers */
8024 for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
8025 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
8026 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
8027
8028 /* update mem block */
8029 d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
8030 d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
8031
8032 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
8033 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8034 }
8035
8036 /* Store txflow ring write/read pointers */
8037 if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
8038 max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
8039 } else {
8040 /* Account for Debug info h2d ring located after the last tx flow ring */
8041 max_tx_flowrings = max_tx_flowrings + 1;
8042 }
8043 for (j = 0; j < max_tx_flowrings; i++, j++)
8044 {
8045 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
8046 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
8047
8048 /* update mem block */
8049 h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
8050 h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
8051
8052 DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
8053 bus->ring_sh[i].ring_state_w,
8054 bus->ring_sh[i].ring_state_r));
8055 }
8056 /* store wr/rd pointers for debug info completion ring */
8057 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
8058 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
8059 d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
8060 d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
8061 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
8062 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8063 }
8064 } /* dhd_fillup_ring_sharedptr_info */
8065
8066 /**
8067 * Initialize bus module: prepare for communication with the dongle. Called after downloading
8068 * firmware into the dongle.
8069 */
8070 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
8071 {
8072 dhd_bus_t *bus = dhdp->bus;
8073 int ret = 0;
8074
8075 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8076
8077 ASSERT(bus->dhd);
8078 if (!bus->dhd)
8079 return 0;
8080
8081 if (bus->sih->buscorerev == 66) {
8082 dhd_bus_pcie_pwr_req_clear_reload_war(bus);
8083 }
8084
8085 if (MULTIBP_ENAB(bus->sih)) {
8086 dhd_bus_pcie_pwr_req(bus);
8087 }
8088
8089 /* Configure AER registers to log the TLP header */
8090 dhd_bus_aer_config(bus);
8091
8092 /* Make sure we're talking to the core. */
8093 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
8094 ASSERT(bus->reg != NULL);
8095
8096 /* before opening up bus for data transfer, check if the shared area is intact */
8097 ret = dhdpcie_readshared(bus);
8098 if (ret < 0) {
8099 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
8100 goto exit;
8101 }
8102
8103 /* Make sure we're talking to the core. */
8104 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
8105 ASSERT(bus->reg != NULL);
8106
8107 dhd_init_bus_lock(bus);
8108
8109 /* Set bus state according to enable result */
8110 dhdp->busstate = DHD_BUS_DATA;
8111 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
8112 dhdp->dhd_bus_busy_state = 0;
8113
8114 /* D11 status via PCIe completion header */
8115 if ((ret = dhdpcie_init_d11status(bus)) < 0) {
8116 goto exit;
8117 }
8118
8119 if (!dhd_download_fw_on_driverload)
8120 dhd_dpc_enable(bus->dhd);
8121 /* Enable the interrupt after device is up */
8122 dhdpcie_bus_intr_enable(bus);
8123
8124 bus->intr_enabled = TRUE;
8125
8126 /* bcmsdh_intr_unmask(bus->sdh); */
8127 #ifdef DHD_PCIE_RUNTIMEPM
8128 bus->idlecount = 0;
8129 bus->idletime = (int32)MAX_IDLE_COUNT;
8130 init_waitqueue_head(&bus->rpm_queue);
8131 mutex_init(&bus->pm_lock);
8132 #else
8133 bus->idletime = 0;
8134 #endif /* DHD_PCIE_RUNTIMEPM */
8135
8136 /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
8137 if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
8138 bus->use_d0_inform = TRUE;
8139 } else {
8140 bus->use_d0_inform = FALSE;
8141 }
8142
8143 exit:
8144 if (MULTIBP_ENAB(bus->sih)) {
8145 dhd_bus_pcie_pwr_req_clear(bus);
8146 }
8147 return ret;
8148 }
8149
8150 static void
8151 dhdpcie_init_shared_addr(dhd_bus_t *bus)
8152 {
8153 uint32 addr = 0;
8154 uint32 val = 0;
8155 addr = bus->dongle_ram_base + bus->ramsize - 4;
8156 #ifdef DHD_PCIE_RUNTIMEPM
8157 dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
8158 #endif /* DHD_PCIE_RUNTIMEPM */
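/* Clear the last word of dongle RAM, where the firmware will later publish the
 * pciedev_shared_t address, so that a stale value from a previous run is not
 * mistaken for a valid handshake.
 */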
8159 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
8160 }
8161
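/** Return 0 if the PCI vendor/device pair identifies a supported Broadcom dongle, -ENODEV otherwise. */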
8162 bool
8163 dhdpcie_chipmatch(uint16 vendor, uint16 device)
8164 {
8165 if (vendor != PCI_VENDOR_ID_BROADCOM) {
8166 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
8167 vendor, device));
8168 return (-ENODEV);
8169 }
8170
8171 if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
8172 (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
8173 (device == BCM43569_CHIP_ID)) {
8174 return 0;
8175 }
8176
8177 if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
8178 (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
8179 return 0;
8180 }
8181
8182 if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
8183 (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
8184 return 0;
8185 }
8186
8187 if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
8188 (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
8189 return 0;
8190 }
8191
8192 if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
8193 (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
8194 return 0;
8195 }
8196
8197 if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
8198 (device == BCM43452_D11AC5G_ID)) {
8199 return 0;
8200 }
8201
8202 if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
8203 (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
8204 return 0;
8205 }
8206
8207 if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
8208 (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
8209 return 0;
8210 }
8211
8212 if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
8213 (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
8214 return 0;
8215 }
8216
8217 if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
8218 (device == BCM4358_D11AC5G_ID)) {
8219 return 0;
8220 }
8221
8222 if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
8223 (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
8224 return 0;
8225 }
8226
8227 if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
8228 (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
8229 return 0;
8230 }
8231
8232 if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
8233 (device == BCM4359_D11AC5G_ID)) {
8234 return 0;
8235 }
8236
8237 if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
8238 (device == BCM43596_D11AC5G_ID)) {
8239 return 0;
8240 }
8241
8242 if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
8243 (device == BCM43597_D11AC5G_ID)) {
8244 return 0;
8245 }
8246
8247 if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
8248 (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
8249 return 0;
8250 }
8251
8252 if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
8253 (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
8254 return 0;
8255 }
8256 if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
8257 (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
8258 return 0;
8259 }
8260
8261 if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
8262 (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
8263 return 0;
8264 }
8265
8266 if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
8267 (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
8268 (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
8269 return 0;
8270 }
8271
8272 if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
8273 (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
8274 return 0;
8275 }
8276
8277 if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
8278 (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
8279 return 0;
8280 }
8281
8282 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
8283 return (-ENODEV);
8284 } /* dhdpcie_chipmatch */
8285
8286 /**
8287 * Name: dhdpcie_cc_nvmshadow
8288 *
8289 * Description:
8290 * A shadow of OTP/SPROM exists in ChipCommon Region
8291 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
8292 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
8293 * can also be read from ChipCommon Registers.
8294 */
8295 static int
8296 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
8297 {
8298 uint16 dump_offset = 0;
8299 uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
8300
8301 /* Table for 65nm OTP Size (in bits) */
8302 int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
8303
8304 volatile uint16 *nvm_shadow;
8305
8306 uint cur_coreid;
8307 uint chipc_corerev;
8308 chipcregs_t *chipcregs;
8309
8310 /* Save the current core */
8311 cur_coreid = si_coreid(bus->sih);
8312 /* Switch to ChipC */
8313 chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
8314 ASSERT(chipcregs != NULL);
8315
8316 chipc_corerev = si_corerev(bus->sih);
8317
8318 /* Check ChipcommonCore Rev */
8319 if (chipc_corerev < 44) {
8320 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
8321 return BCME_UNSUPPORTED;
8322 }
8323
8324 /* Check ChipID */
8325 if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
8326 ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
8327 ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
8328 DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
8329 "4350/4345/4355/4364 only\n", __FUNCTION__));
8330 return BCME_UNSUPPORTED;
8331 }
8332
8333 /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
8334 if (chipcregs->sromcontrol & SRC_PRESENT) {
8335 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
8336 sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
8337 >> SRC_SIZE_SHIFT))) * 1024;
8338 bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
8339 }
8340
8341 if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
8342 bcm_bprintf(b, "\nOTP Present");
8343
8344 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
8345 == OTPL_WRAP_TYPE_40NM) {
8346 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
8347 /* Chipcommon rev51 is a variation on rev45 and does not support
8348 * the latest OTP configuration.
8349 */
8350 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8351 otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
8352 >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
8353 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8354 } else {
8355 otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
8356 >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
8357 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8358 }
8359 } else {
8360 /* This part is untested since newer chips have 40nm OTP */
8361 /* Chipcommon rev51 is a variation on rev45 and does not support
8362 * the latest OTP configuration.
8363 */
8364 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8365 otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
8366 >> OTPL_ROW_SIZE_SHIFT];
8367 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8368 } else {
8369 otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
8370 >> CC_CAP_OTPSIZE_SHIFT];
8371 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8372 DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
8373 __FUNCTION__));
8374 }
8375 }
8376 }
8377
8378 /* Chipcommon rev51 is a variation on rev45 and does not support
8379 * the latest OTP configuration.
8380 */
8381 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8382 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
8383 ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
8384 DHD_ERROR(("%s: SPROM and OTP could not be found "
8385 "sromcontrol = %x, otplayout = %x \n",
8386 __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
8387 return BCME_NOTFOUND;
8388 }
8389 } else {
8390 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
8391 ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
8392 DHD_ERROR(("%s: SPROM and OTP could not be found "
8393 "sromcontrol = %x, capablities = %x \n",
8394 __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
8395 return BCME_NOTFOUND;
8396 }
8397 }
8398
8399 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
8400 if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
8401 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
8402
8403 bcm_bprintf(b, "OTP Strap selected.\n"
8404 "\nOTP Shadow in ChipCommon:\n");
8405
8406 dump_size = otp_size / 16; /* 16bit words */
8407
8408 } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
8409 (chipcregs->sromcontrol & SRC_PRESENT)) {
8410
8411 bcm_bprintf(b, "SPROM Strap selected\n"
8412 "\nSPROM Shadow in ChipCommon:\n");
8413
8414 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
8415 /* dump_size in 16bit words */
8416 dump_size = sprom_size > (8 * 1024) ? (8 * 1024) / 16 : sprom_size / 16;
8417 } else {
8418 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
8419 __FUNCTION__));
8420 return BCME_NOTFOUND;
8421 }
8422
8423 if (bus->regs == NULL) {
8424 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
8425 return BCME_NOTREADY;
8426 } else {
8427 bcm_bprintf(b, "\n OffSet:");
8428
8429 /* Chipcommon rev51 is a variation on rev45 and does not support
8430 * the latest OTP configuration.
8431 */
8432 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8433 /* Chip common can read only 8kbits,
8434 * for ccrev >= 49 otp size is around 12 kbits so use GCI core
8435 */
8436 nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
8437 } else {
8438 /* Point to the SPROM/OTP shadow in ChipCommon */
8439 nvm_shadow = chipcregs->sromotp;
8440 }
8441
8442 if (nvm_shadow == NULL) {
8443 DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
8444 return BCME_NOTFOUND;
8445 }
8446
8447 /*
8448 * Read 16 bits / iteration.
8449 * dump_size & dump_offset in 16-bit words
8450 */
8451 while (dump_offset < dump_size) {
8452 if (dump_offset % 2 == 0)
8453 /* Print the offset in the shadow space in Bytes */
8454 bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
8455
8456 bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
8457 dump_offset += 0x1;
8458 }
8459 }
8460
8461 /* Switch back to the original core */
8462 si_setcore(bus->sih, cur_coreid, 0);
8463
8464 return BCME_OK;
8465 } /* dhdpcie_cc_nvmshadow */
8466
8467 /** Flow rings are dynamically created and destroyed */
8468 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
8469 {
8470 void *pkt;
8471 flow_queue_t *queue;
8472 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
8473 unsigned long flags;
8474
8475 queue = &flow_ring_node->queue;
8476
8477 #ifdef DHDTCPACK_SUPPRESS
8478 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
8479 * when a new packet comes in from the network stack.
8480 */
8481 dhd_tcpack_info_tbl_clean(bus->dhd);
8482 #endif /* DHDTCPACK_SUPPRESS */
8483
8484 /* clean up BUS level info */
8485 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8486
8487 /* Flush all pending packets in the queue, if any */
8488 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
8489 PKTFREE(bus->dhd->osh, pkt, TRUE);
8490 }
8491 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
8492
8493 /* Reinitialise flowring's queue */
8494 dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
8495 flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
8496 flow_ring_node->active = FALSE;
8497
8498 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8499
8500 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
8501 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8502 dll_delete(&flow_ring_node->list);
8503 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8504
8505 /* Release the flowring object back into the pool */
8506 dhd_prot_flowrings_pool_release(bus->dhd,
8507 flow_ring_node->flowid, flow_ring_node->prot_info);
8508
8509 /* Free the flowid back to the flowid allocator */
8510 dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
8511 flow_ring_node->flowid);
8512 }
8513
8514 /**
8515 * Allocate a Flow ring buffer,
8516 * Init Ring buffer, send Msg to device about flow ring creation
8517 */
8518 int
8519 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
8520 {
8521 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
8522
8523 DHD_INFO(("%s :Flow create\n", __FUNCTION__));
8524
8525 /* Send Msg to device about flow ring creation */
8526 if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
8527 return BCME_NOMEM;
8528
8529 return BCME_OK;
8530 }
8531
8532 /** Handle response from dongle on a 'flow ring create' request */
8533 void
8534 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
8535 {
8536 flow_ring_node_t *flow_ring_node;
8537 unsigned long flags;
8538
8539 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
8540
8541 /* Boundary check of the flowid */
8542 if (flowid >= bus->dhd->num_flow_rings) {
8543 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
8544 flowid, bus->dhd->num_flow_rings));
8545 return;
8546 }
8547
8548 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8549 if (!flow_ring_node) {
8550 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
8551 return;
8552 }
8553
8554 ASSERT(flow_ring_node->flowid == flowid);
8555 if (flow_ring_node->flowid != flowid) {
8556 DHD_ERROR(("%s: flowid %d is different from the flowid "
8557 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
8558 flow_ring_node->flowid));
8559 return;
8560 }
8561
8562 if (status != BCME_OK) {
8563 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
8564 __FUNCTION__, status));
8565 /* Call Flow clean up */
8566 dhd_bus_clean_flow_ring(bus, flow_ring_node);
8567 return;
8568 }
8569
8570 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8571 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
8572 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8573
8574 /* Now add the flow ring node to the active list.
8575 * Note that the code which adds the newly created node to the active
8576 * list used to live in dhd_flowid_lookup, i.e. the node was added to
8577 * the active list before its contents were filled in by
8578 * dhd_prot_flow_ring_create.
8579 * If a D2H interrupt arrived after the node was added to the active
8580 * list but before it was populated, the bottom half would call
8581 * dhd_update_txflowrings, which walks the active flow ring list,
8582 * picks up the nodes and operates on them. Since
8583 * dhd_prot_flow_ring_create had not finished yet, the contents of
8584 * flow_ring_node could still be NULL, leading to crashes. Hence the
8585 * flow_ring_node is added to the active list only after it is truly
8586 * created, i.e. after the create response message has been received
8587 * from the dongle.
8588 */
8590 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8591 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
8592 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8593
8594 dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
8595
8596 return;
8597 }
8598
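/** Flush any packets queued on the flow ring and request the dongle to delete it. */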
8599 int
8600 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
8601 {
8602 void * pkt;
8603 flow_queue_t *queue;
8604 flow_ring_node_t *flow_ring_node;
8605 unsigned long flags;
8606
8607 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
8608
8609 flow_ring_node = (flow_ring_node_t *)arg;
8610
8611 #ifdef DHDTCPACK_SUPPRESS
8612 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
8613 * when a new packet comes in from the network stack.
8614 */
8615 dhd_tcpack_info_tbl_clean(bus->dhd);
8616 #endif /* DHDTCPACK_SUPPRESS */
8617 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8618 if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
8619 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8620 DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
8621 return BCME_ERROR;
8622 }
8623 flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
8624
8625 queue = &flow_ring_node->queue; /* queue associated with flow ring */
8626
8627 /* Flush all pending packets in the queue, if any */
8628 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
8629 PKTFREE(bus->dhd->osh, pkt, TRUE);
8630 }
8631 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
8632
8633 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8634
8635 /* Send Msg to device about flow ring deletion */
8636 dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
8637
8638 return BCME_OK;
8639 }
8640
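/** Handle response from dongle on a 'flow ring delete' request */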
8641 void
8642 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
8643 {
8644 flow_ring_node_t *flow_ring_node;
8645
8646 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
8647
8648 /* Boundary check of the flowid */
8649 if (flowid >= bus->dhd->num_flow_rings) {
8650 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
8651 flowid, bus->dhd->num_flow_rings));
8652 return;
8653 }
8654
8655 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8656 if (!flow_ring_node) {
8657 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
8658 return;
8659 }
8660
8661 ASSERT(flow_ring_node->flowid == flowid);
8662 if (flow_ring_node->flowid != flowid) {
8663 DHD_ERROR(("%s: flowid %d is different from the flowid "
8664 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
8665 flow_ring_node->flowid));
8666 return;
8667 }
8668
8669 if (status != BCME_OK) {
8670 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
8671 __FUNCTION__, status));
8672 return;
8673 }
8674
8675 if (flow_ring_node->status != FLOW_RING_STATUS_DELETE_PENDING) {
8676 DHD_ERROR(("%s: invalid state flowid = %d, status = %d\n",
8677 __FUNCTION__, flowid, flow_ring_node->status));
8678 return;
8679 }
8680
8681 /* Call Flow clean up */
8682 dhd_bus_clean_flow_ring(bus, flow_ring_node);
8683
8684 return;
8685
8686 }
8687
8688 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
8689 {
8690 void *pkt;
8691 flow_queue_t *queue;
8692 flow_ring_node_t *flow_ring_node;
8693 unsigned long flags;
8694
8695 DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
8696
8697 flow_ring_node = (flow_ring_node_t *)arg;
8698
8699 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8700 queue = &flow_ring_node->queue; /* queue associated with flow ring */
8701 /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
8702 * once flow ring flush response is received for this flowring node.
8703 */
8704 flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
8705
8706 #ifdef DHDTCPACK_SUPPRESS
8707 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
8708 * when a new packet comes in from the network stack.
8709 */
8710 dhd_tcpack_info_tbl_clean(bus->dhd);
8711 #endif /* DHDTCPACK_SUPPRESS */
8712
8713 /* Flush all pending packets in the queue, if any */
8714 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
8715 PKTFREE(bus->dhd->osh, pkt, TRUE);
8716 }
8717 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
8718
8719 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8720
8721 /* Send Msg to device about flow ring flush */
8722 dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
8723
8724 return BCME_OK;
8725 }
8726
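/** Handle response from dongle on a 'flow ring flush' request; the ring is reopened on success. */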
8727 void
8728 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
8729 {
8730 flow_ring_node_t *flow_ring_node;
8731
8732 if (status != BCME_OK) {
8733 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
8734 __FUNCTION__, status));
8735 return;
8736 }
8737
8738 /* Boundary check of the flowid */
8739 if (flowid >= bus->dhd->num_flow_rings) {
8740 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
8741 flowid, bus->dhd->num_flow_rings));
8742 return;
8743 }
8744
8745 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8746 if (!flow_ring_node) {
8747 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
8748 return;
8749 }
8750
8751 ASSERT(flow_ring_node->flowid == flowid);
8752 if (flow_ring_node->flowid != flowid) {
8753 DHD_ERROR(("%s: flowid %d is different from the flowid "
8754 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
8755 flow_ring_node->flowid));
8756 return;
8757 }
8758
8759 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
8760 return;
8761 }
8762
8763 uint32
8764 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
8765 {
8766 return bus->max_submission_rings;
8767 }
8768
8769 /* To be symmetric with SDIO */
8770 void
8771 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
8772 {
8773 return;
8774 }
8775
8776 void
8777 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
8778 {
8779 dhdp->bus->is_linkdown = val;
8780 }
8781
8782 int
8783 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
8784 {
8785 return dhdp->bus->is_linkdown;
8786 }
8787
8788 #ifdef IDLE_TX_FLOW_MGMT
8789 /* resume request */
8790 int
8791 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
8792 {
8793 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
8794
8795 DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
8796
8797 flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
8798
8799 /* Send Msg to device about flow ring resume */
8800 dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
8801
8802 return BCME_OK;
8803 }
8804
8805 /* add the node back to active flowring */
8806 void
8807 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
8808 {
8809
8810 flow_ring_node_t *flow_ring_node;
8811
8812 DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
8813
8814 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8815 ASSERT(flow_ring_node->flowid == flowid);
8816
8817 if (status != BCME_OK) {
8818 DHD_ERROR(("%s Error Status = %d \n",
8819 __FUNCTION__, status));
8820 return;
8821 }
8822
8823 DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
8824 __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));
8825
8826 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
8827
8828 dhd_bus_schedule_queue(bus, flowid, FALSE);
8829 return;
8830 }
8831
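/*
 * IDLE_TX_FLOW_MGMT: flow rings that have been idle for longer than
 * IDLE_FLOW_RING_TIMEOUT and have no queued packets are removed from the
 * active list and suspended in the dongle. Suspend requests are batched,
 * up to MAX_SUSPEND_REQ ring ids per message. The active list itself is
 * only rescanned once every IDLE_FLOW_LIST_TIMEOUT milliseconds.
 */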
8832 /* scan the flow rings in the active list for idle timeout */
8833 void
8834 dhd_bus_check_idle_scan(dhd_bus_t *bus)
8835 {
8836 uint64 time_stamp; /* in millisec */
8837 uint64 diff;
8838
8839 time_stamp = OSL_SYSUPTIME();
8840 diff = time_stamp - bus->active_list_last_process_ts;
8841
8842 if (diff > IDLE_FLOW_LIST_TIMEOUT) {
8843 dhd_bus_idle_scan(bus);
8844 bus->active_list_last_process_ts = OSL_SYSUPTIME();
8845 }
8846
8847 return;
8848 }
8849
8850 /* scan the nodes in the active list until a non-idle node is found */
8851 void
8852 dhd_bus_idle_scan(dhd_bus_t *bus)
8853 {
8854 dll_t *item, *prev;
8855 flow_ring_node_t *flow_ring_node;
8856 uint64 time_stamp, diff;
8857 unsigned long flags;
8858 uint16 ringid[MAX_SUSPEND_REQ];
8859 uint16 count = 0;
8860
8861 time_stamp = OSL_SYSUPTIME();
8862 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8863
8864 for (item = dll_tail_p(&bus->flowring_active_list);
8865 !dll_end(&bus->flowring_active_list, item); item = prev) {
8866 prev = dll_prev_p(item);
8867
8868 flow_ring_node = dhd_constlist_to_flowring(item);
8869
8870 if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
8871 continue;
8872
8873 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
8874 /* Takes care of deleting zombie rings */
8875 /* delete from the active list */
8876 DHD_INFO(("deleting flow id %u from active list\n",
8877 flow_ring_node->flowid));
8878 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
8879 continue;
8880 }
8881
8882 diff = time_stamp - flow_ring_node->last_active_ts;
8883
8884 if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
8885 DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
8886 /* delete from the active list */
8887 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
8888 flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
8889 ringid[count] = flow_ring_node->flowid;
8890 count++;
8891 if (count == MAX_SUSPEND_REQ) {
8892 /* create a batch message now!! */
8893 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
8894 count = 0;
8895 }
8896
8897 } else {
8898
8899 /* No more scanning, break from here! */
8900 break;
8901 }
8902 }
8903
8904 if (count) {
8905 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
8906 }
8907
8908 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8909
8910 return;
8911 }
8912
8913 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8914 {
8915 unsigned long flags;
8916 dll_t* list;
8917
8918 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8919 /* check if the node is already at head, otherwise delete it and prepend */
8920 list = dll_head_p(&bus->flowring_active_list);
8921 if (&flow_ring_node->list != list) {
8922 dll_delete(&flow_ring_node->list);
8923 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
8924 }
8925
8926 /* update flow ring timestamp */
8927 flow_ring_node->last_active_ts = OSL_SYSUPTIME();
8928
8929 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8930
8931 return;
8932 }
8933
8934 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8935 {
8936 unsigned long flags;
8937
8938 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8939
8940 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
8941 /* update flow ring timestamp */
8942 flow_ring_node->last_active_ts = OSL_SYSUPTIME();
8943
8944 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8945
8946 return;
8947 }
8948 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8949 {
8950 dll_delete(&flow_ring_node->list);
8951 }
8952
8953 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8954 {
8955 unsigned long flags;
8956
8957 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8958
8959 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
8960
8961 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8962
8963 return;
8964 }
8965 #endif /* IDLE_TX_FLOW_MGMT */
8966
8967 int
8968 dhdpcie_bus_clock_start(struct dhd_bus *bus)
8969 {
8970 return dhdpcie_start_host_pcieclock(bus);
8971 }
8972
8973 int
8974 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
8975 {
8976 return dhdpcie_stop_host_pcieclock(bus);
8977 }
8978
8979 int
8980 dhdpcie_bus_disable_device(struct dhd_bus *bus)
8981 {
8982 return dhdpcie_disable_device(bus);
8983 }
8984
8985 int
8986 dhdpcie_bus_enable_device(struct dhd_bus *bus)
8987 {
8988 return dhdpcie_enable_device(bus);
8989 }
8990
8991 int
8992 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
8993 {
8994 return dhdpcie_alloc_resource(bus);
8995 }
8996
8997 void
8998 dhdpcie_bus_free_resource(struct dhd_bus *bus)
8999 {
9000 dhdpcie_free_resource(bus);
9001 }
9002
9003 int
9004 dhd_bus_request_irq(struct dhd_bus *bus)
9005 {
9006 return dhdpcie_bus_request_irq(bus);
9007 }
9008
9009 bool
9010 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
9011 {
9012 return dhdpcie_dongle_attach(bus);
9013 }
9014
9015 int
9016 dhd_bus_release_dongle(struct dhd_bus *bus)
9017 {
9018 bool dongle_isolation;
9019 osl_t *osh;
9020
9021 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9022
9023 if (bus) {
9024 osh = bus->osh;
9025 ASSERT(osh);
9026
9027 if (bus->dhd) {
9028 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
9029 debugger_close();
9030 #endif /* DEBUGGER || DHD_DSCOPE */
9031
9032 dongle_isolation = bus->dhd->dongle_isolation;
9033 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
9034 }
9035 }
9036
9037 return 0;
9038 }
9039
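/*
 * dhdpcie_cto_init(): enable/disable CTO (Completion TimeOut) prevention.
 * When enabling, the CTO and SERR interrupts are unmasked, backplane access
 * is enabled through the SPROM control register, and the PCIe core ctoctrl
 * register is programmed roughly as follows (field widths per the
 * PCIE_CTO_* masks):
 *
 *   ctoctrl = ((cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
 *              PCIE_CTO_TO_THRESHHOLD_MASK) |
 *             ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
 *              PCIE_CTO_CLKCHKCNT_MASK) |
 *             PCIE_CTO_ENAB_MASK;
 *
 * When disabling, the interrupt mask, backplane enable and ctoctrl are
 * cleared again.
 */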
9040 void
9041 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
9042 {
9043 uint32 val;
9044
9045 if (enable) {
9046 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
9047 PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
9048 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9049 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
9050 if (bus->cto_threshold == 0) {
9051 bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
9052 }
9053
9054 si_corereg(bus->sih, bus->sih->buscoreidx,
9055 OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
9056 ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
9057 PCIE_CTO_TO_THRESHHOLD_MASK) |
9058 ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
9059 PCIE_CTO_CLKCHKCNT_MASK) |
9060 PCIE_CTO_ENAB_MASK);
9061 } else {
9062 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
9063 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9064 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
9065
9066 si_corereg(bus->sih, bus->sih->buscoreidx,
9067 OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
9068 }
9069 }
9070
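/*
 * CTO error recovery sequence (dhdpcie_cto_error_recovery() below):
 *  1. mask the CTO interrupt in PCI_INT_MASK,
 *  2. dump the DAR registers (still accessible after a CTO),
 *  3. assert SPROM_CFG_TO_SB_RST to reset the backplane,
 *  4. poll DAR_ERRLOG until the CTO error bit clears, bounded by
 *     CTO_TO_CLEAR_WAIT_MAX_CNT iterations of CTO_TO_CLEAR_WAIT_MS,
 *  5. clear PCI_INT_STATUS and de-assert SPROM_CFG_TO_SB_RST.
 */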
9071 static void
9072 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
9073 {
9074 uint32 pci_intmask, err_status, dar_val;
9075 uint8 i = 0;
9076 uint32 val;
9077
9078 pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
9079 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
9080
9081 DHD_OS_WAKE_LOCK(bus->dhd);
9082
9083 DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
9084
9085 /*
9086 * DAR still accessible
9087 */
9088 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9089 DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0);
9090 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_CLK_CTRL(bus->sih->buscorerev), dar_val));
9091
9092 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9093 DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), 0, 0);
9094 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), dar_val));
9095
9096 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9097 DAR_INTSTAT(bus->sih->buscorerev), 0, 0);
9098 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_INTSTAT(bus->sih->buscorerev), dar_val));
9099
9100 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9101 DAR_ERRLOG(bus->sih->buscorerev), 0, 0);
9102 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRLOG(bus->sih->buscorerev), dar_val));
9103
9104 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9105 DAR_ERRADDR(bus->sih->buscorerev), 0, 0);
9106 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRADDR(bus->sih->buscorerev), dar_val));
9107
9108 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9109 DAR_PCIMailBoxInt(bus->sih->buscorerev), 0, 0);
9110 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIMailBoxInt(bus->sih->buscorerev), dar_val));
9111
9112 /* reset backplane */
9113 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9114 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
9115
9116 /* clear timeout error */
9117 while (1) {
9118 err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
9119 DAR_ERRLOG(bus->sih->buscorerev),
9120 0, 0);
9121 if (err_status & PCIE_CTO_ERR_MASK) {
9122 si_corereg(bus->sih, bus->sih->buscoreidx,
9123 DAR_ERRLOG(bus->sih->buscorerev),
9124 ~0, PCIE_CTO_ERR_MASK);
9125 } else {
9126 break;
9127 }
9128 OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
9129 i++;
9130 if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
9131 DHD_ERROR(("cto recovery fail\n"));
9132
9133 DHD_OS_WAKE_UNLOCK(bus->dhd);
9134 return;
9135 }
9136 }
9137
9138 /* clear interrupt status */
9139 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
9140
9141 /* Halt ARM & remove reset */
9142 /* TBD: ARM halt could be added here if needed */
9143
9144 /* reset SPROM_CFG_TO_SB_RST */
9145 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9146
9147 DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9148 PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
9149 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
9150
9151 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9152 DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9153 PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
9154
9155 DHD_OS_WAKE_UNLOCK(bus->dhd);
9156 }
9157
9158 void
9159 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
9160 {
9161 uint32 val;
9162
9163 val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
9164 dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
9165 val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
9166 }
9167
9168 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
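/*
 * If the dongle advertises D2H D11 TX status reporting
 * (PCIE_SHARED2_D2H_D11_TX_STATUS in the shared flags2), acknowledge it by
 * setting the corresponding H2D bit in the flags2 word in dongle memory and
 * record d11_tx_status on the host.
 */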
9169 static int
9170 dhdpcie_init_d11status(struct dhd_bus *bus)
9171 {
9172 uint32 addr;
9173 uint32 flags2;
9174 int ret = 0;
9175
9176 if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
9177 flags2 = bus->pcie_sh->flags2;
9178 addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
9179 flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
9180 ret = dhdpcie_bus_membytes(bus, TRUE, addr,
9181 (uint8 *)&flags2, sizeof(flags2));
9182 if (ret < 0) {
9183 DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
9184 __FUNCTION__));
9185 return ret;
9186 }
9187 bus->pcie_sh->flags2 = flags2;
9188 bus->dhd->d11_tx_status = TRUE;
9189 }
9190 return ret;
9191 }
9192
9193 #else
9194 static int
9195 dhdpcie_init_d11status(struct dhd_bus *bus)
9196 {
9197 return 0;
9198 }
9199 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
9200
9201 #ifdef BCMPCIE_OOB_HOST_WAKE
9202 int
9203 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
9204 {
9205 return dhdpcie_oob_intr_register(dhdp->bus);
9206 }
9207
9208 void
9209 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
9210 {
9211 dhdpcie_oob_intr_unregister(dhdp->bus);
9212 }
9213
9214 void
9215 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
9216 {
9217 dhdpcie_oob_intr_set(dhdp->bus, enable);
9218 }
9219 #endif /* BCMPCIE_OOB_HOST_WAKE */
9220
9221 bool
9222 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
9223 {
9224 return bus->dhd->d2h_hostrdy_supported;
9225 }
9226
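/*
 * Dump 32-bit registers of a backplane core: the core register space is
 * selected as SI_ENUM_BASE + (index << 12) and read from first_addr to
 * last_addr (inclusive) in 4-byte steps via si_backplane_access().
 */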
9227 void
9228 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
9229 {
9230 dhd_bus_t *bus = pub->bus;
9231 uint32 coreoffset = index << 12;
9232 uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
9233 uint32 value;
9234
9235 while (first_addr <= last_addr) {
9236 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
9237 if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
9238 DHD_ERROR(("Invalid size/addr combination \n"));
9239 }
9240 DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
9241 first_addr = first_addr + 4;
9242 }
9243 }
9244
9245 bool
9246 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
9247 {
9248 if (!bus->dhd)
9249 return FALSE;
9250 else if (bus->idma_enabled) {
9251 return bus->dhd->idma_enable;
9252 } else {
9253 return FALSE;
9254 }
9255 }
9256
9257 bool
9258 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
9259 {
9260 if (!bus->dhd)
9261 return FALSE;
9262 else if (bus->ifrm_enabled) {
9263 return bus->dhd->ifrm_enable;
9264 } else {
9265 return FALSE;
9266 }
9267 }
9268
9269 bool
9270 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
9271 {
9272 if (!bus->dhd) {
9273 return FALSE;
9274 } else if (bus->dar_enabled) {
9275 return bus->dhd->dar_enable;
9276 } else {
9277 return FALSE;
9278 }
9279 }
9280
9281 void
9282 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
9283 {
9284 DHD_ERROR(("ENABLING DW:%d\n", dw_option));
9285 bus->dw_option = dw_option;
9286 }
9287
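/*
 * Print the last ARM trap reported by the dongle. r13/r14/pc are printed
 * under their ARM aliases (sp, lp, rpc), the trap offset comes from the
 * shared trap_addr, and all values are converted from dongle (little
 * endian) order with ltoh32().
 */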
9288 void
9289 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
9290 {
9291 trap_t *tr = &bus->dhd->last_trap_info;
9292 bcm_bprintf(strbuf,
9293 "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
9294 " lp 0x%x, rpc 0x%x"
9295 "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
9296 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
9297 "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
9298 ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
9299 ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
9300 ltoh32(bus->pcie_sh->trap_addr),
9301 ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
9302 ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
9303 ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
9304 ltoh32(tr->r11), ltoh32(tr->r12));
9305 }
9306
9307 int
9308 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
9309 {
9310 int bcmerror = 0;
9311 struct dhd_bus *bus = dhdp->bus;
9312
9313 if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
9314 DHD_ERROR(("Invalid size/addr combination \n"));
9315 bcmerror = BCME_ERROR;
9316 }
9317
9318 return bcmerror;
9319 }
9320
9321 int
9322 dhd_get_idletime(dhd_pub_t *dhd)
9323 {
9324 return dhd->bus->idletime;
9325 }
9326
9327 #ifdef DHD_SSSR_DUMP
9328
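/*
 * Small helper used by the SSSR dump path: performs a single 32-bit
 * backplane register read or write through si_backplane_access(), with a
 * 1 us settle delay and an error-level trace of addr/val/direction.
 */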
9329 static INLINE void
9330 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
9331 {
9332 OSL_DELAY(1);
9333 si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
9334 DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
9335 return;
9336 }
9337
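/*
 * Dump 'fifo_size' bytes of save/restore memory through an address/data
 * register pair: the address register is reset to 0 and the data register
 * is then read fifo_size/4 times, 4 bytes per access (the data register
 * presumably auto-increments the address on each read).
 */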
9338 static int
9339 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
9340 uint addr_reg, uint data_reg)
9341 {
9342 uint addr;
9343 uint val = 0;
9344 int i;
9345
9346 DHD_ERROR(("%s\n", __FUNCTION__));
9347
9348 if (!buf) {
9349 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
9350 return BCME_ERROR;
9351 }
9352
9353 if (!fifo_size) {
9354 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
9355 return BCME_ERROR;
9356 }
9357
9358 /* Set the base address offset to 0 */
9359 addr = addr_reg;
9360 val = 0;
9361 dhd_sbreg_op(dhd, addr, &val, FALSE);
9362
9363 addr = data_reg;
9364 /* Read 4 bytes at once and loop for fifo_size / 4 */
9365 for (i = 0; i < fifo_size / 4; i++) {
9366 si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
9367 buf[i] = val;
9368 OSL_DELAY(1);
9369 }
9370 return BCME_OK;
9371 }
9372
9373 static int
9374 dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
9375 uint addr_reg)
9376 {
9377 uint addr;
9378 uint val = 0;
9379 int i;
9380 si_t *sih = dhd->bus->sih;
9381
9382 DHD_ERROR(("%s\n", __FUNCTION__));
9383
9384 if (!buf) {
9385 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
9386 return BCME_ERROR;
9387 }
9388
9389 if (!fifo_size) {
9390 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
9391 return BCME_ERROR;
9392 }
9393
9394 if (addr_reg) {
9395
9396 if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
9397 dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
9398 dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, fifo_size);
9399 } else {
9400 /* Check if vasip clk is disabled, if yes enable it */
9401 addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
9402 dhd_sbreg_op(dhd, addr, &val, TRUE);
9403 if (!val) {
9404 val = 1;
9405 dhd_sbreg_op(dhd, addr, &val, FALSE);
9406 }
9407
9408 addr = addr_reg;
9409 /* Read 4 bytes at once and loop for fifo_size / 4 */
9410 for (i = 0; i < fifo_size / 4; i++, addr += 4) {
9411 si_backplane_access(sih, addr, sizeof(uint), &val, TRUE);
9412 buf[i] = val;
9413 OSL_DELAY(1);
9414 }
9415 }
9416 } else {
9417 uint cur_coreid;
9418 uint chipc_corerev;
9419 chipcregs_t *chipcregs;
9420
9421 /* Save the current core */
9422 cur_coreid = si_coreid(sih);
9423
9424 /* Switch to ChipC */
9425 chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
9426
9427 chipc_corerev = si_corerev(sih);
9428
9429 if (chipc_corerev == 64) {
9430 W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
9431
9432 /* Read 4 bytes at once and loop for fifo_size / 4 */
9433 for (i = 0; i < fifo_size / 4; i++) {
9434 buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
9435 OSL_DELAY(1);
9436 }
9437 }
9438
9439 /* Switch back to the original core */
9440 si_setcore(sih, cur_coreid, 0);
9441 }
9442
9443 return BCME_OK;
9444 }
9445
9446 #if defined(BCMPCIE) && defined(DHD_LOG_DUMP)
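/*
 * Extract preserved event logs from extended trap data (ETD): parse the
 * TAG_TRAP_LOG_DATA TLV, bounds-check the log array address against dongle
 * RAM, read each eventlog buffer into concise_dbg_buf and hand it to
 * dhd_dbg_msgtrace_log_parser() with an incrementing sequence number.
 */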
9447 void
9448 dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
9449 uint8 *ext_trap_data, void *event_decode_data)
9450 {
9451 hnd_ext_trap_hdr_t *hdr = NULL;
9452 bcm_tlv_t *tlv;
9453 eventlog_trapdata_info_t *etd_evtlog = NULL;
9454 eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
9455 uint arr_size = 0;
9456 int i = 0;
9457 int err = 0;
9458 uint32 seqnum = 0;
9459
9460 if (!ext_trap_data || !event_decode_data || !dhd)
9461 return;
9462
9463 if (!dhd->concise_dbg_buf)
9464 return;
9465
9466 /* First word is original trap_data, skip */
9467 ext_trap_data += sizeof(uint32);
9468
9469 hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
9470 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
9471 if (tlv) {
9472 uint32 baseaddr = 0;
9473 uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
9474
9475 etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
9476 DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
9477 "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
9478 (etd_evtlog->num_elements),
9479 ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
9480 arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
9481 if (!arr_size) {
9482 DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
9483 return;
9484 }
9485 evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
9486 if (!evtlog_buf_arr) {
9487 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
9488 return;
9489 }
9490
9491 /* boundary check */
9492 baseaddr = etd_evtlog->log_arr_addr;
9493 if ((baseaddr < dhd->bus->dongle_ram_base) ||
9494 ((baseaddr + arr_size) > endaddr)) {
9495 DHD_ERROR(("%s: Error reading invalid address\n",
9496 __FUNCTION__));
9497 goto err;
9498 }
9499
9500 /* read the eventlog_trap_buf_info_t array from dongle memory */
9501 err = dhdpcie_bus_membytes(dhd->bus, FALSE,
9502 (ulong)(etd_evtlog->log_arr_addr),
9503 (uint8 *)evtlog_buf_arr, arr_size);
9504 if (err != BCME_OK) {
9505 DHD_ERROR(("%s: Error reading event log array from dongle !\n",
9506 __FUNCTION__));
9507 goto err;
9508 }
9509 /* ntoh conversion is required only for seq_num: event logs coming from
9510 * the info ring are sent by the dongle in that byte order, and for ETD
9511 * the dongle follows the same convention.
9512 */
9513 seqnum = ntoh32(etd_evtlog->seq_num);
9514 memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
9515 for (i = 0; i < (etd_evtlog->num_elements); ++i) {
9516 /* boundary check */
9517 baseaddr = evtlog_buf_arr[i].buf_addr;
9518 if ((baseaddr < dhd->bus->dongle_ram_base) ||
9519 ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
9520 DHD_ERROR(("%s: Error reading invalid address\n",
9521 __FUNCTION__));
9522 goto err;
9523 }
9524 /* read each individual event log buf from dongle memory */
9525 err = dhdpcie_bus_membytes(dhd->bus, FALSE,
9526 ((ulong)evtlog_buf_arr[i].buf_addr),
9527 dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
9528 if (err != BCME_OK) {
9529 DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
9530 __FUNCTION__));
9531 goto err;
9532 }
9533 dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
9534 event_decode_data, (evtlog_buf_arr[i].len),
9535 FALSE, hton32(seqnum));
9536 ++seqnum;
9537 }
9538 err:
9539 MFREE(dhd->osh, evtlog_buf_arr, arr_size);
9540 } else {
9541 DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
9542 }
9543 }
9544 #endif /* BCMPCIE && DHD_LOG_DUMP */
9545
9546 static int
9547 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
9548 {
9549 uint addr;
9550 uint val;
9551
9552 DHD_ERROR(("%s\n", __FUNCTION__));
9553
9554 /* conditionally set bits [11:8] of PowerCtrl */
9555 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9556 dhd_sbreg_op(dhd, addr, &val, TRUE);
9557 if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
9558 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9559 val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
9560 dhd_sbreg_op(dhd, addr, &val, FALSE);
9561 }
9562 return BCME_OK;
9563 }
9564
9565 static int
9566 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
9567 {
9568 uint addr;
9569 uint val;
9570
9571 DHD_ERROR(("%s\n", __FUNCTION__));
9572
9573 /* conditionally clear bits [11:8] of PowerCtrl */
9574 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9575 dhd_sbreg_op(dhd, addr, &val, TRUE);
9576 if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
9577 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9578 val = 0;
9579 dhd_sbreg_op(dhd, addr, &val, FALSE);
9580 }
9581 return BCME_OK;
9582 }
9583
9584 static int
9585 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
9586 {
9587 uint addr;
9588 uint val;
9589
9590 DHD_ERROR(("%s\n", __FUNCTION__));
9591
9592 /* clear chipcommon intmask */
9593 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
9594 val = 0x0;
9595 dhd_sbreg_op(dhd, addr, &val, FALSE);
9596
9597 /* clear PMUIntMask0 */
9598 addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
9599 val = 0x0;
9600 dhd_sbreg_op(dhd, addr, &val, FALSE);
9601
9602 /* clear PMUIntMask1 */
9603 addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
9604 val = 0x0;
9605 dhd_sbreg_op(dhd, addr, &val, FALSE);
9606
9607 /* clear res_req_timer */
9608 addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
9609 val = 0x0;
9610 dhd_sbreg_op(dhd, addr, &val, FALSE);
9611
9612 /* clear macresreqtimer */
9613 addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
9614 val = 0x0;
9615 dhd_sbreg_op(dhd, addr, &val, FALSE);
9616
9617 /* clear macresreqtimer1 */
9618 addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
9619 val = 0x0;
9620 dhd_sbreg_op(dhd, addr, &val, FALSE);
9621
9622 /* clear VasipClkEn */
9623 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
9624 addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
9625 val = 0x0;
9626 dhd_sbreg_op(dhd, addr, &val, FALSE);
9627 }
9628
9629 return BCME_OK;
9630 }
9631
9632 static int
9633 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
9634 {
9635 int i;
9636 uint addr;
9637 uint val = 0;
9638
9639 DHD_ERROR(("%s\n", __FUNCTION__));
9640
9641 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9642 /* Check if bit 0 of resetctrl is cleared */
9643 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
9644 if (!addr) {
9645 DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
9646 __FUNCTION__, i));
9647 /* ignore invalid address */
9648 dhd->sssr_d11_outofreset[i] = FALSE;
9649 continue;
9650 }
9651 dhd_sbreg_op(dhd, addr, &val, TRUE);
9652 if (!(val & 1)) {
9653 dhd->sssr_d11_outofreset[i] = TRUE;
9654 } else {
9655 dhd->sssr_d11_outofreset[i] = FALSE;
9656 }
9657 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
9658 __FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
9659 }
9660 return BCME_OK;
9661 }
9662
9663 static int
9664 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
9665 {
9666 int i;
9667 uint addr;
9668 uint val = 0;
9669
9670 DHD_ERROR(("%s\n", __FUNCTION__));
9671
9672 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9673 if (dhd->sssr_d11_outofreset[i]) {
9674 /* clear request clk only if itopoobb is non zero */
9675 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
9676 dhd_sbreg_op(dhd, addr, &val, TRUE);
9677 if (val != 0) {
9678 /* clear clockcontrolstatus */
9679 addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
9680 val =
9681 dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
9682 dhd_sbreg_op(dhd, addr, &val, FALSE);
9683 }
9684 }
9685 }
9686 return BCME_OK;
9687 }
9688
9689 static int
9690 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
9691 {
9692 uint addr;
9693 uint val = 0;
9694
9695 DHD_ERROR(("%s\n", __FUNCTION__));
9696
9697 /* Check if bit 0 of resetctrl is cleared */
9698 addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
9699 dhd_sbreg_op(dhd, addr, &val, TRUE);
9700 if (!(val & 1)) {
9701 /* clear request clk only if itopoobb is non zero */
9702 addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
9703 dhd_sbreg_op(dhd, addr, &val, TRUE);
9704 if (val != 0) {
9705 /* clear clockcontrolstatus */
9706 addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
9707 val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
9708 dhd_sbreg_op(dhd, addr, &val, FALSE);
9709 }
9710 }
9711 return BCME_OK;
9712 }
9713
9714 static int
9715 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
9716 {
9717 uint addr;
9718 uint val = 0;
9719
9720 DHD_ERROR(("%s\n", __FUNCTION__));
9721
9722 /* clear request clk only if itopoobb is non zero */
9723 addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
9724 dhd_sbreg_op(dhd, addr, &val, TRUE);
9725 if (val) {
9726 /* clear clockcontrolstatus */
9727 addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
9728 val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
9729 dhd_sbreg_op(dhd, addr, &val, FALSE);
9730 }
9731 return BCME_OK;
9732 }
9733
9734 static int
9735 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
9736 {
9737 uint addr;
9738 uint val = 0;
9739
9740 DHD_ERROR(("%s\n", __FUNCTION__));
9741
9742 addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
9743 val = LTR_ACTIVE;
9744 dhd_sbreg_op(dhd, addr, &val, FALSE);
9745
9746 val = LTR_SLEEP;
9747 dhd_sbreg_op(dhd, addr, &val, FALSE);
9748
9749 return BCME_OK;
9750 }
9751
9752 static int
9753 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
9754 {
9755 DHD_ERROR(("%s\n", __FUNCTION__));
9756
9757 dhdpcie_arm_clear_clk_req(dhd);
9758
9759 dhdpcie_d11_clear_clk_req(dhd);
9760
9761 dhdpcie_pcie_clear_clk_req(dhd);
9762
9763 return BCME_OK;
9764 }
9765
9766 static int
9767 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
9768 {
9769 int i;
9770 uint addr;
9771 uint val = 0;
9772
9773 DHD_ERROR(("%s\n", __FUNCTION__));
9774
9775 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9776 if (dhd->sssr_d11_outofreset[i]) {
9777 /* disable core by setting bit 0 */
9778 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
9779 val = 1;
9780 dhd_sbreg_op(dhd, addr, &val, FALSE);
9781 OSL_DELAY(6000);
9782
9783 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
9784 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
9785 dhd_sbreg_op(dhd, addr, &val, FALSE);
9786
9787 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
9788 dhd_sbreg_op(dhd, addr, &val, FALSE);
9789
9790 /* enable core by clearing bit 0 */
9791 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
9792 val = 0;
9793 dhd_sbreg_op(dhd, addr, &val, FALSE);
9794
9795 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
9796 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
9797 dhd_sbreg_op(dhd, addr, &val, FALSE);
9798
9799 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
9800 dhd_sbreg_op(dhd, addr, &val, FALSE);
9801
9802 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
9803 dhd_sbreg_op(dhd, addr, &val, FALSE);
9804 }
9805 }
9806 return BCME_OK;
9807 }
9808
9809 static int
9810 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
9811 {
9812 int i;
9813
9814 DHD_ERROR(("%s\n", __FUNCTION__));
9815
9816 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9817 if (dhd->sssr_d11_outofreset[i]) {
9818 dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
9819 dhd->sssr_reg_info.mac_regs[i].sr_size,
9820 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
9821 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
9822 }
9823 }
9824
9825 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
9826 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
9827 dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
9828 dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
9829 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
9830 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
9831 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
9832 dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
9833 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
9834 }
9835
9836 return BCME_OK;
9837 }
9838
9839 static int
9840 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
9841 {
9842 int i;
9843
9844 DHD_ERROR(("%s\n", __FUNCTION__));
9845
9846 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9847 if (dhd->sssr_d11_outofreset[i]) {
9848 dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
9849 dhd->sssr_reg_info.mac_regs[i].sr_size,
9850 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
9851 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
9852 }
9853 }
9854
9855 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
9856 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
9857 dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
9858 dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
9859 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
9860 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
9861 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
9862 dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
9863 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
9864 }
9865
9866 return BCME_OK;
9867 }
9868
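/*
 * SSSR dump sequence: note which D11 cores are out of reset, capture the
 * "before SR" contents, then quiesce the chip (clear interrupt masks and
 * PMU timers, adjust chipcommon PowerCtrl, drop clock requests, send LTR
 * sleep), wait ~6 ms, restore PowerCtrl and bring the D11 cores back out
 * of reset, capture the "after SR" contents and schedule the dump to be
 * written out.
 */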
9869 static int
9870 dhdpcie_sssr_dump(dhd_pub_t *dhd)
9871 {
9872 if (!dhd->sssr_inited) {
9873 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
9874 return BCME_ERROR;
9875 }
9876
9877 if (dhd->bus->is_linkdown) {
9878 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
9879 return BCME_ERROR;
9880 }
9881
9882 dhdpcie_d11_check_outofreset(dhd);
9883
9884 DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
9885 if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
9886 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
9887 return BCME_ERROR;
9888 }
9889
9890 dhdpcie_clear_intmask_and_timer(dhd);
9891 dhdpcie_suspend_chipcommon_powerctrl(dhd);
9892 dhdpcie_clear_clk_req(dhd);
9893 dhdpcie_pcie_send_ltrsleep(dhd);
9894
9895 /* Wait for some time before Restore */
9896 OSL_DELAY(6000);
9897
9898 dhdpcie_resume_chipcommon_powerctrl(dhd);
9899 dhdpcie_bring_d11_outofreset(dhd);
9900
9901 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
9902 if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
9903 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
9904 return BCME_ERROR;
9905 }
9906
9907 dhd_schedule_sssr_dump(dhd);
9908
9909 return BCME_OK;
9910 }
9911
9912 int
9913 dhd_bus_sssr_dump(dhd_pub_t *dhd)
9914 {
9915 return dhdpcie_sssr_dump(dhd);
9916 }
9917 #endif /* DHD_SSSR_DUMP */
9918
9919 #ifdef DHD_WAKE_STATUS
9920 wake_counts_t*
9921 dhd_bus_get_wakecount(dhd_pub_t *dhd)
9922 {
9923 return &dhd->bus->wake_counts;
9924 }
9925 int
9926 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
9927 {
9928 return bcmpcie_set_get_wake(dhd->bus, 0);
9929 }
9930 #endif /* DHD_WAKE_STATUS */
9931
9932 #define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP)
9933 #define OTP_USER_AREA_OFFSET 0x80
9934 #define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET)
9935 #define OTP_VERSION_TUPLE_ID 0x15
9936 #define OTP_VENDOR_TUPLE_ID 0x80
9937 #define OTP_CIS_REGION_END_TUPLE_ID 0xFF
9938 #define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE)
9939 #define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG)
9940 #define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4)
9941 #define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL)
9942 #define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C)
9943 #define PMU_OTP_PWR_ON_MASK 0xC47
9944
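/*
 * Build the Olympic-style nvram file name from OTP contents (4355/4364
 * only). The OTP is powered up (OTPCtrl1 clock enable, PMU min res mask,
 * and OtpSelect when an SROM is also present), then the CIS tuples in the
 * OTP user area are walked to find the version tuple. The "s=", "M=",
 * "m=" and "V=" fields are parsed out of the module info string and
 * combined into "P-<program>_M-<module>_V-<vendor>__m-<rev>.txt". The
 * touched registers are restored before returning.
 */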
9945 int
9946 dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char* program, char *nv_path)
9947 {
9948 uint32 val = 0;
9949 uint16 chip_id = 0;
9950 uint8 otp_data[2];
9951 char stepping[3];
9952 char module_name[5];
9953 char module_vendor = 0;
9954 char module_rev[4];
9955 uint8 tuple_id = 0;
9956 uint8 tuple_len = 0;
9957 uint32 cur_offset = 0;
9958 uint32 version_tuple_offset = 0;
9959 char module_info[64];
9960 char progname[32];
9961 bool srom_present = 0, otp_present = 0;
9962 uint32 sprom_ctrl = 0;
9963 uint32 otp_ctrl = 0, minres_mask = 0;
9964 int i = 0, j = 0, status = BCME_ERROR;
9965
9966 if (!nv_path || !bus) {
9967 return BCME_ERROR;
9968 }
9969
9970 /* read chip id first */
9971 if (si_backplane_access(bus->sih, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE) != BCME_OK) {
9972 DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__));
9973 }
9974 else {
9975 chip_id = val & 0xffff;
9976 }
9977
9978 /* read SpromCtrl register */
9979 si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, TRUE);
9980 val = sprom_ctrl;
9981
9982 /* proceed only if OTP is present - i.e., the 5th bit OtpPresent is set
9983 * and chip is 4355 or 4364
9984 */
9985 if ((val & 0x20) && (chip_id == 0x4355 || chip_id == 0x4364)) {
9986 otp_present = 1;
9987
9988 /* Check if the 4th bit (sprom_present) in CC Status REG is set */
9989 si_backplane_access(bus->sih, CHIP_COMMON_STATUS_REG_ADDR, 4, &val, TRUE);
9990 if (val & 0x10) {
9991 srom_present = 1;
9992 }
9993
9994 /* OTP power up sequence */
9995 /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */
9996 si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, TRUE);
9997 val = 0x1A0000;
9998 si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &val, FALSE);
9999
10000 /* 2. enable OTP power through min res mask register in PMU */
10001 si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4, &minres_mask, TRUE);
10002 val = minres_mask | PMU_OTP_PWR_ON_MASK;
10003 si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4, &val, FALSE);
10004
10005 /* 3. if srom is present, need to set OtpSelect 4th bit
10006 * in SpromCtrl register to read otp
10007 */
10008 if (srom_present) {
10009
10010 val = sprom_ctrl | 0x10;
10011 si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &val, FALSE);
10012
10013 }
10014 /* Wait for PMU to power up. */
10015 OSL_DELAY(500);
10016 si_backplane_access(bus->sih, PMU_RES_STATE_REG_ADDR, 4, &val, TRUE);
10017 DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__, val));
10018
10019 si_backplane_access(bus->sih, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE);
10020 DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__, val));
10021
10022 si_backplane_access(bus->sih, OTP_ADDRESS, 2, &val, TRUE);
10023 DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__, val));
10024
10025 cur_offset = OTP_USER_AREA_ADDR + 0x40;
10026 /* Read the data required from OTP to construct the FW file name,
10027 * such as chip info and module info. This is present in the form of
10028 * a Vendor CIS tuple whose format is defined by Olympic. The data
10029 * consists of ASCII character strings. The Vendor tuple, along with
10030 * the other CIS tuples, resides in the OTP user area. A CIS tuple is
10031 * in TLV format.
10032 * (T = 1-byte, L = 1-byte, V = n-bytes)
10033 */
10034
10035 /* Find the version tuple */
10036 while (tuple_id != OTP_CIS_REGION_END_TUPLE_ID) {
10037 si_backplane_access(bus->sih, cur_offset,
10038 2, (uint *)otp_data, TRUE);
10039
10040 tuple_id = otp_data[0];
10041 tuple_len = otp_data[1];
10042 if (tuple_id == OTP_VERSION_TUPLE_ID) {
10043 version_tuple_offset = cur_offset;
10044 break;
10045 }
10046 /* if it is a NULL tuple, skip */
10047 if (tuple_id == 0)
10048 cur_offset += 1;
10049 else
10050 cur_offset += tuple_len + 2;
10051 }
10052
10053 /* skip the major, minor ver. numbers, manufacturer and product names */
10054 cur_offset = version_tuple_offset + 6;
10055
10056 /* read the chip info */
10057 si_backplane_access(bus->sih, cur_offset,
10058 2, (uint *)otp_data, TRUE);
10059 if (otp_data[0] == 's' && otp_data[1] == '=') {
10060 /* read the stepping */
10061 cur_offset += 2;
10062 stepping[2] = 0;
10063 si_backplane_access(bus->sih, cur_offset,
10064 2, (uint *)stepping, TRUE);
10065 /* read module info */
10066 memset(module_info, 0, 64);
10067 cur_offset += 2;
10068 si_backplane_access(bus->sih, cur_offset,
10069 2, (uint *)otp_data, TRUE);
10070 while (otp_data[0] != OTP_CIS_REGION_END_TUPLE_ID &&
10071 otp_data[1] != OTP_CIS_REGION_END_TUPLE_ID) {
10072 memcpy(&module_info[i], otp_data, 2);
10073 i += 2;
10074 cur_offset += 2;
10075 si_backplane_access(bus->sih, cur_offset,
10076 2, (uint *)otp_data, TRUE);
10077 }
10078 /* replace any null characters found at the beginning
10079 * and middle of the string
10080 */
10081 for (j = 0; j < i; ++j) {
10082 if (module_info[j] == 0)
10083 module_info[j] = ' ';
10084 }
10085 DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n",
10086 stepping[0], stepping[1], module_info));
10087 /* extract the module name, revision and vendor
10088 * information from the module info string
10089 */
10090 for (i = 0; module_info[i]; i++) {
10091 if (module_info[i] == 'M' && module_info[i + 1] == '=') {
10092 memcpy(module_name, &module_info[i + 2], 4);
10093 module_name[4] = 0;
10094 i += 5;
10095 }
10096 else if (module_info[i] == 'm' && module_info[i + 1] == '=') {
10097 memcpy(module_rev, &module_info[i + 2], 3);
10098 module_rev[3] = 0;
10099 i += 4;
10100 }
10101 else if (module_info[i] == 'V' && module_info[i + 1] == '=') {
10102 module_vendor = module_info[i + 2];
10103 i += 2;
10104 }
10105 }
10106
10107 /* construct the complete file path to nvram as per
10108 * olympic conventions
10109 */
10110 strncpy(progname, program, sizeof(progname) - 1);
10110 progname[sizeof(progname) - 1] = '\0'; /* strncpy may leave it unterminated */
10111 sprintf(nv_path, "P-%s_M-%s_V-%c__m-%s.txt", progname, module_name,
10112 module_vendor, module_rev);
10113 DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__, nv_path));
10114 status = BCME_OK;
10115 }
10116
10117 /* restore back the registers to their previous values */
10118 if (srom_present) {
10119 si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, FALSE);
10120 }
10121
10122 if (otp_present) {
10123 si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4,
10124 &minres_mask, FALSE);
10125 si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, FALSE);
10126 }
10127
10128 }
10129 return status;
10130 }
10131
10132 /* Writes random number(s) to the TCM. FW upon initialization reads this location
10133 * to fetch the random number and uses it to randomize the heap address space layout.
10134 */
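/*
 * Rough TCM layout used below (derived from the address arithmetic):
 *
 *   addr          : bcm_rand_metadata_t { signature, count }
 *   addr - count  : 'count' random bytes from dhd_get_random_bytes()
 *
 * where addr is computed back from the top of dongle RAM, past the
 * (compressed) NVRAM image.
 */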
10135 static int
10136 dhdpcie_wrt_rnd(struct dhd_bus *bus)
10137 {
10138 bcm_rand_metadata_t rnd_data;
10139 uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
10140 uint32 count = BCM_ENTROPY_HOST_NBYTES;
10141 int ret = 0;
10142 uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
10143 ((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
10144
10145 memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
10146 rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
10147 rnd_data.count = htol32(count);
10148 /* write the metadata about random number */
10149 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
10150 /* scale back by number of random number counts */
10151 addr -= count;
10152
10153 /* Now get & write the random number(s) */
10154 ret = dhd_get_random_bytes(rand_buf, count);
10155 if (ret != BCME_OK) {
10156 return ret;
10157 }
10158 dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
10159
10160 return BCME_OK;
10161 }
10162
10163 #ifdef D2H_MINIDUMP
10164 bool
10165 dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp)
10166 {
10167 return dhdp->bus->d2h_minidump;
10168 }
10169 #endif /* D2H_MINIDUMP */
10170
10171 void
10172 dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
10173 {
10174 struct dhd_bus *bus = dhd->bus;
10175 uint64 current_time;
10176
10177 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
10178 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
10179 bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
10180 DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
10181 bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
10182 #ifdef BCMPCIE_OOB_HOST_WAKE
10183 DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
10184 bus->oob_intr_count, bus->oob_intr_enable_count,
10185 bus->oob_intr_disable_count));
10186 DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
10187 dhdpcie_get_oob_irq_num(bus),
10188 GET_SEC_USEC(bus->last_oob_irq_time)));
10189 #endif /* BCMPCIE_OOB_HOST_WAKE */
10190 DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
10191 bus->dpc_return_busdown_count, bus->non_ours_irq_count));
10192
10193 current_time = OSL_LOCALTIME_NS();
10194 DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
10195 GET_SEC_USEC(current_time)));
10196 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
10197 " isr_exit_time="SEC_USEC_FMT"\n",
10198 GET_SEC_USEC(bus->isr_entry_time),
10199 GET_SEC_USEC(bus->isr_exit_time)));
10200 DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
10201 " last_non_ours_irq_time="SEC_USEC_FMT"\n",
10202 GET_SEC_USEC(bus->dpc_sched_time),
10203 GET_SEC_USEC(bus->last_non_ours_irq_time)));
10204 DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
10205 " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
10206 GET_SEC_USEC(bus->dpc_entry_time),
10207 GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
10208 DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
10209 " last_process_txcpl_time="SEC_USEC_FMT"\n",
10210 GET_SEC_USEC(bus->last_process_flowring_time),
10211 GET_SEC_USEC(bus->last_process_txcpl_time)));
10212 DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
10213 " last_process_infocpl_time="SEC_USEC_FMT"\n",
10214 GET_SEC_USEC(bus->last_process_rxcpl_time),
10215 GET_SEC_USEC(bus->last_process_infocpl_time)));
10216 DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
10217 " resched_dpc_time="SEC_USEC_FMT"\n",
10218 GET_SEC_USEC(bus->dpc_exit_time),
10219 GET_SEC_USEC(bus->resched_dpc_time)));
10220 DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
10221 GET_SEC_USEC(bus->last_d3_inform_time)));
10222
10223 DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
10224 " last_suspend_end_time="SEC_USEC_FMT"\n",
10225 GET_SEC_USEC(bus->last_suspend_start_time),
10226 GET_SEC_USEC(bus->last_suspend_end_time)));
10227 DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
10228 " last_resume_end_time="SEC_USEC_FMT"\n",
10229 GET_SEC_USEC(bus->last_resume_start_time),
10230 GET_SEC_USEC(bus->last_resume_end_time)));
10231 }
10232
10233 void
10234 dhd_bus_intr_count_dump(dhd_pub_t *dhd)
10235 {
10236 dhd_pcie_intr_count_dump(dhd);
10237 }
10238
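/*
 * Dump the PCIe core DMA engine registers via si_corereg(). The fixed
 * offsets 0x200/0x220 and 0x240/0x260 are assumed here to be the Xmt/Rcv
 * register blocks of the host-to-device and device-to-host DMA channels
 * respectively, matching the labels printed below.
 */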
10239 int
10240 dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
10241 {
10242 if (dhd->bus->is_linkdown) {
10243 DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
10244 "due to PCIe link down ------- \r\n"));
10245 return 0;
10246 }
10247
10248 DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
10249
10250 //HostToDev
10251 DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
10252 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
10253 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
10254 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
10255 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
10256 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
10257 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
10258 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
10259 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
10260
10261 DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
10262 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
10263 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
10264 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
10265 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
10266 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
10267 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
10268 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
10269 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
10270
10271 //DevToHost
10272 DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
10273 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
10274 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
10275 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
10276 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
10277 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
10278 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
10279 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
10280 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
10281
10282 DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
10283 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
10284 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
10285 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
10286 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
10287 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
10288 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
10289 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
10290 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
10291
10292 return 0;
10293 }
10294
10295 bool
10296 dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
10297 {
10298 uint32 intstatus = 0;
10299 uint32 intmask = 0;
10300 uint32 d2h_db0 = 0;
10301 uint32 d2h_mb_data = 0;
10302
10303 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
10304 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10305 dhd->bus->pcie_mailbox_int, 0, 0);
10306 if (intstatus == (uint32)-1) {
10307 DHD_ERROR(("intstatus=0x%x \n", intstatus));
10308 return FALSE;
10309 }
10310
10311 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10312 dhd->bus->pcie_mailbox_mask, 0, 0);
10313 if (intmask == (uint32) -1) {
10314 DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
10315 return FALSE;
10316 }
10317
10318 d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10319 PCID2H_MailBox, 0, 0);
10320 if (d2h_db0 == (uint32)-1) {
10321 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
10322 intstatus, intmask, d2h_db0));
10323 return FALSE;
10324 }
10325
10326 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
10327 intstatus, intmask, d2h_db0));
10328 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
10329 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
10330 dhd->bus->def_intmask));
10331
10332 return TRUE;
10333 }
10334
10335 void
10336 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
10337 {
10338 DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
10339 DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
10340 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10341 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
10342 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10343 DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
10344 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10345 PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
10346 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10347 PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
10348 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10349 PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
10350 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10351 PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
10352 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
10353 }
10354
10355 int
10356 dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
10357 {
10358 int host_irq_disabled;
10359
10360 DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
10361 host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
10362 DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
10363 dhd_print_tasklet_status(dhd);
10364 dhd_pcie_intr_count_dump(dhd);
10365
10366 DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
10367 dhdpcie_dump_resource(dhd->bus);
10368
10369 dhd_pcie_dump_rc_conf_space_cap(dhd);
10370
10371 DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
10372 dhd_debug_get_rc_linkcap(dhd->bus)));
10373
10374 #ifdef CUSTOMER_HW4_DEBUG
10375 if (dhd->bus->is_linkdown) {
10376 DHD_ERROR(("Skip dumping the PCIe registers due to PCIe Link down\n"));
10377 return 0;
10378 }
10379 #endif /* CUSTOMER_HW4_DEBUG */
10380
10381 DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
10382 DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
10383 "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
10384 PCIECFGREG_STATUS_CMD,
10385 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
10386 PCIECFGREG_BASEADDR0,
10387 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
10388 PCIECFGREG_BASEADDR1,
10389 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
10390 PCIE_CFG_PMCSR,
10391 dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
10392 DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
10393 "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
10394 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
10395 sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
10396 dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
10397 sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
10398 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
10399 sizeof(uint32))));
10400 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10401 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
10402 dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10403 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
10404 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
10405 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
10406 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
10407 PCI_TLP_HDR_LOG2,
10408 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
10409 PCI_TLP_HDR_LOG3,
10410 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
10411 PCI_TLP_HDR_LOG4,
10412 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
10413 if (dhd->bus->sih->buscorerev >= 24) {
10414 DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
10415 "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
10416 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
10417 sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
10418 dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
10419 sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
10420 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
10421 sizeof(uint32))));
10422
10423 DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
10424 DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
10425 PCIDARClkCtl(dhd->bus->sih->buscorerev),
10426 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10427 PCIDARClkCtl(dhd->bus->sih->buscorerev), 0, 0),
10428 PCIDARPwrCtl(dhd->bus->sih->buscorerev),
10429 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10430 PCIDARPwrCtl(dhd->bus->sih->buscorerev), 0, 0),
10431 PCIDARH2D_DB0(dhd->bus->sih->buscorerev),
10432 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10433 PCIDARH2D_DB0(dhd->bus->sih->buscorerev), 0, 0)));
10434 }
10435 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
10436 if (!dhd_pcie_dump_int_regs(dhd)) {
10437 DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid int regs\n"));
10438 return 0;
10439 }
10440
10441 DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
10442
10443 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10444 if (dhd->bus->sih->buscorerev >= 24) {
10445 DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n",
10446 PCIDARErrlog(dhd->bus->sih->buscorerev),
10447 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10448 PCIDARErrlog(dhd->bus->sih->buscorerev), 0, 0),
10449 PCIDARErrlog_Addr(dhd->bus->sih->buscorerev),
10450 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10451 PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), 0, 0)));
10452 DHD_ERROR(("FunctionINtstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n",
10453 PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev),
10454 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10455 PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
10456 PCIDARMailboxint(dhd->bus->sih->buscorerev),
10457 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10458 PCIDARMailboxint(dhd->bus->sih->buscorerev), 0, 0)));
10459 }
10460 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
10461
10462 DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
10463 "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
10464 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
10465 PCIECFGREG_PHY_DBG_CLKREQ1,
10466 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
10467 PCIECFGREG_PHY_DBG_CLKREQ2,
10468 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
10469 PCIECFGREG_PHY_DBG_CLKREQ3,
10470 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
10471
10472 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10473 if (dhd->bus->sih->buscorerev >= 24) {
10474 DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
10475 "ltssm_hist_2(0x%x)=0x%x "
10476 "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
10477 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
10478 PCIECFGREG_PHY_LTSSM_HIST_1,
10479 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
10480 PCIECFGREG_PHY_LTSSM_HIST_2,
10481 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
10482 PCIECFGREG_PHY_LTSSM_HIST_3,
10483 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
10484 DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
10485 PCIE_CLK_CTRL,
10486 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CLK_CTRL, 0, 0),
10487 PCIE_PWR_CTRL,
10488 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_PWR_CTRL, 0, 0),
10489 PCIH2D_MailBox,
10490 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10491 PCIH2D_MailBox, 0, 0)));
10492 DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
10493 PCIECFGREG_TREFUP,
10494 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
10495 PCIECFGREG_TREFUP_EXT,
10496 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
10497 DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
10498 "Function_Intstatus(0x%x)=0x%x "
10499 "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
10500 "Power_Intmask(0x%x)=0x%x\n",
10501 PCIE_CORE_REG_ERRLOG,
10502 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10503 PCIE_CORE_REG_ERRLOG, 0, 0),
10504 PCIE_CORE_REG_ERR_ADDR,
10505 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10506 PCIE_CORE_REG_ERR_ADDR, 0, 0),
10507 PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
10508 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10509 PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
10510 PCIFunctionIntmask(dhd->bus->sih->buscorerev),
10511 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10512 PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
10513 PCIPowerIntstatus(dhd->bus->sih->buscorerev),
10514 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10515 PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
10516 PCIPowerIntmask(dhd->bus->sih->buscorerev),
10517 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10518 PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
10519 DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
10520 "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
10521 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
10522 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10523 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
10524 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
10525 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10526 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
10527 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
10528 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10529 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
10530 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
10531 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10532 OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
10533 DHD_ERROR(("err_code(0x%x)=0x%x\n",
10534 (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
10535 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10536 OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
10537 }
10538 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
10539
10540 dhd_pcie_dma_info_dump(dhd);
10541
10542 return 0;
10543 }
10544
10545 /*
10546 * TLV ID for Host whitelist Region.
10547 */
10548 #define BCM_NVRAM_WHTLST_SIGNATURE 0xFEED4B1Du
10549
10550 /*
10551 * For the time being only one whitelist region is supported; a 64-bit high
10552 * address and a 64-bit low address must be written (16 bytes total).
10553 */
10554 #define BCM_HOST_WHITELIST_NBYTES 16u
10555
10556 /* Writes the host whitelist region to the TCM. On initialization the FW reads this
10557 * location to fetch the whitelist regions, and validates DMA descriptors against
10558 * these whitelist regions before programming them.
10559 */
10560 static int
10561 dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus)
10562 {
10563 int ret;
10564 bcm_host_whitelist_metadata_t whitelist_data;
10565 uint8 whtlst_buff[BCM_HOST_WHITELIST_NBYTES];
10566 bcm_rand_metadata_t rnd_data;
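/* The whitelist metadata is placed near the top of dongle RAM, just below the
 * NVRAM image area: back off from (ramsize - BCM_NVRAM_OFFSET_TCM) by the
 * compressed NVRAM image size, the random-seed metadata, the host entropy
 * bytes and the whitelist metadata itself.
 */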
10567 uint32 addr = bus->dongle_ram_base + (uint32)((bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
10568 ((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data) +
10569 BCM_ENTROPY_HOST_NBYTES + sizeof(whitelist_data)));
10570 whitelist_data.signature = htol32(BCM_NVRAM_WHTLST_SIGNATURE);
10571 whitelist_data.count = htol32(BCM_HOST_WHITELIST_NBYTES);
10572 ret = dhd_get_host_whitelist_region((void*)whtlst_buff,
10573 whitelist_data.count);
10574 if (ret == BCME_RANGE) {
10575 DHD_INFO(("%s: No whitelist region programmed!\n",
10576 __FUNCTION__));
10577 return BCME_OK;
10578 }
10579 if (ret == BCME_OK) {
10580 /* write the metadata about whitelist region */
10581 ret = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&whitelist_data,
10582 sizeof(whitelist_data));
10583 if (ret == BCME_OK) {
10584 /* move back by the size of the whitelist region(s) so they sit just below the metadata */
10585 addr -= BCM_HOST_WHITELIST_NBYTES;
10586
10587 /* Now write whitelist region(s) */
10588 ret = dhdpcie_bus_membytes(bus, TRUE, addr, whtlst_buff,
10589 BCM_HOST_WHITELIST_NBYTES);
10590 }
10591 }
10592 return ret;
10593 }
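/* Illustrative sketch only, not part of the driver: one way a platform could
 * supply dhd_get_host_whitelist_region(), whose signature is inferred from the
 * call above. The 16-byte buffer is assumed to hold the 64-bit low and 64-bit
 * high bounds of the single supported whitelist region in little-endian order;
 * returning BCME_RANGE means no region is configured, so the caller above skips
 * the TCM write. Kept disabled so it cannot clash with the real implementation.
 */
#if 0
int
dhd_get_host_whitelist_region(void *buf, uint32 nbytes)
{
	/* hypothetical platform-provided bounds; zero means "not configured" */
	uint64 whtlst_lo = 0, whtlst_hi = 0;

	if (nbytes < BCM_HOST_WHITELIST_NBYTES)
		return BCME_BUFTOOSHORT;

	if (whtlst_lo == 0 && whtlst_hi == 0)
		return BCME_RANGE;	/* no whitelist region programmed */

	whtlst_lo = htol64(whtlst_lo);
	whtlst_hi = htol64(whtlst_hi);
	memcpy(buf, &whtlst_lo, sizeof(whtlst_lo));
	memcpy((uint8 *)buf + sizeof(whtlst_lo), &whtlst_hi, sizeof(whtlst_hi));
	return BCME_OK;
}
#endif /* illustrative sketch */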
10594
10595 bool
10596 dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
10597 {
10598 return bus->force_bt_quiesce;
10599 }