source: G950FXXS5DSI1
GitHub: exynos8895/android_kernel_samsung_universal8895.git, path: drivers/net/wireless/bcmdhd4361/dhd_pcie.c
1 /*
2 * DHD Bus Module for PCIE
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: dhd_pcie.c 802450 2019-02-01 14:05:56Z $
28 */
29
30 /* include files */
31 #include <typedefs.h>
32 #include <bcmutils.h>
33 #include <bcmdevs.h>
34 #include <siutils.h>
35 #include <hndsoc.h>
36 #include <hndpmu.h>
37 #include <etd.h>
38 #include <hnd_debug.h>
39 #include <sbchipc.h>
40 #include <hnd_armtrap.h>
41 #if defined(DHD_DEBUG)
42 #include <hnd_cons.h>
43 #endif /* defined(DHD_DEBUG) */
44 #include <dngl_stats.h>
45 #include <pcie_core.h>
46 #include <dhd.h>
47 #include <dhd_bus.h>
48 #include <dhd_flowring.h>
49 #include <dhd_proto.h>
50 #include <dhd_dbg.h>
51 #include <dhd_debug.h>
52 #include <dhd_daemon.h>
53 #include <dhdioctl.h>
54 #include <sdiovar.h>
55 #include <bcmmsgbuf.h>
56 #include <pcicfg.h>
57 #include <dhd_pcie.h>
58 #include <bcmpcie.h>
59 #include <bcmendian.h>
60 #ifdef DHDTCPACK_SUPPRESS
61 #include <dhd_ip.h>
62 #endif /* DHDTCPACK_SUPPRESS */
63 #include <bcmevent.h>
64
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #include <linux/pm_runtime.h>
67 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
68
69 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
70 #include <debugger.h>
71 #endif /* DEBUGGER || DHD_DSCOPE */
72
73 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable extended PCIe register dump */
74
75 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
76 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
77
78 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
79 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
80 /* Temporary WAR to fix precommit till the sync issue between trunk & precommit branch is resolved */
81
82 /* CTO Prevention Recovery */
83 #ifdef BCMQT_HW
84 #define CTO_TO_CLEAR_WAIT_MS 10000
85 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
86 #else
87 #define CTO_TO_CLEAR_WAIT_MS 1000
88 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
89 #endif // endif
90
91 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
92 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
93 (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
94
95 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
96 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
97 (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
98
99 /* Fetch address of a member in the ring_mem structure in dongle memory */
100 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
101 (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
102
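/*
 * Illustrative usage sketch (not part of the original source): these macros only
 * compute an address in dongle memory; the access itself still goes through the
 * TCM accessors, e.g. reading the console address out of the shared structure:
 *
 *   uint32 console_addr;
 *   ulong addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr);
 *   dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&console_addr, sizeof(console_addr));
 */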
103 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
104 extern unsigned int system_rev;
105 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
106
107 /* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
108 uint dma_ring_indices = 0;
109 /* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
110 bool h2d_phase = 0;
111 /* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
112 * defined in dhd_linux.c
113 */
114 bool force_trap_bad_h2d_phase = 0;
115
116 int dhd_dongle_memsize;
117 int dhd_dongle_ramsize;
118 struct dhd_bus *g_dhd_bus = NULL;
119 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
120 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
121 #if defined(DHD_FW_COREDUMP)
122 static int dhdpcie_mem_dump(dhd_bus_t *bus);
123 #endif /* DHD_FW_COREDUMP */
124
125 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
126 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
127 const char *name, void *params,
128 int plen, void *arg, int len, int val_size);
129 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
130 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
131 uint32 len, uint32 srcdelay, uint32 destdelay,
132 uint32 d11_lpbk, uint32 core_num, uint32 wait);
133 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
134 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
135 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
136 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
137 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
138 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
139 static int dhdpcie_readshared(dhd_bus_t *bus);
140 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
141 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
142 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
143 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
144 bool dongle_isolation, bool reset_flag);
145 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
146 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
147 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
148 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
149 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
150 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
151 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
152 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
153 #ifdef DHD_SUPPORT_64BIT
154 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
155 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
156 #endif /* DHD_SUPPORT_64BIT */
157 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
158 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
159 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
160 static void dhdpcie_fw_trap(dhd_bus_t *bus);
161 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
162 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
163 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
164 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
165
166 #ifdef IDLE_TX_FLOW_MGMT
167 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
168 static void dhd_bus_idle_scan(dhd_bus_t *bus);
169 #endif /* IDLE_TX_FLOW_MGMT */
170
171 #ifdef EXYNOS_PCIE_DEBUG
172 extern void exynos_pcie_register_dump(int ch_num);
173 #endif /* EXYNOS_PCIE_DEBUG */
174
175 #define PCI_VENDOR_ID_BROADCOM 0x14e4
176
177 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
178 #define MAX_D3_ACK_TIMEOUT 100
179 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
180
181 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
182 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
183 static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
184
185 static int dhdpcie_init_d11status(struct dhd_bus *bus);
186
187 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
188
189 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
190 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
191
192 static int dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus);
193
194 #ifdef DHD_SSSR_DUMP
195 static int dhdpcie_sssr_dump(dhd_pub_t *dhd);
196 #endif /* DHD_SSSR_DUMP */
197
198 /* IOVar table */
199 enum {
200 IOV_INTR = 1,
201 IOV_MEMSIZE,
202 IOV_SET_DOWNLOAD_STATE,
203 IOV_DEVRESET,
204 IOV_VARS,
205 IOV_MSI_SIM,
206 IOV_PCIE_LPBK,
207 IOV_CC_NVMSHADOW,
208 IOV_RAMSIZE,
209 IOV_RAMSTART,
210 IOV_SLEEP_ALLOWED,
211 IOV_PCIE_DMAXFER,
212 IOV_PCIE_SUSPEND,
213 IOV_DONGLEISOLATION,
214 IOV_LTRSLEEPON_UNLOOAD,
215 IOV_METADATA_DBG,
216 IOV_RX_METADATALEN,
217 IOV_TX_METADATALEN,
218 IOV_TXP_THRESHOLD,
219 IOV_BUZZZ_DUMP,
220 IOV_DUMP_RINGUPD_BLOCK,
221 IOV_DMA_RINGINDICES,
222 IOV_FORCE_FW_TRAP,
223 IOV_DB1_FOR_MB,
224 IOV_FLOW_PRIO_MAP,
225 #ifdef DHD_PCIE_RUNTIMEPM
226 IOV_IDLETIME,
227 #endif /* DHD_PCIE_RUNTIMEPM */
228 IOV_RXBOUND,
229 IOV_TXBOUND,
230 IOV_HANGREPORT,
231 IOV_H2D_MAILBOXDATA,
232 IOV_INFORINGS,
233 IOV_H2D_PHASE,
234 IOV_H2D_ENABLE_TRAP_BADPHASE,
235 IOV_H2D_TXPOST_MAX_ITEM,
236 IOV_TRAPDATA,
237 IOV_TRAPDATA_RAW,
238 IOV_CTO_PREVENTION,
239 IOV_PCIE_WD_RESET,
240 IOV_DUMP_DONGLE,
241 IOV_IDMA_ENABLE,
242 IOV_IFRM_ENABLE,
243 IOV_CLEAR_RING,
244 IOV_DAR_ENABLE,
245 IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
246 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
247 IOV_GDB_SERVER, /**< starts gdb server on given interface */
248 #endif /* DEBUGGER || DHD_DSCOPE */
249 IOV_INB_DW_ENABLE,
250 IOV_CTO_THRESHOLD,
251 #ifdef D2H_MINIDUMP
252 IOV_MINIDUMP_OVERRIDE,
253 #endif /* D2H_MINIDUMP */
254 IOV_PCIE_LAST /**< unused IOVAR */
255 };
256
257 const bcm_iovar_t dhdpcie_iovars[] = {
258 {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
259 {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
260 {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
261 {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
262 {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
263 {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
264 {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
265 {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
266 {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
267 {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
268 {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
269 {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 },
270 {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
271 {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
272 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
273 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
274 {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
275 {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
276 {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
277 {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
278 {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
279 {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
280 {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
281 {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
282 #ifdef DHD_PCIE_RUNTIMEPM
283 {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
284 #endif /* DHD_PCIE_RUNTIMEPM */
285 {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
286 {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
287 {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
288 {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
289 {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
290 {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
291 {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
292 IOVT_UINT32, 0 },
293 {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
294 {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
295 {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
296 {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
297 {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
298 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
299 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
300 {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
301 {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
302 {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
303 {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
304 {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
305 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
306 {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
307 #endif /* DEBUGGER || DHD_DSCOPE */
308 {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
309 {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
310 #ifdef D2H_MINIDUMP
311 {"minidump_override", IOV_MINIDUMP_OVERRIDE, 0, 0, IOVT_UINT32, 0 },
312 #endif /* D2H_MINIDUMP */
313 {NULL, 0, 0, 0, 0, 0 }
314 };
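/*
 * Note (illustrative): each table entry ties an iovar name to an IOV_xxx id from the
 * enum above, together with the expected value type and minimum buffer length. The
 * bus iovar path is expected to look the name up in this table (e.g. with
 * bcm_iovar_lookup() from bcmutils) and then dispatch on the id in
 * dhdpcie_bus_doiovar(), declared earlier in this file.
 */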
315
316 #define MAX_READ_TIMEOUT (5 * 1000 * 1000)
317
318 #ifndef DHD_RXBOUND
319 #define DHD_RXBOUND 64
320 #endif // endif
321 #ifndef DHD_TXBOUND
322 #define DHD_TXBOUND 64
323 #endif // endif
324
325 #define DHD_INFORING_BOUND 32
326 #define DHD_BTLOGRING_BOUND 32
327
328 uint dhd_rxbound = DHD_RXBOUND;
329 uint dhd_txbound = DHD_TXBOUND;
330
331 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
332 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
333 static struct dhd_gdb_bus_ops_s bus_ops = {
334 .read_u16 = dhdpcie_bus_rtcm16,
335 .read_u32 = dhdpcie_bus_rtcm32,
336 .write_u32 = dhdpcie_bus_wtcm32,
337 };
338 #endif /* DEBUGGER || DHD_DSCOPE */
339
340 bool
341 dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
342 {
343 return bus->flr_force_fail;
344 }
345
346 /**
347 * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
348 * link with the bus driver, in order to look for or await the device.
349 */
350 int
351 dhd_bus_register(void)
352 {
353 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
354
355 return dhdpcie_bus_register();
356 }
357
358 void
359 dhd_bus_unregister(void)
360 {
361 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
362
363 dhdpcie_bus_unregister();
364 return;
365 }
366
367 /** returns a host virtual address */
368 uint32 *
369 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
370 {
371 return (uint32 *)REG_MAP(addr, size);
372 }
373
374 void
375 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
376 {
377 REG_UNMAP(addr);
378 return;
379 }
380
381 /**
382 * Return the H2D doorbell register address;
383 * use DAR registers instead of enum register for corerev >= 23 (4347B0)
384 */
385 static INLINE uint
386 dhd_bus_db0_addr_get(struct dhd_bus *bus)
387 {
388 uint addr = PCIH2D_MailBox;
389 uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
390
391 return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
392 }
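/*
 * Illustrative caller sketch (assumption, not taken from this file): the doorbell
 * address returned above is normally written through the PCIe core register
 * interface, e.g.
 *
 *   si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_get(bus), ~0, value);
 *
 * which rings H2D doorbell 0 regardless of whether the DAR or the legacy mailbox
 * register is in use.
 */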
393
394 static INLINE uint
395 dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
396 {
397 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
398 }
399
400 static INLINE uint
401 dhd_bus_db1_addr_get(struct dhd_bus *bus)
402 {
403 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
404 }
405
406 static INLINE uint
407 dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
408 {
409 return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
410 }
411
412 static INLINE void
413 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
414 {
415 uint mask;
416
417 /*
418 * If multiple de-asserts, decrement ref and return
419 * Clear power request when only one pending
420 * so initial request is not removed unexpectedly
421 */
422 if (bus->pwr_req_ref > 1) {
423 bus->pwr_req_ref--;
424 return;
425 }
426
427 ASSERT(bus->pwr_req_ref == 1);
428
429 if (MULTIBP_ENAB(bus->sih)) {
430 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
431 mask = SRPWR_DMN1_ARMBPSD_MASK;
432 } else {
433 mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
434 }
435
436 si_srpwr_request(bus->sih, mask, 0);
437 bus->pwr_req_ref = 0;
438 }
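/*
 * Descriptive note: power requests are reference counted via bus->pwr_req_ref, so
 * every dhd_bus_pcie_pwr_req() (or _nolock variant) below is expected to be balanced
 * by a matching dhd_bus_pcie_pwr_req_clear(); only the last clear actually drops the
 * SRPWR request towards the dongle.
 */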
439
440 static INLINE void
441 dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
442 {
443 unsigned long flags = 0;
444
445 DHD_GENERAL_LOCK(bus->dhd, flags);
446 _dhd_bus_pcie_pwr_req_clear_cmn(bus);
447 DHD_GENERAL_UNLOCK(bus->dhd, flags);
448 }
449
450 static INLINE void
451 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
452 {
453 _dhd_bus_pcie_pwr_req_clear_cmn(bus);
454 }
455
456 static INLINE void
457 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
458 {
459 uint mask, val;
460
461 /* If multiple request entries, increment reference and return */
462 if (bus->pwr_req_ref > 0) {
463 bus->pwr_req_ref++;
464 return;
465 }
466
467 ASSERT(bus->pwr_req_ref == 0);
468
469 if (MULTIBP_ENAB(bus->sih)) {
470 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
471 mask = SRPWR_DMN1_ARMBPSD_MASK;
472 val = SRPWR_DMN1_ARMBPSD_MASK;
473 } else {
474 mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
475 val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
476 }
477
478 si_srpwr_request(bus->sih, mask, val);
479
480 bus->pwr_req_ref = 1;
481 }
482
483 static INLINE void
484 dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
485 {
486 unsigned long flags = 0;
487
488 DHD_GENERAL_LOCK(bus->dhd, flags);
489 _dhd_bus_pcie_pwr_req_cmn(bus);
490 DHD_GENERAL_UNLOCK(bus->dhd, flags);
491 }
492
493 static INLINE void
494 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
495 {
496 uint mask, val;
497
498 mask = SRPWR_DMN_ALL_MASK;
499 val = SRPWR_DMN_ALL_MASK;
500
501 si_srpwr_request(bus->sih, mask, val);
502 }
503
504 static INLINE void
505 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
506 {
507 unsigned long flags = 0;
508
509 DHD_GENERAL_LOCK(bus->dhd, flags);
510 _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
511 DHD_GENERAL_UNLOCK(bus->dhd, flags);
512 }
513
514 static INLINE void
515 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus *bus)
516 {
517 uint mask;
518
519 mask = SRPWR_DMN3_MACMAIN_MASK | SRPWR_DMN2_MACAUX_MASK;
520
521 si_srpwr_request(bus->sih, mask, 0);
522 }
523
524 static INLINE void
525 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
526 {
527 unsigned long flags = 0;
528
529 DHD_GENERAL_LOCK(bus->dhd, flags);
530 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus);
531 DHD_GENERAL_UNLOCK(bus->dhd, flags);
532 }
533
534 static INLINE void
535 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
536 {
537 _dhd_bus_pcie_pwr_req_cmn(bus);
538 }
539
540 bool
541 dhdpcie_chip_support_msi(dhd_bus_t *bus)
542 {
543 DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
544 __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
545 if (bus->sih->buscorerev <= 14 ||
546 si_chipid(bus->sih) == BCM4375_CHIP_ID ||
547 si_chipid(bus->sih) == BCM4361_CHIP_ID ||
548 si_chipid(bus->sih) == BCM4359_CHIP_ID) {
549 return FALSE;
550 } else {
551 return TRUE;
552 }
553 }
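/*
 * Descriptive note: this capability check feeds the bus->d2h_intr_method selection in
 * dhdpcie_bus_attach() below; when DHD_MSI_SUPPORT is compiled in and the chip/bus
 * core supports MSI, PCIE_MSI is used, otherwise the driver falls back to legacy
 * PCIE_INTX interrupts.
 */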
554
555 /**
556 * Called once for each hardware (dongle) instance that this DHD manages.
557 *
558 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
559 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
560 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
561 *
562 * 'tcm' is the *host* virtual address at which tcm is mapped.
563 */
564 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
565 volatile char *regs, volatile char *tcm, void *pci_dev)
566 {
567 dhd_bus_t *bus = NULL;
568 int ret = BCME_OK;
569
570 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
571
572 do {
573 if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
574 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
575 ret = BCME_NORESOURCE;
576 break;
577 }
578
579 bus->regs = regs;
580 bus->tcm = tcm;
581 bus->osh = osh;
582 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
583 bus->dev = (struct pci_dev *)pci_dev;
584
585 dll_init(&bus->flowring_active_list);
586 #ifdef IDLE_TX_FLOW_MGMT
587 bus->active_list_last_process_ts = OSL_SYSUPTIME();
588 #endif /* IDLE_TX_FLOW_MGMT */
589
590 /* Attach pcie shared structure */
591 if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
592 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
593 ret = BCME_NORESOURCE;
594 break;
595 }
596
597 /* dhd_common_init(osh); */
598
599 if (dhdpcie_dongle_attach(bus)) {
600 DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
601 ret = BCME_NOTREADY;
602 break;
603 }
604
605 /* software resources */
606 if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
607 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
608 ret = BCME_NORESOURCE;
609 break;
610 }
611 bus->dhd->busstate = DHD_BUS_DOWN;
612 bus->db1_for_mb = TRUE;
613 bus->dhd->hang_report = TRUE;
614 bus->use_mailbox = FALSE;
615 bus->use_d0_inform = FALSE;
616 bus->intr_enabled = FALSE;
617 bus->flr_force_fail = FALSE;
618 /* update the dma indices if set through module parameter. */
619 if (dma_ring_indices != 0) {
620 dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
621 }
622 /* update h2d phase support if set through module parameter */
623 bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
624 /* update force trap on bad phase if set through module parameter */
625 bus->dhd->force_dongletrap_on_bad_h2d_phase =
626 force_trap_bad_h2d_phase ? TRUE : FALSE;
627 #ifdef IDLE_TX_FLOW_MGMT
628 bus->enable_idle_flowring_mgmt = FALSE;
629 #endif /* IDLE_TX_FLOW_MGMT */
630 bus->irq_registered = FALSE;
631
632 #ifdef DHD_MSI_SUPPORT
633 bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
634 PCIE_MSI : PCIE_INTX;
635 #else
636 bus->d2h_intr_method = PCIE_INTX;
637 #endif /* DHD_MSI_SUPPORT */
638
639 DHD_TRACE(("%s: EXIT SUCCESS\n",
640 __FUNCTION__));
641 g_dhd_bus = bus;
642 *bus_ptr = bus;
643 return ret;
644 } while (0);
645
646 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
647
648 if (bus && bus->pcie_sh) {
649 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
650 }
651
652 if (bus) {
653 MFREE(osh, bus, sizeof(dhd_bus_t));
654 }
655
656 return ret;
657 }
658
659 bool
660 dhd_bus_skip_clm(dhd_pub_t *dhdp)
661 {
662 switch (dhd_bus_chip_id(dhdp)) {
663 case BCM4369_CHIP_ID:
664 return TRUE;
665 default:
666 return FALSE;
667 }
668 }
669
670 uint
671 dhd_bus_chip(struct dhd_bus *bus)
672 {
673 ASSERT(bus->sih != NULL);
674 return bus->sih->chip;
675 }
676
677 uint
678 dhd_bus_chiprev(struct dhd_bus *bus)
679 {
680 ASSERT(bus);
681 ASSERT(bus->sih != NULL);
682 return bus->sih->chiprev;
683 }
684
685 void *
686 dhd_bus_pub(struct dhd_bus *bus)
687 {
688 return bus->dhd;
689 }
690
691 void *
692 dhd_bus_sih(struct dhd_bus *bus)
693 {
694 return (void *)bus->sih;
695 }
696
697 void *
698 dhd_bus_txq(struct dhd_bus *bus)
699 {
700 return &bus->txq;
701 }
702
703 /** Get Chip ID version */
704 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
705 {
706 dhd_bus_t *bus = dhdp->bus;
707 return bus->sih->chip;
708 }
709
710 /** Get Chip Rev ID version */
711 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
712 {
713 dhd_bus_t *bus = dhdp->bus;
714 return bus->sih->chiprev;
715 }
716
717 /** Get Chip Pkg ID version */
718 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
719 {
720 dhd_bus_t *bus = dhdp->bus;
721 return bus->sih->chippkg;
722 }
723
724 /* Log the latest DPC schedule time */
725 void
726 dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
727 {
728 dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
729 }
730
731 /* Check if there are DPC scheduling errors */
732 bool
733 dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
734 {
735 dhd_bus_t *bus = dhdp->bus;
736 bool sched_err;
737
738 if (bus->dpc_entry_time < bus->isr_exit_time) {
739 /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
740 sched_err = TRUE;
741 } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
742 /* Kernel doesn't schedule the DPC after DHD tries to reschedule
743 * the DPC due to pending work items to be processed.
744 */
745 sched_err = TRUE;
746 } else {
747 sched_err = FALSE;
748 }
749
750 if (sched_err) {
751 /* print out minimum timestamp info */
752 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
753 " isr_exit_time="SEC_USEC_FMT
754 " last_non_ours_irq_time="SEC_USEC_FMT
755 " \ndpc_entry_time="SEC_USEC_FMT
756 " dpc_exit_time="SEC_USEC_FMT
757 " dpc_sched_time="SEC_USEC_FMT
758 " resched_dpc_time="SEC_USEC_FMT"\n",
759 GET_SEC_USEC(bus->isr_entry_time),
760 GET_SEC_USEC(bus->isr_exit_time),
761 GET_SEC_USEC(bus->last_non_ours_irq_time),
762 GET_SEC_USEC(bus->dpc_entry_time),
763 GET_SEC_USEC(bus->dpc_exit_time),
764 GET_SEC_USEC(bus->dpc_sched_time),
765 GET_SEC_USEC(bus->resched_dpc_time)));
766 /* Added more log to debug un-scheduling from isr */
767 DHD_ERROR(("donglereset=%d, busstate=%d instatus=0x%x intr_enabled=%d \n",
768 dhdp->dongle_reset, dhdp->busstate, bus->intstatus, bus->intr_enabled));
769
770 dhd_pcie_dump_rc_conf_space_cap(dhdp);
771 #ifdef EXTENDED_PCIE_DEBUG_DUMP
772 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
773 dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_ERR,
774 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
775 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
776 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
777 dhd_pcie_config_read(bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
778 PCI_TLP_HDR_LOG2,
779 dhd_pcie_config_read(bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
780 PCI_TLP_HDR_LOG3,
781 dhd_pcie_config_read(bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
782 PCI_TLP_HDR_LOG4,
783 dhd_pcie_config_read(bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
784 if (bus->sih->buscorerev >= 24) {
785 DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
786 "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
787 dhd_pcie_config_read(bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
788 sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
789 dhd_pcie_config_read(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
790 sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
791 dhd_pcie_config_read(bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
792 sizeof(uint32))));
793
794 DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
795 DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
796 PCIDARClkCtl(bus->sih->buscorerev),
797 si_corereg(bus->sih, bus->sih->buscoreidx,
798 PCIDARClkCtl(bus->sih->buscorerev), 0, 0),
799 PCIDARPwrCtl(bus->sih->buscorerev),
800 si_corereg(bus->sih, bus->sih->buscoreidx,
801 PCIDARPwrCtl(bus->sih->buscorerev), 0, 0),
802 PCIDARH2D_DB0(bus->sih->buscorerev),
803 si_corereg(bus->sih, bus->sih->buscoreidx,
804 PCIDARH2D_DB0(bus->sih->buscorerev), 0, 0)));
805 }
806 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
807 }
808
809 return sched_err;
810 }
811
812 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
813 uint32
814 dhdpcie_bus_intstatus(dhd_bus_t *bus)
815 {
816 uint32 intstatus = 0;
817 uint32 intmask = 0;
818
819 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
820 DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
821 return intstatus;
822 }
823 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
824 (bus->sih->buscorerev == 2)) {
825 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
826 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
827 intstatus &= I_MB;
828 } else {
829 /* this is a PCIE core register..not a config register... */
830 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
831
832 /* this is a PCIE core register..not a config register... */
833 intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
834 /* Check if the device is removed: intstatus & intmask read as 0xffffffff */
835 if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
836 DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
837 DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
838 __FUNCTION__, intstatus, intmask));
839 bus->is_linkdown = TRUE;
840 dhd_pcie_debug_info_dump(bus->dhd);
841 #ifdef CUSTOMER_HW4_DEBUG
842 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
843 #ifdef SUPPORT_LINKDOWN_RECOVERY
844 #ifdef CONFIG_ARCH_MSM
845 bus->no_cfg_restore = 1;
846 #endif /* CONFIG_ARCH_MSM */
847 #endif /* SUPPORT_LINKDOWN_RECOVERY */
848 bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
849 dhd_os_send_hang_message(bus->dhd);
850 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
851 #endif /* CUSTOMER_HW4_DEBUG */
852 return intstatus;
853 }
854
855 intstatus &= intmask;
856
857 /*
858 * The fourth argument to si_corereg is the "mask" field of the register to update
859 * and the fifth is the "value" to update. If we are interested in only a
860 * few fields of the "mask" bit map, we should not write back everything we read;
861 * by doing so, we might clear/ack interrupts that are not handled yet.
862 */
863 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
864 intstatus);
865
866 intstatus &= bus->def_intmask;
867 }
868
869 return intstatus;
870 }
871
872 /**
873 * Name: dhdpcie_bus_isr
874 * Parameters:
875 * 1: IN int irq -- interrupt vector
876 * 2: IN void *arg -- handle to private data structure
877 * Return value:
878 * Status (TRUE or FALSE)
879 *
880 * Description:
881 * The interrupt service routine checks the status register,
882 * disables the interrupt and queues the DPC if mailbox interrupts are raised.
883 */
884 int32
885 dhdpcie_bus_isr(dhd_bus_t *bus)
886 {
887 uint32 intstatus = 0;
888
889 do {
890 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
891 /* verify argument */
892 if (!bus) {
893 DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
894 break;
895 }
896
897 if (bus->dhd->dongle_reset) {
898 DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
899 break;
900 }
901
902 if (bus->dhd->busstate == DHD_BUS_DOWN) {
903 DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
904 break;
905 }
906
907 /* avoid processing of interrupts until msgbuf prot is inited */
908 if (!bus->intr_enabled) {
909 DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
910 break;
911 }
912 #ifdef DHD_PCIE_RUNTIMEPM
913 bus->idlecount = 0;
914 #endif /* DHD_PCIE_RUNTIMEPM */
915 if (PCIECTO_ENAB(bus)) {
916 /* read pci_intstatus */
917 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
918
919 if (intstatus & PCI_CTO_INT_MASK) {
920 /* reset backplane and cto,
921 * then access through pcie is recovered.
922 */
923 dhdpcie_cto_error_recovery(bus);
924 return TRUE;
925 }
926 }
927
928 if (bus->d2h_intr_method == PCIE_MSI) {
929 /* For MSI, as intstatus is cleared by firmware, no need to read */
930 goto skip_intstatus_read;
931 }
932
933 intstatus = dhdpcie_bus_intstatus(bus);
934
935 /* Check if the interrupt is ours or not */
936 if (intstatus == 0) {
937 DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
938 bus->non_ours_irq_count++;
939 bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
940 break;
941 }
942
943 /* save the intstatus */
944 /* read interrupt status register!! Status bits will be cleared in DPC !! */
945 bus->intstatus = intstatus;
946
947 /* return error for 0xFFFFFFFF */
948 if (intstatus == (uint32)-1) {
949 DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
950 __FUNCTION__, intstatus));
951 dhdpcie_disable_irq_nosync(bus);
952 break;
953 }
954
955 skip_intstatus_read:
956 /* Overall operation:
957 * - Mask further interrupts
958 * - Read/ack intstatus
959 * - Take action based on bits and state
960 * - Reenable interrupts (as per state)
961 */
962
963 /* Count the interrupt call */
964 bus->intrcount++;
965
966 bus->ipend = TRUE;
967
968 bus->isr_intr_disable_count++;
969
970 * For Linux, MacOS etc. (other than NDIS), instead of disabling the
971 * dongle interrupt by clearing the IntMask, directly disable the
972 * interrupt from the host side, so that the host will not receive
973 * any interrupts at all, even though the dongle raises interrupts
974 */
975 dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
976
977 bus->intdis = TRUE;
978
979 #if defined(PCIE_ISR_THREAD)
980
981 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
982 DHD_OS_WAKE_LOCK(bus->dhd);
983 while (dhd_bus_dpc(bus));
984 DHD_OS_WAKE_UNLOCK(bus->dhd);
985 #else
986 bus->dpc_sched = TRUE;
987 dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
988 #endif /* defined(PCIE_ISR_THREAD) */
989
990 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
991 return TRUE;
992
993 } while (0);
994
995 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
996 return FALSE;
997 }
998
999 int
1000 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1001 {
1002 uint32 cur_state = 0;
1003 uint32 pm_csr = 0;
1004 osl_t *osh = bus->osh;
1005
1006 pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1007 cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1008
1009 if (cur_state == state) {
1010 DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1011 return BCME_OK;
1012 }
1013
1014 if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1015 return BCME_ERROR;
1016
1017 /* Validate the state transition
1018 * if already in a lower power state, return error
1019 */
1020 if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1021 cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1022 cur_state > state) {
1023 DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1024 return BCME_ERROR;
1025 }
1026
1027 pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1028 pm_csr |= state;
1029
1030 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1031
1032 /* need to wait for the specified mandatory pcie power transition delay time */
1033 if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1034 cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1035 OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1036 else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1037 cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1038 OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1039
1040 /* read back the power state and verify */
1041 pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1042 cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1043 if (cur_state != state) {
1044 DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1045 __FUNCTION__, cur_state));
1046 return BCME_ERROR;
1047 } else {
1048 DHD_ERROR(("%s: power transition to %u success \n",
1049 __FUNCTION__, cur_state));
1050 }
1051
1052 return BCME_OK;
1053 }
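/*
 * Illustrative usage (assumption): a typical caller moves the endpoint to D3hot and
 * back, e.g.
 *
 *   dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
 *   ...
 *   dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
 *
 * The function validates the transition, writes PM_CSR, waits the mandatory PCIe
 * transition delay and reads the state back to confirm.
 */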
1054
1055 int
1056 dhdpcie_config_check(dhd_bus_t *bus)
1057 {
1058 uint32 i, val;
1059 int ret = BCME_ERROR;
1060
1061 for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1062 val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1063 if ((val & 0xFFFF) == VENDOR_BROADCOM) {
1064 ret = BCME_OK;
1065 break;
1066 }
1067 OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1068 }
1069
1070 return ret;
1071 }
1072
1073 int
1074 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1075 {
1076 uint32 i;
1077 osl_t *osh = bus->osh;
1078
1079 if (BCME_OK != dhdpcie_config_check(bus)) {
1080 return BCME_ERROR;
1081 }
1082
1083 for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1084 OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1085 }
1086 OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1087
1088 if (restore_pmcsr)
1089 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1090 sizeof(uint32), bus->saved_config.pmcsr);
1091
1092 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1093 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1094 bus->saved_config.msi_addr0);
1095 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1096 sizeof(uint32), bus->saved_config.msi_addr1);
1097 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1098 sizeof(uint32), bus->saved_config.msi_data);
1099
1100 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1101 sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1102 OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1103 sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1104 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1105 sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1106 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1107 sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1108
1109 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1110 sizeof(uint32), bus->saved_config.l1pm0);
1111 OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1112 sizeof(uint32), bus->saved_config.l1pm1);
1113
1114 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1115 bus->saved_config.bar0_win);
1116 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, sizeof(uint32),
1117 bus->saved_config.bar1_win);
1118
1119 return BCME_OK;
1120 }
1121
1122 int
1123 dhdpcie_config_save(dhd_bus_t *bus)
1124 {
1125 uint32 i;
1126 osl_t *osh = bus->osh;
1127
1128 if (BCME_OK != dhdpcie_config_check(bus)) {
1129 return BCME_ERROR;
1130 }
1131
1132 for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1133 bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1134 }
1135
1136 bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1137
1138 bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1139 sizeof(uint32));
1140 bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1141 sizeof(uint32));
1142 bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1143 sizeof(uint32));
1144 bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1145 sizeof(uint32));
1146
1147 bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1148 PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1149 bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1150 PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1151 bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1152 PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1153 bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1154 PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1155
1156 bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1157 sizeof(uint32));
1158 bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1159 sizeof(uint32));
1160
1161 bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1162 sizeof(uint32));
1163 bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1164 sizeof(uint32));
1165
1166 return BCME_OK;
1167 }
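/*
 * Descriptive note: dhdpcie_config_save() and dhdpcie_config_restore() are intended
 * to be used as a pair around events where PCIe config space content may be lost
 * (for example suspend/resume or link-down recovery): save before the event, then
 * restore (optionally including PM_CSR via restore_pmcsr) once the link is back and
 * dhdpcie_config_check() confirms the device responds with the Broadcom vendor ID.
 */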
1168
1169 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1170 dhd_pub_t *link_recovery = NULL;
1171 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1172
1173 static void
1174 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1175 {
1176 uint buscorerev = bus->sih->buscorerev;
1177 bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1178 bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1179 bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1180 bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1181 if (buscorerev < 64) {
1182 bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1183 }
1184 }
1185
1186 void
1187 dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
1188 {
1189 uint32 linkctrl_rc, linkctrl_ep;
1190 linkctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1191 FALSE, 0);
1192 linkctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1193 FALSE, 0);
1194 DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__,
1195 (enable ? "ENABLE" : "DISABLE"), linkctrl_rc, linkctrl_ep));
1196 if (enable) {
1197 /* Enable only L1 ASPM (bit 1) first RC then EP */
1198 dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1199 TRUE, (linkctrl_rc | PCIE_ASPM_L1_ENAB));
1200 dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1201 TRUE, (linkctrl_ep | PCIE_ASPM_L1_ENAB));
1202 } else {
1203 /* Disable complete ASPM (bit 1 and bit 0) first EP then RC */
1204 dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1205 TRUE, (linkctrl_ep & (~PCIE_ASPM_ENAB)));
1206 dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1207 TRUE, (linkctrl_rc & (~PCIE_ASPM_ENAB)));
1208 }
1209 linkctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1210 FALSE, 0);
1211 linkctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
1212 FALSE, 0);
1213 DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__,
1214 (enable ? "ENABLE" : "DISABLE"), linkctrl_rc, linkctrl_ep));
1215 }
1216
1217 void
1218 dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
1219 {
1220 uint32 l1ssctrl_rc, l1ssctrl_ep;
1221
1222 /* Disable ASPM of RC and EP */
1223 dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1224
1225 /* Extended Capability Reg */
1226 l1ssctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1227 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1228 l1ssctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1229 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1230 DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__,
1231 (enable ? "ENABLE" : "DISABLE"), l1ssctrl_rc, l1ssctrl_ep));
1232 if (enable) {
1233 /* Enable RC then EP */
1234 dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1235 TRUE, TRUE, (l1ssctrl_rc | PCIE_EXT_L1SS_ENAB));
1236 dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1237 TRUE, TRUE, (l1ssctrl_ep | PCIE_EXT_L1SS_ENAB));
1238 } else {
1239 /* Disable EP then RC */
1240 dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1241 TRUE, TRUE, (l1ssctrl_ep & (~PCIE_EXT_L1SS_ENAB)));
1242 dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
1243 TRUE, TRUE, (l1ssctrl_rc & (~PCIE_EXT_L1SS_ENAB)));
1244 }
1245 l1ssctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1246 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1247 l1ssctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
1248 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
1249 DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__,
1250 (enable ? "ENABLE" : "DISABLE"), l1ssctrl_rc, l1ssctrl_ep));
1251
1252 /* Enable ASPM of RC and EP */
1253 dhd_bus_aspm_enable_rc_ep(bus, TRUE);
1254 }
1255
1256 void
1257 dhdpcie_dongle_reset(dhd_bus_t *bus)
1258 {
1259 #ifndef DHD_USE_BP_RESET
1260 uint32 wd_en = 0;
1261 #endif /* !DHD_USE_BP_RESET */
1262
1263 /* if the pcie link is down, watchdog reset
1264 * should not be done, as it may hang
1265 */
1266 if (bus->is_linkdown) {
1267 return;
1268 }
1269
1270 #ifdef DHD_USE_BP_RESET
1271 dhd_bus_perform_bp_reset(bus);
1272 #else
1273 wd_en = (bus->sih->buscorerev == 66) ? WD_SSRESET_PCIE_F0_EN :
1274 (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1275 pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1276 #endif /* DHD_USE_BP_RESET */
1277 }
1278
1279 static bool
1280 dhdpcie_dongle_attach(dhd_bus_t *bus)
1281 {
1282 osl_t *osh = bus->osh;
1283 volatile void *regsva = (volatile void*)bus->regs;
1284 uint16 devid;
1285 uint32 val;
1286 sbpcieregs_t *sbpcieregs;
1287 bool dongle_isolation;
1288
1289 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1290
1291 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1292 link_recovery = bus->dhd;
1293 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1294
1295 bus->alp_only = TRUE;
1296 bus->sih = NULL;
1297
1298 /* Check PCIe bus status by reading the configuration space */
1299 val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1300 if ((val & 0xFFFF) != VENDOR_BROADCOM) {
1301 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1302 goto fail;
1303 }
1304 devid = (val >> 16) & 0xFFFF;
1305 bus->cl_devid = devid;
1306
1307 /* Set bar0 window to si_enum_base */
1308 dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1309
1310 /*
1311 * Check the PCI_SPROM_CONTROL register to prevent invalid address access
1312 * due to switching the address space from PCI_BUS to SI_BUS.
1313 */
1314 val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1315 if (val == 0xffffffff) {
1316 DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1317 goto fail;
1318 }
1319
1320 /* si_attach() will provide an SI handle and scan the backplane */
1321 if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1322 &bus->vars, &bus->varsz))) {
1323 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1324 goto fail;
1325 }
1326
1327 if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1328 DHD_ERROR(("Enable CTO\n"));
1329 bus->cto_enable = TRUE;
1330 dhdpcie_cto_init(bus, bus->cto_enable);
1331 /*
1332 * HW JIRA - CRWLPCIEGEN2-672
1333 * Producer Index Feature which is used by F1 gets reset on F0 FLR
1334 * fixed in REV68
1335 */
1336 if (bus->sih->buscorerev == 66) {
1337 dhdpcie_ssreset_dis_enum_rst(bus);
1338 }
1339
1340 /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
1341 * dhdpcie_bus_release_dongle() --> si_detach()
1342 * dhdpcie_dongle_attach() --> si_attach()
1343 */
1344 bus->pwr_req_ref = 0;
1345 }
1346
1347 if (MULTIBP_ENAB(bus->sih)) {
1348 dhd_bus_pcie_pwr_req_nolock(bus);
1349 }
1350
1351 /* Olympic EFI requirement - stop driver load if FW is already running.
1352 * This needs to be done here, before pcie_watchdog_reset, because
1353 * pcie_watchdog_reset will put the ARM back into the halt state
1354 */
1355 if (!dhdpcie_is_arm_halted(bus)) {
1356 DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
1357 __FUNCTION__));
1358 goto fail;
1359 }
1360
1361 BCM_REFERENCE(dongle_isolation);
1362
1363 /* Dongle reset during power on can be invoked in case of module type driver */
1364 if (dhd_download_fw_on_driverload) {
1365 /* Enable CLKREQ# */
1366 dhdpcie_clkreq(bus->osh, 1, 1);
1367
1368 /*
1369 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
1370 * without checking dongle_isolation flag, but if it is called via some other path
1371 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
1372 * be called.
1373 */
1374 if (bus->dhd == NULL) {
1375 /* dhd_attach not yet happened, do watchdog reset */
1376 dongle_isolation = FALSE;
1377 } else {
1378 dongle_isolation = bus->dhd->dongle_isolation;
1379 }
1380 /*
1381 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
1382 * This is required to avoid spurious interrupts to the Host and bring back
1383 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1384 */
1385 if (dongle_isolation == FALSE) {
1386 dhdpcie_dongle_reset(bus);
1387
1388 }
1389 }
1390
1391 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1392 sbpcieregs = (sbpcieregs_t*)(bus->regs);
1393
1394 /* WAR where the BAR1 window may not be sized properly */
1395 W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1396 val = R_REG(osh, &sbpcieregs->configdata);
1397 W_REG(osh, &sbpcieregs->configdata, val);
1398
1399 /* Get info on the ARM and SOCRAM cores... */
1400 /* Should really be qualified by device id */
1401 if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1402 (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1403 (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1404 (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1405 bus->armrev = si_corerev(bus->sih);
1406 } else {
1407 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1408 goto fail;
1409 }
1410
1411 if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1412 /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1413 * adjusted.
1414 */
1415 if (!bus->ramsize_adjusted) {
1416 if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1417 DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1418 goto fail;
1419 }
1420 switch ((uint16)bus->sih->chip) {
1421 default:
1422 /* also populate base address */
1423 bus->dongle_ram_base = CA7_4365_RAM_BASE;
1424 bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1425 break;
1426 }
1427 }
1428 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1429 if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1430 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1431 goto fail;
1432 }
1433 } else {
1434 /* cr4 has a different way to find the RAM size from TCM's */
1435 if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1436 DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1437 goto fail;
1438 }
1439 /* also populate base address */
1440 switch ((uint16)bus->sih->chip) {
1441 case BCM4339_CHIP_ID:
1442 case BCM4335_CHIP_ID:
1443 bus->dongle_ram_base = CR4_4335_RAM_BASE;
1444 break;
1445 case BCM4358_CHIP_ID:
1446 case BCM4354_CHIP_ID:
1447 case BCM43567_CHIP_ID:
1448 case BCM43569_CHIP_ID:
1449 case BCM4350_CHIP_ID:
1450 case BCM43570_CHIP_ID:
1451 bus->dongle_ram_base = CR4_4350_RAM_BASE;
1452 break;
1453 case BCM4360_CHIP_ID:
1454 bus->dongle_ram_base = CR4_4360_RAM_BASE;
1455 break;
1456
1457 case BCM4364_CHIP_ID:
1458 bus->dongle_ram_base = CR4_4364_RAM_BASE;
1459 break;
1460
1461 CASE_BCM4345_CHIP:
1462 bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
1463 ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
1464 break;
1465 CASE_BCM43602_CHIP:
1466 bus->dongle_ram_base = CR4_43602_RAM_BASE;
1467 break;
1468 case BCM4349_CHIP_GRPID:
1469 /* RAM base changed from 4349c0 (revid=9) onwards */
1470 bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
1471 CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1472 break;
1473 case BCM4347_CHIP_ID:
1474 case BCM4357_CHIP_ID:
1475 case BCM4361_CHIP_ID:
1476 bus->dongle_ram_base = CR4_4347_RAM_BASE;
1477 break;
1478 case BCM4375_CHIP_ID:
1479 case BCM4369_CHIP_ID:
1480 bus->dongle_ram_base = CR4_4369_RAM_BASE;
1481 break;
1482 default:
1483 bus->dongle_ram_base = 0;
1484 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1485 __FUNCTION__, bus->dongle_ram_base));
1486 }
1487 }
1488 bus->ramsize = bus->orig_ramsize;
1489 if (dhd_dongle_memsize)
1490 dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1491
1492 if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1493 DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1494 __FUNCTION__, bus->ramsize, bus->ramsize));
1495 goto fail;
1496 }
1497
1498 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1499 bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1500
1501 bus->srmemsize = si_socram_srmem_size(bus->sih);
1502
1503 dhdpcie_bus_intr_init(bus);
1504
1505 /* Set the poll and/or interrupt flags */
1506 bus->intr = (bool)dhd_intr;
1507 #ifdef DHD_DISABLE_ASPM
1508 dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1509 #endif /* DHD_DISABLE_ASPM */
1510
1511 bus->idma_enabled = TRUE;
1512 bus->ifrm_enabled = TRUE;
1513 DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1514
1515 if (MULTIBP_ENAB(bus->sih)) {
1516 dhd_bus_pcie_pwr_req_clear_nolock(bus);
1517 }
1518
1519 bus->force_bt_quiesce = TRUE;
1520
1521 return 0;
1522
1523 fail:
1524 if (bus->sih != NULL) {
1525 if (MULTIBP_ENAB(bus->sih)) {
1526 dhd_bus_pcie_pwr_req_clear_nolock(bus);
1527 }
1528 /* for EFI even if there is an error, load still succeeds
1529 * so si_detach should not be called here, it is called during unload
1530 */
1531 si_detach(bus->sih);
1532 bus->sih = NULL;
1533 }
1534 DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1535 return -1;
1536 }
1537
1538 int
1539 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1540 {
1541 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1542 return 0;
1543 }
1544 int
1545 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1546 {
1547 dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1548 return 0;
1549 }
1550
1551 /* Non atomic function, caller should hold appropriate lock */
1552 void
1553 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1554 {
1555 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1556 if (bus && bus->sih && !bus->is_linkdown) {
1557 /* Skip after receiving D3 ACK */
1558 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1559 return;
1560 }
1561 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1562 (bus->sih->buscorerev == 4)) {
1563 dhpcie_bus_unmask_interrupt(bus);
1564 } else {
1565 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1566 bus->def_intmask, bus->def_intmask);
1567 }
1568 }
1569 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1570 }
1571
1572 /* Non atomic function, caller should hold appropriate lock */
1573 void
1574 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1575 {
1576 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1577 if (bus && bus->sih && !bus->is_linkdown) {
1578 /* Skip after receiving D3 ACK */
1579 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1580 return;
1581 }
1582 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1583 (bus->sih->buscorerev == 4)) {
1584 dhpcie_bus_mask_interrupt(bus);
1585 } else {
1586 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1587 bus->def_intmask, 0);
1588 }
1589 }
1590 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1591 }
1592
1593 /*
1594 * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress
1595 * to other bus user contexts like Tx, Rx, IOVAR, WD etc. and waits for those contexts
1596 * to gracefully exit. Before marking busstate as busy, all bus usage contexts check
1597 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
1598 * they exit from there without marking dhd_bus_busy_state as BUSY.
1599 */
1600 void
1601 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1602 {
1603 unsigned long flags;
1604 int timeleft;
1605
1606 #ifdef DHD_PCIE_RUNTIMEPM
1607 dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
1608 #endif /* DHD_PCIE_RUNTIMEPM */
1609
1610 dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1611 if (dhdp->dhd_watchdog_ms_backup) {
1612 DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1613 __FUNCTION__));
1614 dhd_os_wd_timer(dhdp, 0);
1615 }
1616 if (dhdp->busstate != DHD_BUS_DOWN) {
1617 DHD_GENERAL_LOCK(dhdp, flags);
1618 dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1619 DHD_GENERAL_UNLOCK(dhdp, flags);
1620 }
1621
1622 timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1623 if ((timeleft == 0) || (timeleft == 1)) {
1624 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1625 __FUNCTION__, dhdp->dhd_bus_busy_state));
1626 ASSERT(0);
1627 }
1628
1629 return;
1630 }
1631
1632 static void
1633 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1634 {
1635 unsigned long flags;
1636 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1637
1638 DHD_GENERAL_LOCK(bus->dhd, flags);
1639 bus->dhd->busstate = DHD_BUS_DOWN;
1640 DHD_GENERAL_UNLOCK(bus->dhd, flags);
1641
1642 dhd_os_sdlock(bus->dhd);
1643
1644 if (bus->sih && !bus->dhd->dongle_isolation) {
1645 if (bus->sih->buscorerev == 66) {
1646 dhd_bus_pcie_pwr_req_reload_war(bus);
1647 }
1648
1649 /* WAR for the issue where insmod fails after rmmod on Brix Android */
1650
1651 /* if the pcie link is down, watchdog reset
1652 * should not be done, as it may hang
1653 */
1654
1655 if (!bus->is_linkdown) {
1656 dhdpcie_dongle_reset(bus);
1657 }
1658
1659 bus->dhd->is_pcie_watchdog_reset = TRUE;
1660 }
1661
1662 dhd_os_sdunlock(bus->dhd);
1663
1664 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1665 }
1666
1667 void
1668 dhd_init_bus_lock(dhd_bus_t *bus)
1669 {
1670 if (!bus->bus_lock) {
1671 bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
1672 }
1673 }
1674
1675 void
1676 dhd_deinit_bus_lock(dhd_bus_t *bus)
1677 {
1678 if (bus->bus_lock) {
1679 dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
1680 bus->bus_lock = NULL;
1681 }
1682 }
1683
1684 /** Detach and free everything */
1685 void
1686 dhdpcie_bus_release(dhd_bus_t *bus)
1687 {
1688 bool dongle_isolation = FALSE;
1689 osl_t *osh = NULL;
1690 unsigned long flags_bus;
1691
1692 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1693
1694 if (bus) {
1695
1696 osh = bus->osh;
1697 ASSERT(osh);
1698
1699 if (bus->dhd) {
1700 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
1701 debugger_close();
1702 #endif /* DEBUGGER || DHD_DSCOPE */
1703 dhdpcie_advertise_bus_cleanup(bus->dhd);
1704 dongle_isolation = bus->dhd->dongle_isolation;
1705 bus->dhd->is_pcie_watchdog_reset = FALSE;
1706 dhdpcie_bus_remove_prep(bus);
1707
1708 if (bus->intr) {
1709 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
1710 dhdpcie_bus_intr_disable(bus);
1711 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
1712 dhdpcie_free_irq(bus);
1713 }
1714 dhd_deinit_bus_lock(bus);
1715 /**
1716 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to
1717 * access dongle registers.
1718 * dhd_detach communicates with the dongle to delete flowrings etc.,
1719 * so dhdpcie_bus_release_dongle must be called only after dhd_detach.
1720 */
1721 dhd_detach(bus->dhd);
1722 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
1723 dhd_free(bus->dhd);
1724 bus->dhd = NULL;
1725 }
1726 /* unmap the regs and tcm here!! */
1727 if (bus->regs) {
1728 dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
1729 bus->regs = NULL;
1730 }
1731 if (bus->tcm) {
1732 dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
1733 bus->tcm = NULL;
1734 }
1735
1736 dhdpcie_bus_release_malloc(bus, osh);
1737 /* Detach pcie shared structure */
1738 if (bus->pcie_sh) {
1739 MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
1740 bus->pcie_sh = NULL;
1741 }
1742
1743 if (bus->console.buf != NULL) {
1744 MFREE(osh, bus->console.buf, bus->console.bufsize);
1745 }
1746
1747 /* Finally free bus info */
1748 MFREE(osh, bus, sizeof(dhd_bus_t));
1749
1750 g_dhd_bus = NULL;
1751 }
1752
1753 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1754 } /* dhdpcie_bus_release */
1755
1756 void
1757 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
1758 {
1759 DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
1760 bus->dhd, bus->dhd->dongle_reset));
1761
1762 if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
1763 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1764 return;
1765 }
1766
1767 if (bus->is_linkdown) {
1768 DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
1769 return;
1770 }
1771
1772 if (bus->sih) {
1773
1774 if (!dongle_isolation &&
1775 (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
1776 dhdpcie_dongle_reset(bus);
1777 }
1778
1779 if (bus->ltrsleep_on_unload) {
1780 si_corereg(bus->sih, bus->sih->buscoreidx,
1781 OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
1782 }
1783
1784 if (bus->sih->buscorerev == 13)
1785 pcie_serdes_iddqdisable(bus->osh, bus->sih,
1786 (sbpcieregs_t *) bus->regs);
1787
1788 if (dhd_download_fw_on_driverload) {
1789 /* Disable CLKREQ# */
1790 dhdpcie_clkreq(bus->osh, 1, 0);
1791 }
1792
1793 if (bus->sih != NULL) {
1794 si_detach(bus->sih);
1795 bus->sih = NULL;
1796 }
1797 if (bus->vars && bus->varsz)
1798 MFREE(osh, bus->vars, bus->varsz);
1799 bus->vars = NULL;
1800 }
1801
1802 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1803 }
1804
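/** 32 bit config read */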
1805 uint32
1806 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
1807 {
1808 uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
1809 return data;
1810 }
1811
1812 /** 32 bit config write */
1813 void
1814 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
1815 {
1816 OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
1817 }
1818
1819 void
1820 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
1821 {
1822 OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
1823 }
1824
1825 void
1826 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
1827 {
1828 int32 min_size = DONGLE_MIN_MEMSIZE;
1829 /* Restrict the memsize to user specified limit */
1830 DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
1831 dhd_dongle_memsize, min_size));
1832 if ((dhd_dongle_memsize > min_size) &&
1833 (dhd_dongle_memsize < (int32)bus->orig_ramsize))
1834 bus->ramsize = dhd_dongle_memsize;
1835 }
1836
1837 void
1838 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
1839 {
1840 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1841
1842 if (bus->dhd && bus->dhd->dongle_reset)
1843 return;
1844
1845 if (bus->vars && bus->varsz) {
1846 MFREE(osh, bus->vars, bus->varsz);
1847 bus->vars = NULL;
1848 }
1849
1850 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1851 return;
1852
1853 }
1854
1855 /** Stop bus module: clear pending frames, disable data flow */
1856 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
1857 {
1858 unsigned long flags, flags_bus;
1859
1860 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1861
1862 if (!bus->dhd)
1863 return;
1864
1865 if (bus->dhd->busstate == DHD_BUS_DOWN) {
1866 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
1867 goto done;
1868 }
1869
1870 DHD_DISABLE_RUNTIME_PM(bus->dhd);
1871
1872 DHD_GENERAL_LOCK(bus->dhd, flags);
1873 bus->dhd->busstate = DHD_BUS_DOWN;
1874 DHD_GENERAL_UNLOCK(bus->dhd, flags);
1875
1876 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1877 atomic_set(&bus->dhd->block_bus, TRUE);
1878 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1879
1880 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
1881 dhdpcie_bus_intr_disable(bus);
1882 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
1883
1884 if (!bus->is_linkdown) {
1885 uint32 status;
1886 status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
1887 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
1888 }
1889
1890 if (!dhd_download_fw_on_driverload) {
1891 dhd_dpc_kill(bus->dhd);
1892 }
1893
1894 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1895 pm_runtime_disable(dhd_bus_to_dev(bus));
1896 pm_runtime_set_suspended(dhd_bus_to_dev(bus));
1897 pm_runtime_enable(dhd_bus_to_dev(bus));
1898 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1899
1900 /* Clear rx control and wake any waiters */
1901 dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
1902 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
1903
1904 done:
1905 return;
1906 }
1907
1908 /**
1909 * Watchdog timer function.
1910 * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
1911 */
1912 bool dhd_bus_watchdog(dhd_pub_t *dhd)
1913 {
1914 unsigned long flags;
1915 dhd_bus_t *bus = dhd->bus;
1916
1917 DHD_GENERAL_LOCK(dhd, flags);
1918 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
1919 DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
1920 DHD_GENERAL_UNLOCK(dhd, flags);
1921 return FALSE;
1922 }
1923 DHD_BUS_BUSY_SET_IN_WD(dhd);
1924 DHD_GENERAL_UNLOCK(dhd, flags);
1925
1926 #ifdef DHD_PCIE_RUNTIMEPM
1927 dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
1928 #endif /* DHD_PCIE_RUNTIMEPM */
1929
1930 /* Poll for console output periodically */
1931 if (dhd->busstate == DHD_BUS_DATA &&
1932 dhd->dhd_console_ms != 0 &&
1933 bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
1934 bus->console.count += dhd_watchdog_ms;
1935 if (bus->console.count >= dhd->dhd_console_ms) {
1936 bus->console.count -= dhd->dhd_console_ms;
1937
1938 if (MULTIBP_ENAB(bus->sih)) {
1939 dhd_bus_pcie_pwr_req(bus);
1940 }
1941
1942 /* Make sure backplane clock is on */
1943 if (dhdpcie_bus_readconsole(bus) < 0) {
1944 dhd->dhd_console_ms = 0; /* On error, stop trying */
1945 }
1946
1947 if (MULTIBP_ENAB(bus->sih)) {
1948 dhd_bus_pcie_pwr_req_clear(bus);
1949 }
1950 }
1951 }
1952
1953 DHD_GENERAL_LOCK(dhd, flags);
1954 DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
1955 dhd_os_busbusy_wake(dhd);
1956 DHD_GENERAL_UNLOCK(dhd, flags);
1957
1958 return TRUE;
1959 } /* dhd_bus_watchdog */
1960
1961 #if defined(SUPPORT_MULTIPLE_REVISION)
1962 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
1963 defined(SUPPORT_BCM4359_MIXED_MODULES)
1964 #define VENDOR_MURATA "murata"
1965 #define VENDOR_WISOL "wisol"
1966 #define VNAME_DELIM "_"
1967 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
1968
1969 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
1970
1971 #define MAX_EXTENSION 20
1972 #define MODULE_BCM4361_INDEX 3
1973 #define CHIP_REV_A0 1
1974 #define CHIP_REV_A1 2
1975 #define CHIP_REV_B0 3
1976 #define CHIP_REV_B1 4
1977 #define CHIP_REV_B2 5
1978 #define CHIP_REV_C0 6
1979 #define BOARD_TYPE_EPA 0x080f
1980 #define BOARD_TYPE_IPA 0x0827
1981 #define BOARD_TYPE_IPA_OLD 0x081a
1982 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
1983 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
1984 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
1985 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
1986 #define MAX_VID_LEN 8
1987 #define CIS_TUPLE_HDR_LEN 2
1988 #define CIS_TUPLE_START_ADDRESS 0x18011110
1989 #define CIS_TUPLE_END_ADDRESS 0x18011167
1990 #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
1991 + 1) / sizeof(uint32))
1992 #define CIS_TUPLE_TAG_START 0x80
1993 #define CIS_TUPLE_TAG_VENDOR 0x81
1994 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
1995 #define CIS_TUPLE_TAG_LENGTH 1
1996 #define NVRAM_FEM_MURATA "_murata"
1997 #define CID_FEM_MURATA "_mur_"
1998
1999 typedef struct cis_tuple_format {
2000 uint8 id;
2001 uint8 len; /* total length of tag and data */
2002 uint8 tag;
2003 uint8 data[1];
2004 } cis_tuple_format_t;
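/*
 * Rough sketch of the OTP tuple area layout assumed by
 * dhd_parse_board_information_bcm4361() below (byte values illustrative):
 *
 *	[id 0x80][len][tag 0x81][VID bytes ...]		vendor tuple
 *	[id 0x80][len][tag 0x1b][boardtype byte]	boardtype tuple
 *
 * 'len' covers the tag byte plus the data, so the data length is
 * len - CIS_TUPLE_TAG_LENGTH and the next tuple starts
 * len + CIS_TUPLE_HDR_LEN bytes further on.
 */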
2005
2006 typedef struct {
2007 char cid_ext[MAX_EXTENSION];
2008 char nvram_ext[MAX_EXTENSION];
2009 char fw_ext[MAX_EXTENSION];
2010 } naming_info_t;
2011
2012 naming_info_t bcm4361_naming_table[] = {
2013 { {""}, {""}, {""} },
2014 { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2015 { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2016 { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2017 { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2018 { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2019 { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2020 { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2021 { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2022 { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2023 { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2024 { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2025 { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2026 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2027 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2028 { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2029 { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2030 { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2031 { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2032 { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2033 { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2034 { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2035 { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2036 { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
2037 { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2038 { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2039 { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2040 { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2041 { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2042 { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2043 { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2044 { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2045 { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2046 { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2047 { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2048 };
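/*
 * Each row maps a CID extension (as read from OTP or .cid.info) to the nvram
 * and firmware filename suffixes appended by concate_revision_bcm4361(); for
 * example a module reporting "r01i_e32_b0" gets "_r01i_b0" appended to the
 * nvram path and "_b0" to the firmware path.
 */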
2049
2050 static naming_info_t *
2051 dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2052 {
2053 int index_found = 0, i = 0;
2054
2055 if (module_type && strlen(module_type) > 0) {
2056 for (i = 1; i < table_size; i++) {
2057 if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2058 index_found = i;
2059 break;
2060 }
2061 }
2062 }
2063
2064 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2065
2066 return &table[index_found];
2067 }
2068
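/*
 * Example (CID string format assumed): a cid_info such as "xx_yy_r01i_e32_b0"
 * is truncated past its second '_' to "r01i_e32_b0" before being matched
 * against the cid_ext column of the naming table.
 */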
2069 static naming_info_t *
2070 dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2071 char *cid_info)
2072 {
2073 int index_found = 0, i = 0;
2074 char *ptr;
2075
2076 /* truncate extension */
2077 for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2078 ptr = bcmstrstr(ptr, "_");
2079 if (ptr) {
2080 ptr++;
2081 }
2082 }
2083
2084 for (i = 1; i < table_size && ptr; i++) {
2085 if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2086 index_found = i;
2087 break;
2088 }
2089 }
2090
2091 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2092
2093 return &table[index_found];
2094 }
2095
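/*
 * Reads the CIS tuple area of the 4361 from OTP over the backplane (after
 * first programming the OTP control and PMU min-resource-mask registers
 * listed below) and extracts the vendor ID bytes and the board type.
 */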
2096 static int
2097 dhd_parse_board_information_bcm4361(dhd_bus_t *bus, int *boardtype,
2098 unsigned char *vid, int *vid_length)
2099 {
2100 int boardtype_backplane_addr[] = {
2101 0x18010324, /* OTP Control 1 */
2102 0x18012618, /* PMU min resource mask */
2103 };
2104 int boardtype_backplane_data[] = {
2105 0x00fa0000,
2106 0x0e4fffff /* Keep on ARMHTAVAIL */
2107 };
2108 int int_val = 0, i = 0;
2109 cis_tuple_format_t *tuple;
2110 int totlen, len;
2111 uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2112
2113 for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2114 /* Write new OTP and PMU configuration */
2115 if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2116 &boardtype_backplane_data[i], FALSE) != BCME_OK) {
2117 DHD_ERROR(("invalid size/addr combination\n"));
2118 return BCME_ERROR;
2119 }
2120
2121 if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2122 &int_val, TRUE) != BCME_OK) {
2123 DHD_ERROR(("invalid size/addr combination\n"));
2124 return BCME_ERROR;
2125 }
2126
2127 DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2128 __FUNCTION__, boardtype_backplane_addr[i], int_val));
2129 }
2130
2131 /* read tuple raw data */
2132 for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2133 if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2134 sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
2135 break;
2136 }
2137 }
2138
2139 totlen = i * sizeof(uint32);
2140 tuple = (cis_tuple_format_t *)raw_data;
2141
2142 /* check that the first tuple carries the 'start' tag */
2143 if (tuple->id != CIS_TUPLE_TAG_START) {
2144 return BCME_ERROR;
2145 }
2146
2147 *vid_length = *boardtype = 0;
2148
2149 /* find tagged parameter */
2150 while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2151 (*vid_length == 0 || *boardtype == 0)) {
2152 len = tuple->len;
2153
2154 if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2155 (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2156 /* found VID */
2157 memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2158 *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2159 prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2160 }
2161 else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2162 (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2163 /* found boardtype */
2164 *boardtype = (int)tuple->data[0];
2165 prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2166 }
2167
2168 tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2169 totlen -= (len + CIS_TUPLE_HDR_LEN);
2170 }
2171
2172 if (*vid_length <= 0 || *boardtype <= 0) {
2173 DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2174 *vid_length, *boardtype));
2175 return BCME_ERROR;
2176 }
2177
2178 return BCME_OK;
2179
2180 }
2181
2182 static naming_info_t *
2183 dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2184 dhd_bus_t *bus, bool *is_murata_fem)
2185 {
2186 int board_type = 0, chip_rev = 0, vid_length = 0;
2187 unsigned char vid[MAX_VID_LEN];
2188 naming_info_t *info = &table[0];
2189 char *cid_info = NULL;
2190
2191 if (!bus || !bus->sih) {
2192 DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2193 return NULL;
2194 }
2195 chip_rev = bus->sih->chiprev;
2196
2197 if (dhd_parse_board_information_bcm4361(bus, &board_type, vid, &vid_length)
2198 != BCME_OK) {
2199 DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2200 return NULL;
2201 }
2202
2203 DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2204
2205 /* Only the A0 chipset is handled as an exception */
2206 if (chip_rev == CHIP_REV_A0) {
2207 if (board_type == BOARD_TYPE_EPA) {
2208 info = dhd_find_naming_info(table, table_size,
2209 DEFAULT_CIDINFO_FOR_EPA);
2210 } else if ((board_type == BOARD_TYPE_IPA) ||
2211 (board_type == BOARD_TYPE_IPA_OLD)) {
2212 info = dhd_find_naming_info(table, table_size,
2213 DEFAULT_CIDINFO_FOR_IPA);
2214 }
2215 } else {
2216 cid_info = dhd_get_cid_info(vid, vid_length);
2217 if (cid_info) {
2218 info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2219 if (strstr(cid_info, CID_FEM_MURATA)) {
2220 *is_murata_fem = TRUE;
2221 }
2222 }
2223 }
2224
2225 return info;
2226 }
2227 #endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
2228
2229 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2230 {
2231 uint32 chiprev;
2232 #if defined(SUPPORT_MULTIPLE_CHIPS)
2233 char chipver_tag[20] = "_4358";
2234 #else
2235 char chipver_tag[10] = {0, };
2236 #endif /* SUPPORT_MULTIPLE_CHIPS */
2237
2238 chiprev = dhd_bus_chiprev(bus);
2239 if (chiprev == 0) {
2240 DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2241 strcat(chipver_tag, "_a0");
2242 } else if (chiprev == 1) {
2243 DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2244 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2245 strcat(chipver_tag, "_a1");
2246 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2247 } else if (chiprev == 3) {
2248 DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2249 #if defined(SUPPORT_MULTIPLE_CHIPS)
2250 strcat(chipver_tag, "_a3");
2251 #endif /* SUPPORT_MULTIPLE_CHIPS */
2252 } else {
2253 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2254 }
2255
2256 strcat(fw_path, chipver_tag);
2257
2258 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2259 if (chiprev == 1 || chiprev == 3) {
2260 int ret = dhd_check_module_b85a();
2261 if ((chiprev == 1) && (ret < 0)) {
2262 memset(chipver_tag, 0x00, sizeof(chipver_tag));
2263 strcat(chipver_tag, "_b85");
2264 strcat(chipver_tag, "_a1");
2265 }
2266 }
2267
2268 DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2269 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2270
2271 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2272 if (system_rev >= 10) {
2273 DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
2274 strcat(chipver_tag, "_r10");
2275 }
2276 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2277 strcat(nv_path, chipver_tag);
2278
2279 return BCME_OK;
2280 }
2281
2282 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2283 {
2284 uint32 chip_ver;
2285 char chipver_tag[10] = {0, };
2286 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2287 defined(SUPPORT_BCM4359_MIXED_MODULES)
2288 char chipver_tag_nv[20] = {0, };
2289 int module_type = -1;
2290 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2291
2292 chip_ver = bus->sih->chiprev;
2293 if (chip_ver == 4) {
2294 DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2295 strncat(chipver_tag, "_b0", strlen("_b0"));
2296 } else if (chip_ver == 5) {
2297 DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2298 strncat(chipver_tag, "_b1", strlen("_b1"));
2299 } else if (chip_ver == 9) {
2300 DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2301 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2302 defined(SUPPORT_BCM4359_MIXED_MODULES)
2303 if (dhd_check_module(VENDOR_MURATA)) {
2304 strncat(chipver_tag_nv, VNAME_DELIM, strlen(VNAME_DELIM));
2305 strncat(chipver_tag_nv, VENDOR_MURATA, strlen(VENDOR_MURATA));
2306 } else if (dhd_check_module(VENDOR_WISOL)) {
2307 strncat(chipver_tag_nv, VNAME_DELIM, strlen(VNAME_DELIM));
2308 strncat(chipver_tag_nv, VENDOR_WISOL, strlen(VENDOR_WISOL));
2309 }
2310 /* For SEMCO modules no extra vendor string needs to be added */
2311 strncat(chipver_tag_nv, "_c0", strlen("_c0"));
2312 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2313 strncat(chipver_tag, "_c0", strlen("_c0"));
2314 #if defined(CONFIG_WLAN_GRACE) || defined(CONFIG_SEC_GRACEQLTE_PROJECT) || \
2315 defined(CONFIG_SEC_LYKANLTE_PROJECT) || defined(CONFIG_SEC_KELLYLTE_PROJECT)
2316 DHD_ERROR(("----- Adding _plus string -----\n"));
2317 strncat(chipver_tag, "_plus", strlen("_plus"));
2318 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2319 defined(SUPPORT_BCM4359_MIXED_MODULES)
2320 strncat(chipver_tag_nv, "_plus", strlen("_plus"));
2321 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2322 #endif /* CONFIG_WLAN_GRACE || CONFIG_SEC_GRACEQLTE_PROJECT || CONFIG_SEC_LYKANLTE_PROJECT ||
2323 * CONFIG_SEC_KELLYLTE_PROJECT
2324 */
2325 } else {
2326 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2327 return BCME_ERROR;
2328 }
2329
2330 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2331 defined(SUPPORT_BCM4359_MIXED_MODULES)
2332 module_type = dhd_check_module_b90();
2333
2334 switch (module_type) {
2335 case BCM4359_MODULE_TYPE_B90B:
2336 strcat(fw_path, chipver_tag);
2337 break;
2338 case BCM4359_MODULE_TYPE_B90S:
2339 strcat(fw_path, chipver_tag);
2340 if (!(strstr(nv_path, VENDOR_MURATA) || strstr(nv_path, VENDOR_WISOL))) {
2341 strcat(nv_path, chipver_tag_nv);
2342 } else {
2343 strcat(nv_path, chipver_tag);
2344 }
2345 break;
2346 default:
2347 /*
2348 * If the .cid.info file does not exist, force-load the B90S FW
2349 * for the initial MFG boot-up.
2350 */
2351 if (chip_ver == 5) {
2352 strncat(fw_path, "_b90s", strlen("_b90s"));
2353 }
2354 strcat(fw_path, chipver_tag);
2355 strcat(nv_path, chipver_tag);
2356 break;
2357 }
2358 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2359 strcat(fw_path, chipver_tag);
2360 strcat(nv_path, chipver_tag);
2361 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2362
2363 return BCME_OK;
2364 }
2365 static int
2366 concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2367 {
2368 int ret = BCME_OK;
2369 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2370 char module_type[MAX_VNAME_LEN];
2371 naming_info_t *info = NULL;
2372 bool is_murata_fem = FALSE;
2373
2374 memset(module_type, 0, sizeof(module_type));
2375
2376 if (dhd_check_module_bcm4361(module_type,
2377 MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2378 info = dhd_find_naming_info(bcm4361_naming_table,
2379 ARRAYSIZE(bcm4361_naming_table), module_type);
2380 } else {
2381 /* in case the .cid.info file does not exist */
2382 info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2383 ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2384 }
2385
2386 if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
2387 is_murata_fem = FALSE;
2388 }
2389
2390 if (info) {
2391 if (is_murata_fem) {
2392 strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2393 }
2394 strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2395 strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2396 } else {
2397 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2398 ret = BCME_ERROR;
2399 }
2400 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2401 char chipver_tag[10] = {0, };
2402
2403 strcat(fw_path, chipver_tag);
2404 strcat(nv_path, chipver_tag);
2405 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2406
2407 return ret;
2408 }
2409
2410 int
2411 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2412 {
2413 int res = 0;
2414
2415 if (!bus || !bus->sih) {
2416 DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2417 return -1;
2418 }
2419
2420 if (!fw_path || !nv_path) {
2421 DHD_ERROR(("fw_path or nv_path is null.\n"));
2422 return res;
2423 }
2424
2425 switch (si_chipid(bus->sih)) {
2426
2427 case BCM43569_CHIP_ID:
2428 case BCM4358_CHIP_ID:
2429 res = concate_revision_bcm4358(bus, fw_path, nv_path);
2430 break;
2431 case BCM4355_CHIP_ID:
2432 case BCM4359_CHIP_ID:
2433 res = concate_revision_bcm4359(bus, fw_path, nv_path);
2434 break;
2435 case BCM4361_CHIP_ID:
2436 case BCM4347_CHIP_ID:
2437 res = concate_revision_bcm4361(bus, fw_path, nv_path);
2438 break;
2439 default:
2440 DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2441 return res;
2442 }
2443
2444 return res;
2445 }
2446 #endif /* SUPPORT_MULTIPLE_REVISION */
2447
2448 uint16
2449 dhd_get_chipid(dhd_pub_t *dhd)
2450 {
2451 dhd_bus_t *bus = dhd->bus;
2452
2453 if (bus && bus->sih)
2454 return (uint16)si_chipid(bus->sih);
2455 else
2456 return 0;
2457 }
2458
2459 /**
2460 * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2461 *
2462 * BCM_REQUEST_FW specific :
2463 * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
2464 * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
2465 *
2466 * BCMEMBEDIMAGE specific:
2467 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2468 * file will be used instead.
2469 *
2470 * @return BCME_OK on success
2471 */
2472 int
2473 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
2474 char *pfw_path, char *pnv_path)
2475 {
2476 int ret;
2477
2478 bus->fw_path = pfw_path;
2479 bus->nv_path = pnv_path;
2480
2481 #if defined(SUPPORT_MULTIPLE_REVISION)
2482 if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2483 DHD_ERROR(("%s: failed to concatenate revision\n",
2484 __FUNCTION__));
2485 return BCME_BADARG;
2486 }
2487 #endif /* SUPPORT_MULTIPLE_REVISION */
2488
2489 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2490 dhd_set_blob_support(bus->dhd, bus->fw_path);
2491 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2492
2493 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2494 __FUNCTION__, bus->fw_path, bus->nv_path));
2495 dhdpcie_dump_resource(bus);
2496
2497 ret = dhdpcie_download_firmware(bus, osh);
2498
2499 return ret;
2500 }
2501
2502 /**
2503 * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2504 *
2505 * BCM_REQUEST_FW specific :
2506 * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
2507 * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
2508 *
2509 * BCMEMBEDIMAGE specific:
2510 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2511 * file will be used instead.
2512 *
2513 * @return BCME_OK on success
2514 */
2515 static int
2516 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
2517 {
2518 int ret = 0;
2519 #if defined(BCM_REQUEST_FW)
2520 uint chipid = bus->sih->chip;
2521 uint revid = bus->sih->chiprev;
2522 char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
2523 char nv_path[64]; /* path to nvram vars file */
2524 bus->fw_path = fw_path;
2525 bus->nv_path = nv_path;
2526 switch (chipid) {
2527 case BCM43570_CHIP_ID:
2528 bcmstrncat(fw_path, "43570", 5);
2529 switch (revid) {
2530 case 0:
2531 bcmstrncat(fw_path, "a0", 2);
2532 break;
2533 case 2:
2534 bcmstrncat(fw_path, "a2", 2);
2535 break;
2536 default:
2537 DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
2538 revid));
2539 break;
2540 }
2541 break;
2542 default:
2543 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
2544 chipid));
2545 return 0;
2546 }
2547 /* load board specific nvram file */
2548 snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
2549 /* load firmware */
2550 snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
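	/* e.g. a 43570 rev 2 device ends up with
	 *   nv_path = "/lib/firmware/brcm/bcm43570a2.nvm"
	 *   fw_path = "/lib/firmware/brcm/bcm43570a2-firmware.bin"
	 */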
2551 #endif /* BCM_REQUEST_FW */
2552
2553 DHD_OS_WAKE_LOCK(bus->dhd);
2554 ret = _dhdpcie_download_firmware(bus);
2555
2556 DHD_OS_WAKE_UNLOCK(bus->dhd);
2557 return ret;
2558 } /* dhdpcie_download_firmware */
2559
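/* Pattern used to pre-fill the read-back buffer in dhdpcie_download_code_file()
 * before verifying a downloaded image, so that short or failed reads stand out.
 */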
2560 #define DHD_MEMORY_SET_PATTERN 0xAA
2561
2562 /**
2563 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
2564 * is updated with the event logging partitions within that file as well.
2565 *
2566 * @param pfw_path Path to .bin or .bea file
2567 */
2568 static int
2569 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
2570 {
2571 int bcmerror = BCME_ERROR;
2572 int offset = 0;
2573 #if defined(DHD_FW_MEM_CORRUPTION)
2574 uint8 *p_org_fw = NULL;
2575 uint32 org_fw_size = 0;
2576 uint32 fw_write_offset = 0;
2577 #endif /* DHD_FW_MEM_CORRUPTION */
2578 int len = 0;
2579 bool store_reset;
2580 char *imgbuf = NULL;
2581 uint8 *memblock = NULL, *memptr;
2582 int offset_end = bus->ramsize;
2583
2584 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
2585
2586 /* Should succeed in opening image if it is actually given through registry
2587 * entry or as a module parameter.
2588 */
2589 imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
2590 if (imgbuf == NULL) {
2591 goto err;
2592 }
2593
2594 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
2595 if (memblock == NULL) {
2596 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
2597 bcmerror = BCME_NOMEM;
2598 goto err;
2599 }
2600 if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
2601 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
2602 }
2603
2604 #if defined(DHD_FW_MEM_CORRUPTION)
2605 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2606 org_fw_size = dhd_os_get_image_size(imgbuf);
2607 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2608 p_org_fw = (uint8*)DHD_OS_PREALLOC(bus->dhd,
2609 DHD_PREALLOC_MEMDUMP_RAM, org_fw_size);
2610 #else
2611 p_org_fw = (uint8*)VMALLOC(bus->dhd->osh, org_fw_size);
2612 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
2613 if (p_org_fw == NULL) {
2614 DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n",
2615 __FUNCTION__, org_fw_size));
2616 bcmerror = BCME_NOMEM;
2617 goto err;
2618 } else {
2619 memset(p_org_fw, 0, org_fw_size);
2620 }
2621 }
2622 #endif /* DHD_FW_MEM_CORRUPTION */
2623
2624 /* check if CR4/CA7 */
2625 store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
2626 si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
2627 /* Download image with MEMBLOCK size */
2628 while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
2629 if (len < 0) {
2630 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
2631 bcmerror = BCME_ERROR;
2632 goto err;
2633 }
2634 /* if the address is 0, store the reset instruction to be written at address 0 */
2635 if (store_reset) {
2636 ASSERT(offset == 0);
2637 bus->resetinstr = *(((uint32*)memptr));
2638 /* Add start of RAM address to the address given by user */
2639 offset += bus->dongle_ram_base;
2640 offset_end += offset;
2641 store_reset = FALSE;
2642 }
2643
2644 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
2645 if (bcmerror) {
2646 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
2647 __FUNCTION__, bcmerror, len, offset));
2648 goto err;
2649 }
2650 offset += MEMBLOCK;
2651 #if defined(DHD_FW_MEM_CORRUPTION)
2652 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2653 memcpy((p_org_fw + fw_write_offset), memptr, len);
2654 fw_write_offset += len;
2655 }
2656 #endif /* DHD_FW_MEM_CORRUPTION */
2657
2658 if (offset >= offset_end) {
2659 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
2660 __FUNCTION__, offset, offset_end));
2661 bcmerror = BCME_ERROR;
2662 goto err;
2663 }
2664 }
2665 #ifdef DHD_FW_MEM_CORRUPTION
2666 /* Read and compare the downloaded code */
2667 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2668 unsigned char *p_readback_buf = NULL;
2669 uint32 compared_len;
2670 uint32 remaining_len = 0;
2671
2672 compared_len = 0;
2673 p_readback_buf = MALLOC(bus->dhd->osh, MEMBLOCK);
2674 if (p_readback_buf == NULL) {
2675 DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n",
2676 __FUNCTION__, MEMBLOCK));
2677 bcmerror = BCME_NOMEM;
2678 goto compare_err;
2679 }
2680 /* Read image to verify downloaded contents. */
2681 offset = bus->dongle_ram_base;
2682
2683 while (compared_len < org_fw_size) {
2684 memset(p_readback_buf, DHD_MEMORY_SET_PATTERN, MEMBLOCK);
2685 remaining_len = org_fw_size - compared_len;
2686
2687 if (remaining_len >= MEMBLOCK) {
2688 len = MEMBLOCK;
2689 } else {
2690 len = remaining_len;
2691 }
2692 bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
2693 (uint8 *)p_readback_buf, len);
2694 if (bcmerror) {
2695 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2696 __FUNCTION__, bcmerror, MEMBLOCK, offset));
2697 goto compare_err;
2698 }
2699
2700 if (memcmp((p_org_fw + compared_len), p_readback_buf, len) != 0) {
2701 DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n",
2702 __FUNCTION__, compared_len));
2703 bcmerror = BCME_ERROR;
2704 goto compare_err;
2705 }
2706
2707 compared_len += len;
2708 offset += len;
2709 }
2710 DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
2711
2712 compare_err:
2713 if (p_readback_buf) {
2714 MFREE(bus->dhd->osh, p_readback_buf, MEMBLOCK);
2715 }
2716 }
2717 #endif /* DHD_FW_MEM_CORRUPTION */
2718
2719 err:
2720 #if defined(DHD_FW_MEM_CORRUPTION)
2721 if (p_org_fw) {
2722 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2723 DHD_OS_PREFREE(bus->dhd, p_org_fw, org_fw_size);
2724 #else
2725 VMFREE(bus->dhd->osh, p_org_fw, org_fw_size);
2726 #endif // endif
2727 }
2728 #endif /* DHD_FW_MEM_CORRUPTION */
2729 if (memblock) {
2730 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
2731 }
2732
2733 if (imgbuf) {
2734 dhd_os_close_image1(bus->dhd, imgbuf);
2735 }
2736
2737 return bcmerror;
2738 } /* dhdpcie_download_code_file */
2739
2740 #ifdef CUSTOMER_HW4_DEBUG
2741 #define MIN_NVRAMVARS_SIZE 128
2742 #endif /* CUSTOMER_HW4_DEBUG */
2743
2744 static int
2745 dhdpcie_download_nvram(struct dhd_bus *bus)
2746 {
2747 int bcmerror = BCME_ERROR;
2748 uint len;
2749 char * memblock = NULL;
2750 char *bufp;
2751 char *pnv_path;
2752 bool nvram_file_exists;
2753 bool nvram_uefi_exists = FALSE;
2754 bool local_alloc = FALSE;
2755 pnv_path = bus->nv_path;
2756
2757 nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
2758
2759 /* First try UEFI */
2760 len = MAX_NVRAMBUF_SIZE;
2761 dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
2762
2763 /* If UEFI empty, then read from file system */
2764 if ((len <= 0) || (memblock == NULL)) {
2765
2766 if (nvram_file_exists) {
2767 len = MAX_NVRAMBUF_SIZE;
2768 dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
2769 if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
2770 goto err;
2771 }
2772 }
2773 else {
2774 /* For SROM OTP no external file or UEFI required */
2775 bcmerror = BCME_OK;
2776 }
2777 } else {
2778 nvram_uefi_exists = TRUE;
2779 }
2780
2781 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
2782
2783 if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
2784 bufp = (char *) memblock;
2785
2786 {
2787 bufp[len] = 0;
2788 if (nvram_uefi_exists || nvram_file_exists) {
2789 len = process_nvram_vars(bufp, len);
2790 }
2791 }
2792
2793 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
2794 #ifdef CUSTOMER_HW4_DEBUG
2795 if (len < MIN_NVRAMVARS_SIZE) {
2796 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
2797 __FUNCTION__));
2798 bcmerror = BCME_ERROR;
2799 goto err;
2800 }
2801 #endif /* CUSTOMER_HW4_DEBUG */
2802
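		/* Pad the variable block to a 4-byte boundary and append a NUL
		 * terminator; the length handed to dhdpcie_downloadvars() below
		 * includes that terminator.
		 */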
2803 if (len % 4) {
2804 len += 4 - (len % 4);
2805 }
2806 bufp += len;
2807 *bufp++ = 0;
2808 if (len)
2809 bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
2810 if (bcmerror) {
2811 DHD_ERROR(("%s: error downloading vars: %d\n",
2812 __FUNCTION__, bcmerror));
2813 }
2814 }
2815
2816 err:
2817 if (memblock) {
2818 if (local_alloc) {
2819 MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
2820 } else {
2821 dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
2822 }
2823 }
2824
2825 return bcmerror;
2826 }
2827
2828 static int
2829 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
2830 {
2831 int bcmerror = BCME_ERROR;
2832 char *imgbuf = NULL;
2833
2834 if (buf == NULL || len == 0)
2835 goto err;
2836
2837 /* External image takes precedence if specified */
2838 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2839 // opens and seeks to correct file offset:
2840 imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
2841 if (imgbuf == NULL) {
2842 DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
2843 goto err;
2844 }
2845
2846 /* Read it */
2847 if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
2848 DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
2849 goto err;
2850 }
2851
2852 bcmerror = BCME_OK;
2853 }
2854
2855 err:
2856 if (imgbuf)
2857 dhd_os_close_image1(bus->dhd, imgbuf);
2858
2859 return bcmerror;
2860 }
2861
2862 /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares its sysmem
2863 * with the BMC, and how much of that sysmem belongs to the CA7 can be adjusted at dongle compile time.
2864 * So the DHD needs to detect this case and update the dongle RAMSIZE accordingly.
2865 */
2866 static void
2867 dhdpcie_ramsize_adj(struct dhd_bus *bus)
2868 {
2869 int i, search_len = 0;
2870 uint8 *memptr = NULL;
2871 uint8 *ramsizeptr = NULL;
2872 uint ramsizelen;
2873 uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
2874 hnd_ramsize_ptr_t ramsize_info;
2875
2876 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2877
2878 /* Dongle RAMSIZE adjustment has already been done. */
2879 if (bus->ramsize_adjusted) {
2880 return;
2881 }
2882
2883 /* success or failure, we don't want to be here
2884 * more than once.
2885 */
2886 bus->ramsize_adjusted = TRUE;
2887
2888 /* Skip the adjustment if the user has restricted the dongle RAM size */
2889 if (dhd_dongle_memsize) {
2890 DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__,
2891 dhd_dongle_memsize));
2892 return;
2893 }
2894
2895 /* Out immediately if no image to download */
2896 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2897 DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
2898 return;
2899 }
2900
2901 /* Get maximum RAMSIZE info search length */
2902 for (i = 0; ; i++) {
2903 if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2904 break;
2905
2906 if (search_len < (int)ramsize_ptr_ptr[i])
2907 search_len = (int)ramsize_ptr_ptr[i];
2908 }
2909
2910 if (!search_len)
2911 return;
2912
2913 search_len += sizeof(hnd_ramsize_ptr_t);
2914
2915 memptr = MALLOC(bus->dhd->osh, search_len);
2916 if (memptr == NULL) {
2917 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
2918 return;
2919 }
2920
2921 /* External image takes precedence if specified */
2922 if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
2923 goto err;
2924 }
2925 else {
2926 ramsizeptr = memptr;
2927 ramsizelen = search_len;
2928 }
2929
2930 if (ramsizeptr) {
2931 /* Check Magic */
2932 for (i = 0; ; i++) {
2933 if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2934 break;
2935
2936 if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
2937 continue;
2938
2939 memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
2940 sizeof(hnd_ramsize_ptr_t));
2941
2942 if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
2943 bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
2944 bus->ramsize = LTOH32(ramsize_info.ram_size);
2945 DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
2946 bus->ramsize));
2947 break;
2948 }
2949 }
2950 }
2951
2952 err:
2953 if (memptr)
2954 MFREE(bus->dhd->osh, memptr, search_len);
2955
2956 return;
2957 } /* dhdpcie_ramsize_adj */
2958
2959 /**
2960 * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
2961 *
2962 * BCMEMBEDIMAGE specific:
2963 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2964 * file will be used instead.
2965 *
2966 */
2967 static int
2968 _dhdpcie_download_firmware(struct dhd_bus *bus)
2969 {
2970 int bcmerror = -1;
2971
2972 bool embed = FALSE; /* download embedded firmware */
2973 bool dlok = FALSE; /* download firmware succeeded */
2974
2975 /* Out immediately if no image to download */
2976 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2977 DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
2978 return 0;
2979 }
2980 /* Adjust ram size */
2981 dhdpcie_ramsize_adj(bus);
2982
2983 /* Keep arm in reset */
2984 if (dhdpcie_bus_download_state(bus, TRUE)) {
2985 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
2986 goto err;
2987 }
2988
2989 /* External image takes precedence if specified */
2990 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2991 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
2992 DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
2993 __LINE__));
2994 goto err;
2995 } else {
2996 embed = FALSE;
2997 dlok = TRUE;
2998 }
2999 }
3000
3001 BCM_REFERENCE(embed);
3002 if (!dlok) {
3003 DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
3004 goto err;
3005 }
3006
3007 /* EXAMPLE: nvram_array */
3008 /* If a valid nvram_array is specified as above, it can be passed down to the dongle */
3009 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3010
3011 /* External nvram takes precedence if specified */
3012 if (dhdpcie_download_nvram(bus)) {
3013 DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
3014 goto err;
3015 }
3016
3017 /* Take arm out of reset */
3018 if (dhdpcie_bus_download_state(bus, FALSE)) {
3019 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
3020 goto err;
3021 }
3022
3023 bcmerror = 0;
3024
3025 err:
3026 return bcmerror;
3027 } /* _dhdpcie_download_firmware */
3028
3029 static int
3030 dhdpcie_bus_readconsole(dhd_bus_t *bus)
3031 {
3032 dhd_console_t *c = &bus->console;
3033 uint8 line[CONSOLE_LINE_MAX], ch;
3034 uint32 n, idx, addr;
3035 int rv;
3036 uint readlen = 0;
3037 uint i = 0;
3038
3039 /* Don't do anything until FWREADY updates console address */
3040 if (bus->console_addr == 0)
3041 return -1;
3042
3043 /* Read console log struct */
3044 addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3045
3046 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
3047 return rv;
3048
3049 /* Allocate console buffer (one time only) */
3050 if (c->buf == NULL) {
3051 c->bufsize = ltoh32(c->log.buf_size);
3052 if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
3053 return BCME_NOMEM;
3054 DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
3055 }
3056 idx = ltoh32(c->log.idx);
3057
3058 /* Protect against corrupt value */
3059 if (idx > c->bufsize)
3060 return BCME_ERROR;
3061
3062 /* Skip reading the console buffer if the index pointer has not moved */
3063 if (idx == c->last)
3064 return BCME_OK;
3065
3066 DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
3067 idx, c->last));
3068
3069 /* Read the console buffer data to a local buffer */
3070 /* Only the newly written portion of the buffer is read,
3071 * taking care to handle wrap-around.
3072 */
3073 addr = ltoh32(c->log.buf);
3074
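	/*
	 * Worked example (numbers illustrative): with bufsize=0x400, last=0x3f0
	 * and idx=0x10, the wrap-around branch reads 0x10 bytes from buf+0x3f0
	 * followed by 0x10 bytes from buf+0, giving readlen=0x20.
	 */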
3075 /* wrap around case - write ptr < read ptr */
3076 if (idx < c->last) {
3077 /* from read ptr to end of buffer */
3078 readlen = c->bufsize - c->last;
3079 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3080 addr + c->last, c->buf, readlen)) < 0) {
3081 DHD_ERROR(("conlog: read error[1] ! \n"));
3082 return rv;
3083 }
3084 /* from beginning of buffer to write ptr */
3085 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3086 addr, c->buf + readlen,
3087 idx)) < 0) {
3088 DHD_ERROR(("conlog: read error[2] ! \n"));
3089 return rv;
3090 }
3091 readlen += idx;
3092 } else {
3093 /* non-wraparound case, write ptr > read ptr */
3094 readlen = (uint)idx - c->last;
3095 if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3096 addr + c->last, c->buf, readlen)) < 0) {
3097 DHD_ERROR(("conlog: read error[3] ! \n"));
3098 return rv;
3099 }
3100 }
3101 /* update read ptr */
3102 c->last = idx;
3103
3104 /* now output the read data from the local buffer to the host console */
3105 while (i < readlen) {
3106 for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
3107 ch = c->buf[i];
3108 ++i;
3109 if (ch == '\n')
3110 break;
3111 line[n] = ch;
3112 }
3113
3114 if (n > 0) {
3115 if (line[n - 1] == '\r')
3116 n--;
3117 line[n] = 0;
3118 DHD_FWLOG(("CONSOLE: %s\n", line));
3119 }
3120 }
3121
3122 return BCME_OK;
3123
3124 } /* dhdpcie_bus_readconsole */
3125
3126 void
3127 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3128 {
3129 uint32 n, i;
3130 uint32 addr;
3131 char *console_buffer = NULL;
3132 uint32 console_ptr, console_size, console_index;
3133 uint8 line[CONSOLE_LINE_MAX], ch;
3134 int rv;
3135
3136 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3137
3138 if (bus->is_linkdown) {
3139 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
3140 return;
3141 }
3142
3143 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3144 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3145 (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
3146 goto exit;
3147 }
3148
3149 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3150 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3151 (uint8 *)&console_size, sizeof(console_size))) < 0) {
3152 goto exit;
3153 }
3154
3155 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3156 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3157 (uint8 *)&console_index, sizeof(console_index))) < 0) {
3158 goto exit;
3159 }
3160
3161 console_ptr = ltoh32(console_ptr);
3162 console_size = ltoh32(console_size);
3163 console_index = ltoh32(console_index);
3164
3165 if (console_size > CONSOLE_BUFFER_MAX ||
3166 !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3167 goto exit;
3168 }
3169
3170 if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3171 (uint8 *)console_buffer, console_size)) < 0) {
3172 goto exit;
3173 }
3174
3175 for (i = 0, n = 0; i < console_size; i += n + 1) {
3176 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
3177 ch = console_buffer[(console_index + i + n) % console_size];
3178 if (ch == '\n')
3179 break;
3180 line[n] = ch;
3181 }
3182
3183 if (n > 0) {
3184 if (line[n - 1] == '\r')
3185 n--;
3186 line[n] = 0;
3187 /* Don't use DHD_ERROR macro since we print
3188 * a lot of information quickly. The macro
3189 * will truncate a lot of the printfs
3190 */
3191
3192 DHD_FWLOG(("CONSOLE: %s\n", line));
3193 }
3194 }
3195
3196 exit:
3197 if (console_buffer)
3198 MFREE(bus->dhd->osh, console_buffer, console_size);
3199 return;
3200 }
3201
3202 /**
3203 * Checks whether the dongle has trapped or asserted by reading the pcie shared structure; if so, dumps the trap/assert info and the console buffer.
3204 *
3205 * @return BCME_OK on success
3206 */
3207 static int
3208 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3209 {
3210 int bcmerror = 0;
3211 uint msize = 512;
3212 char *mbuffer = NULL;
3213 uint maxstrlen = 256;
3214 char *str = NULL;
3215 pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3216 struct bcmstrbuf strbuf;
3217 unsigned long flags;
3218 bool dongle_trap_occured = FALSE;
3219
3220 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3221
3222 if (DHD_NOCHECKDIED_ON()) {
3223 return 0;
3224 }
3225
3226 if (data == NULL) {
3227 /*
3228 * Called after a rx ctrl timeout. "data" is NULL.
3229 * allocate memory to trace the trap or assert.
3230 */
3231 size = msize;
3232 mbuffer = data = MALLOC(bus->dhd->osh, msize);
3233
3234 if (mbuffer == NULL) {
3235 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3236 bcmerror = BCME_NOMEM;
3237 goto done;
3238 }
3239 }
3240
3241 if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3242 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3243 bcmerror = BCME_NOMEM;
3244 goto done;
3245 }
3246 DHD_GENERAL_LOCK(bus->dhd, flags);
3247 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3248 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3249
3250 if (MULTIBP_ENAB(bus->sih)) {
3251 dhd_bus_pcie_pwr_req(bus);
3252 }
3253 if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3254 goto done;
3255 }
3256
3257 bcm_binit(&strbuf, data, size);
3258
3259 bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
3260 local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
3261
3262 if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3263 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3264 * (Avoids conflict with real asserts for programmatic parsing of output.)
3265 */
3266 bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3267 }
3268
3269 if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
3270 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3271 * (Avoids conflict with real asserts for programmatic parsing of output.)
3272 */
3273 bcm_bprintf(&strbuf, "No trap%s in dongle",
3274 (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
3275 ?"/assrt" :"");
3276 } else {
3277 if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3278 /* Download assert */
3279 bcm_bprintf(&strbuf, "Dongle assert");
3280 if (bus->pcie_sh->assert_exp_addr != 0) {
3281 str[0] = '\0';
3282 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3283 bus->pcie_sh->assert_exp_addr,
3284 (uint8 *)str, maxstrlen)) < 0) {
3285 goto done;
3286 }
3287
3288 str[maxstrlen - 1] = '\0';
3289 bcm_bprintf(&strbuf, " expr \"%s\"", str);
3290 }
3291
3292 if (bus->pcie_sh->assert_file_addr != 0) {
3293 str[0] = '\0';
3294 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3295 bus->pcie_sh->assert_file_addr,
3296 (uint8 *)str, maxstrlen)) < 0) {
3297 goto done;
3298 }
3299
3300 str[maxstrlen - 1] = '\0';
3301 bcm_bprintf(&strbuf, " file \"%s\"", str);
3302 }
3303
3304 bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
3305 }
3306
3307 if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3308 trap_t *tr = &bus->dhd->last_trap_info;
3309 dongle_trap_occured = TRUE;
3310 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3311 bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
3312 bus->dhd->dongle_trap_occured = TRUE;
3313 goto done;
3314 }
3315 dhd_bus_dump_trap_info(bus, &strbuf);
3316 }
3317 }
3318
3319 if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3320 DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
3321
3322 /* wake up IOCTL wait event */
3323 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3324
3325 dhd_bus_dump_console_buffer(bus);
3326 dhd_prot_debug_info_print(bus->dhd);
3327
3328 #if defined(DHD_FW_COREDUMP)
3329 /* save core dump or write to a file */
3330 if (bus->dhd->memdump_enabled) {
3331 #ifdef DHD_SSSR_DUMP
3332 if (bus->dhd->sssr_inited) {
3333 dhdpcie_sssr_dump(bus->dhd);
3334 }
3335 #endif /* DHD_SSSR_DUMP */
3336 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3337 dhdpcie_mem_dump(bus);
3338 }
3339 #endif /* DHD_FW_COREDUMP */
3340
3341 /* set the trap occurred flag only after all the memdump,
3342 * logdump and sssr dump collection has been scheduled
3343 */
3344 if (dongle_trap_occured) {
3345 bus->dhd->dongle_trap_occured = TRUE;
3346 }
3347
3348 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3349 copy_hang_info_trap(bus->dhd);
3350 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3351 dhd_schedule_reset(bus->dhd);
3352
3353 }
3354
3355 DHD_GENERAL_LOCK(bus->dhd, flags);
3356 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3357 dhd_os_busbusy_wake(bus->dhd);
3358 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3359
3360 done:
3361 if (MULTIBP_ENAB(bus->sih)) {
3362 dhd_bus_pcie_pwr_req_clear(bus);
3363 }
3364 if (mbuffer)
3365 MFREE(bus->dhd->osh, mbuffer, msize);
3366 if (str)
3367 MFREE(bus->dhd->osh, str, maxstrlen);
3368
3369 return bcmerror;
3370 } /* dhdpcie_checkdied */
3371
3372 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3373 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3374 {
3375 int ret = 0;
3376 int size; /* Full mem size */
3377 int start; /* Start address */
3378 int read_size = 0; /* Read size of each iteration */
3379 uint8 *databuf = buf;
3380
3381 if (bus == NULL) {
3382 return;
3383 }
3384
3385 start = bus->dongle_ram_base;
3386 read_size = 4;
3387 /* check for dead bus */
3388 {
3389 uint test_word = 0;
3390 ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
3391 /* if read error or bus timeout */
3392 if (ret || (test_word == 0xFFFFFFFF)) {
3393 return;
3394 }
3395 }
3396
3397 /* Get full mem size */
3398 size = bus->ramsize;
3399 /* Read mem content */
3400 while (size)
3401 {
3402 read_size = MIN(MEMBLOCK, size);
3403 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3404 return;
3405 }
3406
3407 /* Decrement size and increment start address */
3408 size -= read_size;
3409 start += read_size;
3410 databuf += read_size;
3411 }
3412 bus->dhd->soc_ram = buf;
3413 bus->dhd->soc_ram_length = bus->ramsize;
3414 return;
3415 }
3416
3417 #if defined(DHD_FW_COREDUMP)
3418 static int
3419 dhdpcie_mem_dump(dhd_bus_t *bus)
3420 {
3421 int ret = 0;
3422 int size; /* Full mem size */
3423 int start = bus->dongle_ram_base; /* Start address */
3424 int read_size = 0; /* Read size of each iteration */
3425 uint8 *buf = NULL, *databuf = NULL;
3426
3427 #ifdef EXYNOS_PCIE_DEBUG
3428 exynos_pcie_register_dump(1);
3429 #endif /* EXYNOS_PCIE_DEBUG */
3430
3431 #ifdef SUPPORT_LINKDOWN_RECOVERY
3432 if (bus->is_linkdown) {
3433 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
3434 return BCME_ERROR;
3435 }
3436 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3437
3438 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3439 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
3440 return BCME_ERROR;
3441 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3442
3443 /* Get full mem size */
3444 size = bus->ramsize;
3445 buf = dhd_get_fwdump_buf(bus->dhd, size);
3446 if (!buf) {
3447 DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
3448 return BCME_ERROR;
3449 }
3450
3451 /* Read mem content */
3452 DHD_TRACE_HW4(("Dump dongle memory\n"));
3453 databuf = buf;
3454 while (size > 0)
3455 {
3456 read_size = MIN(MEMBLOCK, size);
3457 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
3458 {
3459 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3460 #ifdef DHD_DEBUG_UART
3461 bus->dhd->memdump_success = FALSE;
3462 #endif /* DHD_DEBUG_UART */
3463 return BCME_ERROR;
3464 }
3465 DHD_TRACE(("."));
3466
3467 /* Decrement size and increment start address */
3468 size -= read_size;
3469 start += read_size;
3470 databuf += read_size;
3471 }
3472 #ifdef DHD_DEBUG_UART
3473 bus->dhd->memdump_success = TRUE;
3474 #endif /* DHD_DEBUG_UART */
3475
3476 dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
3477 /* buf, actually soc_ram free handled in dhd_{free,clear} */
3478
3479 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3480 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3481 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3482 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3483
3484 return ret;
3485 }
3486
3487 int
3488 dhd_bus_mem_dump(dhd_pub_t *dhdp)
3489 {
3490 dhd_bus_t *bus = dhdp->bus;
3491 int ret = BCME_ERROR;
3492
3493 if (dhdp->busstate == DHD_BUS_DOWN) {
3494 DHD_ERROR(("%s bus is down\n", __FUNCTION__));
3495 return BCME_ERROR;
3496 }
3497
3498 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
3499 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3500 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3501 return BCME_ERROR;
3502 }
3503
3504 DHD_OS_WAKE_LOCK(dhdp);
3505 ret = dhdpcie_mem_dump(bus);
3506 DHD_OS_WAKE_UNLOCK(dhdp);
3507 return ret;
3508 }
3509 #endif /* DHD_FW_COREDUMP */
3510
3511 int
3512 dhd_socram_dump(dhd_bus_t *bus)
3513 {
3514 #if defined(DHD_FW_COREDUMP)
3515 DHD_OS_WAKE_LOCK(bus->dhd);
3516 dhd_bus_mem_dump(bus->dhd);
3517 DHD_OS_WAKE_UNLOCK(bus->dhd);
3518 return 0;
3519 #else
3520 return -1;
3521 #endif // endif
3522 }
3523
3524 /**
3525 * Transfers bytes between host and dongle memory using PIO mode (direction selected by 'write').
3526 * Parameter 'address' is a backplane address.
3527 */
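/*
 * For example, this is essentially how dhdpcie_mem_dump() reads dongle RAM:
 *   dhdpcie_bus_membytes(bus, FALSE, bus->dongle_ram_base, buf, MEMBLOCK);
 * copies MEMBLOCK bytes starting at the dongle RAM base into 'buf'.
 */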
3528 static int
3529 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
3530 {
3531 uint dsize;
3532 int detect_endian_flag = 0x01;
3533 bool little_endian;
3534
3535 if (write && bus->is_linkdown) {
3536 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3537 return BCME_ERROR;
3538 }
3539
3540 if (MULTIBP_ENAB(bus->sih)) {
3541 dhd_bus_pcie_pwr_req(bus);
3542 }
3543 /* Detect endianness. */
3544 little_endian = *(char *)&detect_endian_flag;
3545
3546 /* In remap mode, adjust address beyond socram and redirect
3547 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
3548 * is not backplane accessible
3549 */
3550
3551 /* Determine initial transfer parameters */
3552 #ifdef DHD_SUPPORT_64BIT
3553 dsize = sizeof(uint64);
3554 #else /* !DHD_SUPPORT_64BIT */
3555 dsize = sizeof(uint32);
3556 #endif /* DHD_SUPPORT_64BIT */
3557
3558 /* Do the transfer(s) */
3559 if (write) {
3560 while (size) {
3561 #ifdef DHD_SUPPORT_64BIT
3562 if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
3563 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
3564 }
3565 #else /* !DHD_SUPPORT_64BIT */
3566 if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
3567 dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
3568 }
3569 #endif /* DHD_SUPPORT_64BIT */
3570 else {
3571 dsize = sizeof(uint8);
3572 dhdpcie_bus_wtcm8(bus, address, *data);
3573 }
3574
3575 /* Adjust for next transfer (if any) */
3576 if ((size -= dsize)) {
3577 data += dsize;
3578 address += dsize;
3579 }
3580 }
3581 } else {
3582 while (size) {
3583 #ifdef DHD_SUPPORT_64BIT
3584 if (size >= sizeof(uint64) && little_endian && !(address % 8))
3585 {
3586 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
3587 }
3588 #else /* !DHD_SUPPORT_64BIT */
3589 if (size >= sizeof(uint32) && little_endian && !(address % 4))
3590 {
3591 *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
3592 }
3593 #endif /* DHD_SUPPORT_64BIT */
3594 else {
3595 dsize = sizeof(uint8);
3596 *data = dhdpcie_bus_rtcm8(bus, address);
3597 }
3598
3599 /* Adjust for next transfer (if any) */
3600 if ((size -= dsize) > 0) {
3601 data += dsize;
3602 address += dsize;
3603 }
3604 }
3605 }
3606 if (MULTIBP_ENAB(bus->sih)) {
3607 dhd_bus_pcie_pwr_req_clear(bus);
3608 }
3609 return BCME_OK;
3610 } /* dhdpcie_bus_membytes */
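/*
 * Illustrative sketch, not part of the driver flow: callers usually move
 * small, naturally aligned chunks through dhdpcie_bus_membytes(), as the
 * memdump loop in dhdpcie_mem_dump() above does. Reading one 32-bit word
 * from a dongle backplane address could look like this ('sh_addr' is a
 * hypothetical placeholder; dongle memory is little-endian, hence LTOH32):
 *
 *	uint32 word;
 *	int err = dhdpcie_bus_membytes(bus, FALSE, sh_addr,
 *		(uint8 *)&word, sizeof(word));
 *	if (err != BCME_OK)
 *		DHD_ERROR(("membytes read failed: %d\n", err));
 *	word = LTOH32(word);
 */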
3611
3612 /**
3613	 * Transfers transmit (ethernet) packets that were queued in the (flow controlled) flow ring queue
3614 * to the (non flow controlled) flow ring.
3615 */
3616 int BCMFASTPATH
3617 dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
3618 {
3619 flow_ring_node_t *flow_ring_node;
3620 int ret = BCME_OK;
3621 #ifdef DHD_LOSSLESS_ROAMING
3622 dhd_pub_t *dhdp = bus->dhd;
3623 #endif // endif
3624 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
3625
3626	/* Sanity check flow_id */
3627 if (flow_id >= bus->max_submission_rings) {
3628 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
3629 flow_id, bus->max_submission_rings));
3630 return 0;
3631 }
3632
3633 flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
3634
3635 if (flow_ring_node->prot_info == NULL) {
3636 DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
3637 return BCME_NOTREADY;
3638 }
3639
3640 #ifdef DHD_LOSSLESS_ROAMING
3641 if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
3642 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
3643 __FUNCTION__, flow_ring_node->flow_info.tid));
3644 return BCME_OK;
3645 }
3646 #endif /* DHD_LOSSLESS_ROAMING */
3647
3648 {
3649 unsigned long flags;
3650 void *txp = NULL;
3651 flow_queue_t *queue;
3652 #ifdef DHD_LOSSLESS_ROAMING
3653 struct ether_header *eh;
3654 uint8 *pktdata;
3655 #endif /* DHD_LOSSLESS_ROAMING */
3656
3657 queue = &flow_ring_node->queue; /* queue associated with flow ring */
3658
3659 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3660
3661 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
3662 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3663 return BCME_NOTREADY;
3664 }
3665
3666 while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
3667 PKTORPHAN(txp);
3668
3669 /*
3670 * Modifying the packet length caused P2P cert failures.
3671 * Specifically on test cases where a packet of size 52 bytes
3672		 * was injected, the sniffer capture showed 62 bytes, which caused
3673		 * the cert tests to fail. So the change below is made
3674		 * Router specific only.
3675 */
3676
3677 #ifdef DHDTCPACK_SUPPRESS
3678 if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
3679 ret = dhd_tcpack_check_xmit(bus->dhd, txp);
3680 if (ret != BCME_OK) {
3681 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
3682 __FUNCTION__));
3683 }
3684 }
3685 #endif /* DHDTCPACK_SUPPRESS */
3686 #ifdef DHD_LOSSLESS_ROAMING
3687 pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
3688 eh = (struct ether_header *) pktdata;
3689 if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
3690 uint8 prio = (uint8)PKTPRIO(txp);
3691 /* Restore to original priority for 802.1X packet */
3692 if (prio == PRIO_8021D_NC) {
3693 PKTSETPRIO(txp, dhdp->prio_8021x);
3694 }
3695 }
3696 #endif /* DHD_LOSSLESS_ROAMING */
3697 /* Attempt to transfer packet over flow ring */
3698 ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
3699 if (ret != BCME_OK) { /* may not have resources in flow ring */
3700				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
3701 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3702 /* reinsert at head */
3703 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
3704 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3705
3706 /* If we are able to requeue back, return success */
3707 return BCME_OK;
3708 }
3709 }
3710
3711 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3712
3713 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3714 }
3715
3716 return ret;
3717 } /* dhd_bus_schedule_queue */
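/*
 * Note on the scheduling loop above: packets are drained from the flow ring
 * queue under the flow ring lock and handed to dhd_prot_txdata(). If the
 * flow ring runs out of resources, the packet is re-inserted at the head of
 * the queue and BCME_OK is returned, so the caller simply retries later;
 * nothing is dropped at this layer.
 */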
3718
3719 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
3720 int BCMFASTPATH
3721 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
3722 {
3723 uint16 flowid;
3724 #ifdef IDLE_TX_FLOW_MGMT
3725 uint8 node_status;
3726 #endif /* IDLE_TX_FLOW_MGMT */
3727 flow_queue_t *queue;
3728 flow_ring_node_t *flow_ring_node;
3729 unsigned long flags;
3730 int ret = BCME_OK;
3731 void *txp_pend = NULL;
3732
3733 if (!bus->dhd->flowid_allocator) {
3734		DHD_ERROR(("%s: Flow ring not initialized yet \n", __FUNCTION__));
3735 goto toss;
3736 }
3737
3738 flowid = DHD_PKT_GET_FLOWID(txp);
3739
3740 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
3741
3742 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
3743 __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
3744
3745 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3746 if ((flowid >= bus->dhd->num_flow_rings) ||
3747 #ifdef IDLE_TX_FLOW_MGMT
3748 (!flow_ring_node->active))
3749 #else
3750 (!flow_ring_node->active) ||
3751 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
3752 (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
3753 #endif /* IDLE_TX_FLOW_MGMT */
3754 {
3755 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3756 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
3757 __FUNCTION__, flowid, flow_ring_node->status,
3758 flow_ring_node->active));
3759 ret = BCME_ERROR;
3760 goto toss;
3761 }
3762
3763 #ifdef IDLE_TX_FLOW_MGMT
3764 node_status = flow_ring_node->status;
3765
3766	/* handle the different status states here */
3767 switch (node_status)
3768 {
3769 case FLOW_RING_STATUS_OPEN:
3770
3771 if (bus->enable_idle_flowring_mgmt) {
3772 /* Move the node to the head of active list */
3773 dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
3774 }
3775 break;
3776
3777 case FLOW_RING_STATUS_SUSPENDED:
3778 DHD_INFO(("Need to Initiate TX Flow resume\n"));
3779 /* Issue resume_ring request */
3780 dhd_bus_flow_ring_resume_request(bus,
3781 flow_ring_node);
3782 break;
3783
3784 case FLOW_RING_STATUS_CREATE_PENDING:
3785 case FLOW_RING_STATUS_RESUME_PENDING:
3786			/* Don't do anything here */
3787 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
3788 node_status));
3789 break;
3790
3791 case FLOW_RING_STATUS_DELETE_PENDING:
3792 default:
3793 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
3794 flowid, node_status));
3795 /* error here!! */
3796 ret = BCME_ERROR;
3797 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3798 goto toss;
3799 }
3800 /* Now queue the packet */
3801 #endif /* IDLE_TX_FLOW_MGMT */
3802
3803 queue = &flow_ring_node->queue; /* queue associated with flow ring */
3804
3805 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
3806 txp_pend = txp;
3807
3808 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3809
3810 if (flow_ring_node->status) {
3811 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
3812 __FUNCTION__, flowid, flow_ring_node->status,
3813 flow_ring_node->active));
3814 if (txp_pend) {
3815 txp = txp_pend;
3816 goto toss;
3817 }
3818 return BCME_OK;
3819 }
3820 ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
3821
3822 /* If we have anything pending, try to push into q */
3823 if (txp_pend) {
3824 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3825
3826 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
3827 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3828 txp = txp_pend;
3829 goto toss;
3830 }
3831
3832 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3833 }
3834
3835 return ret;
3836
3837 toss:
3838 DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
3839 PKTCFREE(bus->dhd->osh, txp, TRUE);
3840 return ret;
3841 } /* dhd_bus_txdata */
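/*
 * Note on dhd_bus_txdata() above: the packet is first enqueued to the
 * per-flow queue under the flow ring lock, and dhd_bus_schedule_queue() is
 * only called once the flow ring status indicates OPEN. If the ring is
 * still being created or resumed, the packet stays queued (BCME_OK is
 * returned) and is drained later when the flow ring response arrives.
 */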
3842
3843 void
3844 dhd_bus_stop_queue(struct dhd_bus *bus)
3845 {
3846 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
3847 }
3848
3849 void
3850 dhd_bus_start_queue(struct dhd_bus *bus)
3851 {
3852 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
3853 }
3854
3855 /* Device console input function */
3856 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
3857 {
3858 dhd_bus_t *bus = dhd->bus;
3859 uint32 addr, val;
3860 int rv;
3861 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
3862 if (bus->console_addr == 0)
3863 return BCME_UNSUPPORTED;
3864
3865 /* Don't allow input if dongle is in reset */
3866 if (bus->dhd->dongle_reset) {
3867 return BCME_NOTREADY;
3868 }
3869
3870 /* Zero cbuf_index */
3871 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
3872 val = htol32(0);
3873 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3874 goto done;
3875
3876 /* Write message into cbuf */
3877 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
3878 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
3879 goto done;
3880
3881 /* Write length into vcons_in */
3882 addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
3883 val = htol32(msglen);
3884 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3885 goto done;
3886
3887 /* generate an interrupt to dongle to indicate that it needs to process cons command */
3888 dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
3889 done:
3890 return rv;
3891 } /* dhd_bus_console_in */
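/*
 * Illustrative use of dhd_bus_console_in(), a sketch rather than driver
 * flow: the caller passes a console command string, which is copied into
 * the dongle cbuf and signalled via H2D_HOST_CONS_INT. The command text
 * below is a hypothetical example only:
 *
 *	char cmd[] = "some_console_cmd\n";
 *	int err = dhd_bus_console_in(dhd, (uchar *)cmd, strlen(cmd));
 *	if (err < 0)
 *		DHD_ERROR(("console input failed: %d\n", err));
 */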
3892
3893 /**
3894 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
3895 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
3896 */
3897 void BCMFASTPATH
3898 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
3899 {
3900 dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
3901 }
3902
3903 /** 'offset' is a backplane address */
3904 void
3905 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
3906 {
3907 if (bus->is_linkdown) {
3908 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3909 return;
3910 } else {
3911 W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
3912 }
3913 }
3914
3915 uint8
3916 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
3917 {
3918 volatile uint8 data;
3919 if (bus->is_linkdown) {
3920 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3921 data = (uint8)-1;
3922 } else {
3923 data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
3924 }
3925 return data;
3926 }
3927
3928 void
3929 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
3930 {
3931 if (bus->is_linkdown) {
3932 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3933 return;
3934 } else {
3935 W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
3936 }
3937 }
3938 void
3939 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
3940 {
3941 if (bus->is_linkdown) {
3942 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3943 return;
3944 } else {
3945 W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
3946 }
3947 }
3948 #ifdef DHD_SUPPORT_64BIT
3949 void
3950 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
3951 {
3952 if (bus->is_linkdown) {
3953 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3954 return;
3955 } else {
3956 W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
3957 }
3958 }
3959 #endif /* DHD_SUPPORT_64BIT */
3960
3961 uint16
3962 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
3963 {
3964 volatile uint16 data;
3965 if (bus->is_linkdown) {
3966 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3967 data = (uint16)-1;
3968 } else {
3969 data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
3970 }
3971 return data;
3972 }
3973
3974 uint32
3975 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
3976 {
3977 volatile uint32 data;
3978 if (bus->is_linkdown) {
3979 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3980 data = (uint32)-1;
3981 } else {
3982 data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
3983 }
3984 return data;
3985 }
3986
3987 #ifdef DHD_SUPPORT_64BIT
3988 uint64
3989 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
3990 {
3991 volatile uint64 data;
3992 if (bus->is_linkdown) {
3993 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
3994 data = (uint64)-1;
3995 } else {
3996 data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
3997 }
3998 return data;
3999 }
4000 #endif /* DHD_SUPPORT_64BIT */
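/*
 * Note on the TCM accessors above: when the PCIe link is flagged as down,
 * reads return all-ones ((uintN)-1) and writes are silently dropped, so an
 * all-ones value read from shared memory can indicate a dead link rather
 * than real dongle data. Callers that care should also check
 * bus->is_linkdown.
 */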
4001
4002 /** A snippet of dongle memory is shared between host and dongle */
4003 void
4004 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
4005 {
4006 uint64 long_data;
4007 ulong addr; /* dongle address */
4008
4009 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
4010
4011 if (bus->is_linkdown) {
4012 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4013 return;
4014 }
4015
4016 if (MULTIBP_ENAB(bus->sih)) {
4017 dhd_bus_pcie_pwr_req(bus);
4018 }
4019 switch (type) {
4020 case D2H_DMA_SCRATCH_BUF:
4021 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
4022 long_data = HTOL64(*(uint64 *)data);
4023 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4024 if (dhd_msg_level & DHD_INFO_VAL) {
4025 prhex(__FUNCTION__, data, len);
4026 }
4027 break;
4028
4029 case D2H_DMA_SCRATCH_BUF_LEN :
4030 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
4031 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4032 if (dhd_msg_level & DHD_INFO_VAL) {
4033 prhex(__FUNCTION__, data, len);
4034 }
4035 break;
4036
4037 case H2D_DMA_INDX_WR_BUF:
4038 long_data = HTOL64(*(uint64 *)data);
4039 addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
4040 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4041 if (dhd_msg_level & DHD_INFO_VAL) {
4042 prhex(__FUNCTION__, data, len);
4043 }
4044 break;
4045
4046 case H2D_DMA_INDX_RD_BUF:
4047 long_data = HTOL64(*(uint64 *)data);
4048 addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4049 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4050 if (dhd_msg_level & DHD_INFO_VAL) {
4051 prhex(__FUNCTION__, data, len);
4052 }
4053 break;
4054
4055 case D2H_DMA_INDX_WR_BUF:
4056 long_data = HTOL64(*(uint64 *)data);
4057 addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4058 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4059 if (dhd_msg_level & DHD_INFO_VAL) {
4060 prhex(__FUNCTION__, data, len);
4061 }
4062 break;
4063
4064 case D2H_DMA_INDX_RD_BUF:
4065 long_data = HTOL64(*(uint64 *)data);
4066 addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4067 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4068 if (dhd_msg_level & DHD_INFO_VAL) {
4069 prhex(__FUNCTION__, data, len);
4070 }
4071 break;
4072
4073 case H2D_IFRM_INDX_WR_BUF:
4074 long_data = HTOL64(*(uint64 *)data);
4075 addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4076 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4077 if (dhd_msg_level & DHD_INFO_VAL) {
4078 prhex(__FUNCTION__, data, len);
4079 }
4080 break;
4081
4082 case RING_ITEM_LEN :
4083 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4084 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4085 break;
4086
4087 case RING_MAX_ITEMS :
4088 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4089 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4090 break;
4091
4092 case RING_BUF_ADDR :
4093 long_data = HTOL64(*(uint64 *)data);
4094 addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4095 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4096 if (dhd_msg_level & DHD_INFO_VAL) {
4097 prhex(__FUNCTION__, data, len);
4098 }
4099 break;
4100
4101 case RING_WR_UPD :
4102 addr = bus->ring_sh[ringid].ring_state_w;
4103 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4104 break;
4105
4106 case RING_RD_UPD :
4107 addr = bus->ring_sh[ringid].ring_state_r;
4108 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4109 break;
4110
4111 case D2H_MB_DATA:
4112 addr = bus->d2h_mb_data_ptr_addr;
4113 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4114 break;
4115
4116 case H2D_MB_DATA:
4117 addr = bus->h2d_mb_data_ptr_addr;
4118 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4119 break;
4120
4121 case HOST_API_VERSION:
4122 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4123 dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4124 break;
4125
4126 case DNGL_TO_HOST_TRAP_ADDR:
4127 long_data = HTOL64(*(uint64 *)data);
4128 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4129 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4130 DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
4131 break;
4132
4133 #ifdef D2H_MINIDUMP
4134 case DNGL_TO_HOST_TRAP_ADDR_LEN:
4135 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, device_trap_debug_buffer_len);
4136 dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4137 break;
4138 #endif /* D2H_MINIDUMP */
4139
4140 default:
4141 break;
4142 }
4143 if (MULTIBP_ENAB(bus->sih)) {
4144 dhd_bus_pcie_pwr_req_clear(bus);
4145 }
4146 } /* dhd_bus_cmn_writeshared */
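/*
 * Illustrative sketch (assumes 'ringid' names an open ring): the protocol
 * layer uses dhd_bus_cmn_writeshared() to publish its local ring indices,
 * e.g. pushing a new write index for a submission ring ('ring' below is a
 * hypothetical msgbuf ring object):
 *
 *	uint16 w_idx = ring->wr;
 *	dhd_bus_cmn_writeshared(bus, &w_idx, sizeof(w_idx), RING_WR_UPD, ringid);
 *
 * The helper performs the host-to-little-endian conversion and looks up the
 * dongle address (bus->ring_sh[ringid].ring_state_w) internally.
 */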
4147
4148 /** A snippet of dongle memory is shared between host and dongle */
4149 void
4150 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
4151 {
4152 ulong addr; /* dongle address */
4153
4154 if (MULTIBP_ENAB(bus->sih)) {
4155 dhd_bus_pcie_pwr_req(bus);
4156 }
4157 switch (type) {
4158 case RING_WR_UPD :
4159 addr = bus->ring_sh[ringid].ring_state_w;
4160 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4161 break;
4162
4163 case RING_RD_UPD :
4164 addr = bus->ring_sh[ringid].ring_state_r;
4165 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4166 break;
4167
4168 case TOTAL_LFRAG_PACKET_CNT :
4169 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
4170 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4171 break;
4172
4173 case H2D_MB_DATA:
4174 addr = bus->h2d_mb_data_ptr_addr;
4175 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4176 break;
4177
4178 case D2H_MB_DATA:
4179 addr = bus->d2h_mb_data_ptr_addr;
4180 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4181 break;
4182
4183 case MAX_HOST_RXBUFS :
4184 addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
4185 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4186 break;
4187
4188 default :
4189 break;
4190 }
4191 if (MULTIBP_ENAB(bus->sih)) {
4192 dhd_bus_pcie_pwr_req_clear(bus);
4193 }
4194 }
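/*
 * The reader mirrors the writer above; fetching the dongle's read index for
 * a ring could look like this (sketch only, 'ringid' assumed valid):
 *
 *	uint16 r_idx;
 *	dhd_bus_cmn_readshared(bus, &r_idx, RING_RD_UPD, ringid);
 *
 * Values are converted from the dongle's little-endian layout with
 * LTOH16/LTOH32 before being handed back to the caller.
 */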
4195
4196 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
4197 {
4198 return ((pciedev_shared_t*)bus->pcie_sh)->flags;
4199 }
4200
4201 void
4202 dhd_bus_clearcounts(dhd_pub_t *dhdp)
4203 {
4204 }
4205
4206 /**
4207 * @param params input buffer, NULL for 'set' operation.
4208 * @param plen length of 'params' buffer, 0 for 'set' operation.
4209 * @param arg output buffer
4210 */
4211 int
4212 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
4213 void *params, int plen, void *arg, int len, bool set)
4214 {
4215 dhd_bus_t *bus = dhdp->bus;
4216 const bcm_iovar_t *vi = NULL;
4217 int bcmerror = BCME_UNSUPPORTED;
4218 int val_size;
4219 uint32 actionid;
4220
4221 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4222
4223 ASSERT(name);
4224 ASSERT(len >= 0);
4225 if (!name || len < 0)
4226 return BCME_BADARG;
4227
4228 /* Get MUST have return space */
4229 ASSERT(set || (arg && len));
4230 if (!(set || (arg && len)))
4231 return BCME_BADARG;
4232
4233 /* Set does NOT take qualifiers */
4234 ASSERT(!set || (!params && !plen));
4235 if (!(!set || (!params && !plen)))
4236 return BCME_BADARG;
4237
4238 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4239 name, (set ? "set" : "get"), len, plen));
4240
4241 if (MULTIBP_ENAB(bus->sih)) {
4242 dhd_bus_pcie_pwr_req(bus);
4243 }
4244
4245 /* Look up var locally; if not found pass to host driver */
4246 if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
4247 goto exit;
4248 }
4249
4250 /* set up 'params' pointer in case this is a set command so that
4251 * the convenience int and bool code can be common to set and get
4252 */
4253 if (params == NULL) {
4254 params = arg;
4255 plen = len;
4256 }
4257
4258 if (vi->type == IOVT_VOID)
4259 val_size = 0;
4260 else if (vi->type == IOVT_BUFFER)
4261 val_size = len;
4262 else
4263 /* all other types are integer sized */
4264 val_size = sizeof(int);
4265
4266 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4267 bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
4268
4269 exit:
4270	/* DEVRESET_QUIESCE/DEVRESET_ON includes a dongle re-attach, which re-initializes
4271	 * the pwr_req_ref count to 0; clearing the power request here would then cause a
4272	 * pwr_req_ref count mismatch in the pwr req clear function and a hang.
4273	 * In this case, bypass the pwr req clear.
4274 */
4275 if (bcmerror == BCME_DNGL_DEVRESET) {
4276 bcmerror = BCME_OK;
4277 } else {
4278 if (MULTIBP_ENAB(bus->sih)) {
4279 dhd_bus_pcie_pwr_req_clear(bus);
4280 }
4281 }
4282 return bcmerror;
4283 } /* dhd_bus_iovar_op */
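/*
 * Illustrative call pattern for dhd_bus_iovar_op(), a sketch only; the
 * iovar name "memsize" is assumed to be registered in dhdpcie_iovars:
 *
 *	int32 memsize = 0;
 *	int err = dhd_bus_iovar_op(dhdp, "memsize", NULL, 0,
 *		&memsize, sizeof(memsize), FALSE);
 *
 * For a GET, 'arg'/'len' receive the result; for a SET, the value travels
 * in 'arg' and no separate qualifier buffer ('params'/'plen') is allowed.
 */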
4284
4285 #ifdef BCM_BUZZZ
4286 #include <bcm_buzzz.h>
4287
4288 int
4289 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
4290 const int num_counters)
4291 {
4292 int bytes = 0;
4293 uint32 ctr;
4294 uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
4295 uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
4296
4297 /* Compute elapsed counter values per counter event type */
4298 for (ctr = 0U; ctr < num_counters; ctr++) {
4299 prev[ctr] = core[ctr];
4300 curr[ctr] = *log++;
4301 core[ctr] = curr[ctr]; /* saved for next log */
4302
4303 if (curr[ctr] < prev[ctr])
4304 delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
4305 else
4306 delta[ctr] = (curr[ctr] - prev[ctr]);
4307
4308 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
4309 }
4310
4311 return bytes;
4312 }
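/*
 * Note on the delta computation above: the hardware counters are
 * free-running and may wrap, so when the current sample is smaller than the
 * previous one the elapsed count is reconstructed as curr + (~0U - prev),
 * i.e. (approximately) the distance across the 32-bit wrap.
 */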
4313
4314 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
4315 uint32 u32;
4316 uint8 u8[4];
4317 struct {
4318 uint8 cpicnt;
4319 uint8 exccnt;
4320 uint8 sleepcnt;
4321 uint8 lsucnt;
4322 };
4323 } cm3_cnts_t;
4324
4325 int
4326 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
4327 {
4328 int bytes = 0;
4329
4330 uint32 cyccnt, instrcnt;
4331 cm3_cnts_t cm3_cnts;
4332 uint8 foldcnt;
4333
4334 { /* 32bit cyccnt */
4335 uint32 curr, prev, delta;
4336 prev = core[0]; curr = *log++; core[0] = curr;
4337 if (curr < prev)
4338 delta = curr + (~0U - prev);
4339 else
4340 delta = (curr - prev);
4341
4342 bytes += sprintf(p + bytes, "%12u ", delta);
4343 cyccnt = delta;
4344 }
4345
4346 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
4347 int i;
4348 uint8 max8 = ~0;
4349 cm3_cnts_t curr, prev, delta;
4350 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
4351 for (i = 0; i < 4; i++) {
4352 if (curr.u8[i] < prev.u8[i])
4353 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
4354 else
4355 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
4356 bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
4357 }
4358 cm3_cnts.u32 = delta.u32;
4359 }
4360
4361 { /* Extract the foldcnt from arg0 */
4362 uint8 curr, prev, delta, max8 = ~0;
4363 bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
4364 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
4365 if (curr < prev)
4366 delta = curr + (max8 - prev);
4367 else
4368 delta = (curr - prev);
4369 bytes += sprintf(p + bytes, "%4u ", delta);
4370 foldcnt = delta;
4371 }
4372
4373 instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
4374 + cm3_cnts.u8[3]) + foldcnt;
4375 if (instrcnt > 0xFFFFFF00)
4376 bytes += sprintf(p + bytes, "[%10s] ", "~");
4377 else
4378 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
4379 return bytes;
4380 }
4381
4382 int
4383 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
4384 {
4385 int bytes = 0;
4386 bcm_buzzz_arg0_t arg0;
4387 static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
4388
4389 if (buzzz->counters == 6) {
4390 bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
4391 log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4392 } else {
4393 bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
4394 log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
4395 }
4396
4397 /* Dump the logged arguments using the registered formats */
4398 arg0.u32 = *log++;
4399
4400 switch (arg0.klog.args) {
4401 case 0:
4402 bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
4403 break;
4404 case 1:
4405 {
4406 uint32 arg1 = *log++;
4407 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
4408 break;
4409 }
4410 case 2:
4411 {
4412 uint32 arg1, arg2;
4413 arg1 = *log++; arg2 = *log++;
4414 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
4415 break;
4416 }
4417 case 3:
4418 {
4419 uint32 arg1, arg2, arg3;
4420 arg1 = *log++; arg2 = *log++; arg3 = *log++;
4421 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
4422 break;
4423 }
4424 case 4:
4425 {
4426 uint32 arg1, arg2, arg3, arg4;
4427 arg1 = *log++; arg2 = *log++;
4428 arg3 = *log++; arg4 = *log++;
4429 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
4430 break;
4431 }
4432 default:
4433		printf("Maximum of 4 arguments supported\n");
4434 break;
4435 }
4436
4437 bytes += sprintf(p + bytes, "\n");
4438
4439 return bytes;
4440 }
4441
4442 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
4443 {
4444 int i;
4445 uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
4446 void * log;
4447
4448 for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
4449 core[i] = 0;
4450 }
4451
4452 log_sz = buzzz_p->log_sz;
4453
4454 part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
4455
4456 if (buzzz_p->wrap == TRUE) {
4457 part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
4458 total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
4459 } else {
4460 part2 = 0U;
4461 total = buzzz_p->count;
4462 }
4463
4464 if (total == 0U) {
4465 printf("bcm_buzzz_dump total<%u> done\n", total);
4466 return;
4467 } else {
4468 printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
4469 total, part2, part1);
4470 }
4471
4472 if (part2) { /* with wrap */
4473 log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
4474 while (part2--) { /* from cur to end : part2 */
4475 p[0] = '\0';
4476 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4477 printf("%s", p);
4478 log = (void*)((size_t)log + buzzz_p->log_sz);
4479 }
4480 }
4481
4482 log = (void*)buffer_p;
4483 while (part1--) {
4484 p[0] = '\0';
4485 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4486 printf("%s", p);
4487 log = (void*)((size_t)log + buzzz_p->log_sz);
4488 }
4489
4490 printf("bcm_buzzz_dump done.\n");
4491 }
4492
4493 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
4494 {
4495 bcm_buzzz_t * buzzz_p = NULL;
4496 void * buffer_p = NULL;
4497 char * page_p = NULL;
4498 pciedev_shared_t *sh;
4499 int ret = 0;
4500
4501 if (bus->dhd->busstate != DHD_BUS_DATA) {
4502 return BCME_UNSUPPORTED;
4503 }
4504 if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
4505 printf("Page memory allocation failure\n");
4506 goto done;
4507 }
4508 if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
4509 printf("BCM BUZZZ memory allocation failure\n");
4510 goto done;
4511 }
4512
4513 ret = dhdpcie_readshared(bus);
4514 if (ret < 0) {
4515 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
4516 goto done;
4517 }
4518
4519 sh = bus->pcie_sh;
4520
4521 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
4522
4523 if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
4524
4525 dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
4526 (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
4527
4528 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
4529 "count<%u> status<%u> wrap<%u>\n"
4530 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
4531 (int)sh->buzz_dbg_ptr,
4532 (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
4533 buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
4534 buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
4535 buzzz_p->buffer_sz, buzzz_p->log_sz);
4536
4537 if (buzzz_p->count == 0) {
4538 printf("Empty dongle BUZZZ trace\n\n");
4539 goto done;
4540 }
4541
4542 /* Allocate memory for trace buffer and format strings */
4543 buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
4544 if (buffer_p == NULL) {
4545 printf("Buffer memory allocation failure\n");
4546 goto done;
4547 }
4548
4549 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
4550 dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
4551 (uint8 *)buffer_p, buzzz_p->buffer_sz);
4552
4553 /* Process and display the trace using formatted output */
4554
4555 {
4556 int ctr;
4557 for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
4558 printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
4559 }
4560 printf("<code execution point>\n");
4561 }
4562
4563 dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
4564
4565 printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
4566
4567 MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
4568 }
4569
4570 done:
4571
4572 if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
4573	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); /* free before buzzz_p, its size is read from it */
4574	if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
4575
4576 return BCME_OK;
4577 }
4578 #endif /* BCM_BUZZZ */
4579
4580 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
4581 ((sih)->buscoretype == PCIE2_CORE_ID))
4582
4583 #define PCIE_FLR_CAPAB_BIT 28
4584 #define PCIE_FUNCTION_LEVEL_RESET_BIT 15
4585
4586	/* Change delays only for QT HW; FPGA and silicon use the same delay */
4587 #ifdef BCMQT_HW
4588 #define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
4589 #define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
4590 #else
4591 #define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */
4592 #define DHD_SSRESET_STATUS_RETRY_DELAY 40u
4593 #endif // endif
4594 #define DHD_SSRESET_STATUS_RETRIES 50u
4595
4596 int
4597 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
4598 {
4599 bool flr_capab;
4600 uint val;
4601 int retry = 0;
4602
4603 DHD_ERROR(("******** Perform FLR ********\n"));
4604
4605 /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
4606 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
4607 flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
4608 DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
4609 PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
4610 if (!flr_capab) {
4611 DHD_ERROR(("Chip does not support FLR\n"));
4612 return BCME_UNSUPPORTED;
4613 }
4614
4615 /* Save pcie config space */
4616 DHD_ERROR(("Save Pcie Config Space\n"));
4617 DHD_PCIE_CONFIG_SAVE(bus);
4618
4619 /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
4620 DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4621 PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
4622 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
4623 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4624 val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
4625 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4626 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
4627
4628 /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
4629 DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
4630 OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
4631
4632 if (force_fail) {
4633 DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
4634 PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
4635 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
4636 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
4637 val));
4638 val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
4639 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
4640 val));
4641 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
4642
4643 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
4644 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
4645 val));
4646 }
4647
4648 /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
4649 DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4650 PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
4651 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
4652 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4653 val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
4654 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4655 OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
4656
4657 /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
4658 DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
4659 "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
4660 do {
4661 val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
4662 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
4663 PCIE_CFG_SUBSYSTEM_CONTROL, val));
4664 val = val & (1 << PCIE_SSRESET_STATUS_BIT);
4665 OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
4666 } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
4667
4668 if (val) {
4669 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4670 PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
4671 /* User has to fire the IOVAR again, if force_fail is needed */
4672 if (force_fail) {
4673 bus->flr_force_fail = FALSE;
4674 DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
4675 }
4676 return BCME_ERROR;
4677 }
4678
4679 /* Restore pcie config space */
4680 DHD_ERROR(("Restore Pcie Config Space\n"));
4681 DHD_PCIE_CONFIG_RESTORE(bus);
4682
4683	DHD_ERROR(("******** FLR Succeeded ********\n"));
4684
4685 return BCME_OK;
4686 }
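/*
 * Summary of the FLR sequence above: check the FLR capability bit in
 * PCIE_CFG_DEVICE_CAPABILITY, save PCIe config space, set the FLR bit in
 * PCIE_CFG_DEVICE_CONTROL, wait DHD_FUNCTION_LEVEL_RESET_DELAY msec, clear
 * the FLR bit again, then poll PCIE_SSRESET_STATUS_BIT in
 * PCIE_CFG_SUBSYSTEM_CONTROL until it clears (or the retry budget runs out)
 * before restoring the saved config space.
 */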
4687
4688 #ifdef DHD_USE_BP_RESET
4689 #define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
4690
4691 #define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
4692 #define DHD_BP_RESET_STATUS_RETRIES 50u
4693
4694 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
4695 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
4696 int
4697 dhd_bus_perform_bp_reset(struct dhd_bus *bus)
4698 {
4699 uint val;
4700 int retry = 0;
4701 uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
4702 int ret = BCME_OK;
4703 bool cond;
4704
4705 DHD_ERROR(("******** Perform BP reset ********\n"));
4706
4707 /* Disable ASPM */
4708 DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
4709 PCIECFGREG_LINK_STATUS_CTRL));
4710 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
4711 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4712 val = val & (~PCIE_ASPM_ENAB);
4713 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4714 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
4715
4716 /* wait for delay usec */
4717 DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
4718 OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
4719
4720 /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
4721 DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
4722 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
4723 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
4724 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
4725 val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
4726 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
4727 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
4728
4729	/* Wait till the backplane reset is asserted, i.e.
4730	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
4731	 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
4732	 * otherwise the DAR register will return the previous (stale) value
4733 */
4734 DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
4735 "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
4736 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
4737 do {
4738 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
4739 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
4740 cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
4741 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
4742 } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
4743
4744 if (cond) {
4745 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4746 PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
4747 ret = BCME_ERROR;
4748 goto aspm_enab;
4749 }
4750
4751 /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
4752 DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
4753 "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
4754 PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
4755 do {
4756 val = si_corereg(bus->sih, bus->sih->buscoreidx,
4757 dar_clk_ctrl_status_reg, 0, 0);
4758 DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
4759 dar_clk_ctrl_status_reg, val));
4760 cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
4761 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
4762 } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
4763
4764 if (cond) {
4765 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4766 dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
4767 ret = BCME_ERROR;
4768 }
4769
4770 aspm_enab:
4771 /* Enable ASPM */
4772 DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
4773 PCIECFGREG_LINK_STATUS_CTRL));
4774 val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
4775 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4776 val = val | (PCIE_ASPM_L1_ENAB);
4777 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
4778 OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
4779
4780	DHD_ERROR(("******** BP reset Succeeded ********\n"));
4781
4782 return ret;
4783 }
4784 #endif /* DHD_USE_BP_RESET */
4785
4786 int
4787 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
4788 {
4789 dhd_bus_t *bus = dhdp->bus;
4790 int bcmerror = 0;
4791 unsigned long flags;
4792 unsigned long flags_bus;
4793 #ifdef CONFIG_ARCH_MSM
4794 int retry = POWERUP_MAX_RETRY;
4795 #endif /* CONFIG_ARCH_MSM */
4796
4797 if (flag == TRUE) { /* Turn off WLAN */
4798 /* Removing Power */
4799 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
4800
4801 bus->dhd->up = FALSE;
4802
4803 /* wait for other contexts to finish -- if required a call
4804 * to OSL_DELAY for 1s can be added to give other contexts
4805 * a chance to finish
4806 */
4807 dhdpcie_advertise_bus_cleanup(bus->dhd);
4808
4809 if (bus->dhd->busstate != DHD_BUS_DOWN) {
4810 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4811 atomic_set(&bus->dhd->block_bus, TRUE);
4812 dhd_flush_rx_tx_wq(bus->dhd);
4813 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4814
4815 #ifdef BCMPCIE_OOB_HOST_WAKE
4816 /* Clean up any pending host wake IRQ */
4817 dhd_bus_oob_intr_set(bus->dhd, FALSE);
4818 dhd_bus_oob_intr_unregister(bus->dhd);
4819 #endif /* BCMPCIE_OOB_HOST_WAKE */
4820 dhd_os_wd_timer(dhdp, 0);
4821 dhd_bus_stop(bus, TRUE);
4822 if (bus->intr) {
4823 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
4824 dhdpcie_bus_intr_disable(bus);
4825 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
4826 dhdpcie_free_irq(bus);
4827 }
4828 dhd_deinit_bus_lock(bus);
4829 dhd_bus_release_dongle(bus);
4830 dhdpcie_bus_free_resource(bus);
4831 bcmerror = dhdpcie_bus_disable_device(bus);
4832 if (bcmerror) {
4833 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4834 __FUNCTION__, bcmerror));
4835 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4836 atomic_set(&bus->dhd->block_bus, FALSE);
4837 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4838 }
4839 /* Clean up protocol data after Bus Master Enable bit clear
4840 * so that host can safely unmap DMA and remove the allocated buffers
4841			 * from the PKTID MAP. Some Application Processors with a System MMU
4842			 * trigger a kernel panic when they detect an access to DMA-unmapped
4843			 * memory from a device behind the System MMU. Therefore a kernel
4844			 * panic can happen here, since it is possible for the dongle to
4845			 * access DMA-unmapped memory after dhd_prot_reset() has been
4846			 * called.
4847 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4848 * should be located after the dhdpcie_bus_disable_device().
4849 */
4850 dhd_prot_reset(dhdp);
4851 dhd_clear(dhdp);
4852 #ifdef CONFIG_ARCH_MSM
4853 bcmerror = dhdpcie_bus_clock_stop(bus);
4854 if (bcmerror) {
4855 DHD_ERROR(("%s: host clock stop failed: %d\n",
4856 __FUNCTION__, bcmerror));
4857 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4858 atomic_set(&bus->dhd->block_bus, FALSE);
4859 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4860 goto done;
4861 }
4862 #endif /* CONFIG_ARCH_MSM */
4863 DHD_GENERAL_LOCK(bus->dhd, flags);
4864 bus->dhd->busstate = DHD_BUS_DOWN;
4865 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4866 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4867 atomic_set(&bus->dhd->block_bus, FALSE);
4868 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4869 } else {
4870 if (bus->intr) {
4871 dhdpcie_free_irq(bus);
4872 }
4873 #ifdef BCMPCIE_OOB_HOST_WAKE
4874 /* Clean up any pending host wake IRQ */
4875 dhd_bus_oob_intr_set(bus->dhd, FALSE);
4876 dhd_bus_oob_intr_unregister(bus->dhd);
4877 #endif /* BCMPCIE_OOB_HOST_WAKE */
4878 dhd_dpc_kill(bus->dhd);
4879 if (!bus->no_bus_init) {
4880 dhd_bus_release_dongle(bus);
4881 dhdpcie_bus_free_resource(bus);
4882 bcmerror = dhdpcie_bus_disable_device(bus);
4883 if (bcmerror) {
4884 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4885 __FUNCTION__, bcmerror));
4886 }
4887
4888 /* Clean up protocol data after Bus Master Enable bit clear
4889 * so that host can safely unmap DMA and remove the allocated
4890			 * buffers from the PKTID MAP. Some Application Processors
4891			 * with a System MMU trigger a kernel panic when they detect
4892			 * an access to DMA-unmapped memory from a device behind the
4893			 * System MMU.
4894			 * Therefore a kernel panic can happen here, since it is
4895			 * possible for the dongle to access DMA-unmapped memory after
4896			 * dhd_prot_reset() has been called.
4897 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4898 * should be located after the dhdpcie_bus_disable_device().
4899 */
4900 dhd_prot_reset(dhdp);
4901 dhd_clear(dhdp);
4902 } else {
4903 bus->no_bus_init = FALSE;
4904 }
4905 #ifdef CONFIG_ARCH_MSM
4906 bcmerror = dhdpcie_bus_clock_stop(bus);
4907 if (bcmerror) {
4908 DHD_ERROR(("%s: host clock stop failed: %d\n",
4909 __FUNCTION__, bcmerror));
4910 goto done;
4911 }
4912 #endif /* CONFIG_ARCH_MSM */
4913 }
4914
4915 bus->dhd->dongle_reset = TRUE;
4916 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
4917
4918 } else { /* Turn on WLAN */
4919 if (bus->dhd->busstate == DHD_BUS_DOWN) {
4920 /* Powering On */
4921 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
4922 #ifdef CONFIG_ARCH_MSM
4923 while (--retry) {
4924 bcmerror = dhdpcie_bus_clock_start(bus);
4925 if (!bcmerror) {
4926 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
4927 __FUNCTION__));
4928 break;
4929 } else {
4930 OSL_SLEEP(10);
4931 }
4932 }
4933
4934 if (bcmerror && !retry) {
4935 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
4936 __FUNCTION__, bcmerror));
4937 goto done;
4938 }
4939 #endif /* CONFIG_ARCH_MSM */
4940 bus->is_linkdown = 0;
4941 #ifdef SUPPORT_LINKDOWN_RECOVERY
4942 bus->read_shm_fail = FALSE;
4943 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4944 bcmerror = dhdpcie_bus_enable_device(bus);
4945 if (bcmerror) {
4946 DHD_ERROR(("%s: host configuration restore failed: %d\n",
4947 __FUNCTION__, bcmerror));
4948 goto done;
4949 }
4950
4951 bcmerror = dhdpcie_bus_alloc_resource(bus);
4952 if (bcmerror) {
4953				DHD_ERROR(("%s: dhdpcie_bus_alloc_resource failed: %d\n",
4954 __FUNCTION__, bcmerror));
4955 goto done;
4956 }
4957
4958 bcmerror = dhdpcie_bus_dongle_attach(bus);
4959 if (bcmerror) {
4960 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
4961 __FUNCTION__, bcmerror));
4962 goto done;
4963 }
4964
4965 bcmerror = dhd_bus_request_irq(bus);
4966 if (bcmerror) {
4967 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
4968 __FUNCTION__, bcmerror));
4969 goto done;
4970 }
4971
4972 bus->dhd->dongle_reset = FALSE;
4973
4974 bcmerror = dhd_bus_start(dhdp);
4975 if (bcmerror) {
4976 DHD_ERROR(("%s: dhd_bus_start: %d\n",
4977 __FUNCTION__, bcmerror));
4978 goto done;
4979 }
4980
4981 bus->dhd->up = TRUE;
4982			/* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
4983 if (bus->dhd->dhd_watchdog_ms_backup) {
4984 DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
4985 __FUNCTION__));
4986 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
4987 }
4988 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
4989 } else {
4990 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
4991 goto done;
4992 }
4993 }
4994
4995 done:
4996 if (bcmerror) {
4997 DHD_GENERAL_LOCK(bus->dhd, flags);
4998 bus->dhd->busstate = DHD_BUS_DOWN;
4999 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5000 }
5001 return bcmerror;
5002 }
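/*
 * Note on dhd_bus_devreset() above: flag TRUE powers WLAN off (advertise
 * bus cleanup, stop the bus, free the IRQ, release the dongle, disable the
 * PCIe device, and only then reset protocol state so that no DMA-unmapped
 * access can happen), while flag FALSE powers it back on in the reverse
 * order (enable the device, re-allocate resources, re-attach the dongle,
 * request the IRQ and restart the bus).
 */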
5003
5004 static int
5005 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
5006 {
5007 int h2d_support, d2h_support;
5008
5009 d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
5010 h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
5011 return (d2h_support | (h2d_support << 1));
5012
5013 }
5014 int
5015 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
5016 {
5017 int bcmerror = 0;
5018 /* Can change it only during initialization/FW download */
5019 if (dhd->busstate == DHD_BUS_DOWN) {
5020 if ((int_val > 3) || (int_val < 0)) {
5021 DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5022 bcmerror = BCME_BADARG;
5023 } else {
5024 dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
5025 dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
5026 dhd->dma_ring_upd_overwrite = TRUE;
5027 }
5028 } else {
5029 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5030 __FUNCTION__));
5031 bcmerror = BCME_NOTDOWN;
5032 }
5033
5034 return bcmerror;
5035
5036 }
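/*
 * The get/set pair above packs the DMA ring-index support flags into two
 * bits: bit 0 = D2H ring update support, bit 1 = H2D ring update support.
 * As a sketch, a value of 3 enables both directions and 0 disables DMA'd
 * indices entirely; the setting can only be changed while the bus is down,
 * i.e. before firmware download.
 */
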
5037 /**
5038 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5039 *
5040 * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
5041 * @param params input buffer
5042 * @param plen length in [bytes] of input buffer 'params'
5043 * @param arg output buffer
5044 * @param len length in [bytes] of output buffer 'arg'
5045 */
5046 static int
5047 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
5048 void *params, int plen, void *arg, int len, int val_size)
5049 {
5050 int bcmerror = 0;
5051 int32 int_val = 0;
5052 int32 int_val2 = 0;
5053 int32 int_val3 = 0;
5054 bool bool_val = 0;
5055
5056 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
5057 __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
5058
5059 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
5060 goto exit;
5061
5062 if (plen >= (int)sizeof(int_val))
5063 bcopy(params, &int_val, sizeof(int_val));
5064
5065 if (plen >= (int)sizeof(int_val) * 2)
5066 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
5067
5068 if (plen >= (int)sizeof(int_val) * 3)
5069 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
5070
5071 bool_val = (int_val != 0) ? TRUE : FALSE;
5072
5073 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5074 if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
5075 actionid == IOV_GVAL(IOV_DEVRESET))) {
5076 bcmerror = BCME_NOTREADY;
5077 goto exit;
5078 }
5079
5080 switch (actionid) {
5081
5082 case IOV_SVAL(IOV_VARS):
5083 bcmerror = dhdpcie_downloadvars(bus, arg, len);
5084 break;
5085 case IOV_SVAL(IOV_PCIE_LPBK):
5086 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
5087 break;
5088
5089 case IOV_SVAL(IOV_PCIE_DMAXFER): {
5090 int int_val4 = 0;
5091 int wait = 0;
5092 int core_num = 0;
5093 if (plen >= (int)sizeof(int_val) * 4) {
5094 bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
5095 &int_val4, sizeof(int_val4));
5096 }
5097 if (plen >= (int)sizeof(int_val) * 5) {
5098 bcopy((void*)((uintptr)params + 4 * sizeof(int_val)),
5099 &wait, sizeof(wait));
5100 }
5101 if (plen >= (int)sizeof(core_num) * 6) {
5102 bcopy((void*)((uintptr)params + 5 * sizeof(core_num)),
5103 &core_num, sizeof(core_num));
5104 }
5105 bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3,
5106 int_val4, core_num, wait);
5107 if (wait && bcmerror >= 0) {
5108 /* get the status of the dma transfer */
5109 int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd);
5110 bcopy(&int_val4, params, sizeof(int_val));
5111 }
5112 break;
5113 }
5114
5115 case IOV_GVAL(IOV_PCIE_DMAXFER): {
5116 int dma_status = 0;
5117 dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd);
5118 bcopy(&dma_status, arg, val_size);
5119 bcmerror = BCME_OK;
5120 break;
5121 }
5122
5123 case IOV_GVAL(IOV_PCIE_SUSPEND):
5124 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
5125 bcopy(&int_val, arg, val_size);
5126 break;
5127
5128 case IOV_SVAL(IOV_PCIE_SUSPEND):
5129 if (bool_val) { /* Suspend */
5130 int ret;
5131 unsigned long flags;
5132
5133 /*
5134 * If some other context is busy, wait until they are done,
5135 * before starting suspend
5136 */
5137 ret = dhd_os_busbusy_wait_condition(bus->dhd,
5138 &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
5139 if (ret == 0) {
5140				DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
5141 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
5142 return BCME_BUSY;
5143 }
5144
5145 DHD_GENERAL_LOCK(bus->dhd, flags);
5146 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
5147 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5148 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5149 dhdpcie_bus_suspend(bus, TRUE, TRUE);
5150 #else
5151 dhdpcie_bus_suspend(bus, TRUE);
5152 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5153
5154 DHD_GENERAL_LOCK(bus->dhd, flags);
5155 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
5156 dhd_os_busbusy_wake(bus->dhd);
5157 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5158 } else { /* Resume */
5159 unsigned long flags;
5160 DHD_GENERAL_LOCK(bus->dhd, flags);
5161 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
5162 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5163
5164 dhdpcie_bus_suspend(bus, FALSE);
5165
5166 DHD_GENERAL_LOCK(bus->dhd, flags);
5167 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
5168 dhd_os_busbusy_wake(bus->dhd);
5169 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5170 }
5171 break;
5172
5173 case IOV_GVAL(IOV_MEMSIZE):
5174 int_val = (int32)bus->ramsize;
5175 bcopy(&int_val, arg, val_size);
5176 break;
5177
5178 /* Debug related. Dumps core registers or one of the dongle memory */
5179 case IOV_GVAL(IOV_DUMP_DONGLE):
5180 {
5181 dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
5182 dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
5183 uint32 *p = ddo->val;
5184 const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
5185
5186 if (plen < sizeof(ddi) || len < sizeof(ddo)) {
5187 bcmerror = BCME_BADARG;
5188 break;
5189 }
5190
5191 switch (ddi.type) {
5192 case DUMP_DONGLE_COREREG:
5193 ddo->n_bytes = 0;
5194
5195 if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
5196 break; // beyond last core: core enumeration ended
5197 }
5198
5199 ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
5200 ddo->address += ddi.offset; // BP address at which this dump starts
5201
5202 ddo->id = si_coreid(bus->sih);
5203 ddo->rev = si_corerev(bus->sih);
5204
5205 while (ddi.offset < max_offset &&
5206 sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
5207 *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
5208 ddi.offset += sizeof(uint32);
5209 ddo->n_bytes += sizeof(uint32);
5210 }
5211 break;
5212 default:
5213 // TODO: implement d11 SHM/TPL dumping
5214 bcmerror = BCME_BADARG;
5215 break;
5216 }
5217 break;
5218 }
5219
5220 /* Debug related. Returns a string with dongle capabilities */
5221 case IOV_GVAL(IOV_DNGL_CAPS):
5222 {
5223 strncpy(arg, bus->dhd->fw_capabilities,
5224 MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
5225 ((char*)arg)[len - 1] = '\0';
5226 break;
5227 }
5228
5229 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
5230 case IOV_SVAL(IOV_GDB_SERVER):
5231 /* debugger_*() functions may sleep, so cannot hold spinlock */
5232 DHD_PERIM_UNLOCK(bus->dhd);
5233 if (int_val > 0) {
5234 debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
5235 } else {
5236 debugger_close();
5237 }
5238 DHD_PERIM_LOCK(bus->dhd);
5239 break;
5240 #endif /* DEBUGGER || DHD_DSCOPE */
5241
5242 #ifdef BCM_BUZZZ
5243 /* Dump dongle side buzzz trace to console */
5244 case IOV_GVAL(IOV_BUZZZ_DUMP):
5245 bcmerror = dhd_buzzz_dump_dngl(bus);
5246 break;
5247 #endif /* BCM_BUZZZ */
5248
5249 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
5250 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
5251 break;
5252
5253 case IOV_GVAL(IOV_RAMSIZE):
5254 int_val = (int32)bus->ramsize;
5255 bcopy(&int_val, arg, val_size);
5256 break;
5257
5258 case IOV_SVAL(IOV_RAMSIZE):
5259 bus->ramsize = int_val;
5260 bus->orig_ramsize = int_val;
5261 break;
5262
5263 case IOV_GVAL(IOV_RAMSTART):
5264 int_val = (int32)bus->dongle_ram_base;
5265 bcopy(&int_val, arg, val_size);
5266 break;
5267
5268 case IOV_GVAL(IOV_CC_NVMSHADOW):
5269 {
5270 struct bcmstrbuf dump_b;
5271
5272 bcm_binit(&dump_b, arg, len);
5273 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
5274 break;
5275 }
5276
5277 case IOV_GVAL(IOV_SLEEP_ALLOWED):
5278 bool_val = bus->sleep_allowed;
5279 bcopy(&bool_val, arg, val_size);
5280 break;
5281
5282 case IOV_SVAL(IOV_SLEEP_ALLOWED):
5283 bus->sleep_allowed = bool_val;
5284 break;
5285
5286 case IOV_GVAL(IOV_DONGLEISOLATION):
5287 int_val = bus->dhd->dongle_isolation;
5288 bcopy(&int_val, arg, val_size);
5289 break;
5290
5291 case IOV_SVAL(IOV_DONGLEISOLATION):
5292 bus->dhd->dongle_isolation = bool_val;
5293 break;
5294
5295 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
5296 int_val = bus->ltrsleep_on_unload;
5297 bcopy(&int_val, arg, val_size);
5298 break;
5299
5300 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
5301 bus->ltrsleep_on_unload = bool_val;
5302 break;
5303
5304 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
5305 {
5306 struct bcmstrbuf dump_b;
5307 bcm_binit(&dump_b, arg, len);
5308 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
5309 break;
5310 }
5311 case IOV_GVAL(IOV_DMA_RINGINDICES):
5312 {
5313 int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
5314 bcopy(&int_val, arg, sizeof(int_val));
5315 break;
5316 }
5317 case IOV_SVAL(IOV_DMA_RINGINDICES):
5318 bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
5319 break;
5320
5321 case IOV_GVAL(IOV_METADATA_DBG):
5322 int_val = dhd_prot_metadata_dbg_get(bus->dhd);
5323 bcopy(&int_val, arg, val_size);
5324 break;
5325 case IOV_SVAL(IOV_METADATA_DBG):
5326 dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
5327 break;
5328
5329 case IOV_GVAL(IOV_RX_METADATALEN):
5330 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
5331 bcopy(&int_val, arg, val_size);
5332 break;
5333
5334 case IOV_SVAL(IOV_RX_METADATALEN):
5335 if (int_val > 64) {
5336 bcmerror = BCME_BUFTOOLONG;
5337 break;
5338 }
5339 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
5340 break;
5341
5342 case IOV_SVAL(IOV_TXP_THRESHOLD):
5343 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
5344 break;
5345
5346 case IOV_GVAL(IOV_TXP_THRESHOLD):
5347 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
5348 bcopy(&int_val, arg, val_size);
5349 break;
5350
5351 case IOV_SVAL(IOV_DB1_FOR_MB):
5352 if (int_val)
5353 bus->db1_for_mb = TRUE;
5354 else
5355 bus->db1_for_mb = FALSE;
5356 break;
5357
5358 case IOV_GVAL(IOV_DB1_FOR_MB):
5359 if (bus->db1_for_mb)
5360 int_val = 1;
5361 else
5362 int_val = 0;
5363 bcopy(&int_val, arg, val_size);
5364 break;
5365
5366 case IOV_GVAL(IOV_TX_METADATALEN):
5367 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
5368 bcopy(&int_val, arg, val_size);
5369 break;
5370
5371 case IOV_SVAL(IOV_TX_METADATALEN):
5372 if (int_val > 64) {
5373 bcmerror = BCME_BUFTOOLONG;
5374 break;
5375 }
5376 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
5377 break;
5378
5379 case IOV_SVAL(IOV_DEVRESET):
5380 switch (int_val) {
5381 case DHD_BUS_DEVRESET_ON:
5382 bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5383 break;
5384 case DHD_BUS_DEVRESET_OFF:
5385 bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5386 break;
5387 case DHD_BUS_DEVRESET_FLR:
5388 bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
5389 break;
5390 case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
5391 bus->flr_force_fail = TRUE;
5392 break;
5393 default:
5394 DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
5395 break;
5396 }
5397 break;
5398 case IOV_SVAL(IOV_FORCE_FW_TRAP):
5399 if (bus->dhd->busstate == DHD_BUS_DATA)
5400 dhdpcie_fw_trap(bus);
5401 else {
5402 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
5403 bcmerror = BCME_NOTUP;
5404 }
5405 break;
5406 case IOV_GVAL(IOV_FLOW_PRIO_MAP):
5407 int_val = bus->dhd->flow_prio_map_type;
5408 bcopy(&int_val, arg, val_size);
5409 break;
5410
5411 case IOV_SVAL(IOV_FLOW_PRIO_MAP):
5412 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
5413 bcopy(&int_val, arg, val_size);
5414 break;
5415
5416 #ifdef DHD_PCIE_RUNTIMEPM
5417 case IOV_GVAL(IOV_IDLETIME):
5418 int_val = bus->idletime;
5419 bcopy(&int_val, arg, val_size);
5420 break;
5421
5422 case IOV_SVAL(IOV_IDLETIME):
5423 if (int_val < 0) {
5424 bcmerror = BCME_BADARG;
5425 } else {
5426 bus->idletime = int_val;
5427 if (bus->idletime) {
5428 DHD_ENABLE_RUNTIME_PM(bus->dhd);
5429 } else {
5430 DHD_DISABLE_RUNTIME_PM(bus->dhd);
5431 }
5432 }
5433 break;
5434 #endif /* DHD_PCIE_RUNTIMEPM */
5435
5436 case IOV_GVAL(IOV_TXBOUND):
5437 int_val = (int32)dhd_txbound;
5438 bcopy(&int_val, arg, val_size);
5439 break;
5440
5441 case IOV_SVAL(IOV_TXBOUND):
5442 dhd_txbound = (uint)int_val;
5443 break;
5444
5445 case IOV_SVAL(IOV_H2D_MAILBOXDATA):
5446 dhdpcie_send_mb_data(bus, (uint)int_val);
5447 break;
5448
5449 case IOV_SVAL(IOV_INFORINGS):
5450 dhd_prot_init_info_rings(bus->dhd);
5451 break;
5452
5453 case IOV_SVAL(IOV_H2D_PHASE):
5454 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5455 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5456 __FUNCTION__));
5457 bcmerror = BCME_NOTDOWN;
5458 break;
5459 }
5460 if (int_val)
5461 bus->dhd->h2d_phase_supported = TRUE;
5462 else
5463 bus->dhd->h2d_phase_supported = FALSE;
5464 break;
5465
5466 case IOV_GVAL(IOV_H2D_PHASE):
5467 int_val = (int32) bus->dhd->h2d_phase_supported;
5468 bcopy(&int_val, arg, val_size);
5469 break;
5470
5471 case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5472 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5473 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5474 __FUNCTION__));
5475 bcmerror = BCME_NOTDOWN;
5476 break;
5477 }
5478 if (int_val)
5479 bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
5480 else
5481 bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
5482 break;
5483
5484 case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5485 int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
5486 bcopy(&int_val, arg, val_size);
5487 break;
5488
5489 case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
5490 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5491 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5492 __FUNCTION__));
5493 bcmerror = BCME_NOTDOWN;
5494 break;
5495 }
5496 dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
5497 break;
5498
5499 case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
5500 int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
5501 bcopy(&int_val, arg, val_size);
5502 break;
5503
5504 case IOV_GVAL(IOV_RXBOUND):
5505 int_val = (int32)dhd_rxbound;
5506 bcopy(&int_val, arg, val_size);
5507 break;
5508
5509 case IOV_SVAL(IOV_RXBOUND):
5510 dhd_rxbound = (uint)int_val;
5511 break;
5512
5513 case IOV_GVAL(IOV_TRAPDATA):
5514 {
5515 struct bcmstrbuf dump_b;
5516 bcm_binit(&dump_b, arg, len);
5517 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
5518 break;
5519 }
5520
5521 case IOV_GVAL(IOV_TRAPDATA_RAW):
5522 {
5523 struct bcmstrbuf dump_b;
5524 bcm_binit(&dump_b, arg, len);
5525 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
5526 break;
5527 }
5528 case IOV_SVAL(IOV_HANGREPORT):
5529 bus->dhd->hang_report = bool_val;
5530 DHD_ERROR(("%s: Set hang_report as %d\n",
5531 __FUNCTION__, bus->dhd->hang_report));
5532 break;
5533
5534 case IOV_GVAL(IOV_HANGREPORT):
5535 int_val = (int32)bus->dhd->hang_report;
5536 bcopy(&int_val, arg, val_size);
5537 break;
5538
5539 case IOV_SVAL(IOV_CTO_PREVENTION):
5540 {
5541 uint32 pcie_lnkst;
5542
5543 if (bus->sih->buscorerev < 19) {
5544 bcmerror = BCME_UNSUPPORTED;
5545 break;
5546 }
5547 si_corereg(bus->sih, bus->sih->buscoreidx,
5548 OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
5549
5550 pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
5551 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
5552
5553 if ((bus->sih->buscorerev == 19) &&
5554 (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
5555 PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
5556 bcmerror = BCME_UNSUPPORTED;
5557 break;
5558 }
5559 bus->cto_enable = bool_val;
5560 dhdpcie_cto_init(bus, bus->cto_enable);
5561 DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
5562 __FUNCTION__, bus->cto_enable));
5563 }
5564 break;
5565
5566 case IOV_GVAL(IOV_CTO_PREVENTION):
5567 if (bus->sih->buscorerev < 19) {
5568 bcmerror = BCME_UNSUPPORTED;
5569 break;
5570 }
5571 int_val = (int32)bus->cto_enable;
5572 bcopy(&int_val, arg, val_size);
5573 break;
5574
5575 case IOV_SVAL(IOV_CTO_THRESHOLD):
5576 {
5577 if (bus->sih->buscorerev < 19) {
5578 bcmerror = BCME_UNSUPPORTED;
5579 break;
5580 }
5581 bus->cto_threshold = (uint32)int_val;
5582 }
5583 break;
5584
5585 case IOV_GVAL(IOV_CTO_THRESHOLD):
5586 if (bus->sih->buscorerev < 19) {
5587 bcmerror = BCME_UNSUPPORTED;
5588 break;
5589 }
5590 if (bus->cto_threshold)
5591 int_val = (int32)bus->cto_threshold;
5592 else
5593 int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
5594
5595 bcopy(&int_val, arg, val_size);
5596 break;
5597
5598 case IOV_SVAL(IOV_PCIE_WD_RESET):
5599 if (bool_val) {
5600 uint32 wd_en = (bus->sih->buscorerev == 66) ? WD_SSRESET_PCIE_F0_EN :
5601 (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
5602 pcie_watchdog_reset(bus->osh, bus->sih,
5603 WD_ENABLE_MASK, wd_en);
5604 }
5605 break;
5606
5607 case IOV_GVAL(IOV_IDMA_ENABLE):
5608 int_val = bus->idma_enabled;
5609 bcopy(&int_val, arg, val_size);
5610 break;
5611 case IOV_SVAL(IOV_IDMA_ENABLE):
5612 bus->idma_enabled = (bool)int_val;
5613 break;
5614 case IOV_GVAL(IOV_IFRM_ENABLE):
5615 int_val = bus->ifrm_enabled;
5616 bcopy(&int_val, arg, val_size);
5617 break;
5618 case IOV_SVAL(IOV_IFRM_ENABLE):
5619 bus->ifrm_enabled = (bool)int_val;
5620 break;
5621 case IOV_GVAL(IOV_CLEAR_RING):
5622 bcopy(&int_val, arg, val_size);
5623 dhd_flow_rings_flush(bus->dhd, 0);
5624 break;
5625 case IOV_GVAL(IOV_DAR_ENABLE):
5626 int_val = bus->dar_enabled;
5627 bcopy(&int_val, arg, val_size);
5628 break;
5629 case IOV_SVAL(IOV_DAR_ENABLE):
5630 bus->dar_enabled = (bool)int_val;
5631 break;
5632 #ifdef D2H_MINIDUMP
5633 case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
5634 int_val = bus->d2h_minidump_override;
5635 bcopy(&int_val, arg, val_size);
5636 break;
5637 case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
5638 /* Can change it only before FW download */
5639 if (bus->dhd->busstate != DHD_BUS_DOWN) {
5640 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5641 __FUNCTION__));
5642 bcmerror = BCME_NOTDOWN;
5643 break;
5644 }
5645 bus->d2h_minidump_override = (bool)int_val;
5646 break;
5647 #endif /* D2H_MINIDUMP */
5648 default:
5649 bcmerror = BCME_UNSUPPORTED;
5650 break;
5651 }
5652
5653 exit:
5654 return bcmerror;
5655 } /* dhdpcie_bus_doiovar */
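
/*
 * Illustrative sketch (compiled out, not part of the driver): the recurring
 * shape of a GVAL/SVAL pair handled in dhdpcie_bus_doiovar() above. A GVAL
 * case copies a 32-bit value back to the caller with bcopy(); an SVAL case
 * consumes the value already parsed into int_val. IOV_EXAMPLE,
 * bus->example_flag and the 'actionid' controlling expression are assumed
 * names used only for illustration.
 */
#if 0
	switch (actionid) {
	case IOV_GVAL(IOV_EXAMPLE):
		int_val = (int32)bus->example_flag;	/* fetch current value */
		bcopy(&int_val, arg, val_size);		/* return it to the caller */
		break;
	case IOV_SVAL(IOV_EXAMPLE):
		bus->example_flag = (bool)int_val;	/* apply the new value */
		break;
	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}
#endif /* illustrative sketch */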
5656
5657 /** Transfers bytes from host to dongle using pio mode */
5658 static int
5659 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
5660 {
5661 if (bus->dhd == NULL) {
5662 DHD_ERROR(("bus not inited\n"));
5663 return 0;
5664 }
5665 if (bus->dhd->prot == NULL) {
5666 DHD_ERROR(("prot is not inited\n"));
5667 return 0;
5668 }
5669 if (bus->dhd->busstate != DHD_BUS_DATA) {
5670 DHD_ERROR(("not in a ready state to start LPBK\n"));
5671 return 0;
5672 }
5673 dhdmsgbuf_lpbk_req(bus->dhd, len);
5674 return 0;
5675 }
5676
5677 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
5678 void
5679 dhd_bus_hostready(struct dhd_bus *bus)
5680 {
5681 if (!bus->dhd->d2h_hostrdy_supported) {
5682 return;
5683 }
5684
5685 if (bus->is_linkdown) {
5686 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
5687 return;
5688 }
5689
5690 DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
5691 dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
5692 if (DAR_PWRREQ(bus)) {
5693 dhd_bus_pcie_pwr_req(bus);
5694 }
5695 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
5696 bus->hostready_count ++;
5697 DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
5698 }
5699
5700 /* Clear INTSTATUS */
5701 void
5702 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
5703 {
5704 uint32 intstatus = 0;
5705 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
5706 (bus->sih->buscorerev == 2)) {
5707 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
5708 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
5709 } else {
5710 /* this is a PCIE core register, not a config register */
5711 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
5712 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
5713 intstatus);
5714 }
5715 }
5716
5717 int
5718 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5719 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
5720 #else
5721 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
5722 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5723 {
5724 int timeleft;
5725 int rc = 0;
5726 unsigned long flags, flags_bus;
5727 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5728 int d3_read_retry = 0;
5729 uint32 d2h_mb_data = 0;
5730 uint32 zero = 0;
5731 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5732
5733 if (bus->dhd == NULL) {
5734 DHD_ERROR(("bus not inited\n"));
5735 return BCME_ERROR;
5736 }
5737 if (bus->dhd->prot == NULL) {
5738 DHD_ERROR(("prot is not inited\n"));
5739 return BCME_ERROR;
5740 }
5741
5742 if (dhd_query_bus_erros(bus->dhd)) {
5743 return BCME_ERROR;
5744 }
5745
5746 DHD_GENERAL_LOCK(bus->dhd, flags);
5747 if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
5748 DHD_ERROR(("not in a ready state\n"));
5749 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5750 return BCME_ERROR;
5751 }
5752 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5753 if (bus->dhd->dongle_reset) {
5754 DHD_ERROR(("Dongle is in reset state.\n"));
5755 return -EIO;
5756 }
5757
5758 /* Check whether we are already in the requested state.
5759 * state=TRUE means Suspend
5760 * state=FALSE means Resume
5761 */
5762 if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
5763 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
5764 return BCME_OK;
5765 } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
5766 DHD_ERROR(("Bus is already in RESUME state.\n"));
5767 return BCME_OK;
5768 }
5769
5770 if (state) {
5771 int idle_retry = 0;
5772 int active;
5773
5774 if (bus->is_linkdown) {
5775 DHD_ERROR(("%s: PCIe link was down, state=%d\n",
5776 __FUNCTION__, state));
5777 return BCME_ERROR;
5778 }
5779
5780 /* Suspend */
5781 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
5782
5783 bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
5784 if (bus->dhd->dhd_watchdog_ms_backup) {
5785 DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
5786 __FUNCTION__));
5787 dhd_os_wd_timer(bus->dhd, 0);
5788 }
5789
5790 DHD_GENERAL_LOCK(bus->dhd, flags);
5791 if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
5792 DHD_ERROR(("Tx request has not completed\n"));
5793 bus->dhd->busstate = DHD_BUS_DATA;
5794 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5795 return -EBUSY;
5796 }
5797
5798 bus->last_suspend_start_time = OSL_LOCALTIME_NS();
5799
5800 /* stop all interface network queue. */
5801 dhd_bus_stop_queue(bus);
5802 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5803
5804 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5805 if (byint) {
5806 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5807 /* Clear wait_for_d3_ack before sending D3_INFORM */
5808 bus->wait_for_d3_ack = 0;
5809 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
5810
5811 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
5812 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5813 } else {
5814 /* Clear wait_for_d3_ack before sending D3_INFORM */
5815 bus->wait_for_d3_ack = 0;
5816 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
5817 while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
5818 dhdpcie_handle_mb_data(bus);
5819 usleep_range(1000, 1500);
5820 d3_read_retry++;
5821 }
5822 }
5823 #else
5824 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5825
5826 /* Clear wait_for_d3_ack before sending D3_INFORM */
5827 bus->wait_for_d3_ack = 0;
5828 /*
5829 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
5830 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
5831 * inside atomic context, so that no more DBs will be
5832 * rung after sending D3_INFORM
5833 */
5834 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
5835
5836 /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
5837
5838 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
5839
5840 #ifdef DHD_RECOVER_TIMEOUT
5841 if (bus->wait_for_d3_ack == 0) {
5842 /* If wait_for_d3_ack was not updated because D2H MB was not received */
5843 uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
5844 bus->pcie_mailbox_int, 0, 0);
5845 int host_irq_disabled = dhdpcie_irq_disabled(bus);
5846 if ((intstatus) && (intstatus != (uint32)-1) &&
5847 (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
5848 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
5849 " host_irq_disabled=%d\n",
5850 __FUNCTION__, intstatus, host_irq_disabled));
5851 dhd_pcie_intr_count_dump(bus->dhd);
5852 dhd_print_tasklet_status(bus->dhd);
5853 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
5854 !bus->use_mailbox) {
5855 dhd_prot_process_ctrlbuf(bus->dhd);
5856 } else {
5857 dhdpcie_handle_mb_data(bus);
5858 }
5859 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
5860 /* Clear Interrupts */
5861 dhdpcie_bus_clear_intstatus(bus);
5862 }
5863 } /* bus->wait_for_d3_ack was 0 */
5864 #endif /* DHD_RECOVER_TIMEOUT */
5865
5866 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5867 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5868
5869 /* To allow threads that got pre-empted to complete.
5870 */
5871 while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
5872 (idle_retry < MAX_WKLK_IDLE_CHECK)) {
5873 OSL_SLEEP(1);
5874 idle_retry++;
5875 }
5876
5877 if (bus->wait_for_d3_ack) {
5878 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
5879 /* Got D3 Ack. Suspend the bus */
5880 if (active) {
5881 DHD_ERROR(("%s(): Suspend failed because of wakelock, "
5882 "restoring Dongle to D0\n", __FUNCTION__));
5883
5884 if (bus->dhd->dhd_watchdog_ms_backup) {
5885 DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
5886 __FUNCTION__));
5887 dhd_os_wd_timer(bus->dhd,
5888 bus->dhd->dhd_watchdog_ms_backup);
5889 }
5890
5891 /*
5892 * Dongle still thinks that it has to be in D3 state until
5893 * it gets a D0 Inform, but we are backing off from suspend.
5894 * Ensure that Dongle is brought back to D0.
5895 *
5896 * Bringing back Dongle from D3 Ack state to D0 state is a
5897 * 2 step process: a D0 Inform is sent as a MB interrupt to
5898 * bring it out of D3 Ack state, and doorbell 1 (hostready) is
5899 * rung afterwards. Both messages have to be sent.
5900 */
5901
5902 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
5903 bus->wait_for_d3_ack = 0;
5904
5905 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5906 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
5907 /* Enable back the intmask which was cleared in DPC
5908 * after getting D3_ACK.
5909 */
5910 bus->resume_intr_enable_count++;
5911
5912 /* For Linux, macOS, etc. (other than NDIS), re-enable the dongle
5913 * interrupts using intmask and host interrupts
5914 * which were disabled in the dhdpcie_bus_isr()->
5915 * dhd_bus_handle_d3_ack().
5916 */
5917 /* Enable back interrupt using Intmask!! */
5918 dhdpcie_bus_intr_enable(bus);
5919 /* Enable back interrupt from Host side!! */
5920 dhdpcie_enable_irq(bus);
5921
5922 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5923
5924 if (bus->use_d0_inform) {
5925 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5926 dhdpcie_send_mb_data(bus,
5927 (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
5928 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5929 }
5930 /* ring doorbell 1 (hostready) */
5931 dhd_bus_hostready(bus);
5932
5933 DHD_GENERAL_LOCK(bus->dhd, flags);
5934 bus->dhd->busstate = DHD_BUS_DATA;
5935 /* resume all interface network queue. */
5936 dhd_bus_start_queue(bus);
5937 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5938 rc = BCME_ERROR;
5939 } else {
5940 /* Actual Suspend after no wakelock */
5941 /* At this time bus->bus_low_power_state will be
5942 * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
5943 * in dhd_bus_handle_d3_ack()
5944 */
5945 if (bus->use_d0_inform &&
5946 (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
5947 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
5948 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
5949 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
5950 }
5951
5952 #if defined(BCMPCIE_OOB_HOST_WAKE)
5953 dhdpcie_oob_intr_set(bus, TRUE);
5954 #endif /* BCMPCIE_OOB_HOST_WAKE */
5955
5956 DHD_GENERAL_LOCK(bus->dhd, flags);
5957 /* The Host cannot process interrupts now, so disable them.
5958 * No need to disable the dongle INTR using intmask, as we are
5959 * already disabling INTRs from DPC context after
5960 * getting D3_ACK in dhd_bus_handle_d3_ack.
5961 * Code may not look symmetric between Suspend and
5962 * Resume paths but this is done to close down the timing window
5963 * between DPC and suspend context and bus->bus_low_power_state
5964 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
5965 */
5966 bus->dhd->d3ackcnt_timeout = 0;
5967 bus->dhd->busstate = DHD_BUS_SUSPEND;
5968 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5969 DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
5970 "BaseAddress1(0x%x)=0x%x\n", __FUNCTION__,
5971 PCIECFGREG_BASEADDR0,
5972 dhd_pcie_config_read(bus->osh,
5973 PCIECFGREG_BASEADDR0, sizeof(uint32)),
5974 PCIECFGREG_BASEADDR1,
5975 dhd_pcie_config_read(bus->osh,
5976 PCIECFGREG_BASEADDR1, sizeof(uint32))));
5977 dhdpcie_dump_resource(bus);
5978 /* Handle Host Suspend */
5979 rc = dhdpcie_pci_suspend_resume(bus, state);
5980 if (!rc) {
5981 bus->last_suspend_end_time = OSL_LOCALTIME_NS();
5982 }
5983 }
5984 } else if (timeleft == 0) { /* D3 ACK Timeout */
5985 #ifdef DHD_FW_COREDUMP
5986 uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
5987 #endif /* DHD_FW_COREDUMP */
5988
5989 /* check if the D3 ACK timeout due to scheduling issue */
5990 bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
5991 bus->isr_entry_time > bus->last_d3_inform_time &&
5992 dhd_bus_query_dpc_sched_errors(bus->dhd);
5993 bus->dhd->d3ack_timeout_occured = TRUE;
5994 /* If the D3 Ack has timeout */
5995 bus->dhd->d3ackcnt_timeout++;
5996 DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
5997 __FUNCTION__, bus->dhd->is_sched_error ?
5998 " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
5999 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
6000 if (bus->dhd->is_sched_error && cur_memdump_mode) {
6001 /* change g_assert_type to trigger Kernel panic */
6002 g_assert_type = 2;
6003 /* use ASSERT() to trigger panic */
6004 ASSERT(0);
6005 }
6006 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
6007 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6008 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6009 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6010 DHD_GENERAL_LOCK(bus->dhd, flags);
6011 bus->dhd->busstate = DHD_BUS_DATA;
6012 /* resume all interface network queue. */
6013 dhd_bus_start_queue(bus);
6014 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6015 if (!bus->dhd->dongle_trap_occured &&
6016 !bus->is_linkdown) {
6017 uint32 intstatus = 0;
6018
6019 /* Check if PCIe bus status is valid */
6020 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6021 bus->pcie_mailbox_int, 0, 0);
6022 if (intstatus == (uint32)-1) {
6023 /* Invalidate PCIe bus status */
6024 bus->is_linkdown = 1;
6025 }
6026
6027 dhd_bus_dump_console_buffer(bus);
6028 dhd_prot_debug_info_print(bus->dhd);
6029 #ifdef DHD_FW_COREDUMP
6030 if (cur_memdump_mode) {
6031 /* write core dump to file */
6032 bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
6033 dhdpcie_mem_dump(bus);
6034 }
6035 #endif /* DHD_FW_COREDUMP */
6036
6037 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
6038 __FUNCTION__));
6039 #ifdef SUPPORT_LINKDOWN_RECOVERY
6040 #ifdef CONFIG_ARCH_MSM
6041 bus->no_cfg_restore = 1;
6042 #endif /* CONFIG_ARCH_MSM */
6043 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6044 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
6045 }
6046 #if defined(DHD_ERPOM)
6047 dhd_schedule_reset(bus->dhd);
6048 #endif // endif
6049 rc = -ETIMEDOUT;
6050 }
6051 } else {
6052 /* Resume */
6053 DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
6054 bus->last_resume_start_time = OSL_LOCALTIME_NS();
6055
6056 /**
6057 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
6058 * si_backplane_access(function to read/write backplane)
6059 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
6060 * window being accessed is different from the window
6061 * being pointed to by second_bar0win.
6062 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
6063 * invalidating second_bar0win after resume updates
6064 * PCIE2_BAR0_CORE2_WIN with right window.
6065 */
6066 si_invalidate_second_bar0win(bus->sih);
6067 #if defined(BCMPCIE_OOB_HOST_WAKE)
6068 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
6069 #endif /* BCMPCIE_OOB_HOST_WAKE */
6070 rc = dhdpcie_pci_suspend_resume(bus, state);
6071 DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
6072 __FUNCTION__, PCIECFGREG_BASEADDR0,
6073 dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
6074 PCIECFGREG_BASEADDR1,
6075 dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
6076 dhdpcie_dump_resource(bus);
6077
6078 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6079 /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
6080 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6081 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6082
6083 if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6084 if (bus->use_d0_inform) {
6085 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6086 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
6087 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6088 }
6089 /* ring doorbell 1 (hostready) */
6090 dhd_bus_hostready(bus);
6091 }
6092 DHD_GENERAL_LOCK(bus->dhd, flags);
6093 bus->dhd->busstate = DHD_BUS_DATA;
6094 #ifdef DHD_PCIE_RUNTIMEPM
6095 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
6096 bus->bus_wake = 1;
6097 OSL_SMP_WMB();
6098 wake_up_interruptible(&bus->rpm_queue);
6099 }
6100 #endif /* DHD_PCIE_RUNTIMEPM */
6101 /* resume all interface network queue. */
6102 dhd_bus_start_queue(bus);
6103
6104 /* TODO: for NDIS also we need to use enable_irq in future */
6105 bus->resume_intr_enable_count++;
6106
6107 /* For Linux, macOS, etc. (other than NDIS), re-enable the dongle interrupts
6108 * using intmask and host interrupts
6109 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
6110 */
6111 dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
6112 dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
6113
6114 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6115
6116 if (bus->dhd->dhd_watchdog_ms_backup) {
6117 DHD_ERROR(("%s: Enabling wdtick after resume\n",
6118 __FUNCTION__));
6119 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
6120 }
6121
6122 bus->last_resume_end_time = OSL_LOCALTIME_NS();
6123
6124 }
6125 return rc;
6126 }
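
/*
 * Condensed sketch (compiled out) of the D3 handshake implemented by
 * dhdpcie_bus_suspend() above, for reference only. Locking, wakelock checks
 * and the D3-ACK timeout/recovery paths are omitted; the function name is
 * hypothetical.
 */
#if 0
static int
dhdpcie_bus_suspend_sketch(struct dhd_bus *bus)
{
	bus->wait_for_d3_ack = 0;
	/* step 1: ask the dongle to enter D3 */
	dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
	/* step 2: wait for the D3_ACK mailbox response */
	dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
	if (!bus->wait_for_d3_ack)
		return -ETIMEDOUT;	/* real code dumps state and sends HANG here */
	/* step 3: mark the bus suspended and do the host-side PCI suspend */
	bus->dhd->busstate = DHD_BUS_SUSPEND;
	return dhdpcie_pci_suspend_resume(bus, TRUE);
}
#endif /* illustrative sketch */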
6127
6128 uint32
6129 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
6130 {
6131 ASSERT(bus && bus->sih);
6132 if (enable) {
6133 si_corereg(bus->sih, bus->sih->buscoreidx,
6134 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
6135 } else {
6136 si_corereg(bus->sih, bus->sih->buscoreidx,
6137 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
6138 }
6139 return 0;
6140 }
6141
6142 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
6143 uint32
6144 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
6145 {
6146 uint reg_val;
6147
6148 ASSERT(bus && bus->sih);
6149
6150 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
6151 0x1004);
6152 reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
6153 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
6154 reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
6155 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
6156 reg_val);
6157
6158 return 0;
6159 }
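
/*
 * Illustrative sketch (compiled out) of the indirect config-space access
 * pattern used above: write the target offset into 'configaddr', then
 * read-modify-write through 'configdata'. The helper name and the
 * (offset, mask, shift) parameterisation are hypothetical;
 * dhdpcie_set_l1_entry_time() is this pattern specialised to offset 0x1004,
 * bits [22:16].
 */
#if 0
static uint32
dhdpcie_cfg_indirect_rmw(struct dhd_bus *bus, uint32 offset, uint32 mask,
	uint32 shift, uint32 val)
{
	uint32 reg_val;

	/* select the config register to operate on */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configaddr), ~0, offset);
	/* read the current value */
	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
	/* clear the field and insert the new value */
	reg_val = (reg_val & ~(mask << shift)) | ((val & mask) << shift);
	/* write it back */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), ~0, reg_val);
	return reg_val;
}
#endif /* illustrative sketch */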
6160
6161 static uint32
6162 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
6163 {
6164 uint16 chipid = si_chipid(bus->sih);
6165 if ((chipid == BCM4375_CHIP_ID ||
6166 chipid == BCM4377_CHIP_ID) &&
6167 (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK) &&
6168 (len % 128 == 4)) {
6169 len += 8;
6170 }
6171 DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
6172 return len;
6173 }
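
/*
 * Worked example for the WAR above: on a qualifying chip (e.g. BCM4375) with a
 * loopback type that is neither M2M_DMA_LPBK nor M2M_NON_DMA_LPBK, a requested
 * length of 132 bytes satisfies 132 % 128 == 4 and is padded to 140 bytes,
 * while 136 bytes (136 % 128 == 8) passes through unchanged.
 */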
6174
6175 /** Transfers bytes from host to dongle and to host again using DMA */
6176 static int
6177 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
6178 uint32 len, uint32 srcdelay, uint32 destdelay,
6179 uint32 d11_lpbk, uint32 core_num, uint32 wait)
6180 {
6181 int ret = 0;
6182
6183 if (bus->dhd == NULL) {
6184 DHD_ERROR(("bus not inited\n"));
6185 return BCME_ERROR;
6186 }
6187 if (bus->dhd->prot == NULL) {
6188 DHD_ERROR(("prot is not inited\n"));
6189 return BCME_ERROR;
6190 }
6191 if (bus->dhd->busstate != DHD_BUS_DATA) {
6192 DHD_ERROR(("not in a ready state to start LPBK\n"));
6193 return BCME_ERROR;
6194 }
6195
6196 if (len < 5 || len > 4194296) {
6197 DHD_ERROR(("len is too small or too large\n"));
6198 return BCME_ERROR;
6199 }
6200
6201 len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
6202
6203 bus->dmaxfer_complete = FALSE;
6204 ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
6205 d11_lpbk, core_num);
6206 if (ret != BCME_OK || !wait)
6207 return ret;
6208
6209 ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
6210 if (ret < 0)
6211 ret = BCME_NOTREADY;
6212
6213 return ret;
6214
6215 }
6216
6217 static int
6218 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
6219 {
6220 int bcmerror = 0;
6221 volatile uint32 *cr4_regs;
6222
6223 if (!bus->sih) {
6224 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
6225 return BCME_ERROR;
6226 }
6227 /* To enter download state, disable ARM and reset SOCRAM.
6228 * To exit download state, simply reset ARM (default is RAM boot).
6229 */
6230 if (enter) {
6231 /* Make sure BAR1 maps to backplane address 0 */
6232 dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
6233 bus->alp_only = TRUE;
6234
6235 /* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
6236 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6237
6238 if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6239 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
6240 !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6241 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6242 bcmerror = BCME_ERROR;
6243 goto fail;
6244 }
6245
6246 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6247 /* Halt ARM & remove reset */
6248 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6249 if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
6250 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
6251 bcmerror = BCME_ERROR;
6252 goto fail;
6253 }
6254 si_core_reset(bus->sih, 0, 0);
6255 /* Reset the last 4 bytes of RAM (used to hold the shared area address) */
6256 dhdpcie_init_shared_addr(bus);
6257 } else if (cr4_regs == NULL) { /* no CR4 present on chip */
6258 si_core_disable(bus->sih, 0);
6259
6260 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6261 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6262 bcmerror = BCME_ERROR;
6263 goto fail;
6264 }
6265
6266 si_core_reset(bus->sih, 0, 0);
6267
6268 /* Clear the top bit of memory */
6269 if (bus->ramsize) {
6270 uint32 zeros = 0;
6271 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
6272 (uint8*)&zeros, 4) < 0) {
6273 bcmerror = BCME_ERROR;
6274 goto fail;
6275 }
6276 }
6277 } else {
6278 /* For CR4,
6279 * Halt ARM
6280 * Remove ARM reset
6281 * Read RAM base address [0x18_0000]
6282 * [next] Download firmware
6283 * [done at else] Populate the reset vector
6284 * [done at else] Remove ARM halt
6285 */
6286 /* Halt ARM & remove reset */
6287 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6288 if (BCM43602_CHIP(bus->sih->chip)) {
6289 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
6290 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6291 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
6292 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6293 }
6294 /* Reset the last 4 bytes of RAM (used to hold the shared area address) */
6295 dhdpcie_init_shared_addr(bus);
6296 }
6297 } else {
6298 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6299 /* write vars */
6300 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6301 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6302 goto fail;
6303 }
6304 /* write random numbers to sysmem for the purpose of
6305 * randomizing heap address space.
6306 */
6307 if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6308 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6309 __FUNCTION__));
6310 goto fail;
6311 }
6312 /* switch back to arm core again */
6313 if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6314 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
6315 bcmerror = BCME_ERROR;
6316 goto fail;
6317 }
6318 /* write address 0 with reset instruction */
6319 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
6320 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
6321 /* now remove reset and halt and continue to run CA7 */
6322 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
6323 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6324 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6325 bcmerror = BCME_ERROR;
6326 goto fail;
6327 }
6328
6329 if (!si_iscoreup(bus->sih)) {
6330 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
6331 bcmerror = BCME_ERROR;
6332 goto fail;
6333 }
6334
6335 /* Enable remap before ARM reset but after vars.
6336 * No backplane access in remap mode
6337 */
6338 if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
6339 !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
6340 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
6341 bcmerror = BCME_ERROR;
6342 goto fail;
6343 }
6344
6345 if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6346 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
6347 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6348 bcmerror = BCME_ERROR;
6349 goto fail;
6350 }
6351 } else {
6352 if (BCM43602_CHIP(bus->sih->chip)) {
6353 /* Firmware crashes on SOCSRAM access when core is in reset */
6354 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6355 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
6356 __FUNCTION__));
6357 bcmerror = BCME_ERROR;
6358 goto fail;
6359 }
6360 si_core_reset(bus->sih, 0, 0);
6361 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6362 }
6363
6364 /* write vars */
6365 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6366 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6367 goto fail;
6368 }
6369
6370 /* write a random number to TCM for the purpose of
6371 * randomizing heap address space.
6372 */
6373 if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6374 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6375 __FUNCTION__));
6376 goto fail;
6377 }
6378
6379 if ((bcmerror = dhdpcie_wrt_host_whitelist_region(bus)) != BCME_OK) {
6380 DHD_ERROR(("%s: Failed to write Whitelist region to TCM !\n",
6381 __FUNCTION__));
6382 goto fail;
6383 }
6384 /* switch back to arm core again */
6385 if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
6386 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
6387 bcmerror = BCME_ERROR;
6388 goto fail;
6389 }
6390
6391 /* write address 0 with reset instruction */
6392 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
6393 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
6394
6395 if (bcmerror == BCME_OK) {
6396 uint32 tmp;
6397
6398 bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
6399 (uint8 *)&tmp, sizeof(tmp));
6400
6401 if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
6402 DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
6403 __FUNCTION__, bus->resetinstr));
6404 DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
6405 __FUNCTION__, tmp));
6406 bcmerror = BCME_ERROR;
6407 goto fail;
6408 }
6409 }
6410
6411 /* now remove reset and halt and continue to run CR4 */
6412 }
6413
6414 si_core_reset(bus->sih, 0, 0);
6415
6416 /* Allow HT Clock now that the ARM is running. */
6417 bus->alp_only = FALSE;
6418
6419 bus->dhd->busstate = DHD_BUS_LOAD;
6420 }
6421
6422 fail:
6423 /* Always return to PCIE core */
6424 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
6425
6426 return bcmerror;
6427 } /* dhdpcie_bus_download_state */
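
/*
 * Condensed sketch (compiled out) of the download-state sequence implemented
 * above, for reference only. The chip-specific branches (CA7, CM3/SOCRAM,
 * 43602 bank setup) are collapsed into the common CR4 flow; the function name
 * is hypothetical.
 */
#if 0
static int
dhdpcie_download_state_sketch(dhd_bus_t *bus, bool enter)
{
	if (enter) {
		/* halt the ARM core and prepare RAM for the firmware image */
		si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
		si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
		dhdpcie_init_shared_addr(bus);	/* clear the last word of RAM */
	} else {
		/* image downloaded: write NVRAM vars, seed heap randomisation,
		 * place the reset vector at address 0 and let the ARM run
		 */
		dhdpcie_bus_write_vars(bus);
		dhdpcie_wrt_rnd(bus);
		dhdpcie_bus_membytes(bus, TRUE, 0,
			(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
		si_core_reset(bus->sih, 0, 0);	/* remove halt; ARM boots from RAM */
		bus->dhd->busstate = DHD_BUS_LOAD;
	}
	si_setcore(bus->sih, PCIE2_CORE_ID, 0);	/* always return to the PCIe core */
	return BCME_OK;
}
#endif /* illustrative sketch */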
6428
6429 static int
6430 dhdpcie_bus_write_vars(dhd_bus_t *bus)
6431 {
6432 int bcmerror = 0;
6433 uint32 varsize, phys_size;
6434 uint32 varaddr;
6435 uint8 *vbuffer;
6436 uint32 varsizew;
6437 #ifdef DHD_DEBUG
6438 uint8 *nvram_ularray;
6439 #endif /* DHD_DEBUG */
6440
6441 /* Even if there are no vars to be written, we still need to write the length token. */
6442 varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
6443 varaddr = (bus->ramsize - 4) - varsize;
6444
6445 varaddr += bus->dongle_ram_base;
6446
6447 if (bus->vars) {
6448
6449 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
6450 if (!vbuffer)
6451 return BCME_NOMEM;
6452
6453 bzero(vbuffer, varsize);
6454 bcopy(bus->vars, vbuffer, bus->varsz);
6455 /* Write the vars list */
6456 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
6457
6458 /* Implement read back and verify later */
6459 #ifdef DHD_DEBUG
6460 /* Verify NVRAM bytes */
6461 DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
6462 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
6463 if (!nvram_ularray) {
6464 MFREE(bus->dhd->osh, vbuffer, varsize);
6465 return BCME_NOMEM;
6466 }
6467
6468 /* Upload image to verify downloaded contents. */
6469 memset(nvram_ularray, 0xaa, varsize);
6470
6471 /* Read the vars list to temp buffer for comparison */
6472 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
6473 if (bcmerror) {
6474 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
6475 __FUNCTION__, bcmerror, varsize, varaddr));
6476 }
6477
6478 /* Compare the org NVRAM with the one read from RAM */
6479 if (memcmp(vbuffer, nvram_ularray, varsize)) {
6480 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
6481 } else
6482 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
6483 __FUNCTION__));
6484
6485 MFREE(bus->dhd->osh, nvram_ularray, varsize);
6486 #endif /* DHD_DEBUG */
6487
6488 MFREE(bus->dhd->osh, vbuffer, varsize);
6489 }
6490
6491 phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
6492
6493 phys_size += bus->dongle_ram_base;
6494
6495 /* adjust to the user specified RAM */
6496 DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
6497 phys_size, bus->ramsize));
6498 DHD_INFO(("Vars are at %d, orig varsize is %d\n",
6499 varaddr, varsize));
6500 varsize = ((phys_size - 4) - varaddr);
6501
6502 /*
6503 * Determine the length token:
6504 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
6505 */
6506 if (bcmerror) {
6507 varsizew = 0;
6508 bus->nvram_csm = varsizew;
6509 } else {
6510 varsizew = varsize / 4;
6511 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
6512 bus->nvram_csm = varsizew;
6513 varsizew = htol32(varsizew);
6514 }
6515
6516 DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
6517
6518 /* Write the length token to the last word */
6519 bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
6520 (uint8*)&varsizew, 4);
6521
6522 return bcmerror;
6523 } /* dhdpcie_bus_write_vars */
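
/*
 * Worked example of the length token computed above: with varsize = 0x400
 * bytes, varsizew = 0x100 words, so the token written to the last word of RAM
 * is (~0x100 << 16) | 0x100 = 0xFEFF0100 -- the word count in the lower
 * 16 bits and its one's complement in the upper 16 bits, which the dongle can
 * use as a simple checksum.
 */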
6524
6525 int
6526 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
6527 {
6528 int bcmerror = BCME_OK;
6529 #if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
6530 char *tmpbuf;
6531 uint tmpidx;
6532 #endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
6533
6534 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6535
6536 /* Basic sanity checks */
6537 if (bus->dhd->up) {
6538 bcmerror = BCME_NOTDOWN;
6539 goto err;
6540 }
6541 if (!len) {
6542 bcmerror = BCME_BUFTOOSHORT;
6543 goto err;
6544 }
6545
6546 /* Free the old ones and replace with passed variables */
6547 if (bus->vars)
6548 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
6549
6550 bus->vars = MALLOC(bus->dhd->osh, len);
6551 bus->varsz = bus->vars ? len : 0;
6552 if (bus->vars == NULL) {
6553 bcmerror = BCME_NOMEM;
6554 goto err;
6555 }
6556
6557 /* Copy the passed variables, which should include the terminating double-null */
6558 bcopy(arg, bus->vars, bus->varsz);
6559
6560 #ifdef DHD_USE_SINGLE_NVRAM_FILE
6561 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
6562 char *sp = NULL;
6563 char *ep = NULL;
6564 int i;
6565 char tag[2][8] = {"ccode=", "regrev="};
6566
6567 /* Find ccode and regrev info */
6568 for (i = 0; i < 2; i++) {
6569 sp = strnstr(bus->vars, tag[i], bus->varsz);
6570 if (!sp) {
6571 DHD_ERROR(("%s: Could not find %s info from the nvram %s\n",
6572 __FUNCTION__, tag[i], bus->nv_path));
6573 bcmerror = BCME_ERROR;
6574 goto err;
6575 }
6576 sp = strchr(sp, '=');
6577 ep = strchr(sp, '\0');
6578 /* We assume that the string length of both ccode and
6579 * regrev values should not exceed WLC_CNTRY_BUF_SZ
6580 */
6581 if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
6582 sp++;
6583 while (*sp != '\0') {
6584 DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
6585 __FUNCTION__, tag[i], *sp));
6586 *sp++ = '0';
6587 }
6588 } else {
6589 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
6590 __FUNCTION__, tag[i]));
6591 bcmerror = BCME_ERROR;
6592 goto err;
6593 }
6594 }
6595 }
6596 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
6597
6598 #if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
6599 #ifdef DHD_USE_SINGLE_NVRAM_FILE
6600 if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
6601 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
6602 {
6603 char *pos = NULL;
6604 tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
6605 if (tmpbuf == NULL) {
6606 goto err;
6607 }
6608 memcpy(tmpbuf, bus->vars, bus->varsz);
6609 for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
6610 if (tmpbuf[tmpidx] == 0) {
6611 tmpbuf[tmpidx] = '\n';
6612 }
6613 }
6614 bus->dhd->vars_ccode[0] = 0;
6615 bus->dhd->vars_regrev = 0;
6616 if ((pos = strstr(tmpbuf, "ccode"))) {
6617 sscanf(pos, "ccode=%3s\n", bus->dhd->vars_ccode);
6618 }
6619 if ((pos = strstr(tmpbuf, "regrev"))) {
6620 sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
6621 }
6622 MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
6623 }
6624 #endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
6625
6626 err:
6627 return bcmerror;
6628 }
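
/*
 * Illustrative sketch (compiled out) of the NVRAM vars format consumed above:
 * 'arg' is a sequence of NUL-terminated "name=value" strings ending in an
 * empty string (double NUL). The KEEP_*_REGREV block above copies the blob
 * and turns NULs into newlines so ccode/regrev can be extracted with sscanf;
 * the hypothetical walker below visits each entry in place instead.
 */
#if 0
static void
dhd_vars_walk_sketch(const char *vars, uint varsz)
{
	const char *p = vars;

	while ((uint)(p - vars) < varsz && *p != '\0') {
		/* each entry looks like "ccode=KR" or "regrev=1" */
		DHD_INFO(("var: %s\n", p));
		p += strlen(p) + 1;	/* skip the entry and its terminating NUL */
	}
}
#endif /* illustrative sketch */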
6629
6630 /* loop through the capability list and see if the pcie capability exists */
6631 uint8
6632 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
6633 {
6634 uint8 cap_id;
6635 uint8 cap_ptr = 0;
6636 uint8 byte_val;
6637
6638 /* check for Header type 0 */
6639 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
6640 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
6641 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
6642 goto end;
6643 }
6644
6645 /* check if the capability pointer field exists */
6646 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
6647 if (!(byte_val & PCI_CAPPTR_PRESENT)) {
6648 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
6649 goto end;
6650 }
6651
6652 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
6653 /* check if the capability pointer is 0x00 */
6654 if (cap_ptr == 0x00) {
6655 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
6656 goto end;
6657 }
6658
6659 /* loop through the capability list and see if the pcie capability exists */
6660
6661 cap_id = read_pci_cfg_byte(cap_ptr);
6662
6663 while (cap_id != req_cap_id) {
6664 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
6665 if (cap_ptr == 0x00) break;
6666 cap_id = read_pci_cfg_byte(cap_ptr);
6667 }
6668
6669 end:
6670 return cap_ptr;
6671 }
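
/*
 * Usage note for the capability walk above: the returned pointer is a config
 * space offset that callers use as a base for the capability's registers, as
 * the PM helpers below do, e.g.
 *
 *	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
 *	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
 *
 * A return value of 0 means the capability was not found.
 */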
6672
6673 void
6674 dhdpcie_pme_active(osl_t *osh, bool enable)
6675 {
6676 uint8 cap_ptr;
6677 uint32 pme_csr;
6678
6679 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
6680
6681 if (!cap_ptr) {
6682 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
6683 return;
6684 }
6685
6686 pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
6687 DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
6688
6689 pme_csr |= PME_CSR_PME_STAT;
6690 if (enable) {
6691 pme_csr |= PME_CSR_PME_EN;
6692 } else {
6693 pme_csr &= ~PME_CSR_PME_EN;
6694 }
6695
6696 OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
6697 }
6698
6699 bool
6700 dhdpcie_pme_cap(osl_t *osh)
6701 {
6702 uint8 cap_ptr;
6703 uint32 pme_cap;
6704
6705 cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
6706
6707 if (!cap_ptr) {
6708 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
6709 return FALSE;
6710 }
6711
6712 pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
6713
6714 DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
6715
6716 return ((pme_cap & PME_CAP_PM_STATES) != 0);
6717 }
6718
6719 uint32
6720 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
6721 {
6722
6723 uint8 pcie_cap;
6724 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
6725 uint32 reg_val;
6726
6727 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
6728
6729 if (!pcie_cap) {
6730 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
6731 return 0;
6732 }
6733
6734 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
6735
6736 /* set operation */
6737 if (mask) {
6738 /* read */
6739 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6740
6741 /* modify */
6742 reg_val &= ~mask;
6743 reg_val |= (mask & val);
6744
6745 /* write */
6746 OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
6747 }
6748 return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6749 }
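
/*
 * Usage note for dhdpcie_lcreg() above: mask == 0 makes it a pure read of the
 * Link Control register, while a non-zero mask read-modify-writes only the
 * masked bits before returning the updated value. For example, assuming the
 * standard PCIe layout where Link Control bits [1:0] form the ASPM Control
 * field:
 *
 *	cur = dhdpcie_lcreg(osh, 0, 0);          read current value
 *	dhdpcie_lcreg(osh, 0x3, 0x0);            disable ASPM L0s/L1
 *	dhdpcie_lcreg(osh, 0x3, cur & 0x3);      restore the saved setting
 */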
6750
6751 uint8
6752 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
6753 {
6754 uint8 pcie_cap;
6755 uint32 reg_val;
6756 uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
6757
6758 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
6759
6760 if (!pcie_cap) {
6761 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
6762 return 0;
6763 }
6764
6765 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
6766
6767 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6768 /* set operation */
6769 if (mask) {
6770 if (val)
6771 reg_val |= PCIE_CLKREQ_ENAB;
6772 else
6773 reg_val &= ~PCIE_CLKREQ_ENAB;
6774 OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
6775 reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
6776 }
6777 if (reg_val & PCIE_CLKREQ_ENAB)
6778 return 1;
6779 else
6780 return 0;
6781 }
6782
6783 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
6784 {
6785 dhd_bus_t *bus;
6786 uint64 current_time = OSL_LOCALTIME_NS();
6787
6788 if (!dhd) {
6789 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
6790 return;
6791 }
6792
6793 bus = dhd->bus;
6794 if (!bus) {
6795 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
6796 return;
6797 }
6798
6799 bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
6800 bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
6801 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
6802 "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
6803 bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
6804 bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
6805 bus->dpc_return_busdown_count, bus->non_ours_irq_count);
6806 #ifdef BCMPCIE_OOB_HOST_WAKE
6807 bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
6808 " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n",
6809 bus->oob_intr_count, bus->oob_intr_enable_count,
6810 bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus),
6811 bus->last_oob_irq_time);
6812 #endif /* BCMPCIE_OOB_HOST_WAKE */
6813 bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
6814 " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
6815 " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
6816 "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
6817 " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
6818 " last_process_infocpl_time="SEC_USEC_FMT
6819 "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
6820 "last_d3_inform_time="SEC_USEC_FMT"\n",
6821 GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
6822 GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
6823 GET_SEC_USEC(dhd->bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
6824 GET_SEC_USEC(bus->last_process_ctrlbuf_time),
6825 GET_SEC_USEC(bus->last_process_flowring_time),
6826 GET_SEC_USEC(bus->last_process_txcpl_time),
6827 GET_SEC_USEC(bus->last_process_rxcpl_time),
6828 GET_SEC_USEC(bus->last_process_infocpl_time),
6829 GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
6830 GET_SEC_USEC(bus->last_d3_inform_time));
6831
6832 bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
6833 SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
6834 SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
6835 GET_SEC_USEC(dhd->bus->last_suspend_end_time),
6836 GET_SEC_USEC(dhd->bus->last_resume_start_time),
6837 GET_SEC_USEC(dhd->bus->last_resume_end_time));
6838 }
6839
6840 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
6841 {
6842 uint32 intstatus = 0;
6843 uint32 intmask = 0;
6844 uint32 d2h_db0 = 0;
6845 uint32 d2h_mb_data = 0;
6846
6847 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
6848 dhd->bus->pcie_mailbox_int, 0, 0);
6849 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
6850 dhd->bus->pcie_mailbox_mask, 0, 0);
6851 d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
6852 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
6853
6854 bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
6855 intstatus, intmask, d2h_db0);
6856 bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
6857 d2h_mb_data, dhd->bus->def_intmask);
6858 }
6859 /** Add bus dump output to a buffer */
6860 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
6861 {
6862 uint16 flowid;
6863 int ix = 0;
6864 flow_ring_node_t *flow_ring_node;
6865 flow_info_t *flow_info;
6866 #ifdef TX_STATUS_LATENCY_STATS
6867 uint8 ifindex;
6868 if_flow_lkup_t *if_flow_lkup;
6869 dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
6870 #endif /* TX_STATUS_LATENCY_STATS */
6871
6872 if (dhdp->busstate != DHD_BUS_DATA)
6873 return;
6874
6875 #ifdef TX_STATUS_LATENCY_STATS
6876 memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
6877 #endif /* TX_STATUS_LATENCY_STATS */
6878 #ifdef DHD_WAKE_STATUS
6879 bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
6880 bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
6881 dhdp->bus->wake_counts.rcwake);
6882 #ifdef DHD_WAKE_RX_STATUS
6883 bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
6884 dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
6885 dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
6886 bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
6887 dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
6888 dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
6889 bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
6890 dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
6891 dhdp->bus->wake_counts.rx_icmpv6_ns);
6892 #endif /* DHD_WAKE_RX_STATUS */
6893 #ifdef DHD_WAKE_EVENT_STATUS
6894 for (flowid = 0; flowid < WLC_E_LAST; flowid++)
6895 if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
6896 bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
6897 dhdp->bus->wake_counts.rc_event[flowid]);
6898 bcm_bprintf(strbuf, "\n");
6899 #endif /* DHD_WAKE_EVENT_STATUS */
6900 #endif /* DHD_WAKE_STATUS */
6901
6902 dhd_prot_print_info(dhdp, strbuf);
6903 dhd_dump_intr_registers(dhdp, strbuf);
6904 dhd_dump_intr_counters(dhdp, strbuf);
6905 bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
6906 dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
6907 bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
6908 bcm_bprintf(strbuf,
6909 "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
6910 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
6911 "Overflows", "RD", "WR");
6912
6913 #ifdef TX_STATUS_LATENCY_STATS
6914 /* Average Tx status/Completion Latency in micro secs */
6915 bcm_bprintf(strbuf, "%12s", "AvgTxCmpL_Us ");
6916 #endif /* TX_STATUS_LATENCY_STATS */
6917
6918 bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
6919
6920 for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
6921 flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
6922 if (!flow_ring_node->active)
6923 continue;
6924
6925 flow_info = &flow_ring_node->flow_info;
6926 bcm_bprintf(strbuf,
6927 "%3d. %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
6928 flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
6929 MAC2STRDBG(flow_info->da),
6930 DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
6931 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
6932 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
6933 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
6934 dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
6935 "%4d %4d ");
6936
6937 #ifdef TX_STATUS_LATENCY_STATS
6938 bcm_bprintf(strbuf, "%12d ",
6939 flow_info->num_tx_status ?
6940 DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
6941 flow_info->num_tx_status) : 0);
6942
6943 ifindex = flow_info->ifindex;
6944 ASSERT(ifindex < DHD_MAX_IFS);
6945 if (ifindex < DHD_MAX_IFS) {
6946 if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
6947 if_tx_status_latency[ifindex].cum_tx_status_latency +=
6948 flow_info->cum_tx_status_latency;
6949 } else {
6950 DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
6951 __FUNCTION__, ifindex, flowid));
6952 }
6953 #endif /* TX_STATUS_LATENCY_STATS */
6954 bcm_bprintf(strbuf,
6955 "%5s %6s %5s\n", "NA", "NA", "NA");
6956 }
6957
6958 #ifdef TX_STATUS_LATENCY_STATS
6959 bcm_bprintf(strbuf, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStats");
6960 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
6961 for (ix = 0; ix < DHD_MAX_IFS; ix++) {
6962 if (!if_flow_lkup[ix].status) {
6963 continue;
6964 }
6965 bcm_bprintf(strbuf, "%2d %16d %16d\n",
6966 ix,
6967 if_tx_status_latency[ix].num_tx_status ?
6968 DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
6969 if_tx_status_latency[ix].num_tx_status): 0,
6970 if_tx_status_latency[ix].num_tx_status);
6971 }
6972 #endif /* TX_STATUS_LATENCY_STATS */
6973 bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
6974 bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
6975 bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
6976 if (dhdp->d2h_hostrdy_supported) {
6977 bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
6978 }
6979 bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
6980 dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
6981 }
6982
6983 /**
6984 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
6985 * flow queue to their flow ring.
6986 */
6987 static void
6988 dhd_update_txflowrings(dhd_pub_t *dhd)
6989 {
6990 unsigned long flags;
6991 dll_t *item, *next;
6992 flow_ring_node_t *flow_ring_node;
6993 struct dhd_bus *bus = dhd->bus;
6994
6995 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
6996 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
6997 for (item = dll_head_p(&bus->flowring_active_list);
6998 (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
6999 item = next) {
7000 if (dhd->hang_was_sent) {
7001 break;
7002 }
7003
7004 next = dll_next_p(item);
7005 flow_ring_node = dhd_constlist_to_flowring(item);
7006
7007 /* Ensure that flow_ring_node in the list is Not Null */
7008 ASSERT(flow_ring_node != NULL);
7009
7010 /* Ensure that the flowring node has valid contents */
7011 ASSERT(flow_ring_node->prot_info != NULL);
7012
7013 dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
7014 }
7015 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7016 }
7017
7018 /** Mailbox ringbell Function */
7019 static void
7020 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
7021 {
7022 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7023 (bus->sih->buscorerev == 4)) {
7024 DHD_ERROR(("mailbox communication not supported\n"));
7025 return;
7026 }
7027 if (bus->db1_for_mb) {
7028 /* this is a pcie core register, not the config register */
7029 DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
7030 if (DAR_PWRREQ(bus)) {
7031 dhd_bus_pcie_pwr_req(bus);
7032 }
7033 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
7034 ~0, 0x12345678);
7035 } else {
7036 DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
7037 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
7038 dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
7039 }
7040 }
7041
7042 /* Upon receiving a mailbox interrupt,
7043 * if H2D_FW_TRAP bit is set in mailbox location
7044 * device traps
7045 */
7046 static void
7047 dhdpcie_fw_trap(dhd_bus_t *bus)
7048 {
7049 /* Send the mailbox data and generate mailbox intr. */
7050 dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
7051 /* For FWs that cannot interpret H2D_FW_TRAP */
7052 (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
7053 }
7054
7055 /** mailbox doorbell ring function */
7056 void
7057 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
7058 {
7059 /* Skip after sending D3_INFORM */
7060 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7061 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7062 __FUNCTION__, bus->bus_low_power_state));
7063 return;
7064 }
7065
7066 /* Skip in the case of link down */
7067 if (bus->is_linkdown) {
7068 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7069 return;
7070 }
7071
7072 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7073 (bus->sih->buscorerev == 4)) {
7074 si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
7075 PCIE_INTB, PCIE_INTB);
7076 } else {
7077 /* this is a pcie core register, not the config register */
7078 DHD_INFO(("writing a door bell to the device\n"));
7079 if (IDMA_ACTIVE(bus->dhd)) {
7080 if (DAR_PWRREQ(bus)) {
7081 dhd_bus_pcie_pwr_req(bus);
7082 }
7083 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
7084 ~0, value);
7085 } else {
7086 if (DAR_PWRREQ(bus)) {
7087 dhd_bus_pcie_pwr_req(bus);
7088 }
7089 si_corereg(bus->sih, bus->sih->buscoreidx,
7090 dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
7091 }
7092 }
7093 }
7094
7095 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
7096 void
7097 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
7098 {
7099 /* this is a pcie core register, not the config register */
7100 /* Skip after sending D3_INFORM */
7101 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7102 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7103 __FUNCTION__, bus->bus_low_power_state));
7104 return;
7105 }
7106
7107 /* Skip in the case of link down */
7108 if (bus->is_linkdown) {
7109 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7110 return;
7111 }
7112
7113 DHD_INFO(("writing a door bell 2 to the device\n"));
7114 if (DAR_PWRREQ(bus)) {
7115 dhd_bus_pcie_pwr_req(bus);
7116 }
7117 si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
7118 ~0, value);
7119 }
7120
7121 void
7122 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
7123 {
7124 /* Skip after sending D3_INFORM */
7125 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7126 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7127 __FUNCTION__, bus->bus_low_power_state));
7128 return;
7129 }
7130
7131 /* Skip in the case of link down */
7132 if (bus->is_linkdown) {
7133 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7134 return;
7135 }
7136
7137 if (DAR_PWRREQ(bus)) {
7138 dhd_bus_pcie_pwr_req(bus);
7139 }
7140 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
7141 }
7142
7143 void
7144 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
7145 {
7146 /* Skip after sending D3_INFORM */
7147 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7148 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7149 __FUNCTION__, bus->bus_low_power_state));
7150 return;
7151 }
7152
7153 /* Skip in the case of link down */
7154 if (bus->is_linkdown) {
7155 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7156 return;
7157 }
7158
7159 if (DAR_PWRREQ(bus)) {
7160 dhd_bus_pcie_pwr_req(bus);
7161 }
7162 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
7163 }
7164
7165 static void
7166 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
7167 {
7168 uint32 w;
7169 /* Skip after sending D3_INFORM */
7170 if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7171 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7172 __FUNCTION__, bus->bus_low_power_state));
7173 return;
7174 }
7175
7176 /* Skip in the case of link down */
7177 if (bus->is_linkdown) {
7178 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7179 return;
7180 }
7181
7182 w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
7183 W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
7184 }
7185
7186 dhd_mb_ring_t
7187 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
7188 {
7189 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7190 (bus->sih->buscorerev == 4)) {
7191 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7192 bus->pcie_mailbox_int);
7193 if (bus->pcie_mb_intr_addr) {
7194 bus->pcie_mb_intr_osh = si_osh(bus->sih);
7195 return dhd_bus_ringbell_oldpcie;
7196 }
7197 } else {
7198 bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7199 dhd_bus_db0_addr_get(bus));
7200 if (bus->pcie_mb_intr_addr) {
7201 bus->pcie_mb_intr_osh = si_osh(bus->sih);
7202 return dhdpcie_bus_ringbell_fast;
7203 }
7204 }
7205 return dhd_bus_ringbell;
7206 }
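/*
 * Illustrative sketch only (not part of the driver): it mirrors the idea of
 * dhd_bus_get_mbintr_fn() above, i.e. resolving a doorbell "ring" routine once
 * and caching a function pointer together with the mapped register address,
 * falling back to a slower path when the mapping is unavailable. All names
 * below (fake_bus_t, ring_fast, ring_slow, pick_ring_fn) are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef struct fake_bus {
	volatile uint32_t *db_addr;	/* mapped doorbell register, may be NULL */
} fake_bus_t;

typedef void (*ring_fn_t)(fake_bus_t *bus, uint32_t value);

static void ring_fast(fake_bus_t *bus, uint32_t value)
{
	*bus->db_addr = value;		/* direct MMIO write */
}

static void ring_slow(fake_bus_t *bus, uint32_t value)
{
	/* stand-in for the si_corereg() based fallback path */
	printf("slow doorbell write 0x%08x\n", value);
}

static ring_fn_t pick_ring_fn(fake_bus_t *bus)
{
	return (bus->db_addr != NULL) ? ring_fast : ring_slow;
}
#endif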
7207
7208 dhd_mb_ring_2_t
7209 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
7210 {
7211 bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7212 dhd_bus_db0_addr_2_get(bus));
7213 if (bus->pcie_mb_intr_2_addr) {
7214 bus->pcie_mb_intr_osh = si_osh(bus->sih);
7215 return dhdpcie_bus_ringbell_2_fast;
7216 }
7217 return dhd_bus_ringbell_2;
7218 }
7219
7220 bool BCMFASTPATH
7221 dhd_bus_dpc(struct dhd_bus *bus)
7222 {
7223 bool resched = FALSE; /* Flag indicating resched wanted */
7224 unsigned long flags;
7225
7226 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7227
7228 bus->dpc_entry_time = OSL_LOCALTIME_NS();
7229
7230 DHD_GENERAL_LOCK(bus->dhd, flags);
7231 /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
7232 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
7233 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
7234 * and if we return from here, then IOCTL response will never be handled
7235 */
7236 if (bus->dhd->busstate == DHD_BUS_DOWN) {
7237 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
7238 bus->intstatus = 0;
7239 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7240 bus->dpc_return_busdown_count++;
7241 return 0;
7242 }
7243 #ifdef DHD_PCIE_RUNTIMEPM
7244 bus->idlecount = 0;
7245 #endif /* DHD_PCIE_RUNTIMEPM */
7246 DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
7247 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7248
7249 resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
7250 if (!resched) {
7251 bus->intstatus = 0;
7252 bus->dpc_intr_enable_count++;
7253 /* For Linux, MacOS etc. (other than NDIS) re-enable the host interrupts
7254 * which were disabled in dhdpcie_bus_isr()
7255 */
7256 dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
7257 bus->dpc_exit_time = OSL_LOCALTIME_NS();
7258 } else {
7259 bus->resched_dpc_time = OSL_LOCALTIME_NS();
7260 }
7261
7262 bus->dpc_sched = resched;
7263
7264 DHD_GENERAL_LOCK(bus->dhd, flags);
7265 DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
7266 dhd_os_busbusy_wake(bus->dhd);
7267 DHD_GENERAL_UNLOCK(bus->dhd, flags);
7268
7269 return resched;
7270
7271 }
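/*
 * Illustrative sketch only (not part of the driver): the DPC pattern used by
 * dhd_bus_dpc() above - process bounded work and either re-enable the host
 * interrupt (no more work) or request rescheduling (work left), so the
 * interrupt stays masked while the DPC still has backlog. Names below are
 * hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool process_pending_work(void)
{
	/* return true if more work remains (bounded processing hit its limit) */
	return false;
}

static void enable_irq(void)
{
	printf("irq enabled\n");
}

/* Returns true when the caller should schedule this routine again. */
static bool dpc_sketch(void)
{
	bool resched = process_pending_work();

	if (!resched) {
		/* Only re-arm the interrupt once all pending work is drained;
		 * otherwise the reschedule path keeps the interrupt masked.
		 */
		enable_irq();
	}
	return resched;
}
#endif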
7272
7273 int
7274 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
7275 {
7276 uint32 cur_h2d_mb_data = 0;
7277
7278 DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
7279
7280 if (bus->is_linkdown) {
7281 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7282 return BCME_ERROR;
7283 }
7284
7285 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
7286 DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
7287 h2d_mb_data));
7288 /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
7289 {
7290 if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
7291 DHD_ERROR(("failure sending the H2D Mailbox message "
7292 "to firmware\n"));
7293 goto fail;
7294 }
7295 }
7296 goto done;
7297 }
7298
7299 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
7300
7301 if (cur_h2d_mb_data != 0) {
7302 uint32 i = 0;
7303 DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
7304 while ((i++ < 100) && cur_h2d_mb_data) {
7305 OSL_DELAY(10);
7306 dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
7307 }
7308 if (i >= 100) {
7309 DHD_ERROR(("%s : waited 1ms for the dngl "
7310 "to ack the previous mb transaction\n", __FUNCTION__));
7311 DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
7312 __FUNCTION__, cur_h2d_mb_data));
7313 }
7314 }
7315
7316 dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
7317 dhd_bus_gen_devmb_intr(bus);
7318
7319 done:
7320 if (h2d_mb_data == H2D_HOST_D3_INFORM) {
7321 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
7322 bus->last_d3_inform_time = OSL_LOCALTIME_NS();
7323 bus->d3_inform_cnt++;
7324 }
7325 if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
7326 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
7327 bus->d0_inform_in_use_cnt++;
7328 }
7329 if (h2d_mb_data == H2D_HOST_D0_INFORM) {
7330 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
7331 bus->d0_inform_cnt++;
7332 }
7333 return BCME_OK;
7334 fail:
7335 return BCME_ERROR;
7336 }
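/*
 * Illustrative sketch only (not part of the driver): the bounded wait used by
 * dhdpcie_send_mb_data() above before posting new H2D mailbox data - poll up
 * to 100 times with a 10 usec delay (about 1 ms total) for the dongle to
 * consume the previous value. read_mb() and udelay_us() are hypothetical
 * stand-ins for dhd_bus_cmn_readshared() and OSL_DELAY().
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

extern uint32_t read_mb(void);			/* stand-in for dhd_bus_cmn_readshared() */
extern void udelay_us(unsigned int us);		/* stand-in for OSL_DELAY() */

static bool wait_prev_mb_consumed(void)
{
	uint32_t pending = read_mb();
	uint32_t i = 0;

	while ((i++ < 100) && pending) {
		udelay_us(10);
		pending = read_mb();
	}
	return (pending == 0);	/* false: dongle never acked within ~1 ms */
}
#endif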
7337
7338 static void
7339 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
7340 {
7341 unsigned long flags_bus;
7342 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7343 bus->suspend_intr_disable_count++;
7344 /* Disable dongle Interrupts Immediately after D3 */
7345
7346 /* For Linux, MacOS etc. (other than NDIS), along with disabling
7347 * the dongle interrupt by clearing the IntMask, also disable the
7348 * interrupt directly from the host side. Also clear the intstatus
7349 * if it is set, to avoid unnecessary interrupts after D3 ACK.
7350 */
7351 dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
7352 dhdpcie_bus_clear_intstatus(bus);
7353 dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
7354
7355 /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
7356 bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
7357 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7358 bus->wait_for_d3_ack = 1;
7359 dhd_os_d3ack_wake(bus->dhd);
7360 }
7361 void
7362 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
7363 {
7364 if (MULTIBP_ENAB(bus->sih)) {
7365 dhd_bus_pcie_pwr_req(bus);
7366 }
7367
7368 DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
7369
7370 if (d2h_mb_data & D2H_DEV_FWHALT) {
7371 DHD_ERROR(("FW trap has happened\n"));
7372 dhdpcie_checkdied(bus, NULL, 0);
7373 #ifdef SUPPORT_LINKDOWN_RECOVERY
7374 #ifdef CONFIG_ARCH_MSM
7375 bus->no_cfg_restore = 1;
7376 #endif /* CONFIG_ARCH_MSM */
7377 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7378 dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
7379 goto exit;
7380 }
7381 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
7382 bool ds_acked = FALSE;
7383 BCM_REFERENCE(ds_acked);
7384 if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
7385 DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
7386 bus->dhd->busstate = DHD_BUS_DOWN;
7387 goto exit;
7388 }
7389 /* what should we do */
7390 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
7391 {
7392 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
7393 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
7394 }
7395 }
7396 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
7397 /* what should we do */
7398 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
7399 }
7400 if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
7401 /* what should we do */
7402 DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
7403 }
7404 if (d2h_mb_data & D2H_DEV_D3_ACK) {
7405 /* what should we do */
7406 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
7407 if (!bus->wait_for_d3_ack) {
7408 #if defined(DHD_HANG_SEND_UP_TEST)
7409 if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
7410 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
7411 } else {
7412 dhd_bus_handle_d3_ack(bus);
7413 }
7414 #else /* DHD_HANG_SEND_UP_TEST */
7415 dhd_bus_handle_d3_ack(bus);
7416 #endif /* DHD_HANG_SEND_UP_TEST */
7417 }
7418 }
7419
7420 exit:
7421 if (MULTIBP_ENAB(bus->sih)) {
7422 dhd_bus_pcie_pwr_req_clear(bus);
7423 }
7424 }
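/*
 * Illustrative sketch only (not part of the driver): the D2H mailbox word
 * handled by dhd_bus_handle_mb_data() above is a bitmask, so several events
 * (FW halt, deep-sleep request, D3 ack, ...) can be reported in one value and
 * are checked one by one. The bit values below are hypothetical placeholders,
 * not the real D2H_DEV_* definitions.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MB_FWHALT	(1u << 0)	/* placeholder for D2H_DEV_FWHALT */
#define MB_DS_ENTER	(1u << 1)	/* placeholder for D2H_DEV_DS_ENTER_REQ */
#define MB_D3_ACK	(1u << 2)	/* placeholder for D2H_DEV_D3_ACK */

static void handle_mb_word(uint32_t mb)
{
	if (mb & MB_FWHALT)
		printf("firmware halted\n");
	if (mb & MB_DS_ENTER)
		printf("deep sleep requested\n");
	if (mb & MB_D3_ACK)
		printf("D3 acknowledged\n");
}
#endif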
7425
7426 static void
7427 dhdpcie_handle_mb_data(dhd_bus_t *bus)
7428 {
7429 uint32 d2h_mb_data = 0;
7430 uint32 zero = 0;
7431
7432 if (MULTIBP_ENAB(bus->sih)) {
7433 dhd_bus_pcie_pwr_req(bus);
7434 }
7435
7436 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
7437 if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
7438 DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
7439 __FUNCTION__, d2h_mb_data));
7440 goto exit;
7441 }
7442
7443 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
7444
7445 DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
7446 if (d2h_mb_data & D2H_DEV_FWHALT) {
7447 DHD_ERROR(("FW trap has happened\n"));
7448 dhdpcie_checkdied(bus, NULL, 0);
7449 /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
7450 goto exit;
7451 }
7452 if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
7453 /* what should we do */
7454 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
7455 dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
7456 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
7457 }
7458 if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
7459 /* what should we do */
7460 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
7461 }
7462 if (d2h_mb_data & D2H_DEV_D3_ACK) {
7463 /* what should we do */
7464 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
7465 if (!bus->wait_for_d3_ack) {
7466 #if defined(DHD_HANG_SEND_UP_TEST)
7467 if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
7468 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
7469 } else {
7470 dhd_bus_handle_d3_ack(bus);
7471 }
7472 #else /* DHD_HANG_SEND_UP_TEST */
7473 dhd_bus_handle_d3_ack(bus);
7474 #endif /* DHD_HANG_SEND_UP_TEST */
7475 }
7476 }
7477
7478 exit:
7479 if (MULTIBP_ENAB(bus->sih)) {
7480 dhd_bus_pcie_pwr_req_clear(bus);
7481 }
7482 }
7483
7484 static void
7485 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
7486 {
7487 uint32 d2h_mb_data = 0;
7488 uint32 zero = 0;
7489
7490 if (bus->is_linkdown) {
7491 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
7492 return;
7493 }
7494
7495 if (MULTIBP_ENAB(bus->sih)) {
7496 dhd_bus_pcie_pwr_req(bus);
7497 }
7498
7499 dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
7500 if (!d2h_mb_data) {
7501 goto exit;
7502 }
7503
7504 dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
7505
7506 dhd_bus_handle_mb_data(bus, d2h_mb_data);
7507
7508 exit:
7509 if (MULTIBP_ENAB(bus->sih)) {
7510 dhd_bus_pcie_pwr_req_clear(bus);
7511 }
7512 }
7513
7514 static bool
7515 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
7516 {
7517 bool resched = FALSE;
7518
7519 if (MULTIBP_ENAB(bus->sih)) {
7520 dhd_bus_pcie_pwr_req(bus);
7521 }
7522 if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7523 (bus->sih->buscorerev == 4)) {
7524 /* Msg stream interrupt */
7525 if (intstatus & I_BIT1) {
7526 resched = dhdpci_bus_read_frames(bus);
7527 } else if (intstatus & I_BIT0) {
7528 /* do nothing for Now */
7529 }
7530 } else {
7531 if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
7532 bus->api.handle_mb_data(bus);
7533
7534 if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox &&
7535 (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) {
7536 DHD_ERROR(("%s: Bus is in power save state. "
7537 "Skip processing rest of ring buffers.\n", __FUNCTION__));
7538 goto exit;
7539 }
7540
7541 /* Validate intstatus only for INTX case */
7542 if ((bus->d2h_intr_method == PCIE_MSI) ||
7543 ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
7544 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7545 if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
7546 resched = dhdpci_bus_read_frames(bus);
7547 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
7548 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
7549 }
7550 #else
7551 resched = dhdpci_bus_read_frames(bus);
7552 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7553 }
7554 }
7555
7556 exit:
7557 if (MULTIBP_ENAB(bus->sih)) {
7558 dhd_bus_pcie_pwr_req_clear(bus);
7559 }
7560 return resched;
7561 }
7562
7563 static bool
7564 dhdpci_bus_read_frames(dhd_bus_t *bus)
7565 {
7566 bool more = FALSE;
7567
7568 /* First check if there is a FW trap */
7569 if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
7570 (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
7571 dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
7572 return FALSE;
7573 }
7574
7575 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
7576 DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7577
7578 dhd_prot_process_ctrlbuf(bus->dhd);
7579 bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
7580 /* Unlock to give chance for resp to be handled */
7581 DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7582
7583 /* Do not process rest of ring buf once bus enters low power state */
7584 if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) {
7585 DHD_ERROR(("%s: Bus is in power save state. "
7586 "Skip processing rest of ring buffers.\n", __FUNCTION__));
7587 return FALSE;
7588 }
7589
7590 DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7591 /* update the flow ring cpls */
7592 dhd_update_txflowrings(bus->dhd);
7593 bus->last_process_flowring_time = OSL_LOCALTIME_NS();
7594
7595 /* With heavy TX traffic, we could get a lot of TxStatus
7596 * so add bound
7597 */
7598 more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
7599 bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
7600
7601 /* With heavy RX traffic, this routine potentially could spend some time
7602 * processing RX frames without RX bound
7603 */
7604 more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
7605 bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
7606
7607 /* Process info ring completion messages */
7608 more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
7609 bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
7610
7611 #ifdef IDLE_TX_FLOW_MGMT
7612 if (bus->enable_idle_flowring_mgmt) {
7613 /* Look for idle flow rings */
7614 dhd_bus_check_idle_scan(bus);
7615 }
7616 #endif /* IDLE_TX_FLOW_MGMT */
7617
7618 /* don't talk to the dongle if fw is about to be reloaded */
7619 if (bus->dhd->hang_was_sent) {
7620 more = FALSE;
7621 }
7622 DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
7623
7624 #ifdef SUPPORT_LINKDOWN_RECOVERY
7625 if (bus->read_shm_fail) {
7626 /* Read interrupt state once again to confirm linkdown */
7627 int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7628 bus->pcie_mailbox_int, 0, 0);
7629 if (intstatus != (uint32)-1) {
7630 DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
7631 #ifdef DHD_FW_COREDUMP
7632 if (bus->dhd->memdump_enabled) {
7633 DHD_OS_WAKE_LOCK(bus->dhd);
7634 bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
7635 dhd_bus_mem_dump(bus->dhd);
7636 DHD_OS_WAKE_UNLOCK(bus->dhd);
7637 }
7638 #endif /* DHD_FW_COREDUMP */
7639 } else {
7640 DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
7641 #ifdef CONFIG_ARCH_MSM
7642 bus->no_cfg_restore = 1;
7643 #endif /* CONFIG_ARCH_MSM */
7644 bus->is_linkdown = 1;
7645 }
7646
7647 dhd_prot_debug_info_print(bus->dhd);
7648 bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
7649 dhd_os_send_hang_message(bus->dhd);
7650 more = FALSE;
7651 }
7652 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7653 return more;
7654 }
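/*
 * Illustrative sketch only (not part of the driver): the bounded completion
 * processing pattern of dhdpci_bus_read_frames() above. Each completion ring
 * is drained up to a bound and reports whether entries remain; the OR of
 * those flags tells the DPC whether to reschedule. The ring_drain() helper
 * and the bound values are hypothetical.
 */
#if 0
#include <stdbool.h>

/* Drain at most 'bound' completions; return true if entries remain. */
extern bool ring_drain(int ring_id, unsigned int bound);

static bool read_frames_sketch(void)
{
	bool more = false;

	more |= ring_drain(0 /* txcpl */, 256);
	more |= ring_drain(1 /* rxcpl */, 128);
	more |= ring_drain(2 /* infocpl */, 32);

	return more;	/* caller reschedules the DPC while work remains */
}
#endif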
7655
7656 bool
7657 dhdpcie_tcm_valid(dhd_bus_t *bus)
7658 {
7659 uint32 addr = 0;
7660 int rv;
7661 uint32 shaddr = 0;
7662 pciedev_shared_t sh;
7663
7664 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
7665
7666 /* Read last word in memory to determine address of pciedev_shared structure */
7667 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
7668
7669 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
7670 (addr > shaddr)) {
7671 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
7672 __FUNCTION__, addr));
7673 return FALSE;
7674 }
7675
7676 /* Read hndrte_shared structure */
7677 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
7678 sizeof(pciedev_shared_t))) < 0) {
7679 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
7680 return FALSE;
7681 }
7682
7683 /* Compare any field in pciedev_shared_t */
7684 if (sh.console_addr != bus->pcie_sh->console_addr) {
7685 DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
7686 return FALSE;
7687 }
7688
7689 return TRUE;
7690 }
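/*
 * Illustrative sketch only (not part of the driver): the sanity check that
 * dhdpcie_tcm_valid() above applies to the shared-structure pointer read from
 * the last word of dongle RAM. Names and parameters are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool shared_addr_valid(uint32_t addr, uint32_t ram_base,
	uint32_t ram_size, uint32_t nvram_csm)
{
	uint32_t last_word = ram_base + ram_size - 4;

	/* 0 or the NVRAM checksum means the dongle never wrote the pointer;
	 * anything outside dongle RAM cannot be a valid pciedev_shared_t.
	 */
	if ((addr == 0) || (addr == nvram_csm) ||
	    (addr < ram_base) || (addr > last_word)) {
		return false;
	}
	return true;
}
#endif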
7691
7692 static void
7693 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
7694 {
7695 snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
7696 firmware_api_version, host_api_version);
7697 return;
7698 }
7699
7700 static bool
7701 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
7702 {
7703 bool retcode = FALSE;
7704
7705 DHD_INFO(("firmware api revision %d, host api revision %d\n",
7706 firmware_api_version, host_api_version));
7707
7708 switch (firmware_api_version) {
7709 case PCIE_SHARED_VERSION_7:
7710 case PCIE_SHARED_VERSION_6:
7711 case PCIE_SHARED_VERSION_5:
7712 retcode = TRUE;
7713 break;
7714 default:
7715 if (firmware_api_version <= host_api_version)
7716 retcode = TRUE;
7717 }
7718 return retcode;
7719 }
7720
7721 static int
7722 dhdpcie_readshared(dhd_bus_t *bus)
7723 {
7724 uint32 addr = 0;
7725 int rv, dma_indx_wr_buf, dma_indx_rd_buf;
7726 uint32 shaddr = 0;
7727 pciedev_shared_t *sh = bus->pcie_sh;
7728 dhd_timeout_t tmo;
7729 bool idma_en = FALSE;
7730
7731 if (MULTIBP_ENAB(bus->sih)) {
7732 dhd_bus_pcie_pwr_req(bus);
7733 }
7734
7735 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
7736 /* start a timer for 5 seconds */
7737 dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
7738
7739 while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
7740 /* Read last word in memory to determine address of pciedev_shared structure */
7741 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
7742 }
7743
7744 if (addr == (uint32)-1) {
7745 DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
7746 #ifdef SUPPORT_LINKDOWN_RECOVERY
7747 #ifdef CONFIG_ARCH_MSM
7748 bus->no_cfg_restore = 1;
7749 #endif /* CONFIG_ARCH_MSM */
7750 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7751 bus->is_linkdown = 1;
7752 return BCME_ERROR;
7753 }
7754
7755 if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
7756 (addr > shaddr)) {
7757 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
7758 __FUNCTION__, addr));
7759 DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
7760 #ifdef DEBUG_DNGL_INIT_FAIL
7761 #ifdef CUSTOMER_HW4_DEBUG
7762 bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
7763 #endif /* CUSTOMER_HW4_DEBUG */
7764 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
7765 dhdpcie_mem_dump(bus);
7766 #endif /* DEBUG_DNGL_INIT_FAIL */
7767 return BCME_ERROR;
7768 } else {
7769 bus->shared_addr = (ulong)addr;
7770 DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
7771 "before dongle is ready\n", addr, tmo.elapsed));
7772 }
7773
7774 /* Read hndrte_shared structure */
7775 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
7776 sizeof(pciedev_shared_t))) < 0) {
7777 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
7778 return rv;
7779 }
7780
7781 /* Endianness */
7782 sh->flags = ltoh32(sh->flags);
7783 sh->trap_addr = ltoh32(sh->trap_addr);
7784 sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
7785 sh->assert_file_addr = ltoh32(sh->assert_file_addr);
7786 sh->assert_line = ltoh32(sh->assert_line);
7787 sh->console_addr = ltoh32(sh->console_addr);
7788 sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
7789 sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
7790 sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
7791 sh->flags2 = ltoh32(sh->flags2);
7792
7793 /* load bus console address */
7794 bus->console_addr = sh->console_addr;
7795
7796 /* Read the dma rx offset */
7797 bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
7798 dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
7799
7800 DHD_INFO(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
7801
7802 bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
7803 if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
7804 {
7805 DHD_ERROR(("%s: pcie_shared version %d in dhd "
7806 "is older than pciedev_shared version %d in dongle\n",
7807 __FUNCTION__, PCIE_SHARED_VERSION,
7808 bus->api.fw_rev));
7809 return BCME_ERROR;
7810 }
7811 dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
7812
7813 bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
7814 sizeof(uint16) : sizeof(uint32);
7815 DHD_INFO(("%s: Dongle advertizes %d size indices\n",
7816 __FUNCTION__, bus->rw_index_sz));
7817
7818 #ifdef IDLE_TX_FLOW_MGMT
7819 if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
7820 DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
7821 __FUNCTION__));
7822 bus->enable_idle_flowring_mgmt = TRUE;
7823 }
7824 #endif /* IDLE_TX_FLOW_MGMT */
7825
7826 if (IDMA_CAPABLE(bus)) {
7827 if (bus->sih->buscorerev == 23) {
7828 } else {
7829 idma_en = TRUE;
7830 }
7831 }
7832
7833 if (idma_en) {
7834 bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
7835 bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
7836 }
7837
7838 bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
7839
7840 bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
7841
7842 /* Does the FW support DMA'ing r/w indices */
7843 if (sh->flags & PCIE_SHARED_DMA_INDEX) {
7844 if (!bus->dhd->dma_ring_upd_overwrite) {
7845 {
7846 if (!IFRM_ENAB(bus->dhd)) {
7847 bus->dhd->dma_h2d_ring_upd_support = TRUE;
7848 }
7849 bus->dhd->dma_d2h_ring_upd_support = TRUE;
7850 }
7851 }
7852
7853 if (bus->dhd->dma_d2h_ring_upd_support)
7854 bus->dhd->d2h_sync_mode = 0;
7855
7856 DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
7857 __FUNCTION__,
7858 (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
7859 (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
7860 } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
7861 DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
7862 __FUNCTION__));
7863 return BCME_UNSUPPORTED;
7864 } else {
7865 bus->dhd->dma_h2d_ring_upd_support = FALSE;
7866 bus->dhd->dma_d2h_ring_upd_support = FALSE;
7867 }
7868
7869 /* Does the firmware support fast delete ring? */
7870 if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
7871 DHD_INFO(("%s: Firmware supports fast delete ring\n",
7872 __FUNCTION__));
7873 bus->dhd->fast_delete_ring_support = TRUE;
7874 } else {
7875 DHD_INFO(("%s: Firmware does not support fast delete ring\n",
7876 __FUNCTION__));
7877 bus->dhd->fast_delete_ring_support = FALSE;
7878 }
7879
7880 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
7881 {
7882 ring_info_t ring_info;
7883
7884 /* boundary check */
7885 if ((sh->rings_info_ptr < bus->dongle_ram_base) || (sh->rings_info_ptr > shaddr)) {
7886 DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
7887 __FUNCTION__, sh->rings_info_ptr));
7888 return BCME_ERROR;
7889 }
7890
7891 if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
7892 (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
7893 return rv;
7894
7895 bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
7896 bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
7897
7898 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
7899 bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
7900 bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
7901 bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
7902 bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
7903 bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
7904 bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
7905 }
7906 else {
7907 bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
7908 bus->max_submission_rings = bus->max_tx_flowrings;
7909 bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
7910 bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
7911 bus->api.handle_mb_data = dhdpcie_handle_mb_data;
7912 bus->use_mailbox = TRUE;
7913 }
7914 if (bus->max_completion_rings == 0) {
7915 DHD_ERROR(("dongle completion rings are invalid %d\n",
7916 bus->max_completion_rings));
7917 return BCME_ERROR;
7918 }
7919 if (bus->max_submission_rings == 0) {
7920 DHD_ERROR(("dongle submission rings are invalid %d\n",
7921 bus->max_submission_rings));
7922 return BCME_ERROR;
7923 }
7924 if (bus->max_tx_flowrings == 0) {
7925 DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
7926 return BCME_ERROR;
7927 }
7928
7929 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
7930 * The max_sub_queues is read from FW initialized ring_info
7931 */
7932 if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
7933 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7934 H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
7935 dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7936 D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
7937
7938 if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
7939 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
7940 "Host will use w/r indices in TCM\n",
7941 __FUNCTION__));
7942 bus->dhd->dma_h2d_ring_upd_support = FALSE;
7943 bus->dhd->idma_enable = FALSE;
7944 }
7945 }
7946
7947 if (bus->dhd->dma_d2h_ring_upd_support) {
7948 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7949 D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
7950 dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7951 H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
7952
7953 if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
7954 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
7955 "Host will use w/r indices in TCM\n",
7956 __FUNCTION__));
7957 bus->dhd->dma_d2h_ring_upd_support = FALSE;
7958 }
7959 }
7960
7961 if (IFRM_ENAB(bus->dhd)) {
7962 dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
7963 H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
7964
7965 if (dma_indx_wr_buf != BCME_OK) {
7966 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
7967 __FUNCTION__));
7968 bus->dhd->ifrm_enable = FALSE;
7969 }
7970 }
7971
7972 /* read ringmem and ringstate ptrs from shared area and store in host variables */
7973 dhd_fillup_ring_sharedptr_info(bus, &ring_info);
7974 if (dhd_msg_level & DHD_INFO_VAL) {
7975 bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
7976 }
7977 DHD_INFO(("ring_info\n"));
7978
7979 DHD_ERROR(("%s: max H2D queues %d\n",
7980 __FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
7981
7982 DHD_INFO(("mail box address\n"));
7983 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
7984 __FUNCTION__, bus->h2d_mb_data_ptr_addr));
7985 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
7986 __FUNCTION__, bus->d2h_mb_data_ptr_addr));
7987 }
7988
7989 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
7990 __FUNCTION__, bus->dhd->d2h_sync_mode));
7991
7992 bus->dhd->d2h_hostrdy_supported =
7993 ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
7994
7995 bus->dhd->ext_trap_data_supported =
7996 ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
7997
7998 if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
7999 bus->dhd->pcie_txs_metadata_enable = 0;
8000
8001 #ifdef D2H_MINIDUMP
8002 bus->d2h_minidump = (sh->flags2 & PCIE_SHARED2_FW_SMALL_MEMDUMP) ? TRUE : FALSE;
8003 DHD_ERROR(("FW supports minidump ? %s \n", bus->d2h_minidump ? "Y" : "N"));
8004 if (bus->d2h_minidump_override) {
8005 bus->d2h_minidump = FALSE;
8006 }
8007 DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
8008 bus->d2h_minidump, bus->d2h_minidump_override));
8009 #endif /* D2H_MINIDUMP */
8010
8011 if (MULTIBP_ENAB(bus->sih)) {
8012 dhd_bus_pcie_pwr_req_clear(bus);
8013 }
8014 return BCME_OK;
8015 } /* dhdpcie_readshared */
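/*
 * Illustrative sketch only (not part of the driver): how dhdpcie_readshared()
 * above derives host settings from the shared 'flags' word - the firmware API
 * revision from the low bits and the read/write index width from a feature
 * bit. The mask and bit values below are hypothetical placeholders for the
 * real PCIE_SHARED_* definitions.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define SHARED_VERSION_MASK	0x00ffu	/* placeholder for PCIE_SHARED_VERSION_MASK */
#define SHARED_2BYTE_INDICES	0x0100u	/* placeholder for PCIE_SHARED_2BYTE_INDICES */

struct shared_caps {
	uint32_t fw_rev;
	size_t   rw_index_sz;
};

static struct shared_caps decode_shared_flags(uint32_t flags)
{
	struct shared_caps caps;

	caps.fw_rev = flags & SHARED_VERSION_MASK;
	caps.rw_index_sz = (flags & SHARED_2BYTE_INDICES) ?
		sizeof(uint16_t) : sizeof(uint32_t);
	return caps;
}
#endif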
8016
8017 /** Read ring mem and ring state ptr info from shared memory area in device memory */
8018 static void
8019 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
8020 {
8021 uint16 i = 0;
8022 uint16 j = 0;
8023 uint32 tcm_memloc;
8024 uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
8025 uint16 max_tx_flowrings = bus->max_tx_flowrings;
8026
8027 /* Ring mem ptr info */
8028 /* Allocated in the order
8029 H2D_MSGRING_CONTROL_SUBMIT 0
8030 H2D_MSGRING_RXPOST_SUBMIT 1
8031 D2H_MSGRING_CONTROL_COMPLETE 2
8032 D2H_MSGRING_TX_COMPLETE 3
8033 D2H_MSGRING_RX_COMPLETE 4
8034 */
8035
8036 {
8037 /* ringmemptr holds start of the mem block address space */
8038 tcm_memloc = ltoh32(ring_info->ringmem_ptr);
8039
8040 /* Find out the ringmem ptr for each common ring */
8041 for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
8042 bus->ring_sh[i].ring_mem_addr = tcm_memloc;
8043 /* Update mem block */
8044 tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
8045 DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
8046 i, bus->ring_sh[i].ring_mem_addr));
8047 }
8048 }
8049
8050 /* Ring state mem ptr info */
8051 {
8052 d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
8053 d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
8054 h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
8055 h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
8056
8057 /* Store h2d common ring write/read pointers */
8058 for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
8059 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
8060 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
8061
8062 /* update mem block */
8063 h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
8064 h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
8065
8066 DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
8067 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8068 }
8069
8070 /* Store d2h common ring write/read pointers */
8071 for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
8072 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
8073 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
8074
8075 /* update mem block */
8076 d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
8077 d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
8078
8079 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
8080 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8081 }
8082
8083 /* Store txflow ring write/read pointers */
8084 if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
8085 max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
8086 } else {
8087 /* Account for Debug info h2d ring located after the last tx flow ring */
8088 max_tx_flowrings = max_tx_flowrings + 1;
8089 }
8090 for (j = 0; j < max_tx_flowrings; i++, j++)
8091 {
8092 bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
8093 bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
8094
8095 /* update mem block */
8096 h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
8097 h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
8098
8099 DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
8100 bus->ring_sh[i].ring_state_w,
8101 bus->ring_sh[i].ring_state_r));
8102 }
8103 /* store wr/rd pointers for debug info completion ring */
8104 bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
8105 bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
8106 d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
8107 d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
8108 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
8109 bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8110 }
8111 } /* dhd_fillup_ring_sharedptr_info */
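/*
 * Illustrative sketch only (not part of the driver): the pointer walk used by
 * dhd_fillup_ring_sharedptr_info() above - each ring's write/read index lives
 * at a fixed stride (rw_index_sz) from the base addresses published by the
 * dongle. The ring_idx_t type and helper name are hypothetical.
 */
#if 0
#include <stdint.h>

typedef struct ring_idx {
	uint32_t w_addr;	/* dongle address of the write index */
	uint32_t r_addr;	/* dongle address of the read index */
} ring_idx_t;

static void fill_ring_index_addrs(ring_idx_t *rings, unsigned int nrings,
	uint32_t w_base, uint32_t r_base, uint32_t rw_index_sz)
{
	unsigned int i;

	for (i = 0; i < nrings; i++) {
		rings[i].w_addr = w_base + i * rw_index_sz;
		rings[i].r_addr = r_base + i * rw_index_sz;
	}
}
#endif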
8112
8113 /**
8114 * Initialize bus module: prepare for communication with the dongle. Called after downloading
8115 * firmware into the dongle.
8116 */
8117 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
8118 {
8119 dhd_bus_t *bus = dhdp->bus;
8120 int ret = 0;
8121
8122 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8123
8124 ASSERT(bus->dhd);
8125 if (!bus->dhd)
8126 return 0;
8127
8128 if (bus->sih->buscorerev == 66) {
8129 dhd_bus_pcie_pwr_req_clear_reload_war(bus);
8130 }
8131
8132 if (MULTIBP_ENAB(bus->sih)) {
8133 dhd_bus_pcie_pwr_req(bus);
8134 }
8135
8136 /* Configure AER registers to log the TLP header */
8137 dhd_bus_aer_config(bus);
8138
8139 /* Make sure we're talking to the core. */
8140 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
8141 ASSERT(bus->reg != NULL);
8142
8143 /* before opening up the bus for data transfer, check if the shared area is intact */
8144 ret = dhdpcie_readshared(bus);
8145 if (ret < 0) {
8146 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
8147 goto exit;
8148 }
8149
8150 /* Make sure we're talking to the core. */
8151 bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
8152 ASSERT(bus->reg != NULL);
8153
8154 dhd_init_bus_lock(bus);
8155
8156 /* Set bus state according to enable result */
8157 dhdp->busstate = DHD_BUS_DATA;
8158 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
8159 dhdp->dhd_bus_busy_state = 0;
8160
8161 /* D11 status via PCIe completion header */
8162 if ((ret = dhdpcie_init_d11status(bus)) < 0) {
8163 goto exit;
8164 }
8165
8166 if (!dhd_download_fw_on_driverload)
8167 dhd_dpc_enable(bus->dhd);
8168 /* Enable the interrupt after device is up */
8169 dhdpcie_bus_intr_enable(bus);
8170
8171 bus->intr_enabled = TRUE;
8172
8173 /* bcmsdh_intr_unmask(bus->sdh); */
8174 #ifdef DHD_PCIE_RUNTIMEPM
8175 bus->idlecount = 0;
8176 bus->idletime = (int32)MAX_IDLE_COUNT;
8177 init_waitqueue_head(&bus->rpm_queue);
8178 mutex_init(&bus->pm_lock);
8179 #else
8180 bus->idletime = 0;
8181 #endif /* DHD_PCIE_RUNTIMEPM */
8182
8183 /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
8184 if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
8185 bus->use_d0_inform = TRUE;
8186 } else {
8187 bus->use_d0_inform = FALSE;
8188 }
8189
8190 exit:
8191 if (MULTIBP_ENAB(bus->sih)) {
8192 dhd_bus_pcie_pwr_req_clear(bus);
8193 }
8194 return ret;
8195 }
8196
8197 static void
8198 dhdpcie_init_shared_addr(dhd_bus_t *bus)
8199 {
8200 uint32 addr = 0;
8201 uint32 val = 0;
8202 addr = bus->dongle_ram_base + bus->ramsize - 4;
8203 #ifdef DHD_PCIE_RUNTIMEPM
8204 dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
8205 #endif /* DHD_PCIE_RUNTIMEPM */
8206 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
8207 }
8208
8209 bool
8210 dhdpcie_chipmatch(uint16 vendor, uint16 device)
8211 {
8212 if (vendor != PCI_VENDOR_ID_BROADCOM) {
8213 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
8214 vendor, device));
8215 return (-ENODEV);
8216 }
8217
8218 if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
8219 (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
8220 (device == BCM43569_CHIP_ID)) {
8221 return 0;
8222 }
8223
8224 if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
8225 (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
8226 return 0;
8227 }
8228
8229 if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
8230 (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
8231 return 0;
8232 }
8233
8234 if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
8235 (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
8236 return 0;
8237 }
8238
8239 if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
8240 (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
8241 return 0;
8242 }
8243
8244 if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
8245 (device == BCM43452_D11AC5G_ID)) {
8246 return 0;
8247 }
8248
8249 if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
8250 (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
8251 return 0;
8252 }
8253
8254 if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
8255 (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
8256 return 0;
8257 }
8258
8259 if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
8260 (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
8261 return 0;
8262 }
8263
8264 if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
8265 (device == BCM4358_D11AC5G_ID)) {
8266 return 0;
8267 }
8268
8269 if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
8270 (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
8271 return 0;
8272 }
8273
8274 if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
8275 (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
8276 return 0;
8277 }
8278
8279 if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
8280 (device == BCM4359_D11AC5G_ID)) {
8281 return 0;
8282 }
8283
8284 if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
8285 (device == BCM43596_D11AC5G_ID)) {
8286 return 0;
8287 }
8288
8289 if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
8290 (device == BCM43597_D11AC5G_ID)) {
8291 return 0;
8292 }
8293
8294 if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
8295 (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
8296 return 0;
8297 }
8298
8299 if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
8300 (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
8301 return 0;
8302 }
8303 if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
8304 (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
8305 return 0;
8306 }
8307
8308 if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
8309 (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
8310 return 0;
8311 }
8312
8313 if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
8314 (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
8315 (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
8316 return 0;
8317 }
8318
8319 if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
8320 (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
8321 return 0;
8322 }
8323
8324 if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
8325 (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
8326 return 0;
8327 }
8328
8329 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
8330 return (-ENODEV);
8331 } /* dhdpcie_chipmatch */
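/*
 * Illustrative sketch only (not part of the driver): the long if-chain in
 * dhdpcie_chipmatch() above could equally be expressed as a table scan over
 * the supported PCI device IDs. The table contents and helper name below are
 * hypothetical placeholders; the real IDs are the BCMxxxx_* values checked
 * above.
 */
#if 0
#include <stdint.h>

static const uint16_t supported_devices[] = {
	/* fill with BCM4350_D11AC_ID, BCM4354_D11AC_ID, ... from the
	 * checks in dhdpcie_chipmatch() above
	 */
	0x0000,	/* placeholder entry */
};

static int chipmatch_sketch(uint16_t device)
{
	unsigned int i;

	for (i = 0; i < sizeof(supported_devices) / sizeof(supported_devices[0]); i++) {
		if (supported_devices[i] == device)
			return 0;	/* supported */
	}
	return -1;	/* not supported (the driver returns -ENODEV) */
}
#endif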
8332
8333 /**
8334 * Name: dhdpcie_cc_nvmshadow
8335 *
8336 * Description:
8337 * A shadow of OTP/SPROM exists in ChipCommon Region
8338 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
8339 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
8340 * can also be read from ChipCommon Registers.
8341 */
8342 static int
8343 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
8344 {
8345 uint16 dump_offset = 0;
8346 uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
8347
8348 /* Table for 65nm OTP Size (in bits) */
8349 int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
8350
8351 volatile uint16 *nvm_shadow;
8352
8353 uint cur_coreid;
8354 uint chipc_corerev;
8355 chipcregs_t *chipcregs;
8356
8357 /* Save the current core */
8358 cur_coreid = si_coreid(bus->sih);
8359 /* Switch to ChipC */
8360 chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
8361 ASSERT(chipcregs != NULL);
8362
8363 chipc_corerev = si_corerev(bus->sih);
8364
8365 /* Check ChipcommonCore Rev */
8366 if (chipc_corerev < 44) {
8367 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
8368 return BCME_UNSUPPORTED;
8369 }
8370
8371 /* Check ChipID */
8372 if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
8373 ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
8374 ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
8375 DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
8376 "4350/4345/4355/4364 only\n", __FUNCTION__));
8377 return BCME_UNSUPPORTED;
8378 }
8379
8380 /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
8381 if (chipcregs->sromcontrol & SRC_PRESENT) {
8382 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
8383 sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
8384 >> SRC_SIZE_SHIFT))) * 1024;
8385 bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
8386 }
8387
8388 if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
8389 bcm_bprintf(b, "\nOTP Present");
8390
8391 if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
8392 == OTPL_WRAP_TYPE_40NM) {
8393 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
8394 /* Chipcommon rev51 is a variation on rev45 and does not support
8395 * the latest OTP configuration.
8396 */
8397 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8398 otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
8399 >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
8400 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8401 } else {
8402 otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
8403 >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
8404 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8405 }
8406 } else {
8407 /* This part is untested since newer chips have 40nm OTP */
8408 /* Chipcommon rev51 is a variation on rev45 and does not support
8409 * the latest OTP configuration.
8410 */
8411 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8412 otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
8413 >> OTPL_ROW_SIZE_SHIFT];
8414 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8415 } else {
8416 otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
8417 >> CC_CAP_OTPSIZE_SHIFT];
8418 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
8419 DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
8420 __FUNCTION__));
8421 }
8422 }
8423 }
8424
8425 /* Chipcommon rev51 is a variation on rev45 and does not support
8426 * the latest OTP configuration.
8427 */
8428 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8429 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
8430 ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
8431 DHD_ERROR(("%s: SPROM and OTP could not be found "
8432 "sromcontrol = %x, otplayout = %x \n",
8433 __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
8434 return BCME_NOTFOUND;
8435 }
8436 } else {
8437 if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
8438 ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
8439 DHD_ERROR(("%s: SPROM and OTP could not be found "
8440 "sromcontrol = %x, capablities = %x \n",
8441 __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
8442 return BCME_NOTFOUND;
8443 }
8444 }
8445
8446 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
8447 if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
8448 (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
8449
8450 bcm_bprintf(b, "OTP Strap selected.\n"
8451 "\nOTP Shadow in ChipCommon:\n");
8452
8453 dump_size = otp_size / 16 ; /* 16bit words */
8454
8455 } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
8456 (chipcregs->sromcontrol & SRC_PRESENT)) {
8457
8458 bcm_bprintf(b, "SPROM Strap selected\n"
8459 "\nSPROM Shadow in ChipCommon:\n");
8460
8461 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
8462 /* dump_size in 16bit words */
8463 dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
8464 } else {
8465 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
8466 __FUNCTION__));
8467 return BCME_NOTFOUND;
8468 }
8469
8470 if (bus->regs == NULL) {
8471 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
8472 return BCME_NOTREADY;
8473 } else {
8474 bcm_bprintf(b, "\n OffSet:");
8475
8476 /* Chipcommon rev51 is a variation on rev45 and does not support
8477 * the latest OTP configuration.
8478 */
8479 if (chipc_corerev != 51 && chipc_corerev >= 49) {
8480 /* Chip common can read only 8kbits,
8481 * for ccrev >= 49 otp size is around 12 kbits so use GCI core
8482 */
8483 nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
8484 } else {
8485 /* Point to the SPROM/OTP shadow in ChipCommon */
8486 nvm_shadow = chipcregs->sromotp;
8487 }
8488
8489 if (nvm_shadow == NULL) {
8490 DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
8491 return BCME_NOTFOUND;
8492 }
8493
8494 /*
8495 * Read 16 bits / iteration.
8496 * dump_size & dump_offset in 16-bit words
8497 */
8498 while (dump_offset < dump_size) {
8499 if (dump_offset % 2 == 0)
8500 /* Print the offset in the shadow space in Bytes */
8501 bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
8502
8503 bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
8504 dump_offset += 0x1;
8505 }
8506 }
8507
8508 /* Switch back to the original core */
8509 si_setcore(bus->sih, cur_coreid, 0);
8510
8511 return BCME_OK;
8512 } /* dhdpcie_cc_nvmshadow */
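/*
 * Illustrative sketch only (not part of the driver): the SPROM size decode
 * used by dhdpcie_cc_nvmshadow() above - the 2-bit size field in sromcontrol
 * selects 1 Kbit (0x0), 4 Kbits (0x1) or 16 Kbits (0x2) via
 * (1 << (2 * field)) * 1024. The mask and shift values here are hypothetical
 * placeholders for SRC_SIZE_MASK / SRC_SIZE_SHIFT.
 */
#if 0
#include <stdint.h>

#define SIZE_MASK	0x6u	/* placeholder for SRC_SIZE_MASK */
#define SIZE_SHIFT	1u	/* placeholder for SRC_SIZE_SHIFT */

static uint32_t sprom_size_bits(uint32_t sromcontrol)
{
	uint32_t field = (sromcontrol & SIZE_MASK) >> SIZE_SHIFT;

	/* field 0 -> 1 Kbit, 1 -> 4 Kbits, 2 -> 16 Kbits */
	return (1u << (2u * field)) * 1024u;
}
#endif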
8513
8514 /** Flow rings are dynamically created and destroyed */
8515 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
8516 {
8517 void *pkt;
8518 flow_queue_t *queue;
8519 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
8520 unsigned long flags;
8521
8522 queue = &flow_ring_node->queue;
8523
8524 #ifdef DHDTCPACK_SUPPRESS
8525 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
8526 * when a new packet comes in from the network stack.
8527 */
8528 dhd_tcpack_info_tbl_clean(bus->dhd);
8529 #endif /* DHDTCPACK_SUPPRESS */
8530
8531 /* clean up BUS level info */
8532 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8533
8534 /* Flush all pending packets in the queue, if any */
8535 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
8536 PKTFREE(bus->dhd->osh, pkt, TRUE);
8537 }
8538 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
8539
8540 /* Reinitialise flowring's queue */
8541 dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
8542 flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
8543 flow_ring_node->active = FALSE;
8544
8545 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8546
8547 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
8548 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8549 dll_delete(&flow_ring_node->list);
8550 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8551
8552 /* Release the flowring object back into the pool */
8553 dhd_prot_flowrings_pool_release(bus->dhd,
8554 flow_ring_node->flowid, flow_ring_node->prot_info);
8555
8556 /* Free the flowid back to the flowid allocator */
8557 dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
8558 flow_ring_node->flowid);
8559 }
8560
8561 /**
8562 * Allocate a Flow ring buffer,
8563 * Init Ring buffer, send Msg to device about flow ring creation
8564 */
8565 int
8566 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
8567 {
8568 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
8569
8570 DHD_INFO(("%s :Flow create\n", __FUNCTION__));
8571
8572 /* Send Msg to device about flow ring creation */
8573 if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
8574 return BCME_NOMEM;
8575
8576 return BCME_OK;
8577 }
8578
8579 /** Handle response from dongle on a 'flow ring create' request */
8580 void
8581 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
8582 {
8583 flow_ring_node_t *flow_ring_node;
8584 unsigned long flags;
8585
8586 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
8587
8588 /* Boundary check of the flowid */
8589 if (flowid >= bus->dhd->num_flow_rings) {
8590 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
8591 flowid, bus->dhd->num_flow_rings));
8592 return;
8593 }
8594
8595 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8596 if (!flow_ring_node) {
8597 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
8598 return;
8599 }
8600
8601 ASSERT(flow_ring_node->flowid == flowid);
8602 if (flow_ring_node->flowid != flowid) {
8603 DHD_ERROR(("%s: flowid %d is different from the flowid "
8604 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
8605 flow_ring_node->flowid));
8606 return;
8607 }
8608
8609 if (status != BCME_OK) {
8610 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
8611 __FUNCTION__, status));
8612 /* Call Flow clean up */
8613 dhd_bus_clean_flow_ring(bus, flow_ring_node);
8614 return;
8615 }
8616
8617 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8618 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
8619 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8620
8621 /* Now add the flow ring node to the active list.
8622 * Note that the code which adds the newly created node to the
8623 * active list used to live in dhd_flowid_lookup. However, the
8624 * contents of the node are only filled in by
8625 * dhd_prot_flow_ring_create after the node has been added to the
8626 * active list.
8627 * If a D2H interrupt arrives after the node has been added to the
8628 * active list but before it has been populated, the bottom half
8629 * calls dhd_update_txflowrings, which then walks the active flow
8630 * ring list, picks up the nodes and operates on them.
8631 * Since dhd_prot_flow_ring_create has not finished yet, the
8632 * contents of flow_ring_node can still be NULL, leading to
8633 * crashes. Hence the flow_ring_node should be added to the active
8634 * list only after it is truly created, which is after the flow
8635 * ring create response message has been received.
8636 */
8637 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8638 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
8639 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8640
8641 dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
8642
8643 return;
8644 }
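/*
 * Illustrative sketch only (not part of the driver): the ordering rule spelled
 * out in the comment inside dhd_bus_flow_ring_create_response() above - fully
 * populate a node before publishing it to a list that another context walks
 * under the same lock. All names below are hypothetical.
 */
#if 0
#include <stddef.h>

struct node {
	struct node *next;
	void *prot_info;	/* must be valid before the node is visible */
};

extern void list_lock(void);
extern void list_unlock(void);
extern void list_prepend(struct node *n);

static void publish_node(struct node *n, void *prot_info)
{
	/* 1. finish initialising the node ... */
	n->prot_info = prot_info;

	/* 2. ... and only then make it visible to the list walker */
	list_lock();
	list_prepend(n);
	list_unlock();
}
#endif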
8645
8646 int
8647 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
8648 {
8649 void * pkt;
8650 flow_queue_t *queue;
8651 flow_ring_node_t *flow_ring_node;
8652 unsigned long flags;
8653
8654 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
8655
8656 flow_ring_node = (flow_ring_node_t *)arg;
8657
8658 #ifdef DHDTCPACK_SUPPRESS
8659 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
8660 * when a new packet comes in from the network stack.
8661 */
8662 dhd_tcpack_info_tbl_clean(bus->dhd);
8663 #endif /* DHDTCPACK_SUPPRESS */
8664 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8665 if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
8666 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8667 DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
8668 return BCME_ERROR;
8669 }
8670 flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
8671
8672 queue = &flow_ring_node->queue; /* queue associated with flow ring */
8673
8674 /* Flush all pending packets in the queue, if any */
8675 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
8676 PKTFREE(bus->dhd->osh, pkt, TRUE);
8677 }
8678 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
8679
8680 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8681
8682 /* Send Msg to device about flow ring deletion */
8683 dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
8684
8685 return BCME_OK;
8686 }
8687
8688 void
8689 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
8690 {
8691 flow_ring_node_t *flow_ring_node;
8692
8693 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
8694
8695 /* Boundary check of the flowid */
8696 if (flowid >= bus->dhd->num_flow_rings) {
8697 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
8698 flowid, bus->dhd->num_flow_rings));
8699 return;
8700 }
8701
8702 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8703 if (!flow_ring_node) {
8704 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
8705 return;
8706 }
8707
8708 ASSERT(flow_ring_node->flowid == flowid);
8709 if (flow_ring_node->flowid != flowid) {
8710 DHD_ERROR(("%s: flowid %d is different from the flowid "
8711 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
8712 flow_ring_node->flowid));
8713 return;
8714 }
8715
8716 if (status != BCME_OK) {
8717 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
8718 __FUNCTION__, status));
8719 return;
8720 }
8721
8722 if (flow_ring_node->status != FLOW_RING_STATUS_DELETE_PENDING) {
8723 DHD_ERROR(("%s: invalid state flowid = %d, status = %d\n",
8724 __FUNCTION__, flowid, flow_ring_node->status));
8725 return;
8726 }
8727
8728 /* Call Flow clean up */
8729 dhd_bus_clean_flow_ring(bus, flow_ring_node);
8730
8731 return;
8732
8733 }
8734
8735 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
8736 {
8737 void *pkt;
8738 flow_queue_t *queue;
8739 flow_ring_node_t *flow_ring_node;
8740 unsigned long flags;
8741
8742 DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
8743
8744 flow_ring_node = (flow_ring_node_t *)arg;
8745
8746 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
8747 queue = &flow_ring_node->queue; /* queue associated with flow ring */
8748 /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
8749 * once flow ring flush response is received for this flowring node.
8750 */
8751 flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
8752
8753 #ifdef DHDTCPACK_SUPPRESS
8754 /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
8755 * when a new packet comes in from the network stack.
8756 */
8757 dhd_tcpack_info_tbl_clean(bus->dhd);
8758 #endif /* DHDTCPACK_SUPPRESS */
8759
8760 /* Flush all pending packets in the queue, if any */
8761 while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
8762 PKTFREE(bus->dhd->osh, pkt, TRUE);
8763 }
8764 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
8765
8766 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
8767
8768 /* Send Msg to device about flow ring flush */
8769 dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
8770
8771 return BCME_OK;
8772 }
8773
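/* Handle the flow ring flush response from the dongle and set the flow ring
 * back to the OPEN state.
 */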
8774 void
8775 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
8776 {
8777 flow_ring_node_t *flow_ring_node;
8778
8779 if (status != BCME_OK) {
8780 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
8781 __FUNCTION__, status));
8782 return;
8783 }
8784
8785 /* Boundary check of the flowid */
8786 if (flowid >= bus->dhd->num_flow_rings) {
8787 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
8788 flowid, bus->dhd->num_flow_rings));
8789 return;
8790 }
8791
8792 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8793 if (!flow_ring_node) {
8794 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
8795 return;
8796 }
8797
8798 ASSERT(flow_ring_node->flowid == flowid);
8799 if (flow_ring_node->flowid != flowid) {
8800 DHD_ERROR(("%s: flowid %d is different from the flowid "
8801 "of the flow_ring_node %d\n", __FUNCTION__, flowid,
8802 flow_ring_node->flowid));
8803 return;
8804 }
8805
8806 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
8807 return;
8808 }
8809
8810 uint32
8811 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
8812 {
8813 return bus->max_submission_rings;
8814 }
8815
8816 /* To be symmetric with SDIO */
8817 void
8818 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
8819 {
8820 return;
8821 }
8822
8823 void
8824 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
8825 {
8826 dhdp->bus->is_linkdown = val;
8827 }
8828
8829 int
8830 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
8831 {
8832 return dhdp->bus->is_linkdown;
8833 }
8834
8835 #ifdef IDLE_TX_FLOW_MGMT
8836 /* resume request */
8837 int
8838 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
8839 {
8840 flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
8841
8842 DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
8843
8844 flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
8845
8846 /* Send Msg to device about flow ring resume */
8847 dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
8848
8849 return BCME_OK;
8850 }
8851
8852 /* add the node back to active flowring */
8853 void
8854 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
8855 {
8856
8857 flow_ring_node_t *flow_ring_node;
8858
8859 DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
8860
8861 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
8862 ASSERT(flow_ring_node->flowid == flowid);
8863
8864 if (status != BCME_OK) {
8865 DHD_ERROR(("%s Error Status = %d \n",
8866 __FUNCTION__, status));
8867 return;
8868 }
8869
8870 DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
8871 __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));
8872
8873 flow_ring_node->status = FLOW_RING_STATUS_OPEN;
8874
8875 dhd_bus_schedule_queue(bus, flowid, FALSE);
8876 return;
8877 }
8878
8879 /* scan the flow rings in the active list for idle timeout */
8880 void
8881 dhd_bus_check_idle_scan(dhd_bus_t *bus)
8882 {
8883 uint64 time_stamp; /* in millisec */
8884 uint64 diff;
8885
8886 time_stamp = OSL_SYSUPTIME();
8887 diff = time_stamp - bus->active_list_last_process_ts;
8888
8889 if (diff > IDLE_FLOW_LIST_TIMEOUT) {
8890 dhd_bus_idle_scan(bus);
8891 bus->active_list_last_process_ts = OSL_SYSUPTIME();
8892 }
8893
8894 return;
8895 }
8896
8897 /* scan the nodes in the active list until a non-idle node is found */
8898 void
8899 dhd_bus_idle_scan(dhd_bus_t *bus)
8900 {
8901 dll_t *item, *prev;
8902 flow_ring_node_t *flow_ring_node;
8903 uint64 time_stamp, diff;
8904 unsigned long flags;
8905 uint16 ringid[MAX_SUSPEND_REQ];
8906 uint16 count = 0;
8907
8908 time_stamp = OSL_SYSUPTIME();
8909 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8910
8911 for (item = dll_tail_p(&bus->flowring_active_list);
8912 !dll_end(&bus->flowring_active_list, item); item = prev) {
8913 prev = dll_prev_p(item);
8914
8915 flow_ring_node = dhd_constlist_to_flowring(item);
8916
8917 if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
8918 continue;
8919
8920 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
8921 /* Takes care of deleting zombie rings */
8922 /* delete from the active list */
8923 DHD_INFO(("deleting flow id %u from active list\n",
8924 flow_ring_node->flowid));
8925 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
8926 continue;
8927 }
8928
8929 diff = time_stamp - flow_ring_node->last_active_ts;
8930
8931 if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
8932 DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
8933 /* delete from the active list */
8934 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
8935 flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
8936 ringid[count] = flow_ring_node->flowid;
8937 count++;
8938 if (count == MAX_SUSPEND_REQ) {
8939 /* create a batch message now!! */
8940 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
8941 count = 0;
8942 }
8943
8944 } else {
8945
8946 /* No more scanning, break from here! */
8947 break;
8948 }
8949 }
8950
8951 if (count) {
8952 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
8953 }
8954
8955 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8956
8957 return;
8958 }
8959
8960 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8961 {
8962 unsigned long flags;
8963 dll_t* list;
8964
8965 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8966 /* check if the node is already at head, otherwise delete it and prepend */
8967 list = dll_head_p(&bus->flowring_active_list);
8968 if (&flow_ring_node->list != list) {
8969 dll_delete(&flow_ring_node->list);
8970 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
8971 }
8972
8973 /* update flow ring timestamp */
8974 flow_ring_node->last_active_ts = OSL_SYSUPTIME();
8975
8976 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8977
8978 return;
8979 }
8980
8981 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8982 {
8983 unsigned long flags;
8984
8985 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8986
8987 dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
8988 /* update flow ring timestamp */
8989 flow_ring_node->last_active_ts = OSL_SYSUPTIME();
8990
8991 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8992
8993 return;
8994 }
8995 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
8996 {
8997 dll_delete(&flow_ring_node->list);
8998 }
8999
9000 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
9001 {
9002 unsigned long flags;
9003
9004 DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9005
9006 __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9007
9008 DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9009
9010 return;
9011 }
9012 #endif /* IDLE_TX_FLOW_MGMT */
9013
9014 int
9015 dhdpcie_bus_clock_start(struct dhd_bus *bus)
9016 {
9017 return dhdpcie_start_host_pcieclock(bus);
9018 }
9019
9020 int
9021 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
9022 {
9023 return dhdpcie_stop_host_pcieclock(bus);
9024 }
9025
9026 int
9027 dhdpcie_bus_disable_device(struct dhd_bus *bus)
9028 {
9029 return dhdpcie_disable_device(bus);
9030 }
9031
9032 int
9033 dhdpcie_bus_enable_device(struct dhd_bus *bus)
9034 {
9035 return dhdpcie_enable_device(bus);
9036 }
9037
9038 int
9039 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
9040 {
9041 return dhdpcie_alloc_resource(bus);
9042 }
9043
9044 void
9045 dhdpcie_bus_free_resource(struct dhd_bus *bus)
9046 {
9047 dhdpcie_free_resource(bus);
9048 }
9049
9050 int
9051 dhd_bus_request_irq(struct dhd_bus *bus)
9052 {
9053 return dhdpcie_bus_request_irq(bus);
9054 }
9055
9056 bool
9057 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
9058 {
9059 return dhdpcie_dongle_attach(bus);
9060 }
9061
9062 int
9063 dhd_bus_release_dongle(struct dhd_bus *bus)
9064 {
9065 bool dongle_isolation;
9066 osl_t *osh;
9067
9068 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9069
9070 if (bus) {
9071 osh = bus->osh;
9072 ASSERT(osh);
9073
9074 if (bus->dhd) {
9075 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
9076 debugger_close();
9077 #endif /* DEBUGGER || DHD_DSCOPE */
9078
9079 dongle_isolation = bus->dhd->dongle_isolation;
9080 dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
9081 }
9082 }
9083
9084 return 0;
9085 }
9086
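/* Enable or disable PCIe CTO (completion timeout) detection and program the
 * timeout threshold in the PCIe core.
 */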
9087 void
9088 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
9089 {
9090 uint32 val;
9091
9092 if (enable) {
9093 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
9094 PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
9095 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9096 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
9097 if (bus->cto_threshold == 0) {
9098 bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
9099 }
9100
9101 si_corereg(bus->sih, bus->sih->buscoreidx,
9102 OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
9103 ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
9104 PCIE_CTO_TO_THRESHHOLD_MASK) |
9105 ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
9106 PCIE_CTO_CLKCHKCNT_MASK) |
9107 PCIE_CTO_ENAB_MASK);
9108 } else {
9109 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
9110 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9111 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
9112
9113 si_corereg(bus->sih, bus->sih->buscoreidx,
9114 OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
9115 }
9116 }
9117
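/* Recover from a CTO event: mask the CTO interrupt, dump the DAR registers,
 * reset the backplane and clear the logged timeout error.
 */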
9118 static void
9119 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
9120 {
9121 uint32 pci_intmask, err_status, dar_val;
9122 uint8 i = 0;
9123 uint32 val;
9124
9125 pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
9126 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
9127
9128 DHD_OS_WAKE_LOCK(bus->dhd);
9129
9130 DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
9131
9132 /*
9133 * DAR still accessible
9134 */
9135 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9136 DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0);
9137 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_CLK_CTRL(bus->sih->buscorerev), dar_val));
9138
9139 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9140 DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), 0, 0);
9141 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIE_PWR_CTRL(bus->sih->buscorerev), dar_val));
9142
9143 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9144 DAR_INTSTAT(bus->sih->buscorerev), 0, 0);
9145 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_INTSTAT(bus->sih->buscorerev), dar_val));
9146
9147 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9148 DAR_ERRLOG(bus->sih->buscorerev), 0, 0);
9149 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRLOG(bus->sih->buscorerev), dar_val));
9150
9151 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9152 DAR_ERRADDR(bus->sih->buscorerev), 0, 0);
9153 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_ERRADDR(bus->sih->buscorerev), dar_val));
9154
9155 dar_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9156 DAR_PCIMailBoxInt(bus->sih->buscorerev), 0, 0);
9157 DHD_ERROR((" 0x%x:0x%x\n", (uint32) DAR_PCIMailBoxInt(bus->sih->buscorerev), dar_val));
9158
9159 /* reset backplane */
9160 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9161 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
9162
9163 /* clear timeout error */
9164 while (1) {
9165 err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
9166 DAR_ERRLOG(bus->sih->buscorerev),
9167 0, 0);
9168 if (err_status & PCIE_CTO_ERR_MASK) {
9169 si_corereg(bus->sih, bus->sih->buscoreidx,
9170 DAR_ERRLOG(bus->sih->buscorerev),
9171 ~0, PCIE_CTO_ERR_MASK);
9172 } else {
9173 break;
9174 }
9175 OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
9176 i++;
9177 if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
9178 DHD_ERROR(("cto recovery fail\n"));
9179
9180 DHD_OS_WAKE_UNLOCK(bus->dhd);
9181 return;
9182 }
9183 }
9184
9185 /* clear interrupt status */
9186 dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
9187
9188 /* Halt ARM & remove reset */
9189 /* TBD: an ARM halt could be added here if needed */
9190
9191 /* reset SPROM_CFG_TO_SB_RST */
9192 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9193
9194 DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9195 PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
9196 dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
9197
9198 val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9199 DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9200 PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
9201
9202 DHD_OS_WAKE_UNLOCK(bus->dhd);
9203 }
9204
9205 void
9206 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
9207 {
9208 uint32 val;
9209
9210 val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
9211 dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
9212 val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
9213 }
9214
9215 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
9216 static int
9217 dhdpcie_init_d11status(struct dhd_bus *bus)
9218 {
9219 uint32 addr;
9220 uint32 flags2;
9221 int ret = 0;
9222
9223 if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
9224 flags2 = bus->pcie_sh->flags2;
9225 addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
9226 flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
9227 ret = dhdpcie_bus_membytes(bus, TRUE, addr,
9228 (uint8 *)&flags2, sizeof(flags2));
9229 if (ret < 0) {
9230 DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
9231 __FUNCTION__));
9232 return ret;
9233 }
9234 bus->pcie_sh->flags2 = flags2;
9235 bus->dhd->d11_tx_status = TRUE;
9236 }
9237 return ret;
9238 }
9239
9240 #else
9241 static int
9242 dhdpcie_init_d11status(struct dhd_bus *bus)
9243 {
9244 return 0;
9245 }
9246 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
9247
9248 #ifdef BCMPCIE_OOB_HOST_WAKE
9249 int
9250 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
9251 {
9252 return dhdpcie_oob_intr_register(dhdp->bus);
9253 }
9254
9255 void
9256 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
9257 {
9258 dhdpcie_oob_intr_unregister(dhdp->bus);
9259 }
9260
9261 void
9262 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
9263 {
9264 dhdpcie_oob_intr_set(dhdp->bus, enable);
9265 }
9266 #endif /* BCMPCIE_OOB_HOST_WAKE */
9267
9268 bool
9269 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
9270 {
9271 return bus->dhd->d2h_hostrdy_supported;
9272 }
9273
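/* Dump a range of registers of the core selected by 'index' using backplane
 * accesses.
 */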
9274 void
9275 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
9276 {
9277 dhd_bus_t *bus = pub->bus;
9278 uint32 coreoffset = index << 12;
9279 uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
9280 uint32 value;
9281
9282 while (first_addr <= last_addr) {
9283 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
9284 if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
9285 DHD_ERROR(("Invalid size/addr combination \n"));
9286 }
9287 DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
9288 first_addr = first_addr + 4;
9289 }
9290 }
9291
9292 bool
9293 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
9294 {
9295 if (!bus->dhd)
9296 return FALSE;
9297 else if (bus->idma_enabled) {
9298 return bus->dhd->idma_enable;
9299 } else {
9300 return FALSE;
9301 }
9302 }
9303
9304 bool
9305 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
9306 {
9307 if (!bus->dhd)
9308 return FALSE;
9309 else if (bus->ifrm_enabled) {
9310 return bus->dhd->ifrm_enable;
9311 } else {
9312 return FALSE;
9313 }
9314 }
9315
9316 bool
9317 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
9318 {
9319 if (!bus->dhd) {
9320 return FALSE;
9321 } else if (bus->dar_enabled) {
9322 return bus->dhd->dar_enable;
9323 } else {
9324 return FALSE;
9325 }
9326 }
9327
9328 void
9329 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
9330 {
9331 DHD_ERROR(("ENABLING DW:%d\n", dw_option));
9332 bus->dw_option = dw_option;
9333 }
9334
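/* Print the last trap information saved from the dongle (trap type, PC,
 * status registers and general purpose registers).
 */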
9335 void
9336 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
9337 {
9338 trap_t *tr = &bus->dhd->last_trap_info;
9339 bcm_bprintf(strbuf,
9340 "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
9341 " lp 0x%x, rpc 0x%x"
9342 "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
9343 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
9344 "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
9345 ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
9346 ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
9347 ltoh32(bus->pcie_sh->trap_addr),
9348 ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
9349 ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
9350 ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
9351 ltoh32(tr->r11), ltoh32(tr->r12));
9352 }
9353
9354 int
9355 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
9356 {
9357 int bcmerror = 0;
9358 struct dhd_bus *bus = dhdp->bus;
9359
9360 if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
9361 DHD_ERROR(("Invalid size/addr combination \n"));
9362 bcmerror = BCME_ERROR;
9363 }
9364
9365 return bcmerror;
9366 }
9367
9368 int
9369 dhd_get_idletime(dhd_pub_t *dhd)
9370 {
9371 return dhd->bus->idletime;
9372 }
9373
9374 #ifdef DHD_SSSR_DUMP
9375
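/* Read or write a single backplane register and log the access */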
9376 static INLINE void
9377 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
9378 {
9379 OSL_DELAY(1);
9380 si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
9381 DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
9382 return;
9383 }
9384
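/* Dump an SSSR FIFO: reset the address register to 0, then read fifo_size
 * bytes, 4 bytes at a time, from the data register.
 */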
9385 static int
9386 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
9387 uint addr_reg, uint data_reg)
9388 {
9389 uint addr;
9390 uint val = 0;
9391 int i;
9392
9393 DHD_ERROR(("%s\n", __FUNCTION__));
9394
9395 if (!buf) {
9396 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
9397 return BCME_ERROR;
9398 }
9399
9400 if (!fifo_size) {
9401 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
9402 return BCME_ERROR;
9403 }
9404
9405 /* Set the base address offset to 0 */
9406 addr = addr_reg;
9407 val = 0;
9408 dhd_sbreg_op(dhd, addr, &val, FALSE);
9409
9410 addr = data_reg;
9411 /* Read 4 bytes at once and loop for fifo_size / 4 */
9412 for (i = 0; i < fifo_size / 4; i++) {
9413 si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
9414 buf[i] = val;
9415 OSL_DELAY(1);
9416 }
9417 return BCME_OK;
9418 }
9419
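/* Dump the digital (VASIP/DIG) SR memory: read it directly from dongle
 * memory, via backplane reads from addr_reg, or through the chipcommon
 * sr_memrw registers when no address register is provided.
 */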
9420 static int
9421 dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
9422 uint addr_reg)
9423 {
9424 uint addr;
9425 uint val = 0;
9426 int i;
9427 si_t *sih = dhd->bus->sih;
9428
9429 DHD_ERROR(("%s\n", __FUNCTION__));
9430
9431 if (!buf) {
9432 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
9433 return BCME_ERROR;
9434 }
9435
9436 if (!fifo_size) {
9437 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
9438 return BCME_ERROR;
9439 }
9440
9441 if (addr_reg) {
9442
9443 if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
9444 dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
9445 dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, fifo_size);
9446 } else {
9447 /* Check if vasip clk is disabled, if yes enable it */
9448 addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
9449 dhd_sbreg_op(dhd, addr, &val, TRUE);
9450 if (!val) {
9451 val = 1;
9452 dhd_sbreg_op(dhd, addr, &val, FALSE);
9453 }
9454
9455 addr = addr_reg;
9456 /* Read 4 bytes at once and loop for fifo_size / 4 */
9457 for (i = 0; i < fifo_size / 4; i++, addr += 4) {
9458 si_backplane_access(sih, addr, sizeof(uint), &val, TRUE);
9459 buf[i] = val;
9460 OSL_DELAY(1);
9461 }
9462 }
9463 } else {
9464 uint cur_coreid;
9465 uint chipc_corerev;
9466 chipcregs_t *chipcregs;
9467
9468 /* Save the current core */
9469 cur_coreid = si_coreid(sih);
9470
9471 /* Switch to ChipC */
9472 chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
9473
9474 chipc_corerev = si_corerev(sih);
9475
9476 if (chipc_corerev == 64) {
9477 W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
9478
9479 /* Read 4 bytes at once and loop for fifo_size / 4 */
9480 for (i = 0; i < fifo_size / 4; i++) {
9481 buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
9482 OSL_DELAY(1);
9483 }
9484 }
9485
9486 /* Switch back to the original core */
9487 si_setcore(sih, cur_coreid, 0);
9488 }
9489
9490 return BCME_OK;
9491 }
9492
9493 #if defined(BCMPCIE) && defined(DHD_LOG_DUMP)
9494 void
9495 dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
9496 uint8 *ext_trap_data, void *event_decode_data)
9497 {
9498 hnd_ext_trap_hdr_t *hdr = NULL;
9499 bcm_tlv_t *tlv;
9500 eventlog_trapdata_info_t *etd_evtlog = NULL;
9501 eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
9502 uint arr_size = 0;
9503 int i = 0;
9504 int err = 0;
9505 uint32 seqnum = 0;
9506
9507 if (!ext_trap_data || !event_decode_data || !dhd)
9508 return;
9509
9510 if (!dhd->concise_dbg_buf)
9511 return;
9512
9513 /* First word is original trap_data, skip */
9514 ext_trap_data += sizeof(uint32);
9515
9516 hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
9517 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
9518 if (tlv) {
9519 uint32 baseaddr = 0;
9520 uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
9521
9522 etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
9523 DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
9524 "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
9525 (etd_evtlog->num_elements),
9526 ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
9527 arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
9528 if (!arr_size) {
9529 DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
9530 return;
9531 }
9532 evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
9533 if (!evtlog_buf_arr) {
9534 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
9535 return;
9536 }
9537
9538 /* boundary check */
9539 baseaddr = etd_evtlog->log_arr_addr;
9540 if ((baseaddr < dhd->bus->dongle_ram_base) ||
9541 ((baseaddr + arr_size) > endaddr)) {
9542 DHD_ERROR(("%s: Error reading invalid address\n",
9543 __FUNCTION__));
9544 goto err;
9545 }
9546
9547 /* read the eventlog_trap_buf_info_t array from dongle memory */
9548 err = dhdpcie_bus_membytes(dhd->bus, FALSE,
9549 (ulong)(etd_evtlog->log_arr_addr),
9550 (uint8 *)evtlog_buf_arr, arr_size);
9551 if (err != BCME_OK) {
9552 DHD_ERROR(("%s: Error reading event log array from dongle !\n",
9553 __FUNCTION__));
9554 goto err;
9555 }
9556 /* ntoh is required only for seq_num: event logs from the info ring are
9557 * sent by the dongle in that byte order, and for ETD the dongle follows
9558 * the same convention.
9559 */
9560 seqnum = ntoh32(etd_evtlog->seq_num);
9561 memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
9562 for (i = 0; i < (etd_evtlog->num_elements); ++i) {
9563 /* boundary check */
9564 baseaddr = evtlog_buf_arr[i].buf_addr;
9565 if ((baseaddr < dhd->bus->dongle_ram_base) ||
9566 ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
9567 DHD_ERROR(("%s: Error reading invalid address\n",
9568 __FUNCTION__));
9569 goto err;
9570 }
9571 /* read each individual event log buf from dongle memory */
9572 err = dhdpcie_bus_membytes(dhd->bus, FALSE,
9573 ((ulong)evtlog_buf_arr[i].buf_addr),
9574 dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
9575 if (err != BCME_OK) {
9576 DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
9577 __FUNCTION__));
9578 goto err;
9579 }
9580 dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
9581 event_decode_data, (evtlog_buf_arr[i].len),
9582 FALSE, hton32(seqnum));
9583 ++seqnum;
9584 }
9585 err:
9586 MFREE(dhd->osh, evtlog_buf_arr, arr_size);
9587 } else {
9588 DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
9589 }
9590 }
9591 #endif /* BCMPCIE && DHD_LOG_DUMP */
9592
9593 static int
9594 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
9595 {
9596 uint addr;
9597 uint val;
9598
9599 DHD_ERROR(("%s\n", __FUNCTION__));
9600
9601 /* conditionally restore (set) bits [11:8] of PowerCtrl */
9602 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9603 dhd_sbreg_op(dhd, addr, &val, TRUE);
9604 if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
9605 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9606 val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
9607 dhd_sbreg_op(dhd, addr, &val, FALSE);
9608 }
9609 return BCME_OK;
9610 }
9611
9612 static int
9613 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
9614 {
9615 uint addr;
9616 uint val;
9617
9618 DHD_ERROR(("%s\n", __FUNCTION__));
9619
9620 /* conditionally clear bits [11:8] of PowerCtrl */
9621 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9622 dhd_sbreg_op(dhd, addr, &val, TRUE);
9623 if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
9624 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
9625 val = 0;
9626 dhd_sbreg_op(dhd, addr, &val, FALSE);
9627 }
9628 return BCME_OK;
9629 }
9630
9631 static int
9632 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
9633 {
9634 uint addr;
9635 uint val;
9636
9637 DHD_ERROR(("%s\n", __FUNCTION__));
9638
9639 /* clear chipcommon intmask */
9640 addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
9641 val = 0x0;
9642 dhd_sbreg_op(dhd, addr, &val, FALSE);
9643
9644 /* clear PMUIntMask0 */
9645 addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
9646 val = 0x0;
9647 dhd_sbreg_op(dhd, addr, &val, FALSE);
9648
9649 /* clear PMUIntMask1 */
9650 addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
9651 val = 0x0;
9652 dhd_sbreg_op(dhd, addr, &val, FALSE);
9653
9654 /* clear res_req_timer */
9655 addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
9656 val = 0x0;
9657 dhd_sbreg_op(dhd, addr, &val, FALSE);
9658
9659 /* clear macresreqtimer */
9660 addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
9661 val = 0x0;
9662 dhd_sbreg_op(dhd, addr, &val, FALSE);
9663
9664 /* clear macresreqtimer1 */
9665 addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
9666 val = 0x0;
9667 dhd_sbreg_op(dhd, addr, &val, FALSE);
9668
9669 /* clear VasipClkEn */
9670 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
9671 addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
9672 val = 0x0;
9673 dhd_sbreg_op(dhd, addr, &val, FALSE);
9674 }
9675
9676 return BCME_OK;
9677 }
9678
9679 static int
9680 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
9681 {
9682 int i;
9683 uint addr;
9684 uint val = 0;
9685
9686 DHD_ERROR(("%s\n", __FUNCTION__));
9687
9688 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9689 /* Check if bit 0 of resetctrl is cleared */
9690 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
9691 if (!addr) {
9692 DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
9693 __FUNCTION__, i));
9694 /* ignore invalid address */
9695 dhd->sssr_d11_outofreset[i] = FALSE;
9696 continue;
9697 }
9698 dhd_sbreg_op(dhd, addr, &val, TRUE);
9699 if (!(val & 1)) {
9700 dhd->sssr_d11_outofreset[i] = TRUE;
9701 } else {
9702 dhd->sssr_d11_outofreset[i] = FALSE;
9703 }
9704 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
9705 __FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
9706 }
9707 return BCME_OK;
9708 }
9709
9710 static int
9711 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
9712 {
9713 int i;
9714 uint addr;
9715 uint val = 0;
9716
9717 DHD_ERROR(("%s\n", __FUNCTION__));
9718
9719 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9720 if (dhd->sssr_d11_outofreset[i]) {
9721 /* clear request clk only if itopoobb is non zero */
9722 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
9723 dhd_sbreg_op(dhd, addr, &val, TRUE);
9724 if (val != 0) {
9725 /* clear clockcontrolstatus */
9726 addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
9727 val =
9728 dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
9729 dhd_sbreg_op(dhd, addr, &val, FALSE);
9730 }
9731 }
9732 }
9733 return BCME_OK;
9734 }
9735
9736 static int
9737 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
9738 {
9739 uint addr;
9740 uint val = 0;
9741
9742 DHD_ERROR(("%s\n", __FUNCTION__));
9743
9744 /* Check if bit 0 of resetctrl is cleared */
9745 addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
9746 dhd_sbreg_op(dhd, addr, &val, TRUE);
9747 if (!(val & 1)) {
9748 /* clear request clk only if itopoobb is non zero */
9749 addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
9750 dhd_sbreg_op(dhd, addr, &val, TRUE);
9751 if (val != 0) {
9752 /* clear clockcontrolstatus */
9753 addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
9754 val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
9755 dhd_sbreg_op(dhd, addr, &val, FALSE);
9756 }
9757 }
9758 return BCME_OK;
9759 }
9760
9761 static int
9762 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
9763 {
9764 uint addr;
9765 uint val = 0;
9766
9767 DHD_ERROR(("%s\n", __FUNCTION__));
9768
9769 /* clear request clk only if itopoobb is non zero */
9770 addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
9771 dhd_sbreg_op(dhd, addr, &val, TRUE);
9772 if (val) {
9773 /* clear clockcontrolstatus */
9774 addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
9775 val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
9776 dhd_sbreg_op(dhd, addr, &val, FALSE);
9777 }
9778 return BCME_OK;
9779 }
9780
9781 static int
9782 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
9783 {
9784 uint addr;
9785 uint val = 0;
9786
9787 DHD_ERROR(("%s\n", __FUNCTION__));
9788
9789 addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
9790 val = LTR_ACTIVE;
9791 dhd_sbreg_op(dhd, addr, &val, FALSE);
9792
9793 val = LTR_SLEEP;
9794 dhd_sbreg_op(dhd, addr, &val, FALSE);
9795
9796 return BCME_OK;
9797 }
9798
9799 static int
9800 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
9801 {
9802 DHD_ERROR(("%s\n", __FUNCTION__));
9803
9804 dhdpcie_arm_clear_clk_req(dhd);
9805
9806 dhdpcie_d11_clear_clk_req(dhd);
9807
9808 dhdpcie_pcie_clear_clk_req(dhd);
9809
9810 return BCME_OK;
9811 }
9812
9813 static int
9814 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
9815 {
9816 int i;
9817 uint addr;
9818 uint val = 0;
9819
9820 DHD_ERROR(("%s\n", __FUNCTION__));
9821
9822 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9823 if (dhd->sssr_d11_outofreset[i]) {
9824 /* disable core by setting bit 0 */
9825 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
9826 val = 1;
9827 dhd_sbreg_op(dhd, addr, &val, FALSE);
9828 OSL_DELAY(6000);
9829
9830 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
9831 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
9832 dhd_sbreg_op(dhd, addr, &val, FALSE);
9833
9834 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
9835 dhd_sbreg_op(dhd, addr, &val, FALSE);
9836
9837 /* enable core by clearing bit 0 */
9838 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
9839 val = 0;
9840 dhd_sbreg_op(dhd, addr, &val, FALSE);
9841
9842 addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
9843 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
9844 dhd_sbreg_op(dhd, addr, &val, FALSE);
9845
9846 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
9847 dhd_sbreg_op(dhd, addr, &val, FALSE);
9848
9849 val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
9850 dhd_sbreg_op(dhd, addr, &val, FALSE);
9851 }
9852 }
9853 return BCME_OK;
9854 }
9855
9856 static int
9857 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
9858 {
9859 int i;
9860
9861 DHD_ERROR(("%s\n", __FUNCTION__));
9862
9863 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9864 if (dhd->sssr_d11_outofreset[i]) {
9865 dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
9866 dhd->sssr_reg_info.mac_regs[i].sr_size,
9867 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
9868 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
9869 }
9870 }
9871
9872 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
9873 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
9874 dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
9875 dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
9876 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
9877 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
9878 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
9879 dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
9880 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
9881 }
9882
9883 return BCME_OK;
9884 }
9885
9886 static int
9887 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
9888 {
9889 int i;
9890
9891 DHD_ERROR(("%s\n", __FUNCTION__));
9892
9893 for (i = 0; i < MAX_NUM_D11CORES; i++) {
9894 if (dhd->sssr_d11_outofreset[i]) {
9895 dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
9896 dhd->sssr_reg_info.mac_regs[i].sr_size,
9897 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
9898 dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
9899 }
9900 }
9901
9902 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
9903 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
9904 dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
9905 dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
9906 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
9907 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
9908 dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
9909 dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
9910 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
9911 }
9912
9913 return BCME_OK;
9914 }
9915
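/* Collect an SSSR dump: capture the FIFO/DIG contents before SR, put the
 * chip through a save/restore cycle, capture the contents again after SR
 * and schedule the dump to be written out.
 */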
9916 static int
9917 dhdpcie_sssr_dump(dhd_pub_t *dhd)
9918 {
9919 if (!dhd->sssr_inited) {
9920 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
9921 return BCME_ERROR;
9922 }
9923
9924 if (dhd->bus->is_linkdown) {
9925 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
9926 return BCME_ERROR;
9927 }
9928
9929 dhdpcie_d11_check_outofreset(dhd);
9930
9931 DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
9932 if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
9933 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
9934 return BCME_ERROR;
9935 }
9936
9937 dhdpcie_clear_intmask_and_timer(dhd);
9938 dhdpcie_suspend_chipcommon_powerctrl(dhd);
9939 dhdpcie_clear_clk_req(dhd);
9940 dhdpcie_pcie_send_ltrsleep(dhd);
9941
9942 /* Wait for some time before Restore */
9943 OSL_DELAY(6000);
9944
9945 dhdpcie_resume_chipcommon_powerctrl(dhd);
9946 dhdpcie_bring_d11_outofreset(dhd);
9947
9948 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
9949 if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
9950 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
9951 return BCME_ERROR;
9952 }
9953
9954 dhd_schedule_sssr_dump(dhd);
9955
9956 return BCME_OK;
9957 }
9958
9959 int
9960 dhd_bus_sssr_dump(dhd_pub_t *dhd)
9961 {
9962 return dhdpcie_sssr_dump(dhd);
9963 }
9964 #endif /* DHD_SSSR_DUMP */
9965
9966 #ifdef DHD_WAKE_STATUS
9967 wake_counts_t*
9968 dhd_bus_get_wakecount(dhd_pub_t *dhd)
9969 {
9970 return &dhd->bus->wake_counts;
9971 }
9972 int
9973 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
9974 {
9975 return bcmpcie_set_get_wake(dhd->bus, 0);
9976 }
9977 #endif /* DHD_WAKE_STATUS */
9978
9979 #define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP)
9980 #define OTP_USER_AREA_OFFSET 0x80
9981 #define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET)
9982 #define OTP_VERSION_TUPLE_ID 0x15
9983 #define OTP_VENDOR_TUPLE_ID 0x80
9984 #define OTP_CIS_REGION_END_TUPLE_ID 0XFF
9985 #define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE)
9986 #define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG)
9987 #define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4)
9988 #define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL)
9989 #define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C)
9990 #define PMU_OTP_PWR_ON_MASK 0xC47
9991
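/* Power up the OTP, parse the Vendor CIS tuple in the OTP user area and
 * construct the NVRAM file path from the module name, vendor and revision
 * read from OTP.
 */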
9992 int
9993 dhdpcie_get_nvpath_otp(dhd_bus_t *bus, char* program, char *nv_path)
9994 {
9995 uint32 val = 0;
9996 uint16 chip_id = 0;
9997 uint8 otp_data[2];
9998 char stepping[3];
9999 char module_name[5];
10000 char module_vendor = 0;
10001 char module_rev[4];
10002 uint8 tuple_id = 0;
10003 uint8 tuple_len = 0;
10004 uint32 cur_offset = 0;
10005 uint32 version_tuple_offset = 0;
10006 char module_info[64];
10007 char progname[32];
10008 bool srom_present = 0, otp_present = 0;
10009 uint32 sprom_ctrl = 0;
10010 uint32 otp_ctrl = 0, minres_mask = 0;
10011 int i = 0, j = 0, status = BCME_ERROR;
10012
10013 if (!nv_path || !bus) {
10014 return BCME_ERROR;
10015 }
10016
10017 /* read chip id first */
10018 if (si_backplane_access(bus->sih, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE) != BCME_OK) {
10019 DHD_ERROR(("%s: backplane access error!\n", __FUNCTION__));
10020 }
10021 else {
10022 chip_id = val & 0xffff;
10023 }
10024
10025 /* read SpromCtrl register */
10026 si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, TRUE);
10027 val = sprom_ctrl;
10028
10029 /* proceed only if OTP is present - i.e., the 5th bit (OtpPresent) is set -
10030 * and the chip is a 4355 or 4364
10031 */
10032 if ((val & 0x20) && (chip_id == 0x4355 || chip_id == 0x4364)) {
10033 otp_present = 1;
10034
10035 /* Check if the 4th bit (sprom_present) in CC Status REG is set */
10036 si_backplane_access(bus->sih, CHIP_COMMON_STATUS_REG_ADDR, 4, &val, TRUE);
10037 if (val & 0x10) {
10038 srom_present = 1;
10039 }
10040
10041 /* OTP power up sequence */
10042 /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */
10043 si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, TRUE);
10044 val = 0x1A0000;
10045 si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &val, FALSE);
10046
10047 /* 2. enable OTP power through min res mask register in PMU */
10048 si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4, &minres_mask, TRUE);
10049 val = minres_mask | PMU_OTP_PWR_ON_MASK;
10050 si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4, &val, FALSE);
10051
10052 /* 3. if SROM is present, the OtpSelect bit (bit 4) in the SpromCtrl
10053 * register must be set in order to read OTP
10054 */
10055 if (srom_present) {
10056
10057 val = sprom_ctrl | 0x10;
10058 si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &val, FALSE);
10059
10060 }
10061 /* Wait for PMU to power up. */
10062 OSL_DELAY(500);
10063 si_backplane_access(bus->sih, PMU_RES_STATE_REG_ADDR, 4, &val, TRUE);
10064 DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__, val));
10065
10066 si_backplane_access(bus->sih, SI_ENUM_BASE_DEFAULT, 4, &val, TRUE);
10067 DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__, val));
10068
10069 si_backplane_access(bus->sih, OTP_ADDRESS, 2, &val, TRUE);
10070 DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__, val));
10071
10072 cur_offset = OTP_USER_AREA_ADDR + 0x40;
10073 /* Read the data needed to construct the FW/NVRAM file name from OTP
10074 * (chip info and module info). This is present in the form of a Vendor
10075 * CIS Tuple whose format is provided by Olympic.
10076 * The data is in the form of ASCII character strings.
10077 * The Vendor tuple along with other CIS tuples are present
10078 * in the OTP user area. A CIS tuple is a TLV format.
10079 * (T = 1-byte, L = 1-byte, V = n-bytes)
10080 */
10081
10082 /* Find the version tuple */
10083 while (tuple_id != OTP_CIS_REGION_END_TUPLE_ID) {
10084 si_backplane_access(bus->sih, cur_offset,
10085 2, (uint *)otp_data, TRUE);
10086
10087 tuple_id = otp_data[0];
10088 tuple_len = otp_data[1];
10089 if (tuple_id == OTP_VERSION_TUPLE_ID) {
10090 version_tuple_offset = cur_offset;
10091 break;
10092 }
10093 /* if it is a NULL tuple, skip it */
10094 if (tuple_id == 0)
10095 cur_offset += 1;
10096 else
10097 cur_offset += tuple_len + 2;
10098 }
10099
10100 /* skip the major, minor ver. numbers, manufacturer and product names */
10101 cur_offset = version_tuple_offset + 6;
10102
10103 /* read the chip info */
10104 si_backplane_access(bus->sih, cur_offset,
10105 2, (uint *)otp_data, TRUE);
10106 if (otp_data[0] == 's' && otp_data[1] == '=') {
10107 /* read the stepping */
10108 cur_offset += 2;
10109 stepping[2] = 0;
10110 si_backplane_access(bus->sih, cur_offset,
10111 2, (uint *)stepping, TRUE);
10112 /* read module info */
10113 memset(module_info, 0, 64);
10114 cur_offset += 2;
10115 si_backplane_access(bus->sih, cur_offset,
10116 2, (uint *)otp_data, TRUE);
10117 while (otp_data[0] != OTP_CIS_REGION_END_TUPLE_ID &&
10118 otp_data[1] != OTP_CIS_REGION_END_TUPLE_ID) {
10119 memcpy(&module_info[i], otp_data, 2);
10120 i += 2;
10121 cur_offset += 2;
10122 si_backplane_access(bus->sih, cur_offset,
10123 2, (uint *)otp_data, TRUE);
10124 }
10125 /* replace any null characters found at the beginning
10126 * and middle of the string
10127 */
10128 for (j = 0; j < i; ++j) {
10129 if (module_info[j] == 0)
10130 module_info[j] = ' ';
10131 }
10132 DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n",
10133 stepping[0], stepping[1], module_info));
10134 /* extract the module name, revision and vendor
10135 * information from the module info string
10136 */
10137 for (i = 0; module_info[i]; i++) {
10138 if (module_info[i] == 'M' && module_info[i + 1] == '=') {
10139 memcpy(module_name, &module_info[i + 2], 4);
10140 module_name[4] = 0;
10141 i += 5;
10142 }
10143 else if (module_info[i] == 'm' && module_info[i + 1] == '=') {
10144 memcpy(module_rev, &module_info[i + 2], 3);
10145 module_rev[3] = 0;
10146 i += 4;
10147 }
10148 else if (module_info[i] == 'V' && module_info[i + 1] == '=') {
10149 module_vendor = module_info[i + 2];
10150 i += 2;
10151 }
10152 }
10153
10154 /* construct the complete file path to nvram as per
10155 * olympic conventions
10156 */
10157 strncpy(progname, program, sizeof(progname));
10158 sprintf(nv_path, "P-%s_M-%s_V-%c__m-%s.txt", progname, module_name,
10159 module_vendor, module_rev);
10160 DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__, nv_path));
10161 status = BCME_OK;
10162 }
10163
10164 /* restore back the registers to their previous values */
10165 if (srom_present) {
10166 si_backplane_access(bus->sih, SPROM_CTRL_REG_ADDR, 4, &sprom_ctrl, FALSE);
10167 }
10168
10169 if (otp_present) {
10170 si_backplane_access(bus->sih, PMU_MINRESMASK_REG_ADDR, 4,
10171 &minres_mask, FALSE);
10172 si_backplane_access(bus->sih, OTP_CTRL1_REG_ADDR, 4, &otp_ctrl, FALSE);
10173 }
10174
10175 }
10176 return status;
10177 }
10178
10179 /* Writes random number(s) to the TCM. On initialization the FW reads this
10180 * location to fetch the random number and uses it to randomize the heap address space layout.
10181 */
10182 static int
10183 dhdpcie_wrt_rnd(struct dhd_bus *bus)
10184 {
10185 bcm_rand_metadata_t rnd_data;
10186 uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
10187 uint32 count = BCM_ENTROPY_HOST_NBYTES;
10188 int ret = 0;
10189 uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
10190 ((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
10191
10192 memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
10193 rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
10194 rnd_data.count = htol32(count);
10195 /* write the metadata about random number */
10196 dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
10197 /* move the write address back by the number of random bytes */
10198 addr -= count;
10199
10200 /* Now get & write the random number(s) */
10201 ret = dhd_get_random_bytes(rand_buf, count);
10202 if (ret != BCME_OK) {
10203 return ret;
10204 }
10205 dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
10206
10207 return BCME_OK;
10208 }
10209
10210 #ifdef D2H_MINIDUMP
10211 bool
10212 dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp)
10213 {
10214 return dhdp->bus->d2h_minidump;
10215 }
10216 #endif /* D2H_MINIDUMP */
10217
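/* Dump the interrupt enable/disable counters and the ISR/DPC and
 * suspend/resume timestamps.
 */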
10218 void
10219 dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
10220 {
10221 struct dhd_bus *bus = dhd->bus;
10222 uint64 current_time;
10223
10224 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
10225 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
10226 bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
10227 DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
10228 bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
10229 #ifdef BCMPCIE_OOB_HOST_WAKE
10230 DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
10231 bus->oob_intr_count, bus->oob_intr_enable_count,
10232 bus->oob_intr_disable_count));
10233 DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
10234 dhdpcie_get_oob_irq_num(bus),
10235 GET_SEC_USEC(bus->last_oob_irq_time)));
10236 #endif /* BCMPCIE_OOB_HOST_WAKE */
10237 DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
10238 bus->dpc_return_busdown_count, bus->non_ours_irq_count));
10239
10240 current_time = OSL_LOCALTIME_NS();
10241 DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
10242 GET_SEC_USEC(current_time)));
10243 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
10244 " isr_exit_time="SEC_USEC_FMT"\n",
10245 GET_SEC_USEC(bus->isr_entry_time),
10246 GET_SEC_USEC(bus->isr_exit_time)));
10247 DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
10248 " last_non_ours_irq_time="SEC_USEC_FMT"\n",
10249 GET_SEC_USEC(bus->dpc_sched_time),
10250 GET_SEC_USEC(bus->last_non_ours_irq_time)));
10251 DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
10252 " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
10253 GET_SEC_USEC(bus->dpc_entry_time),
10254 GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
10255 DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
10256 " last_process_txcpl_time="SEC_USEC_FMT"\n",
10257 GET_SEC_USEC(bus->last_process_flowring_time),
10258 GET_SEC_USEC(bus->last_process_txcpl_time)));
10259 DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
10260 " last_process_infocpl_time="SEC_USEC_FMT"\n",
10261 GET_SEC_USEC(bus->last_process_rxcpl_time),
10262 GET_SEC_USEC(bus->last_process_infocpl_time)));
10263 DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
10264 " resched_dpc_time="SEC_USEC_FMT"\n",
10265 GET_SEC_USEC(bus->dpc_exit_time),
10266 GET_SEC_USEC(bus->resched_dpc_time)));
10267 DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
10268 GET_SEC_USEC(bus->last_d3_inform_time)));
10269
10270 DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
10271 " last_suspend_end_time="SEC_USEC_FMT"\n",
10272 GET_SEC_USEC(bus->last_suspend_start_time),
10273 GET_SEC_USEC(bus->last_suspend_end_time)));
10274 DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
10275 " last_resume_end_time="SEC_USEC_FMT"\n",
10276 GET_SEC_USEC(bus->last_resume_start_time),
10277 GET_SEC_USEC(bus->last_resume_end_time)));
10278 }
10279
10280 void
10281 dhd_bus_intr_count_dump(dhd_pub_t *dhd)
10282 {
10283 dhd_pcie_intr_count_dump(dhd);
10284 }
10285
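/* Dump the HostToDev and DevToHost DMA engine registers; skipped when the
 * PCIe link is down.
 */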
10286 int
10287 dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
10288 {
10289 if (dhd->bus->is_linkdown) {
10290 DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
10291 "due to PCIe link down ------- \r\n"));
10292 return 0;
10293 }
10294
10295 DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
10296
10297 /* HostToDev */
10298 DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
10299 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
10300 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
10301 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
10302 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
10303 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
10304 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
10305 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
10306 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
10307
10308 DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
10309 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
10310 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
10311 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
10312 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
10313 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
10314 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
10315 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
10316 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
10317
10318 /* DevToHost */
10319 DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
10320 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
10321 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
10322 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
10323 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
10324 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
10325 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
10326 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
10327 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
10328
10329 DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
10330 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
10331 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
10332 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
10333 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
10334 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
10335 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
10336 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
10337 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
10338
10339 return 0;
10340 }
10341
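/* Dump the PCIe interrupt status/mask and D2H mailbox registers. Returns
 * FALSE if the reads return all 1s (invalid).
 */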
10342 bool
10343 dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
10344 {
10345 uint32 intstatus = 0;
10346 uint32 intmask = 0;
10347 uint32 d2h_db0 = 0;
10348 uint32 d2h_mb_data = 0;
10349
10350 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
10351 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10352 dhd->bus->pcie_mailbox_int, 0, 0);
10353 if (intstatus == (uint32)-1) {
10354 DHD_ERROR(("intstatus=0x%x \n", intstatus));
10355 return FALSE;
10356 }
10357
10358 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10359 dhd->bus->pcie_mailbox_mask, 0, 0);
10360 if (intmask == (uint32) -1) {
10361 DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
10362 return FALSE;
10363 }
10364
10365 d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
10366 PCID2H_MailBox, 0, 0);
10367 if (d2h_db0 == (uint32)-1) {
10368 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
10369 intstatus, intmask, d2h_db0));
10370 return FALSE;
10371 }
10372
10373 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
10374 intstatus, intmask, d2h_db0));
10375 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
10376 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
10377 dhd->bus->def_intmask));
10378
10379 return TRUE;
10380 }
10381
10382 void
10383 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
10384 {
10385 DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
10386 DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
10387 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10388 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
10389 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10390 DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
10391 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10392 PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
10393 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10394 PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
10395 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10396 PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
10397 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10398 PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
10399 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
10400 }
10401
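/* Dump PCIe debug information: host interrupt state, interrupt counters,
 * RC and EP config space, DAR registers and PCIe core registers.
 */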
10402 int
10403 dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
10404 {
10405 int host_irq_disabled;
10406
10407 DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
10408 host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
10409 DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
10410 dhd_print_tasklet_status(dhd);
10411 dhd_pcie_intr_count_dump(dhd);
10412
10413 DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
10414 dhdpcie_dump_resource(dhd->bus);
10415
10416 dhd_pcie_dump_rc_conf_space_cap(dhd);
10417
10418 DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
10419 dhd_debug_get_rc_linkcap(dhd->bus)));
10420
10421 #ifdef CUSTOMER_HW4_DEBUG
10422 if (dhd->bus->is_linkdown) {
10423 DHD_ERROR(("Skip dumping the PCIe registers due to PCIe Link down\n"));
10424 return 0;
10425 }
10426 #endif /* CUSTOMER_HW4_DEBUG */
10427
10428 DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
10429 DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
10430 "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
10431 PCIECFGREG_STATUS_CMD,
10432 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
10433 PCIECFGREG_BASEADDR0,
10434 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
10435 PCIECFGREG_BASEADDR1,
10436 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
10437 PCIE_CFG_PMCSR,
10438 dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
10439 DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
10440 "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
10441 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
10442 sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
10443 dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
10444 sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
10445 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
10446 sizeof(uint32))));
10447 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10448 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
10449 dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
10450 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
10451 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
10452 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
10453 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
10454 PCI_TLP_HDR_LOG2,
10455 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
10456 PCI_TLP_HDR_LOG3,
10457 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
10458 PCI_TLP_HDR_LOG4,
10459 dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
			dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
			sizeof(uint32))));

		DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
		DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
			PCIDARClkCtl(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARClkCtl(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARPwrCtl(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARPwrCtl(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARH2D_DB0(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARH2D_DB0(dhd->bus->sih->buscorerev), 0, 0)));
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
	if (!dhd_pcie_dump_int_regs(dhd)) {
		DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid int regs\n"));
		return 0;
	}

	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n",
			PCIDARErrlog(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARErrlog(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARErrlog_Addr(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("FunctionIntstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n",
			PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARMailboxint(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIDARMailboxint(dhd->bus->sih->buscorerev), 0, 0)));
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
		PCIECFGREG_PHY_DBG_CLKREQ1,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
		PCIECFGREG_PHY_DBG_CLKREQ2,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
		PCIECFGREG_PHY_DBG_CLKREQ3,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));

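	/*
	 * LTSSM (Link Training and Status State Machine) history registers record
	 * the most recent link state transitions; they help diagnose why a PCIe
	 * link dropped or failed to train.
	 */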
#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
			"ltssm_hist_2(0x%x)=0x%x "
			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
			PCIECFGREG_PHY_LTSSM_HIST_1,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
			PCIECFGREG_PHY_LTSSM_HIST_2,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
			PCIECFGREG_PHY_LTSSM_HIST_3,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
		DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
			PCIE_CLK_CTRL,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CLK_CTRL, 0, 0),
			PCIE_PWR_CTRL,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_PWR_CTRL, 0, 0),
			PCIH2D_MailBox,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIH2D_MailBox, 0, 0)));
		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
			PCIECFGREG_TREFUP,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
			PCIECFGREG_TREFUP_EXT,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
			"Function_Intstatus(0x%x)=0x%x "
			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
			"Power_Intmask(0x%x)=0x%x\n",
			PCIE_CORE_REG_ERRLOG,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIE_CORE_REG_ERRLOG, 0, 0),
			PCIE_CORE_REG_ERR_ADDR,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIE_CORE_REG_ERR_ADDR, 0, 0),
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
		DHD_ERROR(("err_code(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	dhd_pcie_dma_info_dump(dhd);

	return 0;
}

/*
 * TLV ID (signature) marking the host whitelist region in dongle TCM.
 */
#define BCM_NVRAM_WHTLST_SIGNATURE	0xFEED4B1Du

/*
 * For the time being only one whitelist region is supported; it is described
 * by a 64-bit low address and a 64-bit high address (16 bytes total).
 */
#define BCM_HOST_WHITELIST_NBYTES	16u
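
/*
 * Illustrative layout of the 16-byte entry (assumed; the actual encoding is
 * owned by dhd_get_host_whitelist_region(), which fills the buffer):
 *
 *	offset 0: uint64 low  - start of the whitelisted host address range
 *	offset 8: uint64 high - end of the whitelisted host address range
 */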

/* Writes the host whitelist region to TCM. On initialization the FW reads this
 * area to fetch the whitelist regions and validates DMA descriptors against
 * them before programming.
 */
static int
dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus)
{
	int ret;
	bcm_host_whitelist_metadata_t whitelist_data;
	uint8 whtlst_buff[BCM_HOST_WHITELIST_NBYTES];
	bcm_rand_metadata_t rnd_data;
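	/*
	 * Inferred from the address expression below: the whitelist data sits near
	 * the top of dongle RAM, stacked (in decreasing address order) beneath the
	 * BCM_NVRAM_OFFSET_TCM reserved area, the compressed NVRAM image
	 * ((nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR bytes), the
	 * random-seed metadata (rnd_data) and BCM_ENTROPY_HOST_NBYTES of host
	 * entropy. 'addr' points at the whitelist metadata; the whitelist region
	 * itself is written BCM_HOST_WHITELIST_NBYTES below it.
	 */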
	uint32 addr = bus->dongle_ram_base + (uint32)((bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
		((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data) +
		BCM_ENTROPY_HOST_NBYTES + sizeof(whitelist_data)));
	whitelist_data.signature = htol32(BCM_NVRAM_WHTLST_SIGNATURE);
	whitelist_data.count = htol32(BCM_HOST_WHITELIST_NBYTES);
	ret = dhd_get_host_whitelist_region((void*)whtlst_buff,
		whitelist_data.count);
	if (ret == BCME_RANGE) {
		DHD_INFO(("%s: No whitelist region programmed!\n",
			__FUNCTION__));
		return BCME_OK;
	}
	if (ret == BCME_OK) {
		/* write the metadata about the whitelist region */
		ret = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&whitelist_data,
			sizeof(whitelist_data));
		if (ret == BCME_OK) {
			/* move down by the size of the whitelist region */
			addr -= BCM_HOST_WHITELIST_NBYTES;

			/* Now write the whitelist region(s) */
			ret = dhdpcie_bus_membytes(bus, TRUE, addr, whtlst_buff,
				BCM_HOST_WHITELIST_NBYTES);
		}
	}
	return ret;
}

bool
dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
	return bus->force_bt_quiesce;
}