1 | /* |
2 | * DHD Bus Module for PCIE | |
3 | * | |
4 | * Copyright (C) 1999-2019, Broadcom. | |
5 | * | |
6 | * Unless you and Broadcom execute a separate written software license | |
7 | * agreement governing use of this software, this software is licensed to you | |
8 | * under the terms of the GNU General Public License version 2 (the "GPL"), | |
9 | * available at http://www.broadcom.com/licenses/GPLv2.php, with the | |
10 | * following added to such license: | |
11 | * | |
12 | * As a special exception, the copyright holders of this software give you | |
13 | * permission to link this software with independent modules, and to copy and | |
14 | * distribute the resulting executable under terms of your choice, provided that | |
15 | * you also meet, for each linked independent module, the terms and conditions of | |
16 | * the license of that module. An independent module is a module which is not | |
17 | * derived from this software. The special exception does not apply to any | |
18 | * modifications of the software. | |
19 | * | |
20 | * Notwithstanding the above, under no circumstances may you combine this | |
21 | * software in any way with any other Broadcom software provided under a license | |
22 | * other than the GPL, without Broadcom's express prior written consent. | |
23 | * | |
24 | * | |
25 | * <<Broadcom-WL-IPTag/Open:>> | |
26 | * | |
27 | * $Id: dhd_pcie.c 825481 2019-06-14 10:06:03Z $ | |
28 | */ | |
29 | ||
30 | /* include files */ | |
31 | #include <typedefs.h> | |
32 | #include <bcmutils.h> | |
33 | #include <bcmdevs.h> | |
34 | #include <siutils.h> | |
35 | #include <hndoobr.h> | |
36 | #include <hndsoc.h> | |
37 | #include <hndpmu.h> | |
38 | #include <etd.h> | |
39 | #include <hnd_debug.h> | |
40 | #include <sbchipc.h> | |
41 | #include <sbhndarm.h> | |
42 | #include <hnd_armtrap.h> | |
43 | #if defined(DHD_DEBUG) | |
44 | #include <hnd_cons.h> | |
45 | #endif /* defined(DHD_DEBUG) */ | |
46 | #include <dngl_stats.h> | |
47 | #include <pcie_core.h> | |
48 | #include <dhd.h> | |
49 | #include <dhd_bus.h> | |
50 | #include <dhd_flowring.h> | |
51 | #include <dhd_proto.h> | |
52 | #include <dhd_dbg.h> | |
53 | #include <dhd_debug.h> | |
54 | #include <dhd_daemon.h> | |
55 | #include <dhdioctl.h> | |
56 | #include <sdiovar.h> | |
57 | #include <bcmmsgbuf.h> | |
58 | #include <pcicfg.h> | |
59 | #include <dhd_pcie.h> | |
60 | #include <bcmpcie.h> | |
61 | #include <bcmendian.h> | |
62 | #include <bcmstdlib_s.h> | |
63 | #ifdef DHDTCPACK_SUPPRESS | |
64 | #include <dhd_ip.h> | |
65 | #endif /* DHDTCPACK_SUPPRESS */ | |
66 | #include <bcmevent.h> | |
67 | #include <dhd_config.h> | |
68 | ||
69 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
70 | #include <linux/pm_runtime.h> | |
71 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
72 | ||
73 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
74 | #include <debugger.h> | |
75 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
76 | ||
77 | #ifdef DNGL_AXI_ERROR_LOGGING | |
78 | #include <dhd_linux_wq.h> | |
79 | #include <dhd_linux.h> | |
80 | #endif /* DNGL_AXI_ERROR_LOGGING */ | |
81 | ||
82 | #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) | |
83 | #include <dhd_linux_priv.h> | |
84 | #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ | |
85 | ||
86 | #include <otpdefs.h> | |
87 | #define EXTENDED_PCIE_DEBUG_DUMP 1	/* Enable extended PCIe register dump */ | |
88 | ||
89 | #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ | |
90 | #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ | |
91 | ||
92 | #define DHD_MAX_ITEMS_HPP_TXCPL_RING 512 | |
93 | #define DHD_MAX_ITEMS_HPP_RXCPL_RING 512 | |
94 | ||
95 | #define ARMCR4REG_CORECAP (0x4/sizeof(uint32)) | |
96 | #define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32)) | |
97 | #define ACC_MPU_SHIFT 25 | |
98 | #define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT) | |
99 | ||
100 | #define REG_WORK_AROUND (0x1e4/sizeof(uint32)) | |
101 | ||
102 | #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) | |
103 | #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) | |
104 | /* Temporary WAR to fix precommit until the sync issue between the trunk & precommit branches is resolved */ | |
105 | ||
106 | /* CTO Prevention Recovery */ | |
107 | #ifdef BCMQT_HW | |
108 | #define CTO_TO_CLEAR_WAIT_MS 10000 | |
109 | #define CTO_TO_CLEAR_WAIT_MAX_CNT 100 | |
110 | #else | |
111 | #define CTO_TO_CLEAR_WAIT_MS 1000 | |
112 | #define CTO_TO_CLEAR_WAIT_MAX_CNT 10 | |
113 | #endif // endif | |
114 | ||
115 | /* Fetch address of a member in the pciedev_shared structure in dongle memory */ | |
116 | #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \ | |
117 | (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member) | |
118 | ||
119 | /* Fetch address of a member in rings_info_ptr structure in dongle memory */ | |
120 | #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \ | |
121 | (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member) | |
122 | ||
123 | /* Fetch address of a member in the ring_mem structure in dongle memory */ | |
124 | #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \ | |
125 | (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member) | |
126 | ||
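/*
 * Illustrative sketch only, not original driver code: the member-address
 * macros above resolve to dongle (TCM) addresses that are then read or
 * written with the TCM accessors declared later in this file
 * (dhdpcie_bus_rtcm32() and friends). The "console_addr" member is used
 * here purely as an assumed example of a pciedev_shared_t field.
 */
static INLINE ulong
dhd_example_shared_member_addr(dhd_bus_t *bus)
{
	return DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr);
}
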
127 | #if defined(SUPPORT_MULTIPLE_BOARD_REV) | |
128 | extern unsigned int system_rev; | |
129 | #endif /* SUPPORT_MULTIPLE_BOARD_REV */ | |
130 | ||
131 | #ifdef EWP_EDL | |
132 | extern int host_edl_support; | |
133 | #endif // endif | |
134 | ||
135 | /* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */ | |
136 | uint dma_ring_indices = 0; | |
137 | /* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */ | |
138 | bool h2d_phase = 0; | |
139 | /* This can be overridden by the module parameter (force_trap_bad_h2d_phase) | |
140 | * defined in dhd_linux.c | |
141 | */ | |
142 | bool force_trap_bad_h2d_phase = 0; | |
143 | ||
144 | int dhd_dongle_memsize; | |
145 | int dhd_dongle_ramsize; | |
146 | struct dhd_bus *g_dhd_bus = NULL; | |
147 | #ifdef DNGL_AXI_ERROR_LOGGING | |
148 | static void dhd_log_dump_axi_error(uint8 *axi_err); | |
149 | #endif /* DNGL_AXI_ERROR_LOGGING */ | |
150 | ||
151 | static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); | |
152 | static int dhdpcie_bus_readconsole(dhd_bus_t *bus); | |
153 | #if defined(DHD_FW_COREDUMP) | |
154 | static int dhdpcie_mem_dump(dhd_bus_t *bus); | |
155 | static int dhdpcie_get_mem_dump(dhd_bus_t *bus); | |
156 | #endif /* DHD_FW_COREDUMP */ | |
157 | ||
158 | static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); | |
159 | static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, | |
160 | const char *name, void *params, | |
161 | int plen, void *arg, int len, int val_size); | |
162 | static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval); | |
163 | static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, | |
164 | uint32 len, uint32 srcdelay, uint32 destdelay, | |
165 | uint32 d11_lpbk, uint32 core_num, uint32 wait); | |
166 | static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter); | |
167 | static int _dhdpcie_download_firmware(struct dhd_bus *bus); | |
168 | static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh); | |
169 | static int dhdpcie_bus_write_vars(dhd_bus_t *bus); | |
170 | static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus); | |
171 | static bool dhdpci_bus_read_frames(dhd_bus_t *bus); | |
172 | static int dhdpcie_readshared(dhd_bus_t *bus); | |
173 | static void dhdpcie_init_shared_addr(dhd_bus_t *bus); | |
174 | static bool dhdpcie_dongle_attach(dhd_bus_t *bus); | |
175 | static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size); | |
176 | static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, | |
177 | bool dongle_isolation, bool reset_flag); | |
178 | static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh); | |
179 | static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len); | |
180 | static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr); | |
181 | static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset); | |
182 | static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); | |
183 | static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); | |
184 | static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset); | |
185 | static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); | |
186 | static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); | |
187 | #ifdef DHD_SUPPORT_64BIT | |
188 | static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used)); | |
189 | static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used)); | |
190 | #endif /* DHD_SUPPORT_64BIT */ | |
191 | static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); | |
192 | static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size); | |
193 | static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); | |
194 | static void dhdpcie_fw_trap(dhd_bus_t *bus); | |
195 | static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); | |
196 | static void dhdpcie_handle_mb_data(dhd_bus_t *bus); | |
197 | extern void dhd_dpc_enable(dhd_pub_t *dhdp); | |
198 | extern void dhd_dpc_kill(dhd_pub_t *dhdp); | |
199 | ||
200 | #ifdef IDLE_TX_FLOW_MGMT | |
201 | static void dhd_bus_check_idle_scan(dhd_bus_t *bus); | |
202 | static void dhd_bus_idle_scan(dhd_bus_t *bus); | |
203 | #endif /* IDLE_TX_FLOW_MGMT */ | |
204 | ||
205 | #ifdef EXYNOS_PCIE_DEBUG | |
206 | extern void exynos_pcie_register_dump(int ch_num); | |
207 | #endif /* EXYNOS_PCIE_DEBUG */ | |
208 | ||
209 | #if defined(DHD_H2D_LOG_TIME_SYNC) | |
210 | static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus); | |
211 | #endif /* DHD_H2D_LOG_TIME_SYNC */ | |
212 | ||
213 | #define PCI_VENDOR_ID_BROADCOM 0x14e4 | |
214 | ||
215 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
216 | #define MAX_D3_ACK_TIMEOUT 100 | |
217 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
218 | ||
219 | #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ | |
220 | static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); | |
221 | static int dhdpcie_cto_error_recovery(struct dhd_bus *bus); | |
222 | ||
223 | static int dhdpcie_init_d11status(struct dhd_bus *bus); | |
224 | ||
225 | static int dhdpcie_wrt_rnd(struct dhd_bus *bus); | |
226 | ||
227 | extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd); | |
228 | extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost); | |
229 | ||
230 | #ifdef DHD_HP2P | |
231 | extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer); | |
232 | static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val); | |
233 | #endif // endif | |
234 | #define NUM_PATTERNS 2 | |
235 | static bool dhd_bus_tcm_test(struct dhd_bus *bus); | |
236 | ||
237 | /* IOVar table */ | |
238 | enum { | |
239 | IOV_INTR = 1, | |
240 | IOV_MEMSIZE, | |
241 | IOV_SET_DOWNLOAD_STATE, | |
242 | IOV_DEVRESET, | |
243 | IOV_VARS, | |
244 | IOV_MSI_SIM, | |
245 | IOV_PCIE_LPBK, | |
246 | IOV_CC_NVMSHADOW, | |
247 | IOV_RAMSIZE, | |
248 | IOV_RAMSTART, | |
249 | IOV_SLEEP_ALLOWED, | |
250 | IOV_PCIE_DMAXFER, | |
251 | IOV_PCIE_SUSPEND, | |
252 | IOV_DONGLEISOLATION, | |
253 | IOV_LTRSLEEPON_UNLOOAD, | |
254 | IOV_METADATA_DBG, | |
255 | IOV_RX_METADATALEN, | |
256 | IOV_TX_METADATALEN, | |
257 | IOV_TXP_THRESHOLD, | |
258 | IOV_BUZZZ_DUMP, | |
259 | IOV_DUMP_RINGUPD_BLOCK, | |
260 | IOV_DMA_RINGINDICES, | |
261 | IOV_FORCE_FW_TRAP, | |
262 | IOV_DB1_FOR_MB, | |
263 | IOV_FLOW_PRIO_MAP, | |
264 | IOV_RXBOUND, | |
265 | IOV_TXBOUND, | |
266 | IOV_HANGREPORT, | |
267 | IOV_H2D_MAILBOXDATA, | |
268 | IOV_INFORINGS, | |
269 | IOV_H2D_PHASE, | |
270 | IOV_H2D_ENABLE_TRAP_BADPHASE, | |
271 | IOV_H2D_TXPOST_MAX_ITEM, | |
272 | IOV_TRAPDATA, | |
273 | IOV_TRAPDATA_RAW, | |
274 | IOV_CTO_PREVENTION, | |
275 | IOV_PCIE_WD_RESET, | |
276 | IOV_DUMP_DONGLE, | |
277 | IOV_HWA_ENAB_BMAP, | |
278 | IOV_IDMA_ENABLE, | |
279 | IOV_IFRM_ENABLE, | |
280 | IOV_CLEAR_RING, | |
281 | IOV_DAR_ENABLE, | |
282 | IOV_DNGL_CAPS, /**< returns string with dongle capabilities */ | |
283 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
284 | IOV_GDB_SERVER, /**< starts gdb server on given interface */ | |
285 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
286 | IOV_INB_DW_ENABLE, | |
287 | IOV_CTO_THRESHOLD, | |
288 | IOV_HSCBSIZE, /* get HSCB buffer size */ | |
289 | IOV_HP2P_ENABLE, | |
290 | IOV_HP2P_PKT_THRESHOLD, | |
291 | IOV_HP2P_TIME_THRESHOLD, | |
292 | IOV_HP2P_PKT_EXPIRY, | |
293 | IOV_HP2P_TXCPL_MAXITEMS, | |
294 | IOV_HP2P_RXCPL_MAXITEMS, | |
295 | IOV_EXTDTXS_IN_TXCPL, | |
296 | IOV_HOSTRDY_AFTER_INIT, | |
297 | IOV_PCIE_LAST /**< unused IOVAR */ | |
298 | }; | |
299 | ||
300 | const bcm_iovar_t dhdpcie_iovars[] = { | |
301 | {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 }, | |
302 | {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 }, | |
303 | {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 }, | |
304 | {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 }, | |
305 | {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 }, | |
306 | {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 }, | |
307 | {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 }, | |
308 | {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 }, | |
309 | {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, | |
310 | {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, | |
311 | {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)}, | |
312 | {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 }, | |
313 | {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 }, | |
314 | {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 }, | |
315 | {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 }, | |
316 | {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 }, | |
317 | {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0}, | |
318 | {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 }, | |
319 | {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, | |
320 | {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, | |
321 | {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 }, | |
322 | {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, | |
323 | {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 }, | |
324 | {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 }, | |
325 | {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 }, | |
326 | {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 }, | |
327 | {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 }, | |
328 | {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 }, | |
329 | {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 }, | |
330 | {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 }, | |
331 | {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0, | |
332 | IOVT_UINT32, 0 }, | |
333 | {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 }, | |
334 | {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 }, | |
335 | {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 }, | |
336 | {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 }, | |
337 | {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 }, | |
338 | {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER, | |
339 | MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))}, | |
340 | {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 }, | |
341 | {"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 }, | |
342 | {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 }, | |
343 | {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 }, | |
344 | {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 }, | |
345 | {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0}, | |
346 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
347 | {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 }, | |
348 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
349 | {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 }, | |
350 | {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, | |
351 | {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 }, | |
352 | #ifdef DHD_HP2P | |
353 | {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 }, | |
354 | {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, | |
355 | {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, | |
356 | {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 }, | |
357 | {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 }, | |
358 | {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 }, | |
359 | #endif // endif | |
360 | {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 }, | |
361 | {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 }, | |
362 | {NULL, 0, 0, 0, 0, 0 } | |
363 | }; | |
364 | ||
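/*
 * Illustrative sketch, not original driver code: issuing one of the bus
 * IOVars from the table above through dhd_bus_iovar_op(), which eventually
 * dispatches to dhdpcie_bus_doiovar(). This assumes a plain 32-bit GET such
 * as "memsize"; buffer-type IOVars need a caller-sized buffer instead.
 */
static INLINE int
dhd_example_get_memsize(dhd_pub_t *dhdp, uint32 *memsize)
{
	return dhd_bus_iovar_op(dhdp, "memsize", NULL, 0,
		(char *)memsize, sizeof(*memsize), IOV_GET);
}
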
365 | #define MAX_READ_TIMEOUT	(2 * 1000 * 1000) | |
366 | ||
367 | #ifndef DHD_RXBOUND | |
368 | #define DHD_RXBOUND 64 | |
369 | #endif // endif | |
370 | #ifndef DHD_TXBOUND | |
371 | #define DHD_TXBOUND 64 | |
372 | #endif // endif | |
373 | ||
374 | #define DHD_INFORING_BOUND 32 | |
375 | #define DHD_BTLOGRING_BOUND 32 | |
376 | ||
377 | uint dhd_rxbound = DHD_RXBOUND; | |
378 | uint dhd_txbound = DHD_TXBOUND; | |
379 | ||
380 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
381 | /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */ | |
382 | static struct dhd_gdb_bus_ops_s bus_ops = { | |
383 | .read_u16 = dhdpcie_bus_rtcm16, | |
384 | .read_u32 = dhdpcie_bus_rtcm32, | |
385 | .write_u32 = dhdpcie_bus_wtcm32, | |
386 | }; | |
387 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
388 | ||
389 | bool | |
390 | dhd_bus_get_flr_force_fail(struct dhd_bus *bus) | |
391 | { | |
392 | return bus->flr_force_fail; | |
393 | } | |
394 | ||
395 | /** | |
396 | * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to | |
397 | * link with the bus driver, in order to look for or await the device. | |
398 | */ | |
399 | int | |
400 | dhd_bus_register(void) | |
401 | { | |
402 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
403 | ||
404 | return dhdpcie_bus_register(); | |
405 | } | |
406 | ||
407 | void | |
408 | dhd_bus_unregister(void) | |
409 | { | |
410 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
411 | ||
412 | dhdpcie_bus_unregister(); | |
413 | return; | |
414 | } | |
415 | ||
416 | /** returns a host virtual address */ | |
417 | uint32 * | |
418 | dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size) | |
419 | { | |
420 | return (uint32 *)REG_MAP(addr, size); | |
421 | } | |
422 | ||
423 | void | |
424 | dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size) | |
425 | { | |
426 | REG_UNMAP(addr); | |
427 | return; | |
428 | } | |
429 | ||
430 | /** | |
431 | * Return H2D doorbell register address. | |
432 | * Use DAR registers instead of the enum register for corerev >= 23 (4347B0). | |
433 | */ | |
434 | static INLINE uint | |
435 | dhd_bus_db0_addr_get(struct dhd_bus *bus) | |
436 | { | |
437 | uint addr = PCIH2D_MailBox; | |
438 | uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev); | |
439 | ||
440 | return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr); | |
441 | } | |
442 | ||
443 | static INLINE uint | |
444 | dhd_bus_db0_addr_2_get(struct dhd_bus *bus) | |
445 | { | |
446 | return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2); | |
447 | } | |
448 | ||
449 | static INLINE uint | |
450 | dhd_bus_db1_addr_get(struct dhd_bus *bus) | |
451 | { | |
452 | return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1); | |
453 | } | |
454 | ||
455 | static INLINE uint | |
456 | dhd_bus_db1_addr_1_get(struct dhd_bus *bus) | |
457 | { | |
458 | return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1); | |
459 | } | |
460 | ||
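/*
 * Illustrative sketch, not original driver code: how the doorbell address
 * helpers above are meant to be used. The helper already selects the DAR
 * register vs. the legacy mailbox register, so the caller only supplies the
 * 32-bit value to post.
 */
static INLINE void
dhd_example_ring_h2d_db0(struct dhd_bus *bus, uint32 value)
{
	si_corereg(bus->sih, bus->sih->buscoreidx,
		dhd_bus_db0_addr_get(bus), ~0, value);
}
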
461 | /* | |
462 | * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request | |
463 | */ | |
464 | static INLINE void | |
465 | dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable) | |
466 | { | |
467 | if (enable) { | |
468 | si_corereg(bus->sih, bus->sih->buscoreidx, offset, | |
469 | SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, | |
470 | SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT); | |
471 | } else { | |
472 | si_corereg(bus->sih, bus->sih->buscoreidx, offset, | |
473 | SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0); | |
474 | } | |
475 | } | |
476 | ||
477 | static INLINE void | |
478 | _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus) | |
479 | { | |
480 | uint mask; | |
481 | ||
482 | /* | |
483 | * If multiple de-asserts are pending, just decrement the ref count and return. | |
484 | * Clear the power request only when a single reference remains, | |
485 | * so the initial request is not removed unexpectedly. | |
486 | */ | |
487 | if (bus->pwr_req_ref > 1) { | |
488 | bus->pwr_req_ref--; | |
489 | return; | |
490 | } | |
491 | ||
492 | ASSERT(bus->pwr_req_ref == 1); | |
493 | ||
494 | if (MULTIBP_ENAB(bus->sih)) { | |
495 | /* Common BP controlled by HW so only need to toggle WL/ARM backplane */ | |
496 | mask = SRPWR_DMN1_ARMBPSD_MASK; | |
497 | } else { | |
498 | mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK; | |
499 | } | |
500 | ||
501 | si_srpwr_request(bus->sih, mask, 0); | |
502 | bus->pwr_req_ref = 0; | |
503 | } | |
504 | ||
505 | static INLINE void | |
506 | dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus) | |
507 | { | |
508 | unsigned long flags = 0; | |
509 | ||
510 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
511 | _dhd_bus_pcie_pwr_req_clear_cmn(bus); | |
512 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
513 | } | |
514 | ||
515 | static INLINE void | |
516 | dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus) | |
517 | { | |
518 | _dhd_bus_pcie_pwr_req_clear_cmn(bus); | |
519 | } | |
520 | ||
521 | static INLINE void | |
522 | _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus) | |
523 | { | |
524 | uint mask, val; | |
525 | ||
526 | /* If multiple request entries, increment reference and return */ | |
527 | if (bus->pwr_req_ref > 0) { | |
528 | bus->pwr_req_ref++; | |
529 | return; | |
530 | } | |
531 | ||
532 | ASSERT(bus->pwr_req_ref == 0); | |
533 | ||
534 | if (MULTIBP_ENAB(bus->sih)) { | |
535 | /* Common BP controlled by HW so only need to toggle WL/ARM backplane */ | |
536 | mask = SRPWR_DMN1_ARMBPSD_MASK; | |
537 | val = SRPWR_DMN1_ARMBPSD_MASK; | |
538 | } else { | |
539 | mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK; | |
540 | val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK; | |
541 | } | |
542 | ||
543 | si_srpwr_request(bus->sih, mask, val); | |
544 | ||
545 | bus->pwr_req_ref = 1; | |
546 | } | |
547 | ||
548 | static INLINE void | |
549 | dhd_bus_pcie_pwr_req(struct dhd_bus *bus) | |
550 | { | |
551 | unsigned long flags = 0; | |
552 | ||
553 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
554 | _dhd_bus_pcie_pwr_req_cmn(bus); | |
555 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
556 | } | |
557 | ||
558 | static INLINE void | |
559 | _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus) | |
560 | { | |
561 | uint mask, val; | |
562 | ||
563 | mask = SRPWR_DMN_ALL_MASK(bus->sih); | |
564 | val = SRPWR_DMN_ALL_MASK(bus->sih); | |
565 | ||
566 | si_srpwr_request(bus->sih, mask, val); | |
567 | } | |
568 | ||
569 | static INLINE void | |
570 | dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus) | |
571 | { | |
572 | unsigned long flags = 0; | |
573 | ||
574 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
575 | _dhd_bus_pcie_pwr_req_pd0123_cmn(bus); | |
576 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
577 | } | |
578 | ||
579 | static INLINE void | |
580 | _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus) | |
581 | { | |
582 | uint mask; | |
583 | ||
584 | mask = SRPWR_DMN_ALL_MASK(bus->sih); | |
585 | ||
586 | si_srpwr_request(bus->sih, mask, 0); | |
587 | } | |
588 | ||
589 | static INLINE void | |
590 | dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus) | |
591 | { | |
592 | unsigned long flags = 0; | |
593 | ||
594 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
595 | _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus); | |
596 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
597 | } | |
598 | ||
599 | static INLINE void | |
600 | dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus) | |
601 | { | |
602 | _dhd_bus_pcie_pwr_req_cmn(bus); | |
603 | } | |
604 | ||
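/*
 * Illustrative usage sketch, not original driver code: the power-request
 * helpers above are reference counted and bracket backplane accesses on
 * chips with multiple backplane power domains (MULTIBP_ENAB). The register
 * offset here is an arbitrary placeholder.
 */
static INLINE uint32
dhd_example_corereg_read_with_pwr_req(struct dhd_bus *bus, uint offset)
{
	uint32 val;

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	val = si_corereg(bus->sih, bus->sih->buscoreidx, offset, 0, 0);
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
	return val;
}
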
605 | bool | |
606 | dhdpcie_chip_support_msi(dhd_bus_t *bus) | |
607 | { | |
608 | DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n", | |
609 | __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih))); | |
610 | if (bus->sih->buscorerev <= 14 || | |
611 | si_chipid(bus->sih) == BCM4375_CHIP_ID || | |
612 | si_chipid(bus->sih) == BCM4362_CHIP_ID || | |
613 | si_chipid(bus->sih) == BCM43751_CHIP_ID || | |
614 | si_chipid(bus->sih) == BCM4361_CHIP_ID || | |
615 | si_chipid(bus->sih) == BCM4359_CHIP_ID) { | |
616 | return FALSE; | |
617 | } else { | |
618 | return TRUE; | |
619 | } | |
620 | } | |
621 | ||
622 | /** | |
623 | * Called once for each hardware (dongle) instance that this DHD manages. | |
624 | * | |
625 | * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096 | |
626 | * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The | |
627 | * precondition is that the PCIEBAR0Window register 'points' at the PCIe core. | |
628 | * | |
629 | * 'tcm' is the *host* virtual address at which tcm is mapped. | |
630 | */ | |
631 | int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, | |
632 | volatile char *regs, volatile char *tcm, void *pci_dev) | |
633 | { | |
634 | dhd_bus_t *bus = NULL; | |
635 | int ret = BCME_OK; | |
636 | ||
637 | DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); | |
638 | ||
639 | do { | |
640 | if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) { | |
641 | DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); | |
642 | ret = BCME_NORESOURCE; | |
643 | break; | |
644 | } | |
645 | ||
646 | bus->regs = regs; | |
647 | bus->tcm = tcm; | |
648 | bus->osh = osh; | |
649 | /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */ | |
650 | bus->dev = (struct pci_dev *)pci_dev; | |
651 | ||
652 | dll_init(&bus->flowring_active_list); | |
653 | #ifdef IDLE_TX_FLOW_MGMT | |
654 | bus->active_list_last_process_ts = OSL_SYSUPTIME(); | |
655 | #endif /* IDLE_TX_FLOW_MGMT */ | |
656 | ||
657 | /* Attach pcie shared structure */ | |
658 | if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) { | |
659 | DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__)); | |
660 | ret = BCME_NORESOURCE; | |
661 | break; | |
662 | } | |
663 | ||
664 | /* dhd_common_init(osh); */ | |
665 | ||
666 | if (dhdpcie_dongle_attach(bus)) { | |
667 | DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__)); | |
668 | ret = BCME_NOTREADY; | |
669 | break; | |
670 | } | |
671 | ||
672 | /* software resources */ | |
673 | if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) { | |
674 | DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); | |
675 | ret = BCME_NORESOURCE; | |
676 | break; | |
677 | } | |
678 | DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); | |
679 | bus->dhd->busstate = DHD_BUS_DOWN; | |
680 | bus->dhd->hostrdy_after_init = TRUE; | |
681 | bus->db1_for_mb = TRUE; | |
682 | bus->dhd->hang_report = TRUE; | |
683 | bus->use_mailbox = FALSE; | |
684 | bus->use_d0_inform = FALSE; | |
685 | bus->intr_enabled = FALSE; | |
686 | bus->flr_force_fail = FALSE; | |
687 | /* By default disable HWA and enable it via iovar */ | |
688 | bus->hwa_enab_bmap = 0; | |
689 | /* update the dma indices if set through module parameter. */ | |
690 | if (dma_ring_indices != 0) { | |
691 | dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices); | |
692 | } | |
693 | /* update h2d phase support if set through module parameter */ | |
694 | bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE; | |
695 | /* update force trap on bad phase if set through module parameter */ | |
696 | bus->dhd->force_dongletrap_on_bad_h2d_phase = | |
697 | force_trap_bad_h2d_phase ? TRUE : FALSE; | |
698 | #ifdef IDLE_TX_FLOW_MGMT | |
699 | bus->enable_idle_flowring_mgmt = FALSE; | |
700 | #endif /* IDLE_TX_FLOW_MGMT */ | |
701 | bus->irq_registered = FALSE; | |
702 | ||
703 | #ifdef DHD_MSI_SUPPORT | |
704 | bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ? | |
705 | PCIE_MSI : PCIE_INTX; | |
706 | #else | |
707 | bus->d2h_intr_method = PCIE_INTX; | |
708 | #endif /* DHD_MSI_SUPPORT */ | |
709 | ||
710 | #ifdef DHD_HP2P | |
711 | bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING; | |
712 | bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING; | |
713 | #endif /* DHD_HP2P */ | |
714 | ||
715 | DHD_TRACE(("%s: EXIT SUCCESS\n", | |
716 | __FUNCTION__)); | |
717 | g_dhd_bus = bus; | |
718 | *bus_ptr = bus; | |
719 | return ret; | |
720 | } while (0); | |
721 | ||
722 | DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__)); | |
723 | ||
724 | if (bus && bus->pcie_sh) { | |
725 | MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); | |
726 | } | |
727 | ||
728 | if (bus) { | |
729 | MFREE(osh, bus, sizeof(dhd_bus_t)); | |
730 | } | |
731 | ||
732 | return ret; | |
733 | } | |
734 | ||
735 | bool | |
736 | dhd_bus_skip_clm(dhd_pub_t *dhdp) | |
737 | { | |
738 | switch (dhd_bus_chip_id(dhdp)) { | |
739 | case BCM4369_CHIP_ID: | |
740 | return TRUE; | |
741 | default: | |
742 | return FALSE; | |
743 | } | |
744 | } | |
745 | ||
746 | uint | |
747 | dhd_bus_chip(struct dhd_bus *bus) | |
748 | { | |
749 | ASSERT(bus->sih != NULL); | |
750 | return bus->sih->chip; | |
751 | } | |
752 | ||
753 | uint | |
754 | dhd_bus_chiprev(struct dhd_bus *bus) | |
755 | { | |
756 | ASSERT(bus); | |
757 | ASSERT(bus->sih != NULL); | |
758 | return bus->sih->chiprev; | |
759 | } | |
760 | ||
761 | void * | |
762 | dhd_bus_pub(struct dhd_bus *bus) | |
763 | { | |
764 | return bus->dhd; | |
765 | } | |
766 | ||
767 | void * | |
768 | dhd_bus_sih(struct dhd_bus *bus) | |
769 | { | |
770 | return (void *)bus->sih; | |
771 | } | |
772 | ||
773 | void * | |
774 | dhd_bus_txq(struct dhd_bus *bus) | |
775 | { | |
776 | return &bus->txq; | |
777 | } | |
778 | ||
779 | /** Get Chip ID version */ | |
780 | uint dhd_bus_chip_id(dhd_pub_t *dhdp) | |
781 | { | |
782 | dhd_bus_t *bus = dhdp->bus; | |
783 | return bus->sih->chip; | |
784 | } | |
785 | ||
786 | /** Get Chip Rev ID version */ | |
787 | uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) | |
788 | { | |
789 | dhd_bus_t *bus = dhdp->bus; | |
790 | return bus->sih->chiprev; | |
791 | } | |
792 | ||
793 | /** Get Chip Pkg ID version */ | |
794 | uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) | |
795 | { | |
796 | dhd_bus_t *bus = dhdp->bus; | |
797 | return bus->sih->chippkg; | |
798 | } | |
799 | ||
800 | /** Conduct Loopback test */ | |
801 | int | |
802 | dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) | |
803 | { | |
804 | dma_xfer_info_t dmaxfer_lpbk; | |
805 | int ret = BCME_OK; | |
806 | ||
807 | #define PCIE_DMAXFER_LPBK_LENGTH 4096 | |
808 | memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t)); | |
809 | dmaxfer_lpbk.version = DHD_DMAXFER_VERSION; | |
810 | dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t); | |
811 | dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH; | |
812 | dmaxfer_lpbk.type = type; | |
813 | dmaxfer_lpbk.should_wait = TRUE; | |
814 | ||
815 | ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0, | |
816 | (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET); | |
817 | if (ret < 0) { | |
818 | DHD_ERROR(("failed to start PCIe Loopback Test!!! " | |
819 | "Type:%d Reason:%d\n", type, ret)); | |
820 | return ret; | |
821 | } | |
822 | ||
823 | if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) { | |
824 | DHD_ERROR(("failed to check PCIe Loopback Test!!! " | |
825 | "Type:%d Status:%d Error code:%d\n", type, | |
826 | dmaxfer_lpbk.status, dmaxfer_lpbk.error_code)); | |
827 | ret = BCME_ERROR; | |
828 | } else { | |
829 | DHD_ERROR(("successful to check PCIe Loopback Test" | |
830 | " Type:%d\n", type)); | |
831 | } | |
832 | #undef PCIE_DMAXFER_LPBK_LENGTH | |
833 | ||
834 | return ret; | |
835 | } | |
836 | ||
837 | /* Log the latest DPC schedule time */ | |
838 | void | |
839 | dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp) | |
840 | { | |
841 | dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS(); | |
842 | } | |
843 | ||
844 | /* Check if there are DPC scheduling errors */ | |
845 | bool | |
846 | dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) | |
847 | { | |
848 | dhd_bus_t *bus = dhdp->bus; | |
849 | bool sched_err; | |
850 | ||
851 | if (bus->dpc_entry_time < bus->isr_exit_time) { | |
852 | /* Kernel doesn't schedule the DPC after processing PCIe IRQ */ | |
853 | sched_err = TRUE; | |
854 | } else if (bus->dpc_entry_time < bus->resched_dpc_time) { | |
855 | /* Kernel doesn't schedule the DPC after DHD tries to reschedule | |
856 | * the DPC due to pending work items to be processed. | |
857 | */ | |
858 | sched_err = TRUE; | |
859 | } else { | |
860 | sched_err = FALSE; | |
861 | } | |
862 | ||
863 | if (sched_err) { | |
864 | /* print out minimum timestamp info */ | |
865 | DHD_ERROR(("isr_entry_time="SEC_USEC_FMT | |
866 | " isr_exit_time="SEC_USEC_FMT | |
867 | " dpc_entry_time="SEC_USEC_FMT | |
868 | "\ndpc_exit_time="SEC_USEC_FMT | |
869 | " dpc_sched_time="SEC_USEC_FMT | |
870 | " resched_dpc_time="SEC_USEC_FMT"\n", | |
871 | GET_SEC_USEC(bus->isr_entry_time), | |
872 | GET_SEC_USEC(bus->isr_exit_time), | |
873 | GET_SEC_USEC(bus->dpc_entry_time), | |
874 | GET_SEC_USEC(bus->dpc_exit_time), | |
875 | GET_SEC_USEC(bus->dpc_sched_time), | |
876 | GET_SEC_USEC(bus->resched_dpc_time))); | |
877 | } | |
878 | ||
879 | return sched_err; | |
880 | } | |
881 | ||
882 | /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */ | |
883 | uint32 | |
884 | dhdpcie_bus_intstatus(dhd_bus_t *bus) | |
885 | { | |
886 | uint32 intstatus = 0; | |
887 | uint32 intmask = 0; | |
888 | ||
889 | if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { | |
890 | DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__)); | |
891 | return intstatus; | |
892 | } | |
893 | if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || | |
894 | (bus->sih->buscorerev == 2)) { | |
895 | intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); | |
896 | dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); | |
897 | intstatus &= I_MB; | |
898 | } else { | |
899 | /* this is a PCIe core register, not a config register */ | |
900 | intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0); | |
901 | ||
902 | /* this is a PCIe core register, not a config register */ | |
903 | intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0); | |
904 | /* Check whether the device was removed: intstatus & intmask read back as 0xffffffff */ | |
905 | if (intstatus == (uint32)-1 || intmask == (uint32)-1) { | |
906 | DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__)); | |
907 | DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n", | |
908 | __FUNCTION__, intstatus, intmask)); | |
909 | bus->is_linkdown = TRUE; | |
910 | dhd_pcie_debug_info_dump(bus->dhd); | |
911 | return intstatus; | |
912 | } | |
913 | ||
914 | #ifndef DHD_READ_INTSTATUS_IN_DPC | |
915 | intstatus &= intmask; | |
916 | #endif /* DHD_READ_INTSTATUS_IN_DPC */ | |
917 | ||
918 | /* | |
919 | * The fourth argument to si_corereg is the "mask" of register fields to update | |
920 | * and the fifth is the "value" to write. If we are interested in only a few | |
921 | * fields of the "mask" bitmap, we should not write back everything we read; | |
922 | * doing so might clear/ack interrupts that have not been handled yet. | |
923 | */ | |
924 | si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask, | |
925 | intstatus); | |
926 | ||
927 | intstatus &= bus->def_intmask; | |
928 | } | |
929 | ||
930 | return intstatus; | |
931 | } | |
932 | ||
933 | void | |
934 | dhdpcie_cto_recovery_handler(dhd_pub_t *dhd) | |
935 | { | |
936 | dhd_bus_t *bus = dhd->bus; | |
937 | int ret; | |
938 | ||
939 | /* Disable PCIe Runtime PM to avoid D3_ACK timeout. | |
940 | */ | |
941 | DHD_DISABLE_RUNTIME_PM(dhd); | |
942 | ||
943 | /* Sleep for 1 second so that any AXI timeout, | |
944 | * even when running on the ALP clock, is also captured | |
945 | */ | |
946 | OSL_SLEEP(1000); | |
947 | ||
948 | /* reset the backplane and clear the CTO, | |
949 | * after which access through PCIe is recovered. | |
950 | */ | |
951 | ret = dhdpcie_cto_error_recovery(bus); | |
952 | if (!ret) { | |
953 | /* Waiting for backplane reset */ | |
954 | OSL_SLEEP(10); | |
955 | /* Dump debug Info */ | |
956 | dhd_prot_debug_info_print(bus->dhd); | |
957 | /* Dump console buffer */ | |
958 | dhd_bus_dump_console_buffer(bus); | |
959 | #if defined(DHD_FW_COREDUMP) | |
960 | /* save core dump or write to a file */ | |
961 | if (!bus->is_linkdown && bus->dhd->memdump_enabled) { | |
962 | #ifdef DHD_SSSR_DUMP | |
963 | bus->dhd->collect_sssr = TRUE; | |
964 | #endif /* DHD_SSSR_DUMP */ | |
965 | bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY; | |
966 | dhdpcie_mem_dump(bus); | |
967 | } | |
968 | #endif /* DHD_FW_COREDUMP */ | |
969 | } | |
970 | bus->is_linkdown = TRUE; | |
971 | bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT; | |
972 | /* Send HANG event */ | |
973 | dhd_os_send_hang_message(bus->dhd); | |
974 | } | |
975 | ||
976 | /** | |
977 | * Name: dhdpcie_bus_isr | |
978 | * Parameters: | |
979 | * 1: IN int irq -- interrupt vector | |
980 | * 2: IN void *arg -- handle to private data structure | |
981 | * Return value: | |
982 | * Status (TRUE or FALSE) | |
983 | * | |
984 | * Description: | |
985 | * The interrupt service routine checks the status register, | |
986 | * disables the interrupt, and queues the DPC if mailbox interrupts are raised. | |
987 | */ | |
988 | int32 | |
989 | dhdpcie_bus_isr(dhd_bus_t *bus) | |
990 | { | |
991 | uint32 intstatus = 0; | |
992 | ||
993 | do { | |
994 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
995 | /* verify argument */ | |
996 | if (!bus) { | |
997 | DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__)); | |
998 | break; | |
999 | } | |
1000 | ||
1001 | if (bus->dhd->dongle_reset) { | |
1002 | DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__)); | |
1003 | break; | |
1004 | } | |
1005 | ||
1006 | if (bus->dhd->busstate == DHD_BUS_DOWN) { | |
1007 | DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__)); | |
1008 | break; | |
1009 | } | |
1010 | ||
1011 | /* avoid processing of interrupts until msgbuf prot is inited */ | |
1012 | if (!bus->intr_enabled) { | |
1013 | DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__)); | |
1014 | break; | |
1015 | } | |
1016 | ||
1017 | if (PCIECTO_ENAB(bus)) { | |
1018 | /* read pci_intstatus */ | |
1019 | intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4); | |
1020 | ||
1021 | if (intstatus == (uint32)-1) { | |
1022 | DHD_ERROR(("%s : Invalid intstatus for cto recovery\n", | |
1023 | __FUNCTION__)); | |
1024 | dhdpcie_disable_irq_nosync(bus); | |
1025 | break; | |
1026 | } | |
1027 | ||
1028 | if (intstatus & PCI_CTO_INT_MASK) { | |
1029 | DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE " | |
1030 | "intstat=0x%x enab=%d\n", __FUNCTION__, | |
1031 | intstatus, bus->cto_enable)); | |
1032 | bus->cto_triggered = 1; | |
1033 | /* | |
1034 | * DAR still accessible | |
1035 | */ | |
1036 | dhd_bus_dump_dar_registers(bus); | |
1037 | ||
1038 | /* Disable further PCIe interrupts */ | |
1039 | dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */ | |
1040 | /* Stop Tx flow */ | |
1041 | dhd_bus_stop_queue(bus); | |
1042 | ||
1043 | /* Schedule CTO recovery */ | |
1044 | dhd_schedule_cto_recovery(bus->dhd); | |
1045 | ||
1046 | return TRUE; | |
1047 | } | |
1048 | } | |
1049 | ||
1050 | if (bus->d2h_intr_method == PCIE_MSI) { | |
1051 | /* For MSI, as intstatus is cleared by firmware, no need to read */ | |
1052 | goto skip_intstatus_read; | |
1053 | } | |
1054 | ||
1055 | #ifndef DHD_READ_INTSTATUS_IN_DPC | |
1056 | intstatus = dhdpcie_bus_intstatus(bus); | |
1057 | ||
1058 | /* Check if the interrupt is ours or not */ | |
1059 | if (intstatus == 0) { | |
1060 | /* In EFI, since we poll for interrupts, this message would flood the logs, | |
1061 | * so it is disabled for EFI | |
1062 | */ | |
1063 | DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__)); | |
1064 | bus->non_ours_irq_count++; | |
1065 | bus->last_non_ours_irq_time = OSL_LOCALTIME_NS(); | |
1066 | break; | |
1067 | } | |
1068 | ||
1069 | /* save the intstatus */ | |
1070 | /* read interrupt status register!! Status bits will be cleared in DPC !! */ | |
1071 | bus->intstatus = intstatus; | |
1072 | ||
1073 | /* return error for 0xFFFFFFFF */ | |
1074 | if (intstatus == (uint32)-1) { | |
1075 | DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n", | |
1076 | __FUNCTION__, intstatus)); | |
1077 | dhdpcie_disable_irq_nosync(bus); | |
1078 | break; | |
1079 | } | |
1080 | ||
1081 | skip_intstatus_read: | |
1082 | /* Overall operation: | |
1083 | * - Mask further interrupts | |
1084 | * - Read/ack intstatus | |
1085 | * - Take action based on bits and state | |
1086 | * - Reenable interrupts (as per state) | |
1087 | */ | |
1088 | ||
1089 | /* Count the interrupt call */ | |
1090 | bus->intrcount++; | |
1091 | #endif /* DHD_READ_INTSTATUS_IN_DPC */ | |
1092 | ||
1093 | bus->ipend = TRUE; | |
1094 | ||
1095 | bus->isr_intr_disable_count++; | |
1096 | ||
1097 | /* For Linux, MacOS etc. (other than NDIS), instead of disabling the | |
1098 | * dongle interrupt by clearing the IntMask, disable the interrupt | |
1099 | * directly on the host side, so that the host will not receive | |
1100 | * any interrupts at all, even though the dongle raises interrupts | |
1101 | */ | |
1102 | dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */ | |
1103 | ||
1104 | bus->intdis = TRUE; | |
1105 | ||
1106 | #if defined(PCIE_ISR_THREAD) | |
1107 | ||
1108 | DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__)); | |
1109 | DHD_OS_WAKE_LOCK(bus->dhd); | |
1110 | while (dhd_bus_dpc(bus)); | |
1111 | DHD_OS_WAKE_UNLOCK(bus->dhd); | |
1112 | #else | |
1113 | bus->dpc_sched = TRUE; | |
1114 | dhd_sched_dpc(bus->dhd); /* queue DPC now!! */ | |
1115 | #endif /* defined(PCIE_ISR_THREAD) */ | |
1116 | ||
1117 | DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__)); | |
1118 | return TRUE; | |
1119 | ||
1120 | } while (0); | |
1121 | ||
1122 | DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__)); | |
1123 | return FALSE; | |
1124 | } | |
1125 | ||
1126 | int | |
1127 | dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state) | |
1128 | { | |
1129 | uint32 cur_state = 0; | |
1130 | uint32 pm_csr = 0; | |
1131 | osl_t *osh = bus->osh; | |
1132 | ||
1133 | pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); | |
1134 | cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK; | |
1135 | ||
1136 | if (cur_state == state) { | |
1137 | DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state)); | |
1138 | return BCME_OK; | |
1139 | } | |
1140 | ||
1141 | if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT) | |
1142 | return BCME_ERROR; | |
1143 | ||
1144 | /* Validate the state transition: | |
1145 | * if already in a lower power state, return an error | |
1146 | */ | |
1147 | if (state != PCIECFGREG_PM_CSR_STATE_D0 && | |
1148 | cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD && | |
1149 | cur_state > state) { | |
1150 | DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__)); | |
1151 | return BCME_ERROR; | |
1152 | } | |
1153 | ||
1154 | pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK; | |
1155 | pm_csr |= state; | |
1156 | ||
1157 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr); | |
1158 | ||
1159 | /* need to wait for the specified mandatory pcie power transition delay time */ | |
1160 | if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT || | |
1161 | cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT) | |
1162 | OSL_DELAY(DHDPCIE_PM_D3_DELAY); | |
1163 | else if (state == PCIECFGREG_PM_CSR_STATE_D2 || | |
1164 | cur_state == PCIECFGREG_PM_CSR_STATE_D2) | |
1165 | OSL_DELAY(DHDPCIE_PM_D2_DELAY); | |
1166 | ||
1167 | /* read back the power state and verify */ | |
1168 | pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); | |
1169 | cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK; | |
1170 | if (cur_state != state) { | |
1171 | DHD_ERROR(("%s: power transition failed ! Current state is %u \n", | |
1172 | __FUNCTION__, cur_state)); | |
1173 | return BCME_ERROR; | |
1174 | } else { | |
1175 | DHD_ERROR(("%s: power transition to %u success \n", | |
1176 | __FUNCTION__, cur_state)); | |
1177 | } | |
1178 | ||
1179 | return BCME_OK; | |
1180 | } | |
1181 | ||
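/*
 * Illustrative sketch, not original driver code: a D0 -> D3hot -> D0 round
 * trip using dhdpcie_set_pwr_state() above. Real DHD suspend/resume paths
 * additionally check the link state and save/restore config space around
 * such transitions.
 */
static INLINE int
dhd_example_pm_roundtrip(dhd_bus_t *bus)
{
	int ret;

	ret = dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
	if (ret != BCME_OK) {
		return ret;
	}
	return dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
}
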
1182 | int | |
1183 | dhdpcie_config_check(dhd_bus_t *bus) | |
1184 | { | |
1185 | uint32 i, val; | |
1186 | int ret = BCME_ERROR; | |
1187 | ||
1188 | for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) { | |
1189 | val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32)); | |
1190 | if ((val & 0xFFFF) == VENDOR_BROADCOM) { | |
1191 | ret = BCME_OK; | |
1192 | break; | |
1193 | } | |
1194 | OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000); | |
1195 | } | |
1196 | ||
1197 | return ret; | |
1198 | } | |
1199 | ||
1200 | int | |
1201 | dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr) | |
1202 | { | |
1203 | uint32 i; | |
1204 | osl_t *osh = bus->osh; | |
1205 | ||
1206 | if (BCME_OK != dhdpcie_config_check(bus)) { | |
1207 | return BCME_ERROR; | |
1208 | } | |
1209 | ||
1210 | for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) { | |
1211 | OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]); | |
1212 | } | |
1213 | OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]); | |
1214 | ||
1215 | if (restore_pmcsr) | |
1216 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, | |
1217 | sizeof(uint32), bus->saved_config.pmcsr); | |
1218 | ||
1219 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap); | |
1220 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32), | |
1221 | bus->saved_config.msi_addr0); | |
1222 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, | |
1223 | sizeof(uint32), bus->saved_config.msi_addr1); | |
1224 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA, | |
1225 | sizeof(uint32), bus->saved_config.msi_data); | |
1226 | ||
1227 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL, | |
1228 | sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat); | |
1229 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2, | |
1230 | sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2); | |
1231 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL, | |
1232 | sizeof(uint32), bus->saved_config.exp_link_ctrl_stat); | |
1233 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2, | |
1234 | sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2); | |
1235 | ||
1236 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, | |
1237 | sizeof(uint32), bus->saved_config.l1pm0); | |
1238 | OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, | |
1239 | sizeof(uint32), bus->saved_config.l1pm1); | |
1240 | ||
1241 | OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32), | |
1242 | bus->saved_config.bar0_win); | |
1243 | dhdpcie_setbar1win(bus, bus->saved_config.bar1_win); | |
1244 | ||
1245 | return BCME_OK; | |
1246 | } | |
1247 | ||
1248 | int | |
1249 | dhdpcie_config_save(dhd_bus_t *bus) | |
1250 | { | |
1251 | uint32 i; | |
1252 | osl_t *osh = bus->osh; | |
1253 | ||
1254 | if (BCME_OK != dhdpcie_config_check(bus)) { | |
1255 | return BCME_ERROR; | |
1256 | } | |
1257 | ||
1258 | for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) { | |
1259 | bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32)); | |
1260 | } | |
1261 | ||
1262 | bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); | |
1263 | ||
1264 | bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP, | |
1265 | sizeof(uint32)); | |
1266 | bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, | |
1267 | sizeof(uint32)); | |
1268 | bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, | |
1269 | sizeof(uint32)); | |
1270 | bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA, | |
1271 | sizeof(uint32)); | |
1272 | ||
1273 | bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh, | |
1274 | PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32)); | |
1275 | bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh, | |
1276 | PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32)); | |
1277 | bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh, | |
1278 | PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32)); | |
1279 | bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh, | |
1280 | PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32)); | |
1281 | ||
1282 | bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, | |
1283 | sizeof(uint32)); | |
1284 | bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, | |
1285 | sizeof(uint32)); | |
1286 | ||
1287 | bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN, | |
1288 | sizeof(uint32)); | |
1289 | bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN, | |
1290 | sizeof(uint32)); | |
1291 | ||
1292 | return BCME_OK; | |
1293 | } | |
1294 | ||
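/*
 * Illustrative pairing sketch, not original driver code: configuration
 * space is saved with dhdpcie_config_save() before the device may lose
 * power or the link may go down, and restored (optionally including the
 * PM CSR) once the device is accessible again.
 */
static INLINE int
dhd_example_config_roundtrip(dhd_bus_t *bus)
{
	if (dhdpcie_config_save(bus) != BCME_OK) {
		return BCME_ERROR;
	}
	/* ... device suspend/resume or power toggle would happen here ... */
	return dhdpcie_config_restore(bus, TRUE);
}
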
1295 | #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY | |
1296 | dhd_pub_t *link_recovery = NULL; | |
1297 | #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ | |
1298 | ||
1299 | static void | |
1300 | dhdpcie_bus_intr_init(dhd_bus_t *bus) | |
1301 | { | |
1302 | uint buscorerev = bus->sih->buscorerev; | |
1303 | bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev); | |
1304 | bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev); | |
1305 | bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev); | |
1306 | bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev); | |
1307 | if (buscorerev < 64) { | |
1308 | bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1; | |
1309 | } | |
1310 | } | |
1311 | ||
1312 | static void | |
1313 | dhdpcie_cc_watchdog_reset(dhd_bus_t *bus) | |
1314 | { | |
1315 | uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN : | |
1316 | (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN); | |
1317 | pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en); | |
1318 | } | |
1319 | ||
1320 | void | |
1321 | dhdpcie_dongle_reset(dhd_bus_t *bus) | |
1322 | { | |
1323 | /* if the pcie link is down, watchdog reset | |
1324 | * should not be done, as it may hang | |
1325 | */ | |
1326 | if (bus->is_linkdown) { | |
1327 | return; | |
1328 | } | |
1329 | ||
1330 | /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */ | |
1331 | if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) { | |
1332 | #ifdef DHD_USE_BP_RESET | |
1333 | /* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */ | |
1334 | dhd_bus_perform_bp_reset(bus); | |
1335 | #else | |
1336 | /* Legacy chipcommon watchdog reset */ | |
1337 | dhdpcie_cc_watchdog_reset(bus); | |
1338 | #endif /* DHD_USE_BP_RESET */ | |
1339 | } | |
1340 | } | |
1341 | ||
1342 | static bool | |
1343 | dhdpcie_dongle_attach(dhd_bus_t *bus) | |
1344 | { | |
1345 | osl_t *osh = bus->osh; | |
1346 | volatile void *regsva = (volatile void*)bus->regs; | |
1347 | uint16 devid; | |
1348 | uint32 val; | |
1349 | sbpcieregs_t *sbpcieregs; | |
1350 | bool dongle_isolation; | |
1351 | ||
1352 | DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); | |
1353 | ||
1354 | #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY | |
1355 | link_recovery = bus->dhd; | |
1356 | #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ | |
1357 | ||
1358 | bus->alp_only = TRUE; | |
1359 | bus->sih = NULL; | |
1360 | ||
1361 | /* Check the PCIe bus status by reading configuration space */ | |
1362 | val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32)); | |
1363 | if ((val & 0xFFFF) != VENDOR_BROADCOM) { | |
1364 | DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__)); | |
1365 | goto fail; | |
1366 | } | |
1367 | devid = (val >> 16) & 0xFFFF; | |
1368 | bus->cl_devid = devid; | |
1369 | ||
1370 | /* Set bar0 window to si_enum_base */ | |
1371 | dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid)); | |
1372 | ||
1373 | /* | |
1374 | * Check the PCI_SPROM_CONTROL register to prevent invalid address access | |
1375 | * due to switching the address space from PCI_BUS to SI_BUS. | |
1376 | */ | |
1377 | val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32)); | |
1378 | if (val == 0xffffffff) { | |
1379 | DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__)); | |
1380 | goto fail; | |
1381 | } | |
1382 | ||
1383 | /* si_attach() will provide an SI handle and scan the backplane */ | |
1384 | if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus, | |
1385 | &bus->vars, &bus->varsz))) { | |
1386 | DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); | |
1387 | goto fail; | |
1388 | } | |
1389 | ||
1390 | /* Configure CTO Prevention functionality */ | |
1391 | #if defined(BCMFPGA_HW) | |
1392 | DHD_ERROR(("Disable CTO\n")); | |
1393 | bus->cto_enable = FALSE; | |
1394 | #else | |
1395 | #if defined(BCMPCIE_CTO_PREVENTION) | |
1396 | if (bus->sih->buscorerev >= 24) { | |
1397 | DHD_ERROR(("Enable CTO\n")); | |
1398 | bus->cto_enable = TRUE; | |
1399 | } else | |
1400 | #endif /* BCMPCIE_CTO_PREVENTION */ | |
1401 | { | |
1402 | DHD_ERROR(("Disable CTO\n")); | |
1403 | bus->cto_enable = FALSE; | |
1404 | } | |
1405 | #endif /* BCMFPGA_HW */ | |
1406 | ||
1407 | if (PCIECTO_ENAB(bus)) { | |
1408 | dhdpcie_cto_init(bus, TRUE); | |
1409 | } | |
1410 | ||
1411 | if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) { | |
1412 | /* | |
1413 | * HW JIRA - CRWLPCIEGEN2-672 | |
1414 | * The Producer Index feature, which is used by F1, gets reset on an F0 FLR; | |
1415 | * fixed in REV68 | |
1416 | */ | |
1417 | if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { | |
1418 | dhdpcie_ssreset_dis_enum_rst(bus); | |
1419 | } | |
1420 | ||
1421 | /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset | |
1422 | * dhdpcie_bus_release_dongle() --> si_detach() | |
1423 | * dhdpcie_dongle_attach() --> si_attach() | |
1424 | */ | |
1425 | bus->pwr_req_ref = 0; | |
1426 | } | |
1427 | ||
1428 | if (MULTIBP_ENAB(bus->sih)) { | |
1429 | dhd_bus_pcie_pwr_req_nolock(bus); | |
1430 | } | |
1431 | ||
1432 | /* Get info on the ARM and SOCRAM cores... */ | |
1433 | /* Should really be qualified by device id */ | |
1434 | if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || | |
1435 | (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || | |
1436 | (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || | |
1437 | (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { | |
1438 | bus->armrev = si_corerev(bus->sih); | |
1439 | bus->coreid = si_coreid(bus->sih); | |
1440 | } else { | |
1441 | DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); | |
1442 | goto fail; | |
1443 | } | |
1444 | ||
1445 | /* CA7 requires coherent bits on */ | |
1446 | if (bus->coreid == ARMCA7_CORE_ID) { | |
1447 | val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4); | |
1448 | dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, | |
1449 | (val | PCIE_BARCOHERENTACCEN_MASK)); | |
1450 | } | |
1451 | ||
1452 | /* Olympic EFI requirement - stop driver load if FW is already running. | |
1453 | * This needs to be done here before pcie_watchdog_reset, because | |
1454 | * pcie_watchdog_reset will put the ARM back into the halt state | |
1455 | */ | |
1456 | if (!dhdpcie_is_arm_halted(bus)) { | |
1457 | DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n", | |
1458 | __FUNCTION__)); | |
1459 | goto fail; | |
1460 | } | |
1461 | ||
1462 | BCM_REFERENCE(dongle_isolation); | |
1463 | ||
1464 | /* For built-in drivers, the PCIe clock request will be done by the RC, | |
1465 | * so do not issue clkreq from DHD | |
1466 | */ | |
1467 | if (dhd_download_fw_on_driverload) | |
1468 | { | |
1469 | /* Enable CLKREQ# */ | |
1470 | dhdpcie_clkreq(bus->osh, 1, 1); | |
1471 | } | |
1472 | ||
1473 | /* | |
1474 | * bus->dhd will be NULL when called from dhd_bus_attach, so reset | |
1475 | * without checking the dongle_isolation flag. If called via some other path, | |
1476 | * such as a quiesce FLR, then the dongle_isolation flag decides whether | |
1477 | * watchdog_reset should be called. | |
1478 | */ | |
1479 | if (bus->dhd == NULL) { | |
1480 | /* dhd_attach not yet happened, do watchdog reset */ | |
1481 | dongle_isolation = FALSE; | |
1482 | } else { | |
1483 | dongle_isolation = bus->dhd->dongle_isolation; | |
1484 | } | |
1485 | ||
1486 | #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH | |
1487 | /* | |
1488 | * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd | |
1489 | * This is required to avoid spurious interrupts to the Host and bring back | |
1490 | * dongle to a sane state (on host soft-reboot / watchdog-reboot). | |
1491 | */ | |
1492 | if (dongle_isolation == FALSE) { | |
1493 | dhdpcie_dongle_reset(bus); | |
1494 | } | |
1495 | #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */ | |
1496 | ||
1497 | /* need to set the force_bt_quiesce flag here | |
1498 | * before calling dhdpcie_dongle_flr_or_pwr_toggle | |
1499 | */ | |
1500 | bus->force_bt_quiesce = TRUE; | |
1501 | /* | |
1502 | * For buscorerev 66 and later, the F0 FLR is done independently of F1, | |
1503 | * so BT quiesce is not needed. | |
1504 | */ | |
1505 | if (bus->sih->buscorerev >= 66) { | |
1506 | bus->force_bt_quiesce = FALSE; | |
1507 | } | |
1508 | ||
1509 | dhdpcie_dongle_flr_or_pwr_toggle(bus); | |
1510 | ||
1511 | si_setcore(bus->sih, PCIE2_CORE_ID, 0); | |
1512 | sbpcieregs = (sbpcieregs_t*)(bus->regs); | |
1513 | ||
1514 | /* WAR for the case where the BAR1 window may not be sized properly */ | |
1515 | W_REG(osh, &sbpcieregs->configaddr, 0x4e0); | |
1516 | val = R_REG(osh, &sbpcieregs->configdata); | |
1517 | W_REG(osh, &sbpcieregs->configdata, val); | |
1518 | ||
1519 | if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { | |
1520 | /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not | |
1521 | * adjusted. | |
1522 | */ | |
1523 | if (!bus->ramsize_adjusted) { | |
1524 | if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { | |
1525 | DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); | |
1526 | goto fail; | |
1527 | } | |
1528 | switch ((uint16)bus->sih->chip) { | |
1529 | default: | |
1530 | /* also populate base address */ | |
1531 | bus->dongle_ram_base = CA7_4365_RAM_BASE; | |
1532 | bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */ | |
1533 | break; | |
1534 | } | |
1535 | } | |
1536 | } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { | |
1537 | if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { | |
1538 | DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); | |
1539 | goto fail; | |
1540 | } | |
1541 | } else { | |
1542 | /* CR4 determines the RAM size differently, from the TCMs */ | |
1543 | if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { | |
1544 | DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); | |
1545 | goto fail; | |
1546 | } | |
1547 | /* also populate base address */ | |
1548 | switch ((uint16)bus->sih->chip) { | |
1549 | case BCM4339_CHIP_ID: | |
1550 | case BCM4335_CHIP_ID: | |
1551 | bus->dongle_ram_base = CR4_4335_RAM_BASE; | |
1552 | break; | |
1553 | case BCM4358_CHIP_ID: | |
1554 | case BCM4354_CHIP_ID: | |
1555 | case BCM43567_CHIP_ID: | |
1556 | case BCM43569_CHIP_ID: | |
1557 | case BCM4350_CHIP_ID: | |
1558 | case BCM43570_CHIP_ID: | |
1559 | bus->dongle_ram_base = CR4_4350_RAM_BASE; | |
1560 | break; | |
1561 | case BCM4360_CHIP_ID: | |
1562 | bus->dongle_ram_base = CR4_4360_RAM_BASE; | |
1563 | break; | |
1564 | ||
1565 | case BCM4364_CHIP_ID: | |
1566 | bus->dongle_ram_base = CR4_4364_RAM_BASE; | |
1567 | break; | |
1568 | ||
1569 | CASE_BCM4345_CHIP: | |
1570 | bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */ | |
1571 | ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; | |
1572 | break; | |
1573 | CASE_BCM43602_CHIP: | |
1574 | bus->dongle_ram_base = CR4_43602_RAM_BASE; | |
1575 | break; | |
1576 | case BCM4349_CHIP_GRPID: | |
1577 | /* RAM base changed from 4349c0 (revid=9) onwards */ | |
1578 | bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? | |
1579 | CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); | |
1580 | break; | |
1581 | case BCM4347_CHIP_ID: | |
1582 | case BCM4357_CHIP_ID: | |
1583 | case BCM4361_CHIP_ID: | |
1584 | bus->dongle_ram_base = CR4_4347_RAM_BASE; | |
1585 | break; | |
1586 | case BCM4362_CHIP_ID: | |
1587 | bus->dongle_ram_base = CR4_4362_RAM_BASE; | |
1588 | break; | |
1589 | case BCM43751_CHIP_ID: | |
1590 | bus->dongle_ram_base = CR4_43751_RAM_BASE; | |
1591 | break; | |
1592 | case BCM43752_CHIP_ID: | |
1593 | bus->dongle_ram_base = CR4_43752_RAM_BASE; | |
1594 | break; | |
1595 | case BCM4375_CHIP_ID: | |
1596 | case BCM4369_CHIP_ID: | |
1597 | bus->dongle_ram_base = CR4_4369_RAM_BASE; | |
1598 | break; | |
1599 | default: | |
1600 | bus->dongle_ram_base = 0; | |
1601 | DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", | |
1602 | __FUNCTION__, bus->dongle_ram_base)); | |
1603 | } | |
1604 | } | |
1605 | bus->ramsize = bus->orig_ramsize; | |
1606 | if (dhd_dongle_memsize) | |
1607 | dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize); | |
1608 | ||
1609 | if (bus->ramsize > DONGLE_TCM_MAP_SIZE) { | |
1610 | DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n", | |
1611 | __FUNCTION__, bus->ramsize, bus->ramsize)); | |
1612 | goto fail; | |
1613 | } | |
1614 | ||
1615 | DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", | |
1616 | bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); | |
1617 | ||
1618 | bus->srmemsize = si_socram_srmem_size(bus->sih); | |
1619 | ||
1620 | dhdpcie_bus_intr_init(bus); | |
1621 | ||
1622 | /* Set the poll and/or interrupt flags */ | |
1623 | bus->intr = (bool)dhd_intr; | |
1624 | if ((bus->poll = (bool)dhd_poll)) | |
1625 | bus->pollrate = 1; | |
1626 | #ifdef DHD_DISABLE_ASPM | |
1627 | dhd_bus_aspm_enable_rc_ep(bus, FALSE); | |
1628 | #endif /* DHD_DISABLE_ASPM */ | |
1629 | ||
1630 | bus->idma_enabled = TRUE; | |
1631 | bus->ifrm_enabled = TRUE; | |
1632 | DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__)); | |
1633 | ||
1634 | if (MULTIBP_ENAB(bus->sih)) { | |
1635 | dhd_bus_pcie_pwr_req_clear_nolock(bus); | |
1636 | ||
1637 | /* | |
1638 | * One-time clearing of the Common Power Domain, since the HW default is set. | |
1639 | * This needs to be after FLR because FLR resets PCIe enum back to HW defaults | |
1640 | * for 4378B0 (rev 68). | |
1641 | * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672. | |
1642 | */ | |
1643 | si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0); | |
1644 | ||
1645 | /* | |
1646 | * WAR to fix ARM cold boot; | |
1647 | * asserting the WL domain in DAR helps, but not enum. | |
1648 | */ | |
1649 | if (bus->sih->buscorerev >= 68) { | |
1650 | dhd_bus_pcie_pwr_req_wl_domain(bus, | |
1651 | DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE); | |
1652 | } | |
1653 | } | |
1654 | ||
1655 | return 0; | |
1656 | ||
1657 | fail: | |
1658 | if (bus->sih != NULL) { | |
1659 | if (MULTIBP_ENAB(bus->sih)) { | |
1660 | dhd_bus_pcie_pwr_req_clear_nolock(bus); | |
1661 | } | |
1662 | /* For EFI, even if there is an error the load still succeeds, | |
1663 | * so si_detach should not be called here; it is called during unload. | |
1664 | */ | |
1665 | si_detach(bus->sih); | |
1666 | bus->sih = NULL; | |
1667 | } | |
1668 | DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__)); | |
1669 | return -1; | |
1670 | } | |
1671 | ||
1672 | int | |
1673 | dhpcie_bus_unmask_interrupt(dhd_bus_t *bus) | |
1674 | { | |
1675 | dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB); | |
1676 | return 0; | |
1677 | } | |
1678 | int | |
1679 | dhpcie_bus_mask_interrupt(dhd_bus_t *bus) | |
1680 | { | |
1681 | dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0); | |
1682 | return 0; | |
1683 | } | |
1684 | ||
1685 | /* Non atomic function, caller should hold appropriate lock */ | |
1686 | void | |
1687 | dhdpcie_bus_intr_enable(dhd_bus_t *bus) | |
1688 | { | |
1689 | DHD_TRACE(("%s Enter\n", __FUNCTION__)); | |
1690 | if (bus) { | |
1691 | if (bus->sih && !bus->is_linkdown) { | |
1692 | /* Skip after receiving D3 ACK */ | |
1693 | if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { | |
1694 | return; | |
1695 | } | |
1696 | if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || | |
1697 | (bus->sih->buscorerev == 4)) { | |
1698 | dhpcie_bus_unmask_interrupt(bus); | |
1699 | } else { | |
1700 | #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) | |
1701 | dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, | |
1702 | bus->def_intmask, TRUE); | |
1703 | #endif | |
1704 | si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, | |
1705 | bus->def_intmask, bus->def_intmask); | |
1706 | } | |
1707 | } | |
1708 | ||
1709 | } | |
1710 | ||
1711 | DHD_TRACE(("%s Exit\n", __FUNCTION__)); | |
1712 | } | |
1713 | ||
1714 | /* Non atomic function, caller should hold appropriate lock */ | |
1715 | void | |
1716 | dhdpcie_bus_intr_disable(dhd_bus_t *bus) | |
1717 | { | |
1718 | DHD_TRACE(("%s Enter\n", __FUNCTION__)); | |
1719 | if (bus && bus->sih && !bus->is_linkdown) { | |
1720 | /* Skip after receiving D3 ACK */ | |
1721 | if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { | |
1722 | return; | |
1723 | } | |
1724 | if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || | |
1725 | (bus->sih->buscorerev == 4)) { | |
1726 | dhpcie_bus_mask_interrupt(bus); | |
1727 | } else { | |
1728 | si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, | |
1729 | bus->def_intmask, 0); | |
1730 | } | |
1731 | } | |
1732 | ||
1733 | DHD_TRACE(("%s Exit\n", __FUNCTION__)); | |
1734 | } | |
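/*
 * Note on the si_corereg() calls in the two functions above (a reading aid, not
 * new behaviour): the mask argument selects which bits of the PCIe mailbox-mask
 * register are touched and the value argument supplies their new state, so a
 * minimal sketch of the two operations is:
 *
 *	si_corereg(sih, idx, pcie_mailbox_mask, def_intmask, def_intmask);  // unmask
 *	si_corereg(sih, idx, pcie_mailbox_mask, def_intmask, 0);            // mask
 *
 * Only the interrupt sources covered by def_intmask are affected either way.
 */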
1735 | ||
1736 | /* | |
1737 | * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress to other | |
1738 | * bus-user contexts (Tx, Rx, IOVAR, WD, etc.) and waits for those contexts to | |
1739 | * exit gracefully. Before marking dhd_bus_busy_state as busy, each bus-user | |
1740 | * context checks whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; | |
1741 | * if so, it exits right there without setting its busy bit. See the sketch below. | |
1742 | */ | |
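/*
 * Minimal sketch of the check described above, as a bus-user context would do it
 * (illustrative only; the wrapping function and the exact busy bit are
 * hypothetical, the macros and fields are the ones used elsewhere in this file):
 *
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
 *		DHD_GENERAL_UNLOCK(dhdp, flags);
 *		return;                        // bail out without setting a busy bit
 *	}
 *	DHD_BUS_BUSY_SET_IN_WD(dhdp);          // e.g. the watchdog context
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 *	// ... do the bus work ...
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	DHD_BUS_BUSY_CLEAR_IN_WD(dhdp);
 *	dhd_os_busbusy_wake(dhdp);             // lets dhd_os_busbusy_wait_negation() return
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 */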
1743 | void | |
1744 | dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp) | |
1745 | { | |
1746 | unsigned long flags; | |
1747 | int timeleft; | |
1748 | ||
1749 | dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms; | |
1750 | if (dhdp->dhd_watchdog_ms_backup) { | |
1751 | DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n", | |
1752 | __FUNCTION__)); | |
1753 | dhd_os_wd_timer(dhdp, 0); | |
1754 | } | |
1755 | if (dhdp->busstate != DHD_BUS_DOWN) { | |
1756 | DHD_GENERAL_LOCK(dhdp, flags); | |
1757 | dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; | |
1758 | DHD_GENERAL_UNLOCK(dhdp, flags); | |
1759 | } | |
1760 | ||
1761 | timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); | |
1762 | if ((timeleft == 0) || (timeleft == 1)) { | |
1763 | DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", | |
1764 | __FUNCTION__, dhdp->dhd_bus_busy_state)); | |
1765 | ASSERT(0); | |
1766 | } | |
1767 | ||
1768 | return; | |
1769 | } | |
1770 | ||
1771 | static void | |
1772 | dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp) | |
1773 | { | |
1774 | unsigned long flags; | |
1775 | int timeleft; | |
1776 | ||
1777 | DHD_GENERAL_LOCK(dhdp, flags); | |
1778 | dhdp->busstate = DHD_BUS_REMOVE; | |
1779 | DHD_GENERAL_UNLOCK(dhdp, flags); | |
1780 | ||
1781 | timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); | |
1782 | if ((timeleft == 0) || (timeleft == 1)) { | |
1783 | DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", | |
1784 | __FUNCTION__, dhdp->dhd_bus_busy_state)); | |
1785 | ASSERT(0); | |
1786 | } | |
1787 | ||
1788 | return; | |
1789 | } | |
1790 | ||
1791 | static void | |
1792 | dhdpcie_bus_remove_prep(dhd_bus_t *bus) | |
1793 | { | |
1794 | unsigned long flags; | |
1795 | DHD_TRACE(("%s Enter\n", __FUNCTION__)); | |
1796 | ||
1797 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
1798 | DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); | |
1799 | bus->dhd->busstate = DHD_BUS_DOWN; | |
1800 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
1801 | ||
1802 | dhd_os_sdlock(bus->dhd); | |
1803 | ||
1804 | if (bus->sih && !bus->dhd->dongle_isolation) { | |
1805 | if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) { | |
1806 | dhd_bus_pcie_pwr_req_reload_war(bus); | |
1807 | } | |
1808 | ||
1809 | /* Known issue: insmod fails after rmmod on Brix Android */ | |
1810 | ||
1811 | /* if the pcie link is down, watchdog reset | |
1812 | * should not be done, as it may hang | |
1813 | */ | |
1814 | ||
1815 | if (!bus->is_linkdown) { | |
1816 | #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH | |
1817 | /* For EFI, depending on the BT-over-PCIe mode, | |
1818 | * we either power-toggle or do an F0 FLR | |
1819 | * from dhdpcie_bus_release_dongle, so there is no need | |
1820 | * to do a dongle reset from here. | |
1821 | */ | |
1822 | dhdpcie_dongle_reset(bus); | |
1823 | #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */ | |
1824 | } | |
1825 | ||
1826 | bus->dhd->is_pcie_watchdog_reset = TRUE; | |
1827 | } | |
1828 | ||
1829 | dhd_os_sdunlock(bus->dhd); | |
1830 | ||
1831 | DHD_TRACE(("%s Exit\n", __FUNCTION__)); | |
1832 | } | |
1833 | ||
1834 | void | |
1835 | dhd_init_bus_lock(dhd_bus_t *bus) | |
1836 | { | |
1837 | if (!bus->bus_lock) { | |
1838 | bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh); | |
1839 | } | |
1840 | } | |
1841 | ||
1842 | void | |
1843 | dhd_deinit_bus_lock(dhd_bus_t *bus) | |
1844 | { | |
1845 | if (bus->bus_lock) { | |
1846 | dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock); | |
1847 | bus->bus_lock = NULL; | |
1848 | } | |
1849 | } | |
1850 | ||
1851 | void | |
1852 | dhd_init_backplane_access_lock(dhd_bus_t *bus) | |
1853 | { | |
1854 | if (!bus->backplane_access_lock) { | |
1855 | bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh); | |
1856 | } | |
1857 | } | |
1858 | ||
1859 | void | |
1860 | dhd_deinit_backplane_access_lock(dhd_bus_t *bus) | |
1861 | { | |
1862 | if (bus->backplane_access_lock) { | |
1863 | dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock); | |
1864 | bus->backplane_access_lock = NULL; | |
1865 | } | |
1866 | } | |
1867 | ||
1868 | /** Detach and free everything */ | |
1869 | void | |
1870 | dhdpcie_bus_release(dhd_bus_t *bus) | |
1871 | { | |
1872 | bool dongle_isolation = FALSE; | |
1873 | osl_t *osh = NULL; | |
1874 | unsigned long flags_bus; | |
1875 | ||
1876 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
1877 | ||
1878 | if (bus) { | |
1879 | ||
1880 | osh = bus->osh; | |
1881 | ASSERT(osh); | |
1882 | ||
1883 | if (bus->dhd) { | |
1884 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
1885 | debugger_close(); | |
1886 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
1887 | dhdpcie_advertise_bus_remove(bus->dhd); | |
1888 | dongle_isolation = bus->dhd->dongle_isolation; | |
1889 | bus->dhd->is_pcie_watchdog_reset = FALSE; | |
1890 | dhdpcie_bus_remove_prep(bus); | |
1891 | ||
1892 | if (bus->intr) { | |
1893 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
1894 | dhdpcie_bus_intr_disable(bus); | |
1895 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
1896 | dhdpcie_free_irq(bus); | |
1897 | } | |
1898 | dhd_deinit_bus_lock(bus); | |
1899 | dhd_deinit_backplane_access_lock(bus); | |
1900 | /** | |
1901 | * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to | |
1902 | * access dongle registers. | |
1903 | * dhd_detach communicates with the dongle to delete flowrings, etc., | |
1904 | * so dhdpcie_bus_release_dongle should be called only after dhd_detach. | |
1905 | */ | |
1906 | dhd_detach(bus->dhd); | |
1907 | dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); | |
1908 | dhd_free(bus->dhd); | |
1909 | bus->dhd = NULL; | |
1910 | } | |
1911 | /* unmap the regs and tcm here!! */ | |
1912 | if (bus->regs) { | |
1913 | dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE); | |
1914 | bus->regs = NULL; | |
1915 | } | |
1916 | if (bus->tcm) { | |
1917 | dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE); | |
1918 | bus->tcm = NULL; | |
1919 | } | |
1920 | ||
1921 | dhdpcie_bus_release_malloc(bus, osh); | |
1922 | /* Detach pcie shared structure */ | |
1923 | if (bus->pcie_sh) { | |
1924 | MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); | |
1925 | bus->pcie_sh = NULL; | |
1926 | } | |
1927 | ||
1928 | if (bus->console.buf != NULL) { | |
1929 | MFREE(osh, bus->console.buf, bus->console.bufsize); | |
1930 | } | |
1931 | ||
1932 | /* Finally free bus info */ | |
1933 | MFREE(osh, bus, sizeof(dhd_bus_t)); | |
1934 | ||
1935 | g_dhd_bus = NULL; | |
1936 | } | |
1937 | ||
1938 | DHD_TRACE(("%s: Exit\n", __FUNCTION__)); | |
1939 | } /* dhdpcie_bus_release */ | |
1940 | ||
1941 | void | |
1942 | dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) | |
1943 | { | |
1944 | DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, | |
1945 | bus->dhd, bus->dhd->dongle_reset)); | |
1946 | ||
1947 | if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) { | |
1948 | DHD_TRACE(("%s Exit\n", __FUNCTION__)); | |
1949 | return; | |
1950 | } | |
1951 | ||
1952 | if (bus->is_linkdown) { | |
1953 | DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__)); | |
1954 | return; | |
1955 | } | |
1956 | ||
1957 | if (bus->sih) { | |
1958 | ||
1959 | if (!dongle_isolation && | |
1960 | (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) { | |
1961 | dhdpcie_dongle_reset(bus); | |
1962 | } | |
1963 | ||
1964 | dhdpcie_dongle_flr_or_pwr_toggle(bus); | |
1965 | ||
1966 | if (bus->ltrsleep_on_unload) { | |
1967 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
1968 | OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); | |
1969 | } | |
1970 | ||
1971 | if (bus->sih->buscorerev == 13) | |
1972 | pcie_serdes_iddqdisable(bus->osh, bus->sih, | |
1973 | (sbpcieregs_t *) bus->regs); | |
1974 | ||
1975 | /* For in-built drivers the PCIe CLKREQ is handled by the RC, | |
1976 | * so do not issue clkreq from DHD. | |
1977 | */ | |
1978 | if (dhd_download_fw_on_driverload) | |
1979 | { | |
1980 | /* Disable CLKREQ# */ | |
1981 | dhdpcie_clkreq(bus->osh, 1, 0); | |
1982 | } | |
1983 | ||
1984 | if (bus->sih != NULL) { | |
1985 | si_detach(bus->sih); | |
1986 | bus->sih = NULL; | |
1987 | } | |
1988 | if (bus->vars && bus->varsz) | |
1989 | MFREE(osh, bus->vars, bus->varsz); | |
1990 | bus->vars = NULL; | |
1991 | } | |
1992 | ||
1993 | DHD_TRACE(("%s Exit\n", __FUNCTION__)); | |
1994 | } | |
1995 | ||
1996 | uint32 | |
1997 | dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size) | |
1998 | { | |
1999 | uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size); | |
2000 | return data; | |
2001 | } | |
2002 | ||
2003 | /** 32 bit config write */ | |
2004 | void | |
2005 | dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data) | |
2006 | { | |
2007 | OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data); | |
2008 | } | |
2009 | ||
2010 | void | |
2011 | dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data) | |
2012 | { | |
2013 | OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data); | |
2014 | } | |
2015 | ||
2016 | void | |
2017 | dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size) | |
2018 | { | |
2019 | int32 min_size = DONGLE_MIN_MEMSIZE; | |
2020 | /* Restrict the memsize to user specified limit */ | |
2021 | DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", | |
2022 | dhd_dongle_memsize, min_size)); | |
2023 | if ((dhd_dongle_memsize > min_size) && | |
2024 | (dhd_dongle_memsize < (int32)bus->orig_ramsize)) | |
2025 | bus->ramsize = dhd_dongle_memsize; | |
2026 | } | |
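/*
 * Illustrative numbers (hypothetical): with orig_ramsize == 0x1c0000 and
 * dhd_dongle_memsize == 0x100000, the check above accepts the value and
 * bus->ramsize becomes 0x100000; a request at or below DONGLE_MIN_MEMSIZE, or at
 * or above orig_ramsize, leaves bus->ramsize untouched.
 */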
2027 | ||
2028 | void | |
2029 | dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) | |
2030 | { | |
2031 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
2032 | ||
2033 | if (bus->dhd && bus->dhd->dongle_reset) | |
2034 | return; | |
2035 | ||
2036 | if (bus->vars && bus->varsz) { | |
2037 | MFREE(osh, bus->vars, bus->varsz); | |
2038 | bus->vars = NULL; | |
2039 | } | |
2040 | ||
2041 | DHD_TRACE(("%s: Exit\n", __FUNCTION__)); | |
2042 | return; | |
2043 | ||
2044 | } | |
2045 | ||
2046 | /** Stop bus module: clear pending frames, disable data flow */ | |
2047 | void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) | |
2048 | { | |
2049 | unsigned long flags, flags_bus; | |
2050 | ||
2051 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
2052 | ||
2053 | if (!bus->dhd) | |
2054 | return; | |
2055 | ||
2056 | if (bus->dhd->busstate == DHD_BUS_DOWN) { | |
2057 | DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__)); | |
2058 | goto done; | |
2059 | } | |
2060 | ||
2061 | DHD_DISABLE_RUNTIME_PM(bus->dhd); | |
2062 | ||
2063 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
2064 | DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); | |
2065 | bus->dhd->busstate = DHD_BUS_DOWN; | |
2066 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
2067 | ||
2068 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
2069 | atomic_set(&bus->dhd->block_bus, TRUE); | |
2070 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
2071 | ||
2072 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
2073 | dhdpcie_bus_intr_disable(bus); | |
2074 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
2075 | ||
2076 | if (!bus->is_linkdown) { | |
2077 | uint32 status; | |
2078 | status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); | |
2079 | dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); | |
2080 | } | |
2081 | ||
2082 | if (!dhd_download_fw_on_driverload) { | |
2083 | dhd_dpc_kill(bus->dhd); | |
2084 | } | |
2085 | ||
2086 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
2087 | pm_runtime_disable(dhd_bus_to_dev(bus)); | |
2088 | pm_runtime_set_suspended(dhd_bus_to_dev(bus)); | |
2089 | pm_runtime_enable(dhd_bus_to_dev(bus)); | |
2090 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
2091 | ||
2092 | /* Clear rx control and wake any waiters */ | |
2093 | dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT); | |
2094 | dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP); | |
2095 | ||
2096 | done: | |
2097 | return; | |
2098 | } | |
2099 | ||
2100 | /** | |
2101 | * Watchdog timer function. | |
2102 | * @param dhd Represents a specific hardware (dongle) instance that this DHD manages | |
2103 | */ | |
2104 | bool dhd_bus_watchdog(dhd_pub_t *dhd) | |
2105 | { | |
2106 | unsigned long flags; | |
2107 | dhd_bus_t *bus = dhd->bus; | |
2108 | ||
2109 | DHD_GENERAL_LOCK(dhd, flags); | |
2110 | if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) || | |
2111 | DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) { | |
2112 | DHD_GENERAL_UNLOCK(dhd, flags); | |
2113 | return FALSE; | |
2114 | } | |
2115 | DHD_BUS_BUSY_SET_IN_WD(dhd); | |
2116 | DHD_GENERAL_UNLOCK(dhd, flags); | |
2117 | ||
2118 | /* Poll for console output periodically */ | |
2119 | if (dhd->busstate == DHD_BUS_DATA && | |
2120 | dhd->dhd_console_ms != 0 && | |
2121 | bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) { | |
2122 | bus->console.count += dhd_watchdog_ms; | |
2123 | if (bus->console.count >= dhd->dhd_console_ms) { | |
2124 | bus->console.count -= dhd->dhd_console_ms; | |
2125 | ||
2126 | if (MULTIBP_ENAB(bus->sih)) { | |
2127 | dhd_bus_pcie_pwr_req(bus); | |
2128 | } | |
2129 | ||
2130 | /* Make sure backplane clock is on */ | |
2131 | if (dhdpcie_bus_readconsole(bus) < 0) { | |
2132 | dhd->dhd_console_ms = 0; /* On error, stop trying */ | |
2133 | } | |
2134 | ||
2135 | if (MULTIBP_ENAB(bus->sih)) { | |
2136 | dhd_bus_pcie_pwr_req_clear(bus); | |
2137 | } | |
2138 | } | |
2139 | } | |
2140 | ||
2141 | #ifdef DHD_READ_INTSTATUS_IN_DPC | |
2142 | if (bus->poll) { | |
2143 | bus->ipend = TRUE; | |
2144 | bus->dpc_sched = TRUE; | |
2145 | dhd_sched_dpc(bus->dhd); /* queue DPC now!! */ | |
2146 | } | |
2147 | #endif /* DHD_READ_INTSTATUS_IN_DPC */ | |
2148 | ||
2149 | DHD_GENERAL_LOCK(dhd, flags); | |
2150 | DHD_BUS_BUSY_CLEAR_IN_WD(dhd); | |
2151 | dhd_os_busbusy_wake(dhd); | |
2152 | DHD_GENERAL_UNLOCK(dhd, flags); | |
2153 | ||
2154 | return TRUE; | |
2155 | } /* dhd_bus_watchdog */ | |
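/*
 * Console polling cadence, with hypothetical values for illustration: the
 * watchdog above adds dhd_watchdog_ms to bus->console.count on every tick and
 * calls dhdpcie_bus_readconsole() once the count reaches dhd->dhd_console_ms.
 * For example, a 10 ms watchdog and dhd_console_ms == 250 would read the dongle
 * console roughly every 25th tick, and the subtraction (rather than a reset to
 * zero) preserves any overshoot so the average period stays at dhd_console_ms.
 */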
2156 | ||
2157 | #if defined(SUPPORT_MULTIPLE_REVISION) | |
2158 | static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path) | |
2159 | { | |
2160 | uint32 chiprev; | |
2161 | #if defined(SUPPORT_MULTIPLE_CHIPS) | |
2162 | char chipver_tag[20] = "_4358"; | |
2163 | #else | |
2164 | char chipver_tag[10] = {0, }; | |
2165 | #endif /* SUPPORT_MULTIPLE_CHIPS */ | |
2166 | ||
2167 | chiprev = dhd_bus_chiprev(bus); | |
2168 | if (chiprev == 0) { | |
2169 | DHD_ERROR(("----- CHIP 4358 A0 -----\n")); | |
2170 | strcat(chipver_tag, "_a0"); | |
2171 | } else if (chiprev == 1) { | |
2172 | DHD_ERROR(("----- CHIP 4358 A1 -----\n")); | |
2173 | #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) | |
2174 | strcat(chipver_tag, "_a1"); | |
2175 | #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */ | |
2176 | } else if (chiprev == 3) { | |
2177 | DHD_ERROR(("----- CHIP 4358 A3 -----\n")); | |
2178 | #if defined(SUPPORT_MULTIPLE_CHIPS) | |
2179 | strcat(chipver_tag, "_a3"); | |
2180 | #endif /* SUPPORT_MULTIPLE_CHIPS */ | |
2181 | } else { | |
2182 | DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev)); | |
2183 | } | |
2184 | ||
2185 | strcat(fw_path, chipver_tag); | |
2186 | ||
2187 | #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) | |
2188 | if (chiprev == 1 || chiprev == 3) { | |
2189 | int ret = dhd_check_module_b85a(); | |
2190 | if ((chiprev == 1) && (ret < 0)) { | |
2191 | memset(chipver_tag, 0x00, sizeof(chipver_tag)); | |
2192 | strcat(chipver_tag, "_b85"); | |
2193 | strcat(chipver_tag, "_a1"); | |
2194 | } | |
2195 | } | |
2196 | ||
2197 | DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag)); | |
2198 | #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */ | |
2199 | ||
2200 | #if defined(SUPPORT_MULTIPLE_BOARD_REV) | |
2201 | if (system_rev >= 10) { | |
2202 | DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev)); | |
2203 | strcat(chipver_tag, "_r10"); | |
2204 | } | |
2205 | #endif /* SUPPORT_MULTIPLE_BOARD_REV */ | |
2206 | strcat(nv_path, chipver_tag); | |
2207 | ||
2208 | return 0; | |
2209 | } | |
2210 | ||
2211 | static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path) | |
2212 | { | |
2213 | uint32 chip_ver; | |
2214 | char chipver_tag[10] = {0, }; | |
2215 | #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \ | |
2216 | defined(SUPPORT_BCM4359_MIXED_MODULES) | |
2217 | int module_type = -1; | |
2218 | #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ | |
2219 | ||
2220 | chip_ver = bus->sih->chiprev; | |
2221 | if (chip_ver == 4) { | |
2222 | DHD_ERROR(("----- CHIP 4359 B0 -----\n")); | |
2223 | strncat(chipver_tag, "_b0", strlen("_b0")); | |
2224 | } else if (chip_ver == 5) { | |
2225 | DHD_ERROR(("----- CHIP 4359 B1 -----\n")); | |
2226 | strncat(chipver_tag, "_b1", strlen("_b1")); | |
2227 | } else if (chip_ver == 9) { | |
2228 | DHD_ERROR(("----- CHIP 4359 C0 -----\n")); | |
2229 | strncat(chipver_tag, "_c0", strlen("_c0")); | |
2230 | } else { | |
2231 | DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver)); | |
2232 | return -1; | |
2233 | } | |
2234 | ||
2235 | #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \ | |
2236 | defined(SUPPORT_BCM4359_MIXED_MODULES) | |
2237 | module_type = dhd_check_module_b90(); | |
2238 | ||
2239 | switch (module_type) { | |
2240 | case BCM4359_MODULE_TYPE_B90B: | |
2241 | strcat(fw_path, chipver_tag); | |
2242 | break; | |
2243 | case BCM4359_MODULE_TYPE_B90S: | |
2244 | default: | |
2245 | /* | |
2246 | * If the .cid.info file does not exist, | |
2247 | * force loading the B90S FW for the initial MFG boot-up. | |
2248 | */ | |
2249 | if (chip_ver == 5) { | |
2250 | strncat(fw_path, "_b90s", strlen("_b90s")); | |
2251 | } | |
2252 | strcat(fw_path, chipver_tag); | |
2253 | strcat(nv_path, chipver_tag); | |
2254 | break; | |
2255 | } | |
2256 | #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ | |
2257 | strcat(fw_path, chipver_tag); | |
2258 | strcat(nv_path, chipver_tag); | |
2259 | #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */ | |
2260 | ||
2261 | return 0; | |
2262 | } | |
2263 | ||
2264 | #if defined(USE_CID_CHECK) | |
2265 | ||
2266 | #define MAX_EXTENSION 20 | |
2267 | #define MODULE_BCM4361_INDEX 3 | |
2268 | #define CHIP_REV_A0 1 | |
2269 | #define CHIP_REV_A1 2 | |
2270 | #define CHIP_REV_B0 3 | |
2271 | #define CHIP_REV_B1 4 | |
2272 | #define CHIP_REV_B2 5 | |
2273 | #define CHIP_REV_C0 6 | |
2274 | #define BOARD_TYPE_EPA 0x080f | |
2275 | #define BOARD_TYPE_IPA 0x0827 | |
2276 | #define BOARD_TYPE_IPA_OLD 0x081a | |
2277 | #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA" | |
2278 | #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA" | |
2279 | #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1" | |
2280 | #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0" | |
2281 | #define MAX_VID_LEN 8 | |
2282 | #define CIS_TUPLE_HDR_LEN 2 | |
2283 | #if defined(BCM4361_CHIP) | |
2284 | #define CIS_TUPLE_START_ADDRESS 0x18011110 | |
2285 | #define CIS_TUPLE_END_ADDRESS 0x18011167 | |
2286 | #elif defined(BCM4375_CHIP) | |
2287 | #define CIS_TUPLE_START_ADDRESS 0x18011120 | |
2288 | #define CIS_TUPLE_END_ADDRESS 0x18011177 | |
2289 | #endif /* BCM4361_CHIP || BCM4375_CHIP */ | |
2290 | #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\ | |
2291 | + 1) / sizeof(uint32)) | |
2292 | #define CIS_TUPLE_TAG_START 0x80 | |
2293 | #define CIS_TUPLE_TAG_VENDOR 0x81 | |
2294 | #define CIS_TUPLE_TAG_BOARDTYPE 0x1b | |
2295 | #define CIS_TUPLE_TAG_LENGTH 1 | |
2296 | #define NVRAM_FEM_MURATA "_murata" | |
2297 | #define CID_FEM_MURATA "_mur_" | |
2298 | ||
2299 | typedef struct cis_tuple_format { | |
2300 | uint8 id; | |
2301 | uint8 len; /* total length of tag and data */ | |
2302 | uint8 tag; | |
2303 | uint8 data[1]; | |
2304 | } cis_tuple_format_t; | |
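/*
 * Illustrative OTP tuple layout (the raw bytes are hypothetical, not real OTP
 * content). Each tuple occupies CIS_TUPLE_HDR_LEN (id + len) plus len bytes,
 * where len counts the tag and the data, so a walk advances by
 * len + CIS_TUPLE_HDR_LEN each step:
 *
 *	offset 0:  80 06 81 01 02 03 04 05   id=0x80 (start), len=6,
 *	                                     tag=0x81 (vendor), 5 VID data bytes
 *	offset 8:  80 02 1b 0f               id=0x80, len=2,
 *	                                     tag=0x1b (boardtype), data[0]=0x0f
 *
 * dhd_parse_board_information_bcm() below copies len - CIS_TUPLE_TAG_LENGTH data
 * bytes for the vendor tuple and takes data[0] for the boardtype tuple.
 */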
2305 | ||
2306 | typedef struct { | |
2307 | char cid_ext[MAX_EXTENSION]; | |
2308 | char nvram_ext[MAX_EXTENSION]; | |
2309 | char fw_ext[MAX_EXTENSION]; | |
2310 | } naming_info_t; | |
2311 | ||
2312 | naming_info_t bcm4361_naming_table[] = { | |
2313 | { {""}, {""}, {""} }, | |
2314 | { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} }, | |
2315 | { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} }, | |
2316 | { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} }, | |
2317 | { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} }, | |
2318 | { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} }, | |
2319 | { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} }, | |
2320 | { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} }, | |
2321 | { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} }, | |
2322 | { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} }, | |
2323 | { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} }, | |
2324 | { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} }, | |
2325 | { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} }, | |
2326 | { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} }, | |
2327 | { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} }, | |
2328 | { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} }, | |
2329 | { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} }, | |
2330 | { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} }, | |
2331 | { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} }, | |
2332 | { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} }, | |
2333 | { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} }, | |
2334 | { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} }, | |
2335 | { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, | |
2336 | { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */ | |
2337 | { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} }, | |
2338 | { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} }, | |
2339 | { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} }, | |
2340 | { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} }, | |
2341 | { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} }, | |
2342 | { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} }, | |
2343 | { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} }, | |
2344 | { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} }, | |
2345 | { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} }, | |
2346 | { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} }, | |
2347 | { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} } | |
2348 | }; | |
2349 | ||
2350 | #define MODULE_BCM4375_INDEX 3 | |
2351 | ||
2352 | naming_info_t bcm4375_naming_table[] = { | |
2353 | { {""}, {""}, {""} }, | |
2354 | { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} }, | |
2355 | { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} }, | |
2356 | { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} }, | |
2357 | { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} }, | |
2358 | { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} }, | |
2359 | { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} }, | |
2360 | { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} }, | |
2361 | { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} }, | |
2362 | { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} }, | |
2363 | { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} }, | |
2364 | { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} }, | |
2365 | { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} }, | |
2366 | { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} }, | |
2367 | { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} }, | |
2368 | { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} }, | |
2369 | { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} }, | |
2370 | { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} }, | |
2371 | { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} }, | |
2372 | { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} } | |
2373 | }; | |
2374 | ||
2375 | static naming_info_t * | |
2376 | dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type) | |
2377 | { | |
2378 | int index_found = 0, i = 0; | |
2379 | ||
2380 | if (module_type && strlen(module_type) > 0) { | |
2381 | for (i = 1; i < table_size; i++) { | |
2382 | if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) { | |
2383 | index_found = i; | |
2384 | break; | |
2385 | } | |
2386 | } | |
2387 | } | |
2388 | ||
2389 | DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found)); | |
2390 | ||
2391 | return &table[index_found]; | |
2392 | } | |
2393 | ||
2394 | static naming_info_t * | |
2395 | dhd_find_naming_info_by_cid(naming_info_t table[], int table_size, | |
2396 | char *cid_info) | |
2397 | { | |
2398 | int index_found = 0, i = 0; | |
2399 | char *ptr; | |
2400 | ||
2401 | /* truncate extension */ | |
2402 | for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) { | |
2403 | ptr = bcmstrstr(ptr, "_"); | |
2404 | if (ptr) { | |
2405 | ptr++; | |
2406 | } | |
2407 | } | |
2408 | ||
2409 | for (i = 1; i < table_size && ptr; i++) { | |
2410 | if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) { | |
2411 | index_found = i; | |
2412 | break; | |
2413 | } | |
2414 | } | |
2415 | ||
2416 | DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found)); | |
2417 | ||
2418 | return &table[index_found]; | |
2419 | } | |
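/*
 * Illustrative lookup (the cid_info string is hypothetical): with
 * MODULE_BCM4361_INDEX == 3, the truncation loop above skips the first two
 * '_'-separated fields, so a cid_info of "semco_sky_r02a_e30a_a1" is compared
 * starting at "r02a_e30a_a1" and selects the bcm4361_naming_table entry whose
 * nvram_ext is "_r02a_a1" and fw_ext is "_a1".
 */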
2420 | ||
2421 | static int | |
2422 | dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype, | |
2423 | unsigned char *vid, int *vid_length) | |
2424 | { | |
2425 | int boardtype_backplane_addr[] = { | |
2426 | 0x18010324, /* OTP Control 1 */ | |
2427 | 0x18012618, /* PMU min resource mask */ | |
2428 | }; | |
2429 | int boardtype_backplane_data[] = { | |
2430 | 0x00fa0000, | |
2431 | 0x0e4fffff /* Keep on ARMHTAVAIL */ | |
2432 | }; | |
2433 | int int_val = 0, i = 0; | |
2434 | cis_tuple_format_t *tuple; | |
2435 | int totlen, len; | |
2436 | uint32 raw_data[CIS_TUPLE_MAX_COUNT]; | |
2437 | ||
2438 | for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) { | |
2439 | /* Write new OTP and PMU configuration */ | |
2440 | if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int), | |
2441 | &boardtype_backplane_data[i], FALSE) != BCME_OK) { | |
2442 | DHD_ERROR(("invalid size/addr combination\n")); | |
2443 | return BCME_ERROR; | |
2444 | } | |
2445 | ||
2446 | if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int), | |
2447 | &int_val, TRUE) != BCME_OK) { | |
2448 | DHD_ERROR(("invalid size/addr combination\n")); | |
2449 | return BCME_ERROR; | |
2450 | } | |
2451 | ||
2452 | DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n", | |
2453 | __FUNCTION__, boardtype_backplane_addr[i], int_val)); | |
2454 | } | |
2455 | ||
2456 | /* read tuple raw data */ | |
2457 | for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) { | |
2458 | if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32), | |
2459 | sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) { | |
2460 | break; | |
2461 | } | |
2462 | } | |
2463 | ||
2464 | totlen = i * sizeof(uint32); | |
2465 | tuple = (cis_tuple_format_t *)raw_data; | |
2466 | ||
2467 | /* check that the first tuple has the 'start' tag */ | |
2468 | if (tuple->id != CIS_TUPLE_TAG_START) { | |
2469 | return BCME_ERROR; | |
2470 | } | |
2471 | ||
2472 | *vid_length = *boardtype = 0; | |
2473 | ||
2474 | /* find tagged parameter */ | |
2475 | while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) && | |
2476 | (*vid_length == 0 || *boardtype == 0)) { | |
2477 | len = tuple->len; | |
2478 | ||
2479 | if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) && | |
2480 | (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { | |
2481 | /* found VID */ | |
2482 | memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); | |
2483 | *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH; | |
2484 | prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); | |
2485 | } | |
2486 | else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) && | |
2487 | (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) { | |
2488 | /* found boardtype */ | |
2489 | *boardtype = (int)tuple->data[0]; | |
2490 | prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH); | |
2491 | } | |
2492 | ||
2493 | tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN)); | |
2494 | totlen -= (len + CIS_TUPLE_HDR_LEN); | |
2495 | } | |
2496 | ||
2497 | if (*vid_length <= 0 || *boardtype <= 0) { | |
2498 | DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n", | |
2499 | *vid_length, *boardtype)); | |
2500 | return BCME_ERROR; | |
2501 | } | |
2502 | ||
2503 | return BCME_OK; | |
2504 | ||
2505 | } | |
2506 | ||
2507 | static naming_info_t * | |
2508 | dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size, | |
2509 | dhd_bus_t *bus, bool *is_murata_fem) | |
2510 | { | |
2511 | int board_type = 0, chip_rev = 0, vid_length = 0; | |
2512 | unsigned char vid[MAX_VID_LEN]; | |
2513 | naming_info_t *info = &table[0]; | |
2514 | char *cid_info = NULL; | |
2515 | ||
2516 | if (!bus || !bus->sih) { | |
2517 | DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus)); | |
2518 | return NULL; | |
2519 | } | |
2520 | chip_rev = bus->sih->chiprev; | |
2521 | ||
2522 | if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length) | |
2523 | != BCME_OK) { | |
2524 | DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__)); | |
2525 | return NULL; | |
2526 | } | |
2527 | ||
2528 | DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev)); | |
2529 | ||
2530 | #if defined(BCM4361_CHIP) | |
2531 | /* Only the A0 chipset is handled as an exception */ | |
2532 | if (chip_rev == CHIP_REV_A0) { | |
2533 | if (board_type == BOARD_TYPE_EPA) { | |
2534 | info = dhd_find_naming_info(table, table_size, | |
2535 | DEFAULT_CIDINFO_FOR_EPA); | |
2536 | } else if ((board_type == BOARD_TYPE_IPA) || | |
2537 | (board_type == BOARD_TYPE_IPA_OLD)) { | |
2538 | info = dhd_find_naming_info(table, table_size, | |
2539 | DEFAULT_CIDINFO_FOR_IPA); | |
2540 | } | |
2541 | } else { | |
2542 | cid_info = dhd_get_cid_info(vid, vid_length); | |
2543 | if (cid_info) { | |
2544 | info = dhd_find_naming_info_by_cid(table, table_size, cid_info); | |
2545 | if (strstr(cid_info, CID_FEM_MURATA)) { | |
2546 | *is_murata_fem = TRUE; | |
2547 | } | |
2548 | } | |
2549 | } | |
2550 | #else | |
2551 | cid_info = dhd_get_cid_info(vid, vid_length); | |
2552 | if (cid_info) { | |
2553 | info = dhd_find_naming_info_by_cid(table, table_size, cid_info); | |
2554 | if (strstr(cid_info, CID_FEM_MURATA)) { | |
2555 | *is_murata_fem = TRUE; | |
2556 | } | |
2557 | } | |
2558 | #endif /* BCM4361_CHIP */ | |
2559 | ||
2560 | return info; | |
2561 | } | |
2562 | #endif /* USE_CID_CHECK */ | |
2563 | ||
2564 | static int | |
2565 | concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path) | |
2566 | { | |
2567 | int ret = BCME_OK; | |
2568 | #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK) | |
2569 | char module_type[MAX_VNAME_LEN]; | |
2570 | naming_info_t *info = NULL; | |
2571 | bool is_murata_fem = FALSE; | |
2572 | ||
2573 | memset(module_type, 0, sizeof(module_type)); | |
2574 | ||
2575 | if (dhd_check_module_bcm(module_type, | |
2576 | MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) { | |
2577 | info = dhd_find_naming_info(bcm4361_naming_table, | |
2578 | ARRAYSIZE(bcm4361_naming_table), module_type); | |
2579 | } else { | |
2580 | /* in case the .cid.info file doesn't exist */ | |
2581 | info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table, | |
2582 | ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem); | |
2583 | } | |
2584 | ||
2585 | if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) { | |
2586 | is_murata_fem = FALSE; | |
2587 | } | |
2588 | ||
2589 | if (info) { | |
2590 | if (is_murata_fem) { | |
2591 | strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA)); | |
2592 | } | |
2593 | strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext)); | |
2594 | strncat(fw_path, info->fw_ext, strlen(info->fw_ext)); | |
2595 | } else { | |
2596 | DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__)); | |
2597 | ret = BCME_ERROR; | |
2598 | } | |
2599 | #else /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */ | |
2600 | char chipver_tag[10] = {0, }; | |
2601 | ||
2602 | strcat(fw_path, chipver_tag); | |
2603 | strcat(nv_path, chipver_tag); | |
2604 | #endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */ | |
2605 | ||
2606 | return ret; | |
2607 | } | |
2608 | ||
2609 | static int | |
2610 | concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path) | |
2611 | { | |
2612 | int ret = BCME_OK; | |
2613 | #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK) | |
2614 | char module_type[MAX_VNAME_LEN]; | |
2615 | naming_info_t *info = NULL; | |
2616 | bool is_murata_fem = FALSE; | |
2617 | ||
2618 | memset(module_type, 0, sizeof(module_type)); | |
2619 | ||
2620 | if (dhd_check_module_bcm(module_type, | |
2621 | MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) { | |
2622 | info = dhd_find_naming_info(bcm4375_naming_table, | |
2623 | ARRAYSIZE(bcm4375_naming_table), module_type); | |
2624 | } else { | |
2625 | /* in case the .cid.info file doesn't exist */ | |
2626 | info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table, | |
2627 | ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem); | |
2628 | } | |
2629 | ||
2630 | if (info) { | |
2631 | strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext)); | |
2632 | strncat(fw_path, info->fw_ext, strlen(info->fw_ext)); | |
2633 | } else { | |
2634 | DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__)); | |
2635 | ret = BCME_ERROR; | |
2636 | } | |
2637 | #else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */ | |
2638 | char chipver_tag[10] = {0, }; | |
2639 | ||
2640 | strcat(fw_path, chipver_tag); | |
2641 | strcat(nv_path, chipver_tag); | |
2642 | #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */ | |
2643 | ||
2644 | return ret; | |
2645 | } | |
2646 | ||
2647 | int | |
2648 | concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path) | |
2649 | { | |
2650 | int res = 0; | |
2651 | ||
2652 | if (!bus || !bus->sih) { | |
2653 | DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__)); | |
2654 | return -1; | |
2655 | } | |
2656 | ||
2657 | if (!fw_path || !nv_path) { | |
2658 | DHD_ERROR(("fw_path or nv_path is null.\n")); | |
2659 | return res; | |
2660 | } | |
2661 | ||
2662 | switch (si_chipid(bus->sih)) { | |
2663 | ||
2664 | case BCM43569_CHIP_ID: | |
2665 | case BCM4358_CHIP_ID: | |
2666 | res = concate_revision_bcm4358(bus, fw_path, nv_path); | |
2667 | break; | |
2668 | case BCM4355_CHIP_ID: | |
2669 | case BCM4359_CHIP_ID: | |
2670 | res = concate_revision_bcm4359(bus, fw_path, nv_path); | |
2671 | break; | |
2672 | case BCM4361_CHIP_ID: | |
2673 | case BCM4347_CHIP_ID: | |
2674 | res = concate_revision_bcm4361(bus, fw_path, nv_path); | |
2675 | break; | |
2676 | case BCM4375_CHIP_ID: | |
2677 | res = concate_revision_bcm4375(bus, fw_path, nv_path); | |
2678 | break; | |
2679 | default: | |
2680 | DHD_ERROR(("REVISION SPECIFIC feature is not required\n")); | |
2681 | return res; | |
2682 | } | |
2683 | ||
2684 | return res; | |
2685 | } | |
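/*
 * Illustrative result (the base paths are hypothetical): for a 4359 C0 dongle,
 * concate_revision_bcm4359() appends "_c0" to both paths, e.g.
 *	fw_path : ".../fw_bcm4359.bin"    -> ".../fw_bcm4359.bin_c0"
 *	nv_path : ".../nvram_bcm4359.txt" -> ".../nvram_bcm4359.txt_c0"
 * When the mixed-module/CID checks are compiled in, a module suffix such as
 * "_b90s" may be appended to fw_path before the chip-revision tag.
 */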
2686 | #endif /* SUPPORT_MULTIPLE_REVISION */ | |
2687 | ||
2688 | uint16 | |
2689 | dhd_get_chipid(dhd_pub_t *dhd) | |
2690 | { | |
2691 | dhd_bus_t *bus = dhd->bus; | |
2692 | ||
2693 | if (bus && bus->sih) | |
2694 | return (uint16)si_chipid(bus->sih); | |
2695 | else | |
2696 | return 0; | |
2697 | } | |
2698 | ||
2699 | /** | |
2700 | * Loads the firmware given by the caller-supplied path, and the nvram image, into the PCIe dongle. | |
2701 | * | |
2702 | * BCM_REQUEST_FW specific: | |
2703 | * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing the | |
2704 | * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file. | |
2705 | * | |
2706 | * BCMEMBEDIMAGE specific: | |
2707 | * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained in the | |
2708 | * header file will be used instead. | |
2709 | * | |
2710 | * @return BCME_OK on success | |
2711 | */ | |
2712 | int | |
2713 | dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, | |
2714 | char *pfw_path, char *pnv_path, | |
2715 | char *pclm_path, char *pconf_path) | |
2716 | { | |
2717 | int ret; | |
2718 | ||
2719 | bus->fw_path = pfw_path; | |
2720 | bus->nv_path = pnv_path; | |
2721 | bus->dhd->clm_path = pclm_path; | |
2722 | bus->dhd->conf_path = pconf_path; | |
2723 | ||
2724 | #if defined(SUPPORT_MULTIPLE_REVISION) | |
2725 | if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) { | |
2726 | DHD_ERROR(("%s: fail to concatnate revison \n", | |
2727 | __FUNCTION__)); | |
2728 | return BCME_BADARG; | |
2729 | } | |
2730 | #endif /* SUPPORT_MULTIPLE_REVISION */ | |
2731 | ||
2732 | #if defined(DHD_BLOB_EXISTENCE_CHECK) | |
2733 | dhd_set_blob_support(bus->dhd, bus->fw_path); | |
2734 | #endif /* DHD_BLOB_EXISTENCE_CHECK */ | |
2735 | ||
2736 | DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", | |
2737 | __FUNCTION__, bus->fw_path, bus->nv_path)); | |
2738 | dhdpcie_dump_resource(bus); | |
2739 | ||
2740 | ret = dhdpcie_download_firmware(bus, osh); | |
2741 | ||
2742 | return ret; | |
2743 | } | |
2744 | ||
2745 | void | |
2746 | dhd_set_bus_params(struct dhd_bus *bus) | |
2747 | { | |
2748 | if (bus->dhd->conf->dhd_poll >= 0) { | |
2749 | bus->poll = bus->dhd->conf->dhd_poll; | |
2750 | if (!bus->pollrate) | |
2751 | bus->pollrate = 1; | |
2752 | printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll); | |
2753 | } | |
2754 | } | |
2755 | ||
2756 | /** | |
2757 | * Loads firmware given by 'bus->fw_path' into PCIe dongle. | |
2758 | * | |
2759 | * BCM_REQUEST_FW specific: | |
2760 | * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing the | |
2761 | * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file. | |
2762 | * | |
2763 | * BCMEMBEDIMAGE specific: | |
2764 | * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained in the | |
2765 | * header file will be used instead. | |
2766 | * | |
2767 | * @return BCME_OK on success | |
2768 | */ | |
2769 | static int | |
2770 | dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) | |
2771 | { | |
2772 | int ret = 0; | |
2773 | #if defined(BCM_REQUEST_FW) | |
2774 | uint chipid = bus->sih->chip; | |
2775 | uint revid = bus->sih->chiprev; | |
2776 | char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */ | |
2777 | char nv_path[64]; /* path to nvram vars file */ | |
2778 | bus->fw_path = fw_path; | |
2779 | bus->nv_path = nv_path; | |
2780 | switch (chipid) { | |
2781 | case BCM43570_CHIP_ID: | |
2782 | bcmstrncat(fw_path, "43570", 5); | |
2783 | switch (revid) { | |
2784 | case 0: | |
2785 | bcmstrncat(fw_path, "a0", 2); | |
2786 | break; | |
2787 | case 2: | |
2788 | bcmstrncat(fw_path, "a2", 2); | |
2789 | break; | |
2790 | default: | |
2791 | DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__, | |
2792 | revid)); | |
2793 | break; | |
2794 | } | |
2795 | break; | |
2796 | default: | |
2797 | DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, | |
2798 | chipid)); | |
2799 | return 0; | |
2800 | } | |
2801 | /* load board specific nvram file */ | |
2802 | snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path); | |
2803 | /* load firmware */ | |
2804 | snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path); | |
2805 | #endif /* BCM_REQUEST_FW */ | |
2806 | ||
2807 | DHD_OS_WAKE_LOCK(bus->dhd); | |
2808 | ||
2809 | dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path); | |
2810 | dhd_set_bus_params(bus); | |
2811 | ||
2812 | ret = _dhdpcie_download_firmware(bus); | |
2813 | ||
2814 | DHD_OS_WAKE_UNLOCK(bus->dhd); | |
2815 | return ret; | |
2816 | } /* dhdpcie_download_firmware */ | |
2817 | ||
2818 | #define DHD_MEMORY_SET_PATTERN 0xAA | |
2819 | ||
2820 | /** | |
2821 | * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD | |
2822 | * is updated with the event logging partitions within that file as well. | |
2823 | * | |
2824 | * @param pfw_path Path to .bin or .bea file | |
2825 | */ | |
2826 | static int | |
2827 | dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) | |
2828 | { | |
2829 | int bcmerror = BCME_ERROR; | |
2830 | int offset = 0; | |
2831 | int len = 0; | |
2832 | bool store_reset; | |
2833 | char *imgbuf = NULL; | |
2834 | uint8 *memblock = NULL, *memptr = NULL; | |
2835 | uint8 *memptr_tmp = NULL; // terence: read back and verify that the downloaded firmware is correct | |
2836 | int offset_end = bus->ramsize; | |
2837 | uint32 file_size = 0, read_len = 0; | |
2838 | ||
2839 | #if defined(DHD_FW_MEM_CORRUPTION) | |
2840 | if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { | |
2841 | dhd_tcm_test_enable = TRUE; | |
2842 | } else { | |
2843 | dhd_tcm_test_enable = FALSE; | |
2844 | } | |
2845 | #endif /* DHD_FW_MEM_CORRUPTION */ | |
2846 | DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable)); | |
2847 | /* TCM check */ | |
2848 | if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) { | |
2849 | DHD_ERROR(("dhd_bus_tcm_test failed\n")); | |
2850 | bcmerror = BCME_ERROR; | |
2851 | goto err; | |
2852 | } | |
2853 | DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); | |
2854 | ||
2855 | /* Opening the image should succeed if it was actually given through a | |
2856 | * registry entry or a module param. | |
2857 | */ | |
2858 | imgbuf = dhd_os_open_image1(bus->dhd, pfw_path); | |
2859 | if (imgbuf == NULL) { | |
2860 | printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path); | |
2861 | goto err; | |
2862 | } | |
2863 | ||
2864 | file_size = dhd_os_get_image_size(imgbuf); | |
2865 | if (!file_size) { | |
2866 | DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__)); | |
2867 | goto err; | |
2868 | } | |
2869 | ||
2870 | memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); | |
2871 | if (memblock == NULL) { | |
2872 | DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); | |
2873 | bcmerror = BCME_NOMEM; | |
2874 | goto err; | |
2875 | } | |
2876 | if (dhd_msg_level & DHD_TRACE_VAL) { | |
2877 | memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); | |
2878 | if (memptr_tmp == NULL) { | |
2879 | DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); | |
2880 | goto err; | |
2881 | } | |
2882 | } | |
2883 | if ((uint32)(uintptr)memblock % DHD_SDALIGN) { | |
2884 | memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); | |
2885 | } | |
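	/* Alignment example with hypothetical numbers: if MALLOC returned
	 * memblock == 0x1006 and DHD_SDALIGN were 32, the remainder above is 6 and
	 * memptr is advanced by 26 bytes to 0x1020, the next aligned boundary;
	 * the extra DHD_SDALIGN bytes in the allocation make room for this.
	 */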
2886 | ||
2887 | /* check if CR4/CA7 */ | |
2888 | store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || | |
2889 | si_setcore(bus->sih, ARMCA7_CORE_ID, 0)); | |
2890 | /* Download image with MEMBLOCK size */ | |
2891 | while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) { | |
2892 | if (len < 0) { | |
2893 | DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); | |
2894 | bcmerror = BCME_ERROR; | |
2895 | goto err; | |
2896 | } | |
2897 | read_len += len; | |
2898 | if (read_len > file_size) { | |
2899 | DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;" | |
2900 | " file_size=%u truncating len to %d \n", __FUNCTION__, | |
2901 | len, read_len, file_size, (len - (read_len - file_size)))); | |
2902 | len -= (read_len - file_size); | |
2903 | } | |
2904 | ||
2905 | /* if the address is 0, store the reset instruction that will be written at address 0 */ | |
2906 | if (store_reset) { | |
2907 | ASSERT(offset == 0); | |
2908 | bus->resetinstr = *(((uint32*)memptr)); | |
2909 | /* Add start of RAM address to the address given by user */ | |
2910 | offset += bus->dongle_ram_base; | |
2911 | offset_end += offset; | |
2912 | store_reset = FALSE; | |
2913 | } | |
2914 | ||
2915 | bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); | |
2916 | if (bcmerror) { | |
2917 | DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", | |
2918 | __FUNCTION__, bcmerror, MEMBLOCK, offset)); | |
2919 | goto err; | |
2920 | } | |
2921 | ||
2922 | if (dhd_msg_level & DHD_TRACE_VAL) { | |
2923 | bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len); | |
2924 | if (bcmerror) { | |
2925 | DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", | |
2926 | __FUNCTION__, bcmerror, MEMBLOCK, offset)); | |
2927 | goto err; | |
2928 | } | |
2929 | if (memcmp(memptr_tmp, memptr, len)) { | |
2930 | DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__)); | |
2931 | goto err; | |
2932 | } else | |
2933 | DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); | |
2934 | } | |
2935 | offset += MEMBLOCK; | |
2936 | ||
2937 | if (offset >= offset_end) { | |
2938 | DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", | |
2939 | __FUNCTION__, offset, offset_end)); | |
2940 | bcmerror = BCME_ERROR; | |
2941 | goto err; | |
2942 | } | |
2943 | ||
2944 | if (read_len >= file_size) { | |
2945 | break; | |
2946 | } | |
2947 | } | |
2948 | err: | |
2949 | if (memblock) { | |
2950 | MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); | |
2951 | if (dhd_msg_level & DHD_TRACE_VAL) { | |
2952 | if (memptr_tmp) | |
2953 | MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); | |
2954 | } | |
2955 | } | |
2956 | ||
2957 | if (imgbuf) { | |
2958 | dhd_os_close_image1(bus->dhd, imgbuf); | |
2959 | } | |
2960 | ||
2961 | return bcmerror; | |
2962 | } /* dhdpcie_download_code_file */ | |
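/*
 * Illustrative offset math (the RAM base value is hypothetical): with a
 * dongle_ram_base of 0x160000, the loop above saves the first word of the image
 * in bus->resetinstr, writes the first MEMBLOCK-sized chunk at 0x160000, the
 * next at 0x160000 + MEMBLOCK, and so on, stopping once read_len reaches
 * file_size or offset crosses offset_end (dongle_ram_base + ramsize).
 */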
2963 | ||
2964 | static int | |
2965 | dhdpcie_download_nvram(struct dhd_bus *bus) | |
2966 | { | |
2967 | int bcmerror = BCME_ERROR; | |
2968 | uint len; | |
2969 | char * memblock = NULL; | |
2970 | char *bufp; | |
2971 | char *pnv_path; | |
2972 | bool nvram_file_exists; | |
2973 | bool nvram_uefi_exists = FALSE; | |
2974 | bool local_alloc = FALSE; | |
2975 | pnv_path = bus->nv_path; | |
2976 | ||
2977 | nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); | |
2978 | ||
2979 | /* First try UEFI */ | |
2980 | len = MAX_NVRAMBUF_SIZE; | |
2981 | dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len); | |
2982 | ||
2983 | /* If UEFI empty, then read from file system */ | |
2984 | if ((len <= 0) || (memblock == NULL)) { | |
2985 | ||
2986 | if (nvram_file_exists) { | |
2987 | len = MAX_NVRAMBUF_SIZE; | |
2988 | dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len); | |
2989 | if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { | |
2990 | goto err; | |
2991 | } | |
2992 | } | |
2993 | else { | |
2994 | /* For SROM OTP no external file or UEFI required */ | |
2995 | bcmerror = BCME_OK; | |
2996 | } | |
2997 | } else { | |
2998 | nvram_uefi_exists = TRUE; | |
2999 | } | |
3000 | ||
3001 | DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); | |
3002 | ||
3003 | if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) { | |
3004 | bufp = (char *) memblock; | |
3005 | ||
3006 | { | |
3007 | bufp[len] = 0; | |
3008 | if (nvram_uefi_exists || nvram_file_exists) { | |
3009 | len = process_nvram_vars(bufp, len); | |
3010 | } | |
3011 | } | |
3012 | ||
3013 | DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len)); | |
3014 | ||
3015 | if (len % 4) { | |
3016 | len += 4 - (len % 4); | |
3017 | } | |
3018 | bufp += len; | |
3019 | *bufp++ = 0; | |
3020 | if (len) | |
3021 | bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1); | |
3022 | if (bcmerror) { | |
3023 | DHD_ERROR(("%s: error downloading vars: %d\n", | |
3024 | __FUNCTION__, bcmerror)); | |
3025 | } | |
3026 | } | |
3027 | ||
3028 | err: | |
3029 | if (memblock) { | |
3030 | if (local_alloc) { | |
3031 | MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); | |
3032 | } else { | |
3033 | dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE); | |
3034 | } | |
3035 | } | |
3036 | ||
3037 | return bcmerror; | |
3038 | } | |
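/*
 * A minimal sketch (illustrative only, not part of the driver) of the length
 * handling performed by dhdpcie_download_nvram() above: the processed NVRAM
 * length is rounded up to a 4-byte boundary, a terminating NUL is appended,
 * and 'len + 1' bytes are then downloaded to the dongle.
 *
 *   uint pad_nvram_len(uint len)
 *   {
 *       if (len % 4)
 *           len += 4 - (len % 4);   // e.g. 1014 -> 1016
 *       return len + 1;             // one extra byte for the trailing NUL
 *   }
 */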
3039 | ||
3040 | static int | |
3041 | dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len) | |
3042 | { | |
3043 | int bcmerror = BCME_ERROR; | |
3044 | char *imgbuf = NULL; | |
3045 | ||
3046 | if (buf == NULL || len == 0) | |
3047 | goto err; | |
3048 | ||
3049 | /* External image takes precedence if specified */ | |
3050 | if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { | |
3051 | // opens the file and seeks to the correct offset: | |
3052 | imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path); | |
3053 | if (imgbuf == NULL) { | |
3054 | DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__)); | |
3055 | goto err; | |
3056 | } | |
3057 | ||
3058 | /* Read it */ | |
3059 | if (len != dhd_os_get_image_block(buf, len, imgbuf)) { | |
3060 | DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len)); | |
3061 | goto err; | |
3062 | } | |
3063 | ||
3064 | bcmerror = BCME_OK; | |
3065 | } | |
3066 | ||
3067 | err: | |
3068 | if (imgbuf) | |
3069 | dhd_os_close_image1(bus->dhd, imgbuf); | |
3070 | ||
3071 | return bcmerror; | |
3072 | } | |
3073 | ||
3074 | /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares the sysmem | |
3075 | * with the BMC, and how much sysmem belongs to the CA7 can be adjusted during dongle compilation. | |
3076 | * So the DHD needs to detect this case and update the dongle RAMSIZE accordingly. | |
3077 | */ | |
3078 | static void | |
3079 | dhdpcie_ramsize_adj(struct dhd_bus *bus) | |
3080 | { | |
3081 | int i, search_len = 0; | |
3082 | uint8 *memptr = NULL; | |
3083 | uint8 *ramsizeptr = NULL; | |
3084 | uint ramsizelen; | |
3085 | uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST}; | |
3086 | hnd_ramsize_ptr_t ramsize_info; | |
3087 | ||
3088 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
3089 | ||
3090 | /* Return if the dongle RAMSIZE adjustment has already been done. */ | |
3091 | if (bus->ramsize_adjusted) { | |
3092 | return; | |
3093 | } | |
3094 | ||
3095 | /* success or failure, we don't want to be here | |
3096 | * more than once. | |
3097 | */ | |
3098 | bus->ramsize_adjusted = TRUE; | |
3099 | ||
3100 | /* Do not handle this if the user has restricted the dongle RAM size */ | |
3101 | if (dhd_dongle_memsize) { | |
3102 | DHD_ERROR(("%s: user restricted dongle ram size to %d.\n", __FUNCTION__, | |
3103 | dhd_dongle_memsize)); | |
3104 | return; | |
3105 | } | |
3106 | ||
3107 | /* Bail out immediately if there is no image to download */ | |
3108 | if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { | |
3109 | DHD_ERROR(("%s: no firmware file\n", __FUNCTION__)); | |
3110 | return; | |
3111 | } | |
3112 | ||
3113 | /* Get maximum RAMSIZE info search length */ | |
3114 | for (i = 0; ; i++) { | |
3115 | if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) | |
3116 | break; | |
3117 | ||
3118 | if (search_len < (int)ramsize_ptr_ptr[i]) | |
3119 | search_len = (int)ramsize_ptr_ptr[i]; | |
3120 | } | |
3121 | ||
3122 | if (!search_len) | |
3123 | return; | |
3124 | ||
3125 | search_len += sizeof(hnd_ramsize_ptr_t); | |
3126 | ||
3127 | memptr = MALLOC(bus->dhd->osh, search_len); | |
3128 | if (memptr == NULL) { | |
3129 | DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len)); | |
3130 | return; | |
3131 | } | |
3132 | ||
3133 | /* External image takes precedence if specified */ | |
3134 | if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) { | |
3135 | goto err; | |
3136 | } | |
3137 | else { | |
3138 | ramsizeptr = memptr; | |
3139 | ramsizelen = search_len; | |
3140 | } | |
3141 | ||
3142 | if (ramsizeptr) { | |
3143 | /* Check Magic */ | |
3144 | for (i = 0; ; i++) { | |
3145 | if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) | |
3146 | break; | |
3147 | ||
3148 | if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen) | |
3149 | continue; | |
3150 | ||
3151 | memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i], | |
3152 | sizeof(hnd_ramsize_ptr_t)); | |
3153 | ||
3154 | if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) { | |
3155 | bus->orig_ramsize = LTOH32(ramsize_info.ram_size); | |
3156 | bus->ramsize = LTOH32(ramsize_info.ram_size); | |
3157 | DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__, | |
3158 | bus->ramsize)); | |
3159 | break; | |
3160 | } | |
3161 | } | |
3162 | } | |
3163 | ||
3164 | err: | |
3165 | if (memptr) | |
3166 | MFREE(bus->dhd->osh, memptr, search_len); | |
3167 | ||
3168 | return; | |
3169 | } /* dhdpcie_ramsize_adj */ | |
3170 | ||
3171 | /** | |
3172 | * Downloads firmware file given by 'bus->fw_path' into PCIe dongle | |
3173 | * | |
3174 | * BCMEMBEDIMAGE specific: | |
3175 | * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained in the | |
3176 | * header file will be used instead. | |
3177 | * | |
3178 | */ | |
3179 | static int | |
3180 | _dhdpcie_download_firmware(struct dhd_bus *bus) | |
3181 | { | |
3182 | int bcmerror = -1; | |
3183 | ||
3184 | bool embed = FALSE; /* download embedded firmware */ | |
3185 | bool dlok = FALSE; /* download firmware succeeded */ | |
3186 | ||
3187 | /* Bail out immediately if there is no image to download */ | |
3188 | if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { | |
3189 | DHD_ERROR(("%s: no firmware file\n", __FUNCTION__)); | |
3190 | return 0; | |
3191 | } | |
3192 | /* Adjust ram size */ | |
3193 | dhdpcie_ramsize_adj(bus); | |
3194 | ||
3195 | /* Keep arm in reset */ | |
3196 | if (dhdpcie_bus_download_state(bus, TRUE)) { | |
3197 | DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); | |
3198 | goto err; | |
3199 | } | |
3200 | ||
3201 | /* External image takes precedence if specified */ | |
3202 | if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { | |
3203 | if (dhdpcie_download_code_file(bus, bus->fw_path)) { | |
3204 | DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__, | |
3205 | __LINE__)); | |
3206 | goto err; | |
3207 | } else { | |
3208 | embed = FALSE; | |
3209 | dlok = TRUE; | |
3210 | } | |
3211 | } | |
3212 | ||
3213 | BCM_REFERENCE(embed); | |
3214 | if (!dlok) { | |
3215 | DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__)); | |
3216 | goto err; | |
3217 | } | |
3218 | ||
3219 | /* EXAMPLE: nvram_array */ | |
3220 | /* If a valid nvram_array is specified as above, it can be passed down to the dongle */ | |
3221 | /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */ | |
3222 | ||
3223 | /* External nvram takes precedence if specified */ | |
3224 | if (dhdpcie_download_nvram(bus)) { | |
3225 | DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__)); | |
3226 | goto err; | |
3227 | } | |
3228 | ||
3229 | /* Take arm out of reset */ | |
3230 | if (dhdpcie_bus_download_state(bus, FALSE)) { | |
3231 | DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); | |
3232 | goto err; | |
3233 | } | |
3234 | ||
3235 | bcmerror = 0; | |
3236 | ||
3237 | err: | |
3238 | return bcmerror; | |
3239 | } /* _dhdpcie_download_firmware */ | |
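/*
 * For reference, the overall download sequence implemented above is
 * (sketch only; error handling omitted):
 *
 *   dhdpcie_ramsize_adj(bus);                        // pick up a RAMSIZE override in the image
 *   dhdpcie_bus_download_state(bus, TRUE);           // hold the ARM core in reset
 *   dhdpcie_download_code_file(bus, bus->fw_path);   // write the firmware image to dongle RAM
 *   dhdpcie_download_nvram(bus);                     // write the NVRAM variables
 *   dhdpcie_bus_download_state(bus, FALSE);          // release reset, dongle boots
 */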
3240 | ||
3241 | static int | |
3242 | dhdpcie_bus_readconsole(dhd_bus_t *bus) | |
3243 | { | |
3244 | dhd_console_t *c = &bus->console; | |
3245 | uint8 line[CONSOLE_LINE_MAX], ch; | |
3246 | uint32 n, idx, addr; | |
3247 | int rv; | |
3248 | uint readlen = 0; | |
3249 | uint i = 0; | |
3250 | ||
3251 | /* Don't do anything until FWREADY updates console address */ | |
3252 | if (bus->console_addr == 0) | |
3253 | return -1; | |
3254 | ||
3255 | /* Read console log struct */ | |
3256 | addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); | |
3257 | ||
3258 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) | |
3259 | return rv; | |
3260 | ||
3261 | /* Allocate console buffer (one time only) */ | |
3262 | if (c->buf == NULL) { | |
3263 | c->bufsize = ltoh32(c->log.buf_size); | |
3264 | if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) | |
3265 | return BCME_NOMEM; | |
3266 | DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize)); | |
3267 | } | |
3268 | idx = ltoh32(c->log.idx); | |
3269 | ||
3270 | /* Protect against corrupt value */ | |
3271 | if (idx > c->bufsize) | |
3272 | return BCME_ERROR; | |
3273 | ||
3274 | /* Skip reading the console buffer if the index pointer has not moved */ | |
3275 | if (idx == c->last) | |
3276 | return BCME_OK; | |
3277 | ||
3278 | DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf, | |
3279 | idx, c->last)); | |
3280 | ||
3281 | /* Read the console buffer data to a local buffer */ | |
3282 | /* Optimization: read only the portion of the buffer that is needed, | |
3283 | * but it is important to handle wrap-around. | |
3284 | */ | |
3285 | addr = ltoh32(c->log.buf); | |
3286 | ||
3287 | /* wrap around case - write ptr < read ptr */ | |
3288 | if (idx < c->last) { | |
3289 | /* from read ptr to end of buffer */ | |
3290 | readlen = c->bufsize - c->last; | |
3291 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, | |
3292 | addr + c->last, c->buf, readlen)) < 0) { | |
3293 | DHD_ERROR(("conlog: read error[1] ! \n")); | |
3294 | return rv; | |
3295 | } | |
3296 | /* from beginning of buffer to write ptr */ | |
3297 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, | |
3298 | addr, c->buf + readlen, | |
3299 | idx)) < 0) { | |
3300 | DHD_ERROR(("conlog: read error[2] ! \n")); | |
3301 | return rv; | |
3302 | } | |
3303 | readlen += idx; | |
3304 | } else { | |
3305 | /* non-wraparound case, write ptr > read ptr */ | |
3306 | readlen = (uint)idx - c->last; | |
3307 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, | |
3308 | addr + c->last, c->buf, readlen)) < 0) { | |
3309 | DHD_ERROR(("conlog: read error[3] ! \n")); | |
3310 | return rv; | |
3311 | } | |
3312 | } | |
3313 | /* update read ptr */ | |
3314 | c->last = idx; | |
3315 | ||
3316 | /* now output the read data from the local buffer to the host console */ | |
3317 | while (i < readlen) { | |
3318 | for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) { | |
3319 | ch = c->buf[i]; | |
3320 | ++i; | |
3321 | if (ch == '\n') | |
3322 | break; | |
3323 | line[n] = ch; | |
3324 | } | |
3325 | ||
3326 | if (n > 0) { | |
3327 | if (line[n - 1] == '\r') | |
3328 | n--; | |
3329 | line[n] = 0; | |
3330 | DHD_FWLOG(("CONSOLE: %s\n", line)); | |
3331 | } | |
3332 | } | |
3333 | ||
3334 | return BCME_OK; | |
3335 | ||
3336 | } /* dhdpcie_bus_readconsole */ | |
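/*
 * The dongle console log is a circular buffer in dongle memory. A minimal
 * sketch of the wrap-around handling above (assuming a generic
 * memread(dst, src, n) helper; illustrative only):
 *
 *   if (idx < last) {                               // writer wrapped past the end
 *       memread(buf, addr + last, bufsize - last);            // tail part
 *       memread(buf + (bufsize - last), addr, idx);           // head part
 *   } else {                                        // no wrap
 *       memread(buf, addr + last, idx - last);
 *   }
 *   last = idx;                                     // advance the read pointer
 */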
3337 | ||
3338 | void | |
3339 | dhd_bus_dump_console_buffer(dhd_bus_t *bus) | |
3340 | { | |
3341 | uint32 n, i; | |
3342 | uint32 addr; | |
3343 | char *console_buffer = NULL; | |
3344 | uint32 console_ptr, console_size, console_index; | |
3345 | uint8 line[CONSOLE_LINE_MAX], ch; | |
3346 | int rv; | |
3347 | ||
3348 | DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__)); | |
3349 | ||
3350 | if (bus->is_linkdown) { | |
3351 | DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__)); | |
3352 | return; | |
3353 | } | |
3354 | ||
3355 | addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); | |
3356 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, | |
3357 | (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { | |
3358 | goto exit; | |
3359 | } | |
3360 | ||
3361 | addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); | |
3362 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, | |
3363 | (uint8 *)&console_size, sizeof(console_size))) < 0) { | |
3364 | goto exit; | |
3365 | } | |
3366 | ||
3367 | addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); | |
3368 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, | |
3369 | (uint8 *)&console_index, sizeof(console_index))) < 0) { | |
3370 | goto exit; | |
3371 | } | |
3372 | ||
3373 | console_ptr = ltoh32(console_ptr); | |
3374 | console_size = ltoh32(console_size); | |
3375 | console_index = ltoh32(console_index); | |
3376 | ||
3377 | if (console_size > CONSOLE_BUFFER_MAX || | |
3378 | !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { | |
3379 | goto exit; | |
3380 | } | |
3381 | ||
3382 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, | |
3383 | (uint8 *)console_buffer, console_size)) < 0) { | |
3384 | goto exit; | |
3385 | } | |
3386 | ||
3387 | for (i = 0, n = 0; i < console_size; i += n + 1) { | |
3388 | for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { | |
3389 | ch = console_buffer[(console_index + i + n) % console_size]; | |
3390 | if (ch == '\n') | |
3391 | break; | |
3392 | line[n] = ch; | |
3393 | } | |
3394 | ||
3395 | if (n > 0) { | |
3396 | if (line[n - 1] == '\r') | |
3397 | n--; | |
3398 | line[n] = 0; | |
3399 | /* Don't use DHD_ERROR macro since we print | |
3400 | * a lot of information quickly. The macro | |
3401 | * will truncate a lot of the printfs | |
3402 | */ | |
3403 | ||
3404 | DHD_FWLOG(("CONSOLE: %s\n", line)); | |
3405 | } | |
3406 | } | |
3407 | ||
3408 | exit: | |
3409 | if (console_buffer) | |
3410 | MFREE(bus->dhd->osh, console_buffer, console_size); | |
3411 | return; | |
3412 | } | |
3413 | ||
3414 | /** | |
3415 | * Checks whether the dongle has trapped or asserted and, if so, dumps the assert expression/file, | |
3416 | * the trap information and the console buffer, and schedules a memory dump. | |
3417 | * @return 0 on success, a negative BCME_* error otherwise | |
3418 | */ | |
3419 | static int | |
3420 | dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) | |
3421 | { | |
3422 | int bcmerror = 0; | |
3423 | uint msize = 512; | |
3424 | char *mbuffer = NULL; | |
3425 | uint maxstrlen = 256; | |
3426 | char *str = NULL; | |
3427 | pciedev_shared_t *local_pciedev_shared = bus->pcie_sh; | |
3428 | struct bcmstrbuf strbuf; | |
3429 | unsigned long flags; | |
3430 | bool dongle_trap_occured = FALSE; | |
3431 | ||
3432 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
3433 | ||
3434 | if (DHD_NOCHECKDIED_ON()) { | |
3435 | return 0; | |
3436 | } | |
3437 | ||
3438 | if (data == NULL) { | |
3439 | /* | |
3440 | * Called after an rx ctrl timeout, so "data" is NULL. | |
3441 | * Allocate memory to trace the trap or assert. | |
3442 | */ | |
3443 | size = msize; | |
3444 | mbuffer = data = MALLOC(bus->dhd->osh, msize); | |
3445 | ||
3446 | if (mbuffer == NULL) { | |
3447 | DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); | |
3448 | bcmerror = BCME_NOMEM; | |
3449 | goto done2; | |
3450 | } | |
3451 | } | |
3452 | ||
3453 | if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { | |
3454 | DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); | |
3455 | bcmerror = BCME_NOMEM; | |
3456 | goto done2; | |
3457 | } | |
3458 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
3459 | DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd); | |
3460 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
3461 | ||
3462 | if (MULTIBP_ENAB(bus->sih)) { | |
3463 | dhd_bus_pcie_pwr_req(bus); | |
3464 | } | |
3465 | if ((bcmerror = dhdpcie_readshared(bus)) < 0) { | |
3466 | goto done1; | |
3467 | } | |
3468 | ||
3469 | bcm_binit(&strbuf, data, size); | |
3470 | ||
3471 | bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", | |
3472 | local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr); | |
3473 | ||
3474 | if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) { | |
3475 | /* NOTE: Misspelled assert is intentional - DO NOT FIX. | |
3476 | * (Avoids conflict with real asserts for programmatic parsing of output.) | |
3477 | */ | |
3478 | bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); | |
3479 | } | |
3480 | ||
3481 | if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) { | |
3482 | /* NOTE: Misspelled assert is intentional - DO NOT FIX. | |
3483 | * (Avoids conflict with real asserts for programmatic parsing of output.) | |
3484 | */ | |
3485 | bcm_bprintf(&strbuf, "No trap%s in dongle", | |
3486 | (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT) | |
3487 | ?"/assrt" :""); | |
3488 | } else { | |
3489 | if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) { | |
3490 | /* Download assert */ | |
3491 | bcm_bprintf(&strbuf, "Dongle assert"); | |
3492 | if (bus->pcie_sh->assert_exp_addr != 0) { | |
3493 | str[0] = '\0'; | |
3494 | if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, | |
3495 | bus->pcie_sh->assert_exp_addr, | |
3496 | (uint8 *)str, maxstrlen)) < 0) { | |
3497 | goto done1; | |
3498 | } | |
3499 | ||
3500 | str[maxstrlen - 1] = '\0'; | |
3501 | bcm_bprintf(&strbuf, " expr \"%s\"", str); | |
3502 | } | |
3503 | ||
3504 | if (bus->pcie_sh->assert_file_addr != 0) { | |
3505 | str[0] = '\0'; | |
3506 | if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, | |
3507 | bus->pcie_sh->assert_file_addr, | |
3508 | (uint8 *)str, maxstrlen)) < 0) { | |
3509 | goto done1; | |
3510 | } | |
3511 | ||
3512 | str[maxstrlen - 1] = '\0'; | |
3513 | bcm_bprintf(&strbuf, " file \"%s\"", str); | |
3514 | } | |
3515 | ||
3516 | bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line); | |
3517 | } | |
3518 | ||
3519 | if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { | |
3520 | trap_t *tr = &bus->dhd->last_trap_info; | |
3521 | dongle_trap_occured = TRUE; | |
3522 | if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, | |
3523 | bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) { | |
3524 | bus->dhd->dongle_trap_occured = TRUE; | |
3525 | goto done1; | |
3526 | } | |
3527 | dhd_bus_dump_trap_info(bus, &strbuf); | |
3528 | } | |
3529 | } | |
3530 | ||
3531 | if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { | |
3532 | DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); | |
3533 | ||
3534 | dhd_bus_dump_console_buffer(bus); | |
3535 | dhd_prot_debug_info_print(bus->dhd); | |
3536 | ||
3537 | #if defined(DHD_FW_COREDUMP) | |
3538 | /* save core dump or write to a file */ | |
3539 | if (bus->dhd->memdump_enabled) { | |
3540 | #ifdef DHD_SSSR_DUMP | |
3541 | bus->dhd->collect_sssr = TRUE; | |
3542 | #endif /* DHD_SSSR_DUMP */ | |
3543 | bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; | |
3544 | dhdpcie_mem_dump(bus); | |
3545 | } | |
3546 | #endif /* DHD_FW_COREDUMP */ | |
3547 | ||
3548 | /* set the trap occurred flag only after all the memdump, | |
3549 | * logdump and sssr dump collection has been scheduled | |
3550 | */ | |
3551 | if (dongle_trap_occured) { | |
3552 | bus->dhd->dongle_trap_occured = TRUE; | |
3553 | } | |
3554 | ||
3555 | /* wake up IOCTL wait event */ | |
3556 | dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); | |
3557 | ||
3558 | dhd_schedule_reset(bus->dhd); | |
3559 | ||
3560 | } | |
3561 | ||
3562 | done1: | |
3563 | if (MULTIBP_ENAB(bus->sih)) { | |
3564 | dhd_bus_pcie_pwr_req_clear(bus); | |
3565 | } | |
3566 | ||
3567 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
3568 | DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd); | |
3569 | dhd_os_busbusy_wake(bus->dhd); | |
3570 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
3571 | done2: | |
3572 | if (mbuffer) | |
3573 | MFREE(bus->dhd->osh, mbuffer, msize); | |
3574 | if (str) | |
3575 | MFREE(bus->dhd->osh, str, maxstrlen); | |
3576 | ||
3577 | return bcmerror; | |
3578 | } /* dhdpcie_checkdied */ | |
3579 | ||
3580 | /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */ | |
3581 | void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) | |
3582 | { | |
3583 | int ret = 0; | |
3584 | int size; /* Full mem size */ | |
3585 | int start; /* Start address */ | |
3586 | int read_size = 0; /* Read size of each iteration */ | |
3587 | uint8 *databuf = buf; | |
3588 | ||
3589 | if (bus == NULL) { | |
3590 | return; | |
3591 | } | |
3592 | ||
3593 | start = bus->dongle_ram_base; | |
3594 | read_size = 4; | |
3595 | /* check for dead bus */ | |
3596 | { | |
3597 | uint test_word = 0; | |
3598 | ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size); | |
3599 | /* if read error or bus timeout */ | |
3600 | if (ret || (test_word == 0xFFFFFFFF)) { | |
3601 | return; | |
3602 | } | |
3603 | } | |
3604 | ||
3605 | /* Get full mem size */ | |
3606 | size = bus->ramsize; | |
3607 | /* Read mem content */ | |
3608 | while (size) | |
3609 | { | |
3610 | read_size = MIN(MEMBLOCK, size); | |
3611 | if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) { | |
3612 | return; | |
3613 | } | |
3614 | ||
3615 | /* Decrement size and increment start address */ | |
3616 | size -= read_size; | |
3617 | start += read_size; | |
3618 | databuf += read_size; | |
3619 | } | |
3620 | bus->dhd->soc_ram = buf; | |
3621 | bus->dhd->soc_ram_length = bus->ramsize; | |
3622 | return; | |
3623 | } | |
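/*
 * Reading all-ones is the usual sign of a dead PCIe link (the host sees
 * 0xFFFFFFFF for reads that the endpoint never completes). A minimal sketch
 * of the probe used above (illustrative only, not part of the driver):
 *
 *   static bool dongle_mem_readable(dhd_bus_t *bus)
 *   {
 *       uint32 word = 0;
 *       int err = dhdpcie_bus_membytes(bus, FALSE, bus->dongle_ram_base,
 *                                      (uint8 *)&word, sizeof(word));
 *       return (err == 0) && (word != 0xFFFFFFFF);
 *   }
 */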
3624 | ||
3625 | #if defined(DHD_FW_COREDUMP) | |
3626 | static int | |
3627 | dhdpcie_get_mem_dump(dhd_bus_t *bus) | |
3628 | { | |
3629 | int ret = BCME_OK; | |
3630 | int size = 0; | |
3631 | int start = 0; | |
3632 | int read_size = 0; /* Read size of each iteration */ | |
3633 | uint8 *p_buf = NULL, *databuf = NULL; | |
3634 | ||
3635 | if (!bus) { | |
3636 | DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); | |
3637 | return BCME_ERROR; | |
3638 | } | |
3639 | ||
3640 | if (!bus->dhd) { | |
3641 | DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); | |
3642 | return BCME_ERROR; | |
3643 | } | |
3644 | ||
3645 | size = bus->ramsize; /* Full mem size */ | |
3646 | start = bus->dongle_ram_base; /* Start address */ | |
3647 | ||
3648 | /* Get full mem size */ | |
3649 | p_buf = dhd_get_fwdump_buf(bus->dhd, size); | |
3650 | if (!p_buf) { | |
3651 | DHD_ERROR(("%s: Out of memory (%d bytes)\n", | |
3652 | __FUNCTION__, size)); | |
3653 | return BCME_ERROR; | |
3654 | } | |
3655 | ||
3656 | /* Read mem content */ | |
3657 | DHD_TRACE_HW4(("Dump dongle memory\n")); | |
3658 | databuf = p_buf; | |
3659 | while (size > 0) { | |
3660 | read_size = MIN(MEMBLOCK, size); | |
3661 | ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size); | |
3662 | if (ret) { | |
3663 | DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); | |
3664 | #ifdef DHD_DEBUG_UART | |
3665 | bus->dhd->memdump_success = FALSE; | |
3666 | #endif /* DHD_DEBUG_UART */ | |
3667 | break; | |
3668 | } | |
3669 | DHD_TRACE((".")); | |
3670 | ||
3671 | /* Decrement size and increment start address */ | |
3672 | size -= read_size; | |
3673 | start += read_size; | |
3674 | databuf += read_size; | |
3675 | } | |
3676 | ||
3677 | return ret; | |
3678 | } | |
3679 | ||
3680 | static int | |
3681 | dhdpcie_mem_dump(dhd_bus_t *bus) | |
3682 | { | |
3683 | dhd_pub_t *dhdp; | |
3684 | int ret; | |
3685 | ||
3686 | #ifdef EXYNOS_PCIE_DEBUG | |
3687 | exynos_pcie_register_dump(1); | |
3688 | #endif /* EXYNOS_PCIE_DEBUG */ | |
3689 | ||
3690 | dhdp = bus->dhd; | |
3691 | if (!dhdp) { | |
3692 | DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); | |
3693 | return BCME_ERROR; | |
3694 | } | |
3695 | ||
3696 | if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { | |
3697 | DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__)); | |
3698 | return BCME_ERROR; | |
3699 | } | |
3700 | ||
3701 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
3702 | if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0) | |
3703 | return BCME_ERROR; | |
3704 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
3705 | ||
3706 | ret = dhdpcie_get_mem_dump(bus); | |
3707 | if (ret) { | |
3708 | DHD_ERROR(("%s: failed to get mem dump, err=%d\n", | |
3709 | __FUNCTION__, ret)); | |
3710 | return ret; | |
3711 | } | |
3712 | ||
3713 | dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); | |
3714 | /* buf (i.e. soc_ram) is freed in dhd_{free,clear} */ | |
3715 | ||
3716 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
3717 | pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); | |
3718 | pm_runtime_put_autosuspend(dhd_bus_to_dev(bus)); | |
3719 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
3720 | ||
3721 | return ret; | |
3722 | } | |
3723 | ||
3724 | int | |
3725 | dhd_bus_get_mem_dump(dhd_pub_t *dhdp) | |
3726 | { | |
3727 | if (!dhdp) { | |
3728 | DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); | |
3729 | return BCME_ERROR; | |
3730 | } | |
3731 | ||
3732 | return dhdpcie_get_mem_dump(dhdp->bus); | |
3733 | } | |
3734 | ||
3735 | int | |
3736 | dhd_bus_mem_dump(dhd_pub_t *dhdp) | |
3737 | { | |
3738 | dhd_bus_t *bus = dhdp->bus; | |
3739 | int ret = BCME_ERROR; | |
3740 | ||
3741 | if (dhdp->busstate == DHD_BUS_DOWN) { | |
3742 | DHD_ERROR(("%s bus is down\n", __FUNCTION__)); | |
3743 | return BCME_ERROR; | |
3744 | } | |
3745 | ||
3746 | /* No resume is attempted here: skip if the bus is suspended | |
3747 | * or a suspend is in progress. | |
3748 | */ | |
3749 | if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) { | |
3750 | DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", | |
3751 | __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); | |
3752 | return BCME_ERROR; | |
3753 | } | |
3754 | ||
3755 | DHD_OS_WAKE_LOCK(dhdp); | |
3756 | ret = dhdpcie_mem_dump(bus); | |
3757 | DHD_OS_WAKE_UNLOCK(dhdp); | |
3758 | return ret; | |
3759 | } | |
3760 | #endif /* DHD_FW_COREDUMP */ | |
3761 | ||
3762 | int | |
3763 | dhd_socram_dump(dhd_bus_t *bus) | |
3764 | { | |
3765 | #if defined(DHD_FW_COREDUMP) | |
3766 | DHD_OS_WAKE_LOCK(bus->dhd); | |
3767 | dhd_bus_mem_dump(bus->dhd); | |
3768 | DHD_OS_WAKE_UNLOCK(bus->dhd); | |
3769 | return 0; | |
3770 | #else | |
3771 | return -1; | |
3772 | #endif // endif | |
3773 | } | |
3774 | ||
3775 | /** | |
3776 | * Transfers bytes between host and dongle (read or write, depending on 'write') using PIO mode. | |
3777 | * Parameter 'address' is a backplane address. | |
3778 | */ | |
3779 | static int | |
3780 | dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size) | |
3781 | { | |
3782 | uint dsize; | |
3783 | int detect_endian_flag = 0x01; | |
3784 | bool little_endian; | |
3785 | ||
3786 | if (write && bus->is_linkdown) { | |
3787 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
3788 | return BCME_ERROR; | |
3789 | } | |
3790 | ||
3791 | if (MULTIBP_ENAB(bus->sih)) { | |
3792 | dhd_bus_pcie_pwr_req(bus); | |
3793 | } | |
3794 | /* Detect endianness. */ | |
3795 | little_endian = *(char *)&detect_endian_flag; | |
3796 | ||
3797 | /* In remap mode, adjust address beyond socram and redirect | |
3798 | * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize | |
3799 | * is not backplane accessible | |
3800 | */ | |
3801 | ||
3802 | /* Determine initial transfer parameters */ | |
3803 | #ifdef DHD_SUPPORT_64BIT | |
3804 | dsize = sizeof(uint64); | |
3805 | #else /* !DHD_SUPPORT_64BIT */ | |
3806 | dsize = sizeof(uint32); | |
3807 | #endif /* DHD_SUPPORT_64BIT */ | |
3808 | ||
3809 | /* Do the transfer(s) */ | |
3810 | DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n", | |
3811 | __FUNCTION__, (write ? "write" : "read"), size, address)); | |
3812 | if (write) { | |
3813 | while (size) { | |
3814 | #ifdef DHD_SUPPORT_64BIT | |
3815 | if (size >= sizeof(uint64) && little_endian && !(address % 8)) { | |
3816 | dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data)); | |
3817 | } | |
3818 | #else /* !DHD_SUPPORT_64BIT */ | |
3819 | if (size >= sizeof(uint32) && little_endian && !(address % 4)) { | |
3820 | dhdpcie_bus_wtcm32(bus, address, *((uint32*)data)); | |
3821 | } | |
3822 | #endif /* DHD_SUPPORT_64BIT */ | |
3823 | else { | |
3824 | dsize = sizeof(uint8); | |
3825 | dhdpcie_bus_wtcm8(bus, address, *data); | |
3826 | } | |
3827 | ||
3828 | /* Adjust for next transfer (if any) */ | |
3829 | if ((size -= dsize)) { | |
3830 | data += dsize; | |
3831 | address += dsize; | |
3832 | } | |
3833 | } | |
3834 | } else { | |
3835 | while (size) { | |
3836 | #ifdef DHD_SUPPORT_64BIT | |
3837 | if (size >= sizeof(uint64) && little_endian && !(address % 8)) | |
3838 | { | |
3839 | *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address); | |
3840 | } | |
3841 | #else /* !DHD_SUPPORT_64BIT */ | |
3842 | if (size >= sizeof(uint32) && little_endian && !(address % 4)) | |
3843 | { | |
3844 | *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address); | |
3845 | } | |
3846 | #endif /* DHD_SUPPORT_64BIT */ | |
3847 | else { | |
3848 | dsize = sizeof(uint8); | |
3849 | *data = dhdpcie_bus_rtcm8(bus, address); | |
3850 | } | |
3851 | ||
3852 | /* Adjust for next transfer (if any) */ | |
3853 | if ((size -= dsize) > 0) { | |
3854 | data += dsize; | |
3855 | address += dsize; | |
3856 | } | |
3857 | } | |
3858 | } | |
3859 | if (MULTIBP_ENAB(bus->sih)) { | |
3860 | dhd_bus_pcie_pwr_req_clear(bus); | |
3861 | } | |
3862 | return BCME_OK; | |
3863 | } /* dhdpcie_bus_membytes */ | |
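/*
 * Usage sketch (illustrative only): reading a single little-endian 32-bit
 * word from a backplane address via the PIO path above. The transfer width
 * (64/32-bit vs. byte) is chosen internally from the size and alignment, so
 * callers simply pass a byte buffer.
 *
 *   uint32 val;
 *   if (dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&val, sizeof(val)) == BCME_OK)
 *       val = ltoh32(val);   // dongle memory is little-endian
 */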
3864 | ||
3865 | /** | |
3866 | * Transfers transmit (ethernet) packets that were queued in the (flow controlled) flow ring queue | |
3867 | * to the (non flow controlled) flow ring. | |
3868 | */ | |
3869 | int BCMFASTPATH | |
3870 | dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) | |
3871 | { | |
3872 | flow_ring_node_t *flow_ring_node; | |
3873 | int ret = BCME_OK; | |
3874 | #ifdef DHD_LOSSLESS_ROAMING | |
3875 | dhd_pub_t *dhdp = bus->dhd; | |
3876 | #endif // endif | |
3877 | DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); | |
3878 | ||
3879 | /* ASSERT on flow_id */ | |
3880 | if (flow_id >= bus->max_submission_rings) { | |
3881 | DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, | |
3882 | flow_id, bus->max_submission_rings)); | |
3883 | return 0; | |
3884 | } | |
3885 | ||
3886 | flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id); | |
3887 | ||
3888 | if (flow_ring_node->prot_info == NULL) { | |
3889 | DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__)); | |
3890 | return BCME_NOTREADY; | |
3891 | } | |
3892 | ||
3893 | #ifdef DHD_LOSSLESS_ROAMING | |
3894 | if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) { | |
3895 | DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n", | |
3896 | __FUNCTION__, flow_ring_node->flow_info.tid)); | |
3897 | return BCME_OK; | |
3898 | } | |
3899 | #endif /* DHD_LOSSLESS_ROAMING */ | |
3900 | ||
3901 | { | |
3902 | unsigned long flags; | |
3903 | void *txp = NULL; | |
3904 | flow_queue_t *queue; | |
3905 | #ifdef DHD_LOSSLESS_ROAMING | |
3906 | struct ether_header *eh; | |
3907 | uint8 *pktdata; | |
3908 | #endif /* DHD_LOSSLESS_ROAMING */ | |
3909 | ||
3910 | queue = &flow_ring_node->queue; /* queue associated with flow ring */ | |
3911 | ||
3912 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
3913 | ||
3914 | if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { | |
3915 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
3916 | return BCME_NOTREADY; | |
3917 | } | |
3918 | ||
3919 | while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { | |
3920 | if (bus->dhd->conf->orphan_move <= 1) | |
3921 | PKTORPHAN(txp, bus->dhd->conf->tsq); | |
3922 | ||
3923 | /* | |
3924 | * Modifying the packet length caused P2P cert failures. | |
3925 | * Specifically on test cases where a packet of size 52 bytes | |
3926 | * was injected, the sniffer capture showed 62 bytes because of | |
3927 | * which the cert tests failed. So the change below is made | |
3928 | * Router-specific only. | |
3929 | */ | |
3930 | ||
3931 | #ifdef DHDTCPACK_SUPPRESS | |
3932 | if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) { | |
3933 | ret = dhd_tcpack_check_xmit(bus->dhd, txp); | |
3934 | if (ret != BCME_OK) { | |
3935 | DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n", | |
3936 | __FUNCTION__)); | |
3937 | } | |
3938 | } | |
3939 | #endif /* DHDTCPACK_SUPPRESS */ | |
3940 | #ifdef DHD_LOSSLESS_ROAMING | |
3941 | pktdata = (uint8 *)PKTDATA(OSH_NULL, txp); | |
3942 | eh = (struct ether_header *) pktdata; | |
3943 | if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { | |
3944 | uint8 prio = (uint8)PKTPRIO(txp); | |
3945 | /* Restore to original priority for 802.1X packet */ | |
3946 | if (prio == PRIO_8021D_NC) { | |
3947 | PKTSETPRIO(txp, dhdp->prio_8021x); | |
3948 | } | |
3949 | } | |
3950 | #endif /* DHD_LOSSLESS_ROAMING */ | |
3951 | /* Attempt to transfer packet over flow ring */ | |
3952 | ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex); | |
3953 | if (ret != BCME_OK) { /* may not have resources in flow ring */ | |
3954 | DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret)); | |
3955 | dhd_prot_txdata_write_flush(bus->dhd, flow_id); | |
3956 | /* reinsert at head */ | |
3957 | dhd_flow_queue_reinsert(bus->dhd, queue, txp); | |
3958 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
3959 | ||
3960 | /* If we are able to requeue back, return success */ | |
3961 | return BCME_OK; | |
3962 | } | |
3963 | } | |
3964 | ||
3965 | #ifdef DHD_HP2P | |
3966 | if (!flow_ring_node->hp2p_ring) { | |
3967 | dhd_prot_txdata_write_flush(bus->dhd, flow_id); | |
3968 | } | |
3969 | #else | |
3970 | dhd_prot_txdata_write_flush(bus->dhd, flow_id); | |
3971 | #endif // endif | |
3972 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
3973 | } | |
3974 | ||
3975 | return ret; | |
3976 | } /* dhd_bus_schedule_queue */ | |
3977 | ||
3978 | /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */ | |
3979 | int BCMFASTPATH | |
3980 | dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) | |
3981 | { | |
3982 | uint16 flowid; | |
3983 | #ifdef IDLE_TX_FLOW_MGMT | |
3984 | uint8 node_status; | |
3985 | #endif /* IDLE_TX_FLOW_MGMT */ | |
3986 | flow_queue_t *queue; | |
3987 | flow_ring_node_t *flow_ring_node; | |
3988 | unsigned long flags; | |
3989 | int ret = BCME_OK; | |
3990 | void *txp_pend = NULL; | |
3991 | ||
3992 | if (!bus->dhd->flowid_allocator) { | |
3993 | DHD_ERROR(("%s: Flow ring not initialized yet \n", __FUNCTION__)); | |
3994 | goto toss; | |
3995 | } | |
3996 | ||
3997 | flowid = DHD_PKT_GET_FLOWID(txp); | |
3998 | ||
3999 | flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); | |
4000 | ||
4001 | DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", | |
4002 | __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active)); | |
4003 | ||
4004 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
4005 | if ((flowid >= bus->dhd->num_flow_rings) || | |
4006 | #ifdef IDLE_TX_FLOW_MGMT | |
4007 | (!flow_ring_node->active)) | |
4008 | #else | |
4009 | (!flow_ring_node->active) || | |
4010 | (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) || | |
4011 | (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) | |
4012 | #endif /* IDLE_TX_FLOW_MGMT */ | |
4013 | { | |
4014 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
4015 | DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", | |
4016 | __FUNCTION__, flowid, flow_ring_node->status, | |
4017 | flow_ring_node->active)); | |
4018 | ret = BCME_ERROR; | |
4019 | goto toss; | |
4020 | } | |
4021 | ||
4022 | #ifdef IDLE_TX_FLOW_MGMT | |
4023 | node_status = flow_ring_node->status; | |
4024 | ||
4025 | /* handle the different status states here */ | |
4026 | switch (node_status) | |
4027 | { | |
4028 | case FLOW_RING_STATUS_OPEN: | |
4029 | ||
4030 | if (bus->enable_idle_flowring_mgmt) { | |
4031 | /* Move the node to the head of active list */ | |
4032 | dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node); | |
4033 | } | |
4034 | break; | |
4035 | ||
4036 | case FLOW_RING_STATUS_SUSPENDED: | |
4037 | DHD_INFO(("Need to Initiate TX Flow resume\n")); | |
4038 | /* Issue resume_ring request */ | |
4039 | dhd_bus_flow_ring_resume_request(bus, | |
4040 | flow_ring_node); | |
4041 | break; | |
4042 | ||
4043 | case FLOW_RING_STATUS_CREATE_PENDING: | |
4044 | case FLOW_RING_STATUS_RESUME_PENDING: | |
4045 | /* Don't do anything here */ | |
4046 | DHD_INFO(("Waiting for Flow create/resume! status is %u\n", | |
4047 | node_status)); | |
4048 | break; | |
4049 | ||
4050 | case FLOW_RING_STATUS_DELETE_PENDING: | |
4051 | default: | |
4052 | DHD_ERROR(("Dropping packet!! flowid %u status is %u\n", | |
4053 | flowid, node_status)); | |
4054 | /* error here!! */ | |
4055 | ret = BCME_ERROR; | |
4056 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
4057 | goto toss; | |
4058 | } | |
4059 | /* Now queue the packet */ | |
4060 | #endif /* IDLE_TX_FLOW_MGMT */ | |
4061 | ||
4062 | queue = &flow_ring_node->queue; /* queue associated with flow ring */ | |
4063 | ||
4064 | if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) | |
4065 | txp_pend = txp; | |
4066 | ||
4067 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
4068 | ||
4069 | if (flow_ring_node->status) { | |
4070 | DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", | |
4071 | __FUNCTION__, flowid, flow_ring_node->status, | |
4072 | flow_ring_node->active)); | |
4073 | if (txp_pend) { | |
4074 | txp = txp_pend; | |
4075 | goto toss; | |
4076 | } | |
4077 | return BCME_OK; | |
4078 | } | |
4079 | ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ | |
4080 | ||
4081 | /* If we have anything pending, try to push into q */ | |
4082 | if (txp_pend) { | |
4083 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
4084 | ||
4085 | if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { | |
4086 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
4087 | txp = txp_pend; | |
4088 | goto toss; | |
4089 | } | |
4090 | ||
4091 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
4092 | } | |
4093 | ||
4094 | return ret; | |
4095 | ||
4096 | toss: | |
4097 | DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); | |
4098 | PKTCFREE(bus->dhd->osh, txp, TRUE); | |
4099 | return ret; | |
4100 | } /* dhd_bus_txdata */ | |
4101 | ||
4102 | void | |
4103 | dhd_bus_stop_queue(struct dhd_bus *bus) | |
4104 | { | |
4105 | dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); | |
4106 | } | |
4107 | ||
4108 | void | |
4109 | dhd_bus_start_queue(struct dhd_bus *bus) | |
4110 | { | |
4111 | /* | |
4112 | * The Tx queue may have been stopped due to a resource shortage, or | |
4113 | * the bus may not be in a state where it can be turned on. | |
4114 | * | |
4115 | * Note that the network interface is restarted only when enough | |
4116 | * resources are available; the flag indicating that all resources | |
4117 | * are available must be updated first. | |
4118 | */ | |
4119 | if (dhd_prot_check_tx_resource(bus->dhd)) { | |
4120 | DHD_ERROR(("%s: Interface NOT started, previously stopped " | |
4121 | "due to resource shortage\n", __FUNCTION__)); | |
4122 | return; | |
4123 | } | |
4124 | dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); | |
4125 | } | |
4126 | ||
4127 | /* Device console input function */ | |
4128 | int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) | |
4129 | { | |
4130 | dhd_bus_t *bus = dhd->bus; | |
4131 | uint32 addr, val; | |
4132 | int rv; | |
4133 | /* Address could be zero if CONSOLE := 0 in dongle Makefile */ | |
4134 | if (bus->console_addr == 0) | |
4135 | return BCME_UNSUPPORTED; | |
4136 | ||
4137 | /* Don't allow input if dongle is in reset */ | |
4138 | if (bus->dhd->dongle_reset) { | |
4139 | return BCME_NOTREADY; | |
4140 | } | |
4141 | ||
4142 | /* Zero cbuf_index */ | |
4143 | addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); | |
4144 | val = htol32(0); | |
4145 | if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) | |
4146 | goto done; | |
4147 | ||
4148 | /* Write message into cbuf */ | |
4149 | addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); | |
4150 | if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) | |
4151 | goto done; | |
4152 | ||
4153 | /* Write length into vcons_in */ | |
4154 | addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); | |
4155 | val = htol32(msglen); | |
4156 | if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) | |
4157 | goto done; | |
4158 | ||
4159 | /* generate an interrupt to dongle to indicate that it needs to process cons command */ | |
4160 | dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT); | |
4161 | done: | |
4162 | return rv; | |
4163 | } /* dhd_bus_console_in */ | |
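/*
 * Usage sketch (illustrative only): pushing a command string to the dongle
 * console. The routine above zeroes cbuf_idx, copies the message into cbuf,
 * writes its length to vcons_in, and then rings the H2D_HOST_CONS_INT
 * doorbell so the dongle processes it.
 *
 *   char cmd[] = "mu";   // hypothetical dongle console command
 *   dhd_bus_console_in(dhd, (uchar *)cmd, strlen(cmd));
 */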
4164 | ||
4165 | /** | |
4166 | * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is | |
4167 | * contained in 'pkt'. Processes rx frame, forwards up the layer to netif. | |
4168 | */ | |
4169 | void BCMFASTPATH | |
4170 | dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) | |
4171 | { | |
4172 | dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0); | |
4173 | } | |
4174 | ||
4175 | void | |
4176 | dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr) | |
4177 | { | |
4178 | dhdpcie_os_setbar1win(bus, addr); | |
4179 | } | |
4180 | ||
4181 | /** 'offset' is a backplane address */ | |
4182 | void | |
4183 | dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) | |
4184 | { | |
4185 | if (bus->is_linkdown) { | |
4186 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4187 | return; | |
4188 | } else { | |
4189 | dhdpcie_os_wtcm8(bus, offset, data); | |
4190 | } | |
4191 | } | |
4192 | ||
4193 | uint8 | |
4194 | dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) | |
4195 | { | |
4196 | volatile uint8 data; | |
4197 | if (bus->is_linkdown) { | |
4198 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4199 | data = (uint8)-1; | |
4200 | } else { | |
4201 | data = dhdpcie_os_rtcm8(bus, offset); | |
4202 | } | |
4203 | return data; | |
4204 | } | |
4205 | ||
4206 | void | |
4207 | dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) | |
4208 | { | |
4209 | if (bus->is_linkdown) { | |
4210 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4211 | return; | |
4212 | } else { | |
4213 | dhdpcie_os_wtcm32(bus, offset, data); | |
4214 | } | |
4215 | } | |
4216 | void | |
4217 | dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) | |
4218 | { | |
4219 | if (bus->is_linkdown) { | |
4220 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4221 | return; | |
4222 | } else { | |
4223 | dhdpcie_os_wtcm16(bus, offset, data); | |
4224 | } | |
4225 | } | |
4226 | #ifdef DHD_SUPPORT_64BIT | |
4227 | void | |
4228 | dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) | |
4229 | { | |
4230 | if (bus->is_linkdown) { | |
4231 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4232 | return; | |
4233 | } else { | |
4234 | dhdpcie_os_wtcm64(bus, offset, data); | |
4235 | } | |
4236 | } | |
4237 | #endif /* DHD_SUPPORT_64BIT */ | |
4238 | ||
4239 | uint16 | |
4240 | dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) | |
4241 | { | |
4242 | volatile uint16 data; | |
4243 | if (bus->is_linkdown) { | |
4244 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4245 | data = (uint16)-1; | |
4246 | } else { | |
4247 | data = dhdpcie_os_rtcm16(bus, offset); | |
4248 | } | |
4249 | return data; | |
4250 | } | |
4251 | ||
4252 | uint32 | |
4253 | dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) | |
4254 | { | |
4255 | volatile uint32 data; | |
4256 | if (bus->is_linkdown) { | |
4257 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4258 | data = (uint32)-1; | |
4259 | } else { | |
4260 | data = dhdpcie_os_rtcm32(bus, offset); | |
4261 | } | |
4262 | return data; | |
4263 | } | |
4264 | ||
4265 | #ifdef DHD_SUPPORT_64BIT | |
4266 | uint64 | |
4267 | dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) | |
4268 | { | |
4269 | volatile uint64 data; | |
4270 | if (bus->is_linkdown) { | |
4271 | DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__)); | |
4272 | data = (uint64)-1; | |
4273 | } else { | |
4274 | data = dhdpcie_os_rtcm64(bus, offset); | |
4275 | } | |
4276 | return data; | |
4277 | } | |
4278 | #endif /* DHD_SUPPORT_64BIT */ | |
4279 | ||
4280 | /** A snippet of dongle memory is shared between host and dongle */ | |
4281 | void | |
4282 | dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) | |
4283 | { | |
4284 | uint64 long_data; | |
4285 | ulong addr; /* dongle address */ | |
4286 | ||
4287 | DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); | |
4288 | ||
4289 | if (bus->is_linkdown) { | |
4290 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
4291 | return; | |
4292 | } | |
4293 | ||
4294 | if (MULTIBP_ENAB(bus->sih)) { | |
4295 | dhd_bus_pcie_pwr_req(bus); | |
4296 | } | |
4297 | switch (type) { | |
4298 | case D2H_DMA_SCRATCH_BUF: | |
4299 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer); | |
4300 | long_data = HTOL64(*(uint64 *)data); | |
4301 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); | |
4302 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4303 | prhex(__FUNCTION__, data, len); | |
4304 | } | |
4305 | break; | |
4306 | ||
4307 | case D2H_DMA_SCRATCH_BUF_LEN : | |
4308 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len); | |
4309 | dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); | |
4310 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4311 | prhex(__FUNCTION__, data, len); | |
4312 | } | |
4313 | break; | |
4314 | ||
4315 | case H2D_DMA_INDX_WR_BUF: | |
4316 | long_data = HTOL64(*(uint64 *)data); | |
4317 | addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr); | |
4318 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); | |
4319 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4320 | prhex(__FUNCTION__, data, len); | |
4321 | } | |
4322 | break; | |
4323 | ||
4324 | case H2D_DMA_INDX_RD_BUF: | |
4325 | long_data = HTOL64(*(uint64 *)data); | |
4326 | addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr); | |
4327 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); | |
4328 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4329 | prhex(__FUNCTION__, data, len); | |
4330 | } | |
4331 | break; | |
4332 | ||
4333 | case D2H_DMA_INDX_WR_BUF: | |
4334 | long_data = HTOL64(*(uint64 *)data); | |
4335 | addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr); | |
4336 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); | |
4337 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4338 | prhex(__FUNCTION__, data, len); | |
4339 | } | |
4340 | break; | |
4341 | ||
4342 | case D2H_DMA_INDX_RD_BUF: | |
4343 | long_data = HTOL64(*(uint64 *)data); | |
4344 | addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr); | |
4345 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); | |
4346 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4347 | prhex(__FUNCTION__, data, len); | |
4348 | } | |
4349 | break; | |
4350 | ||
4351 | case H2D_IFRM_INDX_WR_BUF: | |
4352 | long_data = HTOL64(*(uint64 *)data); | |
4353 | addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr); | |
4354 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len); | |
4355 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4356 | prhex(__FUNCTION__, data, len); | |
4357 | } | |
4358 | break; | |
4359 | ||
4360 | case RING_ITEM_LEN : | |
4361 | addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items); | |
4362 | dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); | |
4363 | break; | |
4364 | ||
4365 | case RING_MAX_ITEMS : | |
4366 | addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item); | |
4367 | dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); | |
4368 | break; | |
4369 | ||
4370 | case RING_BUF_ADDR : | |
4371 | long_data = HTOL64(*(uint64 *)data); | |
4372 | addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr); | |
4373 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len); | |
4374 | if (dhd_msg_level & DHD_INFO_VAL) { | |
4375 | prhex(__FUNCTION__, data, len); | |
4376 | } | |
4377 | break; | |
4378 | ||
4379 | case RING_WR_UPD : | |
4380 | addr = bus->ring_sh[ringid].ring_state_w; | |
4381 | dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); | |
4382 | break; | |
4383 | ||
4384 | case RING_RD_UPD : | |
4385 | addr = bus->ring_sh[ringid].ring_state_r; | |
4386 | dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data)); | |
4387 | break; | |
4388 | ||
4389 | case D2H_MB_DATA: | |
4390 | addr = bus->d2h_mb_data_ptr_addr; | |
4391 | dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); | |
4392 | break; | |
4393 | ||
4394 | case H2D_MB_DATA: | |
4395 | addr = bus->h2d_mb_data_ptr_addr; | |
4396 | dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); | |
4397 | break; | |
4398 | ||
4399 | case HOST_API_VERSION: | |
4400 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap); | |
4401 | dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data)); | |
4402 | break; | |
4403 | ||
4404 | case DNGL_TO_HOST_TRAP_ADDR: | |
4405 | long_data = HTOL64(*(uint64 *)data); | |
4406 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr); | |
4407 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len); | |
4408 | DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data))); | |
4409 | break; | |
4410 | ||
4411 | case HOST_SCB_ADDR: | |
4412 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr); | |
4413 | #ifdef DHD_SUPPORT_64BIT | |
4414 | dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data)); | |
4415 | #else /* !DHD_SUPPORT_64BIT */ | |
4416 | dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data)); | |
4417 | #endif /* DHD_SUPPORT_64BIT */ | |
4418 | DHD_INFO(("Wrote host_scb_addr:0x%x\n", | |
4419 | (uint32) HTOL32(*(uint32 *)data))); | |
4420 | break; | |
4421 | ||
4422 | default: | |
4423 | break; | |
4424 | } | |
4425 | if (MULTIBP_ENAB(bus->sih)) { | |
4426 | dhd_bus_pcie_pwr_req_clear(bus); | |
4427 | } | |
4428 | } /* dhd_bus_cmn_writeshared */ | |
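/*
 * Usage sketch (illustrative only): publishing a new write index for a ring.
 * The value is converted to dongle (little-endian) byte order inside the
 * switch above before being written to the shared memory location.
 * 'ring' here is a hypothetical local ring-state structure.
 *
 *   uint16 w_idx = ring->wr;
 *   dhd_bus_cmn_writeshared(bus, &w_idx, sizeof(w_idx), RING_WR_UPD, ring->idx);
 */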
4429 | ||
4430 | /** A snippet of dongle memory is shared between host and dongle */ | |
4431 | void | |
4432 | dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) | |
4433 | { | |
4434 | ulong addr; /* dongle address */ | |
4435 | ||
4436 | if (MULTIBP_ENAB(bus->sih)) { | |
4437 | dhd_bus_pcie_pwr_req(bus); | |
4438 | } | |
4439 | switch (type) { | |
4440 | case RING_WR_UPD : | |
4441 | addr = bus->ring_sh[ringid].ring_state_w; | |
4442 | *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); | |
4443 | break; | |
4444 | ||
4445 | case RING_RD_UPD : | |
4446 | addr = bus->ring_sh[ringid].ring_state_r; | |
4447 | *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); | |
4448 | break; | |
4449 | ||
4450 | case TOTAL_LFRAG_PACKET_CNT : | |
4451 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt); | |
4452 | *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); | |
4453 | break; | |
4454 | ||
4455 | case H2D_MB_DATA: | |
4456 | addr = bus->h2d_mb_data_ptr_addr; | |
4457 | *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); | |
4458 | break; | |
4459 | ||
4460 | case D2H_MB_DATA: | |
4461 | addr = bus->d2h_mb_data_ptr_addr; | |
4462 | *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); | |
4463 | break; | |
4464 | ||
4465 | case MAX_HOST_RXBUFS : | |
4466 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs); | |
4467 | *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr)); | |
4468 | break; | |
4469 | ||
4470 | case HOST_SCB_ADDR: | |
4471 | addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size); | |
4472 | *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr)); | |
4473 | break; | |
4474 | ||
4475 | default : | |
4476 | break; | |
4477 | } | |
4478 | if (MULTIBP_ENAB(bus->sih)) { | |
4479 | dhd_bus_pcie_pwr_req_clear(bus); | |
4480 | } | |
4481 | } | |
4482 | ||
4483 | uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus) | |
4484 | { | |
4485 | return ((pciedev_shared_t*)bus->pcie_sh)->flags; | |
4486 | } | |
4487 | ||
4488 | void | |
4489 | dhd_bus_clearcounts(dhd_pub_t *dhdp) | |
4490 | { | |
4491 | } | |
4492 | ||
4493 | /** | |
4494 | * @param params input buffer, NULL for 'set' operation. | |
4495 | * @param plen length of 'params' buffer, 0 for 'set' operation. | |
4496 | * @param arg output buffer | |
4497 | */ | |
4498 | int | |
4499 | dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, | |
4500 | void *params, int plen, void *arg, int len, bool set) | |
4501 | { | |
4502 | dhd_bus_t *bus = dhdp->bus; | |
4503 | const bcm_iovar_t *vi = NULL; | |
4504 | int bcmerror = BCME_UNSUPPORTED; | |
4505 | int val_size; | |
4506 | uint32 actionid; | |
4507 | ||
4508 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
4509 | ||
4510 | ASSERT(name); | |
4511 | ASSERT(len >= 0); | |
4512 | if (!name || len < 0) | |
4513 | return BCME_BADARG; | |
4514 | ||
4515 | /* Get MUST have return space */ | |
4516 | ASSERT(set || (arg && len)); | |
4517 | if (!(set || (arg && len))) | |
4518 | return BCME_BADARG; | |
4519 | ||
4520 | /* Set does NOT take qualifiers */ | |
4521 | ASSERT(!set || (!params && !plen)); | |
4522 | if (!(!set || (!params && !plen))) | |
4523 | return BCME_BADARG; | |
4524 | ||
4525 | DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, | |
4526 | name, (set ? "set" : "get"), len, plen)); | |
4527 | ||
4528 | /* Look up var locally; if not found pass to host driver */ | |
4529 | if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) { | |
4530 | goto exit; | |
4531 | } | |
4532 | ||
4533 | if (MULTIBP_ENAB(bus->sih)) { | |
4534 | if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) { | |
4535 | DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__)); | |
4536 | } else { | |
4537 | dhd_bus_pcie_pwr_req(bus); | |
4538 | } | |
4539 | } | |
4540 | ||
4541 | /* set up 'params' pointer in case this is a set command so that | |
4542 | * the convenience int and bool code can be common to set and get | |
4543 | */ | |
4544 | if (params == NULL) { | |
4545 | params = arg; | |
4546 | plen = len; | |
4547 | } | |
4548 | ||
4549 | if (vi->type == IOVT_VOID) | |
4550 | val_size = 0; | |
4551 | else if (vi->type == IOVT_BUFFER) | |
4552 | val_size = len; | |
4553 | else | |
4554 | /* all other types are integer sized */ | |
4555 | val_size = sizeof(int); | |
4556 | ||
4557 | actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); | |
4558 | bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); | |
4559 | ||
4560 | exit: | |
4561 | /* In DEVRESET_QUIESCE/DEVRESET_ON, | |
4562 | * this includes dongle re-attach which initialize pwr_req_ref count to 0 and | |
4563 | * causes pwr_req_ref count miss-match in pwr req clear function and hang. | |
4564 | * In this case, bypass pwr req clear. | |
4565 | */ | |
4566 | if (bcmerror == BCME_DNGL_DEVRESET) { | |
4567 | bcmerror = BCME_OK; | |
4568 | } else { | |
4569 | if (MULTIBP_ENAB(bus->sih)) { | |
4570 | if (vi && (vi->flags & DHD_IOVF_PWRREQ_BYPASS)) { | |
4571 | DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__)); | |
4572 | } else { | |
4573 | dhd_bus_pcie_pwr_req_clear(bus); | |
4574 | } | |
4575 | } | |
4576 | } | |
4577 | return bcmerror; | |
4578 | } /* dhd_bus_iovar_op */ | |
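/*
 * Usage sketch (illustrative only): a 'get' of an integer bus iovar through
 * the dispatcher above. For a get, 'params'/'plen' may be NULL/0 and the
 * result is returned in 'arg'. "ramstart" is used purely as an example name;
 * the actual iovar table consulted is dhdpcie_iovars.
 *
 *   int val = 0;
 *   int err = dhd_bus_iovar_op(dhdp, "ramstart", NULL, 0,
 *                              &val, sizeof(val), FALSE);
 */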
4579 | ||
4580 | #ifdef BCM_BUZZZ | |
4581 | #include <bcm_buzzz.h> | |
4582 | ||
4583 | int | |
4584 | dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log, | |
4585 | const int num_counters) | |
4586 | { | |
4587 | int bytes = 0; | |
4588 | uint32 ctr; | |
4589 | uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX]; | |
4590 | uint32 delta[BCM_BUZZZ_COUNTERS_MAX]; | |
4591 | ||
4592 | /* Compute elapsed counter values per counter event type */ | |
4593 | for (ctr = 0U; ctr < num_counters; ctr++) { | |
4594 | prev[ctr] = core[ctr]; | |
4595 | curr[ctr] = *log++; | |
4596 | core[ctr] = curr[ctr]; /* saved for next log */ | |
4597 | ||
4598 | if (curr[ctr] < prev[ctr]) | |
4599 | delta[ctr] = curr[ctr] + (~0U - prev[ctr]); | |
4600 | else | |
4601 | delta[ctr] = (curr[ctr] - prev[ctr]); | |
4602 | ||
4603 | bytes += sprintf(p + bytes, "%12u ", delta[ctr]); | |
4604 | } | |
4605 | ||
4606 | return bytes; | |
4607 | } | |
4608 | ||
4609 | typedef union cm3_cnts { /* export this in bcm_buzzz.h */ | |
4610 | uint32 u32; | |
4611 | uint8 u8[4]; | |
4612 | struct { | |
4613 | uint8 cpicnt; | |
4614 | uint8 exccnt; | |
4615 | uint8 sleepcnt; | |
4616 | uint8 lsucnt; | |
4617 | }; | |
4618 | } cm3_cnts_t; | |
4619 | ||
4620 | int | |
4621 | dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log) | |
4622 | { | |
4623 | int bytes = 0; | |
4624 | ||
4625 | uint32 cyccnt, instrcnt; | |
4626 | cm3_cnts_t cm3_cnts; | |
4627 | uint8 foldcnt; | |
4628 | ||
4629 | { /* 32bit cyccnt */ | |
4630 | uint32 curr, prev, delta; | |
4631 | prev = core[0]; curr = *log++; core[0] = curr; | |
4632 | if (curr < prev) | |
4633 | delta = curr + (~0U - prev); | |
4634 | else | |
4635 | delta = (curr - prev); | |
4636 | ||
4637 | bytes += sprintf(p + bytes, "%12u ", delta); | |
4638 | cyccnt = delta; | |
4639 | } | |
4640 | ||
4641 | { /* Extract the 4 cnts: cpi, exc, sleep and lsu */ | |
4642 | int i; | |
4643 | uint8 max8 = ~0; | |
4644 | cm3_cnts_t curr, prev, delta; | |
4645 | prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32; | |
4646 | for (i = 0; i < 4; i++) { | |
4647 | if (curr.u8[i] < prev.u8[i]) | |
4648 | delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]); | |
4649 | else | |
4650 | delta.u8[i] = (curr.u8[i] - prev.u8[i]); | |
4651 | bytes += sprintf(p + bytes, "%4u ", delta.u8[i]); | |
4652 | } | |
4653 | cm3_cnts.u32 = delta.u32; | |
4654 | } | |
4655 | ||
4656 | { /* Extract the foldcnt from arg0 */ | |
4657 | uint8 curr, prev, delta, max8 = ~0; | |
4658 | bcm_buzzz_arg0_t arg0; arg0.u32 = *log; | |
4659 | prev = core[2]; curr = arg0.klog.cnt; core[2] = curr; | |
4660 | if (curr < prev) | |
4661 | delta = curr + (max8 - prev); | |
4662 | else | |
4663 | delta = (curr - prev); | |
4664 | bytes += sprintf(p + bytes, "%4u ", delta); | |
4665 | foldcnt = delta; | |
4666 | } | |
4667 | ||
4668 | instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2] | |
4669 | + cm3_cnts.u8[3]) + foldcnt; | |
4670 | if (instrcnt > 0xFFFFFF00) | |
4671 | bytes += sprintf(p + bytes, "[%10s] ", "~"); | |
4672 | else | |
4673 | bytes += sprintf(p + bytes, "[%10u] ", instrcnt); | |
4674 | return bytes; | |
4675 | } | |
4676 | ||
4677 | int | |
4678 | dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz) | |
4679 | { | |
4680 | int bytes = 0; | |
4681 | bcm_buzzz_arg0_t arg0; | |
4682 | static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS; | |
4683 | ||
4684 | if (buzzz->counters == 6) { | |
4685 | bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log); | |
4686 | log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */ | |
4687 | } else { | |
4688 | bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters); | |
4689 | log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */ | |
4690 | } | |
4691 | ||
4692 | /* Dump the logged arguments using the registered formats */ | |
4693 | arg0.u32 = *log++; | |
4694 | ||
4695 | switch (arg0.klog.args) { | |
4696 | case 0: | |
4697 | bytes += sprintf(p + bytes, fmt[arg0.klog.id]); | |
4698 | break; | |
4699 | case 1: | |
4700 | { | |
4701 | uint32 arg1 = *log++; | |
4702 | bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1); | |
4703 | break; | |
4704 | } | |
4705 | case 2: | |
4706 | { | |
4707 | uint32 arg1, arg2; | |
4708 | arg1 = *log++; arg2 = *log++; | |
4709 | bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2); | |
4710 | break; | |
4711 | } | |
4712 | case 3: | |
4713 | { | |
4714 | uint32 arg1, arg2, arg3; | |
4715 | arg1 = *log++; arg2 = *log++; arg3 = *log++; | |
4716 | bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3); | |
4717 | break; | |
4718 | } | |
4719 | case 4: | |
4720 | { | |
4721 | uint32 arg1, arg2, arg3, arg4; | |
4722 | arg1 = *log++; arg2 = *log++; | |
4723 | arg3 = *log++; arg4 = *log++; | |
4724 | bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4); | |
4725 | break; | |
4726 | } | |
4727 | default: | |
4728 | printf("%s: Maximum of 4 arguments supported\n", __FUNCTION__); | |
4729 | break; | |
4730 | } | |
4731 | ||
4732 | bytes += sprintf(p + bytes, "\n"); | |
4733 | ||
4734 | return bytes; | |
4735 | } | |
4736 | ||
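| /* | |
| * Note (added for readability): the BUZZZ trace is a circular buffer. When it | |
| * has wrapped (buzzz_p->wrap == TRUE), the oldest entries live between 'cur' | |
| * and 'end' and are printed first ('part2'); the newer entries between 'log' | |
| * and 'cur' are printed afterwards ('part1'): | |
| * | |
| *   log .......... cur .......... end | |
| *   |--- part1 ---|--- part2 ----|      (part2 is older, dumped first) | |
| * | |
| * Without a wrap only part1 (i.e. 'count' entries starting at 'log') is dumped. | |
| */ | |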
4737 | void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p) | |
4738 | { | |
4739 | int i; | |
4740 | uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX]; | |
4741 | void * log; | |
4742 | ||
4743 | for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) { | |
4744 | core[i] = 0; | |
4745 | } | |
4746 | ||
4747 | log_sz = buzzz_p->log_sz; | |
4748 | ||
4749 | part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz; | |
4750 | ||
4751 | if (buzzz_p->wrap == TRUE) { | |
4752 | part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz; | |
4753 | total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz; | |
4754 | } else { | |
4755 | part2 = 0U; | |
4756 | total = buzzz_p->count; | |
4757 | } | |
4758 | ||
4759 | if (total == 0U) { | |
4760 | printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total); | |
4761 | return; | |
4762 | } else { | |
4763 | printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__, | |
4764 | total, part2, part1); | |
4765 | } | |
4766 | ||
4767 | if (part2) { /* with wrap */ | |
4768 | log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log)); | |
4769 | while (part2--) { /* from cur to end : part2 */ | |
4770 | p[0] = '\0'; | |
4771 | dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); | |
4772 | printf("%s", p); | |
4773 | log = (void*)((size_t)log + buzzz_p->log_sz); | |
4774 | } | |
4775 | } | |
4776 | ||
4777 | log = (void*)buffer_p; | |
4778 | while (part1--) { | |
4779 | p[0] = '\0'; | |
4780 | dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p); | |
4781 | printf("%s", p); | |
4782 | log = (void*)((size_t)log + buzzz_p->log_sz); | |
4783 | } | |
4784 | ||
4785 | printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__); | |
4786 | } | |
4787 | ||
4788 | int dhd_buzzz_dump_dngl(dhd_bus_t *bus) | |
4789 | { | |
4790 | bcm_buzzz_t * buzzz_p = NULL; | |
4791 | void * buffer_p = NULL; | |
4792 | char * page_p = NULL; | |
4793 | pciedev_shared_t *sh; | |
4794 | int ret = 0; | |
4795 | ||
4796 | if (bus->dhd->busstate != DHD_BUS_DATA) { | |
4797 | return BCME_UNSUPPORTED; | |
4798 | } | |
4799 | if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) { | |
4800 | printf("%s: Page memory allocation failure\n", __FUNCTION__); | |
4801 | goto done; | |
4802 | } | |
4803 | if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) { | |
4804 | printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__); | |
4805 | goto done; | |
4806 | } | |
4807 | ||
4808 | ret = dhdpcie_readshared(bus); | |
4809 | if (ret < 0) { | |
4810 | DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__)); | |
4811 | goto done; | |
4812 | } | |
4813 | ||
4814 | sh = bus->pcie_sh; | |
4815 | ||
4816 | DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr)); | |
4817 | ||
4818 | if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */ | |
4819 | ||
4820 | dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr, | |
4821 | (uint8 *)buzzz_p, sizeof(bcm_buzzz_t)); | |
4822 | ||
4823 | printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> " | |
4824 | "count<%u> status<%u> wrap<%u>\n" | |
4825 | "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n", | |
4826 | (int)sh->buzz_dbg_ptr, | |
4827 | (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end, | |
4828 | buzzz_p->count, buzzz_p->status, buzzz_p->wrap, | |
4829 | buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group, | |
4830 | buzzz_p->buffer_sz, buzzz_p->log_sz); | |
4831 | ||
4832 | if (buzzz_p->count == 0) { | |
4833 | printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__); | |
4834 | goto done; | |
4835 | } | |
4836 | ||
4837 | /* Allocate memory for trace buffer and format strings */ | |
4838 | buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz); | |
4839 | if (buffer_p == NULL) { | |
4840 | printf("%s: Buffer memory allocation failure\n", __FUNCTION__); | |
4841 | goto done; | |
4842 | } | |
4843 | ||
4844 | /* Fetch the trace. format strings are exported via bcm_buzzz.h */ | |
4845 | dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */ | |
4846 | (uint8 *)buffer_p, buzzz_p->buffer_sz); | |
4847 | ||
4848 | /* Process and display the trace using formatted output */ | |
4849 | ||
4850 | { | |
4851 | int ctr; | |
4852 | for (ctr = 0; ctr < buzzz_p->counters; ctr++) { | |
4853 | printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]); | |
4854 | } | |
4855 | printf("<code execution point>\n"); | |
4856 | } | |
4857 | ||
4858 | dhd_buzzz_dump(buzzz_p, buffer_p, page_p); | |
4859 | ||
4860 | printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__); | |
4861 | ||
4862 | MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL; | |
4863 | } | |
4864 | ||
4865 | done: | |
4866 | ||
4867 | if (page_p) MFREE(bus->dhd->osh, page_p, 4096); | |
4868 | if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); /* freed before buzzz_p, which supplies the size */ | |
4869 | if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t)); | |
4870 | ||
4871 | return BCME_OK; | |
4872 | } | |
4873 | #endif /* BCM_BUZZZ */ | |
4874 | ||
4875 | #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \ | |
4876 | ((sih)->buscoretype == PCIE2_CORE_ID)) | |
4877 | ||
4878 | #define PCIE_FLR_CAPAB_BIT 28 | |
4879 | #define PCIE_FUNCTION_LEVEL_RESET_BIT 15 | |
4880 | ||
4881 | /* Change delays only for QT HW; FPGA and silicon use the same delay */ | |
4882 | #ifdef BCMQT_HW | |
4883 | #define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u | |
4884 | #define DHD_SSRESET_STATUS_RETRY_DELAY 10000u | |
4885 | #else | |
4886 | #define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */ | |
4887 | #define DHD_SSRESET_STATUS_RETRY_DELAY 40u | |
4888 | #endif // endif | |
4889 | /* | |
4890 | * Increase the SSReset de-assert wait to 8 ms (200 retries x 40 us), | |
4891 | * since re-scan takes longer on 4378B0. | |
4892 | */ | |
4893 | #define DHD_SSRESET_STATUS_RETRIES 200u | |
4894 | ||
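| /* | |
| * Note (added for readability): dhdpcie_enum_reg_init() returns the F0 | |
| * enumeration-space registers to their hardware-init values before an FLR on | |
| * rev66/67: the function control register is rewritten, every interrupt mask | |
| * (IntMask, MSIIntMask, PowerIntMask, MailboxIntMask) is cleared, and each | |
| * status register is cleared by writing back its own current value - the usual | |
| * idiom for write-1-to-clear status bits, e.g.: | |
| * | |
| *   val = si_corereg(sih, idx, OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0); | |
| *   si_corereg(sih, idx, OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0, val); | |
| */ | |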
4895 | static void | |
4896 | dhdpcie_enum_reg_init(dhd_bus_t *bus) | |
4897 | { | |
4898 | /* initialize Function control register (clear bit 4) to HW init value */ | |
4899 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4900 | OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0, | |
4901 | PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE); | |
4902 | ||
4903 | /* clear IntMask */ | |
4904 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4905 | OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0); | |
4906 | /* clear IntStatus */ | |
4907 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4908 | OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0, | |
4909 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4910 | OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0)); | |
4911 | ||
4912 | /* clear MSIVector */ | |
4913 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4914 | OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0); | |
4915 | /* clear MSIIntMask */ | |
4916 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4917 | OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0); | |
4918 | /* clear MSIIntStatus */ | |
4919 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4920 | OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0, | |
4921 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4922 | OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0)); | |
4923 | ||
4924 | /* clear PowerIntMask */ | |
4925 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4926 | OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0); | |
4927 | /* clear PowerIntStatus */ | |
4928 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4929 | OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0, | |
4930 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4931 | OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0)); | |
4932 | ||
4933 | /* clear MailboxIntMask */ | |
4934 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4935 | OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0); | |
4936 | /* clear MailboxInt */ | |
4937 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4938 | OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0, | |
4939 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
4940 | OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0)); | |
4941 | } | |
4942 | ||
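| /* | |
| * Note (added for readability): dhd_bus_perform_flr() below implements the | |
| * PCIe Function Level Reset handshake: | |
| *   1. check the FLR capability bit (bit 28 of PCIE_CFG_DEVICE_CAPABILITY); | |
| *   2. save the PCIe config space; | |
| *   3. set the FLR bit (bit 15 of PCIE_CFG_DEVICE_CONTROL) and wait | |
| *      DHD_FUNCTION_LEVEL_RESET_DELAY msec; | |
| *   4. clear the FLR bit and poll the SSReset status bit of | |
| *      PCIE_CFG_SUBSYSTEM_CONTROL until it clears (up to roughly 8 ms: | |
| *      DHD_SSRESET_STATUS_RETRIES x DHD_SSRESET_STATUS_RETRY_DELAY us); | |
| *   5. restore the PCIe config space. | |
| * The 'force_fail' path sets PCIE_SSRESET_DISABLE_BIT so the poll in step 4 | |
| * times out, presumably to exercise the failure handling. | |
| */ | |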
4943 | int | |
4944 | dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail) | |
4945 | { | |
4946 | uint flr_capab; | |
4947 | uint val; | |
4948 | int retry = 0; | |
4949 | ||
4950 | DHD_ERROR(("******** Perform FLR ********\n")); | |
4951 | ||
4952 | if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) { | |
4953 | if (bus->pcie_mailbox_mask != 0) { | |
4954 | dhdpcie_bus_intr_disable(bus); | |
4955 | } | |
4956 | /* initialize F0 enum registers before FLR for rev66/67 */ | |
4957 | dhdpcie_enum_reg_init(bus); | |
4958 | } | |
4959 | ||
4960 | /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */ | |
4961 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val)); | |
4962 | flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT); | |
4963 | DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n", | |
4964 | PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab)); | |
4965 | if (!flr_capab) { | |
4966 | DHD_ERROR(("Chip does not support FLR\n")); | |
4967 | return BCME_UNSUPPORTED; | |
4968 | } | |
4969 | ||
4970 | /* Save pcie config space */ | |
4971 | DHD_INFO(("Save Pcie Config Space\n")); | |
4972 | DHD_PCIE_CONFIG_SAVE(bus); | |
4973 | ||
4974 | /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */ | |
4975 | DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", | |
4976 | PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); | |
4977 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); | |
4978 | DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); | |
4979 | val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT); | |
4980 | DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); | |
4981 | OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); | |
4982 | ||
4983 | /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */ | |
4984 | DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY)); | |
4985 | OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u); | |
4986 | ||
4987 | if (force_fail) { | |
4988 | DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n", | |
4989 | PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); | |
4990 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); | |
4991 | DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, | |
4992 | val)); | |
4993 | val = val | (1 << PCIE_SSRESET_DISABLE_BIT); | |
4994 | DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, | |
4995 | val)); | |
4996 | OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val); | |
4997 | ||
4998 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); | |
4999 | DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, | |
5000 | val)); | |
5001 | } | |
5002 | ||
5003 | /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */ | |
5004 | DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n", | |
5005 | PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL)); | |
5006 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val)); | |
5007 | DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); | |
5008 | val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT); | |
5009 | DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val)); | |
5010 | OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val); | |
5011 | ||
5012 | /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */ | |
5013 | DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)" | |
5014 | "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL)); | |
5015 | do { | |
5016 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val)); | |
5017 | DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", | |
5018 | PCIE_CFG_SUBSYSTEM_CONTROL, val)); | |
5019 | val = val & (1 << PCIE_SSRESET_STATUS_BIT); | |
5020 | OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY); | |
5021 | } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES)); | |
5022 | ||
5023 | if (val) { | |
5024 | DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", | |
5025 | PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT)); | |
5026 | /* User has to fire the IOVAR again, if force_fail is needed */ | |
5027 | if (force_fail) { | |
5028 | bus->flr_force_fail = FALSE; | |
5029 | DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__)); | |
5030 | } | |
5031 | return BCME_DONGLE_DOWN; | |
5032 | } | |
5033 | ||
5034 | /* Restore pcie config space */ | |
5035 | DHD_INFO(("Restore Pcie Config Space\n")); | |
5036 | DHD_PCIE_CONFIG_RESTORE(bus); | |
5037 | ||
5038 | DHD_ERROR(("******** FLR Succedeed ********\n")); | |
5039 | ||
5040 | return BCME_OK; | |
5041 | } | |
5042 | ||
5043 | #ifdef DHD_USE_BP_RESET | |
5044 | #define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */ | |
5045 | ||
5046 | #define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */ | |
5047 | #define DHD_BP_RESET_STATUS_RETRIES 50u | |
5048 | ||
5049 | #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10 | |
5050 | #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21 | |
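| /* | |
| * Note (added for readability): dhd_bus_perform_bp_reset() below resets the | |
| * dongle backplane without a full FLR: | |
| *   1. disable ASPM in PCIECFGREG_LINK_STATUS_CTRL and wait briefly; | |
| *   2. set bit 10 of PCIECFGREG_SPROM_CTRL to request the backplane reset; | |
| *   3. poll until that bit self-clears (reset asserted); | |
| *   4. poll bit 21 of the DAR clock control/status register until it clears | |
| *      (reset de-asserted); | |
| *   5. re-enable ASPM (L1 entry). | |
| */ | |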
5051 | int | |
5052 | dhd_bus_perform_bp_reset(struct dhd_bus *bus) | |
5053 | { | |
5054 | uint val; | |
5055 | int retry = 0; | |
5056 | uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev); | |
5057 | int ret = BCME_OK; | |
5058 | bool cond; | |
5059 | ||
5060 | DHD_ERROR(("******** Perform BP reset ********\n")); | |
5061 | ||
5062 | /* Disable ASPM */ | |
5063 | DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", | |
5064 | PCIECFGREG_LINK_STATUS_CTRL)); | |
5065 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); | |
5066 | DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); | |
5067 | val = val & (~PCIE_ASPM_ENAB); | |
5068 | DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); | |
5069 | OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val); | |
5070 | ||
5071 | /* wait for delay usec */ | |
5072 | DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY)); | |
5073 | OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY); | |
5074 | ||
5075 | /* Set bit 10 of PCIECFGREG_SPROM_CTRL */ | |
5076 | DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n", | |
5077 | PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL)); | |
5078 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val)); | |
5079 | DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); | |
5080 | val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT); | |
5081 | DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); | |
5082 | OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val); | |
5083 | ||
5084 | /* Wait till the backplane reset is ASSERTED, i.e. | |
5085 | * bit 10 of PCIECFGREG_SPROM_CTRL is cleared. | |
5086 | * Only after this is polling bit 21 of DAR reg 0xAE0 valid; | |
5087 | * otherwise the DAR register will return the previous, stale value. | |
5088 | */ | |
5089 | DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of " | |
5090 | "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n", | |
5091 | PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL)); | |
5092 | do { | |
5093 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val)); | |
5094 | DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val)); | |
5095 | cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT); | |
5096 | OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); | |
5097 | } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); | |
5098 | ||
5099 | if (cond) { | |
5100 | DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", | |
5101 | PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT)); | |
5102 | ret = BCME_ERROR; | |
5103 | goto aspm_enab; | |
5104 | } | |
5105 | ||
5106 | /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */ | |
5107 | DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of " | |
5108 | "dar_clk_ctrl_status_reg(0x%x) is cleared\n", | |
5109 | PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg)); | |
5110 | do { | |
5111 | val = si_corereg(bus->sih, bus->sih->buscoreidx, | |
5112 | dar_clk_ctrl_status_reg, 0, 0); | |
5113 | DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n", | |
5114 | dar_clk_ctrl_status_reg, val)); | |
5115 | cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT); | |
5116 | OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY); | |
5117 | } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES)); | |
5118 | ||
5119 | if (cond) { | |
5120 | DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n", | |
5121 | dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT)); | |
5122 | ret = BCME_ERROR; | |
5123 | } | |
5124 | ||
5125 | aspm_enab: | |
5126 | /* Enable ASPM */ | |
5127 | DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n", | |
5128 | PCIECFGREG_LINK_STATUS_CTRL)); | |
5129 | val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val)); | |
5130 | DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); | |
5131 | val = val | (PCIE_ASPM_L1_ENAB); | |
5132 | DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val)); | |
5133 | OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val); | |
5134 | ||
5135 | DHD_ERROR(("******** BP reset Succeeded ********\n")); | |
5136 | ||
5137 | return ret; | |
5138 | } | |
5139 | #endif /* DHD_USE_BP_RESET */ | |
5140 | ||
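| /* | |
| * Note (added for readability): dhd_bus_devreset() toggles WLAN power. | |
| * flag == TRUE powers the dongle off: the bus is quiesced and stopped, the IRQ | |
| * is freed, the dongle and bus resources are released, the device is disabled, | |
| * and only then is the protocol layer reset (see the System MMU comments | |
| * below). flag == FALSE powers it back on: (on MSM) the host PCIe clock is | |
| * restarted, the device is re-enabled, bus resources are re-allocated, the | |
| * dongle is re-attached, the IRQ is re-requested and dhd_bus_start() brings | |
| * the bus up. | |
| */ | |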
5141 | int | |
5142 | dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) | |
5143 | { | |
5144 | dhd_bus_t *bus = dhdp->bus; | |
5145 | int bcmerror = 0; | |
5146 | unsigned long flags; | |
5147 | unsigned long flags_bus; | |
5148 | #ifdef CONFIG_ARCH_MSM | |
5149 | int retry = POWERUP_MAX_RETRY; | |
5150 | #endif /* CONFIG_ARCH_MSM */ | |
5151 | ||
5152 | if (flag == TRUE) { /* Turn off WLAN */ | |
5153 | /* Removing Power */ | |
5154 | DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); | |
5155 | DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__)); | |
5156 | bus->dhd->up = FALSE; | |
5157 | ||
5158 | /* Wait for other contexts to finish -- if required, a call | |
5159 | * to OSL_DELAY for 1 s can be added here to give other contexts | |
5160 | * a chance to finish | |
5161 | */ | |
5162 | dhdpcie_advertise_bus_cleanup(bus->dhd); | |
5163 | ||
5164 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
5165 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
5166 | atomic_set(&bus->dhd->block_bus, TRUE); | |
5167 | dhd_flush_rx_tx_wq(bus->dhd); | |
5168 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
5169 | ||
5170 | #ifdef BCMPCIE_OOB_HOST_WAKE | |
5171 | /* Clean up any pending host wake IRQ */ | |
5172 | dhd_bus_oob_intr_set(bus->dhd, FALSE); | |
5173 | dhd_bus_oob_intr_unregister(bus->dhd); | |
5174 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
5175 | dhd_os_wd_timer(dhdp, 0); | |
5176 | dhd_bus_stop(bus, TRUE); | |
5177 | if (bus->intr) { | |
5178 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
5179 | dhdpcie_bus_intr_disable(bus); | |
5180 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
5181 | dhdpcie_free_irq(bus); | |
5182 | } | |
5183 | dhd_deinit_bus_lock(bus); | |
5184 | dhd_deinit_backplane_access_lock(bus); | |
5185 | dhd_bus_release_dongle(bus); | |
5186 | dhdpcie_bus_free_resource(bus); | |
5187 | bcmerror = dhdpcie_bus_disable_device(bus); | |
5188 | if (bcmerror) { | |
5189 | DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", | |
5190 | __FUNCTION__, bcmerror)); | |
5191 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
5192 | atomic_set(&bus->dhd->block_bus, FALSE); | |
5193 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
5194 | } | |
5195 | /* Clean up protocol data after the Bus Master Enable bit is cleared, | |
5196 | * so that the host can safely unmap DMA and remove the allocated buffers | |
5197 | * from the PKTID MAP. On some Application Processors with a | |
5198 | * System MMU, the kernel panics when it detects an access to | |
5199 | * DMA-unmapped memory from a device behind the | |
5200 | * System MMU. Such a panic is possible here because the | |
5201 | * dongle may still access DMA-unmapped memory after | |
5202 | * dhd_prot_reset() is called. | |
5203 | * For this reason, the dhd_prot_reset() and dhd_clear() functions | |
5204 | * must be located after the dhdpcie_bus_disable_device(). | |
5205 | */ | |
5206 | dhd_prot_reset(dhdp); | |
5207 | dhd_clear(dhdp); | |
5208 | #ifdef CONFIG_ARCH_MSM | |
5209 | bcmerror = dhdpcie_bus_clock_stop(bus); | |
5210 | if (bcmerror) { | |
5211 | DHD_ERROR(("%s: host clock stop failed: %d\n", | |
5212 | __FUNCTION__, bcmerror)); | |
5213 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
5214 | atomic_set(&bus->dhd->block_bus, FALSE); | |
5215 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
5216 | goto done; | |
5217 | } | |
5218 | #endif /* CONFIG_ARCH_MSM */ | |
5219 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
5220 | DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); | |
5221 | bus->dhd->busstate = DHD_BUS_DOWN; | |
5222 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
5223 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
5224 | atomic_set(&bus->dhd->block_bus, FALSE); | |
5225 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
5226 | } else { | |
5227 | if (bus->intr) { | |
5228 | dhdpcie_free_irq(bus); | |
5229 | } | |
5230 | #ifdef BCMPCIE_OOB_HOST_WAKE | |
5231 | /* Clean up any pending host wake IRQ */ | |
5232 | dhd_bus_oob_intr_set(bus->dhd, FALSE); | |
5233 | dhd_bus_oob_intr_unregister(bus->dhd); | |
5234 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
5235 | dhd_dpc_kill(bus->dhd); | |
5236 | if (!bus->no_bus_init) { | |
5237 | dhd_bus_release_dongle(bus); | |
5238 | dhdpcie_bus_free_resource(bus); | |
5239 | bcmerror = dhdpcie_bus_disable_device(bus); | |
5240 | if (bcmerror) { | |
5241 | DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", | |
5242 | __FUNCTION__, bcmerror)); | |
5243 | } | |
5244 | ||
5245 | /* Clean up protocol data after the Bus Master Enable bit is | |
5246 | * cleared, so that the host can safely unmap DMA and remove the | |
5247 | * allocated buffers from the PKTID MAP. On some Application | |
5248 | * Processors with a System MMU, the kernel panics when it detects | |
5249 | * an access to DMA-unmapped memory from a device behind the | |
5250 | * System MMU. | |
5251 | * Such a panic is possible here because the dongle may still | |
5252 | * access DMA-unmapped memory after dhd_prot_reset() is | |
5253 | * called. | |
5254 | * For this reason, the dhd_prot_reset() and dhd_clear() functions | |
5255 | * must be located after the dhdpcie_bus_disable_device(). | |
5256 | */ | |
5257 | dhd_prot_reset(dhdp); | |
5258 | dhd_clear(dhdp); | |
5259 | } else { | |
5260 | bus->no_bus_init = FALSE; | |
5261 | } | |
5262 | #ifdef CONFIG_ARCH_MSM | |
5263 | bcmerror = dhdpcie_bus_clock_stop(bus); | |
5264 | if (bcmerror) { | |
5265 | DHD_ERROR(("%s: host clock stop failed: %d\n", | |
5266 | __FUNCTION__, bcmerror)); | |
5267 | goto done; | |
5268 | } | |
5269 | #endif /* CONFIG_ARCH_MSM */ | |
5270 | } | |
5271 | ||
5272 | bus->dhd->dongle_reset = TRUE; | |
5273 | DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__)); | |
5274 | ||
5275 | } else { /* Turn on WLAN */ | |
5276 | if (bus->dhd->busstate == DHD_BUS_DOWN) { | |
5277 | /* Powering On */ | |
5278 | DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__)); | |
5279 | #ifdef CONFIG_ARCH_MSM | |
5280 | while (--retry) { | |
5281 | bcmerror = dhdpcie_bus_clock_start(bus); | |
5282 | if (!bcmerror) { | |
5283 | DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n", | |
5284 | __FUNCTION__)); | |
5285 | break; | |
5286 | } else { | |
5287 | OSL_SLEEP(10); | |
5288 | } | |
5289 | } | |
5290 | ||
5291 | if (bcmerror && !retry) { | |
5292 | DHD_ERROR(("%s: host pcie clock enable failed: %d\n", | |
5293 | __FUNCTION__, bcmerror)); | |
5294 | goto done; | |
5295 | } | |
5296 | #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON) | |
5297 | dhd_bus_aspm_enable_rc_ep(bus, FALSE); | |
5298 | #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */ | |
5299 | #endif /* CONFIG_ARCH_MSM */ | |
5300 | bus->is_linkdown = 0; | |
5301 | bus->cto_triggered = 0; | |
5302 | bcmerror = dhdpcie_bus_enable_device(bus); | |
5303 | if (bcmerror) { | |
5304 | DHD_ERROR(("%s: host configuration restore failed: %d\n", | |
5305 | __FUNCTION__, bcmerror)); | |
5306 | goto done; | |
5307 | } | |
5308 | ||
5309 | bcmerror = dhdpcie_bus_alloc_resource(bus); | |
5310 | if (bcmerror) { | |
5311 | DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n", | |
5312 | __FUNCTION__, bcmerror)); | |
5313 | goto done; | |
5314 | } | |
5315 | ||
5316 | bcmerror = dhdpcie_bus_dongle_attach(bus); | |
5317 | if (bcmerror) { | |
5318 | DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n", | |
5319 | __FUNCTION__, bcmerror)); | |
5320 | goto done; | |
5321 | } | |
5322 | ||
5323 | bcmerror = dhd_bus_request_irq(bus); | |
5324 | if (bcmerror) { | |
5325 | DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n", | |
5326 | __FUNCTION__, bcmerror)); | |
5327 | goto done; | |
5328 | } | |
5329 | ||
5330 | bus->dhd->dongle_reset = FALSE; | |
5331 | ||
5332 | #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) | |
5333 | dhd_irq_set_affinity(bus->dhd, cpumask_of(1)); | |
5334 | #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ | |
5335 | ||
5336 | bcmerror = dhd_bus_start(dhdp); | |
5337 | if (bcmerror) { | |
5338 | DHD_ERROR(("%s: dhd_bus_start: %d\n", | |
5339 | __FUNCTION__, bcmerror)); | |
5340 | goto done; | |
5341 | } | |
5342 | ||
5343 | bus->dhd->up = TRUE; | |
5344 | /* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */ | |
5345 | if (bus->dhd->dhd_watchdog_ms_backup) { | |
5346 | DHD_ERROR(("%s: Enabling wdtick after dhd init\n", | |
5347 | __FUNCTION__)); | |
5348 | dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup); | |
5349 | } | |
5350 | DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__)); | |
5351 | } else { | |
5352 | DHD_ERROR(("%s: what should we do here\n", __FUNCTION__)); | |
5353 | goto done; | |
5354 | } | |
5355 | } | |
5356 | ||
5357 | done: | |
5358 | if (bcmerror) { | |
5359 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
5360 | DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); | |
5361 | bus->dhd->busstate = DHD_BUS_DOWN; | |
5362 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
5363 | } | |
5364 | return bcmerror; | |
5365 | } | |
5366 | ||
5367 | /* si_backplane_access() manages a shared resource - the BAR0 mapping - hence its | |
5368 | * calls shall be serialized. This wrapper function provides such serialization | |
5369 | * and shall be used everywhere instead of direct calls of si_backplane_access() | |
5370 | * | |
5371 | * The Linux DHD driver calls si_backplane_access() from three contexts: tasklet | |
5372 | * (which may call dhdpcie_sssr_dump()), iovar | |
5373 | * ("sbreg", "membytes", etc.) and procfs (used by the GDB proxy). To avoid race | |
5374 | * conditions, calls of si_backplane_access() shall be serialized. The presence of | |
5375 | * a tasklet context implies that the serialization shall be based on a spinlock. | |
5376 | * Hence the Linux implementation of dhd_pcie_backplane_access_[un]lock() is | |
5377 | * spinlock-based. | |
5378 | * | |
5379 | * Other platforms may add their own implementations of | |
5380 | * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not | |
5381 | * needed, the implementation may be empty) | |
5382 | */ | |
5383 | static uint | |
5384 | serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read) | |
5385 | { | |
5386 | uint ret; | |
5387 | unsigned long flags; | |
5388 | DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags); | |
5389 | ret = si_backplane_access(bus->sih, addr, size, val, read); | |
5390 | DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags); | |
5391 | return ret; | |
5392 | } | |
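| /* | |
| * Minimal usage sketch (illustrative only; assumes si_backplane_access() | |
| * returns BCME_OK on success and that 'addr' is a valid backplane address): | |
| * | |
| *   uint regval; | |
| *   if (serialized_backplane_access(bus, addr, sizeof(regval), | |
| *           &regval, TRUE) != BCME_OK) { | |
| *       DHD_ERROR(("%s: backplane read failed\n", __FUNCTION__)); | |
| *   } | |
| */ | |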
5393 | ||
5394 | static int | |
5395 | dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd) | |
5396 | { | |
5397 | int h2d_support, d2h_support; | |
5398 | ||
5399 | d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0; | |
5400 | h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0; | |
5401 | return (d2h_support | (h2d_support << 1)); | |
5402 | ||
5403 | } | |
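| /* | |
| * Note (added for readability): the value exchanged by the dma_ring_indices | |
| * get/set pair is a 2-bit mask: bit 0 maps to dma_d2h_ring_upd_support and | |
| * bit 1 to dma_h2d_ring_upd_support. So 0 disables both, 1 enables D2H only, | |
| * 2 enables H2D only and 3 enables both, matching the 0..3 range check in the | |
| * setter below. | |
| */ | |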
5404 | int | |
5405 | dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val) | |
5406 | { | |
5407 | int bcmerror = 0; | |
5408 | /* Can change it only during initialization/FW download */ | |
5409 | if (dhd->busstate == DHD_BUS_DOWN) { | |
5410 | if ((int_val > 3) || (int_val < 0)) { | |
5411 | DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n")); | |
5412 | bcmerror = BCME_BADARG; | |
5413 | } else { | |
5414 | dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE; | |
5415 | dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE; | |
5416 | dhd->dma_ring_upd_overwrite = TRUE; | |
5417 | } | |
5418 | } else { | |
5419 | DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", | |
5420 | __FUNCTION__)); | |
5421 | bcmerror = BCME_NOTDOWN; | |
5422 | } | |
5423 | ||
5424 | return bcmerror; | |
5425 | ||
5426 | } | |
5427 | ||
5428 | /** | |
5429 | * IOVAR handler of the DHD bus layer (in this case, the PCIe bus). | |
5430 | * | |
5431 | * @param actionid e.g. IOV_SVAL(IOV_PCIEREG) | |
5432 | * @param params input buffer | |
5433 | * @param plen length in [bytes] of input buffer 'params' | |
5434 | * @param arg output buffer | |
5435 | * @param len length in [bytes] of output buffer 'arg' | |
5436 | */ | |
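| /* | |
| * Note (added for readability): the prologue below copies up to three 32-bit | |
| * words from 'params' into int_val/int_val2/int_val3 and derives bool_val, and | |
| * the caller has already set val_size to sizeof(int) for integer-typed iovars. | |
| * A typical GET case therefore just does | |
| *   int_val = (int32)<value>; bcopy(&int_val, arg, val_size); | |
| * while a typical SET case consumes int_val / bool_val directly. | |
| */ | |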
5437 | static int | |
5438 | dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, | |
5439 | void *params, int plen, void *arg, int len, int val_size) | |
5440 | { | |
5441 | int bcmerror = 0; | |
5442 | int32 int_val = 0; | |
5443 | int32 int_val2 = 0; | |
5444 | int32 int_val3 = 0; | |
5445 | bool bool_val = 0; | |
5446 | ||
5447 | DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", | |
5448 | __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); | |
5449 | ||
5450 | if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) | |
5451 | goto exit; | |
5452 | ||
5453 | if (plen >= (int)sizeof(int_val)) | |
5454 | bcopy(params, &int_val, sizeof(int_val)); | |
5455 | ||
5456 | if (plen >= (int)sizeof(int_val) * 2) | |
5457 | bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2)); | |
5458 | ||
5459 | if (plen >= (int)sizeof(int_val) * 3) | |
5460 | bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3)); | |
5461 | ||
5462 | bool_val = (int_val != 0) ? TRUE : FALSE; | |
5463 | ||
5464 | /* Check if dongle is in reset. If so, only allow DEVRESET iovars */ | |
5465 | if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || | |
5466 | actionid == IOV_GVAL(IOV_DEVRESET))) { | |
5467 | bcmerror = BCME_NOTREADY; | |
5468 | goto exit; | |
5469 | } | |
5470 | ||
5471 | switch (actionid) { | |
5472 | ||
5473 | case IOV_SVAL(IOV_VARS): | |
5474 | bcmerror = dhdpcie_downloadvars(bus, arg, len); | |
5475 | break; | |
5476 | case IOV_SVAL(IOV_PCIE_LPBK): | |
5477 | bcmerror = dhdpcie_bus_lpback_req(bus, int_val); | |
5478 | break; | |
5479 | ||
5480 | case IOV_SVAL(IOV_PCIE_DMAXFER): { | |
5481 | dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg; | |
5482 | ||
5483 | if (!dmaxfer) | |
5484 | return BCME_BADARG; | |
5485 | if (dmaxfer->version != DHD_DMAXFER_VERSION) | |
5486 | return BCME_VERSION; | |
5487 | if (dmaxfer->length != sizeof(dma_xfer_info_t)) { | |
5488 | return BCME_BADLEN; | |
5489 | } | |
5490 | ||
5491 | bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes, | |
5492 | dmaxfer->src_delay, dmaxfer->dest_delay, | |
5493 | dmaxfer->type, dmaxfer->core_num, | |
5494 | dmaxfer->should_wait); | |
5495 | ||
5496 | if (dmaxfer->should_wait && bcmerror >= 0) { | |
5497 | bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer); | |
5498 | } | |
5499 | break; | |
5500 | } | |
5501 | ||
5502 | case IOV_GVAL(IOV_PCIE_DMAXFER): { | |
5503 | dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params; | |
5504 | if (!dmaxfer) | |
5505 | return BCME_BADARG; | |
5506 | if (dmaxfer->version != DHD_DMAXFER_VERSION) | |
5507 | return BCME_VERSION; | |
5508 | if (dmaxfer->length != sizeof(dma_xfer_info_t)) { | |
5509 | return BCME_BADLEN; | |
5510 | } | |
5511 | bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer); | |
5512 | break; | |
5513 | } | |
5514 | ||
5515 | case IOV_GVAL(IOV_PCIE_SUSPEND): | |
5516 | int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0; | |
5517 | bcopy(&int_val, arg, val_size); | |
5518 | break; | |
5519 | ||
5520 | case IOV_SVAL(IOV_PCIE_SUSPEND): | |
5521 | if (bool_val) { /* Suspend */ | |
5522 | int ret; | |
5523 | unsigned long flags; | |
5524 | ||
5525 | /* | |
5526 | * If some other context is busy, wait until they are done, | |
5527 | * before starting suspend | |
5528 | */ | |
5529 | ret = dhd_os_busbusy_wait_condition(bus->dhd, | |
5530 | &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR); | |
5531 | if (ret == 0) { | |
5532 | DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n", | |
5533 | __FUNCTION__, bus->dhd->dhd_bus_busy_state)); | |
5534 | return BCME_BUSY; | |
5535 | } | |
5536 | ||
5537 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
5538 | DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); | |
5539 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
5540 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
5541 | dhdpcie_bus_suspend(bus, TRUE, TRUE); | |
5542 | #else | |
5543 | dhdpcie_bus_suspend(bus, TRUE); | |
5544 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
5545 | ||
5546 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
5547 | DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); | |
5548 | dhd_os_busbusy_wake(bus->dhd); | |
5549 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
5550 | } else { /* Resume */ | |
5551 | unsigned long flags; | |
5552 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
5553 | DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); | |
5554 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
5555 | ||
5556 | dhdpcie_bus_suspend(bus, FALSE); | |
5557 | ||
5558 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
5559 | DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); | |
5560 | dhd_os_busbusy_wake(bus->dhd); | |
5561 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
5562 | } | |
5563 | break; | |
5564 | ||
5565 | case IOV_GVAL(IOV_MEMSIZE): | |
5566 | int_val = (int32)bus->ramsize; | |
5567 | bcopy(&int_val, arg, val_size); | |
5568 | break; | |
5569 | ||
5570 | /* Debug related. Dumps core registers or one of the dongle memories */ | |
5571 | case IOV_GVAL(IOV_DUMP_DONGLE): | |
5572 | { | |
5573 | dump_dongle_in_t ddi = *(dump_dongle_in_t*)params; | |
5574 | dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg; | |
5575 | uint32 *p = ddo->val; | |
5576 | const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */ | |
5577 | ||
5578 | if (plen < sizeof(ddi) || len < sizeof(ddo)) { | |
5579 | bcmerror = BCME_BADARG; | |
5580 | break; | |
5581 | } | |
5582 | ||
5583 | switch (ddi.type) { | |
5584 | case DUMP_DONGLE_COREREG: | |
5585 | ddo->n_bytes = 0; | |
5586 | ||
5587 | if (si_setcoreidx(bus->sih, ddi.index) == NULL) { | |
5588 | break; // beyond last core: core enumeration ended | |
5589 | } | |
5590 | ||
5591 | ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0); | |
5592 | ddo->address += ddi.offset; // BP address at which this dump starts | |
5593 | ||
5594 | ddo->id = si_coreid(bus->sih); | |
5595 | ddo->rev = si_corerev(bus->sih); | |
5596 | ||
5597 | while (ddi.offset < max_offset && | |
5598 | sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) { | |
5599 | *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0); | |
5600 | ddi.offset += sizeof(uint32); | |
5601 | ddo->n_bytes += sizeof(uint32); | |
5602 | } | |
5603 | break; | |
5604 | default: | |
5605 | // TODO: implement d11 SHM/TPL dumping | |
5606 | bcmerror = BCME_BADARG; | |
5607 | break; | |
5608 | } | |
5609 | break; | |
5610 | } | |
5611 | ||
5612 | /* Debug related. Returns a string with dongle capabilities */ | |
5613 | case IOV_GVAL(IOV_DNGL_CAPS): | |
5614 | { | |
5615 | strncpy(arg, bus->dhd->fw_capabilities, | |
5616 | MIN(strlen(bus->dhd->fw_capabilities), (size_t)len)); | |
5617 | ((char*)arg)[len - 1] = '\0'; | |
5618 | break; | |
5619 | } | |
5620 | ||
5621 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
5622 | case IOV_SVAL(IOV_GDB_SERVER): | |
5623 | /* debugger_*() functions may sleep, so cannot hold spinlock */ | |
5624 | DHD_PERIM_UNLOCK(bus->dhd); | |
5625 | if (int_val > 0) { | |
5626 | debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih)); | |
5627 | } else { | |
5628 | debugger_close(); | |
5629 | } | |
5630 | DHD_PERIM_LOCK(bus->dhd); | |
5631 | break; | |
5632 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
5633 | ||
5634 | #ifdef BCM_BUZZZ | |
5635 | /* Dump dongle side buzzz trace to console */ | |
5636 | case IOV_GVAL(IOV_BUZZZ_DUMP): | |
5637 | bcmerror = dhd_buzzz_dump_dngl(bus); | |
5638 | break; | |
5639 | #endif /* BCM_BUZZZ */ | |
5640 | ||
5641 | case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): | |
5642 | bcmerror = dhdpcie_bus_download_state(bus, bool_val); | |
5643 | break; | |
5644 | ||
5645 | case IOV_GVAL(IOV_RAMSIZE): | |
5646 | int_val = (int32)bus->ramsize; | |
5647 | bcopy(&int_val, arg, val_size); | |
5648 | break; | |
5649 | ||
5650 | case IOV_SVAL(IOV_RAMSIZE): | |
5651 | bus->ramsize = int_val; | |
5652 | bus->orig_ramsize = int_val; | |
5653 | break; | |
5654 | ||
5655 | case IOV_GVAL(IOV_RAMSTART): | |
5656 | int_val = (int32)bus->dongle_ram_base; | |
5657 | bcopy(&int_val, arg, val_size); | |
5658 | break; | |
5659 | ||
5660 | case IOV_GVAL(IOV_CC_NVMSHADOW): | |
5661 | { | |
5662 | struct bcmstrbuf dump_b; | |
5663 | ||
5664 | bcm_binit(&dump_b, arg, len); | |
5665 | bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b); | |
5666 | break; | |
5667 | } | |
5668 | ||
5669 | case IOV_GVAL(IOV_SLEEP_ALLOWED): | |
5670 | bool_val = bus->sleep_allowed; | |
5671 | bcopy(&bool_val, arg, val_size); | |
5672 | break; | |
5673 | ||
5674 | case IOV_SVAL(IOV_SLEEP_ALLOWED): | |
5675 | bus->sleep_allowed = bool_val; | |
5676 | break; | |
5677 | ||
5678 | case IOV_GVAL(IOV_DONGLEISOLATION): | |
5679 | int_val = bus->dhd->dongle_isolation; | |
5680 | bcopy(&int_val, arg, val_size); | |
5681 | break; | |
5682 | ||
5683 | case IOV_SVAL(IOV_DONGLEISOLATION): | |
5684 | bus->dhd->dongle_isolation = bool_val; | |
5685 | break; | |
5686 | ||
5687 | case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD): | |
5688 | int_val = bus->ltrsleep_on_unload; | |
5689 | bcopy(&int_val, arg, val_size); | |
5690 | break; | |
5691 | ||
5692 | case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD): | |
5693 | bus->ltrsleep_on_unload = bool_val; | |
5694 | break; | |
5695 | ||
5696 | case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK): | |
5697 | { | |
5698 | struct bcmstrbuf dump_b; | |
5699 | bcm_binit(&dump_b, arg, len); | |
5700 | bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b); | |
5701 | break; | |
5702 | } | |
5703 | case IOV_GVAL(IOV_DMA_RINGINDICES): | |
5704 | { | |
5705 | int_val = dhdpcie_get_dma_ring_indices(bus->dhd); | |
5706 | bcopy(&int_val, arg, sizeof(int_val)); | |
5707 | break; | |
5708 | } | |
5709 | case IOV_SVAL(IOV_DMA_RINGINDICES): | |
5710 | bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val); | |
5711 | break; | |
5712 | ||
5713 | case IOV_GVAL(IOV_METADATA_DBG): | |
5714 | int_val = dhd_prot_metadata_dbg_get(bus->dhd); | |
5715 | bcopy(&int_val, arg, val_size); | |
5716 | break; | |
5717 | case IOV_SVAL(IOV_METADATA_DBG): | |
5718 | dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0)); | |
5719 | break; | |
5720 | ||
5721 | case IOV_GVAL(IOV_RX_METADATALEN): | |
5722 | int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE); | |
5723 | bcopy(&int_val, arg, val_size); | |
5724 | break; | |
5725 | ||
5726 | case IOV_SVAL(IOV_RX_METADATALEN): | |
5727 | if (int_val > 64) { | |
5728 | bcmerror = BCME_BUFTOOLONG; | |
5729 | break; | |
5730 | } | |
5731 | dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE); | |
5732 | break; | |
5733 | ||
5734 | case IOV_SVAL(IOV_TXP_THRESHOLD): | |
5735 | dhd_prot_txp_threshold(bus->dhd, TRUE, int_val); | |
5736 | break; | |
5737 | ||
5738 | case IOV_GVAL(IOV_TXP_THRESHOLD): | |
5739 | int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val); | |
5740 | bcopy(&int_val, arg, val_size); | |
5741 | break; | |
5742 | ||
5743 | case IOV_SVAL(IOV_DB1_FOR_MB): | |
5744 | if (int_val) | |
5745 | bus->db1_for_mb = TRUE; | |
5746 | else | |
5747 | bus->db1_for_mb = FALSE; | |
5748 | break; | |
5749 | ||
5750 | case IOV_GVAL(IOV_DB1_FOR_MB): | |
5751 | if (bus->db1_for_mb) | |
5752 | int_val = 1; | |
5753 | else | |
5754 | int_val = 0; | |
5755 | bcopy(&int_val, arg, val_size); | |
5756 | break; | |
5757 | ||
5758 | case IOV_GVAL(IOV_TX_METADATALEN): | |
5759 | int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE); | |
5760 | bcopy(&int_val, arg, val_size); | |
5761 | break; | |
5762 | ||
5763 | case IOV_SVAL(IOV_TX_METADATALEN): | |
5764 | if (int_val > 64) { | |
5765 | bcmerror = BCME_BUFTOOLONG; | |
5766 | break; | |
5767 | } | |
5768 | dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE); | |
5769 | break; | |
5770 | ||
5771 | case IOV_SVAL(IOV_DEVRESET): | |
5772 | switch (int_val) { | |
5773 | case DHD_BUS_DEVRESET_ON: | |
5774 | bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val); | |
5775 | break; | |
5776 | case DHD_BUS_DEVRESET_OFF: | |
5777 | bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val); | |
5778 | break; | |
5779 | case DHD_BUS_DEVRESET_FLR: | |
5780 | bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail); | |
5781 | break; | |
5782 | case DHD_BUS_DEVRESET_FLR_FORCE_FAIL: | |
5783 | bus->flr_force_fail = TRUE; | |
5784 | break; | |
5785 | default: | |
5786 | DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__)); | |
5787 | break; | |
5788 | } | |
5789 | break; | |
5790 | case IOV_SVAL(IOV_FORCE_FW_TRAP): | |
5791 | if (bus->dhd->busstate == DHD_BUS_DATA) | |
5792 | dhdpcie_fw_trap(bus); | |
5793 | else { | |
5794 | DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__)); | |
5795 | bcmerror = BCME_NOTUP; | |
5796 | } | |
5797 | break; | |
5798 | case IOV_GVAL(IOV_FLOW_PRIO_MAP): | |
5799 | int_val = bus->dhd->flow_prio_map_type; | |
5800 | bcopy(&int_val, arg, val_size); | |
5801 | break; | |
5802 | ||
5803 | case IOV_SVAL(IOV_FLOW_PRIO_MAP): | |
5804 | int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val); | |
5805 | bcopy(&int_val, arg, val_size); | |
5806 | break; | |
5807 | ||
5808 | case IOV_GVAL(IOV_TXBOUND): | |
5809 | int_val = (int32)dhd_txbound; | |
5810 | bcopy(&int_val, arg, val_size); | |
5811 | break; | |
5812 | ||
5813 | case IOV_SVAL(IOV_TXBOUND): | |
5814 | dhd_txbound = (uint)int_val; | |
5815 | break; | |
5816 | ||
5817 | case IOV_SVAL(IOV_H2D_MAILBOXDATA): | |
5818 | dhdpcie_send_mb_data(bus, (uint)int_val); | |
5819 | break; | |
5820 | ||
5821 | case IOV_SVAL(IOV_INFORINGS): | |
5822 | dhd_prot_init_info_rings(bus->dhd); | |
5823 | break; | |
5824 | ||
5825 | case IOV_SVAL(IOV_H2D_PHASE): | |
5826 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
5827 | DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", | |
5828 | __FUNCTION__)); | |
5829 | bcmerror = BCME_NOTDOWN; | |
5830 | break; | |
5831 | } | |
5832 | if (int_val) | |
5833 | bus->dhd->h2d_phase_supported = TRUE; | |
5834 | else | |
5835 | bus->dhd->h2d_phase_supported = FALSE; | |
5836 | break; | |
5837 | ||
5838 | case IOV_GVAL(IOV_H2D_PHASE): | |
5839 | int_val = (int32) bus->dhd->h2d_phase_supported; | |
5840 | bcopy(&int_val, arg, val_size); | |
5841 | break; | |
5842 | ||
5843 | case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE): | |
5844 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
5845 | DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", | |
5846 | __FUNCTION__)); | |
5847 | bcmerror = BCME_NOTDOWN; | |
5848 | break; | |
5849 | } | |
5850 | if (int_val) | |
5851 | bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE; | |
5852 | else | |
5853 | bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE; | |
5854 | break; | |
5855 | ||
5856 | case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE): | |
5857 | int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase; | |
5858 | bcopy(&int_val, arg, val_size); | |
5859 | break; | |
5860 | ||
5861 | case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM): | |
5862 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
5863 | DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", | |
5864 | __FUNCTION__)); | |
5865 | bcmerror = BCME_NOTDOWN; | |
5866 | break; | |
5867 | } | |
5868 | dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val); | |
5869 | break; | |
5870 | ||
5871 | case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM): | |
5872 | int_val = dhd_prot_get_h2d_max_txpost(bus->dhd); | |
5873 | bcopy(&int_val, arg, val_size); | |
5874 | break; | |
5875 | ||
5876 | case IOV_GVAL(IOV_RXBOUND): | |
5877 | int_val = (int32)dhd_rxbound; | |
5878 | bcopy(&int_val, arg, val_size); | |
5879 | break; | |
5880 | ||
5881 | case IOV_SVAL(IOV_RXBOUND): | |
5882 | dhd_rxbound = (uint)int_val; | |
5883 | break; | |
5884 | ||
5885 | case IOV_GVAL(IOV_TRAPDATA): | |
5886 | { | |
5887 | struct bcmstrbuf dump_b; | |
5888 | bcm_binit(&dump_b, arg, len); | |
5889 | bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE); | |
5890 | break; | |
5891 | } | |
5892 | ||
5893 | case IOV_GVAL(IOV_TRAPDATA_RAW): | |
5894 | { | |
5895 | struct bcmstrbuf dump_b; | |
5896 | bcm_binit(&dump_b, arg, len); | |
5897 | bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE); | |
5898 | break; | |
5899 | } | |
5900 | case IOV_SVAL(IOV_HANGREPORT): | |
5901 | bus->dhd->hang_report = bool_val; | |
5902 | DHD_ERROR(("%s: Set hang_report as %d\n", | |
5903 | __FUNCTION__, bus->dhd->hang_report)); | |
5904 | break; | |
5905 | ||
5906 | case IOV_GVAL(IOV_HANGREPORT): | |
5907 | int_val = (int32)bus->dhd->hang_report; | |
5908 | bcopy(&int_val, arg, val_size); | |
5909 | break; | |
5910 | ||
5911 | case IOV_SVAL(IOV_CTO_PREVENTION): | |
5912 | bcmerror = dhdpcie_cto_init(bus, bool_val); | |
5913 | break; | |
5914 | ||
5915 | case IOV_GVAL(IOV_CTO_PREVENTION): | |
5916 | if (bus->sih->buscorerev < 19) { | |
5917 | bcmerror = BCME_UNSUPPORTED; | |
5918 | break; | |
5919 | } | |
5920 | int_val = (int32)bus->cto_enable; | |
5921 | bcopy(&int_val, arg, val_size); | |
5922 | break; | |
5923 | ||
5924 | case IOV_SVAL(IOV_CTO_THRESHOLD): | |
5925 | { | |
5926 | if (bus->sih->buscorerev < 19) { | |
5927 | bcmerror = BCME_UNSUPPORTED; | |
5928 | break; | |
5929 | } | |
5930 | bus->cto_threshold = (uint32)int_val; | |
5931 | } | |
5932 | break; | |
5933 | ||
5934 | case IOV_GVAL(IOV_CTO_THRESHOLD): | |
5935 | if (bus->sih->buscorerev < 19) { | |
5936 | bcmerror = BCME_UNSUPPORTED; | |
5937 | break; | |
5938 | } | |
5939 | if (bus->cto_threshold) | |
5940 | int_val = (int32)bus->cto_threshold; | |
5941 | else | |
5942 | int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT; | |
5943 | ||
5944 | bcopy(&int_val, arg, val_size); | |
5945 | break; | |
5946 | ||
5947 | case IOV_SVAL(IOV_PCIE_WD_RESET): | |
5948 | if (bool_val) { | |
5949 | /* Legacy chipcommon watchdog reset */ | |
5950 | dhdpcie_cc_watchdog_reset(bus); | |
5951 | } | |
5952 | break; | |
5953 | ||
5954 | case IOV_GVAL(IOV_HWA_ENAB_BMAP): | |
5955 | int_val = bus->hwa_enab_bmap; | |
5956 | bcopy(&int_val, arg, val_size); | |
5957 | break; | |
5958 | case IOV_SVAL(IOV_HWA_ENAB_BMAP): | |
5959 | bus->hwa_enab_bmap = (uint8)int_val; | |
5960 | break; | |
5961 | case IOV_GVAL(IOV_IDMA_ENABLE): | |
5962 | int_val = bus->idma_enabled; | |
5963 | bcopy(&int_val, arg, val_size); | |
5964 | break; | |
5965 | case IOV_SVAL(IOV_IDMA_ENABLE): | |
5966 | bus->idma_enabled = (bool)int_val; | |
5967 | break; | |
5968 | case IOV_GVAL(IOV_IFRM_ENABLE): | |
5969 | int_val = bus->ifrm_enabled; | |
5970 | bcopy(&int_val, arg, val_size); | |
5971 | break; | |
5972 | case IOV_SVAL(IOV_IFRM_ENABLE): | |
5973 | bus->ifrm_enabled = (bool)int_val; | |
5974 | break; | |
5975 | case IOV_GVAL(IOV_CLEAR_RING): | |
5976 | bcopy(&int_val, arg, val_size); | |
5977 | dhd_flow_rings_flush(bus->dhd, 0); | |
5978 | break; | |
5979 | case IOV_GVAL(IOV_DAR_ENABLE): | |
5980 | int_val = bus->dar_enabled; | |
5981 | bcopy(&int_val, arg, val_size); | |
5982 | break; | |
5983 | case IOV_SVAL(IOV_DAR_ENABLE): | |
5984 | bus->dar_enabled = (bool)int_val; | |
5985 | break; | |
5986 | case IOV_GVAL(IOV_HSCBSIZE): | |
5987 | bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg); | |
5988 | break; | |
5989 | ||
5990 | #ifdef DHD_HP2P | |
5991 | case IOV_SVAL(IOV_HP2P_ENABLE): | |
5992 | dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val); | |
5993 | break; | |
5994 | ||
5995 | case IOV_GVAL(IOV_HP2P_ENABLE): | |
5996 | int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val); | |
5997 | bcopy(&int_val, arg, val_size); | |
5998 | break; | |
5999 | ||
6000 | case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD): | |
6001 | dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val); | |
6002 | break; | |
6003 | ||
6004 | case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD): | |
6005 | int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val); | |
6006 | bcopy(&int_val, arg, val_size); | |
6007 | break; | |
6008 | ||
6009 | case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD): | |
6010 | dhd_prot_time_threshold(bus->dhd, TRUE, int_val); | |
6011 | break; | |
6012 | ||
6013 | case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD): | |
6014 | int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val); | |
6015 | bcopy(&int_val, arg, val_size); | |
6016 | break; | |
6017 | ||
6018 | case IOV_SVAL(IOV_HP2P_PKT_EXPIRY): | |
6019 | dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val); | |
6020 | break; | |
6021 | ||
6022 | case IOV_GVAL(IOV_HP2P_PKT_EXPIRY): | |
6023 | int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val); | |
6024 | bcopy(&int_val, arg, val_size); | |
6025 | break; | |
6026 | case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS): | |
6027 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
6028 | return BCME_NOTDOWN; | |
6029 | } | |
6030 | dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val); | |
6031 | break; | |
6032 | ||
6033 | case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS): | |
6034 | int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE); | |
6035 | bcopy(&int_val, arg, val_size); | |
6036 | break; | |
6037 | case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS): | |
6038 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
6039 | return BCME_NOTDOWN; | |
6040 | } | |
6041 | dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val); | |
6042 | break; | |
6043 | ||
6044 | case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS): | |
6045 | int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE); | |
6046 | bcopy(&int_val, arg, val_size); | |
6047 | break; | |
6048 | #endif /* DHD_HP2P */ | |
6049 | case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL): | |
6050 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
6051 | return BCME_NOTDOWN; | |
6052 | } | |
6053 | if (int_val) | |
6054 | bus->dhd->extdtxs_in_txcpl = TRUE; | |
6055 | else | |
6056 | bus->dhd->extdtxs_in_txcpl = FALSE; | |
6057 | break; | |
6058 | ||
6059 | case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL): | |
6060 | int_val = bus->dhd->extdtxs_in_txcpl; | |
6061 | bcopy(&int_val, arg, val_size); | |
6062 | break; | |
6063 | ||
6064 | case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT): | |
6065 | if (bus->dhd->busstate != DHD_BUS_DOWN) { | |
6066 | return BCME_NOTDOWN; | |
6067 | } | |
6068 | if (int_val) | |
6069 | bus->dhd->hostrdy_after_init = TRUE; | |
6070 | else | |
6071 | bus->dhd->hostrdy_after_init = FALSE; | |
6072 | break; | |
6073 | ||
6074 | case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT): | |
6075 | int_val = bus->dhd->hostrdy_after_init; | |
6076 | bcopy(&int_val, arg, val_size); | |
6077 | break; | |
6078 | ||
6079 | default: | |
6080 | bcmerror = BCME_UNSUPPORTED; | |
6081 | break; | |
6082 | } | |
6083 | ||
6084 | exit: | |
6085 | return bcmerror; | |
6086 | } /* dhdpcie_bus_doiovar */ | |
6087 | ||
6088 | /** Requests a loopback transfer of 'len' bytes via the message buffer protocol */ | |
6089 | static int | |
6090 | dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) | |
6091 | { | |
6092 | if (bus->dhd == NULL) { | |
6093 | DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); | |
6094 | return 0; | |
6095 | } | |
6096 | if (bus->dhd->prot == NULL) { | |
6097 | DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); | |
6098 | return 0; | |
6099 | } | |
6100 | if (bus->dhd->busstate != DHD_BUS_DATA) { | |
6101 | DHD_ERROR(("%s: bus not in a ready state, LPBK not issued\n", __FUNCTION__)); | |
6102 | return 0; | |
6103 | } | |
6104 | dhdmsgbuf_lpbk_req(bus->dhd, len); | |
6105 | return 0; | |
6106 | } | |
6107 | ||
6108 | void | |
6109 | dhd_bus_dump_dar_registers(struct dhd_bus *bus) | |
6110 | { | |
6111 | uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val, | |
6112 | dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val; | |
6113 | uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg, | |
6114 | dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg; | |
6115 | ||
6116 | if (bus->is_linkdown && !bus->cto_triggered) { | |
6117 | DHD_ERROR(("%s: link is down\n", __FUNCTION__)); | |
6118 | return; | |
6119 | } | |
6120 | ||
6121 | dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev); | |
6122 | dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev); | |
6123 | dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev); | |
6124 | dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev); | |
6125 | dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev); | |
6126 | dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev); | |
6127 | ||
6128 | if (bus->sih->buscorerev < 24) { | |
6129 | DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n", | |
6130 | __FUNCTION__, bus->sih->buscorerev)); | |
6131 | return; | |
6132 | } | |
6133 | ||
6134 | dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0); | |
6135 | dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0); | |
6136 | dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0); | |
6137 | dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0); | |
6138 | dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0); | |
6139 | dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0); | |
6140 | ||
6141 | DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n", | |
6142 | __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val, | |
6143 | dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val)); | |
6144 | ||
6145 | DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n", | |
6146 | __FUNCTION__, dar_errlog_reg, dar_errlog_val, | |
6147 | dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val)); | |
6148 | } | |
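/*
 * Illustrative sketch, assuming the usual siutils semantics: the DAR dump
 * above depends on si_corereg(sih, coreidx, regoff, mask, val) writing 'val'
 * under 'mask' and returning the resulting register contents, so a zero mask
 * acts as a plain read.  With a hypothetical offset 'example_reg':
 *
 *   uint32 rd = si_corereg(bus->sih, bus->sih->buscoreidx, example_reg, 0, 0);
 *   si_corereg(bus->sih, bus->sih->buscoreidx, example_reg, ~0, rd | 0x1);
 */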
6149 | ||
6150 | /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */ | |
6151 | void | |
6152 | dhd_bus_hostready(struct dhd_bus *bus) | |
6153 | { | |
6154 | if (!bus->dhd->d2h_hostrdy_supported) { | |
6155 | return; | |
6156 | } | |
6157 | ||
6158 | if (bus->is_linkdown) { | |
6159 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
6160 | return; | |
6161 | } | |
6162 | ||
6163 | DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__, | |
6164 | dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32)))); | |
6165 | ||
6166 | if (DAR_PWRREQ(bus)) { | |
6167 | dhd_bus_pcie_pwr_req(bus); | |
6168 | } | |
6169 | ||
6170 | dhd_bus_dump_dar_registers(bus); | |
6171 | ||
6172 | si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678); | |
6173 | bus->hostready_count++; | |
6174 | DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count)); | |
6175 | } | |
6176 | ||
6177 | /* Clear INTSTATUS */ | |
6178 | void | |
6179 | dhdpcie_bus_clear_intstatus(struct dhd_bus *bus) | |
6180 | { | |
6181 | uint32 intstatus = 0; | |
6182 | if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || | |
6183 | (bus->sih->buscorerev == 2)) { | |
6184 | intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); | |
6185 | dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); | |
6186 | } else { | |
6187 | /* this is a PCIe core register, not a config register */ | |
6188 | intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0); | |
6189 | si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask, | |
6190 | intstatus); | |
6191 | } | |
6192 | } | |
6193 | ||
6194 | int | |
6195 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
6196 | dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint) | |
6197 | #else | |
6198 | dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) | |
6199 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
6200 | { | |
6201 | int timeleft; | |
6202 | int rc = 0; | |
6203 | unsigned long flags, flags_bus; | |
6204 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
6205 | int d3_read_retry = 0; | |
6206 | uint32 d2h_mb_data = 0; | |
6207 | uint32 zero = 0; | |
6208 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
6209 | ||
6210 | printf("%s: state=%d\n", __FUNCTION__, state); | |
6211 | if (bus->dhd == NULL) { | |
6212 | DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); | |
6213 | return BCME_ERROR; | |
6214 | } | |
6215 | if (bus->dhd->prot == NULL) { | |
6216 | DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); | |
6217 | return BCME_ERROR; | |
6218 | } | |
6219 | ||
6220 | if (dhd_query_bus_erros(bus->dhd)) { | |
6221 | return BCME_ERROR; | |
6222 | } | |
6223 | ||
6224 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
6225 | if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) { | |
6226 | DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__)); | |
6227 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6228 | return BCME_ERROR; | |
6229 | } | |
6230 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6231 | if (bus->dhd->dongle_reset) { | |
6232 | DHD_ERROR(("Dongle is in reset state.\n")); | |
6233 | return -EIO; | |
6234 | } | |
6235 | ||
6236 | /* Check whether we are already in the requested state. | |
6237 | * state=TRUE means Suspend | |
6238 | * state=FALSE means Resume | |
6239 | */ | |
6240 | if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) { | |
6241 | DHD_ERROR(("Bus is already in SUSPEND state.\n")); | |
6242 | return BCME_OK; | |
6243 | } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) { | |
6244 | DHD_ERROR(("Bus is already in RESUME state.\n")); | |
6245 | return BCME_OK; | |
6246 | } | |
6247 | ||
6248 | if (state) { | |
6249 | int idle_retry = 0; | |
6250 | int active; | |
6251 | ||
6252 | if (bus->is_linkdown) { | |
6253 | DHD_ERROR(("%s: PCIe link was down, state=%d\n", | |
6254 | __FUNCTION__, state)); | |
6255 | return BCME_ERROR; | |
6256 | } | |
6257 | ||
6258 | /* Suspend */ | |
6259 | DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__)); | |
6260 | ||
6261 | bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms; | |
6262 | if (bus->dhd->dhd_watchdog_ms_backup) { | |
6263 | DHD_ERROR(("%s: Disabling wdtick before going to suspend\n", | |
6264 | __FUNCTION__)); | |
6265 | dhd_os_wd_timer(bus->dhd, 0); | |
6266 | } | |
6267 | ||
6268 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
6269 | if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { | |
6270 | DHD_ERROR(("Tx request has not completed\n")); | |
6271 | bus->dhd->busstate = DHD_BUS_DATA; | |
6272 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6273 | return -EBUSY; | |
6274 | } | |
6275 | ||
6276 | bus->last_suspend_start_time = OSL_LOCALTIME_NS(); | |
6277 | ||
6278 | /* stop all interface network queue. */ | |
6279 | dhd_bus_stop_queue(bus); | |
6280 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6281 | ||
6282 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
6283 | if (byint) { | |
6284 | DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); | |
6285 | /* Clear wait_for_d3_ack before sending D3_INFORM */ | |
6286 | bus->wait_for_d3_ack = 0; | |
6287 | dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); | |
6288 | ||
6289 | timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); | |
6290 | DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); | |
6291 | } else { | |
6292 | /* Clear wait_for_d3_ack before sending D3_INFORM */ | |
6293 | bus->wait_for_d3_ack = 0; | |
6294 | dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT); | |
6295 | while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) { | |
6296 | dhdpcie_handle_mb_data(bus); | |
6297 | usleep_range(1000, 1500); | |
6298 | d3_read_retry++; | |
6299 | } | |
6300 | } | |
6301 | #else | |
6302 | DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); | |
6303 | /* Clear wait_for_d3_ack before sending D3_INFORM */ | |
6304 | bus->wait_for_d3_ack = 0; | |
6305 | /* | |
6306 | * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state | |
6307 | * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata | |
6308 | * inside atomic context, so that no more DBs will be | |
6309 | * rung after sending D3_INFORM | |
6310 | */ | |
6311 | dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); | |
6312 | ||
6313 | /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */ | |
6314 | ||
6315 | timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); | |
6316 | ||
6317 | #ifdef DHD_RECOVER_TIMEOUT | |
6318 | if (bus->wait_for_d3_ack == 0) { | |
6319 | /* If wait_for_d3_ack was not updated because D2H MB was not received */ | |
6320 | uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, | |
6321 | bus->pcie_mailbox_int, 0, 0); | |
6322 | int host_irq_disabled = dhdpcie_irq_disabled(bus); | |
6323 | if ((intstatus) && (intstatus != (uint32)-1) && | |
6324 | (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) { | |
6325 | DHD_ERROR(("%s: D3 ACK trying again intstatus=%x" | |
6326 | " host_irq_disabled=%d\n", | |
6327 | __FUNCTION__, intstatus, host_irq_disabled)); | |
6328 | dhd_pcie_intr_count_dump(bus->dhd); | |
6329 | dhd_print_tasklet_status(bus->dhd); | |
6330 | if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && | |
6331 | !bus->use_mailbox) { | |
6332 | dhd_prot_process_ctrlbuf(bus->dhd); | |
6333 | } else { | |
6334 | dhdpcie_handle_mb_data(bus); | |
6335 | } | |
6336 | timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); | |
6337 | /* Clear Interrupts */ | |
6338 | dhdpcie_bus_clear_intstatus(bus); | |
6339 | } | |
6340 | } /* bus->wait_for_d3_ack was 0 */ | |
6341 | #endif /* DHD_RECOVER_TIMEOUT */ | |
6342 | ||
6343 | DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); | |
6344 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
6345 | ||
6346 | /* To allow threads that got pre-empted to complete. | |
6347 | */ | |
6348 | while ((active = dhd_os_check_wakelock_all(bus->dhd)) && | |
6349 | (idle_retry < MAX_WKLK_IDLE_CHECK)) { | |
6350 | OSL_SLEEP(1); | |
6351 | idle_retry++; | |
6352 | } | |
6353 | ||
6354 | if (bus->wait_for_d3_ack) { | |
6355 | DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__)); | |
6356 | /* Got D3 Ack. Suspend the bus */ | |
6357 | if (active) { | |
6358 | DHD_ERROR(("%s(): Suspend failed because of wakelock; " | |
6359 | "restoring dongle to D0\n", __FUNCTION__)); | |
6360 | ||
6361 | if (bus->dhd->dhd_watchdog_ms_backup) { | |
6362 | DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n", | |
6363 | __FUNCTION__)); | |
6364 | dhd_os_wd_timer(bus->dhd, | |
6365 | bus->dhd->dhd_watchdog_ms_backup); | |
6366 | } | |
6367 | ||
6368 | /* | |
6369 | * Dongle still thinks that it has to be in D3 state until | |
6370 | * it gets a D0 Inform, but we are backing off from suspend. | |
6371 | * Ensure that Dongle is brought back to D0. | |
6372 | * | |
6373 | * Bringing the dongle back from the D3 Ack state to D0 is a | |
6374 | * 2-step process: the dongle expects a D0 Inform message and a | |
6375 | * mailbox interrupt (hostready doorbell) to come out of D3 Ack, | |
6376 | * so both must be sent below. | |
6377 | */ | |
6378 | ||
6379 | /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */ | |
6380 | bus->wait_for_d3_ack = 0; | |
6381 | ||
6382 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
6383 | bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; | |
6384 | /* Enable back the intmask which was cleared in DPC | |
6385 | * after getting D3_ACK. | |
6386 | */ | |
6387 | bus->resume_intr_enable_count++; | |
6388 | ||
6389 | /* For Linux, macOS etc. (other than NDIS) re-enable the dongle | |
6390 | * interrupts using intmask and host interrupts | |
6391 | * which were disabled in the dhdpcie_bus_isr()-> | |
6392 | * dhd_bus_handle_d3_ack(). | |
6393 | */ | |
6394 | /* Enable back interrupt using Intmask!! */ | |
6395 | dhdpcie_bus_intr_enable(bus); | |
6396 | /* Enable back interrupt from Host side!! */ | |
6397 | dhdpcie_enable_irq(bus); | |
6398 | ||
6399 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
6400 | ||
6401 | if (bus->use_d0_inform) { | |
6402 | DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); | |
6403 | dhdpcie_send_mb_data(bus, | |
6404 | (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM)); | |
6405 | DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); | |
6406 | } | |
6407 | /* ring doorbell 1 (hostready) */ | |
6408 | dhd_bus_hostready(bus); | |
6409 | ||
6410 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
6411 | bus->dhd->busstate = DHD_BUS_DATA; | |
6412 | /* resume all interface network queue. */ | |
6413 | dhd_bus_start_queue(bus); | |
6414 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6415 | rc = BCME_ERROR; | |
6416 | } else { | |
6417 | /* Actual Suspend after no wakelock */ | |
6418 | /* At this time bus->bus_low_power_state will be | |
6419 | * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK | |
6420 | * in dhd_bus_handle_d3_ack() | |
6421 | */ | |
6422 | if (bus->use_d0_inform && | |
6423 | (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) { | |
6424 | DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); | |
6425 | dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE)); | |
6426 | DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); | |
6427 | } | |
6428 | ||
6429 | #if defined(BCMPCIE_OOB_HOST_WAKE) | |
6430 | if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) { | |
6431 | DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__)); | |
6432 | } else { | |
6433 | dhdpcie_oob_intr_set(bus, TRUE); | |
6434 | } | |
6435 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
6436 | ||
6437 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
6438 | /* The host cannot process interrupts now, so disable them here. | |
6439 | * No need to disable the dongle INTR using intmask, as that is | |
6440 | * already done from DPC context after | |
6441 | * getting D3_ACK in dhd_bus_handle_d3_ack. | |
6442 | * Code may not look symmetric between Suspend and | |
6443 | * Resume paths but this is done to close down the timing window | |
6444 | * between DPC and suspend context and bus->bus_low_power_state | |
6445 | * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC. | |
6446 | */ | |
6447 | bus->dhd->d3ackcnt_timeout = 0; | |
6448 | bus->dhd->busstate = DHD_BUS_SUSPEND; | |
6449 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6450 | dhdpcie_dump_resource(bus); | |
6451 | /* Handle Host Suspend */ | |
6452 | rc = dhdpcie_pci_suspend_resume(bus, state); | |
6453 | if (!rc) { | |
6454 | bus->last_suspend_end_time = OSL_LOCALTIME_NS(); | |
6455 | } | |
6456 | } | |
6457 | } else if (timeleft == 0) { /* D3 ACK Timeout */ | |
6458 | #ifdef DHD_FW_COREDUMP | |
6459 | uint32 cur_memdump_mode = bus->dhd->memdump_enabled; | |
6460 | #endif /* DHD_FW_COREDUMP */ | |
6461 | ||
6462 | /* check if the D3 ACK timeout is due to a scheduling issue */ | |
6463 | bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) && | |
6464 | bus->isr_entry_time > bus->last_d3_inform_time && | |
6465 | dhd_bus_query_dpc_sched_errors(bus->dhd); | |
6466 | bus->dhd->d3ack_timeout_occured = TRUE; | |
6467 | /* The D3 Ack has timed out */ | |
6468 | bus->dhd->d3ackcnt_timeout++; | |
6469 | DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n", | |
6470 | __FUNCTION__, bus->dhd->is_sched_error ? | |
6471 | " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout)); | |
6472 | #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP) | |
6473 | if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) { | |
6474 | /* change g_assert_type to trigger Kernel panic */ | |
6475 | g_assert_type = 2; | |
6476 | /* use ASSERT() to trigger panic */ | |
6477 | ASSERT(0); | |
6478 | } | |
6479 | #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */ | |
6480 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
6481 | bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; | |
6482 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
6483 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
6484 | bus->dhd->busstate = DHD_BUS_DATA; | |
6485 | /* resume all interface network queue. */ | |
6486 | dhd_bus_start_queue(bus); | |
6487 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6488 | if (!bus->dhd->dongle_trap_occured && | |
6489 | !bus->is_linkdown && | |
6490 | !bus->cto_triggered) { | |
6491 | uint32 intstatus = 0; | |
6492 | ||
6493 | /* Check if PCIe bus status is valid */ | |
6494 | intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, | |
6495 | bus->pcie_mailbox_int, 0, 0); | |
6496 | if (intstatus == (uint32)-1) { | |
6497 | /* Invalidate PCIe bus status */ | |
6498 | bus->is_linkdown = 1; | |
6499 | } | |
6500 | ||
6501 | dhd_bus_dump_console_buffer(bus); | |
6502 | dhd_prot_debug_info_print(bus->dhd); | |
6503 | #ifdef DHD_FW_COREDUMP | |
6504 | if (cur_memdump_mode) { | |
6505 | /* write core dump to file */ | |
6506 | bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; | |
6507 | dhdpcie_mem_dump(bus); | |
6508 | } | |
6509 | #endif /* DHD_FW_COREDUMP */ | |
6510 | ||
6511 | DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n", | |
6512 | __FUNCTION__)); | |
6513 | dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT); | |
6514 | } | |
6515 | #if defined(DHD_ERPOM) | |
6516 | dhd_schedule_reset(bus->dhd); | |
6517 | #endif // endif | |
6518 | rc = -ETIMEDOUT; | |
6519 | } | |
6520 | } else { | |
6521 | /* Resume */ | |
6522 | DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__)); | |
6523 | bus->last_resume_start_time = OSL_LOCALTIME_NS(); | |
6524 | ||
6525 | /** | |
6526 | * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold. | |
6527 | * si_backplane_access(function to read/write backplane) | |
6528 | * updates the window(PCIE2_BAR0_CORE2_WIN) only if | |
6529 | * window being accessed is different from the window | |
6530 | * currently pointed to by second_bar0win. | |
6531 | * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold, | |
6532 | * invalidating second_bar0win after resume updates | |
6533 | * PCIE2_BAR0_CORE2_WIN with right window. | |
6534 | */ | |
6535 | si_invalidate_second_bar0win(bus->sih); | |
6536 | #if defined(BCMPCIE_OOB_HOST_WAKE) | |
6537 | DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd); | |
6538 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
6539 | rc = dhdpcie_pci_suspend_resume(bus, state); | |
6540 | dhdpcie_dump_resource(bus); | |
6541 | ||
6542 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
6543 | /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */ | |
6544 | bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; | |
6545 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
6546 | ||
6547 | if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) { | |
6548 | if (bus->use_d0_inform) { | |
6549 | DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); | |
6550 | dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM)); | |
6551 | DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); | |
6552 | } | |
6553 | /* ring doorbell 1 (hostready) */ | |
6554 | dhd_bus_hostready(bus); | |
6555 | } | |
6556 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
6557 | bus->dhd->busstate = DHD_BUS_DATA; | |
6558 | /* resume all interface network queue. */ | |
6559 | dhd_bus_start_queue(bus); | |
6560 | ||
6561 | /* TODO: for NDIS also we need to use enable_irq in future */ | |
6562 | bus->resume_intr_enable_count++; | |
6563 | ||
6564 | /* For Linux, macOS etc. (other than NDIS) re-enable the dongle interrupts | |
6565 | * using intmask and host interrupts | |
6566 | * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack(). | |
6567 | */ | |
6568 | dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */ | |
6569 | dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */ | |
6570 | ||
6571 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
6572 | ||
6573 | if (bus->dhd->dhd_watchdog_ms_backup) { | |
6574 | DHD_ERROR(("%s: Enabling wdtick after resume\n", | |
6575 | __FUNCTION__)); | |
6576 | dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup); | |
6577 | } | |
6578 | ||
6579 | bus->last_resume_end_time = OSL_LOCALTIME_NS(); | |
6580 | /* Update TCM rd index for EDL ring */ | |
6581 | DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd); | |
6582 | } | |
6583 | return rc; | |
6584 | } | |
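/*
 * Condensed sketch of the suspend/resume handshake implemented in
 * dhdpcie_bus_suspend() above (summary only; the names mirror the calls in
 * this file):
 *
 *   Suspend (state == TRUE):
 *     stop net queues -> dhdpcie_send_mb_data(H2D_HOST_D3_INFORM)
 *     -> dhd_os_d3ack_wait() for D3_ACK
 *     -> on success: optional D0_INFORM_IN_USE (older fw), busstate = SUSPEND,
 *        dhdpcie_pci_suspend_resume(bus, TRUE)
 *     -> on wakelock activity or timeout: restore D0, re-enable interrupts,
 *        restart net queues and fail the suspend
 *
 *   Resume (state == FALSE):
 *     dhdpcie_pci_suspend_resume(bus, FALSE) -> optional D0_INFORM
 *     -> dhd_bus_hostready() (doorbell 1) -> busstate = DATA
 *     -> re-enable intmask/host IRQ -> restart net queues
 */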
6585 | ||
6586 | uint32 | |
6587 | dhdpcie_force_alp(struct dhd_bus *bus, bool enable) | |
6588 | { | |
6589 | ASSERT(bus && bus->sih); | |
6590 | if (enable) { | |
6591 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
6592 | OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP); | |
6593 | } else { | |
6594 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
6595 | OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0); | |
6596 | } | |
6597 | return 0; | |
6598 | } | |
6599 | ||
6600 | /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */ | |
6601 | uint32 | |
6602 | dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time) | |
6603 | { | |
6604 | uint reg_val; | |
6605 | ||
6606 | ASSERT(bus && bus->sih); | |
6607 | ||
6608 | si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, | |
6609 | 0x1004); | |
6610 | reg_val = si_corereg(bus->sih, bus->sih->buscoreidx, | |
6611 | OFFSETOF(sbpcieregs_t, configdata), 0, 0); | |
6612 | reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16); | |
6613 | si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, | |
6614 | reg_val); | |
6615 | ||
6616 | return 0; | |
6617 | } | |
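/*
 * Worked example of the read-modify-write above (illustrative values only):
 * bits [22:16] of PCIe config offset 0x1004 hold the L1 entry time.  With a
 * read value of 0x00AA1234 and l1_entry_time = 0x10:
 *
 *   0x00AA1234 & ~(0x7f << 16)           = 0x00801234   (field cleared)
 *   0x00801234 | ((0x10 & 0x7f) << 16)   = 0x00901234   (new value inserted)
 */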
6618 | ||
6619 | static uint32 | |
6620 | dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk) | |
6621 | { | |
6622 | uint16 chipid = si_chipid(bus->sih); | |
6623 | if ((chipid == BCM4375_CHIP_ID || | |
6624 | chipid == BCM4362_CHIP_ID || | |
6625 | chipid == BCM43751_CHIP_ID || | |
6626 | chipid == BCM43752_CHIP_ID || | |
6627 | chipid == BCM4377_CHIP_ID) && | |
6628 | (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) { | |
6629 | len += 8; | |
6630 | } | |
6631 | DHD_ERROR(("%s: len %d\n", __FUNCTION__, len)); | |
6632 | return len; | |
6633 | } | |
6634 | ||
6635 | /** Transfers bytes from host to dongle and to host again using DMA */ | |
6636 | static int | |
6637 | dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, | |
6638 | uint32 len, uint32 srcdelay, uint32 destdelay, | |
6639 | uint32 d11_lpbk, uint32 core_num, uint32 wait) | |
6640 | { | |
6641 | int ret = 0; | |
6642 | ||
6643 | if (bus->dhd == NULL) { | |
6644 | DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); | |
6645 | return BCME_ERROR; | |
6646 | } | |
6647 | if (bus->dhd->prot == NULL) { | |
6648 | DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); | |
6649 | return BCME_ERROR; | |
6650 | } | |
6651 | if (bus->dhd->busstate != DHD_BUS_DATA) { | |
6652 | DHD_ERROR(("%s: bus is not in a ready state for LPBK\n", __FUNCTION__)); | |
6653 | return BCME_ERROR; | |
6654 | } | |
6655 | ||
6656 | if (len < 5 || len > 4194296) { | |
6657 | DHD_ERROR(("%s: len %d is out of range (5..4194296)\n", __FUNCTION__, len)); | |
6658 | return BCME_ERROR; | |
6659 | } | |
6660 | ||
6661 | len = dhd_apply_d11_war_length(bus, len, d11_lpbk); | |
6662 | ||
6663 | bus->dmaxfer_complete = FALSE; | |
6664 | ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, | |
6665 | d11_lpbk, core_num); | |
6666 | if (ret != BCME_OK || !wait) { | |
6667 | DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__, | |
6668 | ret, wait)); | |
6669 | } else { | |
6670 | ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete); | |
6671 | if (ret < 0) | |
6672 | ret = BCME_NOTREADY; | |
6673 | } | |
6674 | ||
6675 | return ret; | |
6676 | ||
6677 | } | |
6678 | ||
6679 | bool | |
6680 | dhd_bus_is_multibp_capable(struct dhd_bus *bus) | |
6681 | { | |
6682 | return MULTIBP_CAP(bus->sih); | |
6683 | } | |
6684 | ||
6685 | #define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */ | |
6686 | #define PCIE_REV_FOR_4378B0 68 | |
6687 | ||
6688 | static int | |
6689 | dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) | |
6690 | { | |
6691 | int bcmerror = 0; | |
6692 | volatile uint32 *cr4_regs; | |
6693 | bool do_flr; | |
6694 | ||
6695 | if (!bus->sih) { | |
6696 | DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); | |
6697 | return BCME_ERROR; | |
6698 | } | |
6699 | ||
6700 | do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) && | |
6701 | (bus->sih->buscorerev != PCIE_REV_FOR_4378B0)); | |
6702 | ||
6703 | if (MULTIBP_ENAB(bus->sih) && !do_flr) { | |
6704 | dhd_bus_pcie_pwr_req(bus); | |
6705 | } | |
6706 | ||
6707 | /* To enter download state, disable ARM and reset SOCRAM. | |
6708 | * To exit download state, simply reset ARM (default is RAM boot). | |
6709 | */ | |
6710 | if (enter) { | |
6711 | ||
6712 | /* Make sure BAR1 maps to backplane address 0 */ | |
6713 | dhdpcie_setbar1win(bus, 0x00000000); | |
6714 | bus->alp_only = TRUE; | |
6715 | ||
6716 | /* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */ | |
6717 | cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0); | |
6718 | ||
6719 | if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && | |
6720 | !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) && | |
6721 | !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { | |
6722 | DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); | |
6723 | bcmerror = BCME_ERROR; | |
6724 | goto fail; | |
6725 | } | |
6726 | ||
6727 | if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { | |
6728 | /* Halt ARM & remove reset */ | |
6729 | si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); | |
6730 | if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) { | |
6731 | DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__)); | |
6732 | bcmerror = BCME_ERROR; | |
6733 | goto fail; | |
6734 | } | |
6735 | si_core_reset(bus->sih, 0, 0); | |
6736 | /* reset the last 4 bytes of RAM (used for the shared area) */ | |
6737 | dhdpcie_init_shared_addr(bus); | |
6738 | } else if (cr4_regs == NULL) { /* no CR4 present on chip */ | |
6739 | si_core_disable(bus->sih, 0); | |
6740 | ||
6741 | if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { | |
6742 | DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); | |
6743 | bcmerror = BCME_ERROR; | |
6744 | goto fail; | |
6745 | } | |
6746 | ||
6747 | si_core_reset(bus->sih, 0, 0); | |
6748 | ||
6749 | /* Clear the last word (top 4 bytes) of RAM */ | |
6750 | if (bus->ramsize) { | |
6751 | uint32 zeros = 0; | |
6752 | if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4, | |
6753 | (uint8*)&zeros, 4) < 0) { | |
6754 | bcmerror = BCME_ERROR; | |
6755 | goto fail; | |
6756 | } | |
6757 | } | |
6758 | } else { | |
6759 | /* For CR4, | |
6760 | * Halt ARM | |
6761 | * Remove ARM reset | |
6762 | * Read RAM base address [0x18_0000] | |
6763 | * [next] Download firmware | |
6764 | * [done at else] Populate the reset vector | |
6765 | * [done at else] Remove ARM halt | |
6766 | */ | |
6767 | /* Halt ARM & remove reset */ | |
6768 | si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); | |
6769 | if (BCM43602_CHIP(bus->sih->chip)) { | |
6770 | W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5); | |
6771 | W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); | |
6772 | W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7); | |
6773 | W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); | |
6774 | } | |
6775 | /* reset the last 4 bytes of RAM (used for the shared area) */ | |
6776 | dhdpcie_init_shared_addr(bus); | |
6777 | } | |
6778 | } else { | |
6779 | if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { | |
6780 | /* write vars */ | |
6781 | if ((bcmerror = dhdpcie_bus_write_vars(bus))) { | |
6782 | DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); | |
6783 | goto fail; | |
6784 | } | |
6785 | /* write random numbers to sysmem for the purpose of | |
6786 | * randomizing heap address space. | |
6787 | */ | |
6788 | if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) { | |
6789 | DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n", | |
6790 | __FUNCTION__)); | |
6791 | goto fail; | |
6792 | } | |
6793 | /* switch back to arm core again */ | |
6794 | if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { | |
6795 | DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__)); | |
6796 | bcmerror = BCME_ERROR; | |
6797 | goto fail; | |
6798 | } | |
6799 | /* write address 0 with reset instruction */ | |
6800 | bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, | |
6801 | (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); | |
6802 | /* now remove reset and halt and continue to run CA7 */ | |
6803 | } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { | |
6804 | if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { | |
6805 | DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); | |
6806 | bcmerror = BCME_ERROR; | |
6807 | goto fail; | |
6808 | } | |
6809 | ||
6810 | if (!si_iscoreup(bus->sih)) { | |
6811 | DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); | |
6812 | bcmerror = BCME_ERROR; | |
6813 | goto fail; | |
6814 | } | |
6815 | ||
6816 | /* Enable remap before ARM reset but after vars. | |
6817 | * No backplane access in remap mode | |
6818 | */ | |
6819 | if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && | |
6820 | !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { | |
6821 | DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); | |
6822 | bcmerror = BCME_ERROR; | |
6823 | goto fail; | |
6824 | } | |
6825 | ||
6826 | if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && | |
6827 | !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { | |
6828 | DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); | |
6829 | bcmerror = BCME_ERROR; | |
6830 | goto fail; | |
6831 | } | |
6832 | } else { | |
6833 | if (BCM43602_CHIP(bus->sih->chip)) { | |
6834 | /* Firmware crashes on SOCSRAM access when core is in reset */ | |
6835 | if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { | |
6836 | DHD_ERROR(("%s: Failed to find SOCRAM core!\n", | |
6837 | __FUNCTION__)); | |
6838 | bcmerror = BCME_ERROR; | |
6839 | goto fail; | |
6840 | } | |
6841 | si_core_reset(bus->sih, 0, 0); | |
6842 | si_setcore(bus->sih, ARMCR4_CORE_ID, 0); | |
6843 | } | |
6844 | ||
6845 | /* write vars */ | |
6846 | if ((bcmerror = dhdpcie_bus_write_vars(bus))) { | |
6847 | DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); | |
6848 | goto fail; | |
6849 | } | |
6850 | ||
6851 | /* write a random number to TCM for the purpose of | |
6852 | * randomizing heap address space. | |
6853 | */ | |
6854 | if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) { | |
6855 | DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n", | |
6856 | __FUNCTION__)); | |
6857 | goto fail; | |
6858 | } | |
6859 | ||
6860 | /* switch back to arm core again */ | |
6861 | if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { | |
6862 | DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); | |
6863 | bcmerror = BCME_ERROR; | |
6864 | goto fail; | |
6865 | } | |
6866 | ||
6867 | /* write address 0 with reset instruction */ | |
6868 | bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, | |
6869 | (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); | |
6870 | ||
6871 | if (bcmerror == BCME_OK) { | |
6872 | uint32 tmp; | |
6873 | ||
6874 | bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, | |
6875 | (uint8 *)&tmp, sizeof(tmp)); | |
6876 | ||
6877 | if (bcmerror == BCME_OK && tmp != bus->resetinstr) { | |
6878 | DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", | |
6879 | __FUNCTION__, bus->resetinstr)); | |
6880 | DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", | |
6881 | __FUNCTION__, tmp)); | |
6882 | bcmerror = BCME_ERROR; | |
6883 | goto fail; | |
6884 | } | |
6885 | } | |
6886 | ||
6887 | /* now remove reset and halt and continue to run CR4 */ | |
6888 | } | |
6889 | ||
6890 | si_core_reset(bus->sih, 0, 0); | |
6891 | ||
6892 | /* Allow HT Clock now that the ARM is running. */ | |
6893 | bus->alp_only = FALSE; | |
6894 | ||
6895 | bus->dhd->busstate = DHD_BUS_LOAD; | |
6896 | } | |
6897 | ||
6898 | fail: | |
6899 | /* Always return to PCIE core */ | |
6900 | si_setcore(bus->sih, PCIE2_CORE_ID, 0); | |
6901 | ||
6902 | if (MULTIBP_ENAB(bus->sih) && !do_flr) { | |
6903 | dhd_bus_pcie_pwr_req_clear(bus); | |
6904 | } | |
6905 | ||
6906 | return bcmerror; | |
6907 | } /* dhdpcie_bus_download_state */ | |
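/*
 * Illustrative call sequence around the download-state helper above, as used
 * during firmware load (a condensed sketch; the image download step is only a
 * placeholder and 'nvram'/'len' are hypothetical arguments):
 *
 *   dhdpcie_bus_download_state(bus, TRUE);   // halt ARM, reset SOCRAM/sysmem
 *   // ... write the firmware image into RAM, e.g. via dhdpcie_bus_membytes()
 *   dhdpcie_downloadvars(bus, nvram, len);   // stage NVRAM vars in bus->vars
 *   dhdpcie_bus_download_state(bus, FALSE);  // write vars + reset vector, run ARM
 */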
6908 | ||
6909 | static int | |
6910 | dhdpcie_bus_write_vars(dhd_bus_t *bus) | |
6911 | { | |
6912 | int bcmerror = 0; | |
6913 | uint32 varsize, phys_size; | |
6914 | uint32 varaddr; | |
6915 | uint8 *vbuffer; | |
6916 | uint32 varsizew; | |
6917 | #ifdef DHD_DEBUG | |
6918 | uint8 *nvram_ularray; | |
6919 | #endif /* DHD_DEBUG */ | |
6920 | ||
6921 | /* Even if there are no vars to be written, we still need to set the ramsize. */ | |
6922 | varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0; | |
6923 | varaddr = (bus->ramsize - 4) - varsize; | |
6924 | ||
6925 | varaddr += bus->dongle_ram_base; | |
6926 | ||
6927 | if (bus->vars) { | |
6928 | ||
6929 | vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); | |
6930 | if (!vbuffer) | |
6931 | return BCME_NOMEM; | |
6932 | ||
6933 | bzero(vbuffer, varsize); | |
6934 | bcopy(bus->vars, vbuffer, bus->varsz); | |
6935 | /* Write the vars list */ | |
6936 | bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); | |
6937 | ||
6938 | /* Read back and verify (currently only under DHD_DEBUG) */ | |
6939 | #ifdef DHD_DEBUG | |
6940 | /* Verify NVRAM bytes */ | |
6941 | DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize)); | |
6942 | nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); | |
6943 | if (!nvram_ularray) { | |
6944 | MFREE(bus->dhd->osh, vbuffer, varsize); | |
6945 | return BCME_NOMEM; | |
6946 | } | |
6947 | ||
6948 | /* Upload image to verify downloaded contents. */ | |
6949 | memset(nvram_ularray, 0xaa, varsize); | |
6950 | ||
6951 | /* Read the vars list to temp buffer for comparison */ | |
6952 | bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); | |
6953 | if (bcmerror) { | |
6954 | DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", | |
6955 | __FUNCTION__, bcmerror, varsize, varaddr)); | |
6956 | } | |
6957 | ||
6958 | /* Compare the original NVRAM with the copy read back from RAM */ | |
6959 | if (memcmp(vbuffer, nvram_ularray, varsize)) { | |
6960 | DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); | |
6961 | } else | |
6962 | DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", | |
6963 | __FUNCTION__)); | |
6964 | ||
6965 | MFREE(bus->dhd->osh, nvram_ularray, varsize); | |
6966 | #endif /* DHD_DEBUG */ | |
6967 | ||
6968 | MFREE(bus->dhd->osh, vbuffer, varsize); | |
6969 | } | |
6970 | ||
6971 | phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; | |
6972 | ||
6973 | phys_size += bus->dongle_ram_base; | |
6974 | ||
6975 | /* adjust to the user specified RAM */ | |
6976 | DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__, | |
6977 | phys_size, bus->ramsize)); | |
6978 | DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__, | |
6979 | varaddr, varsize)); | |
6980 | varsize = ((phys_size - 4) - varaddr); | |
6981 | ||
6982 | /* | |
6983 | * Determine the length token: | |
6984 | * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. | |
6985 | */ | |
6986 | if (bcmerror) { | |
6987 | varsizew = 0; | |
6988 | bus->nvram_csm = varsizew; | |
6989 | } else { | |
6990 | varsizew = varsize / 4; | |
6991 | varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); | |
6992 | bus->nvram_csm = varsizew; | |
6993 | varsizew = htol32(varsizew); | |
6994 | } | |
6995 | ||
6996 | DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew)); | |
6997 | ||
6998 | /* Write the length token to the last word */ | |
6999 | bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), | |
7000 | (uint8*)&varsizew, 4); | |
7001 | ||
7002 | return bcmerror; | |
7003 | } /* dhdpcie_bus_write_vars */ | |
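/*
 * Worked example of the length token written above (illustrative numbers):
 * with varsize = 0x100 bytes, varsizew = 0x100 / 4 = 0x40 words, so the token
 * stored in the last word of RAM is
 *
 *   (~0x40 << 16) | (0x40 & 0xFFFF) = 0xFFBF0040
 *
 * i.e. the NVRAM word count in the low 16 bits and its complement in the high
 * 16 bits, serving as the checksum described in the comment above.
 */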
7004 | ||
7005 | int | |
7006 | dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) | |
7007 | { | |
7008 | int bcmerror = BCME_OK; | |
7009 | ||
7010 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
7011 | ||
7012 | /* Basic sanity checks */ | |
7013 | if (bus->dhd->up) { | |
7014 | bcmerror = BCME_NOTDOWN; | |
7015 | goto err; | |
7016 | } | |
7017 | if (!len) { | |
7018 | bcmerror = BCME_BUFTOOSHORT; | |
7019 | goto err; | |
7020 | } | |
7021 | ||
7022 | /* Free the old ones and replace with passed variables */ | |
7023 | if (bus->vars) | |
7024 | MFREE(bus->dhd->osh, bus->vars, bus->varsz); | |
7025 | ||
7026 | bus->vars = MALLOC(bus->dhd->osh, len); | |
7027 | bus->varsz = bus->vars ? len : 0; | |
7028 | if (bus->vars == NULL) { | |
7029 | bcmerror = BCME_NOMEM; | |
7030 | goto err; | |
7031 | } | |
7032 | ||
7033 | /* Copy the passed variables, which should include the terminating double-null */ | |
7034 | bcopy(arg, bus->vars, bus->varsz); | |
7035 | ||
7036 | #ifdef DHD_USE_SINGLE_NVRAM_FILE | |
7037 | if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { | |
7038 | char *sp = NULL; | |
7039 | char *ep = NULL; | |
7040 | int i; | |
7041 | char tag[2][8] = {"ccode=", "regrev="}; | |
7042 | ||
7043 | /* Find ccode and regrev info */ | |
7044 | for (i = 0; i < 2; i++) { | |
7045 | sp = strnstr(bus->vars, tag[i], bus->varsz); | |
7046 | if (!sp) { | |
7047 | DHD_ERROR(("%s: Could not find ccode/regrev info in the nvram %s\n", | |
7048 | __FUNCTION__, bus->nv_path)); | |
7049 | bcmerror = BCME_ERROR; | |
7050 | goto err; | |
7051 | } | |
7052 | sp = strchr(sp, '='); | |
7053 | ep = strchr(sp, '\0'); | |
7054 | /* We assume that the string length of both the ccode and | |
7055 | * regrev values does not exceed WLC_CNTRY_BUF_SZ | |
7056 | */ | |
7057 | if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) { | |
7058 | sp++; | |
7059 | while (*sp != '\0') { | |
7060 | DHD_INFO(("%s: parse '%s', current sp = '%c'\n", | |
7061 | __FUNCTION__, tag[i], *sp)); | |
7062 | *sp++ = '0'; | |
7063 | } | |
7064 | } else { | |
7065 | DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n", | |
7066 | __FUNCTION__, tag[i])); | |
7067 | bcmerror = BCME_ERROR; | |
7068 | goto err; | |
7069 | } | |
7070 | } | |
7071 | } | |
7072 | #endif /* DHD_USE_SINGLE_NVRAM_FILE */ | |
7073 | ||
7074 | err: | |
7075 | return bcmerror; | |
7076 | } | |
7077 | ||
7078 | /* loop through the capability list and see if the pcie capability exists */ | |
7079 | uint8 | |
7080 | dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id) | |
7081 | { | |
7082 | uint8 cap_id; | |
7083 | uint8 cap_ptr = 0; | |
7084 | uint8 byte_val; | |
7085 | ||
7086 | /* check for Header type 0 */ | |
7087 | byte_val = read_pci_cfg_byte(PCI_CFG_HDR); | |
7088 | if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) { | |
7089 | DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__)); | |
7090 | goto end; | |
7091 | } | |
7092 | ||
7093 | /* check if the capability pointer field exists */ | |
7094 | byte_val = read_pci_cfg_byte(PCI_CFG_STAT); | |
7095 | if (!(byte_val & PCI_CAPPTR_PRESENT)) { | |
7096 | DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__)); | |
7097 | goto end; | |
7098 | } | |
7099 | ||
7100 | cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR); | |
7101 | /* check if the capability pointer is 0x00 */ | |
7102 | if (cap_ptr == 0x00) { | |
7103 | DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__)); | |
7104 | goto end; | |
7105 | } | |
7106 | ||
7107 | /* loop through the capability list and see if the requested capability exists */ | |
7108 | ||
7109 | cap_id = read_pci_cfg_byte(cap_ptr); | |
7110 | ||
7111 | while (cap_id != req_cap_id) { | |
7112 | cap_ptr = read_pci_cfg_byte((cap_ptr + 1)); | |
7113 | if (cap_ptr == 0x00) break; | |
7114 | cap_id = read_pci_cfg_byte(cap_ptr); | |
7115 | } | |
7116 | ||
7117 | end: | |
7118 | return cap_ptr; | |
7119 | } | |
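/*
 * Hedged usage sketch for the capability walk above: callers pass a PCI
 * capability ID and receive its config-space offset, or 0 if the capability
 * is absent, as the PM/PCIe helpers below do.  For example:
 *
 *   uint8 pm_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
 *   if (pm_cap)
 *       pme_csr = OSL_PCI_READ_CONFIG(osh, pm_cap + PME_CSR_OFFSET, sizeof(uint32));
 */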
7120 | ||
7121 | void | |
7122 | dhdpcie_pme_active(osl_t *osh, bool enable) | |
7123 | { | |
7124 | uint8 cap_ptr; | |
7125 | uint32 pme_csr; | |
7126 | ||
7127 | cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); | |
7128 | ||
7129 | if (!cap_ptr) { | |
7130 | DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); | |
7131 | return; | |
7132 | } | |
7133 | ||
7134 | pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32)); | |
7135 | DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr)); | |
7136 | ||
7137 | pme_csr |= PME_CSR_PME_STAT; | |
7138 | if (enable) { | |
7139 | pme_csr |= PME_CSR_PME_EN; | |
7140 | } else { | |
7141 | pme_csr &= ~PME_CSR_PME_EN; | |
7142 | } | |
7143 | ||
7144 | OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr); | |
7145 | } | |
7146 | ||
7147 | bool | |
7148 | dhdpcie_pme_cap(osl_t *osh) | |
7149 | { | |
7150 | uint8 cap_ptr; | |
7151 | uint32 pme_cap; | |
7152 | ||
7153 | cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); | |
7154 | ||
7155 | if (!cap_ptr) { | |
7156 | DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); | |
7157 | return FALSE; | |
7158 | } | |
7159 | ||
7160 | pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32)); | |
7161 | ||
7162 | DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap)); | |
7163 | ||
7164 | return ((pme_cap & PME_CAP_PM_STATES) != 0); | |
7165 | } | |
7166 | ||
7167 | uint32 | |
7168 | dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val) | |
7169 | { | |
7170 | ||
7171 | uint8 pcie_cap; | |
7172 | uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ | |
7173 | uint32 reg_val; | |
7174 | ||
7175 | pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); | |
7176 | ||
7177 | if (!pcie_cap) { | |
7178 | DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); | |
7179 | return 0; | |
7180 | } | |
7181 | ||
7182 | lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; | |
7183 | ||
7184 | /* set operation */ | |
7185 | if (mask) { | |
7186 | /* read */ | |
7187 | reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); | |
7188 | ||
7189 | /* modify */ | |
7190 | reg_val &= ~mask; | |
7191 | reg_val |= (mask & val); | |
7192 | ||
7193 | /* write */ | |
7194 | OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); | |
7195 | } | |
7196 | return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); | |
7197 | } | |
7198 | ||
7199 | uint8 | |
7200 | dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val) | |
7201 | { | |
7202 | uint8 pcie_cap; | |
7203 | uint32 reg_val; | |
7204 | uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ | |
7205 | ||
7206 | pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); | |
7207 | ||
7208 | if (!pcie_cap) { | |
7209 | DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); | |
7210 | return 0; | |
7211 | } | |
7212 | ||
7213 | lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; | |
7214 | ||
7215 | reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); | |
7216 | /* set operation */ | |
7217 | if (mask) { | |
7218 | if (val) | |
7219 | reg_val |= PCIE_CLKREQ_ENAB; | |
7220 | else | |
7221 | reg_val &= ~PCIE_CLKREQ_ENAB; | |
7222 | OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); | |
7223 | reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); | |
7224 | } | |
7225 | if (reg_val & PCIE_CLKREQ_ENAB) | |
7226 | return 1; | |
7227 | else | |
7228 | return 0; | |
7229 | } | |
7230 | ||
7231 | void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) | |
7232 | { | |
7233 | dhd_bus_t *bus; | |
7234 | uint64 current_time = OSL_LOCALTIME_NS(); | |
7235 | ||
7236 | if (!dhd) { | |
7237 | DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); | |
7238 | return; | |
7239 | } | |
7240 | ||
7241 | bus = dhd->bus; | |
7242 | if (!bus) { | |
7243 | DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); | |
7244 | return; | |
7245 | } | |
7246 | ||
7247 | bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n"); | |
7248 | bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" | |
7249 | "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" | |
7250 | "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n", | |
7251 | bus->resume_intr_enable_count, bus->dpc_intr_enable_count, | |
7252 | bus->isr_intr_disable_count, bus->suspend_intr_disable_count, | |
7253 | bus->dpc_return_busdown_count, bus->non_ours_irq_count); | |
7254 | #ifdef BCMPCIE_OOB_HOST_WAKE | |
7255 | bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu" | |
7256 | " oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT | |
7257 | " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT | |
7258 | " oob_irq_enabled=%d oob_gpio_level=%d\n", | |
7259 | bus->oob_intr_count, bus->oob_intr_enable_count, | |
7260 | bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus), | |
7261 | GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time), | |
7262 | GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus), | |
7263 | dhdpcie_get_oob_irq_level()); | |
7264 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
7265 | bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT | |
7266 | " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT | |
7267 | " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n" | |
7268 | "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT | |
7269 | " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT | |
7270 | " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT | |
7271 | "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n" | |
7272 | "last_d3_inform_time="SEC_USEC_FMT"\n", | |
7273 | GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time), | |
7274 | GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time), | |
7275 | GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time), | |
7276 | GET_SEC_USEC(bus->last_process_ctrlbuf_time), | |
7277 | GET_SEC_USEC(bus->last_process_flowring_time), | |
7278 | GET_SEC_USEC(bus->last_process_txcpl_time), | |
7279 | GET_SEC_USEC(bus->last_process_rxcpl_time), | |
7280 | GET_SEC_USEC(bus->last_process_infocpl_time), | |
7281 | GET_SEC_USEC(bus->last_process_edl_time), | |
7282 | GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time), | |
7283 | GET_SEC_USEC(bus->last_d3_inform_time)); | |
7284 | ||
7285 | bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time=" | |
7286 | SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time=" | |
7287 | SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time), | |
7288 | GET_SEC_USEC(bus->last_suspend_end_time), | |
7289 | GET_SEC_USEC(bus->last_resume_start_time), | |
7290 | GET_SEC_USEC(bus->last_resume_end_time)); | |
7291 | ||
7292 | #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) | |
7293 | bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT | |
7294 | " logtrace_thread_sem_down_time="SEC_USEC_FMT | |
7295 | "\nlogtrace_thread_flush_time="SEC_USEC_FMT | |
7296 | " logtrace_thread_unexpected_break_time="SEC_USEC_FMT | |
7297 | "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n", | |
7298 | GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), | |
7299 | GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), | |
7300 | GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), | |
7301 | GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time), | |
7302 | GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)); | |
7303 | #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ | |
7304 | } | |
7305 | ||
7306 | void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) | |
7307 | { | |
7308 | uint32 intstatus = 0; | |
7309 | uint32 intmask = 0; | |
7310 | uint32 d2h_db0 = 0; | |
7311 | uint32 d2h_mb_data = 0; | |
7312 | ||
7313 | intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
7314 | dhd->bus->pcie_mailbox_int, 0, 0); | |
7315 | intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
7316 | dhd->bus->pcie_mailbox_mask, 0, 0); | |
7317 | d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); | |
7318 | dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); | |
7319 | ||
7320 | bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", | |
7321 | intstatus, intmask, d2h_db0); | |
7322 | bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", | |
7323 | d2h_mb_data, dhd->bus->def_intmask); | |
7324 | } | |
7325 | /** Add bus dump output to a buffer */ | |
7326 | void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) | |
7327 | { | |
7328 | uint16 flowid; | |
7329 | int ix = 0; | |
7330 | flow_ring_node_t *flow_ring_node; | |
7331 | flow_info_t *flow_info; | |
7332 | #ifdef TX_STATUS_LATENCY_STATS | |
7333 | uint8 ifindex; | |
7334 | if_flow_lkup_t *if_flow_lkup; | |
7335 | dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS]; | |
7336 | #endif /* TX_STATUS_LATENCY_STATS */ | |
7337 | ||
7338 | if (dhdp->busstate != DHD_BUS_DATA) | |
7339 | return; | |
7340 | ||
7341 | #ifdef TX_STATUS_LATENCY_STATS | |
7342 | memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency)); | |
7343 | #endif /* TX_STATUS_LATENCY_STATS */ | |
7344 | #ifdef DHD_WAKE_STATUS | |
7345 | bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", | |
7346 | bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake, | |
7347 | dhdp->bus->wake_counts.rcwake); | |
7348 | #ifdef DHD_WAKE_RX_STATUS | |
7349 | bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n", | |
7350 | dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast, | |
7351 | dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp); | |
7352 | bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", | |
7353 | dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6, | |
7354 | dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other); | |
7355 | bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", | |
7356 | dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na, | |
7357 | dhdp->bus->wake_counts.rx_icmpv6_ns); | |
7358 | #endif /* DHD_WAKE_RX_STATUS */ | |
7359 | #ifdef DHD_WAKE_EVENT_STATUS | |
7360 | for (flowid = 0; flowid < WLC_E_LAST; flowid++) | |
7361 | if (dhdp->bus->wake_counts.rc_event[flowid] != 0) | |
7362 | bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid), | |
7363 | dhdp->bus->wake_counts.rc_event[flowid]); | |
7364 | bcm_bprintf(strbuf, "\n"); | |
7365 | #endif /* DHD_WAKE_EVENT_STATUS */ | |
7366 | #endif /* DHD_WAKE_STATUS */ | |
7367 | ||
7368 | dhd_prot_print_info(dhdp, strbuf); | |
7369 | dhd_dump_intr_registers(dhdp, strbuf); | |
7370 | dhd_dump_intr_counters(dhdp, strbuf); | |
7371 | bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", | |
7372 | dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); | |
7373 | bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); | |
7374 | #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS | |
7375 | bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n", | |
7376 | dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings); | |
7377 | #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */ | |
7378 | bcm_bprintf(strbuf, | |
7379 | "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ", | |
7380 | "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen", | |
7381 | " Overflows", " RD", " WR"); | |
7382 | ||
7383 | #ifdef TX_STATUS_LATENCY_STATS | |
7384 | /* Average Tx status/Completion Latency in micro secs */ | |
7385 | bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us"); | |
7386 | #endif /* TX_STATUS_LATENCY_STATS */ | |
7387 | ||
7388 | bcm_bprintf(strbuf, "\n"); | |
7389 | ||
7390 | for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { | |
7391 | flow_ring_node = DHD_FLOW_RING(dhdp, flowid); | |
7392 | if (!flow_ring_node->active) | |
7393 | continue; | |
7394 | ||
7395 | flow_info = &flow_ring_node->flow_info; | |
7396 | bcm_bprintf(strbuf, | |
7397 | "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++, | |
7398 | flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, | |
7399 | MAC2STRDBG(flow_info->da), | |
7400 | DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), | |
7401 | DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), | |
7402 | DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)), | |
7403 | DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); | |
7404 | dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, | |
7405 | "%4d %4d "); | |
7406 | ||
7407 | #ifdef TX_STATUS_LATENCY_STATS | |
7408 | bcm_bprintf(strbuf, "%16d %16d ", | |
7409 | flow_info->num_tx_pkts, | |
7410 | flow_info->num_tx_status ? | |
7411 | DIV_U64_BY_U64(flow_info->cum_tx_status_latency, | |
7412 | flow_info->num_tx_status) : 0); | |
7413 | ||
7414 | ifindex = flow_info->ifindex; | |
7415 | ASSERT(ifindex < DHD_MAX_IFS); | |
7416 | if (ifindex < DHD_MAX_IFS) { | |
7417 | if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status; | |
7418 | if_tx_status_latency[ifindex].cum_tx_status_latency += | |
7419 | flow_info->cum_tx_status_latency; | |
7420 | } else { | |
7421 | DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n", | |
7422 | __FUNCTION__, ifindex, flowid)); | |
7423 | } | |
7424 | #endif /* TX_STATUS_LATENCY_STATS */ | |
7425 | bcm_bprintf(strbuf, "\n"); | |
7426 | } | |
7427 | ||
7428 | #ifdef TX_STATUS_LATENCY_STATS | |
7429 | bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus"); | |
7430 | if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; | |
7431 | for (ix = 0; ix < DHD_MAX_IFS; ix++) { | |
7432 | if (!if_flow_lkup[ix].status) { | |
7433 | continue; | |
7434 | } | |
7435 | bcm_bprintf(strbuf, "%2d %16d %16d\n", | |
7436 | ix, | |
7437 | if_tx_status_latency[ix].num_tx_status ? | |
7438 | DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency, | |
7439 | if_tx_status_latency[ix].num_tx_status): 0, | |
7440 | if_tx_status_latency[ix].num_tx_status); | |
7441 | } | |
7442 | #endif /* TX_STATUS_LATENCY_STATS */ | |
7443 | ||
7444 | #ifdef DHD_HP2P | |
7445 | if (dhdp->hp2p_capable) { | |
7446 | bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1"); | |
7447 | ||
7448 | for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) { | |
7449 | hp2p_info_t *hp2p_info; | |
7450 | int bin; | |
7451 | ||
7452 | hp2p_info = &dhdp->hp2p_info[flowid]; | |
7453 | if (hp2p_info->num_timer_start == 0) | |
7454 | continue; | |
7455 | ||
7456 | bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid); | |
7457 | bcm_bprintf(strbuf, "\n%s", "Bin"); | |
7458 | ||
7459 | for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) { | |
7460 | bcm_bprintf(strbuf, "\n%2d %20d %16d", bin, | |
7461 | hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]); | |
7462 | } | |
7463 | ||
7464 | bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0"); | |
7465 | bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid); | |
7466 | bcm_bprintf(strbuf, "\n%s", "Bin"); | |
7467 | ||
7468 | for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) { | |
7469 | bcm_bprintf(strbuf, "\n%d %20d", bin, | |
7470 | hp2p_info->rx_t0[bin]); | |
7471 | } | |
7472 | ||
7473 | bcm_bprintf(strbuf, "\n%s %16s %16s", | |
7474 | "Packet limit", "Timer limit", "Timer start"); | |
7475 | bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit, | |
7476 | hp2p_info->num_timer_limit, hp2p_info->num_timer_start); | |
7477 | } | |
7478 | ||
7479 | bcm_bprintf(strbuf, "\n"); | |
7480 | } | |
7481 | #endif /* DHD_HP2P */ | |
7482 | ||
7483 | bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); | |
7484 | bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); | |
7485 | bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); | |
7486 | if (dhdp->d2h_hostrdy_supported) { | |
7487 | bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count); | |
7488 | } | |
7489 | bcm_bprintf(strbuf, "d2h_intr_method -> %s\n", | |
7490 | dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"); | |
7491 | } | |
7492 | ||
7493 | #ifdef DNGL_AXI_ERROR_LOGGING | |
7494 | bool | |
7495 | dhd_axi_sig_match(dhd_pub_t *dhdp) | |
7496 | { | |
7497 | uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr); | |
7498 | ||
7499 | if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) { | |
7500 | DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__)); | |
7501 | return FALSE; | |
7502 | } | |
7503 | ||
7504 | DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n", | |
7505 | __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base, | |
7506 | dhdp->bus->dongle_ram_base + dhdp->bus->ramsize)); | |
7507 | if (axi_tcm_addr >= dhdp->bus->dongle_ram_base && | |
7508 | axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) { | |
7509 | uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr + | |
7510 | OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature))); | |
7511 | if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) { | |
7512 | return TRUE; | |
7513 | } else { | |
7514 | DHD_ERROR(("%s: No AXI signature: 0x%x\n", | |
7515 | __FUNCTION__, axi_signature)); | |
7516 | return FALSE; | |
7517 | } | |
7518 | } else { | |
7519 | DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__)); | |
7520 | return FALSE; | |
7521 | } | |
7522 | } | |
7523 | ||
7524 | void | |
7525 | dhd_axi_error(dhd_pub_t *dhdp) | |
7526 | { | |
7527 | dhd_axi_error_dump_t *axi_err_dump; | |
7528 | uint8 *axi_err_buf = NULL; | |
7529 | uint8 *p_axi_err = NULL; | |
7530 | uint32 axi_logbuf_addr; | |
7531 | uint32 axi_tcm_addr; | |
7532 | int err, size; | |
7533 | ||
7534 | OSL_DELAY(75000); | |
7535 | ||
7536 | axi_logbuf_addr = dhdp->axierror_logbuf_addr; | |
7537 | if (!axi_logbuf_addr) { | |
7538 | DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__)); | |
7539 | goto sched_axi; | |
7540 | } | |
7541 | ||
7542 | axi_err_dump = dhdp->axi_err_dump; | |
7543 | if (!axi_err_dump) { | |
7544 | goto sched_axi; | |
7545 | } | |
7546 | ||
7547 | if (!dhd_axi_sig_match(dhdp)) { | |
7548 | goto sched_axi; | |
7549 | } | |
7550 | ||
7551 | /* Reading AXI error data for SMMU fault */ | |
7552 | DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__)); | |
7553 | axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr); | |
7554 | size = sizeof(hnd_ext_trap_axi_error_v1_t); | |
7555 | axi_err_buf = MALLOCZ(dhdp->osh, size); | |
7556 | if (axi_err_buf == NULL) { | |
7557 | DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); | |
7558 | goto sched_axi; | |
7559 | } | |
7560 | ||
7561 | p_axi_err = axi_err_buf; | |
7562 | err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size); | |
7563 | if (err) { | |
7564 | DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", | |
7565 | __FUNCTION__, err, size, axi_tcm_addr)); | |
7566 | goto sched_axi; | |
7567 | } | |
7568 | ||
7569 | /* Dump data to Dmesg */ | |
7570 | dhd_log_dump_axi_error(axi_err_buf); | |
7571 | err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size); | |
7572 | if (err) { | |
7573 | DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n", | |
7574 | __FUNCTION__, err)); | |
7575 | } | |
7576 | ||
7577 | sched_axi: | |
7578 | if (axi_err_buf) { | |
7579 | MFREE(dhdp->osh, axi_err_buf, size); | |
7580 | } | |
7581 | dhd_schedule_axi_error_dump(dhdp, NULL); | |
7582 | } | |
7583 | ||
7584 | static void | |
7585 | dhd_log_dump_axi_error(uint8 *axi_err) | |
7586 | { | |
7587 | dma_dentry_v1_t dma_dentry; | |
7588 | dma_fifo_v1_t dma_fifo; | |
7589 | int i = 0, j = 0; | |
7590 | ||
7591 | if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) { | |
7592 | hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err; | |
7593 | DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature)); | |
7594 | DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version)); | |
7595 | DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length)); | |
7596 | DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n", | |
7597 | __FUNCTION__, axi_err_v1->dma_fifo_valid_count)); | |
7598 | DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n", | |
7599 | __FUNCTION__, axi_err_v1->axi_errorlog_status)); | |
7600 | DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n", | |
7601 | __FUNCTION__, axi_err_v1->axi_errorlog_core)); | |
7602 | DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n", | |
7603 | __FUNCTION__, axi_err_v1->axi_errorlog_hi)); | |
7604 | DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n", | |
7605 | __FUNCTION__, axi_err_v1->axi_errorlog_lo)); | |
7606 | DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n", | |
7607 | __FUNCTION__, axi_err_v1->axi_errorlog_id)); | |
7608 | ||
7609 | for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) { | |
7610 | dma_fifo = axi_err_v1->dma_fifo[i]; | |
7611 | DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid)); | |
7612 | DHD_ERROR(("%s: direction:%d : 0x%x\n", | |
7613 | __FUNCTION__, i, dma_fifo.direction)); | |
7614 | DHD_ERROR(("%s: index:%d : 0x%x\n", | |
7615 | __FUNCTION__, i, dma_fifo.index)); | |
7616 | DHD_ERROR(("%s: dpa:%d : 0x%x\n", | |
7617 | __FUNCTION__, i, dma_fifo.dpa)); | |
7618 | DHD_ERROR(("%s: desc_lo:%d : 0x%x\n", | |
7619 | __FUNCTION__, i, dma_fifo.desc_lo)); | |
7620 | DHD_ERROR(("%s: desc_hi:%d : 0x%x\n", | |
7621 | __FUNCTION__, i, dma_fifo.desc_hi)); | |
7622 | DHD_ERROR(("%s: din:%d : 0x%x\n", | |
7623 | __FUNCTION__, i, dma_fifo.din)); | |
7624 | DHD_ERROR(("%s: dout:%d : 0x%x\n", | |
7625 | __FUNCTION__, i, dma_fifo.dout)); | |
7626 | for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) { | |
7627 | dma_dentry = axi_err_v1->dma_fifo[i].dentry[j]; | |
7628 | DHD_ERROR(("%s: ctrl1:%d : 0x%x\n", | |
7629 | __FUNCTION__, i, dma_dentry.ctrl1)); | |
7630 | DHD_ERROR(("%s: ctrl2:%d : 0x%x\n", | |
7631 | __FUNCTION__, i, dma_dentry.ctrl2)); | |
7632 | DHD_ERROR(("%s: addrlo:%d : 0x%x\n", | |
7633 | __FUNCTION__, i, dma_dentry.addrlo)); | |
7634 | DHD_ERROR(("%s: addrhi:%d : 0x%x\n", | |
7635 | __FUNCTION__, i, dma_dentry.addrhi)); | |
7636 | } | |
7637 | } | |
7638 | } | |
7639 | else { | |
7640 | DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err))); | |
7641 | } | |
7642 | } | |
7643 | #endif /* DNGL_AXI_ERROR_LOGGING */ | |
7644 | ||
7645 | /** | |
7646 | * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their | |
7647 | * flow queue to their flow ring. | |
7648 | */ | |
7649 | static void | |
7650 | dhd_update_txflowrings(dhd_pub_t *dhd) | |
7651 | { | |
7652 | unsigned long flags; | |
7653 | dll_t *item, *next; | |
7654 | flow_ring_node_t *flow_ring_node; | |
7655 | struct dhd_bus *bus = dhd->bus; | |
7656 | ||
7657 | if (dhd_query_bus_erros(dhd)) { | |
7658 | return; | |
7659 | } | |
7660 | ||
7661 | /* Hold flowring_list_lock to ensure no race condition while accessing the List */ | |
7662 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
7663 | for (item = dll_head_p(&bus->flowring_active_list); | |
7664 | (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item)); | |
7665 | item = next) { | |
7666 | if (dhd->hang_was_sent) { | |
7667 | break; | |
7668 | } | |
7669 | ||
7670 | next = dll_next_p(item); | |
7671 | flow_ring_node = dhd_constlist_to_flowring(item); | |
7672 | ||
7673 | /* Ensure that flow_ring_node in the list is Not Null */ | |
7674 | ASSERT(flow_ring_node != NULL); | |
7675 | ||
7676 | /* Ensure that the flowring node has valid contents */ | |
7677 | ASSERT(flow_ring_node->prot_info != NULL); | |
7678 | ||
7679 | dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info); | |
7680 | } | |
7681 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
7682 | } | |
7683 | ||
7684 | /** Mailbox ringbell Function */ | |
7685 | static void | |
7686 | dhd_bus_gen_devmb_intr(struct dhd_bus *bus) | |
7687 | { | |
7688 | if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || | |
7689 | (bus->sih->buscorerev == 4)) { | |
7690 | DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__)); | |
7691 | return; | |
7692 | } | |
7693 | if (bus->db1_for_mb) { | |
7694 | /* this is a pcie core register, not the config register */ | |
7695 | DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__)); | |
7696 | if (DAR_PWRREQ(bus)) { | |
7697 | dhd_bus_pcie_pwr_req(bus); | |
7698 | } | |
7699 | si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), | |
7700 | ~0, 0x12345678); | |
7701 | } else { | |
7702 | DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__)); | |
7703 | dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); | |
7704 | dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); | |
7705 | } | |
7706 | } | |
7707 | ||
7708 | /* Upon receiving a mailbox interrupt, | |
7709 |  * the device traps if the H2D_FW_TRAP bit is set | |
7710 |  * in the mailbox location. | |
7711 |  */ | |
7712 | static void | |
7713 | dhdpcie_fw_trap(dhd_bus_t *bus) | |
7714 | { | |
7715 | /* Send the mailbox data and generate mailbox intr. */ | |
7716 | dhdpcie_send_mb_data(bus, H2D_FW_TRAP); | |
7717 | /* For FWs that cannot interpret H2D_FW_TRAP */ | |
7718 | (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0); | |
7719 | } | |
7720 | ||
7721 | /** mailbox doorbell ring function */ | |
7722 | void | |
7723 | dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) | |
7724 | { | |
7725 | /* Skip after sending D3_INFORM */ | |
7726 | if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { | |
7727 | DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", | |
7728 | __FUNCTION__, bus->bus_low_power_state)); | |
7729 | return; | |
7730 | } | |
7731 | ||
7732 | /* Skip in the case of link down */ | |
7733 | if (bus->is_linkdown) { | |
7734 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
7735 | return; | |
7736 | } | |
7737 | ||
7738 | if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || | |
7739 | (bus->sih->buscorerev == 4)) { | |
7740 | si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, | |
7741 | PCIE_INTB, PCIE_INTB); | |
7742 | } else { | |
7743 | /* this is a pcie core register, not the config register */ | |
7744 | DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__)); | |
7745 | if (IDMA_ACTIVE(bus->dhd)) { | |
7746 | if (DAR_PWRREQ(bus)) { | |
7747 | dhd_bus_pcie_pwr_req(bus); | |
7748 | } | |
7749 | si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), | |
7750 | ~0, value); | |
7751 | } else { | |
7752 | if (DAR_PWRREQ(bus)) { | |
7753 | dhd_bus_pcie_pwr_req(bus); | |
7754 | } | |
7755 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
7756 | dhd_bus_db0_addr_get(bus), ~0, 0x12345678); | |
7757 | } | |
7758 | } | |
7759 | } | |
7760 | ||
7761 | /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */ | |
7762 | void | |
7763 | dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake) | |
7764 | { | |
7765 | /* this is a pcie core register, not the config register */ | |
7766 | /* Skip after sending D3_INFORM */ | |
7767 | if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { | |
7768 | DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", | |
7769 | __FUNCTION__, bus->bus_low_power_state)); | |
7770 | return; | |
7771 | } | |
7772 | ||
7773 | /* Skip in the case of link down */ | |
7774 | if (bus->is_linkdown) { | |
7775 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
7776 | return; | |
7777 | } | |
7778 | ||
7779 | DHD_INFO(("writing a door bell 2 to the device\n")); | |
7780 | if (DAR_PWRREQ(bus)) { | |
7781 | dhd_bus_pcie_pwr_req(bus); | |
7782 | } | |
7783 | si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), | |
7784 | ~0, value); | |
7785 | } | |
7786 | ||
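/**
 * Fast-path H2D doorbell 0 ring: writes 'value' directly to the memory-mapped
 * doorbell register. Skipped after D3_INFORM has been sent or when the PCIe
 * link is down. With DHD_DB0TS and a db0ts-capable dongle, the written value
 * carries a microsecond timestamp instead.
 */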
7787 | void | |
7788 | dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) | |
7789 | { | |
7790 | /* Skip after sending D3_INFORM */ | |
7791 | if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { | |
7792 | DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", | |
7793 | __FUNCTION__, bus->bus_low_power_state)); | |
7794 | return; | |
7795 | } | |
7796 | ||
7797 | /* Skip in the case of link down */ | |
7798 | if (bus->is_linkdown) { | |
7799 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
7800 | return; | |
7801 | } | |
7802 | ||
7803 | if (DAR_PWRREQ(bus)) { | |
7804 | dhd_bus_pcie_pwr_req(bus); | |
7805 | } | |
7806 | ||
7807 | #ifdef DHD_DB0TS | |
7808 | if (bus->dhd->db0ts_capable) { | |
7809 | uint64 ts; | |
7810 | ||
7811 | ts = local_clock(); | |
7812 | do_div(ts, 1000); | |
7813 | ||
7814 | value = htol32(ts & 0xFFFFFFFF); | |
7815 | DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value)); | |
7816 | } | |
7817 | #endif /* DHD_DB0TS */ | |
7818 | W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); | |
7819 | } | |
7820 | ||
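/**
 * Fast-path ring of the second H2D doorbell (DMA channel 2, used for
 * IDMA/IFRM): writes 'value' directly to the mapped register. Skipped after
 * D3_INFORM has been sent or when the PCIe link is down.
 */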
7821 | void | |
7822 | dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake) | |
7823 | { | |
7824 | /* Skip after sending D3_INFORM */ | |
7825 | if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { | |
7826 | DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", | |
7827 | __FUNCTION__, bus->bus_low_power_state)); | |
7828 | return; | |
7829 | } | |
7830 | ||
7831 | /* Skip in the case of link down */ | |
7832 | if (bus->is_linkdown) { | |
7833 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
7834 | return; | |
7835 | } | |
7836 | ||
7837 | if (DAR_PWRREQ(bus)) { | |
7838 | dhd_bus_pcie_pwr_req(bus); | |
7839 | } | |
7840 | W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value); | |
7841 | } | |
7842 | ||
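/**
 * Doorbell ring for old PCIe core revisions (2/4/6): asserts PCIE_INTB in the
 * mailbox interrupt register via a read-modify-write.
 */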
7843 | static void | |
7844 | dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) | |
7845 | { | |
7846 | uint32 w; | |
7847 | /* Skip after sending D3_INFORM */ | |
7848 | if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { | |
7849 | DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n", | |
7850 | __FUNCTION__, bus->bus_low_power_state)); | |
7851 | return; | |
7852 | } | |
7853 | ||
7854 | /* Skip in the case of link down */ | |
7855 | if (bus->is_linkdown) { | |
7856 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
7857 | return; | |
7858 | } | |
7859 | ||
7860 | w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; | |
7861 | W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); | |
7862 | } | |
7863 | ||
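/**
 * Select the H2D doorbell ring routine: old PCIe cores (rev 2/4/6) use the
 * mailbox interrupt register, newer cores use the fast doorbell 0 path when
 * the register address can be mapped; otherwise fall back to dhd_bus_ringbell().
 */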
7864 | dhd_mb_ring_t | |
7865 | dhd_bus_get_mbintr_fn(struct dhd_bus *bus) | |
7866 | { | |
7867 | if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || | |
7868 | (bus->sih->buscorerev == 4)) { | |
7869 | bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, | |
7870 | bus->pcie_mailbox_int); | |
7871 | if (bus->pcie_mb_intr_addr) { | |
7872 | bus->pcie_mb_intr_osh = si_osh(bus->sih); | |
7873 | return dhd_bus_ringbell_oldpcie; | |
7874 | } | |
7875 | } else { | |
7876 | bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, | |
7877 | dhd_bus_db0_addr_get(bus)); | |
7878 | if (bus->pcie_mb_intr_addr) { | |
7879 | bus->pcie_mb_intr_osh = si_osh(bus->sih); | |
7880 | return dhdpcie_bus_ringbell_fast; | |
7881 | } | |
7882 | } | |
7883 | return dhd_bus_ringbell; | |
7884 | } | |
7885 | ||
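/**
 * Select the ring routine for the second H2D doorbell (IDMA/IFRM): use the
 * fast path when the register address can be mapped, else dhd_bus_ringbell_2().
 */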
7886 | dhd_mb_ring_2_t | |
7887 | dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus) | |
7888 | { | |
7889 | bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, | |
7890 | dhd_bus_db0_addr_2_get(bus)); | |
7891 | if (bus->pcie_mb_intr_2_addr) { | |
7892 | bus->pcie_mb_intr_osh = si_osh(bus->sih); | |
7893 | return dhdpcie_bus_ringbell_2_fast; | |
7894 | } | |
7895 | return dhd_bus_ringbell_2; | |
7896 | } | |
7897 | ||
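/**
 * DPC (bottom half) for PCIe bus interrupts: processes mailbox events and
 * message rings via dhdpcie_bus_process_mailbox_intr(). Returns TRUE when more
 * work is pending and the DPC should be rescheduled; otherwise clears
 * intstatus and re-enables the host interrupt.
 */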
7898 | bool BCMFASTPATH | |
7899 | dhd_bus_dpc(struct dhd_bus *bus) | |
7900 | { | |
7901 | bool resched = FALSE; /* Flag indicating resched wanted */ | |
7902 | unsigned long flags; | |
7903 | ||
7904 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
7905 | ||
7906 | bus->dpc_entry_time = OSL_LOCALTIME_NS(); | |
7907 | ||
7908 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
7909 | /* Check only for DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS, | |
7910 |  * to avoid an IOCTL "Resumed On timeout" when an ioctl is waiting for a response | |
7911 |  * while rmmod is fired in parallel (which sets DHD_BUS_DOWN_IN_PROGRESS); | |
7912 |  * if we returned here in that case, the IOCTL response would never be handled. | |
7913 |  */ | |
7914 | if (bus->dhd->busstate == DHD_BUS_DOWN) { | |
7915 | DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); | |
7916 | bus->intstatus = 0; | |
7917 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
7918 | bus->dpc_return_busdown_count++; | |
7919 | return 0; | |
7920 | } | |
7921 | DHD_BUS_BUSY_SET_IN_DPC(bus->dhd); | |
7922 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
7923 | ||
7924 | #ifdef DHD_READ_INTSTATUS_IN_DPC | |
7925 | if (bus->ipend) { | |
7926 | bus->ipend = FALSE; | |
7927 | bus->intstatus = dhdpcie_bus_intstatus(bus); | |
7928 | /* Check if the interrupt is ours or not */ | |
7929 | if (bus->intstatus == 0) { | |
7930 | goto INTR_ON; | |
7931 | } | |
7932 | bus->intrcount++; | |
7933 | } | |
7934 | #endif /* DHD_READ_INTSTATUS_IN_DPC */ | |
7935 | ||
7936 | resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); | |
7937 | if (!resched) { | |
7938 | bus->intstatus = 0; | |
7939 | #ifdef DHD_READ_INTSTATUS_IN_DPC | |
7940 | INTR_ON: | |
7941 | #endif /* DHD_READ_INTSTATUS_IN_DPC */ | |
7942 | bus->dpc_intr_enable_count++; | |
7943 | /* For Linux, MacOS etc. (other than NDIS), re-enable the host interrupts | |
7944 |  * which have been disabled in dhdpcie_bus_isr() | |
7945 |  */ | |
7946 | dhdpcie_enable_irq(bus); /* Enable back interrupt!! */ | |
7947 | bus->dpc_exit_time = OSL_LOCALTIME_NS(); | |
7948 | } else { | |
7949 | bus->resched_dpc_time = OSL_LOCALTIME_NS(); | |
7950 | } | |
7951 | ||
7952 | bus->dpc_sched = resched; | |
7953 | ||
7954 | DHD_GENERAL_LOCK(bus->dhd, flags); | |
7955 | DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); | |
7956 | dhd_os_busbusy_wake(bus->dhd); | |
7957 | DHD_GENERAL_UNLOCK(bus->dhd, flags); | |
7958 | ||
7959 | return resched; | |
7960 | ||
7961 | } | |
7962 | ||
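/**
 * Send host-to-dongle mailbox data. When IPC rev >= 6 and the mailbox is not
 * used, the data is sent as an H2D control message; otherwise the host waits
 * (up to ~1 ms) for any pending mailbox value to be consumed, writes the new
 * value to the shared H2D_MB_DATA location and generates a device mailbox
 * interrupt. D0/D3 inform counters are updated for the corresponding messages.
 */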
7963 | int | |
7964 | dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) | |
7965 | { | |
7966 | uint32 cur_h2d_mb_data = 0; | |
7967 | ||
7968 | DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); | |
7969 | ||
7970 | if (bus->is_linkdown) { | |
7971 | DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); | |
7972 | return BCME_ERROR; | |
7973 | } | |
7974 | ||
7975 | if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) { | |
7976 | DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n", | |
7977 | h2d_mb_data)); | |
7978 | /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */ | |
7979 | { | |
7980 | if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) { | |
7981 | DHD_ERROR(("failure sending the H2D Mailbox message " | |
7982 | "to firmware\n")); | |
7983 | goto fail; | |
7984 | } | |
7985 | } | |
7986 | goto done; | |
7987 | } | |
7988 | ||
7989 | dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); | |
7990 | ||
7991 | if (cur_h2d_mb_data != 0) { | |
7992 | uint32 i = 0; | |
7993 | DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data)); | |
7994 | while ((i++ < 100) && cur_h2d_mb_data) { | |
7995 | OSL_DELAY(10); | |
7996 | dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); | |
7997 | } | |
7998 | if (i >= 100) { | |
7999 | DHD_ERROR(("%s : waited 1ms for the dngl " | |
8000 | "to ack the previous mb transaction\n", __FUNCTION__)); | |
8001 | DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n", | |
8002 | __FUNCTION__, cur_h2d_mb_data)); | |
8003 | } | |
8004 | } | |
8005 | ||
8006 | dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0); | |
8007 | dhd_bus_gen_devmb_intr(bus); | |
8008 | ||
8009 | done: | |
8010 | if (h2d_mb_data == H2D_HOST_D3_INFORM) { | |
8011 | DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); | |
8012 | bus->last_d3_inform_time = OSL_LOCALTIME_NS(); | |
8013 | bus->d3_inform_cnt++; | |
8014 | } | |
8015 | if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { | |
8016 | DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__)); | |
8017 | bus->d0_inform_in_use_cnt++; | |
8018 | } | |
8019 | if (h2d_mb_data == H2D_HOST_D0_INFORM) { | |
8020 | DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__)); | |
8021 | bus->d0_inform_cnt++; | |
8022 | } | |
8023 | return BCME_OK; | |
8024 | fail: | |
8025 | return BCME_ERROR; | |
8026 | } | |
8027 | ||
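/**
 * Handle a D3 ACK from the dongle: disable dongle interrupts (IntMask), clear
 * intstatus and disable the host IRQ, then mark the bus low power state as
 * D3-ACK-received and wake up the waiter (unless a D3 ACK timeout is being
 * induced for testing).
 */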
8028 | static void | |
8029 | dhd_bus_handle_d3_ack(dhd_bus_t *bus) | |
8030 | { | |
8031 | unsigned long flags_bus; | |
8032 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
8033 | bus->suspend_intr_disable_count++; | |
8034 | /* Disable dongle Interrupts Immediately after D3 */ | |
8035 | ||
8036 | /* For Linux, MacOS etc. (other than NDIS), along with disabling the | |
8037 |  * dongle interrupt by clearing the IntMask, directly disable the | |
8038 |  * interrupt on the host side as well. Also clear the intstatus | |
8039 |  * if it is set, to avoid unnecessary interrupts after D3 ACK. | |
8040 | */ | |
8041 | dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */ | |
8042 | dhdpcie_bus_clear_intstatus(bus); | |
8043 | dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */ | |
8044 | ||
8045 | if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) { | |
8046 | /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */ | |
8047 | bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED; | |
8048 | DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__)); | |
8049 | } | |
8050 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
8051 | /* Check for the D3 ACK induce flag, which is set by firing a dhd iovar to induce a D3 Ack timeout. | |
8052 |  * If the flag is set, the D3 ack wake is skipped, which results in a D3 Ack timeout. | |
8053 | */ | |
8054 | if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) { | |
8055 | bus->wait_for_d3_ack = 1; | |
8056 | dhd_os_d3ack_wake(bus->dhd); | |
8057 | } else { | |
8058 | DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__)); | |
8059 | } | |
8060 | } | |
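/**
 * Handle device-to-host mailbox data bits: a FW halt triggers trap processing
 * and a hang notification, deep sleep enter requests are acked (or the bus is
 * brought down if they arrive after a D3 ACK), and a D3 ACK is forwarded to
 * dhd_bus_handle_d3_ack().
 */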
8061 | void | |
8062 | dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) | |
8063 | { | |
8064 | if (MULTIBP_ENAB(bus->sih)) { | |
8065 | dhd_bus_pcie_pwr_req(bus); | |
8066 | } | |
8067 | ||
8068 | DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data)); | |
8069 | ||
8070 | if (d2h_mb_data & D2H_DEV_FWHALT) { | |
8071 | DHD_ERROR(("FW trap has happened\n")); | |
8072 | dhdpcie_checkdied(bus, NULL, 0); | |
8073 | dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO); | |
8074 | goto exit; | |
8075 | } | |
8076 | if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { | |
8077 | bool ds_acked = FALSE; | |
8078 | BCM_REFERENCE(ds_acked); | |
8079 | if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { | |
8080 | DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n")); | |
8081 | DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__)); | |
8082 | bus->dhd->busstate = DHD_BUS_DOWN; | |
8083 | goto exit; | |
8084 | } | |
8085 | /* what should we do */ | |
8086 | DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n")); | |
8087 | { | |
8088 | dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); | |
8089 | DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n")); | |
8090 | } | |
8091 | } | |
8092 | if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { | |
8093 | /* what should we do */ | |
8094 | DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n")); | |
8095 | } | |
8096 | if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) { | |
8097 | /* what should we do */ | |
8098 | DHD_INFO(("D2H_MB_DATA: D0 ACK\n")); | |
8099 | } | |
8100 | if (d2h_mb_data & D2H_DEV_D3_ACK) { | |
8101 | /* what should we do */ | |
8102 | DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n")); | |
8103 | if (!bus->wait_for_d3_ack) { | |
8104 | dhd_bus_handle_d3_ack(bus); | |
8105 | } | |
8106 | } | |
8107 | ||
8108 | exit: | |
8109 | if (MULTIBP_ENAB(bus->sih)) { | |
8110 | dhd_bus_pcie_pwr_req_clear(bus); | |
8111 | } | |
8112 | } | |
8113 | ||
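/**
 * Legacy D2H mailbox handler (IPC rev < 6): reads D2H_MB_DATA from the shared
 * area, validates and clears it, then handles the FW halt, deep sleep and
 * D3 ACK bits inline.
 */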
8114 | static void | |
8115 | dhdpcie_handle_mb_data(dhd_bus_t *bus) | |
8116 | { | |
8117 | uint32 d2h_mb_data = 0; | |
8118 | uint32 zero = 0; | |
8119 | ||
8120 | if (MULTIBP_ENAB(bus->sih)) { | |
8121 | dhd_bus_pcie_pwr_req(bus); | |
8122 | } | |
8123 | ||
8124 | dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); | |
8125 | if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) { | |
8126 | DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n", | |
8127 | __FUNCTION__, d2h_mb_data)); | |
8128 | goto exit; | |
8129 | } | |
8130 | ||
8131 | dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); | |
8132 | ||
8133 | DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data)); | |
8134 | if (d2h_mb_data & D2H_DEV_FWHALT) { | |
8135 | DHD_ERROR(("FW trap has happened\n")); | |
8136 | dhdpcie_checkdied(bus, NULL, 0); | |
8137 | /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ | |
8138 | goto exit; | |
8139 | } | |
8140 | if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { | |
8141 | /* what should we do */ | |
8142 | DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__)); | |
8143 | dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); | |
8144 | DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__)); | |
8145 | } | |
8146 | if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { | |
8147 | /* what should we do */ | |
8148 | DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__)); | |
8149 | } | |
8150 | if (d2h_mb_data & D2H_DEV_D3_ACK) { | |
8151 | /* what should we do */ | |
8152 | DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__)); | |
8153 | if (!bus->wait_for_d3_ack) { | |
8154 | dhd_bus_handle_d3_ack(bus); | |
8155 | } | |
8156 | } | |
8157 | ||
8158 | exit: | |
8159 | if (MULTIBP_ENAB(bus->sih)) { | |
8160 | dhd_bus_pcie_pwr_req_clear(bus); | |
8161 | } | |
8162 | } | |
8163 | ||
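/**
 * D2H mailbox handler for IPC rev >= 6: reads and clears D2H_MB_DATA from the
 * shared area and delegates the bit handling to dhd_bus_handle_mb_data().
 */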
8164 | static void | |
8165 | dhdpcie_read_handle_mb_data(dhd_bus_t *bus) | |
8166 | { | |
8167 | uint32 d2h_mb_data = 0; | |
8168 | uint32 zero = 0; | |
8169 | ||
8170 | if (bus->is_linkdown) { | |
8171 | DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); | |
8172 | return; | |
8173 | } | |
8174 | ||
8175 | if (MULTIBP_ENAB(bus->sih)) { | |
8176 | dhd_bus_pcie_pwr_req(bus); | |
8177 | } | |
8178 | ||
8179 | dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); | |
8180 | if (!d2h_mb_data) { | |
8181 | goto exit; | |
8182 | } | |
8183 | ||
8184 | dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); | |
8185 | ||
8186 | dhd_bus_handle_mb_data(bus, d2h_mb_data); | |
8187 | ||
8188 | exit: | |
8189 | if (MULTIBP_ENAB(bus->sih)) { | |
8190 | dhd_bus_pcie_pwr_req_clear(bus); | |
8191 | } | |
8192 | } | |
8193 | ||
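/**
 * Process a mailbox interrupt: old PCIe cores dispatch on I_BIT0/I_BIT1, newer
 * cores handle the D2H mailbox bits, skip ring processing once a D3 ACK has
 * been received, and otherwise read the message rings (always for MSI, only on
 * a matching intstatus for INTX). Returns TRUE when the DPC should be
 * rescheduled.
 */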
8194 | static bool | |
8195 | dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) | |
8196 | { | |
8197 | bool resched = FALSE; | |
8198 | unsigned long flags_bus; | |
8199 | ||
8200 | if (MULTIBP_ENAB(bus->sih)) { | |
8201 | dhd_bus_pcie_pwr_req(bus); | |
8202 | } | |
8203 | if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || | |
8204 | (bus->sih->buscorerev == 4)) { | |
8205 | /* Msg stream interrupt */ | |
8206 | if (intstatus & I_BIT1) { | |
8207 | resched = dhdpci_bus_read_frames(bus); | |
8208 | } else if (intstatus & I_BIT0) { | |
8209 | /* do nothing for Now */ | |
8210 | } | |
8211 | } else { | |
8212 | if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) | |
8213 | bus->api.handle_mb_data(bus); | |
8214 | ||
8215 | /* Do not process any rings after receiving D3_ACK */ | |
8216 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
8217 | if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) { | |
8218 | DHD_ERROR(("%s: D3 Ack Received. " | |
8219 | "Skip processing rest of ring buffers.\n", __FUNCTION__)); | |
8220 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
8221 | goto exit; | |
8222 | } | |
8223 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
8224 | ||
8225 | /* Validate intstatus only for INTX case */ | |
8226 | if ((bus->d2h_intr_method == PCIE_MSI) || | |
8227 | ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) { | |
8228 | #ifdef DHD_PCIE_NATIVE_RUNTIMEPM | |
8229 | if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) { | |
8230 | resched = dhdpci_bus_read_frames(bus); | |
8231 | pm_runtime_mark_last_busy(dhd_bus_to_dev(bus)); | |
8232 | pm_runtime_put_autosuspend(dhd_bus_to_dev(bus)); | |
8233 | } | |
8234 | #else | |
8235 | resched = dhdpci_bus_read_frames(bus); | |
8236 | #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ | |
8237 | } | |
8238 | } | |
8239 | ||
8240 | exit: | |
8241 | if (MULTIBP_ENAB(bus->sih)) { | |
8242 | dhd_bus_pcie_pwr_req_clear(bus); | |
8243 | } | |
8244 | return resched; | |
8245 | } | |
8246 | ||
8247 | #if defined(DHD_H2D_LOG_TIME_SYNC) | |
8248 | static void | |
8249 | dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus) | |
8250 | { | |
8251 | unsigned long time_elapsed; | |
8252 | ||
8253 | /* Poll for timeout value periodically */ | |
8254 | if ((bus->dhd->busstate == DHD_BUS_DATA) && | |
8255 | (bus->dhd->dhd_rte_time_sync_ms != 0) && | |
8256 | (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) { | |
8257 | time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count; | |
8258 | /* Compare time in milliseconds */ | |
8259 | if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) { | |
8260 | /* | |
8261 |  * It's fine if it has crossed the timeout value; no need to adjust the | |
8262 |  * elapsed time. | |
8263 | */ | |
8264 | bus->dhd_rte_time_sync_count += time_elapsed; | |
8265 | ||
8266 | /* Schedule deferred work. Work function will send IOVAR. */ | |
8267 | dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd); | |
8268 | } | |
8269 | } | |
8270 | } | |
8271 | #endif /* DHD_H2D_LOG_TIME_SYNC */ | |
8272 | ||
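/**
 * Drain device-to-host work: check the trap buffer first, then process control
 * completions, and (unless the bus has entered a low power state) update the
 * TX flow rings and process TX/RX/info (or EDL) completion rings within their
 * bounds. Returns TRUE if more work remains.
 */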
8273 | static bool | |
8274 | dhdpci_bus_read_frames(dhd_bus_t *bus) | |
8275 | { | |
8276 | bool more = FALSE; | |
8277 | unsigned long flags_bus; | |
8278 | ||
8279 | /* First check if there is a FW trap */ | |
8280 | if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) && | |
8281 | (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) { | |
8282 | #ifdef DNGL_AXI_ERROR_LOGGING | |
8283 | if (bus->dhd->axi_error) { | |
8284 | DHD_ERROR(("AXI Error happened\n")); | |
8285 | return FALSE; | |
8286 | } | |
8287 | #endif /* DNGL_AXI_ERROR_LOGGING */ | |
8288 | dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT); | |
8289 | return FALSE; | |
8290 | } | |
8291 | ||
8292 | /* There may be frames in both ctrl buf and data buf; check ctrl buf first */ | |
8293 | DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); | |
8294 | ||
8295 | dhd_prot_process_ctrlbuf(bus->dhd); | |
8296 | bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS(); | |
8297 | /* Unlock to give chance for resp to be handled */ | |
8298 | DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); | |
8299 | ||
8300 | /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */ | |
8301 | DHD_BUS_LOCK(bus->bus_lock, flags_bus); | |
8302 | if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) { | |
8303 | DHD_ERROR(("%s: Bus is in power save state (%d). " | |
8304 | "Skip processing rest of ring buffers.\n", | |
8305 | __FUNCTION__, bus->bus_low_power_state)); | |
8306 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
8307 | return FALSE; | |
8308 | } | |
8309 | DHD_BUS_UNLOCK(bus->bus_lock, flags_bus); | |
8310 | ||
8311 | DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); | |
8312 | /* update the flow ring cpls */ | |
8313 | dhd_update_txflowrings(bus->dhd); | |
8314 | bus->last_process_flowring_time = OSL_LOCALTIME_NS(); | |
8315 | ||
8316 | /* With heavy TX traffic, we could get a lot of TxStatus | |
8317 |  * so add a bound | |
8318 | */ | |
8319 | #ifdef DHD_HP2P | |
8320 | more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING); | |
8321 | #endif /* DHD_HP2P */ | |
8322 | more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING); | |
8323 | bus->last_process_txcpl_time = OSL_LOCALTIME_NS(); | |
8324 | ||
8325 | /* With heavy RX traffic, this routine could potentially spend some time | |
8326 |  * processing RX frames without an RX bound | |
8327 | */ | |
8328 | #ifdef DHD_HP2P | |
8329 | more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING); | |
8330 | #endif /* DHD_HP2P */ | |
8331 | more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING); | |
8332 | bus->last_process_rxcpl_time = OSL_LOCALTIME_NS(); | |
8333 | ||
8334 | /* Process info ring completion messages */ | |
8335 | #ifdef EWP_EDL | |
8336 | if (!bus->dhd->dongle_edl_support) | |
8337 | #endif // endif | |
8338 | { | |
8339 | more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND); | |
8340 | bus->last_process_infocpl_time = OSL_LOCALTIME_NS(); | |
8341 | } | |
8342 | #ifdef EWP_EDL | |
8343 | else { | |
8344 | more |= dhd_prot_process_msgbuf_edl(bus->dhd); | |
8345 | bus->last_process_edl_time = OSL_LOCALTIME_NS(); | |
8346 | } | |
8347 | #endif /* EWP_EDL */ | |
8348 | ||
8349 | #ifdef IDLE_TX_FLOW_MGMT | |
8350 | if (bus->enable_idle_flowring_mgmt) { | |
8351 | /* Look for idle flow rings */ | |
8352 | dhd_bus_check_idle_scan(bus); | |
8353 | } | |
8354 | #endif /* IDLE_TX_FLOW_MGMT */ | |
8355 | ||
8356 | /* don't talk to the dongle if fw is about to be reloaded */ | |
8357 | if (bus->dhd->hang_was_sent) { | |
8358 | more = FALSE; | |
8359 | } | |
8360 | DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); | |
8361 | ||
8362 | #if defined(DHD_H2D_LOG_TIME_SYNC) | |
8363 | dhdpci_bus_rte_log_time_sync_poll(bus); | |
8364 | #endif /* DHD_H2D_LOG_TIME_SYNC */ | |
8365 | return more; | |
8366 | } | |
8367 | ||
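/**
 * Sanity check that dongle TCM still holds a valid pciedev_shared structure:
 * re-read the shared area pointer from the last word of RAM, fetch the
 * structure and compare its console_addr with the cached copy.
 */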
8368 | bool | |
8369 | dhdpcie_tcm_valid(dhd_bus_t *bus) | |
8370 | { | |
8371 | uint32 addr = 0; | |
8372 | int rv; | |
8373 | uint32 shaddr = 0; | |
8374 | pciedev_shared_t sh; | |
8375 | ||
8376 | shaddr = bus->dongle_ram_base + bus->ramsize - 4; | |
8377 | ||
8378 | /* Read last word in memory to determine address of pciedev_shared structure */ | |
8379 | addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); | |
8380 | ||
8381 | if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || | |
8382 | (addr > shaddr)) { | |
8383 | DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n", | |
8384 | __FUNCTION__, addr)); | |
8385 | return FALSE; | |
8386 | } | |
8387 | ||
8388 | /* Read hndrte_shared structure */ | |
8389 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh, | |
8390 | sizeof(pciedev_shared_t))) < 0) { | |
8391 | DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); | |
8392 | return FALSE; | |
8393 | } | |
8394 | ||
8395 | /* Compare any field in pciedev_shared_t */ | |
8396 | if (sh.console_addr != bus->pcie_sh->console_addr) { | |
8397 | DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n")); | |
8398 | return FALSE; | |
8399 | } | |
8400 | ||
8401 | return TRUE; | |
8402 | } | |
8403 | ||
8404 | static void | |
8405 | dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version) | |
8406 | { | |
8407 | snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)", | |
8408 | firmware_api_version, host_api_version); | |
8409 | return; | |
8410 | } | |
8411 | ||
8412 | static bool | |
8413 | dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) | |
8414 | { | |
8415 | bool retcode = FALSE; | |
8416 | ||
8417 | DHD_INFO(("firmware api revision %d, host api revision %d\n", | |
8418 | firmware_api_version, host_api_version)); | |
8419 | ||
8420 | switch (firmware_api_version) { | |
8421 | case PCIE_SHARED_VERSION_7: | |
8422 | case PCIE_SHARED_VERSION_6: | |
8423 | case PCIE_SHARED_VERSION_5: | |
8424 | retcode = TRUE; | |
8425 | break; | |
8426 | default: | |
8427 | if (firmware_api_version <= host_api_version) | |
8428 | retcode = TRUE; | |
8429 | } | |
8430 | return retcode; | |
8431 | } | |
8432 | ||
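/**
 * Read and parse the pciedev_shared structure from dongle TCM after firmware
 * download: poll the last word of RAM for the shared area pointer, byte-swap
 * the fields, check IPC revision compatibility, and set up ring info, DMA
 * index support and the other capability flags advertised by the firmware.
 */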
8433 | static int | |
8434 | dhdpcie_readshared(dhd_bus_t *bus) | |
8435 | { | |
8436 | uint32 addr = 0; | |
8437 | int rv, dma_indx_wr_buf, dma_indx_rd_buf; | |
8438 | uint32 shaddr = 0; | |
8439 | pciedev_shared_t *sh = bus->pcie_sh; | |
8440 | dhd_timeout_t tmo; | |
8441 | bool idma_en = FALSE; | |
8442 | ||
8443 | if (MULTIBP_ENAB(bus->sih)) { | |
8444 | dhd_bus_pcie_pwr_req(bus); | |
8445 | } | |
8446 | ||
8447 | shaddr = bus->dongle_ram_base + bus->ramsize - 4; | |
8448 | /* start a timer for 5 seconds */ | |
8449 | dhd_timeout_start(&tmo, MAX_READ_TIMEOUT); | |
8450 | ||
8451 | while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) { | |
8452 | /* Read last word in memory to determine address of pciedev_shared structure */ | |
8453 | addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); | |
8454 | } | |
8455 | ||
8456 | if (addr == (uint32)-1) { | |
8457 | DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__)); | |
8458 | bus->is_linkdown = 1; | |
8459 | return BCME_ERROR; | |
8460 | } | |
8461 | ||
8462 | if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || | |
8463 | (addr > shaddr)) { | |
8464 | DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n", | |
8465 | __FUNCTION__, addr)); | |
8466 | DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed)); | |
8467 | #ifdef DEBUG_DNGL_INIT_FAIL | |
8468 | if (addr != (uint32)-1) { /* skip further PCIE reads if read this addr */ | |
8469 | if (bus->dhd->memdump_enabled) { | |
8470 | bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE; | |
8471 | dhdpcie_mem_dump(bus); | |
8472 | } | |
8473 | } | |
8474 | #endif /* DEBUG_DNGL_INIT_FAIL */ | |
8475 | return BCME_ERROR; | |
8476 | } else { | |
8477 | bus->shared_addr = (ulong)addr; | |
8478 | DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec " | |
8479 | "before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed)); | |
8480 | } | |
8481 | ||
8482 | /* Read hndrte_shared structure */ | |
8483 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh, | |
8484 | sizeof(pciedev_shared_t))) < 0) { | |
8485 | DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv)); | |
8486 | return rv; | |
8487 | } | |
8488 | ||
8489 | /* Endianness */ | |
8490 | sh->flags = ltoh32(sh->flags); | |
8491 | sh->trap_addr = ltoh32(sh->trap_addr); | |
8492 | sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); | |
8493 | sh->assert_file_addr = ltoh32(sh->assert_file_addr); | |
8494 | sh->assert_line = ltoh32(sh->assert_line); | |
8495 | sh->console_addr = ltoh32(sh->console_addr); | |
8496 | sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); | |
8497 | sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); | |
8498 | sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); | |
8499 | sh->flags2 = ltoh32(sh->flags2); | |
8500 | ||
8501 | /* load bus console address */ | |
8502 | bus->console_addr = sh->console_addr; | |
8503 | ||
8504 | /* Read the dma rx offset */ | |
8505 | bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset; | |
8506 | dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset); | |
8507 | ||
8508 | DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset)); | |
8509 | ||
8510 | bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK; | |
8511 | if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION))) | |
8512 | { | |
8513 | DHD_ERROR(("%s: pcie_shared version %d in dhd " | |
8514 | "is older than pciedev_shared version %d in dongle\n", | |
8515 | __FUNCTION__, PCIE_SHARED_VERSION, | |
8516 | bus->api.fw_rev)); | |
8517 | return BCME_ERROR; | |
8518 | } | |
8519 | dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION); | |
8520 | ||
8521 | bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? | |
8522 | sizeof(uint16) : sizeof(uint32); | |
8523 | DHD_INFO(("%s: Dongle advertises %d size indices\n", | |
8524 | __FUNCTION__, bus->rw_index_sz)); | |
8525 | ||
8526 | #ifdef IDLE_TX_FLOW_MGMT | |
8527 | if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) { | |
8528 | DHD_ERROR(("%s: FW Supports IdleFlow ring management!\n", | |
8529 | __FUNCTION__)); | |
8530 | bus->enable_idle_flowring_mgmt = TRUE; | |
8531 | } | |
8532 | #endif /* IDLE_TX_FLOW_MGMT */ | |
8533 | ||
8534 | if (IDMA_CAPABLE(bus)) { | |
8535 | if (bus->sih->buscorerev == 23) { | |
8536 | } else { | |
8537 | idma_en = TRUE; | |
8538 | } | |
8539 | } | |
8540 | ||
8541 | /* TODO: This needs to be selected based on IPC instead of at compile time */ | |
8542 | bus->dhd->hwa_enable = TRUE; | |
8543 | ||
8544 | if (idma_en) { | |
8545 | bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE; | |
8546 | bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE; | |
8547 | } | |
8548 | ||
8549 | bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; | |
8550 | ||
8551 | bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE; | |
8552 | ||
8553 | /* Does the FW support DMA'ing r/w indices */ | |
8554 | if (sh->flags & PCIE_SHARED_DMA_INDEX) { | |
8555 | if (!bus->dhd->dma_ring_upd_overwrite) { | |
8556 | { | |
8557 | if (!IFRM_ENAB(bus->dhd)) { | |
8558 | bus->dhd->dma_h2d_ring_upd_support = TRUE; | |
8559 | } | |
8560 | bus->dhd->dma_d2h_ring_upd_support = TRUE; | |
8561 | } | |
8562 | } | |
8563 | ||
8564 | if (bus->dhd->dma_d2h_ring_upd_support) | |
8565 | bus->dhd->d2h_sync_mode = 0; | |
8566 | ||
8567 | DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", | |
8568 | __FUNCTION__, | |
8569 | (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0), | |
8570 | (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0))); | |
8571 | } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) { | |
8572 | DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n", | |
8573 | __FUNCTION__)); | |
8574 | return BCME_UNSUPPORTED; | |
8575 | } else { | |
8576 | bus->dhd->dma_h2d_ring_upd_support = FALSE; | |
8577 | bus->dhd->dma_d2h_ring_upd_support = FALSE; | |
8578 | } | |
8579 | ||
8580 | /* Does the firmware support fast delete ring? */ | |
8581 | if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) { | |
8582 | DHD_INFO(("%s: Firmware supports fast delete ring\n", | |
8583 | __FUNCTION__)); | |
8584 | bus->dhd->fast_delete_ring_support = TRUE; | |
8585 | } else { | |
8586 | DHD_INFO(("%s: Firmware does not support fast delete ring\n", | |
8587 | __FUNCTION__)); | |
8588 | bus->dhd->fast_delete_ring_support = FALSE; | |
8589 | } | |
8590 | ||
8591 | /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */ | |
8592 | { | |
8593 | ring_info_t ring_info; | |
8594 | ||
8595 | /* boundary check */ | |
8596 | if (sh->rings_info_ptr > shaddr) { | |
8597 | DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n", | |
8598 | __FUNCTION__, sh->rings_info_ptr)); | |
8599 | return BCME_ERROR; | |
8600 | } | |
8601 | ||
8602 | if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr, | |
8603 | (uint8 *)&ring_info, sizeof(ring_info_t))) < 0) | |
8604 | return rv; | |
8605 | ||
8606 | bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr); | |
8607 | bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr); | |
8608 | ||
8609 | if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { | |
8610 | bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); | |
8611 | bus->max_submission_rings = ltoh16(ring_info.max_submission_queues); | |
8612 | bus->max_completion_rings = ltoh16(ring_info.max_completion_rings); | |
8613 | bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings; | |
8614 | bus->api.handle_mb_data = dhdpcie_read_handle_mb_data; | |
8615 | bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX; | |
8616 | } | |
8617 | else { | |
8618 | bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); | |
8619 | bus->max_submission_rings = bus->max_tx_flowrings; | |
8620 | bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS; | |
8621 | bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS; | |
8622 | bus->api.handle_mb_data = dhdpcie_handle_mb_data; | |
8623 | bus->use_mailbox = TRUE; | |
8624 | } | |
8625 | if (bus->max_completion_rings == 0) { | |
8626 | DHD_ERROR(("dongle completion rings are invalid %d\n", | |
8627 | bus->max_completion_rings)); | |
8628 | return BCME_ERROR; | |
8629 | } | |
8630 | if (bus->max_submission_rings == 0) { | |
8631 | DHD_ERROR(("dongle submission rings are invalid %d\n", | |
8632 | bus->max_submission_rings)); | |
8633 | return BCME_ERROR; | |
8634 | } | |
8635 | if (bus->max_tx_flowrings == 0) { | |
8636 | DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings)); | |
8637 | return BCME_ERROR; | |
8638 | } | |
8639 | ||
8640 | /* If both FW and Host support DMA'ing indices, allocate memory and notify FW | |
8641 | * The max_sub_queues is read from FW initialized ring_info | |
8642 | */ | |
8643 | if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) { | |
8644 | dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, | |
8645 | H2D_DMA_INDX_WR_BUF, bus->max_submission_rings); | |
8646 | dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, | |
8647 | D2H_DMA_INDX_RD_BUF, bus->max_completion_rings); | |
8648 | ||
8649 | if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { | |
8650 | DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. " | |
8651 | "Host will use w/r indices in TCM\n", | |
8652 | __FUNCTION__)); | |
8653 | bus->dhd->dma_h2d_ring_upd_support = FALSE; | |
8654 | bus->dhd->idma_enable = FALSE; | |
8655 | } | |
8656 | } | |
8657 | ||
8658 | if (bus->dhd->dma_d2h_ring_upd_support) { | |
8659 | dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, | |
8660 | D2H_DMA_INDX_WR_BUF, bus->max_completion_rings); | |
8661 | dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, | |
8662 | H2D_DMA_INDX_RD_BUF, bus->max_submission_rings); | |
8663 | ||
8664 | if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { | |
8665 | DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. " | |
8666 | "Host will use w/r indices in TCM\n", | |
8667 | __FUNCTION__)); | |
8668 | bus->dhd->dma_d2h_ring_upd_support = FALSE; | |
8669 | } | |
8670 | } | |
8671 | ||
8672 | if (IFRM_ENAB(bus->dhd)) { | |
8673 | dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, | |
8674 | H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings); | |
8675 | ||
8676 | if (dma_indx_wr_buf != BCME_OK) { | |
8677 | DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n", | |
8678 | __FUNCTION__)); | |
8679 | bus->dhd->ifrm_enable = FALSE; | |
8680 | } | |
8681 | } | |
8682 | ||
8683 | /* read ringmem and ringstate ptrs from shared area and store in host variables */ | |
8684 | dhd_fillup_ring_sharedptr_info(bus, &ring_info); | |
8685 | if (dhd_msg_level & DHD_INFO_VAL) { | |
8686 | bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); | |
8687 | } | |
8688 | DHD_INFO(("%s: ring_info\n", __FUNCTION__)); | |
8689 | ||
8690 | DHD_ERROR(("%s: max H2D queues %d\n", | |
8691 | __FUNCTION__, ltoh16(ring_info.max_tx_flowrings))); | |
8692 | ||
8693 | DHD_INFO(("mail box address\n")); | |
8694 | DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", | |
8695 | __FUNCTION__, bus->h2d_mb_data_ptr_addr)); | |
8696 | DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", | |
8697 | __FUNCTION__, bus->d2h_mb_data_ptr_addr)); | |
8698 | } | |
8699 | ||
8700 | DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", | |
8701 | __FUNCTION__, bus->dhd->d2h_sync_mode)); | |
8702 | ||
8703 | bus->dhd->d2h_hostrdy_supported = | |
8704 | ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT); | |
8705 | ||
8706 | bus->dhd->ext_trap_data_supported = | |
8707 | ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA); | |
8708 | ||
8709 | if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0) | |
8710 | bus->dhd->pcie_txs_metadata_enable = 0; | |
8711 | ||
8712 | bus->dhd->hscb_enable = | |
8713 | (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB; | |
8714 | ||
8715 | #ifdef EWP_EDL | |
8716 | if (host_edl_support) { | |
8717 | bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE; | |
8718 | DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support)); | |
8719 | } | |
8720 | #endif /* EWP_EDL */ | |
8721 | ||
8722 | bus->dhd->debug_buf_dest_support = | |
8723 | (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE; | |
8724 | DHD_ERROR(("FW supports debug buf dest ? %s \n", | |
8725 | bus->dhd->debug_buf_dest_support ? "Y" : "N")); | |
8726 | ||
8727 | #ifdef DHD_HP2P | |
8728 | if (bus->dhd->hp2p_enable) { | |
8729 | bus->dhd->hp2p_ts_capable = | |
8730 | (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP; | |
8731 | bus->dhd->hp2p_capable = | |
8732 | (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P; | |
8733 | bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable; | |
8734 | ||
8735 | DHD_ERROR(("FW supports HP2P ? %s \n", | |
8736 | bus->dhd->hp2p_capable ? "Y" : "N")); | |
8737 | ||
8738 | if (bus->dhd->hp2p_capable) { | |
8739 | bus->dhd->pkt_thresh = HP2P_PKT_THRESH; | |
8740 | bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY; | |
8741 | bus->dhd->time_thresh = HP2P_TIME_THRESH; | |
8742 | for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) { | |
8743 | hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr]; | |
8744 | ||
8745 | hp2p_info->hrtimer_init = FALSE; | |
8746 | tasklet_hrtimer_init(&hp2p_info->timer, | |
8747 | dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
8748 | } | |
8749 | } | |
8750 | } | |
8751 | #endif /* DHD_HP2P */ | |
8752 | ||
8753 | #ifdef DHD_DB0TS | |
8754 | bus->dhd->db0ts_capable = | |
8755 | (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0; | |
8756 | #endif /* DHD_DB0TS */ | |
8757 | ||
8758 | if (MULTIBP_ENAB(bus->sih)) { | |
8759 | dhd_bus_pcie_pwr_req_clear(bus); | |
8760 | ||
8761 | /* | |
8762 | * WAR to fix ARM cold boot; | |
8763 | * De-assert WL domain in DAR | |
8764 | */ | |
8765 | if (bus->sih->buscorerev >= 68) { | |
8766 | dhd_bus_pcie_pwr_req_wl_domain(bus, | |
8767 | DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE); | |
8768 | } | |
8769 | } | |
8770 | return BCME_OK; | |
8771 | } /* dhdpcie_readshared */ | |
8772 | ||
8773 | /** Read ring mem and ring state ptr info from shared memory area in device memory */ | |
8774 | static void | |
8775 | dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) | |
8776 | { | |
8777 | uint16 i = 0; | |
8778 | uint16 j = 0; | |
8779 | uint32 tcm_memloc; | |
8780 | uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr; | |
8781 | uint16 max_tx_flowrings = bus->max_tx_flowrings; | |
8782 | ||
8783 | /* Ring mem ptr info */ | |
8784 | /* Allocated in the order | |
8785 | H2D_MSGRING_CONTROL_SUBMIT 0 | |
8786 | H2D_MSGRING_RXPOST_SUBMIT 1 | |
8787 | D2H_MSGRING_CONTROL_COMPLETE 2 | |
8788 | D2H_MSGRING_TX_COMPLETE 3 | |
8789 | D2H_MSGRING_RX_COMPLETE 4 | |
8790 | */ | |
8791 | ||
8792 | { | |
8793 | /* ringmemptr holds start of the mem block address space */ | |
8794 | tcm_memloc = ltoh32(ring_info->ringmem_ptr); | |
8795 | ||
8796 | /* Find out ringmem ptr for each ring common ring */ | |
8797 | for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) { | |
8798 | bus->ring_sh[i].ring_mem_addr = tcm_memloc; | |
8799 | /* Update mem block */ | |
8800 | tcm_memloc = tcm_memloc + sizeof(ring_mem_t); | |
8801 | DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__, | |
8802 | i, bus->ring_sh[i].ring_mem_addr)); | |
8803 | } | |
8804 | } | |
8805 | ||
8806 | /* Ring state mem ptr info */ | |
8807 | { | |
8808 | d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr); | |
8809 | d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr); | |
8810 | h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr); | |
8811 | h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr); | |
8812 | ||
8813 | /* Store h2d common ring write/read pointers */ | |
8814 | for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) { | |
8815 | bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; | |
8816 | bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; | |
8817 | ||
8818 | /* update mem block */ | |
8819 | h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; | |
8820 | h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; | |
8821 | ||
8822 | DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i, | |
8823 | bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); | |
8824 | } | |
8825 | ||
8826 | /* Store d2h common ring write/read pointers */ | |
8827 | for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) { | |
8828 | bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; | |
8829 | bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; | |
8830 | ||
8831 | /* update mem block */ | |
8832 | d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; | |
8833 | d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; | |
8834 | ||
8835 | DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i, | |
8836 | bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); | |
8837 | } | |
8838 | ||
8839 | /* Store txflow ring write/read pointers */ | |
8840 | if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { | |
8841 | max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS; | |
8842 | } else { | |
8843 | /* Account for Debug info h2d ring located after the last tx flow ring */ | |
8844 | max_tx_flowrings = max_tx_flowrings + 1; | |
8845 | } | |
8846 | for (j = 0; j < max_tx_flowrings; i++, j++) | |
8847 | { | |
8848 | bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; | |
8849 | bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; | |
8850 | ||
8851 | /* update mem block */ | |
8852 | h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; | |
8853 | h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; | |
8854 | ||
8855 | DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n", | |
8856 | __FUNCTION__, i, | |
8857 | bus->ring_sh[i].ring_state_w, | |
8858 | bus->ring_sh[i].ring_state_r)); | |
8859 | } | |
8860 | /* store wr/rd pointers for debug info completion ring */ | |
8861 | bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; | |
8862 | bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; | |
8863 | d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; | |
8864 | d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; | |
8865 | DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, | |
8866 | bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); | |
8867 | } | |
8868 | } /* dhd_fillup_ring_sharedptr_info */ | |
8869 | ||
8870 | /** | |
8871 | * Initialize bus module: prepare for communication with the dongle. Called after downloading | |
8872 | * firmware into the dongle. | |
8873 | */ | |
8874 | int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) | |
8875 | { | |
8876 | dhd_bus_t *bus = dhdp->bus; | |
8877 | int ret = 0; | |
8878 | ||
8879 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
8880 | ||
8881 | ASSERT(bus->dhd); | |
8882 | if (!bus->dhd) | |
8883 | return 0; | |
8884 | ||
8885 | if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) { | |
8886 | dhd_bus_pcie_pwr_req_clear_reload_war(bus); | |
8887 | } | |
8888 | ||
8889 | if (MULTIBP_ENAB(bus->sih)) { | |
8890 | dhd_bus_pcie_pwr_req(bus); | |
8891 | } | |
8892 | ||
8893 | /* Configure AER registers to log the TLP header */ | |
8894 | dhd_bus_aer_config(bus); | |
8895 | ||
8896 | /* Make sure we're talking to the core. */ | |
8897 | bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); | |
8898 | ASSERT(bus->reg != NULL); | |
8899 | ||
8900 | /* before opening up the bus for data transfer, check if the shared area is intact */ | |
8901 | ret = dhdpcie_readshared(bus); | |
8902 | if (ret < 0) { | |
8903 | DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__)); | |
8904 | goto exit; | |
8905 | } | |
8906 | ||
8907 | /* Make sure we're talking to the core. */ | |
8908 | bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); | |
8909 | ASSERT(bus->reg != NULL); | |
8910 | ||
8911 | dhd_init_bus_lock(bus); | |
8912 | ||
8913 | dhd_init_backplane_access_lock(bus); | |
8914 | ||
8915 | /* Set bus state according to enable result */ | |
8916 | dhdp->busstate = DHD_BUS_DATA; | |
8917 | bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE; | |
8918 | dhdp->dhd_bus_busy_state = 0; | |
8919 | ||
8920 | /* D11 status via PCIe completion header */ | |
8921 | if ((ret = dhdpcie_init_d11status(bus)) < 0) { | |
8922 | goto exit; | |
8923 | } | |
8924 | ||
8925 | if (!dhd_download_fw_on_driverload) | |
8926 | dhd_dpc_enable(bus->dhd); | |
8927 | /* Enable the interrupt after device is up */ | |
8928 | dhdpcie_bus_intr_enable(bus); | |
8929 | ||
8930 | bus->intr_enabled = TRUE; | |
8931 | ||
8932 | /* bcmsdh_intr_unmask(bus->sdh); */ | |
8933 | bus->idletime = 0; | |
8934 | ||
8935 | /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */ | |
8936 | if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { | |
8937 | bus->use_d0_inform = TRUE; | |
8938 | } else { | |
8939 | bus->use_d0_inform = FALSE; | |
8940 | } | |
8941 | ||
8942 | exit: | |
8943 | if (MULTIBP_ENAB(bus->sih)) { | |
8944 | dhd_bus_pcie_pwr_req_clear(bus); | |
8945 | } | |
8946 | return ret; | |
8947 | } | |
8948 | ||
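/**
 * Clear the last word of dongle RAM, where the firmware later publishes the
 * shared area pointer.
 */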
8949 | static void | |
8950 | dhdpcie_init_shared_addr(dhd_bus_t *bus) | |
8951 | { | |
8952 | uint32 addr = 0; | |
8953 | uint32 val = 0; | |
8954 | ||
8955 | addr = bus->dongle_ram_base + bus->ramsize - 4; | |
8956 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); | |
8957 | } | |
8958 | ||
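| /** | |
| * Match the PCI vendor/device IDs against the list of supported Broadcom | |
| * dongles. Returns 0 on a match and -ENODEV otherwise (note that, despite | |
| * the bool return type, the function returns 0 / -ENODEV). | |
| */ | |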
8959 | bool | |
8960 | dhdpcie_chipmatch(uint16 vendor, uint16 device) | |
8961 | { | |
8962 | if (vendor != PCI_VENDOR_ID_BROADCOM) { | |
8963 | DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, | |
8964 | vendor, device)); | |
8965 | return (-ENODEV); | |
8966 | } | |
8967 | ||
8968 | if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) || | |
8969 | (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) || | |
8970 | (device == BCM43569_CHIP_ID)) { | |
8971 | return 0; | |
8972 | } | |
8973 | ||
8974 | if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) || | |
8975 | (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) { | |
8976 | return 0; | |
8977 | } | |
8978 | ||
8979 | if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) || | |
8980 | (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) { | |
8981 | return 0; | |
8982 | } | |
8983 | ||
8984 | if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) || | |
8985 | (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) { | |
8986 | return 0; | |
8987 | } | |
8988 | ||
8989 | if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) || | |
8990 | (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) { | |
8991 | return 0; | |
8992 | } | |
8993 | ||
8994 | if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) || | |
8995 | (device == BCM43452_D11AC5G_ID)) { | |
8996 | return 0; | |
8997 | } | |
8998 | ||
8999 | if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) || | |
9000 | (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) { | |
9001 | return 0; | |
9002 | } | |
9003 | ||
9004 | if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) || | |
9005 | (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) { | |
9006 | return 0; | |
9007 | } | |
9008 | ||
9009 | if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) || | |
9010 | (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) { | |
9011 | return 0; | |
9012 | } | |
9013 | ||
9014 | if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) || | |
9015 | (device == BCM4358_D11AC5G_ID)) { | |
9016 | return 0; | |
9017 | } | |
9018 | ||
9019 | if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) || | |
9020 | (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) { | |
9021 | return 0; | |
9022 | } | |
9023 | ||
9024 | if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) || | |
9025 | (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) { | |
9026 | return 0; | |
9027 | } | |
9028 | ||
9029 | if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) || | |
9030 | (device == BCM4359_D11AC5G_ID)) { | |
9031 | return 0; | |
9032 | } | |
9033 | ||
9034 | if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) || | |
9035 | (device == BCM43596_D11AC5G_ID)) { | |
9036 | return 0; | |
9037 | } | |
9038 | ||
9039 | if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) || | |
9040 | (device == BCM43597_D11AC5G_ID)) { | |
9041 | return 0; | |
9042 | } | |
9043 | ||
9044 | if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) || | |
9045 | (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) { | |
9046 | return 0; | |
9047 | } | |
9048 | ||
9049 | if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) || | |
9050 | (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) { | |
9051 | return 0; | |
9052 | } | |
9053 | if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) || | |
9054 | (device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) { | |
9055 | return 0; | |
9056 | } | |
9057 | if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) || | |
9058 | (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) { | |
9059 | return 0; | |
9060 | } | |
9061 | if ((device == BCM43752_D11AX_ID) || (device == BCM43752_D11AX2G_ID) || | |
9062 | (device == BCM43752_D11AX5G_ID) || (device == BCM43752_CHIP_ID)) { | |
9063 | return 0; | |
9064 | } | |
9065 | if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) || | |
9066 | (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) { | |
9067 | return 0; | |
9068 | } | |
9069 | ||
9070 | if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) || | |
9071 | (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) { | |
9072 | return 0; | |
9073 | } | |
9074 | ||
9075 | if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) || | |
9076 | (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) || | |
9077 | (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) { | |
9078 | return 0; | |
9079 | } | |
9080 | ||
9081 | if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) || | |
9082 | (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) { | |
9083 | return 0; | |
9084 | } | |
9085 | ||
9086 | if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) || | |
9087 | (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) { | |
9088 | return 0; | |
9089 | } | |
9090 | ||
9091 | DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); | |
9092 | return (-ENODEV); | |
9093 | } /* dhdpcie_chipmatch */ | |
9094 | ||
9095 | /** | |
9096 | * Name: dhdpcie_cc_nvmshadow | |
9097 | * | |
9098 | * Description: | |
9099 | * A shadow of OTP/SPROM exists in ChipCommon Region | |
9100 | * between 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). | |
9101 | * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size | |
9102 | * can also be read from ChipCommon Registers. | |
9103 | */ | |
9104 | static int | |
9105 | dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) | |
9106 | { | |
9107 | uint16 dump_offset = 0; | |
9108 | uint32 dump_size = 0, otp_size = 0, sprom_size = 0; | |
9109 | ||
9110 | /* Table for 65nm OTP Size (in bits) */ | |
9111 | int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024}; | |
9112 | ||
9113 | volatile uint16 *nvm_shadow; | |
9114 | ||
9115 | uint cur_coreid; | |
9116 | uint chipc_corerev; | |
9117 | chipcregs_t *chipcregs; | |
9118 | ||
9119 | /* Save the current core */ | |
9120 | cur_coreid = si_coreid(bus->sih); | |
9121 | /* Switch to ChipC */ | |
9122 | chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0); | |
9123 | ASSERT(chipcregs != NULL); | |
9124 | ||
9125 | chipc_corerev = si_corerev(bus->sih); | |
9126 | ||
9127 | /* Check ChipcommonCore Rev */ | |
9128 | if (chipc_corerev < 44) { | |
9129 | DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev)); | |
9130 | return BCME_UNSUPPORTED; | |
9131 | } | |
9132 | ||
9133 | /* Check ChipID */ | |
9134 | if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) && | |
9135 | ((uint16)bus->sih->chip != BCM4355_CHIP_ID) && | |
9136 | ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) { | |
9137 | DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips" | |
9138 | "4350/4345/4355/4364 only\n", __FUNCTION__)); | |
9139 | return BCME_UNSUPPORTED; | |
9140 | } | |
9141 | ||
9142 | /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */ | |
9143 | if (chipcregs->sromcontrol & SRC_PRESENT) { | |
9144 | /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits (0x2) */ | |
9145 | sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK) | |
9146 | >> SRC_SIZE_SHIFT))) * 1024; | |
9147 | bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size); | |
9148 | } | |
9149 | ||
9150 | if (chipcregs->sromcontrol & SRC_OTPPRESENT) { | |
9151 | bcm_bprintf(b, "\nOTP Present"); | |
9152 | ||
9153 | if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT) | |
9154 | == OTPL_WRAP_TYPE_40NM) { | |
9155 | /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */ | |
9156 | /* Chipcommon rev51 is a variation on rev45 and does not support | |
9157 | * the latest OTP configuration. | |
9158 | */ | |
9159 | if (chipc_corerev != 51 && chipc_corerev >= 49) { | |
9160 | otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) | |
9161 | >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024; | |
9162 | bcm_bprintf(b, "(Size %d bits)\n", otp_size); | |
9163 | } else { | |
9164 | otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) | |
9165 | >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024; | |
9166 | bcm_bprintf(b, "(Size %d bits)\n", otp_size); | |
9167 | } | |
9168 | } else { | |
9169 | /* This part is untested since newer chips have 40nm OTP */ | |
9170 | /* Chipcommon rev51 is a variation on rev45 and does not support | |
9171 | * the latest OTP configuration. | |
9172 | */ | |
9173 | if (chipc_corerev != 51 && chipc_corerev >= 49) { | |
9174 | otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK) | |
9175 | >> OTPL_ROW_SIZE_SHIFT]; | |
9176 | bcm_bprintf(b, "(Size %d bits)\n", otp_size); | |
9177 | } else { | |
9178 | otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) | |
9179 | >> CC_CAP_OTPSIZE_SHIFT]; | |
9180 | bcm_bprintf(b, "(Size %d bits)\n", otp_size); | |
9181 | DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n", | |
9182 | __FUNCTION__)); | |
9183 | } | |
9184 | } | |
9185 | } | |
9186 | ||
9187 | /* Chipcommon rev51 is a variation on rev45 and does not support | |
9188 | * the latest OTP configuration. | |
9189 | */ | |
9190 | if (chipc_corerev != 51 && chipc_corerev >= 49) { | |
9191 | if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && | |
9192 | ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) { | |
9193 | DHD_ERROR(("%s: SPROM and OTP could not be found " | |
9194 | "sromcontrol = %x, otplayout = %x \n", | |
9195 | __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout)); | |
9196 | return BCME_NOTFOUND; | |
9197 | } | |
9198 | } else { | |
9199 | if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && | |
9200 | ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) { | |
9201 | DHD_ERROR(("%s: SPROM and OTP could not be found " | |
9202 | "sromcontrol = %x, capablities = %x \n", | |
9203 | __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities)); | |
9204 | return BCME_NOTFOUND; | |
9205 | } | |
9206 | } | |
9207 | ||
9208 | /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */ | |
9209 | if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) && | |
9210 | (chipcregs->sromcontrol & SRC_OTPPRESENT)) { | |
9211 | ||
9212 | bcm_bprintf(b, "OTP Strap selected.\n" | |
9213 | "\nOTP Shadow in ChipCommon:\n"); | |
9214 | ||
9215 | dump_size = otp_size / 16; /* 16-bit words */ | |
9216 | ||
9217 | } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) && | |
9218 | (chipcregs->sromcontrol & SRC_PRESENT)) { | |
9219 | ||
9220 | bcm_bprintf(b, "SPROM Strap selected\n" | |
9221 | "\nSPROM Shadow in ChipCommon:\n"); | |
9222 | ||
9223 | /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */ | |
9224 | /* dump_size in 16bit words */ | |
9225 | dump_size = sprom_size > (8 * 1024) ? (8 * 1024) / 16 : sprom_size / 16; | |
9226 | } else { | |
9227 | DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n", | |
9228 | __FUNCTION__)); | |
9229 | return BCME_NOTFOUND; | |
9230 | } | |
9231 | ||
9232 | if (bus->regs == NULL) { | |
9233 | DHD_ERROR(("ChipCommon Regs. not initialized\n")); | |
9234 | return BCME_NOTREADY; | |
9235 | } else { | |
9236 | bcm_bprintf(b, "\n OffSet:"); | |
9237 | ||
9238 | /* Chipcommon rev51 is a variation on rev45 and does not support | |
9239 | * the latest OTP configuration. | |
9240 | */ | |
9241 | if (chipc_corerev != 51 && chipc_corerev >= 49) { | |
9242 | /* ChipCommon can map only 8 Kbits; for ccrev >= 49 the OTP size | |
9243 | * is around 12 Kbits, so read through the GCI core instead. | |
9244 | */ | |
9245 | nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0); | |
9246 | } else { | |
9247 | /* Point to the SPROM/OTP shadow in ChipCommon */ | |
9248 | nvm_shadow = chipcregs->sromotp; | |
9249 | } | |
9250 | ||
9251 | if (nvm_shadow == NULL) { | |
9252 | DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__)); | |
9253 | return BCME_NOTFOUND; | |
9254 | } | |
9255 | ||
9256 | /* | |
9257 | * Read 16 bits / iteration. | |
9258 | * dump_size & dump_offset in 16-bit words | |
9259 | */ | |
9260 | while (dump_offset < dump_size) { | |
9261 | if (dump_offset % 2 == 0) | |
9262 | /* Print the offset in the shadow space in Bytes */ | |
9263 | bcm_bprintf(b, "\n 0x%04x", dump_offset * 2); | |
9264 | ||
9265 | bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset)); | |
9266 | dump_offset += 0x1; | |
9267 | } | |
9268 | } | |
9269 | ||
9270 | /* Switch back to the original core */ | |
9271 | si_setcore(bus->sih, cur_coreid, 0); | |
9272 | ||
9273 | return BCME_OK; | |
9274 | } /* dhdpcie_cc_nvmshadow */ | |
9275 | ||
9276 | /** Flow rings are dynamically created and destroyed */ | |
9277 | void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) | |
9278 | { | |
9279 | void *pkt; | |
9280 | flow_queue_t *queue; | |
9281 | flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node; | |
9282 | unsigned long flags; | |
9283 | ||
9284 | queue = &flow_ring_node->queue; | |
9285 | ||
9286 | #ifdef DHDTCPACK_SUPPRESS | |
9287 | /* Clean tcp_ack_info_tbl to prevent access to a flushed pkt when a | |
9288 | * new packet arrives from the network stack. | |
9289 | */ | |
9290 | dhd_tcpack_info_tbl_clean(bus->dhd); | |
9291 | #endif /* DHDTCPACK_SUPPRESS */ | |
9292 | ||
9293 | #ifdef DHD_HP2P | |
9294 | if (flow_ring_node->hp2p_ring) { | |
9295 | bus->dhd->hp2p_ring_active = FALSE; | |
9296 | flow_ring_node->hp2p_ring = FALSE; | |
9297 | } | |
9298 | #endif /* DHD_HP2P */ | |
9299 | ||
9300 | /* clean up BUS level info */ | |
9301 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
9302 | ||
9303 | /* Flush all pending packets in the queue, if any */ | |
9304 | while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { | |
9305 | PKTFREE(bus->dhd->osh, pkt, TRUE); | |
9306 | } | |
9307 | ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); | |
9308 | ||
9309 | /* Reinitialise flowring's queue */ | |
9310 | dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD); | |
9311 | flow_ring_node->status = FLOW_RING_STATUS_CLOSED; | |
9312 | flow_ring_node->active = FALSE; | |
9313 | ||
9314 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
9315 | ||
9316 | /* Hold flowring_list_lock to ensure no race condition while accessing the List */ | |
9317 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
9318 | dll_delete(&flow_ring_node->list); | |
9319 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
9320 | ||
9321 | /* Release the flowring object back into the pool */ | |
9322 | dhd_prot_flowrings_pool_release(bus->dhd, | |
9323 | flow_ring_node->flowid, flow_ring_node->prot_info); | |
9324 | ||
9325 | /* Free the flowid back to the flowid allocator */ | |
9326 | dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, | |
9327 | flow_ring_node->flowid); | |
9328 | } | |
9329 | ||
9330 | /** | |
9331 | * Allocate a Flow ring buffer, | |
9332 | * Init Ring buffer, send Msg to device about flow ring creation | |
9333 | */ | |
9334 | int | |
9335 | dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) | |
9336 | { | |
9337 | flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; | |
9338 | ||
9339 | DHD_INFO(("%s :Flow create\n", __FUNCTION__)); | |
9340 | ||
9341 | /* Send Msg to device about flow ring creation */ | |
9342 | if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK) | |
9343 | return BCME_NOMEM; | |
9344 | ||
9345 | return BCME_OK; | |
9346 | } | |
9347 | ||
9348 | /** Handle response from dongle on a 'flow ring create' request */ | |
9349 | void | |
9350 | dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) | |
9351 | { | |
9352 | flow_ring_node_t *flow_ring_node; | |
9353 | unsigned long flags; | |
9354 | ||
9355 | DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid)); | |
9356 | ||
9357 | /* Boundary check of the flowid */ | |
9358 | if (flowid >= bus->dhd->num_flow_rings) { | |
9359 | DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, | |
9360 | flowid, bus->dhd->num_flow_rings)); | |
9361 | return; | |
9362 | } | |
9363 | ||
9364 | flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); | |
9365 | if (!flow_ring_node) { | |
9366 | DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); | |
9367 | return; | |
9368 | } | |
9369 | ||
9370 | ASSERT(flow_ring_node->flowid == flowid); | |
9371 | if (flow_ring_node->flowid != flowid) { | |
9372 | DHD_ERROR(("%s: flowid %d is different from the flowid " | |
9373 | "of the flow_ring_node %d\n", __FUNCTION__, flowid, | |
9374 | flow_ring_node->flowid)); | |
9375 | return; | |
9376 | } | |
9377 | ||
9378 | if (status != BCME_OK) { | |
9379 | DHD_ERROR(("%s Flow create Response failure error status = %d \n", | |
9380 | __FUNCTION__, status)); | |
9381 | /* Call Flow clean up */ | |
9382 | dhd_bus_clean_flow_ring(bus, flow_ring_node); | |
9383 | return; | |
9384 | } | |
9385 | ||
9386 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
9387 | flow_ring_node->status = FLOW_RING_STATUS_OPEN; | |
9388 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
9389 | ||
9390 | /* Now add the flow ring node to the active list. | |
9391 | * Note that the code that adds the newly created node to the active | |
9392 | * list used to live in dhd_flowid_lookup. However, after the node is | |
9393 | * added to the active list, its contents are still being filled in | |
9394 | * by dhd_prot_flow_ring_create. | |
9395 | * If a D2H interrupt arrives after the node is added to the active | |
9396 | * list but before it is populated, the bottom half calls | |
9397 | * dhd_update_txflowrings, which walks the active flow ring list, | |
9398 | * picks up the nodes and operates on them. Since | |
9399 | * dhd_prot_flow_ring_create has not finished yet, the contents of | |
9400 | * flow_ring_node can still be NULL, leading to crashes. | |
9401 | * Hence the flow_ring_node should be added to the active list only | |
9402 | * after it is truly created, which is after the create response | |
9403 | * message has been received from the dongle, i.e. in this | |
9404 | * function. | |
9405 | */ | |
9406 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
9407 | dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); | |
9408 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
9409 | ||
9410 | dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ | |
9411 | ||
9412 | return; | |
9413 | } | |
9414 | ||
9415 | int | |
9416 | dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) | |
9417 | { | |
9418 | void * pkt; | |
9419 | flow_queue_t *queue; | |
9420 | flow_ring_node_t *flow_ring_node; | |
9421 | unsigned long flags; | |
9422 | ||
9423 | DHD_INFO(("%s :Flow Delete\n", __FUNCTION__)); | |
9424 | ||
9425 | flow_ring_node = (flow_ring_node_t *)arg; | |
9426 | ||
9427 | #ifdef DHDTCPACK_SUPPRESS | |
9428 | /* Clean tcp_ack_info_tbl to prevent access to a flushed pkt when a | |
9429 | * new packet arrives from the network stack. | |
9430 | */ | |
9431 | dhd_tcpack_info_tbl_clean(bus->dhd); | |
9432 | #endif /* DHDTCPACK_SUPPRESS */ | |
9433 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
9434 | if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) { | |
9435 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
9436 | DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid)); | |
9437 | return BCME_ERROR; | |
9438 | } | |
9439 | flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING; | |
9440 | ||
9441 | queue = &flow_ring_node->queue; /* queue associated with flow ring */ | |
9442 | ||
9443 | /* Flush all pending packets in the queue, if any */ | |
9444 | while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { | |
9445 | PKTFREE(bus->dhd->osh, pkt, TRUE); | |
9446 | } | |
9447 | ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); | |
9448 | ||
9449 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
9450 | ||
9451 | /* Send Msg to device about flow ring deletion */ | |
9452 | dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node); | |
9453 | ||
9454 | return BCME_OK; | |
9455 | } | |
9456 | ||
9457 | void | |
9458 | dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) | |
9459 | { | |
9460 | flow_ring_node_t *flow_ring_node; | |
9461 | ||
9462 | DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid)); | |
9463 | ||
9464 | /* Boundary check of the flowid */ | |
9465 | if (flowid >= bus->dhd->num_flow_rings) { | |
9466 | DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, | |
9467 | flowid, bus->dhd->num_flow_rings)); | |
9468 | return; | |
9469 | } | |
9470 | ||
9471 | flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); | |
9472 | if (!flow_ring_node) { | |
9473 | DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); | |
9474 | return; | |
9475 | } | |
9476 | ||
9477 | ASSERT(flow_ring_node->flowid == flowid); | |
9478 | if (flow_ring_node->flowid != flowid) { | |
9479 | DHD_ERROR(("%s: flowid %d is different from the flowid " | |
9480 | "of the flow_ring_node %d\n", __FUNCTION__, flowid, | |
9481 | flow_ring_node->flowid)); | |
9482 | return; | |
9483 | } | |
9484 | ||
9485 | if (status != BCME_OK) { | |
9486 | DHD_ERROR(("%s Flow Delete Response failure error status = %d \n", | |
9487 | __FUNCTION__, status)); | |
9488 | return; | |
9489 | } | |
9490 | /* Call Flow clean up */ | |
9491 | dhd_bus_clean_flow_ring(bus, flow_ring_node); | |
9492 | ||
9493 | return; | |
9494 | ||
9495 | } | |
9496 | ||
9497 | int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) | |
9498 | { | |
9499 | void *pkt; | |
9500 | flow_queue_t *queue; | |
9501 | flow_ring_node_t *flow_ring_node; | |
9502 | unsigned long flags; | |
9503 | ||
9504 | DHD_INFO(("%s :Flow Flush\n", __FUNCTION__)); | |
9505 | ||
9506 | flow_ring_node = (flow_ring_node_t *)arg; | |
9507 | ||
9508 | DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); | |
9509 | queue = &flow_ring_node->queue; /* queue associated with flow ring */ | |
9510 | /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN | |
9511 | * once flow ring flush response is received for this flowring node. | |
9512 | */ | |
9513 | flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; | |
9514 | ||
9515 | #ifdef DHDTCPACK_SUPPRESS | |
9516 | /* Clean tcp_ack_info_tbl to prevent access to a flushed pkt when a | |
9517 | * new packet arrives from the network stack. | |
9518 | */ | |
9519 | dhd_tcpack_info_tbl_clean(bus->dhd); | |
9520 | #endif /* DHDTCPACK_SUPPRESS */ | |
9521 | ||
9522 | /* Flush all pending packets in the queue, if any */ | |
9523 | while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { | |
9524 | PKTFREE(bus->dhd->osh, pkt, TRUE); | |
9525 | } | |
9526 | ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); | |
9527 | ||
9528 | DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); | |
9529 | ||
9530 | /* Send Msg to device about flow ring flush */ | |
9531 | dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node); | |
9532 | ||
9533 | return BCME_OK; | |
9534 | } | |
9535 | ||
9536 | void | |
9537 | dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) | |
9538 | { | |
9539 | flow_ring_node_t *flow_ring_node; | |
9540 | ||
9541 | if (status != BCME_OK) { | |
9542 | DHD_ERROR(("%s Flow flush Response failure error status = %d \n", | |
9543 | __FUNCTION__, status)); | |
9544 | return; | |
9545 | } | |
9546 | ||
9547 | /* Boundary check of the flowid */ | |
9548 | if (flowid >= bus->dhd->num_flow_rings) { | |
9549 | DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, | |
9550 | flowid, bus->dhd->num_flow_rings)); | |
9551 | return; | |
9552 | } | |
9553 | ||
9554 | flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); | |
9555 | if (!flow_ring_node) { | |
9556 | DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__)); | |
9557 | return; | |
9558 | } | |
9559 | ||
9560 | ASSERT(flow_ring_node->flowid == flowid); | |
9561 | if (flow_ring_node->flowid != flowid) { | |
9562 | DHD_ERROR(("%s: flowid %d is different from the flowid " | |
9563 | "of the flow_ring_node %d\n", __FUNCTION__, flowid, | |
9564 | flow_ring_node->flowid)); | |
9565 | return; | |
9566 | } | |
9567 | ||
9568 | flow_ring_node->status = FLOW_RING_STATUS_OPEN; | |
9569 | return; | |
9570 | } | |
9571 | ||
9572 | uint32 | |
9573 | dhd_bus_max_h2d_queues(struct dhd_bus *bus) | |
9574 | { | |
9575 | return bus->max_submission_rings; | |
9576 | } | |
9577 | ||
9578 | /* To be symmetric with SDIO */ | |
9579 | void | |
9580 | dhd_bus_pktq_flush(dhd_pub_t *dhdp) | |
9581 | { | |
9582 | return; | |
9583 | } | |
9584 | ||
9585 | void | |
9586 | dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) | |
9587 | { | |
9588 | dhdp->bus->is_linkdown = val; | |
9589 | } | |
9590 | ||
9591 | int | |
9592 | dhd_bus_get_linkdown(dhd_pub_t *dhdp) | |
9593 | { | |
9594 | return dhdp->bus->is_linkdown; | |
9595 | } | |
9596 | ||
9597 | int | |
9598 | dhd_bus_get_cto(dhd_pub_t *dhdp) | |
9599 | { | |
9600 | return dhdp->bus->cto_triggered; | |
9601 | } | |
9602 | ||
9603 | #ifdef IDLE_TX_FLOW_MGMT | |
9604 | /* resume request */ | |
9605 | int | |
9606 | dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg) | |
9607 | { | |
9608 | flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; | |
9609 | ||
9610 | DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid)); | |
9611 | ||
9612 | flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING; | |
9613 | ||
9614 | /* Send Msg to device about flow ring resume */ | |
9615 | dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node); | |
9616 | ||
9617 | return BCME_OK; | |
9618 | } | |
9619 | ||
9620 | /* add the node back to active flowring */ | |
9621 | void | |
9622 | dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status) | |
9623 | { | |
9624 | ||
9625 | flow_ring_node_t *flow_ring_node; | |
9626 | ||
9627 | DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid)); | |
9628 | ||
9629 | flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); | |
9630 | ASSERT(flow_ring_node->flowid == flowid); | |
9631 | ||
9632 | if (status != BCME_OK) { | |
9633 | DHD_ERROR(("%s Error Status = %d \n", | |
9634 | __FUNCTION__, status)); | |
9635 | return; | |
9636 | } | |
9637 | ||
9638 | DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n", | |
9639 | __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len)); | |
9640 | ||
9641 | flow_ring_node->status = FLOW_RING_STATUS_OPEN; | |
9642 | ||
9643 | dhd_bus_schedule_queue(bus, flowid, FALSE); | |
9644 | return; | |
9645 | } | |
9646 | ||
9647 | /* Scan the flow rings in the active list for idle timeout */ | |
9648 | void | |
9649 | dhd_bus_check_idle_scan(dhd_bus_t *bus) | |
9650 | { | |
9651 | uint64 time_stamp; /* in millisec */ | |
9652 | uint64 diff; | |
9653 | ||
9654 | time_stamp = OSL_SYSUPTIME(); | |
9655 | diff = time_stamp - bus->active_list_last_process_ts; | |
9656 | ||
9657 | if (diff > IDLE_FLOW_LIST_TIMEOUT) { | |
9658 | dhd_bus_idle_scan(bus); | |
9659 | bus->active_list_last_process_ts = OSL_SYSUPTIME(); | |
9660 | } | |
9661 | ||
9662 | return; | |
9663 | } | |
9664 | ||
9665 | /* Scan the nodes in the active list until a non-idle node is found */ | |
9666 | void | |
9667 | dhd_bus_idle_scan(dhd_bus_t *bus) | |
9668 | { | |
9669 | dll_t *item, *prev; | |
9670 | flow_ring_node_t *flow_ring_node; | |
9671 | uint64 time_stamp, diff; | |
9672 | unsigned long flags; | |
9673 | uint16 ringid[MAX_SUSPEND_REQ]; | |
9674 | uint16 count = 0; | |
9675 | ||
9676 | time_stamp = OSL_SYSUPTIME(); | |
9677 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
9678 | ||
9679 | for (item = dll_tail_p(&bus->flowring_active_list); | |
9680 | !dll_end(&bus->flowring_active_list, item); item = prev) { | |
9681 | prev = dll_prev_p(item); | |
9682 | ||
9683 | flow_ring_node = dhd_constlist_to_flowring(item); | |
9684 | ||
9685 | if (flow_ring_node->flowid == (bus->max_submission_rings - 1)) | |
9686 | continue; | |
9687 | ||
9688 | if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { | |
9689 | /* Takes care of deleting zombie rings */ | |
9690 | /* delete from the active list */ | |
9691 | DHD_INFO(("deleting flow id %u from active list\n", | |
9692 | flow_ring_node->flowid)); | |
9693 | __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); | |
9694 | continue; | |
9695 | } | |
9696 | ||
9697 | diff = time_stamp - flow_ring_node->last_active_ts; | |
9698 | ||
9699 | if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) { | |
9700 | DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid)); | |
9701 | /* delete from the active list */ | |
9702 | __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); | |
9703 | flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED; | |
9704 | ringid[count] = flow_ring_node->flowid; | |
9705 | count++; | |
9706 | if (count == MAX_SUSPEND_REQ) { | |
9707 | /* create a batch message now!! */ | |
9708 | dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); | |
9709 | count = 0; | |
9710 | } | |
9711 | ||
9712 | } else { | |
9713 | ||
9714 | /* No more scanning, break from here! */ | |
9715 | break; | |
9716 | } | |
9717 | } | |
9718 | ||
9719 | if (count) { | |
9720 | dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); | |
9721 | } | |
9722 | ||
9723 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
9724 | ||
9725 | return; | |
9726 | } | |
9727 | ||
9728 | void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) | |
9729 | { | |
9730 | unsigned long flags; | |
9731 | dll_t* list; | |
9732 | ||
9733 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
9734 | /* check if the node is already at head, otherwise delete it and prepend */ | |
9735 | list = dll_head_p(&bus->flowring_active_list); | |
9736 | if (&flow_ring_node->list != list) { | |
9737 | dll_delete(&flow_ring_node->list); | |
9738 | dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); | |
9739 | } | |
9740 | ||
9741 | /* update flow ring timestamp */ | |
9742 | flow_ring_node->last_active_ts = OSL_SYSUPTIME(); | |
9743 | ||
9744 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
9745 | ||
9746 | return; | |
9747 | } | |
9748 | ||
9749 | void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) | |
9750 | { | |
9751 | unsigned long flags; | |
9752 | ||
9753 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
9754 | ||
9755 | dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); | |
9756 | /* update flow ring timestamp */ | |
9757 | flow_ring_node->last_active_ts = OSL_SYSUPTIME(); | |
9758 | ||
9759 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
9760 | ||
9761 | return; | |
9762 | } | |
9763 | void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) | |
9764 | { | |
9765 | dll_delete(&flow_ring_node->list); | |
9766 | } | |
9767 | ||
9768 | void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) | |
9769 | { | |
9770 | unsigned long flags; | |
9771 | ||
9772 | DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); | |
9773 | ||
9774 | __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); | |
9775 | ||
9776 | DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); | |
9777 | ||
9778 | return; | |
9779 | } | |
9780 | #endif /* IDLE_TX_FLOW_MGMT */ | |
9781 | ||
9782 | int | |
9783 | dhdpcie_bus_clock_start(struct dhd_bus *bus) | |
9784 | { | |
9785 | return dhdpcie_start_host_pcieclock(bus); | |
9786 | } | |
9787 | ||
9788 | int | |
9789 | dhdpcie_bus_clock_stop(struct dhd_bus *bus) | |
9790 | { | |
9791 | return dhdpcie_stop_host_pcieclock(bus); | |
9792 | } | |
9793 | ||
9794 | int | |
9795 | dhdpcie_bus_disable_device(struct dhd_bus *bus) | |
9796 | { | |
9797 | return dhdpcie_disable_device(bus); | |
9798 | } | |
9799 | ||
9800 | int | |
9801 | dhdpcie_bus_enable_device(struct dhd_bus *bus) | |
9802 | { | |
9803 | return dhdpcie_enable_device(bus); | |
9804 | } | |
9805 | ||
9806 | int | |
9807 | dhdpcie_bus_alloc_resource(struct dhd_bus *bus) | |
9808 | { | |
9809 | return dhdpcie_alloc_resource(bus); | |
9810 | } | |
9811 | ||
9812 | void | |
9813 | dhdpcie_bus_free_resource(struct dhd_bus *bus) | |
9814 | { | |
9815 | dhdpcie_free_resource(bus); | |
9816 | } | |
9817 | ||
9818 | int | |
9819 | dhd_bus_request_irq(struct dhd_bus *bus) | |
9820 | { | |
9821 | return dhdpcie_bus_request_irq(bus); | |
9822 | } | |
9823 | ||
9824 | bool | |
9825 | dhdpcie_bus_dongle_attach(struct dhd_bus *bus) | |
9826 | { | |
9827 | return dhdpcie_dongle_attach(bus); | |
9828 | } | |
9829 | ||
9830 | int | |
9831 | dhd_bus_release_dongle(struct dhd_bus *bus) | |
9832 | { | |
9833 | bool dongle_isolation; | |
9834 | osl_t *osh; | |
9835 | ||
9836 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
9837 | ||
9838 | if (bus) { | |
9839 | osh = bus->osh; | |
9840 | ASSERT(osh); | |
9841 | ||
9842 | if (bus->dhd) { | |
9843 | #if defined(DEBUGGER) || defined(DHD_DSCOPE) | |
9844 | debugger_close(); | |
9845 | #endif /* DEBUGGER || DHD_DSCOPE */ | |
9846 | ||
9847 | dongle_isolation = bus->dhd->dongle_isolation; | |
9848 | dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); | |
9849 | } | |
9850 | } | |
9851 | ||
9852 | return 0; | |
9853 | } | |
9854 | ||
9855 | int | |
9856 | dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable) | |
9857 | { | |
9858 | uint32 val; | |
9859 | if (enable) { | |
9860 | dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, | |
9861 | PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR); | |
9862 | val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); | |
9863 | dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN); | |
9864 | } else { | |
9865 | dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0); | |
9866 | val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); | |
9867 | dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN); | |
9868 | } | |
9869 | return 0; | |
9870 | } | |
9871 | ||
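| /** | |
| * Enable/disable PCIe CTO (completion timeout) prevention and recovery. | |
| * Unsupported below buscorerev 19, and on rev 19 only when the link is | |
| * trained above Gen1. When enabling, the CTO threshold and clock-check | |
| * count are programmed into the ctoctrl register; when disabling, ctoctrl | |
| * is cleared. | |
| */ | |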
9872 | int | |
9873 | dhdpcie_cto_init(struct dhd_bus *bus, bool enable) | |
9874 | { | |
9875 | if (bus->sih->buscorerev < 19) { | |
9876 | DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n", | |
9877 | __FUNCTION__, bus->sih->buscorerev)); | |
9878 | return BCME_UNSUPPORTED; | |
9879 | } | |
9880 | ||
9881 | if (bus->sih->buscorerev == 19) { | |
9882 | uint32 pcie_lnkst; | |
9883 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
9884 | OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS); | |
9885 | ||
9886 | pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx, | |
9887 | OFFSETOF(sbpcieregs_t, configdata), 0, 0); | |
9888 | ||
9889 | if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) & | |
9890 | PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) { | |
9891 | return BCME_UNSUPPORTED; | |
9892 | } | |
9893 | } | |
9894 | ||
9895 | bus->cto_enable = enable; | |
9896 | ||
9897 | dhdpcie_cto_cfg_init(bus, enable); | |
9898 | ||
9899 | if (enable) { | |
9900 | if (bus->cto_threshold == 0) { | |
9901 | bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT; | |
9902 | } | |
9903 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
9904 | OFFSETOF(sbpcieregs_t, ctoctrl), ~0, | |
9905 | ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) & | |
9906 | PCIE_CTO_TO_THRESHHOLD_MASK) | | |
9907 | ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) & | |
9908 | PCIE_CTO_CLKCHKCNT_MASK) | | |
9909 | PCIE_CTO_ENAB_MASK); | |
9910 | } else { | |
9911 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
9912 | OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0); | |
9913 | } | |
9914 | ||
9915 | DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n", | |
9916 | __FUNCTION__, bus->cto_enable)); | |
9917 | ||
9918 | return 0; | |
9919 | } | |
9920 | ||
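| /** | |
| * Recover from a CTO event: mask the CTO interrupt, dump the (still | |
| * accessible) DAR registers, assert the config-to-backplane reset, poll | |
| * and clear the timeout error in the DAR error log, then clear the | |
| * interrupt status and deassert the backplane reset. | |
| */ | |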
9921 | static int | |
9922 | dhdpcie_cto_error_recovery(struct dhd_bus *bus) | |
9923 | { | |
9924 | uint32 pci_intmask, err_status; | |
9925 | uint8 i = 0; | |
9926 | uint32 val; | |
9927 | ||
9928 | pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4); | |
9929 | dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK); | |
9930 | ||
9931 | DHD_OS_WAKE_LOCK(bus->dhd); | |
9932 | ||
9933 | DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref)); | |
9934 | ||
9935 | /* | |
9936 | * DAR still accessible | |
9937 | */ | |
9938 | dhd_bus_dump_dar_registers(bus); | |
9939 | ||
9940 | /* reset backplane */ | |
9941 | val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); | |
9942 | dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST); | |
9943 | ||
9944 | /* clear timeout error */ | |
9945 | while (1) { | |
9946 | err_status = si_corereg(bus->sih, bus->sih->buscoreidx, | |
9947 | DAR_ERRLOG(bus->sih->buscorerev), | |
9948 | 0, 0); | |
9949 | if (err_status & PCIE_CTO_ERR_MASK) { | |
9950 | si_corereg(bus->sih, bus->sih->buscoreidx, | |
9951 | DAR_ERRLOG(bus->sih->buscorerev), | |
9952 | ~0, PCIE_CTO_ERR_MASK); | |
9953 | } else { | |
9954 | break; | |
9955 | } | |
9956 | OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000); | |
9957 | i++; | |
9958 | if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) { | |
9959 | DHD_ERROR(("cto recovery fail\n")); | |
9960 | ||
9961 | DHD_OS_WAKE_UNLOCK(bus->dhd); | |
9962 | return BCME_ERROR; | |
9963 | } | |
9964 | } | |
9965 | ||
9966 | /* clear interrupt status */ | |
9967 | dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK); | |
9968 | ||
9969 | /* Halt ARM & remove reset */ | |
9970 | /* TBD: an ARM halt could be added here if needed */ | |
9971 | ||
9972 | /* reset SPROM_CFG_TO_SB_RST */ | |
9973 | val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); | |
9974 | ||
9975 | DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n", | |
9976 | PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); | |
9977 | dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST); | |
9978 | ||
9979 | val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4); | |
9980 | DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n", | |
9981 | PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val)); | |
9982 | ||
9983 | DHD_OS_WAKE_UNLOCK(bus->dhd); | |
9984 | ||
9985 | return BCME_OK; | |
9986 | } | |
9987 | ||
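| /* Set the PCIE_SSRESET_DIS_ENUM_RST bit in the PCIe subsystem control | |
|  * config register so that a subsystem reset does not trigger an | |
|  * enumeration reset. | |
|  */ | |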
9988 | void | |
9989 | dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus) | |
9990 | { | |
9991 | uint32 val; | |
9992 | ||
9993 | val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4); | |
9994 | dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, | |
9995 | val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT)); | |
9996 | } | |
9997 | ||
9998 | #if defined(DBG_PKT_MON) | |
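| /** | |
| * If the dongle advertises D2H D11 TX status (PCIE_SHARED2_D2H_D11_TX_STATUS), | |
| * acknowledge it by setting the corresponding H2D flag in the shared area so | |
| * that D11 TX status reporting is enabled for packet monitoring. | |
| */ | |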
9999 | static int | |
10000 | dhdpcie_init_d11status(struct dhd_bus *bus) | |
10001 | { | |
10002 | uint32 addr; | |
10003 | uint32 flags2; | |
10004 | int ret = 0; | |
10005 | ||
10006 | if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) { | |
10007 | flags2 = bus->pcie_sh->flags2; | |
10008 | addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2); | |
10009 | flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS; | |
10010 | ret = dhdpcie_bus_membytes(bus, TRUE, addr, | |
10011 | (uint8 *)&flags2, sizeof(flags2)); | |
10012 | if (ret < 0) { | |
10013 | DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n", | |
10014 | __FUNCTION__)); | |
10015 | return ret; | |
10016 | } | |
10017 | bus->pcie_sh->flags2 = flags2; | |
10018 | bus->dhd->d11_tx_status = TRUE; | |
10019 | } | |
10020 | return ret; | |
10021 | } | |
10022 | ||
10023 | #else | |
10024 | static int | |
10025 | dhdpcie_init_d11status(struct dhd_bus *bus) | |
10026 | { | |
10027 | return 0; | |
10028 | } | |
10029 | #endif /* DBG_PKT_MON */ | |
10030 | ||
10031 | #ifdef BCMPCIE_OOB_HOST_WAKE | |
10032 | int | |
10033 | dhd_bus_oob_intr_register(dhd_pub_t *dhdp) | |
10034 | { | |
10035 | return dhdpcie_oob_intr_register(dhdp->bus); | |
10036 | } | |
10037 | ||
10038 | void | |
10039 | dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) | |
10040 | { | |
10041 | dhdpcie_oob_intr_unregister(dhdp->bus); | |
10042 | } | |
10043 | ||
10044 | void | |
10045 | dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) | |
10046 | { | |
10047 | dhdpcie_oob_intr_set(dhdp->bus, enable); | |
10048 | } | |
10049 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
10050 | ||
10051 | bool | |
10052 | dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus) | |
10053 | { | |
10054 | return bus->dhd->d2h_hostrdy_supported; | |
10055 | } | |
10056 | ||
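| /** | |
| * Dump the 32-bit registers of the core selected by 'index' (each core | |
| * occupies a 4 KB window at SI_ENUM_BASE + (index << 12)) over the range | |
| * [first_addr, last_addr], using serialized backplane accesses. | |
| */ | |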
10057 | void | |
10058 | dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr) | |
10059 | { | |
10060 | dhd_bus_t *bus = pub->bus; | |
10061 | uint32 coreoffset = index << 12; | |
10062 | uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset; | |
10063 | uint32 value; | |
10064 | ||
10065 | while (first_addr <= last_addr) { | |
10066 | core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr; | |
10067 | if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) { | |
10068 | DHD_ERROR(("Invalid size/addr combination \n")); | |
10069 | } | |
10070 | DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value)); | |
10071 | first_addr = first_addr + 4; | |
10072 | } | |
10073 | } | |
10074 | ||
10075 | bool | |
10076 | dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus) | |
10077 | { | |
10078 | if (!bus->dhd) | |
10079 | return FALSE; | |
10080 | else if (bus->hwa_enab_bmap) { | |
10081 | return bus->dhd->hwa_enable; | |
10082 | } else { | |
10083 | return FALSE; | |
10084 | } | |
10085 | } | |
10086 | ||
10087 | bool | |
10088 | dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus) | |
10089 | { | |
10090 | if (!bus->dhd) | |
10091 | return FALSE; | |
10092 | else if (bus->idma_enabled) { | |
10093 | return bus->dhd->idma_enable; | |
10094 | } else { | |
10095 | return FALSE; | |
10096 | } | |
10097 | } | |
10098 | ||
10099 | bool | |
10100 | dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus) | |
10101 | { | |
10102 | if (!bus->dhd) | |
10103 | return FALSE; | |
10104 | else if (bus->ifrm_enabled) { | |
10105 | return bus->dhd->ifrm_enable; | |
10106 | } else { | |
10107 | return FALSE; | |
10108 | } | |
10109 | } | |
10110 | ||
10111 | bool | |
10112 | dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus) | |
10113 | { | |
10114 | if (!bus->dhd) { | |
10115 | return FALSE; | |
10116 | } else if (bus->dar_enabled) { | |
10117 | return bus->dhd->dar_enable; | |
10118 | } else { | |
10119 | return FALSE; | |
10120 | } | |
10121 | } | |
10122 | ||
10123 | void | |
10124 | dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option) | |
10125 | { | |
10126 | DHD_ERROR(("ENABLING DW:%d\n", dw_option)); | |
10127 | bus->dw_option = dw_option; | |
10128 | } | |
10129 | ||
10130 | void | |
10131 | dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) | |
10132 | { | |
10133 | trap_t *tr = &bus->dhd->last_trap_info; | |
10134 | bcm_bprintf(strbuf, | |
10135 | "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," | |
10136 | " lp 0x%x, rpc 0x%x" | |
10137 | "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " | |
10138 | "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, " | |
10139 | "r10 0x%x, r11 0x%x, r12 0x%x\n\n", | |
10140 | ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), | |
10141 | ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), | |
10142 | ltoh32(bus->pcie_sh->trap_addr), | |
10143 | ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), | |
10144 | ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7), | |
10145 | ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10), | |
10146 | ltoh32(tr->r11), ltoh32(tr->r12)); | |
10147 | } | |
10148 | ||
10149 | int | |
10150 | dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) | |
10151 | { | |
10152 | int bcmerror = 0; | |
10153 | struct dhd_bus *bus = dhdp->bus; | |
10154 | ||
10155 | if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) { | |
10156 | DHD_ERROR(("Invalid size/addr combination \n")); | |
10157 | bcmerror = BCME_ERROR; | |
10158 | } | |
10159 | ||
10160 | return bcmerror; | |
10161 | } | |
10162 | ||
10163 | int | |
10164 | dhd_get_idletime(dhd_pub_t *dhd) | |
10165 | { | |
10166 | return dhd->bus->idletime; | |
10167 | } | |
10168 | ||
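| /* Single serialized backplane register read/write with a short settling | |
|  * delay; the address and value are logged for debugging. | |
|  */ | |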
10169 | static INLINE void | |
10170 | dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read) | |
10171 | { | |
10172 | OSL_DELAY(1); | |
10173 | if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) { | |
10174 | DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr)); | |
10175 | } else { | |
10176 | DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read)); | |
10177 | } | |
10178 | return; | |
10179 | } | |
10180 | ||
10181 | #ifdef DHD_SSSR_DUMP | |
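| /** | |
| * Dump an SSSR FIFO: reset the FIFO offset by writing 0 to addr_reg, then | |
| * read fifo_size bytes, one 32-bit word at a time, from data_reg into buf. | |
| */ | |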
10182 | static int | |
10183 | dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, | |
10184 | uint addr_reg, uint data_reg) | |
10185 | { | |
10186 | uint addr; | |
10187 | uint val = 0; | |
10188 | int i; | |
10189 | ||
10190 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10191 | ||
10192 | if (!buf) { | |
10193 | DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); | |
10194 | return BCME_ERROR; | |
10195 | } | |
10196 | ||
10197 | if (!fifo_size) { | |
10198 | DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); | |
10199 | return BCME_ERROR; | |
10200 | } | |
10201 | ||
10202 | /* Set the base address offset to 0 */ | |
10203 | addr = addr_reg; | |
10204 | val = 0; | |
10205 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10206 | ||
10207 | addr = data_reg; | |
10208 | /* Read 4 bytes at once and loop for fifo_size / 4 */ | |
10209 | for (i = 0; i < fifo_size / 4; i++) { | |
10210 | if (serialized_backplane_access(dhd->bus, addr, | |
10211 | sizeof(uint), &val, TRUE) != BCME_OK) { | |
10212 | DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__)); | |
10213 | return BCME_ERROR; | |
10214 | } | |
10215 | buf[i] = val; | |
10216 | OSL_DELAY(1); | |
10217 | } | |
10218 | return BCME_OK; | |
10219 | } | |
10220 | ||
10221 | static int | |
10222 | dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, | |
10223 | uint addr_reg) | |
10224 | { | |
10225 | uint addr; | |
10226 | uint val = 0; | |
10227 | int i; | |
10228 | si_t *sih = dhd->bus->sih; | |
10229 | ||
10230 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10231 | ||
10232 | if (!buf) { | |
10233 | DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); | |
10234 | return BCME_ERROR; | |
10235 | } | |
10236 | ||
10237 | if (!fifo_size) { | |
10238 | DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); | |
10239 | return BCME_ERROR; | |
10240 | } | |
10241 | ||
10242 | if (addr_reg) { | |
10243 | ||
10244 | if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) && | |
10245 | dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) { | |
10246 | int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf, | |
10247 | fifo_size); | |
10248 | if (err != BCME_OK) { | |
10249 | DHD_ERROR(("%s: Error reading dig dump from dongle !\n", | |
10250 | __FUNCTION__)); | |
10251 | } | |
10252 | } else { | |
10253 | /* Check if vasip clk is disabled, if yes enable it */ | |
10254 | addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; | |
10255 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10256 | if (!val) { | |
10257 | val = 1; | |
10258 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10259 | } | |
10260 | ||
10261 | addr = addr_reg; | |
10262 | /* Read 4 bytes at once and loop for fifo_size / 4 */ | |
10263 | for (i = 0; i < fifo_size / 4; i++, addr += 4) { | |
10264 | if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), | |
10265 | &val, TRUE) != BCME_OK) { | |
10266 | DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__, | |
10267 | addr)); | |
10268 | return BCME_ERROR; | |
10269 | } | |
10270 | buf[i] = val; | |
10271 | OSL_DELAY(1); | |
10272 | } | |
10273 | } | |
10274 | } else { | |
10275 | uint cur_coreid; | |
10276 | uint chipc_corerev; | |
10277 | chipcregs_t *chipcregs; | |
10278 | ||
10279 | /* Save the current core */ | |
10280 | cur_coreid = si_coreid(sih); | |
10281 | ||
10282 | /* Switch to ChipC */ | |
10283 | chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); | |
10284 | ||
10285 | chipc_corerev = si_corerev(sih); | |
10286 | ||
10287 | if ((chipc_corerev == 64) || (chipc_corerev == 65)) { | |
10288 | W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0); | |
10289 | ||
10290 | /* Read 4 bytes at once and loop for fifo_size / 4 */ | |
10291 | for (i = 0; i < fifo_size / 4; i++) { | |
10292 | buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data); | |
10293 | OSL_DELAY(1); | |
10294 | } | |
10295 | } | |
10296 | ||
10297 | /* Switch back to the original core */ | |
10298 | si_setcore(sih, cur_coreid, 0); | |
10299 | } | |
10300 | ||
10301 | return BCME_OK; | |
10302 | } | |
10303 | ||
10304 | #if defined(EWP_ETD_PRSRV_LOGS) | |
10305 | void | |
10306 | dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, | |
10307 | uint8 *ext_trap_data, void *event_decode_data) | |
10308 | { | |
10309 | hnd_ext_trap_hdr_t *hdr = NULL; | |
10310 | bcm_tlv_t *tlv; | |
10311 | eventlog_trapdata_info_t *etd_evtlog = NULL; | |
10312 | eventlog_trap_buf_info_t *evtlog_buf_arr = NULL; | |
10313 | uint arr_size = 0; | |
10314 | int i = 0; | |
10315 | int err = 0; | |
10316 | uint32 seqnum = 0; | |
10317 | ||
10318 | if (!ext_trap_data || !event_decode_data || !dhd) | |
10319 | return; | |
10320 | ||
10321 | if (!dhd->concise_dbg_buf) | |
10322 | return; | |
10323 | ||
10324 | /* First word is original trap_data, skip */ | |
10325 | ext_trap_data += sizeof(uint32); | |
10326 | ||
10327 | hdr = (hnd_ext_trap_hdr_t *)ext_trap_data; | |
10328 | tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA); | |
10329 | if (tlv) { | |
10330 | uint32 baseaddr = 0; | |
10331 | uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4; | |
10332 | ||
10333 | etd_evtlog = (eventlog_trapdata_info_t *)tlv->data; | |
10334 | DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; " | |
10335 | "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__, | |
10336 | (etd_evtlog->num_elements), | |
10337 | ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr))); | |
10338 | arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements); | |
10339 | if (!arr_size) { | |
10340 | DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__)); | |
10341 | return; | |
10342 | } | |
10343 | evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size); | |
10344 | if (!evtlog_buf_arr) { | |
10345 | DHD_ERROR(("%s: out of memory !\n", __FUNCTION__)); | |
10346 | return; | |
10347 | } | |
10348 | ||
10349 | /* boundary check */ | |
10350 | baseaddr = etd_evtlog->log_arr_addr; | |
10351 | if ((baseaddr < dhd->bus->dongle_ram_base) || | |
10352 | ((baseaddr + arr_size) > endaddr)) { | |
10353 | DHD_ERROR(("%s: Error reading invalid address\n", | |
10354 | __FUNCTION__)); | |
10355 | goto err; | |
10356 | } | |
10357 | ||
10358 | /* read the eventlog_trap_buf_info_t array from dongle memory */ | |
10359 | err = dhdpcie_bus_membytes(dhd->bus, FALSE, | |
10360 | (ulong)(etd_evtlog->log_arr_addr), | |
10361 | (uint8 *)evtlog_buf_arr, arr_size); | |
10362 | if (err != BCME_OK) { | |
10363 | DHD_ERROR(("%s: Error reading event log array from dongle !\n", | |
10364 | __FUNCTION__)); | |
10365 | goto err; | |
10366 | } | |
10367 | /* ntoh is required only for seq_num: event logs coming from the | |
10368 | * info ring are sent by the dongle in that byte order, and the | |
10369 | * dongle follows the same convention for ETD. | |
10370 | */ | |
10371 | seqnum = ntoh32(etd_evtlog->seq_num); | |
10372 | memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN); | |
10373 | for (i = 0; i < (etd_evtlog->num_elements); ++i) { | |
10374 | /* boundary check */ | |
10375 | baseaddr = evtlog_buf_arr[i].buf_addr; | |
10376 | if ((baseaddr < dhd->bus->dongle_ram_base) || | |
10377 | ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) { | |
10378 | DHD_ERROR(("%s: Error reading invalid address\n", | |
10379 | __FUNCTION__)); | |
10380 | goto err; | |
10381 | } | |
10382 | /* read each individual event log buf from dongle memory */ | |
10383 | err = dhdpcie_bus_membytes(dhd->bus, FALSE, | |
10384 | ((ulong)evtlog_buf_arr[i].buf_addr), | |
10385 | dhd->concise_dbg_buf, (evtlog_buf_arr[i].len)); | |
10386 | if (err != BCME_OK) { | |
10387 | DHD_ERROR(("%s: Error reading event log buffer from dongle !\n", | |
10388 | __FUNCTION__)); | |
10389 | goto err; | |
10390 | } | |
10391 | dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf, | |
10392 | event_decode_data, (evtlog_buf_arr[i].len), | |
10393 | FALSE, hton32(seqnum)); | |
10394 | ++seqnum; | |
10395 | } | |
10396 | err: | |
10397 | MFREE(dhd->osh, evtlog_buf_arr, arr_size); | |
10398 | } else { | |
10399 | DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__)); | |
10400 | } | |
10401 | } | |
10402 | #endif /* EWP_ETD_PRSRV_LOGS */ | |
10403 | ||
10404 | static uint32 | |
10405 | dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val) | |
10406 | { | |
10407 | uint addr; | |
10408 | uint val = 0; | |
10409 | ||
10410 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10411 | ||
10412 | /* conditionally restore bits [11:8] of PowerCtrl from the saved value */ | |
10413 | addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; | |
10414 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10415 | if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) { | |
10416 | addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; | |
10417 | dhd_sbreg_op(dhd, addr, ®_val, FALSE); | |
10418 | } | |
10419 | return BCME_OK; | |
10420 | } | |
10421 | ||
10422 | static uint32 | |
10423 | dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd) | |
10424 | { | |
10425 | uint addr; | |
10426 | uint val = 0, reg_val = 0; | |
10427 | ||
10428 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10429 | ||
10430 | /* conditionally clear bits [11:8] of PowerCtrl */ | |
10431 | addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; | |
10432 | dhd_sbreg_op(dhd, addr, ®_val, TRUE); | |
10433 | if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) { | |
10434 | addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; | |
10435 | val = 0; | |
10436 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10437 | } | |
10438 | return reg_val; | |
10439 | } | |
10440 | ||
10441 | static int | |
10442 | dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd) | |
10443 | { | |
10444 | uint addr; | |
10445 | uint val; | |
10446 | ||
10447 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10448 | ||
10449 | /* clear chipcommon intmask */ | |
10450 | addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask; | |
10451 | val = 0x0; | |
10452 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10453 | ||
10454 | /* clear PMUIntMask0 */ | |
10455 | addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0; | |
10456 | val = 0x0; | |
10457 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10458 | ||
10459 | /* clear PMUIntMask1 */ | |
10460 | addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1; | |
10461 | val = 0x0; | |
10462 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10463 | ||
10464 | /* clear res_req_timer */ | |
10465 | addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer; | |
10466 | val = 0x0; | |
10467 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10468 | ||
10469 | /* clear macresreqtimer */ | |
10470 | addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer; | |
10471 | val = 0x0; | |
10472 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10473 | ||
10474 | /* clear macresreqtimer1 */ | |
10475 | addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1; | |
10476 | val = 0x0; | |
10477 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10478 | ||
10479 | /* clear VasipClkEn */ | |
10480 | if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { | |
10481 | addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; | |
10482 | val = 0x0; | |
10483 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10484 | } | |
10485 | ||
10486 | return BCME_OK; | |
10487 | } | |
10488 | ||
10489 | static void | |
10490 | dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd) | |
10491 | { | |
10492 | #define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1) | |
10493 | #define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4) | |
10494 | uint trap_data_mask[MAX_NUM_D11CORES] = | |
10495 | {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK}; | |
10496 | int i; | |
10497 | /* Apply only for 4375 chip */ | |
10498 | if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) { | |
10499 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10500 | if (dhd->sssr_d11_outofreset[i] && | |
10501 | (dhd->dongle_trap_data & trap_data_mask[i])) { | |
10502 | dhd->sssr_d11_outofreset[i] = TRUE; | |
10503 | } else { | |
10504 | dhd->sssr_d11_outofreset[i] = FALSE; | |
10505 | } | |
10506 | DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with " | |
10507 | "trap_data:0x%x-0x%x\n", | |
10508 | __FUNCTION__, i, dhd->sssr_d11_outofreset[i], | |
10509 | dhd->dongle_trap_data, trap_data_mask[i])); | |
10510 | } | |
10511 | } | |
10512 | } | |
10513 | ||
10514 | static int | |
10515 | dhdpcie_d11_check_outofreset(dhd_pub_t *dhd) | |
10516 | { | |
10517 | int i; | |
10518 | uint addr; | |
10519 | uint val = 0; | |
10520 | ||
10521 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10522 | ||
10523 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10524 | /* Check if bit 0 of resetctrl is cleared */ | |
10525 | addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; | |
10526 | if (!addr) { | |
10527 | DHD_ERROR(("%s: skipping core[%d] as its resetctrl 'addr' is 0\n", | |
10528 | __FUNCTION__, i)); | |
10529 | continue; | |
10530 | } | |
10531 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10532 | if (!(val & 1)) { | |
10533 | dhd->sssr_d11_outofreset[i] = TRUE; | |
10534 | } else { | |
10535 | dhd->sssr_d11_outofreset[i] = FALSE; | |
10536 | } | |
10537 | DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n", | |
10538 | __FUNCTION__, i, dhd->sssr_d11_outofreset[i])); | |
10539 | } | |
10540 | dhdpcie_update_d11_status_from_trapdata(dhd); | |
10541 | ||
10542 | return BCME_OK; | |
10543 | } | |
10544 | ||
10545 | static int | |
10546 | dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd) | |
10547 | { | |
10548 | int i; | |
10549 | uint addr; | |
10550 | uint val = 0; | |
10551 | ||
10552 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10553 | ||
10554 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10555 | if (dhd->sssr_d11_outofreset[i]) { | |
10556 | /* clear the clock request only if itopoobb is non-zero */ | |
10557 | addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb; | |
10558 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10559 | if (val != 0) { | |
10560 | /* clear clockcontrolstatus */ | |
10561 | addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus; | |
10562 | val = | |
10563 | dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val; | |
10564 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10565 | } | |
10566 | } | |
10567 | } | |
10568 | return BCME_OK; | |
10569 | } | |
10570 | ||
10571 | static int | |
10572 | dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd) | |
10573 | { | |
10574 | uint addr; | |
10575 | uint val = 0; | |
10576 | ||
10577 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10578 | ||
10579 | /* Check if bit 0 of resetctrl is cleared */ | |
10580 | addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; | |
10581 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10582 | if (!(val & 1)) { | |
10583 | /* clear the clock request only if itopoobb is non-zero */ | |
10584 | addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb; | |
10585 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10586 | if (val != 0) { | |
10587 | /* clear clockcontrolstatus */ | |
10588 | addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus; | |
10589 | val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val; | |
10590 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10591 | } | |
10592 | ||
10593 | if (MULTIBP_ENAB(dhd->bus->sih)) { | |
10594 | uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; | |
10595 | ||
10596 | /* Just halt ARM but do not reset the core */ | |
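/*
 * 'resetctrl' here is an address inside the ARM core's AI wrapper; wrappers are
 * SI_CORE_SIZE aligned, so masking off the low bits gives the wrapper base and
 * adding the aidmp_t ioctrl offset points at the wrapper IOCtrl register, where
 * SICF_CPUHALT is then set.
 */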
10597 | resetctrl &= ~(SI_CORE_SIZE - 1); | |
10598 | resetctrl += OFFSETOF(aidmp_t, ioctrl); | |
10599 | ||
10600 | dhd_sbreg_op(dhd, resetctrl, &val, TRUE); | |
10601 | val |= SICF_CPUHALT; | |
10602 | dhd_sbreg_op(dhd, resetctrl, &val, FALSE); | |
10603 | } | |
10604 | } | |
10605 | return BCME_OK; | |
10606 | } | |
10607 | ||
10608 | static int | |
10609 | dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd) | |
10610 | { | |
10611 | uint addr; | |
10612 | uint val = 0; | |
10613 | ||
10614 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10615 | ||
10616 | /* Check if bit 0 of resetctrl is cleared */ | |
10617 | addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; | |
10618 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10619 | if (!(val & 1)) { | |
10620 | if (MULTIBP_ENAB(dhd->bus->sih)) { | |
10621 | uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; | |
10622 | ||
10623 | /* Take ARM out of halt but do not reset core */ | |
10624 | resetctrl &= ~(SI_CORE_SIZE - 1); | |
10625 | resetctrl += OFFSETOF(aidmp_t, ioctrl); | |
10626 | ||
10627 | dhd_sbreg_op(dhd, resetctrl, &val, TRUE); | |
10628 | val &= ~SICF_CPUHALT; | |
10629 | dhd_sbreg_op(dhd, resetctrl, &val, FALSE); | |
10630 | dhd_sbreg_op(dhd, resetctrl, &val, TRUE); | |
10631 | } | |
10632 | } | |
10633 | ||
10634 | return BCME_OK; | |
10635 | } | |
10636 | ||
10637 | static int | |
10638 | dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd) | |
10639 | { | |
10640 | uint addr; | |
10641 | uint val = 0; | |
10642 | ||
10643 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10644 | ||
10645 | /* clear the clock request only if itopoobb is non-zero */ | |
10646 | addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb; | |
10647 | dhd_sbreg_op(dhd, addr, &val, TRUE); | |
10648 | if (val) { | |
10649 | /* clear clockcontrolstatus */ | |
10650 | addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus; | |
10651 | val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val; | |
10652 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10653 | } | |
10654 | return BCME_OK; | |
10655 | } | |
10656 | ||
10657 | static int | |
10658 | dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd) | |
10659 | { | |
10660 | uint addr; | |
10661 | uint val = 0; | |
10662 | ||
10663 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10664 | ||
10665 | addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate; | |
10666 | val = LTR_ACTIVE; | |
10667 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10668 | ||
10669 | val = LTR_SLEEP; | |
10670 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10671 | ||
10672 | return BCME_OK; | |
10673 | } | |
10674 | ||
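/*
 * Clear any outstanding backplane clock requests from the ARM, D11 and PCIe
 * cores in turn. Each helper reads the core's wrapper itopoobb register and
 * only writes clockcontrolstatus when a request is pending; the ARM helper
 * additionally halts the CPU (SICF_CPUHALT) on multi-backplane chips.
 */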
10675 | static int | |
10676 | dhdpcie_clear_clk_req(dhd_pub_t *dhd) | |
10677 | { | |
10678 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10679 | ||
10680 | dhdpcie_arm_clear_clk_req(dhd); | |
10681 | ||
10682 | dhdpcie_d11_clear_clk_req(dhd); | |
10683 | ||
10684 | dhdpcie_pcie_clear_clk_req(dhd); | |
10685 | ||
10686 | return BCME_OK; | |
10687 | } | |
10688 | ||
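/*
 * Reset sequence for each D11 core found out of reset: put the core back into
 * reset (resetctrl bit 0 = 1), step the wrapper ioctrl through the first two
 * ioctrl_resetseq_val values, release reset (bit 0 = 0), then apply the
 * remaining three ioctrl_resetseq_val values.
 */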
10689 | static int | |
10690 | dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd) | |
10691 | { | |
10692 | int i; | |
10693 | uint addr; | |
10694 | uint val = 0; | |
10695 | ||
10696 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10697 | ||
10698 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10699 | if (dhd->sssr_d11_outofreset[i]) { | |
10700 | /* disable core by setting bit 0 */ | |
10701 | addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; | |
10702 | val = 1; | |
10703 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10704 | OSL_DELAY(6000); | |
10705 | ||
10706 | addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl; | |
10707 | val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0]; | |
10708 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10709 | ||
10710 | val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1]; | |
10711 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10712 | ||
10713 | /* enable core by clearing bit 0 */ | |
10714 | addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; | |
10715 | val = 0; | |
10716 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10717 | ||
10718 | addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl; | |
10719 | val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2]; | |
10720 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10721 | ||
10722 | val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3]; | |
10723 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10724 | ||
10725 | val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4]; | |
10726 | dhd_sbreg_op(dhd, addr, &val, FALSE); | |
10727 | } | |
10728 | } | |
10729 | return BCME_OK; | |
10730 | } | |
10731 | ||
10732 | static int | |
10733 | dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd) | |
10734 | { | |
10735 | int i; | |
10736 | ||
10737 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10738 | ||
10739 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10740 | if (dhd->sssr_d11_outofreset[i]) { | |
10741 | dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i], | |
10742 | dhd->sssr_reg_info.mac_regs[i].sr_size, | |
10743 | dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress, | |
10744 | dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata); | |
10745 | } | |
10746 | } | |
10747 | ||
10748 | if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { | |
10749 | dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before, | |
10750 | dhd->sssr_reg_info.vasip_regs.vasip_sr_size, | |
10751 | dhd->sssr_reg_info.vasip_regs.vasip_sr_addr); | |
10752 | } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && | |
10753 | dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) { | |
10754 | dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before, | |
10755 | dhd->sssr_reg_info.dig_mem_info.dig_sr_size, | |
10756 | dhd->sssr_reg_info.dig_mem_info.dig_sr_addr); | |
10757 | } | |
10758 | ||
10759 | return BCME_OK; | |
10760 | } | |
10761 | ||
10762 | static int | |
10763 | dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd) | |
10764 | { | |
10765 | int i; | |
10766 | ||
10767 | DHD_ERROR(("%s\n", __FUNCTION__)); | |
10768 | ||
10769 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10770 | if (dhd->sssr_d11_outofreset[i]) { | |
10771 | dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i], | |
10772 | dhd->sssr_reg_info.mac_regs[i].sr_size, | |
10773 | dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress, | |
10774 | dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata); | |
10775 | } | |
10776 | } | |
10777 | ||
10778 | if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { | |
10779 | dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, | |
10780 | dhd->sssr_reg_info.vasip_regs.vasip_sr_size, | |
10781 | dhd->sssr_reg_info.vasip_regs.vasip_sr_addr); | |
10782 | } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) && | |
10783 | dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) { | |
10784 | dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, | |
10785 | dhd->sssr_reg_info.dig_mem_info.dig_sr_size, | |
10786 | dhd->sssr_reg_info.dig_mem_info.dig_sr_addr); | |
10787 | } | |
10788 | ||
10789 | return BCME_OK; | |
10790 | } | |
10791 | ||
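/*
 * Top-level SSSR capture: snapshot the D11/digital save-restore memories before
 * and after forcing the WL domain through a save-restore cycle. The sequence is:
 * check which D11 cores are out of reset, take the "before" dump, mask interrupts
 * and PMU timers, save ChipCommon PowerCtrl, drop clock requests, request LTR
 * sleep, drop (and later re-assert) the WL power domain request, then restore
 * PowerCtrl, bring the D11 cores out of reset and take the "after" dump.
 */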
10792 | int | |
10793 | dhdpcie_sssr_dump(dhd_pub_t *dhd) | |
10794 | { | |
10795 | uint32 powerctrl_val; | |
10796 | ||
10797 | if (!dhd->sssr_inited) { | |
10798 | DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); | |
10799 | return BCME_ERROR; | |
10800 | } | |
10801 | ||
10802 | if (dhd->bus->is_linkdown) { | |
10803 | DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); | |
10804 | return BCME_ERROR; | |
10805 | } | |
10806 | ||
10807 | DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) " | |
10808 | "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, | |
10809 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
10810 | OFFSETOF(chipcregs_t, powerctl), 0, 0), | |
10811 | si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), | |
10812 | PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), | |
10813 | PMU_REG(dhd->bus->sih, res_state, 0, 0))); | |
10814 | ||
10815 | dhdpcie_d11_check_outofreset(dhd); | |
10816 | ||
10817 | DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__)); | |
10818 | if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) { | |
10819 | DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__)); | |
10820 | return BCME_ERROR; | |
10821 | } | |
10822 | ||
10823 | dhdpcie_clear_intmask_and_timer(dhd); | |
10824 | powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd); | |
10825 | dhdpcie_clear_clk_req(dhd); | |
10826 | dhdpcie_pcie_send_ltrsleep(dhd); | |
10827 | ||
10828 | if (MULTIBP_ENAB(dhd->bus->sih)) { | |
10829 | dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE); | |
10830 | } | |
10831 | ||
10832 | /* Wait for some time before Restore */ | |
10833 | OSL_DELAY(6000); | |
10834 | ||
10835 | DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) " | |
10836 | "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, | |
10837 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
10838 | OFFSETOF(chipcregs_t, powerctl), 0, 0), | |
10839 | si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), | |
10840 | PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), | |
10841 | PMU_REG(dhd->bus->sih, res_state, 0, 0))); | |
10842 | ||
10843 | if (MULTIBP_ENAB(dhd->bus->sih)) { | |
10844 | dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE); | |
10845 | /* Add delay for WL domain to power up */ | |
10846 | OSL_DELAY(15000); | |
10847 | ||
10848 | DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) " | |
10849 | "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__, | |
10850 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
10851 | OFFSETOF(chipcregs_t, powerctl), 0, 0), | |
10852 | si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0), | |
10853 | PMU_REG(dhd->bus->sih, retention_ctl, 0, 0), | |
10854 | PMU_REG(dhd->bus->sih, res_state, 0, 0))); | |
10855 | } | |
10856 | ||
10857 | dhdpcie_arm_resume_clk_req(dhd); | |
10858 | dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val); | |
10859 | dhdpcie_bring_d11_outofreset(dhd); | |
10860 | ||
10861 | DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); | |
10862 | if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { | |
10863 | DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); | |
10864 | return BCME_ERROR; | |
10865 | } | |
10866 | dhd->sssr_dump_collected = TRUE; | |
10867 | dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR); | |
10868 | ||
10869 | return BCME_OK; | |
10870 | } | |
10871 | ||
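/*
 * FIS support: dhdpcie_fis_trigger() starts a FIS capture by writing
 * DAR_FIS_START_MASK to the DAR FIS control register and then waits ~100 ms.
 * dhdpcie_fis_dump() below raises all PMU resources, brings the D11 cores out
 * of reset, clears the FIS-done status and collects the post-SR dump.
 */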
10872 | static int | |
10873 | dhdpcie_fis_trigger(dhd_pub_t *dhd) | |
10874 | { | |
10875 | if (!dhd->sssr_inited) { | |
10876 | DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); | |
10877 | return BCME_ERROR; | |
10878 | } | |
10879 | ||
10880 | if (dhd->bus->is_linkdown) { | |
10881 | DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); | |
10882 | return BCME_ERROR; | |
10883 | } | |
10884 | ||
10885 | /* Trigger FIS */ | |
10886 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
10887 | DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK); | |
10888 | OSL_DELAY(100 * 1000); | |
10889 | ||
10890 | return BCME_OK; | |
10891 | } | |
10892 | ||
10893 | int | |
10894 | dhd_bus_fis_trigger(dhd_pub_t *dhd) | |
10895 | { | |
10896 | return dhdpcie_fis_trigger(dhd); | |
10897 | } | |
10898 | ||
10899 | static int | |
10900 | dhdpcie_fis_dump(dhd_pub_t *dhd) | |
10901 | { | |
10902 | int i; | |
10903 | ||
10904 | if (!dhd->sssr_inited) { | |
10905 | DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); | |
10906 | return BCME_ERROR; | |
10907 | } | |
10908 | ||
10909 | if (dhd->bus->is_linkdown) { | |
10910 | DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); | |
10911 | return BCME_ERROR; | |
10912 | } | |
10913 | ||
10914 | /* bring up all pmu resources */ | |
10915 | PMU_REG(dhd->bus->sih, min_res_mask, ~0, | |
10916 | PMU_REG(dhd->bus->sih, max_res_mask, 0, 0)); | |
10917 | OSL_DELAY(10 * 1000); | |
10918 | ||
10919 | for (i = 0; i < MAX_NUM_D11CORES; i++) { | |
10920 | dhd->sssr_d11_outofreset[i] = TRUE; | |
10921 | } | |
10922 | ||
10923 | dhdpcie_bring_d11_outofreset(dhd); | |
10924 | OSL_DELAY(6000); | |
10925 | ||
10926 | /* clear FIS Done */ | |
10927 | PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK); | |
10928 | ||
10929 | dhdpcie_d11_check_outofreset(dhd); | |
10930 | ||
10931 | DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); | |
10932 | if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { | |
10933 | DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); | |
10934 | return BCME_ERROR; | |
10935 | } | |
10936 | ||
10937 | dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS); | |
10938 | ||
10939 | return BCME_OK; | |
10940 | } | |
10941 | ||
10942 | int | |
10943 | dhd_bus_fis_dump(dhd_pub_t *dhd) | |
10944 | { | |
10945 | return dhdpcie_fis_dump(dhd); | |
10946 | } | |
10947 | #endif /* DHD_SSSR_DUMP */ | |
10948 | ||
10949 | #ifdef DHD_WAKE_STATUS | |
10950 | wake_counts_t* | |
10951 | dhd_bus_get_wakecount(dhd_pub_t *dhd) | |
10952 | { | |
10953 | return &dhd->bus->wake_counts; | |
10954 | } | |
10955 | int | |
10956 | dhd_bus_get_bus_wake(dhd_pub_t *dhd) | |
10957 | { | |
10958 | return bcmpcie_set_get_wake(dhd->bus, 0); | |
10959 | } | |
10960 | #endif /* DHD_WAKE_STATUS */ | |
10961 | ||
10962 | /* Writes random number(s) to the TCM. FW upon initialization reads this TCM location | |
10963 | * to fetch the random number, and uses it to randomize heap address space layout. | |
10964 | */ | |
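/*
 * Rough sketch of the TCM layout assumed by the code below (the metadata is
 * written at 'addr', which is then moved back by 'count' bytes so the random
 * data sits immediately below it):
 *
 *   addr = dongle_ram_base + ramsize - BCM_NVRAM_OFFSET_TCM
 *          - (compressed NVRAM size + sizeof(bcm_rand_metadata_t))
 *            -> bcm_rand_metadata_t { signature, count } written here
 *   addr - count
 *            -> BCM_ENTROPY_HOST_NBYTES of random data written here
 */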
10965 | static int | |
10966 | dhdpcie_wrt_rnd(struct dhd_bus *bus) | |
10967 | { | |
10968 | bcm_rand_metadata_t rnd_data; | |
10969 | uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES]; | |
10970 | uint32 count = BCM_ENTROPY_HOST_NBYTES; | |
10971 | int ret = 0; | |
10972 | uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) - | |
10973 | ((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data)); | |
10974 | ||
10975 | memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES); | |
10976 | rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE); | |
10977 | rnd_data.count = htol32(count); | |
10978 | /* write the metadata about random number */ | |
10979 | dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data)); | |
10980 | /* move the write address back by 'count' bytes so the random data precedes the metadata */ | |
10981 | addr -= count; | |
10982 | ||
10983 | /* Now get & write the random number(s) */ | |
10984 | ret = dhd_get_random_bytes(rand_buf, count); | |
10985 | if (ret != BCME_OK) { | |
10986 | return ret; | |
10987 | } | |
10988 | dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count); | |
10989 | ||
10990 | return BCME_OK; | |
10991 | } | |
10992 | ||
10993 | void | |
10994 | dhd_pcie_intr_count_dump(dhd_pub_t *dhd) | |
10995 | { | |
10996 | struct dhd_bus *bus = dhd->bus; | |
10997 | uint64 current_time; | |
10998 | ||
10999 | DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n")); | |
11000 | DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n", | |
11001 | bus->resume_intr_enable_count, bus->dpc_intr_enable_count)); | |
11002 | DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n", | |
11003 | bus->isr_intr_disable_count, bus->suspend_intr_disable_count)); | |
11004 | #ifdef BCMPCIE_OOB_HOST_WAKE | |
11005 | DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n", | |
11006 | bus->oob_intr_count, bus->oob_intr_enable_count, | |
11007 | bus->oob_intr_disable_count)); | |
11008 | DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n", | |
11009 | dhdpcie_get_oob_irq_num(bus), | |
11010 | GET_SEC_USEC(bus->last_oob_irq_time))); | |
11011 | DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT | |
11012 | " last_oob_irq_disable_time="SEC_USEC_FMT"\n", | |
11013 | GET_SEC_USEC(bus->last_oob_irq_enable_time), | |
11014 | GET_SEC_USEC(bus->last_oob_irq_disable_time))); | |
11015 | DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n", | |
11016 | dhdpcie_get_oob_irq_status(bus), | |
11017 | dhdpcie_get_oob_irq_level())); | |
11018 | #endif /* BCMPCIE_OOB_HOST_WAKE */ | |
11019 | DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n", | |
11020 | bus->dpc_return_busdown_count, bus->non_ours_irq_count)); | |
11021 | ||
11022 | current_time = OSL_LOCALTIME_NS(); | |
11023 | DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n", | |
11024 | GET_SEC_USEC(current_time))); | |
11025 | DHD_ERROR(("isr_entry_time="SEC_USEC_FMT | |
11026 | " isr_exit_time="SEC_USEC_FMT"\n", | |
11027 | GET_SEC_USEC(bus->isr_entry_time), | |
11028 | GET_SEC_USEC(bus->isr_exit_time))); | |
11029 | DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT | |
11030 | " last_non_ours_irq_time="SEC_USEC_FMT"\n", | |
11031 | GET_SEC_USEC(bus->dpc_sched_time), | |
11032 | GET_SEC_USEC(bus->last_non_ours_irq_time))); | |
11033 | DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT | |
11034 | " last_process_ctrlbuf_time="SEC_USEC_FMT"\n", | |
11035 | GET_SEC_USEC(bus->dpc_entry_time), | |
11036 | GET_SEC_USEC(bus->last_process_ctrlbuf_time))); | |
11037 | DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT | |
11038 | " last_process_txcpl_time="SEC_USEC_FMT"\n", | |
11039 | GET_SEC_USEC(bus->last_process_flowring_time), | |
11040 | GET_SEC_USEC(bus->last_process_txcpl_time))); | |
11041 | DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT | |
11042 | " last_process_infocpl_time="SEC_USEC_FMT | |
11043 | " last_process_edl_time="SEC_USEC_FMT"\n", | |
11044 | GET_SEC_USEC(bus->last_process_rxcpl_time), | |
11045 | GET_SEC_USEC(bus->last_process_infocpl_time), | |
11046 | GET_SEC_USEC(bus->last_process_edl_time))); | |
11047 | DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT | |
11048 | " resched_dpc_time="SEC_USEC_FMT"\n", | |
11049 | GET_SEC_USEC(bus->dpc_exit_time), | |
11050 | GET_SEC_USEC(bus->resched_dpc_time))); | |
11051 | DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n", | |
11052 | GET_SEC_USEC(bus->last_d3_inform_time))); | |
11053 | ||
11054 | DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT | |
11055 | " last_suspend_end_time="SEC_USEC_FMT"\n", | |
11056 | GET_SEC_USEC(bus->last_suspend_start_time), | |
11057 | GET_SEC_USEC(bus->last_suspend_end_time))); | |
11058 | DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT | |
11059 | " last_resume_end_time="SEC_USEC_FMT"\n", | |
11060 | GET_SEC_USEC(bus->last_resume_start_time), | |
11061 | GET_SEC_USEC(bus->last_resume_end_time))); | |
11062 | ||
11063 | #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE) | |
11064 | DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT | |
11065 | " logtrace_thread_sem_down_time="SEC_USEC_FMT | |
11066 | "\nlogtrace_thread_flush_time="SEC_USEC_FMT | |
11067 | " logtrace_thread_unexpected_break_time="SEC_USEC_FMT | |
11068 | "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n", | |
11069 | GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time), | |
11070 | GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time), | |
11071 | GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time), | |
11072 | GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time), | |
11073 | GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time))); | |
11074 | #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */ | |
11075 | } | |
11076 | ||
11077 | void | |
11078 | dhd_bus_intr_count_dump(dhd_pub_t *dhd) | |
11079 | { | |
11080 | dhd_pcie_intr_count_dump(dhd); | |
11081 | } | |
11082 | ||
11083 | int | |
11084 | dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd) | |
11085 | { | |
11086 | uint32 save_idx, val; | |
11087 | si_t *sih = dhd->bus->sih; | |
11088 | uint32 oob_base, oob_base1; | |
11089 | uint32 wrapper_dump_list[] = { | |
11090 | AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74, | |
11091 | AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74, | |
11092 | AI_RESETSTATUS, AI_RESETCTRL, | |
11093 | AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD, | |
11094 | AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT | |
11095 | }; | |
11096 | uint32 i; | |
11097 | hndoobr_reg_t *reg; | |
11098 | cr4regs_t *cr4regs; | |
11099 | ca7regs_t *ca7regs; | |
11100 | ||
11101 | save_idx = si_coreidx(sih); | |
11102 | ||
11103 | DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__)); | |
11104 | ||
11105 | if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) { | |
11106 | for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) { | |
11107 | val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0); | |
11108 | DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val)); | |
11109 | } | |
11110 | } | |
11111 | ||
11112 | if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) { | |
11113 | DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__)); | |
11114 | for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) { | |
11115 | val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0); | |
11116 | DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val)); | |
11117 | } | |
11118 | DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__)); | |
11119 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol)); | |
11120 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val)); | |
11121 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities)); | |
11122 | DHD_ERROR(("reg:0x%x val:0x%x\n", | |
11123 | (uint)OFFSETOF(cr4regs_t, corecapabilities), val)); | |
11124 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus)); | |
11125 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val)); | |
11126 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst)); | |
11127 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val)); | |
11128 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask)); | |
11129 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val)); | |
11130 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask)); | |
11131 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val)); | |
11132 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg)); | |
11133 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val)); | |
11134 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus)); | |
11135 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val)); | |
11136 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt)); | |
11137 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val)); | |
11138 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer)); | |
11139 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val)); | |
11140 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st)); | |
11141 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val)); | |
11142 | val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl)); | |
11143 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val)); | |
11144 | } | |
11145 | ||
11146 | if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) { | |
11147 | DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__)); | |
11148 | val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol)); | |
11149 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val)); | |
11150 | val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities)); | |
11151 | DHD_ERROR(("reg:0x%x val:0x%x\n", | |
11152 | (uint)OFFSETOF(ca7regs_t, corecapabilities), val)); | |
11153 | val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus)); | |
11154 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val)); | |
11155 | val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol)); | |
11156 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val)); | |
11157 | val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st)); | |
11158 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val)); | |
11159 | val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl)); | |
11160 | DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val)); | |
11161 | } | |
11162 | ||
11163 | DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__)); | |
11164 | ||
11165 | oob_base = si_oobr_baseaddr(sih, FALSE); | |
11166 | oob_base1 = si_oobr_baseaddr(sih, TRUE); | |
11167 | if (oob_base) { | |
11168 | dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE); | |
11169 | dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE); | |
11170 | dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE); | |
11171 | dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE); | |
11172 | } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) { | |
11173 | val = R_REG(dhd->osh, ®->intstatus[0]); | |
11174 | DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); | |
11175 | val = R_REG(dhd->osh, ®->intstatus[1]); | |
11176 | DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); | |
11177 | val = R_REG(dhd->osh, ®->intstatus[2]); | |
11178 | DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); | |
11179 | val = R_REG(dhd->osh, ®->intstatus[3]); | |
11180 | DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val)); | |
11181 | } | |
11182 | ||
11183 | if (oob_base1) { | |
11184 | DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__)); | |
11185 | ||
11186 | dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE); | |
11187 | dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE); | |
11188 | dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE); | |
11189 | dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE); | |
11190 | } | |
11191 | ||
11192 | si_setcoreidx(dhd->bus->sih, save_idx); | |
11193 | ||
11194 | return 0; | |
11195 | } | |
11196 | ||
11197 | static void | |
11198 | dhdpcie_hw_war_regdump(dhd_bus_t *bus) | |
11199 | { | |
11200 | uint32 save_idx, val; | |
11201 | volatile uint32 *reg; | |
11202 | ||
11203 | save_idx = si_coreidx(bus->sih); | |
11204 | if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) { | |
11205 | val = R_REG(bus->osh, reg + REG_WORK_AROUND); | |
11206 | DHD_ERROR(("CC HW_WAR :0x%x\n", val)); | |
11207 | } | |
11208 | ||
11209 | if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) { | |
11210 | val = R_REG(bus->osh, reg + REG_WORK_AROUND); | |
11211 | DHD_ERROR(("ARM HW_WAR:0x%x\n", val)); | |
11212 | } | |
11213 | ||
11214 | if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) { | |
11215 | val = R_REG(bus->osh, reg + REG_WORK_AROUND); | |
11216 | DHD_ERROR(("PCIE HW_WAR :0x%x\n", val)); | |
11217 | } | |
11218 | si_setcoreidx(bus->sih, save_idx); | |
11219 | ||
11220 | val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0); | |
11221 | DHD_ERROR(("MINRESMASK :0x%x\n", val)); | |
11222 | } | |
11223 | ||
11224 | int | |
11225 | dhd_pcie_dma_info_dump(dhd_pub_t *dhd) | |
11226 | { | |
11227 | if (dhd->bus->is_linkdown) { | |
11228 | DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers " | |
11229 | "due to PCIe link down ------- \r\n")); | |
11230 | return 0; | |
11231 | } | |
11232 | ||
11233 | DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n")); | |
11234 | ||
11235 | /* HostToDev */ | |
11236 | DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", | |
11237 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0), | |
11238 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0))); | |
11239 | DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", | |
11240 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0), | |
11241 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0))); | |
11242 | DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", | |
11243 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0), | |
11244 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0))); | |
11245 | ||
11246 | DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", | |
11247 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0), | |
11248 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0))); | |
11249 | DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", | |
11250 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0), | |
11251 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0))); | |
11252 | DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", | |
11253 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0), | |
11254 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0))); | |
11255 | ||
11256 | /* DevToHost */ | |
11257 | DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", | |
11258 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0), | |
11259 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0))); | |
11260 | DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", | |
11261 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0), | |
11262 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0))); | |
11263 | DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", | |
11264 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0), | |
11265 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0))); | |
11266 | ||
11267 | DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", | |
11268 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0), | |
11269 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0))); | |
11270 | DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", | |
11271 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0), | |
11272 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0))); | |
11273 | DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", | |
11274 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0), | |
11275 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0))); | |
11276 | ||
11277 | return 0; | |
11278 | } | |
11279 | ||
11280 | bool | |
11281 | dhd_pcie_dump_int_regs(dhd_pub_t *dhd) | |
11282 | { | |
11283 | uint32 intstatus = 0; | |
11284 | uint32 intmask = 0; | |
11285 | uint32 d2h_db0 = 0; | |
11286 | uint32 d2h_mb_data = 0; | |
11287 | ||
11288 | DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); | |
11289 | intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11290 | dhd->bus->pcie_mailbox_int, 0, 0); | |
11291 | if (intstatus == (uint32)-1) { | |
11292 | DHD_ERROR(("intstatus=0x%x \n", intstatus)); | |
11293 | return FALSE; | |
11294 | } | |
11295 | ||
11296 | intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11297 | dhd->bus->pcie_mailbox_mask, 0, 0); | |
11298 | if (intmask == (uint32) -1) { | |
11299 | DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask)); | |
11300 | return FALSE; | |
11301 | } | |
11302 | ||
11303 | d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11304 | PCID2H_MailBox, 0, 0); | |
11305 | if (d2h_db0 == (uint32)-1) { | |
11306 | DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", | |
11307 | intstatus, intmask, d2h_db0)); | |
11308 | return FALSE; | |
11309 | } | |
11310 | ||
11311 | DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", | |
11312 | intstatus, intmask, d2h_db0)); | |
11313 | dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); | |
11314 | DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, | |
11315 | dhd->bus->def_intmask)); | |
11316 | ||
11317 | return TRUE; | |
11318 | } | |
11319 | ||
11320 | void | |
11321 | dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd) | |
11322 | { | |
11323 | DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n")); | |
11324 | DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n", | |
11325 | dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
11326 | PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); | |
11327 | #ifdef EXTENDED_PCIE_DEBUG_DUMP | |
11328 | DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n", | |
11329 | dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
11330 | PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0), | |
11331 | dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
11332 | PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0), | |
11333 | dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
11334 | PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0), | |
11335 | dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
11336 | PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0))); | |
11337 | #endif /* EXTENDED_PCIE_DEBUG_DUMP */ | |
11338 | } | |
11339 | ||
11340 | int | |
11341 | dhd_pcie_debug_info_dump(dhd_pub_t *dhd) | |
11342 | { | |
11343 | int host_irq_disabled; | |
11344 | ||
11345 | DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state)); | |
11346 | host_irq_disabled = dhdpcie_irq_disabled(dhd->bus); | |
11347 | DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled)); | |
11348 | dhd_print_tasklet_status(dhd); | |
11349 | dhd_pcie_intr_count_dump(dhd); | |
11350 | ||
11351 | DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n")); | |
11352 | dhdpcie_dump_resource(dhd->bus); | |
11353 | ||
11354 | dhd_pcie_dump_rc_conf_space_cap(dhd); | |
11355 | ||
11356 | DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n", | |
11357 | dhd_debug_get_rc_linkcap(dhd->bus))); | |
11358 | DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n")); | |
11359 | DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x " | |
11360 | "PCIE_CFG_PMCSR(0x%x)=0x%x\n", | |
11361 | PCIECFGREG_STATUS_CMD, | |
11362 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)), | |
11363 | PCIECFGREG_BASEADDR0, | |
11364 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)), | |
11365 | PCIECFGREG_BASEADDR1, | |
11366 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)), | |
11367 | PCIE_CFG_PMCSR, | |
11368 | dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32)))); | |
11369 | DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x " | |
11370 | "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, | |
11371 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL, | |
11372 | sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2, | |
11373 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2, | |
11374 | sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1, | |
11375 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1, | |
11376 | sizeof(uint32)))); | |
11377 | #ifdef EXTENDED_PCIE_DEBUG_DUMP | |
11378 | DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n", | |
11379 | dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
11380 | PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); | |
11381 | DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x " | |
11382 | "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1, | |
11383 | dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)), | |
11384 | PCI_TLP_HDR_LOG2, | |
11385 | dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)), | |
11386 | PCI_TLP_HDR_LOG3, | |
11387 | dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)), | |
11388 | PCI_TLP_HDR_LOG4, | |
11389 | dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32)))); | |
11390 | if (dhd->bus->sih->buscorerev >= 24) { | |
11391 | DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x " | |
11392 | "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL, | |
11393 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL, | |
11394 | sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL, | |
11395 | dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, | |
11396 | sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2, | |
11397 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2, | |
11398 | sizeof(uint32)))); | |
11399 | dhd_bus_dump_dar_registers(dhd->bus); | |
11400 | } | |
11401 | #endif /* EXTENDED_PCIE_DEBUG_DUMP */ | |
11402 | ||
11403 | if (dhd->bus->is_linkdown) { | |
11404 | DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n")); | |
11405 | return 0; | |
11406 | } | |
11407 | ||
11408 | DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n")); | |
11409 | ||
11410 | DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x " | |
11411 | "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0, | |
11412 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0), | |
11413 | PCIECFGREG_PHY_DBG_CLKREQ1, | |
11414 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1), | |
11415 | PCIECFGREG_PHY_DBG_CLKREQ2, | |
11416 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2), | |
11417 | PCIECFGREG_PHY_DBG_CLKREQ3, | |
11418 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3))); | |
11419 | ||
11420 | #ifdef EXTENDED_PCIE_DEBUG_DUMP | |
11421 | if (dhd->bus->sih->buscorerev >= 24) { | |
11422 | ||
11423 | DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x " | |
11424 | "ltssm_hist_2(0x%x)=0x%x " | |
11425 | "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0, | |
11426 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0), | |
11427 | PCIECFGREG_PHY_LTSSM_HIST_1, | |
11428 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1), | |
11429 | PCIECFGREG_PHY_LTSSM_HIST_2, | |
11430 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2), | |
11431 | PCIECFGREG_PHY_LTSSM_HIST_3, | |
11432 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3))); | |
11433 | ||
11434 | DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n", | |
11435 | PCIECFGREG_TREFUP, | |
11436 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP), | |
11437 | PCIECFGREG_TREFUP_EXT, | |
11438 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT))); | |
11439 | DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x " | |
11440 | "Function_Intstatus(0x%x)=0x%x " | |
11441 | "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x " | |
11442 | "Power_Intmask(0x%x)=0x%x\n", | |
11443 | PCIE_CORE_REG_ERRLOG, | |
11444 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11445 | PCIE_CORE_REG_ERRLOG, 0, 0), | |
11446 | PCIE_CORE_REG_ERR_ADDR, | |
11447 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11448 | PCIE_CORE_REG_ERR_ADDR, 0, 0), | |
11449 | PCIFunctionIntstatus(dhd->bus->sih->buscorerev), | |
11450 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11451 | PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0), | |
11452 | PCIFunctionIntmask(dhd->bus->sih->buscorerev), | |
11453 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11454 | PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0), | |
11455 | PCIPowerIntstatus(dhd->bus->sih->buscorerev), | |
11456 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11457 | PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0), | |
11458 | PCIPowerIntmask(dhd->bus->sih->buscorerev), | |
11459 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11460 | PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0))); | |
11461 | DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x " | |
11462 | "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n", | |
11463 | (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), | |
11464 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11465 | OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0), | |
11466 | (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), | |
11467 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11468 | OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0), | |
11469 | (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), | |
11470 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11471 | OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0), | |
11472 | (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), | |
11473 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11474 | OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0))); | |
11475 | DHD_ERROR(("err_code(0x%x)=0x%x\n", | |
11476 | (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), | |
11477 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
11478 | OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0))); | |
11479 | ||
11480 | dhd_pcie_dump_wrapper_regs(dhd); | |
11481 | dhdpcie_hw_war_regdump(dhd->bus); | |
11482 | } | |
11483 | #endif /* EXTENDED_PCIE_DEBUG_DUMP */ | |
11484 | ||
11485 | dhd_pcie_dma_info_dump(dhd); | |
11486 | ||
11487 | return 0; | |
11488 | } | |
11489 | ||
11490 | bool | |
11491 | dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus) | |
11492 | { | |
11493 | return bus->force_bt_quiesce; | |
11494 | } | |
11495 | ||
11496 | #ifdef DHD_HP2P | |
11497 | uint16 | |
11498 | dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx) | |
11499 | { | |
11500 | if (tx) | |
11501 | return bus->hp2p_txcpl_max_items; | |
11502 | else | |
11503 | return bus->hp2p_rxcpl_max_items; | |
11504 | } | |
11505 | ||
11506 | static uint16 | |
11507 | dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val) | |
11508 | { | |
11509 | if (tx) | |
11510 | bus->hp2p_txcpl_max_items = val; | |
11511 | else | |
11512 | bus->hp2p_rxcpl_max_items = val; | |
11513 | return val; | |
11514 | } | |
11515 | #endif /* DHD_HP2P */ | |
11516 | ||
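/*
 * Simple destructive TCM memory test: for each pattern in init_val (all-ones,
 * all-zeros) walk the full dongle RAM in MEMBLOCK-sized chunks, write the
 * pattern, read it back and compare; any mismatch is dumped with prhex() and
 * the test returns FALSE. Note that this overwrites dongle RAM.
 */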
11517 | static bool | |
11518 | dhd_bus_tcm_test(struct dhd_bus *bus) | |
11519 | { | |
11520 | int ret = 0; | |
11521 | int size; /* Full mem size */ | |
11522 | int start; /* Start address */ | |
11523 | int read_size = 0; /* Read size of each iteration */ | |
11524 | int num = 0; | |
11525 | uint8 *read_buf, *write_buf; | |
11526 | uint8 init_val[NUM_PATTERNS] = { | |
11527 | 0xFFu, /* 11111111 */ | |
11528 | 0x00u, /* 00000000 */ | |
11529 | }; | |
11530 | ||
11531 | if (!bus) { | |
11532 | DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__)); | |
11533 | return FALSE; | |
11534 | } | |
11535 | ||
11536 | read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK); | |
11537 | ||
11538 | if (!read_buf) { | |
11539 | DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__)); | |
11540 | return FALSE; | |
11541 | } | |
11542 | ||
11543 | write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK); | |
11544 | ||
11545 | if (!write_buf) { | |
11546 | MFREE(bus->dhd->osh, read_buf, MEMBLOCK); | |
11547 | DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__)); | |
11548 | return FALSE; | |
11549 | } | |
11550 | ||
11551 | DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize)); | |
11552 | DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS)); | |
11553 | ||
11554 | while (num < NUM_PATTERNS) { | |
11555 | start = bus->dongle_ram_base; | |
11556 | /* Get full mem size */ | |
11557 | size = bus->ramsize; | |
11558 | ||
11559 | memset(write_buf, init_val[num], MEMBLOCK); | |
11560 | while (size > 0) { | |
11561 | read_size = MIN(MEMBLOCK, size); | |
11562 | memset(read_buf, 0, read_size); | |
11563 | ||
11564 | /* Write */ | |
11565 | if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) { | |
11566 | DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret)); | |
11567 | MFREE(bus->dhd->osh, read_buf, MEMBLOCK); | |
11568 | MFREE(bus->dhd->osh, write_buf, MEMBLOCK); | |
11569 | return FALSE; | |
11570 | } | |
11571 | ||
11572 | /* Read */ | |
11573 | if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) { | |
11574 | DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret)); | |
11575 | MFREE(bus->dhd->osh, read_buf, MEMBLOCK); | |
11576 | MFREE(bus->dhd->osh, write_buf, MEMBLOCK); | |
11577 | return FALSE; | |
11578 | } | |
11579 | ||
11580 | /* Compare */ | |
11581 | if (memcmp(read_buf, write_buf, read_size)) { | |
11582 | DHD_ERROR(("%s: Mismatch at %x, iter : %d\n", | |
11583 | __FUNCTION__, start, num)); | |
11584 | prhex("Readbuf", read_buf, read_size); | |
11585 | prhex("Writebuf", write_buf, read_size); | |
11586 | MFREE(bus->dhd->osh, read_buf, MEMBLOCK); | |
11587 | MFREE(bus->dhd->osh, write_buf, MEMBLOCK); | |
11588 | return FALSE; | |
11589 | } | |
11590 | ||
11591 | /* Decrement size and increment start address */ | |
11592 | size -= read_size; | |
11593 | start += read_size; | |
11594 | } | |
11595 | num++; | |
11596 | } | |
11597 | ||
11598 | MFREE(bus->dhd->osh, read_buf, MEMBLOCK); | |
11599 | MFREE(bus->dhd->osh, write_buf, MEMBLOCK); | |
11600 | ||
11601 | DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num)); | |
11602 | return TRUE; | |
11603 | } |