dhd: import wifi and bluetooth firmware
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.101.10.240.x / dhd_common.c
1 /*
2 * Broadcom Dongle Host Driver (DHD), common DHD core.
3 *
4 * Copyright (C) 2020, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 *
21 * <<Broadcom-WL-IPTag/Open:>>
22 *
23 * $Id$
24 */
25 #include <typedefs.h>
26 #include <osl.h>
27
28 #include <epivers.h>
29 #include <bcmutils.h>
30 #include <bcmstdlib_s.h>
31
32 #include <bcmendian.h>
33 #include <dngl_stats.h>
34 #include <dhd.h>
35 #include <dhd_ip.h>
36 #include <bcmevent.h>
37 #include <dhdioctl.h>
38 #ifdef DHD_SDTC_ETB_DUMP
39 #include <bcmiov.h>
40 #endif /* DHD_SDTC_ETB_DUMP */
41
42 #ifdef PCIE_FULL_DONGLE
43 #include <bcmmsgbuf.h>
44 #endif /* PCIE_FULL_DONGLE */
45
46 #ifdef SHOW_LOGTRACE
47 #include <event_log.h>
48 #endif /* SHOW_LOGTRACE */
49
50 #ifdef BCMPCIE
51 #include <dhd_flowring.h>
52 #endif
53
54 #include <dhd_bus.h>
55 #include <dhd_proto.h>
56 #include <bcmsdbus.h>
57 #include <dhd_dbg.h>
58 #include <802.1d.h>
59 #include <dhd_debug.h>
60 #include <dhd_dbg_ring.h>
61 #include <dhd_mschdbg.h>
62 #include <msgtrace.h>
63 #include <dhd_config.h>
64 #include <wl_android.h>
65
66 #ifdef WL_CFG80211
67 #include <wl_cfg80211.h>
68 #endif
69 #if defined(PNO_SUPPORT)
70 #include <dhd_pno.h>
71 #endif /* PNO_SUPPORT */
72 #ifdef RTT_SUPPORT
73 #include <dhd_rtt.h>
74 #endif
75
76 #ifdef DNGL_EVENT_SUPPORT
77 #include <dnglevent.h>
78 #endif
79
80 #define htod32(i) (i)
81 #define htod16(i) (i)
82 #define dtoh32(i) (i)
83 #define dtoh16(i) (i)
84 #define htodchanspec(i) (i)
85 #define dtohchanspec(i) (i)
86
87 #ifdef PROP_TXSTATUS
88 #include <wlfc_proto.h>
89 #include <dhd_wlfc.h>
90 #endif
91
92 #if defined(__linux__)
93 #include <dhd_linux.h>
94 #endif /* __linux__ */
95
96 #ifdef DHD_L2_FILTER
97 #include <dhd_l2_filter.h>
98 #endif /* DHD_L2_FILTER */
99
100 #ifdef DHD_PSTA
101 #include <dhd_psta.h>
102 #endif /* DHD_PSTA */
103
104 #ifdef DHD_WET
105 #include <dhd_wet.h>
106 #endif /* DHD_WET */
107
108 #ifdef DHD_LOG_DUMP
109 #include <dhd_dbg.h>
110 #ifdef DHD_PKT_LOGGING
111 #include <dhd_pktlog.h>
112 #endif
113 #endif /* DHD_LOG_DUMP */
114
115 #ifdef DHD_LOG_PRINT_RATE_LIMIT
116 int log_print_threshold = 0;
117 #endif /* DHD_LOG_PRINT_RATE_LIMIT */
118
119 /* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
120 int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
121 /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
122
123 #ifdef DHD_DEBUG
124 #include <sdiovar.h>
125 #endif /* DHD_DEBUG */
126
127 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
128 #include <linux/pm_runtime.h>
129 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
130
131 #ifdef CSI_SUPPORT
132 #include <dhd_csi.h>
133 #endif /* CSI_SUPPORT */
134
135 #ifdef SOFTAP
136 char fw_path2[MOD_PARAM_PATHLEN];
137 extern bool softap_enabled;
138 #endif
139
140 #ifdef SHOW_LOGTRACE
141 #define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */
142 #define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
143 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
144 static char *ramstart_str = " text_start"; /* string in mapfile has addr ramstart */
145 static char *rodata_start_str = " rodata_start"; /* string in mapfile has addr rodata start */
146 static char *rodata_end_str = " rodata_end"; /* string in mapfile has addr rodata end */
147 #define RAMSTART_BIT 0x01
148 #define RDSTART_BIT 0x02
149 #define RDEND_BIT 0x04
150 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
151 #endif /* SHOW_LOGTRACE */
152
153 #ifdef SHOW_LOGTRACE
154 /* the fw file path is taken from either the module parameter at
155 * insmod time or is defined as a constant of different values
156 * for different platforms
157 */
158 extern char *st_str_file_path;
159 #endif /* SHOW_LOGTRACE */
160
161 #ifdef EWP_EDL
162 typedef struct msg_hdr_edl {
163 uint32 infobuf_ver;
164 info_buf_payload_hdr_t pyld_hdr;
165 msgtrace_hdr_t trace_hdr;
166 } msg_hdr_edl_t;
167 #endif /* EWP_EDL */
168
169 #define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
170
171 /* Last connection success/failure status */
172 uint32 dhd_conn_event;
173 uint32 dhd_conn_status;
174 uint32 dhd_conn_reason;
175
176 extern int dhd_iscan_request(void * dhdp, uint16 action);
177 extern void dhd_ind_scan_confirm(void *h, bool status);
178 extern int dhd_iscan_in_progress(void *h);
179 void dhd_iscan_lock(void);
180 void dhd_iscan_unlock(void);
181 extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
182 #if !defined(AP) && defined(WLP2P)
183 extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
184 #endif
185
186 extern int dhd_socram_dump(struct dhd_bus *bus);
187 extern void dhd_set_packet_filter(dhd_pub_t *dhd);
188
189 #ifdef DNGL_EVENT_SUPPORT
190 static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
191 bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
192 static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
193 size_t pktlen);
194 #endif /* DNGL_EVENT_SUPPORT */
195
196 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
197 static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
198 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
199
200 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
201 #define MAX_IOCTL_SUSPEND_ERROR 10
202 static int ioctl_suspend_error = 0;
203 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
204
205 /* Should ideally read this from target(taken from wlu) */
206 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
207
208 /* note these variables will be used with wext */
209 bool ap_cfg_running = FALSE;
210 bool ap_fw_loaded = FALSE;
211
212 #define CHIPID_MISMATCH 8
213
214 #define DHD_VERSION "\nDongle Host Driver, version " EPI_VERSION_STR "\n"
215
216 #if defined(DHD_DEBUG) && defined(DHD_COMPILED)
217 const char dhd_version[] = DHD_VERSION DHD_COMPILED " compiled on "
218 __DATE__ " at " __TIME__ "\n\0<TIMESTAMP>";
219 #else
220 const char dhd_version[] = DHD_VERSION;
221 #endif /* DHD_DEBUG && DHD_COMPILED */
222
223 char fw_version[FW_VER_STR_LEN] = "\0";
224 char clm_version[CLM_VER_STR_LEN] = "\0";
225
226 char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
227
228 void dhd_set_timer(void *bus, uint wdtick);
229
230 static char* ioctl2str(uint32 ioctl);
231
232 /* IOVar table */
/* Driver-local IOVar identifiers handled by this file. Names are bound to
 * these ids in the dhd_iovars[] table. Because many entries are gated by
 * build-time defines, the numeric values shift between configurations and
 * must never be relied upon outside this translation unit.
 */
enum {
	IOV_VERSION = 1,
	IOV_WLMSGLEVEL,
	IOV_MSGLEVEL,
	IOV_BCMERRORSTR,
	IOV_BCMERROR,
	IOV_WDTICK,
	IOV_DUMP,
	IOV_CLEARCOUNTS,
	IOV_LOGDUMP,
	IOV_LOGCAL,
	IOV_LOGSTAMP,
	IOV_GPIOOB,
	IOV_IOCTLTIMEOUT,
	IOV_CONS,
	IOV_DCONSOLE_POLL,
#if defined(DHD_DEBUG)
	IOV_DHD_JOIN_TIMEOUT_DBG,
	IOV_SCAN_TIMEOUT,
	IOV_MEM_DEBUG,
#ifdef BCMPCIE
	IOV_FLOW_RING_DEBUG,
#endif /* BCMPCIE */
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
	IOV_PROPTXSTATUS_ENABLE,
	IOV_PROPTXSTATUS_MODE,
	IOV_PROPTXSTATUS_OPT,
	IOV_PROPTXSTATUS_MODULE_IGNORE,
	IOV_PROPTXSTATUS_CREDIT_IGNORE,
	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
	IOV_PROPTXSTATUS_RXPKT_CHK,
#endif /* PROP_TXSTATUS */
	IOV_BUS_TYPE,
	IOV_CHANGEMTU,
	IOV_HOSTREORDER_FLOWS,
#ifdef DHDTCPACK_SUPPRESS
	IOV_TCPACK_SUPPRESS,
#endif /* DHDTCPACK_SUPPRESS */
	IOV_AP_ISOLATE,
#ifdef DHD_L2_FILTER
	IOV_DHCP_UNICAST,
	IOV_BLOCK_PING,
	IOV_PROXY_ARP,
	IOV_GRAT_ARP,
	IOV_BLOCK_TDLS,
#endif /* DHD_L2_FILTER */
	IOV_DHD_IE,
#ifdef DHD_PSTA
	IOV_PSTA,
#endif /* DHD_PSTA */
#ifdef DHD_WET
	IOV_WET,
	IOV_WET_HOST_IPV4,
	IOV_WET_HOST_MAC,
#endif /* DHD_WET */
	IOV_CFG80211_OPMODE,
	IOV_ASSERT_TYPE,
	IOV_LMTEST,
#ifdef DHD_MCAST_REGEN
	IOV_MCAST_REGEN_BSS_ENABLE,
#endif
#ifdef SHOW_LOGTRACE
	IOV_DUMP_TRACE_LOG,
#endif /* SHOW_LOGTRACE */
	IOV_DONGLE_TRAP_TYPE,
	IOV_DONGLE_TRAP_INFO,
	IOV_BPADDR,
	IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
#if defined(DHD_LOG_DUMP)
	IOV_LOG_DUMP,
#endif /* DHD_LOG_DUMP */
	IOV_TPUT_TEST,
	IOV_DEBUG_BUF_DEST_STAT,
#ifdef DHD_DEBUG
	IOV_INDUCE_ERROR,
#endif /* DHD_DEBUG */
	IOV_FIS_TRIGGER,
#ifdef RTT_GEOFENCE_CONT
#if defined(RTT_SUPPORT) && defined(WL_NAN)
	IOV_RTT_GEOFENCE_TYPE_OVRD,
#endif /* RTT_SUPPORT && WL_NAN */
#endif /* RTT_GEOFENCE_CONT */
	IOV_FW_VBS,
#ifdef DHD_TX_PROFILE
	IOV_TX_PROFILE_TAG,
	IOV_TX_PROFILE_ENABLE,
	IOV_TX_PROFILE_DUMP,
#endif /* defined(DHD_TX_PROFILE) */
	IOV_LAST
};
324
325 const bcm_iovar_t dhd_iovars[] = {
326 /* name varid flags flags2 type minlen */
327 {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, 0},
328 {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 },
329 #ifdef DHD_DEBUG
330 {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
331 {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
332 #ifdef BCMPCIE
333 {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
334 #endif /* BCMPCIE */
335 #endif /* DHD_DEBUG */
336 {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
337 {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
338 {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
339 {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
340 {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
341 {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
342 {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
343 {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
344 {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
345 #ifdef PROP_TXSTATUS
346 {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
347 /*
348 set the proptxtstatus operation mode:
349 0 - Do not do any proptxtstatus flow control
350 1 - Use implied credit from a packet status
351 2 - Use explicit credit
352 */
353 {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
354 {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
355 {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
356 {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
357 {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
358 {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
359 #endif /* PROP_TXSTATUS */
360 {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
361 {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
362 {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
363 (WLHOST_REORDERDATA_MAXFLOWS + 1) },
364 #ifdef DHDTCPACK_SUPPRESS
365 {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
366 #endif /* DHDTCPACK_SUPPRESS */
367 #ifdef DHD_L2_FILTER
368 {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
369 #endif /* DHD_L2_FILTER */
370 {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
371 #ifdef DHD_L2_FILTER
372 {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
373 {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
374 {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
375 {"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
376 #endif /* DHD_L2_FILTER */
377 {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
378 #ifdef DHD_PSTA
379 /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */
380 {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
381 #endif /* DHD PSTA */
382 #ifdef DHD_WET
383 /* WET Mode configuration. 0: DIABLED 1: WET */
384 {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
385 {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
386 {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
387 #endif /* DHD WET */
388 {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
389 {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
390 {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
391 #ifdef DHD_MCAST_REGEN
392 {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
393 #endif
394 #ifdef SHOW_LOGTRACE
395 {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
396 #endif /* SHOW_LOGTRACE */
397 {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
398 {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
399 #ifdef DHD_DEBUG
400 {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
401 #endif /* DHD_DEBUG */
402 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
403 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
404 #if defined(DHD_LOG_DUMP)
405 {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
406 #endif /* DHD_LOG_DUMP */
407 {"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
408 {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
409 #if defined(DHD_SSSR_DUMP)
410 {"fis_trigger", IOV_FIS_TRIGGER, 0, 0, IOVT_UINT32, 0},
411 #endif
412 #ifdef DHD_DEBUG
413 {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
414 #endif /* DHD_DEBUG */
415 #ifdef RTT_GEOFENCE_CONT
416 #if defined(RTT_SUPPORT) && defined(WL_NAN)
417 {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
418 #endif /* RTT_SUPPORT && WL_NAN */
419 #endif /* RTT_GEOFENCE_CONT */
420 {"fw_verbose", IOV_FW_VBS, 0, 0, IOVT_UINT32, 0},
421 #ifdef DHD_TX_PROFILE
422 {"tx_profile_tag", IOV_TX_PROFILE_TAG, 0, 0, IOVT_BUFFER,
423 sizeof(dhd_tx_profile_protocol_t)},
424 {"tx_profile_enable", IOV_TX_PROFILE_ENABLE, 0, 0, IOVT_BOOL, 0},
425 {"tx_profile_dump", IOV_TX_PROFILE_DUMP, 0, 0, IOVT_UINT32, 0},
426 #endif /* defined(DHD_TX_PROFILE) */
427 /* --- add new iovars *ABOVE* this line --- */
428 {NULL, 0, 0, 0, 0, 0 }
429 };
430
431 #define DHD_IOVAR_BUF_SIZE 128
432
/*
 * Check all latched bus/dongle error indications on @dhdp.
 *
 * Returns TRUE when any recorded fatal condition (dongle reset, firmware
 * trap, iovar/D3ACK timeout, livelock, pktid audit failure, interface-op
 * failure, scan timeout/busy, AXI error, PCIe link-down or CTO recovery)
 * means further bus transactions must not proceed; FALSE otherwise.
 *
 * Side effect: a firmware trap additionally raises a HANG notification
 * to the upper layers with reason HANG_REASON_DONGLE_TRAP.
 *
 * NOTE(review): function name keeps the historical "erros" spelling;
 * renaming would break external callers.
 */
bool
dhd_query_bus_erros(dhd_pub_t *dhdp)
{
	bool ret = FALSE;

	if (dhdp->dongle_reset) {
		DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->dongle_trap_occured) {
		DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
		/* Escalate the trap to the framework as a HANG event. */
		dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
		dhd_os_send_hang_message(dhdp);
	}

	if (dhdp->iovar_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->d3ack_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->livelock_occured) {
		DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->pktid_audit_failed) {
		DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* PCIE_FULL_DONGLE */

	if (dhdp->iface_op_failed) {
		DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->scan_timeout_occurred) {
		DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->scan_busy_occurred) {
		DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

#ifdef DNGL_AXI_ERROR_LOGGING
	if (dhdp->axi_error) {
		DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* DNGL_AXI_ERROR_LOGGING */

	/* Bus-layer latched states are queried via accessors. */
	if (dhd_bus_get_linkdown(dhdp)) {
		DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhd_bus_get_cto(dhdp)) {
		DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	return ret;
}
517
518 void
519 dhd_clear_bus_errors(dhd_pub_t *dhdp)
520 {
521 if (!dhdp)
522 return;
523
524 dhdp->dongle_reset = FALSE;
525 dhdp->dongle_trap_occured = FALSE;
526 dhdp->iovar_timeout_occured = FALSE;
527 #ifdef PCIE_FULL_DONGLE
528 dhdp->d3ack_timeout_occured = FALSE;
529 dhdp->livelock_occured = FALSE;
530 dhdp->pktid_audit_failed = FALSE;
531 #endif
532 dhdp->iface_op_failed = FALSE;
533 dhdp->scan_timeout_occurred = FALSE;
534 dhdp->scan_busy_occurred = FALSE;
535 }
536
537 #ifdef DHD_SSSR_DUMP
538
539 /* This can be overwritten by module parameter defined in dhd_linux.c */
540 uint sssr_enab = TRUE;
541
542 #ifdef DHD_FIS_DUMP
543 uint fis_enab = TRUE;
544 #else
545 uint fis_enab = FALSE;
546 #endif /* DHD_FIS_DUMP */
547
548 int
549 dhd_sssr_mempool_init(dhd_pub_t *dhd)
550 {
551 dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
552 if (dhd->sssr_mempool == NULL) {
553 DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
554 __FUNCTION__));
555 return BCME_ERROR;
556 }
557 return BCME_OK;
558 }
559
560 void
561 dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
562 {
563 if (dhd->sssr_mempool) {
564 MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
565 dhd->sssr_mempool = NULL;
566 }
567 }
568
569 int
570 dhd_sssr_reg_info_init(dhd_pub_t *dhd)
571 {
572 dhd->sssr_reg_info = (sssr_reg_info_cmn_t *) MALLOCZ(dhd->osh, sizeof(sssr_reg_info_cmn_t));
573 if (dhd->sssr_reg_info == NULL) {
574 DHD_ERROR(("%s: MALLOC of sssr_reg_info failed\n",
575 __FUNCTION__));
576 return BCME_ERROR;
577 }
578 return BCME_OK;
579 }
580
581 void
582 dhd_sssr_reg_info_deinit(dhd_pub_t *dhd)
583 {
584 if (dhd->sssr_reg_info) {
585 MFREE(dhd->osh, dhd->sssr_reg_info, sizeof(sssr_reg_info_cmn_t));
586 dhd->sssr_reg_info = NULL;
587 }
588 }
589
/* Placeholder: dumping the retrieved SSSR register info is not implemented
 * in this build; the empty body keeps a stable hook for callers
 * (see dhd_get_sssr_reg_info()).
 */
void
dhd_dump_sssr_reg_info(dhd_pub_t *dhd)
{
}
594
595 int
596 dhd_get_sssr_reg_info(dhd_pub_t *dhd)
597 {
598 int ret;
599 /* get sssr_reg_info from firmware */
600 ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)dhd->sssr_reg_info,
601 sizeof(sssr_reg_info_cmn_t), FALSE);
602 if (ret < 0) {
603 DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
604 __FUNCTION__, ret));
605 return BCME_ERROR;
606 }
607
608 dhd_dump_sssr_reg_info(dhd);
609 return BCME_OK;
610 }
611
/*
 * Compute the total buffer size required for an SSSR dump from the
 * per-core (and digital/VASIP) save/restore sizes that firmware reported
 * in dhd->sssr_reg_info. With DHD_SSSR_DUMP_BEFORE_SR the size is doubled
 * because separate before-SR and after-SR images are kept.
 *
 * NOTE(review): on an unknown register-info version this returns
 * BCME_UNSUPPORTED (a negative bcmerror) through a uint32 return type,
 * which a caller sees as a huge "size". dhd_sssr_dump_init() happens to
 * reject that against DHD_SSSR_MEMPOOL_SIZE, but new callers must not
 * treat the return value as a valid size without range-checking it —
 * TODO confirm intended contract.
 */
uint32
dhd_get_sssr_bufsize(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize = 0;
	uint8 num_d11cores;

	num_d11cores = dhd_d11_slices_num_get(dhd);

	/* The version field is at the same offset in every revision, so it is
	 * safe to read it through the rev2 view before dispatching.
	 */
	switch (dhd->sssr_reg_info->rev2.version) {
		case SSSR_REG_INFO_VER_3 :
			/* intentional fall through */
		case SSSR_REG_INFO_VER_2 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
			}
			if ((dhd->sssr_reg_info->rev2.length >
			 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
			 dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
				sssr_bufsize += 0; /* TBD */
			}
			break;
		case SSSR_REG_INFO_VER_1 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
			}
			if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
			} else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
				dig_mem_info)) && dhd->sssr_reg_info->rev1.
				dig_mem_info.dig_sr_addr) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
			}
			break;
		case SSSR_REG_INFO_VER_0 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
			}
			if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
				sssr_bufsize += dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
			}
			break;
		default :
			DHD_ERROR(("invalid sssr_reg_ver"));
			return BCME_UNSUPPORTED;
	}

#ifdef DHD_SSSR_DUMP_BEFORE_SR
	/* Double the size as different dumps will be saved before and after SR */
	sssr_bufsize = 2 * sssr_bufsize;
#endif /* DHD_SSSR_DUMP_BEFORE_SR */

	return sssr_bufsize;
}
666
667 int
668 dhd_sssr_dump_init(dhd_pub_t *dhd)
669 {
670 int i;
671 uint32 sssr_bufsize;
672 uint32 mempool_used = 0;
673 uint8 num_d11cores = 0;
674 bool alloc_sssr = FALSE;
675 uint32 sr_size = 0;
676
677 dhd->sssr_inited = FALSE;
678 if (!sssr_enab) {
679 DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
680 return BCME_OK;
681 }
682
683 /* check if sssr mempool is allocated */
684 if (dhd->sssr_mempool == NULL) {
685 DHD_ERROR(("%s: sssr_mempool is not allocated\n",
686 __FUNCTION__));
687 return BCME_ERROR;
688 }
689
690 /* check if sssr mempool is allocated */
691 if (dhd->sssr_reg_info == NULL) {
692 DHD_ERROR(("%s: sssr_reg_info is not allocated\n",
693 __FUNCTION__));
694 return BCME_ERROR;
695 }
696
697 /* Get SSSR reg info */
698 if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
699 DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
700 printf("DEBUG_SSSr: %s: dhd_get_sssr_reg_info failed\n", __FUNCTION__);
701 return BCME_ERROR;
702 }
703
704 num_d11cores = dhd_d11_slices_num_get(dhd);
705 /* Validate structure version and length */
706 switch (dhd->sssr_reg_info->rev2.version) {
707 case SSSR_REG_INFO_VER_3 :
708 if (dhd->sssr_reg_info->rev3.length != sizeof(sssr_reg_info_v3_t)) {
709 DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
710 "mismatch on rev2\n", __FUNCTION__,
711 (int)dhd->sssr_reg_info->rev3.length,
712 (int)sizeof(sssr_reg_info_v3_t)));
713 return BCME_ERROR;
714 }
715 break;
716 case SSSR_REG_INFO_VER_2 :
717 if (dhd->sssr_reg_info->rev2.length != sizeof(sssr_reg_info_v2_t)) {
718 DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
719 "mismatch on rev2\n", __FUNCTION__,
720 (int)dhd->sssr_reg_info->rev2.length,
721 (int)sizeof(sssr_reg_info_v2_t)));
722 return BCME_ERROR;
723 }
724 break;
725 case SSSR_REG_INFO_VER_1 :
726 if (dhd->sssr_reg_info->rev1.length != sizeof(sssr_reg_info_v1_t)) {
727 DHD_ERROR(("%s: dhd->sssr_reg_info->rev1.length (%d : %d)"
728 "mismatch on rev1\n", __FUNCTION__,
729 (int)dhd->sssr_reg_info->rev1.length,
730 (int)sizeof(sssr_reg_info_v1_t)));
731 return BCME_ERROR;
732 }
733 break;
734 case SSSR_REG_INFO_VER_0 :
735 if (dhd->sssr_reg_info->rev0.length != sizeof(sssr_reg_info_v0_t)) {
736 DHD_ERROR(("%s: dhd->sssr_reg_info->rev0.length (%d : %d)"
737 "mismatch on rev0\n", __FUNCTION__,
738 (int)dhd->sssr_reg_info->rev0.length,
739 (int)sizeof(sssr_reg_info_v0_t)));
740 return BCME_ERROR;
741 }
742 break;
743 default :
744 DHD_ERROR(("invalid sssr_reg_ver"));
745 return BCME_UNSUPPORTED;
746 }
747
748 /* validate fifo size */
749 sssr_bufsize = dhd_get_sssr_bufsize(dhd);
750 if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
751 DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
752 __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
753 return BCME_ERROR;
754 }
755
756 /* init all pointers to NULL */
757 for (i = 0; i < num_d11cores; i++) {
758 #ifdef DHD_SSSR_DUMP_BEFORE_SR
759 dhd->sssr_d11_before[i] = NULL;
760 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
761 dhd->sssr_d11_after[i] = NULL;
762 }
763
764 #ifdef DHD_SSSR_DUMP_BEFORE_SR
765 dhd->sssr_dig_buf_before = NULL;
766 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
767 dhd->sssr_dig_buf_after = NULL;
768
769 /* Allocate memory */
770 for (i = 0; i < num_d11cores; i++) {
771 alloc_sssr = FALSE;
772 sr_size = 0;
773
774 switch (dhd->sssr_reg_info->rev2.version) {
775 case SSSR_REG_INFO_VER_3 :
776 /* intentional fall through */
777 case SSSR_REG_INFO_VER_2 :
778 if (dhd->sssr_reg_info->rev2.mac_regs[i].sr_size) {
779 alloc_sssr = TRUE;
780 sr_size = dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
781 }
782 break;
783 case SSSR_REG_INFO_VER_1 :
784 if (dhd->sssr_reg_info->rev1.mac_regs[i].sr_size) {
785 alloc_sssr = TRUE;
786 sr_size = dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
787 }
788 break;
789 case SSSR_REG_INFO_VER_0 :
790 if (dhd->sssr_reg_info->rev0.mac_regs[i].sr_size) {
791 alloc_sssr = TRUE;
792 sr_size = dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
793 }
794 break;
795 default :
796 DHD_ERROR(("invalid sssr_reg_ver"));
797 return BCME_UNSUPPORTED;
798 }
799
800 if (alloc_sssr) {
801 #ifdef DHD_SSSR_DUMP_BEFORE_SR
802 dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
803 mempool_used += sr_size;
804 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
805
806 dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
807 mempool_used += sr_size;
808 }
809 }
810
811 /* Allocate dump memory for VASIP (version 0 or 1) or digital core (version 0, 1, or 2) */
812 alloc_sssr = FALSE;
813 sr_size = 0;
814 switch (dhd->sssr_reg_info->rev2.version) {
815 case SSSR_REG_INFO_VER_3 :
816 /* intentional fall through */
817 case SSSR_REG_INFO_VER_2 :
818 if ((dhd->sssr_reg_info->rev2.length >
819 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
820 dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
821 alloc_sssr = TRUE;
822 sr_size = dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
823 }
824 break;
825 case SSSR_REG_INFO_VER_1 :
826 if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
827 alloc_sssr = TRUE;
828 sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
829 } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
830 dig_mem_info)) && dhd->sssr_reg_info->rev1.
831 dig_mem_info.dig_sr_addr) {
832 alloc_sssr = TRUE;
833 sr_size = dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
834 }
835 break;
836 case SSSR_REG_INFO_VER_0 :
837 if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
838 alloc_sssr = TRUE;
839 sr_size = dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
840 }
841 break;
842 default :
843 DHD_ERROR(("invalid sssr_reg_ver"));
844 return BCME_UNSUPPORTED;
845 }
846
847 if (alloc_sssr) {
848 dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
849 mempool_used += sr_size;
850
851 #ifdef DHD_SSSR_DUMP_BEFORE_SR
852 /* DIG dump before suspend is not applicable. */
853 dhd->sssr_dig_buf_before = NULL;
854 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
855 }
856
857 dhd->sssr_inited = TRUE;
858
859 return BCME_OK;
860
861 }
862
863 void
864 dhd_sssr_dump_deinit(dhd_pub_t *dhd)
865 {
866 int i;
867 uint8 num_d11cores;
868
869 num_d11cores = dhd_d11_slices_num_get(dhd);
870
871 dhd->sssr_inited = FALSE;
872 /* init all pointers to NULL */
873 for (i = 0; i < num_d11cores; i++) {
874 #ifdef DHD_SSSR_DUMP_BEFORE_SR
875 dhd->sssr_d11_before[i] = NULL;
876 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
877 dhd->sssr_d11_after[i] = NULL;
878 }
879 #ifdef DHD_SSSR_DUMP_BEFORE_SR
880 dhd->sssr_dig_buf_before = NULL;
881 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
882 dhd->sssr_dig_buf_after = NULL;
883
884 return;
885 }
886
887 void
888 dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
889 {
890 bool print_info = FALSE;
891 int dump_mode;
892
893 if (!dhd || !path) {
894 DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
895 __FUNCTION__));
896 return;
897 }
898
899 if (!dhd->sssr_dump_collected) {
900 /* SSSR dump is not collected */
901 return;
902 }
903
904 dump_mode = dhd->sssr_dump_mode;
905
906 if (bcmstrstr(path, "core_0_before")) {
907 if (dhd->sssr_d11_outofreset[0] &&
908 dump_mode == SSSR_DUMP_MODE_SSSR) {
909 print_info = TRUE;
910 }
911 } else if (bcmstrstr(path, "core_0_after")) {
912 if (dhd->sssr_d11_outofreset[0]) {
913 print_info = TRUE;
914 }
915 } else if (bcmstrstr(path, "core_1_before")) {
916 if (dhd->sssr_d11_outofreset[1] &&
917 dump_mode == SSSR_DUMP_MODE_SSSR) {
918 print_info = TRUE;
919 }
920 } else if (bcmstrstr(path, "core_1_after")) {
921 if (dhd->sssr_d11_outofreset[1]) {
922 print_info = TRUE;
923 }
924 } else if (bcmstrstr(path, "core_2_before")) {
925 if (dhd->sssr_d11_outofreset[2] &&
926 dump_mode == SSSR_DUMP_MODE_SSSR) {
927 print_info = TRUE;
928 }
929 } else if (bcmstrstr(path, "core_2_after")) {
930 if (dhd->sssr_d11_outofreset[2]) {
931 print_info = TRUE;
932 }
933 } else {
934 print_info = TRUE;
935 }
936
937 if (print_info) {
938 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
939 path, FILE_NAME_HAL_TAG));
940 }
941 }
942 #endif /* DHD_SSSR_DUMP */
943
944 #ifdef DHD_SDTC_ETB_DUMP
945 /*
946 * sdtc: system debug trace controller
947 * etb: embedded trace buf
948 */
/*
 * Initialise SDTC/ETB dump support: query the WL_SDTC_CMD_ETB_INFO iovar
 * from firmware, cache the returned etb_addr_info (version, length,
 * etbinfo address) in @dhd, and allocate the ETB dump memory pool.
 * On any failure the function logs and leaves dhd->sdtc_etb_inited FALSE.
 */
void
dhd_sdtc_etb_init(dhd_pub_t *dhd)
{
	bcm_iov_buf_t iov_req;
	etb_addr_info_t *p_etb_addr_info = NULL;
	bcm_iov_buf_t *iov_resp;
	uint8 *buf;
	int ret = 0;
	uint16 iovlen = 0;
	uint16 version = 0;

	BCM_REFERENCE(p_etb_addr_info);
	dhd->sdtc_etb_inited = FALSE;

	/* Scratch buffer for the iovar response. */
	buf = MALLOCZ(dhd->osh, WLC_IOCTL_MAXLEN);
	if (buf == NULL) {
		DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__));
		return;
	}

	/* fill header */
	bzero(&iov_req, sizeof(iov_req));
	iov_req.version = WL_SDTC_IOV_VERSION;
	iov_req.id = WL_SDTC_CMD_ETB_INFO;
	iov_req.len = sizeof(etb_addr_info_t);
	iovlen = OFFSETOF(bcm_iov_buf_t, data) + iov_req.len;

	ret = dhd_iovar(dhd, 0, "sdtc", (char *)&iov_req, iovlen,
		(char *)buf, WLC_IOCTL_MAXLEN, FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s failed to get sdtc etb_info %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* First uint16 of the response is the iov version (dongle-endian). */
	version = dtoh16(*(uint16 *)buf);
	/* Check for version */
	if (version != WL_SDTC_IOV_VERSION) {
		DHD_ERROR(("%s WL_SDTC_IOV_VERSION mis match\n", __FUNCTION__));
		goto exit;
	}
	iov_resp = (bcm_iov_buf_t *)buf;
	if (iov_resp->id == iov_req.id) {
		/* Cache the ETB address info reported by firmware. */
		p_etb_addr_info = (etb_addr_info_t*)iov_resp->data;
		dhd->etb_addr_info.version = p_etb_addr_info->version;
		dhd->etb_addr_info.len = p_etb_addr_info->len;
		dhd->etb_addr_info.etbinfo_addr = p_etb_addr_info->etbinfo_addr;

		DHD_ERROR(("%s etb_addr_info: ver:%d, len:%d, addr:0x%x\n", __FUNCTION__,
			dhd->etb_addr_info.version, dhd->etb_addr_info.len,
			dhd->etb_addr_info.etbinfo_addr));
	} else {
		DHD_ERROR(("%s Unknown CMD-ID (%d) as response for request ID %d\n",
			__FUNCTION__, iov_resp->id, iov_req.id));
		goto exit;
	}

	dhd->sdtc_etb_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SDTC_ETB_MEMPOOL_SIZE);
	if (dhd->sdtc_etb_mempool == NULL) {
		DHD_ERROR(("%s: MALLOC of sdtc_etb_mempool failed\n",
			__FUNCTION__));
		goto exit;
	}

	/* since all the requirements for SDTC and ETB are met mark the capability as TRUE */
	dhd->sdtc_etb_inited = TRUE;
	DHD_ERROR(("%s sdtc_etb_inited: %d\n", __FUNCTION__, dhd->sdtc_etb_inited));
exit:
	MFREE(dhd->osh, buf, WLC_IOCTL_MAXLEN);
	return;
}
1019
1020 void
1021 dhd_sdtc_etb_deinit(dhd_pub_t *dhd)
1022 {
1023 dhd->sdtc_etb_inited = FALSE;
1024 if (dhd->sdtc_etb_mempool) {
1025 MFREE(dhd->osh, dhd->sdtc_etb_mempool, DHD_SDTC_ETB_MEMPOOL_SIZE);
1026 dhd->sdtc_etb_mempool = NULL;
1027 }
1028 }
1029 #endif /* DHD_SDTC_ETB_DUMP */
1030
1031 #ifdef DHD_FW_COREDUMP
void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
{
	/* Return (allocating on first use) the buffer used to hold the
	 * firmware core dump ("SoC RAM") snapshot.  The buffer is zeroed on
	 * every call so a new dump never carries stale data.
	 */
	if (!dhd_pub->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
		/* Use the pre-reserved static memdump region instead of a
		 * (possibly failing) large runtime allocation.
		 */
		dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
			DHD_PREALLOC_MEMDUMP_RAM, length);
#else
		dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	}

	if (dhd_pub->soc_ram == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
			__FUNCTION__));
		dhd_pub->soc_ram_length = 0;
	} else {
		/* NOTE(review): when soc_ram was allocated on an earlier call,
		 * this memset assumes 'length' has not grown since then --
		 * confirm callers always pass the same dongle RAM size.
		 */
		memset(dhd_pub->soc_ram, 0, length);
		dhd_pub->soc_ram_length = length;
	}

	/* soc_ram free handled in dhd_{free,clear} */
	return dhd_pub->soc_ram;
}
1055 #endif /* DHD_FW_COREDUMP */
1056
/* Note to NDIS developers: the dhd_common structure is redundant;
 * please do NOT merge it back from other branches!
 */
1060
int
dhd_common_socram_dump(dhd_pub_t *dhdp)
{
	/* Trigger a dongle SoC RAM dump through the bus layer.  BCMDBUS
	 * (USB/DBUS) builds have no bus-level socram dump, so just return 0.
	 */
#ifdef BCMDBUS
	return 0;
#else
	return dhd_socram_dump(dhdp->bus);
#endif /* BCMDBUS */
}
1070
1071 int
1072 dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
1073 {
1074 struct bcmstrbuf b;
1075 struct bcmstrbuf *strbuf = &b;
1076 #ifdef DHD_MEM_STATS
1077 uint64 malloc_mem = 0;
1078 uint64 total_txpath_mem = 0;
1079 uint64 txpath_bkpq_len = 0;
1080 uint64 txpath_bkpq_mem = 0;
1081 uint64 total_dhd_mem = 0;
1082 #endif /* DHD_MEM_STATS */
1083
1084 if (!dhdp || !dhdp->prot || !buf) {
1085 return BCME_ERROR;
1086 }
1087
1088 bcm_binit(strbuf, buf, buflen);
1089
1090 /* Base DHD info */
1091 bcm_bprintf(strbuf, "%s\n", dhd_version);
1092 bcm_bprintf(strbuf, "\n");
1093 bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
1094 dhdp->up, dhdp->txoff, dhdp->busstate);
1095 bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
1096 dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
1097 bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
1098 dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
1099 bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
1100
1101 bcm_bprintf(strbuf, "dongle stats:\n");
1102 bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
1103 dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
1104 dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
1105 bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
1106 dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
1107 dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
1108 bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
1109
1110 bcm_bprintf(strbuf, "bus stats:\n");
1111 bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
1112 dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
1113 bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
1114 dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
1115 bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
1116 dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
1117 bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
1118 dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
1119 bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
1120 dhdp->rx_readahead_cnt, dhdp->tx_realloc);
1121 bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
1122 dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
1123 bcm_bprintf(strbuf, "tx_big_packets %lu\n",
1124 dhdp->tx_big_packets);
1125 bcm_bprintf(strbuf, "\n");
1126 #ifdef DMAMAP_STATS
1127 /* Add DMA MAP info */
1128 bcm_bprintf(strbuf, "DMA MAP stats: \n");
1129 bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
1130 dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
1131 dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
1132 #ifndef IOCTLRESP_USE_CONSTMEM
1133 bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
1134 dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
1135 #endif /* !IOCTLRESP_USE_CONSTMEM */
1136 bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
1137 "TSBUF RX: %lu size %luK\n",
1138 dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
1139 dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
1140 dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
1141 bcm_bprintf(strbuf, "Total : %luK \n",
1142 KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
1143 dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
1144 dhdp->dma_stats.tsbuf_rx_sz));
1145 #endif /* DMAMAP_STATS */
1146 bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
1147 /* Add any prot info */
1148 dhd_prot_dump(dhdp, strbuf);
1149 bcm_bprintf(strbuf, "\n");
1150
1151 /* Add any bus info */
1152 dhd_bus_dump(dhdp, strbuf);
1153
1154 #if defined(DHD_LB_STATS)
1155 dhd_lb_stats_dump(dhdp, strbuf);
1156 #endif /* DHD_LB_STATS */
1157
1158 #ifdef DHD_MEM_STATS
1159
1160 malloc_mem = MALLOCED(dhdp->osh);
1161
1162 txpath_bkpq_len = dhd_active_tx_flowring_bkpq_len(dhdp);
1163 /*
1164 * Instead of traversing the entire queue to find the skbs length,
1165 * considering MAX_MTU_SZ as lenth of each skb.
1166 */
1167 txpath_bkpq_mem = (txpath_bkpq_len* MAX_MTU_SZ);
1168 total_txpath_mem = dhdp->txpath_mem + txpath_bkpq_mem;
1169
1170 bcm_bprintf(strbuf, "\nDHD malloc memory_usage: %llubytes %lluKB\n",
1171 malloc_mem, (malloc_mem / 1024));
1172
1173 bcm_bprintf(strbuf, "\nDHD tx-bkpq len: %llu memory_usage: %llubytes %lluKB\n",
1174 txpath_bkpq_len, txpath_bkpq_mem, (txpath_bkpq_mem / 1024));
1175 bcm_bprintf(strbuf, "DHD tx-path memory_usage: %llubytes %lluKB\n",
1176 total_txpath_mem, (total_txpath_mem / 1024));
1177
1178 total_dhd_mem = malloc_mem + total_txpath_mem;
1179 #if defined(DHD_LB_STATS)
1180 total_dhd_mem += dhd_lb_mem_usage(dhdp, strbuf);
1181 #endif /* DHD_LB_STATS */
1182 bcm_bprintf(strbuf, "\nDHD Totoal memory_usage: %llubytes %lluKB \n",
1183 total_dhd_mem, (total_dhd_mem / 1024));
1184 #endif /* DHD_MEM_STATS */
1185
1186 #if defined(DHD_MQ) && defined(DHD_MQ_STATS)
1187 dhd_mqstats_dump(dhdp, strbuf);
1188 #endif
1189
1190 #ifdef DHD_WET
1191 if (dhd_get_wet_mode(dhdp)) {
1192 bcm_bprintf(strbuf, "Wet Dump:\n");
1193 dhd_wet_dump(dhdp, strbuf);
1194 }
1195 #endif /* DHD_WET */
1196
1197 /* return remaining buffer length */
1198 return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
1199 }
1200
void
dhd_dump_to_kernelog(dhd_pub_t *dhdp)
{
	/* Emit the full DHD state dump to the kernel log by temporarily
	 * putting bcm_bprintf() into bypass mode; the local buffer is only
	 * scratch space for the formatter.
	 */
	char buf[512];

	DHD_ERROR(("F/W version: %s\n", fw_version));
	bcm_bprintf_bypass = TRUE;
	dhd_dump(dhdp, buf, sizeof(buf));
	/* Restore normal (buffered) bcm_bprintf behavior */
	bcm_bprintf_bypass = FALSE;
}
1211
1212 int
1213 dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
1214 {
1215 wl_ioctl_t ioc;
1216
1217 ioc.cmd = cmd;
1218 ioc.buf = arg;
1219 ioc.len = len;
1220 ioc.set = set;
1221
1222 return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
1223 }
1224
1225 int
1226 dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
1227 int cmd, uint8 set, int ifidx)
1228 {
1229 char iovbuf[WLC_IOCTL_SMLEN];
1230 int ret = -1;
1231
1232 memset(iovbuf, 0, sizeof(iovbuf));
1233 if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
1234 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
1235 if (!ret) {
1236 *pval = ltoh32(*((uint*)iovbuf));
1237 } else {
1238 DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
1239 __FUNCTION__, name, ret));
1240 }
1241 } else {
1242 DHD_ERROR(("%s: mkiovar %s failed\n",
1243 __FUNCTION__, name));
1244 }
1245
1246 return ret;
1247 }
1248
1249 int
1250 dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
1251 int cmd, uint8 set, int ifidx)
1252 {
1253 char iovbuf[WLC_IOCTL_SMLEN];
1254 int ret = -1;
1255 int lval = htol32(val);
1256 uint len;
1257
1258 len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
1259
1260 if (len) {
1261 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
1262 if (ret) {
1263 DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
1264 __FUNCTION__, name, ret));
1265 }
1266 } else {
1267 DHD_ERROR(("%s: mkiovar %s failed\n",
1268 __FUNCTION__, name));
1269 }
1270
1271 return ret;
1272 }
1273
/* Lookup table mapping frequently-logged WLC ioctl command codes to
 * human-readable names; consumed by ioctl2str() below.  The table is
 * terminated by the {0, NULL} sentinel entry.
 */
static struct ioctl2str_s {
	uint32 ioctl;
	char *name;
} ioctl2str_array[] = {
	{WLC_UP, "UP"},
	{WLC_DOWN, "DOWN"},
	{WLC_SET_PROMISC, "SET_PROMISC"},
	{WLC_SET_INFRA, "SET_INFRA"},
	{WLC_SET_AUTH, "SET_AUTH"},
	{WLC_SET_SSID, "SET_SSID"},
	{WLC_RESTART, "RESTART"},
	{WLC_SET_CHANNEL, "SET_CHANNEL"},
	{WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
	{WLC_SET_KEY, "SET_KEY"},
	{WLC_SCAN, "SCAN"},
	{WLC_DISASSOC, "DISASSOC"},
	{WLC_REASSOC, "REASSOC"},
	{WLC_SET_COUNTRY, "SET_COUNTRY"},
	{WLC_SET_WAKE, "SET_WAKE"},
	{WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
	{WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
	{WLC_SET_WSEC, "SET_WSEC"},
	{WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
	{WLC_SET_RADAR, "SET_RADAR"},
	{0, NULL}
};
1300
1301 static char *
1302 ioctl2str(uint32 ioctl)
1303 {
1304 struct ioctl2str_s *p = ioctl2str_array;
1305
1306 while (p->name != NULL) {
1307 if (p->ioctl == ioctl) {
1308 return p->name;
1309 }
1310 p++;
1311 }
1312
1313 return "";
1314 }
1315
/**
 * Central ioctl/iovar dispatch path: serializes against other ioctls,
 * validates bus state, logs the request, and hands it to the protocol layer.
 *
 * @param ioc IO control struct, members are partially used by this function.
 * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
 * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
 * @return BCME_OK / positive protocol result on success, negative BCME_/errno
 *         style code on failure (-ENODEV when the bus is down or suspended).
 */
int
dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
{
	int ret = BCME_ERROR;
	unsigned long flags;
#ifdef DUMP_IOCTL_IOV_LIST
	dhd_iov_li_t *iov_li;
#endif /* DUMP_IOCTL_IOV_LIST */
	/* hostsleep bookkeeping for dhd_conf_check_hostsleep()/get_hostsleep() */
	int hostsleep_set = 0;
	int hostsleep_val = 0;

	/* Bail out early if the bus has already recorded fatal errors */
	if (dhd_query_bus_erros(dhd_pub)) {
		return -ENODEV;
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	DHD_OS_WAKE_LOCK(dhd_pub);
	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
		DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
		DHD_OS_WAKE_UNLOCK(dhd_pub);
		return BCME_ERROR;
	}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef KEEPIF_ON_DEVICE_RESET
	/* Intercept wowl iovars so the DBUS layer keeps the interface
	 * alive across a device reset while wakeup-on-WLAN is active.
	 */
	if (ioc->cmd == WLC_GET_VAR) {
		dbus_config_t config;
		config.general_param = 0;
		if (buf) {
			if (!strcmp(buf, "wowl_activate")) {
				/* 1 (TRUE) after decreased by 1 */
				config.general_param = 2;
			} else if (!strcmp(buf, "wowl_clear")) {
				/* 0 (FALSE) after decreased by 1 */
				config.general_param = 1;
			}
		}
		if (config.general_param) {
			config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
			config.general_param--;
			dbus_set_config(dhd_pub->dbus, &config);
		}
	}
#endif /* KEEPIF_ON_DEVICE_RESET */

	if (dhd_os_proto_block(dhd_pub))
	{
#ifdef DHD_LOG_DUMP
		int slen, val, lval, min_len;
		char *msg, tmp[64];

		/* WLC_GET_VAR: snapshot the iovar name now, before the dongle
		 * response overwrites 'buf', so it can be logged afterwards.
		 */
		if (ioc->cmd == WLC_GET_VAR && buf) {
			min_len = MIN(sizeof(tmp) - 1, strlen(buf));
			memset(tmp, 0, sizeof(tmp));
			bcopy(buf, tmp, min_len);
			tmp[min_len] = '\0';
		}
#endif /* DHD_LOG_DUMP */

#ifdef DHD_DISCONNECT_TRACE
		if (WLC_DISASSOC == ioc->cmd || WLC_DOWN == ioc->cmd ||
			WLC_DISASSOC_MYAP == ioc->cmd) {
			DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
		}
#endif /* HW_DISCONNECT_TRACE */
		/* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
		if (ioc->set == TRUE) {
			char *pars = (char *)buf; // points at user buffer
			if (ioc->cmd == WLC_SET_VAR && buf) {
				DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
				if (ioc->len > 1 + sizeof(uint32)) {
					// skip iovar name:
					pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
					pars++; // skip NULL character
				}
			} else {
				DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
					ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
			}
			if (pars != NULL) {
				DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
			} else {
				DHD_DNGL_IOVAR_SET((" NULL\n"));
			}
		}

		/* Refuse the ioctl if the bus is (going) down */
		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
			DHD_INFO(("%s: returning as busstate=%d\n",
				__FUNCTION__, dhd_pub->busstate));
			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
			dhd_os_proto_unblock(dhd_pub);
			return -ENODEV;
		}
		/* Mark the bus busy-in-iovar; cleared again at 'exit' below */
		DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
		dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
#endif /* DHD_PCIE_RUNTIMEPM */

		/* Refuse the ioctl while the bus is suspended or suspending */
		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
		if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd_pub) ||
			dhd_pub->dhd_induce_error == DHD_INDUCE_IOCTL_SUSPEND_ERROR) {
			DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
				__FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
			ioctl_suspend_error++;
			if (ioctl_suspend_error > MAX_IOCTL_SUSPEND_ERROR) {
				dhd_pub->hang_reason = HANG_REASON_IOCTL_SUSPEND_ERROR;
				dhd_os_send_hang_message(dhd_pub);
				ioctl_suspend_error = 0;
			}
#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
			DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
			dhd_os_busbusy_wake(dhd_pub);
			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
			dhd_os_proto_unblock(dhd_pub);
			return -ENODEV;
		}
#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
		ioctl_suspend_error = 0;
#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef DUMP_IOCTL_IOV_LIST
		/* Record the command in the recent-iovar list for post-mortem
		 * dumps on ioctl timeout (printed below on -ETIMEDOUT).
		 */
		if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
			if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
				DHD_ERROR(("iovar dump list item allocation Failed\n"));
			} else {
				iov_li->cmd = ioc->cmd;
				if (buf)
					bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
				dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
					&iov_li->list);
			}
		}
#endif /* DUMP_IOCTL_IOV_LIST */

		/* Hand the ioctl to the protocol layer (unless the hostsleep
		 * check handled/suppressed it and set 'ret' itself).
		 */
		if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
				&hostsleep_set, &hostsleep_val, &ret))
			goto exit;
		ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
		dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);

#ifdef DUMP_IOCTL_IOV_LIST
		if (ret == -ETIMEDOUT) {
			DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
				IOV_LIST_MAX_LEN));
			dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
		}
#endif /* DUMP_IOCTL_IOV_LIST */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
		if (ret == -ETIMEDOUT) {
			copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
		}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#ifdef DHD_LOG_DUMP
		/* Log the completed iovar/ioctl with its (first) integer value */
		if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
			buf != NULL) {
			if (buf) {
				lval = 0;
				slen = strlen(buf) + 1;
				msg = (char*)buf;
				if (len >= slen + sizeof(lval)) {
					if (ioc->cmd == WLC_GET_VAR) {
						/* 'buf' now holds the response; use the
						 * name snapshot taken in 'tmp' above
						 */
						msg = tmp;
						lval = *(int*)buf;
					} else {
						min_len = MIN(ioc->len - slen, sizeof(int));
						bcopy((msg + slen), &lval, min_len);
					}
					if (!strncmp(msg, "cur_etheraddr",
						strlen("cur_etheraddr"))) {
						lval = 0;
					}
				}
				DHD_IOVAR_MEM((
					"%s: cmd: %d, msg: %s val: 0x%x,"
					" len: %d, set: %d, txn-id: %d\n",
					ioc->cmd == WLC_GET_VAR ?
					"WLC_GET_VAR" : "WLC_SET_VAR",
					ioc->cmd, msg, lval, ioc->len, ioc->set,
					dhd_prot_get_ioctl_trans_id(dhd_pub)));
			} else {
				DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
					ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
					ioc->cmd, ioc->len, ioc->set,
					dhd_prot_get_ioctl_trans_id(dhd_pub)));
			}
		} else {
			slen = ioc->len;
			if (buf != NULL && slen != 0) {
				if (slen >= 4) {
					val = *(int*)buf;
				} else if (slen >= 2) {
					val = *(short*)buf;
				} else {
					val = *(char*)buf;
				}
				/* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
				if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
					DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
						"set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
				}
			} else {
				DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
			}
		}
#endif /* DHD_LOG_DUMP */
		if (ret && dhd_pub->up) {
			/* Send hang event only if dhd_open() was success */
			dhd_os_check_hang(dhd_pub, ifidx, ret);
		}

		if (ret == -ETIMEDOUT && !dhd_pub->up) {
			DHD_ERROR(("%s: 'resumed on timeout' error is "
				"occurred before the interface does not"
				" bring up\n", __FUNCTION__));
		}

		/* Common unwind: clear busy-in-iovar, wake waiters, release
		 * the proto serialization taken at the top of this section.
		 */
exit:
		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
		DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
		dhd_os_busbusy_wake(dhd_pub);
		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

		dhd_os_proto_unblock(dhd_pub);

		/* Dead code, kept intentionally disabled upstream */
#if 0
		if (ret < 0) {
			if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
				buf != NULL) {
				if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
					DHD_ERROR_MEM(("%s: %s: %s, %s\n",
						__FUNCTION__, ioc->cmd == WLC_GET_VAR ?
						"WLC_GET_VAR" : "WLC_SET_VAR",
						buf? (char *)buf:"NO MESSAGE",
						ret == BCME_UNSUPPORTED ? "UNSUPPORTED"
						: "NOT ASSOCIATED"));
				} else {
					DHD_ERROR_MEM(("%s: %s: %s, ret = %d\n",
						__FUNCTION__, ioc->cmd == WLC_GET_VAR ?
						"WLC_GET_VAR" : "WLC_SET_VAR",
						(char *)buf, ret));
				}
			} else {
				if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
					DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, %s\n",
						__FUNCTION__, ioc->cmd,
						ret == BCME_UNSUPPORTED ? "UNSUPPORTED" :
						"NOT ASSOCIATED"));
				} else {
					DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, ret = %d\n",
						__FUNCTION__, ioc->cmd, ret));
				}
			}
		}
#endif
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));

	DHD_OS_WAKE_UNLOCK(dhd_pub);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef WL_MONITOR
	/* Intercept monitor ioctl here, add/del monitor if */
	if (ret == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
		int val = 0;
		if (buf != NULL && len != 0) {
			if (len >= 4) {
				val = *(int*)buf;
			} else if (len >= 2) {
				val = *(short*)buf;
			} else {
				val = *(char*)buf;
			}
		}
		dhd_set_monitor(dhd_pub, ifidx, val);
	}
#endif /* WL_MONITOR */

	return ret;
}
1609
/* Stub: always reports port 0; the io_pport argument is unused here. */
uint wl_get_port_num(wl_io_pport_t *io_pport)
{
	return 0;
}
1614
1615 /* Get bssidx from iovar params
1616 * Input: dhd_pub - pointer to dhd_pub_t
1617 * params - IOVAR params
1618 * Output: idx - BSS index
1619 * val - ponter to the IOVAR arguments
1620 */
1621 static int
1622 dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
1623 {
1624 char *prefix = "bsscfg:";
1625 uint32 bssidx;
1626
1627 if (!(strncmp(params, prefix, strlen(prefix)))) {
1628 /* per bss setting should be prefixed with 'bsscfg:' */
1629 const char *p = params + strlen(prefix);
1630
1631 /* Skip Name */
1632 while (*p != '\0')
1633 p++;
1634 /* consider null */
1635 p = p + 1;
1636 bcopy(p, &bssidx, sizeof(uint32));
1637 /* Get corresponding dhd index */
1638 bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
1639
1640 if (bssidx >= DHD_MAX_IFS) {
1641 DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
1642 return BCME_ERROR;
1643 }
1644
1645 /* skip bss idx */
1646 p += sizeof(uint32);
1647 *val = p;
1648 *idx = bssidx;
1649 } else {
1650 DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
1651 return BCME_ERROR;
1652 }
1653
1654 return BCME_OK;
1655 }
1656
1657 #if defined(DHD_DEBUG) && defined(BCMDBUS)
1658 /* USB Device console input function */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	/* Forward console input to the dongle via the "cons" iovar (set). */
	DHD_TRACE(("%s \n", __FUNCTION__));

	return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);

}
1666 #endif /* DHD_DEBUG && BCMDBUS */
1667
1668 #ifdef DHD_DEBUG
1669 int
1670 dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
1671 {
1672 unsigned long int_arg = 0;
1673 char *p;
1674 char *end_ptr = NULL;
1675 dhd_dbg_mwli_t *mw_li;
1676 dll_t *item, *next;
1677 /* check if mwalloc, mwquery or mwfree was supplied arguement with space */
1678 p = bcmstrstr((char *)msg, " ");
1679 if (p != NULL) {
1680 /* space should be converted to null as separation flag for firmware */
1681 *p = '\0';
1682 /* store the argument in int_arg */
1683 int_arg = bcm_strtoul(p+1, &end_ptr, 10);
1684 }
1685
1686 if (!p && !strcmp(msg, "query")) {
1687 /* lets query the list inetrnally */
1688 if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1689 DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
1690 } else {
1691 for (item = dll_head_p(&dhd->mw_list_head);
1692 !dll_end(&dhd->mw_list_head, item); item = next) {
1693 next = dll_next_p(item);
1694 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1695 DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
1696 }
1697 }
1698 } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
1699 int32 alloc_handle;
1700 /* convert size into KB and append as integer */
1701 *((int32 *)(p+1)) = int_arg*1024;
1702 *(p+1+sizeof(int32)) = '\0';
1703
1704 /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1705 * 1 bytes for null caracter
1706 */
1707 msglen = strlen(msg) + sizeof(int32) + 1;
1708 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
1709 DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
1710 }
1711
1712 /* returned allocated handle from dongle, basically address of the allocated unit */
1713 alloc_handle = *((int32 *)msg);
1714
1715 /* add a node in the list with tuple <id, handle, size> */
1716 if (alloc_handle == 0) {
1717 DHD_ERROR(("Reuqested size could not be allocated\n"));
1718 } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
1719 DHD_ERROR(("mw list item allocation Failed\n"));
1720 } else {
1721 mw_li->id = dhd->mw_id++;
1722 mw_li->handle = alloc_handle;
1723 mw_li->size = int_arg;
1724 /* append the node in the list */
1725 dll_append(&dhd->mw_list_head, &mw_li->list);
1726 }
1727 } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
1728 /* inform dongle to free wasted chunk */
1729 int handle = 0;
1730 int size = 0;
1731 for (item = dll_head_p(&dhd->mw_list_head);
1732 !dll_end(&dhd->mw_list_head, item); item = next) {
1733 next = dll_next_p(item);
1734 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1735
1736 if (mw_li->id == (int)int_arg) {
1737 handle = mw_li->handle;
1738 size = mw_li->size;
1739 dll_delete(item);
1740 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1741 if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1742 /* reset the id */
1743 dhd->mw_id = 0;
1744 }
1745 }
1746 }
1747 if (handle) {
1748 int len;
1749 /* append the free handle and the chunk size in first 8 bytes
1750 * after the command and null character
1751 */
1752 *((int32 *)(p+1)) = handle;
1753 *((int32 *)((p+1)+sizeof(int32))) = size;
1754 /* append null as terminator */
1755 *(p+1+2*sizeof(int32)) = '\0';
1756 /* recalculated length -> 4 bytes for "free" + 8 bytes for hadnle and size
1757 * + 1 bytes for null caracter
1758 */
1759 len = strlen(msg) + 2*sizeof(int32) + 1;
1760 /* send iovar to free the chunk */
1761 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
1762 DHD_ERROR(("IOCTL failed for memdebug free\n"));
1763 }
1764 } else {
1765 DHD_ERROR(("specified id does not exist\n"));
1766 }
1767 } else {
1768 /* for all the wrong argument formats */
1769 return BCME_BADARG;
1770 }
1771 return 0;
1772 }
1773 extern void
1774 dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
1775 {
1776 dll_t *item;
1777 dhd_dbg_mwli_t *mw_li;
1778 while (!(dll_empty(list_head))) {
1779 item = dll_head_p(list_head);
1780 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1781 dll_delete(item);
1782 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1783 }
1784 }
1785 #ifdef BCMPCIE
1786 int
1787 dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
1788 {
1789 flow_ring_table_t *flow_ring_table;
1790 char *cmd;
1791 char *end_ptr = NULL;
1792 uint8 prio;
1793 uint16 flowid;
1794 int i;
1795 int ret = 0;
1796 cmd = bcmstrstr(msg, " ");
1797 BCM_REFERENCE(prio);
1798 if (cmd != NULL) {
1799 /* in order to use string operations append null */
1800 *cmd = '\0';
1801 } else {
1802 DHD_ERROR(("missing: create/delete args\n"));
1803 return BCME_ERROR;
1804 }
1805 if (cmd && !strcmp(msg, "create")) {
1806 /* extract <"source address", "destination address", "priority"> */
1807 uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
1808 BCM_REFERENCE(sa);
1809 BCM_REFERENCE(da);
1810 msg = msg + strlen("create") + 1;
1811 /* fill ethernet source address */
1812 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1813 sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1814 if (*end_ptr == ':') {
1815 msg = (end_ptr + 1);
1816 } else if (i != 5) {
1817 DHD_ERROR(("not a valid source mac addr\n"));
1818 return BCME_ERROR;
1819 }
1820 }
1821 if (*end_ptr != ' ') {
1822 DHD_ERROR(("missing: destiantion mac id\n"));
1823 return BCME_ERROR;
1824 } else {
1825 /* skip space */
1826 msg = end_ptr + 1;
1827 }
1828 /* fill ethernet destination address */
1829 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1830 da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1831 if (*end_ptr == ':') {
1832 msg = (end_ptr + 1);
1833 } else if (i != 5) {
1834 DHD_ERROR(("not a valid destination mac addr\n"));
1835 return BCME_ERROR;
1836 }
1837 }
1838 if (*end_ptr != ' ') {
1839 DHD_ERROR(("missing: priority\n"));
1840 return BCME_ERROR;
1841 } else {
1842 msg = end_ptr + 1;
1843 }
1844 /* parse priority */
1845 prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
1846 if (prio > MAXPRIO) {
1847 DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
1848 __FUNCTION__));
1849 return BCME_ERROR;
1850 }
1851
1852 if (*end_ptr != '\0') {
1853 DHD_ERROR(("msg not truncated with NULL character\n"));
1854 return BCME_ERROR;
1855 }
1856 ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
1857 if (ret != BCME_OK) {
1858 DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
1859 return BCME_ERROR;
1860 }
1861 return BCME_OK;
1862
1863 } else if (cmd && !strcmp(msg, "delete")) {
1864 msg = msg + strlen("delete") + 1;
1865 /* parse flowid */
1866 flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
1867 if (*end_ptr != '\0') {
1868 DHD_ERROR(("msg not truncated with NULL character\n"));
1869 return BCME_ERROR;
1870 }
1871
1872 /* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */
1873 if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
1874 {
1875 DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid));
1876 return BCME_ERROR;
1877 }
1878
1879 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
1880 ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
1881 if (ret != BCME_OK) {
1882 DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
1883 return BCME_ERROR;
1884 }
1885 return BCME_OK;
1886 }
1887 DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
1888 return BCME_ERROR;
1889 }
1890 #endif /* BCMPCIE */
1891 #endif /* DHD_DEBUG */
1892
1893 static int
1894 dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
1895 void *params, int plen, void *arg, uint len, int val_size)
1896 {
1897 int bcmerror = 0;
1898 int32 int_val = 0;
1899 uint32 dhd_ver_len, bus_api_rev_len;
1900
1901 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1902 DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
1903
1904 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
1905 goto exit;
1906
1907 if (plen >= (int)sizeof(int_val))
1908 bcopy(params, &int_val, sizeof(int_val));
1909
1910 switch (actionid) {
1911 case IOV_GVAL(IOV_VERSION):
1912 /* Need to have checked buffer length */
1913 dhd_ver_len = sizeof(dhd_version) - 1;
1914 bus_api_rev_len = strlen(bus_api_revision);
1915 if (len > dhd_ver_len + bus_api_rev_len) {
1916 bcmerror = memcpy_s((char *)arg, len, dhd_version, dhd_ver_len);
1917 if (bcmerror != BCME_OK) {
1918 break;
1919 }
1920 bcmerror = memcpy_s((char *)arg + dhd_ver_len, len - dhd_ver_len,
1921 bus_api_revision, bus_api_rev_len);
1922 if (bcmerror != BCME_OK) {
1923 break;
1924 }
1925 *((char *)arg + dhd_ver_len + bus_api_rev_len) = '\0';
1926 }
1927 #if defined(BCMSDIO) && defined(PKT_STATICS)
1928 dhd_bus_clear_txpktstatics(dhd_pub->bus);
1929 #endif
1930 break;
1931
1932 case IOV_GVAL(IOV_WLMSGLEVEL):
1933 printf("android_msg_level=0x%x\n", android_msg_level);
1934 printf("config_msg_level=0x%x\n", config_msg_level);
1935 #if defined(WL_WIRELESS_EXT)
1936 int_val = (int32)iw_msg_level;
1937 bcopy(&int_val, arg, val_size);
1938 printf("iw_msg_level=0x%x\n", iw_msg_level);
1939 #endif
1940 #ifdef WL_CFG80211
1941 int_val = (int32)wl_dbg_level;
1942 bcopy(&int_val, arg, val_size);
1943 printf("cfg_msg_level=0x%x\n", wl_dbg_level);
1944 #endif
1945 break;
1946
1947 case IOV_SVAL(IOV_WLMSGLEVEL):
1948 if (int_val & DHD_ANDROID_VAL) {
1949 android_msg_level = (uint)(int_val & 0xFFFF);
1950 printf("android_msg_level=0x%x\n", android_msg_level);
1951 }
1952 if (int_val & DHD_CONFIG_VAL) {
1953 config_msg_level = (uint)(int_val & 0xFFFF);
1954 printf("config_msg_level=0x%x\n", config_msg_level);
1955 }
1956 #if defined(WL_WIRELESS_EXT)
1957 if (int_val & DHD_IW_VAL) {
1958 iw_msg_level = (uint)(int_val & 0xFFFF);
1959 printf("iw_msg_level=0x%x\n", iw_msg_level);
1960 }
1961 #endif
1962 #ifdef WL_CFG80211
1963 if (int_val & DHD_CFG_VAL) {
1964 wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF));
1965 }
1966 #endif
1967 break;
1968
1969 case IOV_GVAL(IOV_MSGLEVEL):
1970 int_val = (int32)dhd_msg_level;
1971 bcopy(&int_val, arg, val_size);
1972 #if defined(BCMSDIO) && defined(PKT_STATICS)
1973 dhd_bus_dump_txpktstatics(dhd_pub->bus);
1974 #endif
1975 break;
1976
1977 case IOV_SVAL(IOV_MSGLEVEL):
1978 dhd_msg_level = int_val;
1979 break;
1980
1981 case IOV_GVAL(IOV_BCMERRORSTR):
1982 bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
1983 ((char *)arg)[BCME_STRLEN - 1] = 0x00;
1984 break;
1985
1986 case IOV_GVAL(IOV_BCMERROR):
1987 int_val = (int32)dhd_pub->bcmerror;
1988 bcopy(&int_val, arg, val_size);
1989 break;
1990
1991 #ifndef BCMDBUS
1992 case IOV_GVAL(IOV_WDTICK):
1993 #ifdef HOST_TPUT_TEST
1994 if (dhd_pub->net_ts.tv_sec == 0 && dhd_pub->net_ts.tv_nsec == 0) {
1995 osl_do_gettimeofday(&dhd_pub->net_ts);
1996 } else {
1997 struct osl_timespec cur_ts;
1998 uint32 diff_ms;
1999 osl_do_gettimeofday(&cur_ts);
2000 diff_ms = osl_do_gettimediff(&cur_ts, &dhd_pub->net_ts)/1000;
2001 int_val = (int32)((dhd_pub->net_len/1024/1024)*8)*1000/diff_ms;
2002 dhd_pub->net_len = 0;
2003 memcpy(&dhd_pub->net_ts, &cur_ts, sizeof(struct osl_timespec));
2004 bcopy(&int_val, arg, sizeof(int_val));
2005 }
2006 #else
2007 int_val = (int32)dhd_watchdog_ms;
2008 bcopy(&int_val, arg, val_size);
2009 #endif
2010 break;
2011 #endif /* !BCMDBUS */
2012
2013 case IOV_SVAL(IOV_WDTICK):
2014 if (!dhd_pub->up) {
2015 bcmerror = BCME_NOTUP;
2016 break;
2017 }
2018
2019 dhd_watchdog_ms = (uint)int_val;
2020
2021 dhd_os_wd_timer(dhd_pub, (uint)int_val);
2022 break;
2023
2024 case IOV_GVAL(IOV_DUMP):
2025 if (dhd_dump(dhd_pub, arg, len) <= 0)
2026 bcmerror = BCME_ERROR;
2027 else
2028 bcmerror = BCME_OK;
2029 break;
2030
2031 #ifndef BCMDBUS
2032 case IOV_GVAL(IOV_DCONSOLE_POLL):
2033 int_val = (int32)dhd_pub->dhd_console_ms;
2034 bcopy(&int_val, arg, val_size);
2035 break;
2036
2037 case IOV_SVAL(IOV_DCONSOLE_POLL):
2038 dhd_pub->dhd_console_ms = (uint)int_val;
2039 break;
2040
2041 #if defined(DHD_DEBUG)
2042 case IOV_SVAL(IOV_CONS):
2043 if (len > 0) {
2044 #ifdef CONSOLE_DPC
2045 bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1);
2046 #else
2047 bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
2048 #endif
2049 }
2050 break;
2051 #endif /* DHD_DEBUG */
2052 #endif /* !BCMDBUS */
2053
2054 case IOV_SVAL(IOV_CLEARCOUNTS):
2055 dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
2056 dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
2057 dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
2058 dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
2059 dhd_pub->tx_dropped = 0;
2060 dhd_pub->rx_dropped = 0;
2061 dhd_pub->tx_pktgetfail = 0;
2062 dhd_pub->rx_pktgetfail = 0;
2063 dhd_pub->rx_readahead_cnt = 0;
2064 dhd_pub->tx_realloc = 0;
2065 dhd_pub->wd_dpc_sched = 0;
2066 dhd_pub->tx_big_packets = 0;
2067 memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
2068 dhd_bus_clearcounts(dhd_pub);
2069 #ifdef PROP_TXSTATUS
2070 /* clear proptxstatus related counters */
2071 dhd_wlfc_clear_counts(dhd_pub);
2072 #endif /* PROP_TXSTATUS */
2073 #if defined(DHD_LB_STATS)
2074 DHD_LB_STATS_RESET(dhd_pub);
2075 #endif /* DHD_LB_STATS */
2076 break;
2077
2078 case IOV_GVAL(IOV_IOCTLTIMEOUT): {
2079 #ifdef HOST_TPUT_TEST
2080 if (dhd_pub->bus_ts.tv_sec == 0 && dhd_pub->bus_ts.tv_nsec == 0) {
2081 osl_do_gettimeofday(&dhd_pub->bus_ts);
2082 } else {
2083 struct osl_timespec cur_ts;
2084 uint32 diff_ms;
2085 osl_do_gettimeofday(&cur_ts);
2086 diff_ms = osl_do_gettimediff(&cur_ts, &dhd_pub->bus_ts)/1000;
2087 int_val = (int32)((dhd_pub->dstats.tx_bytes/1024/1024)*8)*1000/diff_ms;
2088 dhd_pub->dstats.tx_bytes = 0;
2089 memcpy(&dhd_pub->bus_ts, &cur_ts, sizeof(struct osl_timespec));
2090 bcopy(&int_val, arg, sizeof(int_val));
2091 }
2092 #else
2093 int_val = (int32)dhd_os_get_ioctl_resp_timeout();
2094 bcopy(&int_val, arg, sizeof(int_val));
2095 #endif
2096 break;
2097 }
2098
2099 case IOV_SVAL(IOV_IOCTLTIMEOUT): {
2100 if (int_val <= 0)
2101 bcmerror = BCME_BADARG;
2102 else
2103 dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
2104 break;
2105 }
2106
2107 #ifdef PROP_TXSTATUS
2108 case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
2109 bool wlfc_enab = FALSE;
2110 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
2111 if (bcmerror != BCME_OK)
2112 goto exit;
2113 int_val = wlfc_enab ? 1 : 0;
2114 bcopy(&int_val, arg, val_size);
2115 break;
2116 }
2117 case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
2118 bool wlfc_enab = FALSE;
2119 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
2120 if (bcmerror != BCME_OK)
2121 goto exit;
2122
2123 /* wlfc is already set as desired */
2124 if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
2125 goto exit;
2126
2127 if (int_val == TRUE)
2128 bcmerror = dhd_wlfc_init(dhd_pub);
2129 else
2130 bcmerror = dhd_wlfc_deinit(dhd_pub);
2131
2132 break;
2133 }
2134 case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
2135 bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
2136 if (bcmerror != BCME_OK)
2137 goto exit;
2138 bcopy(&int_val, arg, val_size);
2139 break;
2140
2141 case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
2142 dhd_wlfc_set_mode(dhd_pub, int_val);
2143 break;
2144
2145 case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
2146 bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
2147 if (bcmerror != BCME_OK)
2148 goto exit;
2149 bcopy(&int_val, arg, val_size);
2150 break;
2151
2152 case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
2153 dhd_wlfc_set_module_ignore(dhd_pub, int_val);
2154 break;
2155
2156 case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
2157 bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
2158 if (bcmerror != BCME_OK)
2159 goto exit;
2160 bcopy(&int_val, arg, val_size);
2161 break;
2162
2163 case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
2164 dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
2165 break;
2166
2167 case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
2168 bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
2169 if (bcmerror != BCME_OK)
2170 goto exit;
2171 bcopy(&int_val, arg, val_size);
2172 break;
2173
2174 case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
2175 dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
2176 break;
2177
2178 case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
2179 bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
2180 if (bcmerror != BCME_OK)
2181 goto exit;
2182 bcopy(&int_val, arg, val_size);
2183 break;
2184
2185 case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
2186 dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
2187 break;
2188
2189 #endif /* PROP_TXSTATUS */
2190
2191 case IOV_GVAL(IOV_BUS_TYPE):
2192 /* The dhd application queries the driver to check if its usb or sdio. */
2193 #ifdef BCMDBUS
2194 int_val = BUS_TYPE_USB;
2195 #endif
2196 #ifdef BCMSDIO
2197 int_val = BUS_TYPE_SDIO;
2198 #endif
2199 #ifdef PCIE_FULL_DONGLE
2200 int_val = BUS_TYPE_PCIE;
2201 #endif
2202 bcopy(&int_val, arg, val_size);
2203 break;
2204
2205 case IOV_SVAL(IOV_CHANGEMTU):
2206 int_val &= 0xffff;
2207 bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
2208 break;
2209
2210 case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
2211 {
2212 uint i = 0;
2213 uint8 *ptr = (uint8 *)arg;
2214 uint8 count = 0;
2215
2216 ptr++;
2217 for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
2218 if (dhd_pub->reorder_bufs[i] != NULL) {
2219 *ptr = dhd_pub->reorder_bufs[i]->flow_id;
2220 ptr++;
2221 count++;
2222 }
2223 }
2224 ptr = (uint8 *)arg;
2225 *ptr = count;
2226 break;
2227 }
2228 #ifdef DHDTCPACK_SUPPRESS
2229 case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
2230 int_val = (uint32)dhd_pub->tcpack_sup_mode;
2231 bcopy(&int_val, arg, val_size);
2232 break;
2233 }
2234 case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
2235 bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
2236 break;
2237 }
2238 #endif /* DHDTCPACK_SUPPRESS */
2239
2240 #ifdef DHD_L2_FILTER
2241 case IOV_GVAL(IOV_DHCP_UNICAST): {
2242 uint32 bssidx;
2243 const char *val;
2244 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2245 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2246 __FUNCTION__, name));
2247 bcmerror = BCME_BADARG;
2248 break;
2249 }
2250 int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
2251 memcpy(arg, &int_val, val_size);
2252 break;
2253 }
2254 case IOV_SVAL(IOV_DHCP_UNICAST): {
2255 uint32 bssidx;
2256 const char *val;
2257 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2258 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2259 __FUNCTION__, name));
2260 bcmerror = BCME_BADARG;
2261 break;
2262 }
2263 memcpy(&int_val, val, sizeof(int_val));
2264 bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
2265 break;
2266 }
2267 case IOV_GVAL(IOV_BLOCK_PING): {
2268 uint32 bssidx;
2269 const char *val;
2270
2271 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2272 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
2273 bcmerror = BCME_BADARG;
2274 break;
2275 }
2276 int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
2277 memcpy(arg, &int_val, val_size);
2278 break;
2279 }
2280 case IOV_SVAL(IOV_BLOCK_PING): {
2281 uint32 bssidx;
2282 const char *val;
2283
2284 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2285 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
2286 bcmerror = BCME_BADARG;
2287 break;
2288 }
2289 memcpy(&int_val, val, sizeof(int_val));
2290 bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
2291 break;
2292 }
2293 case IOV_GVAL(IOV_PROXY_ARP): {
2294 uint32 bssidx;
2295 const char *val;
2296
2297 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2298 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
2299 bcmerror = BCME_BADARG;
2300 break;
2301 }
2302 int_val = dhd_get_parp_status(dhd_pub, bssidx);
2303 bcopy(&int_val, arg, val_size);
2304 break;
2305 }
2306 case IOV_SVAL(IOV_PROXY_ARP): {
2307 uint32 bssidx;
2308 const char *val;
2309
2310 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2311 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
2312 bcmerror = BCME_BADARG;
2313 break;
2314 }
2315 bcopy(val, &int_val, sizeof(int_val));
2316
2317 /* Issue a iovar request to WL to update the proxy arp capability bit
2318 * in the Extended Capability IE of beacons/probe responses.
2319 */
2320 bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
2321 NULL, 0, TRUE);
2322 if (bcmerror == BCME_OK) {
2323 dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
2324 }
2325 break;
2326 }
2327 case IOV_GVAL(IOV_GRAT_ARP): {
2328 uint32 bssidx;
2329 const char *val;
2330
2331 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2332 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
2333 bcmerror = BCME_BADARG;
2334 break;
2335 }
2336 int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
2337 memcpy(arg, &int_val, val_size);
2338 break;
2339 }
2340 case IOV_SVAL(IOV_GRAT_ARP): {
2341 uint32 bssidx;
2342 const char *val;
2343
2344 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2345 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
2346 bcmerror = BCME_BADARG;
2347 break;
2348 }
2349 memcpy(&int_val, val, sizeof(int_val));
2350 bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
2351 break;
2352 }
2353 case IOV_GVAL(IOV_BLOCK_TDLS): {
2354 uint32 bssidx;
2355 const char *val;
2356
2357 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2358 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
2359 bcmerror = BCME_BADARG;
2360 break;
2361 }
2362 int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
2363 memcpy(arg, &int_val, val_size);
2364 break;
2365 }
2366 case IOV_SVAL(IOV_BLOCK_TDLS): {
2367 uint32 bssidx;
2368 const char *val;
2369
2370 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2371 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
2372 bcmerror = BCME_BADARG;
2373 break;
2374 }
2375 memcpy(&int_val, val, sizeof(int_val));
2376 bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
2377 break;
2378 }
2379 #endif /* DHD_L2_FILTER */
2380 case IOV_SVAL(IOV_DHD_IE): {
2381 uint32 bssidx;
2382 const char *val;
2383
2384 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2385 DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
2386 bcmerror = BCME_BADARG;
2387 break;
2388 }
2389
2390 break;
2391 }
2392 case IOV_GVAL(IOV_AP_ISOLATE): {
2393 uint32 bssidx;
2394 const char *val;
2395
2396 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2397 DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
2398 bcmerror = BCME_BADARG;
2399 break;
2400 }
2401
2402 int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
2403 bcopy(&int_val, arg, val_size);
2404 break;
2405 }
2406 case IOV_SVAL(IOV_AP_ISOLATE): {
2407 uint32 bssidx;
2408 const char *val;
2409
2410 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2411 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
2412 bcmerror = BCME_BADARG;
2413 break;
2414 }
2415
2416 ASSERT(val);
2417 bcopy(val, &int_val, sizeof(uint32));
2418 dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
2419 break;
2420 }
2421 #ifdef DHD_PSTA
2422 case IOV_GVAL(IOV_PSTA): {
2423 int_val = dhd_get_psta_mode(dhd_pub);
2424 bcopy(&int_val, arg, val_size);
2425 break;
2426 }
2427 case IOV_SVAL(IOV_PSTA): {
2428 if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
2429 dhd_set_psta_mode(dhd_pub, int_val);
2430 } else {
2431 bcmerror = BCME_RANGE;
2432 }
2433 break;
2434 }
2435 #endif /* DHD_PSTA */
2436 #ifdef DHD_WET
2437 case IOV_GVAL(IOV_WET):
2438 int_val = dhd_get_wet_mode(dhd_pub);
2439 bcopy(&int_val, arg, val_size);
2440 break;
2441
2442 case IOV_SVAL(IOV_WET):
2443 if (int_val == 0 || int_val == 1) {
2444 dhd_set_wet_mode(dhd_pub, int_val);
2445 /* Delete the WET DB when disabled */
2446 if (!int_val) {
2447 dhd_wet_sta_delete_list(dhd_pub);
2448 }
2449 } else {
2450 bcmerror = BCME_RANGE;
2451 }
2452 break;
2453 case IOV_SVAL(IOV_WET_HOST_IPV4):
2454 dhd_set_wet_host_ipv4(dhd_pub, params, plen);
2455 break;
2456 case IOV_SVAL(IOV_WET_HOST_MAC):
2457 dhd_set_wet_host_mac(dhd_pub, params, plen);
2458 break;
2459 #endif /* DHD_WET */
2460 #ifdef DHD_MCAST_REGEN
2461 case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2462 uint32 bssidx;
2463 const char *val;
2464
2465 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2466 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2467 bcmerror = BCME_BADARG;
2468 break;
2469 }
2470
2471 int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
2472 bcopy(&int_val, arg, val_size);
2473 break;
2474 }
2475
2476 case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2477 uint32 bssidx;
2478 const char *val;
2479
2480 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2481 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2482 bcmerror = BCME_BADARG;
2483 break;
2484 }
2485
2486 ASSERT(val);
2487 bcopy(val, &int_val, sizeof(uint32));
2488 dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
2489 break;
2490 }
2491 #endif /* DHD_MCAST_REGEN */
2492
2493 case IOV_GVAL(IOV_CFG80211_OPMODE): {
2494 int_val = (int32)dhd_pub->op_mode;
2495 bcopy(&int_val, arg, sizeof(int_val));
2496 break;
2497 }
2498 case IOV_SVAL(IOV_CFG80211_OPMODE): {
2499 if (int_val <= 0)
2500 bcmerror = BCME_BADARG;
2501 else
2502 dhd_pub->op_mode = int_val;
2503 break;
2504 }
2505
2506 case IOV_GVAL(IOV_ASSERT_TYPE):
2507 int_val = g_assert_type;
2508 bcopy(&int_val, arg, val_size);
2509 break;
2510
2511 case IOV_SVAL(IOV_ASSERT_TYPE):
2512 g_assert_type = (uint32)int_val;
2513 break;
2514
2515 case IOV_GVAL(IOV_LMTEST): {
2516 *(uint32 *)arg = (uint32)lmtest;
2517 break;
2518 }
2519
2520 case IOV_SVAL(IOV_LMTEST): {
2521 uint32 val = *(uint32 *)arg;
2522 if (val > 50)
2523 bcmerror = BCME_BADARG;
2524 else {
2525 lmtest = (uint)val;
2526 DHD_ERROR(("%s: lmtest %s\n",
2527 __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
2528 }
2529 break;
2530 }
2531
2532 #ifdef SHOW_LOGTRACE
2533 case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
2534 trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
2535 dhd_dbg_ring_t *dbg_verbose_ring = NULL;
2536
2537 dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
2538 if (dbg_verbose_ring == NULL) {
2539 DHD_ERROR(("dbg_verbose_ring is NULL\n"));
2540 bcmerror = BCME_UNSUPPORTED;
2541 break;
2542 }
2543
2544 if (trace_buf_info != NULL) {
2545 bzero(trace_buf_info, sizeof(trace_buf_info_t));
2546 dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
2547 } else {
2548 DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
2549 bcmerror = BCME_NOMEM;
2550 }
2551 break;
2552 }
2553 #endif /* SHOW_LOGTRACE */
2554 #ifdef DHD_DEBUG
2555 #if defined(BCMSDIO) || defined(BCMPCIE)
2556 case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
2557 if (dhd_pub->dongle_trap_occured)
2558 int_val = ltoh32(dhd_pub->last_trap_info.type);
2559 else
2560 int_val = 0;
2561 bcopy(&int_val, arg, val_size);
2562 break;
2563
2564 case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
2565 {
2566 struct bcmstrbuf strbuf;
2567 bcm_binit(&strbuf, arg, len);
2568 if (dhd_pub->dongle_trap_occured == FALSE) {
2569 bcm_bprintf(&strbuf, "no trap recorded\n");
2570 break;
2571 }
2572 dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
2573 break;
2574 }
2575
2576 case IOV_GVAL(IOV_BPADDR):
2577 {
2578 sdreg_t sdreg;
2579 uint32 addr, size;
2580
2581 memcpy(&sdreg, params, sizeof(sdreg));
2582
2583 addr = sdreg.offset;
2584 size = sdreg.func;
2585
2586 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2587 (uint *)&int_val, TRUE);
2588
2589 memcpy(arg, &int_val, sizeof(int32));
2590
2591 break;
2592 }
2593
2594 case IOV_SVAL(IOV_BPADDR):
2595 {
2596 sdreg_t sdreg;
2597 uint32 addr, size;
2598
2599 memcpy(&sdreg, params, sizeof(sdreg));
2600
2601 addr = sdreg.offset;
2602 size = sdreg.func;
2603
2604 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2605 (uint *)&sdreg.value,
2606 FALSE);
2607
2608 break;
2609 }
2610 #endif /* BCMSDIO || BCMPCIE */
2611 #ifdef BCMPCIE
2612 case IOV_SVAL(IOV_FLOW_RING_DEBUG):
2613 {
2614 bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
2615 break;
2616 }
2617 #endif /* BCMPCIE */
2618 case IOV_SVAL(IOV_MEM_DEBUG):
2619 if (len > 0) {
2620 bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
2621 }
2622 break;
2623 #endif /* DHD_DEBUG */
2624 #if defined(DHD_LOG_DUMP)
2625 case IOV_GVAL(IOV_LOG_DUMP):
2626 {
2627 dhd_prot_debug_info_print(dhd_pub);
2628 dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
2629 break;
2630 }
2631 #endif /* DHD_LOG_DUMP */
2632
2633 case IOV_GVAL(IOV_TPUT_TEST):
2634 {
2635 tput_test_t *tput_data = NULL;
2636 if (params && plen >= sizeof(tput_test_t)) {
2637 tput_data = (tput_test_t *)params;
2638 bcmerror = dhd_tput_test(dhd_pub, tput_data);
2639 } else {
2640 DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__));
2641 bcmerror = BCME_BADARG;
2642 }
2643 break;
2644 }
2645 case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
2646 {
2647 if (dhd_pub->debug_buf_dest_support) {
2648 debug_buf_dest_stat_t *debug_buf_dest_stat =
2649 (debug_buf_dest_stat_t *)arg;
2650 memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
2651 sizeof(dhd_pub->debug_buf_dest_stat));
2652 } else {
2653 bcmerror = BCME_DISABLED;
2654 }
2655 break;
2656 }
2657
2658 #if defined(DHD_SSSR_DUMP)
2659 case IOV_GVAL(IOV_FIS_TRIGGER):
2660 bcmerror = dhd_bus_fis_trigger(dhd_pub);
2661
2662 if (bcmerror == BCME_OK) {
2663 bcmerror = dhd_bus_fis_dump(dhd_pub);
2664 }
2665
2666 int_val = bcmerror;
2667 bcopy(&int_val, arg, val_size);
2668 break;
2669 #endif /* defined(DHD_SSSR_DUMP) */
2670
2671 #ifdef DHD_DEBUG
2672 case IOV_SVAL(IOV_INDUCE_ERROR): {
2673 if (int_val >= DHD_INDUCE_ERROR_MAX) {
2674 DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
2675 } else {
2676 dhd_pub->dhd_induce_error = (uint16)int_val;
2677 }
2678 break;
2679 }
2680 #endif /* DHD_DEBUG */
2681 #ifdef RTT_GEOFENCE_CONT
2682 #if defined(RTT_SUPPORT) && defined(WL_NAN)
2683 case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
2684 bool enable = 0;
2685 dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
2686 int_val = enable ? 1 : 0;
2687 bcopy(&int_val, arg, val_size);
2688 break;
2689 }
2690 case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
2691 bool enable = *(bool *)arg;
2692 dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
2693 break;
2694 }
2695 #endif /* RTT_SUPPORT && WL_NAN */
2696 #endif /* RTT_GEOFENCE_CONT */
2697 case IOV_GVAL(IOV_FW_VBS): {
2698 *(uint32 *)arg = (uint32)dhd_dbg_get_fwverbose(dhd_pub);
2699 break;
2700 }
2701
2702 case IOV_SVAL(IOV_FW_VBS): {
2703 if (int_val < 0) {
2704 int_val = 0;
2705 }
2706 dhd_dbg_set_fwverbose(dhd_pub, (uint32)int_val);
2707 break;
2708 }
2709
2710 #ifdef DHD_TX_PROFILE
2711 case IOV_SVAL(IOV_TX_PROFILE_TAG):
2712 {
2713 /* note: under the current implementation only one type of packet may be
2714 * tagged per profile
2715 */
2716 const dhd_tx_profile_protocol_t *protocol = NULL;
2717 /* for example, we might have a profile of profile_index 6, but at
2718 * offset 2 from dhd_pub->protocol_filters.
2719 */
2720 uint8 offset;
2721
2722 if (params == NULL) {
2723 bcmerror = BCME_ERROR;
2724 break;
2725 }
2726
2727 protocol = (dhd_tx_profile_protocol_t *)params;
2728
2729 /* validate */
2730 if (protocol->version != DHD_TX_PROFILE_VERSION) {
2731 bcmerror = BCME_VERSION;
2732 break;
2733 }
2734 if (protocol->profile_index > DHD_MAX_PROFILE_INDEX) {
2735 DHD_ERROR(("%s:\tprofile index must be between 0 and %d\n",
2736 __FUNCTION__, DHD_MAX_PROFILE_INDEX));
2737 bcmerror = BCME_RANGE;
2738 break;
2739 }
2740 if (protocol->layer != DHD_TX_PROFILE_DATA_LINK_LAYER && protocol->layer
2741 != DHD_TX_PROFILE_NETWORK_LAYER) {
2742 DHD_ERROR(("%s:\tlayer must be %d or %d\n", __FUNCTION__,
2743 DHD_TX_PROFILE_DATA_LINK_LAYER,
2744 DHD_TX_PROFILE_NETWORK_LAYER));
2745 bcmerror = BCME_BADARG;
2746 break;
2747 }
2748 if (protocol->protocol_number > __UINT16_MAX__) {
2749 DHD_ERROR(("%s:\tprotocol number must be <= %d\n", __FUNCTION__,
2750 __UINT16_MAX__));
2751 bcmerror = BCME_BADLEN;
2752 break;
2753 }
2754
2755 /* find the dhd_tx_profile_protocol_t */
2756 for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
2757 if (dhd_pub->protocol_filters[offset].profile_index ==
2758 protocol->profile_index) {
2759 break;
2760 }
2761 }
2762
2763 if (offset >= DHD_MAX_PROFILES) {
2764 #if DHD_MAX_PROFILES > 1
2765 DHD_ERROR(("%s:\tonly %d profiles supported at present\n",
2766 __FUNCTION__, DHD_MAX_PROFILES));
2767 #else /* DHD_MAX_PROFILES > 1 */
2768 DHD_ERROR(("%s:\tonly %d profile supported at present\n",
2769 __FUNCTION__, DHD_MAX_PROFILES));
2770 DHD_ERROR(("%s:\tthere is a profile of index %d\n", __FUNCTION__,
2771 dhd_pub->protocol_filters->profile_index));
2772 #endif /* DHD_MAX_PROFILES > 1 */
2773 bcmerror = BCME_NOMEM;
2774 break;
2775 }
2776
2777 /* memory already allocated in dhd_attach; just assign the value */
2778 dhd_pub->protocol_filters[offset] = *protocol;
2779
2780 if (offset >= dhd_pub->num_profiles) {
2781 dhd_pub->num_profiles = offset + 1;
2782 }
2783
2784 break;
2785 }
2786
2787 case IOV_SVAL(IOV_TX_PROFILE_ENABLE):
2788 dhd_pub->tx_profile_enab = int_val ? TRUE : FALSE;
2789 break;
2790
2791 case IOV_GVAL(IOV_TX_PROFILE_ENABLE):
2792 int_val = dhd_pub->tx_profile_enab;
2793 bcmerror = memcpy_s(arg, val_size, &int_val, sizeof(int_val));
2794 break;
2795
2796 case IOV_SVAL(IOV_TX_PROFILE_DUMP):
2797 {
2798 const dhd_tx_profile_protocol_t *protocol = NULL;
2799 uint8 offset;
2800 char *format = "%s:\ttx_profile %s: %d\n";
2801
2802 for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
2803 if (dhd_pub->protocol_filters[offset].profile_index == int_val) {
2804 protocol = &(dhd_pub->protocol_filters[offset]);
2805 break;
2806 }
2807 }
2808
2809 if (protocol == NULL) {
2810 DHD_ERROR(("%s:\tno profile with index %d\n", __FUNCTION__,
2811 int_val));
2812 bcmerror = BCME_ERROR;
2813 break;
2814 }
2815
2816 printf(format, __FUNCTION__, "profile_index", protocol->profile_index);
2817 printf(format, __FUNCTION__, "layer", protocol->layer);
2818 printf(format, __FUNCTION__, "protocol_number", protocol->protocol_number);
2819 printf(format, __FUNCTION__, "src_port", protocol->src_port);
2820 printf(format, __FUNCTION__, "dest_port", protocol->dest_port);
2821
2822 break;
2823 }
2824 #endif /* defined(DHD_TX_PROFILE) */
2825
2826 default:
2827 bcmerror = BCME_UNSUPPORTED;
2828 break;
2829 }
2830
2831 exit:
2832 DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
2833 return bcmerror;
2834 }
2835
2836 /* Store the status of a connection attempt for later retrieval by an iovar */
2837 void
2838 dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
2839 {
2840 /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
2841 * because an encryption/rsn mismatch results in both events, and
2842 * the important information is in the WLC_E_PRUNE.
2843 */
2844 if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
2845 dhd_conn_event == WLC_E_PRUNE)) {
2846 dhd_conn_event = event;
2847 dhd_conn_status = status;
2848 dhd_conn_reason = reason;
2849 }
2850 }
2851
2852 bool
2853 dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
2854 {
2855 void *p;
2856 int eprec = -1; /* precedence to evict from */
2857 bool discard_oldest;
2858
2859 /* Fast case, precedence queue is not full and we are also not
2860 * exceeding total queue length
2861 */
2862 if (!pktqprec_full(q, prec) && !pktq_full(q)) {
2863 pktq_penq(q, prec, pkt);
2864 return TRUE;
2865 }
2866
2867 /* Determine precedence from which to evict packet, if any */
2868 if (pktqprec_full(q, prec))
2869 eprec = prec;
2870 else if (pktq_full(q)) {
2871 p = pktq_peek_tail(q, &eprec);
2872 ASSERT(p);
2873 if (eprec > prec || eprec < 0)
2874 return FALSE;
2875 }
2876
2877 /* Evict if needed */
2878 if (eprec >= 0) {
2879 /* Detect queueing to unconfigured precedence */
2880 ASSERT(!pktqprec_empty(q, eprec));
2881 discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
2882 if (eprec == prec && !discard_oldest)
2883 return FALSE; /* refuse newer (incoming) packet */
2884 /* Evict packet according to discard policy */
2885 p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
2886 ASSERT(p);
2887 #ifdef DHDTCPACK_SUPPRESS
2888 if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
2889 DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
2890 __FUNCTION__, __LINE__));
2891 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
2892 }
2893 #endif /* DHDTCPACK_SUPPRESS */
2894 PKTFREE(dhdp->osh, p, TRUE);
2895 }
2896
2897 /* Enqueue */
2898 p = pktq_penq(q, prec, pkt);
2899 ASSERT(p);
2900
2901 return TRUE;
2902 }
2903
2904 /*
2905 * Functions to drop proper pkts from queue:
2906 * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
2907 * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
2908 * If can't find pkts matching upper 2 cases, drop first pkt anyway
2909 */
/* Drop packet(s) from precedence queue @prec of @pq, honoring fragment sets
 * (see the policy comment above).  Calls @fn (if non-NULL) for each dropped
 * packet.  Returns TRUE if anything was dropped, FALSE if the queue was empty.
 */
bool
dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
{
	struct pktq_prec *q = NULL;
	/* [first, last] bounds the span of packets to drop; prev_first is the
	 * packet linked immediately before 'first' (NULL if 'first' is head).
	 */
	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
	pkt_frag_t frag_info;

	ASSERT(dhdp && pq);
	ASSERT(prec >= 0 && prec < pq->num_prec);

	q = &pq->q[prec];
	p = q->head;

	if (p == NULL)
		return FALSE;

	/* Scan for either the first non-fragmented packet, or a complete
	 * fragment run (FIRST .. LAST).  An incomplete run may also be closed
	 * off by a second FIRST fragment (previous packet becomes 'last').
	 */
	while (p) {
		frag_info = pkt_frag_info(dhdp->osh, p);
		if (frag_info == DHD_PKT_FRAG_NONE) {
			/* non-fragmented packet found: drop just this one */
			break;
		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
			if (first) {
				/* No last frag pkt, use prev as last */
				last = prev;
				break;
			} else {
				first = p;
				prev_first = prev;
			}
		} else if (frag_info == DHD_PKT_FRAG_LAST) {
			if (first) {
				last = p;
				break;
			}
		}

		prev = p;
		p = PKTLINK(p);
	}

	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
		/* Not found matching pkts, use oldest */
		prev = NULL;
		p = q->head;
		frag_info = 0;	/* force the single-packet path below */
	}

	if (frag_info == DHD_PKT_FRAG_NONE) {
		/* single-packet drop: span is just 'p' */
		first = last = p;
		prev_first = prev;
	}

	/* Unlink and release every packet in [first, last] */
	p = first;
	while (p) {
		next = PKTLINK(p);
		q->n_pkts--;
		pq->n_pkts_tot--;

#ifdef WL_TXQ_STALL
		q->dequeue_count++;
#endif

		PKTSETLINK(p, NULL);

		if (fn)
			fn(dhdp, prec, p, TRUE);

		if (p == last)
			break;

		p = next;
	}

	/* Re-link the queue across the removed span; fix head/tail as needed */
	if (prev_first == NULL) {
		if ((q->head = next) == NULL)
			q->tail = NULL;
	} else {
		PKTSETLINK(prev_first, next);
		if (!next)
			q->tail = prev_first;
	}

	return TRUE;
}
2994
2995 static int
2996 dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
2997 void *params, int plen, void *arg, uint len, bool set)
2998 {
2999 int bcmerror = 0;
3000 uint val_size;
3001 const bcm_iovar_t *vi = NULL;
3002 uint32 actionid;
3003
3004 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3005
3006 ASSERT(name);
3007
3008 /* Get MUST have return space */
3009 ASSERT(set || (arg && len));
3010
3011 /* Set does NOT take qualifiers */
3012 ASSERT(!set || (!params && !plen));
3013
3014 if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
3015 bcmerror = BCME_UNSUPPORTED;
3016 goto exit;
3017 }
3018
3019 DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
3020 name, (set ? "set" : "get"), len, plen));
3021
3022 /* set up 'params' pointer in case this is a set command so that
3023 * the convenience int and bool code can be common to set and get
3024 */
3025 if (params == NULL) {
3026 params = arg;
3027 plen = len;
3028 }
3029
3030 if (vi->type == IOVT_VOID)
3031 val_size = 0;
3032 else if (vi->type == IOVT_BUFFER)
3033 val_size = len;
3034 else
3035 /* all other types are integer sized */
3036 val_size = sizeof(int);
3037
3038 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
3039
3040 bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
3041
3042 exit:
3043 return bcmerror;
3044 }
3045
/* Top-level DHD ioctl handler: services DHD_GET_MAGIC/DHD_GET_VERSION
 * directly, and routes DHD_GET_VAR/DHD_SET_VAR through the generic,
 * protocol, and bus iovar tables in turn.  @buf holds the NUL-terminated
 * iovar name followed by its argument bytes.  Returns a BCME_xxx status,
 * or -ENODEV when the bus is down/suspended and the iovar is not one of
 * the whitelisted exceptions.
 */
int
dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
{
	int bcmerror = 0;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!buf) {
		return BCME_BADARG;
	}

	/* Serialize all dhd iovar handling; released on every exit path */
	dhd_os_dhdiovar_lock(dhd_pub);
	switch (ioc->cmd) {
		case DHD_GET_MAGIC:
			if (buflen < sizeof(int))
				bcmerror = BCME_BUFTOOSHORT;
			else
				*(int*)buf = DHD_IOCTL_MAGIC;
			break;

		case DHD_GET_VERSION:
			if (buflen < sizeof(int))
				bcmerror = BCME_BUFTOOSHORT;
			else
				*(int*)buf = DHD_IOCTL_VERSION;
			break;

		case DHD_GET_VAR:
		case DHD_SET_VAR:
			{
				char *arg;
				uint arglen;

				/* Reject most iovars while the bus is down; "devreset"
				 * must still get through so the bus can be recovered.
				 */
				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
				if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
					bcmstricmp((char *)buf, "devreset")) {
					/* In platforms like FC19, the FW download is done via IOCTL
					 * and should not return error for IOCTLs fired before FW
					 * Download is done
					 */
					if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
						DHD_ERROR(("%s: returning as busstate=%d\n",
								__FUNCTION__, dhd_pub->busstate));
						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
						dhd_os_dhdiovar_unlock(dhd_pub);
						return -ENODEV;
					}
				}
				/* Mark this context busy so bus teardown waits for us */
				DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
				dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
#endif /* DHD_PCIE_RUNTIMEPM */

				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
				if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
					/* If Suspend/Resume is tested via pcie_suspend IOVAR
					 * then continue to execute the IOVAR, return from here for
					 * other IOVARs, also include pciecfgreg and devreset to go
					 * through.
					 */
					if (bcmstricmp((char *)buf, "pcie_suspend") &&
					    bcmstricmp((char *)buf, "pciecfgreg") &&
					    bcmstricmp((char *)buf, "devreset") &&
					    bcmstricmp((char *)buf, "sdio_suspend")) {
						DHD_ERROR(("%s: bus is in suspend(%d)"
							"or suspending(0x%x) state\n",
							__FUNCTION__, dhd_pub->busstate,
							dhd_pub->dhd_bus_busy_state));
						DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
						dhd_os_busbusy_wake(dhd_pub);
						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
						dhd_os_dhdiovar_unlock(dhd_pub);
						return -ENODEV;
					}
				}
				/* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup,
				 * which will wait for all the busy contexts to get over for
				 * particular time and call ASSERT if timeout happens. As during
				 * devreset ioctal, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR,
				 * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
				 * not used in Production platforms but only used in FC19 setups.
				 */
				if (!bcmstricmp((char *)buf, "devreset") ||
#ifdef BCMPCIE
				    (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
				    !bcmstricmp((char *)buf, "dwnldstate")) ||
#endif /* BCMPCIE */
				    FALSE)
				{
					DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
				}
				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

				/* scan past the name to any arguments */
				for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
					;

				/* buffer must contain a NUL terminator for the name */
				if (arglen == 0 || *arg) {
					bcmerror = BCME_BUFTOOSHORT;
					goto unlock_exit;
				}

				/* account for the NUL terminator */
				arg++, arglen--;
				/* call with the appropriate arguments */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
							buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
							arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

				/* not in generic table, try protocol module */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
							arglen, buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
							NULL, 0, arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

				/* if still not found, try bus module */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
							arg, arglen, buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
							NULL, 0, arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

			}
			goto unlock_exit;

		default:
			bcmerror = BCME_UNSUPPORTED;
	}
	dhd_os_dhdiovar_unlock(dhd_pub);
	return bcmerror;

unlock_exit:
	/* common exit for the VAR paths: drop the busy mark, wake waiters,
	 * release the iovar lock
	 */
	DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
	DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
	dhd_os_busbusy_wake(dhd_pub);
	DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
	dhd_os_dhdiovar_unlock(dhd_pub);
	return bcmerror;
}
3206
3207 #ifdef SHOW_EVENTS
3208
3209 static void
3210 wl_show_roam_event(dhd_pub_t *dhd_pub, uint status, uint datalen,
3211 const char *event_name, char *eabuf, void *event_data)
3212 {
3213 if (status == WLC_E_STATUS_SUCCESS) {
3214 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
3215 } else {
3216 if (status == WLC_E_STATUS_FAIL) {
3217 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
3218 } else if (status == WLC_E_STATUS_NO_NETWORKS) {
3219 if (datalen) {
3220 uint8 id = *((uint8 *)event_data);
3221 if (id != DOT11_MNG_PROPR_ID) {
3222 wl_roam_event_t *roam_data =
3223 (wl_roam_event_t *)event_data;
3224 bcm_xtlv_t *tlv = (bcm_xtlv_t *)roam_data->xtlvs;
3225 if (tlv->id == WLC_ROAM_NO_NETWORKS_TLV_ID) {
3226 uint32 *fail_reason = (uint32 *)tlv->data;
3227 switch (*fail_reason) {
3228 case WLC_E_REASON_NO_NETWORKS:
3229 DHD_EVENT(("MACEVENT: %s,"
3230 " no networks found\n",
3231 event_name));
3232 break;
3233 case WLC_E_REASON_NO_NETWORKS_BY_SCORE:
3234 DHD_EVENT(("MACEVENT: %s,"
3235 " no networks found by score\n",
3236 event_name));
3237 break;
3238 default:
3239 DHD_ERROR(("MACEVENT: %s,"
3240 " unknown fail reason 0x%x\n",
3241 event_name,
3242 *fail_reason));
3243 ASSERT(0);
3244 }
3245 } else {
3246 DHD_EVENT(("MACEVENT: %s,"
3247 " no networks found\n",
3248 event_name));
3249 }
3250 } else {
3251 DHD_EVENT(("MACEVENT: %s,"
3252 " no networks found\n",
3253 event_name));
3254 }
3255 } else {
3256 DHD_EVENT(("MACEVENT: %s, no networks found\n",
3257 event_name));
3258 }
3259 } else {
3260 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
3261 event_name, (int)status));
3262 }
3263 }
3264 }
3265
3266 static void
3267 wl_show_roam_cache_update_event(const char *name, uint status,
3268 uint reason, uint datalen, void *event_data)
3269 {
3270 wlc_roam_cache_update_event_t *cache_update;
3271 uint16 len_of_tlvs;
3272 void *val_tlv_ptr;
3273 bcm_xtlv_t *val_xtlv;
3274 char ntoa_buf[ETHER_ADDR_STR_LEN];
3275 uint idx;
3276 const char* reason_name = NULL;
3277 const char* status_name = NULL;
3278 static struct {
3279 uint event;
3280 const char *event_name;
3281 } reason_names[] = {
3282 {WLC_E_REASON_INITIAL_ASSOC, "INITIAL ASSOCIATION"},
3283 {WLC_E_REASON_LOW_RSSI, "LOW_RSSI"},
3284 {WLC_E_REASON_DEAUTH, "RECEIVED DEAUTHENTICATION"},
3285 {WLC_E_REASON_DISASSOC, "RECEIVED DISASSOCATION"},
3286 {WLC_E_REASON_BCNS_LOST, "BEACONS LOST"},
3287 {WLC_E_REASON_BETTER_AP, "BETTER AP FOUND"},
3288 {WLC_E_REASON_MINTXRATE, "STUCK AT MIN TX RATE"},
3289 {WLC_E_REASON_BSSTRANS_REQ, "REQUESTED ROAM"},
3290 {WLC_E_REASON_TXFAIL, "TOO MANY TXFAILURES"}
3291 };
3292
3293 static struct {
3294 uint event;
3295 const char *event_name;
3296 } status_names[] = {
3297 {WLC_E_STATUS_SUCCESS, "operation was successful"},
3298 {WLC_E_STATUS_FAIL, "operation failed"},
3299 {WLC_E_STATUS_TIMEOUT, "operation timed out"},
3300 {WLC_E_STATUS_NO_NETWORKS, "failed due to no matching network found"},
3301 {WLC_E_STATUS_ABORT, "operation was aborted"},
3302 {WLC_E_STATUS_NO_ACK, "protocol failure: packet not ack'd"},
3303 {WLC_E_STATUS_UNSOLICITED, "AUTH or ASSOC packet was unsolicited"},
3304 {WLC_E_STATUS_ATTEMPT, "attempt to assoc to an auto auth configuration"},
3305 {WLC_E_STATUS_PARTIAL, "scan results are incomplete"},
3306 {WLC_E_STATUS_NEWSCAN, "scan aborted by another scan"},
3307 {WLC_E_STATUS_NEWASSOC, "scan aborted due to assoc in progress"},
3308 {WLC_E_STATUS_11HQUIET, "802.11h quiet period started"},
3309 {WLC_E_STATUS_SUPPRESS, "user disabled scanning"},
3310 {WLC_E_STATUS_NOCHANS, "no allowable channels to scan"},
3311 {WLC_E_STATUS_CS_ABORT, "abort channel select"},
3312 {WLC_E_STATUS_ERROR, "request failed due to error"},
3313 {WLC_E_STATUS_INVALID, "Invalid status code"}
3314 };
3315
3316 switch (reason) {
3317 case WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE:
3318 DHD_EVENT(("Current roam cache status %d, "
3319 "reason for cache update is new roam cache\n", status));
3320 break;
3321 case WLC_ROAM_CACHE_UPDATE_JOIN:
3322 DHD_EVENT(("Current roam cache status %d, "
3323 "reason for cache update is start of join\n", status));
3324 break;
3325 case WLC_ROAM_CACHE_UPDATE_RSSI_DELTA:
3326 DHD_EVENT(("Current roam cache status %d, "
3327 "reason for cache update is delta in rssi\n", status));
3328 break;
3329 case WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA:
3330 DHD_EVENT(("Current roam cache status %d, "
3331 "reason for cache update is motion delta in rssi\n", status));
3332 break;
3333 case WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS:
3334 DHD_EVENT(("Current roam cache status %d, "
3335 "reason for cache update is missed channel\n", status));
3336 break;
3337 case WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN:
3338 DHD_EVENT(("Current roam cache status %d, "
3339 "reason for cache update is start of split scan\n", status));
3340 break;
3341 case WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN:
3342 DHD_EVENT(("Current roam cache status %d, "
3343 "reason for cache update is start of full scan\n", status));
3344 break;
3345 case WLC_ROAM_CACHE_UPDATE_INIT_ASSOC:
3346 DHD_EVENT(("Current roam cache status %d, "
3347 "reason for cache update is init association\n", status));
3348 break;
3349 case WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED:
3350 DHD_EVENT(("Current roam cache status %d, "
3351 "reason for cache update is failure in full scan\n", status));
3352 break;
3353 case WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND:
3354 DHD_EVENT(("Current roam cache status %d, "
3355 "reason for cache update is empty scan result\n", status));
3356 break;
3357 case WLC_ROAM_CACHE_UPDATE_MISSING_AP:
3358 DHD_EVENT(("Current roam cache status %d, "
3359 "reason for cache update is missed ap\n", status));
3360 break;
3361 default:
3362 DHD_EVENT(("Current roam cache status %d, "
3363 "reason for cache update is unknown %d\n", status, reason));
3364 break;
3365 }
3366
3367 if (datalen < sizeof(wlc_roam_cache_update_event_t)) {
3368 DHD_ERROR(("MACEVENT: %s, missing event data\n", name));
3369 return;
3370 }
3371
3372 cache_update = (wlc_roam_cache_update_event_t *)event_data;
3373 val_tlv_ptr = (void *)cache_update->xtlvs;
3374 len_of_tlvs = datalen - sizeof(wlc_roam_cache_update_event_t);
3375 val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
3376 if (val_xtlv->id != WL_RMC_RPT_CMD_DATA) {
3377 DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
3378 name, val_xtlv->id));
3379 return;
3380 }
3381 val_tlv_ptr = (uint8 *)val_tlv_ptr + BCM_XTLV_HDR_SIZE;
3382 len_of_tlvs = val_xtlv->len;
3383
3384 while (len_of_tlvs && len_of_tlvs > BCM_XTLV_HDR_SIZE) {
3385 val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
3386 switch (val_xtlv->id) {
3387 case WL_RMC_RPT_XTLV_BSS_INFO:
3388 {
3389 rmc_bss_info_v1_t *bss_info = (rmc_bss_info_v1_t *)(val_xtlv->data);
3390 DHD_EVENT(("\t Current BSS INFO:\n"));
3391 DHD_EVENT(("\t\tRSSI: %d\n", bss_info->rssi));
3392 DHD_EVENT(("\t\tNumber of full scans performed "
3393 "on current BSS: %d\n", bss_info->fullscan_count));
3394 for (idx = 0; idx < ARRAYSIZE(reason_names); idx++) {
3395 if (reason_names[idx].event == bss_info->reason) {
3396 reason_name = reason_names[idx].event_name;
3397 }
3398 }
3399 DHD_EVENT(("\t\tReason code for last full scan: %s(%d)\n",
3400 reason_name, bss_info->reason));
3401 DHD_EVENT(("\t\tDelta between current time and "
3402 "last full scan: %d\n", bss_info->time_full_scan));
3403 for (idx = 0; idx < ARRAYSIZE(status_names); idx++) {
3404 if (status_names[idx].event == bss_info->status)
3405 status_name = status_names[idx].event_name;
3406 }
3407 DHD_EVENT(("\t\tLast status code for not roaming: %s(%d)\n",
3408 status_name, bss_info->status));
3409
3410 }
3411 break;
3412 case WL_RMC_RPT_XTLV_CANDIDATE_INFO:
3413 case WL_RMC_RPT_XTLV_USER_CACHE_INFO:
3414 {
3415 rmc_candidate_info_v1_t *candidate_info =
3416 (rmc_candidate_info_v1_t *)(val_xtlv->data);
3417 if (val_xtlv->id == WL_RMC_RPT_XTLV_CANDIDATE_INFO) {
3418 DHD_EVENT(("\t Candidate INFO:\n"));
3419 } else {
3420 DHD_EVENT(("\t User Candidate INFO:\n"));
3421 }
3422 DHD_EVENT(("\t\tBSSID: %s\n",
3423 bcm_ether_ntoa((const struct ether_addr *)
3424 &candidate_info->bssid, ntoa_buf)));
3425 DHD_EVENT(("\t\tRSSI: %d\n", candidate_info->rssi));
3426 DHD_EVENT(("\t\tChannel: %d\n", candidate_info->ctl_channel));
3427 DHD_EVENT(("\t\tDelta between current time and last "
3428 "seen time: %d\n", candidate_info->time_last_seen));
3429 DHD_EVENT(("\t\tBSS load: %d\n", candidate_info->bss_load));
3430 }
3431 break;
3432 default:
3433 DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
3434 name, val_xtlv->id));
3435 return;
3436 }
3437 val_tlv_ptr = (uint8 *)val_tlv_ptr + bcm_xtlv_size(val_xtlv,
3438 BCM_XTLV_OPTION_NONE);
3439 len_of_tlvs -= (uint16)bcm_xtlv_size(val_xtlv, BCM_XTLV_OPTION_NONE);
3440 }
3441 }
3442
/* Debug pretty-printer: translate a firmware event (wl_event_msg_t, fields in
 * network byte order) into DHD_EVENT/DHD_TRACE log lines. Dispatches on the
 * event type; unknown types fall through to a generic one-line dump. Purely
 * diagnostic — no driver state is modified.
 *
 * event       event header (fields converted with ntoh* before use)
 * event_data  optional payload; datalen is treated as 0 when it is NULL
 * raw_event_ptr  passed through to the logtrace/msch decoders only
 * eventmask   unused here
 */
static void
wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
	void *raw_event_ptr, char *eventmask)
{
	uint i, status, reason;
	bool group = FALSE, flush_txq = FALSE, link = FALSE;
	bool host_data = FALSE; /* prints event data after the case when set */
	const char *auth_str;
	const char *event_name;
	const uchar *buf;
	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
	uint event_type, flags, auth_type, datalen;

	/* convert header fields out of network byte order */
	event_type = ntoh32(event->event_type);
	flags = ntoh16(event->flags);
	status = ntoh32(event->status);
	reason = ntoh32(event->reason);
	BCM_REFERENCE(reason);
	auth_type = ntoh32(event->auth_type);
	datalen = (event_data != NULL) ? ntoh32(event->datalen) : 0;

	/* debug dump of event messages */
	snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));

	event_name = bcmevent_get_name(event_type);
	BCM_REFERENCE(event_name);

	if (flags & WLC_EVENT_MSG_LINK)
		link = TRUE;
	if (flags & WLC_EVENT_MSG_GROUP)
		group = TRUE;
	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
		flush_txq = TRUE;

	switch (event_type) {
	case WLC_E_START:
	case WLC_E_DEAUTH:
	case WLC_E_DISASSOC:
		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
		break;

	case WLC_E_ASSOC_IND:
	case WLC_E_REASSOC_IND:

		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));

		break;

	case WLC_E_ASSOC:
	case WLC_E_REASSOC:
		if (status == WLC_E_STATUS_SUCCESS) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
		} else if (status == WLC_E_STATUS_TIMEOUT) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
		} else if (status == WLC_E_STATUS_FAIL) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
				event_name, eabuf, (int)status, (int)reason));
		} else if (status == WLC_E_STATUS_SUPPRESS) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, SUPPRESS\n", event_name, eabuf));
		} else if (status == WLC_E_STATUS_NO_ACK) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, NOACK\n", event_name, eabuf));
		} else {
			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
				event_name, eabuf, (int)status));
		}

		break;

	case WLC_E_DEAUTH_IND:
	case WLC_E_DISASSOC_IND:
		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
		break;

	case WLC_E_AUTH:
	case WLC_E_AUTH_IND:
		/* map 802.11 auth algorithm to a printable name; err_msg is
		 * only used for the unknown-type fallback
		 */
		if (auth_type == DOT11_OPEN_SYSTEM)
			auth_str = "Open System";
		else if (auth_type == DOT11_SHARED_KEY)
			auth_str = "Shared Key";
		else if (auth_type == DOT11_SAE)
			auth_str = "SAE";
		else {
			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
			auth_str = err_msg;
		}

		if (event_type == WLC_E_AUTH_IND) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
		} else if (status == WLC_E_STATUS_SUCCESS) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
				event_name, eabuf, auth_str));
		} else if (status == WLC_E_STATUS_TIMEOUT) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
				event_name, eabuf, auth_str));
		} else if (status == WLC_E_STATUS_FAIL) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
				event_name, eabuf, auth_str, (int)status, (int)reason));
		} else if (status == WLC_E_STATUS_SUPPRESS) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUPPRESS\n",
				event_name, eabuf, auth_str));
		} else if (status == WLC_E_STATUS_NO_ACK) {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
				event_name, eabuf, auth_str));
		} else {
			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
				event_name, eabuf, auth_str, (int)status, (int)reason));
		}
		BCM_REFERENCE(auth_str);

		break;

	case WLC_E_ROAM:
		wl_show_roam_event(dhd_pub, status, datalen,
			event_name, eabuf, event_data);
		break;
	case WLC_E_ROAM_START:
		if (datalen >= sizeof(wlc_roam_start_event_t)) {
			const wlc_roam_start_event_t *roam_start =
				(wlc_roam_start_event_t *)event_data;
			DHD_EVENT(("MACEVENT: %s, current bss rssi %d\n",
				event_name, (int)roam_start->rssi));
		}
		break;
	case WLC_E_ROAM_PREP:
		if (datalen >= sizeof(wlc_roam_prep_event_t)) {
			const wlc_roam_prep_event_t *roam_prep =
				(wlc_roam_prep_event_t *)event_data;
			DHD_EVENT(("MACEVENT: %s, target bss rssi %d\n",
				event_name, (int)roam_prep->rssi));
		}
		break;
	case WLC_E_ROAM_CACHE_UPDATE:
		DHD_EVENT(("MACEVENT: %s\n", event_name));
		wl_show_roam_cache_update_event(event_name, status,
			reason, datalen, event_data);
		break;
	case WLC_E_JOIN:
	case WLC_E_SET_SSID:
		if (status == WLC_E_STATUS_SUCCESS) {
			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
		} else {
			if (status == WLC_E_STATUS_FAIL) {
				DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
			} else if (status == WLC_E_STATUS_NO_NETWORKS) {
				DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
			} else {
				DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
					event_name, (int)status));
			}
		}
		break;

	case WLC_E_BEACON_RX:
		if (status == WLC_E_STATUS_SUCCESS) {
			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
		} else if (status == WLC_E_STATUS_FAIL) {
			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
		} else {
			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
		}
		break;

	case WLC_E_LINK:
		DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d reason:%d\n",
			event_name, link?"UP":"DOWN", flags, status, reason));
#ifdef PCIE_FULL_DONGLE
#endif
		BCM_REFERENCE(link);
		break;

	case WLC_E_MIC_ERROR:
		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
			event_name, eabuf, group, flush_txq));
		BCM_REFERENCE(group);
		BCM_REFERENCE(flush_txq);
		break;

	case WLC_E_ICV_ERROR:
	case WLC_E_UNICAST_DECODE_ERROR:
	case WLC_E_MULTICAST_DECODE_ERROR:
		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
			event_name, eabuf));
		break;

	case WLC_E_TXFAIL:
		DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
		break;

	case WLC_E_ASSOC_REQ_IE:
	case WLC_E_ASSOC_RESP_IE:
	case WLC_E_PMKID_CACHE:
		DHD_EVENT(("MACEVENT: %s\n", event_name));
		break;

	case WLC_E_SCAN_COMPLETE:
		DHD_EVENT(("MACEVENT: %s\n", event_name));
		break;
	case WLC_E_RSSI_LQM:
	case WLC_E_PFN_NET_FOUND:
	case WLC_E_PFN_NET_LOST:
	case WLC_E_PFN_SCAN_COMPLETE:
	case WLC_E_PFN_SCAN_NONE:
	case WLC_E_PFN_SCAN_ALLGONE:
	case WLC_E_PFN_GSCAN_FULL_RESULT:
	case WLC_E_PFN_SSID_EXT:
		DHD_EVENT(("PNOEVENT: %s\n", event_name));
		break;

	case WLC_E_PFN_SCAN_BACKOFF:
	case WLC_E_PFN_BSSID_SCAN_BACKOFF:
		DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
			event_name, (int)status, (int)reason));
		break;

	case WLC_E_PSK_SUP:
	case WLC_E_PRUNE:
		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
			event_name, (int)status, (int)reason));
		break;

#ifdef WIFI_ACT_FRAME
	case WLC_E_ACTION_FRAME:
		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
		break;
	case WLC_E_ACTION_FRAME_COMPLETE:
		if (datalen >= sizeof(uint32)) {
			const uint32 *pktid = event_data;
			BCM_REFERENCE(pktid);
			DHD_EVENT(("MACEVENT: %s status %d, reason %d, pktid 0x%x\n",
				event_name, (int)status, (int)reason, *pktid));
		}
		break;
#endif /* WIFI_ACT_FRAME */

#ifdef SHOW_LOGTRACE
	case WLC_E_TRACE:
	{
		dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
		break;
	}
#endif /* SHOW_LOGTRACE */

	case WLC_E_RSSI:
		if (datalen >= sizeof(int)) {
			DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
		}
		break;

	case WLC_E_SERVICE_FOUND:
	case WLC_E_P2PO_ADD_DEVICE:
	case WLC_E_P2PO_DEL_DEVICE:
		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
		break;

#ifdef BT_WIFI_HANDOBER
	case WLC_E_BT_WIFI_HANDOVER_REQ:
		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
		break;
#endif

	case WLC_E_CCA_CHAN_QUAL:
		/* payload layout depends on cca_event->id; each sub-format is
		 * dumped with its own log shape
		 */
		if (datalen >= sizeof(cca_chan_qual_event_t)) {
			const cca_chan_qual_event_t *cca_event =
				(cca_chan_qual_event_t *)event_data;
			if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
				const cca_only_chan_qual_event_t *cca_only_event =
					(const cca_only_chan_qual_event_t *)cca_event;
				BCM_REFERENCE(cca_only_event);
				DHD_EVENT((
					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
					" channel 0x%02x\n",
					event_name, event_type, eabuf, (int)status,
					(int)reason, (int)auth_type, cca_event->chanspec));
				DHD_EVENT((
					"\tTOTAL (dur %dms me %dms notme %dms interf %dms"
					" ts 0x%08x)\n",
					cca_only_event->cca_busy_ext.duration,
					cca_only_event->cca_busy_ext.congest_ibss,
					cca_only_event->cca_busy_ext.congest_obss,
					cca_only_event->cca_busy_ext.interference,
					cca_only_event->cca_busy_ext.timestamp));
				DHD_EVENT((
					"\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
					cca_only_event->cca_busy_nopm.duration,
					cca_only_event->cca_busy_nopm.congest_ibss,
					cca_only_event->cca_busy_nopm.congest_obss,
					cca_only_event->cca_busy_nopm.interference));
				DHD_EVENT((
					"\t PM (dur %dms me %dms notme %dms interf %dms)\n",
					cca_only_event->cca_busy_pm.duration,
					cca_only_event->cca_busy_pm.congest_ibss,
					cca_only_event->cca_busy_pm.congest_obss,
					cca_only_event->cca_busy_pm.interference));
			} else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
				DHD_EVENT((
					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
					" channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
					" ts 0x%08x)\n",
					event_name, event_type, eabuf, (int)status,
					(int)reason, (int)auth_type, cca_event->chanspec,
					cca_event->cca_busy_ext.duration,
					cca_event->cca_busy_ext.congest_ibss,
					cca_event->cca_busy_ext.congest_obss,
					cca_event->cca_busy_ext.interference,
					cca_event->cca_busy_ext.timestamp));
			} else if (cca_event->id == WL_CHAN_QUAL_CCA) {
				DHD_EVENT((
					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
					" channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
					event_name, event_type, eabuf, (int)status,
					(int)reason, (int)auth_type, cca_event->chanspec,
					cca_event->cca_busy.duration,
					cca_event->cca_busy.congest,
					cca_event->cca_busy.timestamp));
			} else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
			           (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
				DHD_EVENT((
					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
					" channel 0x%02x (NF[%d] %ddB)\n",
					event_name, event_type, eabuf, (int)status,
					(int)reason, (int)auth_type, cca_event->chanspec,
					cca_event->id, cca_event->noise));
			} else {
				DHD_EVENT((
					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
					" channel 0x%02x (unknown ID %d)\n",
					event_name, event_type, eabuf, (int)status,
					(int)reason, (int)auth_type, cca_event->chanspec,
					cca_event->id));
			}
		}
		break;
	case WLC_E_ESCAN_RESULT:
		if (datalen >= sizeof(wl_escan_result_v2_t)) {
			const wl_escan_result_v2_t *escan_result =
				(wl_escan_result_v2_t *)event_data;
			BCM_REFERENCE(escan_result);
			/* Because WLC_E_ESCAN_RESULT event log are being print too many.
			 * So, DHD_EVENT() changes to be used DHD_TRACE() in HW4 platform.
			 */
			DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
				event_name, event_type, eabuf, (int)status));
		}
		break;
	case WLC_E_IF:
		if (datalen >= sizeof(struct wl_event_data_if)) {
			const struct wl_event_data_if *ifevent =
				(struct wl_event_data_if *)event_data;
			BCM_REFERENCE(ifevent);

			DHD_EVENT(("MACEVENT: %s, opcode:0x%d  ifidx:%d role:%d\n",
				event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
		}
		break;
#ifdef SHOW_LOGTRACE
	case WLC_E_MSCH:
	{
		wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
		break;
	}
#endif /* SHOW_LOGTRACE */

	case WLC_E_PSK_AUTH:
		DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
			event_name, eabuf, status, reason));
		break;
	case WLC_E_AGGR_EVENT:
		/* payload is a sequence of embedded wl_event_msg_t records,
		 * each padded to a uint64 boundary; len tracks the running
		 * offset into aggrbuf->data
		 */
		if (datalen >= sizeof(event_aggr_data_t)) {
			const event_aggr_data_t *aggrbuf = event_data;
			int j = 0, len = 0;
			const uint8 *data = aggrbuf->data;
			DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
				event_name, aggrbuf->num_events, aggrbuf->len));
			for (j = 0; j < aggrbuf->num_events; j++)
			{
				const wl_event_msg_t * sub_event = (const wl_event_msg_t *)data;
				if (len > aggrbuf->len) {
					DHD_ERROR(("%s: Aggr events corrupted!",
						__FUNCTION__));
					break;
				}
				DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
				len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
					sizeof(wl_event_msg_t)), sizeof(uint64));
				buf = (const uchar *)(data + sizeof(wl_event_msg_t));
				BCM_REFERENCE(buf);
				DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
				for (i = 0; i < ntoh32(sub_event->datalen); i++) {
					DHD_EVENT((" 0x%02x ", buf[i]));
				}
				data = aggrbuf->data + len;
			}
			DHD_EVENT(("\n"));
		}
		break;
	case WLC_E_PHY_CAL:
	{
		DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
		break;
	}
	case WLC_E_NAN_CRITICAL:
	{
		DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
		break;
	}
	case WLC_E_NAN_NON_CRITICAL:
	{
		DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
		break;
	}
	case WLC_E_PROXD:
		if (datalen >= sizeof(wl_proxd_event_t)) {
			const wl_proxd_event_t *proxd =
				(wl_proxd_event_t*)event_data;
			DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
				event_name, proxd->type, reason));
		}
		break;
	case WLC_E_RPSNOA:
		if (datalen >= sizeof(rpsnoa_stats_t)) {
			const rpsnoa_stats_t *stat = event_data;
			if (datalen == sizeof(*stat)) {
				DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
					(stat->band == WLC_BAND_2G) ? "2G":"5G",
					stat->state, stat->last_pps));
			}
		}
		break;
	case WLC_E_WA_LQM:
		if (datalen >= sizeof(wl_event_wa_lqm_t)) {
			const wl_event_wa_lqm_t *event_wa_lqm =
				(wl_event_wa_lqm_t *)event_data;
			const bcm_xtlv_t *subevent;
			const wl_event_wa_lqm_basic_t *elqm_basic;

			/* validate container version/length before touching
			 * the embedded XTLV
			 */
			if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
			    (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
				DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
					event_name, event_wa_lqm->ver, event_wa_lqm->len));
				break;
			}

			subevent = (const bcm_xtlv_t *)event_wa_lqm->subevent;
			if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
			    (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
				DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
					event_name, subevent->id, subevent->len));
				break;
			}

			elqm_basic = (const wl_event_wa_lqm_basic_t *)subevent->data;
			BCM_REFERENCE(elqm_basic);
			DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
				event_name, elqm_basic->rssi, elqm_basic->snr,
				elqm_basic->tx_rate, elqm_basic->rx_rate));
		}
		break;

	case WLC_E_OBSS_DETECTION:
	{
		DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
		break;
	}

	case WLC_E_AP_BCN_MUTE:
		if (datalen >= sizeof(wlc_bcn_mute_miti_event_data_v1_t)) {
			const wlc_bcn_mute_miti_event_data_v1_t
				*bcn_mute_miti_evnt_data = event_data;
			DHD_EVENT(("MACEVENT: %s, reason :%d uatbtt_count: %d\n",
				event_name, reason, bcn_mute_miti_evnt_data->uatbtt_count));
		}
		break;

	default:
		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
			event_name, event_type, eabuf, (int)status, (int)reason,
			(int)auth_type));
		break;
	}

	/* show any appended data if message level is set to bytes or host_data is set */
	if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
		buf = (uchar *) event_data;
		BCM_REFERENCE(buf);
		DHD_EVENT((" data (%d) : ", datalen));
		for (i = 0; i < datalen; i++) {
			DHD_EVENT((" 0x%02x ", buf[i]));
		}
		DHD_EVENT(("\n"));
	}
} /* wl_show_host_event */
3934 #endif /* SHOW_EVENTS */
3935
3936 #ifdef DNGL_EVENT_SUPPORT
3937 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
3938 int
3939 dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3940 {
3941 bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
3942
3943 dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
3944 return BCME_OK;
3945 }
3946
3947 #ifdef PARSE_DONGLE_HOST_EVENT
/* Maps a numeric health-check identifier to its printable name. */
typedef struct hck_id_to_str_s {
	uint32 id;
	char *name;
} hck_id_to_str_t;

/* WL software health-check ("descend-and-die") IDs.
 * Table is terminated by the {0, NULL} sentinel entry; consumers iterate
 * until name == NULL (see dhd_print_dongle_hck_id()).
 */
hck_id_to_str_t hck_sw_id_to_str[] = {
	{WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
	{WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
	{WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
	{WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
	{WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
	{WL_HC_DD_PHY, "WL_HC_DD_PHY"},
	{WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
	{WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
	{0, NULL}
};
3964
/* PCIe-module health-check indication IDs; {0, NULL} terminated. */
hck_id_to_str_t hck_pcie_module_to_str[] = {
	{HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
	{HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
	{HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
	{HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
	{HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
	{HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
	{HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
	{HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
	{0, NULL}
};
3976
/* RX-stall (v2) health-check failure-reason codes; {0, NULL} terminated.
 * Used by dhd_parse_hck_common_sw_event() for WL_HC_DD_RX_STALL_V2 records.
 */
hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
	{BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
	{BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
	{BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
	{BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
	{BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
	{BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
	{BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
	{0, NULL}
};
3987
3988 static void
3989 dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
3990 {
3991 while (hck->name != NULL) {
3992 if (hck->id == id) {
3993 DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
3994 return;
3995 }
3996 hck++;
3997 }
3998 }
3999
4000 void
4001 dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
4002 {
4003
4004 wl_rx_hc_info_v2_t *hck_rx_stall_v2;
4005 uint16 id;
4006
4007 id = ltoh16(wl_hc->id);
4008
4009 if (id == WL_HC_DD_RX_STALL_V2) {
4010 /* map the hck_rx_stall_v2 structure to the value of the XTLV */
4011 hck_rx_stall_v2 =
4012 (wl_rx_hc_info_v2_t*)wl_hc;
4013 DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
4014 " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
4015 hck_rx_stall_v2->type,
4016 hck_rx_stall_v2->length,
4017 hck_rx_stall_v2->if_idx,
4018 hck_rx_stall_v2->ac,
4019 hck_rx_stall_v2->rx_hc_pkts,
4020 hck_rx_stall_v2->rx_hc_dropped_all,
4021 hck_rx_stall_v2->rx_hc_alert_th,
4022 hck_rx_stall_v2->reason,
4023 ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
4024 dhd_print_dongle_hck_id(
4025 ltoh32(hck_rx_stall_v2->reason),
4026 hck_rx_stall_v2_to_str);
4027 } else {
4028 dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
4029 hck_sw_id_to_str);
4030 }
4031
4032 }
4033
4034 #endif /* PARSE_DONGLE_HOST_EVENT */
4035
4036 void
4037 dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
4038 bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
4039 {
4040 uint8 *p = (uint8 *)(event + 1);
4041 uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
4042 uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
4043 uint16 version = ntoh16_ua((void *)&dngl_event->version);
4044
4045 DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
4046 if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
4047 return;
4048 }
4049 if (version != BCM_DNGL_EVENT_MSG_VERSION) {
4050 DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
4051 version, BCM_DNGL_EVENT_MSG_VERSION));
4052 return;
4053 }
4054 switch (type) {
4055 case DNGL_E_SOCRAM_IND:
4056 {
4057 bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
4058 uint16 tag = ltoh32(socramind_ptr->tag);
4059 uint16 taglen = ltoh32(socramind_ptr->length);
4060 p = (uint8 *)socramind_ptr->value;
4061 DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
4062 switch (tag) {
4063 case SOCRAM_IND_ASSERT_TAG:
4064 {
4065 /*
4066 * The payload consists of -
4067 * null terminated function name padded till 32 bit boundary +
4068 * Line number - (32 bits)
4069 * Caller address (32 bits)
4070 */
4071 char *fnname = (char *)p;
4072 if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
4073 sizeof(uint32) * 2)) {
4074 DHD_ERROR(("Wrong length:%d\n", datalen));
4075 return;
4076 }
4077 DHD_EVENT(("ASSRT Function:%s ", p));
4078 p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
4079 DHD_EVENT(("Line:%d ", *(uint32 *)p));
4080 p += sizeof(uint32);
4081 DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
4082 #ifdef PARSE_DONGLE_HOST_EVENT
4083 DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
4084 #endif /* PARSE_DONGLE_HOST_EVENT */
4085 break;
4086 }
4087 case SOCRAM_IND_TAG_HEALTH_CHECK:
4088 {
4089 bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
4090 DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
4091 ltoh32(dngl_hc->top_module_tag),
4092 ltoh32(dngl_hc->top_module_len),
4093 datalen));
4094 if (DHD_EVENT_ON()) {
4095 prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
4096 + BCM_XTLV_HDR_SIZE, datalen));
4097 }
4098 #ifdef DHD_LOG_DUMP
4099 memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
4100 memcpy(dhdp->health_chk_event_data, p,
4101 MIN(ltoh32(dngl_hc->top_module_len),
4102 HEALTH_CHK_BUF_SIZE));
4103 #endif /* DHD_LOG_DUMP */
4104 p = (uint8 *)dngl_hc->value;
4105
4106 switch (ltoh32(dngl_hc->top_module_tag)) {
4107 case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
4108 {
4109 bcm_dngl_pcie_hc_t *pcie_hc;
4110 pcie_hc = (bcm_dngl_pcie_hc_t *)p;
4111 BCM_REFERENCE(pcie_hc);
4112 if (ltoh32(dngl_hc->top_module_len) <
4113 sizeof(bcm_dngl_pcie_hc_t)) {
4114 DHD_ERROR(("Wrong length:%d\n",
4115 ltoh32(dngl_hc->top_module_len)));
4116 return;
4117 }
4118 DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
4119 " control:0x%x\n",
4120 ltoh32(pcie_hc->version),
4121 ltoh32(pcie_hc->pcie_err_ind_type),
4122 ltoh32(pcie_hc->pcie_flag),
4123 ltoh32(pcie_hc->pcie_control_reg)));
4124 #ifdef PARSE_DONGLE_HOST_EVENT
4125 dhd_print_dongle_hck_id(
4126 ltoh32(pcie_hc->pcie_err_ind_type),
4127 hck_pcie_module_to_str);
4128 #endif /* PARSE_DONGLE_HOST_EVENT */
4129 break;
4130 }
4131 #ifdef HCHK_COMMON_SW_EVENT
4132 case HCHK_SW_ENTITY_WL_PRIMARY:
4133 case HCHK_SW_ENTITY_WL_SECONDARY:
4134 {
4135 bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
4136
4137 if (ltoh32(dngl_hc->top_module_len) <
4138 sizeof(bcm_xtlv_t)) {
4139 DHD_ERROR(("WL SW HC Wrong length:%d\n",
4140 ltoh32(dngl_hc->top_module_len)));
4141 return;
4142 }
4143 BCM_REFERENCE(wl_hc);
4144 DHD_EVENT(("WL SW HC type %d len %d\n",
4145 ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
4146
4147 #ifdef PARSE_DONGLE_HOST_EVENT
4148 dhd_parse_hck_common_sw_event(wl_hc);
4149 #endif /* PARSE_DONGLE_HOST_EVENT */
4150 break;
4151
4152 }
4153 #endif /* HCHK_COMMON_SW_EVENT */
4154 default:
4155 {
4156 DHD_ERROR(("%s:Unknown module TAG:%d\n",
4157 __FUNCTION__,
4158 ltoh32(dngl_hc->top_module_tag)));
4159 break;
4160 }
4161 }
4162 break;
4163 }
4164 default:
4165 DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
4166 if (p && DHD_EVENT_ON()) {
4167 prhex("SOCRAMIND", p, taglen);
4168 }
4169 break;
4170 }
4171 break;
4172 }
4173 default:
4174 DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
4175 if (p && DHD_EVENT_ON()) {
4176 prhex("SOCRAMIND", p, datalen);
4177 }
4178 break;
4179 }
4180 #ifndef BCMDBUS
4181 #ifdef DHD_FW_COREDUMP
4182 if (dhdp->memdump_enabled) {
4183 dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
4184 if (
4185 #ifdef GDB_PROXY
4186 !dhdp->gdb_proxy_active &&
4187 #endif /* GDB_PROXY */
4188 dhd_socram_dump(dhdp->bus)) {
4189 DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
4190 }
4191 }
4192 #else
4193 dhd_dbg_send_urgent_evt(dhdp, p, datalen);
4194 #endif /* DHD_FW_COREDUMP */
4195 #endif /* !BCMDBUS */
4196 }
4197
4198 #endif /* DNGL_EVENT_SUPPORT */
4199
4200 /* Stub for now. Will become real function as soon as shim
4201 * is being integrated to Android, Linux etc.
4202 */
4203 int
4204 wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
4205 {
4206 return BCME_OK;
4207 }
4208
4209 int
4210 wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
4211 uint pktlen, void **data_ptr, void *raw_event)
4212 {
4213 wl_evt_pport_t evt_pport;
4214 wl_event_msg_t event;
4215 bcm_event_msg_u_t evu;
4216 int ret;
4217
4218 /* make sure it is a BRCM event pkt and record event data */
4219 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
4220 if (ret != BCME_OK) {
4221 return ret;
4222 }
4223
4224 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
4225
4226 /* convert event from network order to host order */
4227 wl_event_to_host_order(&event);
4228
4229 /* record event params to evt_pport */
4230 evt_pport.dhd_pub = dhd_pub;
4231 evt_pport.ifidx = ifidx;
4232 evt_pport.pktdata = pktdata;
4233 evt_pport.data_ptr = data_ptr;
4234 evt_pport.raw_event = raw_event;
4235 evt_pport.data_len = pktlen;
4236
4237 ret = wl_event_process_default(&event, &evt_pport);
4238
4239 return ret;
4240 } /* wl_event_process */
4241
4242 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
4243 int
4244 wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
4245 {
4246 int ret;
4247
4248 ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
4249 if (ret != BCME_OK) {
4250 DHD_ERROR(("%s: Invalid event frame, err = %d\n",
4251 __FUNCTION__, ret));
4252 }
4253
4254 return ret;
4255 }
4256
4257 int
4258 wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
4259 wl_event_msg_t *event, void **data_ptr, void *raw_event)
4260 {
4261 bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
4262 bcm_event_msg_u_t evu;
4263 uint8 *event_data;
4264 uint32 type, status, datalen, reason;
4265 uint16 flags;
4266 uint evlen;
4267 int ret;
4268 uint16 usr_subtype;
4269 #if defined(__linux__)
4270 dhd_if_t *ifp = NULL;
4271 BCM_REFERENCE(ifp);
4272 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4273
4274 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
4275 if (ret != BCME_OK) {
4276 return ret;
4277 }
4278
4279 usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
4280 switch (usr_subtype) {
4281 case BCMILCP_BCM_SUBTYPE_EVENT:
4282 memcpy(event, &evu.event, sizeof(wl_event_msg_t));
4283 *data_ptr = &pvt_data[1];
4284 break;
4285 case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
4286 #ifdef DNGL_EVENT_SUPPORT
4287 /* If it is a DNGL event process it first */
4288 if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
4289 /*
4290 * Return error purposely to prevent DNGL event being processed
4291 * as BRCM event
4292 */
4293 return BCME_ERROR;
4294 }
4295 #endif /* DNGL_EVENT_SUPPORT */
4296 return BCME_NOTFOUND;
4297 default:
4298 return BCME_NOTFOUND;
4299 }
4300
4301 /* start wl_event_msg process */
4302 event_data = *data_ptr;
4303 type = ntoh32_ua((void *)&event->event_type);
4304 flags = ntoh16_ua((void *)&event->flags);
4305 status = ntoh32_ua((void *)&event->status);
4306 reason = ntoh32_ua((void *)&event->reason);
4307 datalen = ntoh32_ua((void *)&event->datalen);
4308 evlen = datalen + sizeof(bcm_event_t);
4309
4310 switch (type) {
4311 #ifdef PROP_TXSTATUS
4312 case WLC_E_FIFO_CREDIT_MAP:
4313 dhd_wlfc_enable(dhd_pub);
4314 dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
4315 WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
4316 "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
4317 event_data[2],
4318 event_data[3], event_data[4], event_data[5]));
4319 break;
4320
4321 case WLC_E_BCMC_CREDIT_SUPPORT:
4322 dhd_wlfc_BCMCCredit_support_event(dhd_pub);
4323 break;
4324 #ifdef LIMIT_BORROW
4325 case WLC_E_ALLOW_CREDIT_BORROW:
4326 dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
4327 break;
4328 #endif /* LIMIT_BORROW */
4329 #endif /* PROP_TXSTATUS */
4330
4331 case WLC_E_ULP:
4332 break;
4333 case WLC_E_TDLS_PEER_EVENT:
4334 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
4335 {
4336 dhd_tdls_event_handler(dhd_pub, event);
4337 }
4338 #endif
4339 break;
4340
4341 case WLC_E_IF:
4342 {
4343 struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
4344
4345 /* Ignore the event if NOIF is set */
4346 if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
4347 DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
4348 return (BCME_UNSUPPORTED);
4349 }
4350 #ifdef PCIE_FULL_DONGLE
4351 dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
4352 ifevent->opcode, ifevent->role);
4353 #endif
4354 #ifdef PROP_TXSTATUS
4355 {
4356 uint8* ea = pvt_data->eth.ether_dhost;
4357 WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
4358 ifevent->ifidx,
4359 ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
4360 ((ifevent->role == 0) ? "STA":"AP "),
4361 MAC2STRDBG(ea)));
4362 (void)ea;
4363
4364 if (ifevent->opcode == WLC_E_IF_CHANGE)
4365 dhd_wlfc_interface_event(dhd_pub,
4366 eWLFC_MAC_ENTRY_ACTION_UPDATE,
4367 ifevent->ifidx, ifevent->role, ea);
4368 else
4369 dhd_wlfc_interface_event(dhd_pub,
4370 ((ifevent->opcode == WLC_E_IF_ADD) ?
4371 eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
4372 ifevent->ifidx, ifevent->role, ea);
4373
4374 /* dhd already has created an interface by default, for 0 */
4375 if (ifevent->ifidx == 0)
4376 break;
4377 }
4378 #endif /* PROP_TXSTATUS */
4379
4380 if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
4381 if (ifevent->opcode == WLC_E_IF_ADD) {
4382 if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
4383 event->addr.octet)) {
4384
4385 DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
4386 __FUNCTION__, ifevent->ifidx, event->ifname));
4387 return (BCME_ERROR);
4388 }
4389 } else if (ifevent->opcode == WLC_E_IF_DEL) {
4390 #ifdef PCIE_FULL_DONGLE
4391 dhd_flow_rings_delete(dhd_pub,
4392 (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname));
4393 #endif /* PCIE_FULL_DONGLE */
4394 dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
4395 event->addr.octet);
4396 } else if (ifevent->opcode == WLC_E_IF_CHANGE) {
4397 #ifdef WL_CFG80211
4398 dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
4399 event->addr.octet);
4400 #endif /* WL_CFG80211 */
4401 }
4402 } else {
4403 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
4404 DHD_INFO(("%s: Invalid ifidx %d for %s\n",
4405 __FUNCTION__, ifevent->ifidx, event->ifname));
4406 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
4407 }
4408 /* send up the if event: btamp user needs it */
4409 *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
4410 /* push up to external supp/auth */
4411 dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
4412 break;
4413 }
4414
4415 case WLC_E_NDIS_LINK:
4416 break;
4417 case WLC_E_PFN_NET_FOUND:
4418 case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
4419 case WLC_E_PFN_NET_LOST:
4420 break;
4421 #if defined(PNO_SUPPORT)
4422 case WLC_E_PFN_BSSID_NET_FOUND:
4423 case WLC_E_PFN_BEST_BATCHING:
4424 dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
4425 break;
4426 #endif
4427 #if defined(RTT_SUPPORT)
4428 case WLC_E_PROXD:
4429 #ifndef WL_CFG80211
4430 dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
4431 #endif /* WL_CFG80211 */
4432 break;
4433 #endif /* RTT_SUPPORT */
4434 /* These are what external supplicant/authenticator wants */
4435 case WLC_E_ASSOC_IND:
4436 case WLC_E_AUTH_IND:
4437 case WLC_E_REASSOC_IND:
4438 dhd_findadd_sta(dhd_pub,
4439 dhd_ifname2idx(dhd_pub->info, event->ifname),
4440 &event->addr.octet);
4441 break;
4442 #ifndef BCMDBUS
4443 #if defined(DHD_FW_COREDUMP)
4444 case WLC_E_PSM_WATCHDOG:
4445 DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
4446 if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
4447 DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
4448 }
4449 break;
4450 #endif
4451 #endif /* !BCMDBUS */
4452 case WLC_E_NATOE_NFCT:
4453 #ifdef WL_NATOE
4454 DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
4455 dhd_natoe_ct_event(dhd_pub, event_data);
4456 #endif /* WL_NATOE */
4457 break;
4458 #ifdef WL_NAN
4459 case WLC_E_SLOTTED_BSS_PEER_OP:
4460 DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
4461 "" MACDBG ", status = %d\n",
4462 __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
4463 if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
4464 dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
4465 event->ifname), &event->addr.octet);
4466 } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
4467 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
4468 BCM_REFERENCE(ifindex);
4469 dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
4470 event->ifname), &event->addr.octet);
4471 #ifdef PCIE_FULL_DONGLE
4472 dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
4473 (char *)&event->addr.octet[0]);
4474 #endif
4475 } else {
4476 DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
4477 __FUNCTION__, status));
4478 }
4479 break;
4480 #endif /* WL_NAN */
4481 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
4482 case WLC_E_REASSOC:
4483 ifp = dhd_get_ifp(dhd_pub, event->ifidx);
4484
4485 if (!ifp)
4486 break;
4487
4488 /* Consider STA role only since roam is disabled on P2P GC.
4489 * Drop EAPOL M1 frame only if roam is done to same BSS.
4490 */
4491 if ((status == WLC_E_STATUS_SUCCESS) &&
4492 IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
4493 wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
4494 ifp->recv_reassoc_evt = TRUE;
4495 }
4496 break;
4497 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4498 #if defined(CSI_SUPPORT)
4499 case WLC_E_CSI:
4500 dhd_csi_event_handler(dhd_pub, event, (void *)event_data);
4501 break;
4502 #endif /* CSI_SUPPORT */
4503 case WLC_E_LINK:
4504 #ifdef PCIE_FULL_DONGLE
4505 if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
4506 event->ifname), (uint8)flags) != BCME_OK) {
4507 DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
4508 __FUNCTION__));
4509 break;
4510 }
4511 if (!flags) {
4512 DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
4513 __FUNCTION__));
4514 /* Delete all sta and flowrings */
4515 dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
4516 dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
4517 event->ifname));
4518 }
4519 /* fall through */
4520 #endif /* PCIE_FULL_DONGLE */
4521 case WLC_E_DEAUTH:
4522 case WLC_E_DEAUTH_IND:
4523 case WLC_E_DISASSOC:
4524 case WLC_E_DISASSOC_IND:
4525 #ifdef PCIE_FULL_DONGLE
4526 if (type != WLC_E_LINK) {
4527 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
4528 uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
4529 uint8 del_sta = TRUE;
4530 #ifdef WL_CFG80211
4531 if (role == WLC_E_IF_ROLE_STA &&
4532 !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
4533 !wl_cfg80211_is_event_from_connected_bssid(
4534 dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
4535 del_sta = FALSE;
4536 }
4537 #endif /* WL_CFG80211 */
4538 DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
4539 __FUNCTION__, type, flags, status, role, del_sta));
4540
4541 if (del_sta) {
4542 DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
4543 __FUNCTION__, MAC2STRDBG(event->addr.octet)));
4544
4545 dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
4546 event->ifname), &event->addr.octet);
4547 /* Delete all flowrings for STA and P2P Client */
4548 if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
4549 dhd_flow_rings_delete(dhd_pub, ifindex);
4550 } else {
4551 dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
4552 (char *)&event->addr.octet[0]);
4553 }
4554 }
4555 }
4556 #endif /* PCIE_FULL_DONGLE */
4557 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
4558 /* fall through */
4559 ifp = dhd_get_ifp(dhd_pub, event->ifidx);
4560 if (ifp) {
4561 ifp->recv_reassoc_evt = FALSE;
4562 ifp->post_roam_evt = FALSE;
4563 }
4564 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4565 /* fall through */
4566 default:
4567 *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
4568 #ifdef DHD_UPDATE_INTF_MAC
4569 if ((WLC_E_LINK==type)&&(WLC_EVENT_MSG_LINK&flags)) {
4570 dhd_event_ifchange(dhd_pub->info,
4571 (struct wl_event_data_if *)event,
4572 event->ifname,
4573 event->addr.octet);
4574 }
4575 #endif /* DHD_UPDATE_INTF_MAC */
4576 /* push up to external supp/auth */
4577 dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
4578 DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
4579 __FUNCTION__, type, flags, status));
4580 BCM_REFERENCE(flags);
4581 BCM_REFERENCE(status);
4582 BCM_REFERENCE(reason);
4583
4584 break;
4585 }
4586 #if defined(STBAP)
4587 /* For routers, EAPD will be working on these events.
4588 * Overwrite interface name to that event is pushed
4589 * to host with its registered interface name
4590 */
4591 memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
4592 #endif
4593
4594 #ifdef DHD_STATUS_LOGGING
4595 if (dhd_pub->statlog) {
4596 dhd_statlog_process_event(dhd_pub, type, *ifidx,
4597 status, reason, flags);
4598 }
4599 #endif /* DHD_STATUS_LOGGING */
4600
4601 #ifdef SHOW_EVENTS
4602 if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
4603 wl_show_host_event(dhd_pub, event,
4604 (void *)event_data, raw_event, dhd_pub->enable_log);
4605 }
4606 #endif /* SHOW_EVENTS */
4607
4608 return (BCME_OK);
4609 } /* wl_process_host_event */
4610
4611 int
4612 wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
4613 wl_event_msg_t *event, void **data_ptr, void *raw_event)
4614 {
4615 return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
4616 raw_event);
4617 }
4618
/* Hex-dump a buffer to the console, bytes_per_line bytes per row separated
 * by ':' (0 means the whole buffer on one line). Compiled out unless
 * DHD_DEBUG is defined.
 */
void
dhd_print_buf(void *pbuf, int len, int bytes_per_line)
{
#ifdef DHD_DEBUG
	unsigned char *bytes = pbuf;
	int idx;
	int col = 0;

	if (bytes_per_line == 0) {
		bytes_per_line = len;
	}

	for (idx = 0; idx < len; idx++) {
		printf("%2.2x", *bytes++);
		col++;
		if (col == bytes_per_line) {
			printf("\n");
			col = 0;
		} else {
			printf(":");
		}
	}
	printf("\n");
#endif /* DHD_DEBUG */
}
4643 #ifndef strtoul
4644 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
4645 #endif
4646
4647 /* Convert user's input in hex pattern to byte-size mask */
4648 int
4649 wl_pattern_atoh(char *src, char *dst)
4650 {
4651 int i;
4652 if (strncmp(src, "0x", 2) != 0 &&
4653 strncmp(src, "0X", 2) != 0) {
4654 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
4655 return -1;
4656 }
4657 src = src + 2; /* Skip past 0x */
4658 if (strlen(src) % 2 != 0) {
4659 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
4660 return -1;
4661 }
4662 for (i = 0; *src != '\0'; i++) {
4663 char num[3];
4664 bcm_strncpy_s(num, sizeof(num), src, 2);
4665 num[2] = '\0';
4666 dst[i] = (uint8)strtoul(num, NULL, 16);
4667 src += 2;
4668 }
4669 return i;
4670 }
4671
4672 #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
4673 int
4674 pattern_atoh_len(char *src, char *dst, int len)
4675 {
4676 int i;
4677 if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
4678 strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
4679 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
4680 return -1;
4681 }
4682 src = src + HD_PREFIX_SIZE; /* Skip past 0x */
4683 if (strlen(src) % HD_BYTE_SIZE != 0) {
4684 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
4685 return -1;
4686 }
4687 for (i = 0; *src != '\0'; i++) {
4688 char num[HD_BYTE_SIZE + 1];
4689
4690 if (i > len - 1) {
4691 DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
4692 return -1;
4693 }
4694 bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
4695 num[HD_BYTE_SIZE] = '\0';
4696 dst[i] = (uint8)strtoul(num, NULL, 16);
4697 src += HD_BYTE_SIZE;
4698 }
4699 return i;
4700 }
4701 #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
4702
4703 #ifdef PKT_FILTER_SUPPORT
/*
 * Enable or disable a previously-added packet filter in the dongle, then set
 * the global filter mode.
 *
 * @param dhd          DHD public context
 * @param arg          string whose first token is the numeric filter id
 * @param enable       nonzero to enable the filter, zero to disable it
 * @param master_mode  value written to the "pkt_filter_mode" iovar
 *
 * The ioctl buffer layout is: "pkt_filter_enable\0" followed immediately by
 * a wl_pkt_filter_enable_t, so pkt_filterp points just past the NUL of the
 * iovar name inside buf.
 */
void
dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
{
	char *argv[8];
	int i = 0;
	const char *str;
	int buf_len;
	int str_len;
	char *arg_save = 0, *arg_org = 0;
	int rc;
	char buf[32] = {0};
	wl_pkt_filter_enable_t enable_parm;
	wl_pkt_filter_enable_t * pkt_filterp;

	if (!arg)
		return;

	/* Work on a private copy: bcmstrtok() mutates the string it walks */
	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
		goto fail;
	}
	/* arg_org keeps the original pointer for MFREE; bcmstrtok advances arg_save */
	arg_org = arg_save;
	memcpy(arg_save, arg, strlen(arg) + 1);

	argv[i] = bcmstrtok(&arg_save, " ", 0);

	i = 0;
	if (argv[i] == NULL) {
		DHD_ERROR(("No args provided\n"));
		goto fail;
	}

	/* Build the iovar: name string, NUL, then the binary enable record */
	str = "pkt_filter_enable";
	str_len = strlen(str);
	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
	buf[ sizeof(buf) - 1 ] = '\0';
	buf_len = str_len + 1;

	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);

	/* Parse packet filter id. */
	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
	if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
		goto fail;

	/* Parse enable/disable value. */
	enable_parm.enable = htod32(enable);

	buf_len += sizeof(enable_parm);
	/* memcpy because buf + str_len + 1 may not be properly aligned */
	memcpy((char *)pkt_filterp,
		&enable_parm,
		sizeof(enable_parm));

	/* Enable/disable the specified filter. */
	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc) {
		DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
			__FUNCTION__, enable?"enable":"disable", arg, rc));
		/* Re-install the filter set and retry once */
		dhd_set_packet_filter(dhd);
		rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
		rc = rc >= 0 ? 0 : rc;
		if (rc) {
			DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
				__FUNCTION__, arg, rc));
		} else {
			DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
				__FUNCTION__, arg));
		}
	}
	else
		DHD_TRACE(("%s: successfully %s pktfilter %s\n",
			__FUNCTION__, enable?"enable":"disable", arg));

	/* Control the master mode */
	rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
		master_mode, WLC_SET_VAR, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc)
		DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
			__FUNCTION__, master_mode, rc));

fail:
	if (arg_org)
		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
}
4790
/* Packet filter section: extended filters have named offsets, add table here */
/* Maps an upper-case symbolic base name to the base identifier carried in
 * pattern-list elements (see wl_pkt_filter_base_parse()).
 */
typedef struct {
	char *name;	/* symbolic name from WL_PKT_FILTER_BASE_NAMES */
	uint16 base;	/* base identifier used as pf_el->base_offs */
} wl_pfbase_t;

static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
4798
4799 static int
4800 wl_pkt_filter_base_parse(char *name)
4801 {
4802 uint i;
4803 char *bname, *uname;
4804
4805 for (i = 0; i < ARRAYSIZE(basenames); i++) {
4806 bname = basenames[i].name;
4807 for (uname = name; *uname; bname++, uname++) {
4808 if (*bname != bcm_toupper(*uname)) {
4809 break;
4810 }
4811 }
4812 if (!*uname && !*bname) {
4813 break;
4814 }
4815 }
4816
4817 if (i < ARRAYSIZE(basenames)) {
4818 return basenames[i].base;
4819 } else {
4820 return -1;
4821 }
4822 }
4823
4824 void
4825 dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
4826 {
4827 const char *str;
4828 wl_pkt_filter_t pkt_filter;
4829 wl_pkt_filter_t *pkt_filterp;
4830 int buf_len;
4831 int str_len;
4832 int rc = -1;
4833 uint32 mask_size;
4834 uint32 pattern_size;
4835 char *argv[MAXPKT_ARG] = {0}, * buf = 0;
4836 int i = 0;
4837 char *arg_save = 0, *arg_org = 0;
4838
4839 if (!arg)
4840 return;
4841
4842 if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
4843 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
4844 goto fail;
4845 }
4846
4847 arg_org = arg_save;
4848
4849 if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
4850 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
4851 goto fail;
4852 }
4853
4854 memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
4855 memcpy(arg_save, arg, strlen(arg) + 1);
4856
4857 if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
4858 DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
4859 goto fail;
4860 }
4861
4862 argv[i] = bcmstrtok(&arg_save, " ", 0);
4863 while (argv[i++]) {
4864 if (i >= MAXPKT_ARG) {
4865 DHD_ERROR(("Invalid args provided\n"));
4866 goto fail;
4867 }
4868 argv[i] = bcmstrtok(&arg_save, " ", 0);
4869 }
4870
4871 i = 0;
4872 if (argv[i] == NULL) {
4873 DHD_ERROR(("No args provided\n"));
4874 goto fail;
4875 }
4876
4877 str = "pkt_filter_add";
4878 str_len = strlen(str);
4879 bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
4880 buf[ str_len ] = '\0';
4881 buf_len = str_len + 1;
4882
4883 pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
4884
4885 /* Parse packet filter id. */
4886 pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
4887
4888 if (argv[++i] == NULL) {
4889 DHD_ERROR(("Polarity not provided\n"));
4890 goto fail;
4891 }
4892
4893 /* Parse filter polarity. */
4894 pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
4895
4896 if (argv[++i] == NULL) {
4897 DHD_ERROR(("Filter type not provided\n"));
4898 goto fail;
4899 }
4900
4901 /* Parse filter type. */
4902 pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
4903
4904 if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
4905 if (argv[++i] == NULL) {
4906 DHD_ERROR(("Offset not provided\n"));
4907 goto fail;
4908 }
4909
4910 /* Parse pattern filter offset. */
4911 pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
4912
4913 if (argv[++i] == NULL) {
4914 DHD_ERROR(("Bitmask not provided\n"));
4915 goto fail;
4916 }
4917
4918 /* Parse pattern filter mask. */
4919 rc = wl_pattern_atoh(argv[i],
4920 (char *) pkt_filterp->u.pattern.mask_and_pattern);
4921
4922 if (rc == -1) {
4923 DHD_ERROR(("Rejecting: %s\n", argv[i]));
4924 goto fail;
4925 }
4926 mask_size = htod32(rc);
4927 if (argv[++i] == NULL) {
4928 DHD_ERROR(("Pattern not provided\n"));
4929 goto fail;
4930 }
4931
4932 /* Parse pattern filter pattern. */
4933 rc = wl_pattern_atoh(argv[i],
4934 (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]);
4935
4936 if (rc == -1) {
4937 DHD_ERROR(("Rejecting: %s\n", argv[i]));
4938 goto fail;
4939 }
4940 pattern_size = htod32(rc);
4941 if (mask_size != pattern_size) {
4942 DHD_ERROR(("Mask and pattern not the same size\n"));
4943 goto fail;
4944 }
4945
4946 pkt_filter.u.pattern.size_bytes = mask_size;
4947 buf_len += WL_PKT_FILTER_FIXED_LEN;
4948 buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
4949
4950 /* Keep-alive attributes are set in local variable (keep_alive_pkt), and
4951 * then memcpy'ed into buffer (keep_alive_pktp) since there is no
4952 * guarantee that the buffer is properly aligned.
4953 */
4954 memcpy((char *)pkt_filterp,
4955 &pkt_filter,
4956 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
4957 } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
4958 int list_cnt = 0;
4959 char *endptr = NULL;
4960 wl_pkt_filter_pattern_listel_t *pf_el =
4961 (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
4962
4963 while (argv[++i] != NULL) {
4964 /* Check valid buffer size. */
4965 if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
4966 DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
4967 goto fail;
4968 }
4969
4970 /* Parse pattern filter base and offset. */
4971 if (bcm_isdigit(*argv[i])) {
4972 /* Numeric base */
4973 rc = strtoul(argv[i], &endptr, 0);
4974 } else {
4975 endptr = strchr(argv[i], ':');
4976 if (endptr) {
4977 *endptr = '\0';
4978 rc = wl_pkt_filter_base_parse(argv[i]);
4979 if (rc == -1) {
4980 printf("Invalid base %s\n", argv[i]);
4981 goto fail;
4982 }
4983 *endptr = ':';
4984 }
4985 }
4986
4987 if (endptr == NULL) {
4988 printf("Invalid [base:]offset format: %s\n", argv[i]);
4989 goto fail;
4990 }
4991
4992 if (*endptr == ':') {
4993 pf_el->base_offs = htod16(rc);
4994 rc = strtoul(endptr + 1, &endptr, 0);
4995 } else {
4996 /* Must have had a numeric offset only */
4997 pf_el->base_offs = htod16(0);
4998 }
4999
5000 if (*endptr) {
5001 printf("Invalid [base:]offset format: %s\n", argv[i]);
5002 goto fail;
5003 }
5004 if (rc > 0x0000FFFF) {
5005 printf("Offset too large\n");
5006 goto fail;
5007 }
5008 pf_el->rel_offs = htod16(rc);
5009
5010 /* Clear match_flag (may be set in parsing which follows) */
5011 pf_el->match_flags = htod16(0);
5012
5013 /* Parse pattern filter mask and pattern directly into ioctl buffer */
5014 if (argv[++i] == NULL) {
5015 printf("Bitmask not provided\n");
5016 goto fail;
5017 }
5018 rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
5019 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
5020 printf("Rejecting: %s\n", argv[i]);
5021 goto fail;
5022 }
5023 mask_size = htod16(rc);
5024
5025 if (argv[++i] == NULL) {
5026 printf("Pattern not provided\n");
5027 goto fail;
5028 }
5029
5030 if (*argv[i] == '!') {
5031 pf_el->match_flags =
5032 htod16(WL_PKT_FILTER_MFLAG_NEG);
5033 (argv[i])++;
5034 }
5035 if (*argv[i] == '\0') {
5036 printf("Pattern not provided\n");
5037 goto fail;
5038 }
5039 rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
5040 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
5041 printf("Rejecting: %s\n", argv[i]);
5042 goto fail;
5043 }
5044 pattern_size = htod16(rc);
5045
5046 if (mask_size != pattern_size) {
5047 printf("Mask and pattern not the same size\n");
5048 goto fail;
5049 }
5050
5051 pf_el->size_bytes = mask_size;
5052
5053 /* Account for the size of this pattern element */
5054 buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
5055
5056 /* Move to next element location in ioctl buffer */
5057 pf_el = (wl_pkt_filter_pattern_listel_t*)
5058 ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
5059
5060 /* Count list element */
5061 list_cnt++;
5062 }
5063
5064 /* Account for initial fixed size, and copy initial fixed fields */
5065 buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
5066
5067 if (buf_len > MAX_PKTFLT_BUF_SIZE) {
5068 DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
5069 goto fail;
5070 }
5071
5072 /* Update list count and total size */
5073 pkt_filter.u.patlist.list_cnt = list_cnt;
5074 pkt_filter.u.patlist.PAD1[0] = 0;
5075 pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
5076 pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
5077
5078 memcpy((char *)pkt_filterp, &pkt_filter,
5079 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
5080 } else {
5081 DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
5082 goto fail;
5083 }
5084
5085 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
5086 rc = rc >= 0 ? 0 : rc;
5087
5088 if (rc)
5089 DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
5090 __FUNCTION__, arg, rc));
5091 else
5092 DHD_TRACE(("%s: successfully added pktfilter %s\n",
5093 __FUNCTION__, arg));
5094
5095 fail:
5096 if (arg_org)
5097 MFREE(dhd->osh, arg_org, strlen(arg) + 1);
5098
5099 if (buf)
5100 MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
5101 }
5102
5103 void
5104 dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
5105 {
5106 int ret;
5107
5108 ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
5109 id, WLC_SET_VAR, TRUE, 0);
5110 if (ret < 0) {
5111 DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
5112 __FUNCTION__, id, ret));
5113 }
5114 else
5115 DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
5116 __FUNCTION__, id));
5117 }
5118 #endif /* PKT_FILTER_SUPPORT */
5119
5120 /* ========================== */
5121 /* ==== ARP OFFLOAD SUPPORT = */
5122 /* ========================== */
5123 #ifdef ARP_OFFLOAD_SUPPORT
5124 void
5125 dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
5126 {
5127 int retcode;
5128
5129 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
5130 arp_mode, WLC_SET_VAR, TRUE, 0);
5131
5132 retcode = retcode >= 0 ? 0 : retcode;
5133 if (retcode) {
5134 DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
5135 __FUNCTION__, arp_mode, retcode));
5136 } else {
5137 DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
5138 __FUNCTION__, arp_mode));
5139 dhd->arpol_configured = TRUE;
5140 }
5141 }
5142
5143 void
5144 dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
5145 {
5146 int retcode;
5147
5148 if (!dhd->arpol_configured) {
5149 /* If arpol is not applied, apply it */
5150 dhd_arp_offload_set(dhd, dhd_arp_mode);
5151 }
5152
5153 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
5154 arp_enable, WLC_SET_VAR, TRUE, 0);
5155 retcode = retcode >= 0 ? 0 : retcode;
5156 if (retcode)
5157 DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
5158 __FUNCTION__, arp_enable, retcode));
5159 else
5160 #ifdef DHD_LOG_DUMP
5161 DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
5162 __FUNCTION__, arp_enable));
5163 #else
5164 DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
5165 __FUNCTION__, arp_enable));
5166 #endif /* DHD_LOG_DUMP */
5167 if (arp_enable) {
5168 uint32 version;
5169 retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
5170 &version, WLC_GET_VAR, FALSE, 0);
5171 if (retcode) {
5172 DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
5173 __FUNCTION__, retcode));
5174 dhd->arp_version = 1;
5175 }
5176 else {
5177 DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
5178 dhd->arp_version = version;
5179 }
5180 }
5181 }
5182
5183 /* XXX ANDREY: clear AOE arp_table */
5184 void
5185 dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
5186 {
5187 int ret = 0;
5188
5189 if (dhd == NULL) return;
5190 if (dhd->arp_version == 1)
5191 idx = 0;
5192
5193 ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
5194 if (ret < 0)
5195 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5196 else {
5197 #ifdef DHD_LOG_DUMP
5198 DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
5199 #else
5200 DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
5201 #endif /* DHD_LOG_DUMP */
5202 }
5203 /* mac address isn't cleared here but it will be cleared after dongle off */
5204 dhd->hmac_updated = 0;
5205 }
5206
5207 /* XXX ANDREY: clear hostip table */
5208 void
5209 dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
5210 {
5211 int ret = 0;
5212
5213 if (dhd == NULL) return;
5214 if (dhd->arp_version == 1)
5215 idx = 0;
5216
5217 ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
5218 if (ret < 0)
5219 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5220 else {
5221 #ifdef DHD_LOG_DUMP
5222 DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
5223 #else
5224 DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
5225 #endif /* DHD_LOG_DUMP */
5226 }
5227 }
5228
5229 void
5230 dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
5231 {
5232 int ret;
5233
5234 if (dhd == NULL) return;
5235 if (dhd->arp_version == 1)
5236 idx = 0;
5237
5238 ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
5239 NULL, 0, TRUE);
5240 if (ret < 0)
5241 DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
5242 else {
5243 /* mac address is updated in the dongle */
5244 dhd->hmac_updated = 1;
5245 #ifdef DHD_LOG_DUMP
5246 DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
5247 #else
5248 DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
5249 #endif /* DHD_LOG_DUMP */
5250 }
5251 }
5252
5253 int
5254 dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
5255 {
5256 int ret, i;
5257 uint32 *ptr32 = buf;
5258 bool clr_bottom = FALSE;
5259
5260 if (!buf)
5261 return -1;
5262 if (dhd == NULL) return -1;
5263 if (dhd->arp_version == 1)
5264 idx = 0;
5265
5266 ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
5267 FALSE);
5268 if (ret) {
5269 DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
5270 __FUNCTION__, ret));
5271
5272 return -1;
5273 }
5274
5275 /* clean up the buf, ascii reminder */
5276 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
5277 if (!clr_bottom) {
5278 if (*ptr32 == 0)
5279 clr_bottom = TRUE;
5280 } else {
5281 *ptr32 = 0;
5282 }
5283 ptr32++;
5284 }
5285
5286 return 0;
5287 }
5288 #endif /* ARP_OFFLOAD_SUPPORT */
5289
5290 /*
5291 * Neighbor Discovery Offload: enable NDO feature
5292 * Called by ipv6 event handler when interface comes up/goes down
5293 */
5294 int
5295 dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
5296 {
5297 int retcode;
5298
5299 if (dhd == NULL)
5300 return -1;
5301
5302 #if defined(WL_CFG80211) && defined(WL_NAN)
5303 if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
5304 /* If nan dp is active, skip NDO */
5305 DHD_INFO(("Active NAN DP, skip NDO\n"));
5306 return 0;
5307 }
5308 #endif /* WL_CFG80211 && WL_NAN */
5309 #ifdef WL_CFG80211
5310 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5311 /* NDO disable on STA+SOFTAP mode */
5312 ndo_enable = FALSE;
5313 }
5314 #endif /* WL_CFG80211 */
5315 retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
5316 ndo_enable, WLC_SET_VAR, TRUE, 0);
5317 if (retcode)
5318 DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
5319 __FUNCTION__, ndo_enable, retcode));
5320 else
5321 DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
5322 __FUNCTION__, ndo_enable));
5323
5324 return retcode;
5325 }
5326
5327 /*
5328 * Neighbor Discover Offload: enable NDO feature
5329 * Called by ipv6 event handler when interface comes up
5330 */
5331 int
5332 dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
5333 {
5334 int iov_len = 0;
5335 char iovbuf[DHD_IOVAR_BUF_SIZE];
5336 int retcode;
5337
5338 if (dhd == NULL)
5339 return -1;
5340
5341 iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
5342 IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
5343 if (!iov_len) {
5344 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5345 __FUNCTION__, sizeof(iovbuf)));
5346 return -1;
5347 }
5348 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
5349
5350 if (retcode)
5351 DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
5352 __FUNCTION__, retcode));
5353 else
5354 DHD_TRACE(("%s: ndo ipaddr entry added \n",
5355 __FUNCTION__));
5356
5357 return retcode;
5358 }
5359
5360 /*
5361 * Neighbor Discover Offload: enable NDO feature
5362 * Called by ipv6 event handler when interface goes down
5363 */
5364 int
5365 dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
5366 {
5367 int iov_len = 0;
5368 char iovbuf[DHD_IOVAR_BUF_SIZE];
5369 int retcode;
5370
5371 if (dhd == NULL)
5372 return -1;
5373
5374 iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
5375 0, iovbuf, sizeof(iovbuf));
5376 if (!iov_len) {
5377 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5378 __FUNCTION__, sizeof(iovbuf)));
5379 return -1;
5380 }
5381 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
5382
5383 if (retcode)
5384 DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
5385 __FUNCTION__, retcode));
5386 else
5387 DHD_TRACE(("%s: ndo ipaddr entry removed \n",
5388 __FUNCTION__));
5389
5390 return retcode;
5391 }
5392 /* Enhanced ND offload */
5393 uint16
5394 dhd_ndo_get_version(dhd_pub_t *dhdp)
5395 {
5396 char iovbuf[DHD_IOVAR_BUF_SIZE];
5397 wl_nd_hostip_t ndo_get_ver;
5398 int iov_len;
5399 int retcode;
5400 uint16 ver = 0;
5401
5402 if (dhdp == NULL) {
5403 return BCME_ERROR;
5404 }
5405
5406 memset(&iovbuf, 0, sizeof(iovbuf));
5407 ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
5408 ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
5409 ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
5410 ndo_get_ver.u.version = 0;
5411 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
5412 WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
5413
5414 if (!iov_len) {
5415 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5416 __FUNCTION__, sizeof(iovbuf)));
5417 return BCME_ERROR;
5418 }
5419
5420 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
5421
5422 if (retcode) {
5423 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
5424 /* ver iovar not supported. NDO version is 0 */
5425 ver = 0;
5426 } else {
5427 wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
5428
5429 if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
5430 (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
5431 (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
5432 + sizeof(uint16))) {
5433 /* nd_hostip iovar version */
5434 ver = dtoh16(ndo_ver_ret->u.version);
5435 }
5436
5437 DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
5438 }
5439
5440 return ver;
5441 }
5442
5443 int
5444 dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
5445 {
5446 char iovbuf[DHD_IOVAR_BUF_SIZE];
5447 wl_nd_hostip_t ndo_add_addr;
5448 int iov_len;
5449 int retcode;
5450
5451 if (dhdp == NULL || ipv6addr == 0) {
5452 return BCME_ERROR;
5453 }
5454
5455 /* wl_nd_hostip_t fixed param */
5456 ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
5457 ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
5458 ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
5459 /* wl_nd_host_ip_addr_t param for add */
5460 memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
5461 ndo_add_addr.u.host_ip.type = type;
5462
5463 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
5464 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
5465 if (!iov_len) {
5466 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5467 __FUNCTION__, sizeof(iovbuf)));
5468 return BCME_ERROR;
5469 }
5470
5471 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
5472 if (retcode) {
5473 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
5474 #ifdef NDO_CONFIG_SUPPORT
5475 if (retcode == BCME_NORESOURCE) {
5476 /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
5477 DHD_INFO(("%s: Host IP count exceed device capacity,"
5478 "ND offload deactivated\n", __FUNCTION__));
5479 dhdp->ndo_host_ip_overflow = TRUE;
5480 dhd_ndo_enable(dhdp, FALSE);
5481 }
5482 #endif /* NDO_CONFIG_SUPPORT */
5483 } else {
5484 DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
5485 }
5486
5487 return retcode;
5488 }
5489
5490 int
5491 dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
5492 {
5493 char iovbuf[DHD_IOVAR_BUF_SIZE];
5494 wl_nd_hostip_t ndo_del_addr;
5495 int iov_len;
5496 int retcode;
5497
5498 if (dhdp == NULL || ipv6addr == 0) {
5499 return BCME_ERROR;
5500 }
5501
5502 /* wl_nd_hostip_t fixed param */
5503 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
5504 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
5505 ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
5506 /* wl_nd_host_ip_addr_t param for del */
5507 memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
5508 ndo_del_addr.u.host_ip.type = 0; /* don't care */
5509
5510 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
5511 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
5512
5513 if (!iov_len) {
5514 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5515 __FUNCTION__, sizeof(iovbuf)));
5516 return BCME_ERROR;
5517 }
5518
5519 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
5520 if (retcode) {
5521 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
5522 } else {
5523 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
5524 }
5525
5526 return retcode;
5527 }
5528
5529 int
5530 dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
5531 {
5532 char iovbuf[DHD_IOVAR_BUF_SIZE];
5533 wl_nd_hostip_t ndo_del_addr;
5534 int iov_len;
5535 int retcode;
5536
5537 if (dhdp == NULL) {
5538 return BCME_ERROR;
5539 }
5540
5541 /* wl_nd_hostip_t fixed param */
5542 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
5543 if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
5544 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
5545 } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
5546 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
5547 } else {
5548 return BCME_BADARG;
5549 }
5550 ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
5551
5552 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
5553 iovbuf, sizeof(iovbuf));
5554
5555 if (!iov_len) {
5556 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5557 __FUNCTION__, sizeof(iovbuf)));
5558 return BCME_ERROR;
5559 }
5560
5561 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
5562 if (retcode) {
5563 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
5564 } else {
5565 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
5566 }
5567
5568 return retcode;
5569 }
5570
5571 int
5572 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
5573 {
5574 char iovbuf[DHD_IOVAR_BUF_SIZE];
5575 int iov_len;
5576 int retcode;
5577
5578 if (dhdp == NULL) {
5579 return BCME_ERROR;
5580 }
5581
5582 iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
5583 iovbuf, sizeof(iovbuf));
5584
5585 if (!iov_len) {
5586 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5587 __FUNCTION__, sizeof(iovbuf)));
5588 return BCME_ERROR;
5589 }
5590
5591 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
5592 if (retcode)
5593 DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
5594 __FUNCTION__, enable, retcode));
5595 else {
5596 DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
5597 __FUNCTION__, enable));
5598 }
5599
5600 return retcode;
5601 }
5602 #ifdef SIMPLE_ISCAN
5603
/* Id of the thread driving iscan work — presumably 0 when idle; TODO confirm */
uint iscan_thread_id = 0;
/* Head of the singly-linked list of iscan result buffers (NULL when empty) */
iscan_buf_t * iscan_chain = 0;
5606
5607 iscan_buf_t *
5608 dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
5609 {
5610 iscan_buf_t *iscanbuf_alloc = 0;
5611 iscan_buf_t *iscanbuf_head;
5612
5613 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
5614 dhd_iscan_lock();
5615
5616 iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
5617 if (iscanbuf_alloc == NULL)
5618 goto fail;
5619
5620 iscanbuf_alloc->next = NULL;
5621 iscanbuf_head = *iscanbuf;
5622
5623 DHD_ISCAN(("%s: addr of allocated node = 0x%X"
5624 "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
5625 __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
5626
5627 if (iscanbuf_head == NULL) {
5628 *iscanbuf = iscanbuf_alloc;
5629 DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
5630 goto fail;
5631 }
5632
5633 while (iscanbuf_head->next)
5634 iscanbuf_head = iscanbuf_head->next;
5635
5636 iscanbuf_head->next = iscanbuf_alloc;
5637
5638 fail:
5639 dhd_iscan_unlock();
5640 return iscanbuf_alloc;
5641 }
5642
5643 void
5644 dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
5645 {
5646 iscan_buf_t *iscanbuf_free = 0;
5647 iscan_buf_t *iscanbuf_prv = 0;
5648 iscan_buf_t *iscanbuf_cur;
5649 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
5650 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
5651
5652 dhd_iscan_lock();
5653
5654 iscanbuf_cur = iscan_chain;
5655
5656 /* If iscan_delete is null then delete the entire
5657 * chain or else delete specific one provided
5658 */
5659 if (!iscan_delete) {
5660 while (iscanbuf_cur) {
5661 iscanbuf_free = iscanbuf_cur;
5662 iscanbuf_cur = iscanbuf_cur->next;
5663 iscanbuf_free->next = 0;
5664 MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
5665 }
5666 iscan_chain = 0;
5667 } else {
5668 while (iscanbuf_cur) {
5669 if (iscanbuf_cur == iscan_delete)
5670 break;
5671 iscanbuf_prv = iscanbuf_cur;
5672 iscanbuf_cur = iscanbuf_cur->next;
5673 }
5674 if (iscanbuf_prv)
5675 iscanbuf_prv->next = iscan_delete->next;
5676
5677 iscan_delete->next = 0;
5678 MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
5679
5680 if (!iscanbuf_prv)
5681 iscan_chain = 0;
5682 }
5683 dhd_iscan_unlock();
5684 }
5685
/* Accessor: head of the accumulated iscan result chain (NULL when empty). */
iscan_buf_t *
dhd_iscan_result_buf(void)
{
	return iscan_chain;
}
5691
5692 int
5693 dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
5694 {
5695 int rc = -1;
5696 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
5697 char *buf;
5698 char iovar[] = "iscan";
5699 uint32 allocSize = 0;
5700 wl_ioctl_t ioctl;
5701 int len;
5702
5703 if (pParams) {
5704 allocSize = (size + strlen(iovar) + 1);
5705 if ((allocSize < size) || (allocSize < strlen(iovar)))
5706 {
5707 DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
5708 __FUNCTION__, allocSize, size, strlen(iovar)));
5709 goto cleanUp;
5710 }
5711 buf = MALLOC(dhd->osh, allocSize);
5712
5713 if (buf == NULL)
5714 {
5715 DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
5716 goto cleanUp;
5717 }
5718 ioctl.cmd = WLC_SET_VAR;
5719 len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
5720 if (len == 0) {
5721 rc = BCME_BUFTOOSHORT;
5722 goto cleanUp;
5723 }
5724 rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
5725 }
5726
5727 cleanUp:
5728 if (buf) {
5729 MFREE(dhd->osh, buf, allocSize);
5730 }
5731
5732 return rc;
5733 }
5734
/*
 * Pull one batch of partial scan results ("iscanresults" iovar) from the
 * dongle into a freshly allocated node appended to the global iscan_chain.
 * On success returns the dongle-reported iscan status and stores the number
 * of BSS entries in *scan_count; on setup failure the whole chain is freed
 * and the scan is aborted.
 */
static int
dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
{
	wl_iscan_results_t *list_buf;
	wl_iscan_results_t list;
	wl_scan_results_t *results;
	iscan_buf_t *iscan_cur;
	int status = -1;
	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
	int rc;
	wl_ioctl_t ioctl;
	int len;

	DHD_ISCAN(("%s: Enter\n", __FUNCTION__));

	/* Grow the chain by one node to receive this batch */
	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
	if (!iscan_cur) {
		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		goto fail;
	}

	dhd_iscan_lock();

	/* Initialize the result header in the node's buffer */
	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
	results = &list_buf->results;
	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
	results->version = 0;
	results->count = 0;

	/* Build the GET request in-place in the node buffer */
	memset(&list, 0, sizeof(list));
	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
	len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
	if (len == 0) {
		/* NOTE(review): this path returns while still holding
		 * dhd_iscan_lock(), and dhd_iscan_free_buf() re-acquires it —
		 * likely a lock leak/deadlock unless the lock is reentrant;
		 * confirm against the lock implementation.
		 */
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		status = BCME_BUFTOOSHORT;
		goto fail;
	}
	ioctl.cmd = WLC_GET_VAR;
	ioctl.set = FALSE;
	/* NOTE(review): rc is not checked; status comes from the dongle reply */
	rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);

	/* Convert dongle-endian reply fields to host order */
	results->buflen = dtoh32(results->buflen);
	results->version = dtoh32(results->version);
	*scan_count = results->count = dtoh32(results->count);
	status = dtoh32(list_buf->status);
	DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));

	dhd_iscan_unlock();

	if (!(*scan_count)) {
		/* TODO: race condition when FLUSH already called */
		dhd_iscan_free_buf(dhdp, 0);
	}
fail:
	return status;
}
5798
5799 #endif /* SIMPLE_ISCAN */
5800
5801 /*
5802 * returns = TRUE if associated, FALSE if not associated
5803 */
5804 bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
5805 {
5806 char bssid[6], zbuf[6];
5807 int ret = -1;
5808
5809 bzero(bssid, 6);
5810 bzero(zbuf, 6);
5811
5812 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
5813 ETHER_ADDR_LEN, FALSE, ifidx);
5814 /* XXX:AS!!! res can be: -17(BCME_NOTASSOCIATED),-22(BCME_NORESOURCE), and 0(OK)
5815 OK - doesn't mean associated yet, the returned bssid
5816 still needs to be checked for non zero array
5817 */
5818 DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
5819
5820 if (ret == BCME_NOTASSOCIATED) {
5821 DHD_ERROR(("%s: WLC_GET_BSSID, NOT ASSOCIATED\n", __FUNCTION__));
5822 }
5823
5824 if (retval)
5825 *retval = ret;
5826
5827 if (ret < 0)
5828 return FALSE;
5829
5830 if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
5831 DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
5832 return FALSE;
5833 }
5834 return TRUE;
5835 }
5836
5837 /* Function to estimate possible DTIM_SKIP value */
5838 #if defined(BCMPCIE)
/*
 * Estimate the bcn_li_dtim value (how many DTIM intervals the STA may skip
 * while suspended) from the associated AP's beacon interval and DTIM period.
 * Outputs the AP's dtim_period and bcn_interval through the pointers.
 * Returns 1 (no skip) on any failure, 0 when DTIM skipping is disabled.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
{
	int bcn_li_dtim = 1; /* default: no dtim skip */
	int ret = -1;
	int allowed_skip_dtim_cnt = 0;

	if (dhd->disable_dtim_in_suspend) {
		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
		bcn_li_dtim = 0;
		return bcn_li_dtim;
	}

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	if (dtim_period == NULL || bcn_interval == NULL)
		return bcn_li_dtim;

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* read associated AP dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* a zero DTIM period means not associated: just return */
	if (*dtim_period == 0) {
		return bcn_li_dtim;
	}

	if (dhd->max_dtim_enable) {
		/* stretch the skip count to the maximum allowed wake interval */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval: no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
			return bcn_li_dtim;
		}

		/* cap the skipped span at MAX_DTIM_ALLOWED_INTERVAL (in TU) */
		if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
			DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* platform config override, when provided */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));

	return bcn_li_dtim;
}
5920 #else /* OEM_ANDROID && BCMPCIE */
/*
 * Non-PCIe variant: estimate the bcn_li_dtim value (DTIM intervals the STA
 * may skip while suspended) from the associated AP's beacon interval and
 * DTIM period.  Returns 1 (no skip) on any failure, 0 when DTIM skipping
 * is disabled.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
{
	int bcn_li_dtim = 1; /* default: no dtim skip */
	int ret = -1;
	int dtim_period = 0;
	int ap_beacon = 0;
	int allowed_skip_dtim_cnt = 0;

	if (dhd->disable_dtim_in_suspend) {
		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
		bcn_li_dtim = 0;
		goto exit;
	}

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated ap's dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* a zero DTIM period means not associated: just exit */
	if (dtim_period == 0) {
		goto exit;
	}

	if (dhd->max_dtim_enable) {
		/* stretch the skip count to the maximum allowed wake interval */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval: no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
			goto exit;
		}

		/* cap the skipped span at MAX_DTIM_ALLOWED_INTERVAL (in TU) */
		if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
			DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* platform config override, when provided */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));

exit:
	return bcn_li_dtim;
}
6002 #endif /* OEM_ANDROID && BCMPCIE */
6003
6004 #ifdef CONFIG_SILENT_ROAM
6005 int
6006 dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
6007 {
6008 int ret = BCME_OK;
6009 wlc_sroam_t *psroam;
6010 wlc_sroam_info_t *sroam;
6011 uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
6012
6013 /* Check if associated */
6014 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
6015 DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
6016 return ret;
6017 }
6018
6019 if (set && (dhd->op_mode &
6020 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
6021 DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
6022 return ret;
6023 }
6024
6025 if (!dhd->sroam_turn_on) {
6026 DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
6027 return ret;
6028 }
6029 psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
6030 if (!psroam) {
6031 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
6032 return BCME_NOMEM;
6033 }
6034
6035 ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
6036 if (ret < 0) {
6037 DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
6038 goto done;
6039 }
6040
6041 if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
6042 ret = BCME_VERSION;
6043 goto done;
6044 }
6045
6046 sroam = (wlc_sroam_info_t *)psroam->data;
6047 sroam->sroam_on = set;
6048 DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
6049
6050 ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
6051 if (ret < 0) {
6052 DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
6053 }
6054
6055 done:
6056 if (psroam) {
6057 MFREE(dhd->osh, psroam, sroamlen);
6058 }
6059
6060 return ret;
6061 }
6062 #endif /* CONFIG_SILENT_ROAM */
6063
6064 /* Check if the mode supports STA MODE */
6065 bool dhd_support_sta_mode(dhd_pub_t *dhd)
6066 {
6067
6068 #ifdef WL_CFG80211
6069 if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
6070 return FALSE;
6071 else
6072 #endif /* WL_CFG80211 */
6073 return TRUE;
6074 }
6075
6076 #if defined(KEEP_ALIVE)
6077 int dhd_keep_alive_onoff(dhd_pub_t *dhd)
6078 {
6079 char buf[32] = {0};
6080 const char *str;
6081 wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
6082 wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
6083 int buf_len;
6084 int str_len;
6085 int res = -1;
6086
6087 if (!dhd_support_sta_mode(dhd))
6088 return res;
6089
6090 DHD_TRACE(("%s execution\n", __FUNCTION__));
6091
6092 str = "mkeep_alive";
6093 str_len = strlen(str);
6094 strlcpy(buf, str, sizeof(buf));
6095 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
6096 mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
6097 buf_len = str_len + 1;
6098 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
6099 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
6100 /* Setup keep alive zero for null packet generation */
6101 mkeep_alive_pkt.keep_alive_id = 0;
6102 mkeep_alive_pkt.len_bytes = 0;
6103 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
6104 bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
6105 /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
6106 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
6107 * guarantee that the buffer is properly aligned.
6108 */
6109 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
6110
6111 res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
6112
6113 return res;
6114 }
6115 #endif /* defined(KEEP_ALIVE) */
6116 #define CSCAN_TLV_TYPE_SSID_IE 'S'
6117 /*
6118 * SSIDs list parsing from cscan tlv list
6119 */
6120 int
6121 wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
6122 {
6123 char* str;
6124 int idx = 0;
6125 uint8 len;
6126
6127 if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
6128 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
6129 return BCME_BADARG;
6130 }
6131 str = *list_str;
6132 while (*bytes_left > 0) {
6133 if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
6134 *list_str = str;
6135 DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
6136 return idx;
6137 }
6138
6139 if (idx >= max) {
6140 DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
6141 return BCME_BADARG;
6142 }
6143
6144 /* Get proper CSCAN_TLV_TYPE_SSID_IE */
6145 *bytes_left -= 1;
6146 if (*bytes_left == 0) {
6147 DHD_ERROR(("%s no length field.\n", __FUNCTION__));
6148 return BCME_BADARG;
6149 }
6150 str += 1;
6151 ssid[idx].rssi_thresh = 0;
6152 ssid[idx].flags = 0;
6153 len = str[0];
6154 if (len == 0) {
6155 /* Broadcast SSID */
6156 ssid[idx].SSID_len = 0;
6157 memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
6158 *bytes_left -= 1;
6159 str += 1;
6160
6161 DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
6162 } else if (len <= DOT11_MAX_SSID_LEN) {
6163 /* Get proper SSID size */
6164 ssid[idx].SSID_len = len;
6165 *bytes_left -= 1;
6166 /* Get SSID */
6167 if (ssid[idx].SSID_len > *bytes_left) {
6168 DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
6169 __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
6170 return BCME_BADARG;
6171 }
6172 str += 1;
6173 memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
6174
6175 *bytes_left -= ssid[idx].SSID_len;
6176 str += ssid[idx].SSID_len;
6177 ssid[idx].hidden = TRUE;
6178
6179 DHD_TRACE(("%s :size=%d left=%d\n",
6180 (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
6181 } else {
6182 DHD_ERROR(("### SSID size more than %d\n", str[0]));
6183 return BCME_BADARG;
6184 }
6185 idx++;
6186 }
6187
6188 *list_str = str;
6189 return idx;
6190 }
6191
6192 #if defined(WL_WIRELESS_EXT)
6193 /* Android ComboSCAN support */
6194
6195 /*
6196 * data parsing from ComboScan tlv list
6197 */
6198 int
6199 wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
6200 int input_size, int *bytes_left)
6201 {
6202 char* str;
6203 uint16 short_temp;
6204 uint32 int_temp;
6205
6206 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
6207 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
6208 return -1;
6209 }
6210 str = *list_str;
6211
6212 /* Clean all dest bytes */
6213 memset(dst, 0, dst_size);
6214 if (*bytes_left > 0) {
6215
6216 if (str[0] != token) {
6217 DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
6218 __FUNCTION__, token, str[0], *bytes_left));
6219 return -1;
6220 }
6221
6222 *bytes_left -= 1;
6223 str += 1;
6224
6225 if (input_size == 1) {
6226 memcpy(dst, str, input_size);
6227 }
6228 else if (input_size == 2) {
6229 memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
6230 input_size);
6231 }
6232 else if (input_size == 4) {
6233 memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
6234 input_size);
6235 }
6236
6237 *bytes_left -= input_size;
6238 str += input_size;
6239 *list_str = str;
6240 return 1;
6241 }
6242 return 1;
6243 }
6244
6245 /*
6246 * channel list parsing from cscan tlv list
6247 */
6248 int
6249 wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
6250 int channel_num, int *bytes_left)
6251 {
6252 char* str;
6253 int idx = 0;
6254
6255 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
6256 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
6257 return -1;
6258 }
6259 str = *list_str;
6260
6261 while (*bytes_left > 0) {
6262
6263 if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
6264 *list_str = str;
6265 DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
6266 return idx;
6267 }
6268 /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
6269 *bytes_left -= 1;
6270 str += 1;
6271
6272 if (str[0] == 0) {
6273 /* All channels */
6274 channel_list[idx] = 0x0;
6275 }
6276 else {
6277 channel_list[idx] = (uint16)str[0];
6278 DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
6279 }
6280 *bytes_left -= 1;
6281 str += 1;
6282
6283 if (idx++ > 255) {
6284 DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
6285 return -1;
6286 }
6287 }
6288
6289 *list_str = str;
6290 return idx;
6291 }
6292
6293 /* Parse a comma-separated list from list_str into ssid array, starting
6294 * at index idx. Max specifies size of the ssid array. Parses ssids
6295 * and returns updated idx; if idx >= max not all fit, the excess have
6296 * not been copied. Returns -1 on empty string, or on ssid too long.
6297 */
6298 int
6299 wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
6300 {
6301 char* str, *ptr;
6302
6303 if ((list_str == NULL) || (*list_str == NULL))
6304 return -1;
6305
6306 for (str = *list_str; str != NULL; str = ptr) {
6307
6308 /* check for next TAG */
6309 if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
6310 *list_str = str + strlen(GET_CHANNEL);
6311 return idx;
6312 }
6313
6314 if ((ptr = strchr(str, ',')) != NULL) {
6315 *ptr++ = '\0';
6316 }
6317
6318 if (strlen(str) > DOT11_MAX_SSID_LEN) {
6319 DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
6320 return -1;
6321 }
6322
6323 if (strlen(str) == 0)
6324 ssid[idx].SSID_len = 0;
6325
6326 if (idx < max) {
6327 bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
6328 strlcpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID));
6329 ssid[idx].SSID_len = sizeof(ssid[idx].SSID);
6330 }
6331 idx++;
6332 }
6333 return idx;
6334 }
6335
6336 /*
6337 * Parse channel list from iwpriv CSCAN
6338 */
6339 int
6340 wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
6341 {
6342 int num;
6343 int val;
6344 char* str;
6345 char* endptr = NULL;
6346
6347 if ((list_str == NULL)||(*list_str == NULL))
6348 return -1;
6349
6350 str = *list_str;
6351 num = 0;
6352 while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
6353 val = (int)strtoul(str, &endptr, 0);
6354 if (endptr == str) {
6355 printf("could not parse channel number starting at"
6356 " substring \"%s\" in list:\n%s\n",
6357 str, *list_str);
6358 return -1;
6359 }
6360 str = endptr + strspn(endptr, " ,");
6361
6362 if (num == channel_num) {
6363 DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
6364 channel_num, *list_str));
6365 return -1;
6366 }
6367
6368 channel_list[num++] = (uint16)val;
6369 }
6370 *list_str = str;
6371 return num;
6372 }
6373 #endif
6374
6375 /* Given filename and download type, returns a buffer pointer and length
6376 * for download to f/w. Type can be FW or NVRAM.
6377 *
6378 */
6379 int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
6380 char ** buffer, int *length)
6381
6382 {
6383 int ret = BCME_ERROR;
6384 int len = 0;
6385 int file_len;
6386 void *image = NULL;
6387 uint8 *buf = NULL;
6388
6389 /* Point to cache if available. */
6390 /* No Valid cache found on this call */
6391 if (!len) {
6392 file_len = *length;
6393 *length = 0;
6394
6395 if (file_path) {
6396 image = dhd_os_open_image1(dhd, file_path);
6397 if (image == NULL) {
6398 printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
6399 goto err;
6400 }
6401 }
6402
6403 buf = MALLOCZ(dhd->osh, file_len);
6404 if (buf == NULL) {
6405 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
6406 __FUNCTION__, file_len));
6407 goto err;
6408 }
6409
6410 /* Download image */
6411 len = dhd_os_get_image_block((char *)buf, file_len, image);
6412 if ((len <= 0 || len > file_len)) {
6413 MFREE(dhd->osh, buf, file_len);
6414 goto err;
6415 }
6416 }
6417
6418 ret = BCME_OK;
6419 *length = len;
6420 *buffer = (char *)buf;
6421
6422 /* Cache if first call. */
6423
6424 err:
6425 if (image)
6426 dhd_os_close_image1(dhd, image);
6427
6428 return ret;
6429 }
6430
6431 int
6432 dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type,
6433 unsigned char *dload_buf, int len)
6434 {
6435 struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
6436 int err = 0;
6437 int dload_data_offset;
6438 static char iovar_buf[WLC_IOCTL_MEDLEN];
6439 int iovar_len;
6440
6441 memset(iovar_buf, 0, sizeof(iovar_buf));
6442
6443 dload_data_offset = OFFSETOF(wl_dload_data_t, data);
6444 dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
6445 dload_ptr->dload_type = dload_type;
6446 dload_ptr->len = htod32(len - dload_data_offset);
6447 dload_ptr->crc = 0;
6448 len = ROUNDUP(len, 8);
6449
6450 iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
6451 (uint)len, iovar_buf, sizeof(iovar_buf));
6452 if (iovar_len == 0) {
6453 DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
6454 __FUNCTION__, iovar));
6455 return BCME_BUFTOOSHORT;
6456 }
6457
6458 err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
6459 iovar_len, IOV_SET, 0);
6460
6461 return err;
6462 }
6463
6464 int
6465 dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
6466 uint32 len, char *iovar)
6467
6468 {
6469 int chunk_len;
6470 int size2alloc;
6471 unsigned char *new_buf;
6472 int err = 0, data_offset;
6473 uint16 dl_flag = DL_BEGIN;
6474
6475 data_offset = OFFSETOF(wl_dload_data_t, data);
6476 size2alloc = data_offset + MAX_CHUNK_LEN;
6477 size2alloc = ROUNDUP(size2alloc, 8);
6478
6479 if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
6480 do {
6481 chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
6482 MAX_CHUNK_LEN, buf);
6483 if (chunk_len < 0) {
6484 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
6485 __FUNCTION__, chunk_len));
6486 err = BCME_ERROR;
6487 goto exit;
6488 }
6489 if (len - chunk_len == 0)
6490 dl_flag |= DL_END;
6491
6492 err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
6493 new_buf, data_offset + chunk_len);
6494
6495 dl_flag &= ~DL_BEGIN;
6496
6497 len = len - chunk_len;
6498 } while ((len > 0) && (err == 0));
6499 } else {
6500 err = BCME_NOMEM;
6501 }
6502 exit:
6503 if (new_buf) {
6504 MFREE(dhd->osh, new_buf, size2alloc);
6505 }
6506 return err;
6507 }
6508
/* Stub: no default TX power cap is applied in this build; both parameters
 * are ignored and success (0) is always reported so generic init paths
 * proceed unchanged.
 */
int
dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
{
	return 0;
}
6514
6515 int
6516 dhd_check_current_clm_data(dhd_pub_t *dhd)
6517 {
6518 char iovbuf[WLC_IOCTL_SMLEN];
6519 wl_country_t *cspec;
6520 int err = BCME_OK;
6521
6522 memset(iovbuf, 0, sizeof(iovbuf));
6523 err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
6524 if (err == 0) {
6525 err = BCME_BUFTOOSHORT;
6526 DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
6527 return err;
6528 }
6529 err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
6530 if (err) {
6531 DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
6532 return err;
6533 }
6534 cspec = (wl_country_t *)iovbuf;
6535 if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
6536 DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
6537 __FUNCTION__));
6538 return FALSE;
6539 }
6540 DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
6541 __FUNCTION__));
6542 return TRUE;
6543 }
6544
/* Download a CLM (regulatory) blob to the dongle, if one exists on the
 * filesystem, and verify the firmware ends up with a valid country code.
 *
 * clm_path: optional module-parameter override for the blob location; when
 * empty, the compile-time default VENDOR_PATH CONFIG_BCMDHD_CLM_PATH is used.
 *
 * Returns BCME_OK on success, a BCME_xxx error on failure, or FALSE
 * propagated from dhd_check_current_clm_data() when no country code is set
 * (that helper's mixed TRUE/FALSE/error return convention leaks into err).
 */
int
dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
{
	char *clm_blob_path;
	int len;
	char *memblock = NULL;
	int err = BCME_OK;
	char iovbuf[WLC_IOCTL_SMLEN];
	int status = FALSE;

	if (clm_path && clm_path[0] != '\0') {
		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
			DHD_ERROR(("clm path exceeds max len\n"));
			return BCME_ERROR;
		}
		clm_blob_path = clm_path;
		DHD_TRACE(("clm path from module param:%s\n", clm_path));
	} else {
		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
	}

	/* If CLM blob file is found on the filesystem, download the file.
	 * After CLM file download or If the blob file is not present,
	 * validate the country code before proceeding with the initialization.
	 * If country code is not valid, fail the initialization.
	 */
	memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
	if (memblock == NULL) {
		printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
		/* blob-mandatory builds fail when the file is missing; otherwise
		 * accept firmware that already carries CLM data
		 */
		if (dhd->is_blob) {
			err = BCME_ERROR;
		} else {
			status = dhd_check_current_clm_data(dhd);
			if (status == TRUE) {
				err = BCME_OK;
			} else {
				/* FALSE (no CLM) or a negative BCME error */
				err = status;
			}
		}
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		goto exit;
	}

	len = dhd_os_get_image_size(memblock);

	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
		status = dhd_check_current_clm_data(dhd);
		if (status == TRUE) {
#if defined(DHD_BLOB_EXISTENCE_CHECK)
			/* outside MFG mode, never stack a blob on top of
			 * firmware-resident CLM data
			 */
			if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
				if (dhd->is_blob) {
					err = BCME_ERROR;
				}
				goto exit;
			}
#else
			DHD_ERROR(("%s: CLM already exist in F/W, "
				"new CLM data will be added to the end of existing CLM data!\n",
				__FUNCTION__));
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		} else if (status != FALSE) {
			/* the country query itself failed -- propagate */
			err = status;
			goto exit;
		}

		/* Found blob file. Download the file */
		DHD_TRACE(("clm file download from %s \n", clm_blob_path));
		err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
		if (err) {
			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
			/* Retrieve clmload_status and print */
			memset(iovbuf, 0, sizeof(iovbuf));
			len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
			if (len == 0) {
				err = BCME_BUFTOOSHORT;
				goto exit;
			}
			err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
			if (err) {
				DHD_ERROR(("%s: clmload_status get failed err=%d \n",
					__FUNCTION__, err));
			} else {
				DHD_ERROR(("%s: clmload_status: %d \n",
					__FUNCTION__, *((int *)iovbuf)));
				if (*((int *)iovbuf) == CHIPID_MISMATCH) {
					DHD_ERROR(("Chip ID mismatch error \n"));
				}
			}
			/* any download failure is reported uniformly as BCME_ERROR */
			err = BCME_ERROR;
			goto exit;
		} else {
			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
		}
	} else {
		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
	}

	/* Verify country code */
	status = dhd_check_current_clm_data(dhd);

	if (status != TRUE) {
		/* Country code not initialized or CLM download not proper */
		DHD_ERROR(("country code not initialized\n"));
		err = status;
	}
exit:

	if (memblock) {
		dhd_os_close_image1(dhd, memblock);
	}

	return err;
}
6659
/* Release a buffer obtained from dhd_get_download_buffer(); length must be
 * the length that call returned (MFREE requires the original size).
 */
void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
{
	MFREE(dhd->osh, buffer, length);
}
6664
6665 #ifdef SHOW_LOGTRACE
/* Parse the logstrs.bin artifact (format strings for firmware binary event
 * logs) held in raw_fmts/logstrs_size into event_log.
 *
 * Three layouts are supported, distinguished by a trailer at the end of the
 * buffer: legacy (no header, RAM-only), header v1, and header v2. A v2
 * header additionally embeds a firmware ID that must match the FWID string
 * at the tail of the firmware image (st_str_file_path), which is read here.
 *
 * On success event_log->fmts is a malloc'd (or reused) array of num_fmts
 * pointers into raw_fmts; the caller retains ownership of raw_fmts.
 * Returns BCME_OK, BCME_DECERR on FW-id mismatch/lookup failure, or
 * BCME_ERROR on malformed input or allocation failure.
 */
int
dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
		dhd_event_log_t *event_log)
{
	uint32 *lognums = NULL;
	char *logstrs = NULL;
	logstr_trailer_t *trailer = NULL;
	int ram_index = 0;
	char **fmts = NULL;
	int num_fmts = 0;
	/* stays TRUE unless the v2 FWID comparison succeeds */
	bool match_fail = TRUE;
	int32 i = 0;
	uint8 *pfw_id = NULL;
	uint32 fwid = 0;
	void *file = NULL;
	int file_len = 0;
	char fwid_str[FWID_STR_LEN];
	uint32 hdr_logstrs_size = 0;

	/* Read last three words in the logstrs.bin file */
	trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_trailer_t));

	if (trailer->log_magic == LOGSTRS_MAGIC) {
		/*
		 * logstrs.bin has a header.
		 */
		if (trailer->version == 1) {
			logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
					logstrs_size - sizeof(logstr_header_v1_t));
			DHD_INFO(("%s: logstr header version = %u\n",
					__FUNCTION__, hdr_v1->version));
			num_fmts =	hdr_v1->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr_v1->ram_lognums_offset -
				hdr_v1->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
			logstrs = (char *)	 &raw_fmts[hdr_v1->rom_logstrs_offset];
			hdr_logstrs_size = hdr_v1->logstrs_size;
		} else if (trailer->version == 2) {
			logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
					sizeof(logstr_header_t));
			DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
					__FUNCTION__, hdr->version, hdr->flags));

			/* For ver. 2 of the header, need to match fwid of
			 *  both logstrs.bin and fw bin
			 */

			/* read the FWID from fw bin */
			file = dhd_os_open_image1(NULL, st_str_file_path);
			if (!file) {
				DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
				goto error;
			}
			file_len = dhd_os_get_image_size(file);
			if (file_len <= 0) {
				DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
				goto error;
			}
			/* fwid is at the end of fw bin in string format */
			if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
				DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
				goto error;
			}

			memset(fwid_str, 0, sizeof(fwid_str));
			if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
				DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
				goto error;
			}
			/* the tag may be spelled either FWID_STR_1 or FWID_STR_2 */
			pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_1, strlen(FWID_STR_1));
			if (!pfw_id) {
				pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_2, strlen(FWID_STR_2));
				if (!pfw_id) {
					DHD_ERROR(("%s: could not find id in FW bin!\n",
						__FUNCTION__));
					goto error;
				}
			}
			/* search for the '-' in the fw id str, after which the
			 * actual 4 byte fw id is present
			 */
			while (pfw_id && *pfw_id != '-') {
				++pfw_id;
			}
			++pfw_id;
			fwid = bcm_strtoul((char *)pfw_id, NULL, 16);

			/* check if fw id in logstrs.bin matches the fw one */
			if (hdr->fw_id != fwid) {
				DHD_ERROR(("%s: logstr id does not match FW!"
					"logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
					__FUNCTION__, hdr->fw_id, fwid));
				goto error;
			}

			match_fail = FALSE;
			num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr->ram_lognums_offset -
				hdr->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
			logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
			hdr_logstrs_size = hdr->logstrs_size;

/* note: label is local to the version-2 branch; all goto error sites above
 * are within this branch
 */
error:
			if (file) {
				dhd_os_close_image1(NULL, file);
			}
			if (match_fail) {
				return BCME_DECERR;
			}
		} else {
			DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
					trailer->version));
			return BCME_ERROR;
		}
		if (logstrs_size != hdr_logstrs_size) {
			DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
			return BCME_ERROR;
		}
	} else {
		/*
		 * Legacy logstrs.bin format without header.
		 */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);

		/* Legacy RAM-only logstrs.bin format:
		 *	  - RAM 'lognums' section
		 *	  - RAM 'logstrs' section.
		 *
		 * 'lognums' is an array of indexes for the strings in the
		 * 'logstrs' section. The first uint32 is an index to the
		 * start of 'logstrs'. Therefore, if this index is divided
		 * by 'sizeof(uint32)' it provides the number of logstr
		 *	entries.
		 */
		ram_index = 0;
		lognums = (uint32 *) raw_fmts;
		logstrs = (char *) &raw_fmts[num_fmts << 2];
	}
	if (num_fmts) {
		if (event_log->fmts != NULL) {
			fmts = event_log->fmts;	/* reuse existing malloced fmts */
		} else {
			fmts = MALLOC(osh, num_fmts  * sizeof(char *));
		}
	}
	if (fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
		return BCME_ERROR;
	}
	event_log->fmts_size = num_fmts  * sizeof(char *);

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		* (they are 0-indexed relative to 'rom_logstrs_offset').
		*
		* RAM lognums are already indexed to point to the correct RAM logstrs (they
		* are 0-indexed relative to the start of the logstrs.bin file).
		*/
		if (i == ram_index) {
			logstrs = raw_fmts;
		}
		fmts[i] = &logstrs[lognums[i]];
	}
	event_log->fmts = fmts;
	event_log->raw_fmts_size = logstrs_size;
	event_log->raw_fmts = raw_fmts;
	event_log->num_fmts = num_fmts;
	return BCME_OK;
} /* dhd_parse_logstrs_file */
6839
/* Scan a firmware .map file (already-open handle 'file') for the ramstart,
 * rodata_start and rodata_end addresses, writing each into the respective
 * out-parameter.
 *
 * The file is read in READ_NUM_BYTES windows; after each window the file
 * position is rewound by GO_BACK_FILE_POS_NUM_BYTES so a symbol string
 * split across a window boundary is still found on the next pass. 'count'
 * accumulates one bit per symbol found (RAMSTART_BIT/RDSTART_BIT/RDEND_BIT)
 * until it equals ALL_MAP_VAL.
 *
 * Returns BCME_OK when all three values were found, BCME_ERROR otherwise.
 */
int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
		uint32 *rodata_end)
{
	char *raw_fmts =  NULL, *raw_fmts_loc = NULL;
	uint32 read_size = READ_NUM_BYTES;
	int error = 0;
	char * cptr = NULL;
	char c;
	uint8 count = 0;

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = MALLOCZ(osh, read_size + 1);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map  file */
	while (count != ALL_MAP_VAL)
	{
		error = dhd_os_read_file(file, raw_fmts, read_size);
		if (error < 0) {
			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
					error));
			goto fail;
		}

		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RAMSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
			strlen(ramstart_str)))) {
			/* the hex address sits BYTES_AHEAD_NUM chars before the symbol */
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
			strlen(rodata_start_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDEND_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
			strlen(rodata_end_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}

		if (error < (int)read_size) {
			/*
			* since we reset file pos back to earlier pos by
			* GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
			* The reason for this is if string is spreaded across
			* bytes, the read function should not miss it.
			* So if ret value is less than read_size, reached EOF don't read further
			*/
			break;
		}
		memset(raw_fmts, 0, read_size);
		/*
		* go back to predefined NUM of bytes so that we won't miss
		* the string and  addr even if it comes as splited in next read.
		*/
		dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, read_size + 1);
		raw_fmts = NULL;
	}
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	else {
		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
				count));
		return BCME_ERROR;
	}

} /* dhd_parse_map_file */
6937
6938 #ifdef PCIE_FULL_DONGLE
/* Validate and unwrap a PCIE info-buffer packet carrying event-log trace
 * data, then hand the payload to dhd_dbg_trace_evnt_handler().
 *
 * Layout checked, in order: a uint32 version word (must be PCIE_INFOBUF_V1),
 * then a single type/length header (type must be
 * PCIE_INFOBUF_V1_TYPE_LOGTRACE), then 'length' bytes of payload. Each
 * validated header is stripped with PKTPULL, so pktbuf's data pointer is
 * advanced as a side effect even on some failure paths.
 *
 * Returns BCME_OK on success, BCME_ERROR on any validation failure.
 */
int
dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
		dhd_event_log_t *event_data)
{
	uint32 infobuf_version;
	info_buf_payload_hdr_t *payload_hdr_ptr;
	uint16 payload_hdr_type;
	uint16 payload_hdr_length;

	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	/* packet must at least hold the version word */
	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
		DHD_ERROR(("%s: infobuf too small for version field\n",
			__FUNCTION__));
		goto exit;
	}
	infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
	PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
	if (infobuf_version != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
			__FUNCTION__, infobuf_version));
		goto exit;
	}

	/* Version 1 infobuf has a single type/length (and then value) field */
	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		goto exit;
	}
	/* Process/parse the common info payload header (type/length) */
	payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
	payload_hdr_type = ltoh16(payload_hdr_ptr->type);
	payload_hdr_length = ltoh16(payload_hdr_ptr->length);
	if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, payload_hdr_type));
		goto exit;
	}
	PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));

	/* Validate that the specified length isn't bigger than the
	 * provided data.
	 */
	if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
		DHD_ERROR(("%s: infobuf logtrace length is bigger"
			" than actual buffer data\n", __FUNCTION__));
		goto exit;
	}
	dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
		event_data, payload_hdr_length);

	return BCME_OK;

exit:
	return BCME_ERROR;
} /* dhd_event_logtrace_infobuf_pkt_process */
6996 #endif /* PCIE_FULL_DONGLE */
6997 #endif /* SHOW_LOGTRACE */
6998
6999 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
7000
/* To handle the TDLS event in the dhd_common.c
 *
 * Thin wrapper that forwards the event to dhd_tdls_update_peer_info(); the
 * GCC_DIAGNOSTIC macros suppress compiler cast warnings around the call.
 * Returns the result of dhd_tdls_update_peer_info().
 */
int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
{
	int ret = BCME_OK;

	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
	ret = dhd_tdls_update_peer_info(dhd_pub, event);
	GCC_DIAGNOSTIC_POP()

	return ret;
}
7013
7014 int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
7015 {
7016 tdls_peer_node_t *cur = NULL, *prev = NULL;
7017 if (!dhd_pub)
7018 return BCME_ERROR;
7019 cur = dhd_pub->peer_tbl.node;
7020
7021 if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
7022 return BCME_ERROR;
7023
7024 while (cur != NULL) {
7025 prev = cur;
7026 cur = cur->next;
7027 MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
7028 }
7029 dhd_pub->peer_tbl.tdls_peer_count = 0;
7030 dhd_pub->peer_tbl.node = NULL;
7031 return BCME_OK;
7032 }
7033 #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
7034
7035 /* pretty hex print a contiguous buffer
7036 * based on the debug level specified
7037 */
7038 void
7039 dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
7040 {
7041 char line[128], *p;
7042 int len = sizeof(line);
7043 int nchar;
7044 uint i;
7045
7046 if (msg && (msg[0] != '\0')) {
7047 if (dbg_level == DHD_ERROR_VAL)
7048 DHD_ERROR(("%s:\n", msg));
7049 else if (dbg_level == DHD_INFO_VAL)
7050 DHD_INFO(("%s:\n", msg));
7051 else if (dbg_level == DHD_TRACE_VAL)
7052 DHD_TRACE(("%s:\n", msg));
7053 }
7054
7055 p = line;
7056 for (i = 0; i < nbytes; i++) {
7057 if (i % 16 == 0) {
7058 nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
7059 p += nchar;
7060 len -= nchar;
7061 }
7062 if (len > 0) {
7063 nchar = snprintf(p, len, "%02x ", buf[i]);
7064 p += nchar;
7065 len -= nchar;
7066 }
7067
7068 if (i % 16 == 15) {
7069 /* flush line */
7070 if (dbg_level == DHD_ERROR_VAL)
7071 DHD_ERROR(("%s:\n", line));
7072 else if (dbg_level == DHD_INFO_VAL)
7073 DHD_INFO(("%s:\n", line));
7074 else if (dbg_level == DHD_TRACE_VAL)
7075 DHD_TRACE(("%s:\n", line));
7076 p = line;
7077 len = sizeof(line);
7078 }
7079 }
7080
7081 /* flush last partial line */
7082 if (p != line) {
7083 if (dbg_level == DHD_ERROR_VAL)
7084 DHD_ERROR(("%s:\n", line));
7085 else if (dbg_level == DHD_INFO_VAL)
7086 DHD_INFO(("%s:\n", line));
7087 else if (dbg_level == DHD_TRACE_VAL)
7088 DHD_TRACE(("%s:\n", line));
7089 }
7090 }
7091
/* Run a host<->dongle throughput test described by tput_data.
 *
 * TX direction: builds test packets (optionally with an Ethernet header),
 * transmits them in batches bounded by the available TX buffers, appends a
 * final TPUT_PKT_TYPE_STOP packet, and waits for completions per batch.
 * RX direction: simply waits for the RX path to signal completion.
 * Afterwards the achieved rate in bits/sec and elapsed time are computed
 * into dhd->tput_data and copied back out through tput_data.
 *
 * Each data packet carries a CRC32 over its pkt-id, num-pkts and payload
 * fields so the peer can detect corruption.
 *
 * Returns BCME_OK, or BCME_BADARG / BCME_BUSY / BCME_BUFTOOLONG /
 * BCME_NOMEM / BCME_ERROR on the respective failure.
 */
int
dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
{
	struct ether_header ether_hdr;
	tput_pkt_t tput_pkt;
	void *pkt = NULL;
	uint8 *pktdata = NULL;
	uint32 pktsize = 0;
	uint64 total_size = 0;
	uint32 *crc = 0;
	uint32 pktid = 0;
	uint32 total_num_tx_pkts = 0;
	int err = 0, err_exit = 0;
	uint32 i = 0;
	uint64 time_taken = 0;
	int max_txbufs = 0;
	uint32 n_batches = 0;
	uint32 n_remain = 0;
	uint8 tput_pkt_hdr_size = 0;
	bool batch_cnt = FALSE;
	bool tx_stop_pkt = FALSE;

	/* reject mismatched structure versions from the ioctl caller */
	if (tput_data->version != TPUT_TEST_T_VER ||
		tput_data->length != TPUT_TEST_T_LEN) {
		DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
		err_exit = BCME_BADARG;
		goto exit_error;
	}

	/* only one test instance may run at a time */
	if (dhd->tput_data.tput_test_running) {
		DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
		err_exit = BCME_BUSY;
		goto exit_error;
	}
#ifdef PCIE_FULL_DONGLE
	/*
	 * 100 bytes to accommodate ether header and tput header. As of today
	 * both occupy 30 bytes. Rest is reserved.
	 */
	if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
		(tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
		DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
			__FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
			(DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
		err_exit = BCME_BUFTOOLONG;
		goto exit_error;
	}
#endif
	/* batch size is limited by both the bus TX buffers and a fixed cap */
	max_txbufs = dhd_get_max_txbufs(dhd);
	max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);

	if (!(tput_data->num_pkts > 0)) {
		DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
			__FUNCTION__, tput_data->num_pkts));
		err_exit = BCME_ERROR;
		goto exit_error;
	}

	/* snapshot the request and reset all counters/timestamps */
	memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
	memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
	dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
	dhd->tput_data.pkts_cmpl = 0;
	dhd->tput_start_ts = dhd->tput_stop_ts = 0;

	/* payload_size includes the 12 bytes of pkt-id/num-pkts/crc fields */
	if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
		pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
				(tput_data->payload_size - 12);
	} else {
		pktsize = sizeof(tput_pkt_t) +
				(tput_data->payload_size - 12);
	}

	/* header length = offset of crc32 field from the start of the struct */
	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
			(uint8 *)&tput_pkt.mac_sta);

	/* mark the tput test as started */
	dhd->tput_data.tput_test_running = TRUE;

	if (tput_data->direction == TPUT_DIR_TX) {
		/* for ethernet header */
		memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
		memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
		ether_hdr.ether_type = hton16(ETHER_TYPE_IP);

		/* fill in the tput pkt */
		memset(&tput_pkt, 0, sizeof(tput_pkt));
		memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
		memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
		tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
		tput_pkt.num_pkts = hton32(tput_data->num_pkts);

		/* split the request into full batches plus a remainder */
		if (tput_data->num_pkts > (uint32)max_txbufs) {
			n_batches = tput_data->num_pkts / max_txbufs;
			n_remain = tput_data->num_pkts % max_txbufs;
		} else {
			n_batches = 0;
			n_remain = tput_data->num_pkts;
		}
		DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
			__FUNCTION__, tput_data->num_pkts, n_batches, n_remain));

		do {
			/* reset before every batch */
			dhd->batch_tx_pkts_cmpl = 0;
			if (n_batches) {
				dhd->batch_tx_num_pkts = max_txbufs;
				--n_batches;
			} else if (n_remain) {
				dhd->batch_tx_num_pkts = n_remain;
				n_remain = 0;
			} else {
				DHD_ERROR(("Invalid. This should not hit\n"));
			}

			dhd->tput_start_ts = OSL_SYSUPTIME_US();
			/* the '|| tx_stop_pkt' term gives one extra iteration
			 * to transmit the trailing STOP packet
			 */
			for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
				pkt = PKTGET(dhd->osh, pktsize, TRUE);
				if (!pkt) {
					dhd->tput_data.tput_test_running = FALSE;
					DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
						__FUNCTION__));
					DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
						__FUNCTION__, dhd->tput_data.pkts_good,
						dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
					err_exit = BCME_NOMEM;
					goto exit_error;
				}
				pktdata = PKTDATA(dhd->osh, pkt);
				PKTSETLEN(dhd->osh, pkt, pktsize);
				memset(pktdata, 0, pktsize);
				if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
					memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
					pktdata += sizeof(ether_hdr);
				}
				/* send stop pkt as last pkt */
				if (tx_stop_pkt) {
					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
					tx_stop_pkt = FALSE;
				} else
					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
				tput_pkt.pkt_id = hton32(pktid++);
				tput_pkt.crc32 = 0;
				memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
				/* compute crc32 over the pkt-id, num-pkts and data fields */
				crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
				*crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
					8 + (tput_data->payload_size - 12),
					CRC32_INIT_VALUE));

				err = dhd_sendpkt(dhd, 0, pkt);
				if (err != BCME_OK) {
					DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
						__FUNCTION__, pktid, err));
					dhd->tput_data.pkts_bad++;
				}
				total_num_tx_pkts++;
				if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
					tx_stop_pkt = TRUE;
				}
			}
			DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
			if (!dhd_os_tput_test_wait(dhd, NULL,
					TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
				dhd->tput_stop_ts = OSL_SYSUPTIME_US();
				dhd->tput_data.tput_test_running = FALSE;
				DHD_ERROR(("%s: TX completion timeout !"
					" Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
					__FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
				err_exit = BCME_ERROR;
				goto exit_error;
			}
			/* record the elapsed time of the first completed batch only */
			if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
				(dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
				if (!time_taken) {
					time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
				}
			} else {
				dhd->tput_data.tput_test_running = FALSE;
				DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
					__FUNCTION__));
				err_exit = BCME_ERROR;
				goto exit_error;
			}
			if (n_batches || n_remain) {
				batch_cnt = TRUE;
			} else {
				batch_cnt = FALSE;
			}
		} while (batch_cnt);
	} else {
		/* TPUT_DIR_RX */
		DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
		if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
			DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
			dhd->tput_stop_ts = OSL_SYSUPTIME_US();
		}
	}

	/* calculate the throughput in bits per sec */
	if (dhd->tput_start_ts && dhd->tput_stop_ts &&
		(dhd->tput_stop_ts > dhd->tput_start_ts)) {
		time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
		time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
		dhd->tput_data.time_ms = time_taken;
		if (time_taken) {
			total_size = pktsize * dhd->tput_data.pkts_cmpl * 8;
			dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
			/* convert from ms to seconds */
			dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * 1000;
		}
	} else {
		DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
	}
	DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
		dhd->tput_data.tput_bps, dhd->tput_data.time_ms));

	/* report results back to the caller */
	memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));

	dhd->tput_data.tput_test_running = FALSE;

	err_exit = BCME_OK;

exit_error:
	DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
		__FUNCTION__, dhd->tput_data.pkts_good,
		dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));

	return err_exit;
}
7321
/*
 * Receive-path handler for the DHD throughput (tput) test.
 * Validates each received test packet (AP/STA MAC match, CRC32 over the
 * payload), maintains the good/bad/completed packet counters in
 * dhd->tput_data, and wakes the waiting test thread when either a STOP
 * packet or the full configured packet count has arrived.
 *
 * @param dhd  driver public context (holds tput_data state)
 * @param pkt  OSL packet containing one tput test frame
 */
void
dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
{
	uint8 *pktdata = NULL;
	tput_pkt_t *tput_pkt = NULL;
	uint32 crc = 0;
	uint8 tput_pkt_hdr_size = 0;

	pktdata = PKTDATA(dhd->osh, pkt);
	/* skip the ethernet header when the test was configured with one */
	if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
		pktdata += sizeof(struct ether_header);
	tput_pkt = (tput_pkt_t *)pktdata;

	/* record the timestamp of the first packet received */
	if (dhd->tput_data.pkts_cmpl == 0) {
		dhd->tput_start_ts = OSL_SYSUPTIME_US();
	}

	/* STOP packets are control frames and are not counted as completed */
	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
		dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
		dhd->tput_data.pkts_cmpl++;
	}
	/* drop rx packets received beyond the specified # */
	if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
		return;

	DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
		ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));

	/* discard if mac addr of AP/STA does not match the specified ones */
	if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
		ETHER_ADDR_LEN) != 0) ||
		(memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
		ETHER_ADDR_LEN) != 0)) {
		dhd->tput_data.pkts_bad++;
		DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
		return;
	}

	/* header size = bytes from 'mac_sta' up to (not including) 'crc32'.
	 * The CRC is computed over the bytes starting 4 bytes past the header,
	 * for (8 + payload_size - 12) bytes -- this mirrors the TX-side
	 * computation visible earlier in this file (same '+ 4' and '- 12'
	 * constants).  NOTE(review): these magic offsets depend on the exact
	 * tput_pkt_t layout; keep both sides in sync if the struct changes.
	 */
	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
		(uint8 *)&tput_pkt->mac_sta);
	pktdata += tput_pkt_hdr_size + 4;
	crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
		CRC32_INIT_VALUE);
	if (crc != ntoh32(tput_pkt->crc32)) {
		DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
		dhd->tput_data.pkts_bad++;
		return;
	}

	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
		dhd->tput_data.pkts_good++;

	/* if we have received the stop packet or all the # of pkts, we're done */
	if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
		dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
		dhd->tput_stop_ts = OSL_SYSUPTIME_US();
		dhd_os_tput_test_wake(dhd);
	}
}
7384
7385 #ifdef DUMP_IOCTL_IOV_LIST
7386 void
7387 dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
7388 {
7389 dll_t *item;
7390 dhd_iov_li_t *iov_li;
7391 dhd->dump_iovlist_len++;
7392
7393 if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
7394 item = dll_head_p(list_head);
7395 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
7396 dll_delete(item);
7397 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
7398 dhd->dump_iovlist_len--;
7399 }
7400 dll_append(list_head, node);
7401 }
7402
7403 void
7404 dhd_iov_li_print(dll_t *list_head)
7405 {
7406 dhd_iov_li_t *iov_li;
7407 dll_t *item, *next;
7408 uint8 index = 0;
7409 for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
7410 next = dll_next_p(item);
7411 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
7412 DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
7413 }
7414 }
7415
7416 void
7417 dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
7418 {
7419 dll_t *item;
7420 dhd_iov_li_t *iov_li;
7421 while (!(dll_empty(list_head))) {
7422 item = dll_head_p(list_head);
7423 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
7424 dll_delete(item);
7425 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
7426 }
7427 }
7428 #endif /* DUMP_IOCTL_IOV_LIST */
7429
7430 #ifdef EWP_EDL
7431 /* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT
7432 * The reason being that, in hikey, if we try to DMA_MAP prealloced memory
7433 * it is failing with an 'out of space in SWIOTLB' error
7434 */
7435 int
7436 dhd_edl_mem_init(dhd_pub_t *dhd)
7437 {
7438 int ret = 0;
7439
7440 memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
7441 ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
7442 if (ret != BCME_OK) {
7443 DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
7444 __FUNCTION__));
7445 return BCME_ERROR;
7446 }
7447 return BCME_OK;
7448 }
7449
7450 /*
7451 * NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
7452 * for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init.
7453 */
7454 void
7455 dhd_edl_mem_deinit(dhd_pub_t *dhd)
7456 {
7457 if (dhd->edl_ring_mem.va != NULL)
7458 dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
7459 }
7460
/*
 * Validate one EDL ring work item and dispatch its embedded event-log
 * trace payload to dhd_dbg_trace_evnt_handler(); optionally also sends
 * the raw info-buf up the stack when dhdp->logtrace_pkt_sendup is set.
 *
 * @param dhdp             driver public context
 * @param data             start of the work item (cmn_msg_hdr_t first)
 * @param evt_decode_data  opaque decode context passed through to the
 *                         trace event handler
 * @return BCME_OK on success; BCME_VERSION / BCME_BADOPTION / BCME_BADLEN /
 *         BCME_BUFTOOLONG / BCME_ERROR on validation failure
 */
int
dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
	void *evt_decode_data)
{
	msg_hdr_edl_t *msg = NULL;
	cmn_msg_hdr_t *cmn_msg_hdr = NULL;
	uint8 *buf = NULL;

	if (!data || !dhdp || !evt_decode_data) {
		DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* format of data in each work item in the EDL ring:
	 * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
	 * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
	 */
	cmn_msg_hdr = (cmn_msg_hdr_t *)data;
	msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
	buf = (uint8 *)msg;
	/* validate the fields */
	if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
			" expected (0x%x)\n", __FUNCTION__,
			msg->infobuf_ver, PCIE_INFOBUF_V1));
		return BCME_VERSION;
	}

	/* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
	if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
		/* NOTE(review): the message says "too small" but the code
		 * returns BCME_BUFTOOLONG; BCME_BUFTOOSHORT may have been
		 * intended -- confirm callers only test against BCME_OK
		 * before changing the code.
		 */
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		return BCME_BUFTOOLONG;
	}

	if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, ltoh16(msg->pyld_hdr.type)));
		return BCME_BADOPTION;
	}

	/* the payload must fit inside the length advertised by the work item */
	if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
		DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
			" than available buffer size %u\n", __FUNCTION__,
			ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
		return BCME_BADLEN;
	}

	/* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
	buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
	dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
		ltoh16(msg->pyld_hdr.length));

	/*
	 * check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
	 * copy the event data to the skb and send it up the stack
	 */
	if (dhdp->logtrace_pkt_sendup) {
		DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
			(uint32)(ltoh16(msg->pyld_hdr.length) +
			sizeof(info_buf_payload_hdr_t) + 4)));
		dhd_sendup_info_buf(dhdp, (uint8 *)msg);
	}

	return BCME_OK;
}
7527 #endif /* EWP_EDL */
7528
7529 #ifdef DHD_LOG_DUMP
7530 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
/*
 * Trigger a debug log dump (and, when compiled in, a FW core dump and
 * packet-log dump) for the given subcmd. Dumps are rate-limited: a new
 * trigger within DEBUG_DUMP_TRIGGER_INTERVAL_SEC of the previous one is
 * silently skipped.
 *
 * @param dhdp    driver public context
 * @param subcmd  dump sub-command; must be in [CMD_DEFAULT, CMD_MAX)
 */
void
dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
{
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	log_dump_type_t *flush_type;
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
	uint64 current_time_sec;

	if (!dhdp) {
		DHD_ERROR(("dhdp is NULL !\n"));
		return;
	}

	if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
		DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
		return;
	}

	current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);

	DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
		__FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
		DEBUG_DUMP_TRIGGER_INTERVAL_SEC));

	/* rate-limit: skip if the previous dump was triggered too recently */
	if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
		DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
			__FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
		return;
	}

	clear_debug_dump_time(dhdp->debug_dump_time_str);
#ifdef DHD_PCIE_RUNTIMEPM
	/* wake up RPM if SYSDUMP is triggered */
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	dhdp->debug_dump_subcmd = subcmd;

	/* stamp the trigger time for the next rate-limit check */
	dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);

#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	/* flush_type is freed at do_dhd_log_dump function */
	flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
	if (flush_type) {
		*flush_type = DLD_BUF_TYPE_ALL;
		dhd_schedule_log_dump(dhdp, flush_type);
	} else {
		DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
		return;
	}
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	/* Inside dhd_mem_dump, event notification will be sent to HAL and
	 * from other context DHD pushes memdump, debug_dump and pktlog dump
	 * to HAL and HAL will write into file
	 */
#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
	dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
	dhd_bus_mem_dump(dhdp);
#endif /* (BCMPCIE || BCMSDIO) && DHD_FW_COREDUMP */

#if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	dhd_schedule_pktlog_dump(dhdp);
#endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
}
7597 #endif /* DHD_LOG_DUMP */
7598
7599 #if defined(SHOW_LOGTRACE)
7600 int
7601 dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
7602 {
7603 void *file = NULL;
7604 int size = 0;
7605 char buf[FW_VER_STR_LEN];
7606 char *str = NULL;
7607 int ret = BCME_OK;
7608
7609 if (!fwpath)
7610 return BCME_BADARG;
7611
7612 file = dhd_os_open_image1(dhdp, fwpath);
7613 if (!file) {
7614 ret = BCME_ERROR;
7615 goto exit;
7616 }
7617 size = dhd_os_get_image_size(file);
7618 if (!size) {
7619 ret = BCME_ERROR;
7620 goto exit;
7621 }
7622
7623 /* seek to the last 'X' bytes in the file */
7624 if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
7625 ret = BCME_ERROR;
7626 goto exit;
7627 }
7628
7629 /* read the last 'X' bytes of the file to a buffer */
7630 memset(buf, 0, FW_VER_STR_LEN);
7631 if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
7632 ret = BCME_ERROR;
7633 goto exit;
7634 }
7635 /* search for 'Version' in the buffer */
7636 str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
7637 if (!str) {
7638 ret = BCME_ERROR;
7639 goto exit;
7640 }
7641 /* go back in the buffer to the last ascii character */
7642 while (str != buf &&
7643 (*str >= ' ' && *str <= '~')) {
7644 --str;
7645 }
7646 /* reverse the final decrement, so that str is pointing
7647 * to the first ascii character in the buffer
7648 */
7649 ++str;
7650
7651 if (strlen(str) > (FW_VER_STR_LEN - 1)) {
7652 ret = BCME_BADLEN;
7653 goto exit;
7654 }
7655
7656 DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
7657 /* copy to global variable, so that in case FW load fails, the
7658 * core capture logs will contain FW version read from the file
7659 */
7660 memset(fw_version, 0, FW_VER_STR_LEN);
7661 strlcpy(fw_version, str, FW_VER_STR_LEN);
7662
7663 exit:
7664 if (file)
7665 dhd_os_close_image1(dhdp, file);
7666
7667 return ret;
7668 }
7669 #endif
7670
7671 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
7672
/*
 * Build the vendor hang-event info string for an ioctl response timeout.
 * Formats the hang reason, version, timestamp, interface index and ioctl
 * metadata into dhd->hang_info, then appends up to HANG_FIELD_CNT_MAX
 * hex-encoded 32-bit words of the ioctl buffer. dhd->hang_info_cnt tracks
 * how many fields have been written.
 *
 * @param dhd    driver public context (hang_info must be allocated)
 * @param ifidx  interface index the ioctl was issued on
 * @param ioc    the timed-out ioctl descriptor
 */
static void
copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
{
	int remain_len;
	int i;
	int *cnt;
	char *dest;
	int bytes_written;
	uint32 ioc_dwlen = 0;

	if (!dhd || !dhd->hang_info) {
		DHD_ERROR(("%s dhd=%p hang_info=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
		return;
	}

	cnt = &dhd->hang_info_cnt;
	dest = dhd->hang_info;

	memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
	(*cnt) = 0;

	bytes_written = 0;
	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;

	get_debug_dump_time(dhd->debug_dump_time_hang_str);
	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);

	/* fixed leading fields: reason, version, timestamp, ifidx + ioctl
	 * cmd/len/set/used/needed
	 */
	bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
		HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
		dhd->debug_dump_time_hang_str,
		ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
	(*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;

	clear_debug_dump_time(dhd->debug_dump_time_hang_str);

	/* Access ioc->buf only if the ioc->len is more than 4 bytes */
	ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
	if (ioc_dwlen > 0) {
		const uint32 *ioc_buf = (const uint32 *)ioc->buf;

		/* first word goes out without the HANG_RAW_DEL separator */
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
		bytes_written += scnprintf(&dest[bytes_written], remain_len,
			"%08x", *(uint32 *)(ioc_buf++));
		GCC_DIAGNOSTIC_POP();
		(*cnt)++;
		if ((*cnt) >= HANG_FIELD_CNT_MAX) {
			return;
		}

		/* remaining words, each prefixed with the raw delimiter,
		 * until the buffer or the field budget runs out
		 */
		for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
			i++, (*cnt)++) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
			GCC_DIAGNOSTIC_POP();
		}
	}

	DHD_INFO(("%s hang info len: %d data: %s\n",
		__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
}
7737
7738 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
7739
7740 #if defined(DHD_H2D_LOG_TIME_SYNC)
7741 /*
7742 * Helper function:
7743 * Used for Dongle console message time syncing with Host printk
7744 */
7745 void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
7746 {
7747 uint64 ts;
7748
7749 /*
7750 * local_clock() returns time in nano seconds.
7751 * Dongle understand only milli seconds time.
7752 */
7753 ts = local_clock();
7754 /* Nano seconds to milli seconds */
7755 do_div(ts, 1000000);
7756 if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
7757 DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
7758 /* Stopping HOST Dongle console time syncing */
7759 dhd->dhd_rte_time_sync_ms = 0;
7760 }
7761 }
7762 #endif /* DHD_H2D_LOG_TIME_SYNC */
7763
/* configurations of ecounters to be enabled by default in FW.
 * Each entry is {scope flag, slice/interface index, stats XTLV id};
 * consumed by dhd_start_ecounters().
 */
static ecounters_cfg_t ecounters_cfg_tbl[] = {
	/* Global ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},

	/* Slice specific ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},

	/* Interface specific ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},

	/* secondary interface */
	/* XXX REMOVE for temporal, will be enabled after decision
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_GENERIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_MGT_CNT},
	*/
};
7790
/* XXX: Same event id shall be defined in consecutive order in the below table;
 * __dhd_event_ecounters_get_event_id_array() relies on runs of equal
 * event_id values being contiguous.
 */
static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
	/* Interface specific event ecounters */
	{WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
};
7796
/* Accepts an argument to -s, -g or -f and creates an XTLV.
 *
 * Builds a WL_ECOUNTERS_XTLV_REPORT_REQ container holding one
 * ecounters_stats_types_report_req_t (scoped to a slice or interface per
 * 'type'/'if_slice_idx') that in turn carries one zero-length XTLV whose
 * id is 'stats_rep'.
 *
 * On success *xtlv points at a freshly MALLOCZ'd buffer the CALLER must
 * free (its total size is the container hdr + req offset + inner XTLV);
 * on failure the buffer is freed here and *xtlv is set to NULL.
 *
 * @return BCME_OK, BCME_NOMEM, or BCME_ERROR on XTLV packing failure
 */
int
dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
	uint16 stats_rep, uint8 **xtlv)
{
	uint8 *req_xtlv = NULL;
	ecounters_stats_types_report_req_t *req;
	bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
	ecountersv2_xtlv_list_elt_t temp;
	uint16 xtlv_len = 0, total_len = 0;
	int rc = BCME_OK;

	/* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
	temp.id = stats_rep;
	temp.len = 0;

	/* Hence len/data = 0/NULL */
	xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;

	/* Total length of the container */
	total_len = BCM_XTLV_HDR_SIZE +
		OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;

	/* Now allocate a structure for the entire request */
	if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
		rc = BCME_NOMEM;
		goto fail;
	}

	/* container XTLV context */
	bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
		BCM_XTLV_OPTION_ALIGN32);

	/* Fill other XTLVs in the container. Leave space for XTLV headers */
	req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
	req->flags = type;
	/* scope the request: slices are addressed by bitmask, interfaces by index */
	if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
		req->slice_mask = 0x1 << if_slice_idx;
	} else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
		req->if_index = if_slice_idx;
	}

	/* Fill remaining XTLVs */
	bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
		BCM_XTLV_OPTION_ALIGN32);
	if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
		DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
		rc = BCME_ERROR;
		goto fail;
	}

	/* fill the top level container and get done with the XTLV container */
	rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
		bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
		stats_types_req));

	if (rc) {
		DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
		goto fail;
	}

fail:
	/* on any failure, release the buffer so the caller never sees a
	 * half-built request
	 */
	if (rc && req_xtlv) {
		MFREE(dhd->osh, req_xtlv, total_len);
		req_xtlv = NULL;
	}

	/* update the xtlv pointer */
	*xtlv = req_xtlv;
	return rc;
}
7868
7869 static int
7870 dhd_ecounter_autoconfig(dhd_pub_t *dhd)
7871 {
7872 int rc = BCME_OK;
7873 uint32 buf;
7874 rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
7875
7876 if (rc != BCME_OK) {
7877
7878 if (rc != BCME_UNSUPPORTED) {
7879 rc = BCME_OK;
7880 DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
7881 } else {
7882 DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
7883 }
7884 }
7885
7886 return rc;
7887 }
7888
7889 int
7890 dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
7891 {
7892 int rc = BCME_OK;
7893 if (enable) {
7894 if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
7895 if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
7896 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
7897 } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
7898 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
7899 }
7900 }
7901 } else {
7902 if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
7903 DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
7904 } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
7905 DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
7906 }
7907 }
7908 return rc;
7909 }
7910
/*
 * Build one XTLV per entry of ecounters_cfg_tbl[] (via
 * dhd_create_ecounters_params), link them into a temporary list, copy them
 * all into a single ecounters_config_request_v2_t, and send it to FW with
 * the "ecounters" iovar.
 *
 * Memory ownership: each list element owns its 'data' XTLV buffer; both
 * are freed either in the copy loop (success path) or in the trailing
 * cleanup loop after 'fail'.
 *
 * @return BCME_OK on success, BCME_NOMEM or the iovar error otherwise
 */
int
dhd_start_ecounters(dhd_pub_t *dhd)
{
	uint8 i = 0;
	uint8 *start_ptr;
	int rc = BCME_OK;
	bcm_xtlv_t *elt;
	ecounters_config_request_v2_t *req = NULL;
	ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
	ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
	uint16 total_processed_containers_len = 0;

	for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
		ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];

		if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
			MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
			DHD_ERROR(("Ecounters v2: No memory to process\n"));
			goto fail;
		}

		rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
			ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);

		if (rc) {
			DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
				ecounter_stat->stats_rep, rc));

			/* Free allocated memory and go to fail to release any memories allocated
			 * in previous iterations. Note that list_elt->data gets populated in
			 * dhd_create_ecounters_params() and gets freed there itself.
			 */
			MFREE(dhd->osh, list_elt, sizeof(*list_elt));
			list_elt = NULL;
			goto fail;
		}
		elt = (bcm_xtlv_t *) list_elt->data;

		/* Put the elements in the order they are processed */
		if (processed_containers_list == NULL) {
			processed_containers_list = list_elt;
		} else {
			tail->next = list_elt;
		}
		tail = list_elt;
		/* Size of the XTLV returned */
		total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
	}

	/* Now create ecounters config request with totallength */
	req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
		total_processed_containers_len);

	if (req == NULL) {
		rc = BCME_NOMEM;
		goto fail;
	}

	req->version = ECOUNTERS_VERSION_2;
	req->logset = EVENT_LOG_SET_ECOUNTERS;
	req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
	req->num_reports = ECOUNTERS_NUM_REPORTS;
	req->len = total_processed_containers_len +
		OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);

	/* Copy config */
	start_ptr = req->ecounters_xtlvs;

	/* Now go element by element in the list; this loop consumes the list,
	 * freeing each element and its XTLV buffer after copying it into req
	 */
	while (processed_containers_list) {
		list_elt = processed_containers_list;

		elt = (bcm_xtlv_t *)list_elt->data;

		memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
		start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
		processed_containers_list = processed_containers_list->next;

		/* Free allocated memories */
		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
	}

	if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
		DHD_ERROR(("failed to start ecounters\n"));
	}

fail:
	if (req) {
		MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
	}

	/* Now go element by element in the list: on the success path the list
	 * is already empty; this only runs for elements left over after an
	 * early failure
	 */
	while (processed_containers_list) {
		list_elt = processed_containers_list;
		elt = (bcm_xtlv_t *)list_elt->data;
		processed_containers_list = processed_containers_list->next;

		/* Free allocated memories */
		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
	}
	return rc;
}
8015
8016 int
8017 dhd_stop_ecounters(dhd_pub_t *dhd)
8018 {
8019 int rc = BCME_OK;
8020 ecounters_config_request_v2_t *req;
8021
8022 /* Now create ecounters config request with totallength */
8023 req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
8024
8025 if (req == NULL) {
8026 rc = BCME_NOMEM;
8027 goto fail;
8028 }
8029
8030 req->version = ECOUNTERS_VERSION_2;
8031 req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
8032
8033 if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
8034 DHD_ERROR(("failed to stop ecounters\n"));
8035 }
8036
8037 fail:
8038 if (req) {
8039 MFREE(dhd->osh, req, sizeof(*req));
8040 }
8041 return rc;
8042 }
8043
/* configured event_id_array for event ecounters:
 * one slot per distinct event id found in event_ecounters_cfg_tbl[],
 * with str_idx recording where that id's run starts in the table
 */
typedef struct event_id_array {
	uint8 event_id;	/* WLC_E_* event id */
	uint8 str_idx;	/* first index of this id in event_ecounters_cfg_tbl[] */
} event_id_array_t;
8049
8050 /* get event id array only from event_ecounters_cfg_tbl[] */
8051 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
8052 {
8053 uint8 i;
8054 uint8 idx = 0;
8055 int32 prev_evt_id = -1;
8056
8057 for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
8058 if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
8059 if (prev_evt_id >= 0)
8060 idx++;
8061 event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
8062 event_array[idx].str_idx = i;
8063 }
8064 prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
8065 }
8066 return idx;
8067 }
8068
8069 /* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */
8070 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
8071
8072 int
8073 dhd_start_event_ecounters(dhd_pub_t *dhd)
8074 {
8075 uint8 i, j = 0;
8076 uint8 event_id_cnt = 0;
8077 uint16 processed_containers_len = 0;
8078 uint16 max_xtlv_len = 0;
8079 int rc = BCME_OK;
8080 uint8 *ptr;
8081 uint8 *data;
8082 event_id_array_t *id_array;
8083 bcm_xtlv_t *elt = NULL;
8084 event_ecounters_config_request_v2_t *req = NULL;
8085
8086 /* XXX: the size of id_array is limited by the size of event_ecounters_cfg_tbl */
8087 id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
8088 ARRAYSIZE(event_ecounters_cfg_tbl));
8089
8090 if (id_array == NULL) {
8091 rc = BCME_NOMEM;
8092 goto fail;
8093 }
8094 event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
8095
8096 max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
8097 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
8098 ECNTRS_MAX_XTLV_NUM);
8099
8100 /* Now create ecounters config request with max allowed length */
8101 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
8102 sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
8103
8104 if (req == NULL) {
8105 rc = BCME_NOMEM;
8106 goto fail;
8107 }
8108
8109 for (i = 0; i <= event_id_cnt; i++) {
8110 /* req initialization by event id */
8111 req->version = ECOUNTERS_VERSION_2;
8112 req->logset = EVENT_LOG_SET_ECOUNTERS;
8113 req->event_id = id_array[i].event_id;
8114 req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
8115 req->len = 0;
8116 processed_containers_len = 0;
8117
8118 /* Copy config */
8119 ptr = req->ecounters_xtlvs;
8120
8121 for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
8122 event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
8123 if (id_array[i].event_id != event_ecounter_stat->event_id)
8124 break;
8125
8126 rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
8127 event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
8128 &data);
8129
8130 if (rc) {
8131 DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
8132 __FUNCTION__, event_ecounter_stat->stats_rep, rc));
8133 goto fail;
8134 }
8135
8136 elt = (bcm_xtlv_t *)data;
8137
8138 memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
8139 ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
8140 processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
8141
8142 /* Free allocated memories alloced by dhd_create_ecounters_params */
8143 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
8144
8145 if (processed_containers_len > max_xtlv_len) {
8146 DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
8147 __FUNCTION__));
8148 rc = BCME_BADLEN;
8149 goto fail;
8150 }
8151 }
8152
8153 req->len = processed_containers_len +
8154 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
8155
8156 DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
8157 __FUNCTION__, req->version, req->logset, req->event_id,
8158 req->flags, req->len));
8159
8160 rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
8161
8162 if (rc < 0) {
8163 DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
8164 req->event_id, rc));
8165 goto fail;
8166 }
8167 }
8168
8169 fail:
8170 /* Free allocated memories */
8171 if (req) {
8172 MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
8173 }
8174 if (id_array) {
8175 MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
8176 ARRAYSIZE(event_ecounters_cfg_tbl));
8177 }
8178
8179 return rc;
8180 }
8181
8182 int
8183 dhd_stop_event_ecounters(dhd_pub_t *dhd)
8184 {
8185 int rc = BCME_OK;
8186 event_ecounters_config_request_v2_t *req;
8187
8188 /* Now create ecounters config request with totallength */
8189 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
8190
8191 if (req == NULL) {
8192 rc = BCME_NOMEM;
8193 goto fail;
8194 }
8195
8196 req->version = ECOUNTERS_VERSION_2;
8197 req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
8198 req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
8199
8200 if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
8201 DHD_ERROR(("failed to stop event_ecounters\n"));
8202 }
8203
8204 fail:
8205 if (req) {
8206 MFREE(dhd->osh, req, sizeof(*req));
8207 }
8208 return rc;
8209 }
8210 #ifdef DHD_LOG_DUMP
/*
 * Drain a debug ring into a user buffer: writes a text header, a
 * log_dump_section_hdr_t, then all ring records. The ring is suspended
 * while draining and reactivated (with rp/wp reset) afterwards.
 *
 * Uses dhdp->concise_dbg_buf as a scratch area to pull records; if that
 * buffer is absent, nothing is written (only an error is logged).
 *
 * @param dhdp      driver public context
 * @param ring_ptr  dhd_dbg_ring_t to drain
 * @param user_buf  destination user buffer
 * @param sec_hdr   section header template (type/length filled in here)
 * @param text_hdr  NUL-terminated text label written before the section
 * @param buflen    unused here; kept for interface symmetry with callers
 * @param sec_type  value stored into sec_hdr->type
 * @return result of the last dhd_export_debug_data() call, or BCME_BADARG
 */
int
dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
	log_dump_section_hdr_t *sec_hdr,
	char *text_hdr, int buflen, uint32 sec_type)
{
	uint32 rlen = 0;
	uint32 data_len = 0;
	void *data = NULL;
	unsigned long flags = 0;
	int ret = 0;
	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
	int pos = 0;
	int fpos_sechdr = 0;

	if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
		return BCME_BADARG;
	}
	/* do not allow further writes to the ring
	 * till we flush it
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_SUSPEND;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	if (dhdp->concise_dbg_buf) {
		/* re-use concise debug buffer temporarily
		 * to pull ring data, to write
		 * record by record to file
		 */
		data_len = CONCISE_DUMP_BUFLEN;
		data = dhdp->concise_dbg_buf;
		ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
		/* write the section header now with zero length,
		 * once the correct length is found out, update
		 * it later
		 */
		fpos_sechdr = pos;
		sec_hdr->type = sec_type;
		sec_hdr->length = 0;
		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
			sizeof(*sec_hdr), &pos);
		/* pull and export records until the ring is drained */
		do {
			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
			if (rlen > 0) {
				/* write the log */
				ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
			}
			DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
		} while ((rlen > 0));
		/* now update the section header length in the file */
		/* Complete ring size is dumped by HAL, hence updating length to ring size */
		sec_hdr->length = ring->ring_size;
		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
			sizeof(*sec_hdr), &fpos_sechdr);
	} else {
		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
	}
	/* reactivate the ring; all items were consumed, so reset the
	 * read/write pointers as well
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	/* Resetting both read and write pointer,
	 * since all items are read.
	 */
	ring->rp = ring->wp = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	return ret;
}
8278
/*
 * Flush a debug ring to an already-open file, record by record.
 *
 * The ring is suspended (under its lock) while it is drained through
 * dhdp->concise_dbg_buf (re-used as a bounce buffer). Output layout:
 * text_hdr, a log_dump_section_hdr_t written with length 0 and patched
 * afterwards at fpos_sechdr with the actual byte total, then the raw
 * ring records. On success the ring is re-activated and its read/write
 * pointers reset.
 *
 * Returns BCME_OK, BCME_BADARG on NULL arguments, or BCME_ERROR on a
 * file write failure. NOTE(review): on the write-failure path the ring
 * is re-activated but rp/wp are NOT reset, so they keep their drained
 * positions — confirm this is intended.
 */
int
dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
		unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
		char *text_hdr, uint32 sec_type)
{
	uint32 rlen = 0;
	uint32 data_len = 0, total_len = 0;
	void *data = NULL;
	unsigned long fpos_sechdr = 0;	/* file offset of the section header */
	unsigned long flags = 0;
	int ret = 0;
	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;

	if (!dhdp || !ring || !file || !sec_hdr ||
		!file_posn || !text_hdr)
		return BCME_BADARG;

	/* do not allow further writes to the ring
	 * till we flush it
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_SUSPEND;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	if (dhdp->concise_dbg_buf) {
		/* re-use concise debug buffer temporarily
		 * to pull ring data, to write
		 * record by record to file
		 */
		data_len = CONCISE_DUMP_BUFLEN;
		data = dhdp->concise_dbg_buf;
		dhd_os_write_file_posn(file, file_posn, text_hdr,
				strlen(text_hdr));
		/* write the section header now with zero length,
		 * once the correct length is found out, update
		 * it later
		 */
		dhd_init_sec_hdr(sec_hdr);
		fpos_sechdr = *file_posn;
		sec_hdr->type = sec_type;
		sec_hdr->length = 0;
		dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
				sizeof(*sec_hdr));
		do {
			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
			if (rlen > 0) {
				/* write the log */
				ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
				if (ret < 0) {
					DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
					DHD_DBG_RING_LOCK(ring->lock, flags);
					ring->state = RING_ACTIVE;
					DHD_DBG_RING_UNLOCK(ring->lock, flags);
					return BCME_ERROR;
				}
			}
			total_len += rlen;
		} while (rlen > 0);
		/* now update the section header length in the file */
		sec_hdr->length = total_len;
		dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
	} else {
		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
	}

	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	/* Resetting both read and write pointer,
	 * since all items are read.
	 */
	ring->rp = ring->wp = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);
	return BCME_OK;
}
8353
/* logdump cookie */
#define MAX_LOGUDMP_COOKIE_CNT 10u	/* max cookie entries held in the ring */
#define LOGDUMP_COOKIE_STR_LEN 50u	/* fixed slot size per cookie string (scnprintf bound) */
8357 int
8358 dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
8359 {
8360 uint32 ring_size;
8361
8362 if (!dhdp || !buf) {
8363 DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
8364 return BCME_ERROR;
8365 }
8366
8367 ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
8368 if (buf_size < ring_size) {
8369 DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
8370 ring_size, buf_size));
8371 return BCME_ERROR;
8372 }
8373
8374 dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
8375 LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
8376 DHD_RING_TYPE_FIXED);
8377 if (!dhdp->logdump_cookie) {
8378 DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
8379 return BCME_ERROR;
8380 }
8381
8382 return BCME_OK;
8383 }
8384
8385 void
8386 dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
8387 {
8388 if (!dhdp) {
8389 return;
8390 }
8391 if (dhdp->logdump_cookie) {
8392 dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
8393 }
8394
8395 return;
8396 }
8397
8398 #ifdef DHD_TX_PROFILE
8399 int
8400 dhd_tx_profile_detach(dhd_pub_t *dhdp)
8401 {
8402 int result = BCME_ERROR;
8403
8404 if (dhdp != NULL && dhdp->protocol_filters != NULL) {
8405 MFREE(dhdp->osh, dhdp->protocol_filters, DHD_MAX_PROFILES *
8406 sizeof(*(dhdp->protocol_filters)));
8407 dhdp->protocol_filters = NULL;
8408
8409 result = BCME_OK;
8410 }
8411
8412 return result;
8413 }
8414
8415 int
8416 dhd_tx_profile_attach(dhd_pub_t *dhdp)
8417 {
8418 int result = BCME_ERROR;
8419
8420 if (dhdp != NULL) {
8421 dhdp->protocol_filters = (dhd_tx_profile_protocol_t*)MALLOCZ(dhdp->osh,
8422 DHD_MAX_PROFILES * sizeof(*(dhdp->protocol_filters)));
8423
8424 if (dhdp->protocol_filters != NULL) {
8425 result = BCME_OK;
8426 }
8427 }
8428
8429 if (result != BCME_OK) {
8430 DHD_ERROR(("%s:\tMALLOC of tx profile protocol filters failed\n",
8431 __FUNCTION__));
8432 }
8433
8434 return result;
8435 }
8436 #endif /* defined(DHD_TX_PROFILE) */
8437
8438 void
8439 dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
8440 {
8441 char *ptr;
8442
8443 if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
8444 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
8445 " type = %p, cookie_cfg:%p\n", __FUNCTION__,
8446 dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
8447 return;
8448 }
8449 ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
8450 if (ptr == NULL) {
8451 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
8452 return;
8453 }
8454 scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
8455 return;
8456 }
8457
8458 int
8459 dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
8460 {
8461 char *ptr;
8462
8463 if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
8464 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
8465 "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
8466 dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
8467 return BCME_ERROR;
8468 }
8469 ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
8470 if (ptr == NULL) {
8471 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
8472 return BCME_ERROR;
8473 }
8474 memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
8475 dhd_ring_free_first(dhdp->logdump_cookie);
8476 return BCME_OK;
8477 }
8478
8479 int
8480 dhd_logdump_cookie_count(dhd_pub_t *dhdp)
8481 {
8482 if (!dhdp || !dhdp->logdump_cookie) {
8483 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
8484 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
8485 return 0;
8486 }
8487 return dhd_ring_get_cur_size(dhdp->logdump_cookie);
8488 }
8489
/*
 * Serialize all stored logdump cookies into fp/user_buf at *f_pos.
 *
 * Pass 1: drain the cookie ring into caller-supplied scratch 'buf'
 * (buf_size bytes) to collect the payload and learn its length, then
 * restore the ring's saved read/write indices so the ring contents are
 * left unchanged. Pass 2: write COOKIE_LOG_HDR, a section header whose
 * length is the collected byte count, then the payload.
 *
 * Returns a negative error from dhd_logdump_cookie_get() or a write
 * failure, otherwise the result of the final dhd_export_debug_data().
 */
static inline int
__dhd_log_dump_cookie_to_file(
	dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
	char *buf, uint32 buf_size)
{

	uint32 remain = buf_size;	/* unused bytes left in buf */
	int ret = BCME_ERROR;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	uint32 read_idx;
	uint32 write_idx;

	/* remember ring position; cookie_get() below consumes entries */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			return ret;
		}
		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
	}
	/* put the ring back exactly where it was before draining */
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);

	ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
		return ret;
	}
	sec_hdr.magic = LOG_DUMP_MAGIC;
	sec_hdr.timestamp = local_clock();
	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
	sec_hdr.length = buf_size - remain;	/* bytes actually collected */

	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
		return ret;
	}

	ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
	}

	return ret;
}
8539
8540 uint32
8541 dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
8542 {
8543 int len = 0;
8544 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
8545 log_dump_section_hdr_t sec_hdr;
8546 char *buf = NULL;
8547 int ret = BCME_ERROR;
8548 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
8549 uint32 read_idx;
8550 uint32 write_idx;
8551 uint32 remain;
8552
8553 remain = buf_size;
8554
8555 if (!dhdp || !dhdp->logdump_cookie) {
8556 DHD_ERROR(("%s At least one ptr is NULL "
8557 "dhdp = %p cookie %p\n",
8558 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
8559 goto exit;
8560 }
8561
8562 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
8563 if (!buf) {
8564 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
8565 goto exit;
8566 }
8567
8568 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
8569 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
8570 while (dhd_logdump_cookie_count(dhdp) > 0) {
8571 memset(tmp_buf, 0, sizeof(tmp_buf));
8572 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
8573 if (ret != BCME_OK) {
8574 goto exit;
8575 }
8576 remain -= (uint32)strlen(tmp_buf);
8577 }
8578 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
8579 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
8580 len += strlen(COOKIE_LOG_HDR);
8581 len += sizeof(sec_hdr);
8582 len += (buf_size - remain);
8583 exit:
8584 if (buf)
8585 MFREE(dhdp->osh, buf, buf_size);
8586 return len;
8587 }
8588
8589 int
8590 dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
8591 {
8592 int ret = BCME_ERROR;
8593 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
8594 log_dump_section_hdr_t sec_hdr;
8595 char *buf = NULL;
8596 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
8597 int pos = 0;
8598 uint32 read_idx;
8599 uint32 write_idx;
8600 uint32 remain;
8601
8602 remain = buf_size;
8603
8604 if (!dhdp || !dhdp->logdump_cookie) {
8605 DHD_ERROR(("%s At least one ptr is NULL "
8606 "dhdp = %p cookie %p\n",
8607 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
8608 goto exit;
8609 }
8610
8611 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
8612 if (!buf) {
8613 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
8614 goto exit;
8615 }
8616
8617 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
8618 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
8619 while (dhd_logdump_cookie_count(dhdp) > 0) {
8620 memset(tmp_buf, 0, sizeof(tmp_buf));
8621 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
8622 if (ret != BCME_OK) {
8623 goto exit;
8624 }
8625 remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
8626 }
8627 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
8628 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
8629 ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
8630 sec_hdr.magic = LOG_DUMP_MAGIC;
8631 sec_hdr.timestamp = local_clock();
8632 sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
8633 sec_hdr.length = buf_size - remain;
8634 ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
8635 ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
8636 exit:
8637 if (buf)
8638 MFREE(dhdp->osh, buf, buf_size);
8639 return ret;
8640 }
8641
8642 int
8643 dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
8644 {
8645 char *buf;
8646 int ret = BCME_ERROR;
8647 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
8648
8649 if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
8650 DHD_ERROR(("%s At least one ptr is NULL "
8651 "dhdp = %p cookie %p fp = %p f_pos = %p\n",
8652 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
8653 return ret;
8654 }
8655
8656 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
8657 if (!buf) {
8658 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
8659 return ret;
8660 }
8661 ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
8662 MFREE(dhdp->osh, buf, buf_size);
8663
8664 return ret;
8665 }
8666 #endif /* DHD_LOG_DUMP */
8667
8668 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
8669 int
8670 dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
8671 {
8672 int ret = BCME_OK;
8673 bcm_xtlv_t *pxtlv = NULL;
8674 uint8 mybuf[DHD_IOVAR_BUF_SIZE];
8675 uint16 mybuf_len = sizeof(mybuf);
8676 pxtlv = (bcm_xtlv_t *)mybuf;
8677
8678 ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
8679 &he_enab, BCM_XTLV_OPTION_ALIGN32);
8680
8681 if (ret != BCME_OK) {
8682 ret = -EINVAL;
8683 DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
8684 return ret;
8685 }
8686
8687 ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
8688 if (ret < 0) {
8689 DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
8690 __FUNCTION__, he_enab, bcmerrorstr(ret)));
8691 } else {
8692 DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
8693 }
8694
8695 return ret;
8696 }
8697 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
8698
8699 #ifdef CONFIG_ROAM_RSSI_LIMIT
8700 int
8701 dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g)
8702 {
8703 wlc_roam_rssi_limit_t *plmt;
8704 wlc_roam_rssi_lmt_info_v1_t *pinfo;
8705 int ret = BCME_OK;
8706 int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
8707
8708 plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
8709 if (!plmt) {
8710 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
8711 return BCME_NOMEM;
8712 }
8713
8714 /* Get roam rssi limit */
8715 ret = dhd_iovar(dhd, 0, "roam_rssi_limit", NULL, 0, (char *)plmt, plmt_len, FALSE);
8716 if (ret < 0) {
8717 DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
8718 goto done;
8719 }
8720
8721 if (plmt->ver != WLC_ROAM_RSSI_LMT_VER_1) {
8722 ret = BCME_VERSION;
8723 goto done;
8724 }
8725
8726 pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
8727 *lmt2g = (int)pinfo->rssi_limit_2g;
8728 *lmt5g = (int)pinfo->rssi_limit_5g;
8729
8730 done:
8731 if (plmt) {
8732 MFREE(dhd->osh, plmt, plmt_len);
8733 }
8734 return ret;
8735 }
8736
8737 int
8738 dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g)
8739 {
8740 wlc_roam_rssi_limit_t *plmt;
8741 wlc_roam_rssi_lmt_info_v1_t *pinfo;
8742 int ret = BCME_OK;
8743 int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
8744
8745 /* Sanity check RSSI limit Value */
8746 if ((lmt2g < ROAMRSSI_2G_MIN) || (lmt2g > ROAMRSSI_2G_MAX)) {
8747 DHD_ERROR(("%s Not In Range 2G ROAM RSSI Limit\n", __FUNCTION__));
8748 return BCME_RANGE;
8749 }
8750 if ((lmt2g < ROAMRSSI_5G_MIN) || (lmt2g > ROAMRSSI_5G_MAX)) {
8751 DHD_ERROR(("%s Not In Range 5G ROAM RSSI Limit\n", __FUNCTION__));
8752 return BCME_RANGE;
8753 }
8754
8755 plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
8756 if (!plmt) {
8757 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
8758 return BCME_NOMEM;
8759 }
8760 plmt->ver = WLC_ROAM_RSSI_LMT_VER_1;
8761 plmt->len = sizeof(*pinfo);
8762 pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
8763 pinfo->rssi_limit_2g = (int16)lmt2g;
8764 pinfo->rssi_limit_5g = (int16)lmt5g;
8765
8766 /* Set roam rssi limit */
8767 ret = dhd_iovar(dhd, 0, "roam_rssi_limit", (char *)plmt, plmt_len, NULL, 0, TRUE);
8768 if (ret < 0) {
8769 DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
8770 goto done;
8771 }
8772 done:
8773 if (plmt) {
8774 MFREE(dhd->osh, plmt, plmt_len);
8775 }
8776 return ret;
8777 }
8778 #endif /* CONFIG_ROAM_RSSI_LIMIT */
8779
/*
 * Issue a named iovar (WLC_SET_VAR / WLC_GET_VAR) to the dongle on
 * interface 'ifidx'.
 *
 * set == TRUE:  param_buf/param_len is the value to set. res_buf must
 *               be NULL and res_len 0 (enforced). A temporary buffer
 *               of input_len ("name<NUL>params") is built and sent.
 * set == FALSE: the response is returned in res_buf (res_len bytes).
 *               When res_buf is too small to also carry the request,
 *               a temporary input_len buffer is used and the first
 *               res_len bytes of the response are copied out;
 *               otherwise res_buf doubles as the request buffer.
 *
 * Returns a BCME_ error code; BCME_BADARG when either length exceeds
 * WLC_IOCTL_MAXLEN or the set/get argument combination is invalid.
 */
int
dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
		uint res_len, bool set)
{
	char *buf = NULL;
	uint input_len;	/* strlen(name) + NUL + param_len */
	wl_ioctl_t ioc;
	int ret;

	if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
		return BCME_BADARG;

	input_len = strlen(name) + 1 + param_len;
	if (input_len > WLC_IOCTL_MAXLEN)
		return BCME_BADARG;

	buf = NULL;
	if (set) {
		if (res_buf || res_len != 0) {
			DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
			ret = BCME_BADARG;
			goto exit;
		}
		buf = MALLOCZ(pub->osh, input_len);
		if (!buf) {
			DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
			ret = BCME_NOMEM;
			goto exit;
		}
		/* bcm_mkiovar returns 0 when the buffer is too small */
		ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
		if (!ret) {
			ret = BCME_NOMEM;
			goto exit;
		}

		ioc.cmd = WLC_SET_VAR;
		ioc.buf = buf;
		ioc.len = input_len;
		ioc.set = set;

		ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
	} else {
		if (!res_buf || !res_len) {
			DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
			ret = BCME_BADARG;
			goto exit;
		}

		if (res_len < input_len) {
			/* response buffer cannot hold the request too:
			 * stage the request in a temporary buffer
			 */
			DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
				res_len, input_len));
			buf = MALLOCZ(pub->osh, input_len);
			if (!buf) {
				DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
				ret = BCME_NOMEM;
				goto exit;
			}
			/* bcm_mkiovar returns 0 when the buffer is too small */
			ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
			if (!ret) {
				ret = BCME_NOMEM;
				goto exit;
			}

			ioc.cmd = WLC_GET_VAR;
			ioc.buf = buf;
			ioc.len = input_len;
			ioc.set = set;

			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);

			/* hand back only what the caller has room for */
			if (ret == BCME_OK) {
				memcpy(res_buf, buf, res_len);
			}
		} else {
			/* res_buf is large enough to serve as the
			 * request buffer as well
			 */
			memset(res_buf, 0, res_len);
			ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
			if (!ret) {
				ret = BCME_NOMEM;
				goto exit;
			}

			ioc.cmd = WLC_GET_VAR;
			ioc.buf = res_buf;
			ioc.len = res_len;
			ioc.set = set;

			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
		}
	}
exit:
	if (buf) {
		MFREE(pub->osh, buf, input_len);
	}
	return ret;
}