2 * Broadcom Dongle Host Driver (DHD), common DHD core.
4 * Copyright (C) 2020, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
21 * <<Broadcom-WL-IPTag/Open:>>
30 #include <bcmstdlib_s.h>
32 #include <bcmendian.h>
33 #include <dngl_stats.h>
38 #ifdef DHD_SDTC_ETB_DUMP
40 #endif /* DHD_SDTC_ETB_DUMP */
42 #ifdef PCIE_FULL_DONGLE
43 #include <bcmmsgbuf.h>
44 #endif /* PCIE_FULL_DONGLE */
47 #include <event_log.h>
48 #endif /* SHOW_LOGTRACE */
51 #include <dhd_flowring.h>
55 #include <dhd_proto.h>
59 #include <dhd_debug.h>
60 #include <dhd_dbg_ring.h>
61 #include <dhd_mschdbg.h>
63 #include <dhd_config.h>
64 #include <wl_android.h>
67 #include <wl_cfg80211.h>
69 #if defined(PNO_SUPPORT)
71 #endif /* (OEM_ANDROID) && (PNO_SUPPORT) */
76 #ifdef DNGL_EVENT_SUPPORT
77 #include <dnglevent.h>
84 #define htodchanspec(i) (i)
85 #define dtohchanspec(i) (i)
88 #include <wlfc_proto.h>
92 #if defined(__linux__)
93 #include <dhd_linux.h>
94 #endif /* __linux__ */
97 #include <dhd_l2_filter.h>
98 #endif /* DHD_L2_FILTER */
101 #include <dhd_psta.h>
102 #endif /* DHD_PSTA */
110 #ifdef DHD_PKT_LOGGING
111 #include <dhd_pktlog.h>
113 #endif /* DHD_LOG_DUMP */
115 #ifdef DHD_LOG_PRINT_RATE_LIMIT
116 int log_print_threshold
= 0;
117 #endif /* DHD_LOG_PRINT_RATE_LIMIT */
119 /* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
120 int dhd_msg_level
= DHD_ERROR_VAL
| DHD_FWLOG_VAL
;
121 /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
125 #endif /* DHD_DEBUG */
127 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
128 #include <linux/pm_runtime.h>
129 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
133 #endif /* CSI_SUPPORT */
136 char fw_path2
[MOD_PARAM_PATHLEN
];
137 extern bool softap_enabled
;
141 #define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */
142 #define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
143 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
144 static char *ramstart_str
= " text_start"; /* string in mapfile has addr ramstart */
145 static char *rodata_start_str
= " rodata_start"; /* string in mapfile has addr rodata start */
146 static char *rodata_end_str
= " rodata_end"; /* string in mapfile has addr rodata end */
147 #define RAMSTART_BIT 0x01
148 #define RDSTART_BIT 0x02
149 #define RDEND_BIT 0x04
150 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
151 #endif /* SHOW_LOGTRACE */
154 /* the fw file path is taken from either the module parameter at
155 * insmod time or is defined as a constant of different values
156 * for different platforms
158 extern char *st_str_file_path
;
159 #endif /* SHOW_LOGTRACE */
162 typedef struct msg_hdr_edl
{
164 info_buf_payload_hdr_t pyld_hdr
;
165 msgtrace_hdr_t trace_hdr
;
169 #define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
171 /* Last connection success/failure status */
172 uint32 dhd_conn_event
;
173 uint32 dhd_conn_status
;
174 uint32 dhd_conn_reason
;
176 extern int dhd_iscan_request(void * dhdp
, uint16 action
);
177 extern void dhd_ind_scan_confirm(void *h
, bool status
);
178 extern int dhd_iscan_in_progress(void *h
);
179 void dhd_iscan_lock(void);
180 void dhd_iscan_unlock(void);
181 extern int dhd_change_mtu(dhd_pub_t
*dhd
, int new_mtu
, int ifidx
);
182 #if !defined(AP) && defined(WLP2P)
183 extern int dhd_get_concurrent_capabilites(dhd_pub_t
*dhd
);
186 extern int dhd_socram_dump(struct dhd_bus
*bus
);
187 extern void dhd_set_packet_filter(dhd_pub_t
*dhd
);
189 #ifdef DNGL_EVENT_SUPPORT
190 static void dngl_host_event_process(dhd_pub_t
*dhdp
, bcm_dngl_event_t
*event
,
191 bcm_dngl_event_msg_t
*dngl_event
, size_t pktlen
);
192 static int dngl_host_event(dhd_pub_t
*dhdp
, void *pktdata
, bcm_dngl_event_msg_t
*dngl_event
,
194 #endif /* DNGL_EVENT_SUPPORT */
196 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
197 static void copy_hang_info_ioctl_timeout(dhd_pub_t
*dhd
, int ifidx
, wl_ioctl_t
*ioc
);
198 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
200 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
201 #define MAX_IOCTL_SUSPEND_ERROR 10
202 static int ioctl_suspend_error
= 0;
203 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
205 /* Should ideally read this from target(taken from wlu) */
206 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
208 /* note these variables will be used with wext */
209 bool ap_cfg_running
= FALSE
;
210 bool ap_fw_loaded
= FALSE
;
212 #define CHIPID_MISMATCH 8
214 #define DHD_VERSION "\nDongle Host Driver, version " EPI_VERSION_STR "\n"
216 #if defined(DHD_DEBUG) && defined(DHD_COMPILED)
217 const char dhd_version
[] = DHD_VERSION DHD_COMPILED
" compiled on "
218 __DATE__
" at " __TIME__
"\n\0<TIMESTAMP>";
220 const char dhd_version
[] = DHD_VERSION
;
221 #endif /* DHD_DEBUG && DHD_COMPILED */
223 char fw_version
[FW_VER_STR_LEN
] = "\0";
224 char clm_version
[CLM_VER_STR_LEN
] = "\0";
226 char bus_api_revision
[BUS_API_REV_STR_LEN
] = "\0";
228 void dhd_set_timer(void *bus
, uint wdtick
);
230 static char* ioctl2str(uint32 ioctl
);
249 #if defined(DHD_DEBUG)
250 IOV_DHD_JOIN_TIMEOUT_DBG
,
256 #endif /* defined(DHD_DEBUG) */
258 IOV_PROPTXSTATUS_ENABLE
,
259 IOV_PROPTXSTATUS_MODE
,
260 IOV_PROPTXSTATUS_OPT
,
261 IOV_PROPTXSTATUS_MODULE_IGNORE
,
262 IOV_PROPTXSTATUS_CREDIT_IGNORE
,
263 IOV_PROPTXSTATUS_TXSTATUS_IGNORE
,
264 IOV_PROPTXSTATUS_RXPKT_CHK
,
265 #endif /* PROP_TXSTATUS */
268 IOV_HOSTREORDER_FLOWS
,
269 #ifdef DHDTCPACK_SUPPRESS
271 #endif /* DHDTCPACK_SUPPRESS */
279 #endif /* DHD_L2_FILTER */
283 #endif /* DHD_PSTA */
292 #ifdef DHD_MCAST_REGEN
293 IOV_MCAST_REGEN_BSS_ENABLE
,
297 #endif /* SHOW_LOGTRACE */
298 IOV_DONGLE_TRAP_TYPE
,
299 IOV_DONGLE_TRAP_INFO
,
301 IOV_DUMP_DONGLE
, /**< dumps core registers and d11 memories */
302 #if defined(DHD_LOG_DUMP)
304 #endif /* DHD_LOG_DUMP */
306 IOV_DEBUG_BUF_DEST_STAT
,
309 #endif /* DHD_DEBUG */
311 #ifdef RTT_GEOFENCE_CONT
312 #if defined(RTT_SUPPORT) && defined(WL_NAN)
313 IOV_RTT_GEOFENCE_TYPE_OVRD
,
314 #endif /* RTT_SUPPORT && WL_NAN */
315 #endif /* RTT_GEOFENCE_CONT */
317 #ifdef DHD_TX_PROFILE
319 IOV_TX_PROFILE_ENABLE
,
321 #endif /* defined(DHD_TX_PROFILE) */
325 const bcm_iovar_t dhd_iovars
[] = {
326 /* name varid flags flags2 type minlen */
327 {"version", IOV_VERSION
, 0, 0, IOVT_BUFFER
, 0},
328 {"wlmsglevel", IOV_WLMSGLEVEL
, 0, 0, IOVT_UINT32
, 0 },
330 {"msglevel", IOV_MSGLEVEL
, 0, 0, IOVT_UINT32
, 0},
331 {"mem_debug", IOV_MEM_DEBUG
, 0, 0, IOVT_BUFFER
, 0 },
333 {"flow_ring_debug", IOV_FLOW_RING_DEBUG
, 0, 0, IOVT_BUFFER
, 0 },
335 #endif /* DHD_DEBUG */
336 {"bcmerrorstr", IOV_BCMERRORSTR
, 0, 0, IOVT_BUFFER
, BCME_STRLEN
},
337 {"bcmerror", IOV_BCMERROR
, 0, 0, IOVT_INT8
, 0},
338 {"wdtick", IOV_WDTICK
, 0, 0, IOVT_UINT32
, 0},
339 {"dump", IOV_DUMP
, 0, 0, IOVT_BUFFER
, DHD_IOCTL_MAXLEN
},
340 {"cons", IOV_CONS
, 0, 0, IOVT_BUFFER
, 0},
341 {"dconpoll", IOV_DCONSOLE_POLL
, 0, 0, IOVT_UINT32
, 0},
342 {"clearcounts", IOV_CLEARCOUNTS
, 0, 0, IOVT_VOID
, 0},
343 {"gpioob", IOV_GPIOOB
, 0, 0, IOVT_UINT32
, 0},
344 {"ioctl_timeout", IOV_IOCTLTIMEOUT
, 0, 0, IOVT_UINT32
, 0},
346 {"proptx", IOV_PROPTXSTATUS_ENABLE
, 0, 0, IOVT_BOOL
, 0 },
348 set the proptxtstatus operation mode:
349 0 - Do not do any proptxtstatus flow control
350 1 - Use implied credit from a packet status
351 2 - Use explicit credit
353 {"ptxmode", IOV_PROPTXSTATUS_MODE
, 0, 0, IOVT_UINT32
, 0 },
354 {"proptx_opt", IOV_PROPTXSTATUS_OPT
, 0, 0, IOVT_UINT32
, 0 },
355 {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE
, 0, 0, IOVT_BOOL
, 0 },
356 {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE
, 0, 0, IOVT_BOOL
, 0 },
357 {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE
, 0, 0, IOVT_BOOL
, 0 },
358 {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK
, 0, 0, IOVT_BOOL
, 0 },
359 #endif /* PROP_TXSTATUS */
360 {"bustype", IOV_BUS_TYPE
, 0, 0, IOVT_UINT32
, 0},
361 {"changemtu", IOV_CHANGEMTU
, 0, 0, IOVT_UINT32
, 0 },
362 {"host_reorder_flows", IOV_HOSTREORDER_FLOWS
, 0, 0, IOVT_BUFFER
,
363 (WLHOST_REORDERDATA_MAXFLOWS
+ 1) },
364 #ifdef DHDTCPACK_SUPPRESS
365 {"tcpack_suppress", IOV_TCPACK_SUPPRESS
, 0, 0, IOVT_UINT8
, 0 },
366 #endif /* DHDTCPACK_SUPPRESS */
368 {"dhcp_unicast", IOV_DHCP_UNICAST
, (0), 0, IOVT_BOOL
, 0 },
369 #endif /* DHD_L2_FILTER */
370 {"ap_isolate", IOV_AP_ISOLATE
, (0), 0, IOVT_BOOL
, 0},
372 {"block_ping", IOV_BLOCK_PING
, (0), 0, IOVT_BOOL
, 0},
373 {"proxy_arp", IOV_PROXY_ARP
, (0), 0, IOVT_BOOL
, 0},
374 {"grat_arp", IOV_GRAT_ARP
, (0), 0, IOVT_BOOL
, 0},
375 {"block_tdls", IOV_BLOCK_TDLS
, (0), IOVT_BOOL
, 0},
376 #endif /* DHD_L2_FILTER */
377 {"dhd_ie", IOV_DHD_IE
, (0), 0, IOVT_BUFFER
, 0},
379 /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */
380 {"psta", IOV_PSTA
, 0, 0, IOVT_UINT32
, 0},
381 #endif /* DHD PSTA */
383 /* WET Mode configuration. 0: DIABLED 1: WET */
384 {"wet", IOV_WET
, 0, 0, IOVT_UINT32
, 0},
385 {"wet_host_ipv4", IOV_WET_HOST_IPV4
, 0, 0, IOVT_UINT32
, 0},
386 {"wet_host_mac", IOV_WET_HOST_MAC
, 0, 0, IOVT_BUFFER
, 0},
388 {"op_mode", IOV_CFG80211_OPMODE
, 0, 0, IOVT_UINT32
, 0 },
389 {"assert_type", IOV_ASSERT_TYPE
, (0), 0, IOVT_UINT32
, 0},
390 {"lmtest", IOV_LMTEST
, 0, 0, IOVT_UINT32
, 0 },
391 #ifdef DHD_MCAST_REGEN
392 {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE
, 0, 0, IOVT_BOOL
, 0},
395 {"dump_trace_buf", IOV_DUMP_TRACE_LOG
, 0, 0, IOVT_BUFFER
, sizeof(trace_buf_info_t
) },
396 #endif /* SHOW_LOGTRACE */
397 {"trap_type", IOV_DONGLE_TRAP_TYPE
, 0, 0, IOVT_UINT32
, 0 },
398 {"trap_info", IOV_DONGLE_TRAP_INFO
, 0, 0, IOVT_BUFFER
, sizeof(trap_t
) },
400 {"bpaddr", IOV_BPADDR
, 0, 0, IOVT_BUFFER
, sizeof(sdreg_t
) },
401 #endif /* DHD_DEBUG */
402 {"dump_dongle", IOV_DUMP_DONGLE
, 0, 0, IOVT_BUFFER
,
403 MAX(sizeof(dump_dongle_in_t
), sizeof(dump_dongle_out_t
)) },
404 #if defined(DHD_LOG_DUMP)
405 {"log_dump", IOV_LOG_DUMP
, 0, 0, IOVT_UINT8
, 0},
406 #endif /* DHD_LOG_DUMP */
407 {"tput_test", IOV_TPUT_TEST
, 0, 0, IOVT_BUFFER
, sizeof(tput_test_t
)},
408 {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT
, 0, 0, IOVT_UINT32
, 0 },
409 #if defined(DHD_SSSR_DUMP)
410 {"fis_trigger", IOV_FIS_TRIGGER
, 0, 0, IOVT_UINT32
, 0},
413 {"induce_error", IOV_INDUCE_ERROR
, (0), 0, IOVT_UINT16
, 0 },
414 #endif /* DHD_DEBUG */
415 #ifdef RTT_GEOFENCE_CONT
416 #if defined(RTT_SUPPORT) && defined(WL_NAN)
417 {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD
, (0), 0, IOVT_BOOL
, 0},
418 #endif /* RTT_SUPPORT && WL_NAN */
419 #endif /* RTT_GEOFENCE_CONT */
420 {"fw_verbose", IOV_FW_VBS
, 0, 0, IOVT_UINT32
, 0},
421 #ifdef DHD_TX_PROFILE
422 {"tx_profile_tag", IOV_TX_PROFILE_TAG
, 0, 0, IOVT_BUFFER
,
423 sizeof(dhd_tx_profile_protocol_t
)},
424 {"tx_profile_enable", IOV_TX_PROFILE_ENABLE
, 0, 0, IOVT_BOOL
, 0},
425 {"tx_profile_dump", IOV_TX_PROFILE_DUMP
, 0, 0, IOVT_UINT32
, 0},
426 #endif /* defined(DHD_TX_PROFILE) */
427 /* --- add new iovars *ABOVE* this line --- */
428 {NULL
, 0, 0, 0, 0, 0 }
431 #define DHD_IOVAR_BUF_SIZE 128
434 dhd_query_bus_erros(dhd_pub_t
*dhdp
)
438 if (dhdp
->dongle_reset
) {
439 DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
444 if (dhdp
->dongle_trap_occured
) {
445 DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
448 dhdp
->hang_reason
= HANG_REASON_DONGLE_TRAP
;
449 dhd_os_send_hang_message(dhdp
);
452 if (dhdp
->iovar_timeout_occured
) {
453 DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
458 #ifdef PCIE_FULL_DONGLE
459 if (dhdp
->d3ack_timeout_occured
) {
460 DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
464 if (dhdp
->livelock_occured
) {
465 DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
470 if (dhdp
->pktid_audit_failed
) {
471 DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
475 #endif /* PCIE_FULL_DONGLE */
477 if (dhdp
->iface_op_failed
) {
478 DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
483 if (dhdp
->scan_timeout_occurred
) {
484 DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
489 if (dhdp
->scan_busy_occurred
) {
490 DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
495 #ifdef DNGL_AXI_ERROR_LOGGING
496 if (dhdp
->axi_error
) {
497 DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
501 #endif /* DNGL_AXI_ERROR_LOGGING */
503 if (dhd_bus_get_linkdown(dhdp
)) {
504 DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
509 if (dhd_bus_get_cto(dhdp
)) {
510 DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
519 dhd_clear_bus_errors(dhd_pub_t
*dhdp
)
524 dhdp
->dongle_reset
= FALSE
;
525 dhdp
->dongle_trap_occured
= FALSE
;
526 dhdp
->iovar_timeout_occured
= FALSE
;
527 #ifdef PCIE_FULL_DONGLE
528 dhdp
->d3ack_timeout_occured
= FALSE
;
529 dhdp
->livelock_occured
= FALSE
;
530 dhdp
->pktid_audit_failed
= FALSE
;
532 dhdp
->iface_op_failed
= FALSE
;
533 dhdp
->scan_timeout_occurred
= FALSE
;
534 dhdp
->scan_busy_occurred
= FALSE
;
539 /* This can be overwritten by module parameter defined in dhd_linux.c */
540 uint sssr_enab
= TRUE
;
543 uint fis_enab
= TRUE
;
545 uint fis_enab
= FALSE
;
546 #endif /* DHD_FIS_DUMP */
549 dhd_sssr_mempool_init(dhd_pub_t
*dhd
)
551 dhd
->sssr_mempool
= (uint8
*) MALLOCZ(dhd
->osh
, DHD_SSSR_MEMPOOL_SIZE
);
552 if (dhd
->sssr_mempool
== NULL
) {
553 DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
561 dhd_sssr_mempool_deinit(dhd_pub_t
*dhd
)
563 if (dhd
->sssr_mempool
) {
564 MFREE(dhd
->osh
, dhd
->sssr_mempool
, DHD_SSSR_MEMPOOL_SIZE
);
565 dhd
->sssr_mempool
= NULL
;
570 dhd_sssr_reg_info_init(dhd_pub_t
*dhd
)
572 dhd
->sssr_reg_info
= (sssr_reg_info_cmn_t
*) MALLOCZ(dhd
->osh
, sizeof(sssr_reg_info_cmn_t
));
573 if (dhd
->sssr_reg_info
== NULL
) {
574 DHD_ERROR(("%s: MALLOC of sssr_reg_info failed\n",
582 dhd_sssr_reg_info_deinit(dhd_pub_t
*dhd
)
584 if (dhd
->sssr_reg_info
) {
585 MFREE(dhd
->osh
, dhd
->sssr_reg_info
, sizeof(sssr_reg_info_cmn_t
));
586 dhd
->sssr_reg_info
= NULL
;
591 dhd_dump_sssr_reg_info(dhd_pub_t
*dhd
)
596 dhd_get_sssr_reg_info(dhd_pub_t
*dhd
)
599 /* get sssr_reg_info from firmware */
600 ret
= dhd_iovar(dhd
, 0, "sssr_reg_info", NULL
, 0, (char *)dhd
->sssr_reg_info
,
601 sizeof(sssr_reg_info_cmn_t
), FALSE
);
603 DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
608 dhd_dump_sssr_reg_info(dhd
);
613 dhd_get_sssr_bufsize(dhd_pub_t
*dhd
)
616 uint32 sssr_bufsize
= 0;
619 num_d11cores
= dhd_d11_slices_num_get(dhd
);
621 switch (dhd
->sssr_reg_info
->rev2
.version
) {
622 case SSSR_REG_INFO_VER_3
:
623 /* intentional fall through */
624 case SSSR_REG_INFO_VER_2
:
625 for (i
= 0; i
< num_d11cores
; i
++) {
626 sssr_bufsize
+= dhd
->sssr_reg_info
->rev2
.mac_regs
[i
].sr_size
;
628 if ((dhd
->sssr_reg_info
->rev2
.length
>
629 OFFSETOF(sssr_reg_info_v2_t
, dig_mem_info
)) &&
630 dhd
->sssr_reg_info
->rev2
.dig_mem_info
.dig_sr_addr
) {
631 sssr_bufsize
+= 0; /* TBD */
634 case SSSR_REG_INFO_VER_1
:
635 for (i
= 0; i
< num_d11cores
; i
++) {
636 sssr_bufsize
+= dhd
->sssr_reg_info
->rev1
.mac_regs
[i
].sr_size
;
638 if (dhd
->sssr_reg_info
->rev1
.vasip_regs
.vasip_sr_size
) {
639 sssr_bufsize
+= dhd
->sssr_reg_info
->rev1
.vasip_regs
.vasip_sr_size
;
640 } else if ((dhd
->sssr_reg_info
->rev1
.length
> OFFSETOF(sssr_reg_info_v1_t
,
641 dig_mem_info
)) && dhd
->sssr_reg_info
->rev1
.
642 dig_mem_info
.dig_sr_addr
) {
643 sssr_bufsize
+= dhd
->sssr_reg_info
->rev1
.dig_mem_info
.dig_sr_size
;
646 case SSSR_REG_INFO_VER_0
:
647 for (i
= 0; i
< num_d11cores
; i
++) {
648 sssr_bufsize
+= dhd
->sssr_reg_info
->rev0
.mac_regs
[i
].sr_size
;
650 if (dhd
->sssr_reg_info
->rev0
.vasip_regs
.vasip_sr_size
) {
651 sssr_bufsize
+= dhd
->sssr_reg_info
->rev0
.vasip_regs
.vasip_sr_size
;
655 DHD_ERROR(("invalid sssr_reg_ver"));
656 return BCME_UNSUPPORTED
;
659 #ifdef DHD_SSSR_DUMP_BEFORE_SR
660 /* Double the size as different dumps will be saved before and after SR */
661 sssr_bufsize
= 2 * sssr_bufsize
;
662 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
668 dhd_sssr_dump_init(dhd_pub_t
*dhd
)
672 uint32 mempool_used
= 0;
673 uint8 num_d11cores
= 0;
674 bool alloc_sssr
= FALSE
;
677 dhd
->sssr_inited
= FALSE
;
679 DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__
));
683 /* check if sssr mempool is allocated */
684 if (dhd
->sssr_mempool
== NULL
) {
685 DHD_ERROR(("%s: sssr_mempool is not allocated\n",
690 /* check if sssr mempool is allocated */
691 if (dhd
->sssr_reg_info
== NULL
) {
692 DHD_ERROR(("%s: sssr_reg_info is not allocated\n",
697 /* Get SSSR reg info */
698 if (dhd_get_sssr_reg_info(dhd
) != BCME_OK
) {
699 DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__
));
700 printf("DEBUG_SSSr: %s: dhd_get_sssr_reg_info failed\n", __FUNCTION__
);
704 num_d11cores
= dhd_d11_slices_num_get(dhd
);
705 /* Validate structure version and length */
706 switch (dhd
->sssr_reg_info
->rev2
.version
) {
707 case SSSR_REG_INFO_VER_3
:
708 if (dhd
->sssr_reg_info
->rev3
.length
!= sizeof(sssr_reg_info_v3_t
)) {
709 DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
710 "mismatch on rev2\n", __FUNCTION__
,
711 (int)dhd
->sssr_reg_info
->rev3
.length
,
712 (int)sizeof(sssr_reg_info_v3_t
)));
716 case SSSR_REG_INFO_VER_2
:
717 if (dhd
->sssr_reg_info
->rev2
.length
!= sizeof(sssr_reg_info_v2_t
)) {
718 DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
719 "mismatch on rev2\n", __FUNCTION__
,
720 (int)dhd
->sssr_reg_info
->rev2
.length
,
721 (int)sizeof(sssr_reg_info_v2_t
)));
725 case SSSR_REG_INFO_VER_1
:
726 if (dhd
->sssr_reg_info
->rev1
.length
!= sizeof(sssr_reg_info_v1_t
)) {
727 DHD_ERROR(("%s: dhd->sssr_reg_info->rev1.length (%d : %d)"
728 "mismatch on rev1\n", __FUNCTION__
,
729 (int)dhd
->sssr_reg_info
->rev1
.length
,
730 (int)sizeof(sssr_reg_info_v1_t
)));
734 case SSSR_REG_INFO_VER_0
:
735 if (dhd
->sssr_reg_info
->rev0
.length
!= sizeof(sssr_reg_info_v0_t
)) {
736 DHD_ERROR(("%s: dhd->sssr_reg_info->rev0.length (%d : %d)"
737 "mismatch on rev0\n", __FUNCTION__
,
738 (int)dhd
->sssr_reg_info
->rev0
.length
,
739 (int)sizeof(sssr_reg_info_v0_t
)));
744 DHD_ERROR(("invalid sssr_reg_ver"));
745 return BCME_UNSUPPORTED
;
748 /* validate fifo size */
749 sssr_bufsize
= dhd_get_sssr_bufsize(dhd
);
750 if (sssr_bufsize
> DHD_SSSR_MEMPOOL_SIZE
) {
751 DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
752 __FUNCTION__
, (int)sssr_bufsize
, DHD_SSSR_MEMPOOL_SIZE
));
756 /* init all pointers to NULL */
757 for (i
= 0; i
< num_d11cores
; i
++) {
758 #ifdef DHD_SSSR_DUMP_BEFORE_SR
759 dhd
->sssr_d11_before
[i
] = NULL
;
760 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
761 dhd
->sssr_d11_after
[i
] = NULL
;
764 #ifdef DHD_SSSR_DUMP_BEFORE_SR
765 dhd
->sssr_dig_buf_before
= NULL
;
766 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
767 dhd
->sssr_dig_buf_after
= NULL
;
769 /* Allocate memory */
770 for (i
= 0; i
< num_d11cores
; i
++) {
774 switch (dhd
->sssr_reg_info
->rev2
.version
) {
775 case SSSR_REG_INFO_VER_3
:
776 /* intentional fall through */
777 case SSSR_REG_INFO_VER_2
:
778 if (dhd
->sssr_reg_info
->rev2
.mac_regs
[i
].sr_size
) {
780 sr_size
= dhd
->sssr_reg_info
->rev2
.mac_regs
[i
].sr_size
;
783 case SSSR_REG_INFO_VER_1
:
784 if (dhd
->sssr_reg_info
->rev1
.mac_regs
[i
].sr_size
) {
786 sr_size
= dhd
->sssr_reg_info
->rev1
.mac_regs
[i
].sr_size
;
789 case SSSR_REG_INFO_VER_0
:
790 if (dhd
->sssr_reg_info
->rev0
.mac_regs
[i
].sr_size
) {
792 sr_size
= dhd
->sssr_reg_info
->rev0
.mac_regs
[i
].sr_size
;
796 DHD_ERROR(("invalid sssr_reg_ver"));
797 return BCME_UNSUPPORTED
;
801 #ifdef DHD_SSSR_DUMP_BEFORE_SR
802 dhd
->sssr_d11_before
[i
] = (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
803 mempool_used
+= sr_size
;
804 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
806 dhd
->sssr_d11_after
[i
] = (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
807 mempool_used
+= sr_size
;
811 /* Allocate dump memory for VASIP (version 0 or 1) or digital core (version 0, 1, or 2) */
814 switch (dhd
->sssr_reg_info
->rev2
.version
) {
815 case SSSR_REG_INFO_VER_3
:
816 /* intentional fall through */
817 case SSSR_REG_INFO_VER_2
:
818 if ((dhd
->sssr_reg_info
->rev2
.length
>
819 OFFSETOF(sssr_reg_info_v2_t
, dig_mem_info
)) &&
820 dhd
->sssr_reg_info
->rev2
.dig_mem_info
.dig_sr_addr
) {
822 sr_size
= dhd
->sssr_reg_info
->rev2
.dig_mem_info
.dig_sr_size
;
825 case SSSR_REG_INFO_VER_1
:
826 if (dhd
->sssr_reg_info
->rev1
.vasip_regs
.vasip_sr_size
) {
828 sr_size
= dhd
->sssr_reg_info
->rev1
.vasip_regs
.vasip_sr_size
;
829 } else if ((dhd
->sssr_reg_info
->rev1
.length
> OFFSETOF(sssr_reg_info_v1_t
,
830 dig_mem_info
)) && dhd
->sssr_reg_info
->rev1
.
831 dig_mem_info
.dig_sr_addr
) {
833 sr_size
= dhd
->sssr_reg_info
->rev1
.dig_mem_info
.dig_sr_size
;
836 case SSSR_REG_INFO_VER_0
:
837 if (dhd
->sssr_reg_info
->rev0
.vasip_regs
.vasip_sr_size
) {
839 sr_size
= dhd
->sssr_reg_info
->rev0
.vasip_regs
.vasip_sr_size
;
843 DHD_ERROR(("invalid sssr_reg_ver"));
844 return BCME_UNSUPPORTED
;
848 dhd
->sssr_dig_buf_after
= (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
849 mempool_used
+= sr_size
;
851 #ifdef DHD_SSSR_DUMP_BEFORE_SR
852 /* DIG dump before suspend is not applicable. */
853 dhd
->sssr_dig_buf_before
= NULL
;
854 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
857 dhd
->sssr_inited
= TRUE
;
864 dhd_sssr_dump_deinit(dhd_pub_t
*dhd
)
869 num_d11cores
= dhd_d11_slices_num_get(dhd
);
871 dhd
->sssr_inited
= FALSE
;
872 /* init all pointers to NULL */
873 for (i
= 0; i
< num_d11cores
; i
++) {
874 #ifdef DHD_SSSR_DUMP_BEFORE_SR
875 dhd
->sssr_d11_before
[i
] = NULL
;
876 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
877 dhd
->sssr_d11_after
[i
] = NULL
;
879 #ifdef DHD_SSSR_DUMP_BEFORE_SR
880 dhd
->sssr_dig_buf_before
= NULL
;
881 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
882 dhd
->sssr_dig_buf_after
= NULL
;
888 dhd_sssr_print_filepath(dhd_pub_t
*dhd
, char *path
)
890 bool print_info
= FALSE
;
894 DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
899 if (!dhd
->sssr_dump_collected
) {
900 /* SSSR dump is not collected */
904 dump_mode
= dhd
->sssr_dump_mode
;
906 if (bcmstrstr(path
, "core_0_before")) {
907 if (dhd
->sssr_d11_outofreset
[0] &&
908 dump_mode
== SSSR_DUMP_MODE_SSSR
) {
911 } else if (bcmstrstr(path
, "core_0_after")) {
912 if (dhd
->sssr_d11_outofreset
[0]) {
915 } else if (bcmstrstr(path
, "core_1_before")) {
916 if (dhd
->sssr_d11_outofreset
[1] &&
917 dump_mode
== SSSR_DUMP_MODE_SSSR
) {
920 } else if (bcmstrstr(path
, "core_1_after")) {
921 if (dhd
->sssr_d11_outofreset
[1]) {
924 } else if (bcmstrstr(path
, "core_2_before")) {
925 if (dhd
->sssr_d11_outofreset
[2] &&
926 dump_mode
== SSSR_DUMP_MODE_SSSR
) {
929 } else if (bcmstrstr(path
, "core_2_after")) {
930 if (dhd
->sssr_d11_outofreset
[2]) {
938 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__
,
939 path
, FILE_NAME_HAL_TAG
));
942 #endif /* DHD_SSSR_DUMP */
944 #ifdef DHD_SDTC_ETB_DUMP
946 * sdtc: system debug trace controller
947 * etb: embedded trace buf
950 dhd_sdtc_etb_init(dhd_pub_t
*dhd
)
952 bcm_iov_buf_t iov_req
;
953 etb_addr_info_t
*p_etb_addr_info
= NULL
;
954 bcm_iov_buf_t
*iov_resp
;
960 BCM_REFERENCE(p_etb_addr_info
);
961 dhd
->sdtc_etb_inited
= FALSE
;
963 buf
= MALLOCZ(dhd
->osh
, WLC_IOCTL_MAXLEN
);
965 DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__
));
970 bzero(&iov_req
, sizeof(iov_req
));
971 iov_req
.version
= WL_SDTC_IOV_VERSION
;
972 iov_req
.id
= WL_SDTC_CMD_ETB_INFO
;
973 iov_req
.len
= sizeof(etb_addr_info_t
);
974 iovlen
= OFFSETOF(bcm_iov_buf_t
, data
) + iov_req
.len
;
976 ret
= dhd_iovar(dhd
, 0, "sdtc", (char *)&iov_req
, iovlen
,
977 (char *)buf
, WLC_IOCTL_MAXLEN
, FALSE
);
979 DHD_ERROR(("%s failed to get sdtc etb_info %d\n", __FUNCTION__
, ret
));
983 version
= dtoh16(*(uint16
*)buf
);
984 /* Check for version */
985 if (version
!= WL_SDTC_IOV_VERSION
) {
986 DHD_ERROR(("%s WL_SDTC_IOV_VERSION mis match\n", __FUNCTION__
));
989 iov_resp
= (bcm_iov_buf_t
*)buf
;
990 if (iov_resp
->id
== iov_req
.id
) {
991 p_etb_addr_info
= (etb_addr_info_t
*)iov_resp
->data
;
992 dhd
->etb_addr_info
.version
= p_etb_addr_info
->version
;
993 dhd
->etb_addr_info
.len
= p_etb_addr_info
->len
;
994 dhd
->etb_addr_info
.etbinfo_addr
= p_etb_addr_info
->etbinfo_addr
;
996 DHD_ERROR(("%s etb_addr_info: ver:%d, len:%d, addr:0x%x\n", __FUNCTION__
,
997 dhd
->etb_addr_info
.version
, dhd
->etb_addr_info
.len
,
998 dhd
->etb_addr_info
.etbinfo_addr
));
1000 DHD_ERROR(("%s Unknown CMD-ID (%d) as response for request ID %d\n",
1001 __FUNCTION__
, iov_resp
->id
, iov_req
.id
));
1005 dhd
->sdtc_etb_mempool
= (uint8
*) MALLOCZ(dhd
->osh
, DHD_SDTC_ETB_MEMPOOL_SIZE
);
1006 if (dhd
->sdtc_etb_mempool
== NULL
) {
1007 DHD_ERROR(("%s: MALLOC of sdtc_etb_mempool failed\n",
1012 /* since all the requirements for SDTC and ETB are met mark the capability as TRUE */
1013 dhd
->sdtc_etb_inited
= TRUE
;
1014 DHD_ERROR(("%s sdtc_etb_inited: %d\n", __FUNCTION__
, dhd
->sdtc_etb_inited
));
1016 MFREE(dhd
->osh
, buf
, WLC_IOCTL_MAXLEN
);
1021 dhd_sdtc_etb_deinit(dhd_pub_t
*dhd
)
1023 dhd
->sdtc_etb_inited
= FALSE
;
1024 if (dhd
->sdtc_etb_mempool
) {
1025 MFREE(dhd
->osh
, dhd
->sdtc_etb_mempool
, DHD_SDTC_ETB_MEMPOOL_SIZE
);
1026 dhd
->sdtc_etb_mempool
= NULL
;
1029 #endif /* DHD_SDTC_ETB_DUMP */
1031 #ifdef DHD_FW_COREDUMP
1032 void* dhd_get_fwdump_buf(dhd_pub_t
*dhd_pub
, uint32 length
)
1034 if (!dhd_pub
->soc_ram
) {
1035 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
1036 dhd_pub
->soc_ram
= (uint8
*)DHD_OS_PREALLOC(dhd_pub
,
1037 DHD_PREALLOC_MEMDUMP_RAM
, length
);
1039 dhd_pub
->soc_ram
= (uint8
*) MALLOC(dhd_pub
->osh
, length
);
1040 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
1043 if (dhd_pub
->soc_ram
== NULL
) {
1044 DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
1046 dhd_pub
->soc_ram_length
= 0;
1048 memset(dhd_pub
->soc_ram
, 0, length
);
1049 dhd_pub
->soc_ram_length
= length
;
1052 /* soc_ram free handled in dhd_{free,clear} */
1053 return dhd_pub
->soc_ram
;
1055 #endif /* DHD_FW_COREDUMP */
1057 /* to NDIS developer, the structure dhd_common is redundant,
1058 * please do NOT merge it back from other branches !!!
1062 dhd_common_socram_dump(dhd_pub_t
*dhdp
)
1067 return dhd_socram_dump(dhdp
->bus
);
1068 #endif /* BCMDBUS */
1072 dhd_dump(dhd_pub_t
*dhdp
, char *buf
, int buflen
)
1075 struct bcmstrbuf
*strbuf
= &b
;
1076 #ifdef DHD_MEM_STATS
1077 uint64 malloc_mem
= 0;
1078 uint64 total_txpath_mem
= 0;
1079 uint64 txpath_bkpq_len
= 0;
1080 uint64 txpath_bkpq_mem
= 0;
1081 uint64 total_dhd_mem
= 0;
1082 #endif /* DHD_MEM_STATS */
1084 if (!dhdp
|| !dhdp
->prot
|| !buf
) {
1088 bcm_binit(strbuf
, buf
, buflen
);
1091 bcm_bprintf(strbuf
, "%s\n", dhd_version
);
1092 bcm_bprintf(strbuf
, "\n");
1093 bcm_bprintf(strbuf
, "pub.up %d pub.txoff %d pub.busstate %d\n",
1094 dhdp
->up
, dhdp
->txoff
, dhdp
->busstate
);
1095 bcm_bprintf(strbuf
, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
1096 dhdp
->hdrlen
, dhdp
->maxctl
, dhdp
->rxsz
);
1097 bcm_bprintf(strbuf
, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG
"\n",
1098 dhdp
->iswl
, dhdp
->drv_version
, MAC2STRDBG(&dhdp
->mac
));
1099 bcm_bprintf(strbuf
, "pub.bcmerror %d tickcnt %u\n", dhdp
->bcmerror
, dhdp
->tickcnt
);
1101 bcm_bprintf(strbuf
, "dongle stats:\n");
1102 bcm_bprintf(strbuf
, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
1103 dhdp
->dstats
.tx_packets
, dhdp
->dstats
.tx_bytes
,
1104 dhdp
->dstats
.tx_errors
, dhdp
->dstats
.tx_dropped
);
1105 bcm_bprintf(strbuf
, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
1106 dhdp
->dstats
.rx_packets
, dhdp
->dstats
.rx_bytes
,
1107 dhdp
->dstats
.rx_errors
, dhdp
->dstats
.rx_dropped
);
1108 bcm_bprintf(strbuf
, "multicast %lu\n", dhdp
->dstats
.multicast
);
1110 bcm_bprintf(strbuf
, "bus stats:\n");
1111 bcm_bprintf(strbuf
, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
1112 dhdp
->tx_packets
, dhdp
->tx_dropped
, dhdp
->tx_multicast
, dhdp
->tx_errors
);
1113 bcm_bprintf(strbuf
, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
1114 dhdp
->tx_ctlpkts
, dhdp
->tx_ctlerrs
);
1115 bcm_bprintf(strbuf
, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
1116 dhdp
->rx_packets
, dhdp
->rx_multicast
, dhdp
->rx_errors
);
1117 bcm_bprintf(strbuf
, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
1118 dhdp
->rx_ctlpkts
, dhdp
->rx_ctlerrs
, dhdp
->rx_dropped
);
1119 bcm_bprintf(strbuf
, "rx_readahead_cnt %lu tx_realloc %lu\n",
1120 dhdp
->rx_readahead_cnt
, dhdp
->tx_realloc
);
1121 bcm_bprintf(strbuf
, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
1122 dhdp
->tx_pktgetfail
, dhdp
->rx_pktgetfail
);
1123 bcm_bprintf(strbuf
, "tx_big_packets %lu\n",
1124 dhdp
->tx_big_packets
);
1125 bcm_bprintf(strbuf
, "\n");
1127 /* Add DMA MAP info */
1128 bcm_bprintf(strbuf
, "DMA MAP stats: \n");
1129 bcm_bprintf(strbuf
, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
1130 dhdp
->dma_stats
.txdata
, KB(dhdp
->dma_stats
.txdata_sz
),
1131 dhdp
->dma_stats
.rxdata
, KB(dhdp
->dma_stats
.rxdata_sz
));
1132 #ifndef IOCTLRESP_USE_CONSTMEM
1133 bcm_bprintf(strbuf
, "IOCTL RX: %lu size: %luK ,",
1134 dhdp
->dma_stats
.ioctl_rx
, KB(dhdp
->dma_stats
.ioctl_rx_sz
));
1135 #endif /* !IOCTLRESP_USE_CONSTMEM */
1136 bcm_bprintf(strbuf
, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
1137 "TSBUF RX: %lu size %luK\n",
1138 dhdp
->dma_stats
.event_rx
, KB(dhdp
->dma_stats
.event_rx_sz
),
1139 dhdp
->dma_stats
.info_rx
, KB(dhdp
->dma_stats
.info_rx_sz
),
1140 dhdp
->dma_stats
.tsbuf_rx
, KB(dhdp
->dma_stats
.tsbuf_rx_sz
));
1141 bcm_bprintf(strbuf
, "Total : %luK \n",
1142 KB(dhdp
->dma_stats
.txdata_sz
+ dhdp
->dma_stats
.rxdata_sz
+
1143 dhdp
->dma_stats
.ioctl_rx_sz
+ dhdp
->dma_stats
.event_rx_sz
+
1144 dhdp
->dma_stats
.tsbuf_rx_sz
));
1145 #endif /* DMAMAP_STATS */
1146 bcm_bprintf(strbuf
, "dhd_induce_error : %u\n", dhdp
->dhd_induce_error
);
1147 /* Add any prot info */
1148 dhd_prot_dump(dhdp
, strbuf
);
1149 bcm_bprintf(strbuf
, "\n");
1151 /* Add any bus info */
1152 dhd_bus_dump(dhdp
, strbuf
);
1154 #if defined(DHD_LB_STATS)
1155 dhd_lb_stats_dump(dhdp
, strbuf
);
1156 #endif /* DHD_LB_STATS */
1158 #ifdef DHD_MEM_STATS
1160 malloc_mem
= MALLOCED(dhdp
->osh
);
1162 txpath_bkpq_len
= dhd_active_tx_flowring_bkpq_len(dhdp
);
1164 * Instead of traversing the entire queue to find the skbs length,
1165 * considering MAX_MTU_SZ as lenth of each skb.
1167 txpath_bkpq_mem
= (txpath_bkpq_len
* MAX_MTU_SZ
);
1168 total_txpath_mem
= dhdp
->txpath_mem
+ txpath_bkpq_mem
;
1170 bcm_bprintf(strbuf
, "\nDHD malloc memory_usage: %llubytes %lluKB\n",
1171 malloc_mem
, (malloc_mem
/ 1024));
1173 bcm_bprintf(strbuf
, "\nDHD tx-bkpq len: %llu memory_usage: %llubytes %lluKB\n",
1174 txpath_bkpq_len
, txpath_bkpq_mem
, (txpath_bkpq_mem
/ 1024));
1175 bcm_bprintf(strbuf
, "DHD tx-path memory_usage: %llubytes %lluKB\n",
1176 total_txpath_mem
, (total_txpath_mem
/ 1024));
1178 total_dhd_mem
= malloc_mem
+ total_txpath_mem
;
1179 #if defined(DHD_LB_STATS)
1180 total_dhd_mem
+= dhd_lb_mem_usage(dhdp
, strbuf
);
1181 #endif /* DHD_LB_STATS */
1182 bcm_bprintf(strbuf
, "\nDHD Totoal memory_usage: %llubytes %lluKB \n",
1183 total_dhd_mem
, (total_dhd_mem
/ 1024));
1184 #endif /* DHD_MEM_STATS */
1186 #if defined(DHD_MQ) && defined(DHD_MQ_STATS)
1187 dhd_mqstats_dump(dhdp
, strbuf
);
1191 if (dhd_get_wet_mode(dhdp
)) {
1192 bcm_bprintf(strbuf
, "Wet Dump:\n");
1193 dhd_wet_dump(dhdp
, strbuf
);
1195 #endif /* DHD_WET */
1197 /* return remaining buffer length */
1198 return (!strbuf
->size
? BCME_BUFTOOSHORT
: strbuf
->size
);
1202 dhd_dump_to_kernelog(dhd_pub_t
*dhdp
)
1206 DHD_ERROR(("F/W version: %s\n", fw_version
));
1207 bcm_bprintf_bypass
= TRUE
;
1208 dhd_dump(dhdp
, buf
, sizeof(buf
));
1209 bcm_bprintf_bypass
= FALSE
;
1213 dhd_wl_ioctl_cmd(dhd_pub_t
*dhd_pub
, int cmd
, void *arg
, int len
, uint8 set
, int ifidx
)
1222 return dhd_wl_ioctl(dhd_pub
, ifidx
, &ioc
, arg
, len
);
1226 dhd_wl_ioctl_get_intiovar(dhd_pub_t
*dhd_pub
, char *name
, uint
*pval
,
1227 int cmd
, uint8 set
, int ifidx
)
1229 char iovbuf
[WLC_IOCTL_SMLEN
];
1232 memset(iovbuf
, 0, sizeof(iovbuf
));
1233 if (bcm_mkiovar(name
, NULL
, 0, iovbuf
, sizeof(iovbuf
))) {
1234 ret
= dhd_wl_ioctl_cmd(dhd_pub
, cmd
, iovbuf
, sizeof(iovbuf
), set
, ifidx
);
1236 *pval
= ltoh32(*((uint
*)iovbuf
));
1238 DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
1239 __FUNCTION__
, name
, ret
));
1242 DHD_ERROR(("%s: mkiovar %s failed\n",
1243 __FUNCTION__
, name
));
1250 dhd_wl_ioctl_set_intiovar(dhd_pub_t
*dhd_pub
, char *name
, uint val
,
1251 int cmd
, uint8 set
, int ifidx
)
1253 char iovbuf
[WLC_IOCTL_SMLEN
];
1255 int lval
= htol32(val
);
1258 len
= bcm_mkiovar(name
, (char*)&lval
, sizeof(lval
), iovbuf
, sizeof(iovbuf
));
1261 ret
= dhd_wl_ioctl_cmd(dhd_pub
, cmd
, iovbuf
, len
, set
, ifidx
);
1263 DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
1264 __FUNCTION__
, name
, ret
));
1267 DHD_ERROR(("%s: mkiovar %s failed\n",
1268 __FUNCTION__
, name
));
1274 static struct ioctl2str_s
{
1277 } ioctl2str_array
[] = {
1280 {WLC_SET_PROMISC
, "SET_PROMISC"},
1281 {WLC_SET_INFRA
, "SET_INFRA"},
1282 {WLC_SET_AUTH
, "SET_AUTH"},
1283 {WLC_SET_SSID
, "SET_SSID"},
1284 {WLC_RESTART
, "RESTART"},
1285 {WLC_SET_CHANNEL
, "SET_CHANNEL"},
1286 {WLC_SET_RATE_PARAMS
, "SET_RATE_PARAMS"},
1287 {WLC_SET_KEY
, "SET_KEY"},
1289 {WLC_DISASSOC
, "DISASSOC"},
1290 {WLC_REASSOC
, "REASSOC"},
1291 {WLC_SET_COUNTRY
, "SET_COUNTRY"},
1292 {WLC_SET_WAKE
, "SET_WAKE"},
1293 {WLC_SET_SCANSUPPRESS
, "SET_SCANSUPPRESS"},
1294 {WLC_SCB_DEAUTHORIZE
, "SCB_DEAUTHORIZE"},
1295 {WLC_SET_WSEC
, "SET_WSEC"},
1296 {WLC_SET_INTERFERENCE_MODE
, "SET_INTERFERENCE_MODE"},
1297 {WLC_SET_RADAR
, "SET_RADAR"},
1302 ioctl2str(uint32 ioctl
)
1304 struct ioctl2str_s
*p
= ioctl2str_array
;
1306 while (p
->name
!= NULL
) {
1307 if (p
->ioctl
== ioctl
) {
1317 * @param ioc IO control struct, members are partially used by this function.
1318 * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
1319 * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
1322 dhd_wl_ioctl(dhd_pub_t
*dhd_pub
, int ifidx
, wl_ioctl_t
*ioc
, void *buf
, int len
)
1324 int ret
= BCME_ERROR
;
1325 unsigned long flags
;
1326 #ifdef DUMP_IOCTL_IOV_LIST
1327 dhd_iov_li_t
*iov_li
;
1328 #endif /* DUMP_IOCTL_IOV_LIST */
1329 int hostsleep_set
= 0;
1330 int hostsleep_val
= 0;
1332 if (dhd_query_bus_erros(dhd_pub
)) {
1336 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1337 DHD_OS_WAKE_LOCK(dhd_pub
);
1338 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub
->bus
)) < 0) {
1339 DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__
));
1340 DHD_OS_WAKE_UNLOCK(dhd_pub
);
1343 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1345 #ifdef KEEPIF_ON_DEVICE_RESET
1346 if (ioc
->cmd
== WLC_GET_VAR
) {
1347 dbus_config_t config
;
1348 config
.general_param
= 0;
1350 if (!strcmp(buf
, "wowl_activate")) {
1351 /* 1 (TRUE) after decreased by 1 */
1352 config
.general_param
= 2;
1353 } else if (!strcmp(buf
, "wowl_clear")) {
1354 /* 0 (FALSE) after decreased by 1 */
1355 config
.general_param
= 1;
1358 if (config
.general_param
) {
1359 config
.config_id
= DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET
;
1360 config
.general_param
--;
1361 dbus_set_config(dhd_pub
->dbus
, &config
);
1364 #endif /* KEEPIF_ON_DEVICE_RESET */
1366 if (dhd_os_proto_block(dhd_pub
))
1369 int slen
, val
, lval
, min_len
;
1373 if (ioc
->cmd
== WLC_GET_VAR
&& buf
) {
1374 min_len
= MIN(sizeof(tmp
) - 1, strlen(buf
));
1375 memset(tmp
, 0, sizeof(tmp
));
1376 bcopy(buf
, tmp
, min_len
);
1377 tmp
[min_len
] = '\0';
1379 #endif /* DHD_LOG_DUMP */
1381 #ifdef DHD_DISCONNECT_TRACE
1382 if (WLC_DISASSOC
== ioc
->cmd
|| WLC_DOWN
== ioc
->cmd
||
1383 WLC_DISASSOC_MYAP
== ioc
->cmd
) {
1384 DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc
->cmd
));
1386 #endif /* HW_DISCONNECT_TRACE */
1387 /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
1388 if (ioc
->set
== TRUE
) {
1389 char *pars
= (char *)buf
; // points at user buffer
1390 if (ioc
->cmd
== WLC_SET_VAR
&& buf
) {
1391 DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx
, pars
));
1392 if (ioc
->len
> 1 + sizeof(uint32
)) {
1394 pars
+= strnlen(pars
, ioc
->len
- 1 - sizeof(uint32
));
1395 pars
++; // skip NULL character
1398 DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
1399 ifidx
, ioc
->cmd
, ioctl2str(ioc
->cmd
)));
1402 DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32
*)pars
));
1404 DHD_DNGL_IOVAR_SET((" NULL\n"));
1408 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
1409 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub
)) {
1410 DHD_INFO(("%s: returning as busstate=%d\n",
1411 __FUNCTION__
, dhd_pub
->busstate
));
1412 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1413 dhd_os_proto_unblock(dhd_pub
);
1416 DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub
);
1417 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1419 #ifdef DHD_PCIE_RUNTIMEPM
1420 dhdpcie_runtime_bus_wake(dhd_pub
, TRUE
, dhd_wl_ioctl
);
1421 #endif /* DHD_PCIE_RUNTIMEPM */
1423 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
1424 if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd_pub
) ||
1425 dhd_pub
->dhd_induce_error
== DHD_INDUCE_IOCTL_SUSPEND_ERROR
) {
1426 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
1427 __FUNCTION__
, dhd_pub
->busstate
, dhd_pub
->dhd_bus_busy_state
));
1428 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
1429 ioctl_suspend_error
++;
1430 if (ioctl_suspend_error
> MAX_IOCTL_SUSPEND_ERROR
) {
1431 dhd_pub
->hang_reason
= HANG_REASON_IOCTL_SUSPEND_ERROR
;
1432 dhd_os_send_hang_message(dhd_pub
);
1433 ioctl_suspend_error
= 0;
1435 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
1436 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub
);
1437 dhd_os_busbusy_wake(dhd_pub
);
1438 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1439 dhd_os_proto_unblock(dhd_pub
);
1442 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
1443 ioctl_suspend_error
= 0;
1444 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
1445 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1447 #ifdef DUMP_IOCTL_IOV_LIST
1448 if (ioc
->cmd
!= WLC_GET_MAGIC
&& ioc
->cmd
!= WLC_GET_VERSION
&& buf
) {
1449 if (!(iov_li
= MALLOC(dhd_pub
->osh
, sizeof(*iov_li
)))) {
1450 DHD_ERROR(("iovar dump list item allocation Failed\n"));
1452 iov_li
->cmd
= ioc
->cmd
;
1454 bcopy((char *)buf
, iov_li
->buff
, strlen((char *)buf
)+1);
1455 dhd_iov_li_append(dhd_pub
, &dhd_pub
->dump_iovlist_head
,
1459 #endif /* DUMP_IOCTL_IOV_LIST */
1461 if (dhd_conf_check_hostsleep(dhd_pub
, ioc
->cmd
, ioc
->buf
, len
,
1462 &hostsleep_set
, &hostsleep_val
, &ret
))
1464 ret
= dhd_prot_ioctl(dhd_pub
, ifidx
, ioc
, buf
, len
);
1465 dhd_conf_get_hostsleep(dhd_pub
, hostsleep_set
, hostsleep_val
, ret
);
1467 #ifdef DUMP_IOCTL_IOV_LIST
1468 if (ret
== -ETIMEDOUT
) {
1469 DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
1471 dhd_iov_li_print(&dhd_pub
->dump_iovlist_head
);
1473 #endif /* DUMP_IOCTL_IOV_LIST */
1474 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1475 if (ret
== -ETIMEDOUT
) {
1476 copy_hang_info_ioctl_timeout(dhd_pub
, ifidx
, ioc
);
1478 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1480 if ((ioc
->cmd
== WLC_GET_VAR
|| ioc
->cmd
== WLC_SET_VAR
) &&
1484 slen
= strlen(buf
) + 1;
1486 if (len
>= slen
+ sizeof(lval
)) {
1487 if (ioc
->cmd
== WLC_GET_VAR
) {
1491 min_len
= MIN(ioc
->len
- slen
, sizeof(int));
1492 bcopy((msg
+ slen
), &lval
, min_len
);
1494 if (!strncmp(msg
, "cur_etheraddr",
1495 strlen("cur_etheraddr"))) {
1500 "%s: cmd: %d, msg: %s val: 0x%x,"
1501 " len: %d, set: %d, txn-id: %d\n",
1502 ioc
->cmd
== WLC_GET_VAR
?
1503 "WLC_GET_VAR" : "WLC_SET_VAR",
1504 ioc
->cmd
, msg
, lval
, ioc
->len
, ioc
->set
,
1505 dhd_prot_get_ioctl_trans_id(dhd_pub
)));
1507 DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
1508 ioc
->cmd
== WLC_GET_VAR
? "WLC_GET_VAR" : "WLC_SET_VAR",
1509 ioc
->cmd
, ioc
->len
, ioc
->set
,
1510 dhd_prot_get_ioctl_trans_id(dhd_pub
)));
1514 if (buf
!= NULL
&& slen
!= 0) {
1517 } else if (slen
>= 2) {
1522 /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
1523 if (ioc
->cmd
!= WLC_GET_MAGIC
&& ioc
->cmd
!= WLC_GET_VERSION
) {
1524 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
1525 "set: %d\n", ioc
->cmd
, val
, ioc
->len
, ioc
->set
));
1528 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc
->cmd
));
1531 #endif /* DHD_LOG_DUMP */
1532 if (ret
&& dhd_pub
->up
) {
1533 /* Send hang event only if dhd_open() was success */
1534 dhd_os_check_hang(dhd_pub
, ifidx
, ret
);
1537 if (ret
== -ETIMEDOUT
&& !dhd_pub
->up
) {
1538 DHD_ERROR(("%s: 'resumed on timeout' error is "
1539 "occurred before the interface does not"
1540 " bring up\n", __FUNCTION__
));
1544 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
1545 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub
);
1546 dhd_os_busbusy_wake(dhd_pub
);
1547 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1549 dhd_os_proto_unblock(dhd_pub
);
1553 if ((ioc
->cmd
== WLC_GET_VAR
|| ioc
->cmd
== WLC_SET_VAR
) &&
1555 if (ret
== BCME_UNSUPPORTED
|| ret
== BCME_NOTASSOCIATED
) {
1556 DHD_ERROR_MEM(("%s: %s: %s, %s\n",
1557 __FUNCTION__
, ioc
->cmd
== WLC_GET_VAR
?
1558 "WLC_GET_VAR" : "WLC_SET_VAR",
1559 buf
? (char *)buf
:"NO MESSAGE",
1560 ret
== BCME_UNSUPPORTED
? "UNSUPPORTED"
1561 : "NOT ASSOCIATED"));
1563 DHD_ERROR_MEM(("%s: %s: %s, ret = %d\n",
1564 __FUNCTION__
, ioc
->cmd
== WLC_GET_VAR
?
1565 "WLC_GET_VAR" : "WLC_SET_VAR",
1569 if (ret
== BCME_UNSUPPORTED
|| ret
== BCME_NOTASSOCIATED
) {
1570 DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, %s\n",
1571 __FUNCTION__
, ioc
->cmd
,
1572 ret
== BCME_UNSUPPORTED
? "UNSUPPORTED" :
1575 DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, ret = %d\n",
1576 __FUNCTION__
, ioc
->cmd
, ret
));
1583 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1584 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub
->bus
));
1585 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub
->bus
));
1587 DHD_OS_WAKE_UNLOCK(dhd_pub
);
1588 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1591 /* Intercept monitor ioctl here, add/del monitor if */
1592 if (ret
== BCME_OK
&& ioc
->cmd
== WLC_SET_MONITOR
) {
1594 if (buf
!= NULL
&& len
!= 0) {
1597 } else if (len
>= 2) {
1603 dhd_set_monitor(dhd_pub
, ifidx
, val
);
1605 #endif /* WL_MONITOR */
1610 uint
wl_get_port_num(wl_io_pport_t
*io_pport
)
1615 /* Get bssidx from iovar params
1616 * Input: dhd_pub - pointer to dhd_pub_t
1617 * params - IOVAR params
1618 * Output: idx - BSS index
1619 * val - ponter to the IOVAR arguments
1622 dhd_iovar_parse_bssidx(dhd_pub_t
*dhd_pub
, const char *params
, uint32
*idx
, const char **val
)
1624 char *prefix
= "bsscfg:";
1627 if (!(strncmp(params
, prefix
, strlen(prefix
)))) {
1628 /* per bss setting should be prefixed with 'bsscfg:' */
1629 const char *p
= params
+ strlen(prefix
);
1636 bcopy(p
, &bssidx
, sizeof(uint32
));
1637 /* Get corresponding dhd index */
1638 bssidx
= dhd_bssidx2idx(dhd_pub
, htod32(bssidx
));
1640 if (bssidx
>= DHD_MAX_IFS
) {
1641 DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__
));
1646 p
+= sizeof(uint32
);
1650 DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__
));
#if defined(DHD_DEBUG) && defined(BCMDBUS)
/*
 * USB Device console input function: forwards a console command string
 * to the dongle via the "cons" iovar. Returns the dhd_iovar() status.
 */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	DHD_TRACE(("%s \n", __FUNCTION__));

	return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
}
#endif /* DHD_DEBUG && BCMDBUS */
1670 dhd_mem_debug(dhd_pub_t
*dhd
, uchar
*msg
, uint msglen
)
1672 unsigned long int_arg
= 0;
1674 char *end_ptr
= NULL
;
1675 dhd_dbg_mwli_t
*mw_li
;
1677 /* check if mwalloc, mwquery or mwfree was supplied arguement with space */
1678 p
= bcmstrstr((char *)msg
, " ");
1680 /* space should be converted to null as separation flag for firmware */
1682 /* store the argument in int_arg */
1683 int_arg
= bcm_strtoul(p
+1, &end_ptr
, 10);
1686 if (!p
&& !strcmp(msg
, "query")) {
1687 /* lets query the list inetrnally */
1688 if (dll_empty(dll_head_p(&dhd
->mw_list_head
))) {
1689 DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
1691 for (item
= dll_head_p(&dhd
->mw_list_head
);
1692 !dll_end(&dhd
->mw_list_head
, item
); item
= next
) {
1693 next
= dll_next_p(item
);
1694 mw_li
= (dhd_dbg_mwli_t
*)CONTAINEROF(item
, dhd_dbg_mwli_t
, list
);
1695 DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li
->id
, mw_li
->size
));
1698 } else if (p
&& end_ptr
&& (*end_ptr
== '\0') && !strcmp(msg
, "alloc")) {
1700 /* convert size into KB and append as integer */
1701 *((int32
*)(p
+1)) = int_arg
*1024;
1702 *(p
+1+sizeof(int32
)) = '\0';
1704 /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1705 * 1 bytes for null caracter
1707 msglen
= strlen(msg
) + sizeof(int32
) + 1;
1708 if (dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, msg
, msglen
+1, FALSE
, 0) < 0) {
1709 DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
1712 /* returned allocated handle from dongle, basically address of the allocated unit */
1713 alloc_handle
= *((int32
*)msg
);
1715 /* add a node in the list with tuple <id, handle, size> */
1716 if (alloc_handle
== 0) {
1717 DHD_ERROR(("Reuqested size could not be allocated\n"));
1718 } else if (!(mw_li
= MALLOC(dhd
->osh
, sizeof(*mw_li
)))) {
1719 DHD_ERROR(("mw list item allocation Failed\n"));
1721 mw_li
->id
= dhd
->mw_id
++;
1722 mw_li
->handle
= alloc_handle
;
1723 mw_li
->size
= int_arg
;
1724 /* append the node in the list */
1725 dll_append(&dhd
->mw_list_head
, &mw_li
->list
);
1727 } else if (p
&& end_ptr
&& (*end_ptr
== '\0') && !strcmp(msg
, "free")) {
1728 /* inform dongle to free wasted chunk */
1731 for (item
= dll_head_p(&dhd
->mw_list_head
);
1732 !dll_end(&dhd
->mw_list_head
, item
); item
= next
) {
1733 next
= dll_next_p(item
);
1734 mw_li
= (dhd_dbg_mwli_t
*)CONTAINEROF(item
, dhd_dbg_mwli_t
, list
);
1736 if (mw_li
->id
== (int)int_arg
) {
1737 handle
= mw_li
->handle
;
1740 MFREE(dhd
->osh
, mw_li
, sizeof(*mw_li
));
1741 if (dll_empty(dll_head_p(&dhd
->mw_list_head
))) {
1749 /* append the free handle and the chunk size in first 8 bytes
1750 * after the command and null character
1752 *((int32
*)(p
+1)) = handle
;
1753 *((int32
*)((p
+1)+sizeof(int32
))) = size
;
1754 /* append null as terminator */
1755 *(p
+1+2*sizeof(int32
)) = '\0';
1756 /* recalculated length -> 4 bytes for "free" + 8 bytes for hadnle and size
1757 * + 1 bytes for null caracter
1759 len
= strlen(msg
) + 2*sizeof(int32
) + 1;
1760 /* send iovar to free the chunk */
1761 if (dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, msg
, len
, FALSE
, 0) < 0) {
1762 DHD_ERROR(("IOCTL failed for memdebug free\n"));
1765 DHD_ERROR(("specified id does not exist\n"));
1768 /* for all the wrong argument formats */
1774 dhd_mw_list_delete(dhd_pub_t
*dhd
, dll_t
*list_head
)
1777 dhd_dbg_mwli_t
*mw_li
;
1778 while (!(dll_empty(list_head
))) {
1779 item
= dll_head_p(list_head
);
1780 mw_li
= (dhd_dbg_mwli_t
*)CONTAINEROF(item
, dhd_dbg_mwli_t
, list
);
1782 MFREE(dhd
->osh
, mw_li
, sizeof(*mw_li
));
1787 dhd_flow_ring_debug(dhd_pub_t
*dhd
, char *msg
, uint msglen
)
1789 flow_ring_table_t
*flow_ring_table
;
1791 char *end_ptr
= NULL
;
1796 cmd
= bcmstrstr(msg
, " ");
1797 BCM_REFERENCE(prio
);
1799 /* in order to use string operations append null */
1802 DHD_ERROR(("missing: create/delete args\n"));
1805 if (cmd
&& !strcmp(msg
, "create")) {
1806 /* extract <"source address", "destination address", "priority"> */
1807 uint8 sa
[ETHER_ADDR_LEN
], da
[ETHER_ADDR_LEN
];
1810 msg
= msg
+ strlen("create") + 1;
1811 /* fill ethernet source address */
1812 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
1813 sa
[i
] = (uint8
)bcm_strtoul(msg
, &end_ptr
, 16);
1814 if (*end_ptr
== ':') {
1815 msg
= (end_ptr
+ 1);
1816 } else if (i
!= 5) {
1817 DHD_ERROR(("not a valid source mac addr\n"));
1821 if (*end_ptr
!= ' ') {
1822 DHD_ERROR(("missing: destiantion mac id\n"));
1828 /* fill ethernet destination address */
1829 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
1830 da
[i
] = (uint8
)bcm_strtoul(msg
, &end_ptr
, 16);
1831 if (*end_ptr
== ':') {
1832 msg
= (end_ptr
+ 1);
1833 } else if (i
!= 5) {
1834 DHD_ERROR(("not a valid destination mac addr\n"));
1838 if (*end_ptr
!= ' ') {
1839 DHD_ERROR(("missing: priority\n"));
1844 /* parse priority */
1845 prio
= (uint8
)bcm_strtoul(msg
, &end_ptr
, 10);
1846 if (prio
> MAXPRIO
) {
1847 DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
1852 if (*end_ptr
!= '\0') {
1853 DHD_ERROR(("msg not truncated with NULL character\n"));
1856 ret
= dhd_flowid_debug_create(dhd
, 0, prio
, (char *)sa
, (char *)da
, &flowid
);
1857 if (ret
!= BCME_OK
) {
1858 DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__
, ret
));
1863 } else if (cmd
&& !strcmp(msg
, "delete")) {
1864 msg
= msg
+ strlen("delete") + 1;
1866 flowid
= (uint16
)bcm_strtoul(msg
, &end_ptr
, 10);
1867 if (*end_ptr
!= '\0') {
1868 DHD_ERROR(("msg not truncated with NULL character\n"));
1872 /* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */
1873 if (dhd_flowid_find_by_ifidx(dhd
, 0, flowid
) != BCME_OK
)
1875 DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__
, flowid
));
1879 flow_ring_table
= (flow_ring_table_t
*)dhd
->flow_ring_table
;
1880 ret
= dhd_bus_flow_ring_delete_request(dhd
->bus
, (void *)&flow_ring_table
[flowid
]);
1881 if (ret
!= BCME_OK
) {
1882 DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__
, ret
));
1887 DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__
));
1890 #endif /* BCMPCIE */
1891 #endif /* DHD_DEBUG */
1894 dhd_doiovar(dhd_pub_t
*dhd_pub
, const bcm_iovar_t
*vi
, uint32 actionid
, const char *name
,
1895 void *params
, int plen
, void *arg
, uint len
, int val_size
)
1899 uint32 dhd_ver_len
, bus_api_rev_len
;
1901 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1902 DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__
, actionid
, name
));
1904 if ((bcmerror
= bcm_iovar_lencheck(vi
, arg
, len
, IOV_ISSET(actionid
))) != 0)
1907 if (plen
>= (int)sizeof(int_val
))
1908 bcopy(params
, &int_val
, sizeof(int_val
));
1911 case IOV_GVAL(IOV_VERSION
):
1912 /* Need to have checked buffer length */
1913 dhd_ver_len
= sizeof(dhd_version
) - 1;
1914 bus_api_rev_len
= strlen(bus_api_revision
);
1915 if (len
> dhd_ver_len
+ bus_api_rev_len
) {
1916 bcmerror
= memcpy_s((char *)arg
, len
, dhd_version
, dhd_ver_len
);
1917 if (bcmerror
!= BCME_OK
) {
1920 bcmerror
= memcpy_s((char *)arg
+ dhd_ver_len
, len
- dhd_ver_len
,
1921 bus_api_revision
, bus_api_rev_len
);
1922 if (bcmerror
!= BCME_OK
) {
1925 *((char *)arg
+ dhd_ver_len
+ bus_api_rev_len
) = '\0';
1927 #if defined(BCMSDIO) && defined(PKT_STATICS)
1928 dhd_bus_clear_txpktstatics(dhd_pub
->bus
);
1932 case IOV_GVAL(IOV_WLMSGLEVEL
):
1933 printf("android_msg_level=0x%x\n", android_msg_level
);
1934 printf("config_msg_level=0x%x\n", config_msg_level
);
1935 #if defined(WL_WIRELESS_EXT)
1936 int_val
= (int32
)iw_msg_level
;
1937 bcopy(&int_val
, arg
, val_size
);
1938 printf("iw_msg_level=0x%x\n", iw_msg_level
);
1941 int_val
= (int32
)wl_dbg_level
;
1942 bcopy(&int_val
, arg
, val_size
);
1943 printf("cfg_msg_level=0x%x\n", wl_dbg_level
);
1947 case IOV_SVAL(IOV_WLMSGLEVEL
):
1948 if (int_val
& DHD_ANDROID_VAL
) {
1949 android_msg_level
= (uint
)(int_val
& 0xFFFF);
1950 printf("android_msg_level=0x%x\n", android_msg_level
);
1952 if (int_val
& DHD_CONFIG_VAL
) {
1953 config_msg_level
= (uint
)(int_val
& 0xFFFF);
1954 printf("config_msg_level=0x%x\n", config_msg_level
);
1956 #if defined(WL_WIRELESS_EXT)
1957 if (int_val
& DHD_IW_VAL
) {
1958 iw_msg_level
= (uint
)(int_val
& 0xFFFF);
1959 printf("iw_msg_level=0x%x\n", iw_msg_level
);
1963 if (int_val
& DHD_CFG_VAL
) {
1964 wl_cfg80211_enable_trace((u32
)(int_val
& 0xFFFF));
1969 case IOV_GVAL(IOV_MSGLEVEL
):
1970 int_val
= (int32
)dhd_msg_level
;
1971 bcopy(&int_val
, arg
, val_size
);
1972 #if defined(BCMSDIO) && defined(PKT_STATICS)
1973 dhd_bus_dump_txpktstatics(dhd_pub
->bus
);
1977 case IOV_SVAL(IOV_MSGLEVEL
):
1978 dhd_msg_level
= int_val
;
1981 case IOV_GVAL(IOV_BCMERRORSTR
):
1982 bcm_strncpy_s((char *)arg
, len
, bcmerrorstr(dhd_pub
->bcmerror
), BCME_STRLEN
);
1983 ((char *)arg
)[BCME_STRLEN
- 1] = 0x00;
1986 case IOV_GVAL(IOV_BCMERROR
):
1987 int_val
= (int32
)dhd_pub
->bcmerror
;
1988 bcopy(&int_val
, arg
, val_size
);
1992 case IOV_GVAL(IOV_WDTICK
):
1993 #ifdef HOST_TPUT_TEST
1994 if (dhd_pub
->net_ts
.tv_sec
== 0 && dhd_pub
->net_ts
.tv_nsec
== 0) {
1995 osl_do_gettimeofday(&dhd_pub
->net_ts
);
1997 struct osl_timespec cur_ts
;
1999 osl_do_gettimeofday(&cur_ts
);
2000 diff_ms
= osl_do_gettimediff(&cur_ts
, &dhd_pub
->net_ts
)/1000;
2001 int_val
= (int32
)((dhd_pub
->net_len
/1024/1024)*8)*1000/diff_ms
;
2002 dhd_pub
->net_len
= 0;
2003 memcpy(&dhd_pub
->net_ts
, &cur_ts
, sizeof(struct osl_timespec
));
2004 bcopy(&int_val
, arg
, sizeof(int_val
));
2007 int_val
= (int32
)dhd_watchdog_ms
;
2008 bcopy(&int_val
, arg
, val_size
);
2011 #endif /* !BCMDBUS */
2013 case IOV_SVAL(IOV_WDTICK
):
2015 bcmerror
= BCME_NOTUP
;
2019 dhd_watchdog_ms
= (uint
)int_val
;
2021 dhd_os_wd_timer(dhd_pub
, (uint
)int_val
);
2024 case IOV_GVAL(IOV_DUMP
):
2025 if (dhd_dump(dhd_pub
, arg
, len
) <= 0)
2026 bcmerror
= BCME_ERROR
;
2032 case IOV_GVAL(IOV_DCONSOLE_POLL
):
2033 int_val
= (int32
)dhd_pub
->dhd_console_ms
;
2034 bcopy(&int_val
, arg
, val_size
);
2037 case IOV_SVAL(IOV_DCONSOLE_POLL
):
2038 dhd_pub
->dhd_console_ms
= (uint
)int_val
;
2041 #if defined(DHD_DEBUG)
2042 case IOV_SVAL(IOV_CONS
):
2045 bcmerror
= dhd_bus_txcons(dhd_pub
, arg
, len
- 1);
2047 bcmerror
= dhd_bus_console_in(dhd_pub
, arg
, len
- 1);
2051 #endif /* DHD_DEBUG */
2052 #endif /* !BCMDBUS */
2054 case IOV_SVAL(IOV_CLEARCOUNTS
):
2055 dhd_pub
->tx_packets
= dhd_pub
->rx_packets
= 0;
2056 dhd_pub
->tx_errors
= dhd_pub
->rx_errors
= 0;
2057 dhd_pub
->tx_ctlpkts
= dhd_pub
->rx_ctlpkts
= 0;
2058 dhd_pub
->tx_ctlerrs
= dhd_pub
->rx_ctlerrs
= 0;
2059 dhd_pub
->tx_dropped
= 0;
2060 dhd_pub
->rx_dropped
= 0;
2061 dhd_pub
->tx_pktgetfail
= 0;
2062 dhd_pub
->rx_pktgetfail
= 0;
2063 dhd_pub
->rx_readahead_cnt
= 0;
2064 dhd_pub
->tx_realloc
= 0;
2065 dhd_pub
->wd_dpc_sched
= 0;
2066 dhd_pub
->tx_big_packets
= 0;
2067 memset(&dhd_pub
->dstats
, 0, sizeof(dhd_pub
->dstats
));
2068 dhd_bus_clearcounts(dhd_pub
);
2069 #ifdef PROP_TXSTATUS
2070 /* clear proptxstatus related counters */
2071 dhd_wlfc_clear_counts(dhd_pub
);
2072 #endif /* PROP_TXSTATUS */
2073 #if defined(DHD_LB_STATS)
2074 DHD_LB_STATS_RESET(dhd_pub
);
2075 #endif /* DHD_LB_STATS */
2078 case IOV_GVAL(IOV_IOCTLTIMEOUT
): {
2079 #ifdef HOST_TPUT_TEST
2080 if (dhd_pub
->bus_ts
.tv_sec
== 0 && dhd_pub
->bus_ts
.tv_nsec
== 0) {
2081 osl_do_gettimeofday(&dhd_pub
->bus_ts
);
2083 struct osl_timespec cur_ts
;
2085 osl_do_gettimeofday(&cur_ts
);
2086 diff_ms
= osl_do_gettimediff(&cur_ts
, &dhd_pub
->bus_ts
)/1000;
2087 int_val
= (int32
)((dhd_pub
->dstats
.tx_bytes
/1024/1024)*8)*1000/diff_ms
;
2088 dhd_pub
->dstats
.tx_bytes
= 0;
2089 memcpy(&dhd_pub
->bus_ts
, &cur_ts
, sizeof(struct osl_timespec
));
2090 bcopy(&int_val
, arg
, sizeof(int_val
));
2093 int_val
= (int32
)dhd_os_get_ioctl_resp_timeout();
2094 bcopy(&int_val
, arg
, sizeof(int_val
));
2099 case IOV_SVAL(IOV_IOCTLTIMEOUT
): {
2101 bcmerror
= BCME_BADARG
;
2103 dhd_os_set_ioctl_resp_timeout((unsigned int)int_val
);
2107 #ifdef PROP_TXSTATUS
2108 case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE
): {
2109 bool wlfc_enab
= FALSE
;
2110 bcmerror
= dhd_wlfc_get_enable(dhd_pub
, &wlfc_enab
);
2111 if (bcmerror
!= BCME_OK
)
2113 int_val
= wlfc_enab
? 1 : 0;
2114 bcopy(&int_val
, arg
, val_size
);
2117 case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE
): {
2118 bool wlfc_enab
= FALSE
;
2119 bcmerror
= dhd_wlfc_get_enable(dhd_pub
, &wlfc_enab
);
2120 if (bcmerror
!= BCME_OK
)
2123 /* wlfc is already set as desired */
2124 if (wlfc_enab
== (int_val
== 0 ? FALSE
: TRUE
))
2127 if (int_val
== TRUE
)
2128 bcmerror
= dhd_wlfc_init(dhd_pub
);
2130 bcmerror
= dhd_wlfc_deinit(dhd_pub
);
2134 case IOV_GVAL(IOV_PROPTXSTATUS_MODE
):
2135 bcmerror
= dhd_wlfc_get_mode(dhd_pub
, &int_val
);
2136 if (bcmerror
!= BCME_OK
)
2138 bcopy(&int_val
, arg
, val_size
);
2141 case IOV_SVAL(IOV_PROPTXSTATUS_MODE
):
2142 dhd_wlfc_set_mode(dhd_pub
, int_val
);
2145 case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE
):
2146 bcmerror
= dhd_wlfc_get_module_ignore(dhd_pub
, &int_val
);
2147 if (bcmerror
!= BCME_OK
)
2149 bcopy(&int_val
, arg
, val_size
);
2152 case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE
):
2153 dhd_wlfc_set_module_ignore(dhd_pub
, int_val
);
2156 case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE
):
2157 bcmerror
= dhd_wlfc_get_credit_ignore(dhd_pub
, &int_val
);
2158 if (bcmerror
!= BCME_OK
)
2160 bcopy(&int_val
, arg
, val_size
);
2163 case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE
):
2164 dhd_wlfc_set_credit_ignore(dhd_pub
, int_val
);
2167 case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE
):
2168 bcmerror
= dhd_wlfc_get_txstatus_ignore(dhd_pub
, &int_val
);
2169 if (bcmerror
!= BCME_OK
)
2171 bcopy(&int_val
, arg
, val_size
);
2174 case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE
):
2175 dhd_wlfc_set_txstatus_ignore(dhd_pub
, int_val
);
2178 case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK
):
2179 bcmerror
= dhd_wlfc_get_rxpkt_chk(dhd_pub
, &int_val
);
2180 if (bcmerror
!= BCME_OK
)
2182 bcopy(&int_val
, arg
, val_size
);
2185 case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK
):
2186 dhd_wlfc_set_rxpkt_chk(dhd_pub
, int_val
);
2189 #endif /* PROP_TXSTATUS */
2191 case IOV_GVAL(IOV_BUS_TYPE
):
2192 /* The dhd application queries the driver to check if its usb or sdio. */
2194 int_val
= BUS_TYPE_USB
;
2197 int_val
= BUS_TYPE_SDIO
;
2199 #ifdef PCIE_FULL_DONGLE
2200 int_val
= BUS_TYPE_PCIE
;
2202 bcopy(&int_val
, arg
, val_size
);
2205 case IOV_SVAL(IOV_CHANGEMTU
):
2207 bcmerror
= dhd_change_mtu(dhd_pub
, int_val
, 0);
2210 case IOV_GVAL(IOV_HOSTREORDER_FLOWS
):
2213 uint8
*ptr
= (uint8
*)arg
;
2217 for (i
= 0; i
< WLHOST_REORDERDATA_MAXFLOWS
; i
++) {
2218 if (dhd_pub
->reorder_bufs
[i
] != NULL
) {
2219 *ptr
= dhd_pub
->reorder_bufs
[i
]->flow_id
;
2228 #ifdef DHDTCPACK_SUPPRESS
2229 case IOV_GVAL(IOV_TCPACK_SUPPRESS
): {
2230 int_val
= (uint32
)dhd_pub
->tcpack_sup_mode
;
2231 bcopy(&int_val
, arg
, val_size
);
2234 case IOV_SVAL(IOV_TCPACK_SUPPRESS
): {
2235 bcmerror
= dhd_tcpack_suppress_set(dhd_pub
, (uint8
)int_val
);
2238 #endif /* DHDTCPACK_SUPPRESS */
2240 #ifdef DHD_L2_FILTER
2241 case IOV_GVAL(IOV_DHCP_UNICAST
): {
2244 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2245 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2246 __FUNCTION__
, name
));
2247 bcmerror
= BCME_BADARG
;
2250 int_val
= dhd_get_dhcp_unicast_status(dhd_pub
, bssidx
);
2251 memcpy(arg
, &int_val
, val_size
);
2254 case IOV_SVAL(IOV_DHCP_UNICAST
): {
2257 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2258 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2259 __FUNCTION__
, name
));
2260 bcmerror
= BCME_BADARG
;
2263 memcpy(&int_val
, val
, sizeof(int_val
));
2264 bcmerror
= dhd_set_dhcp_unicast_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
2267 case IOV_GVAL(IOV_BLOCK_PING
): {
2271 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2272 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__
));
2273 bcmerror
= BCME_BADARG
;
2276 int_val
= dhd_get_block_ping_status(dhd_pub
, bssidx
);
2277 memcpy(arg
, &int_val
, val_size
);
2280 case IOV_SVAL(IOV_BLOCK_PING
): {
2284 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2285 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__
));
2286 bcmerror
= BCME_BADARG
;
2289 memcpy(&int_val
, val
, sizeof(int_val
));
2290 bcmerror
= dhd_set_block_ping_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
2293 case IOV_GVAL(IOV_PROXY_ARP
): {
2297 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2298 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__
));
2299 bcmerror
= BCME_BADARG
;
2302 int_val
= dhd_get_parp_status(dhd_pub
, bssidx
);
2303 bcopy(&int_val
, arg
, val_size
);
2306 case IOV_SVAL(IOV_PROXY_ARP
): {
2310 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2311 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__
));
2312 bcmerror
= BCME_BADARG
;
2315 bcopy(val
, &int_val
, sizeof(int_val
));
2317 /* Issue a iovar request to WL to update the proxy arp capability bit
2318 * in the Extended Capability IE of beacons/probe responses.
2320 bcmerror
= dhd_iovar(dhd_pub
, bssidx
, "proxy_arp_advertise", val
, sizeof(int_val
),
2322 if (bcmerror
== BCME_OK
) {
2323 dhd_set_parp_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
2327 case IOV_GVAL(IOV_GRAT_ARP
): {
2331 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2332 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__
));
2333 bcmerror
= BCME_BADARG
;
2336 int_val
= dhd_get_grat_arp_status(dhd_pub
, bssidx
);
2337 memcpy(arg
, &int_val
, val_size
);
2340 case IOV_SVAL(IOV_GRAT_ARP
): {
2344 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2345 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__
));
2346 bcmerror
= BCME_BADARG
;
2349 memcpy(&int_val
, val
, sizeof(int_val
));
2350 bcmerror
= dhd_set_grat_arp_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
2353 case IOV_GVAL(IOV_BLOCK_TDLS
): {
2357 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2358 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__
));
2359 bcmerror
= BCME_BADARG
;
2362 int_val
= dhd_get_block_tdls_status(dhd_pub
, bssidx
);
2363 memcpy(arg
, &int_val
, val_size
);
2366 case IOV_SVAL(IOV_BLOCK_TDLS
): {
2370 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2371 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__
));
2372 bcmerror
= BCME_BADARG
;
2375 memcpy(&int_val
, val
, sizeof(int_val
));
2376 bcmerror
= dhd_set_block_tdls_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
2379 #endif /* DHD_L2_FILTER */
2380 case IOV_SVAL(IOV_DHD_IE
): {
2384 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2385 DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__
));
2386 bcmerror
= BCME_BADARG
;
2392 case IOV_GVAL(IOV_AP_ISOLATE
): {
2396 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2397 DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__
));
2398 bcmerror
= BCME_BADARG
;
2402 int_val
= dhd_get_ap_isolate(dhd_pub
, bssidx
);
2403 bcopy(&int_val
, arg
, val_size
);
2406 case IOV_SVAL(IOV_AP_ISOLATE
): {
2410 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
2411 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__
));
2412 bcmerror
= BCME_BADARG
;
2417 bcopy(val
, &int_val
, sizeof(uint32
));
2418 dhd_set_ap_isolate(dhd_pub
, bssidx
, int_val
);
2422 case IOV_GVAL(IOV_PSTA
): {
2423 int_val
= dhd_get_psta_mode(dhd_pub
);
2424 bcopy(&int_val
, arg
, val_size
);
2427 case IOV_SVAL(IOV_PSTA
): {
2428 if (int_val
>= DHD_MODE_PSTA_DISABLED
&& int_val
<= DHD_MODE_PSR
) {
2429 dhd_set_psta_mode(dhd_pub
, int_val
);
2431 bcmerror
= BCME_RANGE
;
2435 #endif /* DHD_PSTA */
2437 case IOV_GVAL(IOV_WET
):
2438 int_val
= dhd_get_wet_mode(dhd_pub
);
2439 bcopy(&int_val
, arg
, val_size
);
2442 case IOV_SVAL(IOV_WET
):
2443 if (int_val
== 0 || int_val
== 1) {
2444 dhd_set_wet_mode(dhd_pub
, int_val
);
2445 /* Delete the WET DB when disabled */
2447 dhd_wet_sta_delete_list(dhd_pub
);
2450 bcmerror
= BCME_RANGE
;
2453 case IOV_SVAL(IOV_WET_HOST_IPV4
):
2454 dhd_set_wet_host_ipv4(dhd_pub
, params
, plen
);
2456 case IOV_SVAL(IOV_WET_HOST_MAC
):
2457 dhd_set_wet_host_mac(dhd_pub
, params
, plen
);
2459 #endif /* DHD_WET */
2460 #ifdef DHD_MCAST_REGEN
2461 case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE
): {
2465 if (dhd_iovar_parse_bssidx(dhd_pub
, (char *)name
, &bssidx
, &val
) != BCME_OK
) {
2466 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__
));
2467 bcmerror
= BCME_BADARG
;
2471 int_val
= dhd_get_mcast_regen_bss_enable(dhd_pub
, bssidx
);
2472 bcopy(&int_val
, arg
, val_size
);
2476 case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE
): {
2480 if (dhd_iovar_parse_bssidx(dhd_pub
, (char *)name
, &bssidx
, &val
) != BCME_OK
) {
2481 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__
));
2482 bcmerror
= BCME_BADARG
;
2487 bcopy(val
, &int_val
, sizeof(uint32
));
2488 dhd_set_mcast_regen_bss_enable(dhd_pub
, bssidx
, int_val
);
2491 #endif /* DHD_MCAST_REGEN */
2493 case IOV_GVAL(IOV_CFG80211_OPMODE
): {
2494 int_val
= (int32
)dhd_pub
->op_mode
;
2495 bcopy(&int_val
, arg
, sizeof(int_val
));
2498 case IOV_SVAL(IOV_CFG80211_OPMODE
): {
2500 bcmerror
= BCME_BADARG
;
2502 dhd_pub
->op_mode
= int_val
;
2506 case IOV_GVAL(IOV_ASSERT_TYPE
):
2507 int_val
= g_assert_type
;
2508 bcopy(&int_val
, arg
, val_size
);
2511 case IOV_SVAL(IOV_ASSERT_TYPE
):
2512 g_assert_type
= (uint32
)int_val
;
2515 case IOV_GVAL(IOV_LMTEST
): {
2516 *(uint32
*)arg
= (uint32
)lmtest
;
2520 case IOV_SVAL(IOV_LMTEST
): {
2521 uint32 val
= *(uint32
*)arg
;
2523 bcmerror
= BCME_BADARG
;
2526 DHD_ERROR(("%s: lmtest %s\n",
2527 __FUNCTION__
, (lmtest
== FALSE
)? "OFF" : "ON"));
2532 #ifdef SHOW_LOGTRACE
2533 case IOV_GVAL(IOV_DUMP_TRACE_LOG
): {
2534 trace_buf_info_t
*trace_buf_info
= (trace_buf_info_t
*)arg
;
2535 dhd_dbg_ring_t
*dbg_verbose_ring
= NULL
;
2537 dbg_verbose_ring
= dhd_dbg_get_ring_from_ring_id(dhd_pub
, FW_VERBOSE_RING_ID
);
2538 if (dbg_verbose_ring
== NULL
) {
2539 DHD_ERROR(("dbg_verbose_ring is NULL\n"));
2540 bcmerror
= BCME_UNSUPPORTED
;
2544 if (trace_buf_info
!= NULL
) {
2545 bzero(trace_buf_info
, sizeof(trace_buf_info_t
));
2546 dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring
, trace_buf_info
);
2548 DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__
));
2549 bcmerror
= BCME_NOMEM
;
2553 #endif /* SHOW_LOGTRACE */
2555 #if defined(BCMSDIO) || defined(BCMPCIE)
2556 case IOV_GVAL(IOV_DONGLE_TRAP_TYPE
):
2557 if (dhd_pub
->dongle_trap_occured
)
2558 int_val
= ltoh32(dhd_pub
->last_trap_info
.type
);
2561 bcopy(&int_val
, arg
, val_size
);
2564 case IOV_GVAL(IOV_DONGLE_TRAP_INFO
):
2566 struct bcmstrbuf strbuf
;
2567 bcm_binit(&strbuf
, arg
, len
);
2568 if (dhd_pub
->dongle_trap_occured
== FALSE
) {
2569 bcm_bprintf(&strbuf
, "no trap recorded\n");
2572 dhd_bus_dump_trap_info(dhd_pub
->bus
, &strbuf
);
2576 case IOV_GVAL(IOV_BPADDR
):
2581 memcpy(&sdreg
, params
, sizeof(sdreg
));
2583 addr
= sdreg
.offset
;
2586 bcmerror
= dhd_bus_readwrite_bp_addr(dhd_pub
, addr
, size
,
2587 (uint
*)&int_val
, TRUE
);
2589 memcpy(arg
, &int_val
, sizeof(int32
));
2594 case IOV_SVAL(IOV_BPADDR
):
2599 memcpy(&sdreg
, params
, sizeof(sdreg
));
2601 addr
= sdreg
.offset
;
2604 bcmerror
= dhd_bus_readwrite_bp_addr(dhd_pub
, addr
, size
,
2605 (uint
*)&sdreg
.value
,
2610 #endif /* BCMSDIO || BCMPCIE */
2612 case IOV_SVAL(IOV_FLOW_RING_DEBUG
):
2614 bcmerror
= dhd_flow_ring_debug(dhd_pub
, arg
, len
);
2617 #endif /* BCMPCIE */
2618 case IOV_SVAL(IOV_MEM_DEBUG
):
2620 bcmerror
= dhd_mem_debug(dhd_pub
, arg
, len
- 1);
2623 #endif /* DHD_DEBUG */
2624 #if defined(DHD_LOG_DUMP)
2625 case IOV_GVAL(IOV_LOG_DUMP
):
2627 dhd_prot_debug_info_print(dhd_pub
);
2628 dhd_log_dump_trigger(dhd_pub
, CMD_DEFAULT
);
2631 #endif /* DHD_LOG_DUMP */
2633 case IOV_GVAL(IOV_TPUT_TEST
):
2635 tput_test_t
*tput_data
= NULL
;
2636 if (params
&& plen
>= sizeof(tput_test_t
)) {
2637 tput_data
= (tput_test_t
*)params
;
2638 bcmerror
= dhd_tput_test(dhd_pub
, tput_data
);
2640 DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__
));
2641 bcmerror
= BCME_BADARG
;
2645 case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT
):
2647 if (dhd_pub
->debug_buf_dest_support
) {
2648 debug_buf_dest_stat_t
*debug_buf_dest_stat
=
2649 (debug_buf_dest_stat_t
*)arg
;
2650 memcpy(debug_buf_dest_stat
, dhd_pub
->debug_buf_dest_stat
,
2651 sizeof(dhd_pub
->debug_buf_dest_stat
));
2653 bcmerror
= BCME_DISABLED
;
2658 #if defined(DHD_SSSR_DUMP)
2659 case IOV_GVAL(IOV_FIS_TRIGGER
):
2660 bcmerror
= dhd_bus_fis_trigger(dhd_pub
);
2662 if (bcmerror
== BCME_OK
) {
2663 bcmerror
= dhd_bus_fis_dump(dhd_pub
);
2667 bcopy(&int_val
, arg
, val_size
);
2669 #endif /* defined(DHD_SSSR_DUMP) */
2672 case IOV_SVAL(IOV_INDUCE_ERROR
): {
2673 if (int_val
>= DHD_INDUCE_ERROR_MAX
) {
2674 DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__
, (uint16
)int_val
));
2676 dhd_pub
->dhd_induce_error
= (uint16
)int_val
;
2680 #endif /* DHD_DEBUG */
2681 #ifdef RTT_GEOFENCE_CONT
2682 #if defined(RTT_SUPPORT) && defined(WL_NAN)
2683 case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD
): {
2685 dhd_rtt_get_geofence_cont_ind(dhd_pub
, &enable
);
2686 int_val
= enable
? 1 : 0;
2687 bcopy(&int_val
, arg
, val_size
);
2690 case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD
): {
2691 bool enable
= *(bool *)arg
;
2692 dhd_rtt_set_geofence_cont_ind(dhd_pub
, enable
);
2695 #endif /* RTT_SUPPORT && WL_NAN */
2696 #endif /* RTT_GEOFENCE_CONT */
2697 case IOV_GVAL(IOV_FW_VBS
): {
2698 *(uint32
*)arg
= (uint32
)dhd_dbg_get_fwverbose(dhd_pub
);
2702 case IOV_SVAL(IOV_FW_VBS
): {
2706 dhd_dbg_set_fwverbose(dhd_pub
, (uint32
)int_val
);
2710 #ifdef DHD_TX_PROFILE
2711 case IOV_SVAL(IOV_TX_PROFILE_TAG
):
2713 /* note: under the current implementation only one type of packet may be
2714 * tagged per profile
2716 const dhd_tx_profile_protocol_t
*protocol
= NULL
;
2717 /* for example, we might have a profile of profile_index 6, but at
2718 * offset 2 from dhd_pub->protocol_filters.
2722 if (params
== NULL
) {
2723 bcmerror
= BCME_ERROR
;
2727 protocol
= (dhd_tx_profile_protocol_t
*)params
;
2730 if (protocol
->version
!= DHD_TX_PROFILE_VERSION
) {
2731 bcmerror
= BCME_VERSION
;
2734 if (protocol
->profile_index
> DHD_MAX_PROFILE_INDEX
) {
2735 DHD_ERROR(("%s:\tprofile index must be between 0 and %d\n",
2736 __FUNCTION__
, DHD_MAX_PROFILE_INDEX
));
2737 bcmerror
= BCME_RANGE
;
2740 if (protocol
->layer
!= DHD_TX_PROFILE_DATA_LINK_LAYER
&& protocol
->layer
2741 != DHD_TX_PROFILE_NETWORK_LAYER
) {
2742 DHD_ERROR(("%s:\tlayer must be %d or %d\n", __FUNCTION__
,
2743 DHD_TX_PROFILE_DATA_LINK_LAYER
,
2744 DHD_TX_PROFILE_NETWORK_LAYER
));
2745 bcmerror
= BCME_BADARG
;
2748 if (protocol
->protocol_number
> __UINT16_MAX__
) {
2749 DHD_ERROR(("%s:\tprotocol number must be <= %d\n", __FUNCTION__
,
2751 bcmerror
= BCME_BADLEN
;
2755 /* find the dhd_tx_profile_protocol_t */
2756 for (offset
= 0; offset
< dhd_pub
->num_profiles
; offset
++) {
2757 if (dhd_pub
->protocol_filters
[offset
].profile_index
==
2758 protocol
->profile_index
) {
2763 if (offset
>= DHD_MAX_PROFILES
) {
2764 #if DHD_MAX_PROFILES > 1
2765 DHD_ERROR(("%s:\tonly %d profiles supported at present\n",
2766 __FUNCTION__
, DHD_MAX_PROFILES
));
2767 #else /* DHD_MAX_PROFILES > 1 */
2768 DHD_ERROR(("%s:\tonly %d profile supported at present\n",
2769 __FUNCTION__
, DHD_MAX_PROFILES
));
2770 DHD_ERROR(("%s:\tthere is a profile of index %d\n", __FUNCTION__
,
2771 dhd_pub
->protocol_filters
->profile_index
));
2772 #endif /* DHD_MAX_PROFILES > 1 */
2773 bcmerror
= BCME_NOMEM
;
2777 /* memory already allocated in dhd_attach; just assign the value */
2778 dhd_pub
->protocol_filters
[offset
] = *protocol
;
2780 if (offset
>= dhd_pub
->num_profiles
) {
2781 dhd_pub
->num_profiles
= offset
+ 1;
2787 case IOV_SVAL(IOV_TX_PROFILE_ENABLE
):
2788 dhd_pub
->tx_profile_enab
= int_val
? TRUE
: FALSE
;
2791 case IOV_GVAL(IOV_TX_PROFILE_ENABLE
):
2792 int_val
= dhd_pub
->tx_profile_enab
;
2793 bcmerror
= memcpy_s(arg
, val_size
, &int_val
, sizeof(int_val
));
2796 case IOV_SVAL(IOV_TX_PROFILE_DUMP
):
2798 const dhd_tx_profile_protocol_t
*protocol
= NULL
;
2800 char *format
= "%s:\ttx_profile %s: %d\n";
2802 for (offset
= 0; offset
< dhd_pub
->num_profiles
; offset
++) {
2803 if (dhd_pub
->protocol_filters
[offset
].profile_index
== int_val
) {
2804 protocol
= &(dhd_pub
->protocol_filters
[offset
]);
2809 if (protocol
== NULL
) {
2810 DHD_ERROR(("%s:\tno profile with index %d\n", __FUNCTION__
,
2812 bcmerror
= BCME_ERROR
;
2816 printf(format
, __FUNCTION__
, "profile_index", protocol
->profile_index
);
2817 printf(format
, __FUNCTION__
, "layer", protocol
->layer
);
2818 printf(format
, __FUNCTION__
, "protocol_number", protocol
->protocol_number
);
2819 printf(format
, __FUNCTION__
, "src_port", protocol
->src_port
);
2820 printf(format
, __FUNCTION__
, "dest_port", protocol
->dest_port
);
2824 #endif /* defined(DHD_TX_PROFILE) */
2827 bcmerror
= BCME_UNSUPPORTED
;
2832 DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__
, actionid
, bcmerror
));
2836 /* Store the status of a connection attempt for later retrieval by an iovar */
2838 dhd_store_conn_status(uint32 event
, uint32 status
, uint32 reason
)
2840 /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
2841 * because an encryption/rsn mismatch results in both events, and
2842 * the important information is in the WLC_E_PRUNE.
2844 if (!(event
== WLC_E_SET_SSID
&& status
== WLC_E_STATUS_FAIL
&&
2845 dhd_conn_event
== WLC_E_PRUNE
)) {
2846 dhd_conn_event
= event
;
2847 dhd_conn_status
= status
;
2848 dhd_conn_reason
= reason
;
2853 dhd_prec_enq(dhd_pub_t
*dhdp
, struct pktq
*q
, void *pkt
, int prec
)
2856 int eprec
= -1; /* precedence to evict from */
2857 bool discard_oldest
;
2859 /* Fast case, precedence queue is not full and we are also not
2860 * exceeding total queue length
2862 if (!pktqprec_full(q
, prec
) && !pktq_full(q
)) {
2863 pktq_penq(q
, prec
, pkt
);
2867 /* Determine precedence from which to evict packet, if any */
2868 if (pktqprec_full(q
, prec
))
2870 else if (pktq_full(q
)) {
2871 p
= pktq_peek_tail(q
, &eprec
);
2873 if (eprec
> prec
|| eprec
< 0)
2877 /* Evict if needed */
2879 /* Detect queueing to unconfigured precedence */
2880 ASSERT(!pktqprec_empty(q
, eprec
));
2881 discard_oldest
= AC_BITMAP_TST(dhdp
->wme_dp
, eprec
);
2882 if (eprec
== prec
&& !discard_oldest
)
2883 return FALSE
; /* refuse newer (incoming) packet */
2884 /* Evict packet according to discard policy */
2885 p
= discard_oldest
? pktq_pdeq(q
, eprec
) : pktq_pdeq_tail(q
, eprec
);
2887 #ifdef DHDTCPACK_SUPPRESS
2888 if (dhd_tcpack_check_xmit(dhdp
, p
) == BCME_ERROR
) {
2889 DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
2890 __FUNCTION__
, __LINE__
));
2891 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
2893 #endif /* DHDTCPACK_SUPPRESS */
2894 PKTFREE(dhdp
->osh
, p
, TRUE
);
2898 p
= pktq_penq(q
, prec
, pkt
);
/*
 * dhd_prec_drop_pkts() - drop packet(s) from precedence 'prec' of queue 'pq',
 * invoking callback 'fn' on each packet removed.  Drop strategy is described
 * in the original comment below: prefer one non-fragmented packet, else one
 * complete fragment set, else the oldest packet.
 * NOTE(review): this extraction is missing many original source lines (loop
 * headers, braces, 'break's between the numbered fragments) — do not treat
 * the fragment below as a complete, compilable function body.
 */
2905 * Functions to drop proper pkts from queue:
2906 * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
2907 * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
2908 * If can't find pkts matching upper 2 cases, drop first pkt anyway
2911 dhd_prec_drop_pkts(dhd_pub_t
*dhdp
, struct pktq
*pq
, int prec
, f_droppkt_t fn
)
2913 struct pktq_prec
*q
= NULL
;
2914 void *p
, *prev
= NULL
, *next
= NULL
, *first
= NULL
, *last
= NULL
, *prev_first
= NULL
;
2915 pkt_frag_t frag_info
;
2918 ASSERT(prec
>= 0 && prec
< pq
->num_prec
);
/* walk the queue classifying each packet's fragmentation state
 * (surrounding loop header lost in extraction) */
2927 frag_info
= pkt_frag_info(dhdp
->osh
, p
);
2928 if (frag_info
== DHD_PKT_FRAG_NONE
) {
2930 } else if (frag_info
== DHD_PKT_FRAG_FIRST
) {
2932 /* No last frag pkt, use prev as last */
2939 } else if (frag_info
== DHD_PKT_FRAG_LAST
) {
2950 if ((p
== NULL
) || ((frag_info
!= DHD_PKT_FRAG_NONE
) && !(first
&& last
))) {
2951 /* Not found matching pkts, use oldest */
2957 if (frag_info
== DHD_PKT_FRAG_NONE
) {
/* unlink the selected packet and hand it to the drop callback */
2972 PKTSETLINK(p
, NULL
);
2975 fn(dhdp
, prec
, p
, TRUE
);
/* re-stitch the queue around the removed fragment chain */
2983 if (prev_first
== NULL
) {
2984 if ((q
->head
= next
) == NULL
)
2987 PKTSETLINK(prev_first
, next
);
2989 q
->tail
= prev_first
;
2996 dhd_iovar_op(dhd_pub_t
*dhd_pub
, const char *name
,
2997 void *params
, int plen
, void *arg
, uint len
, bool set
)
3001 const bcm_iovar_t
*vi
= NULL
;
3004 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3008 /* Get MUST have return space */
3009 ASSERT(set
|| (arg
&& len
));
3011 /* Set does NOT take qualifiers */
3012 ASSERT(!set
|| (!params
&& !plen
));
3014 if ((vi
= bcm_iovar_lookup(dhd_iovars
, name
)) == NULL
) {
3015 bcmerror
= BCME_UNSUPPORTED
;
3019 DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__
,
3020 name
, (set
? "set" : "get"), len
, plen
));
3022 /* set up 'params' pointer in case this is a set command so that
3023 * the convenience int and bool code can be common to set and get
3025 if (params
== NULL
) {
3030 if (vi
->type
== IOVT_VOID
)
3032 else if (vi
->type
== IOVT_BUFFER
)
3035 /* all other types are integer sized */
3036 val_size
= sizeof(int);
3038 actionid
= set
? IOV_SVAL(vi
->varid
) : IOV_GVAL(vi
->varid
);
3040 bcmerror
= dhd_doiovar(dhd_pub
, vi
, actionid
, name
, params
, plen
, arg
, len
, val_size
);
/*
 * dhd_ioctl() - top-level DHD ioctl dispatcher.  Handles magic/version
 * queries, then (for GET_VAR/SET_VAR) guards against bus-down/suspend
 * states and routes the named iovar first to the generic table
 * (dhd_iovar_op), then the protocol module, then the bus module.
 * NOTE(review): this extraction dropped several original source lines
 * (case labels, '#if' openers, else-arms, 'break's) between the numbered
 * fragments below — do not treat this as a complete function body.
 */
3047 dhd_ioctl(dhd_pub_t
* dhd_pub
, dhd_ioctl_t
*ioc
, void *buf
, uint buflen
)
3050 unsigned long flags
;
3052 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3058 dhd_os_dhdiovar_lock(dhd_pub
);
/* presumably the DHD_GET_MAGIC case — case label lost in extraction */
3061 if (buflen
< sizeof(int))
3062 bcmerror
= BCME_BUFTOOSHORT
;
3064 *(int*)buf
= DHD_IOCTL_MAGIC
;
3067 case DHD_GET_VERSION
:
3068 if (buflen
< sizeof(int))
3069 bcmerror
= BCME_BUFTOOSHORT
;
3071 *(int*)buf
= DHD_IOCTL_VERSION
;
/* GET_VAR/SET_VAR path: refuse most iovars while the bus is down,
 * except "devreset" (needed for FW download on FC19-like platforms) */
3080 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
3081 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub
) &&
3082 bcmstricmp((char *)buf
, "devreset")) {
3083 /* In platforms like FC19, the FW download is done via IOCTL
3084 * and should not return error for IOCTLs fired before FW
3087 if (dhd_fw_download_status(dhd_pub
) == FW_DOWNLOAD_DONE
) {
3088 DHD_ERROR(("%s: returning as busstate=%d\n",
3089 __FUNCTION__
, dhd_pub
->busstate
));
3090 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
3091 dhd_os_dhdiovar_unlock(dhd_pub
);
3095 DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub
);
3096 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
3098 #ifdef DHD_PCIE_RUNTIMEPM
3099 dhdpcie_runtime_bus_wake(dhd_pub
, TRUE
, dhd_ioctl
);
3100 #endif /* DHD_PCIE_RUNTIMEPM */
3102 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
3103 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub
)) {
3104 /* If Suspend/Resume is tested via pcie_suspend IOVAR
3105 * then continue to execute the IOVAR, return from here for
3106 * other IOVARs, also include pciecfgreg and devreset to go
3109 if (bcmstricmp((char *)buf
, "pcie_suspend") &&
3110 bcmstricmp((char *)buf
, "pciecfgreg") &&
3111 bcmstricmp((char *)buf
, "devreset") &&
3112 bcmstricmp((char *)buf
, "sdio_suspend")) {
3113 DHD_ERROR(("%s: bus is in suspend(%d)"
3114 "or suspending(0x%x) state\n",
3115 __FUNCTION__
, dhd_pub
->busstate
,
3116 dhd_pub
->dhd_bus_busy_state
));
3117 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub
);
3118 dhd_os_busbusy_wake(dhd_pub
);
3119 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
3120 dhd_os_dhdiovar_unlock(dhd_pub
);
3124 /* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup,
3125 * which will wait for all the busy contexts to get over for
3126 * particular time and call ASSERT if timeout happens. As during
3127 * devreset ioctal, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR,
3128 * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
3129 * not used in Production platforms but only used in FC19 setups.
3131 if (!bcmstricmp((char *)buf
, "devreset") ||
3133 (dhd_bus_is_multibp_capable(dhd_pub
->bus
) &&
3134 !bcmstricmp((char *)buf
, "dwnldstate")) ||
3135 #endif /* BCMPCIE */
3138 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub
);
3140 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
3142 /* scan past the name to any arguments */
3143 for (arg
= buf
, arglen
= buflen
; *arg
&& arglen
; arg
++, arglen
--)
3146 if (arglen
== 0 || *arg
) {
3147 bcmerror
= BCME_BUFTOOSHORT
;
3151 /* account for the NUL terminator */
3153 /* call with the appropriate arguments */
3154 if (ioc
->cmd
== DHD_GET_VAR
) {
3155 bcmerror
= dhd_iovar_op(dhd_pub
, buf
, arg
, arglen
,
3156 buf
, buflen
, IOV_GET
);
3158 bcmerror
= dhd_iovar_op(dhd_pub
, buf
, NULL
, 0,
3159 arg
, arglen
, IOV_SET
);
3161 if (bcmerror
!= BCME_UNSUPPORTED
) {
3165 /* not in generic table, try protocol module */
3166 if (ioc
->cmd
== DHD_GET_VAR
) {
3167 bcmerror
= dhd_prot_iovar_op(dhd_pub
, buf
, arg
,
3168 arglen
, buf
, buflen
, IOV_GET
);
3170 bcmerror
= dhd_prot_iovar_op(dhd_pub
, buf
,
3171 NULL
, 0, arg
, arglen
, IOV_SET
);
3173 if (bcmerror
!= BCME_UNSUPPORTED
) {
3177 /* if still not found, try bus module */
3178 if (ioc
->cmd
== DHD_GET_VAR
) {
3179 bcmerror
= dhd_bus_iovar_op(dhd_pub
, buf
,
3180 arg
, arglen
, buf
, buflen
, IOV_GET
);
3182 bcmerror
= dhd_bus_iovar_op(dhd_pub
, buf
,
3183 NULL
, 0, arg
, arglen
, IOV_SET
);
3185 if (bcmerror
!= BCME_UNSUPPORTED
) {
3193 bcmerror
= BCME_UNSUPPORTED
;
3195 dhd_os_dhdiovar_unlock(dhd_pub
);
/* common exit: clear busy flag, wake waiters, release the iovar lock */
3199 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
3200 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub
);
3201 dhd_os_busbusy_wake(dhd_pub
);
3202 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
3203 dhd_os_dhdiovar_unlock(dhd_pub
);
/*
 * wl_show_roam_event() - pretty-print a WLC_E_ROAM firmware event.
 * SUCCESS/FAIL print a one-liner; NO_NETWORKS additionally parses the
 * event payload's XTLV (WLC_ROAM_NO_NETWORKS_TLV_ID) for a fail reason.
 * NOTE(review): this extraction dropped lines ('break's, closing braces,
 * trailing printf arguments) between the numbered fragments below — do
 * not treat this as a complete function body.
 */
3210 wl_show_roam_event(dhd_pub_t
*dhd_pub
, uint status
, uint datalen
,
3211 const char *event_name
, char *eabuf
, void *event_data
)
3213 if (status
== WLC_E_STATUS_SUCCESS
) {
3214 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
3216 if (status
== WLC_E_STATUS_FAIL
) {
3217 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name
, status
));
3218 } else if (status
== WLC_E_STATUS_NO_NETWORKS
) {
/* payload starts with an element id; proprietary ids carry XTLVs */
3220 uint8 id
= *((uint8
*)event_data
);
3221 if (id
!= DOT11_MNG_PROPR_ID
) {
3222 wl_roam_event_t
*roam_data
=
3223 (wl_roam_event_t
*)event_data
;
3224 bcm_xtlv_t
*tlv
= (bcm_xtlv_t
*)roam_data
->xtlvs
;
3225 if (tlv
->id
== WLC_ROAM_NO_NETWORKS_TLV_ID
) {
3226 uint32
*fail_reason
= (uint32
*)tlv
->data
;
3227 switch (*fail_reason
) {
3228 case WLC_E_REASON_NO_NETWORKS
:
3229 DHD_EVENT(("MACEVENT: %s,"
3230 " no networks found\n",
3233 case WLC_E_REASON_NO_NETWORKS_BY_SCORE
:
3234 DHD_EVENT(("MACEVENT: %s,"
3235 " no networks found by score\n",
3239 DHD_ERROR(("MACEVENT: %s,"
3240 " unknown fail reason 0x%x\n",
3246 DHD_EVENT(("MACEVENT: %s,"
3247 " no networks found\n",
3251 DHD_EVENT(("MACEVENT: %s,"
3252 " no networks found\n",
3256 DHD_EVENT(("MACEVENT: %s, no networks found\n",
3260 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
3261 event_name
, (int)status
));
/*
 * wl_show_roam_cache_update_event() - pretty-print a WLC_E_ROAM_CACHE_UPDATE
 * event: logs the cache-update reason, then walks the event's XTLV list
 * (WL_RMC_RPT_CMD_DATA container) printing BSS info and candidate info
 * entries, mapping reason/status codes to names via the lookup tables below.
 * NOTE(review): this extraction dropped lines (struct openers, 'break's,
 * switch/case framing) between the numbered fragments below — do not treat
 * this as a complete function body.
 */
3267 wl_show_roam_cache_update_event(const char *name
, uint status
,
3268 uint reason
, uint datalen
, void *event_data
)
3270 wlc_roam_cache_update_event_t
*cache_update
;
3273 bcm_xtlv_t
*val_xtlv
;
3274 char ntoa_buf
[ETHER_ADDR_STR_LEN
];
3276 const char* reason_name
= NULL
;
3277 const char* status_name
= NULL
;
/* code -> human-readable-name table for roam reasons
 * (struct opener lines lost in extraction) */
3280 const char *event_name
;
3281 } reason_names
[] = {
3282 {WLC_E_REASON_INITIAL_ASSOC
, "INITIAL ASSOCIATION"},
3283 {WLC_E_REASON_LOW_RSSI
, "LOW_RSSI"},
3284 {WLC_E_REASON_DEAUTH
, "RECEIVED DEAUTHENTICATION"},
3285 {WLC_E_REASON_DISASSOC
, "RECEIVED DISASSOCATION"},
3286 {WLC_E_REASON_BCNS_LOST
, "BEACONS LOST"},
3287 {WLC_E_REASON_BETTER_AP
, "BETTER AP FOUND"},
3288 {WLC_E_REASON_MINTXRATE
, "STUCK AT MIN TX RATE"},
3289 {WLC_E_REASON_BSSTRANS_REQ
, "REQUESTED ROAM"},
3290 {WLC_E_REASON_TXFAIL
, "TOO MANY TXFAILURES"}
/* code -> human-readable-name table for event status values */
3295 const char *event_name
;
3296 } status_names
[] = {
3297 {WLC_E_STATUS_SUCCESS
, "operation was successful"},
3298 {WLC_E_STATUS_FAIL
, "operation failed"},
3299 {WLC_E_STATUS_TIMEOUT
, "operation timed out"},
3300 {WLC_E_STATUS_NO_NETWORKS
, "failed due to no matching network found"},
3301 {WLC_E_STATUS_ABORT
, "operation was aborted"},
3302 {WLC_E_STATUS_NO_ACK
, "protocol failure: packet not ack'd"},
3303 {WLC_E_STATUS_UNSOLICITED
, "AUTH or ASSOC packet was unsolicited"},
3304 {WLC_E_STATUS_ATTEMPT
, "attempt to assoc to an auto auth configuration"},
3305 {WLC_E_STATUS_PARTIAL
, "scan results are incomplete"},
3306 {WLC_E_STATUS_NEWSCAN
, "scan aborted by another scan"},
3307 {WLC_E_STATUS_NEWASSOC
, "scan aborted due to assoc in progress"},
3308 {WLC_E_STATUS_11HQUIET
, "802.11h quiet period started"},
3309 {WLC_E_STATUS_SUPPRESS
, "user disabled scanning"},
3310 {WLC_E_STATUS_NOCHANS
, "no allowable channels to scan"},
3311 {WLC_E_STATUS_CS_ABORT
, "abort channel select"},
3312 {WLC_E_STATUS_ERROR
, "request failed due to error"},
3313 {WLC_E_STATUS_INVALID
, "Invalid status code"}
/* switch over the cache-update 'reason' (switch opener lost in extraction) */
3317 case WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE
:
3318 DHD_EVENT(("Current roam cache status %d, "
3319 "reason for cache update is new roam cache\n", status
));
3321 case WLC_ROAM_CACHE_UPDATE_JOIN
:
3322 DHD_EVENT(("Current roam cache status %d, "
3323 "reason for cache update is start of join\n", status
));
3325 case WLC_ROAM_CACHE_UPDATE_RSSI_DELTA
:
3326 DHD_EVENT(("Current roam cache status %d, "
3327 "reason for cache update is delta in rssi\n", status
));
3329 case WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA
:
3330 DHD_EVENT(("Current roam cache status %d, "
3331 "reason for cache update is motion delta in rssi\n", status
));
3333 case WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS
:
3334 DHD_EVENT(("Current roam cache status %d, "
3335 "reason for cache update is missed channel\n", status
));
3337 case WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN
:
3338 DHD_EVENT(("Current roam cache status %d, "
3339 "reason for cache update is start of split scan\n", status
));
3341 case WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN
:
3342 DHD_EVENT(("Current roam cache status %d, "
3343 "reason for cache update is start of full scan\n", status
));
3345 case WLC_ROAM_CACHE_UPDATE_INIT_ASSOC
:
3346 DHD_EVENT(("Current roam cache status %d, "
3347 "reason for cache update is init association\n", status
));
3349 case WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED
:
3350 DHD_EVENT(("Current roam cache status %d, "
3351 "reason for cache update is failure in full scan\n", status
));
3353 case WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND
:
3354 DHD_EVENT(("Current roam cache status %d, "
3355 "reason for cache update is empty scan result\n", status
));
3357 case WLC_ROAM_CACHE_UPDATE_MISSING_AP
:
3358 DHD_EVENT(("Current roam cache status %d, "
3359 "reason for cache update is missed ap\n", status
));
3362 DHD_EVENT(("Current roam cache status %d, "
3363 "reason for cache update is unknown %d\n", status
, reason
));
/* validate payload length before touching the event structure */
3367 if (datalen
< sizeof(wlc_roam_cache_update_event_t
)) {
3368 DHD_ERROR(("MACEVENT: %s, missing event data\n", name
));
3372 cache_update
= (wlc_roam_cache_update_event_t
*)event_data
;
3373 val_tlv_ptr
= (void *)cache_update
->xtlvs
;
3374 len_of_tlvs
= datalen
- sizeof(wlc_roam_cache_update_event_t
);
3375 val_xtlv
= (bcm_xtlv_t
*)val_tlv_ptr
;
3376 if (val_xtlv
->id
!= WL_RMC_RPT_CMD_DATA
) {
3377 DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
3378 name
, val_xtlv
->id
));
/* step inside the WL_RMC_RPT_CMD_DATA container and walk nested XTLVs */
3381 val_tlv_ptr
= (uint8
*)val_tlv_ptr
+ BCM_XTLV_HDR_SIZE
;
3382 len_of_tlvs
= val_xtlv
->len
;
3384 while (len_of_tlvs
&& len_of_tlvs
> BCM_XTLV_HDR_SIZE
) {
3385 val_xtlv
= (bcm_xtlv_t
*)val_tlv_ptr
;
3386 switch (val_xtlv
->id
) {
3387 case WL_RMC_RPT_XTLV_BSS_INFO
:
3389 rmc_bss_info_v1_t
*bss_info
= (rmc_bss_info_v1_t
*)(val_xtlv
->data
);
3390 DHD_EVENT(("\t Current BSS INFO:\n"));
3391 DHD_EVENT(("\t\tRSSI: %d\n", bss_info
->rssi
));
3392 DHD_EVENT(("\t\tNumber of full scans performed "
3393 "on current BSS: %d\n", bss_info
->fullscan_count
));
3394 for (idx
= 0; idx
< ARRAYSIZE(reason_names
); idx
++) {
3395 if (reason_names
[idx
].event
== bss_info
->reason
) {
3396 reason_name
= reason_names
[idx
].event_name
;
3399 DHD_EVENT(("\t\tReason code for last full scan: %s(%d)\n",
3400 reason_name
, bss_info
->reason
));
3401 DHD_EVENT(("\t\tDelta between current time and "
3402 "last full scan: %d\n", bss_info
->time_full_scan
));
3403 for (idx
= 0; idx
< ARRAYSIZE(status_names
); idx
++) {
3404 if (status_names
[idx
].event
== bss_info
->status
)
3405 status_name
= status_names
[idx
].event_name
;
3407 DHD_EVENT(("\t\tLast status code for not roaming: %s(%d)\n",
3408 status_name
, bss_info
->status
));
3412 case WL_RMC_RPT_XTLV_CANDIDATE_INFO
:
3413 case WL_RMC_RPT_XTLV_USER_CACHE_INFO
:
3415 rmc_candidate_info_v1_t
*candidate_info
=
3416 (rmc_candidate_info_v1_t
*)(val_xtlv
->data
);
3417 if (val_xtlv
->id
== WL_RMC_RPT_XTLV_CANDIDATE_INFO
) {
3418 DHD_EVENT(("\t Candidate INFO:\n"));
3420 DHD_EVENT(("\t User Candidate INFO:\n"));
3422 DHD_EVENT(("\t\tBSSID: %s\n",
3423 bcm_ether_ntoa((const struct ether_addr
*)
3424 &candidate_info
->bssid
, ntoa_buf
)));
3425 DHD_EVENT(("\t\tRSSI: %d\n", candidate_info
->rssi
));
3426 DHD_EVENT(("\t\tChannel: %d\n", candidate_info
->ctl_channel
));
3427 DHD_EVENT(("\t\tDelta between current time and last "
3428 "seen time: %d\n", candidate_info
->time_last_seen
));
3429 DHD_EVENT(("\t\tBSS load: %d\n", candidate_info
->bss_load
));
3433 DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
3434 name
, val_xtlv
->id
));
/* advance to the next XTLV entry */
3437 val_tlv_ptr
= (uint8
*)val_tlv_ptr
+ bcm_xtlv_size(val_xtlv
,
3438 BCM_XTLV_OPTION_NONE
);
3439 len_of_tlvs
-= (uint16
)bcm_xtlv_size(val_xtlv
, BCM_XTLV_OPTION_NONE
);
3444 wl_show_host_event(dhd_pub_t
*dhd_pub
, wl_event_msg_t
*event
, void *event_data
,
3445 void *raw_event_ptr
, char *eventmask
)
3447 uint i
, status
, reason
;
3448 bool group
= FALSE
, flush_txq
= FALSE
, link
= FALSE
;
3449 bool host_data
= FALSE
; /* prints event data after the case when set */
3450 const char *auth_str
;
3451 const char *event_name
;
3453 char err_msg
[256], eabuf
[ETHER_ADDR_STR_LEN
];
3454 uint event_type
, flags
, auth_type
, datalen
;
3456 event_type
= ntoh32(event
->event_type
);
3457 flags
= ntoh16(event
->flags
);
3458 status
= ntoh32(event
->status
);
3459 reason
= ntoh32(event
->reason
);
3460 BCM_REFERENCE(reason
);
3461 auth_type
= ntoh32(event
->auth_type
);
3462 datalen
= (event_data
!= NULL
) ? ntoh32(event
->datalen
) : 0;
3464 /* debug dump of event messages */
3465 snprintf(eabuf
, sizeof(eabuf
), MACDBG
, MAC2STRDBG(event
->addr
.octet
));
3467 event_name
= bcmevent_get_name(event_type
);
3468 BCM_REFERENCE(event_name
);
3470 if (flags
& WLC_EVENT_MSG_LINK
)
3472 if (flags
& WLC_EVENT_MSG_GROUP
)
3474 if (flags
& WLC_EVENT_MSG_FLUSHTXQ
)
3477 switch (event_type
) {
3480 case WLC_E_DISASSOC
:
3481 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
3484 case WLC_E_ASSOC_IND
:
3485 case WLC_E_REASSOC_IND
:
3487 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
3493 if (status
== WLC_E_STATUS_SUCCESS
) {
3494 DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name
, eabuf
));
3495 } else if (status
== WLC_E_STATUS_TIMEOUT
) {
3496 DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name
, eabuf
));
3497 } else if (status
== WLC_E_STATUS_FAIL
) {
3498 DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
3499 event_name
, eabuf
, (int)status
, (int)reason
));
3500 } else if (status
== WLC_E_STATUS_SUPPRESS
) {
3501 DHD_EVENT(("MACEVENT: %s, MAC %s, SUPPRESS\n", event_name
, eabuf
));
3502 } else if (status
== WLC_E_STATUS_NO_ACK
) {
3503 DHD_EVENT(("MACEVENT: %s, MAC %s, NOACK\n", event_name
, eabuf
));
3505 DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
3506 event_name
, eabuf
, (int)status
));
3511 case WLC_E_DEAUTH_IND
:
3512 case WLC_E_DISASSOC_IND
:
3513 DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name
, eabuf
, (int)reason
));
3517 case WLC_E_AUTH_IND
:
3518 if (auth_type
== DOT11_OPEN_SYSTEM
)
3519 auth_str
= "Open System";
3520 else if (auth_type
== DOT11_SHARED_KEY
)
3521 auth_str
= "Shared Key";
3522 else if (auth_type
== DOT11_SAE
)
3525 snprintf(err_msg
, sizeof(err_msg
), "AUTH unknown: %d", (int)auth_type
);
3529 if (event_type
== WLC_E_AUTH_IND
) {
3530 DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name
, eabuf
, auth_str
));
3531 } else if (status
== WLC_E_STATUS_SUCCESS
) {
3532 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
3533 event_name
, eabuf
, auth_str
));
3534 } else if (status
== WLC_E_STATUS_TIMEOUT
) {
3535 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
3536 event_name
, eabuf
, auth_str
));
3537 } else if (status
== WLC_E_STATUS_FAIL
) {
3538 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
3539 event_name
, eabuf
, auth_str
, (int)status
, (int)reason
));
3540 } else if (status
== WLC_E_STATUS_SUPPRESS
) {
3541 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUPPRESS\n",
3542 event_name
, eabuf
, auth_str
));
3543 } else if (status
== WLC_E_STATUS_NO_ACK
) {
3544 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
3545 event_name
, eabuf
, auth_str
));
3547 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
3548 event_name
, eabuf
, auth_str
, (int)status
, (int)reason
));
3550 BCM_REFERENCE(auth_str
);
3555 wl_show_roam_event(dhd_pub
, status
, datalen
,
3556 event_name
, eabuf
, event_data
);
3558 case WLC_E_ROAM_START
:
3559 if (datalen
>= sizeof(wlc_roam_start_event_t
)) {
3560 const wlc_roam_start_event_t
*roam_start
=
3561 (wlc_roam_start_event_t
*)event_data
;
3562 DHD_EVENT(("MACEVENT: %s, current bss rssi %d\n",
3563 event_name
, (int)roam_start
->rssi
));
3566 case WLC_E_ROAM_PREP
:
3567 if (datalen
>= sizeof(wlc_roam_prep_event_t
)) {
3568 const wlc_roam_prep_event_t
*roam_prep
=
3569 (wlc_roam_prep_event_t
*)event_data
;
3570 DHD_EVENT(("MACEVENT: %s, target bss rssi %d\n",
3571 event_name
, (int)roam_prep
->rssi
));
3574 case WLC_E_ROAM_CACHE_UPDATE
:
3575 DHD_EVENT(("MACEVENT: %s\n", event_name
));
3576 wl_show_roam_cache_update_event(event_name
, status
,
3577 reason
, datalen
, event_data
);
3580 case WLC_E_SET_SSID
:
3581 if (status
== WLC_E_STATUS_SUCCESS
) {
3582 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
3584 if (status
== WLC_E_STATUS_FAIL
) {
3585 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name
, status
));
3586 } else if (status
== WLC_E_STATUS_NO_NETWORKS
) {
3587 DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name
));
3589 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
3590 event_name
, (int)status
));
3595 case WLC_E_BEACON_RX
:
3596 if (status
== WLC_E_STATUS_SUCCESS
) {
3597 DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name
));
3598 } else if (status
== WLC_E_STATUS_FAIL
) {
3599 DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name
));
3601 DHD_EVENT(("MACEVENT: %s, status %d\n", event_name
, status
));
3606 DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d reason:%d\n",
3607 event_name
, link
?"UP":"DOWN", flags
, status
, reason
));
3608 #ifdef PCIE_FULL_DONGLE
3610 BCM_REFERENCE(link
);
3613 case WLC_E_MIC_ERROR
:
3614 DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
3615 event_name
, eabuf
, group
, flush_txq
));
3616 BCM_REFERENCE(group
);
3617 BCM_REFERENCE(flush_txq
);
3620 case WLC_E_ICV_ERROR
:
3621 case WLC_E_UNICAST_DECODE_ERROR
:
3622 case WLC_E_MULTICAST_DECODE_ERROR
:
3623 DHD_EVENT(("MACEVENT: %s, MAC %s\n",
3624 event_name
, eabuf
));
3628 DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name
, eabuf
, status
));
3631 case WLC_E_ASSOC_REQ_IE
:
3632 case WLC_E_ASSOC_RESP_IE
:
3633 case WLC_E_PMKID_CACHE
:
3634 DHD_EVENT(("MACEVENT: %s\n", event_name
));
3637 case WLC_E_SCAN_COMPLETE
:
3638 DHD_EVENT(("MACEVENT: %s\n", event_name
));
3640 case WLC_E_RSSI_LQM
:
3641 case WLC_E_PFN_NET_FOUND
:
3642 case WLC_E_PFN_NET_LOST
:
3643 case WLC_E_PFN_SCAN_COMPLETE
:
3644 case WLC_E_PFN_SCAN_NONE
:
3645 case WLC_E_PFN_SCAN_ALLGONE
:
3646 case WLC_E_PFN_GSCAN_FULL_RESULT
:
3647 case WLC_E_PFN_SSID_EXT
:
3648 DHD_EVENT(("PNOEVENT: %s\n", event_name
));
3651 case WLC_E_PFN_SCAN_BACKOFF
:
3652 case WLC_E_PFN_BSSID_SCAN_BACKOFF
:
3653 DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
3654 event_name
, (int)status
, (int)reason
));
3659 DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
3660 event_name
, (int)status
, (int)reason
));
3663 #ifdef WIFI_ACT_FRAME
3664 case WLC_E_ACTION_FRAME
:
3665 DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name
, eabuf
));
3667 case WLC_E_ACTION_FRAME_COMPLETE
:
3668 if (datalen
>= sizeof(uint32
)) {
3669 const uint32
*pktid
= event_data
;
3670 BCM_REFERENCE(pktid
);
3671 DHD_EVENT(("MACEVENT: %s status %d, reason %d, pktid 0x%x\n",
3672 event_name
, (int)status
, (int)reason
, *pktid
));
3675 #endif /* WIFI_ACT_FRAME */
3677 #ifdef SHOW_LOGTRACE
3680 dhd_dbg_trace_evnt_handler(dhd_pub
, event_data
, raw_event_ptr
, datalen
);
3683 #endif /* SHOW_LOGTRACE */
3686 if (datalen
>= sizeof(int)) {
3687 DHD_EVENT(("MACEVENT: %s %d\n", event_name
, ntoh32(*((int *)event_data
))));
3691 case WLC_E_SERVICE_FOUND
:
3692 case WLC_E_P2PO_ADD_DEVICE
:
3693 case WLC_E_P2PO_DEL_DEVICE
:
3694 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
3697 #ifdef BT_WIFI_HANDOBER
3698 case WLC_E_BT_WIFI_HANDOVER_REQ
:
3699 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
3703 case WLC_E_CCA_CHAN_QUAL
:
3704 if (datalen
>= sizeof(cca_chan_qual_event_t
)) {
3705 const cca_chan_qual_event_t
*cca_event
=
3706 (cca_chan_qual_event_t
*)event_data
;
3707 if (cca_event
->id
== WL_CHAN_QUAL_FULLPM_CCA
) {
3708 const cca_only_chan_qual_event_t
*cca_only_event
=
3709 (const cca_only_chan_qual_event_t
*)cca_event
;
3710 BCM_REFERENCE(cca_only_event
);
3712 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
3713 " channel 0x%02x\n",
3714 event_name
, event_type
, eabuf
, (int)status
,
3715 (int)reason
, (int)auth_type
, cca_event
->chanspec
));
3717 "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
3719 cca_only_event
->cca_busy_ext
.duration
,
3720 cca_only_event
->cca_busy_ext
.congest_ibss
,
3721 cca_only_event
->cca_busy_ext
.congest_obss
,
3722 cca_only_event
->cca_busy_ext
.interference
,
3723 cca_only_event
->cca_busy_ext
.timestamp
));
3725 "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
3726 cca_only_event
->cca_busy_nopm
.duration
,
3727 cca_only_event
->cca_busy_nopm
.congest_ibss
,
3728 cca_only_event
->cca_busy_nopm
.congest_obss
,
3729 cca_only_event
->cca_busy_nopm
.interference
));
3731 "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
3732 cca_only_event
->cca_busy_pm
.duration
,
3733 cca_only_event
->cca_busy_pm
.congest_ibss
,
3734 cca_only_event
->cca_busy_pm
.congest_obss
,
3735 cca_only_event
->cca_busy_pm
.interference
));
3736 } else if (cca_event
->id
== WL_CHAN_QUAL_FULL_CCA
) {
3738 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
3739 " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
3741 event_name
, event_type
, eabuf
, (int)status
,
3742 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
3743 cca_event
->cca_busy_ext
.duration
,
3744 cca_event
->cca_busy_ext
.congest_ibss
,
3745 cca_event
->cca_busy_ext
.congest_obss
,
3746 cca_event
->cca_busy_ext
.interference
,
3747 cca_event
->cca_busy_ext
.timestamp
));
3748 } else if (cca_event
->id
== WL_CHAN_QUAL_CCA
) {
3750 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
3751 " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
3752 event_name
, event_type
, eabuf
, (int)status
,
3753 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
3754 cca_event
->cca_busy
.duration
,
3755 cca_event
->cca_busy
.congest
,
3756 cca_event
->cca_busy
.timestamp
));
3757 } else if ((cca_event
->id
== WL_CHAN_QUAL_NF
) ||
3758 (cca_event
->id
== WL_CHAN_QUAL_NF_LTE
)) {
3760 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
3761 " channel 0x%02x (NF[%d] %ddB)\n",
3762 event_name
, event_type
, eabuf
, (int)status
,
3763 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
3764 cca_event
->id
, cca_event
->noise
));
3767 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
3768 " channel 0x%02x (unknown ID %d)\n",
3769 event_name
, event_type
, eabuf
, (int)status
,
3770 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
3775 case WLC_E_ESCAN_RESULT
:
3776 if (datalen
>= sizeof(wl_escan_result_v2_t
)) {
3777 const wl_escan_result_v2_t
*escan_result
=
3778 (wl_escan_result_v2_t
*)event_data
;
3779 BCM_REFERENCE(escan_result
);
3780 /* Because WLC_E_ESCAN_RESULT events were being printed too frequently,
3781 * DHD_EVENT() was replaced with DHD_TRACE() on the HW4 platform.
3783 DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
3784 event_name
, event_type
, eabuf
, (int)status
));
3788 if (datalen
>= sizeof(struct wl_event_data_if
)) {
3789 const struct wl_event_data_if
*ifevent
=
3790 (struct wl_event_data_if
*)event_data
;
3791 BCM_REFERENCE(ifevent
);
3793 DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
3794 event_name
, ifevent
->opcode
, ifevent
->ifidx
, ifevent
->role
));
3797 #ifdef SHOW_LOGTRACE
3800 wl_mschdbg_event_handler(dhd_pub
, raw_event_ptr
, reason
, event_data
, datalen
);
3803 #endif /* SHOW_LOGTRACE */
3805 case WLC_E_PSK_AUTH
:
3806 DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
3807 event_name
, eabuf
, status
, reason
));
3809 case WLC_E_AGGR_EVENT
:
3810 if (datalen
>= sizeof(event_aggr_data_t
)) {
3811 const event_aggr_data_t
*aggrbuf
= event_data
;
3813 const uint8
*data
= aggrbuf
->data
;
3814 DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
3815 event_name
, aggrbuf
->num_events
, aggrbuf
->len
));
3816 for (j
= 0; j
< aggrbuf
->num_events
; j
++)
3818 const wl_event_msg_t
* sub_event
= (const wl_event_msg_t
*)data
;
3819 if (len
> aggrbuf
->len
) {
3820 DHD_ERROR(("%s: Aggr events corrupted!",
3824 DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event
->event_type
)));
3825 len
+= ALIGN_SIZE((ntoh32(sub_event
->datalen
) +
3826 sizeof(wl_event_msg_t
)), sizeof(uint64
));
3827 buf
= (const uchar
*)(data
+ sizeof(wl_event_msg_t
));
3829 DHD_EVENT((" data (%d) : ", ntoh32(sub_event
->datalen
)));
3830 for (i
= 0; i
< ntoh32(sub_event
->datalen
); i
++) {
3831 DHD_EVENT((" 0x%02x ", buf
[i
]));
3833 data
= aggrbuf
->data
+ len
;
3840 DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name
, reason
));
3843 case WLC_E_NAN_CRITICAL
:
3845 DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name
, reason
));
3848 case WLC_E_NAN_NON_CRITICAL
:
3850 DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name
, reason
));
3854 if (datalen
>= sizeof(wl_proxd_event_t
)) {
3855 const wl_proxd_event_t
*proxd
=
3856 (wl_proxd_event_t
*)event_data
;
3857 DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
3858 event_name
, proxd
->type
, reason
));
3862 if (datalen
>= sizeof(rpsnoa_stats_t
)) {
3863 const rpsnoa_stats_t
*stat
= event_data
;
3864 if (datalen
== sizeof(*stat
)) {
3865 DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name
,
3866 (stat
->band
== WLC_BAND_2G
) ? "2G":"5G",
3867 stat
->state
, stat
->last_pps
));
3872 if (datalen
>= sizeof(wl_event_wa_lqm_t
)) {
3873 const wl_event_wa_lqm_t
*event_wa_lqm
=
3874 (wl_event_wa_lqm_t
*)event_data
;
3875 const bcm_xtlv_t
*subevent
;
3876 const wl_event_wa_lqm_basic_t
*elqm_basic
;
3878 if ((event_wa_lqm
->ver
!= WL_EVENT_WA_LQM_VER
) ||
3879 (event_wa_lqm
->len
< sizeof(wl_event_wa_lqm_t
) + BCM_XTLV_HDR_SIZE
)) {
3880 DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
3881 event_name
, event_wa_lqm
->ver
, event_wa_lqm
->len
));
3885 subevent
= (const bcm_xtlv_t
*)event_wa_lqm
->subevent
;
3886 if ((subevent
->id
!= WL_EVENT_WA_LQM_BASIC
) ||
3887 (subevent
->len
< sizeof(wl_event_wa_lqm_basic_t
))) {
3888 DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
3889 event_name
, subevent
->id
, subevent
->len
));
3893 elqm_basic
= (const wl_event_wa_lqm_basic_t
*)subevent
->data
;
3894 BCM_REFERENCE(elqm_basic
);
3895 DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
3896 event_name
, elqm_basic
->rssi
, elqm_basic
->snr
,
3897 elqm_basic
->tx_rate
, elqm_basic
->rx_rate
));
3901 case WLC_E_OBSS_DETECTION
:
3903 DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name
, reason
));
3907 case WLC_E_AP_BCN_MUTE
:
3908 if (datalen
>= sizeof(wlc_bcn_mute_miti_event_data_v1_t
)) {
3909 const wlc_bcn_mute_miti_event_data_v1_t
3910 *bcn_mute_miti_evnt_data
= event_data
;
3911 DHD_EVENT(("MACEVENT: %s, reason :%d uatbtt_count: %d\n",
3912 event_name
, reason
, bcn_mute_miti_evnt_data
->uatbtt_count
));
3917 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
3918 event_name
, event_type
, eabuf
, (int)status
, (int)reason
,
3923 /* show any appended data if message level is set to bytes or host_data is set */
3924 if ((DHD_BYTES_ON() || (host_data
== TRUE
)) && DHD_EVENT_ON() && datalen
) {
3925 buf
= (uchar
*) event_data
;
3927 DHD_EVENT((" data (%d) : ", datalen
));
3928 for (i
= 0; i
< datalen
; i
++) {
3929 DHD_EVENT((" 0x%02x ", buf
[i
]));
3933 } /* wl_show_host_event */
3934 #endif /* SHOW_EVENTS */
3936 #ifdef DNGL_EVENT_SUPPORT
3937 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
3939 dngl_host_event(dhd_pub_t
*dhdp
, void *pktdata
, bcm_dngl_event_msg_t
*dngl_event
, size_t pktlen
)
3941 bcm_dngl_event_t
*pvt_data
= (bcm_dngl_event_t
*)pktdata
;
3943 dngl_host_event_process(dhdp
, pvt_data
, dngl_event
, pktlen
);
3947 #ifdef PARSE_DONGLE_HOST_EVENT
3948 typedef struct hck_id_to_str_s
{
3953 hck_id_to_str_t hck_sw_id_to_str
[] = {
3954 {WL_HC_DD_PCIE
, "WL_HC_DD_PCIE"},
3955 {WL_HC_DD_RX_DMA_STALL
, "WL_HC_DD_RX_DMA_STALL"},
3956 {WL_HC_DD_RX_STALL
, "WL_HC_DD_RX_STALL"},
3957 {WL_HC_DD_TX_STALL
, "WL_HC_DD_TX_STALL"},
3958 {WL_HC_DD_SCAN_STALL
, "WL_HC_DD_SCAN_STALL"},
3959 {WL_HC_DD_PHY
, "WL_HC_DD_PHY"},
3960 {WL_HC_DD_REINIT
, "WL_HC_DD_REINIT"},
3961 {WL_HC_DD_TXQ_STALL
, "WL_HC_DD_TXQ_STALL"},
3965 hck_id_to_str_t hck_pcie_module_to_str
[] = {
3966 {HEALTH_CHECK_PCIEDEV_INDUCED_IND
, "PCIEDEV_INDUCED_IND"},
3967 {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND
, "PCIEDEV_H2D_DMA_IND"},
3968 {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND
, "PCIEDEV_D2H_DMA_IND"},
3969 {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND
, "PCIEDEV_IOCTL_STALL_IND"},
3970 {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND
, "PCIEDEV_D3ACK_STALL_IND"},
3971 {HEALTH_CHECK_PCIEDEV_NODS_IND
, "PCIEDEV_NODS_IND"},
3972 {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND
, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
3973 {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND
, "PCIEDEV_DSACK_STALL_IND"},
3977 hck_id_to_str_t hck_rx_stall_v2_to_str
[] = {
3978 {BCM_RX_HC_RESERVED
, "BCM_RX_HC_RESERVED"},
3979 {BCM_RX_HC_UNSPECIFIED
, "BCM_RX_HC_UNSPECIFIED"},
3980 {BCM_RX_HC_UNICAST_DECRYPT_FAIL
, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
3981 {BCM_RX_HC_BCMC_DECRYPT_FAIL
, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
3982 {BCM_RX_HC_UNICAST_REPLAY
, "BCM_RX_HC_UNICAST_REPLAY"},
3983 {BCM_RX_HC_BCMC_REPLAY
, "BCM_RX_HC_BCMC_REPLAY"},
3984 {BCM_RX_HC_AMPDU_DUP
, "BCM_RX_HC_AMPDU_DUP"},
3989 dhd_print_dongle_hck_id(uint32 id
, hck_id_to_str_t
*hck
)
3991 while (hck
->name
!= NULL
) {
3992 if (hck
->id
== id
) {
3993 DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck
->name
));
4001 dhd_parse_hck_common_sw_event(bcm_xtlv_t
*wl_hc
)
4004 wl_rx_hc_info_v2_t
*hck_rx_stall_v2
;
4007 id
= ltoh16(wl_hc
->id
);
4009 if (id
== WL_HC_DD_RX_STALL_V2
) {
4010 /* map the hck_rx_stall_v2 structure to the value of the XTLV */
4012 (wl_rx_hc_info_v2_t
*)wl_hc
;
4013 DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
4014 " drop:%d alert_th:%d reason:%d peer_ea:"MACF
"\n",
4015 hck_rx_stall_v2
->type
,
4016 hck_rx_stall_v2
->length
,
4017 hck_rx_stall_v2
->if_idx
,
4018 hck_rx_stall_v2
->ac
,
4019 hck_rx_stall_v2
->rx_hc_pkts
,
4020 hck_rx_stall_v2
->rx_hc_dropped_all
,
4021 hck_rx_stall_v2
->rx_hc_alert_th
,
4022 hck_rx_stall_v2
->reason
,
4023 ETHER_TO_MACF(hck_rx_stall_v2
->peer_ea
)));
4024 dhd_print_dongle_hck_id(
4025 ltoh32(hck_rx_stall_v2
->reason
),
4026 hck_rx_stall_v2_to_str
);
4028 dhd_print_dongle_hck_id(ltoh16(wl_hc
->id
),
4034 #endif /* PARSE_DONGLE_HOST_EVENT */
4037 dngl_host_event_process(dhd_pub_t
*dhdp
, bcm_dngl_event_t
*event
,
4038 bcm_dngl_event_msg_t
*dngl_event
, size_t pktlen
)
4040 uint8
*p
= (uint8
*)(event
+ 1);
4041 uint16 type
= ntoh16_ua((void *)&dngl_event
->event_type
);
4042 uint16 datalen
= ntoh16_ua((void *)&dngl_event
->datalen
);
4043 uint16 version
= ntoh16_ua((void *)&dngl_event
->version
);
4045 DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version
, type
, datalen
));
4046 if (datalen
> (pktlen
- sizeof(bcm_dngl_event_t
) + ETHER_TYPE_LEN
)) {
4049 if (version
!= BCM_DNGL_EVENT_MSG_VERSION
) {
4050 DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__
,
4051 version
, BCM_DNGL_EVENT_MSG_VERSION
));
4055 case DNGL_E_SOCRAM_IND
:
4057 bcm_dngl_socramind_t
*socramind_ptr
= (bcm_dngl_socramind_t
*)p
;
4058 uint16 tag
= ltoh32(socramind_ptr
->tag
);
4059 uint16 taglen
= ltoh32(socramind_ptr
->length
);
4060 p
= (uint8
*)socramind_ptr
->value
;
4061 DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag
, taglen
, datalen
));
4063 case SOCRAM_IND_ASSERT_TAG
:
4066 * The payload consists of -
4067 * null terminated function name padded till 32 bit boundary +
4068 * Line number - (32 bits)
4069 * Caller address (32 bits)
4071 char *fnname
= (char *)p
;
4072 if (datalen
< (ROUNDUP(strlen(fnname
) + 1, sizeof(uint32
)) +
4073 sizeof(uint32
) * 2)) {
4074 DHD_ERROR(("Wrong length:%d\n", datalen
));
4077 DHD_EVENT(("ASSRT Function:%s ", p
));
4078 p
+= ROUNDUP(strlen(p
) + 1, sizeof(uint32
));
4079 DHD_EVENT(("Line:%d ", *(uint32
*)p
));
4080 p
+= sizeof(uint32
);
4081 DHD_EVENT(("Caller Addr:0x%x\n", *(uint32
*)p
));
4082 #ifdef PARSE_DONGLE_HOST_EVENT
4083 DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
4084 #endif /* PARSE_DONGLE_HOST_EVENT */
4087 case SOCRAM_IND_TAG_HEALTH_CHECK
:
4089 bcm_dngl_healthcheck_t
*dngl_hc
= (bcm_dngl_healthcheck_t
*)p
;
4090 DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
4091 ltoh32(dngl_hc
->top_module_tag
),
4092 ltoh32(dngl_hc
->top_module_len
),
4094 if (DHD_EVENT_ON()) {
4095 prhex("HEALTHCHECK", p
, MIN(ltoh32(dngl_hc
->top_module_len
)
4096 + BCM_XTLV_HDR_SIZE
, datalen
));
4099 memset(dhdp
->health_chk_event_data
, 0, HEALTH_CHK_BUF_SIZE
);
4100 memcpy(dhdp
->health_chk_event_data
, p
,
4101 MIN(ltoh32(dngl_hc
->top_module_len
),
4102 HEALTH_CHK_BUF_SIZE
));
4103 #endif /* DHD_LOG_DUMP */
4104 p
= (uint8
*)dngl_hc
->value
;
4106 switch (ltoh32(dngl_hc
->top_module_tag
)) {
4107 case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE
:
4109 bcm_dngl_pcie_hc_t
*pcie_hc
;
4110 pcie_hc
= (bcm_dngl_pcie_hc_t
*)p
;
4111 BCM_REFERENCE(pcie_hc
);
4112 if (ltoh32(dngl_hc
->top_module_len
) <
4113 sizeof(bcm_dngl_pcie_hc_t
)) {
4114 DHD_ERROR(("Wrong length:%d\n",
4115 ltoh32(dngl_hc
->top_module_len
)));
4118 DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
4120 ltoh32(pcie_hc
->version
),
4121 ltoh32(pcie_hc
->pcie_err_ind_type
),
4122 ltoh32(pcie_hc
->pcie_flag
),
4123 ltoh32(pcie_hc
->pcie_control_reg
)));
4124 #ifdef PARSE_DONGLE_HOST_EVENT
4125 dhd_print_dongle_hck_id(
4126 ltoh32(pcie_hc
->pcie_err_ind_type
),
4127 hck_pcie_module_to_str
);
4128 #endif /* PARSE_DONGLE_HOST_EVENT */
4131 #ifdef HCHK_COMMON_SW_EVENT
4132 case HCHK_SW_ENTITY_WL_PRIMARY
:
4133 case HCHK_SW_ENTITY_WL_SECONDARY
:
4135 bcm_xtlv_t
*wl_hc
= (bcm_xtlv_t
*)p
;
4137 if (ltoh32(dngl_hc
->top_module_len
) <
4138 sizeof(bcm_xtlv_t
)) {
4139 DHD_ERROR(("WL SW HC Wrong length:%d\n",
4140 ltoh32(dngl_hc
->top_module_len
)));
4143 BCM_REFERENCE(wl_hc
);
4144 DHD_EVENT(("WL SW HC type %d len %d\n",
4145 ltoh16(wl_hc
->id
), ltoh16(wl_hc
->len
)));
4147 #ifdef PARSE_DONGLE_HOST_EVENT
4148 dhd_parse_hck_common_sw_event(wl_hc
);
4149 #endif /* PARSE_DONGLE_HOST_EVENT */
4153 #endif /* HCHK_COMMON_SW_EVENT */
4156 DHD_ERROR(("%s:Unknown module TAG:%d\n",
4158 ltoh32(dngl_hc
->top_module_tag
)));
4165 DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__
));
4166 if (p
&& DHD_EVENT_ON()) {
4167 prhex("SOCRAMIND", p
, taglen
);
4174 DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__
, type
));
4175 if (p
&& DHD_EVENT_ON()) {
4176 prhex("SOCRAMIND", p
, datalen
);
4181 #ifdef DHD_FW_COREDUMP
4182 if (dhdp
->memdump_enabled
) {
4183 dhdp
->memdump_type
= DUMP_TYPE_DONGLE_HOST_EVENT
;
4186 !dhdp
->gdb_proxy_active
&&
4187 #endif /* GDB_PROXY */
4188 dhd_socram_dump(dhdp
->bus
)) {
4189 DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__
));
4193 dhd_dbg_send_urgent_evt(dhdp
, p
, datalen
);
4194 #endif /* DHD_FW_COREDUMP */
4195 #endif /* !BCMDBUS */
4198 #endif /* DNGL_EVENT_SUPPORT */
4200 /* Stub for now. Will become real function as soon as shim
4201 * is being integrated to Android, Linux etc.
4204 wl_event_process_default(wl_event_msg_t
*event
, struct wl_evt_pport
*evt_pport
)
4210 wl_event_process(dhd_pub_t
*dhd_pub
, int *ifidx
, void *pktdata
,
4211 uint pktlen
, void **data_ptr
, void *raw_event
)
4213 wl_evt_pport_t evt_pport
;
4214 wl_event_msg_t event
;
4215 bcm_event_msg_u_t evu
;
4218 /* make sure it is a BRCM event pkt and record event data */
4219 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
4220 if (ret
!= BCME_OK
) {
4224 memcpy(&event
, &evu
.event
, sizeof(wl_event_msg_t
));
4226 /* convert event from network order to host order */
4227 wl_event_to_host_order(&event
);
4229 /* record event params to evt_pport */
4230 evt_pport
.dhd_pub
= dhd_pub
;
4231 evt_pport
.ifidx
= ifidx
;
4232 evt_pport
.pktdata
= pktdata
;
4233 evt_pport
.data_ptr
= data_ptr
;
4234 evt_pport
.raw_event
= raw_event
;
4235 evt_pport
.data_len
= pktlen
;
4237 ret
= wl_event_process_default(&event
, &evt_pport
);
4240 } /* wl_event_process */
4242 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
4244 wl_host_event_get_data(void *pktdata
, uint pktlen
, bcm_event_msg_u_t
*evu
)
4248 ret
= is_wlc_event_frame(pktdata
, pktlen
, 0, evu
);
4249 if (ret
!= BCME_OK
) {
4250 DHD_ERROR(("%s: Invalid event frame, err = %d\n",
4251 __FUNCTION__
, ret
));
4258 wl_process_host_event(dhd_pub_t
*dhd_pub
, int *ifidx
, void *pktdata
, uint pktlen
,
4259 wl_event_msg_t
*event
, void **data_ptr
, void *raw_event
)
4261 bcm_event_t
*pvt_data
= (bcm_event_t
*)pktdata
;
4262 bcm_event_msg_u_t evu
;
4264 uint32 type
, status
, datalen
, reason
;
4269 #if defined(__linux__)
4270 dhd_if_t
*ifp
= NULL
;
4272 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4274 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
4275 if (ret
!= BCME_OK
) {
4279 usr_subtype
= ntoh16_ua((void *)&pvt_data
->bcm_hdr
.usr_subtype
);
4280 switch (usr_subtype
) {
4281 case BCMILCP_BCM_SUBTYPE_EVENT
:
4282 memcpy(event
, &evu
.event
, sizeof(wl_event_msg_t
));
4283 *data_ptr
= &pvt_data
[1];
4285 case BCMILCP_BCM_SUBTYPE_DNGLEVENT
:
4286 #ifdef DNGL_EVENT_SUPPORT
4287 /* If it is a DNGL event process it first */
4288 if (dngl_host_event(dhd_pub
, pktdata
, &evu
.dngl_event
, pktlen
) == BCME_OK
) {
4290 * Return error purposely to prevent DNGL event being processed
4295 #endif /* DNGL_EVENT_SUPPORT */
4296 return BCME_NOTFOUND
;
4298 return BCME_NOTFOUND
;
4301 /* start wl_event_msg process */
4302 event_data
= *data_ptr
;
4303 type
= ntoh32_ua((void *)&event
->event_type
);
4304 flags
= ntoh16_ua((void *)&event
->flags
);
4305 status
= ntoh32_ua((void *)&event
->status
);
4306 reason
= ntoh32_ua((void *)&event
->reason
);
4307 datalen
= ntoh32_ua((void *)&event
->datalen
);
4308 evlen
= datalen
+ sizeof(bcm_event_t
);
4311 #ifdef PROP_TXSTATUS
4312 case WLC_E_FIFO_CREDIT_MAP
:
4313 dhd_wlfc_enable(dhd_pub
);
4314 dhd_wlfc_FIFOcreditmap_event(dhd_pub
, event_data
);
4315 WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
4316 "(%d,%d,%d,%d),(%d),(%d)\n", event_data
[0], event_data
[1],
4318 event_data
[3], event_data
[4], event_data
[5]));
4321 case WLC_E_BCMC_CREDIT_SUPPORT
:
4322 dhd_wlfc_BCMCCredit_support_event(dhd_pub
);
4325 case WLC_E_ALLOW_CREDIT_BORROW
:
4326 dhd_wlfc_disable_credit_borrow_event(dhd_pub
, event_data
);
4328 #endif /* LIMIT_BORROW */
4329 #endif /* PROP_TXSTATUS */
4333 case WLC_E_TDLS_PEER_EVENT
:
4334 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
4336 dhd_tdls_event_handler(dhd_pub
, event
);
4343 struct wl_event_data_if
*ifevent
= (struct wl_event_data_if
*)event_data
;
4345 /* Ignore the event if NOIF is set */
4346 if (ifevent
->reserved
& WLC_E_IF_FLAGS_BSSCFG_NOIF
) {
4347 DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
4348 return (BCME_UNSUPPORTED
);
4350 #ifdef PCIE_FULL_DONGLE
4351 dhd_update_interface_flow_info(dhd_pub
, ifevent
->ifidx
,
4352 ifevent
->opcode
, ifevent
->role
);
4354 #ifdef PROP_TXSTATUS
4356 uint8
* ea
= pvt_data
->eth
.ether_dhost
;
4357 WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG
"]\n"
4359 ((ifevent
->opcode
== WLC_E_IF_ADD
) ? "ADD":"DEL"),
4360 ((ifevent
->role
== 0) ? "STA":"AP "),
4364 if (ifevent
->opcode
== WLC_E_IF_CHANGE
)
4365 dhd_wlfc_interface_event(dhd_pub
,
4366 eWLFC_MAC_ENTRY_ACTION_UPDATE
,
4367 ifevent
->ifidx
, ifevent
->role
, ea
);
4369 dhd_wlfc_interface_event(dhd_pub
,
4370 ((ifevent
->opcode
== WLC_E_IF_ADD
) ?
4371 eWLFC_MAC_ENTRY_ACTION_ADD
: eWLFC_MAC_ENTRY_ACTION_DEL
),
4372 ifevent
->ifidx
, ifevent
->role
, ea
);
4374 /* dhd already has created an interface by default, for 0 */
4375 if (ifevent
->ifidx
== 0)
4378 #endif /* PROP_TXSTATUS */
4380 if (ifevent
->ifidx
> 0 && ifevent
->ifidx
< DHD_MAX_IFS
) {
4381 if (ifevent
->opcode
== WLC_E_IF_ADD
) {
4382 if (dhd_event_ifadd(dhd_pub
->info
, ifevent
, event
->ifname
,
4383 event
->addr
.octet
)) {
4385 DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
4386 __FUNCTION__
, ifevent
->ifidx
, event
->ifname
));
4387 return (BCME_ERROR
);
4389 } else if (ifevent
->opcode
== WLC_E_IF_DEL
) {
4390 #ifdef PCIE_FULL_DONGLE
4391 dhd_flow_rings_delete(dhd_pub
,
4392 (uint8
)dhd_ifname2idx(dhd_pub
->info
, event
->ifname
));
4393 #endif /* PCIE_FULL_DONGLE */
4394 dhd_event_ifdel(dhd_pub
->info
, ifevent
, event
->ifname
,
4396 } else if (ifevent
->opcode
== WLC_E_IF_CHANGE
) {
4398 dhd_event_ifchange(dhd_pub
->info
, ifevent
, event
->ifname
,
4400 #endif /* WL_CFG80211 */
4403 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
4404 DHD_INFO(("%s: Invalid ifidx %d for %s\n",
4405 __FUNCTION__
, ifevent
->ifidx
, event
->ifname
));
4406 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
4408 /* send up the if event: btamp user needs it */
4409 *ifidx
= dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
4410 /* push up to external supp/auth */
4411 dhd_event(dhd_pub
->info
, (char *)pvt_data
, evlen
, *ifidx
);
4415 case WLC_E_NDIS_LINK
:
4417 case WLC_E_PFN_NET_FOUND
:
4418 case WLC_E_PFN_SCAN_ALLGONE
: /* share with WLC_E_PFN_BSSID_NET_LOST */
4419 case WLC_E_PFN_NET_LOST
:
4421 #if defined(PNO_SUPPORT)
4422 case WLC_E_PFN_BSSID_NET_FOUND
:
4423 case WLC_E_PFN_BEST_BATCHING
:
4424 dhd_pno_event_handler(dhd_pub
, event
, (void *)event_data
);
4427 #if defined(RTT_SUPPORT)
4430 dhd_rtt_event_handler(dhd_pub
, event
, (void *)event_data
);
4431 #endif /* WL_CFG80211 */
4433 #endif /* RTT_SUPPORT */
4434 /* These are what external supplicant/authenticator wants */
4435 case WLC_E_ASSOC_IND
:
4436 case WLC_E_AUTH_IND
:
4437 case WLC_E_REASSOC_IND
:
4438 dhd_findadd_sta(dhd_pub
,
4439 dhd_ifname2idx(dhd_pub
->info
, event
->ifname
),
4440 &event
->addr
.octet
);
4443 #if defined(DHD_FW_COREDUMP)
4444 case WLC_E_PSM_WATCHDOG
:
4445 DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__
));
4446 if (dhd_socram_dump(dhd_pub
->bus
) != BCME_OK
) {
4447 DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__
));
4451 #endif /* !BCMDBUS */
4452 case WLC_E_NATOE_NFCT
:
4454 DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__
));
4455 dhd_natoe_ct_event(dhd_pub
, event_data
);
4456 #endif /* WL_NATOE */
4459 case WLC_E_SLOTTED_BSS_PEER_OP
:
4460 DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
4461 "" MACDBG
", status = %d\n",
4462 __FUNCTION__
, MAC2STRDBG(event
->addr
.octet
), status
));
4463 if (status
== WLC_E_STATUS_SLOTTED_PEER_ADD
) {
4464 dhd_findadd_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
,
4465 event
->ifname
), &event
->addr
.octet
);
4466 } else if (status
== WLC_E_STATUS_SLOTTED_PEER_DEL
) {
4467 uint8 ifindex
= (uint8
)dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
4468 BCM_REFERENCE(ifindex
);
4469 dhd_del_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
,
4470 event
->ifname
), &event
->addr
.octet
);
4471 #ifdef PCIE_FULL_DONGLE
4472 dhd_flow_rings_delete_for_peer(dhd_pub
, ifindex
,
4473 (char *)&event
->addr
.octet
[0]);
4476 DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
4477 __FUNCTION__
, status
));
4481 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
4483 ifp
= dhd_get_ifp(dhd_pub
, event
->ifidx
);
4488 /* Consider STA role only since roam is disabled on P2P GC.
4489 * Drop EAPOL M1 frame only if roam is done to same BSS.
4491 if ((status
== WLC_E_STATUS_SUCCESS
) &&
4492 IS_STA_IFACE(ndev_to_wdev(ifp
->net
)) &&
4493 wl_cfg80211_is_event_from_connected_bssid(ifp
->net
, event
, event
->ifidx
)) {
4494 ifp
->recv_reassoc_evt
= TRUE
;
4497 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4498 #if defined(CSI_SUPPORT)
4500 dhd_csi_event_handler(dhd_pub
, event
, (void *)event_data
);
4502 #endif /* CSI_SUPPORT */
4504 #ifdef PCIE_FULL_DONGLE
4505 if (dhd_update_interface_link_status(dhd_pub
, (uint8
)dhd_ifname2idx(dhd_pub
->info
,
4506 event
->ifname
), (uint8
)flags
) != BCME_OK
) {
4507 DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
4512 DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
4514 /* Delete all sta and flowrings */
4515 dhd_del_all_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
, event
->ifname
));
4516 dhd_flow_rings_delete(dhd_pub
, (uint8
)dhd_ifname2idx(dhd_pub
->info
,
4520 #endif /* PCIE_FULL_DONGLE */
4522 case WLC_E_DEAUTH_IND
:
4523 case WLC_E_DISASSOC
:
4524 case WLC_E_DISASSOC_IND
:
4525 #ifdef PCIE_FULL_DONGLE
4526 if (type
!= WLC_E_LINK
) {
4527 uint8 ifindex
= (uint8
)dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
4528 uint8 role
= dhd_flow_rings_ifindex2role(dhd_pub
, ifindex
);
4529 uint8 del_sta
= TRUE
;
4531 if (role
== WLC_E_IF_ROLE_STA
&&
4532 !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub
, ifindex
)) &&
4533 !wl_cfg80211_is_event_from_connected_bssid(
4534 dhd_idx2net(dhd_pub
, ifindex
), event
, *ifidx
)) {
4537 #endif /* WL_CFG80211 */
4538 DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
4539 __FUNCTION__
, type
, flags
, status
, role
, del_sta
));
4542 DHD_EVENT(("%s: Deleting STA " MACDBG
"\n",
4543 __FUNCTION__
, MAC2STRDBG(event
->addr
.octet
)));
4545 dhd_del_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
,
4546 event
->ifname
), &event
->addr
.octet
);
4547 /* Delete all flowrings for STA and P2P Client */
4548 if (role
== WLC_E_IF_ROLE_STA
|| role
== WLC_E_IF_ROLE_P2P_CLIENT
) {
4549 dhd_flow_rings_delete(dhd_pub
, ifindex
);
4551 dhd_flow_rings_delete_for_peer(dhd_pub
, ifindex
,
4552 (char *)&event
->addr
.octet
[0]);
4556 #endif /* PCIE_FULL_DONGLE */
4557 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
4559 ifp
= dhd_get_ifp(dhd_pub
, event
->ifidx
);
4561 ifp
->recv_reassoc_evt
= FALSE
;
4562 ifp
->post_roam_evt
= FALSE
;
4564 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4567 *ifidx
= dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
4568 #ifdef DHD_UPDATE_INTF_MAC
4569 if ((WLC_E_LINK
==type
)&&(WLC_EVENT_MSG_LINK
&flags
)) {
4570 dhd_event_ifchange(dhd_pub
->info
,
4571 (struct wl_event_data_if
*)event
,
4575 #endif /* DHD_UPDATE_INTF_MAC */
4576 /* push up to external supp/auth */
4577 dhd_event(dhd_pub
->info
, (char *)pvt_data
, evlen
, *ifidx
);
4578 DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
4579 __FUNCTION__
, type
, flags
, status
));
4580 BCM_REFERENCE(flags
);
4581 BCM_REFERENCE(status
);
4582 BCM_REFERENCE(reason
);
4587 /* For routers, EAPD will be working on these events.
4588 * Overwrite the interface name so that the event is pushed
4589 * to the host with its registered interface name.
4591 memcpy(pvt_data
->event
.ifname
, dhd_ifname(dhd_pub
, *ifidx
), IFNAMSIZ
);
4594 #ifdef DHD_STATUS_LOGGING
4595 if (dhd_pub
->statlog
) {
4596 dhd_statlog_process_event(dhd_pub
, type
, *ifidx
,
4597 status
, reason
, flags
);
4599 #endif /* DHD_STATUS_LOGGING */
4602 if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
4603 wl_show_host_event(dhd_pub
, event
,
4604 (void *)event_data
, raw_event
, dhd_pub
->enable_log
);
4606 #endif /* SHOW_EVENTS */
4609 } /* wl_process_host_event */
4612 wl_host_event(dhd_pub_t
*dhd_pub
, int *ifidx
, void *pktdata
, uint pktlen
,
4613 wl_event_msg_t
*event
, void **data_ptr
, void *raw_event
)
4615 return wl_process_host_event(dhd_pub
, ifidx
, pktdata
, pktlen
, event
, data_ptr
,
4620 dhd_print_buf(void *pbuf
, int len
, int bytes_per_line
)
4624 unsigned char *buf
= pbuf
;
4626 if (bytes_per_line
== 0) {
4627 bytes_per_line
= len
;
4630 for (i
= 0; i
< len
; i
++) {
4631 printf("%2.2x", *buf
++);
4633 if (j
== bytes_per_line
) {
4641 #endif /* DHD_DEBUG */
4644 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
4647 /* Convert user's input in hex pattern to byte-size mask */
4649 wl_pattern_atoh(char *src
, char *dst
)
4652 if (strncmp(src
, "0x", 2) != 0 &&
4653 strncmp(src
, "0X", 2) != 0) {
4654 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
4657 src
= src
+ 2; /* Skip past 0x */
4658 if (strlen(src
) % 2 != 0) {
4659 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
4662 for (i
= 0; *src
!= '\0'; i
++) {
4664 bcm_strncpy_s(num
, sizeof(num
), src
, 2);
4666 dst
[i
] = (uint8
)strtoul(num
, NULL
, 16);
4672 #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
4674 pattern_atoh_len(char *src
, char *dst
, int len
)
4677 if (strncmp(src
, "0x", HD_PREFIX_SIZE
) != 0 &&
4678 strncmp(src
, "0X", HD_PREFIX_SIZE
) != 0) {
4679 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
4682 src
= src
+ HD_PREFIX_SIZE
; /* Skip past 0x */
4683 if (strlen(src
) % HD_BYTE_SIZE
!= 0) {
4684 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
4687 for (i
= 0; *src
!= '\0'; i
++) {
4688 char num
[HD_BYTE_SIZE
+ 1];
4691 DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i
, len
));
4694 bcm_strncpy_s(num
, sizeof(num
), src
, HD_BYTE_SIZE
);
4695 num
[HD_BYTE_SIZE
] = '\0';
4696 dst
[i
] = (uint8
)strtoul(num
, NULL
, 16);
4697 src
+= HD_BYTE_SIZE
;
4701 #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
4703 #ifdef PKT_FILTER_SUPPORT
4705 dhd_pktfilter_offload_enable(dhd_pub_t
* dhd
, char *arg
, int enable
, int master_mode
)
4712 char *arg_save
= 0, *arg_org
= 0;
4715 wl_pkt_filter_enable_t enable_parm
;
4716 wl_pkt_filter_enable_t
* pkt_filterp
;
4721 if (!(arg_save
= MALLOC(dhd
->osh
, strlen(arg
) + 1))) {
4722 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__
));
4726 memcpy(arg_save
, arg
, strlen(arg
) + 1);
4728 argv
[i
] = bcmstrtok(&arg_save
, " ", 0);
4731 if (argv
[i
] == NULL
) {
4732 DHD_ERROR(("No args provided\n"));
4736 str
= "pkt_filter_enable";
4737 str_len
= strlen(str
);
4738 bcm_strncpy_s(buf
, sizeof(buf
) - 1, str
, sizeof(buf
) - 1);
4739 buf
[ sizeof(buf
) - 1 ] = '\0';
4740 buf_len
= str_len
+ 1;
4742 pkt_filterp
= (wl_pkt_filter_enable_t
*)(buf
+ str_len
+ 1);
4744 /* Parse packet filter id. */
4745 enable_parm
.id
= htod32(strtoul(argv
[i
], NULL
, 0));
4746 if (dhd_conf_del_pkt_filter(dhd
, enable_parm
.id
))
4749 /* Parse enable/disable value. */
4750 enable_parm
.enable
= htod32(enable
);
4752 buf_len
+= sizeof(enable_parm
);
4753 memcpy((char *)pkt_filterp
,
4755 sizeof(enable_parm
));
4757 /* Enable/disable the specified filter. */
4758 rc
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
4759 rc
= rc
>= 0 ? 0 : rc
;
4761 DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
4762 __FUNCTION__
, enable
?"enable":"disable", arg
, rc
));
4763 dhd_set_packet_filter(dhd
);
4764 rc
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
4765 rc
= rc
>= 0 ? 0 : rc
;
4767 DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
4768 __FUNCTION__
, arg
, rc
));
4770 DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
4771 __FUNCTION__
, arg
));
4775 DHD_TRACE(("%s: successfully %s pktfilter %s\n",
4776 __FUNCTION__
, enable
?"enable":"disable", arg
));
4778 /* Contorl the master mode */
4779 rc
= dhd_wl_ioctl_set_intiovar(dhd
, "pkt_filter_mode",
4780 master_mode
, WLC_SET_VAR
, TRUE
, 0);
4781 rc
= rc
>= 0 ? 0 : rc
;
4783 DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
4784 __FUNCTION__
, master_mode
, rc
));
4788 MFREE(dhd
->osh
, arg_org
, strlen(arg
) + 1);
4791 /* Packet filter section: extended filters have named offsets, add table here */
4797 static wl_pfbase_t basenames
[] = { WL_PKT_FILTER_BASE_NAMES
};
4800 wl_pkt_filter_base_parse(char *name
)
4803 char *bname
, *uname
;
4805 for (i
= 0; i
< ARRAYSIZE(basenames
); i
++) {
4806 bname
= basenames
[i
].name
;
4807 for (uname
= name
; *uname
; bname
++, uname
++) {
4808 if (*bname
!= bcm_toupper(*uname
)) {
4812 if (!*uname
&& !*bname
) {
4817 if (i
< ARRAYSIZE(basenames
)) {
4818 return basenames
[i
].base
;
4825 dhd_pktfilter_offload_set(dhd_pub_t
* dhd
, char *arg
)
4828 wl_pkt_filter_t pkt_filter
;
4829 wl_pkt_filter_t
*pkt_filterp
;
4834 uint32 pattern_size
;
4835 char *argv
[MAXPKT_ARG
] = {0}, * buf
= 0;
4837 char *arg_save
= 0, *arg_org
= 0;
4842 if (!(arg_save
= MALLOC(dhd
->osh
, strlen(arg
) + 1))) {
4843 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__
));
4849 if (!(buf
= MALLOC(dhd
->osh
, MAX_PKTFLT_BUF_SIZE
))) {
4850 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__
));
4854 memset(buf
, 0, MAX_PKTFLT_BUF_SIZE
);
4855 memcpy(arg_save
, arg
, strlen(arg
) + 1);
4857 if (strlen(arg
) > MAX_PKTFLT_BUF_SIZE
) {
4858 DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg
), (int)sizeof(buf
)));
4862 argv
[i
] = bcmstrtok(&arg_save
, " ", 0);
4864 if (i
>= MAXPKT_ARG
) {
4865 DHD_ERROR(("Invalid args provided\n"));
4868 argv
[i
] = bcmstrtok(&arg_save
, " ", 0);
4872 if (argv
[i
] == NULL
) {
4873 DHD_ERROR(("No args provided\n"));
4877 str
= "pkt_filter_add";
4878 str_len
= strlen(str
);
4879 bcm_strncpy_s(buf
, MAX_PKTFLT_BUF_SIZE
, str
, str_len
);
4880 buf
[ str_len
] = '\0';
4881 buf_len
= str_len
+ 1;
4883 pkt_filterp
= (wl_pkt_filter_t
*) (buf
+ str_len
+ 1);
4885 /* Parse packet filter id. */
4886 pkt_filter
.id
= htod32(strtoul(argv
[i
], NULL
, 0));
4888 if (argv
[++i
] == NULL
) {
4889 DHD_ERROR(("Polarity not provided\n"));
4893 /* Parse filter polarity. */
4894 pkt_filter
.negate_match
= htod32(strtoul(argv
[i
], NULL
, 0));
4896 if (argv
[++i
] == NULL
) {
4897 DHD_ERROR(("Filter type not provided\n"));
4901 /* Parse filter type. */
4902 pkt_filter
.type
= htod32(strtoul(argv
[i
], NULL
, 0));
4904 if ((pkt_filter
.type
== 0) || (pkt_filter
.type
== 1)) {
4905 if (argv
[++i
] == NULL
) {
4906 DHD_ERROR(("Offset not provided\n"));
4910 /* Parse pattern filter offset. */
4911 pkt_filter
.u
.pattern
.offset
= htod32(strtoul(argv
[i
], NULL
, 0));
4913 if (argv
[++i
] == NULL
) {
4914 DHD_ERROR(("Bitmask not provided\n"));
4918 /* Parse pattern filter mask. */
4919 rc
= wl_pattern_atoh(argv
[i
],
4920 (char *) pkt_filterp
->u
.pattern
.mask_and_pattern
);
4923 DHD_ERROR(("Rejecting: %s\n", argv
[i
]));
4926 mask_size
= htod32(rc
);
4927 if (argv
[++i
] == NULL
) {
4928 DHD_ERROR(("Pattern not provided\n"));
4932 /* Parse pattern filter pattern. */
4933 rc
= wl_pattern_atoh(argv
[i
],
4934 (char *) &pkt_filterp
->u
.pattern
.mask_and_pattern
[mask_size
]);
4937 DHD_ERROR(("Rejecting: %s\n", argv
[i
]));
4940 pattern_size
= htod32(rc
);
4941 if (mask_size
!= pattern_size
) {
4942 DHD_ERROR(("Mask and pattern not the same size\n"));
4946 pkt_filter
.u
.pattern
.size_bytes
= mask_size
;
4947 buf_len
+= WL_PKT_FILTER_FIXED_LEN
;
4948 buf_len
+= (WL_PKT_FILTER_PATTERN_FIXED_LEN
+ 2 * mask_size
);
4950 /* Keep-alive attributes are set in local variable (keep_alive_pkt), and
4951 * then memcpy'ed into buffer (keep_alive_pktp) since there is no
4952 * guarantee that the buffer is properly aligned.
4954 memcpy((char *)pkt_filterp
,
4956 WL_PKT_FILTER_FIXED_LEN
+ WL_PKT_FILTER_PATTERN_FIXED_LEN
);
4957 } else if ((pkt_filter
.type
== 2) || (pkt_filter
.type
== 6)) {
4959 char *endptr
= NULL
;
4960 wl_pkt_filter_pattern_listel_t
*pf_el
=
4961 (wl_pkt_filter_pattern_listel_t
*)&pkt_filterp
->u
.patlist
.patterns
[0];
4963 while (argv
[++i
] != NULL
) {
4964 /* Check valid buffer size. */
4965 if ((buf_len
+ MAX_PKTFLT_FIXED_BUF_SIZE
) > MAX_PKTFLT_BUF_SIZE
) {
4966 DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
4970 /* Parse pattern filter base and offset. */
4971 if (bcm_isdigit(*argv
[i
])) {
4973 rc
= strtoul(argv
[i
], &endptr
, 0);
4975 endptr
= strchr(argv
[i
], ':');
4978 rc
= wl_pkt_filter_base_parse(argv
[i
]);
4980 printf("Invalid base %s\n", argv
[i
]);
4987 if (endptr
== NULL
) {
4988 printf("Invalid [base:]offset format: %s\n", argv
[i
]);
4992 if (*endptr
== ':') {
4993 pf_el
->base_offs
= htod16(rc
);
4994 rc
= strtoul(endptr
+ 1, &endptr
, 0);
4996 /* Must have had a numeric offset only */
4997 pf_el
->base_offs
= htod16(0);
5001 printf("Invalid [base:]offset format: %s\n", argv
[i
]);
5004 if (rc
> 0x0000FFFF) {
5005 printf("Offset too large\n");
5008 pf_el
->rel_offs
= htod16(rc
);
5010 /* Clear match_flag (may be set in parsing which follows) */
5011 pf_el
->match_flags
= htod16(0);
5013 /* Parse pattern filter mask and pattern directly into ioctl buffer */
5014 if (argv
[++i
] == NULL
) {
5015 printf("Bitmask not provided\n");
5018 rc
= wl_pattern_atoh(argv
[i
], (char*)pf_el
->mask_and_data
);
5019 if ((rc
== -1) || (rc
> MAX_PKTFLT_FIXED_PATTERN_SIZE
)) {
5020 printf("Rejecting: %s\n", argv
[i
]);
5023 mask_size
= htod16(rc
);
5025 if (argv
[++i
] == NULL
) {
5026 printf("Pattern not provided\n");
5030 if (*argv
[i
] == '!') {
5031 pf_el
->match_flags
=
5032 htod16(WL_PKT_FILTER_MFLAG_NEG
);
5035 if (*argv
[i
] == '\0') {
5036 printf("Pattern not provided\n");
5039 rc
= wl_pattern_atoh(argv
[i
], (char*)&pf_el
->mask_and_data
[rc
]);
5040 if ((rc
== -1) || (rc
> MAX_PKTFLT_FIXED_PATTERN_SIZE
)) {
5041 printf("Rejecting: %s\n", argv
[i
]);
5044 pattern_size
= htod16(rc
);
5046 if (mask_size
!= pattern_size
) {
5047 printf("Mask and pattern not the same size\n");
5051 pf_el
->size_bytes
= mask_size
;
5053 /* Account for the size of this pattern element */
5054 buf_len
+= WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN
+ 2 * rc
;
5056 /* Move to next element location in ioctl buffer */
5057 pf_el
= (wl_pkt_filter_pattern_listel_t
*)
5058 ((uint8
*)pf_el
+ WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN
+ 2 * rc
);
5060 /* Count list element */
5064 /* Account for initial fixed size, and copy initial fixed fields */
5065 buf_len
+= WL_PKT_FILTER_FIXED_LEN
+ WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN
;
5067 if (buf_len
> MAX_PKTFLT_BUF_SIZE
) {
5068 DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
5072 /* Update list count and total size */
5073 pkt_filter
.u
.patlist
.list_cnt
= list_cnt
;
5074 pkt_filter
.u
.patlist
.PAD1
[0] = 0;
5075 pkt_filter
.u
.patlist
.totsize
= buf
+ buf_len
- (char*)pkt_filterp
;
5076 pkt_filter
.u
.patlist
.totsize
-= WL_PKT_FILTER_FIXED_LEN
;
5078 memcpy((char *)pkt_filterp
, &pkt_filter
,
5079 WL_PKT_FILTER_FIXED_LEN
+ WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN
);
5081 DHD_ERROR(("Invalid filter type %d\n", pkt_filter
.type
));
5085 rc
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
5086 rc
= rc
>= 0 ? 0 : rc
;
5089 DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
5090 __FUNCTION__
, arg
, rc
));
5092 DHD_TRACE(("%s: successfully added pktfilter %s\n",
5093 __FUNCTION__
, arg
));
5097 MFREE(dhd
->osh
, arg_org
, strlen(arg
) + 1);
5100 MFREE(dhd
->osh
, buf
, MAX_PKTFLT_BUF_SIZE
);
5104 dhd_pktfilter_offload_delete(dhd_pub_t
*dhd
, int id
)
5108 ret
= dhd_wl_ioctl_set_intiovar(dhd
, "pkt_filter_delete",
5109 id
, WLC_SET_VAR
, TRUE
, 0);
5111 DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
5112 __FUNCTION__
, id
, ret
));
5115 DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
5118 #endif /* PKT_FILTER_SUPPORT */
5120 /* ========================== */
5121 /* ==== ARP OFFLOAD SUPPORT = */
5122 /* ========================== */
5123 #ifdef ARP_OFFLOAD_SUPPORT
5125 dhd_arp_offload_set(dhd_pub_t
* dhd
, int arp_mode
)
5129 retcode
= dhd_wl_ioctl_set_intiovar(dhd
, "arp_ol",
5130 arp_mode
, WLC_SET_VAR
, TRUE
, 0);
5132 retcode
= retcode
>= 0 ? 0 : retcode
;
5134 DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
5135 __FUNCTION__
, arp_mode
, retcode
));
5137 DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
5138 __FUNCTION__
, arp_mode
));
5139 dhd
->arpol_configured
= TRUE
;
5144 dhd_arp_offload_enable(dhd_pub_t
* dhd
, int arp_enable
)
5148 if (!dhd
->arpol_configured
) {
5149 /* If arpol is not applied, apply it */
5150 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
5153 retcode
= dhd_wl_ioctl_set_intiovar(dhd
, "arpoe",
5154 arp_enable
, WLC_SET_VAR
, TRUE
, 0);
5155 retcode
= retcode
>= 0 ? 0 : retcode
;
5157 DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
5158 __FUNCTION__
, arp_enable
, retcode
));
5161 DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
5162 __FUNCTION__
, arp_enable
));
5164 DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
5165 __FUNCTION__
, arp_enable
));
5166 #endif /* DHD_LOG_DUMP */
5169 retcode
= dhd_wl_ioctl_get_intiovar(dhd
, "arp_version",
5170 &version
, WLC_GET_VAR
, FALSE
, 0);
5172 DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
5173 __FUNCTION__
, retcode
));
5174 dhd
->arp_version
= 1;
5177 DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__
, version
));
5178 dhd
->arp_version
= version
;
5183 /* XXX ANDREY: clear AOE arp_table */
5185 dhd_aoe_arp_clr(dhd_pub_t
*dhd
, int idx
)
5189 if (dhd
== NULL
) return;
5190 if (dhd
->arp_version
== 1)
5193 ret
= dhd_iovar(dhd
, idx
, "arp_table_clear", NULL
, 0, NULL
, 0, TRUE
);
5195 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
5198 DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__
));
5200 DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__
));
5201 #endif /* DHD_LOG_DUMP */
5203 /* mac address isn't cleared here but it will be cleared after dongle off */
5204 dhd
->hmac_updated
= 0;
5207 /* XXX ANDREY: clear hostip table */
5209 dhd_aoe_hostip_clr(dhd_pub_t
*dhd
, int idx
)
5213 if (dhd
== NULL
) return;
5214 if (dhd
->arp_version
== 1)
5217 ret
= dhd_iovar(dhd
, idx
, "arp_hostip_clear", NULL
, 0, NULL
, 0, TRUE
);
5219 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
5222 DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__
));
5224 DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__
));
5225 #endif /* DHD_LOG_DUMP */
5230 dhd_arp_offload_add_ip(dhd_pub_t
*dhd
, uint32 ipaddr
, int idx
)
5234 if (dhd
== NULL
) return;
5235 if (dhd
->arp_version
== 1)
5238 ret
= dhd_iovar(dhd
, idx
, "arp_hostip", (char *)&ipaddr
, sizeof(ipaddr
),
5241 DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__
, ret
));
5243 /* mac address is updated in the dongle */
5244 dhd
->hmac_updated
= 1;
5246 DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__
));
5248 DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__
));
5249 #endif /* DHD_LOG_DUMP */
5254 dhd_arp_get_arp_hostip_table(dhd_pub_t
*dhd
, void *buf
, int buflen
, int idx
)
5257 uint32
*ptr32
= buf
;
5258 bool clr_bottom
= FALSE
;
5262 if (dhd
== NULL
) return -1;
5263 if (dhd
->arp_version
== 1)
5266 ret
= dhd_iovar(dhd
, idx
, "arp_hostip", NULL
, 0, (char *)buf
, buflen
,
5269 DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
5270 __FUNCTION__
, ret
));
5275 /* clean up the buf, ascii reminder */
5276 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
5288 #endif /* ARP_OFFLOAD_SUPPORT */
5291 * Neighbor Discovery Offload: enable NDO feature
5292 * Called by ipv6 event handler when interface comes up/goes down
5295 dhd_ndo_enable(dhd_pub_t
* dhd
, int ndo_enable
)
5302 #if defined(WL_CFG80211) && defined(WL_NAN)
5303 if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd
))) {
5304 /* If nan dp is active, skip NDO */
5305 DHD_INFO(("Active NAN DP, skip NDO\n"));
5308 #endif /* WL_CFG80211 && WL_NAN */
5310 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
5311 /* NDO disable on STA+SOFTAP mode */
5314 #endif /* WL_CFG80211 */
5315 retcode
= dhd_wl_ioctl_set_intiovar(dhd
, "ndoe",
5316 ndo_enable
, WLC_SET_VAR
, TRUE
, 0);
5318 DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
5319 __FUNCTION__
, ndo_enable
, retcode
));
5321 DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
5322 __FUNCTION__
, ndo_enable
));
5328 * Neighbor Discover Offload: enable NDO feature
5329 * Called by ipv6 event handler when interface comes up
5332 dhd_ndo_add_ip(dhd_pub_t
*dhd
, char* ipv6addr
, int idx
)
5335 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5341 iov_len
= bcm_mkiovar("nd_hostip", (char *)ipv6addr
,
5342 IPV6_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
5344 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5345 __FUNCTION__
, sizeof(iovbuf
)));
5348 retcode
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
5351 DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
5352 __FUNCTION__
, retcode
));
5354 DHD_TRACE(("%s: ndo ipaddr entry added \n",
5361 * Neighbor Discover Offload: enable NDO feature
5362 * Called by ipv6 event handler when interface goes down
5365 dhd_ndo_remove_ip(dhd_pub_t
*dhd
, int idx
)
5368 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5374 iov_len
= bcm_mkiovar("nd_hostip_clear", NULL
,
5375 0, iovbuf
, sizeof(iovbuf
));
5377 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5378 __FUNCTION__
, sizeof(iovbuf
)));
5381 retcode
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
5384 DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
5385 __FUNCTION__
, retcode
));
5387 DHD_TRACE(("%s: ndo ipaddr entry removed \n",
5392 /* Enhanced ND offload */
5394 dhd_ndo_get_version(dhd_pub_t
*dhdp
)
5396 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5397 wl_nd_hostip_t ndo_get_ver
;
5406 memset(&iovbuf
, 0, sizeof(iovbuf
));
5407 ndo_get_ver
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
5408 ndo_get_ver
.op_type
= htod16(WL_ND_HOSTIP_OP_VER
);
5409 ndo_get_ver
.length
= htod32(WL_ND_HOSTIP_FIXED_LEN
+ sizeof(uint16
));
5410 ndo_get_ver
.u
.version
= 0;
5411 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver
,
5412 WL_ND_HOSTIP_FIXED_LEN
+ sizeof(uint16
), iovbuf
, sizeof(iovbuf
));
5415 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5416 __FUNCTION__
, sizeof(iovbuf
)));
5420 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_GET_VAR
, iovbuf
, iov_len
, FALSE
, 0);
5423 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
5424 /* ver iovar not supported. NDO version is 0 */
5427 wl_nd_hostip_t
*ndo_ver_ret
= (wl_nd_hostip_t
*)iovbuf
;
5429 if ((dtoh16(ndo_ver_ret
->version
) == WL_ND_HOSTIP_IOV_VER
) &&
5430 (dtoh16(ndo_ver_ret
->op_type
) == WL_ND_HOSTIP_OP_VER
) &&
5431 (dtoh32(ndo_ver_ret
->length
) == WL_ND_HOSTIP_FIXED_LEN
5432 + sizeof(uint16
))) {
5433 /* nd_hostip iovar version */
5434 ver
= dtoh16(ndo_ver_ret
->u
.version
);
5437 DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__
, ver
));
5444 dhd_ndo_add_ip_with_type(dhd_pub_t
*dhdp
, char *ipv6addr
, uint8 type
, int idx
)
5446 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5447 wl_nd_hostip_t ndo_add_addr
;
5451 if (dhdp
== NULL
|| ipv6addr
== 0) {
5455 /* wl_nd_hostip_t fixed param */
5456 ndo_add_addr
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
5457 ndo_add_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_ADD
);
5458 ndo_add_addr
.length
= htod32(WL_ND_HOSTIP_WITH_ADDR_LEN
);
5459 /* wl_nd_host_ip_addr_t param for add */
5460 memcpy(&ndo_add_addr
.u
.host_ip
.ip_addr
, ipv6addr
, IPV6_ADDR_LEN
);
5461 ndo_add_addr
.u
.host_ip
.type
= type
;
5463 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr
,
5464 WL_ND_HOSTIP_WITH_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
5466 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5467 __FUNCTION__
, sizeof(iovbuf
)));
5471 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
5473 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
5474 #ifdef NDO_CONFIG_SUPPORT
5475 if (retcode
== BCME_NORESOURCE
) {
5476 /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
5477 DHD_INFO(("%s: Host IP count exceed device capacity,"
5478 "ND offload deactivated\n", __FUNCTION__
));
5479 dhdp
->ndo_host_ip_overflow
= TRUE
;
5480 dhd_ndo_enable(dhdp
, FALSE
);
5482 #endif /* NDO_CONFIG_SUPPORT */
5484 DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__
, retcode
));
5491 dhd_ndo_remove_ip_by_addr(dhd_pub_t
*dhdp
, char *ipv6addr
, int idx
)
5493 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5494 wl_nd_hostip_t ndo_del_addr
;
5498 if (dhdp
== NULL
|| ipv6addr
== 0) {
5502 /* wl_nd_hostip_t fixed param */
5503 ndo_del_addr
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
5504 ndo_del_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_DEL
);
5505 ndo_del_addr
.length
= htod32(WL_ND_HOSTIP_WITH_ADDR_LEN
);
5506 /* wl_nd_host_ip_addr_t param for del */
5507 memcpy(&ndo_del_addr
.u
.host_ip
.ip_addr
, ipv6addr
, IPV6_ADDR_LEN
);
5508 ndo_del_addr
.u
.host_ip
.type
= 0; /* don't care */
5510 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr
,
5511 WL_ND_HOSTIP_WITH_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
5514 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5515 __FUNCTION__
, sizeof(iovbuf
)));
5519 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
5521 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
5523 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__
, retcode
));
5530 dhd_ndo_remove_ip_by_type(dhd_pub_t
*dhdp
, uint8 type
, int idx
)
5532 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5533 wl_nd_hostip_t ndo_del_addr
;
5541 /* wl_nd_hostip_t fixed param */
5542 ndo_del_addr
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
5543 if (type
== WL_ND_IPV6_ADDR_TYPE_UNICAST
) {
5544 ndo_del_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_DEL_UC
);
5545 } else if (type
== WL_ND_IPV6_ADDR_TYPE_ANYCAST
) {
5546 ndo_del_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_DEL_AC
);
5550 ndo_del_addr
.length
= htod32(WL_ND_HOSTIP_FIXED_LEN
);
5552 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr
, WL_ND_HOSTIP_FIXED_LEN
,
5553 iovbuf
, sizeof(iovbuf
));
5556 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5557 __FUNCTION__
, sizeof(iovbuf
)));
5561 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
5563 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
5565 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__
, retcode
));
5572 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t
*dhdp
, int enable
)
5574 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
5582 iov_len
= bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable
, sizeof(int),
5583 iovbuf
, sizeof(iovbuf
));
5586 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
5587 __FUNCTION__
, sizeof(iovbuf
)));
5591 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, 0);
5593 DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
5594 __FUNCTION__
, enable
, retcode
));
5596 DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
5597 __FUNCTION__
, enable
));
5604 uint iscan_thread_id
= 0;
5605 iscan_buf_t
* iscan_chain
= 0;
5608 dhd_iscan_allocate_buf(dhd_pub_t
*dhd
, iscan_buf_t
**iscanbuf
)
5610 iscan_buf_t
*iscanbuf_alloc
= 0;
5611 iscan_buf_t
*iscanbuf_head
;
5613 DHD_ISCAN(("%s: Entered\n", __FUNCTION__
));
5616 iscanbuf_alloc
= (iscan_buf_t
*)MALLOC(dhd
->osh
, sizeof(iscan_buf_t
));
5617 if (iscanbuf_alloc
== NULL
)
5620 iscanbuf_alloc
->next
= NULL
;
5621 iscanbuf_head
= *iscanbuf
;
5623 DHD_ISCAN(("%s: addr of allocated node = 0x%X"
5624 "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
5625 __FUNCTION__
, iscanbuf_alloc
, iscanbuf_head
, dhd
));
5627 if (iscanbuf_head
== NULL
) {
5628 *iscanbuf
= iscanbuf_alloc
;
5629 DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__
));
5633 while (iscanbuf_head
->next
)
5634 iscanbuf_head
= iscanbuf_head
->next
;
5636 iscanbuf_head
->next
= iscanbuf_alloc
;
5640 return iscanbuf_alloc
;
5644 dhd_iscan_free_buf(void *dhdp
, iscan_buf_t
*iscan_delete
)
5646 iscan_buf_t
*iscanbuf_free
= 0;
5647 iscan_buf_t
*iscanbuf_prv
= 0;
5648 iscan_buf_t
*iscanbuf_cur
;
5649 dhd_pub_t
*dhd
= dhd_bus_pub(dhdp
);
5650 DHD_ISCAN(("%s: Entered\n", __FUNCTION__
));
5654 iscanbuf_cur
= iscan_chain
;
5656 /* If iscan_delete is null then delete the entire
5657 * chain or else delete specific one provided
5659 if (!iscan_delete
) {
5660 while (iscanbuf_cur
) {
5661 iscanbuf_free
= iscanbuf_cur
;
5662 iscanbuf_cur
= iscanbuf_cur
->next
;
5663 iscanbuf_free
->next
= 0;
5664 MFREE(dhd
->osh
, iscanbuf_free
, sizeof(iscan_buf_t
));
5668 while (iscanbuf_cur
) {
5669 if (iscanbuf_cur
== iscan_delete
)
5671 iscanbuf_prv
= iscanbuf_cur
;
5672 iscanbuf_cur
= iscanbuf_cur
->next
;
5675 iscanbuf_prv
->next
= iscan_delete
->next
;
5677 iscan_delete
->next
= 0;
5678 MFREE(dhd
->osh
, iscan_delete
, sizeof(iscan_buf_t
));
5687 dhd_iscan_result_buf(void)
5693 dhd_iscan_issue_request(void * dhdp
, wl_iscan_params_t
*pParams
, uint32 size
)
5696 dhd_pub_t
*dhd
= dhd_bus_pub(dhdp
);
5698 char iovar
[] = "iscan";
5699 uint32 allocSize
= 0;
5704 allocSize
= (size
+ strlen(iovar
) + 1);
5705 if ((allocSize
< size
) || (allocSize
< strlen(iovar
)))
5707 DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
5708 __FUNCTION__
, allocSize
, size
, strlen(iovar
)));
5711 buf
= MALLOC(dhd
->osh
, allocSize
);
5715 DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__
, allocSize
));
5718 ioctl
.cmd
= WLC_SET_VAR
;
5719 len
= bcm_mkiovar(iovar
, (char *)pParams
, size
, buf
, allocSize
);
5721 rc
= BCME_BUFTOOSHORT
;
5724 rc
= dhd_wl_ioctl(dhd
, 0, &ioctl
, buf
, len
);
5729 MFREE(dhd
->osh
, buf
, allocSize
);
5736 dhd_iscan_get_partial_result(void *dhdp
, uint
*scan_count
)
5738 wl_iscan_results_t
*list_buf
;
5739 wl_iscan_results_t list
;
5740 wl_scan_results_t
*results
;
5741 iscan_buf_t
*iscan_cur
;
5743 dhd_pub_t
*dhd
= dhd_bus_pub(dhdp
);
5748 DHD_ISCAN(("%s: Enter\n", __FUNCTION__
));
5750 iscan_cur
= dhd_iscan_allocate_buf(dhd
, &iscan_chain
);
5752 DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__
));
5753 dhd_iscan_free_buf(dhdp
, 0);
5754 dhd_iscan_request(dhdp
, WL_SCAN_ACTION_ABORT
);
5755 dhd_ind_scan_confirm(dhdp
, FALSE
);
5761 memset(iscan_cur
->iscan_buf
, 0, WLC_IW_ISCAN_MAXLEN
);
5762 list_buf
= (wl_iscan_results_t
*)iscan_cur
->iscan_buf
;
5763 results
= &list_buf
->results
;
5764 results
->buflen
= WL_ISCAN_RESULTS_FIXED_SIZE
;
5765 results
->version
= 0;
5768 memset(&list
, 0, sizeof(list
));
5769 list
.results
.buflen
= htod32(WLC_IW_ISCAN_MAXLEN
);
5770 len
= bcm_mkiovar("iscanresults", (char *)&list
, WL_ISCAN_RESULTS_FIXED_SIZE
,
5771 iscan_cur
->iscan_buf
, WLC_IW_ISCAN_MAXLEN
);
5773 dhd_iscan_free_buf(dhdp
, 0);
5774 dhd_iscan_request(dhdp
, WL_SCAN_ACTION_ABORT
);
5775 dhd_ind_scan_confirm(dhdp
, FALSE
);
5776 status
= BCME_BUFTOOSHORT
;
5779 ioctl
.cmd
= WLC_GET_VAR
;
5781 rc
= dhd_wl_ioctl(dhd
, 0, &ioctl
, iscan_cur
->iscan_buf
, WLC_IW_ISCAN_MAXLEN
);
5783 results
->buflen
= dtoh32(results
->buflen
);
5784 results
->version
= dtoh32(results
->version
);
5785 *scan_count
= results
->count
= dtoh32(results
->count
);
5786 status
= dtoh32(list_buf
->status
);
5787 DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__
, results
->count
, status
));
5791 if (!(*scan_count
)) {
5792 /* TODO: race condition when FLUSH already called */
5793 dhd_iscan_free_buf(dhdp
, 0);
5799 #endif /* SIMPLE_ISCAN */
5802 * returns = TRUE if associated, FALSE if not associated
5804 bool dhd_is_associated(dhd_pub_t
*dhd
, uint8 ifidx
, int *retval
)
5806 char bssid
[6], zbuf
[6];
5812 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_BSSID
, (char *)&bssid
,
5813 ETHER_ADDR_LEN
, FALSE
, ifidx
);
5814 /* XXX:AS!!! res can be: -17(BCME_NOTASSOCIATED),-22(BCME_NORESOURCE), and 0(OK)
5815 OK - doesn't mean associated yet, the returned bssid
5816 still needs to be checked for non zero array
5818 DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__
, ret
));
5820 if (ret
== BCME_NOTASSOCIATED
) {
5821 DHD_ERROR(("%s: WLC_GET_BSSID, NOT ASSOCIATED\n", __FUNCTION__
));
5830 if ((memcmp(bssid
, zbuf
, ETHER_ADDR_LEN
) == 0)) {
5831 DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__
));
5837 /* Function to estimate possible DTIM_SKIP value */
5838 #if defined(BCMPCIE)
5840 dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
, int *dtim_period
, int *bcn_interval
)
5842 int bcn_li_dtim
= 1; /* deafult no dtim skip setting */
5844 int allowed_skip_dtim_cnt
= 0;
5846 if (dhd
->disable_dtim_in_suspend
) {
5847 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__
));
5852 /* Check if associated */
5853 if (dhd_is_associated(dhd
, 0, NULL
) == FALSE
) {
5854 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__
, ret
));
5858 if (dtim_period
== NULL
|| bcn_interval
== NULL
)
5861 /* read associated AP beacon interval */
5862 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_BCNPRD
,
5863 bcn_interval
, sizeof(*bcn_interval
), FALSE
, 0)) < 0) {
5864 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__
, ret
));
5868 /* read associated AP dtim setup */
5869 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_DTIMPRD
,
5870 dtim_period
, sizeof(*dtim_period
), FALSE
, 0)) < 0) {
5871 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
5875 /* if not assocated just return */
5876 if (*dtim_period
== 0) {
5880 if (dhd
->max_dtim_enable
) {
5882 (int) (MAX_DTIM_ALLOWED_INTERVAL
/ ((*dtim_period
) * (*bcn_interval
)));
5883 if (bcn_li_dtim
== 0) {
5887 /* attemp to use platform defined dtim skip interval */
5888 bcn_li_dtim
= dhd
->suspend_bcn_li_dtim
;
5890 /* check if sta listen interval fits into AP dtim */
5891 if (*dtim_period
> CUSTOM_LISTEN_INTERVAL
) {
5892 /* AP DTIM to big for our Listen Interval : no dtim skiping */
5893 bcn_li_dtim
= NO_DTIM_SKIP
;
5894 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5895 __FUNCTION__
, *dtim_period
, CUSTOM_LISTEN_INTERVAL
));
5899 if (((*dtim_period
) * (*bcn_interval
) * bcn_li_dtim
) > MAX_DTIM_ALLOWED_INTERVAL
) {
5900 allowed_skip_dtim_cnt
=
5901 MAX_DTIM_ALLOWED_INTERVAL
/ ((*dtim_period
) * (*bcn_interval
));
5903 (allowed_skip_dtim_cnt
!= 0) ? allowed_skip_dtim_cnt
: NO_DTIM_SKIP
;
5906 if ((bcn_li_dtim
* (*dtim_period
)) > CUSTOM_LISTEN_INTERVAL
) {
5907 /* Round up dtim_skip to fit into STAs Listen Interval */
5908 bcn_li_dtim
= (int)(CUSTOM_LISTEN_INTERVAL
/ *dtim_period
);
5909 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__
, bcn_li_dtim
));
5913 if (dhd
->conf
->suspend_bcn_li_dtim
>= 0)
5914 bcn_li_dtim
= dhd
->conf
->suspend_bcn_li_dtim
;
5915 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5916 __FUNCTION__
, *bcn_interval
, bcn_li_dtim
, *dtim_period
, CUSTOM_LISTEN_INTERVAL
));
5920 #else /* OEM_ANDROID && BCMPCIE */
5922 dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
)
5924 int bcn_li_dtim
= 1; /* deafult no dtim skip setting */
5926 int dtim_period
= 0;
5928 int allowed_skip_dtim_cnt
= 0;
5930 if (dhd
->disable_dtim_in_suspend
) {
5931 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__
));
5936 /* Check if associated */
5937 if (dhd_is_associated(dhd
, 0, NULL
) == FALSE
) {
5938 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__
, ret
));
5942 /* read associated AP beacon interval */
5943 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_BCNPRD
,
5944 &ap_beacon
, sizeof(ap_beacon
), FALSE
, 0)) < 0) {
5945 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__
, ret
));
5949 /* read associated ap's dtim setup */
5950 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_DTIMPRD
,
5951 &dtim_period
, sizeof(dtim_period
), FALSE
, 0)) < 0) {
5952 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
5956 /* if not assocated just exit */
5957 if (dtim_period
== 0) {
5961 if (dhd
->max_dtim_enable
) {
5963 (int) (MAX_DTIM_ALLOWED_INTERVAL
/ (ap_beacon
* dtim_period
));
5964 if (bcn_li_dtim
== 0) {
5968 /* attemp to use platform defined dtim skip interval */
5969 bcn_li_dtim
= dhd
->suspend_bcn_li_dtim
;
5971 /* check if sta listen interval fits into AP dtim */
5972 if (dtim_period
> CUSTOM_LISTEN_INTERVAL
) {
5973 /* AP DTIM to big for our Listen Interval : no dtim skiping */
5974 bcn_li_dtim
= NO_DTIM_SKIP
;
5975 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5976 __FUNCTION__
, dtim_period
, CUSTOM_LISTEN_INTERVAL
));
5980 if ((dtim_period
* ap_beacon
* bcn_li_dtim
) > MAX_DTIM_ALLOWED_INTERVAL
) {
5981 allowed_skip_dtim_cnt
=
5982 MAX_DTIM_ALLOWED_INTERVAL
/ (dtim_period
* ap_beacon
);
5984 (allowed_skip_dtim_cnt
!= 0) ? allowed_skip_dtim_cnt
: NO_DTIM_SKIP
;
5987 if ((bcn_li_dtim
* dtim_period
) > CUSTOM_LISTEN_INTERVAL
) {
5988 /* Round up dtim_skip to fit into STAs Listen Interval */
5989 bcn_li_dtim
= (int)(CUSTOM_LISTEN_INTERVAL
/ dtim_period
);
5990 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__
, bcn_li_dtim
));
5994 if (dhd
->conf
->suspend_bcn_li_dtim
>= 0)
5995 bcn_li_dtim
= dhd
->conf
->suspend_bcn_li_dtim
;
5996 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5997 __FUNCTION__
, ap_beacon
, bcn_li_dtim
, dtim_period
, CUSTOM_LISTEN_INTERVAL
));
6002 #endif /* OEM_ANDROID && BCMPCIE */
6004 #ifdef CONFIG_SILENT_ROAM
6006 dhd_sroam_set_mon(dhd_pub_t
*dhd
, bool set
)
6009 wlc_sroam_t
*psroam
;
6010 wlc_sroam_info_t
*sroam
;
6011 uint sroamlen
= sizeof(*sroam
) + SROAM_HDRLEN
;
6013 /* Check if associated */
6014 if (dhd_is_associated(dhd
, 0, NULL
) == FALSE
) {
6015 DHD_TRACE(("%s NOT assoc\n", __FUNCTION__
));
6019 if (set
&& (dhd
->op_mode
&
6020 (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_P2P_GC_MODE
| DHD_FLAG_P2P_GO_MODE
))) {
6021 DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set
, dhd
->op_mode
));
6025 if (!dhd
->sroam_turn_on
) {
6026 DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set
, dhd
->sroam_turn_on
));
6029 psroam
= (wlc_sroam_t
*)MALLOCZ(dhd
->osh
, sroamlen
);
6031 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
));
6035 ret
= dhd_iovar(dhd
, 0, "sroam", NULL
, 0, (char *)psroam
, sroamlen
, FALSE
);
6037 DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__
, ret
));
6041 if (psroam
->ver
!= WLC_SILENT_ROAM_CUR_VER
) {
6046 sroam
= (wlc_sroam_info_t
*)psroam
->data
;
6047 sroam
->sroam_on
= set
;
6048 DHD_INFO((" Silent roam monitor mode %s\n", set
? "On" : "Off"));
6050 ret
= dhd_iovar(dhd
, 0, "sroam", (char *)psroam
, sroamlen
, NULL
, 0, TRUE
);
6052 DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__
, ret
));
6057 MFREE(dhd
->osh
, psroam
, sroamlen
);
6062 #endif /* CONFIG_SILENT_ROAM */
6064 /* Check if the mode supports STA MODE */
6065 bool dhd_support_sta_mode(dhd_pub_t
*dhd
)
6069 if (!(dhd
->op_mode
& DHD_FLAG_STA_MODE
))
6072 #endif /* WL_CFG80211 */
6076 #if defined(KEEP_ALIVE)
6077 int dhd_keep_alive_onoff(dhd_pub_t
*dhd
)
6081 wl_mkeep_alive_pkt_t mkeep_alive_pkt
= {0, 0, 0, 0, 0, {0}};
6082 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
;
6087 if (!dhd_support_sta_mode(dhd
))
6090 DHD_TRACE(("%s execution\n", __FUNCTION__
));
6092 str
= "mkeep_alive";
6093 str_len
= strlen(str
);
6094 strlcpy(buf
, str
, sizeof(buf
));
6095 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) (buf
+ str_len
+ 1);
6096 mkeep_alive_pkt
.period_msec
= dhd
->conf
->keep_alive_period
;
6097 buf_len
= str_len
+ 1;
6098 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
6099 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
6100 /* Setup keep alive zero for null packet generation */
6101 mkeep_alive_pkt
.keep_alive_id
= 0;
6102 mkeep_alive_pkt
.len_bytes
= 0;
6103 buf_len
+= WL_MKEEP_ALIVE_FIXED_LEN
;
6104 bzero(mkeep_alive_pkt
.data
, sizeof(mkeep_alive_pkt
.data
));
6105 /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
6106 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
6107 * guarantee that the buffer is properly aligned.
6109 memcpy((char *)mkeep_alive_pktp
, &mkeep_alive_pkt
, WL_MKEEP_ALIVE_FIXED_LEN
);
6111 res
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
6115 #endif /* defined(KEEP_ALIVE) */
6116 #define CSCAN_TLV_TYPE_SSID_IE 'S'
6118 * SSIDs list parsing from cscan tlv list
6121 wl_parse_ssid_list_tlv(char** list_str
, wlc_ssid_ext_t
* ssid
, int max
, int *bytes_left
)
6127 if ((list_str
== NULL
) || (*list_str
== NULL
) || (*bytes_left
< 0)) {
6128 DHD_ERROR(("%s error paramters\n", __FUNCTION__
));
6132 while (*bytes_left
> 0) {
6133 if (str
[0] != CSCAN_TLV_TYPE_SSID_IE
) {
6135 DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx
, *bytes_left
, str
[0]));
6140 DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__
, idx
));
6144 /* Get proper CSCAN_TLV_TYPE_SSID_IE */
6146 if (*bytes_left
== 0) {
6147 DHD_ERROR(("%s no length field.\n", __FUNCTION__
));
6151 ssid
[idx
].rssi_thresh
= 0;
6152 ssid
[idx
].flags
= 0;
6155 /* Broadcast SSID */
6156 ssid
[idx
].SSID_len
= 0;
6157 memset((char*)ssid
[idx
].SSID
, 0x0, DOT11_MAX_SSID_LEN
);
6161 DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left
));
6162 } else if (len
<= DOT11_MAX_SSID_LEN
) {
6163 /* Get proper SSID size */
6164 ssid
[idx
].SSID_len
= len
;
6167 if (ssid
[idx
].SSID_len
> *bytes_left
) {
6168 DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
6169 __FUNCTION__
, ssid
[idx
].SSID_len
, *bytes_left
));
6173 memcpy((char*)ssid
[idx
].SSID
, str
, ssid
[idx
].SSID_len
);
6175 *bytes_left
-= ssid
[idx
].SSID_len
;
6176 str
+= ssid
[idx
].SSID_len
;
6177 ssid
[idx
].hidden
= TRUE
;
6179 DHD_TRACE(("%s :size=%d left=%d\n",
6180 (char*)ssid
[idx
].SSID
, ssid
[idx
].SSID_len
, *bytes_left
));
6182 DHD_ERROR(("### SSID size more than %d\n", str
[0]));
6192 #if defined(WL_WIRELESS_EXT)
6193 /* Android ComboSCAN support */
6196 * data parsing from ComboScan tlv list
6199 wl_iw_parse_data_tlv(char** list_str
, void *dst
, int dst_size
, const char token
,
6200 int input_size
, int *bytes_left
)
6206 if ((list_str
== NULL
) || (*list_str
== NULL
) ||(bytes_left
== NULL
) || (*bytes_left
< 0)) {
6207 DHD_ERROR(("%s error paramters\n", __FUNCTION__
));
6212 /* Clean all dest bytes */
6213 memset(dst
, 0, dst_size
);
6214 if (*bytes_left
> 0) {
6216 if (str
[0] != token
) {
6217 DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
6218 __FUNCTION__
, token
, str
[0], *bytes_left
));
6225 if (input_size
== 1) {
6226 memcpy(dst
, str
, input_size
);
6228 else if (input_size
== 2) {
6229 memcpy(dst
, (char *)htod16(memcpy(&short_temp
, str
, input_size
)),
6232 else if (input_size
== 4) {
6233 memcpy(dst
, (char *)htod32(memcpy(&int_temp
, str
, input_size
)),
6237 *bytes_left
-= input_size
;
6246 * channel list parsing from cscan tlv list
6249 wl_iw_parse_channel_list_tlv(char** list_str
, uint16
* channel_list
,
6250 int channel_num
, int *bytes_left
)
6255 if ((list_str
== NULL
) || (*list_str
== NULL
) ||(bytes_left
== NULL
) || (*bytes_left
< 0)) {
6256 DHD_ERROR(("%s error paramters\n", __FUNCTION__
));
6261 while (*bytes_left
> 0) {
6263 if (str
[0] != CSCAN_TLV_TYPE_CHANNEL_IE
) {
6265 DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx
, *bytes_left
, str
[0]));
6268 /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
6274 channel_list
[idx
] = 0x0;
6277 channel_list
[idx
] = (uint16
)str
[0];
6278 DHD_TRACE(("%s channel=%d \n", __FUNCTION__
, channel_list
[idx
]));
6284 DHD_ERROR(("%s Too many channels \n", __FUNCTION__
));
6293 /* Parse a comma-separated list from list_str into ssid array, starting
6294 * at index idx. Max specifies size of the ssid array. Parses ssids
6295 * and returns updated idx; if idx >= max not all fit, the excess have
6296 * not been copied. Returns -1 on empty string, or on ssid too long.
6299 wl_iw_parse_ssid_list(char** list_str
, wlc_ssid_t
* ssid
, int idx
, int max
)
6303 if ((list_str
== NULL
) || (*list_str
== NULL
))
6306 for (str
= *list_str
; str
!= NULL
; str
= ptr
) {
6308 /* check for next TAG */
6309 if (!strncmp(str
, GET_CHANNEL
, strlen(GET_CHANNEL
))) {
6310 *list_str
= str
+ strlen(GET_CHANNEL
);
6314 if ((ptr
= strchr(str
, ',')) != NULL
) {
6318 if (strlen(str
) > DOT11_MAX_SSID_LEN
) {
6319 DHD_ERROR(("ssid <%s> exceeds %d\n", str
, DOT11_MAX_SSID_LEN
));
6323 if (strlen(str
) == 0)
6324 ssid
[idx
].SSID_len
= 0;
6327 bzero(ssid
[idx
].SSID
, sizeof(ssid
[idx
].SSID
));
6328 strlcpy((char*)ssid
[idx
].SSID
, str
, sizeof(ssid
[idx
].SSID
));
6329 ssid
[idx
].SSID_len
= sizeof(ssid
[idx
].SSID
);
6337 * Parse channel list from iwpriv CSCAN
6340 wl_iw_parse_channel_list(char** list_str
, uint16
* channel_list
, int channel_num
)
6345 char* endptr
= NULL
;
6347 if ((list_str
== NULL
)||(*list_str
== NULL
))
6352 while (strncmp(str
, GET_NPROBE
, strlen(GET_NPROBE
))) {
6353 val
= (int)strtoul(str
, &endptr
, 0);
6354 if (endptr
== str
) {
6355 printf("could not parse channel number starting at"
6356 " substring \"%s\" in list:\n%s\n",
6360 str
= endptr
+ strspn(endptr
, " ,");
6362 if (num
== channel_num
) {
6363 DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
6364 channel_num
, *list_str
));
6368 channel_list
[num
++] = (uint16
)val
;
6375 /* Given filename and download type, returns a buffer pointer and length
6376 * for download to f/w. Type can be FW or NVRAM.
6379 int dhd_get_download_buffer(dhd_pub_t
*dhd
, char *file_path
, download_type_t component
,
6380 char ** buffer
, int *length
)
6383 int ret
= BCME_ERROR
;
6389 /* Point to cache if available. */
6390 /* No Valid cache found on this call */
6396 image
= dhd_os_open_image1(dhd
, file_path
);
6397 if (image
== NULL
) {
6398 printf("%s: Open image file failed %s\n", __FUNCTION__
, file_path
);
6403 buf
= MALLOCZ(dhd
->osh
, file_len
);
6405 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
6406 __FUNCTION__
, file_len
));
6410 /* Download image */
6411 len
= dhd_os_get_image_block((char *)buf
, file_len
, image
);
6412 if ((len
<= 0 || len
> file_len
)) {
6413 MFREE(dhd
->osh
, buf
, file_len
);
6420 *buffer
= (char *)buf
;
6422 /* Cache if first call. */
6426 dhd_os_close_image1(dhd
, image
);
6432 dhd_download_2_dongle(dhd_pub_t
*dhd
, char *iovar
, uint16 flag
, uint16 dload_type
,
6433 unsigned char *dload_buf
, int len
)
6435 struct wl_dload_data
*dload_ptr
= (struct wl_dload_data
*)dload_buf
;
6437 int dload_data_offset
;
6438 static char iovar_buf
[WLC_IOCTL_MEDLEN
];
6441 memset(iovar_buf
, 0, sizeof(iovar_buf
));
6443 dload_data_offset
= OFFSETOF(wl_dload_data_t
, data
);
6444 dload_ptr
->flag
= (DLOAD_HANDLER_VER
<< DLOAD_FLAG_VER_SHIFT
) | flag
;
6445 dload_ptr
->dload_type
= dload_type
;
6446 dload_ptr
->len
= htod32(len
- dload_data_offset
);
6448 len
= ROUNDUP(len
, 8);
6450 iovar_len
= bcm_mkiovar(iovar
, (char *)dload_buf
,
6451 (uint
)len
, iovar_buf
, sizeof(iovar_buf
));
6452 if (iovar_len
== 0) {
6453 DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
6454 __FUNCTION__
, iovar
));
6455 return BCME_BUFTOOSHORT
;
6458 err
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovar_buf
,
6459 iovar_len
, IOV_SET
, 0);
6465 dhd_download_blob(dhd_pub_t
*dhd
, unsigned char *buf
,
6466 uint32 len
, char *iovar
)
6471 unsigned char *new_buf
;
6472 int err
= 0, data_offset
;
6473 uint16 dl_flag
= DL_BEGIN
;
6475 data_offset
= OFFSETOF(wl_dload_data_t
, data
);
6476 size2alloc
= data_offset
+ MAX_CHUNK_LEN
;
6477 size2alloc
= ROUNDUP(size2alloc
, 8);
6479 if ((new_buf
= (unsigned char *)MALLOCZ(dhd
->osh
, size2alloc
)) != NULL
) {
6481 chunk_len
= dhd_os_get_image_block((char *)(new_buf
+ data_offset
),
6482 MAX_CHUNK_LEN
, buf
);
6483 if (chunk_len
< 0) {
6484 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
6485 __FUNCTION__
, chunk_len
));
6489 if (len
- chunk_len
== 0)
6492 err
= dhd_download_2_dongle(dhd
, iovar
, dl_flag
, DL_TYPE_CLM
,
6493 new_buf
, data_offset
+ chunk_len
);
6495 dl_flag
&= ~DL_BEGIN
;
6497 len
= len
- chunk_len
;
6498 } while ((len
> 0) && (err
== 0));
6504 MFREE(dhd
->osh
, new_buf
, size2alloc
);
6510 dhd_apply_default_txcap(dhd_pub_t
*dhd
, char *path
)
6516 dhd_check_current_clm_data(dhd_pub_t
*dhd
)
6518 char iovbuf
[WLC_IOCTL_SMLEN
];
6519 wl_country_t
*cspec
;
6522 memset(iovbuf
, 0, sizeof(iovbuf
));
6523 err
= bcm_mkiovar("country", NULL
, 0, iovbuf
, sizeof(iovbuf
));
6525 err
= BCME_BUFTOOSHORT
;
6526 DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__
));
6529 err
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
6531 DHD_ERROR(("%s: country code get failed\n", __FUNCTION__
));
6534 cspec
= (wl_country_t
*)iovbuf
;
6535 if ((strncmp(cspec
->ccode
, WL_CCODE_NULL_COUNTRY
, WLC_CNTRY_BUF_SZ
)) == 0) {
6536 DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
6540 DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
6546 dhd_apply_default_clm(dhd_pub_t
*dhd
, char *clm_path
)
6548 char *clm_blob_path
;
6550 char *memblock
= NULL
;
6552 char iovbuf
[WLC_IOCTL_SMLEN
];
6555 if (clm_path
&& clm_path
[0] != '\0') {
6556 if (strlen(clm_path
) > MOD_PARAM_PATHLEN
) {
6557 DHD_ERROR(("clm path exceeds max len\n"));
6560 clm_blob_path
= clm_path
;
6561 DHD_TRACE(("clm path from module param:%s\n", clm_path
));
6563 clm_blob_path
= VENDOR_PATH CONFIG_BCMDHD_CLM_PATH
;
6566 /* If CLM blob file is found on the filesystem, download the file.
6567 * After CLM file download or If the blob file is not present,
6568 * validate the country code before proceeding with the initialization.
6569 * If country code is not valid, fail the initialization.
6571 memblock
= dhd_os_open_image1(dhd
, (char *)clm_blob_path
);
6572 if (memblock
== NULL
) {
6573 printf("%s: Ignore clm file %s\n", __FUNCTION__
, clm_path
);
6574 #if defined(DHD_BLOB_EXISTENCE_CHECK)
6578 status
= dhd_check_current_clm_data(dhd
);
6579 if (status
== TRUE
) {
6585 #endif /* DHD_BLOB_EXISTENCE_CHECK */
6589 len
= dhd_os_get_image_size(memblock
);
6591 if ((len
> 0) && (len
< MAX_CLM_BUF_SIZE
) && memblock
) {
6592 status
= dhd_check_current_clm_data(dhd
);
6593 if (status
== TRUE
) {
6594 #if defined(DHD_BLOB_EXISTENCE_CHECK)
6595 if (dhd
->op_mode
!= DHD_FLAG_MFG_MODE
) {
6602 DHD_ERROR(("%s: CLM already exist in F/W, "
6603 "new CLM data will be added to the end of existing CLM data!\n",
6605 #endif /* DHD_BLOB_EXISTENCE_CHECK */
6606 } else if (status
!= FALSE
) {
6611 /* Found blob file. Download the file */
6612 DHD_TRACE(("clm file download from %s \n", clm_blob_path
));
6613 err
= dhd_download_blob(dhd
, (unsigned char*)memblock
, len
, "clmload");
6615 DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__
, err
));
6616 /* Retrieve clmload_status and print */
6617 memset(iovbuf
, 0, sizeof(iovbuf
));
6618 len
= bcm_mkiovar("clmload_status", NULL
, 0, iovbuf
, sizeof(iovbuf
));
6620 err
= BCME_BUFTOOSHORT
;
6623 err
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
6625 DHD_ERROR(("%s: clmload_status get failed err=%d \n",
6626 __FUNCTION__
, err
));
6628 DHD_ERROR(("%s: clmload_status: %d \n",
6629 __FUNCTION__
, *((int *)iovbuf
)));
6630 if (*((int *)iovbuf
) == CHIPID_MISMATCH
) {
6631 DHD_ERROR(("Chip ID mismatch error \n"));
6637 DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__
));
6640 DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len
, memblock
));
6643 /* Verify country code */
6644 status
= dhd_check_current_clm_data(dhd
);
6646 if (status
!= TRUE
) {
6647 /* Country code not initialized or CLM download not proper */
6648 DHD_ERROR(("country code not initialized\n"));
6654 dhd_os_close_image1(dhd
, memblock
);
6660 void dhd_free_download_buffer(dhd_pub_t
*dhd
, void *buffer
, int length
)
6662 MFREE(dhd
->osh
, buffer
, length
);
6665 #ifdef SHOW_LOGTRACE
6667 dhd_parse_logstrs_file(osl_t
*osh
, char *raw_fmts
, int logstrs_size
,
6668 dhd_event_log_t
*event_log
)
6670 uint32
*lognums
= NULL
;
6671 char *logstrs
= NULL
;
6672 logstr_trailer_t
*trailer
= NULL
;
6676 bool match_fail
= TRUE
;
6678 uint8
*pfw_id
= NULL
;
6682 char fwid_str
[FWID_STR_LEN
];
6683 uint32 hdr_logstrs_size
= 0;
6685 /* Read last three words in the logstrs.bin file */
6686 trailer
= (logstr_trailer_t
*) (raw_fmts
+ logstrs_size
-
6687 sizeof(logstr_trailer_t
));
6689 if (trailer
->log_magic
== LOGSTRS_MAGIC
) {
6691 * logstrs.bin has a header.
6693 if (trailer
->version
== 1) {
6694 logstr_header_v1_t
*hdr_v1
= (logstr_header_v1_t
*) (raw_fmts
+
6695 logstrs_size
- sizeof(logstr_header_v1_t
));
6696 DHD_INFO(("%s: logstr header version = %u\n",
6697 __FUNCTION__
, hdr_v1
->version
));
6698 num_fmts
= hdr_v1
->rom_logstrs_offset
/ sizeof(uint32
);
6699 ram_index
= (hdr_v1
->ram_lognums_offset
-
6700 hdr_v1
->rom_lognums_offset
) / sizeof(uint32
);
6701 lognums
= (uint32
*) &raw_fmts
[hdr_v1
->rom_lognums_offset
];
6702 logstrs
= (char *) &raw_fmts
[hdr_v1
->rom_logstrs_offset
];
6703 hdr_logstrs_size
= hdr_v1
->logstrs_size
;
6704 } else if (trailer
->version
== 2) {
6705 logstr_header_t
*hdr
= (logstr_header_t
*) (raw_fmts
+ logstrs_size
-
6706 sizeof(logstr_header_t
));
6707 DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
6708 __FUNCTION__
, hdr
->version
, hdr
->flags
));
6710 /* For ver. 2 of the header, need to match fwid of
6711 * both logstrs.bin and fw bin
6714 /* read the FWID from fw bin */
6715 file
= dhd_os_open_image1(NULL
, st_str_file_path
);
6717 DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__
));
6720 file_len
= dhd_os_get_image_size(file
);
6721 if (file_len
<= 0) {
6722 DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__
));
6725 /* fwid is at the end of fw bin in string format */
6726 if (dhd_os_seek_file(file
, file_len
- (sizeof(fwid_str
) - 1)) < 0) {
6727 DHD_ERROR(("%s: can't seek file \n", __FUNCTION__
));
6731 memset(fwid_str
, 0, sizeof(fwid_str
));
6732 if (dhd_os_get_image_block(fwid_str
, sizeof(fwid_str
) - 1, file
) <= 0) {
6733 DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__
));
6736 pfw_id
= (uint8
*)bcmstrnstr(fwid_str
, sizeof(fwid_str
) - 1,
6737 FWID_STR_1
, strlen(FWID_STR_1
));
6739 pfw_id
= (uint8
*)bcmstrnstr(fwid_str
, sizeof(fwid_str
) - 1,
6740 FWID_STR_2
, strlen(FWID_STR_2
));
6742 DHD_ERROR(("%s: could not find id in FW bin!\n",
6747 /* search for the '-' in the fw id str, after which the
6748 * actual 4 byte fw id is present
6750 while (pfw_id
&& *pfw_id
!= '-') {
6754 fwid
= bcm_strtoul((char *)pfw_id
, NULL
, 16);
6756 /* check if fw id in logstrs.bin matches the fw one */
6757 if (hdr
->fw_id
!= fwid
) {
6758 DHD_ERROR(("%s: logstr id does not match FW!"
6759 "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
6760 __FUNCTION__
, hdr
->fw_id
, fwid
));
6765 num_fmts
= hdr
->rom_logstrs_offset
/ sizeof(uint32
);
6766 ram_index
= (hdr
->ram_lognums_offset
-
6767 hdr
->rom_lognums_offset
) / sizeof(uint32
);
6768 lognums
= (uint32
*) &raw_fmts
[hdr
->rom_lognums_offset
];
6769 logstrs
= (char *) &raw_fmts
[hdr
->rom_logstrs_offset
];
6770 hdr_logstrs_size
= hdr
->logstrs_size
;
6774 dhd_os_close_image1(NULL
, file
);
6780 DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__
,
6784 if (logstrs_size
!= hdr_logstrs_size
) {
6785 DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__
, hdr_logstrs_size
));
6790 * Legacy logstrs.bin format without header.
6792 num_fmts
= *((uint32
*) (raw_fmts
)) / sizeof(uint32
);
6794 /* Legacy RAM-only logstrs.bin format:
6795 * - RAM 'lognums' section
6796 * - RAM 'logstrs' section.
6798 * 'lognums' is an array of indexes for the strings in the
6799 * 'logstrs' section. The first uint32 is an index to the
6800 * start of 'logstrs'. Therefore, if this index is divided
6801 * by 'sizeof(uint32)' it provides the number of logstr
6805 lognums
= (uint32
*) raw_fmts
;
6806 logstrs
= (char *) &raw_fmts
[num_fmts
<< 2];
6809 if (event_log
->fmts
!= NULL
) {
6810 fmts
= event_log
->fmts
; /* reuse existing malloced fmts */
6812 fmts
= MALLOC(osh
, num_fmts
* sizeof(char *));
6816 DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__
));
6819 event_log
->fmts_size
= num_fmts
* sizeof(char *);
6821 for (i
= 0; i
< num_fmts
; i
++) {
6822 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6823 * (they are 0-indexed relative to 'rom_logstrs_offset').
6825 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6826 * are 0-indexed relative to the start of the logstrs.bin file).
6828 if (i
== ram_index
) {
6831 fmts
[i
] = &logstrs
[lognums
[i
]];
6833 event_log
->fmts
= fmts
;
6834 event_log
->raw_fmts_size
= logstrs_size
;
6835 event_log
->raw_fmts
= raw_fmts
;
6836 event_log
->num_fmts
= num_fmts
;
6838 } /* dhd_parse_logstrs_file */
6840 int dhd_parse_map_file(osl_t
*osh
, void *file
, uint32
*ramstart
, uint32
*rodata_start
,
6843 char *raw_fmts
= NULL
, *raw_fmts_loc
= NULL
;
6844 uint32 read_size
= READ_NUM_BYTES
;
6854 /* Allocate 1 byte more than read_size to terminate it with NULL */
6855 raw_fmts
= MALLOCZ(osh
, read_size
+ 1);
6856 if (raw_fmts
== NULL
) {
6857 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__
));
6861 /* read ram start, rodata_start and rodata_end values from map file */
6862 while (count
!= ALL_MAP_VAL
)
6864 error
= dhd_os_read_file(file
, raw_fmts
, read_size
);
6866 DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__
,
6871 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
6872 raw_fmts
[read_size
] = '\0';
6874 /* Get ramstart address */
6875 raw_fmts_loc
= raw_fmts
;
6876 if (!(count
& RAMSTART_BIT
) &&
6877 (cptr
= bcmstrnstr(raw_fmts_loc
, read_size
, ramstart_str
,
6878 strlen(ramstart_str
)))) {
6879 cptr
= cptr
- BYTES_AHEAD_NUM
;
6880 sscanf(cptr
, "%x %c text_start", ramstart
, &c
);
6881 count
|= RAMSTART_BIT
;
6884 /* Get ram rodata start address */
6885 raw_fmts_loc
= raw_fmts
;
6886 if (!(count
& RDSTART_BIT
) &&
6887 (cptr
= bcmstrnstr(raw_fmts_loc
, read_size
, rodata_start_str
,
6888 strlen(rodata_start_str
)))) {
6889 cptr
= cptr
- BYTES_AHEAD_NUM
;
6890 sscanf(cptr
, "%x %c rodata_start", rodata_start
, &c
);
6891 count
|= RDSTART_BIT
;
6894 /* Get ram rodata end address */
6895 raw_fmts_loc
= raw_fmts
;
6896 if (!(count
& RDEND_BIT
) &&
6897 (cptr
= bcmstrnstr(raw_fmts_loc
, read_size
, rodata_end_str
,
6898 strlen(rodata_end_str
)))) {
6899 cptr
= cptr
- BYTES_AHEAD_NUM
;
6900 sscanf(cptr
, "%x %c rodata_end", rodata_end
, &c
);
6904 if (error
< (int)read_size
) {
6906 * since we reset file pos back to earlier pos by
6907 * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
6908 * The reason for this is if string is spreaded across
6909 * bytes, the read function should not miss it.
6910 * So if ret value is less than read_size, reached EOF don't read further
6914 memset(raw_fmts
, 0, read_size
);
6916 * go back to predefined NUM of bytes so that we won't miss
6917 * the string and addr even if it comes as splited in next read.
6919 dhd_os_seek_file(file
, -GO_BACK_FILE_POS_NUM_BYTES
);
6924 MFREE(osh
, raw_fmts
, read_size
+ 1);
6927 if (count
== ALL_MAP_VAL
) {
6931 DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__
,
6936 } /* dhd_parse_map_file */
6938 #ifdef PCIE_FULL_DONGLE
6940 dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t
*dhdp
, void *pktbuf
,
6941 dhd_event_log_t
*event_data
)
6943 uint32 infobuf_version
;
6944 info_buf_payload_hdr_t
*payload_hdr_ptr
;
6945 uint16 payload_hdr_type
;
6946 uint16 payload_hdr_length
;
6948 DHD_TRACE(("%s:Enter\n", __FUNCTION__
));
6950 if (PKTLEN(dhdp
->osh
, pktbuf
) < sizeof(uint32
)) {
6951 DHD_ERROR(("%s: infobuf too small for version field\n",
6955 infobuf_version
= *((uint32
*)PKTDATA(dhdp
->osh
, pktbuf
));
6956 PKTPULL(dhdp
->osh
, pktbuf
, sizeof(uint32
));
6957 if (infobuf_version
!= PCIE_INFOBUF_V1
) {
6958 DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
6959 __FUNCTION__
, infobuf_version
));
6963 /* Version 1 infobuf has a single type/length (and then value) field */
6964 if (PKTLEN(dhdp
->osh
, pktbuf
) < sizeof(info_buf_payload_hdr_t
)) {
6965 DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
6969 /* Process/parse the common info payload header (type/length) */
6970 payload_hdr_ptr
= (info_buf_payload_hdr_t
*)PKTDATA(dhdp
->osh
, pktbuf
);
6971 payload_hdr_type
= ltoh16(payload_hdr_ptr
->type
);
6972 payload_hdr_length
= ltoh16(payload_hdr_ptr
->length
);
6973 if (payload_hdr_type
!= PCIE_INFOBUF_V1_TYPE_LOGTRACE
) {
6974 DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
6975 __FUNCTION__
, payload_hdr_type
));
6978 PKTPULL(dhdp
->osh
, pktbuf
, sizeof(info_buf_payload_hdr_t
));
6980 /* Validate that the specified length isn't bigger than the
6983 if (payload_hdr_length
> PKTLEN(dhdp
->osh
, pktbuf
)) {
6984 DHD_ERROR(("%s: infobuf logtrace length is bigger"
6985 " than actual buffer data\n", __FUNCTION__
));
6988 dhd_dbg_trace_evnt_handler(dhdp
, PKTDATA(dhdp
->osh
, pktbuf
),
6989 event_data
, payload_hdr_length
);
6995 } /* dhd_event_logtrace_infobuf_pkt_process */
6996 #endif /* PCIE_FULL_DONGLE */
6997 #endif /* SHOW_LOGTRACE */
6999 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
7001 /* To handle the TDLS event in the dhd_common.c
7003 int dhd_tdls_event_handler(dhd_pub_t
*dhd_pub
, wl_event_msg_t
*event
)
7007 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
7008 ret
= dhd_tdls_update_peer_info(dhd_pub
, event
);
7009 GCC_DIAGNOSTIC_POP()
7014 int dhd_free_tdls_peer_list(dhd_pub_t
*dhd_pub
)
7016 tdls_peer_node_t
*cur
= NULL
, *prev
= NULL
;
7019 cur
= dhd_pub
->peer_tbl
.node
;
7021 if ((dhd_pub
->peer_tbl
.node
== NULL
) && !dhd_pub
->peer_tbl
.tdls_peer_count
)
7024 while (cur
!= NULL
) {
7027 MFREE(dhd_pub
->osh
, prev
, sizeof(tdls_peer_node_t
));
7029 dhd_pub
->peer_tbl
.tdls_peer_count
= 0;
7030 dhd_pub
->peer_tbl
.node
= NULL
;
7033 #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
7035 /* pretty hex print a contiguous buffer
7036 * based on the debug level specified
7039 dhd_prhex(const char *msg
, volatile uchar
*buf
, uint nbytes
, uint8 dbg_level
)
7042 int len
= sizeof(line
);
7046 if (msg
&& (msg
[0] != '\0')) {
7047 if (dbg_level
== DHD_ERROR_VAL
)
7048 DHD_ERROR(("%s:\n", msg
));
7049 else if (dbg_level
== DHD_INFO_VAL
)
7050 DHD_INFO(("%s:\n", msg
));
7051 else if (dbg_level
== DHD_TRACE_VAL
)
7052 DHD_TRACE(("%s:\n", msg
));
7056 for (i
= 0; i
< nbytes
; i
++) {
7058 nchar
= snprintf(p
, len
, " %04x: ", i
); /* line prefix */
7063 nchar
= snprintf(p
, len
, "%02x ", buf
[i
]);
7070 if (dbg_level
== DHD_ERROR_VAL
)
7071 DHD_ERROR(("%s:\n", line
));
7072 else if (dbg_level
== DHD_INFO_VAL
)
7073 DHD_INFO(("%s:\n", line
));
7074 else if (dbg_level
== DHD_TRACE_VAL
)
7075 DHD_TRACE(("%s:\n", line
));
7081 /* flush last partial line */
7083 if (dbg_level
== DHD_ERROR_VAL
)
7084 DHD_ERROR(("%s:\n", line
));
7085 else if (dbg_level
== DHD_INFO_VAL
)
7086 DHD_INFO(("%s:\n", line
));
7087 else if (dbg_level
== DHD_TRACE_VAL
)
7088 DHD_TRACE(("%s:\n", line
));
7093 dhd_tput_test(dhd_pub_t
*dhd
, tput_test_t
*tput_data
)
7095 struct ether_header ether_hdr
;
7096 tput_pkt_t tput_pkt
;
7098 uint8
*pktdata
= NULL
;
7100 uint64 total_size
= 0;
7103 uint32 total_num_tx_pkts
= 0;
7104 int err
= 0, err_exit
= 0;
7106 uint64 time_taken
= 0;
7108 uint32 n_batches
= 0;
7109 uint32 n_remain
= 0;
7110 uint8 tput_pkt_hdr_size
= 0;
7111 bool batch_cnt
= FALSE
;
7112 bool tx_stop_pkt
= FALSE
;
7114 if (tput_data
->version
!= TPUT_TEST_T_VER
||
7115 tput_data
->length
!= TPUT_TEST_T_LEN
) {
7116 DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__
));
7117 err_exit
= BCME_BADARG
;
7121 if (dhd
->tput_data
.tput_test_running
) {
7122 DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__
));
7123 err_exit
= BCME_BUSY
;
7126 #ifdef PCIE_FULL_DONGLE
7128 * 100 bytes to accommodate ether header and tput header. As of today
7129 * both occupy 30 bytes. Rest is reserved.
7131 if ((tput_data
->payload_size
> TPUT_TEST_MAX_PAYLOAD
) ||
7132 (tput_data
->payload_size
> (DHD_FLOWRING_RX_BUFPOST_PKTSZ
- 100))) {
7133 DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
7134 __FUNCTION__
, TPUT_TEST_MAX_PAYLOAD
,
7135 (DHD_FLOWRING_RX_BUFPOST_PKTSZ
- 100)));
7136 err_exit
= BCME_BUFTOOLONG
;
7140 max_txbufs
= dhd_get_max_txbufs(dhd
);
7141 max_txbufs
= MIN(max_txbufs
, DHD_TPUT_MAX_TX_PKTS_BATCH
);
7143 if (!(tput_data
->num_pkts
> 0)) {
7144 DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
7145 __FUNCTION__
, tput_data
->num_pkts
));
7146 err_exit
= BCME_ERROR
;
7150 memset(&dhd
->tput_data
, 0, sizeof(dhd
->tput_data
));
7151 memcpy(&dhd
->tput_data
, tput_data
, sizeof(*tput_data
));
7152 dhd
->tput_data
.pkts_bad
= dhd
->tput_data
.pkts_good
= 0;
7153 dhd
->tput_data
.pkts_cmpl
= 0;
7154 dhd
->tput_start_ts
= dhd
->tput_stop_ts
= 0;
7156 if (tput_data
->flags
& TPUT_TEST_USE_ETHERNET_HDR
) {
7157 pktsize
= sizeof(ether_hdr
) + sizeof(tput_pkt_t
) +
7158 (tput_data
->payload_size
- 12);
7160 pktsize
= sizeof(tput_pkt_t
) +
7161 (tput_data
->payload_size
- 12);
7164 tput_pkt_hdr_size
= (uint8
)((uint8
*)&tput_pkt
.crc32
-
7165 (uint8
*)&tput_pkt
.mac_sta
);
7167 /* mark the tput test as started */
7168 dhd
->tput_data
.tput_test_running
= TRUE
;
7170 if (tput_data
->direction
== TPUT_DIR_TX
) {
7171 /* for ethernet header */
7172 memcpy(ether_hdr
.ether_shost
, tput_data
->mac_sta
, ETHER_ADDR_LEN
);
7173 memcpy(ether_hdr
.ether_dhost
, tput_data
->mac_ap
, ETHER_ADDR_LEN
);
7174 ether_hdr
.ether_type
= hton16(ETHER_TYPE_IP
);
7176 /* fill in the tput pkt */
7177 memset(&tput_pkt
, 0, sizeof(tput_pkt
));
7178 memcpy(tput_pkt
.mac_ap
, tput_data
->mac_ap
, ETHER_ADDR_LEN
);
7179 memcpy(tput_pkt
.mac_sta
, tput_data
->mac_sta
, ETHER_ADDR_LEN
);
7180 tput_pkt
.pkt_type
= hton16(TPUT_PKT_TYPE_NORMAL
);
7181 tput_pkt
.num_pkts
= hton32(tput_data
->num_pkts
);
7183 if (tput_data
->num_pkts
> (uint32
)max_txbufs
) {
7184 n_batches
= tput_data
->num_pkts
/ max_txbufs
;
7185 n_remain
= tput_data
->num_pkts
% max_txbufs
;
7188 n_remain
= tput_data
->num_pkts
;
7190 DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
7191 __FUNCTION__
, tput_data
->num_pkts
, n_batches
, n_remain
));
7194 /* reset before every batch */
7195 dhd
->batch_tx_pkts_cmpl
= 0;
7197 dhd
->batch_tx_num_pkts
= max_txbufs
;
7199 } else if (n_remain
) {
7200 dhd
->batch_tx_num_pkts
= n_remain
;
7203 DHD_ERROR(("Invalid. This should not hit\n"));
7206 dhd
->tput_start_ts
= OSL_SYSUPTIME_US();
7207 for (i
= 0; (i
< dhd
->batch_tx_num_pkts
) || (tx_stop_pkt
); ++i
) {
7208 pkt
= PKTGET(dhd
->osh
, pktsize
, TRUE
);
7210 dhd
->tput_data
.tput_test_running
= FALSE
;
7211 DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
7213 DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
7214 __FUNCTION__
, dhd
->tput_data
.pkts_good
,
7215 dhd
->tput_data
.pkts_bad
, dhd
->tput_data
.pkts_cmpl
));
7216 err_exit
= BCME_NOMEM
;
7219 pktdata
= PKTDATA(dhd
->osh
, pkt
);
7220 PKTSETLEN(dhd
->osh
, pkt
, pktsize
);
7221 memset(pktdata
, 0, pktsize
);
7222 if (tput_data
->flags
& TPUT_TEST_USE_ETHERNET_HDR
) {
7223 memcpy(pktdata
, ðer_hdr
, sizeof(ether_hdr
));
7224 pktdata
+= sizeof(ether_hdr
);
7226 /* send stop pkt as last pkt */
7228 tput_pkt
.pkt_type
= hton16(TPUT_PKT_TYPE_STOP
);
7229 tx_stop_pkt
= FALSE
;
7231 tput_pkt
.pkt_type
= hton16(TPUT_PKT_TYPE_NORMAL
);
7232 tput_pkt
.pkt_id
= hton32(pktid
++);
7234 memcpy(pktdata
, &tput_pkt
, sizeof(tput_pkt
));
7235 /* compute crc32 over the pkt-id, num-pkts and data fields */
7236 crc
= (uint32
*)(pktdata
+ tput_pkt_hdr_size
);
7237 *crc
= hton32(hndcrc32(pktdata
+ tput_pkt_hdr_size
+ 4,
7238 8 + (tput_data
->payload_size
- 12),
7241 err
= dhd_sendpkt(dhd
, 0, pkt
);
7242 if (err
!= BCME_OK
) {
7243 DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
7244 __FUNCTION__
, pktid
, err
));
7245 dhd
->tput_data
.pkts_bad
++;
7247 total_num_tx_pkts
++;
7248 if ((total_num_tx_pkts
== tput_data
->num_pkts
) && (!tx_stop_pkt
)) {
7252 DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__
));
7253 if (!dhd_os_tput_test_wait(dhd
, NULL
,
7254 TPUT_TEST_WAIT_TIMEOUT_DEFAULT
)) {
7255 dhd
->tput_stop_ts
= OSL_SYSUPTIME_US();
7256 dhd
->tput_data
.tput_test_running
= FALSE
;
7257 DHD_ERROR(("%s: TX completion timeout !"
7258 " Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
7259 __FUNCTION__
, total_num_tx_pkts
, dhd
->batch_tx_pkts_cmpl
));
7260 err_exit
= BCME_ERROR
;
7263 if ((dhd
->tput_start_ts
&& dhd
->tput_stop_ts
&&
7264 (dhd
->tput_stop_ts
> dhd
->tput_start_ts
)) || (time_taken
)) {
7266 time_taken
= dhd
->tput_stop_ts
- dhd
->tput_start_ts
;
7269 dhd
->tput_data
.tput_test_running
= FALSE
;
7270 DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
7272 err_exit
= BCME_ERROR
;
7275 if (n_batches
|| n_remain
) {
7280 } while (batch_cnt
);
7283 DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__
));
7284 if (!dhd_os_tput_test_wait(dhd
, NULL
, tput_data
->timeout_ms
)) {
7285 DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__
));
7286 dhd
->tput_stop_ts
= OSL_SYSUPTIME_US();
7290 /* calculate the throughput in bits per sec */
7291 if (dhd
->tput_start_ts
&& dhd
->tput_stop_ts
&&
7292 (dhd
->tput_stop_ts
> dhd
->tput_start_ts
)) {
7293 time_taken
= dhd
->tput_stop_ts
- dhd
->tput_start_ts
;
7294 time_taken
= DIV_U64_BY_U32(time_taken
, MSEC_PER_SEC
); /* convert to ms */
7295 dhd
->tput_data
.time_ms
= time_taken
;
7297 total_size
= pktsize
* dhd
->tput_data
.pkts_cmpl
* 8;
7298 dhd
->tput_data
.tput_bps
= DIV_U64_BY_U64(total_size
, time_taken
);
7299 /* convert from ms to seconds */
7300 dhd
->tput_data
.tput_bps
= dhd
->tput_data
.tput_bps
* 1000;
7303 DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__
));
7305 DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__
,
7306 dhd
->tput_data
.tput_bps
, dhd
->tput_data
.time_ms
));
7308 memcpy(tput_data
, &dhd
->tput_data
, sizeof(dhd
->tput_data
));
7310 dhd
->tput_data
.tput_test_running
= FALSE
;
7315 DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
7316 __FUNCTION__
, dhd
->tput_data
.pkts_good
,
7317 dhd
->tput_data
.pkts_bad
, dhd
->tput_data
.pkts_cmpl
));
7323 dhd_tput_test_rx(dhd_pub_t
*dhd
, void *pkt
)
7325 uint8
*pktdata
= NULL
;
7326 tput_pkt_t
*tput_pkt
= NULL
;
7328 uint8 tput_pkt_hdr_size
= 0;
7330 pktdata
= PKTDATA(dhd
->osh
, pkt
);
7331 if (dhd
->tput_data
.flags
& TPUT_TEST_USE_ETHERNET_HDR
)
7332 pktdata
+= sizeof(struct ether_header
);
7333 tput_pkt
= (tput_pkt_t
*)pktdata
;
7335 /* record the timestamp of the first packet received */
7336 if (dhd
->tput_data
.pkts_cmpl
== 0) {
7337 dhd
->tput_start_ts
= OSL_SYSUPTIME_US();
7340 if (ntoh16(tput_pkt
->pkt_type
) != TPUT_PKT_TYPE_STOP
&&
7341 dhd
->tput_data
.pkts_cmpl
<= dhd
->tput_data
.num_pkts
) {
7342 dhd
->tput_data
.pkts_cmpl
++;
7344 /* drop rx packets received beyond the specified # */
7345 if (dhd
->tput_data
.pkts_cmpl
> dhd
->tput_data
.num_pkts
)
7348 DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__
,
7349 ntoh32(tput_pkt
->pkt_id
), ntoh16(tput_pkt
->pkt_type
)));
7351 /* discard if mac addr of AP/STA does not match the specified ones */
7352 if ((memcmp(tput_pkt
->mac_ap
, dhd
->tput_data
.mac_ap
,
7353 ETHER_ADDR_LEN
) != 0) ||
7354 (memcmp(tput_pkt
->mac_sta
, dhd
->tput_data
.mac_sta
,
7355 ETHER_ADDR_LEN
) != 0)) {
7356 dhd
->tput_data
.pkts_bad
++;
7357 DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
7358 __FUNCTION__
, ntoh32(tput_pkt
->pkt_id
)));
7362 tput_pkt_hdr_size
= (uint8
)((uint8
*)&tput_pkt
->crc32
-
7363 (uint8
*)&tput_pkt
->mac_sta
);
7364 pktdata
+= tput_pkt_hdr_size
+ 4;
7365 crc
= hndcrc32(pktdata
, 8 + (dhd
->tput_data
.payload_size
- 12),
7367 if (crc
!= ntoh32(tput_pkt
->crc32
)) {
7368 DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
7369 __FUNCTION__
, ntoh32(tput_pkt
->pkt_id
)));
7370 dhd
->tput_data
.pkts_bad
++;
7374 if (ntoh16(tput_pkt
->pkt_type
) != TPUT_PKT_TYPE_STOP
)
7375 dhd
->tput_data
.pkts_good
++;
7377 /* if we have received the stop packet or all the # of pkts, we're done */
7378 if (ntoh16(tput_pkt
->pkt_type
) == TPUT_PKT_TYPE_STOP
||
7379 dhd
->tput_data
.pkts_cmpl
== dhd
->tput_data
.num_pkts
) {
7380 dhd
->tput_stop_ts
= OSL_SYSUPTIME_US();
7381 dhd_os_tput_test_wake(dhd
);
7385 #ifdef DUMP_IOCTL_IOV_LIST
7387 dhd_iov_li_append(dhd_pub_t
*dhd
, dll_t
*list_head
, dll_t
*node
)
7390 dhd_iov_li_t
*iov_li
;
7391 dhd
->dump_iovlist_len
++;
7393 if (dhd
->dump_iovlist_len
== IOV_LIST_MAX_LEN
+1) {
7394 item
= dll_head_p(list_head
);
7395 iov_li
= (dhd_iov_li_t
*)CONTAINEROF(item
, dhd_iov_li_t
, list
);
7397 MFREE(dhd
->osh
, iov_li
, sizeof(*iov_li
));
7398 dhd
->dump_iovlist_len
--;
7400 dll_append(list_head
, node
);
7404 dhd_iov_li_print(dll_t
*list_head
)
7406 dhd_iov_li_t
*iov_li
;
7409 for (item
= dll_head_p(list_head
); !dll_end(list_head
, item
); item
= next
) {
7410 next
= dll_next_p(item
);
7411 iov_li
= (dhd_iov_li_t
*)CONTAINEROF(item
, dhd_iov_li_t
, list
);
7412 DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index
, iov_li
->buff
, iov_li
->cmd
));
7417 dhd_iov_li_delete(dhd_pub_t
*dhd
, dll_t
*list_head
)
7420 dhd_iov_li_t
*iov_li
;
7421 while (!(dll_empty(list_head
))) {
7422 item
= dll_head_p(list_head
);
7423 iov_li
= (dhd_iov_li_t
*)CONTAINEROF(item
, dhd_iov_li_t
, list
);
7425 MFREE(dhd
->osh
, iov_li
, sizeof(*iov_li
));
7428 #endif /* DUMP_IOCTL_IOV_LIST */
7431 /* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT
7432 * The reason being that, in hikey, if we try to DMA_MAP prealloced memory
7433 * it is failing with an 'out of space in SWIOTLB' error
7436 dhd_edl_mem_init(dhd_pub_t
*dhd
)
7440 memset(&dhd
->edl_ring_mem
, 0, sizeof(dhd
->edl_ring_mem
));
7441 ret
= dhd_dma_buf_alloc(dhd
, &dhd
->edl_ring_mem
, DHD_EDL_RING_SIZE
);
7442 if (ret
!= BCME_OK
) {
7443 DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
7451 * NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
7452 * for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init.
7455 dhd_edl_mem_deinit(dhd_pub_t
*dhd
)
7457 if (dhd
->edl_ring_mem
.va
!= NULL
)
7458 dhd_dma_buf_free(dhd
, &dhd
->edl_ring_mem
);
7462 dhd_event_logtrace_process_edl(dhd_pub_t
*dhdp
, uint8
*data
,
7463 void *evt_decode_data
)
7465 msg_hdr_edl_t
*msg
= NULL
;
7466 cmn_msg_hdr_t
*cmn_msg_hdr
= NULL
;
7469 if (!data
|| !dhdp
|| !evt_decode_data
) {
7470 DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__
));
7474 /* format of data in each work item in the EDL ring:
7475 * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
7476 * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
7478 cmn_msg_hdr
= (cmn_msg_hdr_t
*)data
;
7479 msg
= (msg_hdr_edl_t
*)(data
+ sizeof(cmn_msg_hdr_t
));
7481 /* validate the fields */
7482 if (ltoh32(msg
->infobuf_ver
) != PCIE_INFOBUF_V1
) {
7483 DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
7484 " expected (0x%x)\n", __FUNCTION__
,
7485 msg
->infobuf_ver
, PCIE_INFOBUF_V1
));
7486 return BCME_VERSION
;
7489 /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
7490 if (sizeof(info_buf_payload_hdr_t
) > cmn_msg_hdr
->request_id
) {
7491 DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
7493 return BCME_BUFTOOLONG
;
7496 if (ltoh16(msg
->pyld_hdr
.type
) != PCIE_INFOBUF_V1_TYPE_LOGTRACE
) {
7497 DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
7498 __FUNCTION__
, ltoh16(msg
->pyld_hdr
.type
)));
7499 return BCME_BADOPTION
;
7502 if (ltoh16(msg
->pyld_hdr
.length
) > cmn_msg_hdr
->request_id
) {
7503 DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
7504 " than available buffer size %u\n", __FUNCTION__
,
7505 ltoh16(msg
->pyld_hdr
.length
), cmn_msg_hdr
->request_id
));
7509 /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
7510 buf
+= sizeof(msg
->infobuf_ver
) + sizeof(msg
->pyld_hdr
);
7511 dhd_dbg_trace_evnt_handler(dhdp
, buf
, evt_decode_data
,
7512 ltoh16(msg
->pyld_hdr
.length
));
7515 * check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
7516 * copy the event data to the skb and send it up the stack
7518 if (dhdp
->logtrace_pkt_sendup
) {
7519 DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__
,
7520 (uint32
)(ltoh16(msg
->pyld_hdr
.length
) +
7521 sizeof(info_buf_payload_hdr_t
) + 4)));
7522 dhd_sendup_info_buf(dhdp
, (uint8
*)msg
);
7527 #endif /* EWP_EDL */
7530 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
7532 dhd_log_dump_trigger(dhd_pub_t
*dhdp
, int subcmd
)
7534 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7535 log_dump_type_t
*flush_type
;
7536 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7537 uint64 current_time_sec
;
7540 DHD_ERROR(("dhdp is NULL !\n"));
7544 if (subcmd
>= CMD_MAX
|| subcmd
< CMD_DEFAULT
) {
7545 DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__
));
7549 current_time_sec
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
7551 DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
7552 __FUNCTION__
, current_time_sec
, dhdp
->debug_dump_time_sec
,
7553 DEBUG_DUMP_TRIGGER_INTERVAL_SEC
));
7555 if ((current_time_sec
- dhdp
->debug_dump_time_sec
) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC
) {
7556 DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
7557 __FUNCTION__
, dhdp
->debug_dump_time_sec
, DEBUG_DUMP_TRIGGER_INTERVAL_SEC
));
7561 clear_debug_dump_time(dhdp
->debug_dump_time_str
);
7562 #ifdef DHD_PCIE_RUNTIMEPM
7563 /* wake up RPM if SYSDUMP is triggered */
7564 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
7565 #endif /* DHD_PCIE_RUNTIMEPM */
7568 dhdp
->debug_dump_subcmd
= subcmd
;
7570 dhdp
->debug_dump_time_sec
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
7572 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7573 /* flush_type is freed at do_dhd_log_dump function */
7574 flush_type
= MALLOCZ(dhdp
->osh
, sizeof(log_dump_type_t
));
7576 *flush_type
= DLD_BUF_TYPE_ALL
;
7577 dhd_schedule_log_dump(dhdp
, flush_type
);
7579 DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__
));
7582 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7584 /* Inside dhd_mem_dump, event notification will be sent to HAL and
7585 * from other context DHD pushes memdump, debug_dump and pktlog dump
7586 * to HAL and HAL will write into file
7588 #if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
7589 dhdp
->memdump_type
= DUMP_TYPE_BY_SYSDUMP
;
7590 dhd_bus_mem_dump(dhdp
);
7591 #endif /* BCMPCIE && DHD_FW_COREDUMP */
7593 #if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7594 dhd_schedule_pktlog_dump(dhdp
);
7595 #endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7597 #endif /* DHD_LOG_DUMP */
7599 #if defined(SHOW_LOGTRACE)
7601 dhd_print_fw_ver_from_file(dhd_pub_t
*dhdp
, char *fwpath
)
7605 char buf
[FW_VER_STR_LEN
];
7612 file
= dhd_os_open_image1(dhdp
, fwpath
);
7617 size
= dhd_os_get_image_size(file
);
7623 /* seek to the last 'X' bytes in the file */
7624 if (dhd_os_seek_file(file
, size
- FW_VER_STR_LEN
) != BCME_OK
) {
7629 /* read the last 'X' bytes of the file to a buffer */
7630 memset(buf
, 0, FW_VER_STR_LEN
);
7631 if (dhd_os_get_image_block(buf
, FW_VER_STR_LEN
- 1, file
) < 0) {
7635 /* search for 'Version' in the buffer */
7636 str
= bcmstrnstr(buf
, FW_VER_STR_LEN
, FW_VER_STR
, strlen(FW_VER_STR
));
7641 /* go back in the buffer to the last ascii character */
7642 while (str
!= buf
&&
7643 (*str
>= ' ' && *str
<= '~')) {
7646 /* reverse the final decrement, so that str is pointing
7647 * to the first ascii character in the buffer
7651 if (strlen(str
) > (FW_VER_STR_LEN
- 1)) {
7656 DHD_ERROR(("FW version in file '%s': %s\n", fwpath
, str
));
7657 /* copy to global variable, so that in case FW load fails, the
7658 * core capture logs will contain FW version read from the file
7660 memset(fw_version
, 0, FW_VER_STR_LEN
);
7661 strlcpy(fw_version
, str
, FW_VER_STR_LEN
);
7665 dhd_os_close_image1(dhdp
, file
);
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
/*
 * Format the IOCTL-response-timeout hang information into dhd->hang_info:
 * a space-separated header (reason, version, timestamp, ifidx and the ioctl
 * cmd/len/set/used/needed fields) followed by up to HANG_FIELD_CNT_MAX
 * 32-bit words of the ioctl buffer in hex, delimited by HANG_RAW_DEL.
 * NOTE(review): reconstructed from a garbled extraction (locals, counter
 * bookkeeping between the two scnprintf sections and the loop increment
 * clause were dropped lines) — confirm against the upstream dhd_common.c.
 */
void
copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
{
	char *dest;
	int remain_len;
	int i;
	int *cnt;
	int bytes_written;
	uint32 ioc_dwlen = 0;

	if (!dhd || !dhd->hang_info) {
		DHD_ERROR(("%s dhd=%p hang_info=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
		return;
	}

	cnt = &dhd->hang_info_cnt;
	dest = dhd->hang_info;

	memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
	(*cnt) = 0;
	bytes_written = 0;
	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;

	get_debug_dump_time(dhd->debug_dump_time_hang_str);
	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);

	bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
			HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
			dhd->debug_dump_time_hang_str,
			ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
	(*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;

	clear_debug_dump_time(dhd->debug_dump_time_hang_str);

	/* Access ioc->buf only if the ioc->len is more than 4 bytes */
	ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
	if (ioc_dwlen > 0) {
		const uint32 *ioc_buf = (const uint32 *)ioc->buf;

		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
		bytes_written += scnprintf(&dest[bytes_written], remain_len,
				"%08x", *(uint32 *)(ioc_buf++));
		GCC_DIAGNOSTIC_POP();
		(*cnt)++;
		if ((*cnt) >= HANG_FIELD_CNT_MAX) {
			return;
		}

		for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
			i++, (*cnt)++) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
			GCC_DIAGNOSTIC_POP();
		}
	}

	DHD_INFO(("%s hang info len: %d data: %s\n",
		__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#if defined(DHD_H2D_LOG_TIME_SYNC)
/*
 * Helper function:
 * Used for Dongle console message time syncing with Host printk.
 * Sends the current host time (ms) to the dongle via the "rte_timesync"
 * iovar; on failure, disables further periodic syncing by zeroing
 * dhd_rte_time_sync_ms.
 * NOTE(review): reconstructed from a garbled extraction (the 'ts' local and
 * its local_clock() assignment were dropped lines) — confirm upstream.
 */
void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
{
	uint64 ts;

	/*
	 * local_clock() returns time in nano seconds.
	 * Dongle understand only milli seconds time.
	 */
	ts = local_clock();
	/* Nano seconds to milli seconds */
	do_div(ts, 1000000);
	if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
		DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
		/* Stopping HOST Dongle console time syncing */
		dhd->dhd_rte_time_sync_ms = 0;
	}
}
#endif /* DHD_H2D_LOG_TIME_SYNC */
7764 /* configuations of ecounters to be enabled by default in FW */
7765 static ecounters_cfg_t ecounters_cfg_tbl
[] = {
7766 /* Global ecounters */
7767 {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL
, 0x0, WL_IFSTATS_XTLV_BUS_PCIE
},
7768 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
7769 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
7771 /* Slice specific ecounters */
7772 {ECOUNTERS_STATS_TYPES_FLAG_SLICE
, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE
},
7773 {ECOUNTERS_STATS_TYPES_FLAG_SLICE
, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE
},
7774 {ECOUNTERS_STATS_TYPES_FLAG_SLICE
, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX
},
7776 /* Interface specific ecounters */
7777 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE
},
7778 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_GENERIC
},
7779 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC
},
7780 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_MGT_CNT
},
7782 /* secondary interface */
7783 /* XXX REMOVE for temporal, will be enabled after decision
7784 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
7785 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_GENERIC},
7786 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
7787 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_MGT_CNT},
7791 /* XXX: Same event id shall be defined in consecutive order in the below table */
7792 static event_ecounters_cfg_t event_ecounters_cfg_tbl
[] = {
7793 /* Interface specific event ecounters */
7794 {WLC_E_DEAUTH_IND
, ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS
},
7797 /* Accepts an argument to -s, -g or -f and creates an XTLV */
7799 dhd_create_ecounters_params(dhd_pub_t
*dhd
, uint16 type
, uint16 if_slice_idx
,
7800 uint16 stats_rep
, uint8
**xtlv
)
7802 uint8
*req_xtlv
= NULL
;
7803 ecounters_stats_types_report_req_t
*req
;
7804 bcm_xtlvbuf_t xtlvbuf
, container_xtlvbuf
;
7805 ecountersv2_xtlv_list_elt_t temp
;
7806 uint16 xtlv_len
= 0, total_len
= 0;
7809 /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
7810 temp
.id
= stats_rep
;
7813 /* Hence len/data = 0/NULL */
7814 xtlv_len
+= temp
.len
+ BCM_XTLV_HDR_SIZE
;
7816 /* Total length of the container */
7817 total_len
= BCM_XTLV_HDR_SIZE
+
7818 OFFSETOF(ecounters_stats_types_report_req_t
, stats_types_req
) + xtlv_len
;
7820 /* Now allocate a structure for the entire request */
7821 if ((req_xtlv
= (uint8
*)MALLOCZ(dhd
->osh
, total_len
)) == NULL
) {
7826 /* container XTLV context */
7827 bcm_xtlv_buf_init(&container_xtlvbuf
, (uint8
*)req_xtlv
, total_len
,
7828 BCM_XTLV_OPTION_ALIGN32
);
7830 /* Fill other XTLVs in the container. Leave space for XTLV headers */
7831 req
= (ecounters_stats_types_report_req_t
*)(req_xtlv
+ BCM_XTLV_HDR_SIZE
);
7833 if (type
== ECOUNTERS_STATS_TYPES_FLAG_SLICE
) {
7834 req
->slice_mask
= 0x1 << if_slice_idx
;
7835 } else if (type
== ECOUNTERS_STATS_TYPES_FLAG_IFACE
) {
7836 req
->if_index
= if_slice_idx
;
7839 /* Fill remaining XTLVs */
7840 bcm_xtlv_buf_init(&xtlvbuf
, (uint8
*) req
->stats_types_req
, xtlv_len
,
7841 BCM_XTLV_OPTION_ALIGN32
);
7842 if (bcm_xtlv_put_data(&xtlvbuf
, temp
.id
, NULL
, temp
.len
)) {
7843 DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp
.id
));
7848 /* fill the top level container and get done with the XTLV container */
7849 rc
= bcm_xtlv_put_data(&container_xtlvbuf
, WL_ECOUNTERS_XTLV_REPORT_REQ
, NULL
,
7850 bcm_xtlv_buf_len(&xtlvbuf
) + OFFSETOF(ecounters_stats_types_report_req_t
,
7854 DHD_ERROR(("Error creating parent XTLV for type = %d\n", req
->flags
));
7859 if (rc
&& req_xtlv
) {
7860 MFREE(dhd
->osh
, req_xtlv
, total_len
);
7864 /* update the xtlv pointer */
7870 dhd_ecounter_autoconfig(dhd_pub_t
*dhd
)
7874 rc
= dhd_iovar(dhd
, 0, "ecounters_autoconfig", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
7876 if (rc
!= BCME_OK
) {
7878 if (rc
!= BCME_UNSUPPORTED
) {
7880 DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__
, rc
));
7882 DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__
));
7890 dhd_ecounter_configure(dhd_pub_t
*dhd
, bool enable
)
7894 if (dhd_ecounter_autoconfig(dhd
) != BCME_OK
) {
7895 if ((rc
= dhd_start_ecounters(dhd
)) != BCME_OK
) {
7896 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__
));
7897 } else if ((rc
= dhd_start_event_ecounters(dhd
)) != BCME_OK
) {
7898 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__
));
7902 if ((rc
= dhd_stop_ecounters(dhd
)) != BCME_OK
) {
7903 DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__
));
7904 } else if ((rc
= dhd_stop_event_ecounters(dhd
)) != BCME_OK
) {
7905 DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__
));
7912 dhd_start_ecounters(dhd_pub_t
*dhd
)
7918 ecounters_config_request_v2_t
*req
= NULL
;
7919 ecountersv2_processed_xtlv_list_elt
*list_elt
, *tail
= NULL
;
7920 ecountersv2_processed_xtlv_list_elt
*processed_containers_list
= NULL
;
7921 uint16 total_processed_containers_len
= 0;
7923 for (i
= 0; i
< ARRAYSIZE(ecounters_cfg_tbl
); i
++) {
7924 ecounters_cfg_t
*ecounter_stat
= &ecounters_cfg_tbl
[i
];
7926 if ((list_elt
= (ecountersv2_processed_xtlv_list_elt
*)
7927 MALLOCZ(dhd
->osh
, sizeof(*list_elt
))) == NULL
) {
7928 DHD_ERROR(("Ecounters v2: No memory to process\n"));
7932 rc
= dhd_create_ecounters_params(dhd
, ecounter_stat
->type
,
7933 ecounter_stat
->if_slice_idx
, ecounter_stat
->stats_rep
, &list_elt
->data
);
7936 DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
7937 ecounter_stat
->stats_rep
, rc
));
7939 /* Free allocated memory and go to fail to release any memories allocated
7940 * in previous iterations. Note that list_elt->data gets populated in
7941 * dhd_create_ecounters_params() and gets freed there itself.
7943 MFREE(dhd
->osh
, list_elt
, sizeof(*list_elt
));
7947 elt
= (bcm_xtlv_t
*) list_elt
->data
;
7949 /* Put the elements in the order they are processed */
7950 if (processed_containers_list
== NULL
) {
7951 processed_containers_list
= list_elt
;
7953 tail
->next
= list_elt
;
7956 /* Size of the XTLV returned */
7957 total_processed_containers_len
+= BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
;
7960 /* Now create ecounters config request with totallength */
7961 req
= (ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
, sizeof(*req
) +
7962 total_processed_containers_len
);
7969 req
->version
= ECOUNTERS_VERSION_2
;
7970 req
->logset
= EVENT_LOG_SET_ECOUNTERS
;
7971 req
->reporting_period
= ECOUNTERS_DEFAULT_PERIOD
;
7972 req
->num_reports
= ECOUNTERS_NUM_REPORTS
;
7973 req
->len
= total_processed_containers_len
+
7974 OFFSETOF(ecounters_config_request_v2_t
, ecounters_xtlvs
);
7977 start_ptr
= req
->ecounters_xtlvs
;
7979 /* Now go element by element in the list */
7980 while (processed_containers_list
) {
7981 list_elt
= processed_containers_list
;
7983 elt
= (bcm_xtlv_t
*)list_elt
->data
;
7985 memcpy(start_ptr
, list_elt
->data
, BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
7986 start_ptr
+= (size_t)(BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
7987 processed_containers_list
= processed_containers_list
->next
;
7989 /* Free allocated memories */
7990 MFREE(dhd
->osh
, elt
, elt
->len
+ BCM_XTLV_HDR_SIZE
);
7991 MFREE(dhd
->osh
, list_elt
, sizeof(*list_elt
));
7994 if ((rc
= dhd_iovar(dhd
, 0, "ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
)) < 0) {
7995 DHD_ERROR(("failed to start ecounters\n"));
8000 MFREE(dhd
->osh
, req
, sizeof(*req
) + total_processed_containers_len
);
8003 /* Now go element by element in the list */
8004 while (processed_containers_list
) {
8005 list_elt
= processed_containers_list
;
8006 elt
= (bcm_xtlv_t
*)list_elt
->data
;
8007 processed_containers_list
= processed_containers_list
->next
;
8009 /* Free allocated memories */
8010 MFREE(dhd
->osh
, elt
, elt
->len
+ BCM_XTLV_HDR_SIZE
);
8011 MFREE(dhd
->osh
, list_elt
, sizeof(*list_elt
));
8017 dhd_stop_ecounters(dhd_pub_t
*dhd
)
8020 ecounters_config_request_v2_t
*req
;
8022 /* Now create ecounters config request with totallength */
8023 req
= (ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
, sizeof(*req
));
8030 req
->version
= ECOUNTERS_VERSION_2
;
8031 req
->len
= OFFSETOF(ecounters_config_request_v2_t
, ecounters_xtlvs
);
8033 if ((rc
= dhd_iovar(dhd
, 0, "ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
)) < 0) {
8034 DHD_ERROR(("failed to stop ecounters\n"));
8039 MFREE(dhd
->osh
, req
, sizeof(*req
));
8044 /* configured event_id_array for event ecounters */
8045 typedef struct event_id_array
{
8050 /* get event id array only from event_ecounters_cfg_tbl[] */
8051 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t
*event_array
)
8055 int32 prev_evt_id
= -1;
8057 for (i
= 0; i
< (uint8
)ARRAYSIZE(event_ecounters_cfg_tbl
); i
++) {
8058 if (prev_evt_id
!= event_ecounters_cfg_tbl
[i
].event_id
) {
8059 if (prev_evt_id
>= 0)
8061 event_array
[idx
].event_id
= event_ecounters_cfg_tbl
[i
].event_id
;
8062 event_array
[idx
].str_idx
= i
;
8064 prev_evt_id
= event_ecounters_cfg_tbl
[i
].event_id
;
8069 /* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */
8070 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
8073 dhd_start_event_ecounters(dhd_pub_t
*dhd
)
8076 uint8 event_id_cnt
= 0;
8077 uint16 processed_containers_len
= 0;
8078 uint16 max_xtlv_len
= 0;
8082 event_id_array_t
*id_array
;
8083 bcm_xtlv_t
*elt
= NULL
;
8084 event_ecounters_config_request_v2_t
*req
= NULL
;
8086 /* XXX: the size of id_array is limited by the size of event_ecounters_cfg_tbl */
8087 id_array
= (event_id_array_t
*)MALLOCZ(dhd
->osh
, sizeof(event_id_array_t
) *
8088 ARRAYSIZE(event_ecounters_cfg_tbl
));
8090 if (id_array
== NULL
) {
8094 event_id_cnt
= __dhd_event_ecounters_get_event_id_array(id_array
);
8096 max_xtlv_len
= ((BCM_XTLV_HDR_SIZE
+
8097 OFFSETOF(event_ecounters_config_request_v2_t
, ecounters_xtlvs
)) *
8098 ECNTRS_MAX_XTLV_NUM
);
8100 /* Now create ecounters config request with max allowed length */
8101 req
= (event_ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
,
8102 sizeof(event_ecounters_config_request_v2_t
*) + max_xtlv_len
);
8109 for (i
= 0; i
<= event_id_cnt
; i
++) {
8110 /* req initialization by event id */
8111 req
->version
= ECOUNTERS_VERSION_2
;
8112 req
->logset
= EVENT_LOG_SET_ECOUNTERS
;
8113 req
->event_id
= id_array
[i
].event_id
;
8114 req
->flags
= EVENT_ECOUNTERS_FLAGS_ADD
;
8116 processed_containers_len
= 0;
8119 ptr
= req
->ecounters_xtlvs
;
8121 for (j
= id_array
[i
].str_idx
; j
< (uint8
)ARRAYSIZE(event_ecounters_cfg_tbl
); j
++) {
8122 event_ecounters_cfg_t
*event_ecounter_stat
= &event_ecounters_cfg_tbl
[j
];
8123 if (id_array
[i
].event_id
!= event_ecounter_stat
->event_id
)
8126 rc
= dhd_create_ecounters_params(dhd
, event_ecounter_stat
->type
,
8127 event_ecounter_stat
->if_slice_idx
, event_ecounter_stat
->stats_rep
,
8131 DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
8132 __FUNCTION__
, event_ecounter_stat
->stats_rep
, rc
));
8136 elt
= (bcm_xtlv_t
*)data
;
8138 memcpy(ptr
, elt
, BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
8139 ptr
+= (size_t)(BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
8140 processed_containers_len
+= BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
;
8142 /* Free allocated memories alloced by dhd_create_ecounters_params */
8143 MFREE(dhd
->osh
, elt
, elt
->len
+ BCM_XTLV_HDR_SIZE
);
8145 if (processed_containers_len
> max_xtlv_len
) {
8146 DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
8153 req
->len
= processed_containers_len
+
8154 OFFSETOF(event_ecounters_config_request_v2_t
, ecounters_xtlvs
);
8156 DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
8157 __FUNCTION__
, req
->version
, req
->logset
, req
->event_id
,
8158 req
->flags
, req
->len
));
8160 rc
= dhd_iovar(dhd
, 0, "event_ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
);
8163 DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
8164 req
->event_id
, rc
));
8170 /* Free allocated memories */
8172 MFREE(dhd
->osh
, req
, sizeof(event_ecounters_config_request_v2_t
*) + max_xtlv_len
);
8175 MFREE(dhd
->osh
, id_array
, sizeof(event_id_array_t
) *
8176 ARRAYSIZE(event_ecounters_cfg_tbl
));
8183 dhd_stop_event_ecounters(dhd_pub_t
*dhd
)
8186 event_ecounters_config_request_v2_t
*req
;
8188 /* Now create ecounters config request with totallength */
8189 req
= (event_ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
, sizeof(*req
));
8196 req
->version
= ECOUNTERS_VERSION_2
;
8197 req
->flags
= EVENT_ECOUNTERS_FLAGS_DEL_ALL
;
8198 req
->len
= OFFSETOF(event_ecounters_config_request_v2_t
, ecounters_xtlvs
);
8200 if ((rc
= dhd_iovar(dhd
, 0, "event_ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
)) < 0) {
8201 DHD_ERROR(("failed to stop event_ecounters\n"));
8206 MFREE(dhd
->osh
, req
, sizeof(*req
));
8212 dhd_dump_debug_ring(dhd_pub_t
*dhdp
, void *ring_ptr
, const void *user_buf
,
8213 log_dump_section_hdr_t
*sec_hdr
,
8214 char *text_hdr
, int buflen
, uint32 sec_type
)
8217 uint32 data_len
= 0;
8219 unsigned long flags
= 0;
8221 dhd_dbg_ring_t
*ring
= (dhd_dbg_ring_t
*)ring_ptr
;
8223 int fpos_sechdr
= 0;
8225 if (!dhdp
|| !ring
|| !user_buf
|| !sec_hdr
|| !text_hdr
) {
8228 /* do not allow further writes to the ring
8231 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
8232 ring
->state
= RING_SUSPEND
;
8233 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
8235 if (dhdp
->concise_dbg_buf
) {
8236 /* re-use concise debug buffer temporarily
8237 * to pull ring data, to write
8238 * record by record to file
8240 data_len
= CONCISE_DUMP_BUFLEN
;
8241 data
= dhdp
->concise_dbg_buf
;
8242 ret
= dhd_export_debug_data(text_hdr
, NULL
, user_buf
, strlen(text_hdr
), &pos
);
8243 /* write the section header now with zero length,
8244 * once the correct length is found out, update
8248 sec_hdr
->type
= sec_type
;
8249 sec_hdr
->length
= 0;
8250 ret
= dhd_export_debug_data((char *)sec_hdr
, NULL
, user_buf
,
8251 sizeof(*sec_hdr
), &pos
);
8253 rlen
= dhd_dbg_ring_pull_single(ring
, data
, data_len
, TRUE
);
8256 ret
= dhd_export_debug_data(data
, NULL
, user_buf
, rlen
, &pos
);
8258 DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__
, rlen
));
8259 } while ((rlen
> 0));
8260 /* now update the section header length in the file */
8261 /* Complete ring size is dumped by HAL, hence updating length to ring size */
8262 sec_hdr
->length
= ring
->ring_size
;
8263 ret
= dhd_export_debug_data((char *)sec_hdr
, NULL
, user_buf
,
8264 sizeof(*sec_hdr
), &fpos_sechdr
);
8266 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__
));
8268 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
8269 ring
->state
= RING_ACTIVE
;
8270 /* Resetting both read and write pointer,
8271 * since all items are read.
8273 ring
->rp
= ring
->wp
= 0;
8274 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
8280 dhd_log_dump_ring_to_file(dhd_pub_t
*dhdp
, void *ring_ptr
, void *file
,
8281 unsigned long *file_posn
, log_dump_section_hdr_t
*sec_hdr
,
8282 char *text_hdr
, uint32 sec_type
)
8285 uint32 data_len
= 0, total_len
= 0;
8287 unsigned long fpos_sechdr
= 0;
8288 unsigned long flags
= 0;
8290 dhd_dbg_ring_t
*ring
= (dhd_dbg_ring_t
*)ring_ptr
;
8292 if (!dhdp
|| !ring
|| !file
|| !sec_hdr
||
8293 !file_posn
|| !text_hdr
)
8296 /* do not allow further writes to the ring
8299 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
8300 ring
->state
= RING_SUSPEND
;
8301 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
8303 if (dhdp
->concise_dbg_buf
) {
8304 /* re-use concise debug buffer temporarily
8305 * to pull ring data, to write
8306 * record by record to file
8308 data_len
= CONCISE_DUMP_BUFLEN
;
8309 data
= dhdp
->concise_dbg_buf
;
8310 dhd_os_write_file_posn(file
, file_posn
, text_hdr
,
8312 /* write the section header now with zero length,
8313 * once the correct length is found out, update
8316 dhd_init_sec_hdr(sec_hdr
);
8317 fpos_sechdr
= *file_posn
;
8318 sec_hdr
->type
= sec_type
;
8319 sec_hdr
->length
= 0;
8320 dhd_os_write_file_posn(file
, file_posn
, (char *)sec_hdr
,
8323 rlen
= dhd_dbg_ring_pull_single(ring
, data
, data_len
, TRUE
);
8326 ret
= dhd_os_write_file_posn(file
, file_posn
, data
, rlen
);
8328 DHD_ERROR(("%s: write file error !\n", __FUNCTION__
));
8329 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
8330 ring
->state
= RING_ACTIVE
;
8331 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
8337 /* now update the section header length in the file */
8338 sec_hdr
->length
= total_len
;
8339 dhd_os_write_file_posn(file
, &fpos_sechdr
, (char *)sec_hdr
, sizeof(*sec_hdr
));
8341 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__
));
8344 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
8345 ring
->state
= RING_ACTIVE
;
8346 /* Resetting both read and write pointer,
8347 * since all items are read.
8349 ring
->rp
= ring
->wp
= 0;
8350 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
8354 /* logdump cookie */
8355 #define MAX_LOGUDMP_COOKIE_CNT 10u
8356 #define LOGDUMP_COOKIE_STR_LEN 50u
8358 dhd_logdump_cookie_init(dhd_pub_t
*dhdp
, uint8
*buf
, uint32 buf_size
)
8362 if (!dhdp
|| !buf
) {
8363 DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp
, buf
));
8367 ring_size
= dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN
* MAX_LOGUDMP_COOKIE_CNT
;
8368 if (buf_size
< ring_size
) {
8369 DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
8370 ring_size
, buf_size
));
8374 dhdp
->logdump_cookie
= dhd_ring_init(dhdp
, buf
, buf_size
,
8375 LOGDUMP_COOKIE_STR_LEN
, MAX_LOGUDMP_COOKIE_CNT
,
8376 DHD_RING_TYPE_FIXED
);
8377 if (!dhdp
->logdump_cookie
) {
8378 DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
8386 dhd_logdump_cookie_deinit(dhd_pub_t
*dhdp
)
8391 if (dhdp
->logdump_cookie
) {
8392 dhd_ring_deinit(dhdp
, dhdp
->logdump_cookie
);
8398 #ifdef DHD_TX_PROFILE
8400 dhd_tx_profile_detach(dhd_pub_t
*dhdp
)
8402 int result
= BCME_ERROR
;
8404 if (dhdp
!= NULL
&& dhdp
->protocol_filters
!= NULL
) {
8405 MFREE(dhdp
->osh
, dhdp
->protocol_filters
, DHD_MAX_PROFILES
*
8406 sizeof(*(dhdp
->protocol_filters
)));
8407 dhdp
->protocol_filters
= NULL
;
8416 dhd_tx_profile_attach(dhd_pub_t
*dhdp
)
8418 int result
= BCME_ERROR
;
8421 dhdp
->protocol_filters
= (dhd_tx_profile_protocol_t
*)MALLOCZ(dhdp
->osh
,
8422 DHD_MAX_PROFILES
* sizeof(*(dhdp
->protocol_filters
)));
8424 if (dhdp
->protocol_filters
!= NULL
) {
8429 if (result
!= BCME_OK
) {
8430 DHD_ERROR(("%s:\tMALLOC of tx profile protocol filters failed\n",
8436 #endif /* defined(DHD_TX_PROFILE) */
8439 dhd_logdump_cookie_save(dhd_pub_t
*dhdp
, char *cookie
, char *type
)
8443 if (!dhdp
|| !cookie
|| !type
|| !dhdp
->logdump_cookie
) {
8444 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
8445 " type = %p, cookie_cfg:%p\n", __FUNCTION__
,
8446 dhdp
, cookie
, type
, dhdp
?dhdp
->logdump_cookie
: NULL
));
8449 ptr
= (char *)dhd_ring_get_empty(dhdp
->logdump_cookie
);
8451 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__
));
8454 scnprintf(ptr
, LOGDUMP_COOKIE_STR_LEN
, "%s: %s\n", type
, cookie
);
8459 dhd_logdump_cookie_get(dhd_pub_t
*dhdp
, char *ret_cookie
, uint32 buf_size
)
8463 if (!dhdp
|| !ret_cookie
|| !dhdp
->logdump_cookie
) {
8464 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
8465 "cookie=%p cookie_cfg:%p\n", __FUNCTION__
,
8466 dhdp
, ret_cookie
, dhdp
?dhdp
->logdump_cookie
: NULL
));
8469 ptr
= (char *)dhd_ring_get_first(dhdp
->logdump_cookie
);
8471 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__
));
8474 memcpy(ret_cookie
, ptr
, MIN(buf_size
, strlen(ptr
)));
8475 dhd_ring_free_first(dhdp
->logdump_cookie
);
8480 dhd_logdump_cookie_count(dhd_pub_t
*dhdp
)
8482 if (!dhdp
|| !dhdp
->logdump_cookie
) {
8483 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
8484 __FUNCTION__
, dhdp
, dhdp
?dhdp
->logdump_cookie
: NULL
));
8487 return dhd_ring_get_cur_size(dhdp
->logdump_cookie
);
8491 __dhd_log_dump_cookie_to_file(
8492 dhd_pub_t
*dhdp
, void *fp
, const void *user_buf
, unsigned long *f_pos
,
8493 char *buf
, uint32 buf_size
)
8496 uint32 remain
= buf_size
;
8497 int ret
= BCME_ERROR
;
8498 char tmp_buf
[LOGDUMP_COOKIE_STR_LEN
];
8499 log_dump_section_hdr_t sec_hdr
;
8503 read_idx
= dhd_ring_get_read_idx(dhdp
->logdump_cookie
);
8504 write_idx
= dhd_ring_get_write_idx(dhdp
->logdump_cookie
);
8505 while (dhd_logdump_cookie_count(dhdp
) > 0) {
8506 memset(tmp_buf
, 0, sizeof(tmp_buf
));
8507 ret
= dhd_logdump_cookie_get(dhdp
, tmp_buf
, LOGDUMP_COOKIE_STR_LEN
);
8508 if (ret
!= BCME_OK
) {
8511 remain
-= scnprintf(&buf
[buf_size
- remain
], remain
, "%s", tmp_buf
);
8513 dhd_ring_set_read_idx(dhdp
->logdump_cookie
, read_idx
);
8514 dhd_ring_set_write_idx(dhdp
->logdump_cookie
, write_idx
);
8516 ret
= dhd_export_debug_data(COOKIE_LOG_HDR
, fp
, user_buf
, strlen(COOKIE_LOG_HDR
), f_pos
);
8518 DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__
));
8521 sec_hdr
.magic
= LOG_DUMP_MAGIC
;
8522 sec_hdr
.timestamp
= local_clock();
8523 sec_hdr
.type
= LOG_DUMP_SECTION_COOKIE
;
8524 sec_hdr
.length
= buf_size
- remain
;
8526 ret
= dhd_export_debug_data((char *)&sec_hdr
, fp
, user_buf
, sizeof(sec_hdr
), f_pos
);
8528 DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__
));
8532 ret
= dhd_export_debug_data(buf
, fp
, user_buf
, sec_hdr
.length
, f_pos
);
8534 DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__
));
/*
 * dhd_log_dump_cookie_len: compute how many bytes the cookie section will
 * occupy in a log dump: strlen(COOKIE_LOG_HDR) + sizeof(section header) +
 * the total length of all queued cookie strings.
 * NOTE(review): declarations/initializers of 'buf', 'len', 'remain',
 * 'read_idx' and 'write_idx', plus the early-return statements, are
 * elided from this extract (original lines 8542-8554, 8559-8567, …);
 * presumably 'remain' starts at buf_size and 'len' at 0 — confirm
 * against the full file.
 */
8541 dhd_log_dump_cookie_len(dhd_pub_t
*dhdp
)
8544 char tmp_buf
[LOGDUMP_COOKIE_STR_LEN
];
8545 log_dump_section_hdr_t sec_hdr
;
8547 int ret
= BCME_ERROR
;
8548 uint32 buf_size
= MAX_LOGUDMP_COOKIE_CNT
* LOGDUMP_COOKIE_STR_LEN
;
/* Guard against a missing DHD struct or cookie ring. */
8555 if (!dhdp
|| !dhdp
->logdump_cookie
) {
8556 DHD_ERROR(("%s At least one ptr is NULL "
8557 "dhdp = %p cookie %p\n",
8558 __FUNCTION__
, dhdp
, dhdp
?dhdp
->logdump_cookie
:NULL
))
/* Scratch allocation sized for the worst-case cookie payload.
 * NOTE(review): on the visible lines 'buf' is never written, only
 * allocated and freed — verify whether it is needed at all. */
8562 buf
= (char *)MALLOCZ(dhdp
->osh
, buf_size
)
8564 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
))
/* Snapshot ring indices so the measuring drain can be undone. */
8568 read_idx
= dhd_ring_get_read_idx(dhdp
->logdump_cookie
)
8569 write_idx
= dhd_ring_get_write_idx(dhdp
->logdump_cookie
)
/* Measure: pop each cookie and accumulate its string length via
 * 'remain' (only lengths are needed, nothing is copied). */
8570 while (dhd_logdump_cookie_count(dhdp
) > 0) {
8571 memset(tmp_buf
, 0, sizeof(tmp_buf
))
8572 ret
= dhd_logdump_cookie_get(dhdp
, tmp_buf
, LOGDUMP_COOKIE_STR_LEN
)
8573 if (ret
!= BCME_OK
) {
8576 remain
-= (uint32
)strlen(tmp_buf
)
/* Restore the ring to its pre-drain state. */
8578 dhd_ring_set_read_idx(dhdp
->logdump_cookie
, read_idx
)
8579 dhd_ring_set_write_idx(dhdp
->logdump_cookie
, write_idx
)
/* Total = marker string + section header + cookie data bytes,
 * mirroring the three records the dump writers emit. */
8580 len
+= strlen(COOKIE_LOG_HDR
)
8581 len
+= sizeof(sec_hdr
)
8582 len
+= (buf_size
- remain
)
8585 MFREE(dhdp
->osh
, buf
, buf_size
)
/*
 * dhd_log_dump_cookie: emit the cookie section directly into the
 * user-space buffer 'user_buf' (fp argument of dhd_export_debug_data()
 * is NULL; position is tracked in a local 'pos' whose declaration is
 * elided from this extract).  Mirrors __dhd_log_dump_cookie_to_file():
 * drain the cookie ring into a scratch buffer, restore the ring, then
 * write marker + section header + data.
 * NOTE(review): unlike the _to_file variant, the return values of the
 * three dhd_export_debug_data() calls are not checked on the visible
 * lines — verify whether error handling was elided or is missing.
 */
8590 dhd_log_dump_cookie(dhd_pub_t
*dhdp
, const void *user_buf
)
8592 int ret
= BCME_ERROR
;
8593 char tmp_buf
[LOGDUMP_COOKIE_STR_LEN
];
8594 log_dump_section_hdr_t sec_hdr
;
8596 uint32 buf_size
= MAX_LOGUDMP_COOKIE_CNT
* LOGDUMP_COOKIE_STR_LEN
;
/* Guard against a missing DHD struct or cookie ring. */
8604 if (!dhdp
|| !dhdp
->logdump_cookie
) {
8605 DHD_ERROR(("%s At least one ptr is NULL "
8606 "dhdp = %p cookie %p\n",
8607 __FUNCTION__
, dhdp
, dhdp
?dhdp
->logdump_cookie
:NULL
))
/* Scratch buffer that receives the concatenated cookie strings. */
8611 buf
= (char *)MALLOCZ(dhdp
->osh
, buf_size
)
8613 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
))
/* Snapshot ring indices so the drain below can be undone. */
8617 read_idx
= dhd_ring_get_read_idx(dhdp
->logdump_cookie
)
8618 write_idx
= dhd_ring_get_write_idx(dhdp
->logdump_cookie
)
/* Drain: append each cookie string to 'buf'; 'remain' tracks free
 * space (its declaration/initializer is elided from this extract). */
8619 while (dhd_logdump_cookie_count(dhdp
) > 0) {
8620 memset(tmp_buf
, 0, sizeof(tmp_buf
))
8621 ret
= dhd_logdump_cookie_get(dhdp
, tmp_buf
, LOGDUMP_COOKIE_STR_LEN
)
8622 if (ret
!= BCME_OK
) {
8625 remain
-= scnprintf(&buf
[buf_size
- remain
], remain
, "%s", tmp_buf
)
/* Restore the ring to its pre-drain state. */
8627 dhd_ring_set_read_idx(dhdp
->logdump_cookie
, read_idx
)
8628 dhd_ring_set_write_idx(dhdp
->logdump_cookie
, write_idx
)
/* Record 1: plain-text section marker, written to user_buf at pos. */
8629 ret
= dhd_export_debug_data(COOKIE_LOG_HDR
, NULL
, user_buf
, strlen(COOKIE_LOG_HDR
), &pos
)
/* Record 2: binary section header (scheduler-clock timestamp,
 * length = bytes actually used in 'buf'). */
8630 sec_hdr
.magic
= LOG_DUMP_MAGIC
;
8631 sec_hdr
.timestamp
= local_clock();
8632 sec_hdr
.type
= LOG_DUMP_SECTION_COOKIE
;
8633 sec_hdr
.length
= buf_size
- remain
;
8634 ret
= dhd_export_debug_data((char *)&sec_hdr
, NULL
, user_buf
, sizeof(sec_hdr
), &pos
)
/* Record 3: the cookie data itself. */
8635 ret
= dhd_export_debug_data(buf
, NULL
, user_buf
, sec_hdr
.length
, &pos
)
8638 MFREE(dhdp
->osh
, buf
, buf_size
)
/*
 * dhd_log_dump_cookie_to_file: public wrapper for the cookie-section
 * dump.  Validates arguments (requires dhdp, the cookie ring, f_pos, and
 * at least one of fp/user_buf), allocates a
 * MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN scratch buffer,
 * delegates to __dhd_log_dump_cookie_to_file(), and frees the buffer.
 * NOTE(review): the early-return statements and the function's closing
 * lines are elided from this extract; presumably returns 'ret'.
 */
8643 dhd_log_dump_cookie_to_file(dhd_pub_t
*dhdp
, void *fp
, const void *user_buf
, unsigned long *f_pos
)
8646 int ret
= BCME_ERROR
;
8647 uint32 buf_size
= MAX_LOGUDMP_COOKIE_CNT
* LOGDUMP_COOKIE_STR_LEN
;
/* Argument validation: need a destination (fp or user_buf) plus a
 * valid position pointer and cookie ring. */
8649 if (!dhdp
|| !dhdp
->logdump_cookie
|| (!fp
&& !user_buf
) || !f_pos
) {
8650 DHD_ERROR(("%s At least one ptr is NULL "
8651 "dhdp = %p cookie %p fp = %p f_pos = %p\n",
8652 __FUNCTION__
, dhdp
, dhdp
?dhdp
->logdump_cookie
:NULL
, fp
, f_pos
))
/* Scratch buffer handed down to the worker. */
8656 buf
= (char *)MALLOCZ(dhdp
->osh
, buf_size
)
8658 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
))
/* Delegate the actual serialization/export work. */
8661 ret
= __dhd_log_dump_cookie_to_file(dhdp
, fp
, user_buf
, f_pos
, buf
, buf_size
)
8662 MFREE(dhdp
->osh
, buf
, buf_size
)
8666 #endif /* DHD_LOG_DUMP */
8668 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
/*
 * dhd_control_he_enab: enable or disable 802.11ax (HE) operation in the
 * dongle by packing a WL_HE_CMD_ENAB XTLV carrying 'he_enab' and issuing
 * the "he" iovar (set) on interface 0.  Compiled only under
 * DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB.
 * NOTE(review): the declaration of 'ret' and the return/goto statements
 * are elided from this extract.
 */
8670 dhd_control_he_enab(dhd_pub_t
* dhd
, uint8 he_enab
)
8673 bcm_xtlv_t
*pxtlv
= NULL
;
8674 uint8 mybuf
[DHD_IOVAR_BUF_SIZE
];
/* mybuf_len is in/out for bcm_pack_xtlv_entry(): available space on
 * entry, remaining space after packing. */
8675 uint16 mybuf_len
= sizeof(mybuf
)
8676 pxtlv
= (bcm_xtlv_t
*)mybuf
;
/* Pack the single ENAB XTLV (32-bit aligned) into mybuf. */
8678 ret
= bcm_pack_xtlv_entry((uint8
**)&pxtlv
, &mybuf_len
, WL_HE_CMD_ENAB
, sizeof(he_enab
),
8679 &he_enab
, BCM_XTLV_OPTION_ALIGN32
)
8681 if (ret
!= BCME_OK
) {
8683 DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__
, bcmerrorstr(ret
)))
/* Push the packed XTLV to the dongle.
 * NOTE(review): sends sizeof(mybuf) — the whole DHD_IOVAR_BUF_SIZE
 * buffer — rather than only the packed XTLV length; confirm intended.
 * Also note the "set successed" typo in the success log below (a
 * runtime string, left unchanged here). */
8687 ret
= dhd_iovar(dhd
, 0, "he", (char *)&mybuf
, sizeof(mybuf
), NULL
, 0, TRUE
)
8689 DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
8690 __FUNCTION__
, he_enab
, bcmerrorstr(ret
)))
8692 DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__
, he_enab
))
8697 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
8699 #ifdef CONFIG_ROAM_RSSI_LIMIT
/*
 * dhd_roam_rssi_limit_get: query the dongle's "roam_rssi_limit" iovar and
 * return the 2.4 GHz / 5 GHz roam RSSI thresholds through *lmt2g and
 * *lmt5g.  Only version WLC_ROAM_RSSI_LMT_VER_1 payloads are accepted.
 * Compiled only under CONFIG_ROAM_RSSI_LIMIT.
 * NOTE(review): the declaration of 'ret', the early returns, and the
 * version-mismatch branch body (original lines 8722-8725) are elided
 * from this extract.
 */
8701 dhd_roam_rssi_limit_get(dhd_pub_t
*dhd
, int *lmt2g
, int *lmt5g
)
8703 wlc_roam_rssi_limit_t
*plmt
;
8704 wlc_roam_rssi_lmt_info_v1_t
*pinfo
;
/* Response buffer: version header (ROAMRSSI_HDRLEN) + v1 info body. */
8706 int plmt_len
= sizeof(*pinfo
) + ROAMRSSI_HDRLEN
;
8708 plmt
= (wlc_roam_rssi_limit_t
*)MALLOCZ(dhd
->osh
, plmt_len
)
8710 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
))
8714 /* Get roam rssi limit */
8715 ret
= dhd_iovar(dhd
, 0, "roam_rssi_limit", NULL
, 0, (char *)plmt
, plmt_len
, FALSE
)
8717 DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__
, ret
))
/* Reject any payload version other than v1 (branch body elided). */
8721 if (plmt
->ver
!= WLC_ROAM_RSSI_LMT_VER_1
) {
/* Copy the per-band limits out of the versioned payload. */
8726 pinfo
= (wlc_roam_rssi_lmt_info_v1_t
*)plmt
->data
;
8727 *lmt2g
= (int)pinfo
->rssi_limit_2g
;
8728 *lmt5g
= (int)pinfo
->rssi_limit_5g
;
8732 MFREE(dhd
->osh
, plmt
, plmt_len
)
/*
 * dhd_roam_rssi_limit_set: range-check the requested 2.4 GHz / 5 GHz roam
 * RSSI thresholds, build a WLC_ROAM_RSSI_LMT_VER_1 payload, and push it
 * to the dongle via the "roam_rssi_limit" iovar (set).
 * Compiled only under CONFIG_ROAM_RSSI_LIMIT.
 * NOTE(review): the declaration of 'ret' and the early-return statements
 * after the range checks / malloc failure are elided from this extract.
 */
8738 dhd_roam_rssi_limit_set(dhd_pub_t
*dhd
, int lmt2g
, int lmt5g
)
8740 wlc_roam_rssi_limit_t
*plmt
;
8741 wlc_roam_rssi_lmt_info_v1_t
*pinfo
;
/* Request buffer: version header (ROAMRSSI_HDRLEN) + v1 info body. */
8743 int plmt_len
= sizeof(*pinfo
) + ROAMRSSI_HDRLEN
;
8745 /* Sanity check RSSI limit Value */
8746 if ((lmt2g
< ROAMRSSI_2G_MIN
) || (lmt2g
> ROAMRSSI_2G_MAX
)) {
8747 DHD_ERROR(("%s Not In Range 2G ROAM RSSI Limit\n", __FUNCTION__
))
/* BUG(review): this 5G range check tests lmt2g, not lmt5g, even though
 * the error message below refers to the 5G limit — classic copy/paste
 * defect; lmt5g is never validated.  Should read
 * (lmt5g < ROAMRSSI_5G_MIN) || (lmt5g > ROAMRSSI_5G_MAX). */
8750 if ((lmt2g
< ROAMRSSI_5G_MIN
) || (lmt2g
> ROAMRSSI_5G_MAX
)) {
8751 DHD_ERROR(("%s Not In Range 5G ROAM RSSI Limit\n", __FUNCTION__
))
8755 plmt
= (wlc_roam_rssi_limit_t
*)MALLOCZ(dhd
->osh
, plmt_len
)
8757 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
))
/* Fill the versioned payload: header, then per-band limits narrowed
 * to int16. */
8760 plmt
->ver
= WLC_ROAM_RSSI_LMT_VER_1
;
8761 plmt
->len
= sizeof(*pinfo
)
8762 pinfo
= (wlc_roam_rssi_lmt_info_v1_t
*)plmt
->data
;
8763 pinfo
->rssi_limit_2g
= (int16
)lmt2g
;
8764 pinfo
->rssi_limit_5g
= (int16
)lmt5g
;
8766 /* Set roam rssi limit */
8767 ret
= dhd_iovar(dhd
, 0, "roam_rssi_limit", (char *)plmt
, plmt_len
, NULL
, 0, TRUE
)
/* NOTE(review): log string says "Failed to Get" in this SET path —
 * copy/paste typo in the runtime message (left unchanged here). */
8769 DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__
, ret
))
8774 MFREE(dhd
->osh
, plmt
, plmt_len
)
8778 #endif /* CONFIG_ROAM_RSSI_LIMIT */
8781 dhd_iovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *param_buf
, uint param_len
, char *res_buf
,
8782 uint res_len
, bool set
)
8789 if (res_len
> WLC_IOCTL_MAXLEN
|| param_len
> WLC_IOCTL_MAXLEN
)
8792 input_len
= strlen(name
) + 1 + param_len
;
8793 if (input_len
> WLC_IOCTL_MAXLEN
)
8798 if (res_buf
|| res_len
!= 0) {
8799 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__
));
8803 buf
= MALLOCZ(pub
->osh
, input_len
);
8805 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
8809 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
8815 ioc
.cmd
= WLC_SET_VAR
;
8817 ioc
.len
= input_len
;
8820 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
8822 if (!res_buf
|| !res_len
) {
8823 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__
));
8828 if (res_len
< input_len
) {
8829 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__
,
8830 res_len
, input_len
));
8831 buf
= MALLOCZ(pub
->osh
, input_len
);
8833 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
8837 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
8843 ioc
.cmd
= WLC_GET_VAR
;
8845 ioc
.len
= input_len
;
8848 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
8850 if (ret
== BCME_OK
) {
8851 memcpy(res_buf
, buf
, res_len
);
8854 memset(res_buf
, 0, res_len
);
8855 ret
= bcm_mkiovar(name
, param_buf
, param_len
, res_buf
, res_len
);
8861 ioc
.cmd
= WLC_GET_VAR
;
8866 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
8871 MFREE(pub
->osh
, buf
, input_len
);