2 * Broadcom Dongle Host Driver (DHD), common DHD core.
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: dhd_common.c 815855 2019-04-22 05:16:49Z $
35 #include <bcmendian.h>
36 #include <dngl_stats.h>
42 #ifdef PCIE_FULL_DONGLE
43 #include <bcmmsgbuf.h>
44 #endif /* PCIE_FULL_DONGLE */
47 #include <event_log.h>
48 #endif /* SHOW_LOGTRACE */
51 #include <dhd_flowring.h>
55 #include <dhd_proto.h>
58 #include <dhd_debug.h>
59 #include <dhd_dbg_ring.h>
60 #include <dhd_mschdbg.h>
64 #include <wl_cfg80211.h>
73 #ifdef DNGL_EVENT_SUPPORT
74 #include <dnglevent.h>
81 #define htodchanspec(i) (i)
82 #define dtohchanspec(i) (i)
85 #include <wlfc_proto.h>
90 #include <dhd_l2_filter.h>
91 #endif /* DHD_L2_FILTER */
103 #ifdef DHD_PKT_LOGGING
104 #include <dhd_pktlog.h>
105 #endif /* DHD_PKT_LOGGING */
106 #endif /* DHD_LOG_DUMP */
110 #endif /* DHD_SSSR_DUMP */
112 int dhd_msg_level
= DHD_ERROR_VAL
| DHD_FWLOG_VAL
| DHD_EVENT_VAL
113 /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
114 #if !defined(CUSTOMER_HW4)
116 #endif /* !defined(CUSTOMER_HW4) */
119 #if defined(WL_WIRELESS_EXT)
129 #endif /* DHD_DEBUG */
131 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
132 #include <linux/pm_runtime.h>
133 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
136 char fw_path2
[MOD_PARAM_PATHLEN
];
137 extern bool softap_enabled
;
141 #define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */
142 #define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
143 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
144 static char *ramstart_str
= " text_start"; /* string in mapfile has addr ramstart */
145 static char *rodata_start_str
= " rodata_start"; /* string in mapfile has addr rodata start */
146 static char *rodata_end_str
= " rodata_end"; /* string in mapfile has addr rodata end */
147 #define RAMSTART_BIT 0x01
148 #define RDSTART_BIT 0x02
149 #define RDEND_BIT 0x04
150 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
151 #endif /* SHOW_LOGTRACE */
154 /* the fw file path is taken from either the module parameter at
155 * insmod time or is defined as a constant of different values
156 * for different platforms
158 extern char *st_str_file_path
;
159 #endif /* SHOW_LOGTRACE */
161 #define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
163 /* Last connection success/failure status */
164 uint32 dhd_conn_event
;
165 uint32 dhd_conn_status
;
166 uint32 dhd_conn_reason
;
168 extern int dhd_iscan_request(void * dhdp
, uint16 action
);
169 extern void dhd_ind_scan_confirm(void *h
, bool status
);
170 extern int dhd_iscan_in_progress(void *h
);
171 void dhd_iscan_lock(void);
172 void dhd_iscan_unlock(void);
173 extern int dhd_change_mtu(dhd_pub_t
*dhd
, int new_mtu
, int ifidx
);
174 #if !defined(AP) && defined(WLP2P)
175 extern int dhd_get_concurrent_capabilites(dhd_pub_t
*dhd
);
178 extern int dhd_socram_dump(struct dhd_bus
*bus
);
179 extern void dhd_set_packet_filter(dhd_pub_t
*dhd
);
181 #ifdef DNGL_EVENT_SUPPORT
182 static void dngl_host_event_process(dhd_pub_t
*dhdp
, bcm_dngl_event_t
*event
,
183 bcm_dngl_event_msg_t
*dngl_event
, size_t pktlen
);
184 static int dngl_host_event(dhd_pub_t
*dhdp
, void *pktdata
, bcm_dngl_event_msg_t
*dngl_event
,
186 #endif /* DNGL_EVENT_SUPPORT */
188 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
189 static void copy_hang_info_ioctl_timeout(dhd_pub_t
*dhd
, int ifidx
, wl_ioctl_t
*ioc
);
190 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
192 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
194 bool ap_cfg_running
= FALSE
;
195 bool ap_fw_loaded
= FALSE
;
197 /* Version string to report */
200 #define SRCBASE "drivers/net/wireless/bcmdhd"
202 #define DHD_COMPILED "\nCompiled in " SRCBASE
203 #endif /* DHD_DEBUG */
205 #define CHIPID_MISMATCH 8
207 #if defined(DHD_DEBUG)
208 const char dhd_version
[] = "Dongle Host Driver, version " EPI_VERSION_STR
209 DHD_COMPILED
" on " __DATE__
" at " __TIME__
;
211 const char dhd_version
[] = "\nDongle Host Driver, version " EPI_VERSION_STR
"\nCompiled from ";
213 char fw_version
[FW_VER_STR_LEN
] = "\0";
214 char clm_version
[CLM_VER_STR_LEN
] = "\0";
216 char bus_api_revision
[BUS_API_REV_STR_LEN
] = "\0";
218 void dhd_set_timer(void *bus
, uint wdtick
);
220 static char* ioctl2str(uint32 ioctl
);
238 #if defined(DHD_DEBUG)
239 IOV_DHD_JOIN_TIMEOUT_DBG
,
245 #endif /* defined(DHD_DEBUG) */
247 IOV_PROPTXSTATUS_ENABLE
,
248 IOV_PROPTXSTATUS_MODE
,
249 IOV_PROPTXSTATUS_OPT
,
250 IOV_PROPTXSTATUS_MODULE_IGNORE
,
251 IOV_PROPTXSTATUS_CREDIT_IGNORE
,
252 IOV_PROPTXSTATUS_TXSTATUS_IGNORE
,
253 IOV_PROPTXSTATUS_RXPKT_CHK
,
254 #endif /* PROP_TXSTATUS */
257 IOV_HOSTREORDER_FLOWS
,
258 #ifdef DHDTCPACK_SUPPRESS
260 #endif /* DHDTCPACK_SUPPRESS */
268 #endif /* DHD_L2_FILTER */
272 #endif /* DHD_PSTA */
281 #ifdef DHD_MCAST_REGEN
282 IOV_MCAST_REGEN_BSS_ENABLE
,
286 #endif /* SHOW_LOGTRACE */
287 IOV_DONGLE_TRAP_TYPE
,
288 IOV_DONGLE_TRAP_INFO
,
290 IOV_DUMP_DONGLE
, /**< dumps core registers and d11 memories */
291 #if defined(DHD_LOG_DUMP)
293 #endif /* DHD_LOG_DUMP */
298 const bcm_iovar_t dhd_iovars
[] = {
299 /* name varid flags flags2 type minlen */
300 {"version", IOV_VERSION
, 0, 0, IOVT_BUFFER
, sizeof(dhd_version
)},
302 {"msglevel", IOV_MSGLEVEL
, 0, 0, IOVT_UINT32
, 0},
303 {"mem_debug", IOV_MEM_DEBUG
, 0, 0, IOVT_BUFFER
, 0 },
305 {"flow_ring_debug", IOV_FLOW_RING_DEBUG
, 0, 0, IOVT_BUFFER
, 0 },
307 #endif /* DHD_DEBUG */
308 {"bcmerrorstr", IOV_BCMERRORSTR
, 0, 0, IOVT_BUFFER
, BCME_STRLEN
},
309 {"bcmerror", IOV_BCMERROR
, 0, 0, IOVT_INT8
, 0},
310 {"wdtick", IOV_WDTICK
, 0, 0, IOVT_UINT32
, 0},
311 {"dump", IOV_DUMP
, 0, 0, IOVT_BUFFER
, DHD_IOCTL_MAXLEN
},
312 {"cons", IOV_CONS
, 0, 0, IOVT_BUFFER
, 0},
313 {"dconpoll", IOV_DCONSOLE_POLL
, 0, 0, IOVT_UINT32
, 0},
314 {"clearcounts", IOV_CLEARCOUNTS
, 0, 0, IOVT_VOID
, 0},
315 {"gpioob", IOV_GPIOOB
, 0, 0, IOVT_UINT32
, 0},
316 {"ioctl_timeout", IOV_IOCTLTIMEOUT
, 0, 0, IOVT_UINT32
, 0},
318 {"proptx", IOV_PROPTXSTATUS_ENABLE
, 0, 0, IOVT_BOOL
, 0 },
320 set the proptxtstatus operation mode:
321 0 - Do not do any proptxtstatus flow control
322 1 - Use implied credit from a packet status
323 2 - Use explicit credit
325 {"ptxmode", IOV_PROPTXSTATUS_MODE
, 0, 0, IOVT_UINT32
, 0 },
326 {"proptx_opt", IOV_PROPTXSTATUS_OPT
, 0, 0, IOVT_UINT32
, 0 },
327 {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE
, 0, 0, IOVT_BOOL
, 0 },
328 {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE
, 0, 0, IOVT_BOOL
, 0 },
329 {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE
, 0, 0, IOVT_BOOL
, 0 },
330 {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK
, 0, 0, IOVT_BOOL
, 0 },
331 #endif /* PROP_TXSTATUS */
332 {"bustype", IOV_BUS_TYPE
, 0, 0, IOVT_UINT32
, 0},
333 {"changemtu", IOV_CHANGEMTU
, 0, 0, IOVT_UINT32
, 0 },
334 {"host_reorder_flows", IOV_HOSTREORDER_FLOWS
, 0, 0, IOVT_BUFFER
,
335 (WLHOST_REORDERDATA_MAXFLOWS
+ 1) },
336 #ifdef DHDTCPACK_SUPPRESS
337 {"tcpack_suppress", IOV_TCPACK_SUPPRESS
, 0, 0, IOVT_UINT8
, 0 },
338 #endif /* DHDTCPACK_SUPPRESS */
340 {"dhcp_unicast", IOV_DHCP_UNICAST
, (0), 0, IOVT_BOOL
, 0 },
341 #endif /* DHD_L2_FILTER */
342 {"ap_isolate", IOV_AP_ISOLATE
, (0), 0, IOVT_BOOL
, 0},
344 {"block_ping", IOV_BLOCK_PING
, (0), 0, IOVT_BOOL
, 0},
345 {"proxy_arp", IOV_PROXY_ARP
, (0), 0, IOVT_BOOL
, 0},
346 {"grat_arp", IOV_GRAT_ARP
, (0), 0, IOVT_BOOL
, 0},
347 {"block_tdls", IOV_BLOCK_TDLS
, (0), IOVT_BOOL
, 0},
348 #endif /* DHD_L2_FILTER */
349 {"dhd_ie", IOV_DHD_IE
, (0), 0, IOVT_BUFFER
, 0},
351 /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */
352 {"psta", IOV_PSTA
, 0, 0, IOVT_UINT32
, 0},
353 #endif /* DHD PSTA */
355 /* WET Mode configuration. 0: DIABLED 1: WET */
356 {"wet", IOV_WET
, 0, 0, IOVT_UINT32
, 0},
357 {"wet_host_ipv4", IOV_WET_HOST_IPV4
, 0, 0, IOVT_UINT32
, 0},
358 {"wet_host_mac", IOV_WET_HOST_MAC
, 0, 0, IOVT_BUFFER
, 0},
360 {"op_mode", IOV_CFG80211_OPMODE
, 0, 0, IOVT_UINT32
, 0 },
361 {"assert_type", IOV_ASSERT_TYPE
, (0), 0, IOVT_UINT32
, 0},
362 {"lmtest", IOV_LMTEST
, 0, 0, IOVT_UINT32
, 0 },
363 #ifdef DHD_MCAST_REGEN
364 {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE
, 0, 0, IOVT_BOOL
, 0},
367 {"dump_trace_buf", IOV_DUMP_TRACE_LOG
, 0, 0, IOVT_BUFFER
, sizeof(trace_buf_info_t
) },
368 #endif /* SHOW_LOGTRACE */
369 {"trap_type", IOV_DONGLE_TRAP_TYPE
, 0, 0, IOVT_UINT32
, 0 },
370 {"trap_info", IOV_DONGLE_TRAP_INFO
, 0, 0, IOVT_BUFFER
, sizeof(trap_t
) },
372 {"bpaddr", IOV_BPADDR
, 0, 0, IOVT_BUFFER
, sizeof(sdreg_t
) },
373 #endif /* DHD_DEBUG */
374 {"dump_dongle", IOV_DUMP_DONGLE
, 0, 0, IOVT_BUFFER
,
375 MAX(sizeof(dump_dongle_in_t
), sizeof(dump_dongle_out_t
)) },
376 #if defined(DHD_LOG_DUMP)
377 {"log_dump", IOV_LOG_DUMP
, 0, 0, IOVT_UINT8
, 0},
378 #endif /* DHD_LOG_DUMP */
379 {NULL
, 0, 0, 0, 0, 0 }
382 #define DHD_IOVAR_BUF_SIZE 128
/*
 * dhd_query_bus_erros: check the sticky error flags recorded in dhd_pub_t
 * (dongle reset, firmware trap, IOVAR timeout, D3ACK timeout, PCIe link
 * down) and log each condition that is set.
 * NOTE(review): this is a partial extract -- return paths and several
 * lines are elided from this view; confirm against the full source.
 */
385 dhd_query_bus_erros(dhd_pub_t
*dhdp
)
/* Dongle is held in reset: no bus traffic can proceed. */
389 if (dhdp
->dongle_reset
) {
390 DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n",
/* Firmware trapped: report and (on >= 2.6.27 kernels) raise a hang event. */
395 if (dhdp
->dongle_trap_occured
) {
396 DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n",
399 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
400 dhdp
->hang_reason
= HANG_REASON_DONGLE_TRAP
;
401 dhd_os_send_hang_message(dhdp
);
402 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
/* A previous IOVAR resumed on timeout; bus state is suspect. */
405 if (dhdp
->iovar_timeout_occured
) {
406 DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
/* D3ACK timeout is only meaningful for PCIe full-dongle builds. */
411 #ifdef PCIE_FULL_DONGLE
412 if (dhdp
->d3ack_timeout_occured
) {
413 DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
417 #endif /* PCIE_FULL_DONGLE */
/* Rate-limited log: link-down can be reported repeatedly. */
420 if (dhd_bus_get_linkdown(dhdp
)) {
421 DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
/*
 * dhd_sssr_mempool_init: allocate (zeroed) the fixed-size memory pool used
 * to hold SSSR (silicon save/restore) dump buffers; logs on failure.
 * NOTE(review): partial extract -- the return statements are elided here.
 */
432 dhd_sssr_mempool_init(dhd_pub_t
*dhd
)
434 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
/* MALLOCZ zero-fills; pool size is the compile-time DHD_SSSR_MEMPOOL_SIZE. */
435 dhd
->sssr_mempool
= (uint8
*) MALLOCZ(dhd
->osh
, DHD_SSSR_MEMPOOL_SIZE
);
436 if (dhd
->sssr_mempool
== NULL
) {
437 DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
445 dhd_sssr_mempool_deinit(dhd_pub_t
*dhd
)
447 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
448 if (dhd
->sssr_mempool
) {
449 MFREE(dhd
->osh
, dhd
->sssr_mempool
, DHD_SSSR_MEMPOOL_SIZE
);
450 dhd
->sssr_mempool
= NULL
;
455 dhd_dump_sssr_reg_info(sssr_reg_info_v1_t
*sssr_reg_info
)
460 dhd_get_sssr_reg_info_bcm4359(dhd_pub_t
*dhd
)
462 dhd
->sssr_reg_info
.version
= SSSR_REG_INFO_VER
;
463 dhd
->sssr_reg_info
.length
= sizeof(sssr_reg_info_t
);
466 dhd
->sssr_reg_info
.pmu_regs
.base_regs
.pmuintmask0
= 0x18000700;
467 dhd
->sssr_reg_info
.pmu_regs
.base_regs
.pmuintmask1
= 0x18000704;
468 dhd
->sssr_reg_info
.pmu_regs
.base_regs
.resreqtimer
= 0x18000644;
469 dhd
->sssr_reg_info
.pmu_regs
.base_regs
.macresreqtimer
= 0x18000688;
470 dhd
->sssr_reg_info
.pmu_regs
.base_regs
.macresreqtimer1
= 0x180006f0;
472 /* chipcommon regs */
473 dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.intmask
= 0x18000024;
474 dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
= 0x180001e8;
475 dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.clockcontrolstatus
= 0x180001e0;
476 dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
= 0xf00;
479 dhd
->sssr_reg_info
.arm_regs
.base_regs
.clockcontrolstatus
= 0x180021e0;
480 dhd
->sssr_reg_info
.arm_regs
.base_regs
.clockcontrolstatus_val
= 0x20;
481 dhd
->sssr_reg_info
.arm_regs
.wrapper_regs
.resetctrl
= 0x18102800;
482 dhd
->sssr_reg_info
.arm_regs
.wrapper_regs
.itopoobb
= 0x18102f34;
485 dhd
->sssr_reg_info
.pcie_regs
.base_regs
.ltrstate
= 0x180031a0;
486 dhd
->sssr_reg_info
.pcie_regs
.base_regs
.clockcontrolstatus
= 0x180031e0;
487 dhd
->sssr_reg_info
.pcie_regs
.base_regs
.clockcontrolstatus_val
= 0x0;
488 dhd
->sssr_reg_info
.pcie_regs
.wrapper_regs
.itopoobb
= 0x18103f34;
491 dhd
->sssr_reg_info
.mac_regs
[0].base_regs
.xmtdata
= 0x18001134;
492 dhd
->sssr_reg_info
.mac_regs
[0].base_regs
.xmtaddress
= 0x18001130;
493 dhd
->sssr_reg_info
.mac_regs
[0].base_regs
.clockcontrolstatus
= 0x180011e0;
494 dhd
->sssr_reg_info
.mac_regs
[0].base_regs
.clockcontrolstatus_val
= 0x20;
495 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.resetctrl
= 0x18101800;
496 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.itopoobb
= 0x18101f34;
497 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl
= 0x18101408;
498 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[0] = 0xc7;
499 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[1] = 0x15f;
500 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[2] = 0x151;
501 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[3] = 0x155;
502 dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[4] = 0xc5;
503 dhd
->sssr_reg_info
.mac_regs
[0].sr_size
= 0x40000;
/*
 * dhd_get_sssr_reg_info: populate dhd->sssr_reg_info, either from a
 * hard-coded table for 4355/4359-family chips or by querying the firmware
 * via the "sssr_reg_info" iovar; dumps the result on success.
 * NOTE(review): partial extract -- the chiprev condition on the if and the
 * return statement are elided from this view.
 */
509 dhd_get_sssr_reg_info(dhd_pub_t
*dhd
)
512 uint16 chipid
, chiprev
;
514 chipid
= dhd_get_chipid(dhd
);
515 chiprev
= dhd_bus_chiprev_id(dhd
);
517 /* get sssr_reg_info from firmware */
518 memset((void *)&dhd
->sssr_reg_info
, 0, sizeof(dhd
->sssr_reg_info
));
/* 4355/4359 family: firmware may not support the iovar, use a fixed table. */
520 if ((chipid
== BCM4355_CHIP_ID
|| chipid
== BCM4359_CHIP_ID
) &&
522 /* Get SSSR reg info with alternative way for 4359C0/43596A0 */
523 ret
= dhd_get_sssr_reg_info_bcm4359(dhd
);
/* Otherwise ask the dongle firmware directly (GET iovar, set=FALSE). */
525 ret
= dhd_iovar(dhd
, 0, "sssr_reg_info", NULL
, 0, (char *)&dhd
->sssr_reg_info
,
526 sizeof(dhd
->sssr_reg_info
), FALSE
);
528 DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
533 if (ret
== BCME_OK
) {
534 dhd_dump_sssr_reg_info(&dhd
->sssr_reg_info
);
/*
 * dhd_get_sssr_bufsize: compute the total buffer size needed for an SSSR
 * dump: sum of all D11 core SR sizes plus the VASIP SR size, doubled so
 * both a "before" and an "after" snapshot fit.
 * NOTE(review): partial extract -- the return statement is elided here.
 */
541 dhd_get_sssr_bufsize(dhd_pub_t
*dhd
)
544 uint32 sssr_bufsize
= 0;
545 /* Init all pointers to NULL */
546 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
547 sssr_bufsize
+= dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
;
549 sssr_bufsize
+= dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
;
551 /* Double the size as different dumps will be saved before and after SR */
552 sssr_bufsize
= 2 * sssr_bufsize
;
558 dhd_sssr_dump_init(dhd_pub_t
*dhd
)
562 uint32 mempool_used
= 0;
564 dhd
->sssr_inited
= FALSE
;
566 /* check if sssr mempool is allocated */
567 if (dhd
->sssr_mempool
== NULL
) {
568 DHD_ERROR(("%s: sssr_mempool is not allocated\n",
573 /* Get SSSR reg info */
574 if (dhd_get_sssr_reg_info(dhd
) != BCME_OK
) {
575 DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__
));
579 /* Validate structure version */
580 if (dhd
->sssr_reg_info
.version
> SSSR_REG_INFO_VER_1
) {
581 DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
582 __FUNCTION__
, (int)dhd
->sssr_reg_info
.version
, SSSR_REG_INFO_VER
));
586 /* Validate structure length */
587 if (dhd
->sssr_reg_info
.length
< sizeof(sssr_reg_info_v0_t
)) {
588 DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
589 __FUNCTION__
, (int)dhd
->sssr_reg_info
.length
,
590 (int)sizeof(dhd
->sssr_reg_info
)));
594 /* validate fifo size */
595 sssr_bufsize
= dhd_get_sssr_bufsize(dhd
);
596 if (sssr_bufsize
> DHD_SSSR_MEMPOOL_SIZE
) {
597 DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
598 __FUNCTION__
, (int)sssr_bufsize
, DHD_SSSR_MEMPOOL_SIZE
));
602 /* init all pointers to NULL */
603 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
604 dhd
->sssr_d11_before
[i
] = NULL
;
605 dhd
->sssr_d11_after
[i
] = NULL
;
607 dhd
->sssr_dig_buf_before
= NULL
;
608 dhd
->sssr_dig_buf_after
= NULL
;
610 /* Allocate memory */
611 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
612 if (dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
) {
613 dhd
->sssr_d11_before
[i
] = (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
614 mempool_used
+= dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
;
616 dhd
->sssr_d11_after
[i
] = (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
617 mempool_used
+= dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
;
619 DHD_ERROR(("%s: SR size for core[%d] is 0\n", __FUNCTION__
, i
));
623 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
624 dhd
->sssr_dig_buf_before
= (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
625 mempool_used
+= dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
;
627 dhd
->sssr_dig_buf_after
= (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
628 mempool_used
+= dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
;
629 } else if ((dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
630 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
) {
631 dhd
->sssr_dig_buf_before
= (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
632 mempool_used
+= dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_size
;
634 dhd
->sssr_dig_buf_after
= (uint32
*)(dhd
->sssr_mempool
+ mempool_used
);
635 mempool_used
+= dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_size
;
638 dhd
->sssr_inited
= TRUE
;
644 dhd_sssr_dump_deinit(dhd_pub_t
*dhd
)
648 dhd
->sssr_inited
= FALSE
;
649 /* init all pointers to NULL */
650 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
651 dhd
->sssr_d11_before
[i
] = NULL
;
652 dhd
->sssr_d11_after
[i
] = NULL
;
654 dhd
->sssr_dig_buf_before
= NULL
;
655 dhd
->sssr_dig_buf_after
= NULL
;
660 #endif /* DHD_SSSR_DUMP */
662 #ifdef DHD_FW_COREDUMP
/*
 * dhd_get_fwdump_buf: return a zeroed buffer of 'length' bytes for a
 * firmware crash snapshot, allocating it lazily on first use (static
 * preallocation when CONFIG_DHD_USE_STATIC_BUF/DHD_USE_STATIC_MEMDUMP are
 * set, heap MALLOC otherwise). Records the size in soc_ram_length.
 * Ownership: the buffer is freed by dhd_free/dhd_clear, not by the caller.
 * NOTE(review): partial extract -- the early-return on allocation failure
 * is elided from this view.
 */
663 void* dhd_get_fwdump_buf(dhd_pub_t
*dhd_pub
, uint32 length
)
/* Allocate only once; subsequent calls reuse the existing buffer. */
665 if (!dhd_pub
->soc_ram
) {
666 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
667 dhd_pub
->soc_ram
= (uint8
*)DHD_OS_PREALLOC(dhd_pub
,
668 DHD_PREALLOC_MEMDUMP_RAM
, length
);
670 dhd_pub
->soc_ram
= (uint8
*) MALLOC(dhd_pub
->osh
, length
);
671 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
674 if (dhd_pub
->soc_ram
== NULL
) {
675 DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
677 dhd_pub
->soc_ram_length
= 0;
/* Clear the buffer so stale data never masquerades as dump contents. */
679 memset(dhd_pub
->soc_ram
, 0, length
);
680 dhd_pub
->soc_ram_length
= length
;
683 /* soc_ram free handled in dhd_{free,clear} */
684 return dhd_pub
->soc_ram
;
686 #endif /* DHD_FW_COREDUMP */
688 /* to NDIS developer, the structure dhd_common is redundant,
689 * please do NOT merge it back from other branches !!!
693 dhd_common_socram_dump(dhd_pub_t
*dhdp
)
695 return dhd_socram_dump(dhdp
->bus
);
699 dhd_dump(dhd_pub_t
*dhdp
, char *buf
, int buflen
)
701 char eabuf
[ETHER_ADDR_STR_LEN
];
704 struct bcmstrbuf
*strbuf
= &b
;
706 if (!dhdp
|| !dhdp
->prot
|| !buf
) {
710 bcm_binit(strbuf
, buf
, buflen
);
713 bcm_bprintf(strbuf
, "%s\n", dhd_version
);
714 bcm_bprintf(strbuf
, "\n");
715 bcm_bprintf(strbuf
, "pub.up %d pub.txoff %d pub.busstate %d\n",
716 dhdp
->up
, dhdp
->txoff
, dhdp
->busstate
);
717 bcm_bprintf(strbuf
, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
718 dhdp
->hdrlen
, dhdp
->maxctl
, dhdp
->rxsz
);
719 bcm_bprintf(strbuf
, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG
"\n",
720 dhdp
->iswl
, dhdp
->drv_version
, MAC2STRDBG(bcm_ether_ntoa(&dhdp
->mac
, eabuf
)));
721 bcm_bprintf(strbuf
, "pub.bcmerror %d tickcnt %u\n", dhdp
->bcmerror
, dhdp
->tickcnt
);
723 bcm_bprintf(strbuf
, "dongle stats:\n");
724 bcm_bprintf(strbuf
, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
725 dhdp
->dstats
.tx_packets
, dhdp
->dstats
.tx_bytes
,
726 dhdp
->dstats
.tx_errors
, dhdp
->dstats
.tx_dropped
);
727 bcm_bprintf(strbuf
, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
728 dhdp
->dstats
.rx_packets
, dhdp
->dstats
.rx_bytes
,
729 dhdp
->dstats
.rx_errors
, dhdp
->dstats
.rx_dropped
);
730 bcm_bprintf(strbuf
, "multicast %lu\n", dhdp
->dstats
.multicast
);
732 bcm_bprintf(strbuf
, "bus stats:\n");
733 bcm_bprintf(strbuf
, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
734 dhdp
->tx_packets
, dhdp
->tx_dropped
, dhdp
->tx_multicast
, dhdp
->tx_errors
);
735 bcm_bprintf(strbuf
, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
736 dhdp
->tx_ctlpkts
, dhdp
->tx_ctlerrs
);
737 bcm_bprintf(strbuf
, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
738 dhdp
->rx_packets
, dhdp
->rx_multicast
, dhdp
->rx_errors
);
739 bcm_bprintf(strbuf
, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
740 dhdp
->rx_ctlpkts
, dhdp
->rx_ctlerrs
, dhdp
->rx_dropped
);
741 bcm_bprintf(strbuf
, "rx_readahead_cnt %lu tx_realloc %lu\n",
742 dhdp
->rx_readahead_cnt
, dhdp
->tx_realloc
);
743 bcm_bprintf(strbuf
, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
744 dhdp
->tx_pktgetfail
, dhdp
->rx_pktgetfail
);
745 bcm_bprintf(strbuf
, "tx_big_packets %lu\n",
746 dhdp
->tx_big_packets
);
747 bcm_bprintf(strbuf
, "\n");
749 /* Add DMA MAP info */
750 bcm_bprintf(strbuf
, "DMA MAP stats: \n");
751 bcm_bprintf(strbuf
, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
752 dhdp
->dma_stats
.txdata
, KB(dhdp
->dma_stats
.txdata_sz
),
753 dhdp
->dma_stats
.rxdata
, KB(dhdp
->dma_stats
.rxdata_sz
));
754 #ifndef IOCTLRESP_USE_CONSTMEM
755 bcm_bprintf(strbuf
, "IOCTL RX: %lu size: %luK ,",
756 dhdp
->dma_stats
.ioctl_rx
, KB(dhdp
->dma_stats
.ioctl_rx_sz
));
757 #endif /* !IOCTLRESP_USE_CONSTMEM */
758 bcm_bprintf(strbuf
, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
759 "TSBUF RX: %lu size %luK\n",
760 dhdp
->dma_stats
.event_rx
, KB(dhdp
->dma_stats
.event_rx_sz
),
761 dhdp
->dma_stats
.info_rx
, KB(dhdp
->dma_stats
.info_rx_sz
),
762 dhdp
->dma_stats
.tsbuf_rx
, KB(dhdp
->dma_stats
.tsbuf_rx_sz
));
763 bcm_bprintf(strbuf
, "Total : %luK \n",
764 KB(dhdp
->dma_stats
.txdata_sz
+ dhdp
->dma_stats
.rxdata_sz
+
765 dhdp
->dma_stats
.ioctl_rx_sz
+ dhdp
->dma_stats
.event_rx_sz
+
766 dhdp
->dma_stats
.tsbuf_rx_sz
));
767 #endif /* DMAMAP_STATS */
769 /* Add any prot info */
770 dhd_prot_dump(dhdp
, strbuf
);
771 bcm_bprintf(strbuf
, "\n");
773 /* Add any bus info */
774 dhd_bus_dump(dhdp
, strbuf
);
776 #if defined(DHD_LB_STATS)
777 dhd_lb_stats_dump(dhdp
, strbuf
);
778 #endif /* DHD_LB_STATS */
780 if (dhd_get_wet_mode(dhdp
)) {
781 bcm_bprintf(strbuf
, "Wet Dump:\n");
782 dhd_wet_dump(dhdp
, strbuf
);
786 /* return remaining buffer length */
787 return (!strbuf
->size
? BCME_BUFTOOSHORT
: strbuf
->size
);
/*
 * dhd_dump_to_kernelog: route the driver status dump to the kernel log by
 * temporarily setting the global bcm_bprintf_bypass flag around dhd_dump().
 * NOTE(review): partial extract -- the local 'buf' declaration is elided
 * from this view.
 */
791 dhd_dump_to_kernelog(dhd_pub_t
*dhdp
)
795 DHD_ERROR(("F/W version: %s\n", fw_version
));
/* While bypass is TRUE, bcm_bprintf output goes to the kernel log. */
796 bcm_bprintf_bypass
= TRUE
;
797 dhd_dump(dhdp
, buf
, sizeof(buf
));
/* Always restore the flag so normal buffered printing resumes. */
798 bcm_bprintf_bypass
= FALSE
;
/*
 * dhd_wl_ioctl_cmd: convenience wrapper that builds a wl_ioctl_t from the
 * scalar arguments and forwards it to dhd_wl_ioctl().
 * NOTE(review): partial extract -- the lines filling in the local 'ioc'
 * struct (cmd/buf/len/set) are elided from this view.
 */
802 dhd_wl_ioctl_cmd(dhd_pub_t
*dhd_pub
, int cmd
, void *arg
, int len
, uint8 set
, int ifidx
)
811 return dhd_wl_ioctl(dhd_pub
, ifidx
, &ioc
, arg
, len
);
/*
 * dhd_wl_ioctl_get_intiovar: fetch a 32-bit iovar value from the dongle.
 * Packs the iovar name with bcm_mkiovar, issues the ioctl, and converts
 * the little-endian reply into *pval via ltoh32. Errors are logged.
 * NOTE(review): partial extract -- the success check between the ioctl and
 * the *pval assignment, and the return, are elided from this view.
 */
815 dhd_wl_ioctl_get_intiovar(dhd_pub_t
*dhd_pub
, char *name
, uint
*pval
,
816 int cmd
, uint8 set
, int ifidx
)
818 char iovbuf
[WLC_IOCTL_SMLEN
];
821 memset(iovbuf
, 0, sizeof(iovbuf
));
/* bcm_mkiovar returns 0 on failure (name too long for iovbuf). */
822 if (bcm_mkiovar(name
, NULL
, 0, iovbuf
, sizeof(iovbuf
))) {
823 ret
= dhd_wl_ioctl_cmd(dhd_pub
, cmd
, iovbuf
, sizeof(iovbuf
), set
, ifidx
);
/* Reply is little-endian in iovbuf; convert to host order. */
825 *pval
= ltoh32(*((uint
*)iovbuf
));
827 DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
828 __FUNCTION__
, name
, ret
));
831 DHD_ERROR(("%s: mkiovar %s failed\n",
832 __FUNCTION__
, name
));
/*
 * dhd_wl_ioctl_set_intiovar: set a 32-bit iovar on the dongle. Converts
 * 'val' to little-endian, packs "<name>\0<value>" with bcm_mkiovar, and
 * issues the ioctl. Errors are logged.
 * NOTE(review): partial extract -- the mkiovar-success branch structure
 * and the return are elided from this view.
 */
839 dhd_wl_ioctl_set_intiovar(dhd_pub_t
*dhd_pub
, char *name
, uint val
,
840 int cmd
, uint8 set
, int ifidx
)
842 char iovbuf
[WLC_IOCTL_SMLEN
];
/* Dongle expects little-endian payloads. */
844 int lval
= htol32(val
);
847 len
= bcm_mkiovar(name
, (char*)&lval
, sizeof(lval
), iovbuf
, sizeof(iovbuf
));
850 ret
= dhd_wl_ioctl_cmd(dhd_pub
, cmd
, iovbuf
, len
, set
, ifidx
);
852 DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
853 __FUNCTION__
, name
, ret
));
856 DHD_ERROR(("%s: mkiovar %s failed\n",
857 __FUNCTION__
, name
));
/*
 * ioctl2str_array: lookup table mapping WLC_* ioctl command codes to short
 * human-readable names, used by ioctl2str() for iovar-set logging.
 * NOTE(review): partial extract -- the struct members (ioctl code + name)
 * and the NULL-terminated sentinel entry are elided from this view.
 */
863 static struct ioctl2str_s
{
866 } ioctl2str_array
[] = {
869 {WLC_SET_PROMISC
, "SET_PROMISC"},
870 {WLC_SET_INFRA
, "SET_INFRA"},
871 {WLC_SET_AUTH
, "SET_AUTH"},
872 {WLC_SET_SSID
, "SET_SSID"},
873 {WLC_RESTART
, "RESTART"},
874 {WLC_SET_CHANNEL
, "SET_CHANNEL"},
875 {WLC_SET_RATE_PARAMS
, "SET_RATE_PARAMS"},
876 {WLC_SET_KEY
, "SET_KEY"},
878 {WLC_DISASSOC
, "DISASSOC"},
879 {WLC_REASSOC
, "REASSOC"},
880 {WLC_SET_COUNTRY
, "SET_COUNTRY"},
881 {WLC_SET_WAKE
, "SET_WAKE"},
882 {WLC_SET_SCANSUPPRESS
, "SET_SCANSUPPRESS"},
883 {WLC_SCB_DEAUTHORIZE
, "SCB_DEAUTHORIZE"},
884 {WLC_SET_WSEC
, "SET_WSEC"},
885 {WLC_SET_INTERFERENCE_MODE
, "SET_INTERFERENCE_MODE"},
886 {WLC_SET_RADAR
, "SET_RADAR"},
/*
 * ioctl2str: linear search of ioctl2str_array for a matching ioctl code.
 * The table is terminated by an entry whose name is NULL.
 * NOTE(review): partial extract -- the return of the matched name and the
 * fallback return for unknown codes are elided from this view.
 */
891 ioctl2str(uint32 ioctl
)
893 struct ioctl2str_s
*p
= ioctl2str_array
;
895 while (p
->name
!= NULL
) {
896 if (p
->ioctl
== ioctl
) {
906 * @param ioc IO control struct, members are partially used by this function.
907 * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
908 * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
911 dhd_wl_ioctl(dhd_pub_t
*dhd_pub
, int ifidx
, wl_ioctl_t
*ioc
, void *buf
, int len
)
913 int ret
= BCME_ERROR
;
915 #ifdef DUMP_IOCTL_IOV_LIST
916 dhd_iov_li_t
*iov_li
;
917 #endif /* DUMP_IOCTL_IOV_LIST */
919 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
920 DHD_OS_WAKE_LOCK(dhd_pub
);
921 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub
->bus
)) < 0) {
922 DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__
));
923 DHD_OS_WAKE_UNLOCK(dhd_pub
);
926 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
928 #ifdef KEEPIF_ON_DEVICE_RESET
929 if (ioc
->cmd
== WLC_GET_VAR
) {
930 dbus_config_t config
;
931 config
.general_param
= 0;
933 if (!strcmp(buf
, "wowl_activate")) {
934 /* 1 (TRUE) after decreased by 1 */
935 config
.general_param
= 2;
936 } else if (!strcmp(buf
, "wowl_clear")) {
937 /* 0 (FALSE) after decreased by 1 */
938 config
.general_param
= 1;
941 if (config
.general_param
) {
942 config
.config_id
= DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET
;
943 config
.general_param
--;
944 dbus_set_config(dhd_pub
->dbus
, &config
);
947 #endif /* KEEPIF_ON_DEVICE_RESET */
949 if (dhd_os_proto_block(dhd_pub
))
952 int slen
, val
, lval
, min_len
;
956 if (ioc
->cmd
== WLC_GET_VAR
&& buf
) {
957 min_len
= MIN(sizeof(tmp
) - 1, strlen(buf
));
958 memset(tmp
, 0, sizeof(tmp
));
959 bcopy(buf
, tmp
, min_len
);
962 #endif /* DHD_LOG_DUMP */
963 /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
964 if (ioc
->set
== TRUE
) {
965 char *pars
= (char *)buf
; // points at user buffer
966 if (ioc
->cmd
== WLC_SET_VAR
&& buf
) {
967 DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx
, pars
));
968 if (ioc
->len
> 1 + sizeof(uint32
)) {
970 pars
+= strnlen(pars
, ioc
->len
- 1 - sizeof(uint32
));
971 pars
++; // skip NULL character
974 DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
975 ifidx
, ioc
->cmd
, ioctl2str(ioc
->cmd
)));
978 DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32
*)pars
));
980 DHD_DNGL_IOVAR_SET((" NULL\n"));
984 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
985 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub
)) {
986 DHD_INFO(("%s: returning as busstate=%d\n",
987 __FUNCTION__
, dhd_pub
->busstate
));
988 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
989 dhd_os_proto_unblock(dhd_pub
);
992 DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub
);
993 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
995 #ifdef DHD_PCIE_RUNTIMEPM
996 dhdpcie_runtime_bus_wake(dhd_pub
, TRUE
, dhd_wl_ioctl
);
997 #endif /* DHD_PCIE_RUNTIMEPM */
999 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
1000 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub
)) {
1001 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
1002 __FUNCTION__
, dhd_pub
->busstate
, dhd_pub
->dhd_bus_busy_state
));
1003 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub
);
1004 dhd_os_busbusy_wake(dhd_pub
);
1005 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1006 dhd_os_proto_unblock(dhd_pub
);
1009 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1011 #ifdef DUMP_IOCTL_IOV_LIST
1012 if (ioc
->cmd
!= WLC_GET_MAGIC
&& ioc
->cmd
!= WLC_GET_VERSION
&& buf
) {
1013 if (!(iov_li
= MALLOC(dhd_pub
->osh
, sizeof(*iov_li
)))) {
1014 DHD_ERROR(("iovar dump list item allocation Failed\n"));
1016 iov_li
->cmd
= ioc
->cmd
;
1018 bcopy((char *)buf
, iov_li
->buff
, strlen((char *)buf
)+1);
1019 dhd_iov_li_append(dhd_pub
, &dhd_pub
->dump_iovlist_head
,
1023 #endif /* DUMP_IOCTL_IOV_LIST */
1025 ret
= dhd_prot_ioctl(dhd_pub
, ifidx
, ioc
, buf
, len
);
1027 #ifdef DUMP_IOCTL_IOV_LIST
1028 if (ret
== -ETIMEDOUT
) {
1029 DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
1031 dhd_iov_li_print(&dhd_pub
->dump_iovlist_head
);
1033 #endif /* DUMP_IOCTL_IOV_LIST */
1034 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1035 if (ret
== -ETIMEDOUT
) {
1036 copy_hang_info_ioctl_timeout(dhd_pub
, ifidx
, ioc
);
1038 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1040 if ((ioc
->cmd
== WLC_GET_VAR
|| ioc
->cmd
== WLC_SET_VAR
) &&
1044 slen
= strlen(buf
) + 1;
1046 if (len
>= slen
+ sizeof(lval
)) {
1047 if (ioc
->cmd
== WLC_GET_VAR
) {
1051 min_len
= MIN(ioc
->len
- slen
, sizeof(int));
1052 bcopy((msg
+ slen
), &lval
, min_len
);
1054 if (!strncmp(msg
, "cur_etheraddr",
1055 strlen("cur_etheraddr"))) {
1060 "%s: cmd: %d, msg: %s val: 0x%x,"
1061 " len: %d, set: %d, txn-id: %d\n",
1062 ioc
->cmd
== WLC_GET_VAR
?
1063 "WLC_GET_VAR" : "WLC_SET_VAR",
1064 ioc
->cmd
, msg
, lval
, ioc
->len
, ioc
->set
,
1065 dhd_prot_get_ioctl_trans_id(dhd_pub
)));
1067 DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
1068 ioc
->cmd
== WLC_GET_VAR
? "WLC_GET_VAR" : "WLC_SET_VAR",
1069 ioc
->cmd
, ioc
->len
, ioc
->set
,
1070 dhd_prot_get_ioctl_trans_id(dhd_pub
)));
1074 if (buf
!= NULL
&& slen
!= 0) {
1077 } else if (slen
>= 2) {
1082 /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
1083 if (ioc
->cmd
!= WLC_GET_MAGIC
&& ioc
->cmd
!= WLC_GET_VERSION
)
1084 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
1085 "set: %d\n", ioc
->cmd
, val
, ioc
->len
, ioc
->set
));
1087 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc
->cmd
));
1090 #endif /* DHD_LOG_DUMP */
1091 if (ret
&& dhd_pub
->up
) {
1092 /* Send hang event only if dhd_open() was success */
1093 dhd_os_check_hang(dhd_pub
, ifidx
, ret
);
1096 if (ret
== -ETIMEDOUT
&& !dhd_pub
->up
) {
1097 DHD_ERROR(("%s: 'resumed on timeout' error is "
1098 "occurred before the interface does not"
1099 " bring up\n", __FUNCTION__
));
1100 dhd_pub
->busstate
= DHD_BUS_DOWN
;
1103 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
1104 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub
);
1105 dhd_os_busbusy_wake(dhd_pub
);
1106 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
1108 dhd_os_proto_unblock(dhd_pub
);
1111 if ((ioc
->cmd
== WLC_GET_VAR
|| ioc
->cmd
== WLC_SET_VAR
) &&
1113 if (ret
== BCME_UNSUPPORTED
|| ret
== BCME_NOTASSOCIATED
) {
1114 DHD_ERROR(("%s: %s: %s, %s\n",
1115 __FUNCTION__
, ioc
->cmd
== WLC_GET_VAR
?
1116 "WLC_GET_VAR" : "WLC_SET_VAR",
1117 buf
? (char *)buf
:"NO MESSAGE",
1118 ret
== BCME_UNSUPPORTED
? "UNSUPPORTED"
1119 : "NOT ASSOCIATED"));
1121 DHD_ERROR(("%s: %s: %s, ret = %d\n",
1122 __FUNCTION__
, ioc
->cmd
== WLC_GET_VAR
?
1123 "WLC_GET_VAR" : "WLC_SET_VAR",
1127 if (ret
== BCME_UNSUPPORTED
|| ret
== BCME_NOTASSOCIATED
) {
1128 DHD_ERROR(("%s: WLC_IOCTL: cmd: %d, %s\n",
1129 __FUNCTION__
, ioc
->cmd
,
1130 ret
== BCME_UNSUPPORTED
? "UNSUPPORTED" :
1133 DHD_ERROR(("%s: WLC_IOCTL: cmd: %d, ret = %d\n",
1134 __FUNCTION__
, ioc
->cmd
, ret
));
1140 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1141 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub
->bus
));
1142 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub
->bus
));
1144 DHD_OS_WAKE_UNLOCK(dhd_pub
);
1145 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1150 uint
wl_get_port_num(wl_io_pport_t
*io_pport
)
1155 /* Get bssidx from iovar params
1156 * Input: dhd_pub - pointer to dhd_pub_t
1157 * params - IOVAR params
1158 * Output: idx - BSS index
1159 * val - pointer to the IOVAR arguments
/*
 * NOTE(review): partial extract -- the assignments to *idx / *val, the
 * default (non-"bsscfg:") path, and the returns are elided from this view.
 */
1162 dhd_iovar_parse_bssidx(dhd_pub_t
*dhd_pub
, const char *params
, uint32
*idx
, const char **val
)
1164 char *prefix
= "bsscfg:";
1167 if (!(strncmp(params
, prefix
, strlen(prefix
)))) {
1168 /* per bss setting should be prefixed with 'bsscfg:' */
1169 const char *p
= params
+ strlen(prefix
);
/* bcopy avoids unaligned access: the uint32 follows the name string. */
1176 bcopy(p
, &bssidx
, sizeof(uint32
));
1177 /* Get corresponding dhd index */
1178 bssidx
= dhd_bssidx2idx(dhd_pub
, htod32(bssidx
));
1180 if (bssidx
>= DHD_MAX_IFS
) {
1181 DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__
));
/* Skip past the bssidx value to the iovar's own arguments. */
1186 p
+= sizeof(uint32
);
1190 DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__
));
#if defined(DHD_DEBUG) && defined(BCMDHDUSB)
/* USB device console input: forward a console command string of length
 * msglen to the dongle through the "cons" iovar and return its status.
 */
int
dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	DHD_TRACE(("%s \n", __FUNCTION__));

	return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
}
#endif /* DHD_DEBUG && BCMDHDUSB */
/* Debug "memwaste" console handler: parses msg as one of
 * "query" (list allocated chunks), "alloc <size>" (ask the dongle to
 * allocate size KB and track the handle in dhd->mw_list_head), or
 * "free <id>" (release a tracked chunk in the dongle and host list).
 * NOTE(review): the extraction dropped interior lines (declarations of
 * p/item/next/alloc_handle/handle/size/len, returns and closing braces);
 * the visible tokens below are preserved byte-for-byte.
 */
1210 dhd_mem_debug(dhd_pub_t
*dhd
, uchar
*msg
, uint msglen
)
1212 unsigned long int_arg
= 0;
1214 char *end_ptr
= NULL
;
1215 dhd_dbg_mwli_t
*mw_li
;
1217 /* check if mwalloc, mwquery or mwfree was supplied arguement with space */
1218 p
= bcmstrstr((char *)msg
, " ");
1220 /* space should be converted to null as separation flag for firmware */
1222 /* store the argument in int_arg */
1223 int_arg
= bcm_strtoul(p
+1, &end_ptr
, 10);
/* "query": walk the host-side memwaste list and print each <id,size> */
1226 if (!p
&& !strcmp(msg
, "query")) {
1227 /* lets query the list inetrnally */
1228 if (dll_empty(dll_head_p(&dhd
->mw_list_head
))) {
1229 DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
1231 for (item
= dll_head_p(&dhd
->mw_list_head
);
1232 !dll_end(&dhd
->mw_list_head
, item
); item
= next
) {
1233 next
= dll_next_p(item
);
1234 mw_li
= (dhd_dbg_mwli_t
*)CONTAINEROF(item
, dhd_dbg_mwli_t
, list
);
1235 DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li
->id
, mw_li
->size
));
/* "alloc <size>": rewrite the buffer as "alloc\0<size*1024>" and fire it
 * at the dongle via WLC_GET_VAR; the reply carries the allocation handle.
 */
1238 } else if (p
&& end_ptr
&& (*end_ptr
== '\0') && !strcmp(msg
, "alloc")) {
1240 /* convert size into KB and append as integer */
1241 *((int32
*)(p
+1)) = int_arg
*1024;
1242 *(p
+1+sizeof(int32
)) = '\0';
1244 /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1245 * 1 bytes for null caracter
1247 msglen
= strlen(msg
) + sizeof(int32
) + 1;
1248 if (dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, msg
, msglen
+1, FALSE
, 0) < 0) {
1249 DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
1252 /* returned allocated handle from dongle, basically address of the allocated unit */
1253 alloc_handle
= *((int32
*)msg
);
1255 /* add a node in the list with tuple <id, handle, size> */
1256 if (alloc_handle
== 0) {
1257 DHD_ERROR(("Reuqested size could not be allocated\n"));
1258 } else if (!(mw_li
= MALLOC(dhd
->osh
, sizeof(*mw_li
)))) {
1259 DHD_ERROR(("mw list item allocation Failed\n"));
1261 mw_li
->id
= dhd
->mw_id
++;
1262 mw_li
->handle
= alloc_handle
;
1263 mw_li
->size
= int_arg
;
1264 /* append the node in the list */
1265 dll_append(&dhd
->mw_list_head
, &mw_li
->list
);
/* "free <id>": find the matching list node, tell the dongle to release
 * <handle,size>, and drop the host-side bookkeeping entry.
 */
1267 } else if (p
&& end_ptr
&& (*end_ptr
== '\0') && !strcmp(msg
, "free")) {
1268 /* inform dongle to free wasted chunk */
1271 for (item
= dll_head_p(&dhd
->mw_list_head
);
1272 !dll_end(&dhd
->mw_list_head
, item
); item
= next
) {
1273 next
= dll_next_p(item
);
1274 mw_li
= (dhd_dbg_mwli_t
*)CONTAINEROF(item
, dhd_dbg_mwli_t
, list
);
1276 if (mw_li
->id
== (int)int_arg
) {
1277 handle
= mw_li
->handle
;
1280 MFREE(dhd
->osh
, mw_li
, sizeof(*mw_li
));
1281 if (dll_empty(dll_head_p(&dhd
->mw_list_head
))) {
1289 /* append the free handle and the chunk size in first 8 bytes
1290 * after the command and null character
1292 *((int32
*)(p
+1)) = handle
;
1293 *((int32
*)((p
+1)+sizeof(int32
))) = size
;
1294 /* append null as terminator */
1295 *(p
+1+2*sizeof(int32
)) = '\0';
1296 /* recalculated length -> 4 bytes for "free" + 8 bytes for hadnle and size
1297 * + 1 bytes for null caracter
1299 len
= strlen(msg
) + 2*sizeof(int32
) + 1;
1300 /* send iovar to free the chunk */
1301 if (dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, msg
, len
, FALSE
, 0) < 0) {
1302 DHD_ERROR(("IOCTL failed for memdebug free\n"));
1305 DHD_ERROR(("specified id does not exist\n"));
1308 /* for all the wrong argument formats */
1314 dhd_mw_list_delete(dhd_pub_t
*dhd
, dll_t
*list_head
)
1317 dhd_dbg_mwli_t
*mw_li
;
1318 while (!(dll_empty(list_head
))) {
1319 item
= dll_head_p(list_head
);
1320 mw_li
= (dhd_dbg_mwli_t
*)CONTAINEROF(item
, dhd_dbg_mwli_t
, list
);
1322 MFREE(dhd
->osh
, mw_li
, sizeof(*mw_li
));
/* Debug console handler for PCIe flow rings. msg is either
 * "create <src mac> <dst mac> <prio>" or "delete <flowid>"; the MACs are
 * colon-separated hex bytes and prio is decimal 0-7.
 * NOTE(review): the extraction dropped interior lines (declarations of
 * cmd/prio/flowid/i/ret, returns and closing braces); the visible tokens
 * below are preserved byte-for-byte.
 */
1327 dhd_flow_ring_debug(dhd_pub_t
*dhd
, char *msg
, uint msglen
)
1329 flow_ring_table_t
*flow_ring_table
;
1331 char *end_ptr
= NULL
;
1336 cmd
= bcmstrstr(msg
, " ");
1337 BCM_REFERENCE(prio
);
1339 /* in order to use string operations append null */
1342 DHD_ERROR(("missing: create/delete args\n"));
/* "create": parse <sa> <da> <prio> and request a new flow ring on ifidx 0 */
1345 if (cmd
&& !strcmp(msg
, "create")) {
1346 /* extract <"source address", "destination address", "priority"> */
1347 uint8 sa
[ETHER_ADDR_LEN
], da
[ETHER_ADDR_LEN
];
1350 msg
= msg
+ strlen("create") + 1;
1351 /* fill ethernet source address */
1352 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
1353 sa
[i
] = (uint8
)bcm_strtoul(msg
, &end_ptr
, 16);
1354 if (*end_ptr
== ':') {
1355 msg
= (end_ptr
+ 1);
1356 } else if (i
!= 5) {
1357 DHD_ERROR(("not a valid source mac addr\n"));
1361 if (*end_ptr
!= ' ') {
1362 DHD_ERROR(("missing: destiantion mac id\n"));
1368 /* fill ethernet destination address */
1369 for (i
= 0; i
< ETHER_ADDR_LEN
; i
++) {
1370 da
[i
] = (uint8
)bcm_strtoul(msg
, &end_ptr
, 16);
1371 if (*end_ptr
== ':') {
1372 msg
= (end_ptr
+ 1);
1373 } else if (i
!= 5) {
1374 DHD_ERROR(("not a valid destination mac addr\n"));
1378 if (*end_ptr
!= ' ') {
1379 DHD_ERROR(("missing: priority\n"));
1384 /* parse priority */
1385 prio
= (uint8
)bcm_strtoul(msg
, &end_ptr
, 10);
1386 if (prio
> MAXPRIO
) {
1387 DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
1392 if (*end_ptr
!= '\0') {
1393 DHD_ERROR(("msg not truncated with NULL character\n"));
1396 ret
= dhd_flowid_debug_create(dhd
, 0, prio
, (char *)sa
, (char *)da
, &flowid
);
1397 if (ret
!= BCME_OK
) {
1398 DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__
, ret
));
/* "delete": parse the decimal flowid and ask the bus layer to tear it down */
1403 } else if (cmd
&& !strcmp(msg
, "delete")) {
1404 msg
= msg
+ strlen("delete") + 1;
1406 flowid
= (uint16
)bcm_strtoul(msg
, &end_ptr
, 10);
1407 if (*end_ptr
!= '\0') {
1408 DHD_ERROR(("msg not truncated with NULL character\n"));
1412 /* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */
1413 if (dhd_flowid_find_by_ifidx(dhd
, 0, flowid
) != BCME_OK
)
1415 DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__
, flowid
));
1419 flow_ring_table
= (flow_ring_table_t
*)dhd
->flow_ring_table
;
1420 ret
= dhd_bus_flow_ring_delete_request(dhd
->bus
, (void *)&flow_ring_table
[flowid
]);
1421 if (ret
!= BCME_OK
) {
1422 DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__
, ret
));
1427 DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__
));
1430 #endif /* BCMPCIE */
1431 #endif /* DHD_DEBUG */
/* Central dispatcher for host-resident DHD iovars. 'vi' identifies the
 * iovar, 'actionid' encodes get vs set (IOV_GVAL/IOV_SVAL), 'params'/'plen'
 * carry set qualifiers and 'arg'/'len' the user buffer; the BCME_xxx result
 * is accumulated in 'bcmerror'.
 * NOTE(review): the extraction dropped many interior lines (case braces,
 * break statements, local declarations, the closing exit path); the visible
 * tokens below are preserved byte-for-byte.
 */
1434 dhd_doiovar(dhd_pub_t
*dhd_pub
, const bcm_iovar_t
*vi
, uint32 actionid
, const char *name
,
1435 void *params
, int plen
, void *arg
, int len
, int val_size
)
1439 uint32 dhd_ver_len
, bus_api_rev_len
;
1441 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1442 DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__
, actionid
, name
));
/* Validate the caller's buffer length for this iovar before dispatching */
1444 if ((bcmerror
= bcm_iovar_lencheck(vi
, arg
, len
, IOV_ISSET(actionid
))) != 0)
1447 if (plen
>= (int)sizeof(int_val
))
1448 bcopy(params
, &int_val
, sizeof(int_val
));
1451 case IOV_GVAL(IOV_VERSION
):
1452 /* Need to have checked buffer length */
1453 dhd_ver_len
= strlen(dhd_version
);
1454 bus_api_rev_len
= strlen(bus_api_revision
);
1456 bcm_strncpy_s((char*)arg
, dhd_ver_len
, dhd_version
, dhd_ver_len
);
1457 if (bus_api_rev_len
)
1458 bcm_strncat_s((char*)arg
+ dhd_ver_len
, bus_api_rev_len
, bus_api_revision
,
1462 case IOV_GVAL(IOV_MSGLEVEL
):
1463 int_val
= (int32
)dhd_msg_level
;
1464 bcopy(&int_val
, arg
, val_size
);
1467 case IOV_SVAL(IOV_MSGLEVEL
):
1469 /* Enable DHD and WL logs in oneshot */
1470 if (int_val
& DHD_WL_VAL2
)
1471 wl_cfg80211_enable_trace(TRUE
, int_val
& (~DHD_WL_VAL2
));
1472 else if (int_val
& DHD_WL_VAL
)
1473 wl_cfg80211_enable_trace(FALSE
, WL_DBG_DBG
);
1474 if (!(int_val
& DHD_WL_VAL2
))
1475 #endif /* WL_CFG80211 */
1476 dhd_msg_level
= int_val
;
1478 case IOV_GVAL(IOV_BCMERRORSTR
):
1479 bcm_strncpy_s((char *)arg
, len
, bcmerrorstr(dhd_pub
->bcmerror
), BCME_STRLEN
);
1480 ((char *)arg
)[BCME_STRLEN
- 1] = 0x00;
1483 case IOV_GVAL(IOV_BCMERROR
):
1484 int_val
= (int32
)dhd_pub
->bcmerror
;
1485 bcopy(&int_val
, arg
, val_size
);
1488 case IOV_GVAL(IOV_WDTICK
):
1489 int_val
= (int32
)dhd_watchdog_ms
;
1490 bcopy(&int_val
, arg
, val_size
);
1493 case IOV_SVAL(IOV_WDTICK
):
1495 bcmerror
= BCME_NOTUP
;
1499 dhd_watchdog_ms
= (uint
)int_val
;
1501 dhd_os_wd_timer(dhd_pub
, (uint
)int_val
);
1504 case IOV_GVAL(IOV_DUMP
):
1505 if (dhd_dump(dhd_pub
, arg
, len
) <= 0)
1506 bcmerror
= BCME_ERROR
;
1511 case IOV_GVAL(IOV_DCONSOLE_POLL
):
1512 int_val
= (int32
)dhd_pub
->dhd_console_ms
;
1513 bcopy(&int_val
, arg
, val_size
);
1516 case IOV_SVAL(IOV_DCONSOLE_POLL
):
1517 dhd_pub
->dhd_console_ms
= (uint
)int_val
;
1520 #if defined(DHD_DEBUG)
1521 case IOV_SVAL(IOV_CONS
):
1523 bcmerror
= dhd_bus_console_in(dhd_pub
, arg
, len
- 1);
1525 #endif /* DHD_DEBUG */
/* Reset every host-side packet/error counter in one shot */
1527 case IOV_SVAL(IOV_CLEARCOUNTS
):
1528 dhd_pub
->tx_packets
= dhd_pub
->rx_packets
= 0;
1529 dhd_pub
->tx_errors
= dhd_pub
->rx_errors
= 0;
1530 dhd_pub
->tx_ctlpkts
= dhd_pub
->rx_ctlpkts
= 0;
1531 dhd_pub
->tx_ctlerrs
= dhd_pub
->rx_ctlerrs
= 0;
1532 dhd_pub
->tx_dropped
= 0;
1533 dhd_pub
->rx_dropped
= 0;
1534 dhd_pub
->tx_pktgetfail
= 0;
1535 dhd_pub
->rx_pktgetfail
= 0;
1536 dhd_pub
->rx_readahead_cnt
= 0;
1537 dhd_pub
->tx_realloc
= 0;
1538 dhd_pub
->wd_dpc_sched
= 0;
1539 dhd_pub
->tx_big_packets
= 0;
1540 memset(&dhd_pub
->dstats
, 0, sizeof(dhd_pub
->dstats
));
1541 dhd_bus_clearcounts(dhd_pub
);
1542 #ifdef PROP_TXSTATUS
1543 /* clear proptxstatus related counters */
1544 dhd_wlfc_clear_counts(dhd_pub
);
1545 #endif /* PROP_TXSTATUS */
1546 #if defined(DHD_LB_STATS)
1547 DHD_LB_STATS_RESET(dhd_pub
);
1548 #endif /* DHD_LB_STATS */
1551 case IOV_GVAL(IOV_IOCTLTIMEOUT
): {
1552 int_val
= (int32
)dhd_os_get_ioctl_resp_timeout();
1553 bcopy(&int_val
, arg
, sizeof(int_val
));
1557 case IOV_SVAL(IOV_IOCTLTIMEOUT
): {
1559 bcmerror
= BCME_BADARG
;
1561 dhd_os_set_ioctl_resp_timeout((unsigned int)int_val
);
/* Proptxstatus (wlfc) enable/mode/ignore knobs */
1565 #ifdef PROP_TXSTATUS
1566 case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE
): {
1567 bool wlfc_enab
= FALSE
;
1568 bcmerror
= dhd_wlfc_get_enable(dhd_pub
, &wlfc_enab
);
1569 if (bcmerror
!= BCME_OK
)
1571 int_val
= wlfc_enab
? 1 : 0;
1572 bcopy(&int_val
, arg
, val_size
);
1575 case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE
): {
1576 bool wlfc_enab
= FALSE
;
1577 bcmerror
= dhd_wlfc_get_enable(dhd_pub
, &wlfc_enab
);
1578 if (bcmerror
!= BCME_OK
)
1581 /* wlfc is already set as desired */
1582 if (wlfc_enab
== (int_val
== 0 ? FALSE
: TRUE
))
1585 if (int_val
== TRUE
)
1586 bcmerror
= dhd_wlfc_init(dhd_pub
);
1588 bcmerror
= dhd_wlfc_deinit(dhd_pub
);
1592 case IOV_GVAL(IOV_PROPTXSTATUS_MODE
):
1593 bcmerror
= dhd_wlfc_get_mode(dhd_pub
, &int_val
);
1594 if (bcmerror
!= BCME_OK
)
1596 bcopy(&int_val
, arg
, val_size
);
1599 case IOV_SVAL(IOV_PROPTXSTATUS_MODE
):
1600 dhd_wlfc_set_mode(dhd_pub
, int_val
);
1603 case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE
):
1604 bcmerror
= dhd_wlfc_get_module_ignore(dhd_pub
, &int_val
);
1605 if (bcmerror
!= BCME_OK
)
1607 bcopy(&int_val
, arg
, val_size
);
1610 case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE
):
1611 dhd_wlfc_set_module_ignore(dhd_pub
, int_val
);
1614 case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE
):
1615 bcmerror
= dhd_wlfc_get_credit_ignore(dhd_pub
, &int_val
);
1616 if (bcmerror
!= BCME_OK
)
1618 bcopy(&int_val
, arg
, val_size
);
1621 case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE
):
1622 dhd_wlfc_set_credit_ignore(dhd_pub
, int_val
);
1625 case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE
):
1626 bcmerror
= dhd_wlfc_get_txstatus_ignore(dhd_pub
, &int_val
);
1627 if (bcmerror
!= BCME_OK
)
1629 bcopy(&int_val
, arg
, val_size
);
1632 case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE
):
1633 dhd_wlfc_set_txstatus_ignore(dhd_pub
, int_val
);
1636 case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK
):
1637 bcmerror
= dhd_wlfc_get_rxpkt_chk(dhd_pub
, &int_val
);
1638 if (bcmerror
!= BCME_OK
)
1640 bcopy(&int_val
, arg
, val_size
);
1643 case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK
):
1644 dhd_wlfc_set_rxpkt_chk(dhd_pub
, int_val
);
1647 #endif /* PROP_TXSTATUS */
1649 case IOV_GVAL(IOV_BUS_TYPE
):
1650 /* The dhd application queries the driver to check if its usb or sdio. */
1652 int_val
= BUS_TYPE_USB
;
1655 int_val
= BUS_TYPE_SDIO
;
1657 #ifdef PCIE_FULL_DONGLE
1658 int_val
= BUS_TYPE_PCIE
;
1660 bcopy(&int_val
, arg
, val_size
);
1663 case IOV_SVAL(IOV_CHANGEMTU
):
1665 bcmerror
= dhd_change_mtu(dhd_pub
, int_val
, 0);
1668 case IOV_GVAL(IOV_HOSTREORDER_FLOWS
):
1671 uint8
*ptr
= (uint8
*)arg
;
1675 for (i
= 0; i
< WLHOST_REORDERDATA_MAXFLOWS
; i
++) {
1676 if (dhd_pub
->reorder_bufs
[i
] != NULL
) {
1677 *ptr
= dhd_pub
->reorder_bufs
[i
]->flow_id
;
1686 #ifdef DHDTCPACK_SUPPRESS
1687 case IOV_GVAL(IOV_TCPACK_SUPPRESS
): {
1688 int_val
= (uint32
)dhd_pub
->tcpack_sup_mode
;
1689 bcopy(&int_val
, arg
, val_size
);
1692 case IOV_SVAL(IOV_TCPACK_SUPPRESS
): {
1693 bcmerror
= dhd_tcpack_suppress_set(dhd_pub
, (uint8
)int_val
);
1696 #endif /* DHDTCPACK_SUPPRESS */
/* L2-filter iovars: each resolves the per-BSS index from the iovar name
 * prefix before reading or writing the corresponding filter state.
 */
1698 #ifdef DHD_L2_FILTER
1699 case IOV_GVAL(IOV_DHCP_UNICAST
): {
1702 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1703 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
1704 __FUNCTION__
, name
));
1705 bcmerror
= BCME_BADARG
;
1708 int_val
= dhd_get_dhcp_unicast_status(dhd_pub
, bssidx
);
1709 memcpy(arg
, &int_val
, val_size
);
1712 case IOV_SVAL(IOV_DHCP_UNICAST
): {
1715 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1716 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
1717 __FUNCTION__
, name
));
1718 bcmerror
= BCME_BADARG
;
1721 memcpy(&int_val
, val
, sizeof(int_val
));
1722 bcmerror
= dhd_set_dhcp_unicast_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
1725 case IOV_GVAL(IOV_BLOCK_PING
): {
1729 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1730 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__
));
1731 bcmerror
= BCME_BADARG
;
1734 int_val
= dhd_get_block_ping_status(dhd_pub
, bssidx
);
1735 memcpy(arg
, &int_val
, val_size
);
1738 case IOV_SVAL(IOV_BLOCK_PING
): {
1742 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1743 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__
));
1744 bcmerror
= BCME_BADARG
;
1747 memcpy(&int_val
, val
, sizeof(int_val
));
1748 bcmerror
= dhd_set_block_ping_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
1751 case IOV_GVAL(IOV_PROXY_ARP
): {
1755 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1756 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__
));
1757 bcmerror
= BCME_BADARG
;
1760 int_val
= dhd_get_parp_status(dhd_pub
, bssidx
);
1761 bcopy(&int_val
, arg
, val_size
);
1764 case IOV_SVAL(IOV_PROXY_ARP
): {
1768 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1769 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__
));
1770 bcmerror
= BCME_BADARG
;
1773 bcopy(val
, &int_val
, sizeof(int_val
));
1775 /* Issue a iovar request to WL to update the proxy arp capability bit
1776 * in the Extended Capability IE of beacons/probe responses.
1778 bcmerror
= dhd_iovar(dhd_pub
, bssidx
, "proxy_arp_advertise", val
, sizeof(int_val
),
1780 if (bcmerror
== BCME_OK
) {
1781 dhd_set_parp_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
1785 case IOV_GVAL(IOV_GRAT_ARP
): {
1789 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1790 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__
));
1791 bcmerror
= BCME_BADARG
;
1794 int_val
= dhd_get_grat_arp_status(dhd_pub
, bssidx
);
1795 memcpy(arg
, &int_val
, val_size
);
1798 case IOV_SVAL(IOV_GRAT_ARP
): {
1802 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1803 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__
));
1804 bcmerror
= BCME_BADARG
;
1807 memcpy(&int_val
, val
, sizeof(int_val
));
1808 bcmerror
= dhd_set_grat_arp_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
1811 case IOV_GVAL(IOV_BLOCK_TDLS
): {
1815 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1816 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__
));
1817 bcmerror
= BCME_BADARG
;
1820 int_val
= dhd_get_block_tdls_status(dhd_pub
, bssidx
);
1821 memcpy(arg
, &int_val
, val_size
);
1824 case IOV_SVAL(IOV_BLOCK_TDLS
): {
1828 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1829 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__
));
1830 bcmerror
= BCME_BADARG
;
1833 memcpy(&int_val
, val
, sizeof(int_val
));
1834 bcmerror
= dhd_set_block_tdls_status(dhd_pub
, bssidx
, int_val
? 1 : 0);
1837 #endif /* DHD_L2_FILTER */
1838 case IOV_SVAL(IOV_DHD_IE
): {
1842 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1843 DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__
));
1844 bcmerror
= BCME_BADARG
;
1850 case IOV_GVAL(IOV_AP_ISOLATE
): {
1854 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1855 DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__
));
1856 bcmerror
= BCME_BADARG
;
1860 int_val
= dhd_get_ap_isolate(dhd_pub
, bssidx
);
1861 bcopy(&int_val
, arg
, val_size
);
1864 case IOV_SVAL(IOV_AP_ISOLATE
): {
1868 if (dhd_iovar_parse_bssidx(dhd_pub
, name
, &bssidx
, &val
) != BCME_OK
) {
1869 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__
));
1870 bcmerror
= BCME_BADARG
;
1875 bcopy(val
, &int_val
, sizeof(uint32
));
1876 dhd_set_ap_isolate(dhd_pub
, bssidx
, int_val
);
1880 case IOV_GVAL(IOV_PSTA
): {
1881 int_val
= dhd_get_psta_mode(dhd_pub
);
1882 bcopy(&int_val
, arg
, val_size
);
1885 case IOV_SVAL(IOV_PSTA
): {
1886 if (int_val
>= DHD_MODE_PSTA_DISABLED
&& int_val
<= DHD_MODE_PSR
) {
1887 dhd_set_psta_mode(dhd_pub
, int_val
);
1889 bcmerror
= BCME_RANGE
;
1893 #endif /* DHD_PSTA */
1895 case IOV_GVAL(IOV_WET
):
1896 int_val
= dhd_get_wet_mode(dhd_pub
);
1897 bcopy(&int_val
, arg
, val_size
);
1900 case IOV_SVAL(IOV_WET
):
1901 if (int_val
== 0 || int_val
== 1) {
1902 dhd_set_wet_mode(dhd_pub
, int_val
);
1903 /* Delete the WET DB when disabled */
1905 dhd_wet_sta_delete_list(dhd_pub
);
1908 bcmerror
= BCME_RANGE
;
1911 case IOV_SVAL(IOV_WET_HOST_IPV4
):
1912 dhd_set_wet_host_ipv4(dhd_pub
, params
, plen
);
1914 case IOV_SVAL(IOV_WET_HOST_MAC
):
1915 dhd_set_wet_host_mac(dhd_pub
, params
, plen
);
1917 #endif /* DHD_WET */
1918 #ifdef DHD_MCAST_REGEN
1919 case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE
): {
1923 if (dhd_iovar_parse_bssidx(dhd_pub
, (char *)name
, &bssidx
, &val
) != BCME_OK
) {
1924 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__
));
1925 bcmerror
= BCME_BADARG
;
1929 int_val
= dhd_get_mcast_regen_bss_enable(dhd_pub
, bssidx
);
1930 bcopy(&int_val
, arg
, val_size
);
1934 case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE
): {
1938 if (dhd_iovar_parse_bssidx(dhd_pub
, (char *)name
, &bssidx
, &val
) != BCME_OK
) {
1939 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__
));
1940 bcmerror
= BCME_BADARG
;
1945 bcopy(val
, &int_val
, sizeof(uint32
));
1946 dhd_set_mcast_regen_bss_enable(dhd_pub
, bssidx
, int_val
);
1949 #endif /* DHD_MCAST_REGEN */
1951 case IOV_GVAL(IOV_CFG80211_OPMODE
): {
1952 int_val
= (int32
)dhd_pub
->op_mode
;
1953 bcopy(&int_val
, arg
, sizeof(int_val
));
1956 case IOV_SVAL(IOV_CFG80211_OPMODE
): {
1958 bcmerror
= BCME_BADARG
;
1960 dhd_pub
->op_mode
= int_val
;
1964 case IOV_GVAL(IOV_ASSERT_TYPE
):
1965 int_val
= g_assert_type
;
1966 bcopy(&int_val
, arg
, val_size
);
1969 case IOV_SVAL(IOV_ASSERT_TYPE
):
1970 g_assert_type
= (uint32
)int_val
;
1973 #if !defined(MACOSX_DHD)
1974 case IOV_GVAL(IOV_LMTEST
): {
1975 *(uint32
*)arg
= (uint32
)lmtest
;
1979 case IOV_SVAL(IOV_LMTEST
): {
1980 uint32 val
= *(uint32
*)arg
;
1982 bcmerror
= BCME_BADARG
;
1985 DHD_ERROR(("%s: lmtest %s\n",
1986 __FUNCTION__
, (lmtest
== FALSE
)? "OFF" : "ON"));
1992 #ifdef SHOW_LOGTRACE
1993 case IOV_GVAL(IOV_DUMP_TRACE_LOG
): {
1994 trace_buf_info_t
*trace_buf_info
;
1996 trace_buf_info
= (trace_buf_info_t
*)MALLOC(dhd_pub
->osh
,
1997 sizeof(trace_buf_info_t
));
1998 if (trace_buf_info
!= NULL
) {
1999 dhd_get_read_buf_ptr(dhd_pub
, trace_buf_info
);
2000 memcpy((void*)arg
, (void*)trace_buf_info
, sizeof(trace_buf_info_t
));
2001 MFREE(dhd_pub
->osh
, trace_buf_info
, sizeof(trace_buf_info_t
));
2003 DHD_ERROR(("Memory allocation Failed\n"));
2004 bcmerror
= BCME_NOMEM
;
2008 #endif /* SHOW_LOGTRACE */
2009 case IOV_GVAL(IOV_DONGLE_TRAP_TYPE
):
2010 if (dhd_pub
->dongle_trap_occured
)
2011 int_val
= ltoh32(dhd_pub
->last_trap_info
.type
);
2014 bcopy(&int_val
, arg
, val_size
);
2017 case IOV_GVAL(IOV_DONGLE_TRAP_INFO
):
2019 struct bcmstrbuf strbuf
;
2020 bcm_binit(&strbuf
, arg
, len
);
2021 if (dhd_pub
->dongle_trap_occured
== FALSE
) {
2022 bcm_bprintf(&strbuf
, "no trap recorded\n");
2025 dhd_bus_dump_trap_info(dhd_pub
->bus
, &strbuf
);
2029 #if defined(BCMSDIO) || defined(BCMPCIE)
2031 case IOV_GVAL(IOV_BPADDR
):
2036 memcpy(&sdreg
, params
, sizeof(sdreg
));
2038 addr
= sdreg
.offset
;
2041 bcmerror
= dhd_bus_readwrite_bp_addr(dhd_pub
, addr
, size
,
2042 (uint
*)&int_val
, TRUE
);
2044 memcpy(arg
, &int_val
, sizeof(int32
));
2049 case IOV_SVAL(IOV_BPADDR
):
2054 memcpy(&sdreg
, params
, sizeof(sdreg
));
2056 addr
= sdreg
.offset
;
2059 bcmerror
= dhd_bus_readwrite_bp_addr(dhd_pub
, addr
, size
,
2060 (uint
*)&sdreg
.value
,
2065 #endif /* BCMSDIO || BCMPCIE */
2067 case IOV_SVAL(IOV_FLOW_RING_DEBUG
):
2069 bcmerror
= dhd_flow_ring_debug(dhd_pub
, arg
, len
);
2072 #endif /* BCMPCIE */
2073 case IOV_SVAL(IOV_MEM_DEBUG
):
2075 bcmerror
= dhd_mem_debug(dhd_pub
, arg
, len
- 1);
2078 #endif /* DHD_DEBUG */
2079 #if defined(DHD_LOG_DUMP)
2080 case IOV_GVAL(IOV_LOG_DUMP
):
2082 dhd_prot_debug_info_print(dhd_pub
);
2083 dhd_log_dump_trigger(dhd_pub
, CMD_DEFAULT
);
2086 #endif /* DHD_LOG_DUMP */
2088 bcmerror
= BCME_UNSUPPORTED
;
2093 DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__
, actionid
, bcmerror
));
2097 /* Store the status of a connection attempt for later retrieval by an iovar */
2099 dhd_store_conn_status(uint32 event
, uint32 status
, uint32 reason
)
2101 /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
2102 * because an encryption/rsn mismatch results in both events, and
2103 * the important information is in the WLC_E_PRUNE.
2105 if (!(event
== WLC_E_SET_SSID
&& status
== WLC_E_STATUS_FAIL
&&
2106 dhd_conn_event
== WLC_E_PRUNE
)) {
2107 dhd_conn_event
= event
;
2108 dhd_conn_status
= status
;
2109 dhd_conn_reason
= reason
;
2114 dhd_prec_enq(dhd_pub_t
*dhdp
, struct pktq
*q
, void *pkt
, int prec
)
2117 int eprec
= -1; /* precedence to evict from */
2118 bool discard_oldest
;
2120 /* Fast case, precedence queue is not full and we are also not
2121 * exceeding total queue length
2123 if (!pktqprec_full(q
, prec
) && !pktq_full(q
)) {
2124 pktq_penq(q
, prec
, pkt
);
2128 /* Determine precedence from which to evict packet, if any */
2129 if (pktqprec_full(q
, prec
))
2131 else if (pktq_full(q
)) {
2132 p
= pktq_peek_tail(q
, &eprec
);
2134 if (eprec
> prec
|| eprec
< 0)
2138 /* Evict if needed */
2140 /* Detect queueing to unconfigured precedence */
2141 ASSERT(!pktqprec_empty(q
, eprec
));
2142 discard_oldest
= AC_BITMAP_TST(dhdp
->wme_dp
, eprec
);
2143 if (eprec
== prec
&& !discard_oldest
)
2144 return FALSE
; /* refuse newer (incoming) packet */
2145 /* Evict packet according to discard policy */
2146 p
= discard_oldest
? pktq_pdeq(q
, eprec
) : pktq_pdeq_tail(q
, eprec
);
2148 #ifdef DHDTCPACK_SUPPRESS
2149 if (dhd_tcpack_check_xmit(dhdp
, p
) == BCME_ERROR
) {
2150 DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
2151 __FUNCTION__
, __LINE__
));
2152 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
2154 #endif /* DHDTCPACK_SUPPRESS */
2155 PKTFREE(dhdp
->osh
, p
, TRUE
);
2159 p
= pktq_penq(q
, prec
, pkt
);
/*
 * Functions to drop proper pkts from queue:
 *	If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
 *	If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
 *	If can't find pkts matching upper 2 cases, drop first pkt anyway
 * NOTE(review): the extraction dropped interior lines (queue head setup, the
 * scan loop advance, the per-case unlink/free paths); the visible tokens
 * below are preserved byte-for-byte.
 */
2172 dhd_prec_drop_pkts(dhd_pub_t
*dhdp
, struct pktq
*pq
, int prec
, f_droppkt_t fn
)
2174 struct pktq_prec
*q
= NULL
;
2175 void *p
, *prev
= NULL
, *next
= NULL
, *first
= NULL
, *last
= NULL
, *prev_first
= NULL
;
2176 pkt_frag_t frag_info
;
2179 ASSERT(prec
>= 0 && prec
< pq
->num_prec
);
/* Scan the precedence queue classifying each packet's fragment role */
2188 frag_info
= pkt_frag_info(dhdp
->osh
, p
);
2189 if (frag_info
== DHD_PKT_FRAG_NONE
) {
2191 } else if (frag_info
== DHD_PKT_FRAG_FIRST
) {
2193 /* No last frag pkt, use prev as last */
2200 } else if (frag_info
== DHD_PKT_FRAG_LAST
) {
2211 if ((p
== NULL
) || ((frag_info
!= DHD_PKT_FRAG_NONE
) && !(first
&& last
))) {
2212 /* Not found matching pkts, use oldest */
2218 if (frag_info
== DHD_PKT_FRAG_NONE
) {
2233 PKTSETLINK(p
, NULL
);
/* hand the victim to the caller-supplied drop callback */
2236 fn(dhdp
, prec
, p
, TRUE
);
2244 if (prev_first
== NULL
) {
2245 if ((q
->head
= next
) == NULL
)
2248 PKTSETLINK(prev_first
, next
);
2250 q
->tail
= prev_first
;
2257 dhd_iovar_op(dhd_pub_t
*dhd_pub
, const char *name
,
2258 void *params
, int plen
, void *arg
, int len
, bool set
)
2262 const bcm_iovar_t
*vi
= NULL
;
2265 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2270 /* Get MUST have return space */
2271 ASSERT(set
|| (arg
&& len
));
2273 /* Set does NOT take qualifiers */
2274 ASSERT(!set
|| (!params
&& !plen
));
2276 if ((vi
= bcm_iovar_lookup(dhd_iovars
, name
)) == NULL
) {
2277 bcmerror
= BCME_UNSUPPORTED
;
2281 DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__
,
2282 name
, (set
? "set" : "get"), len
, plen
));
2284 /* set up 'params' pointer in case this is a set command so that
2285 * the convenience int and bool code can be common to set and get
2287 if (params
== NULL
) {
2292 if (vi
->type
== IOVT_VOID
)
2294 else if (vi
->type
== IOVT_BUFFER
)
2297 /* all other types are integer sized */
2298 val_size
= sizeof(int);
2300 actionid
= set
? IOV_SVAL(vi
->varid
) : IOV_GVAL(vi
->varid
);
2302 bcmerror
= dhd_doiovar(dhd_pub
, vi
, actionid
, name
, params
, plen
, arg
, len
, val_size
);
/* Top-level DHD ioctl entry: handles DHD_GET_MAGIC/DHD_GET_VERSION directly,
 * then routes DHD_GET_VAR/DHD_SET_VAR through the generic table
 * (dhd_iovar_op), the protocol module and finally the bus module, while
 * guarding against bus-down/suspend states under the general lock and the
 * dhdiovar lock.
 * NOTE(review): the extraction dropped interior lines (switch/case framing,
 * goto targets, arglen adjustment, returns); the visible tokens below are
 * preserved byte-for-byte.
 */
2309 dhd_ioctl(dhd_pub_t
* dhd_pub
, dhd_ioctl_t
*ioc
, void *buf
, uint buflen
)
2312 unsigned long flags
;
2314 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2320 dhd_os_dhdiovar_lock(dhd_pub
);
2323 if (buflen
< sizeof(int))
2324 bcmerror
= BCME_BUFTOOSHORT
;
2326 *(int*)buf
= DHD_IOCTL_MAGIC
;
2329 case DHD_GET_VERSION
:
2330 if (buflen
< sizeof(int))
2331 bcmerror
= BCME_BUFTOOSHORT
;
2333 *(int*)buf
= DHD_IOCTL_VERSION
;
/* Refuse most iovars while the bus is down, except "devreset" and the
 * pre-FW-download window.
 */
2342 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
2343 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub
) &&
2344 bcmstricmp((char *)buf
, "devreset")) {
2345 /* In platforms like FC19, the FW download is done via IOCTL
2346 * and should not return error for IOCTLs fired before FW
2349 if (dhd_fw_download_status(dhd_pub
)) {
2350 DHD_ERROR(("%s: returning as busstate=%d\n",
2351 __FUNCTION__
, dhd_pub
->busstate
));
2352 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
2353 dhd_os_dhdiovar_unlock(dhd_pub
);
2357 DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub
);
2358 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
2360 #ifdef DHD_PCIE_RUNTIMEPM
2361 dhdpcie_runtime_bus_wake(dhd_pub
, TRUE
, dhd_ioctl
);
2362 #endif /* DHD_PCIE_RUNTIMEPM */
2364 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
2365 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub
)) {
2366 /* If Suspend/Resume is tested via pcie_suspend IOVAR
2367 * then continue to execute the IOVAR, return from here for
2368 * other IOVARs, also include pciecfgreg and devreset to go
2371 if (bcmstricmp((char *)buf
, "pcie_suspend") &&
2372 bcmstricmp((char *)buf
, "pciecfgreg") &&
2373 bcmstricmp((char *)buf
, "devreset") &&
2374 bcmstricmp((char *)buf
, "sdio_suspend")) {
2375 DHD_ERROR(("%s: bus is in suspend(%d)"
2376 "or suspending(0x%x) state\n",
2377 __FUNCTION__
, dhd_pub
->busstate
,
2378 dhd_pub
->dhd_bus_busy_state
));
2379 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub
);
2380 dhd_os_busbusy_wake(dhd_pub
);
2381 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
2382 dhd_os_dhdiovar_unlock(dhd_pub
);
2386 /* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup,
2387 * which will wait for all the busy contexts to get over for
2388 * particular time and call ASSERT if timeout happens. As during
2389 * devreset ioctal, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR,
2390 * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
2391 * not used in Production platforms but only used in FC19 setups.
2393 if (!bcmstricmp((char *)buf
, "devreset")) {
2394 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub
);
2396 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
2398 /* scan past the name to any arguments */
2399 for (arg
= buf
, arglen
= buflen
; *arg
&& arglen
; arg
++, arglen
--)
2403 bcmerror
= BCME_BUFTOOSHORT
;
2407 /* account for the NUL terminator */
2409 /* call with the appropriate arguments */
2410 if (ioc
->cmd
== DHD_GET_VAR
) {
2411 bcmerror
= dhd_iovar_op(dhd_pub
, buf
, arg
, arglen
,
2412 buf
, buflen
, IOV_GET
);
2414 bcmerror
= dhd_iovar_op(dhd_pub
, buf
, NULL
, 0,
2415 arg
, arglen
, IOV_SET
);
2417 if (bcmerror
!= BCME_UNSUPPORTED
) {
2421 /* not in generic table, try protocol module */
2422 if (ioc
->cmd
== DHD_GET_VAR
) {
2423 bcmerror
= dhd_prot_iovar_op(dhd_pub
, buf
, arg
,
2424 arglen
, buf
, buflen
, IOV_GET
);
2426 bcmerror
= dhd_prot_iovar_op(dhd_pub
, buf
,
2427 NULL
, 0, arg
, arglen
, IOV_SET
);
2429 if (bcmerror
!= BCME_UNSUPPORTED
) {
2433 /* if still not found, try bus module */
2434 if (ioc
->cmd
== DHD_GET_VAR
) {
2435 bcmerror
= dhd_bus_iovar_op(dhd_pub
, buf
,
2436 arg
, arglen
, buf
, buflen
, IOV_GET
);
2438 bcmerror
= dhd_bus_iovar_op(dhd_pub
, buf
,
2439 NULL
, 0, arg
, arglen
, IOV_SET
);
2441 if (bcmerror
!= BCME_UNSUPPORTED
) {
2449 bcmerror
= BCME_UNSUPPORTED
;
2451 dhd_os_dhdiovar_unlock(dhd_pub
);
/* Common exit: drop the busy flag, wake waiters and release the locks */
2455 DHD_LINUX_GENERAL_LOCK(dhd_pub
, flags
);
2456 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub
);
2457 dhd_os_busbusy_wake(dhd_pub
);
2458 DHD_LINUX_GENERAL_UNLOCK(dhd_pub
, flags
);
2459 dhd_os_dhdiovar_unlock(dhd_pub
);
2465 #if defined(SHOW_EVENTS) && defined(SUPPORT_EVT_SDB_LOG)
2466 #define SDB_ENABLE_AP 0x01
2467 #define SDB_ENABLE_P2P 0x02
2468 #define SDB_IS_AP(i) (i & SDB_ENABLE_AP)
2469 #define SDB_IS_P2P(i) (i & SDB_ENABLE_P2P)
2471 #define WLC_RSDB_MODE_AUTO_MASK 0x80
2472 #define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
/* Pretty-print a WLC SDB (single/dual band) transition event: validates the
 * event structure version, reports the RSDB mode, then logs each enabled
 * bsscfg's interface role (P2P GO/GC, SoftAP, Station), SSID and chanspec.
 * NOTE(review): the extraction dropped interior lines (early returns, the
 * 'int i;' declaration, closing braces); the visible tokens below are
 * preserved byte-for-byte.
 */
2475 wl_event_sdb_transition_print(void *event_data
, const char *event_name
)
2477 wl_event_sdb_trans_t
*rdata
;
2478 wl_event_sdb_data_t
*value
;
2479 char *sta_mode
= "";
2481 char chanbuf
[CHANSPEC_STR_LEN
];
2482 rdata
= (wl_event_sdb_trans_t
*)event_data
;
2485 DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__
));
2489 if (rdata
->version
!= WL_EVENT_SDB_TRANSITION_VER
) {
2490 DHD_ERROR(("%s: invalid Version(%d)\n",
2491 __FUNCTION__
, rdata
->version
));
2495 if (rdata
->rsdb_mode
& WLC_RSDB_MODE_AUTO_MASK
) {
2496 DHD_ERROR((" RSDB Mode : Auto, "));
2498 DHD_ERROR(("Current RSDB Mode : %d\n", WLC_RSDB_EXTRACT_MODE(rdata
->rsdb_mode
)));
/* One log line per enabled bsscfg */
2500 for (i
= 0; i
< rdata
->enable_bsscfg
; i
++) {
2501 value
= &rdata
->values
[i
];
2503 if (SDB_IS_P2P(value
->is_iftype
)) {
2504 sta_mode
= SDB_IS_AP(value
->is_iftype
) ? "P2P_GO" : "P2P_GC";
2506 sta_mode
= SDB_IS_AP(value
->is_iftype
) ? "SoftAP" : "Station";
2509 wf_chspec_ntoa_ex(value
->chanspec
, chanbuf
);
2510 DHD_ERROR((" wlc%d <%s> \"%s\", %s(0x%04x)\n",
2511 value
->wlunit
, sta_mode
,
2512 value
->ssidbuf
, chanbuf
, value
->chanspec
));
2518 wl_show_host_event(dhd_pub_t
*dhd_pub
, wl_event_msg_t
*event
, void *event_data
,
2519 void *raw_event_ptr
, char *eventmask
)
2521 uint i
, status
, reason
;
2522 bool group
= FALSE
, flush_txq
= FALSE
, link
= FALSE
;
2523 bool host_data
= FALSE
; /* prints event data after the case when set */
2524 const char *auth_str
;
2525 const char *event_name
;
2527 char err_msg
[256], eabuf
[ETHER_ADDR_STR_LEN
];
2528 uint event_type
, flags
, auth_type
, datalen
;
2530 event_type
= ntoh32(event
->event_type
);
2531 flags
= ntoh16(event
->flags
);
2532 status
= ntoh32(event
->status
);
2533 reason
= ntoh32(event
->reason
);
2534 BCM_REFERENCE(reason
);
2535 auth_type
= ntoh32(event
->auth_type
);
2536 datalen
= ntoh32(event
->datalen
);
2538 /* debug dump of event messages */
2539 snprintf(eabuf
, sizeof(eabuf
), MACDBG
, MAC2STRDBG(event
->addr
.octet
));
2541 event_name
= bcmevent_get_name(event_type
);
2542 BCM_REFERENCE(event_name
);
2544 if (flags
& WLC_EVENT_MSG_LINK
)
2546 if (flags
& WLC_EVENT_MSG_GROUP
)
2548 if (flags
& WLC_EVENT_MSG_FLUSHTXQ
)
2551 switch (event_type
) {
2554 case WLC_E_DISASSOC
:
2555 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
2558 case WLC_E_ASSOC_IND
:
2559 case WLC_E_REASSOC_IND
:
2561 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
2567 if (status
== WLC_E_STATUS_SUCCESS
) {
2568 DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name
, eabuf
));
2569 } else if (status
== WLC_E_STATUS_TIMEOUT
) {
2570 DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name
, eabuf
));
2571 } else if (status
== WLC_E_STATUS_FAIL
) {
2572 DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
2573 event_name
, eabuf
, (int)reason
));
2575 DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
2576 event_name
, eabuf
, (int)status
));
2581 case WLC_E_DEAUTH_IND
:
2582 case WLC_E_DISASSOC_IND
:
2583 DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name
, eabuf
, (int)reason
));
2587 case WLC_E_AUTH_IND
:
2588 if (auth_type
== DOT11_OPEN_SYSTEM
)
2589 auth_str
= "Open System";
2590 else if (auth_type
== DOT11_SHARED_KEY
)
2591 auth_str
= "Shared Key";
2593 snprintf(err_msg
, sizeof(err_msg
), "AUTH unknown: %d", (int)auth_type
);
2597 if (event_type
== WLC_E_AUTH_IND
) {
2598 DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name
, eabuf
, auth_str
));
2599 } else if (status
== WLC_E_STATUS_SUCCESS
) {
2600 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
2601 event_name
, eabuf
, auth_str
));
2602 } else if (status
== WLC_E_STATUS_TIMEOUT
) {
2603 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
2604 event_name
, eabuf
, auth_str
));
2605 } else if (status
== WLC_E_STATUS_FAIL
) {
2606 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
2607 event_name
, eabuf
, auth_str
, (int)reason
));
2609 BCM_REFERENCE(auth_str
);
2615 case WLC_E_SET_SSID
:
2616 if (status
== WLC_E_STATUS_SUCCESS
) {
2617 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
2619 if (status
== WLC_E_STATUS_FAIL
) {
2620 DHD_EVENT(("MACEVENT: %s, failed\n", event_name
));
2621 } else if (status
== WLC_E_STATUS_NO_NETWORKS
) {
2622 DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name
));
2624 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
2625 event_name
, (int)status
));
2630 case WLC_E_BEACON_RX
:
2631 if (status
== WLC_E_STATUS_SUCCESS
) {
2632 DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name
));
2633 } else if (status
== WLC_E_STATUS_FAIL
) {
2634 DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name
));
2636 DHD_EVENT(("MACEVENT: %s, status %d\n", event_name
, status
));
2641 DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n",
2642 event_name
, link
?"UP":"DOWN", flags
, status
));
2643 BCM_REFERENCE(link
);
2646 case WLC_E_MIC_ERROR
:
2647 DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
2648 event_name
, eabuf
, group
, flush_txq
));
2649 BCM_REFERENCE(group
);
2650 BCM_REFERENCE(flush_txq
);
2653 case WLC_E_ICV_ERROR
:
2654 case WLC_E_UNICAST_DECODE_ERROR
:
2655 case WLC_E_MULTICAST_DECODE_ERROR
:
2656 DHD_EVENT(("MACEVENT: %s, MAC %s\n",
2657 event_name
, eabuf
));
2661 DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name
, eabuf
, status
));
2664 case WLC_E_ASSOC_REQ_IE
:
2665 case WLC_E_ASSOC_RESP_IE
:
2666 case WLC_E_PMKID_CACHE
:
2667 DHD_EVENT(("MACEVENT: %s\n", event_name
));
2670 case WLC_E_SCAN_COMPLETE
:
2671 DHD_EVENT(("MACEVENT: %s\n", event_name
));
2673 case WLC_E_RSSI_LQM
:
2674 case WLC_E_PFN_NET_FOUND
:
2675 case WLC_E_PFN_NET_LOST
:
2676 case WLC_E_PFN_SCAN_COMPLETE
:
2677 case WLC_E_PFN_SCAN_NONE
:
2678 case WLC_E_PFN_SCAN_ALLGONE
:
2679 case WLC_E_PFN_GSCAN_FULL_RESULT
:
2680 case WLC_E_PFN_SSID_EXT
:
2681 DHD_EVENT(("PNOEVENT: %s\n", event_name
));
2684 case WLC_E_PFN_SCAN_BACKOFF
:
2685 case WLC_E_PFN_BSSID_SCAN_BACKOFF
:
2686 DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
2687 event_name
, (int)status
, (int)reason
));
2692 DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
2693 event_name
, (int)status
, (int)reason
));
2696 #ifdef WIFI_ACT_FRAME
2697 case WLC_E_ACTION_FRAME
:
2698 DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name
, eabuf
));
2700 #endif /* WIFI_ACT_FRAME */
2702 #ifdef SHOW_LOGTRACE
2705 dhd_dbg_trace_evnt_handler(dhd_pub
, event_data
, raw_event_ptr
, datalen
);
2708 #endif /* SHOW_LOGTRACE */
2711 DHD_EVENT(("MACEVENT: %s %d\n", event_name
, ntoh32(*((int *)event_data
))));
2714 case WLC_E_SERVICE_FOUND
:
2715 case WLC_E_P2PO_ADD_DEVICE
:
2716 case WLC_E_P2PO_DEL_DEVICE
:
2717 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
2720 #ifdef BT_WIFI_HANDOBER
2721 case WLC_E_BT_WIFI_HANDOVER_REQ
:
2722 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name
, eabuf
));
2726 case WLC_E_CCA_CHAN_QUAL
:
2728 cca_chan_qual_event_t
*cca_event
= (cca_chan_qual_event_t
*)event_data
;
2729 if (cca_event
->id
== WL_CHAN_QUAL_FULL_CCA
) {
2731 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2732 " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
2734 event_name
, event_type
, eabuf
, (int)status
,
2735 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
2736 cca_event
->cca_busy_ext
.duration
,
2737 cca_event
->cca_busy_ext
.congest_ibss
,
2738 cca_event
->cca_busy_ext
.congest_obss
,
2739 cca_event
->cca_busy_ext
.interference
,
2740 cca_event
->cca_busy_ext
.timestamp
));
2741 } else if (cca_event
->id
== WL_CHAN_QUAL_CCA
) {
2743 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2744 " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
2745 event_name
, event_type
, eabuf
, (int)status
,
2746 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
2747 cca_event
->cca_busy
.duration
,
2748 cca_event
->cca_busy
.congest
,
2749 cca_event
->cca_busy
.timestamp
));
2750 } else if ((cca_event
->id
== WL_CHAN_QUAL_NF
) ||
2751 (cca_event
->id
== WL_CHAN_QUAL_NF_LTE
)) {
2753 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2754 " channel 0x%02x (NF[%d] %ddB)\n",
2755 event_name
, event_type
, eabuf
, (int)status
,
2756 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
2757 cca_event
->id
, cca_event
->noise
));
2760 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2761 " channel 0x%02x (unknown ID %d)\n",
2762 event_name
, event_type
, eabuf
, (int)status
,
2763 (int)reason
, (int)auth_type
, cca_event
->chanspec
,
2768 case WLC_E_ESCAN_RESULT
:
2770 wl_escan_result_v2_t
*escan_result
=
2771 (wl_escan_result_v2_t
*)event_data
;
2772 BCM_REFERENCE(escan_result
);
2773 if ((status
== WLC_E_STATUS_SUCCESS
) || (status
== WLC_E_STATUS_ABORT
)) {
2774 DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
2775 event_name
, event_type
, (int)status
,
2776 dtoh16(escan_result
->sync_id
)));
2778 DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
2779 event_name
, event_type
, eabuf
, (int)status
));
2786 struct wl_event_data_if
*ifevent
= (struct wl_event_data_if
*)event_data
;
2787 BCM_REFERENCE(ifevent
);
2789 DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
2790 event_name
, ifevent
->opcode
, ifevent
->ifidx
, ifevent
->role
));
2793 #ifdef SHOW_LOGTRACE
2796 wl_mschdbg_event_handler(dhd_pub
, raw_event_ptr
, reason
, event_data
, datalen
);
2799 #endif /* SHOW_LOGTRACE */
2801 case WLC_E_PSK_AUTH
:
2802 DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
2803 event_name
, eabuf
, status
, reason
));
2805 case WLC_E_AGGR_EVENT
:
2807 event_aggr_data_t
*aggrbuf
= event_data
;
2809 uint8
*data
= aggrbuf
->data
;
2810 DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
2811 event_name
, aggrbuf
->num_events
, aggrbuf
->len
));
2812 for (j
= 0; j
< aggrbuf
->num_events
; j
++)
2814 wl_event_msg_t
* sub_event
= (wl_event_msg_t
*)data
;
2815 if (len
> aggrbuf
->len
) {
2816 DHD_ERROR(("%s: Aggr events corrupted!",
2820 DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event
->event_type
)));
2821 len
+= ALIGN_SIZE((ntoh32(sub_event
->datalen
) +
2822 sizeof(wl_event_msg_t
)), sizeof(uint64
));
2823 buf
= (uchar
*)(data
+ sizeof(wl_event_msg_t
));
2825 DHD_EVENT((" data (%d) : ", ntoh32(sub_event
->datalen
)));
2826 for (i
= 0; i
< ntoh32(sub_event
->datalen
); i
++) {
2827 DHD_EVENT((" 0x%02x ", buf
[i
]));
2829 data
= aggrbuf
->data
+ len
;
2834 case WLC_E_NAN_CRITICAL
:
2836 DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name
, reason
));
2839 case WLC_E_NAN_NON_CRITICAL
:
2841 DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name
, reason
));
2846 rpsnoa_stats_t
*stat
= event_data
;
2847 if (datalen
== sizeof(*stat
)) {
2848 DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name
,
2849 (stat
->band
== WLC_BAND_2G
) ? "2G":"5G",
2850 stat
->state
, stat
->last_pps
));
2856 DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name
, reason
));
2859 #ifdef SUPPORT_EVT_SDB_LOG
2860 case WLC_E_SDB_TRANSITION
:
2862 DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
2863 event_name
, (int)status
, (int)reason
));
2864 wl_event_sdb_transition_print(event_data
, event_name
);
2867 #endif /* SUPPORT_EVT_SDB_LOG */
2869 DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
2870 event_name
, event_type
, eabuf
, (int)status
, (int)reason
,
2875 /* show any appended data if message level is set to bytes or host_data is set */
2876 if ((DHD_BYTES_ON() || (host_data
== TRUE
)) && DHD_EVENT_ON() && datalen
) {
2877 buf
= (uchar
*) event_data
;
2879 DHD_EVENT((" data (%d) : ", datalen
));
2880 for (i
= 0; i
< datalen
; i
++) {
2881 DHD_EVENT((" 0x%02x ", buf
[i
]));
2885 } /* wl_show_host_event */
2886 #endif /* SHOW_EVENTS */
2888 #ifdef DNGL_EVENT_SUPPORT
2889 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
2891 dngl_host_event(dhd_pub_t
*dhdp
, void *pktdata
, bcm_dngl_event_msg_t
*dngl_event
, size_t pktlen
)
2893 bcm_dngl_event_t
*pvt_data
= (bcm_dngl_event_t
*)pktdata
;
2895 dngl_host_event_process(dhdp
, pvt_data
, dngl_event
, pktlen
);
2900 dngl_host_event_process(dhd_pub_t
*dhdp
, bcm_dngl_event_t
*event
,
2901 bcm_dngl_event_msg_t
*dngl_event
, size_t pktlen
)
2903 uint8
*p
= (uint8
*)(event
+ 1);
2904 uint16 type
= ntoh16_ua((void *)&dngl_event
->event_type
);
2905 uint16 datalen
= ntoh16_ua((void *)&dngl_event
->datalen
);
2906 uint16 version
= ntoh16_ua((void *)&dngl_event
->version
);
2908 DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version
, type
, datalen
));
2909 if (datalen
> (pktlen
- sizeof(bcm_dngl_event_t
) + ETHER_TYPE_LEN
)) {
2912 if (version
!= BCM_DNGL_EVENT_MSG_VERSION
) {
2913 DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__
,
2914 version
, BCM_DNGL_EVENT_MSG_VERSION
));
2918 case DNGL_E_SOCRAM_IND
:
2920 bcm_dngl_socramind_t
*socramind_ptr
= (bcm_dngl_socramind_t
*)p
;
2921 uint16 tag
= ltoh32(socramind_ptr
->tag
);
2922 uint16 taglen
= ltoh32(socramind_ptr
->length
);
2923 p
= (uint8
*)socramind_ptr
->value
;
2924 DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag
, taglen
, datalen
));
2926 case SOCRAM_IND_ASSERT_TAG
:
2929 * The payload consists of -
2930 * null terminated function name padded till 32 bit boundary +
2931 * Line number - (32 bits)
2932 * Caller address (32 bits)
2934 char *fnname
= (char *)p
;
2935 if (datalen
< (ROUNDUP(strlen(fnname
) + 1, sizeof(uint32
)) +
2936 sizeof(uint32
) * 2)) {
2937 DHD_ERROR(("Wrong length:%d\n", datalen
));
2940 DHD_EVENT(("ASSRT Function:%s ", p
));
2941 p
+= ROUNDUP(strlen(p
) + 1, sizeof(uint32
));
2942 DHD_EVENT(("Line:%d ", *(uint32
*)p
));
2943 p
+= sizeof(uint32
);
2944 DHD_EVENT(("Caller Addr:0x%x\n", *(uint32
*)p
));
2947 case SOCRAM_IND_TAG_HEALTH_CHECK
:
2949 bcm_dngl_healthcheck_t
*dngl_hc
= (bcm_dngl_healthcheck_t
*)p
;
2950 DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
2951 ltoh32(dngl_hc
->top_module_tag
),
2952 ltoh32(dngl_hc
->top_module_len
),
2954 if (DHD_EVENT_ON()) {
2955 prhex("HEALTHCHECK", p
, MIN(ltoh32(dngl_hc
->top_module_len
)
2956 + BCM_XTLV_HDR_SIZE
, datalen
));
2959 memset(dhdp
->health_chk_event_data
, 0, HEALTH_CHK_BUF_SIZE
);
2960 memcpy(dhdp
->health_chk_event_data
, p
,
2961 MIN(ltoh32(dngl_hc
->top_module_len
),
2962 HEALTH_CHK_BUF_SIZE
));
2963 #endif /* DHD_LOG_DUMP */
2964 p
= (uint8
*)dngl_hc
->value
;
2966 switch (ltoh32(dngl_hc
->top_module_tag
)) {
2967 case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE
:
2969 bcm_dngl_pcie_hc_t
*pcie_hc
;
2970 pcie_hc
= (bcm_dngl_pcie_hc_t
*)p
;
2971 BCM_REFERENCE(pcie_hc
);
2972 if (ltoh32(dngl_hc
->top_module_len
) <
2973 sizeof(bcm_dngl_pcie_hc_t
)) {
2974 DHD_ERROR(("Wrong length:%d\n",
2975 ltoh32(dngl_hc
->top_module_len
)));
2978 DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
2980 ltoh32(pcie_hc
->version
),
2981 ltoh32(pcie_hc
->pcie_err_ind_type
),
2982 ltoh32(pcie_hc
->pcie_flag
),
2983 ltoh32(pcie_hc
->pcie_control_reg
)));
2986 #ifdef HCHK_COMMON_SW_EVENT
2987 case HCHK_SW_ENTITY_WL_PRIMARY
:
2988 case HCHK_SW_ENTITY_WL_SECONDARY
:
2990 bcm_xtlv_t
*wl_hc
= (bcm_xtlv_t
*)p
;
2992 if (ltoh32(dngl_hc
->top_module_len
) <
2993 sizeof(bcm_xtlv_t
)) {
2994 DHD_ERROR(("WL SW HC Wrong length:%d\n",
2995 ltoh32(dngl_hc
->top_module_len
)));
2998 BCM_REFERENCE(wl_hc
);
2999 DHD_EVENT(("WL SW HC type %d len %d",
3000 ltoh16(wl_hc
->id
), ltoh16(wl_hc
->len
)));
3003 #endif /* HCHK_COMMON_SW_EVENT */
3006 DHD_ERROR(("%s:Unknown module TAG:%d\n",
3008 ltoh32(dngl_hc
->top_module_tag
)));
3015 DHD_ERROR(("%s:Unknown TAG", __FUNCTION__
));
3016 if (p
&& DHD_EVENT_ON()) {
3017 prhex("SOCRAMIND", p
, taglen
);
3024 DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__
, type
));
3025 if (p
&& DHD_EVENT_ON()) {
3026 prhex("SOCRAMIND", p
, datalen
);
3030 #ifdef DHD_FW_COREDUMP
3031 if (dhdp
->memdump_enabled
) {
3032 dhdp
->memdump_type
= DUMP_TYPE_DONGLE_HOST_EVENT
;
3033 if (dhd_socram_dump(dhdp
->bus
)) {
3034 DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__
));
3038 dhd_dbg_send_urgent_evt(dhdp
, p
, datalen
);
3039 #endif /* DHD_FW_COREDUMP */
3042 #endif /* DNGL_EVENT_SUPPORT */
3044 /* Stub for now. Will become real function as soon as shim
3045 * is being integrated to Android, Linux etc.
3048 wl_event_process_default(wl_event_msg_t
*event
, struct wl_evt_pport
*evt_pport
)
3054 wl_event_process(dhd_pub_t
*dhd_pub
, int *ifidx
, void *pktdata
,
3055 uint pktlen
, void **data_ptr
, void *raw_event
)
3057 wl_evt_pport_t evt_pport
;
3058 wl_event_msg_t event
;
3059 bcm_event_msg_u_t evu
;
3062 /* make sure it is a BRCM event pkt and record event data */
3063 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
3064 if (ret
!= BCME_OK
) {
3068 memcpy(&event
, &evu
.event
, sizeof(wl_event_msg_t
));
3070 /* convert event from network order to host order */
3071 wl_event_to_host_order(&event
);
3073 /* record event params to evt_pport */
3074 evt_pport
.dhd_pub
= dhd_pub
;
3075 evt_pport
.ifidx
= ifidx
;
3076 evt_pport
.pktdata
= pktdata
;
3077 evt_pport
.data_ptr
= data_ptr
;
3078 evt_pport
.raw_event
= raw_event
;
3079 evt_pport
.data_len
= pktlen
;
3081 ret
= wl_event_process_default(&event
, &evt_pport
);
3084 } /* wl_event_process */
3086 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
3088 wl_host_event_get_data(void *pktdata
, uint pktlen
, bcm_event_msg_u_t
*evu
)
3092 ret
= is_wlc_event_frame(pktdata
, pktlen
, 0, evu
);
3093 if (ret
!= BCME_OK
) {
3094 DHD_ERROR(("%s: Invalid event frame, err = %d\n",
3095 __FUNCTION__
, ret
));
3102 wl_process_host_event(dhd_pub_t
*dhd_pub
, int *ifidx
, void *pktdata
, uint pktlen
,
3103 wl_event_msg_t
*event
, void **data_ptr
, void *raw_event
)
3105 bcm_event_t
*pvt_data
= (bcm_event_t
*)pktdata
;
3106 bcm_event_msg_u_t evu
;
3108 uint32 type
, status
, datalen
, reason
;
3114 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
3115 if (ret
!= BCME_OK
) {
3119 usr_subtype
= ntoh16_ua((void *)&pvt_data
->bcm_hdr
.usr_subtype
);
3120 switch (usr_subtype
) {
3121 case BCMILCP_BCM_SUBTYPE_EVENT
:
3122 memcpy(event
, &evu
.event
, sizeof(wl_event_msg_t
));
3123 *data_ptr
= &pvt_data
[1];
3125 case BCMILCP_BCM_SUBTYPE_DNGLEVENT
:
3126 #ifdef DNGL_EVENT_SUPPORT
3127 /* If it is a DNGL event process it first */
3128 if (dngl_host_event(dhd_pub
, pktdata
, &evu
.dngl_event
, pktlen
) == BCME_OK
) {
3130 * Return error purposely to prevent DNGL event being processed
3135 #endif /* DNGL_EVENT_SUPPORT */
3136 return BCME_NOTFOUND
;
3138 return BCME_NOTFOUND
;
3141 /* start wl_event_msg process */
3142 event_data
= *data_ptr
;
3143 type
= ntoh32_ua((void *)&event
->event_type
);
3144 flags
= ntoh16_ua((void *)&event
->flags
);
3145 status
= ntoh32_ua((void *)&event
->status
);
3146 reason
= ntoh32_ua((void *)&event
->reason
);
3147 datalen
= ntoh32_ua((void *)&event
->datalen
);
3148 evlen
= datalen
+ sizeof(bcm_event_t
);
3151 #ifdef PROP_TXSTATUS
3152 case WLC_E_FIFO_CREDIT_MAP
:
3153 dhd_wlfc_enable(dhd_pub
);
3154 dhd_wlfc_FIFOcreditmap_event(dhd_pub
, event_data
);
3155 WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
3156 "(%d,%d,%d,%d),(%d),(%d)\n", event_data
[0], event_data
[1],
3158 event_data
[3], event_data
[4], event_data
[5]));
3161 case WLC_E_BCMC_CREDIT_SUPPORT
:
3162 dhd_wlfc_BCMCCredit_support_event(dhd_pub
);
3165 case WLC_E_ALLOW_CREDIT_BORROW
:
3166 dhd_wlfc_disable_credit_borrow_event(dhd_pub
, event_data
);
3168 #endif /* LIMIT_BORROW */
3169 #endif /* PROP_TXSTATUS */
3174 wl_ulp_event_t
*ulp_evt
= (wl_ulp_event_t
*)event_data
;
3176 /* Flush and disable console messages */
3177 if (ulp_evt
->ulp_dongle_action
== WL_ULP_DISABLE_CONSOLE
) {
3178 #ifdef DHD_ULP_NOT_USED
3179 dhd_bus_ulp_disable_console(dhd_pub
);
3180 #endif /* DHD_ULP_NOT_USED */
3182 if (ulp_evt
->ulp_dongle_action
== WL_ULP_UCODE_DOWNLOAD
) {
3183 dhd_bus_ucode_download(dhd_pub
->bus
);
3186 #endif /* DHD_ULP */
3188 case WLC_E_TDLS_PEER_EVENT
:
3189 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
3191 dhd_tdls_event_handler(dhd_pub
, event
);
3198 struct wl_event_data_if
*ifevent
= (struct wl_event_data_if
*)event_data
;
3200 /* Ignore the event if NOIF is set */
3201 if (ifevent
->reserved
& WLC_E_IF_FLAGS_BSSCFG_NOIF
) {
3202 DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
3203 return (BCME_UNSUPPORTED
);
3205 #ifdef PCIE_FULL_DONGLE
3206 dhd_update_interface_flow_info(dhd_pub
, ifevent
->ifidx
,
3207 ifevent
->opcode
, ifevent
->role
);
3209 #ifdef PROP_TXSTATUS
3211 uint8
* ea
= pvt_data
->eth
.ether_dhost
;
3212 WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG
"]\n"
3214 ((ifevent
->opcode
== WLC_E_IF_ADD
) ? "ADD":"DEL"),
3215 ((ifevent
->role
== 0) ? "STA":"AP "),
3219 if (ifevent
->opcode
== WLC_E_IF_CHANGE
)
3220 dhd_wlfc_interface_event(dhd_pub
,
3221 eWLFC_MAC_ENTRY_ACTION_UPDATE
,
3222 ifevent
->ifidx
, ifevent
->role
, ea
);
3224 dhd_wlfc_interface_event(dhd_pub
,
3225 ((ifevent
->opcode
== WLC_E_IF_ADD
) ?
3226 eWLFC_MAC_ENTRY_ACTION_ADD
: eWLFC_MAC_ENTRY_ACTION_DEL
),
3227 ifevent
->ifidx
, ifevent
->role
, ea
);
3229 /* dhd already has created an interface by default, for 0 */
3230 if (ifevent
->ifidx
== 0)
3233 #endif /* PROP_TXSTATUS */
3235 if (ifevent
->ifidx
> 0 && ifevent
->ifidx
< DHD_MAX_IFS
) {
3236 if (ifevent
->opcode
== WLC_E_IF_ADD
) {
3237 if (dhd_event_ifadd(dhd_pub
->info
, ifevent
, event
->ifname
,
3238 event
->addr
.octet
)) {
3240 DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
3241 __FUNCTION__
, ifevent
->ifidx
, event
->ifname
));
3242 return (BCME_ERROR
);
3244 } else if (ifevent
->opcode
== WLC_E_IF_DEL
) {
3245 #ifdef PCIE_FULL_DONGLE
3246 /* Delete flowrings unconditionally for i/f delete */
3247 dhd_flow_rings_delete(dhd_pub
, (uint8
)dhd_ifname2idx(dhd_pub
->info
,
3249 #endif /* PCIE_FULL_DONGLE */
3250 dhd_event_ifdel(dhd_pub
->info
, ifevent
, event
->ifname
,
3252 } else if (ifevent
->opcode
== WLC_E_IF_CHANGE
) {
3254 dhd_event_ifchange(dhd_pub
->info
, ifevent
, event
->ifname
,
3256 #endif /* WL_CFG80211 */
3259 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
3260 DHD_INFO(("%s: Invalid ifidx %d for %s\n",
3261 __FUNCTION__
, ifevent
->ifidx
, event
->ifname
));
3262 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
3264 /* send up the if event: btamp user needs it */
3265 *ifidx
= dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
3266 /* push up to external supp/auth */
3267 dhd_event(dhd_pub
->info
, (char *)pvt_data
, evlen
, *ifidx
);
3271 case WLC_E_NDIS_LINK
:
3273 case WLC_E_PFN_NET_FOUND
:
3274 case WLC_E_PFN_SCAN_ALLGONE
: /* share with WLC_E_PFN_BSSID_NET_LOST */
3275 case WLC_E_PFN_NET_LOST
:
3277 #if defined(PNO_SUPPORT)
3278 case WLC_E_PFN_BSSID_NET_FOUND
:
3279 case WLC_E_PFN_BEST_BATCHING
:
3280 dhd_pno_event_handler(dhd_pub
, event
, (void *)event_data
);
3283 #if defined(RTT_SUPPORT)
3285 dhd_rtt_event_handler(dhd_pub
, event
, (void *)event_data
);
3287 #endif /* RTT_SUPPORT */
3288 /* These are what external supplicant/authenticator wants */
3289 case WLC_E_ASSOC_IND
:
3290 case WLC_E_AUTH_IND
:
3291 case WLC_E_REASSOC_IND
:
3292 dhd_findadd_sta(dhd_pub
,
3293 dhd_ifname2idx(dhd_pub
->info
, event
->ifname
),
3294 &event
->addr
.octet
);
3296 #if defined(DHD_FW_COREDUMP)
3297 case WLC_E_PSM_WATCHDOG
:
3298 DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__
));
3299 if (dhd_socram_dump(dhd_pub
->bus
) != BCME_OK
) {
3300 DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__
));
3304 case WLC_E_NATOE_NFCT
:
3306 DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__
));
3307 dhd_natoe_ct_event(dhd_pub
, event_data
);
3308 #endif /* WL_NATOE */
3311 case WLC_E_SLOTTED_BSS_PEER_OP
:
3312 DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
3313 "" MACDBG
", status = %d\n",
3314 __FUNCTION__
, MAC2STRDBG(event
->addr
.octet
), status
));
3315 if (status
== WLC_E_STATUS_SLOTTED_PEER_ADD
) {
3316 dhd_findadd_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
,
3317 event
->ifname
), &event
->addr
.octet
);
3318 } else if (status
== WLC_E_STATUS_SLOTTED_PEER_DEL
) {
3319 uint8 ifindex
= (uint8
)dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
3320 BCM_REFERENCE(ifindex
);
3321 dhd_del_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
,
3322 event
->ifname
), &event
->addr
.octet
);
3323 #ifdef PCIE_FULL_DONGLE
3324 dhd_flow_rings_delete_for_peer(dhd_pub
, ifindex
,
3325 (char *)&event
->addr
.octet
[0]);
3328 DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
3329 __FUNCTION__
, status
));
3334 #ifdef PCIE_FULL_DONGLE
3335 if (dhd_update_interface_link_status(dhd_pub
, (uint8
)dhd_ifname2idx(dhd_pub
->info
,
3336 event
->ifname
), (uint8
)flags
) != BCME_OK
) {
3337 DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
3342 DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
3344 /* Delete all sta and flowrings */
3345 dhd_del_all_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
, event
->ifname
));
3346 dhd_flow_rings_delete(dhd_pub
, (uint8
)dhd_ifname2idx(dhd_pub
->info
,
3350 #endif /* PCIE_FULL_DONGLE */
3352 case WLC_E_DEAUTH_IND
:
3353 case WLC_E_DISASSOC
:
3354 case WLC_E_DISASSOC_IND
:
3355 #ifdef PCIE_FULL_DONGLE
3356 if (type
!= WLC_E_LINK
) {
3357 uint8 ifindex
= (uint8
)dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
3358 uint8 role
= dhd_flow_rings_ifindex2role(dhd_pub
, ifindex
);
3359 uint8 del_sta
= TRUE
;
3361 if (role
== WLC_E_IF_ROLE_STA
&&
3362 !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub
, ifindex
)) &&
3363 !wl_cfg80211_is_event_from_connected_bssid(
3364 dhd_idx2net(dhd_pub
, ifindex
), event
, *ifidx
)) {
3367 #endif /* WL_CFG80211 */
3368 DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
3369 __FUNCTION__
, type
, flags
, status
, role
, del_sta
));
3372 DHD_EVENT(("%s: Deleting STA " MACDBG
"\n",
3373 __FUNCTION__
, MAC2STRDBG(event
->addr
.octet
)));
3375 dhd_del_sta(dhd_pub
, dhd_ifname2idx(dhd_pub
->info
,
3376 event
->ifname
), &event
->addr
.octet
);
3377 /* Delete all flowrings for STA and P2P Client */
3378 if (role
== WLC_E_IF_ROLE_STA
|| role
== WLC_E_IF_ROLE_P2P_CLIENT
) {
3379 dhd_flow_rings_delete(dhd_pub
, ifindex
);
3381 dhd_flow_rings_delete_for_peer(dhd_pub
, ifindex
,
3382 (char *)&event
->addr
.octet
[0]);
3386 #endif /* PCIE_FULL_DONGLE */
3390 *ifidx
= dhd_ifname2idx(dhd_pub
->info
, event
->ifname
);
3391 /* push up to external supp/auth */
3392 dhd_event(dhd_pub
->info
, (char *)pvt_data
, evlen
, *ifidx
);
3393 DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
3394 __FUNCTION__
, type
, flags
, status
));
3395 BCM_REFERENCE(flags
);
3396 BCM_REFERENCE(status
);
3397 BCM_REFERENCE(reason
);
3402 /* For routers, EAPD will be working on these events.
3403 * Overwrite interface name to that event is pushed
3404 * to host with its registered interface name
3406 memcpy(pvt_data
->event
.ifname
, dhd_ifname(dhd_pub
, *ifidx
), IFNAMSIZ
);
3410 if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
3411 wl_show_host_event(dhd_pub
, event
,
3412 (void *)event_data
, raw_event
, dhd_pub
->enable_log
);
3414 #endif /* SHOW_EVENTS */
3417 } /* wl_process_host_event */
3420 wl_host_event(dhd_pub_t
*dhd_pub
, int *ifidx
, void *pktdata
, uint pktlen
,
3421 wl_event_msg_t
*event
, void **data_ptr
, void *raw_event
)
3423 return wl_process_host_event(dhd_pub
, ifidx
, pktdata
, pktlen
, event
, data_ptr
,
3428 dhd_print_buf(void *pbuf
, int len
, int bytes_per_line
)
3432 unsigned char *buf
= pbuf
;
3434 if (bytes_per_line
== 0) {
3435 bytes_per_line
= len
;
3438 for (i
= 0; i
< len
; i
++) {
3439 printf("%2.2x", *buf
++);
3441 if (j
== bytes_per_line
) {
3449 #endif /* DHD_DEBUG */
3452 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
3455 #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
3456 /* Convert user's input in hex pattern to byte-size mask */
3458 wl_pattern_atoh(char *src
, char *dst
)
3461 if (strncmp(src
, "0x", 2) != 0 &&
3462 strncmp(src
, "0X", 2) != 0) {
3463 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3466 src
= src
+ 2; /* Skip past 0x */
3467 if (strlen(src
) % 2 != 0) {
3468 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3471 for (i
= 0; *src
!= '\0'; i
++) {
3473 bcm_strncpy_s(num
, sizeof(num
), src
, 2);
3475 dst
[i
] = (uint8
)strtoul(num
, NULL
, 16);
3482 pattern_atoh_len(char *src
, char *dst
, int len
)
3485 if (strncmp(src
, "0x", HD_PREFIX_SIZE
) != 0 &&
3486 strncmp(src
, "0X", HD_PREFIX_SIZE
) != 0) {
3487 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3490 src
= src
+ HD_PREFIX_SIZE
; /* Skip past 0x */
3491 if (strlen(src
) % HD_BYTE_SIZE
!= 0) {
3492 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3495 for (i
= 0; *src
!= '\0'; i
++) {
3496 char num
[HD_BYTE_SIZE
+ 1];
3499 DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i
, len
));
3502 bcm_strncpy_s(num
, sizeof(num
), src
, HD_BYTE_SIZE
);
3503 num
[HD_BYTE_SIZE
] = '\0';
3504 dst
[i
] = (uint8
)strtoul(num
, NULL
, 16);
3505 src
+= HD_BYTE_SIZE
;
3509 #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
3511 #ifdef PKT_FILTER_SUPPORT
3513 dhd_pktfilter_offload_enable(dhd_pub_t
* dhd
, char *arg
, int enable
, int master_mode
)
3520 char *arg_save
= 0, *arg_org
= 0;
3523 wl_pkt_filter_enable_t enable_parm
;
3524 wl_pkt_filter_enable_t
* pkt_filterp
;
3529 if (!(arg_save
= MALLOC(dhd
->osh
, strlen(arg
) + 1))) {
3530 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__
));
3534 memcpy(arg_save
, arg
, strlen(arg
) + 1);
3536 argv
[i
] = bcmstrtok(&arg_save
, " ", 0);
3539 if (argv
[i
] == NULL
) {
3540 DHD_ERROR(("No args provided\n"));
3544 str
= "pkt_filter_enable";
3545 str_len
= strlen(str
);
3546 bcm_strncpy_s(buf
, sizeof(buf
) - 1, str
, sizeof(buf
) - 1);
3547 buf
[ sizeof(buf
) - 1 ] = '\0';
3548 buf_len
= str_len
+ 1;
3550 pkt_filterp
= (wl_pkt_filter_enable_t
*)(buf
+ str_len
+ 1);
3552 /* Parse packet filter id. */
3553 enable_parm
.id
= htod32(strtoul(argv
[i
], NULL
, 0));
3555 /* Parse enable/disable value. */
3556 enable_parm
.enable
= htod32(enable
);
3558 buf_len
+= sizeof(enable_parm
);
3559 memcpy((char *)pkt_filterp
,
3561 sizeof(enable_parm
));
3563 /* Enable/disable the specified filter. */
3564 rc
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
3565 rc
= rc
>= 0 ? 0 : rc
;
3567 DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3568 __FUNCTION__
, arg
, rc
));
3569 dhd_set_packet_filter(dhd
);
3570 rc
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
3571 rc
= rc
>= 0 ? 0 : rc
;
3573 DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
3574 __FUNCTION__
, arg
, rc
));
3576 DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
3577 __FUNCTION__
, arg
));
3581 DHD_TRACE(("%s: successfully added pktfilter %s\n",
3582 __FUNCTION__
, arg
));
3584 /* Contorl the master mode */
3585 rc
= dhd_wl_ioctl_set_intiovar(dhd
, "pkt_filter_mode",
3586 master_mode
, WLC_SET_VAR
, TRUE
, 0);
3587 rc
= rc
>= 0 ? 0 : rc
;
3589 DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3590 __FUNCTION__
, arg
, rc
));
3594 MFREE(dhd
->osh
, arg_org
, strlen(arg
) + 1);
3597 /* Packet filter section: extended filters have named offsets, add table here */
3603 static wl_pfbase_t basenames
[] = { WL_PKT_FILTER_BASE_NAMES
};
3606 wl_pkt_filter_base_parse(char *name
)
3609 char *bname
, *uname
;
3611 for (i
= 0; i
< ARRAYSIZE(basenames
); i
++) {
3612 bname
= basenames
[i
].name
;
3613 for (uname
= name
; *uname
; bname
++, uname
++) {
3614 if (*bname
!= bcm_toupper(*uname
)) {
3618 if (!*uname
&& !*bname
) {
3623 if (i
< ARRAYSIZE(basenames
)) {
3624 return basenames
[i
].base
;
3631 dhd_pktfilter_offload_set(dhd_pub_t
* dhd
, char *arg
)
3634 wl_pkt_filter_t pkt_filter
;
3635 wl_pkt_filter_t
*pkt_filterp
;
3640 uint32 pattern_size
;
3641 char *argv
[MAXPKT_ARG
] = {0}, * buf
= 0;
3643 char *arg_save
= 0, *arg_org
= 0;
3648 if (!(arg_save
= MALLOC(dhd
->osh
, strlen(arg
) + 1))) {
3649 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__
));
3655 if (!(buf
= MALLOC(dhd
->osh
, MAX_PKTFLT_BUF_SIZE
))) {
3656 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__
));
3660 memset(buf
, 0, MAX_PKTFLT_BUF_SIZE
);
3661 memcpy(arg_save
, arg
, strlen(arg
) + 1);
3663 if (strlen(arg
) > MAX_PKTFLT_BUF_SIZE
) {
3664 DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg
), (int)sizeof(buf
)));
3668 argv
[i
] = bcmstrtok(&arg_save
, " ", 0);
3670 if (i
>= MAXPKT_ARG
) {
3671 DHD_ERROR(("Invalid args provided\n"));
3674 argv
[i
] = bcmstrtok(&arg_save
, " ", 0);
3678 if (argv
[i
] == NULL
) {
3679 DHD_ERROR(("No args provided\n"));
3683 str
= "pkt_filter_add";
3684 str_len
= strlen(str
);
3685 bcm_strncpy_s(buf
, MAX_PKTFLT_BUF_SIZE
, str
, str_len
);
3686 buf
[ str_len
] = '\0';
3687 buf_len
= str_len
+ 1;
3689 pkt_filterp
= (wl_pkt_filter_t
*) (buf
+ str_len
+ 1);
3691 /* Parse packet filter id. */
3692 pkt_filter
.id
= htod32(strtoul(argv
[i
], NULL
, 0));
3694 if (argv
[++i
] == NULL
) {
3695 DHD_ERROR(("Polarity not provided\n"));
3699 /* Parse filter polarity. */
3700 pkt_filter
.negate_match
= htod32(strtoul(argv
[i
], NULL
, 0));
3702 if (argv
[++i
] == NULL
) {
3703 DHD_ERROR(("Filter type not provided\n"));
3707 /* Parse filter type. */
3708 pkt_filter
.type
= htod32(strtoul(argv
[i
], NULL
, 0));
3710 if ((pkt_filter
.type
== 0) || (pkt_filter
.type
== 1)) {
3711 if (argv
[++i
] == NULL
) {
3712 DHD_ERROR(("Offset not provided\n"));
3716 /* Parse pattern filter offset. */
3717 pkt_filter
.u
.pattern
.offset
= htod32(strtoul(argv
[i
], NULL
, 0));
3719 if (argv
[++i
] == NULL
) {
3720 DHD_ERROR(("Bitmask not provided\n"));
3724 /* Parse pattern filter mask. */
3725 rc
= wl_pattern_atoh(argv
[i
],
3726 (char *) pkt_filterp
->u
.pattern
.mask_and_pattern
);
3729 DHD_ERROR(("Rejecting: %s\n", argv
[i
]));
3732 mask_size
= htod32(rc
);
3733 if (argv
[++i
] == NULL
) {
3734 DHD_ERROR(("Pattern not provided\n"));
3738 /* Parse pattern filter pattern. */
3739 rc
= wl_pattern_atoh(argv
[i
],
3740 (char *) &pkt_filterp
->u
.pattern
.mask_and_pattern
[mask_size
]);
3743 DHD_ERROR(("Rejecting: %s\n", argv
[i
]));
3746 pattern_size
= htod32(rc
);
3747 if (mask_size
!= pattern_size
) {
3748 DHD_ERROR(("Mask and pattern not the same size\n"));
3752 pkt_filter
.u
.pattern
.size_bytes
= mask_size
;
3753 buf_len
+= WL_PKT_FILTER_FIXED_LEN
;
3754 buf_len
+= (WL_PKT_FILTER_PATTERN_FIXED_LEN
+ 2 * mask_size
);
3756 /* Keep-alive attributes are set in local variable (keep_alive_pkt), and
3757 * then memcpy'ed into buffer (keep_alive_pktp) since there is no
3758 * guarantee that the buffer is properly aligned.
3760 memcpy((char *)pkt_filterp
,
3762 WL_PKT_FILTER_FIXED_LEN
+ WL_PKT_FILTER_PATTERN_FIXED_LEN
);
3763 } else if ((pkt_filter
.type
== 2) || (pkt_filter
.type
== 6)) {
3765 char *endptr
= NULL
;
3766 wl_pkt_filter_pattern_listel_t
*pf_el
=
3767 (wl_pkt_filter_pattern_listel_t
*)&pkt_filterp
->u
.patlist
.patterns
[0];
3769 while (argv
[++i
] != NULL
) {
3770 /* Check valid buffer size. */
3771 if ((buf_len
+ MAX_PKTFLT_FIXED_BUF_SIZE
) > MAX_PKTFLT_BUF_SIZE
) {
3772 DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
3776 /* Parse pattern filter base and offset. */
3777 if (bcm_isdigit(*argv
[i
])) {
3779 rc
= strtoul(argv
[i
], &endptr
, 0);
3781 endptr
= strchr(argv
[i
], ':');
3784 rc
= wl_pkt_filter_base_parse(argv
[i
]);
3786 printf("Invalid base %s\n", argv
[i
]);
3791 printf("Invalid [base:]offset format: %s\n", argv
[i
]);
3796 if (*endptr
== ':') {
3797 pf_el
->base_offs
= htod16(rc
);
3798 rc
= strtoul(endptr
+ 1, &endptr
, 0);
3800 /* Must have had a numeric offset only */
3801 pf_el
->base_offs
= htod16(0);
3805 printf("Invalid [base:]offset format: %s\n", argv
[i
]);
3808 if (rc
> 0x0000FFFF) {
3809 printf("Offset too large\n");
3812 pf_el
->rel_offs
= htod16(rc
);
3814 /* Clear match_flag (may be set in parsing which follows) */
3815 pf_el
->match_flags
= htod16(0);
3817 /* Parse pattern filter mask and pattern directly into ioctl buffer */
3818 if (argv
[++i
] == NULL
) {
3819 printf("Bitmask not provided\n");
3822 rc
= wl_pattern_atoh(argv
[i
], (char*)pf_el
->mask_and_data
);
3823 if ((rc
== -1) || (rc
> MAX_PKTFLT_FIXED_PATTERN_SIZE
)) {
3824 printf("Rejecting: %s\n", argv
[i
]);
3827 mask_size
= htod16(rc
);
3829 if (argv
[++i
] == NULL
) {
3830 printf("Pattern not provided\n");
3834 if (*argv
[i
] == '!') {
3835 pf_el
->match_flags
=
3836 htod16(WL_PKT_FILTER_MFLAG_NEG
);
3839 if (argv
[i
] == '\0') {
3840 printf("Pattern not provided\n");
3843 rc
= wl_pattern_atoh(argv
[i
], (char*)&pf_el
->mask_and_data
[rc
]);
3844 if ((rc
== -1) || (rc
> MAX_PKTFLT_FIXED_PATTERN_SIZE
)) {
3845 printf("Rejecting: %s\n", argv
[i
]);
3848 pattern_size
= htod16(rc
);
3850 if (mask_size
!= pattern_size
) {
3851 printf("Mask and pattern not the same size\n");
3855 pf_el
->size_bytes
= mask_size
;
3857 /* Account for the size of this pattern element */
3858 buf_len
+= WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN
+ 2 * rc
;
3860 /* Move to next element location in ioctl buffer */
3861 pf_el
= (wl_pkt_filter_pattern_listel_t
*)
3862 ((uint8
*)pf_el
+ WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN
+ 2 * rc
);
3864 /* Count list element */
3868 /* Account for initial fixed size, and copy initial fixed fields */
3869 buf_len
+= WL_PKT_FILTER_FIXED_LEN
+ WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN
;
3871 if (buf_len
> MAX_PKTFLT_BUF_SIZE
) {
3872 DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
3875 /* Update list count and total size */
3876 pkt_filter
.u
.patlist
.list_cnt
= list_cnt
;
3877 pkt_filter
.u
.patlist
.PAD1
[0] = 0;
3878 pkt_filter
.u
.patlist
.totsize
= buf
+ buf_len
- (char*)pkt_filterp
;
3879 pkt_filter
.u
.patlist
.totsize
-= WL_PKT_FILTER_FIXED_LEN
;
3881 memcpy((char *)pkt_filterp
, &pkt_filter
,
3882 WL_PKT_FILTER_FIXED_LEN
+ WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN
);
3884 DHD_ERROR(("Invalid filter type %d\n", pkt_filter
.type
));
3888 rc
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
3889 rc
= rc
>= 0 ? 0 : rc
;
3892 DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3893 __FUNCTION__
, arg
, rc
));
3895 DHD_TRACE(("%s: successfully added pktfilter %s\n",
3896 __FUNCTION__
, arg
));
3900 MFREE(dhd
->osh
, arg_org
, strlen(arg
) + 1);
3903 MFREE(dhd
->osh
, buf
, MAX_PKTFLT_BUF_SIZE
);
3907 dhd_pktfilter_offload_delete(dhd_pub_t
*dhd
, int id
)
3911 ret
= dhd_wl_ioctl_set_intiovar(dhd
, "pkt_filter_delete",
3912 id
, WLC_SET_VAR
, TRUE
, 0);
3914 DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
3915 __FUNCTION__
, id
, ret
));
3918 #endif /* PKT_FILTER_SUPPORT */
3920 /* ========================== */
3921 /* ==== ARP OFFLOAD SUPPORT = */
3922 /* ========================== */
3923 #ifdef ARP_OFFLOAD_SUPPORT
3925 dhd_arp_offload_set(dhd_pub_t
* dhd
, int arp_mode
)
3929 retcode
= dhd_wl_ioctl_set_intiovar(dhd
, "arp_ol",
3930 arp_mode
, WLC_SET_VAR
, TRUE
, 0);
3932 retcode
= retcode
>= 0 ? 0 : retcode
;
3934 DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
3935 __FUNCTION__
, arp_mode
, retcode
));
3937 DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
3938 __FUNCTION__
, arp_mode
));
3942 dhd_arp_offload_enable(dhd_pub_t
* dhd
, int arp_enable
)
3946 retcode
= dhd_wl_ioctl_set_intiovar(dhd
, "arpoe",
3947 arp_enable
, WLC_SET_VAR
, TRUE
, 0);
3949 retcode
= retcode
>= 0 ? 0 : retcode
;
3951 DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
3952 __FUNCTION__
, arp_enable
, retcode
));
3954 DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
3955 __FUNCTION__
, arp_enable
));
3958 retcode
= dhd_wl_ioctl_get_intiovar(dhd
, "arp_version",
3959 &version
, WLC_GET_VAR
, FALSE
, 0);
3961 DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
3962 __FUNCTION__
, retcode
));
3963 dhd
->arp_version
= 1;
3966 DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__
, version
));
3967 dhd
->arp_version
= version
;
3973 dhd_aoe_arp_clr(dhd_pub_t
*dhd
, int idx
)
3977 if (dhd
== NULL
) return;
3978 if (dhd
->arp_version
== 1)
3981 ret
= dhd_iovar(dhd
, idx
, "arp_table_clear", NULL
, 0, NULL
, 0, TRUE
);
3983 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
3987 dhd_aoe_hostip_clr(dhd_pub_t
*dhd
, int idx
)
3991 if (dhd
== NULL
) return;
3992 if (dhd
->arp_version
== 1)
3995 ret
= dhd_iovar(dhd
, idx
, "arp_hostip_clear", NULL
, 0, NULL
, 0, TRUE
);
3997 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
4001 dhd_arp_offload_add_ip(dhd_pub_t
*dhd
, uint32 ipaddr
, int idx
)
4005 if (dhd
== NULL
) return;
4006 if (dhd
->arp_version
== 1)
4009 ret
= dhd_iovar(dhd
, idx
, "arp_hostip", (char *)&ipaddr
, sizeof(ipaddr
),
4012 DHD_TRACE(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__
, ret
));
4014 DHD_TRACE(("%s: sARP H ipaddr entry added \n",
4019 dhd_arp_get_arp_hostip_table(dhd_pub_t
*dhd
, void *buf
, int buflen
, int idx
)
4022 uint32
*ptr32
= buf
;
4023 bool clr_bottom
= FALSE
;
4027 if (dhd
== NULL
) return -1;
4028 if (dhd
->arp_version
== 1)
4031 ret
= dhd_iovar(dhd
, idx
, "arp_hostip", NULL
, 0, (char *)buf
, buflen
,
4034 DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
4035 __FUNCTION__
, ret
));
4040 /* clean up the buf, ascii reminder */
4041 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
4053 #endif /* ARP_OFFLOAD_SUPPORT */
4056 * Neighbor Discovery Offload: enable NDO feature
4057 * Called by ipv6 event handler when interface comes up/goes down
4060 dhd_ndo_enable(dhd_pub_t
* dhd
, int ndo_enable
)
4067 #if defined(WL_CFG80211) && defined(WL_NAN)
4068 if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd
))) {
4069 /* If nan dp is active, skip NDO */
4070 DHD_INFO(("Active NAN DP, skip NDO\n"));
4073 #endif /* WL_CFG80211 && WL_NAN */
4075 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
4076 /* NDO disable on STA+SOFTAP mode */
4079 #endif /* WL_CFG80211 */
4080 retcode
= dhd_wl_ioctl_set_intiovar(dhd
, "ndoe",
4081 ndo_enable
, WLC_SET_VAR
, TRUE
, 0);
4083 DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
4084 __FUNCTION__
, ndo_enable
, retcode
));
4086 DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
4087 __FUNCTION__
, ndo_enable
));
4093 * Neighbor Discover Offload: enable NDO feature
4094 * Called by ipv6 event handler when interface comes up
4097 dhd_ndo_add_ip(dhd_pub_t
*dhd
, char* ipv6addr
, int idx
)
4100 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4106 iov_len
= bcm_mkiovar("nd_hostip", (char *)ipv6addr
,
4107 IPV6_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
4109 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4110 __FUNCTION__
, sizeof(iovbuf
)));
4113 retcode
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
4116 DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
4117 __FUNCTION__
, retcode
));
4119 DHD_TRACE(("%s: ndo ipaddr entry added \n",
4125 * Neighbor Discover Offload: enable NDO feature
4126 * Called by ipv6 event handler when interface goes down
4129 dhd_ndo_remove_ip(dhd_pub_t
*dhd
, int idx
)
4132 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4138 iov_len
= bcm_mkiovar("nd_hostip_clear", NULL
,
4139 0, iovbuf
, sizeof(iovbuf
));
4141 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4142 __FUNCTION__
, sizeof(iovbuf
)));
4145 retcode
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
4148 DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
4149 __FUNCTION__
, retcode
));
4151 DHD_TRACE(("%s: ndo ipaddr entry removed \n",
4156 /* Enhanced ND offload */
4158 dhd_ndo_get_version(dhd_pub_t
*dhdp
)
4160 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4161 wl_nd_hostip_t ndo_get_ver
;
4170 memset(&iovbuf
, 0, sizeof(iovbuf
));
4171 ndo_get_ver
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
4172 ndo_get_ver
.op_type
= htod16(WL_ND_HOSTIP_OP_VER
);
4173 ndo_get_ver
.length
= htod32(WL_ND_HOSTIP_FIXED_LEN
+ sizeof(uint16
));
4174 ndo_get_ver
.u
.version
= 0;
4175 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver
,
4176 WL_ND_HOSTIP_FIXED_LEN
+ sizeof(uint16
), iovbuf
, sizeof(iovbuf
));
4179 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4180 __FUNCTION__
, sizeof(iovbuf
)));
4184 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_GET_VAR
, iovbuf
, iov_len
, FALSE
, 0);
4187 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
4188 /* ver iovar not supported. NDO version is 0 */
4191 wl_nd_hostip_t
*ndo_ver_ret
= (wl_nd_hostip_t
*)iovbuf
;
4193 if ((dtoh16(ndo_ver_ret
->version
) == WL_ND_HOSTIP_IOV_VER
) &&
4194 (dtoh16(ndo_ver_ret
->op_type
) == WL_ND_HOSTIP_OP_VER
) &&
4195 (dtoh32(ndo_ver_ret
->length
) == WL_ND_HOSTIP_FIXED_LEN
4196 + sizeof(uint16
))) {
4197 /* nd_hostip iovar version */
4198 ver
= dtoh16(ndo_ver_ret
->u
.version
);
4201 DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__
, ver
));
4208 dhd_ndo_add_ip_with_type(dhd_pub_t
*dhdp
, char *ipv6addr
, uint8 type
, int idx
)
4210 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4211 wl_nd_hostip_t ndo_add_addr
;
4215 if (dhdp
== NULL
|| ipv6addr
== 0) {
4219 /* wl_nd_hostip_t fixed param */
4220 ndo_add_addr
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
4221 ndo_add_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_ADD
);
4222 ndo_add_addr
.length
= htod32(WL_ND_HOSTIP_WITH_ADDR_LEN
);
4223 /* wl_nd_host_ip_addr_t param for add */
4224 memcpy(&ndo_add_addr
.u
.host_ip
.ip_addr
, ipv6addr
, IPV6_ADDR_LEN
);
4225 ndo_add_addr
.u
.host_ip
.type
= type
;
4227 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr
,
4228 WL_ND_HOSTIP_WITH_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
4230 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4231 __FUNCTION__
, sizeof(iovbuf
)));
4235 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
4237 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
4238 #ifdef NDO_CONFIG_SUPPORT
4239 if (retcode
== BCME_NORESOURCE
) {
4240 /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
4241 DHD_INFO(("%s: Host IP count exceed device capacity,"
4242 "ND offload deactivated\n", __FUNCTION__
));
4243 dhdp
->ndo_host_ip_overflow
= TRUE
;
4244 dhd_ndo_enable(dhdp
, FALSE
);
4246 #endif /* NDO_CONFIG_SUPPORT */
4248 DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__
, retcode
));
4255 dhd_ndo_remove_ip_by_addr(dhd_pub_t
*dhdp
, char *ipv6addr
, int idx
)
4257 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4258 wl_nd_hostip_t ndo_del_addr
;
4262 if (dhdp
== NULL
|| ipv6addr
== 0) {
4266 /* wl_nd_hostip_t fixed param */
4267 ndo_del_addr
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
4268 ndo_del_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_DEL
);
4269 ndo_del_addr
.length
= htod32(WL_ND_HOSTIP_WITH_ADDR_LEN
);
4270 /* wl_nd_host_ip_addr_t param for del */
4271 memcpy(&ndo_del_addr
.u
.host_ip
.ip_addr
, ipv6addr
, IPV6_ADDR_LEN
);
4272 ndo_del_addr
.u
.host_ip
.type
= 0; /* don't care */
4274 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr
,
4275 WL_ND_HOSTIP_WITH_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
4278 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4279 __FUNCTION__
, sizeof(iovbuf
)));
4283 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
4285 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
4287 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__
, retcode
));
4294 dhd_ndo_remove_ip_by_type(dhd_pub_t
*dhdp
, uint8 type
, int idx
)
4296 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4297 wl_nd_hostip_t ndo_del_addr
;
4305 /* wl_nd_hostip_t fixed param */
4306 ndo_del_addr
.version
= htod16(WL_ND_HOSTIP_IOV_VER
);
4307 if (type
== WL_ND_IPV6_ADDR_TYPE_UNICAST
) {
4308 ndo_del_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_DEL_UC
);
4309 } else if (type
== WL_ND_IPV6_ADDR_TYPE_ANYCAST
) {
4310 ndo_del_addr
.op_type
= htod16(WL_ND_HOSTIP_OP_DEL_AC
);
4314 ndo_del_addr
.length
= htod32(WL_ND_HOSTIP_FIXED_LEN
);
4316 iov_len
= bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr
, WL_ND_HOSTIP_FIXED_LEN
,
4317 iovbuf
, sizeof(iovbuf
));
4320 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4321 __FUNCTION__
, sizeof(iovbuf
)));
4325 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, idx
);
4327 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__
, retcode
));
4329 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__
, retcode
));
4336 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t
*dhdp
, int enable
)
4338 char iovbuf
[DHD_IOVAR_BUF_SIZE
];
4346 iov_len
= bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable
, sizeof(int),
4347 iovbuf
, sizeof(iovbuf
));
4350 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4351 __FUNCTION__
, sizeof(iovbuf
)));
4355 retcode
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, 0);
4357 DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
4358 __FUNCTION__
, enable
, retcode
));
4360 DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
4361 __FUNCTION__
, enable
));
4368 uint iscan_thread_id
= 0;
4369 iscan_buf_t
* iscan_chain
= 0;
4372 dhd_iscan_allocate_buf(dhd_pub_t
*dhd
, iscan_buf_t
**iscanbuf
)
4374 iscan_buf_t
*iscanbuf_alloc
= 0;
4375 iscan_buf_t
*iscanbuf_head
;
4377 DHD_ISCAN(("%s: Entered\n", __FUNCTION__
));
4380 iscanbuf_alloc
= (iscan_buf_t
*)MALLOC(dhd
->osh
, sizeof(iscan_buf_t
));
4381 if (iscanbuf_alloc
== NULL
)
4384 iscanbuf_alloc
->next
= NULL
;
4385 iscanbuf_head
= *iscanbuf
;
4387 DHD_ISCAN(("%s: addr of allocated node = 0x%X"
4388 "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
4389 __FUNCTION__
, iscanbuf_alloc
, iscanbuf_head
, dhd
));
4391 if (iscanbuf_head
== NULL
) {
4392 *iscanbuf
= iscanbuf_alloc
;
4393 DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__
));
4397 while (iscanbuf_head
->next
)
4398 iscanbuf_head
= iscanbuf_head
->next
;
4400 iscanbuf_head
->next
= iscanbuf_alloc
;
4404 return iscanbuf_alloc
;
4408 dhd_iscan_free_buf(void *dhdp
, iscan_buf_t
*iscan_delete
)
4410 iscan_buf_t
*iscanbuf_free
= 0;
4411 iscan_buf_t
*iscanbuf_prv
= 0;
4412 iscan_buf_t
*iscanbuf_cur
;
4413 dhd_pub_t
*dhd
= dhd_bus_pub(dhdp
);
4414 DHD_ISCAN(("%s: Entered\n", __FUNCTION__
));
4418 iscanbuf_cur
= iscan_chain
;
4420 /* If iscan_delete is null then delete the entire
4421 * chain or else delete specific one provided
4423 if (!iscan_delete
) {
4424 while (iscanbuf_cur
) {
4425 iscanbuf_free
= iscanbuf_cur
;
4426 iscanbuf_cur
= iscanbuf_cur
->next
;
4427 iscanbuf_free
->next
= 0;
4428 MFREE(dhd
->osh
, iscanbuf_free
, sizeof(iscan_buf_t
));
4432 while (iscanbuf_cur
) {
4433 if (iscanbuf_cur
== iscan_delete
)
4435 iscanbuf_prv
= iscanbuf_cur
;
4436 iscanbuf_cur
= iscanbuf_cur
->next
;
4439 iscanbuf_prv
->next
= iscan_delete
->next
;
4441 iscan_delete
->next
= 0;
4442 MFREE(dhd
->osh
, iscan_delete
, sizeof(iscan_buf_t
));
4451 dhd_iscan_result_buf(void)
4457 dhd_iscan_issue_request(void * dhdp
, wl_iscan_params_t
*pParams
, uint32 size
)
4460 dhd_pub_t
*dhd
= dhd_bus_pub(dhdp
);
4462 char iovar
[] = "iscan";
4463 uint32 allocSize
= 0;
4468 allocSize
= (size
+ strlen(iovar
) + 1);
4469 if ((allocSize
< size
) || (allocSize
< strlen(iovar
)))
4471 DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
4472 __FUNCTION__
, allocSize
, size
, strlen(iovar
)));
4475 buf
= MALLOC(dhd
->osh
, allocSize
);
4479 DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__
, allocSize
));
4482 ioctl
.cmd
= WLC_SET_VAR
;
4483 len
= bcm_mkiovar(iovar
, (char *)pParams
, size
, buf
, allocSize
);
4485 rc
= BCME_BUFTOOSHORT
;
4488 rc
= dhd_wl_ioctl(dhd
, 0, &ioctl
, buf
, len
);
4493 MFREE(dhd
->osh
, buf
, allocSize
);
4500 dhd_iscan_get_partial_result(void *dhdp
, uint
*scan_count
)
4502 wl_iscan_results_t
*list_buf
;
4503 wl_iscan_results_t list
;
4504 wl_scan_results_t
*results
;
4505 iscan_buf_t
*iscan_cur
;
4507 dhd_pub_t
*dhd
= dhd_bus_pub(dhdp
);
4512 DHD_ISCAN(("%s: Enter\n", __FUNCTION__
));
4514 iscan_cur
= dhd_iscan_allocate_buf(dhd
, &iscan_chain
);
4516 DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__
));
4517 dhd_iscan_free_buf(dhdp
, 0);
4518 dhd_iscan_request(dhdp
, WL_SCAN_ACTION_ABORT
);
4519 dhd_ind_scan_confirm(dhdp
, FALSE
);
4525 memset(iscan_cur
->iscan_buf
, 0, WLC_IW_ISCAN_MAXLEN
);
4526 list_buf
= (wl_iscan_results_t
*)iscan_cur
->iscan_buf
;
4527 results
= &list_buf
->results
;
4528 results
->buflen
= WL_ISCAN_RESULTS_FIXED_SIZE
;
4529 results
->version
= 0;
4532 memset(&list
, 0, sizeof(list
));
4533 list
.results
.buflen
= htod32(WLC_IW_ISCAN_MAXLEN
);
4534 len
= bcm_mkiovar("iscanresults", (char *)&list
, WL_ISCAN_RESULTS_FIXED_SIZE
,
4535 iscan_cur
->iscan_buf
, WLC_IW_ISCAN_MAXLEN
);
4537 dhd_iscan_free_buf(dhdp
, 0);
4538 dhd_iscan_request(dhdp
, WL_SCAN_ACTION_ABORT
);
4539 dhd_ind_scan_confirm(dhdp
, FALSE
);
4540 status
= BCME_BUFTOOSHORT
;
4543 ioctl
.cmd
= WLC_GET_VAR
;
4545 rc
= dhd_wl_ioctl(dhd
, 0, &ioctl
, iscan_cur
->iscan_buf
, WLC_IW_ISCAN_MAXLEN
);
4547 results
->buflen
= dtoh32(results
->buflen
);
4548 results
->version
= dtoh32(results
->version
);
4549 *scan_count
= results
->count
= dtoh32(results
->count
);
4550 status
= dtoh32(list_buf
->status
);
4551 DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__
, results
->count
, status
));
4555 if (!(*scan_count
)) {
4556 /* TODO: race condition when FLUSH already called */
4557 dhd_iscan_free_buf(dhdp
, 0);
4563 #endif /* SIMPLE_ISCAN */
4566 * returns = TRUE if associated, FALSE if not associated
4568 bool dhd_is_associated(dhd_pub_t
*dhd
, uint8 ifidx
, int *retval
)
4570 char bssid
[6], zbuf
[6];
4576 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_BSSID
, (char *)&bssid
,
4577 ETHER_ADDR_LEN
, FALSE
, ifidx
);
4578 DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__
, ret
));
4580 if (ret
== BCME_NOTASSOCIATED
) {
4581 DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__
, ret
));
4590 if ((memcmp(bssid
, zbuf
, ETHER_ADDR_LEN
) == 0)) {
4591 DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__
));
4597 /* Function to estimate possible DTIM_SKIP value */
4598 #if defined(BCMPCIE)
4600 dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
, int *dtim_period
, int *bcn_interval
)
4602 int bcn_li_dtim
= 1; /* deafult no dtim skip setting */
4604 int allowed_skip_dtim_cnt
= 0;
4606 if (dhd
->disable_dtim_in_suspend
) {
4607 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__
));
4612 /* Check if associated */
4613 if (dhd_is_associated(dhd
, 0, NULL
) == FALSE
) {
4614 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__
, ret
));
4618 if (dtim_period
== NULL
|| bcn_interval
== NULL
)
4621 /* read associated AP beacon interval */
4622 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_BCNPRD
,
4623 bcn_interval
, sizeof(*bcn_interval
), FALSE
, 0)) < 0) {
4624 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__
, ret
));
4628 /* read associated AP dtim setup */
4629 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_DTIMPRD
,
4630 dtim_period
, sizeof(*dtim_period
), FALSE
, 0)) < 0) {
4631 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
4635 /* if not assocated just return */
4636 if (*dtim_period
== 0) {
4640 if (dhd
->max_dtim_enable
) {
4642 (int) (MAX_DTIM_ALLOWED_INTERVAL
/ ((*dtim_period
) * (*bcn_interval
)));
4643 if (bcn_li_dtim
== 0) {
4647 /* attemp to use platform defined dtim skip interval */
4648 bcn_li_dtim
= dhd
->suspend_bcn_li_dtim
;
4650 /* check if sta listen interval fits into AP dtim */
4651 if (*dtim_period
> CUSTOM_LISTEN_INTERVAL
) {
4652 /* AP DTIM to big for our Listen Interval : no dtim skiping */
4653 bcn_li_dtim
= NO_DTIM_SKIP
;
4654 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
4655 __FUNCTION__
, *dtim_period
, CUSTOM_LISTEN_INTERVAL
));
4659 if (((*dtim_period
) * (*bcn_interval
) * bcn_li_dtim
) > MAX_DTIM_ALLOWED_INTERVAL
) {
4660 allowed_skip_dtim_cnt
=
4661 MAX_DTIM_ALLOWED_INTERVAL
/ ((*dtim_period
) * (*bcn_interval
));
4663 (allowed_skip_dtim_cnt
!= 0) ? allowed_skip_dtim_cnt
: NO_DTIM_SKIP
;
4666 if ((bcn_li_dtim
* (*dtim_period
)) > CUSTOM_LISTEN_INTERVAL
) {
4667 /* Round up dtim_skip to fit into STAs Listen Interval */
4668 bcn_li_dtim
= (int)(CUSTOM_LISTEN_INTERVAL
/ *dtim_period
);
4669 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__
, bcn_li_dtim
));
4673 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
4674 __FUNCTION__
, *bcn_interval
, bcn_li_dtim
, *dtim_period
, CUSTOM_LISTEN_INTERVAL
));
4678 #else /* OEM_ANDROID && BCMPCIE */
4680 dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
)
4682 int bcn_li_dtim
= 1; /* deafult no dtim skip setting */
4684 int dtim_period
= 0;
4686 int allowed_skip_dtim_cnt
= 0;
4688 if (dhd
->disable_dtim_in_suspend
) {
4689 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__
));
4694 /* Check if associated */
4695 if (dhd_is_associated(dhd
, 0, NULL
) == FALSE
) {
4696 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__
, ret
));
4700 /* read associated AP beacon interval */
4701 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_BCNPRD
,
4702 &ap_beacon
, sizeof(ap_beacon
), FALSE
, 0)) < 0) {
4703 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__
, ret
));
4707 /* read associated ap's dtim setup */
4708 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_DTIMPRD
,
4709 &dtim_period
, sizeof(dtim_period
), FALSE
, 0)) < 0) {
4710 DHD_ERROR(("%s failed code %d\n", __FUNCTION__
, ret
));
4714 /* if not assocated just exit */
4715 if (dtim_period
== 0) {
4719 if (dhd
->max_dtim_enable
) {
4721 (int) (MAX_DTIM_ALLOWED_INTERVAL
/ (ap_beacon
* dtim_period
));
4722 if (bcn_li_dtim
== 0) {
4726 /* attemp to use platform defined dtim skip interval */
4727 bcn_li_dtim
= dhd
->suspend_bcn_li_dtim
;
4729 /* check if sta listen interval fits into AP dtim */
4730 if (dtim_period
> CUSTOM_LISTEN_INTERVAL
) {
4731 /* AP DTIM to big for our Listen Interval : no dtim skiping */
4732 bcn_li_dtim
= NO_DTIM_SKIP
;
4733 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
4734 __FUNCTION__
, dtim_period
, CUSTOM_LISTEN_INTERVAL
));
4738 if ((dtim_period
* ap_beacon
* bcn_li_dtim
) > MAX_DTIM_ALLOWED_INTERVAL
) {
4739 allowed_skip_dtim_cnt
=
4740 MAX_DTIM_ALLOWED_INTERVAL
/ (dtim_period
* ap_beacon
);
4742 (allowed_skip_dtim_cnt
!= 0) ? allowed_skip_dtim_cnt
: NO_DTIM_SKIP
;
4745 if ((bcn_li_dtim
* dtim_period
) > CUSTOM_LISTEN_INTERVAL
) {
4746 /* Round up dtim_skip to fit into STAs Listen Interval */
4747 bcn_li_dtim
= (int)(CUSTOM_LISTEN_INTERVAL
/ dtim_period
);
4748 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__
, bcn_li_dtim
));
4752 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
4753 __FUNCTION__
, ap_beacon
, bcn_li_dtim
, dtim_period
, CUSTOM_LISTEN_INTERVAL
));
4758 #endif /* OEM_ANDROID && BCMPCIE */
4760 /* Check if the mode supports STA MODE */
4761 bool dhd_support_sta_mode(dhd_pub_t
*dhd
)
4765 if (!(dhd
->op_mode
& DHD_FLAG_STA_MODE
))
4768 #endif /* WL_CFG80211 */
4772 #if defined(KEEP_ALIVE)
4773 int dhd_keep_alive_onoff(dhd_pub_t
*dhd
)
4777 wl_mkeep_alive_pkt_t mkeep_alive_pkt
= {0, 0, 0, 0, 0, {0}};
4778 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
;
4783 if (!dhd_support_sta_mode(dhd
))
4786 DHD_TRACE(("%s execution\n", __FUNCTION__
));
4788 str
= "mkeep_alive";
4789 str_len
= strlen(str
);
4790 strncpy(buf
, str
, sizeof(buf
) - 1);
4791 buf
[ sizeof(buf
) - 1 ] = '\0';
4792 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) (buf
+ str_len
+ 1);
4793 mkeep_alive_pkt
.period_msec
= CUSTOM_KEEP_ALIVE_SETTING
;
4794 buf_len
= str_len
+ 1;
4795 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
4796 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
4797 /* Setup keep alive zero for null packet generation */
4798 mkeep_alive_pkt
.keep_alive_id
= 0;
4799 mkeep_alive_pkt
.len_bytes
= 0;
4800 buf_len
+= WL_MKEEP_ALIVE_FIXED_LEN
;
4801 bzero(mkeep_alive_pkt
.data
, sizeof(mkeep_alive_pkt
.data
));
4802 /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
4803 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
4804 * guarantee that the buffer is properly aligned.
4806 memcpy((char *)mkeep_alive_pktp
, &mkeep_alive_pkt
, WL_MKEEP_ALIVE_FIXED_LEN
);
4808 res
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, 0);
4812 #endif /* defined(KEEP_ALIVE) */
4813 #define CSCAN_TLV_TYPE_SSID_IE 'S'
4815 * SSIDs list parsing from cscan tlv list
4818 wl_parse_ssid_list_tlv(char** list_str
, wlc_ssid_ext_t
* ssid
, int max
, int *bytes_left
)
4824 if ((list_str
== NULL
) || (*list_str
== NULL
) || (*bytes_left
< 0)) {
4825 DHD_ERROR(("%s error paramters\n", __FUNCTION__
));
4829 while (*bytes_left
> 0) {
4830 if (str
[0] != CSCAN_TLV_TYPE_SSID_IE
) {
4832 DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx
, *bytes_left
, str
[0]));
4837 DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__
, idx
));
4841 /* Get proper CSCAN_TLV_TYPE_SSID_IE */
4843 if (*bytes_left
== 0) {
4844 DHD_ERROR(("%s no length field.\n", __FUNCTION__
));
4848 ssid
[idx
].rssi_thresh
= 0;
4849 ssid
[idx
].flags
= 0;
4852 /* Broadcast SSID */
4853 ssid
[idx
].SSID_len
= 0;
4854 memset((char*)ssid
[idx
].SSID
, 0x0, DOT11_MAX_SSID_LEN
);
4858 DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left
));
4859 } else if (len
<= DOT11_MAX_SSID_LEN
) {
4860 /* Get proper SSID size */
4861 ssid
[idx
].SSID_len
= len
;
4864 if (ssid
[idx
].SSID_len
> *bytes_left
) {
4865 DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
4866 __FUNCTION__
, ssid
[idx
].SSID_len
, *bytes_left
));
4870 memcpy((char*)ssid
[idx
].SSID
, str
, ssid
[idx
].SSID_len
);
4872 *bytes_left
-= ssid
[idx
].SSID_len
;
4873 str
+= ssid
[idx
].SSID_len
;
4874 ssid
[idx
].hidden
= TRUE
;
4876 DHD_TRACE(("%s :size=%d left=%d\n",
4877 (char*)ssid
[idx
].SSID
, ssid
[idx
].SSID_len
, *bytes_left
));
4879 DHD_ERROR(("### SSID size more than %d\n", str
[0]));
4888 /* Android ComboSCAN support */
4891 * data parsing from ComboScan tlv list
4894 wl_iw_parse_data_tlv(char** list_str
, void *dst
, int dst_size
, const char token
,
4895 int input_size
, int *bytes_left
)
4901 if ((list_str
== NULL
) || (*list_str
== NULL
) ||(bytes_left
== NULL
) || (*bytes_left
< 0)) {
4902 DHD_ERROR(("%s error paramters\n", __FUNCTION__
));
4907 /* Clean all dest bytes */
4908 memset(dst
, 0, dst_size
);
4909 if (*bytes_left
> 0) {
4911 if (str
[0] != token
) {
4912 DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
4913 __FUNCTION__
, token
, str
[0], *bytes_left
));
4920 if (input_size
== 1) {
4921 memcpy(dst
, str
, input_size
);
4923 else if (input_size
== 2) {
4924 memcpy(dst
, (char *)htod16(memcpy(&short_temp
, str
, input_size
)),
4927 else if (input_size
== 4) {
4928 memcpy(dst
, (char *)htod32(memcpy(&int_temp
, str
, input_size
)),
4932 *bytes_left
-= input_size
;
4941 * channel list parsing from cscan tlv list
4944 wl_iw_parse_channel_list_tlv(char** list_str
, uint16
* channel_list
,
4945 int channel_num
, int *bytes_left
)
4950 if ((list_str
== NULL
) || (*list_str
== NULL
) ||(bytes_left
== NULL
) || (*bytes_left
< 0)) {
4951 DHD_ERROR(("%s error paramters\n", __FUNCTION__
));
4956 while (*bytes_left
> 0) {
4958 if (str
[0] != CSCAN_TLV_TYPE_CHANNEL_IE
) {
4960 DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx
, *bytes_left
, str
[0]));
4963 /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
4969 channel_list
[idx
] = 0x0;
4972 channel_list
[idx
] = (uint16
)str
[0];
4973 DHD_TRACE(("%s channel=%d \n", __FUNCTION__
, channel_list
[idx
]));
4979 DHD_ERROR(("%s Too many channels \n", __FUNCTION__
));
4988 /* Parse a comma-separated list from list_str into ssid array, starting
4989 * at index idx. Max specifies size of the ssid array. Parses ssids
4990 * and returns updated idx; if idx >= max not all fit, the excess have
4991 * not been copied. Returns -1 on empty string, or on ssid too long.
4994 wl_iw_parse_ssid_list(char** list_str
, wlc_ssid_t
* ssid
, int idx
, int max
)
4998 if ((list_str
== NULL
) || (*list_str
== NULL
))
5001 for (str
= *list_str
; str
!= NULL
; str
= ptr
) {
5003 /* check for next TAG */
5004 if (!strncmp(str
, GET_CHANNEL
, strlen(GET_CHANNEL
))) {
5005 *list_str
= str
+ strlen(GET_CHANNEL
);
5009 if ((ptr
= strchr(str
, ',')) != NULL
) {
5013 if (strlen(str
) > DOT11_MAX_SSID_LEN
) {
5014 DHD_ERROR(("ssid <%s> exceeds %d\n", str
, DOT11_MAX_SSID_LEN
));
5018 if (strlen(str
) == 0)
5019 ssid
[idx
].SSID_len
= 0;
5022 bzero(ssid
[idx
].SSID
, sizeof(ssid
[idx
].SSID
));
5023 strncpy((char*)ssid
[idx
].SSID
, str
, sizeof(ssid
[idx
].SSID
) - 1);
5024 ssid
[idx
].SSID_len
= strlen(str
);
5032 * Parse channel list from iwpriv CSCAN
5035 wl_iw_parse_channel_list(char** list_str
, uint16
* channel_list
, int channel_num
)
5040 char* endptr
= NULL
;
5042 if ((list_str
== NULL
)||(*list_str
== NULL
))
5047 while (strncmp(str
, GET_NPROBE
, strlen(GET_NPROBE
))) {
5048 val
= (int)strtoul(str
, &endptr
, 0);
5049 if (endptr
== str
) {
5050 printf("could not parse channel number starting at"
5051 " substring \"%s\" in list:\n%s\n",
5055 str
= endptr
+ strspn(endptr
, " ,");
5057 if (num
== channel_num
) {
5058 DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
5059 channel_num
, *list_str
));
5063 channel_list
[num
++] = (uint16
)val
;
5069 /* Given filename and download type, returns a buffer pointer and length
5070 * for download to f/w. Type can be FW or NVRAM.
5073 int dhd_get_download_buffer(dhd_pub_t
*dhd
, char *file_path
, download_type_t component
,
5074 char ** buffer
, int *length
)
5077 int ret
= BCME_ERROR
;
5083 /* Point to cache if available. */
5084 /* No Valid cache found on this call */
5090 image
= dhd_os_open_image1(dhd
, file_path
);
5091 if (image
== NULL
) {
5096 buf
= MALLOCZ(dhd
->osh
, file_len
);
5098 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
5099 __FUNCTION__
, file_len
));
5103 /* Download image */
5104 len
= dhd_os_get_image_block((char *)buf
, file_len
, image
);
5105 if ((len
<= 0 || len
> file_len
)) {
5106 MFREE(dhd
->osh
, buf
, file_len
);
5113 *buffer
= (char *)buf
;
5115 /* Cache if first call. */
5119 dhd_os_close_image1(dhd
, image
);
5125 dhd_download_2_dongle(dhd_pub_t
*dhd
, char *iovar
, uint16 flag
, uint16 dload_type
,
5126 unsigned char *dload_buf
, int len
)
5128 struct wl_dload_data
*dload_ptr
= (struct wl_dload_data
*)dload_buf
;
5130 int dload_data_offset
;
5131 static char iovar_buf
[WLC_IOCTL_MEDLEN
];
5134 memset(iovar_buf
, 0, sizeof(iovar_buf
));
5136 dload_data_offset
= OFFSETOF(wl_dload_data_t
, data
);
5137 dload_ptr
->flag
= (DLOAD_HANDLER_VER
<< DLOAD_FLAG_VER_SHIFT
) | flag
;
5138 dload_ptr
->dload_type
= dload_type
;
5139 dload_ptr
->len
= htod32(len
- dload_data_offset
);
5141 len
= ROUNDUP(len
, 8);
5143 iovar_len
= bcm_mkiovar(iovar
, (char *)dload_buf
,
5144 (uint
)len
, iovar_buf
, sizeof(iovar_buf
));
5145 if (iovar_len
== 0) {
5146 DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
5147 __FUNCTION__
, iovar
));
5148 return BCME_BUFTOOSHORT
;
5151 err
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovar_buf
,
5152 iovar_len
, IOV_SET
, 0);
5158 dhd_download_blob(dhd_pub_t
*dhd
, unsigned char *buf
,
5159 uint32 len
, char *iovar
)
5164 unsigned char *new_buf
;
5165 int err
= 0, data_offset
;
5166 uint16 dl_flag
= DL_BEGIN
;
5168 data_offset
= OFFSETOF(wl_dload_data_t
, data
);
5169 size2alloc
= data_offset
+ MAX_CHUNK_LEN
;
5170 size2alloc
= ROUNDUP(size2alloc
, 8);
5172 if ((new_buf
= (unsigned char *)MALLOCZ(dhd
->osh
, size2alloc
)) != NULL
) {
5174 chunk_len
= dhd_os_get_image_block((char *)(new_buf
+ data_offset
),
5175 MAX_CHUNK_LEN
, buf
);
5176 if (chunk_len
< 0) {
5177 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
5178 __FUNCTION__
, chunk_len
));
5182 if (len
- chunk_len
== 0)
5185 err
= dhd_download_2_dongle(dhd
, iovar
, dl_flag
, DL_TYPE_CLM
,
5186 new_buf
, data_offset
+ chunk_len
);
5188 dl_flag
&= ~DL_BEGIN
;
5190 len
= len
- chunk_len
;
5191 } while ((len
> 0) && (err
== 0));
5197 MFREE(dhd
->osh
, new_buf
, size2alloc
);
5203 dhd_apply_default_txcap(dhd_pub_t
*dhd
, char *path
)
5209 dhd_check_current_clm_data(dhd_pub_t
*dhd
)
5211 char iovbuf
[WLC_IOCTL_SMLEN
];
5212 wl_country_t
*cspec
;
5215 memset(iovbuf
, 0, sizeof(iovbuf
));
5216 err
= bcm_mkiovar("country", NULL
, 0, iovbuf
, sizeof(iovbuf
));
5218 err
= BCME_BUFTOOSHORT
;
5219 DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__
));
5222 err
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
5224 DHD_ERROR(("%s: country code get failed\n", __FUNCTION__
));
5227 cspec
= (wl_country_t
*)iovbuf
;
5228 if ((strncmp(cspec
->ccode
, WL_CCODE_NULL_COUNTRY
, WLC_CNTRY_BUF_SZ
)) == 0) {
5229 DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
5233 DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
5239 dhd_apply_default_clm(dhd_pub_t
*dhd
, char *clm_path
)
5241 char *clm_blob_path
;
5243 char *memblock
= NULL
;
5245 char iovbuf
[WLC_IOCTL_SMLEN
];
5248 if (clm_path
[0] != '\0') {
5249 if (strlen(clm_path
) > MOD_PARAM_PATHLEN
) {
5250 DHD_ERROR(("clm path exceeds max len\n"));
5253 clm_blob_path
= clm_path
;
5254 DHD_TRACE(("clm path from module param:%s\n", clm_path
));
5256 clm_blob_path
= VENDOR_PATH CONFIG_BCMDHD_CLM_PATH
;
5259 /* If CLM blob file is found on the filesystem, download the file.
5260 * After CLM file download or If the blob file is not present,
5261 * validate the country code before proceeding with the initialization.
5262 * If country code is not valid, fail the initialization.
5264 memblock
= dhd_os_open_image1(dhd
, (char *)clm_blob_path
);
5265 if (memblock
== NULL
) {
5266 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5270 status
= dhd_check_current_clm_data(dhd
);
5271 if (status
== TRUE
) {
5277 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5281 len
= dhd_os_get_image_size(memblock
);
5283 if ((len
> 0) && (len
< MAX_CLM_BUF_SIZE
) && memblock
) {
5284 status
= dhd_check_current_clm_data(dhd
);
5285 if (status
== TRUE
) {
5286 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5287 if (dhd
->op_mode
!= DHD_FLAG_MFG_MODE
) {
5294 DHD_ERROR(("%s: CLM already exist in F/W, "
5295 "new CLM data will be added to the end of existing CLM data!\n",
5297 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5298 } else if (status
!= FALSE
) {
5303 /* Found blob file. Download the file */
5304 DHD_TRACE(("clm file download from %s \n", clm_blob_path
));
5305 err
= dhd_download_blob(dhd
, (unsigned char*)memblock
, len
, "clmload");
5307 DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__
, err
));
5308 /* Retrieve clmload_status and print */
5309 memset(iovbuf
, 0, sizeof(iovbuf
));
5310 len
= bcm_mkiovar("clmload_status", NULL
, 0, iovbuf
, sizeof(iovbuf
));
5312 err
= BCME_BUFTOOSHORT
;
5315 err
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
5317 DHD_ERROR(("%s: clmload_status get failed err=%d \n",
5318 __FUNCTION__
, err
));
5320 DHD_ERROR(("%s: clmload_status: %d \n",
5321 __FUNCTION__
, *((int *)iovbuf
)));
5322 if (*((int *)iovbuf
) == CHIPID_MISMATCH
) {
5323 DHD_ERROR(("Chip ID mismatch error \n"));
5329 DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__
));
5332 DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len
, memblock
));
5333 #ifdef DHD_USE_CLMINFO_PARSER
5336 #endif /* DHD_USE_CLMINFO_PARSER */
5339 /* Verify country code */
5340 status
= dhd_check_current_clm_data(dhd
);
5342 if (status
!= TRUE
) {
5343 /* Country code not initialized or CLM download not proper */
5344 DHD_ERROR(("country code not initialized\n"));
5350 dhd_os_close_image1(dhd
, memblock
);
5356 #ifdef DHD_USE_CLMINFO_PARSER
5359 #define CLMINFO_PATH PLATFORM_PATH".clminfo"
5361 #define CLMINFO_PATH VENDOR_PATH"/etc/wifi/.clminfo"
5362 #endif /* PLATFORM_SLP */
5364 #define CLMINFO_PATH "/installmedia/.clminfo"
5365 #endif /* CUSTOMER_HW4 */
5367 extern struct cntry_locales_custom translate_custom_table
[NUM_OF_COUNTRYS
];
5370 process_clarification_vars(char *varbuf
, unsigned int varbuf_size
)
5375 unsigned int buf_len
, len
;
5379 findNewline
= FALSE
;
5382 for (len
= 0; len
< varbuf_size
; len
++) {
5383 if ((varbuf
[len
] == '\r') || (varbuf
[len
] == ' ')) {
5386 if (findNewline
&& varbuf
[len
] != '\n') {
5389 findNewline
= FALSE
;
5390 if (varbuf
[len
] == '#') {
5394 if (varbuf
[len
] == '\n') {
5401 *dp
++ = varbuf
[len
];
5404 buf_len
= (unsigned int)(dp
- varbuf
);
5406 while (dp
< varbuf
+ len
)
5413 dhd_get_clminfo(dhd_pub_t
*dhd
, char *clm_path
)
5415 int bcmerror
= BCME_OK
;
5416 char *clminfo_path
= CLMINFO_PATH
;
5418 char *memblock
= NULL
;
5420 uint len
= MAX_CLMINFO_BUF_SIZE
;
5422 char *tokenp
= NULL
;
5424 char *temp_buf
= NULL
;
5428 char *clm_blob_vendor_path
= VENDOR_PATH
;
5429 char *clm_blob_path
= NULL
;
5430 int clm_blob_path_len
= 0;
5432 /* Clears clm_path and translate_custom_table */
5433 memset(clm_path
, 0, MOD_PARAM_PATHLEN
);
5434 memset(translate_custom_table
, 0, sizeof(translate_custom_table
));
5437 * Read clm info from the .clminfo file
5438 * 1st line : CLM blob file path
5439 * 2nd ~ end of line: Country locales table
5441 if (dhd_get_download_buffer(dhd
, clminfo_path
, CLMINFO
, &memblock
, &len
) != 0) {
5442 DHD_ERROR(("%s: Cannot open .clminfo file\n", __FUNCTION__
));
5443 bcmerror
= BCME_ERROR
;
5444 dhd
->is_clm_mult_regrev
= FALSE
;
5448 dhd
->is_clm_mult_regrev
= TRUE
;
5450 if ((len
> 0) && (len
< MAX_CLMINFO_BUF_SIZE
) && memblock
) {
5451 /* Found clminfo file. Parsing the file */
5452 DHD_INFO(("clminfo file parsing from %s \n", clminfo_path
));
5454 bufp
= (char *) memblock
;
5457 /* clean up the file */
5458 len
= process_clarification_vars(bufp
, len
);
5460 tokenp
= bcmstrtok(&bufp
, "=", &tokdelim
);
5461 /* reduce the len of bufp by token byte(1) and ptr length */
5462 len
-= (strlen(tokenp
) + 1);
5464 if (strncmp(tokenp
, "clm_path", 8) != 0) {
5465 DHD_ERROR(("%s: Cannot found clm_path\n", __FUNCTION__
));
5466 bcmerror
= BCME_ERROR
;
5469 temp_buf
= bcmstrtok(&bufp
, ";", &tokdelim
);
5470 str_ln
= strlen(temp_buf
);
5472 strncpy(clm_path
, temp_buf
, str_ln
);
5473 len
-= (strlen(clm_path
) + 1);
5475 clm_blob_path_len
= strlen(clm_path
);
5476 clm_blob_path
= (char *)MALLOCZ(dhd
->osh
, clm_blob_path_len
);
5477 if (clm_blob_path
== NULL
) {
5478 bcmerror
= BCME_NOMEM
;
5479 DHD_ERROR(("%s: Failed to allocate memory!\n", __FUNCTION__
));
5482 memset(clm_blob_path
, 0, clm_blob_path_len
);
5483 strncpy(clm_blob_path
, clm_path
, strlen(clm_path
));
5485 /* Concannate VENDOR_PATH + CLM_PATH */
5486 memset(clm_path
, 0, MOD_PARAM_PATHLEN
);
5487 snprintf(clm_path
, (int)strlen(clm_blob_vendor_path
) + clm_blob_path_len
+ 1,
5488 "%s%s", clm_blob_vendor_path
, clm_blob_path
);
5489 clm_path
[strlen(clm_path
)] = '\0';
5491 DHD_INFO(("%s: Found clm_path %s\n", __FUNCTION__
, clm_path
));
5494 DHD_ERROR(("%s: Length is invalid\n", __FUNCTION__
));
5495 bcmerror
= BCME_ERROR
;
5499 /* reserved relocale map[0] to XZ/11 */
5500 memcpy(translate_custom_table
[cnt
].custom_locale
, "XZ", strlen("XZ"));
5501 translate_custom_table
[cnt
].custom_locale_rev
= 11;
5502 DHD_INFO(("%s: Relocale map - iso_aabrev %s custom locale %s "
5503 "custom locale rev %d\n",
5505 translate_custom_table
[cnt
].iso_abbrev
,
5506 translate_custom_table
[cnt
].custom_locale
,
5507 translate_custom_table
[cnt
].custom_locale_rev
));
5511 /* start parsing relocale map */
5514 if ((bufp
[0] == 0) && (len
> 0)) {
5515 DHD_ERROR(("%s: First byte is NULL character\n", __FUNCTION__
));
5516 bcmerror
= BCME_ERROR
;
5519 if ((bufp
[0] == '=') || (bufp
[0] == '/') || (bufp
[0] == ';')) {
5520 DHD_ERROR(("%s: Data is invalid\n", __FUNCTION__
));
5521 bcmerror
= BCME_ERROR
;
5525 /* parsing relocale data */
5526 tokenp
= bcmstrtok(&bufp
, "=/;", &tokdelim
);
5527 len
-= (strlen(tokenp
) + 1);
5529 if ((parse_step
== 0) && (tokdelim
== '=')) {
5530 memcpy(translate_custom_table
[cnt
].iso_abbrev
,
5531 tokenp
, strlen(tokenp
));
5533 } else if ((parse_step
== 1) && (tokdelim
== '/')) {
5534 memcpy(translate_custom_table
[cnt
].custom_locale
,
5535 tokenp
, strlen(tokenp
));
5537 } else if ((parse_step
== 2) && (tokdelim
== ';')) {
5538 char *str
, *endptr
= NULL
;
5542 locale_rev
= (int)strtoul(str
, &endptr
, 0);
5544 bcmerror
= BCME_ERROR
;
5548 translate_custom_table
[cnt
].custom_locale_rev
= locale_rev
;
5550 DHD_INFO(("%s: Relocale map - iso_aabrev %s"
5551 " custom locale %s custom locale rev %d\n",
5553 translate_custom_table
[cnt
].iso_abbrev
,
5554 translate_custom_table
[cnt
].custom_locale
,
5555 translate_custom_table
[cnt
].custom_locale_rev
));
5560 DHD_ERROR(("%s: CLM info data format is invalid\n", __FUNCTION__
));
5561 bcmerror
= BCME_ERROR
;
5568 if (clm_blob_path
) {
5569 MFREE(dhd
->osh
, clm_blob_path
, clm_blob_path_len
);
5572 dhd_free_download_buffer(dhd
, memblock
, MAX_CLMINFO_BUF_SIZE
);
5574 if (bcmerror
!= BCME_OK
) {
5575 DHD_ERROR(("%s: .clminfo parsing fail!!\n", __FUNCTION__
));
5580 #endif /* DHD_USE_CLMINFO_PARSER */
5582 void dhd_free_download_buffer(dhd_pub_t
*dhd
, void *buffer
, int length
)
5584 MFREE(dhd
->osh
, buffer
, length
);
5587 /* Parse EAPOL 4 way handshake messages */
5589 dhd_check_eapol_4way_message(char *dump_data
)
5592 int pair
, ack
, mic
, kerr
, req
, sec
, install
;
5593 unsigned short us_tmp
;
5594 type
= dump_data
[18];
5596 us_tmp
= (dump_data
[19] << 8) | dump_data
[20];
5597 pair
= 0 != (us_tmp
& 0x08);
5598 ack
= 0 != (us_tmp
& 0x80);
5599 mic
= 0 != (us_tmp
& 0x100);
5600 kerr
= 0 != (us_tmp
& 0x400);
5601 req
= 0 != (us_tmp
& 0x800);
5602 sec
= 0 != (us_tmp
& 0x200);
5603 install
= 0 != (us_tmp
& 0x40);
5604 if (!sec
&& !mic
&& ack
&& !install
&& pair
&& !kerr
&& !req
) {
5605 return EAPOL_4WAY_M1
;
5606 } else if (pair
&& !install
&& !ack
&& mic
&& !sec
&& !kerr
&& !req
) {
5607 return EAPOL_4WAY_M2
;
5608 } else if (pair
&& ack
&& mic
&& sec
&& !kerr
&& !req
) {
5609 return EAPOL_4WAY_M3
;
5610 } else if (pair
&& !install
&& !ack
&& mic
&& sec
&& !req
&& !kerr
) {
5611 return EAPOL_4WAY_M4
;
5620 #if defined(DHD_8021X_DUMP)
5621 #define EAP_PRINT(str) \
5622 DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \
5623 ifname, direction ? "TX" : "RX"));
5624 /* Parse EAPOL 4 way handshake messages */
5626 dhd_dump_eapol_4way_message(char *ifname
, unsigned char *dump_data
, bool direction
)
5630 type
= dump_data
[15];
5632 if ((dump_data
[22] == 1) && (dump_data
[18] == 1)) {
5633 EAP_PRINT("EAP Packet, Request, Identity");
5634 } else if ((dump_data
[22] == 1) && (dump_data
[18] == 2)) {
5635 EAP_PRINT("EAP Packet, Response, Identity");
5636 } else if (dump_data
[22] == 254) {
5637 if (dump_data
[30] == 1) {
5638 EAP_PRINT("EAP Packet, WSC Start");
5639 } else if (dump_data
[30] == 4) {
5640 if (dump_data
[41] == 4) {
5641 EAP_PRINT("EAP Packet, WPS M1");
5642 } else if (dump_data
[41] == 5) {
5643 EAP_PRINT("EAP Packet, WPS M2");
5644 } else if (dump_data
[41] == 7) {
5645 EAP_PRINT("EAP Packet, WPS M3");
5646 } else if (dump_data
[41] == 8) {
5647 EAP_PRINT("EAP Packet, WPS M4");
5648 } else if (dump_data
[41] == 9) {
5649 EAP_PRINT("EAP Packet, WPS M5");
5650 } else if (dump_data
[41] == 10) {
5651 EAP_PRINT("EAP Packet, WPS M6");
5652 } else if (dump_data
[41] == 11) {
5653 EAP_PRINT("EAP Packet, WPS M7");
5654 } else if (dump_data
[41] == 12) {
5655 EAP_PRINT("EAP Packet, WPS M8");
5657 } else if (dump_data
[30] == 5) {
5658 EAP_PRINT("EAP Packet, WSC Done");
5661 DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
5662 ifname
, direction
? "TX" : "RX",
5663 dump_data
[14], dump_data
[15], dump_data
[30]));
5665 } else if (type
== 3 && dump_data
[18] == 2) {
5666 switch (dhd_check_eapol_4way_message(dump_data
)) {
5668 EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
5671 EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
5674 EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
5677 EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
5680 DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
5681 ifname
, direction
? "TX" : "RX",
5682 dump_data
[14], dump_data
[15], dump_data
[30]));
5685 DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
5686 ifname
, direction
? "TX" : "RX",
5687 dump_data
[14], dump_data
[15], dump_data
[30]));
5690 #endif /* DHD_8021X_DUMP */
5692 #ifdef SHOW_LOGTRACE
5694 dhd_parse_logstrs_file(osl_t
*osh
, char *raw_fmts
, int logstrs_size
,
5695 dhd_event_log_t
*event_log
)
5697 uint32
*lognums
= NULL
;
5698 char *logstrs
= NULL
;
5699 logstr_trailer_t
*trailer
= NULL
;
5703 bool match_fail
= TRUE
;
5705 uint8
*pfw_id
= NULL
;
5710 uint32 hdr_logstrs_size
= 0;
5712 /* Read last three words in the logstrs.bin file */
5713 trailer
= (logstr_trailer_t
*) (raw_fmts
+ logstrs_size
-
5714 sizeof(logstr_trailer_t
));
5716 if (trailer
->log_magic
== LOGSTRS_MAGIC
) {
5718 * logstrs.bin has a header.
5720 if (trailer
->version
== 1) {
5721 logstr_header_v1_t
*hdr_v1
= (logstr_header_v1_t
*) (raw_fmts
+
5722 logstrs_size
- sizeof(logstr_header_v1_t
));
5723 DHD_INFO(("%s: logstr header version = %u\n",
5724 __FUNCTION__
, hdr_v1
->version
));
5725 num_fmts
= hdr_v1
->rom_logstrs_offset
/ sizeof(uint32
);
5726 ram_index
= (hdr_v1
->ram_lognums_offset
-
5727 hdr_v1
->rom_lognums_offset
) / sizeof(uint32
);
5728 lognums
= (uint32
*) &raw_fmts
[hdr_v1
->rom_lognums_offset
];
5729 logstrs
= (char *) &raw_fmts
[hdr_v1
->rom_logstrs_offset
];
5730 hdr_logstrs_size
= hdr_v1
->logstrs_size
;
5731 } else if (trailer
->version
== 2) {
5732 logstr_header_t
*hdr
= (logstr_header_t
*) (raw_fmts
+ logstrs_size
-
5733 sizeof(logstr_header_t
));
5734 DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
5735 __FUNCTION__
, hdr
->trailer
.version
, hdr
->trailer
.flags
));
5737 /* For ver. 2 of the header, need to match fwid of
5738 * both logstrs.bin and fw bin
5741 /* read the FWID from fw bin */
5742 file
= dhd_os_open_image1(NULL
, st_str_file_path
);
5744 DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__
));
5747 file_len
= dhd_os_get_image_size(file
);
5748 if (file_len
<= 0) {
5749 DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__
));
5752 /* fwid is at the end of fw bin in string format */
5753 if (dhd_os_seek_file(file
, file_len
- 32) < 0) {
5754 DHD_ERROR(("%s: can't seek file \n", __FUNCTION__
));
5758 memset(fwid_str
, 0, 64);
5759 if (dhd_os_get_image_block(fwid_str
, 32, file
) <= 0) {
5760 DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__
));
5763 pfw_id
= (uint8
*)bcmstrnstr(fwid_str
, 64, "FWID: 01-", 9);
5765 DHD_ERROR(("%s: could not find id in FW bin!\n", __FUNCTION__
));
5768 /* search for the '-' in the fw id str, after which the
5769 * actual 4 byte fw id is present
5771 while (pfw_id
&& *pfw_id
!= '-') {
5775 fwid
= bcm_strtoul((char *)pfw_id
, NULL
, 16);
5777 /* check if fw id in logstrs.bin matches the fw one */
5778 if (hdr
->trailer
.fw_id
!= fwid
) {
5779 DHD_ERROR(("%s: logstr id does not match FW!\n", __FUNCTION__
));
5784 num_fmts
= hdr
->rom_logstrs_offset
/ sizeof(uint32
);
5785 ram_index
= (hdr
->ram_lognums_offset
-
5786 hdr
->rom_lognums_offset
) / sizeof(uint32
);
5787 lognums
= (uint32
*) &raw_fmts
[hdr
->rom_lognums_offset
];
5788 logstrs
= (char *) &raw_fmts
[hdr
->rom_logstrs_offset
];
5789 hdr_logstrs_size
= hdr
->logstrs_size
;
5793 dhd_os_close_image1(NULL
, file
);
5799 DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__
,
5803 if (logstrs_size
!= hdr_logstrs_size
) {
5804 DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__
, hdr_logstrs_size
));
5809 * Legacy logstrs.bin format without header.
5811 num_fmts
= *((uint32
*) (raw_fmts
)) / sizeof(uint32
);
5813 /* Legacy RAM-only logstrs.bin format:
5814 * - RAM 'lognums' section
5815 * - RAM 'logstrs' section.
5817 * 'lognums' is an array of indexes for the strings in the
5818 * 'logstrs' section. The first uint32 is an index to the
5819 * start of 'logstrs'. Therefore, if this index is divided
5820 * by 'sizeof(uint32)' it provides the number of logstr
5824 lognums
= (uint32
*) raw_fmts
;
5825 logstrs
= (char *) &raw_fmts
[num_fmts
<< 2];
5828 fmts
= MALLOC(osh
, num_fmts
* sizeof(char *));
5830 DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__
));
5833 event_log
->fmts_size
= num_fmts
* sizeof(char *);
5835 for (i
= 0; i
< num_fmts
; i
++) {
5836 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
5837 * (they are 0-indexed relative to 'rom_logstrs_offset').
5839 * RAM lognums are already indexed to point to the correct RAM logstrs (they
5840 * are 0-indexed relative to the start of the logstrs.bin file).
5842 if (i
== ram_index
) {
5845 fmts
[i
] = &logstrs
[lognums
[i
]];
5847 event_log
->fmts
= fmts
;
5848 event_log
->raw_fmts_size
= logstrs_size
;
5849 event_log
->raw_fmts
= raw_fmts
;
5850 event_log
->num_fmts
= num_fmts
;
5852 } /* dhd_parse_logstrs_file */
5854 int dhd_parse_map_file(osl_t
*osh
, void *file
, uint32
*ramstart
, uint32
*rodata_start
,
5857 char *raw_fmts
= NULL
, *raw_fmts_loc
= NULL
;
5858 uint32 read_size
= READ_NUM_BYTES
;
5868 /* Allocate 1 byte more than read_size to terminate it with NULL */
5869 raw_fmts
= MALLOCZ(osh
, read_size
+ 1);
5870 if (raw_fmts
== NULL
) {
5871 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__
));
5875 /* read ram start, rodata_start and rodata_end values from map file */
5876 while (count
!= ALL_MAP_VAL
)
5878 error
= dhd_os_read_file(file
, raw_fmts
, read_size
);
5880 DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__
,
5885 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
5886 raw_fmts
[read_size
] = '\0';
5888 /* Get ramstart address */
5889 raw_fmts_loc
= raw_fmts
;
5890 if (!(count
& RAMSTART_BIT
) &&
5891 (cptr
= bcmstrnstr(raw_fmts_loc
, read_size
, ramstart_str
,
5892 strlen(ramstart_str
)))) {
5893 cptr
= cptr
- BYTES_AHEAD_NUM
;
5894 sscanf(cptr
, "%x %c text_start", ramstart
, &c
);
5895 count
|= RAMSTART_BIT
;
5898 /* Get ram rodata start address */
5899 raw_fmts_loc
= raw_fmts
;
5900 if (!(count
& RDSTART_BIT
) &&
5901 (cptr
= bcmstrnstr(raw_fmts_loc
, read_size
, rodata_start_str
,
5902 strlen(rodata_start_str
)))) {
5903 cptr
= cptr
- BYTES_AHEAD_NUM
;
5904 sscanf(cptr
, "%x %c rodata_start", rodata_start
, &c
);
5905 count
|= RDSTART_BIT
;
5908 /* Get ram rodata end address */
5909 raw_fmts_loc
= raw_fmts
;
5910 if (!(count
& RDEND_BIT
) &&
5911 (cptr
= bcmstrnstr(raw_fmts_loc
, read_size
, rodata_end_str
,
5912 strlen(rodata_end_str
)))) {
5913 cptr
= cptr
- BYTES_AHEAD_NUM
;
5914 sscanf(cptr
, "%x %c rodata_end", rodata_end
, &c
);
5918 if (error
< (int)read_size
) {
5920 * since we reset file pos back to earlier pos by
5921 * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
5922 * The reason for this is if string is spreaded across
5923 * bytes, the read function should not miss it.
5924 * So if ret value is less than read_size, reached EOF don't read further
5928 memset(raw_fmts
, 0, read_size
);
5930 * go back to predefined NUM of bytes so that we won't miss
5931 * the string and addr even if it comes as splited in next read.
5933 dhd_os_seek_file(file
, -GO_BACK_FILE_POS_NUM_BYTES
);
5938 MFREE(osh
, raw_fmts
, read_size
+ 1);
5941 if (count
== ALL_MAP_VAL
) {
5945 DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__
,
5950 } /* dhd_parse_map_file */
5952 #ifdef PCIE_FULL_DONGLE
5954 dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t
*dhdp
, void *pktbuf
,
5955 dhd_event_log_t
*event_data
)
5957 uint32 infobuf_version
;
5958 info_buf_payload_hdr_t
*payload_hdr_ptr
;
5959 uint16 payload_hdr_type
;
5960 uint16 payload_hdr_length
;
5962 DHD_TRACE(("%s:Enter\n", __FUNCTION__
));
5964 if (PKTLEN(dhdp
->osh
, pktbuf
) < sizeof(uint32
)) {
5965 DHD_ERROR(("%s: infobuf too small for version field\n",
5969 infobuf_version
= *((uint32
*)PKTDATA(dhdp
->osh
, pktbuf
));
5970 PKTPULL(dhdp
->osh
, pktbuf
, sizeof(uint32
));
5971 if (infobuf_version
!= PCIE_INFOBUF_V1
) {
5972 DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
5973 __FUNCTION__
, infobuf_version
));
5977 /* Version 1 infobuf has a single type/length (and then value) field */
5978 if (PKTLEN(dhdp
->osh
, pktbuf
) < sizeof(info_buf_payload_hdr_t
)) {
5979 DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
5983 /* Process/parse the common info payload header (type/length) */
5984 payload_hdr_ptr
= (info_buf_payload_hdr_t
*)PKTDATA(dhdp
->osh
, pktbuf
);
5985 payload_hdr_type
= ltoh16(payload_hdr_ptr
->type
);
5986 payload_hdr_length
= ltoh16(payload_hdr_ptr
->length
);
5987 if (payload_hdr_type
!= PCIE_INFOBUF_V1_TYPE_LOGTRACE
) {
5988 DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
5989 __FUNCTION__
, payload_hdr_type
));
5992 PKTPULL(dhdp
->osh
, pktbuf
, sizeof(info_buf_payload_hdr_t
));
5994 /* Validate that the specified length isn't bigger than the
5997 if (payload_hdr_length
> PKTLEN(dhdp
->osh
, pktbuf
)) {
5998 DHD_ERROR(("%s: infobuf logtrace length is bigger"
5999 " than actual buffer data\n", __FUNCTION__
));
6002 dhd_dbg_trace_evnt_handler(dhdp
, PKTDATA(dhdp
->osh
, pktbuf
),
6003 event_data
, payload_hdr_length
);
6009 } /* dhd_event_logtrace_infobuf_pkt_process */
6010 #endif /* PCIE_FULL_DONGLE */
6011 #endif /* SHOW_LOGTRACE */
6013 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
6015 /* To handle the TDLS event in the dhd_common.c
6017 int dhd_tdls_event_handler(dhd_pub_t
*dhd_pub
, wl_event_msg_t
*event
)
6020 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6021 #pragma GCC diagnostic push
6022 #pragma GCC diagnostic ignored "-Wcast-qual"
6024 ret
= dhd_tdls_update_peer_info(dhd_pub
, event
);
6025 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6026 #pragma GCC diagnostic pop
6031 int dhd_free_tdls_peer_list(dhd_pub_t
*dhd_pub
)
6033 tdls_peer_node_t
*cur
= NULL
, *prev
= NULL
;
6036 cur
= dhd_pub
->peer_tbl
.node
;
6038 if ((dhd_pub
->peer_tbl
.node
== NULL
) && !dhd_pub
->peer_tbl
.tdls_peer_count
)
6041 while (cur
!= NULL
) {
6044 MFREE(dhd_pub
->osh
, prev
, sizeof(tdls_peer_node_t
));
6046 dhd_pub
->peer_tbl
.tdls_peer_count
= 0;
6047 dhd_pub
->peer_tbl
.node
= NULL
;
6050 #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
6052 /* pretty hex print a contiguous buffer
6053 * based on the debug level specified
6056 dhd_prhex(const char *msg
, volatile uchar
*buf
, uint nbytes
, uint8 dbg_level
)
6059 int len
= sizeof(line
);
6063 if (msg
&& (msg
[0] != '\0')) {
6064 if (dbg_level
== DHD_ERROR_VAL
)
6065 DHD_ERROR(("%s:\n", msg
));
6066 else if (dbg_level
== DHD_INFO_VAL
)
6067 DHD_INFO(("%s:\n", msg
));
6068 else if (dbg_level
== DHD_TRACE_VAL
)
6069 DHD_TRACE(("%s:\n", msg
));
6073 for (i
= 0; i
< nbytes
; i
++) {
6075 nchar
= snprintf(p
, len
, " %04x: ", i
); /* line prefix */
6080 nchar
= snprintf(p
, len
, "%02x ", buf
[i
]);
6087 if (dbg_level
== DHD_ERROR_VAL
)
6088 DHD_ERROR(("%s:\n", line
));
6089 else if (dbg_level
== DHD_INFO_VAL
)
6090 DHD_INFO(("%s:\n", line
));
6091 else if (dbg_level
== DHD_TRACE_VAL
)
6092 DHD_TRACE(("%s:\n", line
));
6098 /* flush last partial line */
6100 if (dbg_level
== DHD_ERROR_VAL
)
6101 DHD_ERROR(("%s:\n", line
));
6102 else if (dbg_level
== DHD_INFO_VAL
)
6103 DHD_INFO(("%s:\n", line
));
6104 else if (dbg_level
== DHD_TRACE_VAL
)
6105 DHD_TRACE(("%s:\n", line
));
6109 #ifdef DUMP_IOCTL_IOV_LIST
6111 dhd_iov_li_append(dhd_pub_t
*dhd
, dll_t
*list_head
, dll_t
*node
)
6114 dhd_iov_li_t
*iov_li
;
6115 dhd
->dump_iovlist_len
++;
6117 if (dhd
->dump_iovlist_len
== IOV_LIST_MAX_LEN
+1) {
6118 item
= dll_head_p(list_head
);
6119 iov_li
= (dhd_iov_li_t
*)CONTAINEROF(item
, dhd_iov_li_t
, list
);
6121 MFREE(dhd
->osh
, iov_li
, sizeof(*iov_li
));
6122 dhd
->dump_iovlist_len
--;
6124 dll_append(list_head
, node
);
6128 dhd_iov_li_print(dll_t
*list_head
)
6130 dhd_iov_li_t
*iov_li
;
6133 for (item
= dll_head_p(list_head
); !dll_end(list_head
, item
); item
= next
) {
6134 next
= dll_next_p(item
);
6135 iov_li
= (dhd_iov_li_t
*)CONTAINEROF(item
, dhd_iov_li_t
, list
);
6136 DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index
, iov_li
->buff
, iov_li
->cmd
));
6141 dhd_iov_li_delete(dhd_pub_t
*dhd
, dll_t
*list_head
)
6144 dhd_iov_li_t
*iov_li
;
6145 while (!(dll_empty(list_head
))) {
6146 item
= dll_head_p(list_head
);
6147 iov_li
= (dhd_iov_li_t
*)CONTAINEROF(item
, dhd_iov_li_t
, list
);
6149 MFREE(dhd
->osh
, iov_li
, sizeof(*iov_li
));
6152 #endif /* DUMP_IOCTL_IOV_LIST */
6154 /* configuations of ecounters to be enabled by default in FW */
6155 static ecounters_cfg_t ecounters_cfg_tbl
[] = {
6156 /* Global ecounters */
6157 {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL
, 0x0, WL_IFSTATS_XTLV_BUS_PCIE
},
6158 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
6159 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
6161 /* Slice specific ecounters */
6162 {ECOUNTERS_STATS_TYPES_FLAG_SLICE
, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE
},
6163 {ECOUNTERS_STATS_TYPES_FLAG_SLICE
, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE
},
6164 {ECOUNTERS_STATS_TYPES_FLAG_SLICE
, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX
},
6166 /* Interface specific ecounters */
6167 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE
},
6168 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_GENERIC
},
6169 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC
},
6170 {ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_MGT_CNT
},
6172 /* secondary interface */
6175 static event_ecounters_cfg_t event_ecounters_cfg_tbl
[] = {
6176 /* Interface specific event ecounters */
6177 {WLC_E_LINK
, ECOUNTERS_STATS_TYPES_FLAG_IFACE
, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS
},
6180 /* Accepts an argument to -s, -g or -f and creates an XTLV */
6182 dhd_create_ecounters_params(dhd_pub_t
*dhd
, uint16 type
, uint16 if_slice_idx
,
6183 uint16 stats_rep
, uint8
**xtlv
)
6185 uint8
*req_xtlv
= NULL
;
6186 ecounters_stats_types_report_req_t
*req
;
6187 bcm_xtlvbuf_t xtlvbuf
, container_xtlvbuf
;
6188 ecountersv2_xtlv_list_elt_t temp
;
6189 uint16 xtlv_len
= 0, total_len
= 0;
6192 /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
6193 temp
.id
= stats_rep
;
6196 /* Hence len/data = 0/NULL */
6197 xtlv_len
+= temp
.len
+ BCM_XTLV_HDR_SIZE
;
6199 /* Total length of the container */
6200 total_len
= BCM_XTLV_HDR_SIZE
+
6201 OFFSETOF(ecounters_stats_types_report_req_t
, stats_types_req
) + xtlv_len
;
6203 /* Now allocate a structure for the entire request */
6204 if ((req_xtlv
= (uint8
*)MALLOCZ(dhd
->osh
, total_len
)) == NULL
) {
6209 /* container XTLV context */
6210 bcm_xtlv_buf_init(&container_xtlvbuf
, (uint8
*)req_xtlv
, total_len
,
6211 BCM_XTLV_OPTION_ALIGN32
);
6213 /* Fill other XTLVs in the container. Leave space for XTLV headers */
6214 req
= (ecounters_stats_types_report_req_t
*)(req_xtlv
+ BCM_XTLV_HDR_SIZE
);
6216 if (type
== ECOUNTERS_STATS_TYPES_FLAG_SLICE
) {
6217 req
->slice_mask
= 0x1 << if_slice_idx
;
6218 } else if (type
== ECOUNTERS_STATS_TYPES_FLAG_IFACE
) {
6219 req
->if_index
= if_slice_idx
;
6222 /* Fill remaining XTLVs */
6223 bcm_xtlv_buf_init(&xtlvbuf
, (uint8
*) req
->stats_types_req
, xtlv_len
,
6224 BCM_XTLV_OPTION_ALIGN32
);
6225 if (bcm_xtlv_put_data(&xtlvbuf
, temp
.id
, NULL
, temp
.len
)) {
6226 DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp
.id
));
6231 /* fill the top level container and get done with the XTLV container */
6232 rc
= bcm_xtlv_put_data(&container_xtlvbuf
, WL_ECOUNTERS_XTLV_REPORT_REQ
, NULL
,
6233 bcm_xtlv_buf_len(&xtlvbuf
) + OFFSETOF(ecounters_stats_types_report_req_t
,
6237 DHD_ERROR(("Error creating parent XTLV for type = %d\n", req
->flags
));
6242 if (rc
&& req_xtlv
) {
6243 MFREE(dhd
->osh
, req_xtlv
, total_len
);
6247 /* update the xtlv pointer */
6253 dhd_get_preserve_log_numbers(dhd_pub_t
*dhd
, uint32
*logset_mask
)
6255 wl_el_set_type_t logset_type
, logset_op
;
6256 int ret
= BCME_ERROR
;
6259 if (!dhd
|| !logset_mask
)
6263 memset(&logset_type
, 0, sizeof(logset_type
));
6264 memset(&logset_op
, 0, sizeof(logset_op
));
6265 logset_type
.version
= htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION
);
6266 logset_type
.len
= htod16(sizeof(wl_el_set_type_t
));
6267 for (i
= 0; i
< WL_MAX_PRESERVE_BUFFER
; i
++) {
6268 logset_type
.set
= i
;
6269 err
= dhd_iovar(dhd
, 0, "event_log_set_type", (char *)&logset_type
,
6270 sizeof(logset_type
), (char *)&logset_op
, sizeof(logset_op
), FALSE
);
6271 /* the iovar may return 'unsupported' error if a log set number is not present
6272 * in the fw, so we should not return on error !
6274 if (err
== BCME_OK
&&
6275 logset_op
.type
== EVENT_LOG_SET_TYPE_PRSRV
) {
6276 *logset_mask
|= 0x01u
<< i
;
6285 dhd_start_ecounters(dhd_pub_t
*dhd
)
6292 ecounters_config_request_v2_t
*req
= NULL
;
6293 ecountersv2_processed_xtlv_list_elt
*list_elt
, *tail
= NULL
;
6294 ecountersv2_processed_xtlv_list_elt
*processed_containers_list
= NULL
;
6295 uint16 total_processed_containers_len
= 0;
6297 rc
= dhd_iovar(dhd
, 0, "ecounters_autoconfig", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
6299 if (rc
!= BCME_UNSUPPORTED
)
6304 for (i
= 0; i
< ARRAYSIZE(ecounters_cfg_tbl
); i
++) {
6305 ecounters_cfg_t
*ecounter_stat
= &ecounters_cfg_tbl
[i
];
6307 if ((list_elt
= (ecountersv2_processed_xtlv_list_elt
*)
6308 MALLOCZ(dhd
->osh
, sizeof(*list_elt
))) == NULL
) {
6309 DHD_ERROR(("Ecounters v2: No memory to process\n"));
6313 rc
= dhd_create_ecounters_params(dhd
, ecounter_stat
->type
,
6314 ecounter_stat
->if_slice_idx
, ecounter_stat
->stats_rep
, &list_elt
->data
);
6317 DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
6318 ecounter_stat
->stats_rep
, rc
));
6320 /* Free allocated memory and go to fail to release any memories allocated
6321 * in previous iterations. Note that list_elt->data gets populated in
6322 * dhd_create_ecounters_params() and gets freed there itself.
6324 MFREE(dhd
->osh
, list_elt
, sizeof(*list_elt
));
6328 elt
= (bcm_xtlv_t
*) list_elt
->data
;
6330 /* Put the elements in the order they are processed */
6331 if (processed_containers_list
== NULL
) {
6332 processed_containers_list
= list_elt
;
6334 tail
->next
= list_elt
;
6337 /* Size of the XTLV returned */
6338 total_processed_containers_len
+= BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
;
6341 /* Now create ecounters config request with totallength */
6342 req
= (ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
, sizeof(*req
) +
6343 total_processed_containers_len
);
6350 req
->version
= ECOUNTERS_VERSION_2
;
6351 req
->logset
= EVENT_LOG_SET_ECOUNTERS
;
6352 req
->reporting_period
= ECOUNTERS_DEFAULT_PERIOD
;
6353 req
->num_reports
= ECOUNTERS_NUM_REPORTS
;
6354 req
->len
= total_processed_containers_len
+
6355 OFFSETOF(ecounters_config_request_v2_t
, ecounters_xtlvs
);
6358 start_ptr
= req
->ecounters_xtlvs
;
6360 /* Now go element by element in the list */
6361 while (processed_containers_list
) {
6362 list_elt
= processed_containers_list
;
6364 elt
= (bcm_xtlv_t
*)list_elt
->data
;
6366 memcpy(start_ptr
, list_elt
->data
, BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
6367 start_ptr
+= (size_t)(BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
6368 processed_containers_list
= processed_containers_list
->next
;
6370 /* Free allocated memories */
6371 MFREE(dhd
->osh
, elt
, elt
->len
+ BCM_XTLV_HDR_SIZE
);
6372 MFREE(dhd
->osh
, list_elt
, sizeof(*list_elt
));
6375 if ((rc
= dhd_iovar(dhd
, 0, "ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
)) < 0) {
6376 DHD_ERROR(("failed to start ecounters\n"));
6381 MFREE(dhd
->osh
, req
, sizeof(*req
) + total_processed_containers_len
);
6384 /* Now go element by element in the list */
6385 while (processed_containers_list
) {
6386 list_elt
= processed_containers_list
;
6387 elt
= (bcm_xtlv_t
*)list_elt
->data
;
6388 processed_containers_list
= processed_containers_list
->next
;
6390 /* Free allocated memories */
6391 MFREE(dhd
->osh
, elt
, elt
->len
+ BCM_XTLV_HDR_SIZE
);
6392 MFREE(dhd
->osh
, list_elt
, sizeof(*list_elt
));
6398 dhd_stop_ecounters(dhd_pub_t
*dhd
)
6401 ecounters_config_request_v2_t
*req
;
6403 /* Now create ecounters config request with totallength */
6404 req
= (ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
, sizeof(*req
));
6411 req
->version
= ECOUNTERS_VERSION_2
;
6412 req
->len
= OFFSETOF(ecounters_config_request_v2_t
, ecounters_xtlvs
);
6414 if ((rc
= dhd_iovar(dhd
, 0, "ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
)) < 0) {
6415 DHD_ERROR(("failed to stop ecounters\n"));
6420 MFREE(dhd
->osh
, req
, sizeof(*req
));
6425 /* configured event_id_array for event ecounters */
6426 typedef struct event_id_array
{
6431 /* get event id array only from event_ecounters_cfg_tbl[] */
6432 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t
*event_array
)
6436 int32 prev_evt_id
= -1;
6438 for (i
= 0; i
< (uint8
)ARRAYSIZE(event_ecounters_cfg_tbl
); i
++) {
6439 if (prev_evt_id
!= event_ecounters_cfg_tbl
[i
].event_id
) {
6440 if (prev_evt_id
>= 0)
6442 event_array
[idx
].event_id
= event_ecounters_cfg_tbl
[i
].event_id
;
6443 event_array
[idx
].str_idx
= i
;
6445 prev_evt_id
= event_ecounters_cfg_tbl
[i
].event_id
;
/* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */
#define ECNTRS_MAX_XTLV_NUM (31 * 2)
6454 dhd_start_event_ecounters(dhd_pub_t
*dhd
)
6457 uint8 event_id_cnt
= 0;
6458 uint16 processed_containers_len
= 0;
6459 uint16 max_xtlv_len
= 0;
6463 event_id_array_t
*id_array
;
6464 bcm_xtlv_t
*elt
= NULL
;
6465 event_ecounters_config_request_v2_t
*req
= NULL
;
6467 id_array
= (event_id_array_t
*)MALLOCZ(dhd
->osh
, sizeof(event_id_array_t
) *
6468 ARRAYSIZE(event_ecounters_cfg_tbl
));
6470 if (id_array
== NULL
) {
6474 event_id_cnt
= __dhd_event_ecounters_get_event_id_array(id_array
);
6476 max_xtlv_len
= ((BCM_XTLV_HDR_SIZE
+
6477 OFFSETOF(event_ecounters_config_request_v2_t
, ecounters_xtlvs
)) *
6478 ECNTRS_MAX_XTLV_NUM
);
6480 /* Now create ecounters config request with max allowed length */
6481 req
= (event_ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
,
6482 sizeof(event_ecounters_config_request_v2_t
*) + max_xtlv_len
);
6489 for (i
= 0; i
<= event_id_cnt
; i
++) {
6490 /* req initialization by event id */
6491 req
->version
= ECOUNTERS_VERSION_2
;
6492 req
->logset
= EVENT_LOG_SET_ECOUNTERS
;
6493 req
->event_id
= id_array
[i
].event_id
;
6494 req
->flags
= EVENT_ECOUNTERS_FLAGS_ADD
;
6496 processed_containers_len
= 0;
6499 ptr
= req
->ecounters_xtlvs
;
6501 for (j
= id_array
[i
].str_idx
; j
< (uint8
)ARRAYSIZE(event_ecounters_cfg_tbl
); j
++) {
6502 event_ecounters_cfg_t
*event_ecounter_stat
= &event_ecounters_cfg_tbl
[j
];
6503 if (id_array
[i
].event_id
!= event_ecounter_stat
->event_id
)
6506 rc
= dhd_create_ecounters_params(dhd
, event_ecounter_stat
->type
,
6507 event_ecounter_stat
->if_slice_idx
, event_ecounter_stat
->stats_rep
,
6511 DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
6512 __FUNCTION__
, event_ecounter_stat
->stats_rep
, rc
));
6516 elt
= (bcm_xtlv_t
*)data
;
6518 memcpy(ptr
, elt
, BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
6519 ptr
+= (size_t)(BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
);
6520 processed_containers_len
+= BCM_XTLV_LEN(elt
) + BCM_XTLV_HDR_SIZE
;
6522 /* Free allocated memories alloced by dhd_create_ecounters_params */
6523 MFREE(dhd
->osh
, elt
, elt
->len
+ BCM_XTLV_HDR_SIZE
);
6525 if (processed_containers_len
> max_xtlv_len
) {
6526 DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
6533 req
->len
= processed_containers_len
+
6534 OFFSETOF(event_ecounters_config_request_v2_t
, ecounters_xtlvs
);
6536 DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
6537 __FUNCTION__
, req
->version
, req
->logset
, req
->event_id
,
6538 req
->flags
, req
->len
));
6540 rc
= dhd_iovar(dhd
, 0, "event_ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
);
6543 DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
6544 req
->event_id
, rc
));
6550 /* Free allocated memories */
6552 MFREE(dhd
->osh
, req
, sizeof(event_ecounters_config_request_v2_t
*) + max_xtlv_len
);
6555 MFREE(dhd
->osh
, id_array
, sizeof(event_id_array_t
) *
6556 ARRAYSIZE(event_ecounters_cfg_tbl
));
6563 dhd_stop_event_ecounters(dhd_pub_t
*dhd
)
6566 event_ecounters_config_request_v2_t
*req
;
6568 /* Now create ecounters config request with totallength */
6569 req
= (event_ecounters_config_request_v2_t
*)MALLOCZ(dhd
->osh
, sizeof(*req
));
6576 req
->version
= ECOUNTERS_VERSION_2
;
6577 req
->flags
= EVENT_ECOUNTERS_FLAGS_DEL_ALL
;
6578 req
->len
= OFFSETOF(event_ecounters_config_request_v2_t
, ecounters_xtlvs
);
6580 if ((rc
= dhd_iovar(dhd
, 0, "event_ecounters", (char *)req
, req
->len
, NULL
, 0, TRUE
)) < 0) {
6581 DHD_ERROR(("failed to stop event_ecounters\n"));
6586 MFREE(dhd
->osh
, req
, sizeof(*req
));
6593 dhd_log_dump_ring_to_file(dhd_pub_t
*dhdp
, void *ring_ptr
, void *file
,
6594 unsigned long *file_posn
, log_dump_section_hdr_t
*sec_hdr
)
6597 uint32 data_len
= 0, total_len
= 0;
6599 unsigned long fpos_sechdr
= 0;
6600 unsigned long flags
= 0;
6602 dhd_dbg_ring_t
*ring
= (dhd_dbg_ring_t
*)ring_ptr
;
6604 if (!dhdp
|| !ring
|| !file
|| !sec_hdr
|| !file_posn
)
6607 /* do not allow further writes to the ring
6610 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
6611 ring
->state
= RING_SUSPEND
;
6612 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
6614 if (dhdp
->concise_dbg_buf
) {
6615 /* re-use concise debug buffer temporarily
6616 * to pull ring data, to write
6617 * record by record to file
6619 data_len
= CONCISE_DUMP_BUFLEN
;
6620 data
= dhdp
->concise_dbg_buf
;
6621 dhd_os_write_file_posn(file
, file_posn
, ECNTRS_LOG_HDR
,
6622 strlen(ECNTRS_LOG_HDR
));
6623 /* write the section header now with zero length,
6624 * once the correct length is found out, update
6627 fpos_sechdr
= *file_posn
;
6628 sec_hdr
->type
= LOG_DUMP_SECTION_ECNTRS
;
6629 sec_hdr
->length
= 0;
6630 dhd_os_write_file_posn(file
, file_posn
, (char *)sec_hdr
,
6633 rlen
= dhd_dbg_ring_pull_single(ring
, data
, data_len
, TRUE
);
6636 ret
= dhd_os_write_file_posn(file
, file_posn
, data
, rlen
);
6638 DHD_ERROR(("%s: write file error !\n", __FUNCTION__
));
6639 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
6640 ring
->state
= RING_ACTIVE
;
6641 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
6647 /* now update the section header length in the file */
6648 sec_hdr
->length
= total_len
;
6649 dhd_os_write_file_posn(file
, &fpos_sechdr
, (char *)sec_hdr
, sizeof(*sec_hdr
));
6651 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__
));
6654 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
6655 ring
->state
= RING_ACTIVE
;
6656 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
/* logdump cookie */
#define MAX_LOGUDMP_COOKIE_CNT	10u	/* ring capacity, in cookie slots */
#define LOGDUMP_COOKIE_STR_LEN	50u	/* fixed size of one cookie string slot */
6665 dhd_logdump_cookie_init(dhd_pub_t
*dhdp
, uint8
*buf
, uint32 buf_size
)
6669 if (!dhdp
|| !buf
) {
6670 DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp
, buf
));
6674 ring_size
= dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN
* MAX_LOGUDMP_COOKIE_CNT
;
6675 if (buf_size
< ring_size
) {
6676 DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
6677 ring_size
, buf_size
));
6681 dhdp
->logdump_cookie
= dhd_ring_init(buf
, buf_size
,
6682 LOGDUMP_COOKIE_STR_LEN
, MAX_LOGUDMP_COOKIE_CNT
);
6683 if (!dhdp
->logdump_cookie
) {
6684 DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
6692 dhd_logdump_cookie_deinit(dhd_pub_t
*dhdp
)
6697 if (dhdp
->logdump_cookie
) {
6698 dhd_ring_deinit(dhdp
->logdump_cookie
);
6705 dhd_logdump_cookie_save(dhd_pub_t
*dhdp
, char *cookie
, char *type
)
6709 if (!dhdp
|| !cookie
|| !type
|| !dhdp
->logdump_cookie
) {
6710 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
6711 " type = %p, cookie_cfg:%p\n", __FUNCTION__
,
6712 dhdp
, cookie
, type
, dhdp
?dhdp
->logdump_cookie
: NULL
));
6715 ptr
= (char *)dhd_ring_get_empty(dhdp
->logdump_cookie
);
6717 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__
));
6720 scnprintf(ptr
, LOGDUMP_COOKIE_STR_LEN
, "%s: %s\n", type
, cookie
);
6725 dhd_logdump_cookie_get(dhd_pub_t
*dhdp
, char *ret_cookie
, uint32 buf_size
)
6729 if (!dhdp
|| !ret_cookie
|| !dhdp
->logdump_cookie
) {
6730 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
6731 "cookie=%p cookie_cfg:%p\n", __FUNCTION__
,
6732 dhdp
, ret_cookie
, dhdp
?dhdp
->logdump_cookie
: NULL
));
6735 ptr
= (char *)dhd_ring_get_first(dhdp
->logdump_cookie
);
6737 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__
));
6740 memcpy(ret_cookie
, ptr
, MIN(buf_size
, strlen(ptr
)));
6741 dhd_ring_free_first(dhdp
->logdump_cookie
);
6746 dhd_logdump_cookie_count(dhd_pub_t
*dhdp
)
6748 if (!dhdp
|| !dhdp
->logdump_cookie
) {
6749 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
6750 __FUNCTION__
, dhdp
, dhdp
?dhdp
->logdump_cookie
: NULL
));
6753 return dhd_ring_get_cur_size(dhdp
->logdump_cookie
);
6757 __dhd_log_dump_cookie_to_file(
6758 dhd_pub_t
*dhdp
, void *fp
, unsigned long *f_pos
, char *buf
, uint32 buf_size
)
6761 uint32 remain
= buf_size
;
6762 int ret
= BCME_ERROR
;
6763 char tmp_buf
[LOGDUMP_COOKIE_STR_LEN
];
6764 log_dump_section_hdr_t sec_hdr
;
6765 while (dhd_logdump_cookie_count(dhdp
) > 0) {
6766 memset(tmp_buf
, 0, sizeof(tmp_buf
));
6767 ret
= dhd_logdump_cookie_get(dhdp
, tmp_buf
, LOGDUMP_COOKIE_STR_LEN
);
6768 if (ret
!= BCME_OK
) {
6771 remain
-= scnprintf(&buf
[buf_size
- remain
], remain
, "%s", tmp_buf
);
6773 ret
= dhd_os_write_file_posn(fp
, f_pos
, COOKIE_LOG_HDR
, strlen(COOKIE_LOG_HDR
));
6775 DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__
));
6778 sec_hdr
.magic
= LOG_DUMP_MAGIC
;
6779 sec_hdr
.timestamp
= local_clock();
6780 sec_hdr
.type
= LOG_DUMP_SECTION_COOKIE
;
6781 sec_hdr
.length
= buf_size
- remain
;
6782 ret
= dhd_os_write_file_posn(fp
, f_pos
, (char *)&sec_hdr
, sizeof(sec_hdr
));
6784 DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__
));
6788 ret
= dhd_os_write_file_posn(fp
, f_pos
, buf
, sec_hdr
.length
);
6790 DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__
));
6797 dhd_log_dump_cookie_to_file(dhd_pub_t
*dhdp
, void *fp
, unsigned long *f_pos
)
6800 int ret
= BCME_ERROR
;
6801 uint32 buf_size
= MAX_LOGUDMP_COOKIE_CNT
* LOGDUMP_COOKIE_STR_LEN
;
6803 if (!dhdp
|| !dhdp
->logdump_cookie
||!fp
|| !f_pos
) {
6804 DHD_ERROR(("%s At least one ptr is NULL "
6805 "dhdp = %p cookie %p fp = %p f_pos = %p\n",
6806 __FUNCTION__
, dhdp
, dhdp
?dhdp
->logdump_cookie
:NULL
, fp
, f_pos
));
6810 buf
= (char *)MALLOCZ(dhdp
->osh
, buf_size
);
6812 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__
));
6815 ret
= __dhd_log_dump_cookie_to_file(dhdp
, fp
, f_pos
, buf
, buf_size
);
6816 MFREE(dhdp
->osh
, buf
, buf_size
);
6821 #endif /* DHD_LOG_DUMP */
6825 dhd_log_dump_trigger(dhd_pub_t
*dhdp
, int subcmd
)
6827 log_dump_type_t
*flush_type
;
6830 DHD_ERROR(("dhdp is NULL !\n"));
6834 if (subcmd
>= CMD_MAX
|| subcmd
< CMD_DEFAULT
) {
6835 DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__
));
6839 flush_type
= MALLOCZ(dhdp
->osh
, sizeof(log_dump_type_t
));
6841 DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__
));
6844 clear_debug_dump_time(dhdp
->debug_dump_time_str
);
6845 #ifdef DHD_PCIE_RUNTIMEPM
6846 /* wake up RPM if SYSDUMP is triggered */
6847 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
6848 #endif /* DHD_PCIE_RUNTIMEPM */
6850 dhdp
->debug_dump_subcmd
= subcmd
;
6853 *flush_type
= DLD_BUF_TYPE_ALL
;
6854 dhd_schedule_log_dump(dhdp
, flush_type
);
6856 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
6857 dhdp
->memdump_type
= DUMP_TYPE_BY_SYSDUMP
;
6858 dhd_bus_mem_dump(dhdp
);
6859 #endif /* BCMPCIE && DHD_FW_COREDUMP */
6860 #ifdef DHD_PKT_LOGGING
6861 dhd_schedule_pktlog_dump(dhdp
);
6862 #endif /* DHD_PKT_LOGGING */
6864 #endif /* DHD_LOG_DUMP */
6866 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
6868 copy_hang_info_ioctl_timeout(dhd_pub_t
*dhd
, int ifidx
, wl_ioctl_t
*ioc
)
6875 uint32 ioc_dwlen
= 0;
6877 if (!dhd
|| !dhd
->hang_info
) {
6878 DHD_ERROR(("%s dhd=%p hang_info=%p\n",
6879 __FUNCTION__
, dhd
, (dhd
? dhd
->hang_info
: NULL
)));
6883 cnt
= &dhd
->hang_info_cnt
;
6884 dest
= dhd
->hang_info
;
6886 memset(dest
, 0, VENDOR_SEND_HANG_EXT_INFO_LEN
);
6890 remain_len
= VENDOR_SEND_HANG_EXT_INFO_LEN
- bytes_written
;
6892 get_debug_dump_time(dhd
->debug_dump_time_hang_str
);
6893 copy_debug_dump_time(dhd
->debug_dump_time_str
, dhd
->debug_dump_time_hang_str
);
6895 bytes_written
+= scnprintf(&dest
[bytes_written
], remain_len
, "%d %d %s %d %d %d %d %d %d ",
6896 HANG_REASON_IOCTL_RESP_TIMEOUT
, VENDOR_SEND_HANG_EXT_INFO_VER
,
6897 dhd
->debug_dump_time_hang_str
,
6898 ifidx
, ioc
->cmd
, ioc
->len
, ioc
->set
, ioc
->used
, ioc
->needed
);
6899 (*cnt
) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT
;
6901 clear_debug_dump_time(dhd
->debug_dump_time_hang_str
);
6903 /* Access ioc->buf only if the ioc->len is more than 4 bytes */
6904 ioc_dwlen
= (uint32
)(ioc
->len
/ sizeof(uint32
));
6905 if (ioc_dwlen
> 0) {
6906 const uint32
*ioc_buf
= (const uint32
*)ioc
->buf
;
6908 remain_len
= VENDOR_SEND_HANG_EXT_INFO_LEN
- bytes_written
;
6909 bytes_written
+= scnprintf(&dest
[bytes_written
], remain_len
,
6910 "%08x", *(uint32
*)(ioc_buf
++));
6912 if ((*cnt
) >= HANG_FIELD_CNT_MAX
) {
6916 for (i
= 1; i
< ioc_dwlen
&& *cnt
<= HANG_FIELD_CNT_MAX
;
6918 remain_len
= VENDOR_SEND_HANG_EXT_INFO_LEN
- bytes_written
;
6919 bytes_written
+= scnprintf(&dest
[bytes_written
], remain_len
, "%c%08x",
6920 HANG_RAW_DEL
, *(uint32
*)(ioc_buf
++));
6924 DHD_INFO(("%s hang info len: %d data: %s\n",
6925 __FUNCTION__
, (int)strlen(dhd
->hang_info
), dhd
->hang_info
));
6927 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */