/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 818247 2019-05-07 04:15:13Z $
 */
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
39 #ifdef PCIE_FULL_DONGLE
40 #include <bcmmsgbuf.h>
41 #endif /* PCIE_FULL_DONGLE */
43 #include <linux/init.h>
44 #include <linux/kernel.h>
45 #include <linux/slab.h>
46 #include <linux/skbuff.h>
47 #include <linux/netdevice.h>
48 #include <linux/inetdevice.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/etherdevice.h>
51 #include <linux/random.h>
52 #include <linux/spinlock.h>
53 #include <linux/ethtool.h>
54 #include <linux/fcntl.h>
57 #include <linux/reboot.h>
58 #include <linux/notifier.h>
59 #include <linux/irq.h>
60 #include <net/addrconf.h>
61 #ifdef ENABLE_ADAPTIVE_SCHED
62 #include <linux/cpufreq.h>
63 #endif /* ENABLE_ADAPTIVE_SCHED */
64 #include <linux/rtc.h>
66 #include <linux/namei.h>
67 #endif /* DHD_DUMP_MNGR */
68 #include <asm/uaccess.h>
69 #include <asm/unaligned.h>
70 #include <dhd_linux_priv.h>
74 #include <bcmendian.h>
83 #include <dhd_linux_wq.h>
85 #include <dhd_linux.h>
89 #ifdef PCIE_FULL_DONGLE
90 #include <dhd_flowring.h>
93 #include <dhd_proto.h>
95 #include <dhd_dbg_ring.h>
96 #include <dhd_debug.h>
97 #ifdef CONFIG_HAS_WAKELOCK
98 #include <linux/wakelock.h>
100 #if defined(WL_CFG80211)
101 #include <wl_cfg80211.h>
105 #endif /* WL_CFG80211 */
114 #include <linux/compat.h>
117 #if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
118 #include <linux/exynos-pci-ctrl.h>
119 #endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
123 #include <bcm_l2_filter.h>
124 #include <dhd_l2_filter.h>
125 #endif /* DHD_L2_FILTER */
128 #include <dhd_psta.h>
129 #endif /* DHD_PSTA */
131 #ifdef AMPDU_VO_ENABLE
133 #endif /* AMPDU_VO_ENABLE */
135 #if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
137 #endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
138 #include <dhd_daemon.h>
139 #ifdef DHD_PKT_LOGGING
140 #include <dhd_pktlog.h>
141 #endif /* DHD_PKT_LOGGING */
/* NOTE(review): this chunk is a garbled extraction — original line numbers are
 * fused into the text and logical lines are split; kept byte-identical below.
 */
/* Debug hook: callback type + registration for page-corruption detection
 * (only built when DHD_DEBUG_PAGEALLOC is defined).
 */
142 #ifdef DHD_DEBUG_PAGEALLOC
143 typedef void (*page_corrupt_cb_t
)(void *handle
, void *addr_corrupt
, size_t len
);
144 void dhd_page_corrupt_cb(void *handle
, void *addr_corrupt
, size_t len
);
145 extern void register_page_corrupt_cb(page_corrupt_cb_t cb
, void* handle
);
146 #endif /* DHD_DEBUG_PAGEALLOC */
/* Feature-gated forward declarations and tunables: TCP-SYN flood blocker,
 * load-balancer dispatchers, SoftAP random-MAC OUI, adaptive-sched CPU
 * frequency threshold, ARP-offload alias support, and per-radio STA limit.
 * Kept byte-identical (extraction artifact: fused line numbers, split lines).
 */
148 #define IP_PROT_RESERVED 0xFF
150 #ifdef DHDTCPSYNC_FLOOD_BLK
151 static void dhd_blk_tsfl_handler(struct work_struct
* work
);
152 #endif /* DHDTCPSYNC_FLOOD_BLK */
155 #include <dhd_linux_nfct.h>
156 #endif /* WL_NATOE */
159 extern bool ap_cfg_running
;
160 extern bool ap_fw_loaded
;
164 #if defined(DHD_LB_RXP)
165 static void dhd_rx_napi_dispatcher_fn(struct work_struct
* work
);
166 #endif /* DHD_LB_RXP */
167 #if defined(DHD_LB_TXP)
168 static void dhd_lb_tx_handler(unsigned long data
);
169 static void dhd_tx_dispatcher_work(struct work_struct
* work
);
170 static void dhd_tx_dispatcher_fn(dhd_pub_t
*dhdp
);
171 static void dhd_lb_tx_dispatch(dhd_pub_t
*dhdp
);
172 #endif /* DHD_LB_TXP */
175 #ifdef FIX_CPU_MIN_CLOCK
176 #include <linux/pm_qos.h>
177 #endif /* FIX_CPU_MIN_CLOCK */
179 #ifdef SET_RANDOM_MAC_SOFTAP
180 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
181 #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
183 static u32 vendor_oui
= CONFIG_DHD_SET_RANDOM_MAC_VAL
;
184 #endif /* SET_RANDOM_MAC_SOFTAP */
186 #ifdef ENABLE_ADAPTIVE_SCHED
187 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
188 #ifndef CUSTOM_CPUFREQ_THRESH
189 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
190 #endif /* CUSTOM_CPUFREQ_THRESH */
191 #endif /* ENABLE_ADAPTIVE_SCHED */
193 /* enable HOSTIP cache update from the host side when an eth0:N is up */
194 #define AOE_IP_ALIAS_SUPPORT 1
197 #include <wlfc_proto.h>
198 #include <dhd_wlfc.h>
201 #include <wl_android.h>
203 /* Maximum STA per radio */
204 #define DHD_MAX_STA 32
206 #ifdef DHD_EVENT_LOG_FILTER
207 #include <dhd_event_log_filter.h>
208 #endif /* DHD_EVENT_LOG_FILTER */
/* Host-DMA whitelist module parameters (wlreg_l/h = 64-bit region start,
 * wlreg_len_l/h = 64-bit region length; length 0 means "do not program the
 * whitelist into the dongle") plus WME priority-to-AC mapping tables.
 */
211 * Start of Host DMA whitelist region.
215 module_param(wlreg_l
, uint
, 0644);
216 module_param(wlreg_h
, uint
, 0644);
219 * Sizeof whitelist region. The dongle will allow DMA to only wlreg to wlreg+wlreg_len.
220 * If length of whitelist region is zero, host will not program whitelist region to dongle.
222 uint32 wlreg_len_h
= 0;
223 uint32 wlreg_len_l
= 0;
225 module_param(wlreg_len_l
, uint
, 0644);
226 module_param(wlreg_len_h
, uint
, 0644);
/* Map 802.1D priority -> WME FIFO -> access category. */
228 const uint8 wme_fifo2ac
[] = { 0, 1, 2, 3, 1, 1 };
229 const uint8 prio2fifo
[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
230 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
/* IPv4 (ARP offload) and IPv6 (NDO) address-change notifier blocks.
 * The *_registered flags guard against double registration, which would
 * corrupt the kernel notifier chain (see comment at original line 239).
 * NOTE(review): the closing "};" of both struct initializers (original lines
 * 238/250) was dropped by the extraction; kept byte-identical.
 */
232 #ifdef ARP_OFFLOAD_SUPPORT
233 void aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
);
234 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
235 unsigned long event
, void *ptr
);
236 static struct notifier_block dhd_inetaddr_notifier
= {
237 .notifier_call
= dhd_inetaddr_notifier_call
239 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
240 * created in kernel notifier link list (with 'next' pointing to itself)
242 static bool dhd_inetaddr_notifier_registered
= FALSE
;
243 #endif /* ARP_OFFLOAD_SUPPORT */
245 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
246 int dhd_inet6addr_notifier_call(struct notifier_block
*this,
247 unsigned long event
, void *ptr
);
248 static struct notifier_block dhd_inet6addr_notifier
= {
249 .notifier_call
= dhd_inet6addr_notifier_call
251 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
252 * created in kernel notifier link list (with 'next' pointing to itself)
254 static bool dhd_inet6addr_notifier_registered
= FALSE
;
255 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
/* Suspend/resume state, out-of-band interrupt control, HANG-event worker,
 * and module license declaration.
 */
257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
258 #include <linux/suspend.h>
/* Set while the MMC/SDIO host is suspended; polled by the DPC path. */
259 volatile bool dhd_mmc_suspend
= FALSE
;
260 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait
);
261 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
263 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
264 extern void dhd_enable_oob_intr(struct dhd_bus
*bus
, bool enable
);
265 #endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
266 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
267 static void dhd_hang_process(struct work_struct
*work_data
);
269 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
270 MODULE_LICENSE("GPL and additional rights");
271 #endif /* LinuxVer */
273 #ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
274 #define MAX_CONSECUTIVE_HANG_COUNTS 5
275 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
/* RX buffer sizing (128 extra bytes reserved when proptxstatus is built in),
 * wireless flow-control platform hooks, and dynamic SDIO F2 block size.
 */
283 #ifndef PROP_TXSTATUS
284 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
286 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
290 extern bool dhd_wlfc_skip_fc(void * dhdp
, uint8 idx
);
291 extern void dhd_wlfc_plat_init(void *dhd
);
292 extern void dhd_wlfc_plat_deinit(void *dhd
);
293 #endif /* PROP_TXSTATUS */
294 #ifdef USE_DYNAMIC_F2_BLKSIZE
295 extern uint sd_f2_blocksize
;
296 extern int dhdsdio_func_blocksize(dhd_pub_t
*dhd
, int function_num
, int block_size
);
297 #endif /* USE_DYNAMIC_F2_BLKSIZE */
299 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
305 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
/* Wireless-extensions globals, Samsung partial-suspend -> earlysuspend
 * aliasing, and Android Packet Filter (APF) helper prototypes.
 */
307 /* Linux wireless extension support */
308 #if defined(WL_WIRELESS_EXT)
310 extern wl_iw_extra_params_t g_wl_iw_params
;
311 #endif /* defined(WL_WIRELESS_EXT) */
313 #ifdef CONFIG_PARTIALSUSPEND_SLP
314 #include <linux/partialsuspend_slp.h>
315 #define CONFIG_HAS_EARLYSUSPEND
316 #define DHD_USE_EARLYSUSPEND
/* Route the earlysuspend API onto the platform's pre-suspend hooks. */
317 #define register_early_suspend register_pre_suspend
318 #define unregister_early_suspend unregister_pre_suspend
319 #define early_suspend pre_suspend
320 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
322 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
323 #include <linux/earlysuspend.h>
324 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
325 #endif /* CONFIG_PARTIALSUSPEND_SLP */
327 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
328 #include <linux/nl80211.h>
329 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
331 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
332 static int __dhd_apf_add_filter(struct net_device
*ndev
, uint32 filter_id
,
333 u8
* program
, uint32 program_len
);
334 static int __dhd_apf_config_filter(struct net_device
*ndev
, uint32 filter_id
,
335 uint32 mode
, uint32 enable
);
336 static int __dhd_apf_delete_filter(struct net_device
*ndev
, uint32 filter_id
);
337 #endif /* PKT_FILTER_SUPPORT && APF */
/* Samsung ARGOS throughput-notifier state (STA/P2P/MIMO) and the firmware
 * core-dump worker prototype.
 */
339 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
340 defined(ARGOS_NOTIFY_CB)
341 /* ARGOS notifier data */
342 static struct notifier_block argos_wifi
; /* STA */
343 static struct notifier_block argos_p2p
; /* P2P */
344 argos_rps_ctrl argos_rps_ctrl_data
;
345 #ifdef DYNAMIC_MUMIMO_CONTROL
346 argos_mumimo_ctrl argos_mumimo_ctrl_data
;
347 #ifdef CONFIG_SPLIT_ARGOS_SET
348 static struct notifier_block argos_mimo
; /* STA */
349 #endif /* CONFIG_SPLIT_ARGOS_SET */
350 #endif /* DYNAMIC_MUMIMO_CONTROL */
351 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
353 #ifdef DHD_FW_COREDUMP
354 static void dhd_mem_dump(void *dhd_info
, void *event_info
, u8 event
);
355 #endif /* DHD_FW_COREDUMP */
/* Log-dump infrastructure: one buffer per DLD type (general/preserve/special)
 * with matching section headers and maximum sizes, plus the dump worker
 * prototypes. NOTE(review): closing "};" lines of the initializers were
 * dropped by the extraction; tokens kept byte-identical.
 */
359 struct dhd_log_dump_buf g_dld_buf
[DLD_BUFFER_NUM
];
361 /* Only header for log dump buffers is stored in array
362 * header for sections like 'dhd dump', 'ext trap'
363 * etc, is not in the array, because they are not log
366 dld_hdr_t dld_hdrs
[DLD_BUFFER_NUM
] = {
367 {GENERAL_LOG_HDR
, LOG_DUMP_SECTION_GENERAL
},
368 {PRESERVE_LOG_HDR
, LOG_DUMP_SECTION_PRESERVE
},
369 {SPECIAL_LOG_HDR
, LOG_DUMP_SECTION_SPECIAL
}
372 static int dld_buf_size
[DLD_BUFFER_NUM
] = {
373 LOG_DUMP_GENERAL_MAX_BUFSIZE
, /* DLD_BUF_TYPE_GENERAL */
374 LOG_DUMP_PRESERVE_MAX_BUFSIZE
, /* DLD_BUF_TYPE_PRESERVE */
375 LOG_DUMP_SPECIAL_MAX_BUFSIZE
, /* DLD_BUF_TYPE_SPECIAL */
377 static void dhd_log_dump_init(dhd_pub_t
*dhd
);
378 static void dhd_log_dump_deinit(dhd_pub_t
*dhd
);
379 static void dhd_log_dump(void *handle
, void *event_info
, u8 event
);
380 static int do_dhd_log_dump(dhd_pub_t
*dhdp
, log_dump_type_t
*type
);
381 static void dhd_print_buf_addr(dhd_pub_t
*dhdp
, char *name
, void *buf
, unsigned int size
);
382 #endif /* DHD_LOG_DUMP */
384 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
385 #include <linux/workqueue.h>
386 #include <linux/pm_runtime.h>
387 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
/* Debug-UART helper prototypes, reboot notifier (blocks further activity
 * while the system is going down), and the global dhd_pub handle.
 */
389 #ifdef DHD_DEBUG_UART
390 #include <linux/kmod.h>
391 #define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
392 static void dhd_debug_uart_exec_rd(void *handle
, void *event_info
, u8 event
);
393 static void dhd_debug_uart_exec(dhd_pub_t
*dhdp
, char *cmd
);
394 #endif /* DHD_DEBUG_UART */
396 static int dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
);
397 static struct notifier_block dhd_reboot_notifier
= {
398 .notifier_call
= dhd_reboot_callback
,
/* Set once the reboot notifier fires; checked before heavyweight work. */
403 static int is_reboot
= 0;
406 dhd_pub_t
*g_dhd_pub
= NULL
;
/* BT-over-SDIO interface header, /proc trace file operations, module-exit
 * guard, and the netlink socket used to talk to the dhd daemon.
 */
408 #if defined(BT_OVER_SDIO)
409 #include "dhd_bt_interface.h"
410 #endif /* defined (BT_OVER_SDIO) */
413 static int dhd_trace_open_proc(struct inode
*inode
, struct file
*file
);
414 ssize_t
dhd_trace_read_proc(struct file
*file
, char *buffer
, size_t tt
, loff_t
*loff
);
416 static const struct file_operations proc_file_fops
= {
417 .read
= dhd_trace_read_proc
,
418 .open
= dhd_trace_open_proc
,
419 .release
= seq_release
,
424 bool dhd_is_static_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
);
425 #endif /* WL_STATIC_IF */
/* Prevents new work from being queued once module unload has started. */
427 atomic_t exit_in_progress
= ATOMIC_INIT(0);
429 static void dhd_process_daemon_msg(struct sk_buff
*skb
);
430 static void dhd_destroy_to_notifier_skt(void);
431 static int dhd_create_to_notifier_skt(void);
432 static struct sock
*nl_to_event_sk
= NULL
;
435 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
436 struct netlink_kernel_cfg dhd_netlink_cfg
= {
438 .input
= dhd_process_daemon_msg
,
440 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
/* Driver-init flags, firmware/nvram/clm path buffers (settable as module
 * parameters), and log-dump size tunables. When BT_OVER_SDIO is built,
 * dhd_driver_init_done starts TRUE because BT may have initialized the bus
 * first; otherwise it starts FALSE (the #else line was dropped by the
 * extraction).
 */
442 #if defined(BT_OVER_SDIO)
443 /* Flag to indicate if driver is initialized */
444 uint dhd_driver_init_done
= TRUE
;
446 /* Flag to indicate if driver is initialized */
447 uint dhd_driver_init_done
= FALSE
;
449 /* Flag to indicate if we should download firmware on driver load */
450 uint dhd_download_fw_on_driverload
= TRUE
;
452 /* Definitions to provide path to the firmware and nvram
453 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
455 char firmware_path
[MOD_PARAM_PATHLEN
];
456 char nvram_path
[MOD_PARAM_PATHLEN
];
457 char clm_path
[MOD_PARAM_PATHLEN
];
458 #ifdef DHD_UCODE_DOWNLOAD
459 char ucode_path
[MOD_PARAM_PATHLEN
];
460 #endif /* DHD_UCODE_DOWNLOAD */
462 module_param_string(clm_path
, clm_path
, MOD_PARAM_PATHLEN
, 0660);
464 /* backup buffer for firmware and nvram path */
465 char fw_bak_path
[MOD_PARAM_PATHLEN
];
466 char nv_bak_path
[MOD_PARAM_PATHLEN
];
468 /* information string to keep firmware, chio, cheip version info visible from log */
469 char info_string
[MOD_PARAM_INFOLEN
];
470 module_param_string(info_string
, info_string
, MOD_PARAM_INFOLEN
, 0444);
472 int disable_proptx
= 0;
473 module_param(op_mode
, int, 0644);
474 extern int wl_control_wl_start(struct net_device
*dev
);
475 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
476 struct semaphore dhd_registration_sem
;
477 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
480 int logdump_max_filesize
= LOG_DUMP_MAX_FILESIZE
;
481 module_param(logdump_max_filesize
, int, 0644);
482 int logdump_max_bufsize
= LOG_DUMP_GENERAL_MAX_BUFSIZE
;
483 module_param(logdump_max_bufsize
, int, 0644);
484 int logdump_prsrv_tailsize
= DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE
;
485 int logdump_periodic_flush
= FALSE
;
486 module_param(logdump_periodic_flush
, int, 0644);
487 #ifdef DEBUGABILITY_ECNTRS_LOGGING
488 int logdump_ecntr_enable
= TRUE
;
490 int logdump_ecntr_enable
= FALSE
;
491 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
492 module_param(logdump_ecntr_enable
, int, 0644);
493 #endif /* DHD_LOG_DUMP */
/* Deferred-work handler prototypes (interface add/delete, MAC and multicast
 * list updates, NATOE connection tracking, IPv6 work) plus helpers to map a
 * net_device back to its dhd interface.
 */
495 /* deferred handlers */
496 static void dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
);
497 static void dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
);
498 static void dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
);
499 static void dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
);
/* NOTE(review): "hanlder" typo is in the identifier itself; cannot be fixed
 * here without breaking callers elsewhere in the file.
 */
501 static void dhd_natoe_ct_event_hanlder(void *handle
, void *event_info
, u8 event
);
502 static void dhd_natoe_ct_ioctl_handler(void *handle
, void *event_info
, uint8 event
);
503 #endif /* WL_NATOE */
505 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
506 static void dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
);
507 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
509 extern void dhd_netdev_free(struct net_device
*ndev
);
510 #endif /* WL_CFG80211 */
511 static dhd_if_t
* dhd_get_ifp_by_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
);
513 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
514 /* update rx_pkt_chainable state of dhd interface */
515 static void dhd_update_rx_pkt_chainable_state(dhd_pub_t
* dhdp
, uint32 idx
);
516 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
/* Message-level, ARP-offload, proptx, firmware-path and event-forwarding
 * module parameters.
 */
519 module_param(dhd_msg_level
, int, 0);
521 #ifdef ARP_OFFLOAD_SUPPORT
522 /* ARP offload enable */
523 uint dhd_arp_enable
= TRUE
;
524 module_param(dhd_arp_enable
, uint
, 0);
526 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
528 #ifdef ENABLE_ARP_SNOOP_MODE
529 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
| ARP_OL_SNOOP
| ARP_OL_HOST_AUTO_REPLY
;
531 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
;
532 #endif /* ENABLE_ARP_SNOOP_MODE */
534 module_param(dhd_arp_mode
, uint
, 0);
535 #endif /* ARP_OFFLOAD_SUPPORT */
537 /* Disable Prop tx */
538 module_param(disable_proptx
, int, 0644);
539 /* load firmware and/or nvram values from the filesystem */
540 module_param_string(firmware_path
, firmware_path
, MOD_PARAM_PATHLEN
, 0660);
541 module_param_string(nvram_path
, nvram_path
, MOD_PARAM_PATHLEN
, 0660);
542 #ifdef DHD_UCODE_DOWNLOAD
543 module_param_string(ucode_path
, ucode_path
, MOD_PARAM_PATHLEN
, 0660);
544 #endif /* DHD_UCODE_DOWNLOAD */
546 /* wl event forwarding */
548 uint wl_event_enable
= true;
550 uint wl_event_enable
= false;
551 #endif /* WL_EVENT_ENAB */
554 /* logtrace packet sendup (comment was a copy-paste of the one above) */
552 module_param(wl_event_enable
, uint
, 0660);
555 #ifdef LOGTRACE_PKT_SENDUP
556 uint logtrace_pkt_sendup
= true;
558 uint logtrace_pkt_sendup
= false;
559 #endif /* LOGTRACE_PKT_SENDUP */
560 module_param(logtrace_pkt_sendup
, uint
, 0660);
/* Watchdog/console/runtime-PM intervals, packet-filter controls, thread
 * priorities, instance tracking, and PCIe full-dongle tunables — all
 * exported as module parameters.
 */
562 /* Watchdog interval */
563 /* extend watchdog expiration to 2 seconds when DPC is running */
564 #define WATCHDOG_EXTEND_INTERVAL (2000)
566 uint dhd_watchdog_ms
= CUSTOM_DHD_WATCHDOG_MS
;
567 module_param(dhd_watchdog_ms
, uint
, 0);
569 #ifdef DHD_PCIE_RUNTIMEPM
570 uint dhd_runtimepm_ms
= CUSTOM_DHD_RUNTIME_MS
;
571 #endif /* DHD_PCIE_RUNTIMEPM */
572 #if defined(DHD_DEBUG)
573 /* Console poll interval */
574 uint dhd_console_ms
= 0;
575 module_param(dhd_console_ms
, uint
, 0644);
577 uint dhd_console_ms
= 0;
578 #endif /* DHD_DEBUG */
580 uint dhd_slpauto
= TRUE
;
581 module_param(dhd_slpauto
, uint
, 0);
583 #ifdef PKT_FILTER_SUPPORT
584 /* Global Pkt filter enable control */
585 uint dhd_pkt_filter_enable
= TRUE
;
586 module_param(dhd_pkt_filter_enable
, uint
, 0);
589 /* Pkt filter init setup */
590 uint dhd_pkt_filter_init
= 0;
591 module_param(dhd_pkt_filter_init
, uint
, 0);
593 /* Pkt filter mode control */
594 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
595 uint dhd_master_mode
= FALSE
;
597 uint dhd_master_mode
= TRUE
;
598 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
599 module_param(dhd_master_mode
, uint
, 0);
601 int dhd_watchdog_prio
= 0;
602 module_param(dhd_watchdog_prio
, int, 0);
604 /* DPC thread priority */
605 int dhd_dpc_prio
= CUSTOM_DPC_PRIO_SETTING
;
606 module_param(dhd_dpc_prio
, int, 0);
608 /* RX frame thread priority */
609 int dhd_rxf_prio
= CUSTOM_RXF_PRIO_SETTING
;
610 module_param(dhd_rxf_prio
, int, 0);
612 #if !defined(BCMDHDUSB)
613 extern int dhd_dongle_ramsize
;
614 module_param(dhd_dongle_ramsize
, int, 0);
615 #endif /* BCMDHDUSB */
618 int passive_channel_skip
= 0;
619 module_param(passive_channel_skip
, int, (S_IRUSR
|S_IWUSR
));
620 #endif /* WL_CFG80211 */
622 #ifdef DHD_MSI_SUPPORT
623 uint enable_msi
= TRUE
;
624 module_param(enable_msi
, uint
, 0);
625 #endif /* DHD_MSI_SUPPORT */
627 /* Keep track of number of instances */
628 static int dhd_found
= 0;
629 static int instance_base
= 0; /* Starting instance number */
630 module_param(instance_base
, int, 0644);
632 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
633 static int dhd_napi_weight
= 32;
634 module_param(dhd_napi_weight
, int, 0644);
635 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
637 #ifdef PCIE_FULL_DONGLE
638 extern int h2d_max_txpost
;
639 module_param(h2d_max_txpost
, int, 0644);
641 extern uint dma_ring_indices
;
642 module_param(dma_ring_indices
, uint
, 0644);
644 extern bool h2d_phase
;
645 module_param(h2d_phase
, bool, 0644);
646 extern bool force_trap_bad_h2d_phase
;
647 module_param(force_trap_bad_h2d_phase
, bool, 0644);
648 #endif /* PCIE_FULL_DONGLE */
/* DHCP/ICMP packet-dump support: fragments of a BOOTP-style packet layout
 * (the enclosing struct's first/last lines were dropped by the extraction),
 * the standard BOOTP magic cookie (RFC 951/2131: 99.130.83.99), and
 * op/message-type name tables indexed by the on-wire values.
 */
652 struct iphdr ip_header
;
653 struct udphdr udp_header
;
658 uint32 transaction_id
;
665 uint8 hw_address
[16];
666 uint8 server_name
[64];
667 uint8 file_name
[128];
671 static const uint8 bootp_magic_cookie
[4] = { 99, 130, 83, 99 };
672 static const char dhcp_ops
[][10] = {
673 "NA", "REQUEST", "REPLY"
675 static const char dhcp_types
[][10] = {
676 "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
678 static void dhd_dhcp_dump(char *ifname
, uint8
*pktdata
, bool tx
);
679 #endif /* DHD_DHCP_DUMP */
682 #include <net/icmp.h>
683 static void dhd_icmp_dump(char *ifname
, uint8
*pktdata
, bool tx
);
684 #endif /* DHD_ICMP_DUMP */
/* SHOW_LOGTRACE support: paths to the firmware string/map files used to
 * decode event-log traces (platform path for HW4 debug builds, otherwise
 * /installmedia), exposed read-only as module parameters, plus the parsers
 * and the minidump / TCP-SYN-flood helper prototypes.
 */
687 #if defined(CUSTOMER_HW4_DEBUG)
688 static char *logstrs_path
= PLATFORM_PATH
"logstrs.bin";
689 char *st_str_file_path
= PLATFORM_PATH
"rtecdc.bin";
690 static char *map_file_path
= PLATFORM_PATH
"rtecdc.map";
691 static char *rom_st_str_file_path
= PLATFORM_PATH
"roml.bin";
692 static char *rom_map_file_path
= PLATFORM_PATH
"roml.map";
694 static char *logstrs_path
= "/installmedia/logstrs.bin";
695 char *st_str_file_path
= "/installmedia/rtecdc.bin";
696 static char *map_file_path
= "/installmedia/rtecdc.map";
697 static char *rom_st_str_file_path
= "/installmedia/roml.bin";
698 static char *rom_map_file_path
= "/installmedia/roml.map";
700 static char *ram_file_str
= "rtecdc";
701 static char *rom_file_str
= "roml";
703 module_param(logstrs_path
, charp
, S_IRUGO
);
704 module_param(st_str_file_path
, charp
, S_IRUGO
);
705 module_param(map_file_path
, charp
, S_IRUGO
);
706 module_param(rom_st_str_file_path
, charp
, S_IRUGO
);
707 module_param(rom_map_file_path
, charp
, S_IRUGO
);
709 static int dhd_init_logstrs_array(osl_t
*osh
, dhd_event_log_t
*temp
);
710 static int dhd_read_map(osl_t
*osh
, char *fname
, uint32
*ramstart
, uint32
*rodata_start
,
712 static int dhd_init_static_strs_array(osl_t
*osh
, dhd_event_log_t
*temp
, char *str_file
,
714 #endif /* SHOW_LOGTRACE */
717 void dhd_d2h_minidump(dhd_pub_t
*dhdp
);
718 #endif /* D2H_MINIDUMP */
720 #ifdef DHDTCPSYNC_FLOOD_BLK
721 extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t
*ifp
);
722 #endif /* DHDTCPSYNC_FLOOD_BLK */
/* Assign the load-balancer jobs to their default CPUs: NAPI RX on CPU 1,
 * RX/TX completion and TX on CPU 2, net-TX bookkeeping on CPU 0.
 * NOTE(review): the return-type line and the function braces were dropped by
 * the extraction; body kept byte-identical.
 */
727 dhd_lb_set_default_cpus(dhd_info_t
*dhd
)
729 /* Default CPU allocation for the jobs */
730 atomic_set(&dhd
->rx_napi_cpu
, 1);
731 atomic_set(&dhd
->rx_compl_cpu
, 2);
732 atomic_set(&dhd
->tx_compl_cpu
, 2);
733 atomic_set(&dhd
->tx_cpu
, 2);
734 atomic_set(&dhd
->net_tx_cpu
, 0);
/* Release all five cpumask_var_t allocations made by dhd_cpumasks_init().
 * free_cpumask_var() tolerates masks that were never allocated.
 * NOTE(review): return type and braces dropped by the extraction.
 */
738 dhd_cpumasks_deinit(dhd_info_t
*dhd
)
740 free_cpumask_var(dhd
->cpumask_curr_avail
);
741 free_cpumask_var(dhd
->cpumask_primary
);
742 free_cpumask_var(dhd
->cpumask_primary_new
);
743 free_cpumask_var(dhd
->cpumask_secondary
);
744 free_cpumask_var(dhd
->cpumask_secondary_new
);
/* Allocate the load-balancer cpumasks, seed the "currently available" mask
 * from cpu_online_mask, and populate primary (big) / secondary (little)
 * masks from the DHD_LB_*_CPUS bitmaps (bit i => CPU i; limited to 32 CPUs
 * because the bitmaps are 32-bit).
 * NOTE(review): several original lines are missing from this extraction —
 * the declaration of 'id', the error-path returns after the DHD_ERROR calls,
 * the num_cpus > 32 early-out body, loop closing braces, and the cleanup
 * label around the dhd_cpumasks_deinit() call. Tokens kept byte-identical.
 */
748 dhd_cpumasks_init(dhd_info_t
*dhd
)
751 uint32 cpus
, num_cpus
= num_possible_cpus();
754 DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__
,
755 DHD_LB_PRIMARY_CPUS
, DHD_LB_SECONDARY_CPUS
));
757 if (!alloc_cpumask_var(&dhd
->cpumask_curr_avail
, GFP_KERNEL
) ||
758 !alloc_cpumask_var(&dhd
->cpumask_primary
, GFP_KERNEL
) ||
759 !alloc_cpumask_var(&dhd
->cpumask_primary_new
, GFP_KERNEL
) ||
760 !alloc_cpumask_var(&dhd
->cpumask_secondary
, GFP_KERNEL
) ||
761 !alloc_cpumask_var(&dhd
->cpumask_secondary_new
, GFP_KERNEL
)) {
762 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__
));
767 cpumask_copy(dhd
->cpumask_curr_avail
, cpu_online_mask
);
768 cpumask_clear(dhd
->cpumask_primary
);
769 cpumask_clear(dhd
->cpumask_secondary
);
772 DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__
, num_cpus
));
776 cpus
= DHD_LB_PRIMARY_CPUS
;
777 for (id
= 0; id
< num_cpus
; id
++) {
778 if (isset(&cpus
, id
))
779 cpumask_set_cpu(id
, dhd
->cpumask_primary
);
782 cpus
= DHD_LB_SECONDARY_CPUS
;
783 for (id
= 0; id
< num_cpus
; id
++) {
784 if (isset(&cpus
, id
))
785 cpumask_set_cpu(id
, dhd
->cpumask_secondary
);
790 dhd_cpumasks_deinit(dhd
);
795 * The CPU Candidacy Algorithm
796 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
797 * The available CPUs for selection are divided into two groups
798 * Primary Set - A CPU mask that carries the First Choice CPUs
799 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
801 * There are two types of Job, that needs to be assigned to
802 * the CPUs, from one of the above mentioned CPU group. The Jobs are
803 * 1) Rx Packet Processing - napi_cpu
804 * 2) Completion Processing (Tx, RX) - compl_cpu
806 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
807 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
808 * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
809 * If there are more processors free, it assigns one to compl_cpu.
810 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
811 * CPU, as much as possible.
813 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
814 * would allow Tx completion skb's to be released into a local free pool from
815 * which the rx buffer posts could have been serviced. it is important to note
816 * that a Tx packet may not have a large enough buffer for rx posting.
/* NOTE(review): several original lines are missing from this extraction —
 * the fallback assignments after the ">= nr_cpu_ids" checks (original lines
 * 850/855/884/888 presumably reset the cpu to 0), the no-CPU-available early
 * return, and the function braces. Tokens kept byte-identical.
 */
818 void dhd_select_cpu_candidacy(dhd_info_t
*dhd
)
820 uint32 primary_available_cpus
; /* count of primary available cpus */
821 uint32 secondary_available_cpus
; /* count of secondary available cpus */
822 uint32 napi_cpu
= 0; /* cpu selected for napi rx processing */
823 uint32 compl_cpu
= 0; /* cpu selected for completion jobs */
824 uint32 tx_cpu
= 0; /* cpu selected for tx processing job */
826 cpumask_clear(dhd
->cpumask_primary_new
);
827 cpumask_clear(dhd
->cpumask_secondary_new
);
830 * Now select from the primary mask. Even if a Job is
831 * already running on a CPU in secondary group, we still move
832 * to primary CPU. So no conditional checks.
834 cpumask_and(dhd
->cpumask_primary_new
, dhd
->cpumask_primary
,
835 dhd
->cpumask_curr_avail
);
837 cpumask_and(dhd
->cpumask_secondary_new
, dhd
->cpumask_secondary
,
838 dhd
->cpumask_curr_avail
);
840 primary_available_cpus
= cpumask_weight(dhd
->cpumask_primary_new
);
842 if (primary_available_cpus
> 0) {
843 napi_cpu
= cpumask_first(dhd
->cpumask_primary_new
);
845 /* If no further CPU is available,
846 * cpumask_next returns >= nr_cpu_ids
848 tx_cpu
= cpumask_next(napi_cpu
, dhd
->cpumask_primary_new
);
849 if (tx_cpu
>= nr_cpu_ids
)
852 /* In case there are no more CPUs, do completions & Tx in same CPU */
853 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_primary_new
);
854 if (compl_cpu
>= nr_cpu_ids
)
858 DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
859 __FUNCTION__
, napi_cpu
, compl_cpu
, tx_cpu
));
861 /* -- Now check for the CPUs from the secondary mask -- */
862 secondary_available_cpus
= cpumask_weight(dhd
->cpumask_secondary_new
);
864 DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
865 __FUNCTION__
, secondary_available_cpus
, nr_cpu_ids
));
867 if (secondary_available_cpus
> 0) {
868 /* At this point if napi_cpu is unassigned it means no CPU
869 * is online from Primary Group
872 napi_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
873 tx_cpu
= cpumask_next(napi_cpu
, dhd
->cpumask_secondary_new
);
874 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_secondary_new
);
875 } else if (tx_cpu
== 0) {
876 tx_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
877 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_secondary_new
);
878 } else if (compl_cpu
== 0) {
879 compl_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
882 /* If no CPU was available for tx processing, choose CPU 0 */
883 if (tx_cpu
>= nr_cpu_ids
)
886 /* If no CPU was available for completion, choose CPU 0 */
887 if (compl_cpu
>= nr_cpu_ids
)
890 if ((primary_available_cpus
== 0) &&
891 (secondary_available_cpus
== 0)) {
892 /* No CPUs available from primary or secondary mask */
898 DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
899 __FUNCTION__
, napi_cpu
, compl_cpu
, tx_cpu
));
901 ASSERT(napi_cpu
< nr_cpu_ids
);
902 ASSERT(compl_cpu
< nr_cpu_ids
);
903 ASSERT(tx_cpu
< nr_cpu_ids
);
/* Publish the selections atomically; readers are the dispatch paths. */
905 atomic_set(&dhd
->rx_napi_cpu
, napi_cpu
);
906 atomic_set(&dhd
->tx_compl_cpu
, compl_cpu
);
907 atomic_set(&dhd
->rx_compl_cpu
, compl_cpu
);
908 atomic_set(&dhd
->tx_cpu
, tx_cpu
);
914 * Function to handle CPU Hotplug notifications.
915 * One of the task it does is to trigger the CPU Candidacy algorithm
916 * for load balancing.
/* On CPU online: count it, add it to the available mask, re-run candidacy.
 * On CPU down-prepare: count it, remove it, re-run candidacy.
 * NOTE(review): the "switch (action)" header, the CPU_ONLINE case label,
 * break/return statements and braces are missing from this extraction.
 */
919 dhd_cpu_callback(struct notifier_block
*nfb
, unsigned long action
, void *hcpu
)
921 unsigned long int cpu
= (unsigned long int)hcpu
;
923 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
924 #pragma GCC diagnostic push
925 #pragma GCC diagnostic ignored "-Wcast-qual"
/* Recover the driver instance from the embedded notifier block. */
927 dhd_info_t
*dhd
= container_of(nfb
, dhd_info_t
, cpu_notifier
);
928 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
929 #pragma GCC diagnostic pop
932 if (!dhd
|| !(dhd
->dhd_state
& DHD_ATTACH_STATE_LB_ATTACH_DONE
)) {
933 DHD_INFO(("%s(): LB data is not initialized yet.\n",
941 case CPU_ONLINE_FROZEN
:
942 DHD_LB_STATS_INCR(dhd
->cpu_online_cnt
[cpu
]);
943 cpumask_set_cpu(cpu
, dhd
->cpumask_curr_avail
);
944 dhd_select_cpu_candidacy(dhd
);
947 case CPU_DOWN_PREPARE
:
948 case CPU_DOWN_PREPARE_FROZEN
:
949 DHD_LB_STATS_INCR(dhd
->cpu_offline_cnt
[cpu
]);
950 cpumask_clear_cpu(cpu
, dhd
->cpumask_curr_avail
);
951 dhd_select_cpu_candidacy(dhd
);
/* Allocate and zero the per-CPU load-balancer statistics counters:
 * scheduling/run counts for NAPI RX, RX/TX completion and TX-post jobs,
 * CPU online/offline counts, and HIST_BIN_SIZE histograms per job type.
 * Each array holds one uint32 per possible CPU.
 * NOTE(review): error-path returns after the DHD_ERROR calls, the early
 * NULL-argument returns, and various closing braces were dropped by this
 * extraction. A malloc failure here appears to leave earlier allocations in
 * place — presumably dhd_lb_stats_deinit() reclaims them; confirm against
 * the full source. Tokens kept byte-identical.
 */
960 #if defined(DHD_LB_STATS)
961 void dhd_lb_stats_init(dhd_pub_t
*dhdp
)
964 int i
, j
, num_cpus
= num_possible_cpus();
965 int alloc_size
= sizeof(uint32
) * num_cpus
;
968 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
975 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
979 DHD_LB_STATS_CLR(dhd
->dhd_dpc_cnt
);
980 DHD_LB_STATS_CLR(dhd
->napi_sched_cnt
);
982 dhd
->napi_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
983 if (!dhd
->napi_percpu_run_cnt
) {
984 DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
988 for (i
= 0; i
< num_cpus
; i
++)
989 DHD_LB_STATS_CLR(dhd
->napi_percpu_run_cnt
[i
]);
991 DHD_LB_STATS_CLR(dhd
->rxc_sched_cnt
);
993 dhd
->rxc_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
994 if (!dhd
->rxc_percpu_run_cnt
) {
995 DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
999 for (i
= 0; i
< num_cpus
; i
++)
1000 DHD_LB_STATS_CLR(dhd
->rxc_percpu_run_cnt
[i
]);
1002 DHD_LB_STATS_CLR(dhd
->txc_sched_cnt
);
1004 dhd
->txc_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1005 if (!dhd
->txc_percpu_run_cnt
) {
1006 DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
1010 for (i
= 0; i
< num_cpus
; i
++)
1011 DHD_LB_STATS_CLR(dhd
->txc_percpu_run_cnt
[i
]);
1013 dhd
->cpu_online_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1014 if (!dhd
->cpu_online_cnt
) {
1015 DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
1019 for (i
= 0; i
< num_cpus
; i
++)
1020 DHD_LB_STATS_CLR(dhd
->cpu_online_cnt
[i
]);
1022 dhd
->cpu_offline_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1023 if (!dhd
->cpu_offline_cnt
) {
1024 DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
1028 for (i
= 0; i
< num_cpus
; i
++)
1029 DHD_LB_STATS_CLR(dhd
->cpu_offline_cnt
[i
]);
1031 dhd
->txp_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1032 if (!dhd
->txp_percpu_run_cnt
) {
1033 DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
1037 for (i
= 0; i
< num_cpus
; i
++)
1038 DHD_LB_STATS_CLR(dhd
->txp_percpu_run_cnt
[i
]);
1040 dhd
->tx_start_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1041 if (!dhd
->tx_start_percpu_run_cnt
) {
1042 DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
1046 for (i
= 0; i
< num_cpus
; i
++)
1047 DHD_LB_STATS_CLR(dhd
->tx_start_percpu_run_cnt
[i
]);
/* Per-bin, per-CPU histograms for each job type. */
1049 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1050 dhd
->napi_rx_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1051 if (!dhd
->napi_rx_hist
[j
]) {
1052 DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
1056 for (i
= 0; i
< num_cpus
; i
++) {
1057 DHD_LB_STATS_CLR(dhd
->napi_rx_hist
[j
][i
]);
1061 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1062 dhd
->txc_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1063 if (!dhd
->txc_hist
[j
]) {
1064 DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
1068 for (i
= 0; i
< num_cpus
; i
++) {
1069 DHD_LB_STATS_CLR(dhd
->txc_hist
[j
][i
]);
1072 #endif /* DHD_LB_TXC */
1074 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1075 dhd
->rxc_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1076 if (!dhd
->rxc_hist
[j
]) {
1077 DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
1081 for (i
= 0; i
< num_cpus
; i
++) {
1082 DHD_LB_STATS_CLR(dhd
->rxc_hist
[j
][i
]);
1085 #endif /* DHD_LB_RXC */
1089 void dhd_lb_stats_deinit(dhd_pub_t
*dhdp
)
1092 int j
, num_cpus
= num_possible_cpus();
1093 int alloc_size
= sizeof(uint32
) * num_cpus
;
1096 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
1103 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1107 if (dhd
->napi_percpu_run_cnt
) {
1108 MFREE(dhdp
->osh
, dhd
->napi_percpu_run_cnt
, alloc_size
);
1109 dhd
->napi_percpu_run_cnt
= NULL
;
1111 if (dhd
->rxc_percpu_run_cnt
) {
1112 MFREE(dhdp
->osh
, dhd
->rxc_percpu_run_cnt
, alloc_size
);
1113 dhd
->rxc_percpu_run_cnt
= NULL
;
1115 if (dhd
->txc_percpu_run_cnt
) {
1116 MFREE(dhdp
->osh
, dhd
->txc_percpu_run_cnt
, alloc_size
);
1117 dhd
->txc_percpu_run_cnt
= NULL
;
1119 if (dhd
->cpu_online_cnt
) {
1120 MFREE(dhdp
->osh
, dhd
->cpu_online_cnt
, alloc_size
);
1121 dhd
->cpu_online_cnt
= NULL
;
1123 if (dhd
->cpu_offline_cnt
) {
1124 MFREE(dhdp
->osh
, dhd
->cpu_offline_cnt
, alloc_size
);
1125 dhd
->cpu_offline_cnt
= NULL
;
1128 if (dhd
->txp_percpu_run_cnt
) {
1129 MFREE(dhdp
->osh
, dhd
->txp_percpu_run_cnt
, alloc_size
);
1130 dhd
->txp_percpu_run_cnt
= NULL
;
1132 if (dhd
->tx_start_percpu_run_cnt
) {
1133 MFREE(dhdp
->osh
, dhd
->tx_start_percpu_run_cnt
, alloc_size
);
1134 dhd
->tx_start_percpu_run_cnt
= NULL
;
1137 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1138 if (dhd
->napi_rx_hist
[j
]) {
1139 MFREE(dhdp
->osh
, dhd
->napi_rx_hist
[j
], alloc_size
);
1140 dhd
->napi_rx_hist
[j
] = NULL
;
1143 if (dhd
->txc_hist
[j
]) {
1144 MFREE(dhdp
->osh
, dhd
->txc_hist
[j
], alloc_size
);
1145 dhd
->txc_hist
[j
] = NULL
;
1147 #endif /* DHD_LB_TXC */
1149 if (dhd
->rxc_hist
[j
]) {
1150 MFREE(dhdp
->osh
, dhd
->rxc_hist
[j
], alloc_size
);
1151 dhd
->rxc_hist
[j
] = NULL
;
1153 #endif /* DHD_LB_RXC */
1159 static void dhd_lb_stats_dump_histo(dhd_pub_t
*dhdp
,
1160 struct bcmstrbuf
*strbuf
, uint32
**hist
)
1163 uint32
*per_cpu_total
;
1165 uint32 num_cpus
= num_possible_cpus();
1167 per_cpu_total
= (uint32
*)MALLOC(dhdp
->osh
, sizeof(uint32
) * num_cpus
);
1168 if (!per_cpu_total
) {
1169 DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__
));
1172 bzero(per_cpu_total
, sizeof(uint32
) * num_cpus
);
1174 bcm_bprintf(strbuf
, "CPU: \t\t");
1175 for (i
= 0; i
< num_cpus
; i
++)
1176 bcm_bprintf(strbuf
, "%d\t", i
);
1177 bcm_bprintf(strbuf
, "\nBin\n");
1179 for (i
= 0; i
< HIST_BIN_SIZE
; i
++) {
1180 bcm_bprintf(strbuf
, "%d:\t\t", 1<<i
);
1181 for (j
= 0; j
< num_cpus
; j
++) {
1182 bcm_bprintf(strbuf
, "%d\t", hist
[i
][j
]);
1184 bcm_bprintf(strbuf
, "\n");
1186 bcm_bprintf(strbuf
, "Per CPU Total \t");
1188 for (i
= 0; i
< num_cpus
; i
++) {
1189 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1190 per_cpu_total
[i
] += (hist
[j
][i
] * (1<<j
));
1192 bcm_bprintf(strbuf
, "%d\t", per_cpu_total
[i
]);
1193 total
+= per_cpu_total
[i
];
1195 bcm_bprintf(strbuf
, "\nTotal\t\t%d \n", total
);
1197 if (per_cpu_total
) {
1198 MFREE(dhdp
->osh
, per_cpu_total
, sizeof(uint32
) * num_cpus
);
1199 per_cpu_total
= NULL
;
1204 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf
*strbuf
, uint32
*p
)
1206 int i
, num_cpus
= num_possible_cpus();
1208 bcm_bprintf(strbuf
, "CPU: \t");
1209 for (i
= 0; i
< num_cpus
; i
++)
1210 bcm_bprintf(strbuf
, "%d\t", i
);
1211 bcm_bprintf(strbuf
, "\n");
1213 bcm_bprintf(strbuf
, "Val: \t");
1214 for (i
= 0; i
< num_cpus
; i
++)
1215 bcm_bprintf(strbuf
, "%u\t", *(p
+i
));
1216 bcm_bprintf(strbuf
, "\n");
1220 void dhd_lb_stats_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
1224 if (dhdp
== NULL
|| strbuf
== NULL
) {
1225 DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
1226 __FUNCTION__
, dhdp
, strbuf
));
1232 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1236 bcm_bprintf(strbuf
, "\ncpu_online_cnt:\n");
1237 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->cpu_online_cnt
);
1239 bcm_bprintf(strbuf
, "\ncpu_offline_cnt:\n");
1240 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->cpu_offline_cnt
);
1242 bcm_bprintf(strbuf
, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
1243 dhd
->dhd_dpc_cnt
, dhd
->napi_sched_cnt
, dhd
->rxc_sched_cnt
,
1244 dhd
->txc_sched_cnt
);
1247 bcm_bprintf(strbuf
, "\nnapi_percpu_run_cnt:\n");
1248 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->napi_percpu_run_cnt
);
1249 bcm_bprintf(strbuf
, "\nNAPI Packets Received Histogram:\n");
1250 dhd_lb_stats_dump_histo(dhdp
, strbuf
, dhd
->napi_rx_hist
);
1251 #endif /* DHD_LB_RXP */
1254 bcm_bprintf(strbuf
, "\nrxc_percpu_run_cnt:\n");
1255 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->rxc_percpu_run_cnt
);
1256 bcm_bprintf(strbuf
, "\nRX Completions (Buffer Post) Histogram:\n");
1257 dhd_lb_stats_dump_histo(dhdp
, strbuf
, dhd
->rxc_hist
);
1258 #endif /* DHD_LB_RXC */
1261 bcm_bprintf(strbuf
, "\ntxc_percpu_run_cnt:\n");
1262 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->txc_percpu_run_cnt
);
1263 bcm_bprintf(strbuf
, "\nTX Completions (Buffer Free) Histogram:\n");
1264 dhd_lb_stats_dump_histo(dhdp
, strbuf
, dhd
->txc_hist
);
1265 #endif /* DHD_LB_TXC */
1268 bcm_bprintf(strbuf
, "\ntxp_percpu_run_cnt:\n");
1269 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->txp_percpu_run_cnt
);
1271 bcm_bprintf(strbuf
, "\ntx_start_percpu_run_cnt:\n");
1272 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->tx_start_percpu_run_cnt
);
1273 #endif /* DHD_LB_TXP */
1276 /* Given a number 'n' returns 'm' that is next larger power of 2 after n */
1277 static inline uint32
next_larger_power2(uint32 num
)
1289 static void dhd_lb_stats_update_histo(uint32
**bin
, uint32 count
, uint32 cpu
)
1293 bin_power
= next_larger_power2(count
);
1295 switch (bin_power
) {
1296 case 1: p
= bin
[0] + cpu
; break;
1297 case 2: p
= bin
[1] + cpu
; break;
1298 case 4: p
= bin
[2] + cpu
; break;
1299 case 8: p
= bin
[3] + cpu
; break;
1300 case 16: p
= bin
[4] + cpu
; break;
1301 case 32: p
= bin
[5] + cpu
; break;
1302 case 64: p
= bin
[6] + cpu
; break;
1303 case 128: p
= bin
[7] + cpu
; break;
1304 default : p
= bin
[8] + cpu
; break;
1311 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t
*dhdp
, uint32 count
)
1314 dhd_info_t
*dhd
= dhdp
->info
;
1318 dhd_lb_stats_update_histo(dhd
->napi_rx_hist
, count
, cpu
);
1323 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t
*dhdp
, uint32 count
)
1326 dhd_info_t
*dhd
= dhdp
->info
;
1330 dhd_lb_stats_update_histo(dhd
->txc_hist
, count
, cpu
);
1335 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t
*dhdp
, uint32 count
)
1338 dhd_info_t
*dhd
= dhdp
->info
;
1342 dhd_lb_stats_update_histo(dhd
->rxc_hist
, count
, cpu
);
1347 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t
*dhdp
)
1349 dhd_info_t
*dhd
= dhdp
->info
;
1350 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txc_percpu_run_cnt
);
1353 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t
*dhdp
)
1355 dhd_info_t
*dhd
= dhdp
->info
;
1356 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->rxc_percpu_run_cnt
);
1358 #endif /* DHD_LB_STATS */
1362 #ifdef USE_WFA_CERT_CONF
1363 int g_frameburst
= 1;
1364 #endif /* USE_WFA_CERT_CONF */
1366 static int dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
);
/* DHD perimeter lock is only used in router builds with bypass forwarding;
 * in this build the lock operations compile to no-ops.
 */
#define DHD_PERIM_RADIO_INIT()             do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)     do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)   do { /* noop */ } while (0)
1373 #ifdef PCIE_FULL_DONGLE
1374 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1375 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1376 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1377 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1378 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1380 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1381 static struct list_head
* dhd_sta_list_snapshot(dhd_info_t
*dhd
, dhd_if_t
*ifp
,
1382 struct list_head
*snapshot_list
);
1383 static void dhd_sta_list_snapshot_free(dhd_info_t
*dhd
, struct list_head
*snapshot_list
);
1384 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1385 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1386 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1387 #endif /* PCIE_FULL_DONGLE */
1389 /* Control fw roaming */
1391 uint dhd_roam_disable
= 0;
1393 uint dhd_roam_disable
= 0;
1397 extern void dhd_dbgfs_init(dhd_pub_t
*dhdp
);
1398 extern void dhd_dbgfs_remove(void);
1401 static uint pcie_txs_metadata_enable
= 0; /* Enable TX status metadta report */
1402 module_param(pcie_txs_metadata_enable
, int, 0);
1404 /* Control radio state */
1405 uint dhd_radio_up
= 1;
1407 /* Network inteface name */
1408 char iface_name
[IFNAMSIZ
] = {'\0'};
1409 module_param_string(iface_name
, iface_name
, IFNAMSIZ
, 0);
1411 /* The following are specific to the SDIO dongle */
1413 /* IOCTL response timeout */
1414 int dhd_ioctl_timeout_msec
= IOCTL_RESP_TIMEOUT
;
1416 /* DS Exit response timeout */
1417 int ds_exit_timeout_msec
= DS_EXIT_TIMEOUT
;
1419 /* Idle timeout for backplane clock */
1420 int dhd_idletime
= DHD_IDLETIME_TICKS
;
1421 module_param(dhd_idletime
, int, 0);
1424 uint dhd_poll
= FALSE
;
1425 module_param(dhd_poll
, uint
, 0);
1427 /* Use interrupts */
1428 uint dhd_intr
= TRUE
;
1429 module_param(dhd_intr
, uint
, 0);
1431 /* SDIO Drive Strength (in milliamps) */
1432 uint dhd_sdiod_drive_strength
= 6;
1433 module_param(dhd_sdiod_drive_strength
, uint
, 0);
1437 extern uint dhd_txbound
;
1438 extern uint dhd_rxbound
;
1439 module_param(dhd_txbound
, uint
, 0);
1440 module_param(dhd_rxbound
, uint
, 0);
1442 /* Deferred transmits */
1443 extern uint dhd_deferred_tx
;
1444 module_param(dhd_deferred_tx
, uint
, 0);
1446 #endif /* BCMSDIO */
1449 /* Echo packet generator (pkts/s) */
1450 uint dhd_pktgen
= 0;
1451 module_param(dhd_pktgen
, uint
, 0);
1453 /* Echo packet len (0 => sawtooth, max 2040) */
1454 uint dhd_pktgen_len
= 0;
1455 module_param(dhd_pktgen_len
, uint
, 0);
1458 #if defined(BCMSUP_4WAY_HANDSHAKE)
1459 /* Use in dongle supplicant for 4-way handshake */
1460 #if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
1461 /* Enable idsup by default (if supported in fw) */
1462 uint dhd_use_idsup
= 1;
1464 uint dhd_use_idsup
= 0;
1465 #endif /* WLFBT || WL_ENABLE_IDSUP */
1466 module_param(dhd_use_idsup
, uint
, 0);
1467 #endif /* BCMSUP_4WAY_HANDSHAKE */
1469 /* Allow delayed firmware download for debug purpose */
1470 int allow_delay_fwdl
= FALSE
;
1471 module_param(allow_delay_fwdl
, int, 0);
1473 #ifdef ECOUNTER_PERIODIC_DISABLE
1474 uint enable_ecounter
= FALSE
;
1476 uint enable_ecounter
= TRUE
;
1478 module_param(enable_ecounter
, uint
, 0);
1480 extern char dhd_version
[];
1481 extern char fw_version
[];
1482 extern char clm_version
[];
1484 int dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
);
1485 static void dhd_net_if_lock_local(dhd_info_t
*dhd
);
1486 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
);
1487 static void dhd_suspend_lock(dhd_pub_t
*dhdp
);
1488 static void dhd_suspend_unlock(dhd_pub_t
*dhdp
);
1490 /* Monitor interface */
1491 int dhd_monitor_init(void *dhd_pub
);
1492 int dhd_monitor_uninit(void);
1494 #ifdef DHD_PM_CONTROL_FROM_FILE
1496 #ifdef DHD_EXPORT_CNTL_FILE
1498 #endif /* DHD_EXPORT_CNTL_FILE */
1499 void sec_control_pm(dhd_pub_t
*dhd
, uint
*);
1500 #endif /* DHD_PM_CONTROL_FROM_FILE */
1502 #if defined(WL_WIRELESS_EXT)
1503 struct iw_statistics
*dhd_get_wireless_stats(struct net_device
*dev
);
1504 #endif /* defined(WL_WIRELESS_EXT) */
1506 static void dhd_dpc(ulong data
);
1508 extern int dhd_wait_pend8021x(struct net_device
*dev
);
1509 void dhd_os_wd_timer_extend(void *bus
, bool extend
);
1513 #error TOE requires BDC
1515 static int dhd_toe_get(dhd_info_t
*dhd
, int idx
, uint32
*toe_ol
);
1516 static int dhd_toe_set(dhd_info_t
*dhd
, int idx
, uint32 toe_ol
);
1519 static int dhd_wl_host_event(dhd_info_t
*dhd
, int ifidx
, void *pktdata
, uint16 pktlen
,
1520 wl_event_msg_t
*event_ptr
, void **data_ptr
);
1522 #if defined(CONFIG_PM_SLEEP)
1523 static int dhd_pm_callback(struct notifier_block
*nfb
, unsigned long action
, void *ignored
)
1525 int ret
= NOTIFY_DONE
;
1526 bool suspend
= FALSE
;
1528 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1529 #pragma GCC diagnostic push
1530 #pragma GCC diagnostic ignored "-Wcast-qual"
1532 dhd_info_t
*dhdinfo
= (dhd_info_t
*)container_of(nfb
, struct dhd_info
, pm_notifier
);
1533 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1534 #pragma GCC diagnostic pop
1537 BCM_REFERENCE(dhdinfo
);
1538 BCM_REFERENCE(suspend
);
1541 case PM_HIBERNATION_PREPARE
:
1542 case PM_SUSPEND_PREPARE
:
1546 case PM_POST_HIBERNATION
:
1547 case PM_POST_SUSPEND
:
1552 #if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
1554 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo
->pub
);
1555 dhd_wlfc_suspend(&dhdinfo
->pub
);
1556 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo
->pub
);
1558 dhd_wlfc_resume(&dhdinfo
->pub
);
1560 #endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
1562 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
1563 KERNEL_VERSION(2, 6, 39))
1564 dhd_mmc_suspend
= suspend
;
1571 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
1572 * created in kernel notifier link list (with 'next' pointing to itself)
1574 static bool dhd_pm_notifier_registered
= FALSE
;
1576 extern int register_pm_notifier(struct notifier_block
*nb
);
1577 extern int unregister_pm_notifier(struct notifier_block
*nb
);
1578 #endif /* CONFIG_PM_SLEEP */
1580 /* Request scheduling of the bus rx frame */
1581 static void dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
);
1582 static void dhd_os_rxflock(dhd_pub_t
*pub
);
1583 static void dhd_os_rxfunlock(dhd_pub_t
*pub
);
1585 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1586 typedef struct dhd_dev_priv
{
1587 dhd_info_t
* dhd
; /* cached pointer to dhd_info in netdevice priv */
1588 dhd_if_t
* ifp
; /* cached pointer to dhd_if in netdevice priv */
1589 int ifidx
; /* interface index */
/* Accessors for the dhd_dev_priv_t embedded in a net_device's private area. */
#define DHD_DEV_PRIV_SIZE  (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)  ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)  (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)   (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev)  (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
1600 #if defined(DHD_OF_SUPPORT)
1601 extern int dhd_wlan_init(void);
1602 #endif /* defined(DHD_OF_SUPPORT) */
1603 /** Clear the dhd net_device's private structure. */
1605 dhd_dev_priv_clear(struct net_device
* dev
)
1607 dhd_dev_priv_t
* dev_priv
;
1608 ASSERT(dev
!= (struct net_device
*)NULL
);
1609 dev_priv
= DHD_DEV_PRIV(dev
);
1610 dev_priv
->dhd
= (dhd_info_t
*)NULL
;
1611 dev_priv
->ifp
= (dhd_if_t
*)NULL
;
1612 dev_priv
->ifidx
= DHD_BAD_IF
;
1613 dev_priv
->lkup
= (void *)NULL
;
1616 /** Setup the dhd net_device's private structure. */
1618 dhd_dev_priv_save(struct net_device
* dev
, dhd_info_t
* dhd
, dhd_if_t
* ifp
,
1621 dhd_dev_priv_t
* dev_priv
;
1622 ASSERT(dev
!= (struct net_device
*)NULL
);
1623 dev_priv
= DHD_DEV_PRIV(dev
);
1624 dev_priv
->dhd
= dhd
;
1625 dev_priv
->ifp
= ifp
;
1626 dev_priv
->ifidx
= ifidx
;
1629 /* Return interface pointer */
1630 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
)
1632 ASSERT(ifidx
< DHD_MAX_IFS
);
1634 if (ifidx
>= DHD_MAX_IFS
)
1637 return dhdp
->info
->iflist
[ifidx
];
1640 #ifdef PCIE_FULL_DONGLE
1642 /** Dummy objects are defined with state representing bad|down.
1643 * Performance gains from reducing branch conditionals, instruction parallelism,
1644 * dual issue, reducing load shadows, avail of larger pipelines.
1645 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
1646 * is accessed via the dhd_sta_t.
1649 /* Dummy dhd_info object */
1650 dhd_info_t dhd_info_null
= {
1652 .info
= &dhd_info_null
,
1653 #ifdef DHDTCPACK_SUPPRESS
1654 .tcpack_sup_mode
= TCPACK_SUP_REPLACE
,
1655 #endif /* DHDTCPACK_SUPPRESS */
1657 .busstate
= DHD_BUS_DOWN
1660 #define DHD_INFO_NULL (&dhd_info_null)
1661 #define DHD_PUB_NULL (&dhd_info_null.pub)
1663 /* Dummy netdevice object */
1664 struct net_device dhd_net_dev_null
= {
1665 .reg_state
= NETREG_UNREGISTERED
1667 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1669 /* Dummy dhd_if object */
1670 dhd_if_t dhd_if_null
= {
1672 .wmf
= { .wmf_enable
= TRUE
},
1674 .info
= DHD_INFO_NULL
,
1675 .net
= DHD_NET_DEV_NULL
,
1678 #define DHD_IF_NULL (&dhd_if_null)
1680 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
1682 /** Interface STA list management. */
1684 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1685 static void dhd_sta_free(dhd_pub_t
*pub
, dhd_sta_t
*sta
);
1686 static dhd_sta_t
* dhd_sta_alloc(dhd_pub_t
* dhdp
);
1688 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1689 static void dhd_if_del_sta_list(dhd_if_t
* ifp
);
1690 static void dhd_if_flush_sta(dhd_if_t
* ifp
);
1692 /* Construct/Destruct a sta pool. */
1693 static int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
);
1694 static void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
);
1695 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1696 static void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
);
1698 /** Reset a dhd_sta object and free into the dhd pool. */
1700 dhd_sta_free(dhd_pub_t
* dhdp
, dhd_sta_t
* sta
)
1704 ASSERT((sta
!= DHD_STA_NULL
) && (sta
->idx
!= ID16_INVALID
));
1706 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
1709 * Flush and free all packets in all flowring's queues belonging to sta.
1710 * Packets in flow ring will be flushed later.
1712 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
1713 uint16 flowid
= sta
->flowid
[prio
];
1715 if (flowid
!= FLOWID_INVALID
) {
1716 unsigned long flags
;
1717 flow_ring_node_t
* flow_ring_node
;
1719 #ifdef DHDTCPACK_SUPPRESS
1720 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
1721 * when there is a newly coming packet from network stack.
1723 dhd_tcpack_info_tbl_clean(dhdp
);
1724 #endif /* DHDTCPACK_SUPPRESS */
1726 flow_ring_node
= dhd_flow_ring_node(dhdp
, flowid
);
1727 if (flow_ring_node
) {
1728 flow_queue_t
*queue
= &flow_ring_node
->queue
;
1730 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
1731 flow_ring_node
->status
= FLOW_RING_STATUS_STA_FREEING
;
1733 if (!DHD_FLOW_QUEUE_EMPTY(queue
)) {
1735 while ((pkt
= dhd_flow_queue_dequeue(dhdp
, queue
)) !=
1737 PKTFREE(dhdp
->osh
, pkt
, TRUE
);
1741 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
1742 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
1746 sta
->flowid
[prio
] = FLOWID_INVALID
;
1749 id16_map_free(dhdp
->staid_allocator
, sta
->idx
);
1750 DHD_CUMM_CTR_INIT(&sta
->cumm_ctr
);
1751 sta
->ifp
= DHD_IF_NULL
; /* dummy dhd_if object */
1752 sta
->ifidx
= DHD_BAD_IF
;
1753 bzero(sta
->ea
.octet
, ETHER_ADDR_LEN
);
1754 INIT_LIST_HEAD(&sta
->list
);
1755 sta
->idx
= ID16_INVALID
; /* implying free */
1758 /** Allocate a dhd_sta object from the dhd pool. */
1760 dhd_sta_alloc(dhd_pub_t
* dhdp
)
1764 dhd_sta_pool_t
* sta_pool
;
1766 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
1768 idx
= id16_map_alloc(dhdp
->staid_allocator
);
1769 if (idx
== ID16_INVALID
) {
1770 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__
));
1771 return DHD_STA_NULL
;
1774 sta_pool
= (dhd_sta_pool_t
*)(dhdp
->sta_pool
);
1775 sta
= &sta_pool
[idx
];
1777 ASSERT((sta
->idx
== ID16_INVALID
) &&
1778 (sta
->ifp
== DHD_IF_NULL
) && (sta
->ifidx
== DHD_BAD_IF
));
1780 DHD_CUMM_CTR_INIT(&sta
->cumm_ctr
);
1782 sta
->idx
= idx
; /* implying allocated */
1787 /** Delete all STAs in an interface's STA list. */
1789 dhd_if_del_sta_list(dhd_if_t
*ifp
)
1791 dhd_sta_t
*sta
, *next
;
1792 unsigned long flags
;
1794 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1795 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1796 #pragma GCC diagnostic push
1797 #pragma GCC diagnostic ignored "-Wcast-qual"
1799 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1800 list_del(&sta
->list
);
1801 dhd_sta_free(&ifp
->info
->pub
, sta
);
1803 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1804 #pragma GCC diagnostic pop
1806 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1811 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1813 dhd_if_flush_sta(dhd_if_t
* ifp
)
1817 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1819 dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
)
1821 int idx
, prio
, sta_pool_memsz
;
1823 dhd_sta_pool_t
* sta_pool
;
1824 void * staid_allocator
;
1826 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
1827 ASSERT((dhdp
->staid_allocator
== NULL
) && (dhdp
->sta_pool
== NULL
));
1829 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1830 staid_allocator
= id16_map_init(dhdp
->osh
, max_sta
, 1);
1831 if (staid_allocator
== NULL
) {
1832 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__
));
1836 /* Pre allocate a pool of dhd_sta objects (one extra). */
1837 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
)); /* skip idx 0 */
1838 sta_pool
= (dhd_sta_pool_t
*)MALLOC(dhdp
->osh
, sta_pool_memsz
);
1839 if (sta_pool
== NULL
) {
1840 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__
));
1841 id16_map_fini(dhdp
->osh
, staid_allocator
);
1845 dhdp
->sta_pool
= sta_pool
;
1846 dhdp
->staid_allocator
= staid_allocator
;
1848 /* Initialize all sta(s) for the pre-allocated free pool. */
1849 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1850 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1851 sta
= &sta_pool
[idx
];
1852 sta
->idx
= id16_map_alloc(staid_allocator
);
1853 ASSERT(sta
->idx
<= max_sta
);
1856 /* Now place them into the pre-allocated free pool. */
1857 for (idx
= 1; idx
<= max_sta
; idx
++) {
1858 sta
= &sta_pool
[idx
];
1859 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
1860 sta
->flowid
[prio
] = FLOWID_INVALID
; /* Flow rings do not exist */
1862 dhd_sta_free(dhdp
, sta
);
1868 /** Destruct the pool of dhd_sta_t objects.
1869 * Caller must ensure that no STA objects are currently associated with an if.
1872 dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
)
1874 dhd_sta_pool_t
* sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1878 int sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1879 for (idx
= 1; idx
<= max_sta
; idx
++) {
1880 ASSERT(sta_pool
[idx
].ifp
== DHD_IF_NULL
);
1881 ASSERT(sta_pool
[idx
].idx
== ID16_INVALID
);
1883 MFREE(dhdp
->osh
, dhdp
->sta_pool
, sta_pool_memsz
);
1884 dhdp
->sta_pool
= NULL
;
1887 id16_map_fini(dhdp
->osh
, dhdp
->staid_allocator
);
1888 dhdp
->staid_allocator
= NULL
;
1891 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1893 dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
)
1895 int idx
, prio
, sta_pool_memsz
;
1897 dhd_sta_pool_t
* sta_pool
;
1898 void *staid_allocator
;
1901 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
1905 sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1906 staid_allocator
= dhdp
->staid_allocator
;
1909 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__
));
1913 if (!staid_allocator
) {
1914 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__
));
1918 /* clear free pool */
1919 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1920 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1922 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1923 id16_map_clear(staid_allocator
, max_sta
, 1);
1925 /* Initialize all sta(s) for the pre-allocated free pool. */
1926 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1927 sta
= &sta_pool
[idx
];
1928 sta
->idx
= id16_map_alloc(staid_allocator
);
1929 ASSERT(sta
->idx
<= max_sta
);
1931 /* Now place them into the pre-allocated free pool. */
1932 for (idx
= 1; idx
<= max_sta
; idx
++) {
1933 sta
= &sta_pool
[idx
];
1934 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
1935 sta
->flowid
[prio
] = FLOWID_INVALID
; /* Flow rings do not exist */
1937 dhd_sta_free(dhdp
, sta
);
1941 /** Find STA with MAC address ea in an interface's STA list. */
1943 dhd_find_sta(void *pub
, int ifidx
, void *ea
)
1947 unsigned long flags
;
1950 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1952 return DHD_STA_NULL
;
1954 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1955 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1956 #pragma GCC diagnostic push
1957 #pragma GCC diagnostic ignored "-Wcast-qual"
1959 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
1960 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
1961 DHD_INFO(("%s: Found STA " MACDBG
"\n",
1962 __FUNCTION__
, MAC2STRDBG((char *)ea
)));
1963 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1967 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1968 #pragma GCC diagnostic pop
1970 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1972 return DHD_STA_NULL
;
1975 /** Add STA into the interface's STA list. */
1977 dhd_add_sta(void *pub
, int ifidx
, void *ea
)
1981 unsigned long flags
;
1984 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1986 return DHD_STA_NULL
;
1988 if (!memcmp(ifp
->net
->dev_addr
, ea
, ETHER_ADDR_LEN
)) {
1989 DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__
, ea
));
1990 return DHD_STA_NULL
;
1993 sta
= dhd_sta_alloc((dhd_pub_t
*)pub
);
1994 if (sta
== DHD_STA_NULL
) {
1995 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__
));
1996 return DHD_STA_NULL
;
1999 memcpy(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
);
2001 /* link the sta and the dhd interface */
2004 INIT_LIST_HEAD(&sta
->list
);
2006 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2008 list_add_tail(&sta
->list
, &ifp
->sta_list
);
2010 DHD_ERROR(("%s: Adding STA " MACDBG
"\n",
2011 __FUNCTION__
, MAC2STRDBG((char *)ea
)));
2013 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2018 /** Delete all STAs from the interface's STA list. */
2020 dhd_del_all_sta(void *pub
, int ifidx
)
2022 dhd_sta_t
*sta
, *next
;
2024 unsigned long flags
;
2026 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2030 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2031 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2032 #pragma GCC diagnostic push
2033 #pragma GCC diagnostic ignored "-Wcast-qual"
2035 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2037 list_del(&sta
->list
);
2038 dhd_sta_free(&ifp
->info
->pub
, sta
);
2039 #ifdef DHD_L2_FILTER
2040 if (ifp
->parp_enable
) {
2041 /* clear Proxy ARP cache of specific Ethernet Address */
2042 bcm_l2_filter_arp_table_update(((dhd_pub_t
*)pub
)->osh
,
2043 ifp
->phnd_arp_table
, FALSE
,
2044 sta
->ea
.octet
, FALSE
, ((dhd_pub_t
*)pub
)->tickcnt
);
2046 #endif /* DHD_L2_FILTER */
2048 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2049 #pragma GCC diagnostic pop
2051 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2056 /** Delete STA from the interface's STA list. */
2058 dhd_del_sta(void *pub
, int ifidx
, void *ea
)
2060 dhd_sta_t
*sta
, *next
;
2062 unsigned long flags
;
2065 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2069 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2070 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2071 #pragma GCC diagnostic push
2072 #pragma GCC diagnostic ignored "-Wcast-qual"
2074 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2075 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
2076 DHD_ERROR(("%s: Deleting STA " MACDBG
"\n",
2077 __FUNCTION__
, MAC2STRDBG(sta
->ea
.octet
)));
2078 list_del(&sta
->list
);
2079 dhd_sta_free(&ifp
->info
->pub
, sta
);
2082 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2083 #pragma GCC diagnostic pop
2085 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2086 #ifdef DHD_L2_FILTER
2087 if (ifp
->parp_enable
) {
2088 /* clear Proxy ARP cache of specific Ethernet Address */
2089 bcm_l2_filter_arp_table_update(((dhd_pub_t
*)pub
)->osh
, ifp
->phnd_arp_table
, FALSE
,
2090 ea
, FALSE
, ((dhd_pub_t
*)pub
)->tickcnt
);
2092 #endif /* DHD_L2_FILTER */
2096 /** Add STA if it doesn't exist. Not reentrant. */
2098 dhd_findadd_sta(void *pub
, int ifidx
, void *ea
)
2102 sta
= dhd_find_sta(pub
, ifidx
, ea
);
2106 sta
= dhd_add_sta(pub
, ifidx
, ea
);
2112 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2113 static struct list_head
*
2114 dhd_sta_list_snapshot(dhd_info_t
*dhd
, dhd_if_t
*ifp
, struct list_head
*snapshot_list
)
2116 unsigned long flags
;
2117 dhd_sta_t
*sta
, *snapshot
;
2119 INIT_LIST_HEAD(snapshot_list
);
2121 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2123 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
2124 /* allocate one and add to snapshot */
2125 snapshot
= (dhd_sta_t
*)MALLOC(dhd
->pub
.osh
, sizeof(dhd_sta_t
));
2126 if (snapshot
== NULL
) {
2127 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__
));
2131 memcpy(snapshot
->ea
.octet
, sta
->ea
.octet
, ETHER_ADDR_LEN
);
2133 INIT_LIST_HEAD(&snapshot
->list
);
2134 list_add_tail(&snapshot
->list
, snapshot_list
);
2137 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2139 return snapshot_list
;
2143 dhd_sta_list_snapshot_free(dhd_info_t
*dhd
, struct list_head
*snapshot_list
)
2145 dhd_sta_t
*sta
, *next
;
2147 list_for_each_entry_safe(sta
, next
, snapshot_list
, list
) {
2148 list_del(&sta
->list
);
2149 MFREE(dhd
->pub
.osh
, sta
, sizeof(dhd_sta_t
));
2152 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2155 static inline void dhd_if_flush_sta(dhd_if_t
* ifp
) { }
2156 static inline void dhd_if_del_sta_list(dhd_if_t
*ifp
) {}
2157 static inline int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
) { return BCME_OK
; }
2158 static inline void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
) {}
2159 static inline void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
) {}
2160 dhd_sta_t
*dhd_findadd_sta(void *pub
, int ifidx
, void *ea
) { return NULL
; }
2161 dhd_sta_t
*dhd_find_sta(void *pub
, int ifidx
, void *ea
) { return NULL
; }
2162 void dhd_del_sta(void *pub
, int ifidx
, void *ea
) {}
2163 #endif /* PCIE_FULL_DONGLE */
2167 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \
/**
 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
 * CPU and schedules a tasklet.
 * @tasklet: opaque pointer to the tasklet
 */
static void
dhd_tasklet_schedule(void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
}
2180 * dhd_tasklet_schedule_on - Executes the passed tasklet in a given CPU
2181 * @tasklet: tasklet to be scheduled
2182 * @on_cpu: cpu core id
2184 * If the requested cpu is online, then an IPI is sent to this cpu via the
2185 * smp_call_function_single with no wait and the tasklet_schedule function
2186 * will be invoked to schedule the specified tasklet on the requested CPU.
/* NOTE(review): lossy extraction — the declaration of 'wait' (presumably
 * int wait = 0 for an asynchronous IPI, mirroring dhd_napi_schedule_on)
 * is on a missing line; confirm against the full source. */
2189 dhd_tasklet_schedule_on(struct tasklet_struct
*tasklet
, int on_cpu
)
2192 smp_call_function_single(on_cpu
,
2193 dhd_tasklet_schedule
, (void *)tasklet
, wait
);
2197 * dhd_work_schedule_on - Executes the passed work in a given CPU
2198 * @work: work to be scheduled
2199 * @on_cpu: cpu core id
2201 * If the requested cpu is online, then an IPI is sent to this cpu via the
2202 * schedule_work_on and the work function
2203 * will be invoked to schedule the specified work on the requested CPU.
/* NOTE(review): lossy extraction; return type and braces missing. */
2207 dhd_work_schedule_on(struct work_struct
*work
, int on_cpu
)
2209 schedule_work_on(on_cpu
, work
);
2211 #endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */
2213 #if defined(DHD_LB_TXC)
2215 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2216 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2217 * freeing the packets placed in the tx_compl workq
/* NOTE(review): lossy extraction — early-return body, put_cpu() pairing for
 * get_cpu(), and the if/else braces are on missing lines; tokens kept verbatim. */
2220 dhd_lb_tx_compl_dispatch(dhd_pub_t
*dhdp
)
2222 dhd_info_t
*dhd
= dhdp
->info
;
2223 int curr_cpu
, on_cpu
;
/* bail out if the NAPI netdev has not been set up yet */
2225 if (dhd
->rx_napi_netdev
== NULL
) {
2226 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__
));
2230 DHD_LB_STATS_INCR(dhd
->txc_sched_cnt
);
2232 * If the destination CPU is NOT online or is same as current CPU
2233 * no need to schedule the work
/* get_cpu() also disables preemption; a matching put_cpu() is presumably
 * on a missing line — confirm against the full source */
2235 curr_cpu
= get_cpu();
2238 on_cpu
= atomic_read(&dhd
->tx_compl_cpu
);
/* same-CPU or offline target: run tasklet locally; otherwise defer to the
 * dispatcher work item which re-targets the chosen CPU */
2240 if ((on_cpu
== curr_cpu
) || (!cpu_online(on_cpu
))) {
2241 dhd_tasklet_schedule(&dhd
->tx_compl_tasklet
);
2243 schedule_work(&dhd
->tx_compl_dispatcher_work
);
/*
 * dhd_tx_compl_dispatcher_fn - work handler that re-reads the chosen
 * tx-completion CPU and schedules tx_compl_tasklet there (or locally if the
 * target CPU went offline in the meantime).
 * NOTE(review): lossy extraction — 'cpu' declaration and any
 * get/put_online_cpus bracketing are on missing lines; tokens kept verbatim.
 */
2247 static void dhd_tx_compl_dispatcher_fn(struct work_struct
* work
)
2249 struct dhd_info
*dhd
=
2250 container_of(work
, struct dhd_info
, tx_compl_dispatcher_work
);
2254 cpu
= atomic_read(&dhd
->tx_compl_cpu
);
2255 if (!cpu_online(cpu
))
2256 dhd_tasklet_schedule(&dhd
->tx_compl_tasklet
);
2258 dhd_tasklet_schedule_on(&dhd
->tx_compl_tasklet
, cpu
);
2261 #endif /* DHD_LB_TXC */
2263 #if defined(DHD_LB_RXC)
2265 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2266 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2267 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2268 * placed in the rx_compl workq.
2270 * @dhdp: pointer to dhd_pub object
/* Mirror image of dhd_lb_tx_compl_dispatch for the RX-completion path.
 * NOTE(review): lossy extraction — early return, put_cpu() and braces
 * are on missing lines; tokens kept verbatim. */
2273 dhd_lb_rx_compl_dispatch(dhd_pub_t
*dhdp
)
2275 dhd_info_t
*dhd
= dhdp
->info
;
2276 int curr_cpu
, on_cpu
;
2278 if (dhd
->rx_napi_netdev
== NULL
) {
2279 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__
));
2283 DHD_LB_STATS_INCR(dhd
->rxc_sched_cnt
);
2285 * If the destination CPU is NOT online or is same as current CPU
2286 * no need to schedule the work
2288 curr_cpu
= get_cpu();
2290 on_cpu
= atomic_read(&dhd
->rx_compl_cpu
);
/* run locally when target is current CPU or offline, else punt to the
 * dispatcher work item */
2292 if ((on_cpu
== curr_cpu
) || (!cpu_online(on_cpu
))) {
2293 dhd_tasklet_schedule(&dhd
->rx_compl_tasklet
);
2295 schedule_work(&dhd
->rx_compl_dispatcher_work
);
/*
 * dhd_rx_compl_dispatcher_fn - work handler: re-read the chosen RX-completion
 * CPU and schedule rx_compl_tasklet there, falling back to the local CPU if
 * the target went offline.
 * NOTE(review): lossy extraction — 'cpu' declaration and braces are on
 * missing lines; tokens kept verbatim.
 */
2299 static void dhd_rx_compl_dispatcher_fn(struct work_struct
* work
)
2301 struct dhd_info
*dhd
=
2302 container_of(work
, struct dhd_info
, rx_compl_dispatcher_work
);
2306 cpu
= atomic_read(&dhd
->rx_compl_cpu
);
2307 if (!cpu_online(cpu
))
2308 dhd_tasklet_schedule(&dhd
->rx_compl_tasklet
);
2310 dhd_tasklet_schedule_on(&dhd
->rx_compl_tasklet
, cpu
);
2314 #endif /* DHD_LB_RXC */
2316 #if defined(DHD_LB_TXP)
/*
 * dhd_tx_dispatcher_work - work handler that runs on the CPU chosen by
 * dhd_tx_dispatcher_fn and simply schedules the TX tasklet there. The
 * pragma pair silences -Wcast-qual inside container_of on strict builds.
 * NOTE(review): lossy extraction; braces are on missing lines.
 */
2317 static void dhd_tx_dispatcher_work(struct work_struct
* work
)
2319 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2320 #pragma GCC diagnostic push
2321 #pragma GCC diagnostic ignored "-Wcast-qual"
2323 struct dhd_info
*dhd
=
2324 container_of(work
, struct dhd_info
, tx_dispatcher_work
);
2325 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2326 #pragma GCC diagnostic pop
2328 dhd_tasklet_schedule(&dhd
->tx_tasklet
);
/*
 * dhd_tx_dispatcher_fn - pick the CPU that should run the TX tasklet.
 * Avoids the CPU the network stack used for TX (net_tx_cpu) by moving to
 * tx_compl_cpu, falls back to the local CPU if the choice is offline,
 * otherwise defers via tx_dispatcher_work so the tasklet runs remotely.
 * NOTE(review): lossy extraction — declarations of cpu/net_tx_cpu and the
 * if/else braces are on missing lines; tokens kept verbatim.
 */
2331 static void dhd_tx_dispatcher_fn(dhd_pub_t
*dhdp
)
2335 dhd_info_t
*dhd
= dhdp
->info
;
2338 cpu
= atomic_read(&dhd
->tx_cpu
);
2339 net_tx_cpu
= atomic_read(&dhd
->net_tx_cpu
);
2342 * Now if the NET_TX has pushed the packet in the same
2343 * CPU that is chosen for Tx processing, separate it out
2344 * i.e run the TX processing tasklet in compl_cpu
2346 if (net_tx_cpu
== cpu
)
2347 cpu
= atomic_read(&dhd
->tx_compl_cpu
);
2349 if (!cpu_online(cpu
)) {
2351 * Ooohh... but the Chosen CPU is not online,
2352 * Do the job in the current CPU itself.
2354 dhd_tasklet_schedule(&dhd
->tx_tasklet
);
2357 * Schedule tx_dispatcher_work to on the cpu which
2358 * in turn will schedule tx_tasklet.
2360 dhd_work_schedule_on(&dhd
->tx_dispatcher_work
, cpu
);
2366 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
2367 * on another cpu. The tx_tasklet will take care of actually putting
2368 * the skbs into appropriate flow ring and ringing H2D interrupt
2370 * @dhdp: pointer to dhd_pub object
/* NOTE(review): lossy extraction — curr_cpu declaration, put_cpu() and
 * braces are on missing lines; tokens kept verbatim. */
2373 dhd_lb_tx_dispatch(dhd_pub_t
*dhdp
)
2375 dhd_info_t
*dhd
= dhdp
->info
;
2378 curr_cpu
= get_cpu();
2381 /* Record the CPU in which the TX request from Network stack came */
2382 atomic_set(&dhd
->net_tx_cpu
, curr_cpu
);
2384 /* Schedule the work to dispatch ... */
2385 dhd_tx_dispatcher_fn(dhdp
);
2387 #endif /* DHD_LB_TXP */
2389 #if defined(DHD_LB_RXP)
2391 * dhd_napi_poll - Load balance napi poll function to process received
2392 * packets and send up the network stack using netif_receive_skb()
2394 * @napi: napi object in which context this poll function is invoked
2395 * @budget: number of packets to be processed.
2397 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2398 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
2399 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2400 * packet tag and sendup.
/* NOTE(review): lossy extraction — declarations of ifid/processed/chan, the
 * processed counter updates, the loop's closing brace and the return value
 * are on missing lines; tokens kept verbatim. */
2403 dhd_napi_poll(struct napi_struct
*napi
, int budget
)
2406 const int pkt_count
= 1;
2408 struct sk_buff
* skb
;
2409 unsigned long flags
;
2410 struct dhd_info
*dhd
;
2412 struct sk_buff_head rx_process_queue
;
2414 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2415 #pragma GCC diagnostic push
2416 #pragma GCC diagnostic ignored "-Wcast-qual"
2418 dhd
= container_of(napi
, struct dhd_info
, rx_napi_struct
);
2419 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2420 #pragma GCC diagnostic pop
2423 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2424 __FUNCTION__
, skb_queue_len(&dhd
->rx_napi_queue
), budget
));
2425 __skb_queue_head_init(&rx_process_queue
);
2427 /* extract the entire rx_napi_queue into local rx_process_queue */
2428 spin_lock_irqsave(&dhd
->rx_napi_queue
.lock
, flags
);
2429 skb_queue_splice_tail_init(&dhd
->rx_napi_queue
, &rx_process_queue
);
2430 spin_unlock_irqrestore(&dhd
->rx_napi_queue
.lock
, flags
);
/* drain the local queue lock-free: each skb's interface id was stashed in
 * its packet tag by dhd_lb_rx_pkt_enqueue */
2432 while ((skb
= __skb_dequeue(&rx_process_queue
)) != NULL
) {
2433 OSL_PREFETCH(skb
->data
);
2435 ifid
= DHD_PKTTAG_IFID((dhd_pkttag_fr_t
*)PKTTAG(skb
));
2437 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2438 __FUNCTION__
, skb
, ifid
));
2440 dhd_rx_frame(&dhd
->pub
, ifid
, skb
, pkt_count
, chan
);
2444 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd
->pub
, processed
);
2446 DHD_INFO(("%s processed %d\n", __FUNCTION__
, processed
));
2447 napi_complete(napi
);
2453 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2454 * poll list. This function may be invoked via the smp_call_function_single
2455 * from a remote CPU.
2457 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2458 * after the napi_struct is added to the softnet data's poll_list
2460 * @info: pointer to a dhd_info struct
/* NOTE(review): lossy extraction; braces on missing lines. */
2463 dhd_napi_schedule(void *info
)
2465 dhd_info_t
*dhd
= (dhd_info_t
*)info
;
2467 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2468 __FUNCTION__
, &dhd
->rx_napi_struct
, atomic_read(&dhd
->rx_napi_cpu
)));
2470 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2471 if (napi_schedule_prep(&dhd
->rx_napi_struct
)) {
2472 __napi_schedule(&dhd
->rx_napi_struct
);
2473 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->napi_percpu_run_cnt
);
2474 #ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
2475 raise_softirq(NET_RX_SOFTIRQ
);
2476 #endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
2480 * If the rx_napi_struct was already running, then we let it complete
2481 * processing all its packets. The rx_napi_struct may only run on one
2482 * core at a time, to avoid out-of-order handling.
2487 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2488 * action after placing the dhd's rx_process napi object in the remote CPU's
2489 * softnet data's poll_list.
2491 * @dhd: dhd_info which has the rx_process napi object
2492 * @on_cpu: desired remote CPU id
/* NOTE(review): lossy extraction; braces on missing lines. A failed IPI is
 * only logged — the napi stays unscheduled on that path. */
2495 dhd_napi_schedule_on(dhd_info_t
*dhd
, int on_cpu
)
2497 int wait
= 0; /* asynchronous IPI */
2498 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2499 __FUNCTION__
, dhd
, &dhd
->rx_napi_struct
, on_cpu
));
2501 if (smp_call_function_single(on_cpu
, dhd_napi_schedule
, dhd
, wait
)) {
2502 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2503 __FUNCTION__
, on_cpu
));
2506 DHD_LB_STATS_INCR(dhd
->napi_sched_cnt
);
2512 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2513 * Why should we do this?
2514 * The candidacy algorithm is run from the call back function
2515 * registered to CPU hotplug notifier. This call back happens from Worker
2516 * context. The dhd_napi_schedule_on is also from worker context.
2517 * Note that both of this can run on two different CPUs at the same time.
2518 * So we can possibly have a window where a given CPUn is being brought
2519 * down from CPUm while we try to run a function on CPUn.
2520 * To prevent this its better have the whole code to execute an SMP
2521 * function under get_online_cpus.
2522 * This function call ensures that hotplug mechanism does not kick-in
2523 * until we are done dealing with online CPUs
2524 * If the hotplug worker is already running, no worries because the
2525 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2527 * The below mentioned code structure is proposed in
2528 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2530 * Q: I need to ensure that a particular cpu is not removed when there is some
2531 * work specific to this cpu is in progress
2533 * According to the documentation calling get_online_cpus is NOT required, if
2534 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2535 * run from Work Queue context we have to call these functions
/* NOTE(review): lossy extraction — braces and any get/put_online_cpus calls
 * described above are on missing lines; tokens kept verbatim. */
2537 static void dhd_rx_napi_dispatcher_fn(struct work_struct
* work
)
2539 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2540 #pragma GCC diagnostic push
2541 #pragma GCC diagnostic ignored "-Wcast-qual"
2543 struct dhd_info
*dhd
=
2544 container_of(work
, struct dhd_info
, rx_napi_dispatcher_work
);
2545 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2546 #pragma GCC diagnostic pop
2549 dhd_napi_schedule(dhd
);
2553 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2554 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2555 * the packets enqueued into the rx_napi_queue and sendup.
2556 * The producer's rx packet queue is appended to the rx_napi_queue before
2557 * dispatching the rx_napi_struct.
/* NOTE(review): lossy extraction — curr_cpu/on_cpu declarations, the 'cpus'
 * cpumask under DHD_LB_IRQSET, put_cpu(), the #else between the two if-forms
 * and braces are on missing lines; tokens kept verbatim. */
2560 dhd_lb_rx_napi_dispatch(dhd_pub_t
*dhdp
)
2562 unsigned long flags
;
2563 dhd_info_t
*dhd
= dhdp
->info
;
2566 #ifdef DHD_LB_IRQSET
2568 #endif /* DHD_LB_IRQSET */
2570 if (dhd
->rx_napi_netdev
== NULL
) {
2571 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__
));
2575 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__
,
2576 skb_queue_len(&dhd
->rx_napi_queue
), skb_queue_len(&dhd
->rx_pend_queue
)));
2578 /* append the producer's queue of packets to the napi's rx process queue */
2579 spin_lock_irqsave(&dhd
->rx_napi_queue
.lock
, flags
);
2580 skb_queue_splice_tail_init(&dhd
->rx_pend_queue
, &dhd
->rx_napi_queue
);
2581 spin_unlock_irqrestore(&dhd
->rx_napi_queue
.lock
, flags
);
2584 * If the destination CPU is NOT online or is same as current CPU
2585 * no need to schedule the work
2587 curr_cpu
= get_cpu();
2591 on_cpu
= atomic_read(&dhd
->rx_napi_cpu
);
2592 #ifdef DHD_LB_IRQSET
/* IRQSET build: stay local when current CPU is already in the primary
 * mask (or target offline); otherwise hand off to the dispatcher work */
2593 if (cpumask_and(&cpus
, cpumask_of(curr_cpu
), dhd
->cpumask_primary
) ||
2594 (!cpu_online(on_cpu
))) {
2596 if ((on_cpu
== curr_cpu
) || (!cpu_online(on_cpu
))) {
2597 #endif /* DHD_LB_IRQSET */
2598 DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__
,
2599 curr_cpu
, *cpumask_bits(dhd
->cpumask_primary
)));
2600 dhd_napi_schedule(dhd
);
2602 DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
2603 __FUNCTION__
, curr_cpu
, *cpumask_bits(dhd
->cpumask_primary
)));
2604 dhd_work_schedule_on(&dhd
->rx_napi_dispatcher_work
, on_cpu
);
2610 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
/* Tags the skb with its interface index (read back later by dhd_napi_poll)
 * and appends it to the unlocked producer queue rx_pend_queue.
 * NOTE(review): lossy extraction; return type and braces missing. */
2613 dhd_lb_rx_pkt_enqueue(dhd_pub_t
*dhdp
, void *pkt
, int ifidx
)
2615 dhd_info_t
*dhd
= dhdp
->info
;
2617 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__
,
2618 pkt
, ifidx
, skb_queue_len(&dhd
->rx_pend_queue
)));
2619 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t
*)PKTTAG(pkt
), ifidx
);
2620 __skb_queue_tail(&dhd
->rx_pend_queue
, pkt
);
2622 #endif /* DHD_LB_RXP */
2624 #ifdef DHD_LB_IRQSET
/*
 * dhd_irq_set_affinity - pin the PCIe interrupt to the primary CPU mask.
 * Looks up the IRQ number via dhdpcie_get_pcieirq, then calls
 * irq_set_affinity; failure is only logged.
 * NOTE(review): lossy extraction — NULL-check conditions for dhdp/bus, the
 * 'err' declaration and braces are on missing lines; tokens kept verbatim.
 */
2626 dhd_irq_set_affinity(dhd_pub_t
*dhdp
)
2628 unsigned int irq
= (unsigned int)-1;
2632 DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__
));
2637 DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__
));
2641 dhdpcie_get_pcieirq(dhdp
->bus
, &irq
);
2642 err
= irq_set_affinity(irq
, dhdp
->info
->cpumask_primary
);
2644 DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
2645 __FUNCTION__
, *cpumask_bits(dhdp
->info
->cpumask_primary
)));
2647 #endif /* DHD_LB_IRQSET */
2650 /** Returns dhd iflist index corresponding to the bssidx provided by apps */
/* Linear scan of dhd->iflist[0..DHD_MAX_IFS) for a matching bssidx.
 * NOTE(review): lossy extraction — declarations of i/ifp, the return
 * statements and braces are on missing lines; tokens kept verbatim. */
2651 int dhd_bssidx2idx(dhd_pub_t
*dhdp
, uint32 bssidx
)
2654 dhd_info_t
*dhd
= dhdp
->info
;
2657 ASSERT(bssidx
< DHD_MAX_IFS
);
2660 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
2661 ifp
= dhd
->iflist
[i
];
2662 if (ifp
&& (ifp
->bssidx
== bssidx
)) {
2663 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2664 ifp
->name
, bssidx
, i
));
/*
 * dhd_rxf_enqueue - push one skb into the circular rx-frame buffer
 * dhdp->skbbuf[] under the rxf lock. The slot at store_idx must be empty;
 * otherwise the previous packet has not yet been consumed and the call
 * fails. Indices wrap with "& (MAXSKBPEND - 1)", so MAXSKBPEND is
 * presumably a power of two — confirm against its definition.
 * NOTE(review): lossy extraction — store_idx/sent_idx declarations, return
 * statements and braces are on missing lines; tokens kept verbatim.
 */
2671 static inline int dhd_rxf_enqueue(dhd_pub_t
*dhdp
, void* skb
)
2677 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2681 dhd_os_rxflock(dhdp
);
2682 store_idx
= dhdp
->store_idx
;
2683 sent_idx
= dhdp
->sent_idx
;
2684 if (dhdp
->skbbuf
[store_idx
] != NULL
) {
2685 /* Make sure the previous packets are processed */
2686 dhd_os_rxfunlock(dhdp
);
2687 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2688 skb
, store_idx
, sent_idx
));
2689 /* removed msleep here, should use wait_event_timeout if we
2690 * want to give rx frame thread a chance to run
2692 #if defined(WAIT_DEQUEUE)
2697 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2698 skb
, store_idx
, (store_idx
+ 1) & (MAXSKBPEND
- 1)));
2699 dhdp
->skbbuf
[store_idx
] = skb
;
2700 dhdp
->store_idx
= (store_idx
+ 1) & (MAXSKBPEND
- 1);
2701 dhd_os_rxfunlock(dhdp
);
/*
 * dhd_rxf_dequeue - pop the next skb from the circular rx-frame buffer
 * under the rxf lock. An empty slot at sent_idx yields a logged
 * NULL-dequeue path; otherwise the slot is cleared and sent_idx advances
 * with the same power-of-two wrap as the enqueue side.
 * NOTE(review): lossy extraction — declarations, the NULL check guarding
 * the error branch, return statements and braces are on missing lines;
 * tokens kept verbatim.
 */
2706 static inline void* dhd_rxf_dequeue(dhd_pub_t
*dhdp
)
2712 dhd_os_rxflock(dhdp
);
2714 store_idx
= dhdp
->store_idx
;
2715 sent_idx
= dhdp
->sent_idx
;
2716 skb
= dhdp
->skbbuf
[sent_idx
];
2719 dhd_os_rxfunlock(dhdp
);
2720 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2721 store_idx
, sent_idx
));
2725 dhdp
->skbbuf
[sent_idx
] = NULL
;
2726 dhdp
->sent_idx
= (sent_idx
+ 1) & (MAXSKBPEND
- 1);
2728 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2731 dhd_os_rxfunlock(dhdp
);
/*
 * dhd_process_cid_mac - chip-id / MAC-address housekeeping around firmware
 * download. prepost==TRUE runs the pre-download steps (check module CID and
 * MAC, apply MAC from file); prepost==FALSE runs the post-download steps
 * (write MAC to dongle, clear CIS).
 * NOTE(review): lossy extraction — guard conditions, return value and
 * braces are on missing lines; tokens kept verbatim.
 */
2736 int dhd_process_cid_mac(dhd_pub_t
*dhdp
, bool prepost
)
2738 if (prepost
) { /* pre process */
2740 dhd_check_module_cid(dhdp
);
2741 dhd_check_module_mac(dhdp
);
2742 dhd_set_macaddr_from_file(dhdp
);
2743 } else { /* post process */
2744 dhd_write_macaddr(&dhdp
->mac
);
2745 dhd_clear_cis(dhdp
);
2751 #ifdef PKT_FILTER_SUPPORT
2752 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
/*
 * _turn_on_arp_filter - decide whether the ARP packet filter should be
 * applied for the current operating mode: IBSS, P2P GC and P2P GO modes
 * set _apply (so ARP frames are passed to the host).
 * NOTE(review): lossy extraction — the lines setting _apply, the return of
 * _apply and braces are on missing lines; tokens kept verbatim.
 */
2754 _turn_on_arp_filter(dhd_pub_t
*dhd
, int op_mode_param
)
2756 bool _apply
= FALSE
;
2757 /* In case of IBSS mode, apply arp pkt filter */
2758 if (op_mode_param
& DHD_FLAG_IBSS_MODE
) {
2762 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2763 if (op_mode_param
& (DHD_FLAG_P2P_GC_MODE
| DHD_FLAG_P2P_GO_MODE
)) {
2771 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
/*
 * dhd_set_packet_filter - (re)program every configured packet filter into
 * the firmware when filtering is globally enabled, by iterating
 * dhd->pktfilter[0..pktfilter_count).
 * NOTE(review): lossy extraction — 'i' declaration and braces are on
 * missing lines; tokens kept verbatim.
 */
2774 dhd_set_packet_filter(dhd_pub_t
*dhd
)
2778 DHD_TRACE(("%s: enter\n", __FUNCTION__
));
2779 if (dhd_pkt_filter_enable
) {
2780 for (i
= 0; i
< dhd
->pktfilter_count
; i
++) {
2781 dhd_pktfilter_offload_set(dhd
, dhd
->pktfilter
[i
]);
/*
 * dhd_enable_packet_filter - enable (value!=0) or disable (value==0) all
 * configured packet filters. Enabling is refused in HOSTAP mode and is
 * skipped while DHCP is in progress; the ARP filter slot is additionally
 * gated by _turn_on_arp_filter() for the current op_mode.
 * NOTE(review): lossy extraction — 'i' declaration, early return, the
 * 'continue' of the ARP skip branch and braces are on missing lines;
 * tokens kept verbatim.
 */
2787 dhd_enable_packet_filter(int value
, dhd_pub_t
*dhd
)
2791 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__
, value
));
2792 if ((dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) && value
) {
2793 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__
));
2796 /* 1 - Enable packet filter, only allow unicast packet to send up */
2797 /* 0 - Disable packet filter */
2798 if (dhd_pkt_filter_enable
&& (!value
||
2799 (dhd_support_sta_mode(dhd
) && !dhd
->dhcp_in_progress
)))
2801 for (i
= 0; i
< dhd
->pktfilter_count
; i
++) {
2802 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2803 if (value
&& (i
== DHD_ARP_FILTER_NUM
) &&
2804 !_turn_on_arp_filter(dhd
, dhd
->op_mode
)) {
2805 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
2806 "val %d, cnt %d, op_mode 0x%x\n",
2807 value
, i
, dhd
->op_mode
));
2810 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2811 dhd_pktfilter_offload_enable(dhd
, dhd
->pktfilter
[i
],
2812 value
, dhd_master_mode
);
/*
 * dhd_packet_filter_add_remove - install or delete one of the predefined
 * packet-filter strings ("<id> <offset> ... <pattern>") for the slot 'num'.
 * Multicast slots prefer the pf6-capable DISCARD_* forms when the firmware
 * advertises pf6 support (deleting any older filter first), otherwise fall
 * back to legacy pattern strings. On add the chosen string is stored in
 * dhdp->pktfilter[num] and offloaded; on remove the installed filter is
 * deleted and the slot cleared.
 * NOTE(review): lossy extraction — the switch header, 'filter_id'
 * computation, break statements, the add/remove branch condition and
 * braces are on missing lines; tokens kept verbatim.
 */
2818 dhd_packet_filter_add_remove(dhd_pub_t
*dhdp
, int add_remove
, int num
)
2820 char *filterp
= NULL
;
2824 case DHD_BROADCAST_FILTER_NUM
:
2825 filterp
= "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
2828 case DHD_MULTICAST4_FILTER_NUM
:
2830 if (FW_SUPPORTED((dhdp
), pf6
)) {
2831 if (dhdp
->pktfilter
[num
] != NULL
) {
2832 dhd_pktfilter_offload_delete(dhdp
, filter_id
);
2833 dhdp
->pktfilter
[num
] = NULL
;
2836 filterp
= DISCARD_IPV4_MCAST
;
2841 filterp
= "102 0 0 0 0xFFFFFF 0x01005E";
2843 case DHD_MULTICAST6_FILTER_NUM
:
2845 if (FW_SUPPORTED((dhdp
), pf6
)) {
2846 if (dhdp
->pktfilter
[num
] != NULL
) {
2847 dhd_pktfilter_offload_delete(dhdp
, filter_id
);
2848 dhdp
->pktfilter
[num
] = NULL
;
2851 filterp
= DISCARD_IPV6_MCAST
;
2856 filterp
= "103 0 0 0 0xFFFF 0x3333";
2858 case DHD_MDNS_FILTER_NUM
:
2859 filterp
= "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
2862 case DHD_ARP_FILTER_NUM
:
2863 filterp
= "105 0 0 12 0xFFFF 0x0806";
2866 case DHD_BROADCAST_ARP_FILTER_NUM
:
2867 filterp
= "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
2868 " 0xFFFFFFFFFFFF0000000000000806";
/* add path: remember and program the chosen filter string */
2877 dhdp
->pktfilter
[num
] = filterp
;
2878 dhd_pktfilter_offload_set(dhdp
, dhdp
->pktfilter
[num
]);
2879 } else { /* Delete filter */
2880 if (dhdp
->pktfilter
[num
]) {
2881 dhd_pktfilter_offload_delete(dhdp
, filter_id
);
2882 dhdp
->pktfilter
[num
] = NULL
;
2888 #endif /* PKT_FILTER_SUPPORT */
/*
 * dhd_set_suspend - apply (value!=0 with dhd->in_suspend set) or revert
 * (value==0) the driver's low-power configuration: PM mode, packet filters,
 * multicast pass-through, DTIM skipping, beacon timeout, roaming, ND
 * offload / IPv6 RA filtering and related iovars. Heavily conditionalized
 * by build-time features; the suspend and resume halves mirror each other.
 * NOTE(review): lossy extraction — many interior lines (variable
 * declarations such as ret/i/allmulti/roamvar/lpas/bcn_to_dly/intr_width,
 * error-check conditions, braces, returns) are missing from this chunk;
 * code tokens below are kept verbatim.
 */
2890 static int dhd_set_suspend(int value
, dhd_pub_t
*dhd
)
2892 #ifndef SUPPORT_PM2_ONLY
2893 int power_mode
= PM_MAX
;
2894 #endif /* SUPPORT_PM2_ONLY */
2895 /* wl_pkt_filter_enable_t enable_parm; */
2896 int bcn_li_dtim
= 0; /* Default bcn_li_dtim in resume mode is 0 */
2898 #ifdef DHD_USE_EARLYSUSPEND
2899 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2900 int bcn_timeout
= 0;
2901 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2902 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2903 int roam_time_thresh
= 0; /* (ms) */
2904 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2905 #ifndef ENABLE_FW_ROAM_SUSPEND
2907 #endif /* ENABLE_FW_ROAM_SUSPEND */
2908 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2910 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2911 uint nd_ra_filter
= 0;
2912 #ifdef ENABLE_IPMCAST_FILTER
2913 int ipmcast_l2filter
;
2914 #endif /* ENABLE_IPMCAST_FILTER */
2915 #ifdef CUSTOM_EVENT_PM_WAKE
2916 uint32 pm_awake_thresh
= CUSTOM_EVENT_PM_WAKE
;
2917 #endif /* CUSTOM_EVENT_PM_WAKE */
2918 #endif /* DHD_USE_EARLYSUSPEND */
2919 #ifdef PASS_ALL_MCAST_PKTS
2920 struct dhd_info
*dhdinfo
;
2923 #endif /* PASS_ALL_MCAST_PKTS */
2924 #ifdef DYNAMIC_SWOOB_DURATION
2925 #ifndef CUSTOM_INTR_WIDTH
2926 #define CUSTOM_INTR_WIDTH 100
2928 #endif /* CUSTOM_INTR_WIDTH */
2929 #endif /* DYNAMIC_SWOOB_DURATION */
2931 #if defined(BCMPCIE)
2933 int dtim_period
= 0;
2934 int bcn_interval
= 0;
2936 #if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2937 bcn_timeout
= CUSTOM_BCN_TIMEOUT_SETTING
;
2939 int bcn_timeout
= CUSTOM_BCN_TIMEOUT_SETTING
;
2940 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
2941 #endif /* OEM_ANDROID && BCMPCIE */
2946 #ifdef PASS_ALL_MCAST_PKTS
2947 dhdinfo
= dhd
->info
;
2948 #endif /* PASS_ALL_MCAST_PKTS */
2950 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2951 __FUNCTION__
, value
, dhd
->in_suspend
));
2953 dhd_suspend_lock(dhd
);
2955 #ifdef CUSTOM_SET_CPUCORE
2956 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__
, value
));
2957 /* set specific cpucore */
2958 dhd_set_cpucore(dhd
, TRUE
);
2959 #endif /* CUSTOM_SET_CPUCORE */
/* ---------------- suspend half ---------------- */
2961 if (value
&& dhd
->in_suspend
) {
2962 #ifdef PKT_FILTER_SUPPORT
2963 dhd
->early_suspended
= 1;
2965 /* Kernel suspended */
2966 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__
));
2968 #ifndef SUPPORT_PM2_ONLY
2969 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
,
2970 sizeof(power_mode
), TRUE
, 0);
2971 #endif /* SUPPORT_PM2_ONLY */
2973 #ifdef PKT_FILTER_SUPPORT
2974 /* Enable packet filter,
2975 * only allow unicast packet to send up
2977 dhd_enable_packet_filter(1, dhd
);
2979 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd
));
2981 #endif /* PKT_FILTER_SUPPORT */
2983 #ifdef PASS_ALL_MCAST_PKTS
/* turn allmulti off per interface while suspended */
2985 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
2986 if (dhdinfo
->iflist
[i
] && dhdinfo
->iflist
[i
]->net
)
2987 ret
= dhd_iovar(dhd
, i
, "allmulti",
2992 DHD_ERROR(("%s allmulti failed %d\n",
2993 __FUNCTION__
, ret
));
2996 #endif /* PASS_ALL_MCAST_PKTS */
2998 /* If DTIM skip is set up as default, force it to wake
2999 * each third DTIM for better power savings. Note that
3000 * one side effect is a chance to miss BC/MC packet.
3003 /* Do not set bcn_li_ditm on WFD mode */
3004 if (dhd
->tdls_mode
) {
3008 #if defined(BCMPCIE)
3009 bcn_li_dtim
= dhd_get_suspend_bcn_li_dtim(dhd
, &dtim_period
,
3011 ret
= dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3012 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
);
3014 DHD_ERROR(("%s bcn_li_dtim failed %d\n",
3015 __FUNCTION__
, ret
));
/* when the effective DTIM sleep interval is long, extend roaming
 * thresholds so a roam is not declared during the sleep */
3017 if ((bcn_li_dtim
* dtim_period
* bcn_interval
) >=
3018 MIN_DTIM_FOR_ROAM_THRES_EXTEND
) {
3020 * Increase max roaming threshold from 2 secs to 8 secs
3021 * the real roam threshold is MIN(max_roam_threshold,
3025 ret
= dhd_iovar(dhd
, 0, "lpas", (char *)&lpas
, sizeof(lpas
),
3028 DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__
,
3033 * if bcn_to_dly is 1, the real roam threshold is
3034 * MIN(max_roam_threshold, bcn_timeout -1);
3035 * notify link down event after roaming procedure complete
3036 * if we hit bcn_timeout while we are in roaming progress.
3038 ret
= dhd_iovar(dhd
, 0, "bcn_to_dly", (char *)&bcn_to_dly
,
3039 sizeof(bcn_to_dly
), NULL
, 0, TRUE
);
3041 DHD_ERROR(("%s bcn_to_dly failed %d\n",
3042 __FUNCTION__
, ret
));
3044 /* Increase beacon timeout to 6 secs or use bigger one */
3045 bcn_timeout
= max(bcn_timeout
, BCN_TIMEOUT_IN_SUSPEND
);
3046 ret
= dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3047 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3049 DHD_ERROR(("%s bcn_timeout failed %d\n",
3050 __FUNCTION__
, ret
));
3054 bcn_li_dtim
= dhd_get_suspend_bcn_li_dtim(dhd
);
3055 if (dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3056 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
) < 0)
3057 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__
));
3058 #endif /* OEM_ANDROID && BCMPCIE */
3060 #ifdef DHD_USE_EARLYSUSPEND
3061 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3062 bcn_timeout
= CUSTOM_BCN_TIMEOUT_IN_SUSPEND
;
3063 ret
= dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3064 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3066 DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__
,
3069 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3070 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3071 roam_time_thresh
= CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
;
3072 ret
= dhd_iovar(dhd
, 0, "roam_time_thresh",
3073 (char *)&roam_time_thresh
,
3074 sizeof(roam_time_thresh
), NULL
, 0, TRUE
);
3076 DHD_ERROR(("%s roam_time_thresh failed %d\n",
3077 __FUNCTION__
, ret
));
3079 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3080 #ifndef ENABLE_FW_ROAM_SUSPEND
3081 /* Disable firmware roaming during suspend */
3082 ret
= dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
,
3083 sizeof(roamvar
), NULL
, 0, TRUE
);
3085 DHD_ERROR(("%s roam_off failed %d\n",
3086 __FUNCTION__
, ret
));
3088 #endif /* ENABLE_FW_ROAM_SUSPEND */
3089 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3093 ret
= dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
,
3094 sizeof(bcn_li_bcn
), NULL
, 0, TRUE
);
3096 DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__
, ret
));
3098 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3099 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
3100 ret
= wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd
));
3101 if (ret
!= BCME_OK
) {
3102 DHD_ERROR(("failed to stop beacon recv event on"
3103 " suspend state (%d)\n", ret
));
3105 #endif /* WL_CFG80211 && WL_BCNRECV */
3106 #ifdef NDO_CONFIG_SUPPORT
3107 if (dhd
->ndo_enable
) {
3108 if (!dhd
->ndo_host_ip_overflow
) {
3109 /* enable ND offload on suspend */
3110 ret
= dhd_ndo_enable(dhd
, TRUE
);
3112 DHD_ERROR(("%s: failed to enable NDO\n",
3116 DHD_INFO(("%s: NDO disabled on suspend due to"
3117 "HW capacity\n", __FUNCTION__
));
3120 #endif /* NDO_CONFIG_SUPPORT */
3122 if (FW_SUPPORTED(dhd
, ndoe
)) {
3124 if (FW_SUPPORTED(dhd
, ndoe
) && !FW_SUPPORTED(dhd
, apf
)) {
3126 /* enable IPv6 RA filter in firmware during suspend */
3128 ret
= dhd_iovar(dhd
, 0, "nd_ra_filter_enable",
3129 (char *)&nd_ra_filter
, sizeof(nd_ra_filter
),
3132 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3135 dhd_os_suppress_logging(dhd
, TRUE
);
3136 #ifdef ENABLE_IPMCAST_FILTER
3137 ipmcast_l2filter
= 1;
3138 ret
= dhd_iovar(dhd
, 0, "ipmcast_l2filter",
3139 (char *)&ipmcast_l2filter
, sizeof(ipmcast_l2filter
),
3142 DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret
));
3144 #endif /* ENABLE_IPMCAST_FILTER */
3145 #ifdef DYNAMIC_SWOOB_DURATION
3146 intr_width
= CUSTOM_INTR_WIDTH
;
3147 ret
= dhd_iovar(dhd
, 0, "bus:intr_width", (char *)&intr_width
,
3148 sizeof(intr_width
), NULL
, 0, TRUE
);
3150 DHD_ERROR(("failed to set intr_width (%d)\n", ret
));
3152 #endif /* DYNAMIC_SWOOB_DURATION */
3153 #ifdef CUSTOM_EVENT_PM_WAKE
3154 pm_awake_thresh
= CUSTOM_EVENT_PM_WAKE
* 4;
3155 ret
= dhd_iovar(dhd
, 0, "const_awake_thresh",
3156 (char *)&pm_awake_thresh
,
3157 sizeof(pm_awake_thresh
), NULL
, 0, TRUE
);
3159 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
3160 __FUNCTION__
, ret
));
3162 #endif /* CUSTOM_EVENT_PM_WAKE */
3163 #endif /* DHD_USE_EARLYSUSPEND */
/* ---------------- resume half: undo all of the above ---------------- */
3165 #ifdef PKT_FILTER_SUPPORT
3166 dhd
->early_suspended
= 0;
3168 /* Kernel resumed */
3169 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__
));
3170 #ifdef DYNAMIC_SWOOB_DURATION
3172 ret
= dhd_iovar(dhd
, 0, "bus:intr_width", (char *)&intr_width
,
3173 sizeof(intr_width
), NULL
, 0, TRUE
);
3175 DHD_ERROR(("failed to set intr_width (%d)\n", ret
));
3177 #endif /* DYNAMIC_SWOOB_DURATION */
3178 #ifndef SUPPORT_PM2_ONLY
3179 power_mode
= PM_FAST
;
3180 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
,
3181 sizeof(power_mode
), TRUE
, 0);
3182 #endif /* SUPPORT_PM2_ONLY */
3183 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
3184 ret
= wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd
));
3185 if (ret
!= BCME_OK
) {
3186 DHD_ERROR(("failed to resume beacon recv state (%d)\n",
3189 #endif /* WL_CF80211 && WL_BCNRECV */
3190 #ifdef PKT_FILTER_SUPPORT
3191 /* disable pkt filter */
3192 dhd_enable_packet_filter(0, dhd
);
3194 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd
));
3196 #endif /* PKT_FILTER_SUPPORT */
3197 #ifdef PASS_ALL_MCAST_PKTS
3199 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3200 if (dhdinfo
->iflist
[i
] && dhdinfo
->iflist
[i
]->net
)
3201 ret
= dhd_iovar(dhd
, i
, "allmulti",
3203 sizeof(allmulti
), NULL
,
3206 DHD_ERROR(("%s: allmulti failed:%d\n",
3207 __FUNCTION__
, ret
));
3210 #endif /* PASS_ALL_MCAST_PKTS */
3211 #if defined(BCMPCIE)
3212 /* restore pre-suspend setting */
3213 ret
= dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3214 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
);
3216 DHD_ERROR(("%s:bcn_li_ditm failed:%d\n",
3217 __FUNCTION__
, ret
));
3219 ret
= dhd_iovar(dhd
, 0, "lpas", (char *)&lpas
, sizeof(lpas
), NULL
,
3222 DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__
, ret
));
3224 ret
= dhd_iovar(dhd
, 0, "bcn_to_dly", (char *)&bcn_to_dly
,
3225 sizeof(bcn_to_dly
), NULL
, 0, TRUE
);
3227 DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__
, ret
));
3229 ret
= dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3230 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3232 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
3233 __FUNCTION__
, ret
));
3236 /* restore pre-suspend setting for dtim_skip */
3237 ret
= dhd_iovar(dhd
, 0, "bcn_li_dtim", (char *)&bcn_li_dtim
,
3238 sizeof(bcn_li_dtim
), NULL
, 0, TRUE
);
3240 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__
, ret
));
3242 #endif /* OEM_ANDROID && BCMPCIE */
3243 #ifdef DHD_USE_EARLYSUSPEND
3244 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3245 bcn_timeout
= CUSTOM_BCN_TIMEOUT
;
3246 ret
= dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
,
3247 sizeof(bcn_timeout
), NULL
, 0, TRUE
);
3249 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
3250 __FUNCTION__
, ret
));
3252 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3253 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3254 roam_time_thresh
= 2000;
3255 ret
= dhd_iovar(dhd
, 0, "roam_time_thresh",
3256 (char *)&roam_time_thresh
,
3257 sizeof(roam_time_thresh
), NULL
, 0, TRUE
);
3259 DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
3260 __FUNCTION__
, ret
));
3263 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3264 #ifndef ENABLE_FW_ROAM_SUSPEND
3265 roamvar
= dhd_roam_disable
;
3266 ret
= dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
,
3267 sizeof(roamvar
), NULL
, 0, TRUE
);
3269 DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__
, ret
));
3271 #endif /* ENABLE_FW_ROAM_SUSPEND */
3272 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3273 ret
= dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
,
3274 sizeof(bcn_li_bcn
), NULL
, 0, TRUE
);
3276 DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
3277 __FUNCTION__
, ret
));
3279 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3280 #ifdef NDO_CONFIG_SUPPORT
3281 if (dhd
->ndo_enable
) {
3282 /* Disable ND offload on resume */
3283 ret
= dhd_ndo_enable(dhd
, FALSE
);
3285 DHD_ERROR(("%s: failed to disable NDO\n",
3289 #endif /* NDO_CONFIG_SUPPORT */
3291 if (FW_SUPPORTED(dhd
, ndoe
)) {
3293 if (FW_SUPPORTED(dhd
, ndoe
) && !FW_SUPPORTED(dhd
, apf
)) {
3295 /* disable IPv6 RA filter in firmware during suspend */
3297 ret
= dhd_iovar(dhd
, 0, "nd_ra_filter_enable",
3298 (char *)&nd_ra_filter
, sizeof(nd_ra_filter
),
3301 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3305 dhd_os_suppress_logging(dhd
, FALSE
);
3306 #ifdef ENABLE_IPMCAST_FILTER
3307 ipmcast_l2filter
= 0;
3308 ret
= dhd_iovar(dhd
, 0, "ipmcast_l2filter",
3309 (char *)&ipmcast_l2filter
, sizeof(ipmcast_l2filter
),
3312 DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret
));
3314 #endif /* ENABLE_IPMCAST_FILTER */
3315 #ifdef CUSTOM_EVENT_PM_WAKE
3316 ret
= dhd_iovar(dhd
, 0, "const_awake_thresh",
3317 (char *)&pm_awake_thresh
,
3318 sizeof(pm_awake_thresh
), NULL
, 0, TRUE
);
3320 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
3321 __FUNCTION__
, ret
));
3323 #endif /* CUSTOM_EVENT_PM_WAKE */
3324 #endif /* DHD_USE_EARLYSUSPEND */
3325 #ifdef DHD_LB_IRQSET
3326 dhd_irq_set_affinity(dhd
);
3327 #endif /* DHD_LB_IRQSET */
3330 dhd_suspend_unlock(dhd
);
3335 static int dhd_suspend_resume_helper(struct dhd_info
*dhd
, int val
, int force
)
3337 dhd_pub_t
*dhdp
= &dhd
->pub
;
3340 DHD_OS_WAKE_LOCK(dhdp
);
3341 DHD_PERIM_LOCK(dhdp
);
3343 /* Set flag when early suspend was called */
3344 dhdp
->in_suspend
= val
;
3345 if ((force
|| !dhdp
->suspend_disable_flag
) &&
3346 dhd_support_sta_mode(dhdp
))
3348 ret
= dhd_set_suspend(val
, dhdp
);
3351 DHD_PERIM_UNLOCK(dhdp
);
3352 DHD_OS_WAKE_UNLOCK(dhdp
);
3356 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3357 static void dhd_early_suspend(struct early_suspend
*h
)
3359 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
3360 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
3363 dhd_suspend_resume_helper(dhd
, 1, 0);
3366 static void dhd_late_resume(struct early_suspend
*h
)
3368 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
3369 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
3372 dhd_suspend_resume_helper(dhd
, 0, 0);
3374 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3377 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3378 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3380 * dhd_timeout_start(&tmo, usec);
3381 * while (!dhd_timeout_expired(&tmo))
3382 * if (poll_something())
3384 * if (dhd_timeout_expired(&tmo))
3389 dhd_timeout_start(dhd_timeout_t
*tmo
, uint usec
)
3394 tmo
->tick
= jiffies_to_usecs(1);
3398 dhd_timeout_expired(dhd_timeout_t
*tmo
)
3400 /* Does nothing the first call */
3401 if (tmo
->increment
== 0) {
3406 if (tmo
->elapsed
>= tmo
->limit
)
3409 /* Add the delay that's about to take place */
3410 tmo
->elapsed
+= tmo
->increment
;
3412 if ((!CAN_SLEEP()) || tmo
->increment
< tmo
->tick
) {
3413 OSL_DELAY(tmo
->increment
);
3414 tmo
->increment
*= 2;
3415 if (tmo
->increment
> tmo
->tick
)
3416 tmo
->increment
= tmo
->tick
;
3419 * OSL_SLEEP() is corresponding to usleep_range(). In non-atomic
3420 * context where the exact wakeup time is flexible, it would be good
3421 * to use usleep_range() instead of udelay(). It takes a few advantages
3422 * such as improving responsiveness and reducing power.
3424 OSL_SLEEP(jiffies_to_msecs(1));
3431 dhd_net2idx(dhd_info_t
*dhd
, struct net_device
*net
)
3436 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__
));
3440 while (i
< DHD_MAX_IFS
) {
3441 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->net
&& (dhd
->iflist
[i
]->net
== net
))
3449 struct net_device
* dhd_idx2net(void *pub
, int ifidx
)
3451 struct dhd_pub
*dhd_pub
= (struct dhd_pub
*)pub
;
3452 struct dhd_info
*dhd_info
;
3454 if (!dhd_pub
|| ifidx
< 0 || ifidx
>= DHD_MAX_IFS
)
3456 dhd_info
= dhd_pub
->info
;
3457 if (dhd_info
&& dhd_info
->iflist
[ifidx
])
3458 return dhd_info
->iflist
[ifidx
]->net
;
3463 dhd_ifname2idx(dhd_info_t
*dhd
, char *name
)
3465 int i
= DHD_MAX_IFS
;
3469 if (name
== NULL
|| *name
== '\0')
3473 if (dhd
->iflist
[i
] && !strncmp(dhd
->iflist
[i
]->dngl_name
, name
, IFNAMSIZ
))
3476 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__
, i
, name
));
3478 return i
; /* default - the primary interface */
3482 dhd_ifname(dhd_pub_t
*dhdp
, int ifidx
)
3484 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
3488 if (ifidx
< 0 || ifidx
>= DHD_MAX_IFS
) {
3489 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__
, ifidx
));
3493 if (dhd
->iflist
[ifidx
] == NULL
) {
3494 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__
, ifidx
));
3498 if (dhd
->iflist
[ifidx
]->net
)
3499 return dhd
->iflist
[ifidx
]->net
->name
;
3505 dhd_bssidx2bssid(dhd_pub_t
*dhdp
, int idx
)
3508 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
;
3511 for (i
= 0; i
< DHD_MAX_IFS
; i
++)
3512 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->bssidx
== idx
)
3513 return dhd
->iflist
[i
]->mac_addr
;
3519 _dhd_set_multicast_list(dhd_info_t
*dhd
, int ifidx
)
3521 struct net_device
*dev
;
3522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3523 struct netdev_hw_addr
*ha
;
3525 struct dev_mc_list
*mclist
;
3527 uint32 allmulti
, cnt
;
3534 #ifdef MCAST_LIST_ACCUMULATION
3536 uint32 cnt_iface
[DHD_MAX_IFS
];
3540 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3541 if (dhd
->iflist
[i
]) {
3542 dev
= dhd
->iflist
[i
]->net
;
3545 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3546 netif_addr_lock_bh(dev
);
3547 #endif /* LINUX >= 2.6.27 */
3548 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3549 cnt_iface
[i
] = netdev_mc_count(dev
);
3550 cnt
+= cnt_iface
[i
];
3552 cnt
+= dev
->mc_count
;
3553 #endif /* LINUX >= 2.6.35 */
3554 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3555 netif_addr_unlock_bh(dev
);
3556 #endif /* LINUX >= 2.6.27 */
3558 /* Determine initial value of allmulti flag */
3559 allmulti
|= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
3562 #else /* !MCAST_LIST_ACCUMULATION */
3563 if (!dhd
->iflist
[ifidx
]) {
3564 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__
, ifidx
));
3567 dev
= dhd
->iflist
[ifidx
]->net
;
3570 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3571 netif_addr_lock_bh(dev
);
3572 #endif /* LINUX >= 2.6.27 */
3573 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3574 cnt
= netdev_mc_count(dev
);
3576 cnt
= dev
->mc_count
;
3577 #endif /* LINUX >= 2.6.35 */
3578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3579 netif_addr_unlock_bh(dev
);
3580 #endif /* LINUX >= 2.6.27 */
3582 /* Determine initial value of allmulti flag */
3583 allmulti
= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
3584 #endif /* MCAST_LIST_ACCUMULATION */
3586 #ifdef PASS_ALL_MCAST_PKTS
3587 #ifdef PKT_FILTER_SUPPORT
3588 if (!dhd
->pub
.early_suspended
)
3589 #endif /* PKT_FILTER_SUPPORT */
3591 #endif /* PASS_ALL_MCAST_PKTS */
3593 /* Send down the multicast list first. */
3595 buflen
= sizeof("mcast_list") + sizeof(cnt
) + (cnt
* ETHER_ADDR_LEN
);
3596 if (!(bufp
= buf
= MALLOC(dhd
->pub
.osh
, buflen
))) {
3597 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3598 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
3602 strncpy(bufp
, "mcast_list", buflen
- 1);
3603 bufp
[buflen
- 1] = '\0';
3604 bufp
+= strlen("mcast_list") + 1;
3607 memcpy(bufp
, &cnt
, sizeof(cnt
));
3608 bufp
+= sizeof(cnt
);
3610 #ifdef MCAST_LIST_ACCUMULATION
3611 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3612 if (dhd
->iflist
[i
]) {
3613 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i
));
3614 dev
= dhd
->iflist
[i
]->net
;
3616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3617 netif_addr_lock_bh(dev
);
3618 #endif /* LINUX >= 2.6.27 */
3619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3620 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3621 #pragma GCC diagnostic push
3622 #pragma GCC diagnostic ignored "-Wcast-qual"
3624 netdev_for_each_mc_addr(ha
, dev
) {
3625 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3626 #pragma GCC diagnostic pop
3630 memcpy(bufp
, ha
->addr
, ETHER_ADDR_LEN
);
3631 bufp
+= ETHER_ADDR_LEN
;
3632 DHD_TRACE(("_dhd_set_multicast_list: cnt "
3634 cnt_iface
[i
], MAC2STRDBG(ha
->addr
)));
3637 #else /* LINUX < 2.6.35 */
3638 for (mclist
= dev
->mc_list
; (mclist
&& (cnt_iface
[i
] > 0));
3639 cnt_iface
[i
]--, mclist
= mclist
->next
) {
3640 memcpy(bufp
, (void *)mclist
->dmi_addr
, ETHER_ADDR_LEN
);
3641 bufp
+= ETHER_ADDR_LEN
;
3643 #endif /* LINUX >= 2.6.35 */
3644 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3645 netif_addr_unlock_bh(dev
);
3646 #endif /* LINUX >= 2.6.27 */
3649 #else /* !MCAST_LIST_ACCUMULATION */
3650 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3651 netif_addr_lock_bh(dev
);
3652 #endif /* LINUX >= 2.6.27 */
3653 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3654 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3655 #pragma GCC diagnostic push
3656 #pragma GCC diagnostic ignored "-Wcast-qual"
3658 netdev_for_each_mc_addr(ha
, dev
) {
3661 memcpy(bufp
, ha
->addr
, ETHER_ADDR_LEN
);
3662 bufp
+= ETHER_ADDR_LEN
;
3665 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3666 #pragma GCC diagnostic pop
3668 #else /* LINUX < 2.6.35 */
3669 for (mclist
= dev
->mc_list
; (mclist
&& (cnt
> 0));
3670 cnt
--, mclist
= mclist
->next
) {
3671 memcpy(bufp
, (void *)mclist
->dmi_addr
, ETHER_ADDR_LEN
);
3672 bufp
+= ETHER_ADDR_LEN
;
3674 #endif /* LINUX >= 2.6.35 */
3675 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3676 netif_addr_unlock_bh(dev
);
3677 #endif /* LINUX >= 2.6.27 */
3678 #endif /* MCAST_LIST_ACCUMULATION */
3680 memset(&ioc
, 0, sizeof(ioc
));
3681 ioc
.cmd
= WLC_SET_VAR
;
3686 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
3688 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3689 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
3690 allmulti
= cnt
? TRUE
: allmulti
;
3693 MFREE(dhd
->pub
.osh
, buf
, buflen
);
3695 /* Now send the allmulti setting. This is based on the setting in the
3696 * net_device flags, but might be modified above to be turned on if we
3697 * were trying to set some addresses and dongle rejected it...
3700 allmulti
= htol32(allmulti
);
3701 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "allmulti", (char *)&allmulti
,
3702 sizeof(allmulti
), NULL
, 0, TRUE
);
3704 DHD_ERROR(("%s: set allmulti %d failed\n",
3705 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
3708 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3710 #ifdef MCAST_LIST_ACCUMULATION
3712 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3713 if (dhd
->iflist
[i
]) {
3714 dev
= dhd
->iflist
[i
]->net
;
3715 allmulti
|= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
3719 allmulti
= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
3720 #endif /* MCAST_LIST_ACCUMULATION */
3722 allmulti
= htol32(allmulti
);
3724 memset(&ioc
, 0, sizeof(ioc
));
3725 ioc
.cmd
= WLC_SET_PROMISC
;
3726 ioc
.buf
= &allmulti
;
3727 ioc
.len
= sizeof(allmulti
);
3730 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
3732 DHD_ERROR(("%s: set promisc %d failed\n",
3733 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
3738 _dhd_set_mac_address(dhd_info_t
*dhd
, int ifidx
, uint8
*addr
)
3742 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "cur_etheraddr", (char *)addr
,
3743 ETHER_ADDR_LEN
, NULL
, 0, TRUE
);
3745 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd
->pub
, ifidx
)));
3747 memcpy(dhd
->iflist
[ifidx
]->net
->dev_addr
, addr
, ETHER_ADDR_LEN
);
3749 memcpy(dhd
->pub
.mac
.octet
, addr
, ETHER_ADDR_LEN
);
3756 extern struct net_device
*ap_net_dev
;
3757 extern tsk_ctl_t ap_eth_ctl
; /* ap netdev heper thread ctl */
3761 /* Get psta/psr configuration configuration */
3762 int dhd_get_psta_mode(dhd_pub_t
*dhdp
)
3764 dhd_info_t
*dhd
= dhdp
->info
;
3765 return (int)dhd
->psta_mode
;
3767 /* Set psta/psr configuration configuration */
3768 int dhd_set_psta_mode(dhd_pub_t
*dhdp
, uint32 val
)
3770 dhd_info_t
*dhd
= dhdp
->info
;
3771 dhd
->psta_mode
= val
;
3774 #endif /* DHD_PSTA */
3776 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
3778 dhd_update_rx_pkt_chainable_state(dhd_pub_t
* dhdp
, uint32 idx
)
3780 dhd_info_t
*dhd
= dhdp
->info
;
3783 ASSERT(idx
< DHD_MAX_IFS
);
3785 ifp
= dhd
->iflist
[idx
];
3788 #ifdef DHD_L2_FILTER
3789 (ifp
->block_ping
) ||
3794 #ifdef DHD_MCAST_REGEN
3795 (ifp
->mcast_regen_bss_enable
) ||
3798 ifp
->rx_pkt_chainable
= FALSE
;
3801 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
3804 /* Get wet configuration configuration */
3805 int dhd_get_wet_mode(dhd_pub_t
*dhdp
)
3807 dhd_info_t
*dhd
= dhdp
->info
;
3808 return (int)dhd
->wet_mode
;
3811 /* Set wet configuration configuration */
3812 int dhd_set_wet_mode(dhd_pub_t
*dhdp
, uint32 val
)
3814 dhd_info_t
*dhd
= dhdp
->info
;
3815 dhd
->wet_mode
= val
;
3816 dhd_update_rx_pkt_chainable_state(dhdp
, 0);
3819 #endif /* DHD_WET */
3821 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3822 int32
dhd_role_to_nl80211_iftype(int32 role
)
3825 case WLC_E_IF_ROLE_STA
:
3826 return NL80211_IFTYPE_STATION
;
3827 case WLC_E_IF_ROLE_AP
:
3828 return NL80211_IFTYPE_AP
;
3829 case WLC_E_IF_ROLE_WDS
:
3830 return NL80211_IFTYPE_WDS
;
3831 case WLC_E_IF_ROLE_P2P_GO
:
3832 return NL80211_IFTYPE_P2P_GO
;
3833 case WLC_E_IF_ROLE_P2P_CLIENT
:
3834 return NL80211_IFTYPE_P2P_CLIENT
;
3835 case WLC_E_IF_ROLE_IBSS
:
3836 case WLC_E_IF_ROLE_NAN
:
3837 return NL80211_IFTYPE_ADHOC
;
3839 return NL80211_IFTYPE_UNSPECIFIED
;
3842 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3845 dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
)
3847 dhd_info_t
*dhd
= handle
;
3848 dhd_if_event_t
*if_event
= event_info
;
3851 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3852 struct wl_if_event_info info
;
3854 struct net_device
*ndev
;
3855 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3858 if (event
!= DHD_WQ_WORK_IF_ADD
) {
3859 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
3864 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
3869 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
3873 dhd_net_if_lock_local(dhd
);
3874 DHD_OS_WAKE_LOCK(&dhd
->pub
);
3875 DHD_PERIM_LOCK(&dhd
->pub
);
3877 ifidx
= if_event
->event
.ifidx
;
3878 bssidx
= if_event
->event
.bssidx
;
3879 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__
, ifidx
));
3881 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3882 if (if_event
->event
.ifidx
> 0) {
3884 bzero(&info
, sizeof(info
));
3886 info
.bssidx
= bssidx
;
3887 info
.role
= if_event
->event
.role
;
3888 strncpy(info
.name
, if_event
->name
, IFNAMSIZ
);
3889 if (is_valid_ether_addr(if_event
->mac
)) {
3890 mac_addr
= if_event
->mac
;
3895 if (wl_cfg80211_post_ifcreate(dhd
->pub
.info
->iflist
[0]->net
,
3896 &info
, mac_addr
, NULL
, true) == NULL
) {
3897 /* Do the post interface create ops */
3898 DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
3903 /* This path is for non-android case */
3904 /* The interface name in host and in event msg are same */
3905 /* if name in event msg is used to create dongle if list on host */
3906 ndev
= dhd_allocate_if(&dhd
->pub
, ifidx
, if_event
->name
,
3907 if_event
->mac
, bssidx
, TRUE
, if_event
->name
);
3909 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__
));
3913 DHD_PERIM_UNLOCK(&dhd
->pub
);
3914 ret
= dhd_register_if(&dhd
->pub
, ifidx
, TRUE
);
3915 DHD_PERIM_LOCK(&dhd
->pub
);
3916 if (ret
!= BCME_OK
) {
3917 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__
));
3918 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
3921 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3923 #ifndef PCIE_FULL_DONGLE
3924 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3925 if (FW_SUPPORTED((&dhd
->pub
), ap
) && (if_event
->event
.role
!= WLC_E_IF_ROLE_STA
)) {
3927 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "ap_isolate", (char *)&var_int
, sizeof(var_int
),
3929 if (ret
!= BCME_OK
) {
3930 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__
));
3931 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
3934 #endif /* PCIE_FULL_DONGLE */
3937 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
3939 DHD_PERIM_UNLOCK(&dhd
->pub
);
3940 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3941 dhd_net_if_unlock_local(dhd
);
3945 dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
)
3947 dhd_info_t
*dhd
= handle
;
3949 dhd_if_event_t
*if_event
= event_info
;
3951 if (event
!= DHD_WQ_WORK_IF_DEL
) {
3952 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
3957 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
3962 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
3966 dhd_net_if_lock_local(dhd
);
3967 DHD_OS_WAKE_LOCK(&dhd
->pub
);
3968 DHD_PERIM_LOCK(&dhd
->pub
);
3970 ifidx
= if_event
->event
.ifidx
;
3971 DHD_TRACE(("Removing interface with idx %d\n", ifidx
));
3973 DHD_PERIM_UNLOCK(&dhd
->pub
);
3974 if (!dhd
->pub
.info
->iflist
[ifidx
]) {
3975 /* No matching netdev found */
3976 DHD_ERROR(("Netdev not found! Do nothing.\n"));
3979 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3980 if (if_event
->event
.ifidx
> 0) {
3981 /* Do the post interface del ops */
3982 if (wl_cfg80211_post_ifdel(dhd
->pub
.info
->iflist
[ifidx
]->net
,
3983 true, if_event
->event
.ifidx
) != 0) {
3984 DHD_TRACE(("Post ifdel ops failed. Returning \n"));
3989 /* For non-cfg80211 drivers */
3990 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
3991 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3994 DHD_PERIM_LOCK(&dhd
->pub
);
3995 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
3996 DHD_PERIM_UNLOCK(&dhd
->pub
);
3997 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3998 dhd_net_if_unlock_local(dhd
);
4002 dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
)
4004 dhd_info_t
*dhd
= handle
;
4005 dhd_if_t
*ifp
= event_info
;
4007 if (event
!= DHD_WQ_WORK_SET_MAC
) {
4008 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4012 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4016 dhd_net_if_lock_local(dhd
);
4017 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4018 DHD_PERIM_LOCK(&dhd
->pub
);
4022 unsigned long flags
;
4024 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4025 in_ap
= (ap_net_dev
!= NULL
);
4026 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4029 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
4036 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4037 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4041 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__
));
4042 ifp
->set_macaddress
= FALSE
;
4043 if (_dhd_set_mac_address(dhd
, ifp
->idx
, ifp
->mac_addr
) == 0)
4044 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__
));
4046 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__
));
4049 DHD_PERIM_UNLOCK(&dhd
->pub
);
4050 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4051 dhd_net_if_unlock_local(dhd
);
4055 dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
)
4057 dhd_info_t
*dhd
= handle
;
4058 int ifidx
= (int)((long int)event_info
);
4059 dhd_if_t
*ifp
= NULL
;
4061 if (event
!= DHD_WQ_WORK_SET_MCAST_LIST
) {
4062 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4067 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4071 dhd_net_if_lock_local(dhd
);
4072 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4073 DHD_PERIM_LOCK(&dhd
->pub
);
4075 ifp
= dhd
->iflist
[ifidx
];
4077 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4078 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4085 unsigned long flags
;
4086 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4087 in_ap
= (ap_net_dev
!= NULL
);
4088 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4091 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
4093 ifp
->set_multicast
= FALSE
;
4101 #ifdef MCAST_LIST_ACCUMULATION
4103 #endif /* MCAST_LIST_ACCUMULATION */
4105 _dhd_set_multicast_list(dhd
, ifidx
);
4106 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__
, ifidx
));
4109 DHD_PERIM_UNLOCK(&dhd
->pub
);
4110 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4111 dhd_net_if_unlock_local(dhd
);
4115 dhd_set_mac_address(struct net_device
*dev
, void *addr
)
4119 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
4120 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
4124 ifidx
= dhd_net2idx(dhd
, dev
);
4125 if (ifidx
== DHD_BAD_IF
)
4128 dhdif
= dhd
->iflist
[ifidx
];
4130 dhd_net_if_lock_local(dhd
);
4131 memcpy(dhdif
->mac_addr
, sa
->sa_data
, ETHER_ADDR_LEN
);
4132 dhdif
->set_macaddress
= TRUE
;
4133 dhd_net_if_unlock_local(dhd
);
4134 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)dhdif
, DHD_WQ_WORK_SET_MAC
,
4135 dhd_set_mac_addr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
4140 dhd_set_multicast_list(struct net_device
*dev
)
4142 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
4145 ifidx
= dhd_net2idx(dhd
, dev
);
4146 if (ifidx
== DHD_BAD_IF
)
4149 dhd
->iflist
[ifidx
]->set_multicast
= TRUE
;
4150 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)((long int)ifidx
),
4151 DHD_WQ_WORK_SET_MCAST_LIST
, dhd_set_mcast_list_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
4154 #ifdef DHD_UCODE_DOWNLOAD
4155 /* Get ucode path */
4157 dhd_get_ucode_path(dhd_pub_t
*dhdp
)
4159 dhd_info_t
*dhd
= dhdp
->info
;
4160 return dhd
->uc_path
;
4162 #endif /* DHD_UCODE_DOWNLOAD */
4164 #ifdef PROP_TXSTATUS
4166 dhd_os_wlfc_block(dhd_pub_t
*pub
)
4168 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
4170 spin_lock_bh(&di
->wlfc_spinlock
);
4175 dhd_os_wlfc_unblock(dhd_pub_t
*pub
)
4177 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
4180 spin_unlock_bh(&di
->wlfc_spinlock
);
4184 #endif /* PROP_TXSTATUS */
4186 /* This routine do not support Packet chain feature, Currently tested for
4189 int dhd_sendup(dhd_pub_t
*dhdp
, int ifidx
, void *p
)
4191 struct sk_buff
*skb
;
4192 void *skbhead
= NULL
;
4193 void *skbprev
= NULL
;
4195 ASSERT(!PKTISCHAINED(p
));
4196 skb
= PKTTONATIVE(dhdp
->osh
, p
);
4198 ifp
= dhdp
->info
->iflist
[ifidx
];
4199 skb
->dev
= ifp
->net
;
4201 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
4203 if (in_interrupt()) {
4204 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4205 __FUNCTION__
, __LINE__
);
4208 if (dhdp
->info
->rxthread_enabled
) {
4212 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
4216 /* If the receive is not processed inside an ISR,
4217 * the softirqd must be woken explicitly to service
4218 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4219 * by netif_rx_ni(), but in earlier kernels, we need
4220 * to do it manually.
4222 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4223 __FUNCTION__
, __LINE__
);
4224 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4229 local_irq_save(flags
);
4231 local_irq_restore(flags
);
4232 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4236 if (dhdp
->info
->rxthread_enabled
&& skbhead
)
4237 dhd_sched_rxf(dhdp
, skbhead
);
4243 __dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
4246 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
4247 struct ether_header
*eh
= NULL
;
4248 #if defined(DHD_L2_FILTER)
4249 dhd_if_t
*ifp
= dhd_get_ifp(dhdp
, ifidx
);
4252 /* Reject if down */
4253 if (!dhdp
->up
|| (dhdp
->busstate
== DHD_BUS_DOWN
)) {
4254 /* free the packet here since the caller won't */
4255 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4259 #ifdef PCIE_FULL_DONGLE
4260 if (dhdp
->busstate
== DHD_BUS_SUSPEND
) {
4261 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
4262 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4263 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4266 return NETDEV_TX_BUSY
;
4267 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
4269 #endif /* PCIE_FULL_DONGLE */
4271 /* Reject if pktlen > MAX_MTU_SZ */
4272 if (PKTLEN(dhdp
->osh
, pktbuf
) > MAX_MTU_SZ
) {
4273 /* free the packet here since the caller won't */
4274 dhdp
->tx_big_packets
++;
4275 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4279 #ifdef DHD_L2_FILTER
4280 /* if dhcp_unicast is enabled, we need to convert the */
4281 /* broadcast DHCP ACK/REPLY packets to Unicast. */
4282 if (ifp
->dhcp_unicast
) {
4284 uint8
* ehptr
= NULL
;
4286 ret
= bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp
->osh
, pktbuf
, ifidx
, &mac_addr
);
4287 if (ret
== BCME_OK
) {
4288 /* if given mac address having valid entry in sta list
4289 * copy the given mac address, and return with BCME_OK
4291 if (dhd_find_sta(dhdp
, ifidx
, mac_addr
)) {
4292 ehptr
= PKTDATA(dhdp
->osh
, pktbuf
);
4293 bcopy(mac_addr
, ehptr
+ ETHER_DEST_OFFSET
, ETHER_ADDR_LEN
);
4298 if (ifp
->grat_arp
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
4299 if (bcm_l2_filter_gratuitous_arp(dhdp
->osh
, pktbuf
) == BCME_OK
) {
4300 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4305 if (ifp
->parp_enable
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
4306 ret
= dhd_l2_filter_pkt_handle(dhdp
, ifidx
, pktbuf
, TRUE
);
4308 /* Drop the packets if l2 filter has processed it already
4309 * otherwise continue with the normal path
4311 if (ret
== BCME_OK
) {
4312 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4316 #endif /* DHD_L2_FILTER */
4317 /* Update multicast statistic */
4318 if (PKTLEN(dhdp
->osh
, pktbuf
) >= ETHER_HDR_LEN
) {
4319 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
4320 eh
= (struct ether_header
*)pktdata
;
4322 if (ETHER_ISMULTI(eh
->ether_dhost
))
4323 dhdp
->tx_multicast
++;
4324 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_802_1X
) {
4325 #ifdef DHD_LOSSLESS_ROAMING
4326 uint8 prio
= (uint8
)PKTPRIO(pktbuf
);
4328 /* back up 802.1x's priority */
4329 dhdp
->prio_8021x
= prio
;
4330 #endif /* DHD_LOSSLESS_ROAMING */
4331 DBG_EVENT_LOG(dhdp
, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED
);
4332 atomic_inc(&dhd
->pend_8021x_cnt
);
4333 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
4334 wl_handle_wps_states(dhd_idx2net(dhdp
, ifidx
),
4335 pktdata
, PKTLEN(dhdp
->osh
, pktbuf
), TRUE
);
4336 #endif /* WL_CFG80211 && WL_WPS_SYNC */
4337 #if defined(DHD_8021X_DUMP)
4338 dhd_dump_eapol_4way_message(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4339 #endif /* DHD_8021X_DUMP */
4342 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) {
4343 #ifdef DHD_DHCP_DUMP
4344 dhd_dhcp_dump(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4345 #endif /* DHD_DHCP_DUMP */
4346 #ifdef DHD_ICMP_DUMP
4347 dhd_icmp_dump(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4348 #endif /* DHD_ICMP_DUMP */
4351 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4356 /* Look into the packet and update the packet priority */
4357 #ifndef PKTPRIO_OVERRIDE
4358 if (PKTPRIO(pktbuf
) == 0)
4359 #endif /* !PKTPRIO_OVERRIDE */
4361 #if defined(QOS_MAP_SET)
4362 pktsetprio_qms(pktbuf
, wl_get_up_table(dhdp
, ifidx
), FALSE
);
4364 pktsetprio(pktbuf
, FALSE
);
4365 #endif /* QOS_MAP_SET */
4367 #ifndef PKTPRIO_OVERRIDE
4369 /* Some protocols like OZMO use priority values from 256..263.
4370 * these are magic values to indicate a specific 802.1d priority.
4371 * make sure that priority field is in range of 0..7
4373 PKTSETPRIO(pktbuf
, PKTPRIO(pktbuf
) & 0x7);
4375 #endif /* !PKTPRIO_OVERRIDE */
4378 #ifdef SUPPORT_SET_TID
4379 dhd_set_tid_based_on_uid(dhdp
, pktbuf
);
4380 #endif /* SUPPORT_SET_TID */
4382 #ifdef PCIE_FULL_DONGLE
4384 * Lkup the per interface hash table, for a matching flowring. If one is not
4385 * available, allocate a unique flowid and add a flowring entry.
4386 * The found or newly created flowid is placed into the pktbuf's tag.
4388 ret
= dhd_flowid_update(dhdp
, ifidx
, dhdp
->flow_prio_map
[(PKTPRIO(pktbuf
))], pktbuf
);
4389 if (ret
!= BCME_OK
) {
4390 PKTCFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
4395 #ifdef PROP_TXSTATUS
4396 if (dhd_wlfc_is_supported(dhdp
)) {
4397 /* store the interface ID */
4398 DHD_PKTTAG_SETIF(PKTTAG(pktbuf
), ifidx
);
4400 /* store destination MAC in the tag as well */
4401 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf
), eh
->ether_dhost
);
4403 /* decide which FIFO this packet belongs to */
4404 if (ETHER_ISMULTI(eh
->ether_dhost
))
4405 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
4406 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), AC_COUNT
);
4408 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), WME_PRIO2AC(PKTPRIO(pktbuf
)));
4410 #endif /* PROP_TXSTATUS */
4412 /* If the protocol uses a data header, apply it */
4413 dhd_prot_hdrpush(dhdp
, ifidx
, pktbuf
);
4416 /* Use bus module to send data frame */
4417 #ifdef DYNAMIC_MUMIMO_CONTROL
4418 if (dhdp
->reassoc_mumimo_sw
&&
4419 dhd_check_eapol_4way_message(PKTDATA(dhdp
->osh
, pktbuf
)) == EAPOL_4WAY_M4
) {
4420 dhdp
->reassoc_mumimo_sw
= 0;
4421 DHD_ENABLE_RUNTIME_PM(dhdp
);
4423 #endif /* DYNAMIC_MUMIMO_CONTROL */
4424 #ifdef PROP_TXSTATUS
4426 if (dhd_wlfc_commit_packets(dhdp
, (f_commitpkt_t
)dhd_bus_txdata
,
4427 dhdp
->bus
, pktbuf
, TRUE
) == WLFC_UNSUPPORTED
) {
4428 /* non-proptxstatus way */
4430 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
4432 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
4433 #endif /* BCMPCIE */
4438 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
4440 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
4441 #endif /* BCMPCIE */
4442 #endif /* PROP_TXSTATUS */
4448 dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
4451 unsigned long flags
;
4454 DHD_GENERAL_LOCK(dhdp
, flags
);
4455 ifp
= dhd_get_ifp(dhdp
, ifidx
);
4456 if (!ifp
|| ifp
->del_in_progress
) {
4457 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
4458 __FUNCTION__
, ifp
, ifp
? ifp
->del_in_progress
: 0));
4459 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4460 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4463 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
4464 DHD_ERROR(("%s: returning as busstate=%d\n",
4465 __FUNCTION__
, dhdp
->busstate
));
4466 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4467 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4470 DHD_IF_SET_TX_ACTIVE(ifp
, DHD_TX_SEND_PKT
);
4471 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp
);
4472 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4474 #ifdef DHD_PCIE_RUNTIMEPM
4475 if (dhdpcie_runtime_bus_wake(dhdp
, FALSE
, __builtin_return_address(0))) {
4476 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
4477 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4481 #endif /* DHD_PCIE_RUNTIMEPM */
4483 DHD_GENERAL_LOCK(dhdp
, flags
);
4484 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
4485 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4486 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
4487 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp
);
4488 DHD_IF_CLR_TX_ACTIVE(ifp
, DHD_TX_SEND_PKT
);
4489 dhd_os_tx_completion_wake(dhdp
);
4490 dhd_os_busbusy_wake(dhdp
);
4491 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4492 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4495 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4497 ret
= __dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
4499 #ifdef DHD_PCIE_RUNTIMEPM
4502 DHD_GENERAL_LOCK(dhdp
, flags
);
4503 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp
);
4504 DHD_IF_CLR_TX_ACTIVE(ifp
, DHD_TX_SEND_PKT
);
4505 dhd_os_tx_completion_wake(dhdp
);
4506 dhd_os_busbusy_wake(dhdp
);
4507 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4511 #if defined(DHD_LB_TXP)
4514 dhd_lb_sendpkt(dhd_info_t
*dhd
, struct net_device
*net
,
4515 int ifidx
, void *skb
)
4517 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->tx_start_percpu_run_cnt
);
4519 /* If the feature is disabled run-time do TX from here */
4520 if (atomic_read(&dhd
->lb_txp_active
) == 0) {
4521 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txp_percpu_run_cnt
);
4522 return __dhd_sendpkt(&dhd
->pub
, ifidx
, skb
);
4525 /* Store the address of net device and interface index in the Packet tag */
4526 DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
), net
);
4527 DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
), ifidx
);
4529 /* Enqueue the skb into tx_pend_queue */
4530 skb_queue_tail(&dhd
->tx_pend_queue
, skb
);
4532 DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__
, skb
, net
));
4534 /* Dispatch the Tx job to be processed by the tx_tasklet */
4535 dhd_lb_tx_dispatch(&dhd
->pub
);
4537 return NETDEV_TX_OK
;
4539 #endif /* DHD_LB_TXP */
4542 dhd_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
4547 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4548 dhd_if_t
*ifp
= NULL
;
4550 unsigned long flags
;
4551 uint8 htsfdlystat_sz
= 0;
4553 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4555 if (dhd_query_bus_erros(&dhd
->pub
)) {
4559 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4560 DHD_BUS_BUSY_SET_IN_TX(&dhd
->pub
);
4561 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4563 #ifdef DHD_PCIE_RUNTIMEPM
4564 if (dhdpcie_runtime_bus_wake(&dhd
->pub
, FALSE
, dhd_start_xmit
)) {
4565 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
4566 /* stop the network queue temporarily until resume done */
4567 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4568 if (!dhdpcie_is_resume_done(&dhd
->pub
)) {
4569 dhd_bus_stop_queue(dhd
->pub
.bus
);
4571 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4572 dhd_os_busbusy_wake(&dhd
->pub
);
4573 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4574 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4577 return NETDEV_TX_BUSY
;
4580 #endif /* DHD_PCIE_RUNTIMEPM */
4582 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4583 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
4584 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4585 __FUNCTION__
, dhd
->pub
.busstate
, dhd
->pub
.dhd_bus_busy_state
));
4586 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4587 #ifdef PCIE_FULL_DONGLE
4588 /* Stop tx queues if suspend is in progress */
4589 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
4590 dhd_bus_stop_queue(dhd
->pub
.bus
);
4592 #endif /* PCIE_FULL_DONGLE */
4593 dhd_os_busbusy_wake(&dhd
->pub
);
4594 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4595 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4598 return NETDEV_TX_BUSY
;
4602 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4603 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4605 #if defined(DHD_HANG_SEND_UP_TEST)
4606 if (dhd
->pub
.req_hang_type
== HANG_REASON_BUS_DOWN
) {
4607 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
4609 #endif /* DHD_HANG_SEND_UP_TEST */
4611 /* Reject if down */
4612 if (dhd
->pub
.hang_was_sent
|| DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd
->pub
)) {
4613 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
4614 __FUNCTION__
, dhd
->pub
.up
, dhd
->pub
.busstate
));
4615 netif_stop_queue(net
);
4616 /* Send Event when bus down detected during data session */
4617 if (dhd
->pub
.up
&& !dhd
->pub
.hang_was_sent
) {
4618 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__
));
4619 dhd
->pub
.hang_reason
= HANG_REASON_BUS_DOWN
;
4620 net_os_send_hang_message(net
);
4622 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4623 dhd_os_busbusy_wake(&dhd
->pub
);
4624 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4625 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4626 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4627 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4630 return NETDEV_TX_BUSY
;
4634 ifp
= DHD_DEV_IFP(net
);
4635 ifidx
= DHD_DEV_IFIDX(net
);
4636 if (!ifp
|| (ifidx
== DHD_BAD_IF
) ||
4637 ifp
->del_in_progress
) {
4638 DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
4639 __FUNCTION__
, ifidx
, ifp
, (ifp
? ifp
->del_in_progress
: 0)));
4640 netif_stop_queue(net
);
4641 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4642 dhd_os_busbusy_wake(&dhd
->pub
);
4643 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4644 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4645 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4646 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4649 return NETDEV_TX_BUSY
;
4653 DHD_IF_SET_TX_ACTIVE(ifp
, DHD_TX_START_XMIT
);
4654 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4656 ASSERT(ifidx
== dhd_net2idx(dhd
, net
));
4657 ASSERT((ifp
!= NULL
) && ((ifidx
< DHD_MAX_IFS
) && (ifp
== dhd
->iflist
[ifidx
])));
4659 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
4661 /* re-align socket buffer if "skb->data" is odd address */
4662 if (((unsigned long)(skb
->data
)) & 0x1) {
4663 unsigned char *data
= skb
->data
;
4664 uint32 length
= skb
->len
;
4665 PKTPUSH(dhd
->pub
.osh
, skb
, 1);
4666 memmove(skb
->data
, data
, length
);
4667 PKTSETLEN(dhd
->pub
.osh
, skb
, length
);
4670 datalen
= PKTLEN(dhd
->pub
.osh
, skb
);
4672 /* Make sure there's enough room for any header */
4673 if (skb_headroom(skb
) < dhd
->pub
.hdrlen
+ htsfdlystat_sz
) {
4674 struct sk_buff
*skb2
;
4676 DHD_INFO(("%s: insufficient headroom\n",
4677 dhd_ifname(&dhd
->pub
, ifidx
)));
4678 dhd
->pub
.tx_realloc
++;
4680 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
4681 skb2
= skb_realloc_headroom(skb
, dhd
->pub
.hdrlen
+ htsfdlystat_sz
);
4684 if ((skb
= skb2
) == NULL
) {
4685 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4686 dhd_ifname(&dhd
->pub
, ifidx
)));
4690 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
4693 /* Convert to packet */
4694 if (!(pktbuf
= PKTFRMNATIVE(dhd
->pub
.osh
, skb
))) {
4695 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4696 dhd_ifname(&dhd
->pub
, ifidx
)));
4697 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
4698 dev_kfree_skb_any(skb
);
4704 /* wet related packet proto manipulation should be done in DHD
4705 since dongle doesn't have complete payload
4707 if (WET_ENABLED(&dhd
->pub
) &&
4708 (dhd_wet_send_proc(dhd
->pub
.wet_info
, pktbuf
, &pktbuf
) < 0)) {
4709 DHD_INFO(("%s:%s: wet send proc failed\n",
4710 __FUNCTION__
, dhd_ifname(&dhd
->pub
, ifidx
)));
4711 PKTFREE(dhd
->pub
.osh
, pktbuf
, FALSE
);
4715 #endif /* DHD_WET */
4718 /* PSR related packet proto manipulation should be done in DHD
4719 * since dongle doesn't have complete payload
4721 if (PSR_ENABLED(&dhd
->pub
) &&
4722 (dhd_psta_proc(&dhd
->pub
, ifidx
, &pktbuf
, TRUE
) < 0)) {
4724 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__
,
4725 dhd_ifname(&dhd
->pub
, ifidx
)));
4727 #endif /* DHD_PSTA */
4729 #ifdef DHDTCPSYNC_FLOOD_BLK
4730 if (dhd_tcpdata_get_flag(&dhd
->pub
, pktbuf
) == FLAG_SYNCACK
) {
4731 ifp
->tsyncack_txed
++;
4733 #endif /* DHDTCPSYNC_FLOOD_BLK */
4735 #ifdef DHDTCPACK_SUPPRESS
4736 if (dhd
->pub
.tcpack_sup_mode
== TCPACK_SUP_HOLD
) {
4737 /* If this packet has been hold or got freed, just return */
4738 if (dhd_tcpack_hold(&dhd
->pub
, pktbuf
, ifidx
)) {
4743 /* If this packet has replaced another packet and got freed, just return */
4744 if (dhd_tcpack_suppress(&dhd
->pub
, pktbuf
)) {
4749 #endif /* DHDTCPACK_SUPPRESS */
4752 * If Load Balance is enabled queue the packet
4753 * else send directly from here.
4755 #if defined(DHD_LB_TXP)
4756 ret
= dhd_lb_sendpkt(dhd
, net
, ifidx
, pktbuf
);
4758 ret
= __dhd_sendpkt(&dhd
->pub
, ifidx
, pktbuf
);
4763 ifp
->stats
.tx_dropped
++;
4764 dhd
->pub
.tx_dropped
++;
4766 #ifdef PROP_TXSTATUS
4767 /* tx_packets counter can counted only when wlfc is disabled */
4768 if (!dhd_wlfc_is_supported(&dhd
->pub
))
4771 dhd
->pub
.tx_packets
++;
4772 ifp
->stats
.tx_packets
++;
4773 ifp
->stats
.tx_bytes
+= datalen
;
4777 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4778 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4779 DHD_IF_CLR_TX_ACTIVE(ifp
, DHD_TX_START_XMIT
);
4780 dhd_os_tx_completion_wake(&dhd
->pub
);
4781 dhd_os_busbusy_wake(&dhd
->pub
);
4782 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4783 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4784 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4785 /* Return ok: we always eat the packet */
4786 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4789 return NETDEV_TX_OK
;
4793 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4794 void dhd_rx_wq_wakeup(struct work_struct
*ptr
)
4796 struct dhd_rx_tx_work
*work
;
4797 struct dhd_pub
* pub
;
4799 work
= container_of(ptr
, struct dhd_rx_tx_work
, work
);
4803 DHD_RPM(("%s: ENTER. \n", __FUNCTION__
));
4805 if (atomic_read(&pub
->block_bus
) || pub
->busstate
== DHD_BUS_DOWN
) {
4809 DHD_OS_WAKE_LOCK(pub
);
4810 if (pm_runtime_get_sync(dhd_bus_to_dev(pub
->bus
)) >= 0) {
4812 // do nothing but wakeup the bus.
4813 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub
->bus
));
4814 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub
->bus
));
4816 DHD_OS_WAKE_UNLOCK(pub
);
4820 void dhd_start_xmit_wq_adapter(struct work_struct
*ptr
)
4822 struct dhd_rx_tx_work
*work
;
4825 struct dhd_bus
* bus
;
4827 work
= container_of(ptr
, struct dhd_rx_tx_work
, work
);
4829 dhd
= DHD_DEV_INFO(work
->net
);
4833 if (atomic_read(&dhd
->pub
.block_bus
)) {
4834 kfree_skb(work
->skb
);
4836 dhd_netif_start_queue(bus
);
4840 if (pm_runtime_get_sync(dhd_bus_to_dev(bus
)) >= 0) {
4841 ret
= dhd_start_xmit(work
->skb
, work
->net
);
4842 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
4843 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
4846 dhd_netif_start_queue(bus
);
4849 netdev_err(work
->net
,
4850 "error: dhd_start_xmit():%d\n", ret
);
4854 dhd_start_xmit_wrapper(struct sk_buff
*skb
, struct net_device
*net
)
4856 struct dhd_rx_tx_work
*start_xmit_work
;
4858 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4860 if (dhd
->pub
.busstate
== DHD_BUS_SUSPEND
) {
4861 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__
));
4863 dhd_netif_stop_queue(dhd
->pub
.bus
);
4865 start_xmit_work
= (struct dhd_rx_tx_work
*)
4866 kmalloc(sizeof(*start_xmit_work
), GFP_ATOMIC
);
4868 if (!start_xmit_work
) {
4870 "error: failed to alloc start_xmit_work\n");
4875 INIT_WORK(&start_xmit_work
->work
, dhd_start_xmit_wq_adapter
);
4876 start_xmit_work
->skb
= skb
;
4877 start_xmit_work
->net
= net
;
4878 queue_work(dhd
->tx_wq
, &start_xmit_work
->work
);
4879 ret
= NET_XMIT_SUCCESS
;
4881 } else if (dhd
->pub
.busstate
== DHD_BUS_DATA
) {
4882 ret
= dhd_start_xmit(skb
, net
);
4884 /* when bus is down */
4892 dhd_bus_wakeup_work(dhd_pub_t
*dhdp
)
4894 struct dhd_rx_tx_work
*rx_work
;
4895 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
4897 rx_work
= kmalloc(sizeof(*rx_work
), GFP_ATOMIC
);
4899 DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__
));
4903 INIT_WORK(&rx_work
->work
, dhd_rx_wq_wakeup
);
4904 rx_work
->pub
= dhdp
;
4905 queue_work(dhd
->rx_wq
, &rx_work
->work
);
4908 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4910 dhd_txflowcontrol(dhd_pub_t
*dhdp
, int ifidx
, bool state
)
4912 struct net_device
*net
;
4913 dhd_info_t
*dhd
= dhdp
->info
;
4916 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4920 #ifdef DHD_LOSSLESS_ROAMING
4921 /* block flowcontrol during roaming */
4922 if ((dhdp
->dequeue_prec_map
== 1 << PRIO_8021D_NC
) && state
== ON
) {
4927 if (ifidx
== ALL_INTERFACES
) {
4928 /* Flow control on all active interfaces */
4929 dhdp
->txoff
= state
;
4930 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
4931 if (dhd
->iflist
[i
]) {
4932 net
= dhd
->iflist
[i
]->net
;
4934 netif_stop_queue(net
);
4936 netif_wake_queue(net
);
4940 if (dhd
->iflist
[ifidx
]) {
4941 net
= dhd
->iflist
[ifidx
]->net
;
4943 netif_stop_queue(net
);
4945 netif_wake_queue(net
);
4956 static const PKTTYPE_INFO packet_type_info
[] =
4958 { ETHER_TYPE_IP
, "IP" },
4959 { ETHER_TYPE_ARP
, "ARP" },
4960 { ETHER_TYPE_BRCM
, "BRCM" },
4961 { ETHER_TYPE_802_1X
, "802.1X" },
4963 { ETHER_TYPE_WAI
, "WAPI" },
4964 #endif /* BCMWAPI_WAI */
4968 static const char *_get_packet_type_str(uint16 type
)
4971 int n
= sizeof(packet_type_info
)/sizeof(packet_type_info
[1]) - 1;
4973 for (i
= 0; i
< n
; i
++) {
4974 if (packet_type_info
[i
].type
== type
)
4975 return packet_type_info
[i
].str
;
4978 return packet_type_info
[n
].str
;
4980 #endif /* DHD_RX_DUMP */
#ifdef DHD_MCAST_REGEN
/*
 * Description: This function is called to do the reverse translation:
 * a received frame carrying a multicast IPv4 destination but a unicast
 * destination MAC gets its MAC rewritten to the mapped multicast address.
 *
 * Input eh - pointer to the ethernet header
 * Returns BCME_OK when the destination MAC was rewritten, BCME_ERROR
 * otherwise (non-IP, non-IPv4, or no translation needed).
 */
static int32
dhd_mcast_reverse_translation(struct ether_header *eh)
{
	uint8 *iph;
	uint32 dest_ip;

	/* Only IP packets are handled */
	if (eh->ether_type != hton16(ETHER_TYPE_IP))
		return BCME_ERROR;

	iph = (uint8 *)eh + ETHER_HDR_LEN;

	/* Non-IPv4 multicast packets are not handled */
	if (IP_VER(iph) != IP_VER_4)
		return BCME_ERROR;

	/* Fix: load the destination IP only after the frame is known to be
	 * IPv4; the original read iph + IPV4_DEST_IP_OFFSET before any
	 * check, a potential out-of-bounds read on short non-IP frames.
	 */
	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));

	/*
	 * The packet has a multicast IP and unicast MAC. That means
	 * we have to do the reverse translation
	 */
	if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
		ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
		return BCME_OK;
	}

	return BCME_ERROR;
}
#endif /* MCAST_REGEN */
5018 #ifdef SHOW_LOGTRACE
5020 dhd_netif_rx_ni(struct sk_buff
* skb
)
5022 /* Do not call netif_recieve_skb as this workqueue scheduler is
5023 * not from NAPI Also as we are not in INTR context, do not call
5024 * netif_rx, instead call netif_rx_ni (for kerenl >= 2.6) which
5025 * does netif_rx, disables irq, raise NET_IF_RX softirq and
5026 * enables interrupts back
5028 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5033 local_irq_save(flags
);
5035 local_irq_restore(flags
);
5036 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5040 dhd_event_logtrace_pkt_process(dhd_pub_t
*dhdp
, struct sk_buff
* skb
)
5042 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5045 bcm_event_msg_u_t evu
;
5047 void *pktdata
= NULL
;
5048 bcm_event_t
*pvt_data
;
5051 DHD_TRACE(("%s:Enter\n", __FUNCTION__
));
5053 /* In dhd_rx_frame, header is stripped using skb_pull
5054 * of size ETH_HLEN, so adjust pktlen accordingly
5056 pktlen
= skb
->len
+ ETH_HLEN
;
5058 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5059 pktdata
= (void *)skb_mac_header(skb
);
5061 pktdata
= (void *)skb
->mac
.raw
;
5062 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5064 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
5066 if (ret
!= BCME_OK
) {
5067 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5068 __FUNCTION__
, ret
));
5072 datalen
= ntoh32(evu
.event
.datalen
);
5074 pvt_data
= (bcm_event_t
*)pktdata
;
5075 data
= &pvt_data
[1];
5077 dhd_dbg_trace_evnt_handler(dhdp
, data
, &dhd
->event_data
, datalen
);
5083 #define DHD_EVENT_LOGTRACE_BOUND 12
5084 #define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 1
5087 dhd_event_logtrace_process(struct work_struct
* work
)
5089 /* Ignore compiler warnings due to -Werror=cast-qual */
5090 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5091 #pragma GCC diagnostic push
5092 #pragma GCC diagnostic ignored "-Wcast-qual"
5094 struct delayed_work
*dw
= to_delayed_work(work
);
5095 struct dhd_info
*dhd
=
5096 container_of(dw
, struct dhd_info
, event_log_dispatcher_work
);
5097 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5098 #pragma GCC diagnostic pop
5102 struct sk_buff
*skb
;
5107 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
5114 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__
));
5118 qlen
= skb_queue_len(&dhd
->evt_trace_queue
);
5119 process_len
= MIN(qlen
, DHD_EVENT_LOGTRACE_BOUND
);
5121 /* Run while loop till bound is reached or skb queue is empty */
5122 while (process_len
--) {
5124 skb
= skb_dequeue(&dhd
->evt_trace_queue
);
5126 DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
5130 BCM_REFERENCE(ifid
);
5131 #ifdef PCIE_FULL_DONGLE
5132 /* Check if pkt is from INFO ring or WLC_E_TRACE */
5133 ifid
= DHD_PKTTAG_IFID((dhd_pkttag_fr_t
*)PKTTAG(skb
));
5134 if (ifid
== DHD_DUMMY_INFO_IF
) {
5135 /* Process logtrace from info rings */
5136 dhd_event_logtrace_infobuf_pkt_process(dhdp
, skb
, &dhd
->event_data
);
5138 #endif /* PCIE_FULL_DONGLE */
5140 /* Processing WLC_E_TRACE case OR non PCIE PCIE_FULL_DONGLE case */
5141 dhd_event_logtrace_pkt_process(dhdp
, skb
);
5144 /* Send packet up if logtrace_pkt_sendup is TRUE */
5145 if (dhdp
->logtrace_pkt_sendup
) {
5146 #ifdef DHD_USE_STATIC_CTRLBUF
5147 /* If bufs are allocated via static buf pool
5148 * and logtrace_pkt_sendup enabled, make a copy,
5149 * free the local one and send the copy up.
5151 void *npkt
= PKTDUP(dhdp
->osh
, skb
);
5152 /* Clone event and send it up */
5153 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5157 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
5158 /* Packet is already freed, go to next packet */
5161 #endif /* DHD_USE_STATIC_CTRLBUF */
5162 #ifdef PCIE_FULL_DONGLE
5163 /* For infobuf packets as if is DHD_DUMMY_INFO_IF,
5164 * to send skb to network layer, assign skb->dev with
5165 * Primary interface n/w device
5167 if (ifid
== DHD_DUMMY_INFO_IF
) {
5168 skb
= PKTTONATIVE(dhdp
->osh
, skb
);
5169 skb
->dev
= dhd
->iflist
[0]->net
;
5171 #endif /* PCIE_FULL_DONGLE */
5173 dhd_netif_rx_ni(skb
);
5175 /* Don't send up. Free up the packet. */
5176 #ifdef DHD_USE_STATIC_CTRLBUF
5177 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5179 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5180 #endif /* DHD_USE_STATIC_CTRLBUF */
5184 /* Reschedule the workqueue if more packets to be processed */
5185 if (qlen
>= DHD_EVENT_LOGTRACE_BOUND
) {
5186 schedule_delayed_work(&dhd
->event_log_dispatcher_work
,
5187 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS
));
5192 dhd_event_logtrace_enqueue(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
5194 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5196 #ifdef PCIE_FULL_DONGLE
5197 /* Add ifidx in the PKTTAG */
5198 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t
*)PKTTAG(pktbuf
), ifidx
);
5199 #endif /* PCIE_FULL_DONGLE */
5200 skb_queue_tail(&dhd
->evt_trace_queue
, pktbuf
);
5202 schedule_delayed_work(&dhd
->event_log_dispatcher_work
, 0);
5206 dhd_event_logtrace_flush_queue(dhd_pub_t
*dhdp
)
5208 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5209 struct sk_buff
*skb
;
5211 while ((skb
= skb_dequeue(&dhd
->evt_trace_queue
)) != NULL
) {
5212 #ifdef DHD_USE_STATIC_CTRLBUF
5213 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5215 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5216 #endif /* DHD_USE_STATIC_CTRLBUF */
5219 #endif /* SHOW_LOGTRACE */
5221 /** Called when a frame is received by the dongle on interface 'ifidx' */
5223 dhd_rx_frame(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
, int numpkt
, uint8 chan
)
5225 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5226 struct sk_buff
*skb
;
5229 void *data
, *pnext
= NULL
;
5232 wl_event_msg_t event
;
5235 void *skbhead
= NULL
;
5236 void *skbprev
= NULL
;
5238 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5239 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5240 unsigned char *dump_data
;
5241 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5242 #ifdef DHD_MCAST_REGEN
5243 uint8 interface_role
;
5244 if_flow_lkup_t
*if_flow_lkup
;
5245 unsigned long flags
;
5247 #ifdef DHD_WAKE_STATUS
5249 wake_counts_t
*wcp
= NULL
;
5250 #endif /* DHD_WAKE_STATUS */
5252 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5254 for (i
= 0; pktbuf
&& i
< numpkt
; i
++, pktbuf
= pnext
) {
5255 struct ether_header
*eh
;
5257 pnext
= PKTNEXT(dhdp
->osh
, pktbuf
);
5258 PKTSETNEXT(dhdp
->osh
, pktbuf
, NULL
);
5260 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5261 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
5262 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5264 if (ifidx
== DHD_DUMMY_INFO_IF
) {
5265 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5266 * context in case of PCIe FD, in case of other bus this will be from
5267 * DPC context. If we get bunch of events from Dongle then printing all
5268 * of them from Tasklet/DPC context that too in data path is costly.
5269 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5270 * events with type WLC_E_TRACE.
5271 * We'll print this console logs from the WorkQueue context by enqueing SKB
5272 * here and Dequeuing will be done in WorkQueue and will be freed only if
5273 * logtrace_pkt_sendup is TRUE
5275 #ifdef SHOW_LOGTRACE
5276 dhd_event_logtrace_enqueue(dhdp
, ifidx
, pktbuf
);
5277 #else /* !SHOW_LOGTRACE */
5278 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
5279 * free the PKT here itself
5281 #ifdef DHD_USE_STATIC_CTRLBUF
5282 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5284 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5285 #endif /* DHD_USE_STATIC_CTRLBUF */
5286 #endif /* SHOW_LOGTRACE */
5289 #ifdef DHD_WAKE_STATUS
5290 pkt_wake
= dhd_bus_get_bus_wake(dhdp
);
5291 wcp
= dhd_bus_get_wakecount(dhdp
);
5293 /* If wakeinfo count buffer is null do not update wake count values */
5296 #endif /* DHD_WAKE_STATUS */
5298 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
5300 if (ifidx
>= DHD_MAX_IFS
) {
5301 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
5302 __FUNCTION__
, ifidx
));
5303 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_BRCM
) {
5304 #ifdef DHD_USE_STATIC_CTRLBUF
5305 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5307 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5308 #endif /* DHD_USE_STATIC_CTRLBUF */
5310 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5315 ifp
= dhd
->iflist
[ifidx
];
5317 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5319 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_BRCM
) {
5320 #ifdef DHD_USE_STATIC_CTRLBUF
5321 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5323 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5324 #endif /* DHD_USE_STATIC_CTRLBUF */
5326 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5331 /* Dropping only data packets before registering net device to avoid kernel panic */
5332 #ifndef PROP_TXSTATUS_VSDB
5333 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
) &&
5334 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
5336 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
|| !dhd
->pub
.up
) &&
5337 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
5338 #endif /* PROP_TXSTATUS_VSDB */
5340 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5342 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5346 #ifdef PROP_TXSTATUS
5347 if (dhd_wlfc_is_header_only_pkt(dhdp
, pktbuf
)) {
5348 /* WLFC may send header only packet when
5349 there is an urgent message but no packet to
5352 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5356 #ifdef DHD_L2_FILTER
5357 /* If block_ping is enabled drop the ping packet */
5358 if (ifp
->block_ping
) {
5359 if (bcm_l2_filter_block_ping(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5360 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5364 if (ifp
->grat_arp
&& DHD_IF_ROLE_STA(dhdp
, ifidx
)) {
5365 if (bcm_l2_filter_gratuitous_arp(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5366 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5370 if (ifp
->parp_enable
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
5371 int ret
= dhd_l2_filter_pkt_handle(dhdp
, ifidx
, pktbuf
, FALSE
);
5373 /* Drop the packets if l2 filter has processed it already
5374 * otherwise continue with the normal path
5376 if (ret
== BCME_OK
) {
5377 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
5381 if (ifp
->block_tdls
) {
5382 if (bcm_l2_filter_block_tdls(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5383 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5387 #endif /* DHD_L2_FILTER */
5389 #ifdef DHD_MCAST_REGEN
5390 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
5391 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
5392 ASSERT(if_flow_lkup
);
5394 interface_role
= if_flow_lkup
[ifidx
].role
;
5395 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
5397 if (ifp
->mcast_regen_bss_enable
&& (interface_role
!= WLC_E_IF_ROLE_WDS
) &&
5398 !DHD_IF_ROLE_AP(dhdp
, ifidx
) &&
5399 ETHER_ISUCAST(eh
->ether_dhost
)) {
5400 if (dhd_mcast_reverse_translation(eh
) == BCME_OK
) {
5402 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5403 if ((dhd_get_psta_mode(dhdp
) == DHD_MODE_PSTA
) ||
5404 (dhd_get_psta_mode(dhdp
) == DHD_MODE_PSR
)) {
5406 /* Let the primary in PSTA interface handle this
5407 * frame after unicast to Multicast conversion
5409 ifp
= dhd_get_ifp(dhdp
, 0);
5416 #endif /* MCAST_REGEN */
5418 #ifdef DHDTCPSYNC_FLOOD_BLK
5419 if (dhd_tcpdata_get_flag(dhdp
, pktbuf
) == FLAG_SYNC
) {
5423 u64 curr_time
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
5425 delta_sync
= ifp
->tsync_rcvd
- ifp
->tsyncack_txed
;
5426 delta_sec
= curr_time
- ifp
->last_sync
;
5427 if (delta_sec
> 1) {
5428 sync_per_sec
= delta_sync
/delta_sec
;
5429 if (sync_per_sec
> TCP_SYNC_FLOOD_LIMIT
) {
5430 schedule_work(&ifp
->blk_tsfl_work
);
5431 DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
5432 "sync recvied %d pkt/sec \n",
5433 ifidx
, sync_per_sec
));
5435 dhd_reset_tcpsync_info_by_ifp(ifp
);
5439 #endif /* DHDTCPSYNC_FLOOD_BLK */
5441 #ifdef DHDTCPACK_SUPPRESS
5442 dhd_tcpdata_info_get(dhdp
, pktbuf
);
5444 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
5447 skb
->dev
= ifp
->net
;
5449 /* wet related packet proto manipulation should be done in DHD
5450 * since dongle doesn't have complete payload
5452 if (WET_ENABLED(&dhd
->pub
) && (dhd_wet_recv_proc(dhd
->pub
.wet_info
,
5454 DHD_INFO(("%s:%s: wet recv proc failed\n",
5455 __FUNCTION__
, dhd_ifname(dhdp
, ifidx
)));
5457 #endif /* DHD_WET */
5460 if (PSR_ENABLED(dhdp
) &&
5461 (dhd_psta_proc(dhdp
, ifidx
, &pktbuf
, FALSE
) < 0)) {
5462 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__
,
5463 dhd_ifname(dhdp
, ifidx
)));
5465 #endif /* DHD_PSTA */
5467 #ifdef PCIE_FULL_DONGLE
5468 if ((DHD_IF_ROLE_AP(dhdp
, ifidx
) || DHD_IF_ROLE_P2PGO(dhdp
, ifidx
)) &&
5469 (!ifp
->ap_isolate
)) {
5470 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
5471 if (ETHER_ISUCAST(eh
->ether_dhost
)) {
5472 if (dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_dhost
)) {
5473 dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
5477 void *npktbuf
= NULL
;
5478 if ((ntoh16(eh
->ether_type
) != ETHER_TYPE_IAPP_L2_UPDATE
) &&
5479 (npktbuf
= PKTDUP(dhdp
->osh
, pktbuf
)) != NULL
) {
5480 dhd_sendpkt(dhdp
, ifidx
, npktbuf
);
5484 #endif /* PCIE_FULL_DONGLE */
5485 #ifdef DYNAMIC_MUMIMO_CONTROL
5486 if (dhdp
->reassoc_mumimo_sw
&& dhdp
->murx_block_eapol
&&
5487 dhd_check_eapol_4way_message((void *)(skb
->data
)) == EAPOL_4WAY_M1
) {
5488 DHD_ERROR(("%s: Reassoc is in progress..."
5489 " drop EAPOL M1 frame\n", __FUNCTION__
));
5490 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5493 #endif /* DYNAMIC_MUMIMO_CONTROL */
5495 /* Get the protocol, maintain skb around eth_type_trans()
5496 * The main reason for this hack is for the limitation of
5497 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
5498 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
5499 * coping of the packet coming from the network stack to add
5500 * BDC, Hardware header etc, during network interface registration
5501 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
5502 * for BDC, Hardware header etc. and not just the ETH_HLEN
5507 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5508 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5509 dump_data
= skb
->data
;
5510 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5512 protocol
= (skb
->data
[12] << 8) | skb
->data
[13];
5513 if (protocol
== ETHER_TYPE_802_1X
) {
5514 DBG_EVENT_LOG(dhdp
, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED
);
5515 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
5516 wl_handle_wps_states(ifp
->net
, dump_data
, len
, FALSE
);
5517 #endif /* WL_CFG80211 && WL_WPS_SYNC */
5518 #ifdef DHD_8021X_DUMP
5519 dhd_dump_eapol_4way_message(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5520 #endif /* DHD_8021X_DUMP */
5523 if (protocol
!= ETHER_TYPE_BRCM
&& protocol
== ETHER_TYPE_IP
) {
5524 #ifdef DHD_DHCP_DUMP
5525 dhd_dhcp_dump(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5526 #endif /* DHD_DHCP_DUMP */
5527 #ifdef DHD_ICMP_DUMP
5528 dhd_icmp_dump(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5529 #endif /* DHD_ICMP_DUMP */
5532 DHD_ERROR(("RX DUMP[%s] - %s\n",
5533 dhd_ifname(dhdp
, ifidx
), _get_packet_type_str(protocol
)));
5534 if (protocol
!= ETHER_TYPE_BRCM
) {
5535 if (dump_data
[0] == 0xFF) {
5536 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__
));
5538 if ((dump_data
[12] == 8) &&
5539 (dump_data
[13] == 6)) {
5540 DHD_ERROR(("%s: ARP %d\n",
5541 __FUNCTION__
, dump_data
[0x15]));
5543 } else if (dump_data
[0] & 1) {
5544 DHD_ERROR(("%s: MULTICAST: " MACDBG
"\n",
5545 __FUNCTION__
, MAC2STRDBG(dump_data
)));
5547 #ifdef DHD_RX_FULL_DUMP
5550 for (k
= 0; k
< skb
->len
; k
++) {
5551 DHD_ERROR(("%02X ", dump_data
[k
]));
5557 #endif /* DHD_RX_FULL_DUMP */
5559 #endif /* DHD_RX_DUMP */
5561 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5563 prhex("[wakepkt_dump]", (char*)dump_data
, MIN(len
, 32));
5565 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5567 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
5569 if (skb
->pkt_type
== PACKET_MULTICAST
) {
5570 dhd
->pub
.rx_multicast
++;
5571 ifp
->stats
.multicast
++;
5577 DHD_DBG_PKT_MON_RX(dhdp
, skb
);
5578 #ifdef DHD_PKT_LOGGING
5579 DHD_PKTLOG_RX(dhdp
, skb
);
5580 #endif /* DHD_PKT_LOGGING */
5581 /* Strip header, count, deliver upward */
5582 skb_pull(skb
, ETH_HLEN
);
5584 /* Process special event packets and then discard them */
5585 memset(&event
, 0, sizeof(event
));
5587 if (ntoh16(skb
->protocol
) == ETHER_TYPE_BRCM
) {
5588 bcm_event_msg_u_t evu
;
5592 ret_event
= wl_host_event_get_data(
5593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5594 skb_mac_header(skb
),
5597 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5600 if (ret_event
!= BCME_OK
) {
5601 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5602 __FUNCTION__
, ret_event
));
5603 #ifdef DHD_USE_STATIC_CTRLBUF
5604 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5606 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5611 memcpy(&event
, &evu
.event
, sizeof(wl_event_msg_t
));
5612 event_type
= ntoh32_ua((void *)&event
.event_type
);
5613 #ifdef SHOW_LOGTRACE
5614 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5615 * context in case of PCIe FD, in case of other bus this will be from
5616 * DPC context. If we get bunch of events from Dongle then printing all
5617 * of them from Tasklet/DPC context that too in data path is costly.
5618 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5619 * events with type WLC_E_TRACE.
5620 * We'll print this console logs from the WorkQueue context by enqueing SKB
5621 * here and Dequeuing will be done in WorkQueue and will be freed only if
5622 * logtrace_pkt_sendup is true
5624 if (event_type
== WLC_E_TRACE
) {
5625 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__
));
5626 dhd_event_logtrace_enqueue(dhdp
, ifidx
, pktbuf
);
5629 #endif /* SHOW_LOGTRACE */
5631 ret_event
= dhd_wl_host_event(dhd
, ifidx
,
5632 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5633 skb_mac_header(skb
),
5636 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5637 len
, &event
, &data
);
5639 wl_event_to_host_order(&event
);
5641 tout_ctrl
= DHD_PACKET_TIMEOUT_MS
;
5643 #if defined(PNO_SUPPORT)
5644 if (event_type
== WLC_E_PFN_NET_FOUND
) {
5645 /* enforce custom wake lock to garantee that Kernel not suspended */
5646 tout_ctrl
= CUSTOM_PNO_EVENT_LOCK_xTIME
* DHD_PACKET_TIMEOUT_MS
;
5648 #endif /* PNO_SUPPORT */
5650 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
5654 #ifdef DHD_WAKE_STATUS
5655 if (unlikely(pkt_wake
)) {
5656 #ifdef DHD_WAKE_EVENT_STATUS
5657 if (event
.event_type
< WLC_E_LAST
) {
5658 wcp
->rc_event
[event
.event_type
]++;
5662 #endif /* DHD_WAKE_EVENT_STATUS */
5664 #endif /* DHD_WAKE_STATUS */
5666 /* For delete virtual interface event, wl_host_event returns positive
5667 * i/f index, do not proceed. just free the pkt.
5669 if ((event_type
== WLC_E_IF
) && (ret_event
> 0)) {
5670 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
5672 #ifdef DHD_USE_STATIC_CTRLBUF
5673 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5675 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5681 * For the event packets, there is a possibility
5682 * of ifidx getting modifed.Thus update the ifp
5685 ASSERT(ifidx
< DHD_MAX_IFS
&& dhd
->iflist
[ifidx
]);
5686 ifp
= dhd
->iflist
[ifidx
];
5687 #ifndef PROP_TXSTATUS_VSDB
5688 if (!(ifp
&& ifp
->net
&& (ifp
->net
->reg_state
== NETREG_REGISTERED
)))
5690 if (!(ifp
&& ifp
->net
&& (ifp
->net
->reg_state
== NETREG_REGISTERED
) &&
5692 #endif /* PROP_TXSTATUS_VSDB */
5694 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
5696 #ifdef DHD_USE_STATIC_CTRLBUF
5697 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5699 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5704 if (dhdp
->wl_event_enabled
) {
5705 #ifdef DHD_USE_STATIC_CTRLBUF
5706 /* If event bufs are allocated via static buf pool
5707 * and wl events are enabled, make a copy, free the
5708 * local one and send the copy up.
5710 void *npkt
= PKTDUP(dhdp
->osh
, skb
);
5711 /* Clone event and send it up */
5712 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5716 DHD_ERROR(("skb clone failed. dropping event.\n"));
5719 #endif /* DHD_USE_STATIC_CTRLBUF */
5721 /* If event enabled not explictly set, drop events */
5722 #ifdef DHD_USE_STATIC_CTRLBUF
5723 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5725 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5726 #endif /* DHD_USE_STATIC_CTRLBUF */
5730 tout_rx
= DHD_PACKET_TIMEOUT_MS
;
5732 #ifdef PROP_TXSTATUS
5733 dhd_wlfc_save_rxpath_ac_time(dhdp
, (uint8
)PKTPRIO(skb
));
5734 #endif /* PROP_TXSTATUS */
5736 #ifdef DHD_WAKE_STATUS
5737 if (unlikely(pkt_wake
)) {
5739 #ifdef DHD_WAKE_RX_STATUS
5740 #define ETHER_ICMP6_HEADER 20
5741 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
5742 #define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
5743 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
5745 if (ntoh16(skb
->protocol
) == ETHER_TYPE_ARP
) /* ARP */
5747 if (dump_data
[0] == 0xFF) { /* Broadcast */
5749 } else if (dump_data
[0] & 0x01) { /* Multicast */
5751 if (ntoh16(skb
->protocol
) == ETHER_TYPE_IPV6
) {
5752 wcp
->rx_multi_ipv6
++;
5753 if ((skb
->len
> ETHER_ICMP6_HEADER
) &&
5754 (dump_data
[ETHER_ICMP6_HEADER
] == IPPROTO_ICMPV6
)) {
5756 if (skb
->len
> ETHER_ICMPV6_TYPE
) {
5757 switch (dump_data
[ETHER_ICMPV6_TYPE
]) {
5758 case NDISC_ROUTER_ADVERTISEMENT
:
5759 wcp
->rx_icmpv6_ra
++;
5761 case NDISC_NEIGHBOUR_ADVERTISEMENT
:
5762 wcp
->rx_icmpv6_na
++;
5764 case NDISC_NEIGHBOUR_SOLICITATION
:
5765 wcp
->rx_icmpv6_ns
++;
5770 } else if (dump_data
[2] == 0x5E) {
5771 wcp
->rx_multi_ipv4
++;
5773 wcp
->rx_multi_other
++;
5775 } else { /* Unicast */
5778 #undef ETHER_ICMP6_HEADER
5779 #undef ETHER_IPV6_SADDR
5780 #undef ETHER_IPV6_DAADR
5781 #undef ETHER_ICMPV6_TYPE
5782 #endif /* DHD_WAKE_RX_STATUS */
5785 #endif /* DHD_WAKE_STATUS */
5788 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
5789 ifp
->net
->last_rx
= jiffies
;
5790 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
5792 if (ntoh16(skb
->protocol
) != ETHER_TYPE_BRCM
) {
5793 dhdp
->dstats
.rx_bytes
+= skb
->len
;
5794 dhdp
->rx_packets
++; /* Local count */
5795 ifp
->stats
.rx_bytes
+= skb
->len
;
5796 ifp
->stats
.rx_packets
++;
5799 if (in_interrupt()) {
5800 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
5801 __FUNCTION__
, __LINE__
);
5802 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5803 #if defined(DHD_LB_RXP)
5804 netif_receive_skb(skb
);
5805 #else /* !defined(DHD_LB_RXP) */
5807 #endif /* !defined(DHD_LB_RXP) */
5808 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5810 if (dhd
->rxthread_enabled
) {
5814 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
5818 /* If the receive is not processed inside an ISR,
5819 * the softirqd must be woken explicitly to service
5820 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5821 * by netif_rx_ni(), but in earlier kernels, we need
5822 * to do it manually.
5824 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
5825 __FUNCTION__
, __LINE__
);
5827 #if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
5828 defined(ARGOS_NOTIFY_CB)
5829 argos_register_notifier_deinit();
5830 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
5831 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
5832 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
5833 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
5834 #if defined(DHD_LB_RXP)
5835 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5836 netif_receive_skb(skb
);
5837 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5838 #else /* !defined(DHD_LB_RXP) */
5839 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5840 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5842 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5845 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5847 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5848 local_irq_save(flags
);
5850 local_irq_restore(flags
);
5851 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5852 #endif /* !defined(DHD_LB_RXP) */
5857 if (dhd
->rxthread_enabled
&& skbhead
)
5858 dhd_sched_rxf(dhdp
, skbhead
);
5860 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp
, tout_rx
);
5861 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp
, tout_ctrl
);
/*
 * Firmware-event hook called from the common layer.
 * The Linux port performs all event processing elsewhere (wl_host_event),
 * so this callback is intentionally a no-op.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Linux version has nothing to do */
	return;
}
5872 dhd_txcomplete(dhd_pub_t
*dhdp
, void *txp
, bool success
)
5874 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
5875 struct ether_header
*eh
;
5878 dhd_prot_hdrpull(dhdp
, NULL
, txp
, NULL
, NULL
);
5880 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, txp
);
5881 type
= ntoh16(eh
->ether_type
);
5883 if (type
== ETHER_TYPE_802_1X
) {
5884 atomic_dec(&dhd
->pend_8021x_cnt
);
5887 #ifdef PROP_TXSTATUS
5888 if (dhdp
->wlfc_state
&& (dhdp
->proptxstatus_mode
!= WLFC_FCMODE_NONE
)) {
5889 dhd_if_t
*ifp
= dhd
->iflist
[DHD_PKTTAG_IF(PKTTAG(txp
))];
5890 uint datalen
= PKTLEN(dhd
->pub
.osh
, txp
);
5893 dhd
->pub
.tx_packets
++;
5894 ifp
->stats
.tx_packets
++;
5895 ifp
->stats
.tx_bytes
+= datalen
;
5897 ifp
->stats
.tx_dropped
++;
5904 static struct net_device_stats
*
5905 dhd_get_stats(struct net_device
*net
)
5907 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
5910 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5913 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__
));
5917 ifp
= dhd_get_ifp_by_ndev(&dhd
->pub
, net
);
5919 /* return empty stats */
5920 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__
));
5925 /* Use the protocol to get dongle stats */
5926 dhd_prot_dstats(&dhd
->pub
);
5931 memset(&net
->stats
, 0, sizeof(net
->stats
));
5936 dhd_watchdog_thread(void *data
)
5938 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
5939 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
5940 /* This thread doesn't need any user-level access,
5941 * so get rid of all our resources
5943 if (dhd_watchdog_prio
> 0) {
5944 struct sched_param param
;
5945 param
.sched_priority
= (dhd_watchdog_prio
< MAX_RT_PRIO
)?
5946 dhd_watchdog_prio
:(MAX_RT_PRIO
-1);
5947 setScheduler(current
, SCHED_FIFO
, ¶m
);
5951 if (down_interruptible (&tsk
->sema
) == 0) {
5952 unsigned long flags
;
5953 unsigned long jiffies_at_start
= jiffies
;
5954 unsigned long time_lapse
;
5956 DHD_OS_WD_WAKE_LOCK(&dhd
->pub
);
5957 #endif /* BCMPCIE */
5959 SMP_RD_BARRIER_DEPENDS();
5960 if (tsk
->terminated
) {
5962 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
5963 #endif /* BCMPCIE */
5967 if (dhd
->pub
.dongle_reset
== FALSE
) {
5968 DHD_TIMER(("%s:\n", __FUNCTION__
));
5969 dhd_bus_watchdog(&dhd
->pub
);
5971 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5972 /* Count the tick for reference */
5974 #ifdef DHD_L2_FILTER
5975 dhd_l2_filter_watchdog(&dhd
->pub
);
5976 #endif /* DHD_L2_FILTER */
5977 time_lapse
= jiffies
- jiffies_at_start
;
5979 /* Reschedule the watchdog */
5980 if (dhd
->wd_timer_valid
) {
5981 mod_timer(&dhd
->timer
,
5983 msecs_to_jiffies(dhd_watchdog_ms
) -
5984 min(msecs_to_jiffies(dhd_watchdog_ms
), time_lapse
));
5986 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5989 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
5990 #endif /* BCMPCIE */
5996 complete_and_exit(&tsk
->completed
, 0);
5999 static void dhd_watchdog(ulong data
)
6001 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
6002 unsigned long flags
;
6004 if (dhd
->pub
.dongle_reset
) {
6008 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
6009 up(&dhd
->thr_wdt_ctl
.sema
);
6014 DHD_OS_WD_WAKE_LOCK(&dhd
->pub
);
6015 #endif /* BCMPCIE */
6016 /* Call the bus module watchdog */
6017 dhd_bus_watchdog(&dhd
->pub
);
6019 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6020 /* Count the tick for reference */
6023 #ifdef DHD_L2_FILTER
6024 dhd_l2_filter_watchdog(&dhd
->pub
);
6025 #endif /* DHD_L2_FILTER */
6026 /* Reschedule the watchdog */
6027 if (dhd
->wd_timer_valid
)
6028 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
6029 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6031 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
6032 #endif /* BCMPCIE */
6035 #ifdef DHD_PCIE_RUNTIMEPM
6037 dhd_rpm_state_thread(void *data
)
6039 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6040 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6043 if (down_interruptible (&tsk
->sema
) == 0) {
6044 unsigned long flags
;
6045 unsigned long jiffies_at_start
= jiffies
;
6046 unsigned long time_lapse
;
6048 SMP_RD_BARRIER_DEPENDS();
6049 if (tsk
->terminated
) {
6053 if (dhd
->pub
.dongle_reset
== FALSE
) {
6054 DHD_TIMER(("%s:\n", __FUNCTION__
));
6056 dhd_runtimepm_state(&dhd
->pub
);
6059 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6060 time_lapse
= jiffies
- jiffies_at_start
;
6062 /* Reschedule the watchdog */
6063 if (dhd
->rpm_timer_valid
) {
6064 mod_timer(&dhd
->rpm_timer
,
6066 msecs_to_jiffies(dhd_runtimepm_ms
) -
6067 min(msecs_to_jiffies(dhd_runtimepm_ms
),
6070 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6077 complete_and_exit(&tsk
->completed
, 0);
6080 static void dhd_runtimepm(ulong data
)
6082 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
6084 if (dhd
->pub
.dongle_reset
) {
6088 if (dhd
->thr_rpm_ctl
.thr_pid
>= 0) {
6089 up(&dhd
->thr_rpm_ctl
.sema
);
6094 void dhd_runtime_pm_disable(dhd_pub_t
*dhdp
)
6096 dhd_os_runtimepm_timer(dhdp
, 0);
6097 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
6098 DHD_ERROR(("DHD Runtime PM Disabled \n"));
6101 void dhd_runtime_pm_enable(dhd_pub_t
*dhdp
)
6103 if (dhd_get_idletime(dhdp
)) {
6104 dhd_os_runtimepm_timer(dhdp
, dhd_runtimepm_ms
);
6105 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6109 #endif /* DHD_PCIE_RUNTIMEPM */
6111 #ifdef ENABLE_ADAPTIVE_SCHED
6113 dhd_sched_policy(int prio
)
6115 struct sched_param param
;
6116 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH
) {
6117 param
.sched_priority
= 0;
6118 setScheduler(current
, SCHED_NORMAL
, ¶m
);
6120 if (get_scheduler_policy(current
) != SCHED_FIFO
) {
6121 param
.sched_priority
= (prio
< MAX_RT_PRIO
)? prio
: (MAX_RT_PRIO
-1);
6122 setScheduler(current
, SCHED_FIFO
, ¶m
);
6126 #endif /* ENABLE_ADAPTIVE_SCHED */
6127 #ifdef DEBUG_CPU_FREQ
6128 static int dhd_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
, void *data
)
6130 dhd_info_t
*dhd
= container_of(nb
, struct dhd_info
, freq_trans
);
6131 struct cpufreq_freqs
*freq
= data
;
6135 if (val
== CPUFREQ_POSTCHANGE
) {
6136 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6137 freq
->new, freq
->cpu
));
6138 *per_cpu_ptr(dhd
->new_freq
, freq
->cpu
) = freq
->new;
6144 #endif /* DEBUG_CPU_FREQ */
6146 dhd_dpc_thread(void *data
)
6148 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6149 !defined(CONFIG_SOC_EXYNOS7870)
6151 unsigned long flags
;
6152 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6153 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6154 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6156 /* This thread doesn't need any user-level access,
6157 * so get rid of all our resources
6159 if (dhd_dpc_prio
> 0)
6161 struct sched_param param
;
6162 param
.sched_priority
= (dhd_dpc_prio
< MAX_RT_PRIO
)?dhd_dpc_prio
:(MAX_RT_PRIO
-1);
6163 setScheduler(current
, SCHED_FIFO
, ¶m
);
6166 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6167 !defined(CONFIG_SOC_EXYNOS7870)
6168 if (!zalloc_cpumask_var(&dhd
->pub
.default_cpu_mask
, GFP_KERNEL
)) {
6169 DHD_ERROR(("dpc_thread, zalloc_cpumask_var error\n"));
6170 dhd
->pub
.affinity_isdpc
= FALSE
;
6172 if (!zalloc_cpumask_var(&dhd
->pub
.dpc_affinity_cpu_mask
, GFP_KERNEL
)) {
6173 DHD_ERROR(("dpc_thread, dpc_affinity_cpu_mask error\n"));
6174 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
6175 dhd
->pub
.affinity_isdpc
= FALSE
;
6177 cpumask_copy(dhd
->pub
.default_cpu_mask
, &hmp_slow_cpu_mask
);
6178 cpumask_or(dhd
->pub
.dpc_affinity_cpu_mask
,
6179 dhd
->pub
.dpc_affinity_cpu_mask
, cpumask_of(DPC_CPUCORE
));
6181 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6182 if ((ret
= argos_task_affinity_setup_label(current
, "WIFI",
6183 dhd
->pub
.dpc_affinity_cpu_mask
,
6184 dhd
->pub
.default_cpu_mask
)) < 0) {
6185 DHD_ERROR(("Failed to add CPU affinity(dpc) error=%d\n",
6187 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
6188 free_cpumask_var(dhd
->pub
.dpc_affinity_cpu_mask
);
6189 dhd
->pub
.affinity_isdpc
= FALSE
;
6191 unsigned int irq
= -1;
6193 if (dhdpcie_get_pcieirq(dhd
->pub
.bus
, &irq
)) {
6194 DHD_ERROR(("%s : Can't get interrupt number\n",
6197 #endif /* BCMPCIE */
6199 wifi_adapter_info_t
*adapter
= dhd
->adapter
;
6200 irq
= adapter
->irq_num
;
6201 #endif /* BCMSDIO */
6202 DHD_ERROR(("Argos set Completed : dpcthread\n"));
6203 set_irq_cpucore(irq
, dhd
->pub
.default_cpu_mask
,
6204 dhd
->pub
.dpc_affinity_cpu_mask
);
6205 dhd
->pub
.affinity_isdpc
= TRUE
;
6207 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6210 #else /* ARGOS_CPU_SCHEDULER */
6211 #ifdef CUSTOM_DPC_CPUCORE
6212 set_cpus_allowed_ptr(current
, cpumask_of(CUSTOM_DPC_CPUCORE
));
6214 #ifdef CUSTOM_SET_CPUCORE
6215 dhd
->pub
.current_dpc
= current
;
6216 #endif /* CUSTOM_SET_CPUCORE */
6217 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6218 /* Run until signal received */
6220 if (!binary_sema_down(tsk
)) {
6221 #ifdef ENABLE_ADAPTIVE_SCHED
6222 dhd_sched_policy(dhd_dpc_prio
);
6223 #endif /* ENABLE_ADAPTIVE_SCHED */
6224 SMP_RD_BARRIER_DEPENDS();
6225 if (tsk
->terminated
) {
6229 /* Call bus dpc unless it indicated down (then clean stop) */
6230 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
6231 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6232 int resched_cnt
= 0;
6233 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6234 dhd_os_wd_timer_extend(&dhd
->pub
, TRUE
);
6235 while (dhd_bus_dpc(dhd
->pub
.bus
)) {
6236 /* process all data */
6237 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6239 if (resched_cnt
> MAX_RESCHED_CNT
) {
6240 DHD_INFO(("%s Calling msleep to"
6241 "let other processes run. \n",
6243 dhd
->pub
.dhd_bug_on
= true;
6247 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6249 dhd_os_wd_timer_extend(&dhd
->pub
, FALSE
);
6250 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
6253 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
6254 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
6260 #ifdef ARGOS_CPU_SCHEDULER
6261 if (dhd
->pub
.affinity_isdpc
== TRUE
) {
6262 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
6263 free_cpumask_var(dhd
->pub
.dpc_affinity_cpu_mask
);
6264 dhd
->pub
.affinity_isdpc
= FALSE
;
6266 #endif /* ARGOS_CPU_SCHEDULER */
6267 complete_and_exit(&tsk
->completed
, 0);
6271 dhd_rxf_thread(void *data
)
6273 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6274 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6275 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6276 !defined(CONFIG_SOC_EXYNOS7870)
6278 unsigned long flags
;
6279 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && CONFIG_SOC_EXYNOS7870 */
6280 #if defined(WAIT_DEQUEUE)
6281 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
6282 ulong watchdogTime
= OSL_SYSUPTIME(); /* msec */
6284 dhd_pub_t
*pub
= &dhd
->pub
;
6286 /* This thread doesn't need any user-level access,
6287 * so get rid of all our resources
6289 if (dhd_rxf_prio
> 0)
6291 struct sched_param param
;
6292 param
.sched_priority
= (dhd_rxf_prio
< MAX_RT_PRIO
)?dhd_rxf_prio
:(MAX_RT_PRIO
-1);
6293 setScheduler(current
, SCHED_FIFO
, ¶m
);
6296 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6297 !defined(CONFIG_SOC_EXYNOS7870)
6298 if (!zalloc_cpumask_var(&dhd
->pub
.rxf_affinity_cpu_mask
, GFP_KERNEL
)) {
6299 DHD_ERROR(("rxthread zalloc_cpumask_var error\n"));
6300 dhd
->pub
.affinity_isrxf
= FALSE
;
6302 cpumask_or(dhd
->pub
.rxf_affinity_cpu_mask
, dhd
->pub
.rxf_affinity_cpu_mask
,
6303 cpumask_of(RXF_CPUCORE
));
6305 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6306 if ((ret
= argos_task_affinity_setup_label(current
, "WIFI",
6307 dhd
->pub
.rxf_affinity_cpu_mask
, dhd
->pub
.default_cpu_mask
)) < 0) {
6308 DHD_ERROR(("Failed to add CPU affinity(rxf) error=%d\n", ret
));
6309 dhd
->pub
.affinity_isrxf
= FALSE
;
6310 free_cpumask_var(dhd
->pub
.rxf_affinity_cpu_mask
);
6312 DHD_ERROR(("RXthread affinity completed\n"));
6313 dhd
->pub
.affinity_isrxf
= TRUE
;
6315 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6317 #else /* ARGOS_CPU_SCHEDULER */
6318 #ifdef CUSTOM_SET_CPUCORE
6319 dhd
->pub
.current_rxf
= current
;
6320 #endif /* CUSTOM_SET_CPUCORE */
6321 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6322 /* Run until signal received */
6324 if (down_interruptible(&tsk
->sema
) == 0) {
6326 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
6329 #ifdef ENABLE_ADAPTIVE_SCHED
6330 dhd_sched_policy(dhd_rxf_prio
);
6331 #endif /* ENABLE_ADAPTIVE_SCHED */
6333 SMP_RD_BARRIER_DEPENDS();
6335 if (tsk
->terminated
) {
6338 skb
= dhd_rxf_dequeue(pub
);
6344 void *skbnext
= PKTNEXT(pub
->osh
, skb
);
6345 PKTSETNEXT(pub
->osh
, skb
, NULL
);
6346 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6347 __FUNCTION__
, __LINE__
);
6348 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6352 local_irq_save(flags
);
6354 local_irq_restore(flags
);
6359 #if defined(WAIT_DEQUEUE)
6360 if (OSL_SYSUPTIME() - watchdogTime
> RXF_WATCHDOG_TIME
) {
6362 watchdogTime
= OSL_SYSUPTIME();
6366 DHD_OS_WAKE_UNLOCK(pub
);
6371 #ifdef ARGOS_CPU_SCHEDULER
6372 if (dhd
->pub
.affinity_isrxf
== TRUE
) {
6373 free_cpumask_var(dhd
->pub
.rxf_affinity_cpu_mask
);
6374 dhd
->pub
.affinity_isrxf
= FALSE
;
6376 #endif /* ARGOS_CPU_SCHEDULER */
6377 complete_and_exit(&tsk
->completed
, 0);
6381 void dhd_dpc_enable(dhd_pub_t
*dhdp
)
6383 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
6386 if (!dhdp
|| !dhdp
->info
)
6389 #endif /* DHD_LB_RXP || DHD_LB_TXP */
6392 __skb_queue_head_init(&dhd
->rx_pend_queue
);
6393 #endif /* DHD_LB_RXP */
6396 skb_queue_head_init(&dhd
->tx_pend_queue
);
6397 #endif /* DHD_LB_TXP */
6399 #endif /* BCMPCIE */
6403 dhd_dpc_kill(dhd_pub_t
*dhdp
)
6417 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
6418 tasklet_kill(&dhd
->tasklet
);
6419 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__
));
6424 cancel_work_sync(&dhd
->rx_napi_dispatcher_work
);
6425 __skb_queue_purge(&dhd
->rx_pend_queue
);
6426 #endif /* DHD_LB_RXP */
6428 cancel_work_sync(&dhd
->tx_dispatcher_work
);
6429 skb_queue_purge(&dhd
->tx_pend_queue
);
6430 #endif /* DHD_LB_TXP */
6432 /* Kill the Load Balancing Tasklets */
6433 #if defined(DHD_LB_TXC)
6434 tasklet_kill(&dhd
->tx_compl_tasklet
);
6435 #endif /* DHD_LB_TXC */
6436 #if defined(DHD_LB_RXC)
6437 tasklet_kill(&dhd
->rx_compl_tasklet
);
6438 #endif /* DHD_LB_RXC */
6439 #if defined(DHD_LB_TXP)
6440 tasklet_kill(&dhd
->tx_tasklet
);
6441 #endif /* DHD_LB_TXP */
6446 dhd_dpc_tasklet_kill(dhd_pub_t
*dhdp
)
6460 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
6461 tasklet_kill(&dhd
->tasklet
);
6464 #endif /* BCMPCIE */
6471 dhd
= (dhd_info_t
*)data
;
6473 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
6474 * down below , wake lock is set,
6475 * the tasklet is initialized in dhd_attach()
6477 /* Call bus dpc unless it indicated down (then clean stop) */
6478 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
6479 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6480 DHD_LB_STATS_INCR(dhd
->dhd_dpc_cnt
);
6481 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6482 if (dhd_bus_dpc(dhd
->pub
.bus
)) {
6483 tasklet_schedule(&dhd
->tasklet
);
6486 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
6491 dhd_sched_dpc(dhd_pub_t
*dhdp
)
6493 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6495 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
6496 DHD_OS_WAKE_LOCK(dhdp
);
6497 /* If the semaphore does not get up,
6498 * wake unlock should be done here
6500 if (!binary_sema_up(&dhd
->thr_dpc_ctl
)) {
6501 DHD_OS_WAKE_UNLOCK(dhdp
);
6505 dhd_bus_set_dpc_sched_time(dhdp
);
6506 tasklet_schedule(&dhd
->tasklet
);
6511 dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
)
6513 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6515 DHD_OS_WAKE_LOCK(dhdp
);
6517 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6519 if (dhd_rxf_enqueue(dhdp
, skb
) == BCME_OK
)
6522 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
6523 up(&dhd
->thr_rxf_ctl
.sema
);
6528 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6529 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6532 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6534 dhd_toe_get(dhd_info_t
*dhd
, int ifidx
, uint32
*toe_ol
)
6539 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe_ol", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
6543 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd
->pub
,
6548 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6552 memcpy(toe_ol
, buf
, sizeof(uint32
));
6556 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6558 dhd_toe_set(dhd_info_t
*dhd
, int ifidx
, uint32 toe_ol
)
6562 /* Set toe_ol as requested */
6563 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe_ol", (char *)&toe_ol
, sizeof(toe_ol
), NULL
, 0, TRUE
);
6565 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6566 dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6570 /* Enable toe globally only if any components are enabled. */
6571 toe
= (toe_ol
!= 0);
6572 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe", (char *)&toe
, sizeof(toe
), NULL
, 0, TRUE
);
6574 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6582 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6583 void dhd_set_scb_probe(dhd_pub_t
*dhd
)
6585 wl_scb_probe_t scb_probe
;
6586 char iovbuf
[WL_EVENTING_MASK_LEN
+ sizeof(wl_scb_probe_t
)];
6589 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
6593 ret
= dhd_iovar(dhd
, 0, "scb_probe", NULL
, 0, iovbuf
, sizeof(iovbuf
), FALSE
);
6595 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__
));
6598 memcpy(&scb_probe
, iovbuf
, sizeof(wl_scb_probe_t
));
6600 scb_probe
.scb_max_probe
= NUM_SCB_MAX_PROBE
;
6602 ret
= dhd_iovar(dhd
, 0, "scb_probe", (char *)&scb_probe
, sizeof(wl_scb_probe_t
), NULL
, 0,
6605 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__
));
6609 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6613 dhd_ethtool_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*info
)
6615 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
6617 snprintf(info
->driver
, sizeof(info
->driver
), "wl");
6618 snprintf(info
->version
, sizeof(info
->version
), "%lu", dhd
->pub
.drv_version
);
6621 struct ethtool_ops dhd_ethtool_ops
= {
6622 .get_drvinfo
= dhd_ethtool_get_drvinfo
6624 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6626 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
6628 dhd_ethtool(dhd_info_t
*dhd
, void *uaddr
)
6630 struct ethtool_drvinfo info
;
6631 char drvname
[sizeof(info
.driver
)];
6634 struct ethtool_value edata
;
6635 uint32 toe_cmpnt
, csum_dir
;
6639 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
6641 /* all ethtool calls start with a cmd word */
6642 if (copy_from_user(&cmd
, uaddr
, sizeof (uint32
)))
6646 case ETHTOOL_GDRVINFO
:
6647 /* Copy out any request driver name */
6648 if (copy_from_user(&info
, uaddr
, sizeof(info
)))
6650 strncpy(drvname
, info
.driver
, sizeof(info
.driver
));
6651 drvname
[sizeof(info
.driver
)-1] = '\0';
6653 /* clear struct for return */
6654 memset(&info
, 0, sizeof(info
));
6657 /* if dhd requested, identify ourselves */
6658 if (strcmp(drvname
, "?dhd") == 0) {
6659 snprintf(info
.driver
, sizeof(info
.driver
), "dhd");
6660 strncpy(info
.version
, EPI_VERSION_STR
, sizeof(info
.version
) - 1);
6661 info
.version
[sizeof(info
.version
) - 1] = '\0';
6664 /* otherwise, require dongle to be up */
6665 else if (!dhd
->pub
.up
) {
6666 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__
));
6670 /* finally, report dongle driver type */
6671 else if (dhd
->pub
.iswl
)
6672 snprintf(info
.driver
, sizeof(info
.driver
), "wl");
6674 snprintf(info
.driver
, sizeof(info
.driver
), "xx");
6676 snprintf(info
.version
, sizeof(info
.version
), "%lu", dhd
->pub
.drv_version
);
6677 if (copy_to_user(uaddr
, &info
, sizeof(info
)))
6679 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__
,
6680 (int)sizeof(drvname
), drvname
, info
.driver
));
6684 /* Get toe offload components from dongle */
6685 case ETHTOOL_GRXCSUM
:
6686 case ETHTOOL_GTXCSUM
:
6687 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
6690 csum_dir
= (cmd
== ETHTOOL_GTXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
6693 edata
.data
= (toe_cmpnt
& csum_dir
) ? 1 : 0;
6695 if (copy_to_user(uaddr
, &edata
, sizeof(edata
)))
6699 /* Set toe offload components in dongle */
6700 case ETHTOOL_SRXCSUM
:
6701 case ETHTOOL_STXCSUM
:
6702 if (copy_from_user(&edata
, uaddr
, sizeof(edata
)))
6705 /* Read the current settings, update and write back */
6706 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
6709 csum_dir
= (cmd
== ETHTOOL_STXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
6711 if (edata
.data
!= 0)
6712 toe_cmpnt
|= csum_dir
;
6714 toe_cmpnt
&= ~csum_dir
;
6716 if ((ret
= dhd_toe_set(dhd
, 0, toe_cmpnt
)) < 0)
6719 /* If setting TX checksum mode, tell Linux the new mode */
6720 if (cmd
== ETHTOOL_STXCSUM
) {
6722 dhd
->iflist
[0]->net
->features
|= NETIF_F_IP_CSUM
;
6724 dhd
->iflist
[0]->net
->features
&= ~NETIF_F_IP_CSUM
;
6736 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
6738 static bool dhd_check_hang(struct net_device
*net
, dhd_pub_t
*dhdp
, int error
)
6741 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
6748 #if !defined(BCMPCIE)
6749 if (dhdp
->info
->thr_dpc_ctl
.thr_pid
< 0) {
6750 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__
));
6755 if ((error
== -ETIMEDOUT
) || (error
== -EREMOTEIO
) ||
6756 ((dhdp
->busstate
== DHD_BUS_DOWN
) && (!dhdp
->dongle_reset
))) {
6758 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
6759 __FUNCTION__
, dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
,
6760 dhdp
->d3ackcnt_timeout
, error
, dhdp
->busstate
));
6762 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__
,
6763 dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
, error
, dhdp
->busstate
));
6764 #endif /* BCMPCIE */
6765 if (dhdp
->hang_reason
== 0) {
6766 if (dhdp
->dongle_trap_occured
) {
6767 dhdp
->hang_reason
= HANG_REASON_DONGLE_TRAP
;
6769 } else if (dhdp
->d3ackcnt_timeout
) {
6770 dhdp
->hang_reason
= dhdp
->is_sched_error
?
6771 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR
:
6772 HANG_REASON_D3_ACK_TIMEOUT
;
6773 #endif /* BCMPCIE */
6775 dhdp
->hang_reason
= dhdp
->is_sched_error
?
6776 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR
:
6777 HANG_REASON_IOCTL_RESP_TIMEOUT
;
6780 net_os_send_hang_message(net
);
6788 dhd_monitor_enabled(dhd_pub_t
*dhd
, int ifidx
)
6790 return (dhd
->info
->monitor_type
!= 0);
6794 dhd_rx_mon_pkt(dhd_pub_t
*dhdp
, host_rxbuf_cmpl_t
* msg
, void *pkt
, int ifidx
)
6796 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6798 uint8 amsdu_flag
= (msg
->flags
& BCMPCIE_PKT_FLAGS_MONITOR_MASK
) >>
6799 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT
;
6800 switch (amsdu_flag
) {
6801 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU
:
6803 if (!dhd
->monitor_skb
) {
6804 if ((dhd
->monitor_skb
= PKTTONATIVE(dhdp
->osh
, pkt
))
6808 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
6809 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
6811 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6812 dhd
->monitor_skb
= NULL
;
6815 dhd
->monitor_skb
->protocol
=
6816 eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
6817 dhd
->monitor_len
= 0;
6820 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT
:
6821 if (!dhd
->monitor_skb
) {
6822 if ((dhd
->monitor_skb
= dev_alloc_skb(MAX_MON_PKT_SIZE
))
6825 dhd
->monitor_len
= 0;
6827 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
6828 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
6830 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6831 dev_kfree_skb(dhd
->monitor_skb
);
6834 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
),
6835 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
6836 dhd
->monitor_len
= PKTLEN(dhdp
->osh
, pkt
);
6837 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6840 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT
:
6841 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
) + dhd
->monitor_len
,
6842 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
6843 dhd
->monitor_len
+= PKTLEN(dhdp
->osh
, pkt
);
6844 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6847 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT
:
6848 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
) + dhd
->monitor_len
,
6849 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
6850 dhd
->monitor_len
+= PKTLEN(dhdp
->osh
, pkt
);
6851 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6852 skb_put(dhd
->monitor_skb
, dhd
->monitor_len
);
6853 dhd
->monitor_skb
->protocol
=
6854 eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
6855 dhd
->monitor_len
= 0;
6860 if (in_interrupt()) {
6861 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6862 __FUNCTION__
, __LINE__
);
6863 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6864 netif_rx(dhd
->monitor_skb
);
6865 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6867 /* If the receive is not processed inside an ISR,
6868 * the softirqd must be woken explicitly to service
6869 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6870 * by netif_rx_ni(), but in earlier kernels, we need
6871 * to do it manually.
6873 bcm_object_trace_opr(dhd
->monitor_skb
, BCM_OBJDBG_REMOVE
,
6874 __FUNCTION__
, __LINE__
);
6876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6877 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6878 netif_rx_ni(dhd
->monitor_skb
);
6879 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6882 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6883 netif_rx(dhd
->monitor_skb
);
6884 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6885 local_irq_save(flags
);
6887 local_irq_restore(flags
);
6888 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6891 dhd
->monitor_skb
= NULL
;
6894 typedef struct dhd_mon_dev_priv
{
6895 struct net_device_stats stats
;
6896 } dhd_mon_dev_priv_t
;
6898 #define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
6899 #define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
6900 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
6903 dhd_monitor_start(struct sk_buff
*skb
, struct net_device
*dev
)
6905 PKTFREE(NULL
, skb
, FALSE
);
6909 #if defined(BT_OVER_SDIO)
6912 dhdsdio_bus_usr_cnt_inc(dhd_pub_t
*dhdp
)
6914 dhdp
->info
->bus_user_count
++;
6918 dhdsdio_bus_usr_cnt_dec(dhd_pub_t
*dhdp
)
6920 dhdp
->info
->bus_user_count
--;
6924 * Success: Returns 0
6925 * Failure: Returns -1 or errono code
6928 dhd_bus_get(wlan_bt_handle_t handle
, bus_owner_t owner
)
6930 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
6931 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6934 mutex_lock(&dhd
->bus_user_lock
);
6935 ++dhd
->bus_user_count
;
6936 if (dhd
->bus_user_count
< 0) {
6937 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__
));
6942 if (dhd
->bus_user_count
== 1) {
6944 dhd
->pub
.hang_was_sent
= 0;
6946 /* First user, turn on WL_REG, start the bus */
6947 DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__
));
6949 if (!wifi_platform_set_power(dhd
->adapter
, TRUE
, WIFI_TURNON_DELAY
)) {
6951 ret
= dhd_bus_resume(dhdp
, 0);
6953 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
6954 __FUNCTION__
, ret
));
6959 dhd_update_fw_nv_path(dhd
);
6960 /* update firmware and nvram path to sdio bus */
6961 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
6962 dhd
->fw_path
, dhd
->nv_path
);
6963 /* download the firmware, Enable F2 */
6964 /* TODO: Should be done only in case of FW switch */
6965 ret
= dhd_bus_devreset(dhdp
, FALSE
);
6966 dhd_bus_resume(dhdp
, 1);
6968 if (dhd_sync_with_dongle(&dhd
->pub
) < 0) {
6969 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__
));
6973 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__
, ret
));
6976 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
6977 __FUNCTION__
, dhd
->bus_user_count
));
6980 mutex_unlock(&dhd
->bus_user_lock
);
6983 EXPORT_SYMBOL(dhd_bus_get
);
6986 * Success: Returns 0
6987 * Failure: Returns -1 or errono code
6990 dhd_bus_put(wlan_bt_handle_t handle
, bus_owner_t owner
)
6992 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
6993 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6995 BCM_REFERENCE(owner
);
6997 mutex_lock(&dhd
->bus_user_lock
);
6998 --dhd
->bus_user_count
;
6999 if (dhd
->bus_user_count
< 0) {
7000 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__
));
7001 dhd
->bus_user_count
= 0;
7006 if (dhd
->bus_user_count
== 0) {
7007 /* Last user, stop the bus and turn Off WL_REG */
7008 DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
7010 #ifdef PROP_TXSTATUS
7011 if (dhd
->pub
.wlfc_enabled
) {
7012 dhd_wlfc_deinit(&dhd
->pub
);
7014 #endif /* PROP_TXSTATUS */
7016 if (dhd
->pub
.pno_state
) {
7017 dhd_pno_deinit(&dhd
->pub
);
7019 #endif /* PNO_SUPPORT */
7021 if (dhd
->pub
.rtt_state
) {
7022 dhd_rtt_deinit(&dhd
->pub
);
7024 #endif /* RTT_SUPPORT */
7025 ret
= dhd_bus_devreset(dhdp
, TRUE
);
7027 dhd_bus_suspend(dhdp
);
7028 wifi_platform_set_power(dhd
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
7031 DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
7032 __FUNCTION__
, dhd
->bus_user_count
));
7035 mutex_unlock(&dhd
->bus_user_lock
);
7038 EXPORT_SYMBOL(dhd_bus_put
);
7041 dhd_net_bus_get(struct net_device
*dev
)
7043 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
7044 return dhd_bus_get(&dhd
->pub
, WLAN_MODULE
);
7048 dhd_net_bus_put(struct net_device
*dev
)
7050 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
7051 return dhd_bus_put(&dhd
->pub
, WLAN_MODULE
);
7055 * Function to enable the Bus Clock
7056 * Returns BCME_OK on success and BCME_xxx on failure
7058 * This function is not callable from non-sleepable context
7060 int dhd_bus_clk_enable(wlan_bt_handle_t handle
, bus_owner_t owner
)
7062 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7066 dhd_os_sdlock(dhdp
);
7068 * The second argument is TRUE, that means, we expect
7069 * the function to "wait" until the clocks are really
7072 ret
= __dhdsdio_clk_enable(dhdp
->bus
, owner
, TRUE
);
7073 dhd_os_sdunlock(dhdp
);
7077 EXPORT_SYMBOL(dhd_bus_clk_enable
);
7080 * Function to disable the Bus Clock
7081 * Returns BCME_OK on success and BCME_xxx on failure
7083 * This function is not callable from non-sleepable context
7085 int dhd_bus_clk_disable(wlan_bt_handle_t handle
, bus_owner_t owner
)
7087 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7091 dhd_os_sdlock(dhdp
);
7093 * The second argument is TRUE, that means, we expect
7094 * the function to "wait" until the clocks are really
7097 ret
= __dhdsdio_clk_disable(dhdp
->bus
, owner
, TRUE
);
7098 dhd_os_sdunlock(dhdp
);
7102 EXPORT_SYMBOL(dhd_bus_clk_disable
);
7105 * Function to reset bt_use_count counter to zero.
7107 * This function is not callable from non-sleepable context
7109 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle
)
7111 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7113 /* take the lock and reset bt use count */
7114 dhd_os_sdlock(dhdp
);
7115 dhdsdio_reset_bt_use_count(dhdp
->bus
);
7116 dhd_os_sdunlock(dhdp
);
7118 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count
);
7120 void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle
)
7122 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7123 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7125 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7126 dhdp
->hang_was_sent
= 0;
7128 dhd_os_send_hang_message(&dhd
->pub
);
7130 DHD_ERROR(("%s: unsupported\n", __FUNCTION__
));
7133 EXPORT_SYMBOL(dhd_bus_retry_hang_recovery
);
7135 #endif /* BT_OVER_SDIO */
7138 dhd_monitor_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
7143 static struct net_device_stats
*
7144 dhd_monitor_get_stats(struct net_device
*dev
)
7146 return &DHD_MON_DEV_STATS(dev
);
7149 static const struct net_device_ops netdev_monitor_ops
=
7151 .ndo_start_xmit
= dhd_monitor_start
,
7152 .ndo_get_stats
= dhd_monitor_get_stats
,
7153 .ndo_do_ioctl
= dhd_monitor_ioctl
7157 dhd_add_monitor_if(dhd_info_t
*dhd
)
7159 struct net_device
*dev
;
7161 uint32 scan_suppress
= FALSE
;
7165 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
7169 if (dhd
->monitor_dev
) {
7170 DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__
));
7174 dev
= alloc_etherdev(DHD_MON_DEV_PRIV_SIZE
);
7176 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__
));
7180 devname
= "radiotap";
7182 snprintf(dev
->name
, sizeof(dev
->name
), "%s%u", devname
, dhd
->unit
);
7184 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7185 #define ARPHRD_IEEE80211_PRISM 802
7188 #ifndef ARPHRD_IEEE80211_RADIOTAP
7189 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7190 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7192 dev
->type
= ARPHRD_IEEE80211_RADIOTAP
;
7194 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7195 dev
->hard_start_xmit
= dhd_monitor_start
;
7196 dev
->do_ioctl
= dhd_monitor_ioctl
;
7197 dev
->get_stats
= dhd_monitor_get_stats
;
7199 dev
->netdev_ops
= &netdev_monitor_ops
;
7202 if (register_netdevice(dev
)) {
7203 DHD_ERROR(("%s, register_netdev failed for %s\n",
7204 __FUNCTION__
, dev
->name
));
7209 if (FW_SUPPORTED((&dhd
->pub
), monitor
)) {
7210 #ifdef DHD_PCIE_RUNTIMEPM
7211 /* Disable RuntimePM in monitor mode */
7212 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
7213 DHD_ERROR(("%s : Disable RuntimePM in Monitor Mode\n", __FUNCTION__
));
7214 #endif /* DHD_PCIE_RUNTIME_PM */
7215 scan_suppress
= TRUE
;
7216 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
7217 ret
= dhd_iovar(&dhd
->pub
, 0, "scansuppress", (char *)&scan_suppress
,
7218 sizeof(scan_suppress
), NULL
, 0, TRUE
);
7220 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__
, ret
));
7224 dhd
->monitor_dev
= dev
;
7228 dhd_del_monitor_if(dhd_info_t
*dhd
)
7232 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
7236 if (!dhd
->monitor_dev
) {
7237 DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__
));
7241 if (dhd
->monitor_dev
) {
7242 if (dhd
->monitor_dev
->reg_state
== NETREG_UNINITIALIZED
) {
7243 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7244 MFREE(dhd
->osh
, dhd
->monitor_dev
->priv
, DHD_MON_DEV_PRIV_SIZE
);
7245 MFREE(dhd
->osh
, dhd
->monitor_dev
, sizeof(struct net_device
));
7247 free_netdev(dhd
->monitor_dev
);
7250 unregister_netdevice(dhd
->monitor_dev
);
7252 dhd
->monitor_dev
= NULL
;
7257 dhd_set_monitor(dhd_pub_t
*pub
, int ifidx
, int val
)
7259 dhd_info_t
*dhd
= pub
->info
;
7261 DHD_TRACE(("%s: val %d\n", __FUNCTION__
, val
));
7263 dhd_net_if_lock_local(dhd
);
7265 /* Delete monitor */
7266 dhd_del_monitor_if(dhd
);
7269 dhd_add_monitor_if(dhd
);
7271 dhd
->monitor_type
= val
;
7272 dhd_net_if_unlock_local(dhd
);
7274 #endif /* WL_MONITOR */
7276 int dhd_ioctl_process(dhd_pub_t
*pub
, int ifidx
, dhd_ioctl_t
*ioc
, void *data_buf
)
7278 int bcmerror
= BCME_OK
;
7280 struct net_device
*net
;
7282 net
= dhd_idx2net(pub
, ifidx
);
7284 bcmerror
= BCME_BADARG
;
7286 * The netdev pointer is bad means the DHD can't communicate
7287 * to higher layers, so just return from here
7292 /* check for local dhd ioctl and handle it */
7293 if (ioc
->driver
== DHD_IOCTL_MAGIC
) {
7294 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
7296 buflen
= MIN(ioc
->len
, DHD_IOCTL_MAXLEN
);
7297 bcmerror
= dhd_ioctl((void *)pub
, ioc
, data_buf
, buflen
);
7299 pub
->bcmerror
= bcmerror
;
7303 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
7305 buflen
= MIN(ioc
->len
, WLC_IOCTL_MAXLEN
);
7307 /* send to dongle (must be up, and wl). */
7308 if (pub
->busstate
== DHD_BUS_DOWN
|| pub
->busstate
== DHD_BUS_LOAD
) {
7309 if ((!pub
->dongle_trap_occured
) && allow_delay_fwdl
) {
7311 if (atomic_read(&exit_in_progress
)) {
7312 DHD_ERROR(("%s module exit in progress\n", __func__
));
7313 bcmerror
= BCME_DONGLE_DOWN
;
7316 ret
= dhd_bus_start(pub
);
7318 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
7319 bcmerror
= BCME_DONGLE_DOWN
;
7323 bcmerror
= BCME_DONGLE_DOWN
;
7329 bcmerror
= BCME_DONGLE_DOWN
;
7334 * Flush the TX queue if required for proper message serialization:
7335 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
7336 * prevent M4 encryption and
7337 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
7338 * prevent disassoc frame being sent before WPS-DONE frame.
7340 if (ioc
->cmd
== WLC_SET_KEY
||
7341 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
7342 strncmp("wsec_key", data_buf
, 9) == 0) ||
7343 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
7344 strncmp("bsscfg:wsec_key", data_buf
, 15) == 0) ||
7345 ioc
->cmd
== WLC_DISASSOC
)
7346 dhd_wait_pend8021x(net
);
7348 if ((ioc
->cmd
== WLC_SET_VAR
|| ioc
->cmd
== WLC_GET_VAR
) &&
7349 data_buf
!= NULL
&& strncmp("rpc_", data_buf
, 4) == 0) {
7350 bcmerror
= BCME_UNSUPPORTED
;
7353 bcmerror
= dhd_wl_ioctl(pub
, ifidx
, (wl_ioctl_t
*)ioc
, data_buf
, buflen
);
7356 /* Intercept monitor ioctl here, add/del monitor if */
7357 if (bcmerror
== BCME_OK
&& ioc
->cmd
== WLC_SET_MONITOR
) {
7359 if (data_buf
!= NULL
&& buflen
!= 0) {
7361 val
= *(int*)data_buf
;
7362 } else if (buflen
>= 2) {
7363 val
= *(short*)data_buf
;
7365 val
= *(char*)data_buf
;
7368 dhd_set_monitor(pub
, ifidx
, val
);
7370 #endif /* WL_MONITOR */
7373 dhd_check_hang(net
, pub
, bcmerror
);
7379 * Called by the OS (optionally via a wrapper function).
7380 * @param net Linux per dongle instance
7381 * @param ifr Linux request structure
7382 * @param cmd e.g. SIOCETHTOOL
7385 dhd_ioctl_entry(struct net_device
*net
, struct ifreq
*ifr
, int cmd
)
7387 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7392 void *local_buf
= NULL
; /**< buffer in kernel space */
7393 void __user
*ioc_buf_user
= NULL
; /**< buffer in user space */
7396 if (atomic_read(&exit_in_progress
)) {
7397 DHD_ERROR(("%s module exit in progress\n", __func__
));
7398 bcmerror
= BCME_DONGLE_DOWN
;
7399 return OSL_ERROR(bcmerror
);
7402 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7403 DHD_PERIM_LOCK(&dhd
->pub
);
7405 /* Interface up check for built-in type */
7406 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== FALSE
) {
7407 DHD_TRACE(("%s: Interface is down \n", __FUNCTION__
));
7408 DHD_PERIM_UNLOCK(&dhd
->pub
);
7409 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7410 return OSL_ERROR(BCME_NOTUP
);
7413 ifidx
= dhd_net2idx(dhd
, net
);
7414 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__
, ifidx
, cmd
));
7416 #if defined(WL_STATIC_IF)
7417 /* skip for static ndev when it is down */
7418 if (dhd_is_static_ndev(&dhd
->pub
, net
) && !(net
->flags
& IFF_UP
)) {
7419 DHD_PERIM_UNLOCK(&dhd
->pub
);
7420 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7423 #endif /* WL_STATIC_iF */
7425 if (ifidx
== DHD_BAD_IF
) {
7426 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__
));
7427 DHD_PERIM_UNLOCK(&dhd
->pub
);
7428 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7432 #if defined(WL_WIRELESS_EXT)
7433 /* linux wireless extensions */
7434 if ((cmd
>= SIOCIWFIRST
) && (cmd
<= SIOCIWLAST
)) {
7435 /* may recurse, do NOT lock */
7436 ret
= wl_iw_ioctl(net
, ifr
, cmd
);
7437 DHD_PERIM_UNLOCK(&dhd
->pub
);
7438 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7441 #endif /* defined(WL_WIRELESS_EXT) */
7443 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
7444 if (cmd
== SIOCETHTOOL
) {
7445 ret
= dhd_ethtool(dhd
, (void*)ifr
->ifr_data
);
7446 DHD_PERIM_UNLOCK(&dhd
->pub
);
7447 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7450 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7452 if (cmd
== SIOCDEVPRIVATE
+1) {
7453 ret
= wl_android_priv_cmd(net
, ifr
);
7454 dhd_check_hang(net
, &dhd
->pub
, ret
);
7455 DHD_PERIM_UNLOCK(&dhd
->pub
);
7456 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7460 if (cmd
!= SIOCDEVPRIVATE
) {
7461 DHD_PERIM_UNLOCK(&dhd
->pub
);
7462 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7466 memset(&ioc
, 0, sizeof(ioc
));
7468 #ifdef CONFIG_COMPAT
7469 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7470 if (in_compat_syscall()) {
7472 if (is_compat_task()) {
7473 #endif /* LINUX_VER >= 4.6 */
7474 compat_wl_ioctl_t compat_ioc
;
7475 if (copy_from_user(&compat_ioc
, ifr
->ifr_data
, sizeof(compat_wl_ioctl_t
))) {
7476 bcmerror
= BCME_BADADDR
;
7479 ioc
.cmd
= compat_ioc
.cmd
;
7480 ioc
.buf
= compat_ptr(compat_ioc
.buf
);
7481 ioc
.len
= compat_ioc
.len
;
7482 ioc
.set
= compat_ioc
.set
;
7483 ioc
.used
= compat_ioc
.used
;
7484 ioc
.needed
= compat_ioc
.needed
;
7485 /* To differentiate between wl and dhd read 4 more byes */
7486 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(compat_wl_ioctl_t
),
7487 sizeof(uint
)) != 0)) {
7488 bcmerror
= BCME_BADADDR
;
7492 #endif /* CONFIG_COMPAT */
7494 /* Copy the ioc control structure part of ioctl request */
7495 if (copy_from_user(&ioc
, ifr
->ifr_data
, sizeof(wl_ioctl_t
))) {
7496 bcmerror
= BCME_BADADDR
;
7500 /* To differentiate between wl and dhd read 4 more byes */
7501 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(wl_ioctl_t
),
7502 sizeof(uint
)) != 0)) {
7503 bcmerror
= BCME_BADADDR
;
7508 if (!capable(CAP_NET_ADMIN
)) {
7509 bcmerror
= BCME_EPERM
;
7513 /* Take backup of ioc.buf and restore later */
7514 ioc_buf_user
= ioc
.buf
;
7517 buflen
= MIN(ioc
.len
, DHD_IOCTL_MAXLEN
);
7518 if (!(local_buf
= MALLOC(dhd
->pub
.osh
, buflen
+1))) {
7519 bcmerror
= BCME_NOMEM
;
7523 DHD_PERIM_UNLOCK(&dhd
->pub
);
7524 if (copy_from_user(local_buf
, ioc
.buf
, buflen
)) {
7525 DHD_PERIM_LOCK(&dhd
->pub
);
7526 bcmerror
= BCME_BADADDR
;
7529 DHD_PERIM_LOCK(&dhd
->pub
);
7531 *((char *)local_buf
+ buflen
) = '\0';
7533 /* For some platforms accessing userspace memory
7534 * of ioc.buf is causing kernel panic, so to avoid that
7535 * make ioc.buf pointing to kernel space memory local_buf
7537 ioc
.buf
= local_buf
;
7540 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7541 if (ioc
.driver
!= DHD_IOCTL_MAGIC
&& dhd
->pub
.hang_was_sent
) {
7542 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__
));
7543 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd
->pub
, DHD_EVENT_TIMEOUT_MS
);
7544 bcmerror
= BCME_DONGLE_DOWN
;
7548 bcmerror
= dhd_ioctl_process(&dhd
->pub
, ifidx
, &ioc
, local_buf
);
7550 /* Restore back userspace pointer to ioc.buf */
7551 ioc
.buf
= ioc_buf_user
;
7553 if (!bcmerror
&& buflen
&& local_buf
&& ioc
.buf
) {
7554 DHD_PERIM_UNLOCK(&dhd
->pub
);
7555 if (copy_to_user(ioc
.buf
, local_buf
, buflen
))
7557 DHD_PERIM_LOCK(&dhd
->pub
);
7562 MFREE(dhd
->pub
.osh
, local_buf
, buflen
+1);
7564 DHD_PERIM_UNLOCK(&dhd
->pub
);
7565 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7567 return OSL_ERROR(bcmerror
);
7570 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
7571 /* Flags to indicate if we distingish power off policy when
7572 * user set the memu "Keep Wi-Fi on during sleep" to "Never"
7574 int trigger_deep_sleep
= 0;
7575 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
7577 #ifdef FIX_CPU_MIN_CLOCK
7578 static int dhd_init_cpufreq_fix(dhd_info_t
*dhd
)
7581 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7582 mutex_init(&dhd
->cpufreq_fix
);
7584 dhd
->cpufreq_fix_status
= FALSE
;
7589 static void dhd_fix_cpu_freq(dhd_info_t
*dhd
)
7591 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7592 mutex_lock(&dhd
->cpufreq_fix
);
7594 if (dhd
&& !dhd
->cpufreq_fix_status
) {
7595 pm_qos_add_request(&dhd
->dhd_cpu_qos
, PM_QOS_CPU_FREQ_MIN
, 300000);
7596 #ifdef FIX_BUS_MIN_CLOCK
7597 pm_qos_add_request(&dhd
->dhd_bus_qos
, PM_QOS_BUS_THROUGHPUT
, 400000);
7598 #endif /* FIX_BUS_MIN_CLOCK */
7599 DHD_ERROR(("pm_qos_add_requests called\n"));
7601 dhd
->cpufreq_fix_status
= TRUE
;
7603 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7604 mutex_unlock(&dhd
->cpufreq_fix
);
7608 static void dhd_rollback_cpu_freq(dhd_info_t
*dhd
)
7610 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7611 mutex_lock(&dhd
->cpufreq_fix
);
7613 if (dhd
&& dhd
->cpufreq_fix_status
!= TRUE
) {
7614 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7615 mutex_unlock(&dhd
->cpufreq_fix
);
7620 pm_qos_remove_request(&dhd
->dhd_cpu_qos
);
7621 #ifdef FIX_BUS_MIN_CLOCK
7622 pm_qos_remove_request(&dhd
->dhd_bus_qos
);
7623 #endif /* FIX_BUS_MIN_CLOCK */
7624 DHD_ERROR(("pm_qos_add_requests called\n"));
7626 dhd
->cpufreq_fix_status
= FALSE
;
7627 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7628 mutex_unlock(&dhd
->cpufreq_fix
);
7631 #endif /* FIX_CPU_MIN_CLOCK */
7633 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7635 dhd_ioctl_entry_wrapper(struct net_device
*net
, struct ifreq
*ifr
, int cmd
)
7638 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7640 if (atomic_read(&dhd
->pub
.block_bus
))
7643 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd
->pub
.bus
)) < 0)
7646 error
= dhd_ioctl_entry(net
, ifr
, cmd
);
7648 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd
->pub
.bus
));
7649 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd
->pub
.bus
));
7653 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7656 dhd_stop(struct net_device
*net
)
7659 bool skip_reset
= false;
7660 #if defined(WL_CFG80211)
7661 unsigned long flags
= 0;
7663 struct bcm_cfg80211
*cfg
= wl_get_cfg(net
);
7664 #endif /* WL_STATIC_IF */
7665 #endif /* WL_CFG80211 */
7666 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7667 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7668 DHD_PERIM_LOCK(&dhd
->pub
);
7669 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__
, net
));
7670 dhd
->pub
.rxcnt_timeout
= 0;
7671 dhd
->pub
.txcnt_timeout
= 0;
7674 dhd
->pub
.d3ackcnt_timeout
= 0;
7675 #endif /* BCMPCIE */
7677 mutex_lock(&dhd
->pub
.ndev_op_sync
);
7679 if (dhd
->pub
.up
== 0) {
7682 #if defined(DHD_HANG_SEND_UP_TEST)
7683 if (dhd
->pub
.req_hang_type
) {
7684 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
7685 __FUNCTION__
, dhd
->pub
.req_hang_type
));
7686 dhd
->pub
.req_hang_type
= 0;
7688 #endif /* DHD_HANG_SEND_UP_TEST */
7690 dhd_if_flush_sta(DHD_DEV_IFP(net
));
7692 #ifdef FIX_CPU_MIN_CLOCK
7693 if (dhd_get_fw_mode(dhd
) == DHD_FLAG_HOSTAP_MODE
)
7694 dhd_rollback_cpu_freq(dhd
);
7695 #endif /* FIX_CPU_MIN_CLOCK */
7697 ifidx
= dhd_net2idx(dhd
, net
);
7698 BCM_REFERENCE(ifidx
);
7700 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7701 /* If static if is operational, don't reset the chip */
7702 if (IS_CFG80211_STATIC_IF_ACTIVE(cfg
)) {
7703 DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n"));
7704 wl_cfg80211_sta_ifdown(net
);
7708 #endif /* WL_STATIC_IF && WL_CFG80211 */
7711 /* Disable Runtime PM before interface down */
7712 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
7714 spin_lock_irqsave(&dhd
->pub
.up_lock
, flags
);
7716 spin_unlock_irqrestore(&dhd
->pub
.up_lock
, flags
);
7719 #endif /* WL_CFG80211 */
7724 wl_cfg80211_down(net
);
7726 ifp
= dhd
->iflist
[0];
7728 * For CFG80211: Clean up all the left over virtual interfaces
7729 * when the primary Interface is brought down. [ifconfig wlan0 down]
7731 if (!dhd_download_fw_on_driverload
) {
7732 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) &&
7733 (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
7735 #ifdef WL_CFG80211_P2P_DEV_IF
7736 wl_cfg80211_del_p2p_wdev(net
);
7737 #endif /* WL_CFG80211_P2P_DEV_IF */
7739 dhd_net_if_lock_local(dhd
);
7740 for (i
= 1; i
< DHD_MAX_IFS
; i
++)
7741 dhd_remove_if(&dhd
->pub
, i
, FALSE
);
7743 if (ifp
&& ifp
->net
) {
7744 dhd_if_del_sta_list(ifp
);
7746 #ifdef ARP_OFFLOAD_SUPPORT
7747 if (dhd_inetaddr_notifier_registered
) {
7748 dhd_inetaddr_notifier_registered
= FALSE
;
7749 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
7751 #endif /* ARP_OFFLOAD_SUPPORT */
7752 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7753 if (dhd_inet6addr_notifier_registered
) {
7754 dhd_inet6addr_notifier_registered
= FALSE
;
7755 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
7757 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7758 dhd_net_if_unlock_local(dhd
);
7760 cancel_work_sync(dhd
->dhd_deferred_wq
);
7762 #ifdef SHOW_LOGTRACE
7763 /* Wait till event_log_dispatcher_work finishes */
7764 cancel_delayed_work_sync(&dhd
->event_log_dispatcher_work
);
7765 #endif /* SHOW_LOGTRACE */
7767 #if defined(DHD_LB_RXP)
7768 __skb_queue_purge(&dhd
->rx_pend_queue
);
7769 #endif /* DHD_LB_RXP */
7771 #if defined(DHD_LB_TXP)
7772 skb_queue_purge(&dhd
->tx_pend_queue
);
7773 #endif /* DHD_LB_TXP */
7776 #if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
7777 defined(ARGOS_NOTIFY_CB)
7778 argos_register_notifier_deinit();
7779 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
7780 #ifdef DHDTCPACK_SUPPRESS
7781 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
7782 #endif /* DHDTCPACK_SUPPRESS */
7783 #if defined(DHD_LB_RXP)
7784 if (ifp
&& ifp
->net
== dhd
->rx_napi_netdev
) {
7785 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
7786 __FUNCTION__
, &dhd
->rx_napi_struct
, net
, net
->name
));
7787 skb_queue_purge(&dhd
->rx_napi_queue
);
7788 napi_disable(&dhd
->rx_napi_struct
);
7789 netif_napi_del(&dhd
->rx_napi_struct
);
7790 dhd
->rx_napi_netdev
= NULL
;
7792 #endif /* DHD_LB_RXP */
7794 #endif /* WL_CFG80211 */
7796 DHD_SSSR_DUMP_DEINIT(&dhd
->pub
);
7798 #ifdef PROP_TXSTATUS
7799 dhd_wlfc_cleanup(&dhd
->pub
, NULL
, 0);
7801 #ifdef SHOW_LOGTRACE
7802 if (!dhd_download_fw_on_driverload
) {
7803 /* Release the skbs from queue for WLC_E_TRACE event */
7804 dhd_event_logtrace_flush_queue(&dhd
->pub
);
7805 if (dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
) {
7806 if (dhd
->event_data
.fmts
) {
7807 MFREE(dhd
->pub
.osh
, dhd
->event_data
.fmts
,
7808 dhd
->event_data
.fmts_size
);
7809 dhd
->event_data
.fmts
= NULL
;
7811 if (dhd
->event_data
.raw_fmts
) {
7812 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_fmts
,
7813 dhd
->event_data
.raw_fmts_size
);
7814 dhd
->event_data
.raw_fmts
= NULL
;
7816 if (dhd
->event_data
.raw_sstr
) {
7817 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_sstr
,
7818 dhd
->event_data
.raw_sstr_size
);
7819 dhd
->event_data
.raw_sstr
= NULL
;
7821 if (dhd
->event_data
.rom_raw_sstr
) {
7822 MFREE(dhd
->pub
.osh
, dhd
->event_data
.rom_raw_sstr
,
7823 dhd
->event_data
.rom_raw_sstr_size
);
7824 dhd
->event_data
.rom_raw_sstr
= NULL
;
7826 dhd
->dhd_state
&= ~DHD_ATTACH_LOGTRACE_INIT
;
7829 #endif /* SHOW_LOGTRACE */
7831 dhd_dev_apf_delete_filter(net
);
7834 /* Stop the protocol module */
7835 dhd_prot_stop(&dhd
->pub
);
7837 OLD_MOD_DEC_USE_COUNT
;
7839 if (skip_reset
== false) {
7840 #if defined(WL_CFG80211)
7841 if (ifidx
== 0 && !dhd_download_fw_on_driverload
) {
7842 #if defined(BT_OVER_SDIO)
7843 dhd_bus_put(&dhd
->pub
, WLAN_MODULE
);
7844 wl_android_set_wifi_on_flag(FALSE
);
7846 wl_android_wifi_off(net
, TRUE
);
7847 #endif /* BT_OVER_SDIO */
7849 #ifdef SUPPORT_DEEP_SLEEP
7851 /* CSP#505233: Flags to indicate if we distingish
7852 * power off policy when user set the memu
7853 * "Keep Wi-Fi on during sleep" to "Never"
7855 if (trigger_deep_sleep
) {
7856 dhd_deepsleep(net
, 1);
7857 trigger_deep_sleep
= 0;
7860 #endif /* SUPPORT_DEEP_SLEEP */
7862 dhd
->pub
.hang_was_sent
= 0;
7864 /* Clear country spec for for built-in type driver */
7865 if (!dhd_download_fw_on_driverload
) {
7866 dhd
->pub
.dhd_cspec
.country_abbrev
[0] = 0x00;
7867 dhd
->pub
.dhd_cspec
.rev
= 0;
7868 dhd
->pub
.dhd_cspec
.ccode
[0] = 0x00;
7876 DHD_PERIM_UNLOCK(&dhd
->pub
);
7877 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7879 /* Destroy wakelock */
7880 if (!dhd_download_fw_on_driverload
&&
7881 (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) &&
7882 (skip_reset
== false)) {
7883 DHD_OS_WAKE_LOCK_DESTROY(dhd
);
7884 dhd
->dhd_state
&= ~DHD_ATTACH_STATE_WAKELOCKS_INIT
;
7887 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
7891 #if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
7892 defined(USE_INITIAL_SHORT_DWELL_TIME))
7893 extern bool g_first_broadcast_scan
;
7894 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
7897 static int dhd_interworking_enable(dhd_pub_t
*dhd
)
7899 uint32 enable
= true;
7902 ret
= dhd_iovar(dhd
, 0, "interworking", (char *)&enable
, sizeof(enable
), NULL
, 0, TRUE
);
7904 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__
, ret
));
7912 dhd_open(struct net_device
*net
)
7914 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7921 #if defined(PREVENT_REOPEN_DURING_HANG)
7922 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
7923 if (dhd
->pub
.hang_was_sent
== 1) {
7924 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__
));
7925 /* Force to bring down WLAN interface in case dhd_stop() is not called
7926 * from the upper layer when HANG event is triggered.
7928 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== 1) {
7929 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__
));
7935 #endif /* PREVENT_REOPEN_DURING_HANG */
7937 mutex_lock(&dhd
->pub
.ndev_op_sync
);
7939 if (dhd
->pub
.up
== 1) {
7941 DHD_ERROR(("Primary net_device is already up \n"));
7942 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
7946 if (!dhd_download_fw_on_driverload
) {
7947 if (!dhd_driver_init_done
) {
7948 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__
));
7949 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
7953 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
7954 DHD_OS_WAKE_LOCK_INIT(dhd
);
7955 dhd
->dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
7958 #ifdef SHOW_LOGTRACE
7959 skb_queue_head_init(&dhd
->evt_trace_queue
);
7961 if (!(dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
)) {
7962 ret
= dhd_init_logstrs_array(dhd
->pub
.osh
, &dhd
->event_data
);
7963 if (ret
== BCME_OK
) {
7964 dhd_init_static_strs_array(dhd
->pub
.osh
, &dhd
->event_data
,
7965 st_str_file_path
, map_file_path
);
7966 dhd_init_static_strs_array(dhd
->pub
.osh
, &dhd
->event_data
,
7967 rom_st_str_file_path
, rom_map_file_path
);
7968 dhd
->dhd_state
|= DHD_ATTACH_LOGTRACE_INIT
;
7971 #endif /* SHOW_LOGTRACE */
7974 #if defined(MULTIPLE_SUPPLICANT)
7975 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
7976 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) != 0) {
7977 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__
));
7979 mutex_lock(&_dhd_sdio_mutex_lock_
);
7981 #endif /* MULTIPLE_SUPPLICANT */
7983 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7984 DHD_PERIM_LOCK(&dhd
->pub
);
7985 dhd
->pub
.dongle_trap_occured
= 0;
7986 dhd
->pub
.hang_was_sent
= 0;
7987 dhd
->pub
.hang_reason
= 0;
7988 dhd
->pub
.iovar_timeout_occured
= 0;
7989 #ifdef PCIE_FULL_DONGLE
7990 dhd
->pub
.d3ack_timeout_occured
= 0;
7991 #endif /* PCIE_FULL_DONGLE */
7992 #ifdef DHD_MAP_LOGGING
7993 dhd
->pub
.smmu_fault_occurred
= 0;
7994 #endif /* DHD_MAP_LOGGING */
7996 #ifdef DHD_LOSSLESS_ROAMING
7997 dhd
->pub
.dequeue_prec_map
= ALLPRIO
;
8000 #if !defined(WL_CFG80211)
8002 * Force start if ifconfig_up gets called before START command
8003 * We keep WEXT's wl_control_wl_start to provide backward compatibility
8004 * This should be removed in the future
8006 ret
= wl_control_wl_start(net
);
8008 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
8015 ifidx
= dhd_net2idx(dhd
, net
);
8016 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
8019 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__
));
8024 if (!dhd
->iflist
[ifidx
]) {
8025 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__
));
8031 atomic_set(&dhd
->pend_8021x_cnt
, 0);
8032 #if defined(WL_CFG80211)
8033 if (!dhd_download_fw_on_driverload
) {
8034 DHD_ERROR(("\n%s\n", dhd_version
));
8035 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8036 g_first_broadcast_scan
= TRUE
;
8037 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8038 #if defined(BT_OVER_SDIO)
8039 ret
= dhd_bus_get(&dhd
->pub
, WLAN_MODULE
);
8040 wl_android_set_wifi_on_flag(TRUE
);
8042 ret
= wl_android_wifi_on(net
);
8043 #endif /* BT_OVER_SDIO */
8045 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
8046 __FUNCTION__
, ret
));
8051 #ifdef SUPPORT_DEEP_SLEEP
8053 /* Flags to indicate if we distingish
8054 * power off policy when user set the memu
8055 * "Keep Wi-Fi on during sleep" to "Never"
8057 if (trigger_deep_sleep
) {
8058 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8059 g_first_broadcast_scan
= TRUE
;
8060 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8061 dhd_deepsleep(net
, 0);
8062 trigger_deep_sleep
= 0;
8065 #endif /* SUPPORT_DEEP_SLEEP */
8066 #ifdef FIX_CPU_MIN_CLOCK
8067 if (dhd_get_fw_mode(dhd
) == DHD_FLAG_HOSTAP_MODE
) {
8068 dhd_init_cpufreq_fix(dhd
);
8069 dhd_fix_cpu_freq(dhd
);
8071 #endif /* FIX_CPU_MIN_CLOCK */
8074 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
8076 /* try to bring up bus */
8077 DHD_PERIM_UNLOCK(&dhd
->pub
);
8079 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8080 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd
->pub
.bus
)) >= 0) {
8081 ret
= dhd_bus_start(&dhd
->pub
);
8082 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd
->pub
.bus
));
8083 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd
->pub
.bus
));
8086 ret
= dhd_bus_start(&dhd
->pub
);
8087 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8089 DHD_PERIM_LOCK(&dhd
->pub
);
8091 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
8099 if (dhd
->pub
.is_bt_recovery_required
) {
8100 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__
));
8101 bcmsdh_btsdio_process_dhd_hang_notification(TRUE
);
8103 dhd
->pub
.is_bt_recovery_required
= FALSE
;
8106 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
8107 memcpy(net
->dev_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
8110 /* Get current TOE mode from dongle */
8111 if (dhd_toe_get(dhd
, ifidx
, &toe_ol
) >= 0 && (toe_ol
& TOE_TX_CSUM_OL
) != 0) {
8112 dhd
->iflist
[ifidx
]->net
->features
|= NETIF_F_IP_CSUM
;
8114 dhd
->iflist
[ifidx
]->net
->features
&= ~NETIF_F_IP_CSUM
;
8118 #if defined(DHD_LB_RXP)
8119 __skb_queue_head_init(&dhd
->rx_pend_queue
);
8120 if (dhd
->rx_napi_netdev
== NULL
) {
8121 dhd
->rx_napi_netdev
= dhd
->iflist
[ifidx
]->net
;
8122 memset(&dhd
->rx_napi_struct
, 0, sizeof(struct napi_struct
));
8123 netif_napi_add(dhd
->rx_napi_netdev
, &dhd
->rx_napi_struct
,
8124 dhd_napi_poll
, dhd_napi_weight
);
8125 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
8126 __FUNCTION__
, &dhd
->rx_napi_struct
, net
, net
->name
));
8127 napi_enable(&dhd
->rx_napi_struct
);
8128 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__
));
8129 skb_queue_head_init(&dhd
->rx_napi_queue
);
8130 } /* rx_napi_netdev == NULL */
8131 #endif /* DHD_LB_RXP */
8132 #ifdef DHD_LB_IRQSET
8133 dhd_irq_set_affinity(&dhd
->pub
);
8134 #endif /* DHD_LB_IRQSET */
8136 #if defined(DHD_LB_TXP)
8137 /* Use the variant that uses locks */
8138 skb_queue_head_init(&dhd
->tx_pend_queue
);
8139 #endif /* DHD_LB_TXP */
8141 #if defined(WL_CFG80211)
8142 if (unlikely(wl_cfg80211_up(net
))) {
8143 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__
));
8147 if (!dhd_download_fw_on_driverload
) {
8148 #ifdef ARP_OFFLOAD_SUPPORT
8149 dhd
->pend_ipaddr
= 0;
8150 if (!dhd_inetaddr_notifier_registered
) {
8151 dhd_inetaddr_notifier_registered
= TRUE
;
8152 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
8154 #endif /* ARP_OFFLOAD_SUPPORT */
8155 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8156 if (!dhd_inet6addr_notifier_registered
) {
8157 dhd_inet6addr_notifier_registered
= TRUE
;
8158 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
8160 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8163 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
8164 defined(ARGOS_NOTIFY_CB)
8165 argos_register_notifier_init(net
);
8166 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
8167 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
8168 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
8169 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
8171 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
8172 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
8173 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
8174 #if defined(NUM_SCB_MAX_PROBE)
8175 dhd_set_scb_probe(&dhd
->pub
);
8176 #endif /* NUM_SCB_MAX_PROBE */
8177 #endif /* WL_CFG80211 */
8182 if (wl_event_enable
) {
8183 /* For wl utility to receive events */
8184 dhd
->pub
.wl_event_enabled
= true;
8186 dhd
->pub
.wl_event_enabled
= false;
8189 if (logtrace_pkt_sendup
) {
8190 /* For any deamon to recieve logtrace */
8191 dhd
->pub
.logtrace_pkt_sendup
= true;
8193 dhd
->pub
.logtrace_pkt_sendup
= false;
8196 OLD_MOD_INC_USE_COUNT
;
8199 dhd_dbgfs_init(&dhd
->pub
);
8203 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
8208 DHD_PERIM_UNLOCK(&dhd
->pub
);
8209 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
8211 #if defined(MULTIPLE_SUPPLICANT)
8212 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8213 mutex_unlock(&_dhd_sdio_mutex_lock_
);
8215 #endif /* MULTIPLE_SUPPLICANT */
8221 * ndo_start handler for primary ndev
8224 dhd_pri_open(struct net_device
*net
)
8228 ret
= dhd_open(net
);
8229 if (unlikely(ret
)) {
8230 DHD_ERROR(("Failed to open primary dev ret %d\n", ret
));
8234 /* Allow transmit calls */
8235 netif_start_queue(net
);
8236 DHD_ERROR(("[%s] tx queue started\n", net
->name
));
8241 * ndo_stop handler for primary ndev
8244 dhd_pri_stop(struct net_device
*net
)
8249 netif_stop_queue(net
);
8250 DHD_ERROR(("[%s] tx queue stopped\n", net
->name
));
8252 ret
= dhd_stop(net
);
8253 if (unlikely(ret
)) {
8254 DHD_ERROR(("dhd_stop failed: %d\n", ret
));
8261 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
8263 * For static I/Fs, the firmware interface init
8264 * is done from the IFF_UP context.
8267 dhd_static_if_open(struct net_device
*net
)
8270 struct bcm_cfg80211
*cfg
;
8271 struct net_device
*primary_netdev
= NULL
;
8273 cfg
= wl_get_cfg(net
);
8274 primary_netdev
= bcmcfg_to_prmry_ndev(cfg
);
8276 if (!IS_CFG80211_STATIC_IF(cfg
, net
)) {
8277 DHD_TRACE(("non-static interface (%s)..do nothing \n", net
->name
));
8282 DHD_INFO(("[%s][STATIC_IF] Enter \n", net
->name
));
8283 /* Ensure fw is initialized. If it is already initialized,
8284 * dhd_open will return success.
8286 ret
= dhd_open(primary_netdev
);
8287 if (unlikely(ret
)) {
8288 DHD_ERROR(("Failed to open primary dev ret %d\n", ret
));
8292 ret
= wl_cfg80211_static_if_open(net
);
8294 /* Allow transmit calls */
8295 netif_start_queue(net
);
8302 dhd_static_if_stop(struct net_device
*net
)
8304 struct bcm_cfg80211
*cfg
;
8305 struct net_device
*primary_netdev
= NULL
;
8307 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
8309 DHD_INFO(("[%s][STATIC_IF] Enter \n", net
->name
));
8311 /* Ensure queue is disabled */
8312 netif_tx_disable(net
);
8314 cfg
= wl_get_cfg(net
);
8315 if (!IS_CFG80211_STATIC_IF(cfg
, net
)) {
8316 DHD_TRACE(("non-static interface (%s)..do nothing \n", net
->name
));
8320 ret
= wl_cfg80211_static_if_close(net
);
8322 if (dhd
->pub
.up
== 0) {
8323 /* If fw is down, return */
8324 DHD_ERROR(("fw down\n"));
8327 /* If STA iface is not in operational, invoke dhd_close from this
8330 primary_netdev
= bcmcfg_to_prmry_ndev(cfg
);
8331 if (!(primary_netdev
->flags
& IFF_UP
)) {
8332 ret
= dhd_stop(primary_netdev
);
8334 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
8339 #endif /* WL_STATIC_IF && WL_CF80211 */
8341 int dhd_do_driver_init(struct net_device
*net
)
8343 dhd_info_t
*dhd
= NULL
;
8346 DHD_ERROR(("Primary Interface not initialized \n"));
8350 #ifdef MULTIPLE_SUPPLICANT
8351 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8352 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) != 0) {
8353 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__
));
8356 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
8357 #endif /* MULTIPLE_SUPPLICANT */
8359 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
8360 dhd
= DHD_DEV_INFO(net
);
8362 /* If driver is already initialized, do nothing
8364 if (dhd
->pub
.busstate
== DHD_BUS_DATA
) {
8365 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
8369 if (dhd_open(net
) < 0) {
8370 DHD_ERROR(("Driver Init Failed \n"));
8378 dhd_event_ifadd(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8382 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8383 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
, ifevent
->role
) == BCME_OK
)
8387 /* handle IF event caused by wl commands, SoftAP, WEXT and
8388 * anything else. This has to be done asynchronously otherwise
8389 * DPC will be blocked (and iovars will timeout as DPC has no chance
8390 * to read the response back)
8392 if (ifevent
->ifidx
> 0) {
8393 dhd_if_event_t
*if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8394 if (if_event
== NULL
) {
8395 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8396 MALLOCED(dhdinfo
->pub
.osh
)));
8400 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8401 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
8402 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8403 if_event
->name
[IFNAMSIZ
- 1] = '\0';
8404 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
,
8405 DHD_WQ_WORK_IF_ADD
, dhd_ifadd_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
8412 dhd_event_ifdel(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8414 dhd_if_event_t
*if_event
;
8417 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8418 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
8420 #endif /* WL_CFG80211 */
8422 /* handle IF event caused by wl commands, SoftAP, WEXT and
8425 if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8426 if (if_event
== NULL
) {
8427 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8428 MALLOCED(dhdinfo
->pub
.osh
)));
8431 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8432 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
8433 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8434 if_event
->name
[IFNAMSIZ
- 1] = '\0';
8435 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
, DHD_WQ_WORK_IF_DEL
,
8436 dhd_ifdel_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
8442 dhd_event_ifchange(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8445 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8446 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
);
8447 #endif /* WL_CFG80211 */
8452 /* Handler to update natoe info and bind with new subscriptions if there is change in config */
8454 dhd_natoe_ct_event_hanlder(void *handle
, void *event_info
, u8 event
)
8456 dhd_info_t
*dhd
= handle
;
8457 wl_event_data_natoe_t
*natoe
= event_info
;
8458 dhd_nfct_info_t
*nfct
= dhd
->pub
.nfct
;
8460 if (event
!= DHD_WQ_WORK_NATOE_EVENT
) {
8461 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
8466 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
8469 if (natoe
->natoe_active
&& natoe
->sta_ip
&& natoe
->start_port
&& natoe
->end_port
&&
8470 (natoe
->start_port
< natoe
->end_port
)) {
8471 /* Rebind subscriptions to start receiving notifications from groups */
8472 if (dhd_ct_nl_bind(nfct
, nfct
->subscriptions
) < 0) {
8475 dhd_ct_send_dump_req(nfct
);
8476 } else if (!natoe
->natoe_active
) {
8477 /* Rebind subscriptions to stop receiving notifications from groups */
8478 if (dhd_ct_nl_bind(nfct
, CT_NULL_SUBSCRIPTION
) < 0) {
8484 /* As NATOE enable/disbale event is received, we have to bind with new NL subscriptions.
8485 * Scheduling workq to switch from tasklet context as bind call may sleep in handler
8488 dhd_natoe_ct_event(dhd_pub_t
*dhd
, char *data
)
8490 wl_event_data_natoe_t
*event_data
= (wl_event_data_natoe_t
*)data
;
8493 wl_event_data_natoe_t
*natoe
= dhd
->nfct
->natoe_info
;
8494 uint8 prev_enable
= natoe
->natoe_active
;
8496 spin_lock_bh(&dhd
->nfct_lock
);
8497 memcpy(natoe
, event_data
, sizeof(*event_data
));
8498 spin_unlock_bh(&dhd
->nfct_lock
);
8500 if (prev_enable
!= event_data
->natoe_active
) {
8501 dhd_deferred_schedule_work(dhd
->info
->dhd_deferred_wq
,
8502 (void *)natoe
, DHD_WQ_WORK_NATOE_EVENT
,
8503 dhd_natoe_ct_event_hanlder
, DHD_WQ_WORK_PRIORITY_LOW
);
8507 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__
));
8511 /* Handler to send natoe ioctl to dongle */
8513 dhd_natoe_ct_ioctl_handler(void *handle
, void *event_info
, uint8 event
)
8515 dhd_info_t
*dhd
= handle
;
8516 dhd_ct_ioc_t
*ct_ioc
= event_info
;
8518 if (event
!= DHD_WQ_WORK_NATOE_IOCTL
) {
8519 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
8524 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
8528 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd
->pub
, ct_ioc
) < 0) {
8529 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__
));
8533 /* When Netlink message contains port collision info, the info must be sent to dongle FW
8534 * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl
8537 dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t
*dhd
, dhd_ct_ioc_t
*ioc
)
8540 dhd_deferred_schedule_work(dhd
->info
->dhd_deferred_wq
, (void *)ioc
,
8541 DHD_WQ_WORK_NATOE_IOCTL
, dhd_natoe_ct_ioctl_handler
,
8542 DHD_WQ_WORK_PRIORITY_HIGH
);
8544 #endif /* WL_NATOE */
8546 /* This API maps ndev to ifp inclusive of static IFs */
8548 dhd_get_ifp_by_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
)
8550 dhd_if_t
*ifp
= NULL
;
8552 u32 ifidx
= (DHD_MAX_IFS
+ DHD_MAX_STATIC_IFS
- 1);
8554 u32 ifidx
= (DHD_MAX_IFS
- 1);
8555 #endif /* WL_STATIC_IF */
8557 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdp
->info
;
8559 ifp
= dhdinfo
->iflist
[ifidx
];
8560 if (ifp
&& (ifp
->net
== ndev
)) {
8561 DHD_TRACE(("match found for %s. ifidx:%d\n",
8562 ndev
->name
, ifidx
));
8567 DHD_ERROR(("no entry found for %s\n", ndev
->name
));
8572 dhd_is_static_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
)
8574 dhd_if_t
*ifp
= NULL
;
8576 if (!dhdp
|| !ndev
) {
8577 DHD_ERROR(("wrong input\n"));
8582 ifp
= dhd_get_ifp_by_ndev(dhdp
, ndev
);
8583 return (ifp
&& (ifp
->static_if
== true));
#ifdef WL_STATIC_IF
/* In some cases, while registering I/F, the actual ifidx, bssidx and dngl_name
 * are not known. For e.g: static i/f case. This function lets to update it once
 * it is known.
 * NOTE(review): several interior lines of this function were lost in the
 * extraction; the control flow between the visible statements was
 * reconstructed from the upstream driver — verify against the tree.
 */
s32
dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
	uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp, *ifp_new;
	s32 cur_idx;
	dhd_dev_priv_t * dev_priv;

	DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
			if_state, ifidx));

	ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));

	if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
		return -ENODEV;
	}
	cur_idx = ifp->idx;

	if (if_state == NDEV_STATE_OS_IF_CREATED) {
		/* mark static if */
		ifp->static_if = TRUE;
		return BCME_OK;
	}

	ifp_new = dhdinfo->iflist[ifidx];
	if (ifp_new && (ifp_new != ifp)) {
		/* There should be only one entry for a given ifidx. */
		DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
		ASSERT(0);
		dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
		net_os_send_hang_message(ifp->net);
		return -EINVAL;
	}

	/* For static if delete case, cleanup the if before ifidx update */
	if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
		(if_state == NDEV_STATE_FW_IF_FAILED)) {
		dhd_cleanup_if(ifp->net);
		dev_priv = DHD_DEV_PRIV(ndev);
		dev_priv->ifidx = ifidx;
	}

	/* update the iflist ifidx slot with cached info */
	dhdinfo->iflist[ifidx] = ifp;
	dhdinfo->iflist[cur_idx] = NULL;

	/* update the values */
	ifp->idx = ifidx;
	ifp->bssidx = bssidx;

	if (if_state == NDEV_STATE_FW_IF_CREATED) {
		dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
		/* initialize the dongle provided if name */
		if (dngl_name) {
			strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
		} else if (ndev->name[0] != '\0') {
			strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
		}
		if (mac != NULL) {
			memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
		}
	}
	DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
		ifidx, cur_idx, if_state));

	return BCME_OK;
}
#endif /* WL_STATIC_IF */
8659 /* unregister and free the existing net_device interface (if any) in iflist and
8660 * allocate a new one. the slot is reused. this function does NOT register the
8661 * new interface to linux kernel. dhd_register_if does the job
8664 dhd_allocate_if(dhd_pub_t
*dhdpub
, int ifidx
, const char *name
,
8665 uint8
*mac
, uint8 bssidx
, bool need_rtnl_lock
, const char *dngl_name
)
8667 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
8670 ASSERT(dhdinfo
&& (ifidx
< (DHD_MAX_IFS
+ DHD_MAX_STATIC_IFS
)));
8672 ifp
= dhdinfo
->iflist
[ifidx
];
8675 if (ifp
->net
!= NULL
) {
8676 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8677 __FUNCTION__
, ifp
->net
->name
, ifidx
));
8680 /* For primary ifidx (0), there shouldn't be
8681 * any netdev present already.
8683 DHD_ERROR(("Primary ifidx populated already\n"));
8688 dhd_dev_priv_clear(ifp
->net
); /* clear net_device private */
8690 /* in unregister_netdev case, the interface gets freed by net->destructor
8691 * (which is set to free_netdev)
8693 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
8694 free_netdev(ifp
->net
);
8696 netif_stop_queue(ifp
->net
);
8698 unregister_netdev(ifp
->net
);
8700 unregister_netdevice(ifp
->net
);
8705 ifp
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_t
));
8707 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__
, sizeof(dhd_if_t
)));
8712 memset(ifp
, 0, sizeof(dhd_if_t
));
8713 ifp
->info
= dhdinfo
;
8715 ifp
->bssidx
= bssidx
;
8716 #ifdef DHD_MCAST_REGEN
8717 ifp
->mcast_regen_bss_enable
= FALSE
;
8719 /* set to TRUE rx_pkt_chainable at alloc time */
8720 ifp
->rx_pkt_chainable
= TRUE
;
8723 memcpy(&ifp
->mac_addr
, mac
, ETHER_ADDR_LEN
);
8725 /* Allocate etherdev, including space for private structure */
8726 ifp
->net
= alloc_etherdev(DHD_DEV_PRIV_SIZE
);
8727 if (ifp
->net
== NULL
) {
8728 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__
, sizeof(dhdinfo
)));
8732 /* Setup the dhd interface's netdevice private structure. */
8733 dhd_dev_priv_save(ifp
->net
, dhdinfo
, ifp
, ifidx
);
8735 if (name
&& name
[0]) {
8736 strncpy(ifp
->net
->name
, name
, IFNAMSIZ
);
8737 ifp
->net
->name
[IFNAMSIZ
- 1] = '\0';
8742 ifp
->net
->destructor
= free_netdev
;
8744 ifp
->net
->destructor
= dhd_netdev_free
;
8746 ifp
->net
->destructor
= free_netdev
;
8747 #endif /* WL_CFG80211 */
8748 strncpy(ifp
->name
, ifp
->net
->name
, IFNAMSIZ
);
8749 ifp
->name
[IFNAMSIZ
- 1] = '\0';
8750 dhdinfo
->iflist
[ifidx
] = ifp
;
8752 /* initialize the dongle provided if name */
8754 strncpy(ifp
->dngl_name
, dngl_name
, IFNAMSIZ
);
8756 strncpy(ifp
->dngl_name
, name
, IFNAMSIZ
);
8759 #ifdef PCIE_FULL_DONGLE
8760 /* Initialize STA info list */
8761 INIT_LIST_HEAD(&ifp
->sta_list
);
8762 DHD_IF_STA_LIST_LOCK_INIT(ifp
);
8763 #endif /* PCIE_FULL_DONGLE */
8765 #ifdef DHD_L2_FILTER
8766 ifp
->phnd_arp_table
= init_l2_filter_arp_table(dhdpub
->osh
);
8767 ifp
->parp_allnode
= TRUE
;
8768 #endif /* DHD_L2_FILTER */
8770 DHD_CUMM_CTR_INIT(&ifp
->cumm_ctr
);
8772 #ifdef DHDTCPSYNC_FLOOD_BLK
8773 INIT_WORK(&ifp
->blk_tsfl_work
, dhd_blk_tsfl_handler
);
8774 dhd_reset_tcpsync_info_by_ifp(ifp
);
8775 #endif /* DHDTCPSYNC_FLOOD_BLK */
8781 if (ifp
->net
!= NULL
) {
8782 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8783 if (ifp
->net
== dhdinfo
->rx_napi_netdev
) {
8784 napi_disable(&dhdinfo
->rx_napi_struct
);
8785 netif_napi_del(&dhdinfo
->rx_napi_struct
);
8786 skb_queue_purge(&dhdinfo
->rx_napi_queue
);
8787 dhdinfo
->rx_napi_netdev
= NULL
;
8789 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8790 dhd_dev_priv_clear(ifp
->net
);
8791 free_netdev(ifp
->net
);
8794 MFREE(dhdinfo
->pub
.osh
, ifp
, sizeof(*ifp
));
8798 dhdinfo
->iflist
[ifidx
] = NULL
;
8803 dhd_cleanup_ifp(dhd_pub_t
*dhdp
, dhd_if_t
*ifp
)
8805 #ifdef PCIE_FULL_DONGLE
8807 if_flow_lkup_t
*if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
8808 #endif /* PCIE_FULL_DONGLE */
8811 if ((ifp
->idx
< 0) || (ifp
->idx
>= DHD_MAX_IFS
)) {
8812 DHD_ERROR(("Wrong idx:%d \n", ifp
->idx
));
8816 #ifdef DHD_L2_FILTER
8817 bcm_l2_filter_arp_table_update(dhdpub
->osh
, ifp
->phnd_arp_table
, TRUE
,
8818 NULL
, FALSE
, dhdpub
->tickcnt
);
8819 deinit_l2_filter_arp_table(dhdpub
->osh
, ifp
->phnd_arp_table
);
8820 ifp
->phnd_arp_table
= NULL
;
8821 #endif /* DHD_L2_FILTER */
8823 dhd_if_del_sta_list(ifp
);
8824 #ifdef PCIE_FULL_DONGLE
8825 /* Delete flowrings of virtual interface */
8827 if ((ifidx
!= 0) && (if_flow_lkup
[ifidx
].role
!= WLC_E_IF_ROLE_AP
)) {
8828 dhd_flow_rings_delete(dhdp
, ifidx
);
8830 #endif /* PCIE_FULL_DONGLE */
8835 dhd_cleanup_if(struct net_device
*net
)
8837 dhd_info_t
*dhdinfo
= DHD_DEV_INFO(net
);
8838 dhd_pub_t
*dhdp
= &dhdinfo
->pub
;
8841 if (!(ifp
= dhd_get_ifp_by_ndev(dhdp
, net
)) ||
8842 (ifp
->idx
>= DHD_MAX_IFS
)) {
8843 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp
, ifp
? ifp
->idx
: -1));
8848 dhd_cleanup_ifp(dhdp
, ifp
);
8851 /* unregister and free the the net_device interface associated with the indexed
8852 * slot, also free the slot memory and set the slot pointer to NULL
8854 #define DHD_TX_COMPLETION_TIMEOUT 5000
8856 dhd_remove_if(dhd_pub_t
*dhdpub
, int ifidx
, bool need_rtnl_lock
)
8858 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
8860 unsigned long flags
;
8863 ifp
= dhdinfo
->iflist
[ifidx
];
8866 #ifdef DHDTCPSYNC_FLOOD_BLK
8867 cancel_work_sync(&ifp
->blk_tsfl_work
);
8868 #endif /* DHDTCPSYNC_FLOOD_BLK */
8870 /* static IF will be handled in detach */
8871 if (ifp
->static_if
) {
8872 DHD_TRACE(("Skip del iface for static interface\n"));
8875 #endif /* WL_STATIC_IF */
8876 if (ifp
->net
!= NULL
) {
8877 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp
->net
->name
, ifp
->idx
));
8879 DHD_GENERAL_LOCK(dhdpub
, flags
);
8880 ifp
->del_in_progress
= true;
8881 DHD_GENERAL_UNLOCK(dhdpub
, flags
);
8883 /* If TX is in progress, hold the if del */
8884 if (DHD_IF_IS_TX_ACTIVE(ifp
)) {
8885 DHD_INFO(("TX in progress. Wait for it to be complete."));
8886 timeout
= wait_event_timeout(dhdpub
->tx_completion_wait
,
8887 ((ifp
->tx_paths_active
& DHD_TX_CONTEXT_MASK
) == 0),
8888 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT
));
8890 /* Tx completion timeout. Attempt proceeding ahead */
8891 DHD_ERROR(("Tx completion timed out!\n"));
8895 DHD_TRACE(("No outstanding TX!\n"));
8897 dhdinfo
->iflist
[ifidx
] = NULL
;
8898 /* in unregister_netdev case, the interface gets freed by net->destructor
8899 * (which is set to free_netdev)
8901 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
8902 free_netdev(ifp
->net
);
8904 netif_tx_disable(ifp
->net
);
8906 #if defined(SET_RPS_CPUS)
8907 custom_rps_map_clear(ifp
->net
->_rx
);
8908 #endif /* SET_RPS_CPUS */
8909 #if (defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL))
8910 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
8911 dhd_tcpack_suppress_set(dhdpub
, TCPACK_SUP_OFF
);
8912 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
8913 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
8915 unregister_netdev(ifp
->net
);
8917 unregister_netdevice(ifp
->net
);
8920 DHD_GENERAL_LOCK(dhdpub
, flags
);
8921 ifp
->del_in_progress
= false;
8922 DHD_GENERAL_UNLOCK(dhdpub
, flags
);
8924 dhd_cleanup_ifp(dhdpub
, ifp
);
8925 DHD_CUMM_CTR_INIT(&ifp
->cumm_ctr
);
8927 MFREE(dhdinfo
->pub
.osh
, ifp
, sizeof(*ifp
));
8934 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
8935 static struct net_device_ops dhd_ops_pri
= {
8936 .ndo_open
= dhd_pri_open
,
8937 .ndo_stop
= dhd_pri_stop
,
8938 .ndo_get_stats
= dhd_get_stats
,
8939 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8940 .ndo_do_ioctl
= dhd_ioctl_entry_wrapper
,
8941 .ndo_start_xmit
= dhd_start_xmit_wrapper
,
8943 .ndo_do_ioctl
= dhd_ioctl_entry
,
8944 .ndo_start_xmit
= dhd_start_xmit
,
8945 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8946 .ndo_set_mac_address
= dhd_set_mac_address
,
8947 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8948 .ndo_set_rx_mode
= dhd_set_multicast_list
,
8950 .ndo_set_multicast_list
= dhd_set_multicast_list
,
8954 static struct net_device_ops dhd_ops_virt
= {
8955 #if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8956 .ndo_open
= dhd_static_if_open
,
8957 .ndo_stop
= dhd_static_if_stop
,
8959 .ndo_get_stats
= dhd_get_stats
,
8960 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8961 .ndo_do_ioctl
= dhd_ioctl_entry_wrapper
,
8962 .ndo_start_xmit
= dhd_start_xmit_wrapper
,
8964 .ndo_do_ioctl
= dhd_ioctl_entry
,
8965 .ndo_start_xmit
= dhd_start_xmit
,
8966 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8967 .ndo_set_mac_address
= dhd_set_mac_address
,
8968 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8969 .ndo_set_rx_mode
= dhd_set_multicast_list
,
8971 .ndo_set_multicast_list
= dhd_set_multicast_list
,
8974 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
8977 dhd_os_write_file_posn(void *fp
, unsigned long *posn
, void *buf
,
8978 unsigned long buflen
)
8980 loff_t wr_posn
= *posn
;
8982 if (!fp
|| !buf
|| buflen
== 0)
8985 if (vfs_write((struct file
*)fp
, buf
, buflen
, &wr_posn
) < 0)
8992 #ifdef SHOW_LOGTRACE
8994 dhd_os_read_file(void *file
, char *buf
, uint32 size
)
8996 struct file
*filep
= (struct file
*)file
;
9001 return vfs_read(filep
, buf
, size
, &filep
->f_pos
);
9005 dhd_os_seek_file(void *file
, int64 offset
)
9007 struct file
*filep
= (struct file
*)file
;
9011 /* offset can be -ve */
9012 filep
->f_pos
= filep
->f_pos
+ offset
;
9018 dhd_init_logstrs_array(osl_t
*osh
, dhd_event_log_t
*temp
)
9020 struct file
*filep
= NULL
;
9023 char *raw_fmts
= NULL
;
9024 int logstrs_size
= 0;
9030 filep
= filp_open(logstrs_path
, O_RDONLY
, 0);
9032 if (IS_ERR(filep
)) {
9033 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__
, logstrs_path
));
9036 error
= vfs_stat(logstrs_path
, &stat
);
9038 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__
, logstrs_path
));
9041 logstrs_size
= (int) stat
.size
;
9043 if (logstrs_size
== 0) {
9044 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__
));
9048 raw_fmts
= MALLOC(osh
, logstrs_size
);
9049 if (raw_fmts
== NULL
) {
9050 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__
));
9054 if (vfs_read(filep
, raw_fmts
, logstrs_size
, &filep
->f_pos
) != logstrs_size
) {
9055 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__
, logstrs_path
));
9059 if (dhd_parse_logstrs_file(osh
, raw_fmts
, logstrs_size
, temp
)
9061 filp_close(filep
, NULL
);
9068 MFREE(osh
, raw_fmts
, logstrs_size
);
9074 filp_close(filep
, NULL
);
9082 dhd_read_map(osl_t
*osh
, char *fname
, uint32
*ramstart
, uint32
*rodata_start
,
9085 struct file
*filep
= NULL
;
9087 int err
= BCME_ERROR
;
9089 if (fname
== NULL
) {
9090 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__
));
9097 filep
= filp_open(fname
, O_RDONLY
, 0);
9098 if (IS_ERR(filep
)) {
9099 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__
, fname
));
9103 if ((err
= dhd_parse_map_file(osh
, filep
, ramstart
,
9104 rodata_start
, rodata_end
)) < 0)
9109 filp_close(filep
, NULL
);
9117 dhd_init_static_strs_array(osl_t
*osh
, dhd_event_log_t
*temp
, char *str_file
, char *map_file
)
9119 struct file
*filep
= NULL
;
9121 char *raw_fmts
= NULL
;
9122 uint32 logstrs_size
= 0;
9124 uint32 ramstart
= 0;
9125 uint32 rodata_start
= 0;
9126 uint32 rodata_end
= 0;
9127 uint32 logfilebase
= 0;
9129 error
= dhd_read_map(osh
, map_file
, &ramstart
, &rodata_start
, &rodata_end
);
9130 if (error
!= BCME_OK
) {
9131 DHD_ERROR(("readmap Error!! \n"));
9132 /* don't do event log parsing in actual case */
9133 if (strstr(str_file
, ram_file_str
) != NULL
) {
9134 temp
->raw_sstr
= NULL
;
9135 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9136 temp
->rom_raw_sstr
= NULL
;
9140 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9141 ramstart
, rodata_start
, rodata_end
));
9146 filep
= filp_open(str_file
, O_RDONLY
, 0);
9147 if (IS_ERR(filep
)) {
9148 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__
, str_file
));
9153 /* Full file size is huge. Just read required part */
9154 logstrs_size
= rodata_end
- rodata_start
;
9155 logfilebase
= rodata_start
- ramstart
;
9158 if (logstrs_size
== 0) {
9159 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__
));
9163 raw_fmts
= MALLOC(osh
, logstrs_size
);
9164 if (raw_fmts
== NULL
) {
9165 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__
));
9170 error
= generic_file_llseek(filep
, logfilebase
, SEEK_SET
);
9172 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__
, str_file
, error
));
9177 error
= vfs_read(filep
, raw_fmts
, logstrs_size
, (&filep
->f_pos
));
9178 if (error
!= logstrs_size
) {
9179 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__
, str_file
, error
));
9183 if (strstr(str_file
, ram_file_str
) != NULL
) {
9184 temp
->raw_sstr
= raw_fmts
;
9185 temp
->raw_sstr_size
= logstrs_size
;
9186 temp
->rodata_start
= rodata_start
;
9187 temp
->rodata_end
= rodata_end
;
9188 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9189 temp
->rom_raw_sstr
= raw_fmts
;
9190 temp
->rom_raw_sstr_size
= logstrs_size
;
9191 temp
->rom_rodata_start
= rodata_start
;
9192 temp
->rom_rodata_end
= rodata_end
;
9195 filp_close(filep
, NULL
);
9202 MFREE(osh
, raw_fmts
, logstrs_size
);
9208 filp_close(filep
, NULL
);
9212 if (strstr(str_file
, ram_file_str
) != NULL
) {
9213 temp
->raw_sstr
= NULL
;
9214 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9215 temp
->rom_raw_sstr
= NULL
;
9219 } /* dhd_init_static_strs_array */
9222 dhd_trace_open_proc(struct inode
*inode
, struct file
*file
)
9224 return single_open(file
, 0, NULL
);
9228 dhd_trace_read_proc(struct file
*file
, char __user
*buffer
, size_t tt
, loff_t
*loff
)
9230 trace_buf_info_t
*trace_buf_info
;
9231 int ret
= BCME_ERROR
;
9234 mutex_lock(&g_dhd_pub
->dhd_trace_lock
);
9235 trace_buf_info
= (trace_buf_info_t
*)MALLOC(g_dhd_pub
->osh
,
9236 sizeof(trace_buf_info_t
));
9237 if (trace_buf_info
) {
9238 dhd_get_read_buf_ptr(g_dhd_pub
, trace_buf_info
);
9239 if (copy_to_user(buffer
, (void*)trace_buf_info
->buf
, MIN(trace_buf_info
->size
, tt
)))
9244 if (trace_buf_info
->availability
== BUF_NOT_AVAILABLE
)
9245 ret
= BUF_NOT_AVAILABLE
;
9247 ret
= trace_buf_info
->size
;
9249 DHD_ERROR(("Memory allocation Failed\n"));
9252 if (trace_buf_info
) {
9253 MFREE(g_dhd_pub
->osh
, trace_buf_info
, sizeof(trace_buf_info_t
));
9255 mutex_unlock(&g_dhd_pub
->dhd_trace_lock
);
9258 #endif /* SHOW_LOGTRACE */
9261 uint enable_erpom
= 0;
9262 module_param(enable_erpom
, int, 0);
9265 dhd_wlan_power_off_handler(void *handler
, unsigned char reason
)
9267 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handler
;
9268 bool dongle_isolation
= dhdp
->dongle_isolation
;
9270 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__
, reason
));
9272 if ((reason
== BY_BT_DUE_TO_BT
) || (reason
== BY_BT_DUE_TO_WLAN
)) {
9273 #if defined(DHD_FW_COREDUMP)
9274 /* save core dump to a file */
9275 if (dhdp
->memdump_enabled
) {
9276 #ifdef DHD_SSSR_DUMP
9277 if (dhdp
->sssr_inited
) {
9278 dhdp
->info
->no_wq_sssrdump
= TRUE
;
9279 dhd_bus_sssr_dump(dhdp
);
9280 dhdp
->info
->no_wq_sssrdump
= FALSE
;
9282 #endif /* DHD_SSSR_DUMP */
9283 dhdp
->memdump_type
= DUMP_TYPE_DUE_TO_BT
;
9284 dhd_bus_mem_dump(dhdp
);
9286 #endif /* DHD_FW_COREDUMP */
9289 /* pause data on all the interfaces */
9290 dhd_bus_stop_queue(dhdp
->bus
);
9292 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
9293 dhdp
->dongle_isolation
= TRUE
;
9294 dhd_bus_devreset(dhdp
, 1); /* DHD structure cleanup */
9295 dhdp
->dongle_isolation
= dongle_isolation
; /* Restore the old value */
9300 dhd_wlan_power_on_handler(void *handler
, unsigned char reason
)
9302 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handler
;
9303 bool dongle_isolation
= dhdp
->dongle_isolation
;
9305 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__
, reason
));
9306 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
9307 dhdp
->dongle_isolation
= TRUE
;
9308 dhd_bus_devreset(dhdp
, 0); /* DHD structure re-init */
9309 dhdp
->dongle_isolation
= dongle_isolation
; /* Restore the old value */
9310 /* resume data on all the interfaces */
9311 dhd_bus_start_queue(dhdp
->bus
);
9316 #endif /* DHD_ERPOM */
9317 /** Called once for each hardware (dongle) instance that this DHD manages */
9319 dhd_attach(osl_t
*osh
, struct dhd_bus
*bus
, uint bus_hdrlen
)
9321 dhd_info_t
*dhd
= NULL
;
9322 struct net_device
*net
= NULL
;
9323 char if_name
[IFNAMSIZ
] = {'\0'};
9324 uint32 bus_type
= -1;
9325 uint32 bus_num
= -1;
9326 uint32 slot_num
= -1;
9327 #ifdef SHOW_LOGTRACE
9329 #endif /* SHOW_LOGTRACE */
9331 pom_func_handler_t
*pom_handler
;
9332 #endif /* DHD_ERPOM */
9333 wifi_adapter_info_t
*adapter
= NULL
;
9335 dhd_attach_states_t dhd_state
= DHD_ATTACH_STATE_INIT
;
9336 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
9338 #ifdef PCIE_FULL_DONGLE
9339 ASSERT(sizeof(dhd_pkttag_fd_t
) <= OSL_PKTTAG_SZ
);
9340 ASSERT(sizeof(dhd_pkttag_fr_t
) <= OSL_PKTTAG_SZ
);
9341 #endif /* PCIE_FULL_DONGLE */
9343 /* will implement get_ids for DBUS later */
9344 #if defined(BCMSDIO)
9345 dhd_bus_get_ids(bus
, &bus_type
, &bus_num
, &slot_num
);
9347 adapter
= dhd_wifi_platform_get_adapter(bus_type
, bus_num
, slot_num
);
9349 /* Allocate primary dhd_info */
9350 dhd
= wifi_platform_prealloc(adapter
, DHD_PREALLOC_DHD_INFO
, sizeof(dhd_info_t
));
9352 dhd
= MALLOC(osh
, sizeof(dhd_info_t
));
9354 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__
));
9358 memset(dhd
, 0, sizeof(dhd_info_t
));
9359 dhd_state
|= DHD_ATTACH_STATE_DHD_ALLOC
;
9361 dhd
->unit
= dhd_found
+ instance_base
; /* do not increment dhd_found, yet */
9364 #ifdef DUMP_IOCTL_IOV_LIST
9365 dll_init(&(dhd
->pub
.dump_iovlist_head
));
9366 #endif /* DUMP_IOCTL_IOV_LIST */
9367 dhd
->adapter
= adapter
;
9369 dhd
->pub
.is_bt_recovery_required
= FALSE
;
9370 mutex_init(&dhd
->bus_user_lock
);
9371 #endif /* BT_OVER_SDIO */
9374 dll_init(&(dhd
->pub
.mw_list_head
));
9375 #endif /* DHD_DEBUG */
9377 #ifdef GET_CUSTOM_MAC_ENABLE
9378 wifi_platform_get_mac_addr(dhd
->adapter
, dhd
->pub
.mac
.octet
);
9379 #endif /* GET_CUSTOM_MAC_ENABLE */
9380 #ifdef CUSTOM_FORCE_NODFS_FLAG
9381 dhd
->pub
.dhd_cflags
|= WLAN_PLAT_NODFS_FLAG
;
9382 dhd
->pub
.force_country_change
= TRUE
;
9383 #endif /* CUSTOM_FORCE_NODFS_FLAG */
9384 #ifdef CUSTOM_COUNTRY_CODE
9385 get_customized_country_code(dhd
->adapter
,
9386 dhd
->pub
.dhd_cspec
.country_abbrev
, &dhd
->pub
.dhd_cspec
,
9387 dhd
->pub
.dhd_cflags
);
9388 #endif /* CUSTOM_COUNTRY_CODE */
9389 dhd
->thr_dpc_ctl
.thr_pid
= DHD_PID_KT_TL_INVALID
;
9390 dhd
->thr_wdt_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
9392 dhd
->pub
.wet_info
= dhd_get_wet_info(&dhd
->pub
);
9393 #endif /* DHD_WET */
9394 /* Initialize thread based operation and lock */
9395 sema_init(&dhd
->sdsem
, 1);
9397 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
9398 * This is indeed a hack but we have to make it work properly before we have a better
9401 dhd_update_fw_nv_path(dhd
);
9402 dhd
->pub
.pcie_txs_metadata_enable
= pcie_txs_metadata_enable
;
9404 /* Link to info module */
9405 dhd
->pub
.info
= dhd
;
9407 /* Link to bus module */
9409 dhd
->pub
.hdrlen
= bus_hdrlen
;
9411 /* Set network interface name if it was provided as module parameter */
9412 if (iface_name
[0]) {
9415 strncpy(if_name
, iface_name
, IFNAMSIZ
);
9416 if_name
[IFNAMSIZ
- 1] = 0;
9417 len
= strlen(if_name
);
9418 ch
= if_name
[len
- 1];
9419 if ((ch
> '9' || ch
< '0') && (len
< IFNAMSIZ
- 2))
9420 strncat(if_name
, "%d", 2);
9423 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
9424 net
= dhd_allocate_if(&dhd
->pub
, 0, if_name
, NULL
, 0, TRUE
, NULL
);
9428 mutex_init(&dhd
->pub
.ndev_op_sync
);
9429 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
9430 /* Init ARGOS notifier data */
9431 argos_wifi
.notifier_call
= NULL
;
9432 argos_p2p
.notifier_call
= NULL
;
9433 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
9435 dhd_state
|= DHD_ATTACH_STATE_ADD_IF
;
9436 #ifdef DHD_L2_FILTER
9437 /* initialize the l2_filter_cnt */
9438 dhd
->pub
.l2_filter_cnt
= 0;
9440 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9443 net
->netdev_ops
= NULL
;
9446 mutex_init(&dhd
->dhd_iovar_mutex
);
9447 sema_init(&dhd
->proto_sem
, 1);
9449 if (!(dhd_ulp_init(osh
, &dhd
->pub
)))
9451 #endif /* DHD_ULP */
9453 #if defined(DHD_HANG_SEND_UP_TEST)
9454 dhd
->pub
.req_hang_type
= 0;
9455 #endif /* DHD_HANG_SEND_UP_TEST */
9457 #ifdef PROP_TXSTATUS
9458 spin_lock_init(&dhd
->wlfc_spinlock
);
9460 dhd
->pub
.skip_fc
= dhd_wlfc_skip_fc
;
9461 dhd
->pub
.plat_init
= dhd_wlfc_plat_init
;
9462 dhd
->pub
.plat_deinit
= dhd_wlfc_plat_deinit
;
9464 #ifdef DHD_WLFC_THREAD
9465 init_waitqueue_head(&dhd
->pub
.wlfc_wqhead
);
9466 dhd
->pub
.wlfc_thread
= kthread_create(dhd_wlfc_transfer_packets
, &dhd
->pub
, "wlfc-thread");
9467 if (IS_ERR(dhd
->pub
.wlfc_thread
)) {
9468 DHD_ERROR(("create wlfc thread failed\n"));
9471 wake_up_process(dhd
->pub
.wlfc_thread
);
9473 #endif /* DHD_WLFC_THREAD */
9474 #endif /* PROP_TXSTATUS */
9476 /* Initialize other structure content */
9477 init_waitqueue_head(&dhd
->ioctl_resp_wait
);
9478 init_waitqueue_head(&dhd
->d3ack_wait
);
9479 init_waitqueue_head(&dhd
->ctrl_wait
);
9480 init_waitqueue_head(&dhd
->dhd_bus_busy_state_wait
);
9481 init_waitqueue_head(&dhd
->dmaxfer_wait
);
9482 init_waitqueue_head(&dhd
->pub
.tx_completion_wait
);
9483 dhd
->pub
.dhd_bus_busy_state
= 0;
9485 /* Initialize the spinlocks */
9486 spin_lock_init(&dhd
->sdlock
);
9487 spin_lock_init(&dhd
->txqlock
);
9488 spin_lock_init(&dhd
->dhd_lock
);
9489 spin_lock_init(&dhd
->rxf_lock
);
9491 spin_lock_init(&dhd
->pub
.tdls_lock
);
9493 #if defined(RXFRAME_THREAD)
9494 dhd
->rxthread_enabled
= TRUE
;
9495 #endif /* defined(RXFRAME_THREAD) */
9497 #ifdef DHDTCPACK_SUPPRESS
9498 spin_lock_init(&dhd
->tcpack_lock
);
9499 #endif /* DHDTCPACK_SUPPRESS */
9501 /* Initialize Wakelock stuff */
9502 spin_lock_init(&dhd
->wakelock_spinlock
);
9503 spin_lock_init(&dhd
->wakelock_evt_spinlock
);
9504 DHD_OS_WAKE_LOCK_INIT(dhd
);
9505 dhd
->wakelock_counter
= 0;
9506 /* wakelocks prevent a system from going into a low power state */
9507 #ifdef CONFIG_HAS_WAKELOCK
9508 wake_lock_init(&dhd
->wl_wdwake
, WAKE_LOCK_SUSPEND
, "wlan_wd_wake");
9509 #endif /* CONFIG_HAS_WAKELOCK */
9511 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
9512 mutex_init(&dhd
->dhd_net_if_mutex
);
9513 mutex_init(&dhd
->dhd_suspend_mutex
);
9514 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
9515 mutex_init(&dhd
->dhd_apf_mutex
);
9516 #endif /* PKT_FILTER_SUPPORT && APF */
9518 dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
9520 /* Attach and link in the protocol */
9521 if (dhd_prot_attach(&dhd
->pub
) != 0) {
9522 DHD_ERROR(("dhd_prot_attach failed\n"));
9525 dhd_state
|= DHD_ATTACH_STATE_PROT_ATTACH
;
9528 spin_lock_init(&dhd
->pub
.up_lock
);
9529 /* Attach and link in the cfg80211 */
9530 if (unlikely(wl_cfg80211_attach(net
, &dhd
->pub
))) {
9531 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9535 dhd_monitor_init(&dhd
->pub
);
9536 dhd_state
|= DHD_ATTACH_STATE_CFG80211
;
9539 #if defined(WL_WIRELESS_EXT)
9540 /* Attach and link in the iw */
9541 if (!(dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
9542 if (wl_iw_attach(net
, (void *)&dhd
->pub
) != 0) {
9543 DHD_ERROR(("wl_iw_attach failed\n"));
9546 dhd_state
|= DHD_ATTACH_STATE_WL_ATTACH
;
9548 #endif /* defined(WL_WIRELESS_EXT) */
9550 #ifdef SHOW_LOGTRACE
9551 ret
= dhd_init_logstrs_array(osh
, &dhd
->event_data
);
9552 if (ret
== BCME_OK
) {
9553 dhd_init_static_strs_array(osh
, &dhd
->event_data
, st_str_file_path
, map_file_path
);
9554 dhd_init_static_strs_array(osh
, &dhd
->event_data
, rom_st_str_file_path
,
9556 dhd_state
|= DHD_ATTACH_LOGTRACE_INIT
;
9558 #endif /* SHOW_LOGTRACE */
9561 /* attach debug if support */
9562 if (dhd_os_dbg_attach(&dhd
->pub
)) {
9563 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__
));
9567 #if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
9568 /* enable verbose ring to support dump_trace_buf */
9569 dhd_os_start_logging(&dhd
->pub
, FW_VERBOSE_RING_NAME
, 3, 0, 0, 0);
9570 #endif /* SHOW_LOGTRACE */
9573 dhd
->pub
.dbg
->pkt_mon_lock
= dhd_os_spin_lock_init(dhd
->pub
.osh
);
9574 #ifdef DBG_PKT_MON_INIT_DEFAULT
9575 dhd_os_dbg_attach_pkt_monitor(&dhd
->pub
);
9576 #endif /* DBG_PKT_MON_INIT_DEFAULT */
9577 #endif /* DBG_PKT_MON */
9578 #endif /* DEBUGABILITY */
9581 dhd_log_dump_init(&dhd
->pub
);
9582 #endif /* DHD_LOG_DUMP */
9584 #ifdef DHD_PKT_LOGGING
9585 dhd_os_attach_pktlog(&dhd
->pub
);
9586 #endif /* DHD_PKT_LOGGING */
9587 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9588 dhd
->pub
.hang_info
= MALLOCZ(osh
, VENDOR_SEND_HANG_EXT_INFO_LEN
);
9589 if (dhd
->pub
.hang_info
== NULL
) {
9590 DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__
));
9592 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
9593 if (dhd_sta_pool_init(&dhd
->pub
, DHD_MAX_STA
) != BCME_OK
) {
9594 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__
, DHD_MAX_STA
));
9598 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9599 dhd
->tx_wq
= alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI
| WQ_UNBOUND
| WQ_MEM_RECLAIM
, 1);
9601 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__
));
9604 dhd
->rx_wq
= alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI
| WQ_UNBOUND
| WQ_MEM_RECLAIM
, 1);
9606 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__
));
9607 destroy_workqueue(dhd
->tx_wq
);
9611 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9613 /* Set up the watchdog timer */
9614 init_timer(&dhd
->timer
);
9615 dhd
->timer
.data
= (ulong
)dhd
;
9616 dhd
->timer
.function
= dhd_watchdog
;
9617 dhd
->default_wd_interval
= dhd_watchdog_ms
;
9619 if (dhd_watchdog_prio
>= 0) {
9620 /* Initialize watchdog thread */
9621 PROC_START(dhd_watchdog_thread
, dhd
, &dhd
->thr_wdt_ctl
, 0, "dhd_watchdog_thread");
9622 if (dhd
->thr_wdt_ctl
.thr_pid
< 0) {
9627 dhd
->thr_wdt_ctl
.thr_pid
= -1;
9630 #ifdef DHD_PCIE_RUNTIMEPM
9631 /* Setup up the runtime PM Idlecount timer */
9632 init_timer(&dhd
->rpm_timer
);
9633 dhd
->rpm_timer
.data
= (ulong
)dhd
;
9634 dhd
->rpm_timer
.function
= dhd_runtimepm
;
9635 dhd
->rpm_timer_valid
= FALSE
;
9637 dhd
->thr_rpm_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
9638 PROC_START(dhd_rpm_state_thread
, dhd
, &dhd
->thr_rpm_ctl
, 0, "dhd_rpm_state_thread");
9639 if (dhd
->thr_rpm_ctl
.thr_pid
< 0) {
9642 #endif /* DHD_PCIE_RUNTIMEPM */
9644 #ifdef SHOW_LOGTRACE
9645 skb_queue_head_init(&dhd
->evt_trace_queue
);
9646 if (proc_create("dhd_trace", S_IRUSR
, NULL
, &proc_file_fops
) == NULL
)
9647 DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
9648 mutex_init(&dhd
->pub
.dhd_trace_lock
);
9649 #endif /* SHOW_LOGTRACE */
9651 /* Set up the bottom half handler */
9652 if (dhd_dpc_prio
>= 0) {
9653 /* Initialize DPC thread */
9654 PROC_START(dhd_dpc_thread
, dhd
, &dhd
->thr_dpc_ctl
, 0, "dhd_dpc");
9655 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
9659 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_DPC_TASKLET_CTL) && \
9660 !defined(DHD_LB_IRQSET)
9661 if (!zalloc_cpumask_var(&dhd
->pub
.default_cpu_mask
, GFP_KERNEL
)) {
9662 DHD_ERROR(("dpc tasklet, zalloc_cpumask_var error\n"));
9663 dhd
->pub
.affinity_isdpc
= FALSE
;
9665 if (!zalloc_cpumask_var(&dhd
->pub
.dpc_affinity_cpu_mask
, GFP_KERNEL
)) {
9666 DHD_ERROR(("dpc thread, dpc_affinity_cpu_mask error\n"));
9667 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
9668 dhd
->pub
.affinity_isdpc
= FALSE
;
9670 unsigned int irq
= -1;
9672 if (dhdpcie_get_pcieirq(bus
, &irq
)) {
9673 DHD_ERROR(("%s : Can't get interrupt number\n",
9677 #endif /* BCMPCIE */
9679 irq
= adapter
->irq_num
;
9680 #endif /* BCMSDIO */
9682 cpumask_copy(dhd
->pub
.default_cpu_mask
, &hmp_slow_cpu_mask
);
9683 cpumask_or(dhd
->pub
.dpc_affinity_cpu_mask
,
9684 dhd
->pub
.dpc_affinity_cpu_mask
,
9685 cpumask_of(TASKLET_CPUCORE
));
9687 set_irq_cpucore(irq
, dhd
->pub
.default_cpu_mask
,
9688 dhd
->pub
.dpc_affinity_cpu_mask
);
9689 dhd
->pub
.affinity_isdpc
= TRUE
;
9692 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_DPC_TASKLET_CTL && !DHD_LB_IRQSET */
9693 /* use tasklet for dpc */
9694 tasklet_init(&dhd
->tasklet
, dhd_dpc
, (ulong
)dhd
);
9695 dhd
->thr_dpc_ctl
.thr_pid
= -1;
9698 if (dhd
->rxthread_enabled
) {
9699 bzero(&dhd
->pub
.skbbuf
[0], sizeof(void *) * MAXSKBPEND
);
9700 /* Initialize RXF thread */
9701 PROC_START(dhd_rxf_thread
, dhd
, &dhd
->thr_rxf_ctl
, 0, "dhd_rxf");
9702 if (dhd
->thr_rxf_ctl
.thr_pid
< 0) {
9707 dhd_state
|= DHD_ATTACH_STATE_THREADS_CREATED
;
9709 #if defined(CONFIG_PM_SLEEP)
9710 if (!dhd_pm_notifier_registered
) {
9711 dhd_pm_notifier_registered
= TRUE
;
9712 dhd
->pm_notifier
.notifier_call
= dhd_pm_callback
;
9713 dhd
->pm_notifier
.priority
= 10;
9714 register_pm_notifier(&dhd
->pm_notifier
);
9717 #endif /* CONFIG_PM_SLEEP */
9719 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9720 dhd
->early_suspend
.level
= EARLY_SUSPEND_LEVEL_BLANK_SCREEN
+ 20;
9721 dhd
->early_suspend
.suspend
= dhd_early_suspend
;
9722 dhd
->early_suspend
.resume
= dhd_late_resume
;
9723 register_early_suspend(&dhd
->early_suspend
);
9724 dhd_state
|= DHD_ATTACH_STATE_EARLYSUSPEND_DONE
;
9725 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9727 #ifdef ARP_OFFLOAD_SUPPORT
9728 dhd
->pend_ipaddr
= 0;
9729 if (!dhd_inetaddr_notifier_registered
) {
9730 dhd_inetaddr_notifier_registered
= TRUE
;
9731 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
9733 #endif /* ARP_OFFLOAD_SUPPORT */
9735 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9736 if (!dhd_inet6addr_notifier_registered
) {
9737 dhd_inet6addr_notifier_registered
= TRUE
;
9738 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
9740 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9741 dhd
->dhd_deferred_wq
= dhd_deferred_work_init((void *)dhd
);
9742 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9743 INIT_WORK(&dhd
->dhd_hang_process_work
, dhd_hang_process
);
9745 #ifdef DEBUG_CPU_FREQ
9746 dhd
->new_freq
= alloc_percpu(int);
9747 dhd
->freq_trans
.notifier_call
= dhd_cpufreq_notifier
;
9748 cpufreq_register_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
9750 #ifdef DHDTCPACK_SUPPRESS
9752 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_DELAYTX
);
9753 #elif defined(BCMPCIE)
9754 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_HOLD
);
9756 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
9757 #endif /* BCMSDIO */
9758 #endif /* DHDTCPACK_SUPPRESS */
9760 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9761 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9763 #ifdef DHD_DEBUG_PAGEALLOC
9764 register_page_corrupt_cb(dhd_page_corrupt_cb
, &dhd
->pub
);
9765 #endif /* DHD_DEBUG_PAGEALLOC */
9769 dhd_lb_set_default_cpus(dhd
);
9771 /* Initialize the CPU Masks */
9772 if (dhd_cpumasks_init(dhd
) == 0) {
9773 /* Now we have the current CPU maps, run through candidacy */
9774 dhd_select_cpu_candidacy(dhd
);
9776 * If we are able to initialize CPU masks, lets register to the
9777 * CPU Hotplug framework to change the CPU for each job dynamically
9778 * using candidacy algorithm.
9780 dhd
->cpu_notifier
.notifier_call
= dhd_cpu_callback
;
9781 register_hotcpu_notifier(&dhd
->cpu_notifier
); /* Register a callback */
9784 * We are unable to initialize CPU masks, so candidacy algorithm
9785 * won't run, but still Load Balancing will be honoured based
9786 * on the CPUs allocated for a given job statically during init
9788 dhd
->cpu_notifier
.notifier_call
= NULL
;
9789 DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
9794 #ifdef DHD_LB_TXP_DEFAULT_ENAB
9795 /* Trun ON the feature by default */
9796 atomic_set(&dhd
->lb_txp_active
, 1);
9798 /* Trun OFF the feature by default */
9799 atomic_set(&dhd
->lb_txp_active
, 0);
9800 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
9801 #endif /* DHD_LB_TXP */
9803 DHD_LB_STATS_INIT(&dhd
->pub
);
9805 /* Initialize the Load Balancing Tasklets and Napi object */
9806 #if defined(DHD_LB_TXC)
9807 tasklet_init(&dhd
->tx_compl_tasklet
,
9808 dhd_lb_tx_compl_handler
, (ulong
)(&dhd
->pub
));
9809 INIT_WORK(&dhd
->tx_compl_dispatcher_work
, dhd_tx_compl_dispatcher_fn
);
9810 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__
));
9811 #endif /* DHD_LB_TXC */
9813 #if defined(DHD_LB_RXC)
9814 tasklet_init(&dhd
->rx_compl_tasklet
,
9815 dhd_lb_rx_compl_handler
, (ulong
)(&dhd
->pub
));
9816 INIT_WORK(&dhd
->rx_compl_dispatcher_work
, dhd_rx_compl_dispatcher_fn
);
9817 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__
));
9818 #endif /* DHD_LB_RXC */
9820 #if defined(DHD_LB_RXP)
9821 __skb_queue_head_init(&dhd
->rx_pend_queue
);
9822 skb_queue_head_init(&dhd
->rx_napi_queue
);
9823 /* Initialize the work that dispatches NAPI job to a given core */
9824 INIT_WORK(&dhd
->rx_napi_dispatcher_work
, dhd_rx_napi_dispatcher_fn
);
9825 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__
));
9826 #endif /* DHD_LB_RXP */
9828 #if defined(DHD_LB_TXP)
9829 INIT_WORK(&dhd
->tx_dispatcher_work
, dhd_tx_dispatcher_work
);
9830 skb_queue_head_init(&dhd
->tx_pend_queue
);
9831 /* Initialize the work that dispatches TX job to a given core */
9832 tasklet_init(&dhd
->tx_tasklet
,
9833 dhd_lb_tx_handler
, (ulong
)(dhd
));
9834 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__
));
9835 #endif /* DHD_LB_TXP */
9837 dhd_state
|= DHD_ATTACH_STATE_LB_ATTACH_DONE
;
9840 #if defined(BCMPCIE)
9841 dhd
->pub
.extended_trap_data
= MALLOCZ(osh
, BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
9842 if (dhd
->pub
.extended_trap_data
== NULL
) {
9843 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__
));
9845 #endif /* BCMPCIE && ETD */
9847 #ifdef SHOW_LOGTRACE
9848 INIT_DELAYED_WORK(&dhd
->event_log_dispatcher_work
, dhd_event_logtrace_process
);
9849 #endif /* SHOW_LOGTRACE */
9851 DHD_INFO(("%s: sssr mempool init\n", __FUNCTION__
));
9852 DHD_SSSR_MEMPOOL_INIT(&dhd
->pub
);
9854 (void)dhd_sysfs_init(dhd
);
9857 /* Open Netlink socket for NF_CONNTRACK notifications */
9858 dhd
->pub
.nfct
= dhd_ct_open(&dhd
->pub
, NFNL_SUBSYS_CTNETLINK
| NFNL_SUBSYS_CTNETLINK_EXP
,
9860 #endif /* WL_NATOE */
9862 dhd_state
|= DHD_ATTACH_STATE_DONE
;
9863 dhd
->dhd_state
= dhd_state
;
9867 g_dhd_pub
= &dhd
->pub
;
9869 #ifdef DHD_DUMP_MNGR
9870 dhd
->pub
.dump_file_manage
=
9871 (dhd_dump_file_manage_t
*)MALLOCZ(dhd
->pub
.osh
, sizeof(dhd_dump_file_manage_t
));
9872 if (unlikely(!dhd
->pub
.dump_file_manage
)) {
9873 DHD_ERROR(("%s(): could not allocate memory for - "
9874 "dhd_dump_file_manage_t\n", __FUNCTION__
));
9876 #endif /* DHD_DUMP_MNGR */
9877 #ifdef DHD_FW_COREDUMP
9878 /* Set memdump default values */
9879 #ifdef CUSTOMER_HW4_DEBUG
9880 dhd
->pub
.memdump_enabled
= DUMP_DISABLED
;
9882 dhd
->pub
.memdump_enabled
= DUMP_MEMFILE_BUGON
;
9883 #endif /* CUSTOMER_HW4_DEBUG */
9884 /* Check the memdump capability */
9885 dhd_get_memdump_info(&dhd
->pub
);
9886 #endif /* DHD_FW_COREDUMP */
9890 pom_handler
= &dhd
->pub
.pom_wlan_handler
;
9891 pom_handler
->func_id
= WLAN_FUNC_ID
;
9892 pom_handler
->handler
= (void *)g_dhd_pub
;
9893 pom_handler
->power_off
= dhd_wlan_power_off_handler
;
9894 pom_handler
->power_on
= dhd_wlan_power_on_handler
;
9896 dhd
->pub
.pom_func_register
= NULL
;
9897 dhd
->pub
.pom_func_deregister
= NULL
;
9898 dhd
->pub
.pom_toggle_reg_on
= NULL
;
9900 dhd
->pub
.pom_func_register
= symbol_get(pom_func_register
);
9901 dhd
->pub
.pom_func_deregister
= symbol_get(pom_func_deregister
);
9902 dhd
->pub
.pom_toggle_reg_on
= symbol_get(pom_toggle_reg_on
);
9904 symbol_put(pom_func_register
);
9905 symbol_put(pom_func_deregister
);
9906 symbol_put(pom_toggle_reg_on
);
9908 if (!dhd
->pub
.pom_func_register
||
9909 !dhd
->pub
.pom_func_deregister
||
9910 !dhd
->pub
.pom_toggle_reg_on
) {
9911 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
9912 "POM is not loaded\n", __FUNCTION__
));
9916 dhd
->pub
.pom_func_register(pom_handler
);
9917 dhd
->pub
.enable_erpom
= TRUE
;
9920 #endif /* DHD_ERPOM */
9924 if (dhd_state
>= DHD_ATTACH_STATE_DHD_ALLOC
) {
9925 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9926 __FUNCTION__
, dhd_state
, &dhd
->pub
));
9927 dhd
->dhd_state
= dhd_state
;
9928 dhd_detach(&dhd
->pub
);
9929 dhd_free(&dhd
->pub
);
9936 int dhd_get_fw_mode(dhd_info_t
*dhdinfo
)
9938 if (strstr(dhdinfo
->fw_path
, "_apsta") != NULL
)
9939 return DHD_FLAG_HOSTAP_MODE
;
9940 if (strstr(dhdinfo
->fw_path
, "_p2p") != NULL
)
9941 return DHD_FLAG_P2P_MODE
;
9942 if (strstr(dhdinfo
->fw_path
, "_ibss") != NULL
)
9943 return DHD_FLAG_IBSS_MODE
;
9944 if (strstr(dhdinfo
->fw_path
, "_mfg") != NULL
)
9945 return DHD_FLAG_MFG_MODE
;
9947 return DHD_FLAG_STA_MODE
;
9950 int dhd_bus_get_fw_mode(dhd_pub_t
*dhdp
)
9952 return dhd_get_fw_mode(dhdp
->info
);
9955 extern char * nvram_get(const char *name
);
/*
 * dhd_update_fw_nv_path() - resolve the firmware / nvram (and, with
 * DHD_UCODE_DOWNLOAD, ucode) file paths into dhdinfo->fw_path,
 * dhdinfo->nv_path and dhdinfo->uc_path.
 *
 * Candidate sources, in increasing priority (a later source overrides
 * an earlier one):
 *   1. build-time defaults (VENDOR_PATH CONFIG_BCMDHD_FW_PATH /
 *      CONFIG_BCMDHD_NVRAM_PATH), only when firmware is NOT downloaded
 *      at driver load;
 *   2. the adapter's fw_path / nv_path, used only while the dhdinfo
 *      paths are still empty (first-time initialization);
 *   3. the firmware_path / nvram_path / ucode_path module parameters,
 *      which are consumed (cleared) after being copied when
 *      dhd_download_fw_on_driverload is set.
 *
 * Returns TRUE when fw_path and nv_path end up non-empty, FALSE
 * otherwise -- TODO confirm: the return statements fall in extraction
 * gaps (see note below).
 *
 * NOTE(review): this extraction is incomplete.  The original line
 * numbers jump (9957, 9963, 10008-10009, 10012, 10019-10020,
 * 10029-10030, 10041-10045 partially, 10069-10070, 10090-10091,
 * 10094-10098 are absent), so opening braces, the fw_len/nv_len/uc_len
 * declarations, the nv/uc module-parameter assignments and the early
 * "return FALSE" statements of the length-check branches are not
 * visible here.  The code below is reproduced byte-for-byte as
 * extracted.
 */
9956 bool dhd_update_fw_nv_path(dhd_info_t
*dhdinfo
)
/* Candidate paths; they stay NULL when no source provides one. */
9960 const char *fw
= NULL
;
9961 const char *nv
= NULL
;
9962 #ifdef DHD_UCODE_DOWNLOAD
9964 const char *uc
= NULL
;
9965 #endif /* DHD_UCODE_DOWNLOAD */
9966 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
/* Destination buffer capacities (sizeof the embedded arrays). */
9967 int fw_path_len
= sizeof(dhdinfo
->fw_path
);
9968 int nv_path_len
= sizeof(dhdinfo
->nv_path
);
9970 /* Update firmware and nvram path. The path may be from adapter info or module parameter
9971 * The path from adapter info is used for initialization only (as it won't change).
9973 * The firmware_path/nvram_path module parameter may be changed by the system at run
9974 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
9975 * command may change dhdinfo->fw_path. As such we need to clear the path info in
9976 * module parameter after it is copied. We won't update the path until the module parameter
9977 * is changed again (first character is not '\0')
9980 /* set default firmware and nvram path for built-in type driver */
9981 if (!dhd_download_fw_on_driverload
) {
9982 #ifdef CONFIG_BCMDHD_FW_PATH
9983 fw
= VENDOR_PATH CONFIG_BCMDHD_FW_PATH
;
9984 #endif /* CONFIG_BCMDHD_FW_PATH */
9985 #ifdef CONFIG_BCMDHD_NVRAM_PATH
9986 nv
= VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH
;
9987 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
/* Adapter-provided paths apply only while dhdinfo's copy is empty. */
9990 /* check if we need to initialize the path */
9991 if (dhdinfo
->fw_path
[0] == '\0') {
9992 if (adapter
&& adapter
->fw_path
&& adapter
->fw_path
[0] != '\0')
9993 fw
= adapter
->fw_path
;
9995 if (dhdinfo
->nv_path
[0] == '\0') {
9996 if (adapter
&& adapter
->nv_path
&& adapter
->nv_path
[0] != '\0')
9997 nv
= adapter
->nv_path
;
/* Module parameters take highest priority when non-empty. */
10000 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10002 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
10004 if (firmware_path
[0] != '\0')
10005 fw
= firmware_path
;
10007 if (nvram_path
[0] != '\0')
10010 #ifdef DHD_UCODE_DOWNLOAD
10011 if (ucode_path
[0] != '\0')
10013 #endif /* DHD_UCODE_DOWNLOAD */
/*
 * Copy the chosen firmware path.  NOTE(review): strncpy does not
 * NUL-terminate on truncation; this is safe only because the
 * fw_len >= fw_path_len case bails out first (its return statement
 * falls in an extraction gap).  A trailing newline (typical when the
 * path was echoed into the module parameter) is stripped.
 */
10015 if (fw
&& fw
[0] != '\0') {
10016 fw_len
= strlen(fw
);
10017 if (fw_len
>= fw_path_len
) {
10018 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
10021 strncpy(dhdinfo
->fw_path
, fw
, fw_path_len
);
10022 if (dhdinfo
->fw_path
[fw_len
-1] == '\n')
10023 dhdinfo
->fw_path
[fw_len
-1] = '\0';
/* Copy the chosen nvram path; buffer is pre-zeroed and explicitly
 * terminated at nv_len, so truncation cannot leave it unterminated. */
10025 if (nv
&& nv
[0] != '\0') {
10026 nv_len
= strlen(nv
);
10027 if (nv_len
>= nv_path_len
) {
10028 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
10031 memset(dhdinfo
->nv_path
, 0, nv_path_len
);
10032 strncpy(dhdinfo
->nv_path
, nv
, nv_path_len
);
10033 dhdinfo
->nv_path
[nv_len
] = '\0';
/*
 * Single-nvram-file mode: rewrite ".../nvram_<tag>..." to
 * ".../nvram.txt" by zeroing everything from the '_' after "nvram"
 * and appending ".txt" -- presumably padding_size spans to the end of
 * the buffer (its computation is split across an extraction gap;
 * verify against the full source).
 */
10034 #ifdef DHD_USE_SINGLE_NVRAM_FILE
10035 /* Remove "_net" or "_mfg" tag from current nvram path */
10037 char *nvram_tag
= "nvram_";
10038 char *ext_tag
= ".txt";
10039 char *sp_nvram
= strnstr(dhdinfo
->nv_path
, nvram_tag
, nv_path_len
);
10040 bool valid_buf
= sp_nvram
&& ((uint32
)(sp_nvram
+ strlen(nvram_tag
) +
10041 strlen(ext_tag
) - dhdinfo
->nv_path
) <= nv_path_len
);
10043 char *sp
= sp_nvram
+ strlen(nvram_tag
) - 1;
10044 uint32 padding_size
= (uint32
)(dhdinfo
->nv_path
+
10046 memset(sp
, 0, padding_size
);
10047 strncat(dhdinfo
->nv_path
, ext_tag
, strlen(ext_tag
));
10048 nv_len
= strlen(dhdinfo
->nv_path
);
10049 DHD_INFO(("%s: new nvram path = %s\n",
10050 __FUNCTION__
, dhdinfo
->nv_path
));
10051 } else if (sp_nvram
) {
10052 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
10056 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10057 " nvram path = %s\n", __FUNCTION__
, dhdinfo
->nv_path
));
10060 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
/* Strip a trailing newline from the nvram path as well. */
10061 if (dhdinfo
->nv_path
[nv_len
-1] == '\n')
10062 dhdinfo
->nv_path
[nv_len
-1] = '\0';
/* Same copy-and-strip treatment for the optional ucode path. */
10064 #ifdef DHD_UCODE_DOWNLOAD
10065 if (uc
&& uc
[0] != '\0') {
10066 uc_len
= strlen(uc
);
10067 if (uc_len
>= sizeof(dhdinfo
->uc_path
)) {
10068 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10071 strncpy(dhdinfo
->uc_path
, uc
, sizeof(dhdinfo
->uc_path
));
10072 if (dhdinfo
->uc_path
[uc_len
-1] == '\n')
10073 dhdinfo
->uc_path
[uc_len
-1] = '\0';
10075 #endif /* DHD_UCODE_DOWNLOAD */
/* Consume the module parameters so the next call won't re-apply them
 * until userspace writes them again. */
10077 /* clear the path in module parameter */
10078 if (dhd_download_fw_on_driverload
) {
10079 firmware_path
[0] = '\0';
10080 nvram_path
[0] = '\0';
10082 #ifdef DHD_UCODE_DOWNLOAD
10083 ucode_path
[0] = '\0';
10084 DHD_ERROR(("ucode path: %s\n", dhdinfo
->uc_path
));
10085 #endif /* DHD_UCODE_DOWNLOAD */
/* Final validation: both paths must be populated (failure branches
 * return in an extraction gap). */
10087 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10088 if (dhdinfo
->fw_path
[0] == '\0') {
10089 DHD_ERROR(("firmware path not found\n"));
10092 if (dhdinfo
->nv_path
[0] == '\0') {
10093 DHD_ERROR(("nvram path not found\n"));
10100 #if defined(BT_OVER_SDIO)
/*
 * dhd_update_btfw_path() - resolve the BT-over-SDIO firmware file path
 * into dhdinfo->btfw_path.
 *
 * Mirrors dhd_update_fw_nv_path() for the BT firmware: candidate
 * sources are the build-time default (CONFIG_BCMDHD_BTFW_PATH, only
 * when firmware is not downloaded at driver load), the adapter's
 * btfw_path (only while dhdinfo's copy is still empty), and finally
 * the btfw_path module parameter, which is consumed after copying.
 *
 * Returns TRUE when btfw_path ends up non-empty, FALSE otherwise --
 * TODO confirm: the return statements fall in extraction gaps.
 *
 * NOTE(review): this extraction is incomplete -- original lines 10102,
 * 10106, 10133-10134, 10139-10140, 10144, 10151-10155 are absent
 * (braces, fw_len declaration, the module-parameter assignment and
 * the returns).  The code below is reproduced byte-for-byte as
 * extracted.
 */
10101 extern bool dhd_update_btfw_path(dhd_info_t
*dhdinfo
, char* btfw_path
)
10104 const char *fw
= NULL
;
10105 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
10107 /* Update bt firmware path. The path may be from adapter info or module parameter
10108 * The path from adapter info is used for initialization only (as it won't change).
10110 * The btfw_path module parameter may be changed by the system at run
10111 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
10112 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
10113 * module parameter after it is copied. We won't update the path until the module parameter
10114 * is changed again (first character is not '\0')
10117 /* set default firmware and nvram path for built-in type driver */
10118 if (!dhd_download_fw_on_driverload
) {
10119 #ifdef CONFIG_BCMDHD_BTFW_PATH
10120 fw
= CONFIG_BCMDHD_BTFW_PATH
;
10121 #endif /* CONFIG_BCMDHD_FW_PATH */
/* Adapter path applies only while dhdinfo's copy is empty. */
10124 /* check if we need to initialize the path */
10125 if (dhdinfo
->btfw_path
[0] == '\0') {
10126 if (adapter
&& adapter
->btfw_path
&& adapter
->btfw_path
[0] != '\0')
10127 fw
= adapter
->btfw_path
;
/* Module parameter overrides when non-empty (assignment line is in an
 * extraction gap). */
10130 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10132 if (btfw_path
[0] != '\0')
/*
 * Copy the chosen path.  NOTE(review): strncpy does not NUL-terminate
 * on truncation; safe only because the fw_len >= sizeof(btfw_path)
 * case bails out first (its return falls in an extraction gap).
 * A trailing newline is stripped.
 */
10135 if (fw
&& fw
[0] != '\0') {
10136 fw_len
= strlen(fw
);
10137 if (fw_len
>= sizeof(dhdinfo
->btfw_path
)) {
10138 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10141 strncpy(dhdinfo
->btfw_path
, fw
, sizeof(dhdinfo
->btfw_path
));
10142 if (dhdinfo
->btfw_path
[fw_len
-1] == '\n')
10143 dhdinfo
->btfw_path
[fw_len
-1] = '\0';
/* Consume the module parameter so it is not re-applied next call. */
10146 /* clear the path in module parameter */
10147 btfw_path
[0] = '\0';
/* Final validation (failure return is in an extraction gap). */
10149 if (dhdinfo
->btfw_path
[0] == '\0') {
10150 DHD_ERROR(("bt firmware path not found\n"));
10156 #endif /* defined (BT_OVER_SDIO) */
10158 #ifdef CUSTOMER_HW4_DEBUG
/*
 * dhd_validate_chipid() - sanity-check that the chip id read from the
 * bus matches the chip id the driver was compiled for.
 *
 * config_chipid is selected at compile time from the BCMxxxx_CHIP
 * defines; chipid is queried live via dhd_bus_chip_id().  The
 * SUPPORT_MULTIPLE_* branches accept specific known-compatible
 * mismatched pairs (their bodies fall in extraction gaps -- presumably
 * each is "return TRUE;"; verify against the full source).  Otherwise
 * the result is simple equality.
 *
 * NOTE(review): original lines 10160, 10163, 10188 (#else), 10192-10195
 * (unknown-chip return), and the bodies at 10200-10201, 10205-10206,
 * 10210-10211, 10215-10216, 10220-10221 are absent from this
 * extraction.  The code below is reproduced byte-for-byte as extracted.
 */
10159 bool dhd_validate_chipid(dhd_pub_t
*dhdp
)
10161 uint chipid
= dhd_bus_chip_id(dhdp
);
10162 uint config_chipid
;
/* Compile-time chip selection: exactly one BCMxxxx_CHIP define maps to
 * its chip id; an unknown configuration hits the error branch below. */
10164 #ifdef BCM4375_CHIP
10165 config_chipid
= BCM4375_CHIP_ID
;
10166 #elif defined(BCM4361_CHIP)
10167 config_chipid
= BCM4361_CHIP_ID
;
10168 #elif defined(BCM4359_CHIP)
10169 config_chipid
= BCM4359_CHIP_ID
;
10170 #elif defined(BCM4358_CHIP)
10171 config_chipid
= BCM4358_CHIP_ID
;
10172 #elif defined(BCM4354_CHIP)
10173 config_chipid
= BCM4354_CHIP_ID
;
10174 #elif defined(BCM4339_CHIP)
10175 config_chipid
= BCM4339_CHIP_ID
;
10176 #elif defined(BCM4335_CHIP)
10177 config_chipid
= BCM4335_CHIP_ID
;
10178 #elif defined(BCM43430_CHIP)
10179 config_chipid
= BCM43430_CHIP_ID
;
10180 #elif defined(BCM43018_CHIP)
10181 config_chipid
= BCM43018_CHIP_ID
;
10182 #elif defined(BCM43455_CHIP) || defined(BCM43456_CHIP)
10183 config_chipid
= BCM4345_CHIP_ID
;
10184 #elif defined(BCM43454_CHIP)
10185 config_chipid
= BCM43454_CHIP_ID
;
10186 #elif defined(BCM43012_CHIP_)
10187 config_chipid
= BCM43012_CHIP_ID
;
10189 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
10190 " please add CONFIG_BCMXXXX into the Kernel and"
10191 " BCMXXXX_CHIP definition into the DHD driver\n",
10196 #endif /* BCM4354_CHIP */
/* Known-compatible chip-id pairs accepted despite the mismatch
 * (branch bodies are in extraction gaps). */
10198 #ifdef SUPPORT_MULTIPLE_CHIP_4345X
10199 if (config_chipid
== BCM43454_CHIP_ID
|| config_chipid
== BCM4345_CHIP_ID
) {
10202 #endif /* SUPPORT_MULTIPLE_CHIP_4345X */
10203 #if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10204 if (chipid
== BCM4350_CHIP_ID
&& config_chipid
== BCM4354_CHIP_ID
) {
10207 #endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
10208 #if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10209 if (chipid
== BCM43569_CHIP_ID
&& config_chipid
== BCM4358_CHIP_ID
) {
10212 #endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
10213 #if defined(BCM4359_CHIP)
10214 if (chipid
== BCM4355_CHIP_ID
&& config_chipid
== BCM4359_CHIP_ID
) {
10217 #endif /* BCM4359_CHIP */
10218 #if defined(BCM4361_CHIP)
10219 if (chipid
== BCM4347_CHIP_ID
&& config_chipid
== BCM4361_CHIP_ID
) {
10222 #endif /* BCM4361_CHIP */
/* Default: chip is valid only when the ids match exactly. */
10224 return config_chipid
== chipid
;
10226 #endif /* CUSTOMER_HW4_DEBUG */
10228 #if defined(BT_OVER_SDIO)
10229 wlan_bt_handle_t
dhd_bt_get_pub_hndl(void)
10231 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__
, g_dhd_pub
));
10232 /* assuming that dhd_pub_t type pointer is available from a global variable */
10233 return (wlan_bt_handle_t
) g_dhd_pub
;
10234 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl
);
/*
 * dhd_download_btfw() - download the BT firmware image to the dongle
 * through the opaque handle obtained from dhd_bt_get_pub_hndl().
 *
 * The download runs only when the bus is fully up (DHD_BUS_DATA) and
 * dhd_update_btfw_path() resolves a usable path from btfw_path.
 * Returns the dhd_bus_download_btfw() status -- TODO confirm: the
 * "int ret" declaration, the "if (ret < 0)" test around the error log
 * and the final "return ret;" fall in extraction gaps (original lines
 * 10237-10238, 10241, 10246, 10249-10252 are absent).  The code below
 * is reproduced byte-for-byte as extracted.
 */
10236 int dhd_download_btfw(wlan_bt_handle_t handle
, char* btfw_path
)
/* Recover the driver contexts from the opaque handle. */
10239 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
10240 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10242 /* Download BT firmware image to the dongle */
10243 if (dhd
->pub
.busstate
== DHD_BUS_DATA
&& dhd_update_btfw_path(dhd
, btfw_path
)) {
10244 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__
, dhd
->btfw_path
));
10245 ret
= dhd_bus_download_btfw(dhd
->pub
.bus
, dhd
->pub
.osh
, dhd
->btfw_path
);
/* Error branch: the guarding "if" line is in an extraction gap. */
10247 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10248 __FUNCTION__
, dhd
->btfw_path
));
10253 } EXPORT_SYMBOL(dhd_download_btfw
);
10254 #endif /* defined (BT_OVER_SDIO) */
/*
 * dhd_bus_start() - bring the dongle from powered-off/down to a fully
 * operational DATA state.
 *
 * Visible sequence: clear per-attempt error flags; download firmware +
 * nvram when the bus is DOWN; start the watchdog timer; dhd_bus_init();
 * optional OOB interrupt registration; flow-ring init (PCIe full
 * dongle); dhd_prot_init(); verify busstate reached DHD_BUS_DATA; then
 * dhd_sync_with_dongle() and replay of any pending ARP-offload host IP
 * address.  Error paths unwind the watchdog timer / locks before
 * returning.
 *
 * NOTE(review): this extraction is incomplete -- the "int" return-type
 * line (10256), the "int ret = 0;" declaration, many "return ret;" /
 * "return -ENODEV;"-style statements in the error branches, several
 * closing braces and some #else/#endif pairs fall in numbering gaps
 * (e.g. 10296-10297, 10303-10306, 10322-10324, 10345-10347,
 * 10367-10369, 10377-10379, 10393-10396, 10412-10413, 10435-10437
 * are absent).  The code below is reproduced byte-for-byte as
 * extracted.
 */
10257 dhd_bus_start(dhd_pub_t
*dhdp
)
10260 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10261 unsigned long flags
;
/* Timestamps for the FW-download / F2-sync timing report below. */
10263 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10264 int fw_download_start
= 0, fw_download_end
= 0, f2_sync_start
= 0, f2_sync_end
= 0;
10265 #endif /* DHD_DEBUG && BCMSDIO */
10268 DHD_TRACE(("Enter %s:\n", __FUNCTION__
));
/* Reset the sticky error indicators from any previous session. */
10269 dhdp
->dongle_trap_occured
= 0;
10270 dhdp
->iovar_timeout_occured
= 0;
10271 #ifdef PCIE_FULL_DONGLE
10272 dhdp
->d3ack_timeout_occured
= 0;
10273 #endif /* PCIE_FULL_DONGLE */
10274 #ifdef DHD_MAP_LOGGING
10275 dhdp
->smmu_fault_occurred
= 0;
10276 #endif /* DHD_MAP_LOGGING */
10278 DHD_PERIM_LOCK(dhdp
);
10279 /* try to download image and nvram to the dongle */
10280 if (dhd
->pub
.busstate
== DHD_BUS_DOWN
&& dhd_update_fw_nv_path(dhd
)) {
10281 /* Indicate FW Download has not yet done */
10282 dhd
->pub
.fw_download_done
= FALSE
;
10283 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__
, dhd
->fw_path
, dhd
->nv_path
));
10284 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10285 fw_download_start
= OSL_SYSUPTIME();
10286 #endif /* DHD_DEBUG && BCMSDIO */
10287 ret
= dhd_bus_download_firmware(dhd
->pub
.bus
, dhd
->pub
.osh
,
10288 dhd
->fw_path
, dhd
->nv_path
);
10289 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10290 fw_download_end
= OSL_SYSUPTIME();
10291 #endif /* DHD_DEBUG && BCMSDIO */
/* Download-failure branch (its guarding "if" and return are in an
 * extraction gap). */
10293 DHD_ERROR(("%s: failed to download firmware %s\n",
10294 __FUNCTION__
, dhd
->fw_path
));
10295 DHD_PERIM_UNLOCK(dhdp
);
10298 /* Indicate FW Download has succeeded */
10299 dhd
->pub
.fw_download_done
= TRUE
;
/* The bus must have advanced to LOAD state after download. */
10301 if (dhd
->pub
.busstate
!= DHD_BUS_LOAD
) {
10302 DHD_PERIM_UNLOCK(dhdp
);
10307 dhd_os_sdlock(dhdp
);
10308 #endif /* BCMSDIO */
10310 /* Start the watchdog timer */
10311 dhd
->pub
.tickcnt
= 0;
10312 dhd_os_wd_timer(&dhd
->pub
, dhd_watchdog_ms
);
10314 /* Bring up the bus */
10315 if ((ret
= dhd_bus_init(&dhd
->pub
, FALSE
)) != 0) {
10317 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__
, ret
));
10319 dhd_os_sdunlock(dhdp
);
10320 #endif /* BCMSDIO */
10321 DHD_PERIM_UNLOCK(dhdp
);
10325 DHD_ENABLE_RUNTIME_PM(&dhd
->pub
);
10328 dhd_ulp_set_ulp_state(dhdp
, DHD_ULP_DISABLED
);
10329 #endif /* DHD_ULP */
10330 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
10331 /* Host registration for OOB interrupt */
10332 if (dhd_bus_oob_intr_register(dhdp
)) {
10333 /* deactivate timer and wait for the handler to finish */
10334 #if !defined(BCMPCIE_OOB_HOST_WAKE)
10335 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10336 dhd
->wd_timer_valid
= FALSE
;
10337 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10338 del_timer_sync(&dhd
->timer
);
10340 #endif /* !BCMPCIE_OOB_HOST_WAKE */
10341 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
10342 DHD_PERIM_UNLOCK(dhdp
);
10343 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__
));
10344 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10348 #if defined(BCMPCIE_OOB_HOST_WAKE)
10349 dhd_bus_oob_intr_set(dhdp
, TRUE
);
10351 /* Enable oob at firmware */
10352 dhd_enable_oob_intr(dhd
->pub
.bus
, TRUE
);
10353 #endif /* BCMPCIE_OOB_HOST_WAKE */
10354 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
/* PCIe full dongle: size and initialize the host-to-dongle flow rings. */
10355 #ifdef PCIE_FULL_DONGLE
10357 /* max_h2d_rings includes H2D common rings */
10358 uint32 max_h2d_rings
= dhd_bus_max_h2d_queues(dhd
->pub
.bus
);
10360 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__
,
10362 if ((ret
= dhd_flow_rings_init(&dhd
->pub
, max_h2d_rings
)) != BCME_OK
) {
10364 dhd_os_sdunlock(dhdp
);
10365 #endif /* BCMSDIO */
10366 DHD_PERIM_UNLOCK(dhdp
);
10370 #endif /* PCIE_FULL_DONGLE */
10372 /* Do protocol initialization necessary for IOCTL/IOVAR */
10373 ret
= dhd_prot_init(&dhd
->pub
);
/* NOTE(review): "unlikely(ret) != BCME_OK" compares the truth value of
 * ret against BCME_OK rather than ret itself -- harmless only if
 * BCME_OK == 0 (i.e. equivalent to "ret != 0"); flagging, cannot patch
 * in a byte-identical doc pass. */
10374 if (unlikely(ret
) != BCME_OK
) {
10375 DHD_PERIM_UNLOCK(dhdp
);
10376 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10380 /* If bus is not ready, can't come up */
10381 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
10382 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10383 dhd
->wd_timer_valid
= FALSE
;
10384 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10385 del_timer_sync(&dhd
->timer
);
10386 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__
));
10387 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
10389 dhd_os_sdunlock(dhdp
);
10390 #endif /* BCMSDIO */
10391 DHD_PERIM_UNLOCK(dhdp
);
10392 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10397 dhd_os_sdunlock(dhdp
);
10398 #endif /* BCMSDIO */
10400 /* Bus is ready, query any dongle information */
10401 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10402 f2_sync_start
= OSL_SYSUPTIME();
10403 #endif /* DHD_DEBUG && BCMSDIO */
/* Handshake with the freshly-booted firmware; on failure stop the
 * watchdog before bailing out. */
10404 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0) {
10405 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10406 dhd
->wd_timer_valid
= FALSE
;
10407 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10408 del_timer_sync(&dhd
->timer
);
10409 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__
));
10410 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10411 DHD_PERIM_UNLOCK(dhdp
);
/* Exynos PCIe host: re-enable L1 substates now that link is up. */
10414 #if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
10415 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__
));
10416 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI
);
10417 #endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
10419 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10420 f2_sync_end
= OSL_SYSUPTIME();
10421 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10422 (fw_download_end
- fw_download_start
) + (f2_sync_end
- f2_sync_start
)));
10423 #endif /* DHD_DEBUG && BCMSDIO */
/* Replay a host IP address that arrived while the bus was down. */
10425 #ifdef ARP_OFFLOAD_SUPPORT
10426 if (dhd
->pend_ipaddr
) {
10427 #ifdef AOE_IP_ALIAS_SUPPORT
10428 aoe_update_host_ipv4_table(&dhd
->pub
, dhd
->pend_ipaddr
, TRUE
, 0);
10429 #endif /* AOE_IP_ALIAS_SUPPORT */
10430 dhd
->pend_ipaddr
= 0;
10432 #endif /* ARP_OFFLOAD_SUPPORT */
10434 DHD_PERIM_UNLOCK(dhdp
);
10439 int _dhd_tdls_enable(dhd_pub_t
*dhd
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
10441 uint32 tdls
= tdls_on
;
10443 uint32 tdls_auto_op
= 0;
10444 uint32 tdls_idle_time
= CUSTOM_TDLS_IDLE_MODE_SETTING
;
10445 int32 tdls_rssi_high
= CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
;
10446 int32 tdls_rssi_low
= CUSTOM_TDLS_RSSI_THRESHOLD_LOW
;
10447 uint32 tdls_pktcnt_high
= CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH
;
10448 uint32 tdls_pktcnt_low
= CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW
;
10450 BCM_REFERENCE(mac
);
10451 if (!FW_SUPPORTED(dhd
, tdls
))
10454 if (dhd
->tdls_enable
== tdls_on
)
10456 ret
= dhd_iovar(dhd
, 0, "tdls_enable", (char *)&tdls
, sizeof(tdls
), NULL
, 0, TRUE
);
10458 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__
, tdls
, ret
));
10461 dhd
->tdls_enable
= tdls_on
;
10464 tdls_auto_op
= auto_on
;
10465 ret
= dhd_iovar(dhd
, 0, "tdls_auto_op", (char *)&tdls_auto_op
, sizeof(tdls_auto_op
), NULL
,
10468 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__
, ret
));
10472 if (tdls_auto_op
) {
10473 ret
= dhd_iovar(dhd
, 0, "tdls_idle_time", (char *)&tdls_idle_time
,
10474 sizeof(tdls_idle_time
), NULL
, 0, TRUE
);
10476 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__
, ret
));
10479 ret
= dhd_iovar(dhd
, 0, "tdls_rssi_high", (char *)&tdls_rssi_high
,
10480 sizeof(tdls_rssi_high
), NULL
, 0, TRUE
);
10482 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__
, ret
));
10485 ret
= dhd_iovar(dhd
, 0, "tdls_rssi_low", (char *)&tdls_rssi_low
,
10486 sizeof(tdls_rssi_low
), NULL
, 0, TRUE
);
10488 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__
, ret
));
10491 ret
= dhd_iovar(dhd
, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high
,
10492 sizeof(tdls_pktcnt_high
), NULL
, 0, TRUE
);
10494 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__
, ret
));
10497 ret
= dhd_iovar(dhd
, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low
,
10498 sizeof(tdls_pktcnt_low
), NULL
, 0, TRUE
);
10500 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__
, ret
));
10508 int dhd_tdls_enable(struct net_device
*dev
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
10510 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
10513 ret
= _dhd_tdls_enable(&dhd
->pub
, tdls_on
, auto_on
, mac
);
10519 dhd_tdls_set_mode(dhd_pub_t
*dhd
, bool wfd_mode
)
10522 bool auto_on
= false;
10523 uint32 mode
= wfd_mode
;
10525 #ifdef ENABLE_TDLS_AUTO_MODE
10533 #endif /* ENABLE_TDLS_AUTO_MODE */
10534 ret
= _dhd_tdls_enable(dhd
, false, auto_on
, NULL
);
10536 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret
));
10540 ret
= dhd_iovar(dhd
, 0, "tdls_wfd_mode", (char *)&mode
, sizeof(mode
), NULL
, 0, TRUE
);
10541 if ((ret
< 0) && (ret
!= BCME_UNSUPPORTED
)) {
10542 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__
, ret
));
10546 ret
= _dhd_tdls_enable(dhd
, true, auto_on
, NULL
);
10548 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret
));
10552 dhd
->tdls_mode
= mode
;
#ifdef PCIE_FULL_DONGLE
/*
 * Maintain the host-side TDLS peer table from dongle events.
 * WLC_E_TDLS_PEER_CONNECTED adds the peer MAC to the head of the list
 * (rejecting duplicates); WLC_E_TDLS_PEER_DISCONNECTED tears down the
 * peer's flow rings and unlinks/frees its node. List mutation is done
 * under DHD_TDLS_LOCK. Returns BCME_OK, or BCME_ERROR on unknown event
 * reason, bad ifindex, duplicate peer, or allocation failure.
 * NOTE(review): reconstructed from a garbled extract -- the connect
 * assignments, early returns, connect/disconnect branch split and list
 * advancement were missing; confirm against a pristine tree.
 */
int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
{
	dhd_pub_t *dhd_pub = dhdp;
	tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
	tdls_peer_node_t *new = NULL, *prev = NULL;
	int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
	uint8 *da = (uint8 *)&event->addr.octet[0];
	bool connect = FALSE;
	uint32 reason = ntoh32(event->reason);
	unsigned long flags;

	if (reason == WLC_E_TDLS_PEER_CONNECTED)
		connect = TRUE;
	else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
		connect = FALSE;
	else {
		DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (ifindex == DHD_BAD_IF)
		return BCME_ERROR;

	if (connect) {
		/* Reject a second CONNECTED event for the same peer. */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
					__FUNCTION__, __LINE__));
				return BCME_ERROR;
			}
			cur = cur->next;
		}

		new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
		if (new == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
			return BCME_ERROR;
		}
		memcpy(new->addr, da, ETHER_ADDR_LEN);
		/* Insert at the head of the peer list under the tdls lock. */
		DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
		new->next = dhd_pub->peer_tbl.node;
		dhd_pub->peer_tbl.node = new;
		dhd_pub->peer_tbl.tdls_peer_count++;
		DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	} else {
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				/* Flow rings must go before the node is freed. */
				dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
				DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
				if (prev)
					prev->next = cur->next;
				else
					dhd_pub->peer_tbl.node = cur->next;
				MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
				dhd_pub->peer_tbl.tdls_peer_count--;
				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
				return BCME_OK;
			}
			prev = cur;
			cur = cur->next;
		}
		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
	}
	return BCME_OK;
}
#endif /* PCIE_FULL_DONGLE */
10625 bool dhd_is_concurrent_mode(dhd_pub_t
*dhd
)
10630 if (dhd
->op_mode
& DHD_FLAG_CONCURR_MULTI_CHAN_MODE
)
10632 else if ((dhd
->op_mode
& DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
) ==
10633 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
)
10638 #if !defined(AP) && defined(WLP2P)
10639 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
10640 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
10641 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
10642 * would still be named as fw_bcmdhd_apsta.
10645 dhd_get_concurrent_capabilites(dhd_pub_t
*dhd
)
10648 char buf
[WLC_IOCTL_SMLEN
];
10649 bool mchan_supported
= FALSE
;
10650 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
10651 * test mode, that means we only will use the mode as it is
10653 if (dhd
->op_mode
& (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))
10655 if (FW_SUPPORTED(dhd
, vsdb
)) {
10656 mchan_supported
= TRUE
;
10658 if (!FW_SUPPORTED(dhd
, p2p
)) {
10659 DHD_TRACE(("Chip does not support p2p\n"));
10662 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
10663 memset(buf
, 0, sizeof(buf
));
10664 ret
= dhd_iovar(dhd
, 0, "p2p", NULL
, 0, (char *)&buf
,
10665 sizeof(buf
), FALSE
);
10667 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__
, ret
));
10671 /* By default, chip supports single chan concurrency,
10672 * now lets check for mchan
10674 ret
= DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
;
10675 if (mchan_supported
)
10676 ret
|= DHD_FLAG_CONCURR_MULTI_CHAN_MODE
;
10677 if (FW_SUPPORTED(dhd
, rsdb
)) {
10678 ret
|= DHD_FLAG_RSDB_MODE
;
10680 #ifdef WL_SUPPORT_MULTIP2P
10681 if (FW_SUPPORTED(dhd
, mp2p
)) {
10682 ret
|= DHD_FLAG_MP2P_MODE
;
10684 #endif /* WL_SUPPORT_MULTIP2P */
10685 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10689 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10699 dhd_preinit_aibss_ioctls(dhd_pub_t
*dhd
, char *iov_buf_smlen
)
10702 aibss_bcn_force_config_t bcn_config
;
10707 #endif /* WLAIBSS_PS */
10711 ret
= dhd_iovar(dhd
, 0, "aibss", (char *)&aibss
, sizeof(aibss
), NULL
, 0, TRUE
);
10713 if (ret
== BCME_UNSUPPORTED
) {
10714 DHD_ERROR(("%s aibss is not supported\n",
10718 DHD_ERROR(("%s Set aibss to %d failed %d\n",
10719 __FUNCTION__
, aibss
, ret
));
10726 ret
= dhd_iovar(dhd
, 0, "aibss_ps", (char *)&aibss_ps
, sizeof(aibss_ps
), NULL
, 0, TRUE
);
10728 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
10729 __FUNCTION__
, aibss
, ret
));
10734 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ATIM
,
10735 (char *)&atim
, sizeof(atim
), TRUE
, 0)) < 0) {
10736 DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
10737 __FUNCTION__
, ret
));
10740 #endif /* WLAIBSS_PS */
10742 memset(&bcn_config
, 0, sizeof(bcn_config
));
10743 bcn_config
.initial_min_bcn_dur
= AIBSS_INITIAL_MIN_BCN_DUR
;
10744 bcn_config
.min_bcn_dur
= AIBSS_MIN_BCN_DUR
;
10745 bcn_config
.bcn_flood_dur
= AIBSS_BCN_FLOOD_DUR
;
10746 bcn_config
.version
= AIBSS_BCN_FORCE_CONFIG_VER_0
;
10747 bcn_config
.len
= sizeof(bcn_config
);
10749 ret
= dhd_iovar(dhd
, 0, "aibss_bcn_force_config", (char *)&bcn_config
,
10750 sizeof(aibss_bcn_force_config_t
), NULL
, 0, TRUE
);
10752 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
10753 __FUNCTION__
, AIBSS_INITIAL_MIN_BCN_DUR
, AIBSS_MIN_BCN_DUR
,
10754 AIBSS_BCN_FLOOD_DUR
, ret
));
10758 ibss_coalesce
= IBSS_COALESCE_DEFAULT
;
10759 ret
= dhd_iovar(dhd
, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce
,
10760 sizeof(ibss_coalesce
), NULL
, 0, TRUE
);
10762 DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
10763 __FUNCTION__
, ret
));
10767 dhd
->op_mode
|= DHD_FLAG_IBSS_MODE
;
10770 #endif /* WLAIBSS */
10772 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10775 dhd_check_adps_bad_ap(dhd_pub_t
*dhd
)
10777 struct net_device
*ndev
;
10778 struct bcm_cfg80211
*cfg
;
10779 struct wl_profile
*profile
;
10780 struct ether_addr bssid
;
10782 if (!dhd_is_associated(dhd
, 0, NULL
)) {
10783 DHD_ERROR(("%s - not associated\n", __FUNCTION__
));
10787 ndev
= dhd_linux_get_primary_netdev(dhd
);
10789 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__
));
10793 cfg
= wl_get_cfg(ndev
);
10795 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__
));
10799 profile
= wl_get_profile_by_netdev(cfg
, ndev
);
10800 memcpy(bssid
.octet
, profile
->bssid
, ETHER_ADDR_LEN
);
10801 if (wl_adps_bad_ap_check(cfg
, &bssid
)) {
10802 if (wl_adps_enabled(cfg
, ndev
)) {
10803 wl_adps_set_suspend(cfg
, ndev
, ADPS_SUSPEND
);
10809 #endif /* WL_BAM */
10812 dhd_enable_adps(dhd_pub_t
*dhd
, uint8 on
)
10818 bcm_iov_buf_t
*iov_buf
= NULL
;
10819 wl_adps_params_v1_t
*data
= NULL
;
10821 len
= OFFSETOF(bcm_iov_buf_t
, data
) + sizeof(*data
);
10822 iov_buf
= MALLOC(dhd
->osh
, len
);
10823 if (iov_buf
== NULL
) {
10824 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__
, len
));
10829 iov_buf
->version
= WL_ADPS_IOV_VER
;
10830 iov_buf
->len
= sizeof(*data
);
10831 iov_buf
->id
= WL_ADPS_IOV_MODE
;
10833 data
= (wl_adps_params_v1_t
*)iov_buf
->data
;
10834 data
->version
= ADPS_SUB_IOV_VERSION_1
;
10835 data
->length
= sizeof(*data
);
10838 for (i
= 1; i
<= MAX_BANDS
; i
++) {
10840 ret
= dhd_iovar(dhd
, 0, "adps", (char *)iov_buf
, len
, NULL
, 0, TRUE
);
10842 if (ret
== BCME_UNSUPPORTED
) {
10843 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__
));
10848 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10849 __FUNCTION__
, on
? "On" : "Off", i
, ret
));
10857 dhd_check_adps_bad_ap(dhd
);
10859 #endif /* WL_BAM */
10863 MFREE(dhd
->osh
, iov_buf
, len
);
10868 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
10871 dhd_preinit_ioctls(dhd_pub_t
*dhd
)
10874 char eventmask
[WL_EVENTING_MASK_LEN
];
10875 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12]; /* Room for "event_msgs" + '\0' + bitvec */
10876 uint32 buf_key_b4_m4
= 1;
10878 eventmsgs_ext_t
*eventmask_msg
= NULL
;
10879 char* iov_buf
= NULL
;
10881 uint32 wnm_cap
= 0;
10882 #if defined(BCMSUP_4WAY_HANDSHAKE)
10883 uint32 sup_wpa
= 1;
10884 #endif /* BCMSUP_4WAY_HANDSHAKE */
10885 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
10886 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
10887 uint32 ampdu_ba_wsize
= 0;
10888 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
10889 #if defined(CUSTOM_AMPDU_MPDU)
10890 int32 ampdu_mpdu
= 0;
10892 #if defined(CUSTOM_AMPDU_RELEASE)
10893 int32 ampdu_release
= 0;
10895 #if defined(CUSTOM_AMSDU_AGGSF)
10896 int32 amsdu_aggsf
= 0;
10899 #if defined(BCMSDIO)
10900 #ifdef PROP_TXSTATUS
10901 int wlfc_enable
= TRUE
;
10902 #ifndef DISABLE_11N
10903 uint32 hostreorder
= 1;
10904 #endif /* DISABLE_11N */
10905 #endif /* PROP_TXSTATUS */
10907 #ifndef PCIE_FULL_DONGLE
10908 uint32 wl_ap_isolate
;
10909 #endif /* PCIE_FULL_DONGLE */
10910 uint32 frameburst
= CUSTOM_FRAMEBURST_SET
;
10911 uint wnm_bsstrans_resp
= 0;
10912 #ifdef SUPPORT_SET_CAC
10913 #ifdef SUPPORT_CUSTOM_SET_CAC
10917 #endif /* SUPPORT_CUSTOM_SET_CAC */
10918 #endif /* SUPPORT_SET_CAC */
10920 #if defined(DHD_NON_DMA_M2M_CORRUPTION)
10921 dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk
;
10922 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
10924 #ifdef DHD_ENABLE_LPC
10926 #endif /* DHD_ENABLE_LPC */
10927 uint power_mode
= PM_FAST
;
10928 #if defined(BCMSDIO)
10929 uint32 dongle_align
= DHD_SDALIGN
;
10930 uint32 glom
= CUSTOM_GLOM_SETTING
;
10931 #endif /* defined(BCMSDIO) */
10932 uint bcn_timeout
= CUSTOM_BCN_TIMEOUT
;
10933 uint scancache_enab
= TRUE
;
10934 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10935 uint32 bcn_li_bcn
= 1;
10936 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10937 uint retry_max
= CUSTOM_ASSOC_RETRY_MAX
;
10938 #if defined(ARP_OFFLOAD_SUPPORT)
10941 int scan_assoc_time
= DHD_SCAN_ASSOC_ACTIVE_TIME
;
10942 int scan_unassoc_time
= DHD_SCAN_UNASSOC_ACTIVE_TIME
;
10943 int scan_passive_time
= DHD_SCAN_PASSIVE_TIME
;
10944 char buf
[WLC_IOCTL_SMLEN
];
10946 uint32 listen_interval
= CUSTOM_LISTEN_INTERVAL
; /* Default Listen Interval in Beacons */
10947 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10948 wl_el_tag_params_t
*el_tag
= NULL
;
10949 #endif /* DHD_8021X_DUMP */
10952 int roam_trigger
[2] = {CUSTOM_ROAM_TRIGGER_SETTING
, WLC_BAND_ALL
};
10953 int roam_scan_period
[2] = {10, WLC_BAND_ALL
};
10954 int roam_delta
[2] = {CUSTOM_ROAM_DELTA_SETTING
, WLC_BAND_ALL
};
10955 #ifdef ROAM_AP_ENV_DETECTION
10956 int roam_env_mode
= AP_ENV_INDETERMINATE
;
10957 #endif /* ROAM_AP_ENV_DETECTION */
10958 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10959 int roam_fullscan_period
= 60;
10960 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10961 int roam_fullscan_period
= 120;
10962 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10963 #ifdef DISABLE_BCNLOSS_ROAM
10964 uint roam_bcnloss_off
= 1;
10965 #endif /* DISABLE_BCNLOSS_ROAM */
10967 #ifdef DISABLE_BUILTIN_ROAM
10969 #endif /* DISABLE_BUILTIN_ROAM */
10970 #endif /* ROAM_ENABLE */
10972 #if defined(SOFTAP)
10975 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10976 struct ether_addr p2p_ea
;
10981 #ifdef SOFTAP_UAPSD_OFF
10982 uint32 wme_apsd
= 0;
10983 #endif /* SOFTAP_UAPSD_OFF */
10984 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10985 uint32 apsta
= 1; /* Enable APSTA mode */
10986 #elif defined(SOFTAP_AND_GC)
10989 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10990 #ifdef GET_CUSTOM_MAC_ENABLE
10991 struct ether_addr ea_addr
;
10992 #endif /* GET_CUSTOM_MAC_ENABLE */
10999 #endif /* DISABLE_11N */
11003 #endif /* USE_WL_TXBF */
11004 #ifdef DISABLE_TXBFR
11005 uint32 txbf_bfr_cap
= 0;
11006 #endif /* DISABLE_TXBFR */
11007 #ifdef AMPDU_VO_ENABLE
11008 struct ampdu_tid_control tid
;
11010 #if defined(PROP_TXSTATUS)
11011 #ifdef USE_WFA_CERT_CONF
11013 #endif /* USE_WFA_CERT_CONF */
11014 #endif /* PROP_TXSTATUS */
11015 #ifdef DHD_SET_FW_HIGHSPEED
11016 uint32 ack_ratio
= 250;
11017 uint32 ack_ratio_depth
= 64;
11018 #endif /* DHD_SET_FW_HIGHSPEED */
11019 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11020 uint32 vht_features
= 0; /* init to 0, will be set based on each support */
11021 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11022 #ifdef DISABLE_11N_PROPRIETARY_RATES
11023 uint32 ht_features
= 0;
11024 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11025 #ifdef CUSTOM_EVENT_PM_WAKE
11026 uint32 pm_awake_thresh
= CUSTOM_EVENT_PM_WAKE
;
11027 #endif /* CUSTOM_EVENT_PM_WAKE */
11028 #ifdef DISABLE_PRUNED_SCAN
11029 uint32 scan_features
= 0;
11030 #endif /* DISABLE_PRUNED_SCAN */
11031 #ifdef DHD_2G_ONLY_SUPPORT
11032 uint band
= WLC_BAND_2G
;
11033 #endif /* DHD_2G_ONLY_SUPPORT */
11034 #ifdef BCMPCIE_OOB_HOST_WAKE
11035 uint32 hostwake_oob
= 0;
11036 #endif /* BCMPCIE_OOB_HOST_WAKE */
11037 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
11038 uint32 btmdelta
= WBTEXT_BTMDELTA
;
11039 #endif /* WBTEXT && WBTEXT_BTMDELTA */
11041 #ifdef PKT_FILTER_SUPPORT
11042 dhd_pkt_filter_enable
= TRUE
;
11044 dhd
->apf_set
= FALSE
;
11046 #endif /* PKT_FILTER_SUPPORT */
11047 dhd
->suspend_bcn_li_dtim
= CUSTOM_SUSPEND_BCN_LI_DTIM
;
11048 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
11049 dhd
->max_dtim_enable
= TRUE
;
11051 dhd
->max_dtim_enable
= FALSE
;
11052 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
11053 dhd
->disable_dtim_in_suspend
= FALSE
;
11054 #ifdef CUSTOM_SET_OCLOFF
11055 dhd
->ocl_off
= FALSE
;
11056 #endif /* CUSTOM_SET_OCLOFF */
11057 #ifdef SUPPORT_SET_TID
11058 dhd
->tid_mode
= SET_TID_OFF
;
11059 dhd
->target_uid
= 0;
11060 dhd
->target_tid
= 0;
11061 #endif /* SUPPORT_SET_TID */
11062 DHD_TRACE(("Enter %s\n", __FUNCTION__
));
11065 #ifdef CUSTOMER_HW4_DEBUG
11066 if (!dhd_validate_chipid(dhd
)) {
11067 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
11068 __FUNCTION__
, dhd_bus_chip_id(dhd
)));
11069 #ifndef SUPPORT_MULTIPLE_CHIPS
11072 #endif /* !SUPPORT_MULTIPLE_CHIPS */
11074 #endif /* CUSTOMER_HW4_DEBUG */
11075 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
11076 (op_mode
== DHD_FLAG_MFG_MODE
)) {
11077 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
11078 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11079 /* disable runtimePM by default in MFG mode. */
11080 pm_runtime_disable(dhd_bus_to_dev(dhd
->bus
));
11081 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11082 #ifdef DHD_PCIE_RUNTIMEPM
11083 /* Disable RuntimePM in mfg mode */
11084 DHD_DISABLE_RUNTIME_PM(dhd
);
11085 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__
));
11086 #endif /* DHD_PCIE_RUNTIME_PM */
11087 /* Check and adjust IOCTL response timeout for Manufactring firmware */
11088 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT
);
11089 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
11092 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT
);
11093 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__
));
11095 #ifdef BCMPCIE_OOB_HOST_WAKE
11096 ret
= dhd_iovar(dhd
, 0, "bus:hostwake_oob", NULL
, 0, (char *)&hostwake_oob
,
11097 sizeof(hostwake_oob
), FALSE
);
11099 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__
));
11101 if (hostwake_oob
== 0) {
11102 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
11104 ret
= BCME_UNSUPPORTED
;
11107 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__
));
11110 #endif /* BCMPCIE_OOB_HOST_WAKE */
11111 #ifdef GET_CUSTOM_MAC_ENABLE
11112 ret
= wifi_platform_get_mac_addr(dhd
->info
->adapter
, ea_addr
.octet
);
11114 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", (char *)&ea_addr
, ETHER_ADDR_LEN
, NULL
, 0,
11117 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
11121 memcpy(dhd
->mac
.octet
, ea_addr
.octet
, ETHER_ADDR_LEN
);
11123 #endif /* GET_CUSTOM_MAC_ENABLE */
11124 /* Get the default device MAC address directly from firmware */
11125 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
11127 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__
, ret
));
11131 /* Update public MAC address after reading from Firmware */
11132 memcpy(dhd
->mac
.octet
, buf
, ETHER_ADDR_LEN
);
11134 #ifdef GET_CUSTOM_MAC_ENABLE
11136 #endif /* GET_CUSTOM_MAC_ENABLE */
11138 #ifdef DHD_USE_CLMINFO_PARSER
11139 if ((ret
= dhd_get_clminfo(dhd
, clm_path
)) < 0) {
11140 if (dhd
->is_clm_mult_regrev
) {
11141 DHD_ERROR(("%s: CLM Information load failed. Abort initialization.\n",
11146 #endif /* DHD_USE_CLMINFO_PARSER */
11147 if ((ret
= dhd_apply_default_clm(dhd
, clm_path
)) < 0) {
11148 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__
));
11152 /* get a capabilities from firmware */
11154 uint32 cap_buf_size
= sizeof(dhd
->fw_capabilities
);
11155 memset(dhd
->fw_capabilities
, 0, cap_buf_size
);
11156 ret
= dhd_iovar(dhd
, 0, "cap", NULL
, 0, dhd
->fw_capabilities
, (cap_buf_size
- 1),
11159 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11160 __FUNCTION__
, ret
));
11164 memmove(&dhd
->fw_capabilities
[1], dhd
->fw_capabilities
, (cap_buf_size
- 1));
11165 dhd
->fw_capabilities
[0] = ' ';
11166 dhd
->fw_capabilities
[cap_buf_size
- 2] = ' ';
11167 dhd
->fw_capabilities
[cap_buf_size
- 1] = '\0';
11170 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_HOSTAP_MODE
) ||
11171 (op_mode
== DHD_FLAG_HOSTAP_MODE
)) {
11172 #ifdef SET_RANDOM_MAC_SOFTAP
11174 #endif /* SET_RANDOM_MAC_SOFTAP */
11175 dhd
->op_mode
= DHD_FLAG_HOSTAP_MODE
;
11176 #if defined(ARP_OFFLOAD_SUPPORT)
11179 #ifdef PKT_FILTER_SUPPORT
11180 dhd_pkt_filter_enable
= FALSE
;
11182 #ifdef SET_RANDOM_MAC_SOFTAP
11183 SRANDOM32((uint
)jiffies
);
11184 rand_mac
= RANDOM32();
11185 iovbuf
[0] = (unsigned char)(vendor_oui
>> 16) | 0x02; /* local admin bit */
11186 iovbuf
[1] = (unsigned char)(vendor_oui
>> 8);
11187 iovbuf
[2] = (unsigned char)vendor_oui
;
11188 iovbuf
[3] = (unsigned char)(rand_mac
& 0x0F) | 0xF0;
11189 iovbuf
[4] = (unsigned char)(rand_mac
>> 8);
11190 iovbuf
[5] = (unsigned char)(rand_mac
>> 16);
11192 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", (char *)&iovbuf
, ETHER_ADDR_LEN
, NULL
, 0,
11195 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
11197 memcpy(dhd
->mac
.octet
, iovbuf
, ETHER_ADDR_LEN
);
11198 #endif /* SET_RANDOM_MAC_SOFTAP */
11199 #ifdef USE_DYNAMIC_F2_BLKSIZE
11200 dhdsdio_func_blocksize(dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
11201 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11202 #ifdef SOFTAP_UAPSD_OFF
11203 ret
= dhd_iovar(dhd
, 0, "wme_apsd", (char *)&wme_apsd
, sizeof(wme_apsd
), NULL
, 0,
11206 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11207 __FUNCTION__
, ret
));
11209 #endif /* SOFTAP_UAPSD_OFF */
11210 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
11211 (op_mode
== DHD_FLAG_MFG_MODE
)) {
11212 #if defined(ARP_OFFLOAD_SUPPORT)
11214 #endif /* ARP_OFFLOAD_SUPPORT */
11215 #ifdef PKT_FILTER_SUPPORT
11216 dhd_pkt_filter_enable
= FALSE
;
11217 #endif /* PKT_FILTER_SUPPORT */
11218 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
11219 #ifdef USE_DYNAMIC_F2_BLKSIZE
11220 dhdsdio_func_blocksize(dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
11221 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11222 #ifndef CUSTOM_SET_ANTNPM
11223 if (FW_SUPPORTED(dhd
, rsdb
)) {
11224 wl_config_t rsdb_mode
;
11225 memset(&rsdb_mode
, 0, sizeof(rsdb_mode
));
11226 ret
= dhd_iovar(dhd
, 0, "rsdb_mode", (char *)&rsdb_mode
, sizeof(rsdb_mode
),
11229 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
11230 __FUNCTION__
, ret
));
11233 #endif /* !CUSTOM_SET_ANTNPM */
11235 uint32 concurrent_mode
= 0;
11236 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_P2P_MODE
) ||
11237 (op_mode
== DHD_FLAG_P2P_MODE
)) {
11238 #if defined(ARP_OFFLOAD_SUPPORT)
11241 #ifdef PKT_FILTER_SUPPORT
11242 dhd_pkt_filter_enable
= FALSE
;
11244 dhd
->op_mode
= DHD_FLAG_P2P_MODE
;
11245 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_IBSS_MODE
) ||
11246 (op_mode
== DHD_FLAG_IBSS_MODE
)) {
11247 dhd
->op_mode
= DHD_FLAG_IBSS_MODE
;
11249 dhd
->op_mode
= DHD_FLAG_STA_MODE
;
11250 #if !defined(AP) && defined(WLP2P)
11251 if (dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
&&
11252 (concurrent_mode
= dhd_get_concurrent_capabilites(dhd
))) {
11253 #if defined(ARP_OFFLOAD_SUPPORT)
11256 dhd
->op_mode
|= concurrent_mode
;
11259 /* Check if we are enabling p2p */
11260 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
11261 ret
= dhd_iovar(dhd
, 0, "apsta", (char *)&apsta
, sizeof(apsta
), NULL
, 0,
11264 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__
, ret
));
11266 #if defined(SOFTAP_AND_GC)
11267 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_AP
,
11268 (char *)&ap_mode
, sizeof(ap_mode
), TRUE
, 0)) < 0) {
11269 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__
, ret
));
11272 memcpy(&p2p_ea
, &dhd
->mac
, ETHER_ADDR_LEN
);
11273 ETHER_SET_LOCALADDR(&p2p_ea
);
11274 ret
= dhd_iovar(dhd
, 0, "p2p_da_override", (char *)&p2p_ea
, sizeof(p2p_ea
),
11277 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__
, ret
));
11279 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11282 (void)concurrent_mode
;
11286 #ifdef DISABLE_PRUNED_SCAN
11287 if (FW_SUPPORTED(dhd
, rsdb
)) {
11288 ret
= dhd_iovar(dhd
, 0, "scan_features", (char *)&scan_features
,
11289 sizeof(scan_features
), iovbuf
, sizeof(iovbuf
), FALSE
);
11291 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
11292 __FUNCTION__
, ret
));
11294 memcpy(&scan_features
, iovbuf
, 4);
11295 scan_features
&= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM
;
11296 ret
= dhd_iovar(dhd
, 0, "scan_features", (char *)&scan_features
,
11297 sizeof(scan_features
), NULL
, 0, TRUE
);
11299 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
11300 __FUNCTION__
, ret
));
11304 #endif /* DISABLE_PRUNED_SCAN */
11306 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG
"\n",
11307 dhd
->op_mode
, MAC2STRDBG(dhd
->mac
.octet
)));
11309 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11310 if (dhd
->op_mode
== DHD_FLAG_HOSTAP_MODE
)
11311 dhd
->info
->rxthread_enabled
= FALSE
;
11313 dhd
->info
->rxthread_enabled
= TRUE
;
11315 /* Set Country code */
11316 if (dhd
->dhd_cspec
.ccode
[0] != 0) {
11317 ret
= dhd_iovar(dhd
, 0, "country", (char *)&dhd
->dhd_cspec
, sizeof(wl_country_t
),
11320 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__
));
11323 #ifdef DHD_2G_ONLY_SUPPORT
11324 DHD_ERROR(("Enabled DHD 2G only support!!\n"));
11325 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_BAND
, (char *)&band
, sizeof(band
), TRUE
, 0);
11327 DHD_ERROR(("%s Set Band B failed %d\n", __FUNCTION__
, ret
));
11329 #endif /* DHD_2G_ONLY_SUPPORT */
11331 /* Set Listen Interval */
11332 ret
= dhd_iovar(dhd
, 0, "assoc_listen", (char *)&listen_interval
, sizeof(listen_interval
),
11335 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__
, ret
));
11337 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11338 #ifdef USE_WFA_CERT_CONF
11339 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_ROAMOFF
, &roamvar
) == BCME_OK
) {
11340 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__
, roamvar
));
11342 #endif /* USE_WFA_CERT_CONF */
11343 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
11344 ret
= dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
, sizeof(roamvar
), NULL
, 0, TRUE
);
11345 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11346 #if defined(ROAM_ENABLE)
11347 #ifdef DISABLE_BCNLOSS_ROAM
11348 ret
= dhd_iovar(dhd
, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off
,
11349 sizeof(roam_bcnloss_off
), NULL
, 0, TRUE
);
11350 #endif /* DISABLE_BCNLOSS_ROAM */
11351 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, roam_trigger
,
11352 sizeof(roam_trigger
), TRUE
, 0)) < 0)
11353 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__
, ret
));
11354 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
, roam_scan_period
,
11355 sizeof(roam_scan_period
), TRUE
, 0)) < 0)
11356 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__
, ret
));
11357 if ((dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, roam_delta
,
11358 sizeof(roam_delta
), TRUE
, 0)) < 0)
11359 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__
, ret
));
11360 ret
= dhd_iovar(dhd
, 0, "fullroamperiod", (char *)&roam_fullscan_period
,
11361 sizeof(roam_fullscan_period
), NULL
, 0, TRUE
);
11363 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__
, ret
));
11364 #ifdef ROAM_AP_ENV_DETECTION
11365 if (roam_trigger
[0] == WL_AUTO_ROAM_TRIGGER
) {
11366 if (dhd_iovar(dhd
, 0, "roam_env_detection", (char *)&roam_env_mode
,
11367 sizeof(roam_env_mode
), NULL
, 0, TRUE
) == BCME_OK
)
11368 dhd
->roam_env_detection
= TRUE
;
11370 dhd
->roam_env_detection
= FALSE
;
11372 #endif /* ROAM_AP_ENV_DETECTION */
11373 #endif /* ROAM_ENABLE */
11375 #ifdef CUSTOM_EVENT_PM_WAKE
11376 ret
= dhd_iovar(dhd
, 0, "const_awake_thresh", (char *)&pm_awake_thresh
,
11377 sizeof(pm_awake_thresh
), NULL
, 0, TRUE
);
11379 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__
, ret
));
11381 #endif /* CUSTOM_EVENT_PM_WAKE */
11383 ret
= dhd_iovar(dhd
, 0, "okc_enable", (char *)&okc
, sizeof(okc
), NULL
, 0, TRUE
);
11386 ret
= dhd_iovar(dhd
, 0, "ccx_enable", (char *)&ccx
, sizeof(ccx
), NULL
, 0, TRUE
);
11387 #endif /* BCMCCX */
11390 dhd
->tdls_enable
= FALSE
;
11391 dhd_tdls_set_mode(dhd
, false);
11392 #endif /* WLTDLS */
11394 #ifdef DHD_ENABLE_LPC
11396 ret
= dhd_iovar(dhd
, 0, "lpc", (char *)&lpc
, sizeof(lpc
), NULL
, 0, TRUE
);
11398 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
11400 if (ret
== BCME_NOTDOWN
) {
11402 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
11403 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11404 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__
, ret
, lpc
));
11406 ret
= dhd_iovar(dhd
, 0, "lpc", (char *)&lpc
, sizeof(lpc
), NULL
, 0, TRUE
);
11407 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__
, ret
));
11410 #endif /* DHD_ENABLE_LPC */
11413 if (dhd
->op_mode
& DHD_FLAG_STA_MODE
) {
11414 if ((ret
= dhd_enable_adps(dhd
, ADPS_ENABLE
)) != BCME_OK
) {
11415 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11416 __FUNCTION__
, ret
));
11419 #endif /* WLADPS */
11421 #ifdef DHD_PM_CONTROL_FROM_FILE
11422 sec_control_pm(dhd
, &power_mode
);
11424 /* Set PowerSave mode */
11425 (void) dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
, sizeof(power_mode
), TRUE
, 0);
11426 #endif /* DHD_PM_CONTROL_FROM_FILE */
11428 #if defined(BCMSDIO)
11429 /* Match Host and Dongle rx alignment */
11430 ret
= dhd_iovar(dhd
, 0, "bus:txglomalign", (char *)&dongle_align
, sizeof(dongle_align
),
11433 #ifdef USE_WFA_CERT_CONF
11434 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_BUS_TXGLOM_MODE
, &glom
) == BCME_OK
) {
11435 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__
, glom
));
11437 #endif /* USE_WFA_CERT_CONF */
11438 if (glom
!= DEFAULT_GLOM_VALUE
) {
11439 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__
, glom
));
11440 ret
= dhd_iovar(dhd
, 0, "bus:txglom", (char *)&glom
, sizeof(glom
), NULL
, 0, TRUE
);
11442 #endif /* defined(BCMSDIO) */
11444 /* Setup timeout if Beacons are lost and roam is off to report link down */
11445 ret
= dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
, sizeof(bcn_timeout
), NULL
, 0,
11448 /* Setup assoc_retry_max count to reconnect target AP in dongle */
11449 ret
= dhd_iovar(dhd
, 0, "assoc_retry_max", (char *)&retry_max
, sizeof(retry_max
), NULL
, 0,
11452 #if defined(AP) && !defined(WLP2P)
11453 ret
= dhd_iovar(dhd
, 0, "apsta", (char *)&apsta
, sizeof(apsta
), NULL
, 0, TRUE
);
11455 #endif /* defined(AP) && !defined(WLP2P) */
11457 #ifdef MIMO_ANT_SETTING
11458 dhd_sel_ant_from_file(dhd
);
11459 #endif /* MIMO_ANT_SETTING */
11461 #if defined(SOFTAP)
11462 if (ap_fw_loaded
== TRUE
) {
11463 dhd_wl_ioctl_cmd(dhd
, WLC_SET_DTIMPRD
, (char *)&dtim
, sizeof(dtim
), TRUE
, 0);
11467 #if defined(KEEP_ALIVE)
11469 /* Set Keep Alive : be sure to use FW with -keepalive */
11472 #if defined(SOFTAP)
11473 if (ap_fw_loaded
== FALSE
)
11475 if (!(dhd
->op_mode
&
11476 (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))) {
11477 if ((res
= dhd_keep_alive_onoff(dhd
)) < 0)
11478 DHD_ERROR(("%s set keeplive failed %d\n",
11479 __FUNCTION__
, res
));
11482 #endif /* defined(KEEP_ALIVE) */
11485 ret
= dhd_iovar(dhd
, 0, "txbf", (char *)&txbf
, sizeof(txbf
), NULL
, 0, TRUE
);
11487 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__
, ret
));
11489 #endif /* USE_WL_TXBF */
11491 ret
= dhd_iovar(dhd
, 0, "scancache", (char *)&scancache_enab
, sizeof(scancache_enab
), NULL
,
11494 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__
, ret
));
11497 #ifdef DISABLE_TXBFR
11498 ret
= dhd_iovar(dhd
, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap
, sizeof(txbf_bfr_cap
), NULL
,
11501 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__
, ret
));
11503 #endif /* DISABLE_TXBFR */
11505 #ifdef USE_WFA_CERT_CONF
11506 #ifdef USE_WL_FRAMEBURST
11507 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_FRAMEBURST
, &frameburst
) == BCME_OK
) {
11508 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__
, frameburst
));
11510 #endif /* USE_WL_FRAMEBURST */
11511 g_frameburst
= frameburst
;
11512 #endif /* USE_WFA_CERT_CONF */
11513 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11514 /* Disable Framebursting for SofAP */
11515 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
11518 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11519 /* Set frameburst to value */
11520 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_FAKEFRAG
, (char *)&frameburst
,
11521 sizeof(frameburst
), TRUE
, 0)) < 0) {
11522 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__
, ret
));
11524 #ifdef DHD_SET_FW_HIGHSPEED
11525 /* Set ack_ratio */
11526 ret
= dhd_iovar(dhd
, 0, "ack_ratio", (char *)&ack_ratio
, sizeof(ack_ratio
), NULL
, 0, TRUE
);
11528 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__
, ret
));
11531 /* Set ack_ratio_depth */
11532 ret
= dhd_iovar(dhd
, 0, "ack_ratio_depth", (char *)&ack_ratio_depth
,
11533 sizeof(ack_ratio_depth
), NULL
, 0, TRUE
);
11535 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__
, ret
));
11537 #endif /* DHD_SET_FW_HIGHSPEED */
11539 iov_buf
= (char*)MALLOC(dhd
->osh
, WLC_IOCTL_SMLEN
);
11540 if (iov_buf
== NULL
) {
11541 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN
));
11547 /* Apply AIBSS configurations */
11548 if ((ret
= dhd_preinit_aibss_ioctls(dhd
, iov_buf
)) != BCME_OK
) {
11549 DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
11550 __FUNCTION__
, ret
));
11553 #endif /* WLAIBSS */
11555 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
11556 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
11557 /* Set ampdu ba wsize to 64 or 16 */
11558 #ifdef CUSTOM_AMPDU_BA_WSIZE
11559 ampdu_ba_wsize
= CUSTOM_AMPDU_BA_WSIZE
;
11561 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
11562 if (dhd
->op_mode
== DHD_FLAG_IBSS_MODE
)
11563 ampdu_ba_wsize
= CUSTOM_IBSS_AMPDU_BA_WSIZE
;
11564 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
11565 if (ampdu_ba_wsize
!= 0) {
11566 ret
= dhd_iovar(dhd
, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize
,
11567 sizeof(ampdu_ba_wsize
), NULL
, 0, TRUE
);
11569 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11570 __FUNCTION__
, ampdu_ba_wsize
, ret
));
11573 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
11575 #if defined(CUSTOM_AMPDU_MPDU)
11576 ampdu_mpdu
= CUSTOM_AMPDU_MPDU
;
11577 if (ampdu_mpdu
!= 0 && (ampdu_mpdu
<= ampdu_ba_wsize
)) {
11578 ret
= dhd_iovar(dhd
, 0, "ampdu_mpdu", (char *)&du_mpdu
, sizeof(ampdu_mpdu
),
11581 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11582 __FUNCTION__
, CUSTOM_AMPDU_MPDU
, ret
));
11585 #endif /* CUSTOM_AMPDU_MPDU */
11587 #if defined(CUSTOM_AMPDU_RELEASE)
11588 ampdu_release
= CUSTOM_AMPDU_RELEASE
;
11589 if (ampdu_release
!= 0 && (ampdu_release
<= ampdu_ba_wsize
)) {
11590 ret
= dhd_iovar(dhd
, 0, "ampdu_release", (char *)&du_release
,
11591 sizeof(ampdu_release
), NULL
, 0, TRUE
);
11593 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11594 __FUNCTION__
, CUSTOM_AMPDU_RELEASE
, ret
));
11597 #endif /* CUSTOM_AMPDU_RELEASE */
11599 #if defined(CUSTOM_AMSDU_AGGSF)
11600 amsdu_aggsf
= CUSTOM_AMSDU_AGGSF
;
11601 if (amsdu_aggsf
!= 0) {
11602 ret
= dhd_iovar(dhd
, 0, "amsdu_aggsf", (char *)&amsdu_aggsf
, sizeof(amsdu_aggsf
),
11605 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11606 __FUNCTION__
, CUSTOM_AMSDU_AGGSF
, ret
));
11609 #endif /* CUSTOM_AMSDU_AGGSF */
11611 #if defined(BCMSUP_4WAY_HANDSHAKE)
11612 /* Read 4-way handshake requirements */
11613 if (dhd_use_idsup
== 1) {
11614 ret
= dhd_iovar(dhd
, 0, "sup_wpa", (char *)&sup_wpa
, sizeof(sup_wpa
),
11615 (char *)&iovbuf
, sizeof(iovbuf
), FALSE
);
11616 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
11617 * in-dongle supplicant.
11619 if (ret
>= 0 || ret
== BCME_NOTREADY
)
11620 dhd
->fw_4way_handshake
= TRUE
;
11621 DHD_TRACE(("4-way handshake mode is: %d\n", dhd
->fw_4way_handshake
));
11623 #endif /* BCMSUP_4WAY_HANDSHAKE */
11624 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11625 ret
= dhd_iovar(dhd
, 0, "vht_features", NULL
, 0,
11626 (char *)&vht_features
, sizeof(vht_features
), FALSE
);
11628 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__
, ret
));
11631 #ifdef SUPPORT_2G_VHT
11632 vht_features
|= 0x3; /* 2G support */
11633 #endif /* SUPPORT_2G_VHT */
11634 #ifdef SUPPORT_5G_1024QAM_VHT
11635 vht_features
|= 0x6; /* 5G 1024 QAM support */
11636 #endif /* SUPPORT_5G_1024QAM_VHT */
11638 if (vht_features
) {
11639 ret
= dhd_iovar(dhd
, 0, "vht_features", (char *)&vht_features
, sizeof(vht_features
),
11642 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__
, ret
));
11644 if (ret
== BCME_NOTDOWN
) {
11646 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
11647 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11648 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11649 " vht_features = 0x%x\n",
11650 __FUNCTION__
, ret
, vht_features
));
11652 ret
= dhd_iovar(dhd
, 0, "vht_features", (char *)&vht_features
,
11653 sizeof(vht_features
), NULL
, 0, TRUE
);
11655 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__
, ret
));
11659 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11660 #ifdef DISABLE_11N_PROPRIETARY_RATES
11661 ret
= dhd_iovar(dhd
, 0, "ht_features", (char *)&ht_features
, sizeof(ht_features
), NULL
, 0,
11664 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__
, ret
));
11666 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11667 #ifdef DHD_DISABLE_VHTMODE
11668 dhd_disable_vhtmode(dhd
);
11669 #endif /* DHD_DISABLE_VHTMODE */
11671 ret
= dhd_iovar(dhd
, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4
, sizeof(buf_key_b4_m4
),
11674 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__
, ret
));
11676 #ifdef SUPPORT_SET_CAC
11677 ret
= dhd_iovar(dhd
, 0, "cac", (char *)&cac
, sizeof(cac
), NULL
, 0, TRUE
);
11679 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__
, cac
, ret
));
11681 #endif /* SUPPORT_SET_CAC */
11683 /* Get the required details from dongle during preinit ioctl */
11684 dhd_ulp_preinit(dhd
);
11685 #endif /* DHD_ULP */
11687 /* Read event_msgs mask */
11688 ret
= dhd_iovar(dhd
, 0, "event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
,
11689 sizeof(iovbuf
), FALSE
);
11691 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__
, ret
));
11694 bcopy(iovbuf
, eventmask
, WL_EVENTING_MASK_LEN
);
11696 /* Setup event_msgs */
11697 setbit(eventmask
, WLC_E_SET_SSID
);
11698 setbit(eventmask
, WLC_E_PRUNE
);
11699 setbit(eventmask
, WLC_E_AUTH
);
11700 setbit(eventmask
, WLC_E_AUTH_IND
);
11701 setbit(eventmask
, WLC_E_ASSOC
);
11702 setbit(eventmask
, WLC_E_REASSOC
);
11703 setbit(eventmask
, WLC_E_REASSOC_IND
);
11704 if (!(dhd
->op_mode
& DHD_FLAG_IBSS_MODE
))
11705 setbit(eventmask
, WLC_E_DEAUTH
);
11706 setbit(eventmask
, WLC_E_DEAUTH_IND
);
11707 setbit(eventmask
, WLC_E_DISASSOC_IND
);
11708 setbit(eventmask
, WLC_E_DISASSOC
);
11709 setbit(eventmask
, WLC_E_JOIN
);
11710 setbit(eventmask
, WLC_E_START
);
11711 setbit(eventmask
, WLC_E_ASSOC_IND
);
11712 setbit(eventmask
, WLC_E_PSK_SUP
);
11713 setbit(eventmask
, WLC_E_LINK
);
11714 setbit(eventmask
, WLC_E_MIC_ERROR
);
11715 setbit(eventmask
, WLC_E_ASSOC_REQ_IE
);
11716 setbit(eventmask
, WLC_E_ASSOC_RESP_IE
);
11717 #ifdef LIMIT_BORROW
11718 setbit(eventmask
, WLC_E_ALLOW_CREDIT_BORROW
);
11720 #ifndef WL_CFG80211
11721 setbit(eventmask
, WLC_E_PMKID_CACHE
);
11722 setbit(eventmask
, WLC_E_TXFAIL
);
11724 setbit(eventmask
, WLC_E_JOIN_START
);
11725 setbit(eventmask
, WLC_E_SCAN_COMPLETE
);
11727 setbit(eventmask
, WLC_E_SCAN_CONFIRM_IND
);
11730 setbit(eventmask
, WLC_E_PFN_NET_FOUND
);
11731 setbit(eventmask
, WLC_E_PFN_BEST_BATCHING
);
11732 setbit(eventmask
, WLC_E_PFN_BSSID_NET_FOUND
);
11733 setbit(eventmask
, WLC_E_PFN_BSSID_NET_LOST
);
11734 #endif /* PNO_SUPPORT */
11735 /* enable dongle roaming event */
11737 #if !defined(ROAM_EVT_DISABLE)
11738 setbit(eventmask
, WLC_E_ROAM
);
11739 #endif /* !ROAM_EVT_DISABLE */
11740 setbit(eventmask
, WLC_E_BSSID
);
11741 #endif /* WL_CFG80211 */
11743 setbit(eventmask
, WLC_E_ADDTS_IND
);
11744 setbit(eventmask
, WLC_E_DELTS_IND
);
11745 #endif /* BCMCCX */
11747 setbit(eventmask
, WLC_E_TDLS_PEER_EVENT
);
11748 #endif /* WLTDLS */
11750 setbit(eventmask
, WLC_E_PROXD
);
11751 #endif /* RTT_SUPPORT */
11753 setbit(eventmask
, WLC_E_ESCAN_RESULT
);
11754 setbit(eventmask
, WLC_E_AP_STARTED
);
11755 setbit(eventmask
, WLC_E_ACTION_FRAME_RX
);
11756 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
11757 setbit(eventmask
, WLC_E_P2P_DISC_LISTEN_COMPLETE
);
11759 #endif /* WL_CFG80211 */
11761 setbit(eventmask
, WLC_E_AIBSS_TXFAIL
);
11762 #endif /* WLAIBSS */
11764 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11765 if (dhd_logtrace_from_file(dhd
)) {
11766 setbit(eventmask
, WLC_E_TRACE
);
11768 clrbit(eventmask
, WLC_E_TRACE
);
11770 #elif defined(SHOW_LOGTRACE)
11771 setbit(eventmask
, WLC_E_TRACE
);
11773 clrbit(eventmask
, WLC_E_TRACE
);
11774 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11776 setbit(eventmask
, WLC_E_CSA_COMPLETE_IND
);
11777 #ifdef CUSTOM_EVENT_PM_WAKE
11778 setbit(eventmask
, WLC_E_EXCESS_PM_WAKE_EVENT
);
11779 #endif /* CUSTOM_EVENT_PM_WAKE */
11780 #ifdef DHD_LOSSLESS_ROAMING
11781 setbit(eventmask
, WLC_E_ROAM_PREP
);
11784 setbit(eventmask
, WLC_E_NAN
);
11785 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11786 dhd_update_flow_prio_map(dhd
, DHD_FLOW_PRIO_LLR_MAP
);
11787 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11789 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11790 dhd_update_flow_prio_map(dhd
, DHD_FLOW_PRIO_LLR_MAP
);
11791 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11793 /* Write updated Event mask */
11794 ret
= dhd_iovar(dhd
, 0, "event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, NULL
, 0, TRUE
);
11796 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__
, ret
));
11800 /* make up event mask ext message iovar for event larger than 128 */
11801 msglen
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
+ EVENTMSGS_EXT_STRUCT_SIZE
;
11802 eventmask_msg
= (eventmsgs_ext_t
*)MALLOC(dhd
->osh
, msglen
);
11803 if (eventmask_msg
== NULL
) {
11804 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen
));
11808 bzero(eventmask_msg
, msglen
);
11809 eventmask_msg
->ver
= EVENTMSGS_VER
;
11810 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
11812 /* Read event_msgs_ext mask */
11813 ret2
= dhd_iovar(dhd
, 0, "event_msgs_ext", (char *)eventmask_msg
, msglen
, iov_buf
,
11814 WLC_IOCTL_SMLEN
, FALSE
);
11816 if (ret2
== 0) { /* event_msgs_ext must be supported */
11817 bcopy(iov_buf
, eventmask_msg
, msglen
);
11818 #ifdef RSSI_MONITOR_SUPPORT
11819 setbit(eventmask_msg
->mask
, WLC_E_RSSI_LQM
);
11820 #endif /* RSSI_MONITOR_SUPPORT */
11821 #ifdef GSCAN_SUPPORT
11822 setbit(eventmask_msg
->mask
, WLC_E_PFN_GSCAN_FULL_RESULT
);
11823 setbit(eventmask_msg
->mask
, WLC_E_PFN_SCAN_COMPLETE
);
11824 setbit(eventmask_msg
->mask
, WLC_E_PFN_SSID_EXT
);
11825 setbit(eventmask_msg
->mask
, WLC_E_ROAM_EXP_EVENT
);
11826 #endif /* GSCAN_SUPPORT */
11827 setbit(eventmask_msg
->mask
, WLC_E_RSSI_LQM
);
11828 #ifdef BT_WIFI_HANDOVER
11829 setbit(eventmask_msg
->mask
, WLC_E_BT_WIFI_HANDOVER_REQ
);
11830 #endif /* BT_WIFI_HANDOVER */
11832 setbit(eventmask_msg
->mask
, WLC_E_ROAM_PREP
);
11833 #endif /* DBG_PKT_MON */
11835 setbit(eventmask_msg
->mask
, WLC_E_ULP
);
11838 setbit(eventmask_msg
->mask
, WLC_E_NATOE_NFCT
);
11839 #endif /* WL_NATOE */
11841 setbit(eventmask_msg
->mask
, WLC_E_SLOTTED_BSS_PEER_OP
);
11842 #endif /* WL_NAN */
11843 #ifdef SUPPORT_EVT_SDB_LOG
11844 setbit(eventmask_msg
->mask
, WLC_E_SDB_TRANSITION
);
11845 #endif /* SUPPORT_EVT_SDB_LOG */
11847 setbit(eventmask_msg
->mask
, WLC_E_BCNRECV_ABORTED
);
11848 #endif /* WL_BCNRECV */
11849 /* Write updated Event mask */
11850 eventmask_msg
->ver
= EVENTMSGS_VER
;
11851 eventmask_msg
->command
= EVENTMSGS_SET_MASK
;
11852 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
11853 ret
= dhd_iovar(dhd
, 0, "event_msgs_ext", (char *)eventmask_msg
, msglen
, NULL
, 0,
11856 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__
, ret
));
11859 } else if (ret2
== BCME_UNSUPPORTED
|| ret2
== BCME_VERSION
) {
11860 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11861 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
11862 __FUNCTION__
, ret2
));
11864 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__
, ret2
));
11869 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11870 /* Enabling event log trace for EAP events */
11871 el_tag
= (wl_el_tag_params_t
*)MALLOC(dhd
->osh
, sizeof(wl_el_tag_params_t
));
11872 if (el_tag
== NULL
) {
11873 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
11874 (int)sizeof(wl_el_tag_params_t
)));
11878 el_tag
->tag
= EVENT_LOG_TAG_4WAYHANDSHAKE
;
11880 el_tag
->flags
= EVENT_LOG_TAG_FLAG_LOG
;
11881 ret
= dhd_iovar(dhd
, 0, "event_log_tag_control", (char *)el_tag
, sizeof(*el_tag
), NULL
, 0,
11883 #endif /* DHD_8021X_DUMP */
11885 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_CHANNEL_TIME
, (char *)&scan_assoc_time
,
11886 sizeof(scan_assoc_time
), TRUE
, 0);
11887 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_UNASSOC_TIME
, (char *)&scan_unassoc_time
,
11888 sizeof(scan_unassoc_time
), TRUE
, 0);
11889 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_PASSIVE_TIME
, (char *)&scan_passive_time
,
11890 sizeof(scan_passive_time
), TRUE
, 0);
11892 #ifdef ARP_OFFLOAD_SUPPORT
11893 /* Set and enable ARP offload feature for STA only */
11894 #if defined(SOFTAP)
11895 if (arpoe
&& !ap_fw_loaded
) {
11899 dhd_arp_offload_enable(dhd
, TRUE
);
11900 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
11902 dhd_arp_offload_enable(dhd
, FALSE
);
11903 dhd_arp_offload_set(dhd
, 0);
11905 dhd_arp_enable
= arpoe
;
11906 #endif /* ARP_OFFLOAD_SUPPORT */
11908 #ifdef PKT_FILTER_SUPPORT
11909 /* Setup default defintions for pktfilter , enable in suspend */
11910 dhd
->pktfilter_count
= 6;
11911 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = NULL
;
11912 if (!FW_SUPPORTED(dhd
, pf6
)) {
11913 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = NULL
;
11914 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
11916 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11917 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = DISCARD_IPV4_MCAST
;
11918 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = DISCARD_IPV6_MCAST
;
11920 /* apply APP pktfilter */
11921 dhd
->pktfilter
[DHD_ARP_FILTER_NUM
] = "105 0 0 12 0xFFFF 0x0806";
11923 #ifdef BLOCK_IPV6_PACKET
11924 /* Setup filter to allow only IPv4 unicast frames */
11925 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 "
11926 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
11928 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR
;
11930 /* Setup filter to allow only unicast */
11931 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0x01 0x00";
11932 #endif /* BLOCK_IPV6_PACKET */
11934 #ifdef PASS_IPV4_SUSPEND
11935 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = "104 0 0 0 0xFFFFFF 0x01005E";
11937 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
11938 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = NULL
;
11939 #endif /* PASS_IPV4_SUSPEND */
11940 if (FW_SUPPORTED(dhd
, pf6
)) {
11941 /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */
11942 dhd
->pktfilter
[DHD_IP4BCAST_DROP_FILTER_NUM
] = DISCARD_IPV4_BCAST
;
11943 /* Immediately pkt filter TYPE 6 Dicard Cisco STP packet */
11944 dhd
->pktfilter
[DHD_LLC_STP_DROP_FILTER_NUM
] = DISCARD_LLC_STP
;
11945 /* Immediately pkt filter TYPE 6 Dicard Cisco XID protocol */
11946 dhd
->pktfilter
[DHD_LLC_XID_DROP_FILTER_NUM
] = DISCARD_LLC_XID
;
11947 dhd
->pktfilter_count
= 10;
11950 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11951 dhd
->pktfilter_count
= 4;
11952 /* Setup filter to block broadcast and NAT Keepalive packets */
11953 /* discard all broadcast packets */
11954 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0xffffff 0xffffff";
11955 /* discard NAT Keepalive packets */
11956 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = "102 0 0 36 0xffffffff 0x11940009";
11957 /* discard NAT Keepalive packets */
11958 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = "104 0 0 38 0xffffffff 0x11940009";
11959 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
11960 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11962 #if defined(SOFTAP)
11963 if (ap_fw_loaded
) {
11964 dhd_enable_packet_filter(0, dhd
);
11966 #endif /* defined(SOFTAP) */
11967 dhd_set_packet_filter(dhd
);
11968 #endif /* PKT_FILTER_SUPPORT */
11970 ret
= dhd_iovar(dhd
, 0, "nmode", (char *)&nmode
, sizeof(nmode
), NULL
, 0, TRUE
);
11972 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__
, ret
));
11973 #endif /* DISABLE_11N */
11975 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11976 ret
= dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
, sizeof(bcn_li_bcn
), NULL
, 0,
11978 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11979 #ifdef AMPDU_VO_ENABLE
11980 tid
.tid
= PRIO_8021D_VO
; /* Enable TID(6) for voice */
11982 ret
= dhd_iovar(dhd
, 0, "ampdu_tid", (char *)&tid
, sizeof(tid
), NULL
, 0, TRUE
);
11984 tid
.tid
= PRIO_8021D_NC
; /* Enable TID(7) for voice */
11986 ret
= dhd_iovar(dhd
, 0, "ampdu_tid", (char *)&tid
, sizeof(tid
), NULL
, 0, TRUE
);
11988 /* query for 'clmver' to get clm version info from firmware */
11989 memset(buf
, 0, sizeof(buf
));
11990 ret
= dhd_iovar(dhd
, 0, "clmver", NULL
, 0, buf
, sizeof(buf
), FALSE
);
11992 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
11994 char *ver_temp_buf
= NULL
;
11996 if ((ver_temp_buf
= bcmstrstr(buf
, "Data:")) == NULL
) {
11997 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11999 ptr
= (ver_temp_buf
+ strlen("Data:"));
12000 if ((ver_temp_buf
= bcmstrtok(&ptr
, "\n", 0)) == NULL
) {
12001 DHD_ERROR(("Couldn't find New line character\n"));
12003 memset(clm_version
, 0, CLM_VER_STR_LEN
);
12004 strncpy(clm_version
, ver_temp_buf
,
12005 MIN(strlen(ver_temp_buf
) + 1, CLM_VER_STR_LEN
- 1));
12006 DHD_INFO(("CLM version = %s\n", clm_version
));
12010 #if defined(CUSTOMER_HW4_DEBUG)
12011 if ((ver_temp_buf
= bcmstrstr(ptr
, "Customization:")) == NULL
) {
12012 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
12015 ptr
= (ver_temp_buf
+ strlen("Customization:"));
12016 if ((ver_temp_buf
= bcmstrtok(&ptr
, "(\n", &tokenlim
)) == NULL
) {
12017 DHD_ERROR(("Couldn't find project blob version"
12018 "or New line character\n"));
12019 } else if (tokenlim
== '(') {
12020 snprintf(clm_version
,
12021 CLM_VER_STR_LEN
- 1, "%s, Blob ver = Major : %s minor : ",
12022 clm_version
, ver_temp_buf
);
12023 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version
));
12024 if ((ver_temp_buf
= bcmstrtok(&ptr
, "\n", &tokenlim
)) == NULL
) {
12025 DHD_ERROR(("Couldn't find New line character\n"));
12027 snprintf(clm_version
,
12028 strlen(clm_version
) + strlen(ver_temp_buf
),
12029 "%s%s", clm_version
, ver_temp_buf
);
12030 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
12034 } else if (tokenlim
== '\n') {
12035 snprintf(clm_version
,
12036 strlen(clm_version
) + strlen(", Blob ver = Major : ") + 1,
12037 "%s, Blob ver = Major : ", clm_version
);
12038 snprintf(clm_version
,
12039 strlen(clm_version
) + strlen(ver_temp_buf
) + 1,
12040 "%s%s", clm_version
, ver_temp_buf
);
12041 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version
));
12044 #endif /* CUSTOMER_HW4_DEBUG */
12045 if (strlen(clm_version
)) {
12046 DHD_ERROR(("CLM version = %s\n", clm_version
));
12048 DHD_ERROR(("Couldn't find CLM version!\n"));
12052 /* query for 'ver' to get version info from firmware */
12053 memset(buf
, 0, sizeof(buf
));
12055 ret
= dhd_iovar(dhd
, 0, "ver", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
12057 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
12059 bcmstrtok(&ptr
, "\n", 0);
12060 /* Print fw version info */
12061 DHD_ERROR(("Firmware version = %s\n", buf
));
12062 strncpy(fw_version
, buf
, FW_VER_STR_LEN
);
12063 fw_version
[FW_VER_STR_LEN
-1] = '\0';
12064 #if defined(BCMSDIO) || defined(BCMPCIE)
12065 dhd_set_version_info(dhd
, buf
);
12066 #endif /* BCMSDIO || BCMPCIE */
12067 #ifdef WRITE_WLANINFO
12068 sec_save_wlinfo(buf
, EPI_VERSION_STR
, dhd
->info
->nv_path
, clm_version
);
12069 #endif /* WRITE_WLANINFO */
12071 #ifdef GEN_SOFTAP_INFO_FILE
12072 sec_save_softap_info();
12073 #endif /* GEN_SOFTAP_INFO_FILE */
12075 #if defined(BCMSDIO)
12076 dhd_txglom_enable(dhd
, TRUE
);
12077 #endif /* defined(BCMSDIO) */
12079 #if defined(BCMSDIO)
12080 #ifdef PROP_TXSTATUS
12081 if (disable_proptx
||
12082 #ifdef PROP_TXSTATUS_VSDB
12083 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
12084 (dhd
->op_mode
!= DHD_FLAG_HOSTAP_MODE
&&
12085 dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
) ||
12086 #endif /* PROP_TXSTATUS_VSDB */
12088 wlfc_enable
= FALSE
;
12091 #if defined(PROP_TXSTATUS)
12092 #ifdef USE_WFA_CERT_CONF
12093 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_PROPTX
, &proptx
) == BCME_OK
) {
12094 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__
, proptx
));
12095 wlfc_enable
= proptx
;
12097 #endif /* USE_WFA_CERT_CONF */
12098 #endif /* PROP_TXSTATUS */
12100 #ifndef DISABLE_11N
12101 ret2
= dhd_iovar(dhd
, 0, "ampdu_hostreorder", (char *)&hostreorder
, sizeof(hostreorder
),
12104 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__
, ret2
));
12105 if (ret2
!= BCME_UNSUPPORTED
)
12108 if (ret
== BCME_NOTDOWN
) {
12110 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
,
12111 sizeof(wl_down
), TRUE
, 0);
12112 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
12113 __FUNCTION__
, ret2
, hostreorder
));
12115 ret2
= dhd_iovar(dhd
, 0, "ampdu_hostreorder", (char *)&hostreorder
,
12116 sizeof(hostreorder
), NULL
, 0, TRUE
);
12117 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__
, ret2
));
12118 if (ret2
!= BCME_UNSUPPORTED
)
12121 if (ret2
!= BCME_OK
)
12124 #endif /* DISABLE_11N */
12127 dhd_wlfc_init(dhd
);
12128 #ifndef DISABLE_11N
12129 else if (hostreorder
)
12130 dhd_wlfc_hostreorder_init(dhd
);
12131 #endif /* DISABLE_11N */
12133 #endif /* PROP_TXSTATUS */
12134 #endif /* BCMSDIO || BCMBUS */
12135 #ifndef PCIE_FULL_DONGLE
12136 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
12137 if (FW_SUPPORTED(dhd
, ap
)) {
12138 wl_ap_isolate
= AP_ISOLATE_SENDUP_ALL
;
12139 ret
= dhd_iovar(dhd
, 0, "ap_isolate", (char *)&wl_ap_isolate
, sizeof(wl_ap_isolate
),
12142 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
12144 #endif /* PCIE_FULL_DONGLE */
12146 if (!dhd
->pno_state
) {
12151 if (!dhd
->rtt_state
) {
12152 ret
= dhd_rtt_init(dhd
);
12154 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__
));
12159 /* Failure to configure filter IE is not a fatal error, ignore it. */
12160 if (!(dhd
->op_mode
& (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
)))
12161 dhd_read_from_file(dhd
);
12162 #endif /* FILTER_IE */
12164 dhd_interworking_enable(dhd
);
12167 #ifdef NDO_CONFIG_SUPPORT
12168 dhd
->ndo_enable
= FALSE
;
12169 dhd
->ndo_host_ip_overflow
= FALSE
;
12170 dhd
->ndo_max_host_ip
= NDO_MAX_HOST_IP_ENTRIES
;
12171 #endif /* NDO_CONFIG_SUPPORT */
12173 /* ND offload version supported */
12174 dhd
->ndo_version
= dhd_ndo_get_version(dhd
);
12175 if (dhd
->ndo_version
> 0) {
12176 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__
, dhd
->ndo_version
));
12178 #ifdef NDO_CONFIG_SUPPORT
12179 /* enable Unsolicited NA filter */
12180 ret
= dhd_ndo_unsolicited_na_filter_enable(dhd
, 1);
12182 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__
));
12184 #endif /* NDO_CONFIG_SUPPORT */
12187 /* check dongle supports wbtext (product policy) or not */
12188 dhd
->wbtext_support
= FALSE
;
12189 if (dhd_wl_ioctl_get_intiovar(dhd
, "wnm_bsstrans_resp", &wnm_bsstrans_resp
,
12190 WLC_GET_VAR
, FALSE
, 0) != BCME_OK
) {
12191 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12193 dhd
->wbtext_policy
= wnm_bsstrans_resp
;
12194 if (dhd
->wbtext_policy
== WL_BSSTRANS_POLICY_PRODUCT_WBTEXT
) {
12195 dhd
->wbtext_support
= TRUE
;
12198 /* driver can turn off wbtext feature through makefile */
12199 if (dhd
->wbtext_support
) {
12200 if (dhd_wl_ioctl_set_intiovar(dhd
, "wnm_bsstrans_resp",
12201 WL_BSSTRANS_POLICY_ROAM_ALWAYS
,
12202 WLC_SET_VAR
, FALSE
, 0) != BCME_OK
) {
12203 DHD_ERROR(("failed to disable WBTEXT\n"));
12206 #endif /* !WBTEXT */
12208 #if defined(DHD_NON_DMA_M2M_CORRUPTION)
12209 /* check pcie non dma loopback */
12210 if (dhd
->op_mode
== DHD_FLAG_MFG_MODE
) {
12211 memset(&pcie_dmaxfer_lpbk
, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t
));
12212 pcie_dmaxfer_lpbk
.u
.length
= PCIE_DMAXFER_LPBK_LENGTH
;
12213 pcie_dmaxfer_lpbk
.lpbkmode
= M2M_NON_DMA_LPBK
;
12214 pcie_dmaxfer_lpbk
.wait
= TRUE
;
12216 if ((ret
= dhd_bus_iovar_op(dhd
, "pcie_dmaxfer", NULL
, 0,
12217 (char *)&pcie_dmaxfer_lpbk
, sizeof(dhd_pcie_dmaxfer_lpbk_t
),
12219 DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d\n",
12224 if (pcie_dmaxfer_lpbk
.u
.status
!= BCME_OK
) {
12225 DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d"
12226 " Status : %d\n", ret
, pcie_dmaxfer_lpbk
.u
.status
));
12231 DHD_ERROR(("successful to check PCIe Non DMA Loopback Test\n"));
12234 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
12236 /* WNM capabilities */
12239 | WL_WNM_BSSTRANS
| WL_WNM_NOTIF
12242 | WL_WNM_BSSTRANS
| WL_WNM_MAXIDLE
12245 if (dhd_iovar(dhd
, 0, "wnm", (char *)&wnm_cap
, sizeof(wnm_cap
), NULL
, 0, TRUE
) < 0) {
12246 DHD_ERROR(("failed to set WNM capabilities\n"));
12249 if (FW_SUPPORTED(dhd
, ecounters
) && enable_ecounter
) {
12250 if (dhd_start_ecounters(dhd
) != BCME_OK
) {
12251 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__
));
12252 } else if (dhd_start_event_ecounters(dhd
) != BCME_OK
) {
12253 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__
));
12258 /* store the preserve log set numbers */
12259 if (dhd_get_preserve_log_numbers(dhd
, &dhd
->logset_prsrv_mask
)
12261 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__
));
12264 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
12265 if (dhd_iovar(dhd
, 0, "wnm_btmdelta", (char *)&btmdelta
, sizeof(btmdelta
),
12266 NULL
, 0, TRUE
) < 0) {
12267 DHD_ERROR(("failed to set BTM delta\n"));
12269 #endif /* WBTEXT && WBTEXT_BTMDELTA */
12272 if (FW_SUPPORTED(dhd
, monitor
)) {
12273 dhd
->monitor_enable
= TRUE
;
12274 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__
));
12276 dhd
->monitor_enable
= FALSE
;
12277 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__
));
12279 #endif /* WL_MONITOR */
12283 if (eventmask_msg
) {
12284 MFREE(dhd
->osh
, eventmask_msg
, msglen
);
12285 eventmask_msg
= NULL
;
12288 MFREE(dhd
->osh
, iov_buf
, WLC_IOCTL_SMLEN
);
12291 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12293 MFREE(dhd
->osh
, el_tag
, sizeof(wl_el_tag_params_t
));
12296 #endif /* DHD_8021X_DUMP */
12301 dhd_iovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *param_buf
, uint param_len
, char *res_buf
,
12302 uint res_len
, int set
)
12309 if (res_len
> WLC_IOCTL_MAXLEN
|| param_len
> WLC_IOCTL_MAXLEN
)
12310 return BCME_BADARG
;
12312 input_len
= strlen(name
) + 1 + param_len
;
12313 if (input_len
> WLC_IOCTL_MAXLEN
)
12314 return BCME_BADARG
;
12318 if (res_buf
|| res_len
!= 0) {
12319 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__
));
12323 buf
= MALLOCZ(pub
->osh
, input_len
);
12325 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
12329 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
12335 ioc
.cmd
= WLC_SET_VAR
;
12337 ioc
.len
= input_len
;
12340 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12342 if (!res_buf
|| !res_len
) {
12343 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__
));
12348 if (res_len
< input_len
) {
12349 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__
,
12350 res_len
, input_len
));
12351 buf
= MALLOCZ(pub
->osh
, input_len
);
12353 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
12357 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
12363 ioc
.cmd
= WLC_GET_VAR
;
12365 ioc
.len
= input_len
;
12368 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12370 if (ret
== BCME_OK
) {
12371 memcpy(res_buf
, buf
, res_len
);
12374 memset(res_buf
, 0, res_len
);
12375 ret
= bcm_mkiovar(name
, param_buf
, param_len
, res_buf
, res_len
);
12381 ioc
.cmd
= WLC_GET_VAR
;
12386 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12391 MFREE(pub
->osh
, buf
, input_len
);
12398 dhd_getiovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *cmd_buf
,
12399 uint cmd_len
, char **resptr
, uint resp_len
)
12401 int len
= resp_len
;
12403 char *buf
= *resptr
;
12405 if (resp_len
> WLC_IOCTL_MAXLEN
)
12406 return BCME_BADARG
;
12408 memset(buf
, 0, resp_len
);
12410 ret
= bcm_mkiovar(name
, cmd_buf
, cmd_len
, buf
, len
);
12412 return BCME_BUFTOOSHORT
;
12415 memset(&ioc
, 0, sizeof(ioc
));
12417 ioc
.cmd
= WLC_GET_VAR
;
12422 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12427 int dhd_change_mtu(dhd_pub_t
*dhdp
, int new_mtu
, int ifidx
)
12429 struct dhd_info
*dhd
= dhdp
->info
;
12430 struct net_device
*dev
= NULL
;
12432 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
12433 dev
= dhd
->iflist
[ifidx
]->net
;
12436 if (netif_running(dev
)) {
12437 DHD_ERROR(("%s: Must be down to change its MTU", dev
->name
));
12438 return BCME_NOTDOWN
;
12441 #define DHD_MIN_MTU 1500
12442 #define DHD_MAX_MTU 1752
12444 if ((new_mtu
< DHD_MIN_MTU
) || (new_mtu
> DHD_MAX_MTU
)) {
12445 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__
, new_mtu
));
12446 return BCME_BADARG
;
12449 dev
->mtu
= new_mtu
;
12453 #ifdef ARP_OFFLOAD_SUPPORT
12454 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
12456 aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
)
12458 u32 ipv4_buf
[MAX_IPV4_ENTRIES
]; /* temp save for AOE host_ip table */
12462 bzero(ipv4_buf
, sizeof(ipv4_buf
));
12464 /* display what we've got */
12465 ret
= dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
12466 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__
));
12468 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
12470 /* now we saved hoste_ip table, clr it in the dongle AOE */
12471 dhd_aoe_hostip_clr(dhd_pub
, idx
);
12474 DHD_ERROR(("%s failed\n", __FUNCTION__
));
12478 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
12479 if (add
&& (ipv4_buf
[i
] == 0)) {
12481 add
= FALSE
; /* added ipa to local table */
12482 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12484 } else if (ipv4_buf
[i
] == ipa
) {
12486 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12487 __FUNCTION__
, ipa
, i
));
12490 if (ipv4_buf
[i
] != 0) {
12491 /* add back host_ip entries from our local cache */
12492 dhd_arp_offload_add_ip(dhd_pub
, ipv4_buf
[i
], idx
);
12493 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12494 __FUNCTION__
, ipv4_buf
[i
], i
));
12498 /* see the resulting hostip table */
12499 dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
12500 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__
));
12501 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
12506 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
12507 * whenever there is an event related to an IP address.
12508 * ptr : kernel provided pointer to IP address that has changed
12510 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
12511 unsigned long event
,
12514 struct in_ifaddr
*ifa
= (struct in_ifaddr
*)ptr
;
12517 dhd_pub_t
*dhd_pub
;
12520 if (!dhd_arp_enable
)
12521 return NOTIFY_DONE
;
12522 if (!ifa
|| !(ifa
->ifa_dev
->dev
))
12523 return NOTIFY_DONE
;
12525 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12526 /* Filter notifications meant for non Broadcom devices */
12527 if ((ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_pri
) &&
12528 (ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_virt
)) {
12529 #if defined(WL_ENABLE_P2P_IF)
12530 if (!wl_cfgp2p_is_ifops(ifa
->ifa_dev
->dev
->netdev_ops
))
12531 #endif /* WL_ENABLE_P2P_IF */
12532 return NOTIFY_DONE
;
12534 #endif /* LINUX_VERSION_CODE */
12536 dhd
= DHD_DEV_INFO(ifa
->ifa_dev
->dev
);
12538 return NOTIFY_DONE
;
12540 dhd_pub
= &dhd
->pub
;
12542 if (dhd_pub
->arp_version
== 1) {
12545 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
12546 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
== ifa
->ifa_dev
->dev
)
12549 if (idx
< DHD_MAX_IFS
)
12550 DHD_TRACE(("ifidx : %p %s %d\n", dhd
->iflist
[idx
]->net
,
12551 dhd
->iflist
[idx
]->name
, dhd
->iflist
[idx
]->idx
));
12553 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa
->ifa_label
));
12560 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
12561 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
12564 * Skip if Bus is not in a state to transport the IOVAR
12565 * (or) the Dongle is not ready.
12567 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd
->pub
) ||
12568 dhd
->pub
.busstate
== DHD_BUS_LOAD
) {
12569 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
12570 __FUNCTION__
, dhd
->pub
.busstate
));
12571 if (dhd
->pend_ipaddr
) {
12572 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
12573 __FUNCTION__
, dhd
->pend_ipaddr
));
12575 dhd
->pend_ipaddr
= ifa
->ifa_address
;
12579 #ifdef AOE_IP_ALIAS_SUPPORT
12580 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
12582 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, TRUE
, idx
);
12583 #endif /* AOE_IP_ALIAS_SUPPORT */
12587 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
12588 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
12589 dhd
->pend_ipaddr
= 0;
12590 #ifdef AOE_IP_ALIAS_SUPPORT
12591 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
12593 if ((dhd_pub
->op_mode
& DHD_FLAG_HOSTAP_MODE
) ||
12594 (ifa
->ifa_dev
->dev
!= dhd_linux_get_primary_netdev(dhd_pub
))) {
12595 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, FALSE
, idx
);
12597 #endif /* AOE_IP_ALIAS_SUPPORT */
12599 dhd_aoe_hostip_clr(&dhd
->pub
, idx
);
12600 dhd_aoe_arp_clr(&dhd
->pub
, idx
);
12605 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
12606 __func__
, ifa
->ifa_label
, event
));
12609 return NOTIFY_DONE
;
12611 #endif /* ARP_OFFLOAD_SUPPORT */
12613 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12614 /* Neighbor Discovery Offload: defered handler */
12616 dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
)
12618 struct ipv6_work_info_t
*ndo_work
= (struct ipv6_work_info_t
*)event_data
;
12619 dhd_info_t
*dhd
= (dhd_info_t
*)dhd_info
;
12624 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__
));
12629 if (event
!= DHD_WQ_WORK_IPV6_NDO
) {
12630 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__
));
12635 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__
));
12639 switch (ndo_work
->event
) {
12641 #ifndef NDO_CONFIG_SUPPORT
12642 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__
));
12643 ret
= dhd_ndo_enable(dhdp
, TRUE
);
12645 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__
, ret
));
12647 #endif /* !NDO_CONFIG_SUPPORT */
12648 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__
));
12649 if (dhdp
->ndo_version
> 0) {
12650 /* inet6 addr notifier called only for unicast address */
12651 ret
= dhd_ndo_add_ip_with_type(dhdp
, &ndo_work
->ipv6_addr
[0],
12652 WL_ND_IPV6_ADDR_TYPE_UNICAST
, ndo_work
->if_idx
);
12654 ret
= dhd_ndo_add_ip(dhdp
, &ndo_work
->ipv6_addr
[0],
12658 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
12659 __FUNCTION__
, ret
));
12663 if (dhdp
->ndo_version
> 0) {
12664 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__
));
12665 ret
= dhd_ndo_remove_ip_by_addr(dhdp
,
12666 &ndo_work
->ipv6_addr
[0], ndo_work
->if_idx
);
12668 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__
));
12669 ret
= dhd_ndo_remove_ip(dhdp
, ndo_work
->if_idx
);
12672 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
12673 __FUNCTION__
, ret
));
12676 #ifdef NDO_CONFIG_SUPPORT
12677 if (dhdp
->ndo_host_ip_overflow
) {
12678 ret
= dhd_dev_ndo_update_inet6addr(
12679 dhd_idx2net(dhdp
, ndo_work
->if_idx
));
12680 if ((ret
< 0) && (ret
!= BCME_NORESOURCE
)) {
12681 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
12682 __FUNCTION__
, ret
));
12686 #else /* !NDO_CONFIG_SUPPORT */
12687 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__
));
12688 ret
= dhd_ndo_enable(dhdp
, FALSE
);
12690 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__
, ret
));
12693 #endif /* NDO_CONFIG_SUPPORT */
12697 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__
));
12702 /* free ndo_work. alloced while scheduling the work */
12708 } /* dhd_init_logstrs_array */
12711 * Neighbor Discovery Offload: Called when an interface
12712 * is assigned with ipv6 address.
12713 * Handles only primary interface
12715 int dhd_inet6addr_notifier_call(struct notifier_block
*this, unsigned long event
, void *ptr
)
12719 struct inet6_ifaddr
*inet6_ifa
= ptr
;
12720 struct ipv6_work_info_t
*ndo_info
;
12723 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12724 /* Filter notifications meant for non Broadcom devices */
12725 if (inet6_ifa
->idev
->dev
->netdev_ops
!= &dhd_ops_pri
) {
12726 return NOTIFY_DONE
;
12728 #endif /* LINUX_VERSION_CODE */
12730 dhd
= DHD_DEV_INFO(inet6_ifa
->idev
->dev
);
12732 return NOTIFY_DONE
;
12736 /* Supports only primary interface */
12737 idx
= dhd_net2idx(dhd
, inet6_ifa
->idev
->dev
);
12739 return NOTIFY_DONE
;
12742 /* FW capability */
12743 if (!FW_SUPPORTED(dhdp
, ndoe
)) {
12744 return NOTIFY_DONE
;
12747 ndo_info
= (struct ipv6_work_info_t
*)kzalloc(sizeof(struct ipv6_work_info_t
), GFP_ATOMIC
);
12749 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__
));
12750 return NOTIFY_DONE
;
12753 /* fill up ndo_info */
12754 ndo_info
->event
= event
;
12755 ndo_info
->if_idx
= idx
;
12756 memcpy(ndo_info
->ipv6_addr
, &inet6_ifa
->addr
, IPV6_ADDR_LEN
);
12758 /* defer the work to thread as it may block kernel */
12759 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)ndo_info
, DHD_WQ_WORK_IPV6_NDO
,
12760 dhd_inet6_work_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
12761 return NOTIFY_DONE
;
12763 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12765 /* Network attach to be invoked from the bus probe handlers */
12767 dhd_attach_net(dhd_pub_t
*dhdp
, bool need_rtnl_lock
)
12769 struct net_device
*primary_ndev
;
12770 BCM_REFERENCE(primary_ndev
);
12772 /* Register primary net device */
12773 if (dhd_register_if(dhdp
, 0, need_rtnl_lock
) != 0) {
12777 #if defined(WL_CFG80211)
12778 primary_ndev
= dhd_linux_get_primary_netdev(dhdp
);
12779 if (wl_cfg80211_net_attach(primary_ndev
) < 0) {
12780 /* fail the init */
12781 dhd_remove_if(dhdp
, 0, TRUE
);
12784 #endif /* WL_CFG80211 */
12789 dhd_register_if(dhd_pub_t
*dhdp
, int ifidx
, bool need_rtnl_lock
)
12791 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
12793 struct net_device
*net
= NULL
;
12795 uint8 temp_addr
[ETHER_ADDR_LEN
] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12797 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
12799 if (dhd
== NULL
|| dhd
->iflist
[ifidx
] == NULL
) {
12800 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__
));
12804 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
12805 ifp
= dhd
->iflist
[ifidx
];
12807 ASSERT(net
&& (ifp
->idx
== ifidx
));
12809 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12810 ASSERT(!net
->open
);
12811 net
->get_stats
= dhd_get_stats
;
12812 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
12813 net
->do_ioctl
= dhd_ioctl_entry_wrapper
;
12814 net
->hard_start_xmit
= dhd_start_xmit_wrapper
;
12816 net
->do_ioctl
= dhd_ioctl_entry
;
12817 net
->hard_start_xmit
= dhd_start_xmit
;
12818 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
12820 net
->set_mac_address
= dhd_set_mac_address
;
12821 net
->set_multicast_list
= dhd_set_multicast_list
;
12822 net
->open
= net
->stop
= NULL
;
12824 ASSERT(!net
->netdev_ops
);
12825 net
->netdev_ops
= &dhd_ops_virt
;
12826 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12828 /* Ok, link into the network layer... */
12831 * device functions for the primary interface only
12833 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12834 net
->open
= dhd_pri_open
;
12835 net
->stop
= dhd_pri_stop
;
12837 net
->netdev_ops
= &dhd_ops_pri
;
12838 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12839 if (!ETHER_ISNULLADDR(dhd
->pub
.mac
.octet
))
12840 memcpy(temp_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
12843 * We have to use the primary MAC for virtual interfaces
12845 memcpy(temp_addr
, ifp
->mac_addr
, ETHER_ADDR_LEN
);
12847 * Android sets the locally administered bit to indicate that this is a
12848 * portable hotspot. This will not work in simultaneous AP/STA mode,
12849 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
12851 if (!memcmp(temp_addr
, dhd
->iflist
[0]->mac_addr
,
12853 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12854 __func__
, net
->name
));
12855 temp_addr
[0] |= 0x02;
12859 net
->hard_header_len
= ETH_HLEN
+ dhd
->pub
.hdrlen
;
12860 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
12861 net
->ethtool_ops
= &dhd_ethtool_ops
;
12862 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
12864 #if defined(WL_WIRELESS_EXT)
12865 #if WIRELESS_EXT < 19
12866 net
->get_wireless_stats
= dhd_get_wireless_stats
;
12867 #endif /* WIRELESS_EXT < 19 */
12868 #if WIRELESS_EXT > 12
12869 net
->wireless_handlers
= &wl_iw_handler_def
;
12870 #endif /* WIRELESS_EXT > 12 */
12871 #endif /* defined(WL_WIRELESS_EXT) */
12873 dhd
->pub
.rxsz
= DBUS_RX_BUFFER_SIZE_DHD(net
);
12875 memcpy(net
->dev_addr
, temp_addr
, ETHER_ADDR_LEN
);
12878 printf("%s\n", dhd_version
);
12880 if (need_rtnl_lock
)
12881 err
= register_netdev(net
);
12883 err
= register_netdevice(net
);
12886 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net
->name
, err
));
12890 printf("Register interface [%s] MAC: "MACDBG
"\n\n", net
->name
,
12891 #if defined(CUSTOMER_HW4_DEBUG)
12892 MAC2STRDBG(dhd
->pub
.mac
.octet
));
12894 MAC2STRDBG(net
->dev_addr
));
12895 #endif /* CUSTOMER_HW4_DEBUG */
12897 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
12898 wl_iw_iscan_set_scan_broadcast_prep(net
, 1);
12901 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
12902 KERNEL_VERSION(2, 6, 27))))
12905 up(&dhd_registration_sem
);
12906 #endif /* BCMLXSDMMC */
12907 if (!dhd_download_fw_on_driverload
) {
12909 wl_terminate_event_handler(net
);
12910 #endif /* WL_CFG80211 */
12911 #if defined(DHD_LB_RXP)
12912 __skb_queue_purge(&dhd
->rx_pend_queue
);
12913 #endif /* DHD_LB_RXP */
12915 #if defined(DHD_LB_TXP)
12916 skb_queue_purge(&dhd
->tx_pend_queue
);
12917 #endif /* DHD_LB_TXP */
12919 #ifdef SHOW_LOGTRACE
12920 /* Release the skbs from queue for WLC_E_TRACE event */
12921 dhd_event_logtrace_flush_queue(dhdp
);
12922 #endif /* SHOW_LOGTRACE */
12924 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
12925 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
12926 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
12927 dhd_net_bus_devreset(net
, TRUE
);
12929 dhd_net_bus_suspend(net
);
12930 #endif /* BCMLXSDMMC */
12931 wifi_platform_set_power(dhdp
->info
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
12932 #if defined(BT_OVER_SDIO)
12933 dhd
->bus_user_count
--;
12934 #endif /* BT_OVER_SDIO */
12937 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
12941 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
12944 net
->netdev_ops
= NULL
;
12950 dhd_bus_detach(dhd_pub_t
*dhdp
)
12954 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
12957 dhd
= (dhd_info_t
*)dhdp
->info
;
12961 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
12962 * calling stop again will cuase SD read/write errors.
12964 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
12965 /* Stop the protocol module */
12966 dhd_prot_stop(&dhd
->pub
);
12968 /* Stop the bus module */
12969 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
12972 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12973 dhd_bus_oob_intr_unregister(dhdp
);
12974 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12979 void dhd_detach(dhd_pub_t
*dhdp
)
12982 unsigned long flags
;
12983 int timer_valid
= FALSE
;
12984 struct net_device
*dev
;
12986 struct bcm_cfg80211
*cfg
= NULL
;
12991 dhd
= (dhd_info_t
*)dhdp
->info
;
12995 dev
= dhd
->iflist
[0]->net
;
12999 if (dev
->flags
& IFF_UP
) {
13000 /* If IFF_UP is still up, it indicates that
13001 * "ifconfig wlan0 down" hasn't been called.
13002 * So invoke dev_close explicitly here to
13003 * bring down the interface.
13005 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
13011 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__
, dhd
->dhd_state
));
13014 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
13015 /* Give sufficient time for threads to start running in case
13016 * dhd_attach() has failed
13021 dhd_free_wet_info(&dhd
->pub
, dhd
->pub
.wet_info
);
13022 #endif /* DHD_WET */
13023 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
13024 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
13026 #ifdef PROP_TXSTATUS
13027 #ifdef DHD_WLFC_THREAD
13028 if (dhd
->pub
.wlfc_thread
) {
13029 kthread_stop(dhd
->pub
.wlfc_thread
);
13030 dhdp
->wlfc_thread_go
= TRUE
;
13031 wake_up_interruptible(&dhdp
->wlfc_wqhead
);
13033 dhd
->pub
.wlfc_thread
= NULL
;
13034 #endif /* DHD_WLFC_THREAD */
13035 #endif /* PROP_TXSTATUS */
13039 wl_cfg80211_down(dev
);
13040 #endif /* WL_CFG80211 */
13042 if (dhd
->dhd_state
& DHD_ATTACH_STATE_PROT_ATTACH
) {
13044 dhd_bus_detach(dhdp
);
13046 if (is_reboot
== SYS_RESTART
) {
13047 extern bcmdhd_wifi_platdata_t
*dhd_wifi_platdata
;
13048 if (dhd_wifi_platdata
&& !dhdp
->dongle_reset
) {
13049 dhdpcie_bus_clock_stop(dhdp
->bus
);
13050 wifi_platform_set_power(dhd_wifi_platdata
->adapters
,
13051 FALSE
, WIFI_TURNOFF_DELAY
);
13054 #endif /* BCMPCIE */
13055 #ifndef PCIE_FULL_DONGLE
13057 dhd_prot_detach(dhdp
);
13058 #endif /* !PCIE_FULL_DONGLE */
13061 #ifdef ARP_OFFLOAD_SUPPORT
13062 if (dhd_inetaddr_notifier_registered
) {
13063 dhd_inetaddr_notifier_registered
= FALSE
;
13064 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
13066 #endif /* ARP_OFFLOAD_SUPPORT */
13067 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
13068 if (dhd_inet6addr_notifier_registered
) {
13069 dhd_inet6addr_notifier_registered
= FALSE
;
13070 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
13072 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
13073 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
13074 if (dhd
->dhd_state
& DHD_ATTACH_STATE_EARLYSUSPEND_DONE
) {
13075 if (dhd
->early_suspend
.suspend
)
13076 unregister_early_suspend(&dhd
->early_suspend
);
13078 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
13080 #if defined(WL_WIRELESS_EXT)
13081 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WL_ATTACH
) {
13082 /* Detatch and unlink in the iw */
13085 #endif /* defined(WL_WIRELESS_EXT) */
13088 dhd_ulp_deinit(dhd
->pub
.osh
, dhdp
);
13089 #endif /* DHD_ULP */
13091 /* delete all interfaces, start with virtual */
13092 if (dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) {
13096 /* Cleanup virtual interfaces */
13097 dhd_net_if_lock_local(dhd
);
13098 for (i
= 1; i
< DHD_MAX_IFS
; i
++) {
13099 if (dhd
->iflist
[i
]) {
13100 dhd_remove_if(&dhd
->pub
, i
, TRUE
);
13103 dhd_net_if_unlock_local(dhd
);
13105 /* delete primary interface 0 */
13106 ifp
= dhd
->iflist
[0];
13107 if (ifp
&& ifp
->net
) {
13110 cfg
= wl_get_cfg(ifp
->net
);
13112 /* in unregister_netdev case, the interface gets freed by net->destructor
13113 * (which is set to free_netdev)
13115 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
13116 free_netdev(ifp
->net
);
13118 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
13119 defined(ARGOS_NOTIFY_CB)
13120 argos_register_notifier_deinit();
13121 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
13122 #ifdef SET_RPS_CPUS
13123 custom_rps_map_clear(ifp
->net
->_rx
);
13124 #endif /* SET_RPS_CPUS */
13125 netif_tx_disable(ifp
->net
);
13126 unregister_netdev(ifp
->net
);
13128 #ifdef PCIE_FULL_DONGLE
13129 ifp
->net
= DHD_NET_DEV_NULL
;
13132 #endif /* PCIE_FULL_DONGLE */
13134 #ifdef DHD_L2_FILTER
13135 bcm_l2_filter_arp_table_update(dhdp
->osh
, ifp
->phnd_arp_table
, TRUE
,
13136 NULL
, FALSE
, dhdp
->tickcnt
);
13137 deinit_l2_filter_arp_table(dhdp
->osh
, ifp
->phnd_arp_table
);
13138 ifp
->phnd_arp_table
= NULL
;
13139 #endif /* DHD_L2_FILTER */
13141 dhd_if_del_sta_list(ifp
);
13143 MFREE(dhd
->pub
.osh
, ifp
, sizeof(*ifp
));
13144 dhd
->iflist
[0] = NULL
;
13148 /* Clear the watchdog timer */
13149 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
13150 timer_valid
= dhd
->wd_timer_valid
;
13151 dhd
->wd_timer_valid
= FALSE
;
13152 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
13154 del_timer_sync(&dhd
->timer
);
13155 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
13157 if (dhd
->dhd_state
& DHD_ATTACH_STATE_THREADS_CREATED
) {
13158 #ifdef DHD_PCIE_RUNTIMEPM
13159 if (dhd
->thr_rpm_ctl
.thr_pid
>= 0) {
13160 PROC_STOP(&dhd
->thr_rpm_ctl
);
13162 #endif /* DHD_PCIE_RUNTIMEPM */
13163 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
13164 PROC_STOP(&dhd
->thr_wdt_ctl
);
13167 if (dhd
->rxthread_enabled
&& dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
13168 PROC_STOP(&dhd
->thr_rxf_ctl
);
13171 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
13172 PROC_STOP(&dhd
->thr_dpc_ctl
);
13175 tasklet_kill(&dhd
->tasklet
);
13180 if (dhd
->pub
.nfct
) {
13181 dhd_ct_close(dhd
->pub
.nfct
);
13183 #endif /* WL_NATOE */
13186 if (dhd
->dhd_state
& DHD_ATTACH_STATE_LB_ATTACH_DONE
) {
13187 /* Clear the flag first to avoid calling the cpu notifier */
13188 dhd
->dhd_state
&= ~DHD_ATTACH_STATE_LB_ATTACH_DONE
;
13190 /* Kill the Load Balancing Tasklets */
13192 cancel_work_sync(&dhd
->rx_napi_dispatcher_work
);
13193 __skb_queue_purge(&dhd
->rx_pend_queue
);
13194 #endif /* DHD_LB_RXP */
13196 cancel_work_sync(&dhd
->tx_dispatcher_work
);
13197 tasklet_kill(&dhd
->tx_tasklet
);
13198 __skb_queue_purge(&dhd
->tx_pend_queue
);
13199 #endif /* DHD_LB_TXP */
13201 cancel_work_sync(&dhd
->tx_compl_dispatcher_work
);
13202 tasklet_kill(&dhd
->tx_compl_tasklet
);
13203 #endif /* DHD_LB_TXC */
13205 tasklet_kill(&dhd
->rx_compl_tasklet
);
13206 #endif /* DHD_LB_RXC */
13208 if (dhd
->cpu_notifier
.notifier_call
!= NULL
) {
13209 unregister_cpu_notifier(&dhd
->cpu_notifier
);
13211 dhd_cpumasks_deinit(dhd
);
13212 DHD_LB_STATS_DEINIT(&dhd
->pub
);
13214 #endif /* DHD_LB */
13216 DHD_SSSR_MEMPOOL_DEINIT(&dhd
->pub
);
13219 if (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
) {
13221 DHD_ERROR(("cfg NULL!\n"));
13224 wl_cfg80211_detach(cfg
);
13225 dhd_monitor_uninit();
13230 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13231 destroy_workqueue(dhd
->tx_wq
);
13233 destroy_workqueue(dhd
->rx_wq
);
13235 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13236 #ifdef DEBUGABILITY
13239 dhd_os_dbg_detach_pkt_monitor(dhdp
);
13240 dhd_os_spin_lock_deinit(dhd
->pub
.osh
, dhd
->pub
.dbg
->pkt_mon_lock
);
13241 #endif /* DBG_PKT_MON */
13242 dhd_os_dbg_detach(dhdp
);
13244 #endif /* DEBUGABILITY */
13245 #ifdef DHD_PKT_LOGGING
13246 dhd_os_detach_pktlog(dhdp
);
13247 #endif /* DHD_PKT_LOGGING */
13248 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13249 if (dhd
->pub
.hang_info
) {
13250 MFREE(dhd
->pub
.osh
, dhd
->pub
.hang_info
, VENDOR_SEND_HANG_EXT_INFO_LEN
);
13252 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13253 #ifdef SHOW_LOGTRACE
13254 /* Release the skbs from queue for WLC_E_TRACE event */
13255 dhd_event_logtrace_flush_queue(dhdp
);
13257 if (dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
) {
13258 if (dhd
->event_data
.fmts
) {
13259 MFREE(dhd
->pub
.osh
, dhd
->event_data
.fmts
,
13260 dhd
->event_data
.fmts_size
);
13261 dhd
->event_data
.fmts
= NULL
;
13263 if (dhd
->event_data
.raw_fmts
) {
13264 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_fmts
,
13265 dhd
->event_data
.raw_fmts_size
);
13266 dhd
->event_data
.raw_fmts
= NULL
;
13268 if (dhd
->event_data
.raw_sstr
) {
13269 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_sstr
,
13270 dhd
->event_data
.raw_sstr_size
);
13271 dhd
->event_data
.raw_sstr
= NULL
;
13273 if (dhd
->event_data
.rom_raw_sstr
) {
13274 MFREE(dhd
->pub
.osh
, dhd
->event_data
.rom_raw_sstr
,
13275 dhd
->event_data
.rom_raw_sstr_size
);
13276 dhd
->event_data
.rom_raw_sstr
= NULL
;
13278 dhd
->dhd_state
&= ~DHD_ATTACH_LOGTRACE_INIT
;
13280 #endif /* SHOW_LOGTRACE */
13282 if (dhdp
->pno_state
)
13283 dhd_pno_deinit(dhdp
);
13286 if (dhdp
->rtt_state
) {
13287 dhd_rtt_deinit(dhdp
);
13290 #if defined(CONFIG_PM_SLEEP)
13291 if (dhd_pm_notifier_registered
) {
13292 unregister_pm_notifier(&dhd
->pm_notifier
);
13293 dhd_pm_notifier_registered
= FALSE
;
13295 #endif /* CONFIG_PM_SLEEP */
13297 #ifdef DEBUG_CPU_FREQ
13299 free_percpu(dhd
->new_freq
);
13300 dhd
->new_freq
= NULL
;
13301 cpufreq_unregister_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
13303 #ifdef CONFIG_HAS_WAKELOCK
13304 dhd
->wakelock_wd_counter
= 0;
13305 wake_lock_destroy(&dhd
->wl_wdwake
);
13306 #endif /* CONFIG_HAS_WAKELOCK */
13307 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) {
13308 DHD_TRACE(("wd wakelock count:%d\n", dhd
->wakelock_wd_counter
));
13309 DHD_OS_WAKE_LOCK_DESTROY(dhd
);
13312 #ifdef ARGOS_CPU_SCHEDULER
13313 if (dhd
->pub
.affinity_isdpc
== TRUE
) {
13314 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
13315 free_cpumask_var(dhd
->pub
.dpc_affinity_cpu_mask
);
13316 dhd
->pub
.affinity_isdpc
= FALSE
;
13318 #endif /* ARGOS_CPU_SCHEDULER */
13320 #ifdef DHDTCPACK_SUPPRESS
13321 /* This will free all MEM allocated for TCPACK SUPPRESS */
13322 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
13323 #endif /* DHDTCPACK_SUPPRESS */
13325 #ifdef PCIE_FULL_DONGLE
13326 dhd_flow_rings_deinit(dhdp
);
13328 dhd_prot_detach(dhdp
);
13331 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13332 dhd_free_tdls_peer_list(dhdp
);
13335 #ifdef DUMP_IOCTL_IOV_LIST
13336 dhd_iov_li_delete(dhdp
, &(dhdp
->dump_iovlist_head
));
13337 #endif /* DUMP_IOCTL_IOV_LIST */
13339 /* memory waste feature list initilization */
13340 dhd_mw_list_delete(dhdp
, &(dhdp
->mw_list_head
));
13341 #endif /* DHD_DEBUG */
13343 dhd_del_monitor_if(dhd
);
13344 #endif /* WL_MONITOR */
13347 if (dhdp
->enable_erpom
) {
13348 dhdp
->pom_func_deregister(&dhdp
->pom_wlan_handler
);
13350 #endif /* DHD_ERPOM */
13352 cancel_work_sync(&dhd
->dhd_hang_process_work
);
13354 /* Prefer adding de-init code above this comment unless necessary.
13355 * The idea is to cancel work queue, sysfs and flags at the end.
13357 dhd_deferred_work_deinit(dhd
->dhd_deferred_wq
);
13358 dhd
->dhd_deferred_wq
= NULL
;
13360 /* log dump related buffers should be freed after wq is purged */
13361 #ifdef DHD_LOG_DUMP
13362 dhd_log_dump_deinit(&dhd
->pub
);
13363 #endif /* DHD_LOG_DUMP */
13364 #if defined(BCMPCIE)
13365 if (dhdp
->extended_trap_data
)
13367 MFREE(dhdp
->osh
, dhdp
->extended_trap_data
, BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
13368 dhdp
->extended_trap_data
= NULL
;
13370 #endif /* BCMPCIE */
13372 #ifdef SHOW_LOGTRACE
13373 /* Wait till event_log_dispatcher_work finishes */
13374 cancel_delayed_work_sync(&dhd
->event_log_dispatcher_work
);
13375 mutex_lock(&dhd
->pub
.dhd_trace_lock
);
13376 remove_proc_entry("dhd_trace", NULL
);
13377 mutex_unlock(&dhd
->pub
.dhd_trace_lock
);
13378 #endif /* SHOW_LOGTRACE */
13380 #ifdef DHD_DUMP_MNGR
13381 if (dhd
->pub
.dump_file_manage
) {
13382 MFREE(dhd
->pub
.osh
, dhd
->pub
.dump_file_manage
,
13383 sizeof(dhd_dump_file_manage_t
));
13385 #endif /* DHD_DUMP_MNGR */
13386 dhd_sysfs_exit(dhd
);
13387 dhd
->pub
.fw_download_done
= FALSE
;
13389 #if defined(BT_OVER_SDIO)
13390 mutex_destroy(&dhd
->bus_user_lock
);
13391 #endif /* BT_OVER_SDIO */
13396 dhd_free(dhd_pub_t
*dhdp
)
13399 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13403 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
13404 if (dhdp
->reorder_bufs
[i
]) {
13405 reorder_info_t
*ptr
;
13406 uint32 buf_size
= sizeof(struct reorder_info
);
13408 ptr
= dhdp
->reorder_bufs
[i
];
13410 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
13411 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13412 i
, ptr
->max_idx
, buf_size
));
13414 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
13415 dhdp
->reorder_bufs
[i
] = NULL
;
13419 dhd_sta_pool_fini(dhdp
, DHD_MAX_STA
);
13421 dhd
= (dhd_info_t
*)dhdp
->info
;
13422 if (dhdp
->soc_ram
) {
13423 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13424 DHD_OS_PREFREE(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13426 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13427 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13428 dhdp
->soc_ram
= NULL
;
13432 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
13433 if (dhd
!= (dhd_info_t
*)dhd_os_prealloc(dhdp
,
13434 DHD_PREALLOC_DHD_INFO
, 0, FALSE
))
13435 MFREE(dhd
->pub
.osh
, dhd
, sizeof(*dhd
));
13442 dhd_clear(dhd_pub_t
*dhdp
)
13444 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13448 #ifdef DHDTCPACK_SUPPRESS
13449 /* Clean up timer/data structure for any remaining/pending packet or timer. */
13450 dhd_tcpack_info_tbl_clean(dhdp
);
13451 #endif /* DHDTCPACK_SUPPRESS */
13452 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
13453 if (dhdp
->reorder_bufs
[i
]) {
13454 reorder_info_t
*ptr
;
13455 uint32 buf_size
= sizeof(struct reorder_info
);
13457 ptr
= dhdp
->reorder_bufs
[i
];
13459 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
13460 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13461 i
, ptr
->max_idx
, buf_size
));
13463 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
13464 dhdp
->reorder_bufs
[i
] = NULL
;
13468 dhd_sta_pool_clear(dhdp
, DHD_MAX_STA
);
13470 if (dhdp
->soc_ram
) {
13471 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13472 DHD_OS_PREFREE(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13474 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13475 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13476 dhdp
->soc_ram
= NULL
;
/* Unregister the bus and the wifi-platform driver on module unload.
 * NOTE(review): reconstructed from extraction-mangled source; lines between
 * dhd_bus_unregister() and dhd_wifi_platform_unregister_drv() were lost in
 * extraction — verify against upstream dhd_linux.c.
 */
static void
dhd_module_cleanup(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhd_bus_unregister();

	dhd_wifi_platform_unregister_drv();
}
13494 dhd_module_exit(void)
13496 atomic_set(&exit_in_progress
, 1);
13497 dhd_module_cleanup();
13498 unregister_reboot_notifier(&dhd_reboot_notifier
);
13499 dhd_destroy_to_notifier_skt();
13503 dhd_module_init(void)
13506 int retry
= POWERUP_MAX_RETRY
;
13508 DHD_ERROR(("%s in\n", __FUNCTION__
));
13510 DHD_PERIM_RADIO_INIT();
13512 if (firmware_path
[0] != '\0') {
13513 strncpy(fw_bak_path
, firmware_path
, MOD_PARAM_PATHLEN
);
13514 fw_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13517 if (nvram_path
[0] != '\0') {
13518 strncpy(nv_bak_path
, nvram_path
, MOD_PARAM_PATHLEN
);
13519 nv_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13523 err
= dhd_wifi_platform_register_drv();
13525 register_reboot_notifier(&dhd_reboot_notifier
);
13528 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
13529 __FUNCTION__
, retry
));
13530 strncpy(firmware_path
, fw_bak_path
, MOD_PARAM_PATHLEN
);
13531 firmware_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13532 strncpy(nvram_path
, nv_bak_path
, MOD_PARAM_PATHLEN
);
13533 nvram_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13537 dhd_create_to_notifier_skt();
13540 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__
));
13542 if (!dhd_download_fw_on_driverload
) {
13543 dhd_driver_init_done
= TRUE
;
13547 DHD_ERROR(("%s out\n", __FUNCTION__
));
13553 dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
)
13555 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__
, code
));
13556 if (code
== SYS_RESTART
) {
13559 #endif /* BCMPCIE */
13561 return NOTIFY_DONE
;
13564 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
13565 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
13566 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
13567 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
13568 defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
13569 defined(CONFIG_ARCH_SDM845)
13570 deferred_module_init_sync(dhd_module_init
);
13572 deferred_module_init(dhd_module_init
);
13573 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
13574 * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
13575 * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845
13577 #elif defined(USE_LATE_INITCALL_SYNC)
13578 late_initcall_sync(dhd_module_init
);
13580 late_initcall(dhd_module_init
);
13581 #endif /* USE_LATE_INITCALL_SYNC */
13583 module_init(dhd_module_init
);
13584 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
13586 module_exit(dhd_module_exit
);
13589 * OS specific functions required to implement DHD driver in OS independent way
13592 dhd_os_proto_block(dhd_pub_t
*pub
)
13594 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13597 DHD_PERIM_UNLOCK(pub
);
13599 down(&dhd
->proto_sem
);
13601 DHD_PERIM_LOCK(pub
);
13609 dhd_os_proto_unblock(dhd_pub_t
*pub
)
13611 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13614 up(&dhd
->proto_sem
);
13622 dhd_os_dhdiovar_lock(dhd_pub_t
*pub
)
13624 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13627 mutex_lock(&dhd
->dhd_iovar_mutex
);
13632 dhd_os_dhdiovar_unlock(dhd_pub_t
*pub
)
13634 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13637 mutex_unlock(&dhd
->dhd_iovar_mutex
);
13642 dhd_os_logdump_lock(dhd_pub_t
*pub
)
13644 dhd_info_t
*dhd
= NULL
;
13649 dhd
= (dhd_info_t
*)(pub
->info
);
13652 mutex_lock(&dhd
->logdump_lock
);
13657 dhd_os_logdump_unlock(dhd_pub_t
*pub
)
13659 dhd_info_t
*dhd
= NULL
;
13664 dhd
= (dhd_info_t
*)(pub
->info
);
13667 mutex_unlock(&dhd
->logdump_lock
);
/* Lock an opaque debug-ring mutex handle. Silently returns on NULL.
 * NOTE(review): return type/guards reconstructed from mangled source; the
 * upstream variant may return a flags value — verify.
 */
unsigned long
dhd_os_dbgring_lock(void *lock)
{
	if (!lock)
		return 0;

	mutex_lock((struct mutex *)lock);

	return 0;
}
/* Unlock an opaque debug-ring mutex handle; `flags` is unused (kept for
 * interface symmetry with spinlock-based implementations). Silently returns
 * on NULL.
 */
void
dhd_os_dbgring_unlock(void *lock, unsigned long flags)
{
	BCM_REFERENCE(flags);

	if (!lock)
		return;

	mutex_unlock((struct mutex *)lock);
}
13694 dhd_os_get_ioctl_resp_timeout(void)
13696 return ((unsigned int)dhd_ioctl_timeout_msec
);
13700 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec
)
13702 dhd_ioctl_timeout_msec
= (int)timeout_msec
;
13706 dhd_os_ioctl_resp_wait(dhd_pub_t
*pub
, uint
*condition
)
13708 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13711 /* Convert timeout in millsecond to jiffies */
13712 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13713 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
13715 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
13718 DHD_PERIM_UNLOCK(pub
);
13720 timeout
= wait_event_timeout(dhd
->ioctl_resp_wait
, (*condition
), timeout
);
13722 DHD_PERIM_LOCK(pub
);
13728 dhd_os_ioctl_resp_wake(dhd_pub_t
*pub
)
13730 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13732 wake_up(&dhd
->ioctl_resp_wait
);
13737 dhd_os_d3ack_wait(dhd_pub_t
*pub
, uint
*condition
)
13739 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13742 /* Convert timeout in millsecond to jiffies */
13743 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13744 timeout
= msecs_to_jiffies(D3_ACK_RESP_TIMEOUT
);
13746 timeout
= D3_ACK_RESP_TIMEOUT
* HZ
/ 1000;
13749 DHD_PERIM_UNLOCK(pub
);
13751 timeout
= wait_event_timeout(dhd
->d3ack_wait
, (*condition
), timeout
);
13753 DHD_PERIM_LOCK(pub
);
13759 dhd_os_d3ack_wake(dhd_pub_t
*pub
)
13761 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13763 wake_up(&dhd
->d3ack_wait
);
13768 dhd_os_busbusy_wait_negation(dhd_pub_t
*pub
, uint
*condition
)
13770 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13773 /* Wait for bus usage contexts to gracefully exit within some timeout value
13774 * Set time out to little higher than dhd_ioctl_timeout_msec,
13775 * so that IOCTL timeout should not get affected.
13777 /* Convert timeout in millsecond to jiffies */
13778 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13779 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13781 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13784 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
, !(*condition
), timeout
);
13790 * Wait until the condition *var == condition is met.
13791 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13792 * Returns 1 if the @condition evaluated to true
13795 dhd_os_busbusy_wait_condition(dhd_pub_t
*pub
, uint
*var
, uint condition
)
13797 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13800 /* Convert timeout in millsecond to jiffies */
13801 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13802 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13804 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13807 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
, (*var
== condition
), timeout
);
13813 * Wait until the '(*var & bitmask) == condition' is met.
13814 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13815 * Returns 1 if the @condition evaluated to true
13818 dhd_os_busbusy_wait_bitmask(dhd_pub_t
*pub
, uint
*var
,
13819 uint bitmask
, uint condition
)
13821 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13824 /* Convert timeout in millsecond to jiffies */
13825 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13826 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13828 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13831 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
,
13832 ((*var
& bitmask
) == condition
), timeout
);
13838 dhd_os_dmaxfer_wait(dhd_pub_t
*pub
, uint
*condition
)
13841 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13843 DHD_PERIM_UNLOCK(pub
);
13844 ret
= wait_event_interruptible(dhd
->dmaxfer_wait
, (*condition
));
13845 DHD_PERIM_LOCK(pub
);
13852 dhd_os_dmaxfer_wake(dhd_pub_t
*pub
)
13854 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13856 wake_up(&dhd
->dmaxfer_wait
);
13861 dhd_os_tx_completion_wake(dhd_pub_t
*dhd
)
13863 /* Call wmb() to make sure before waking up the other event value gets updated */
13865 wake_up(&dhd
->tx_completion_wait
);
13868 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
13869 /* Fix compilation error for FC11 */
13873 dhd_os_busbusy_wake(dhd_pub_t
*pub
)
13875 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13876 /* Call wmb() to make sure before waking up the other event value gets updated */
13878 wake_up(&dhd
->dhd_bus_busy_state_wait
);
13883 dhd_os_wd_timer_extend(void *bus
, bool extend
)
13885 dhd_pub_t
*pub
= bus
;
13886 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13889 dhd_os_wd_timer(bus
, WATCHDOG_EXTEND_INTERVAL
);
13891 dhd_os_wd_timer(bus
, dhd
->default_wd_interval
);
13895 dhd_os_wd_timer(void *bus
, uint wdtick
)
13897 dhd_pub_t
*pub
= bus
;
13898 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13899 unsigned long flags
;
13901 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13904 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
13908 DHD_GENERAL_LOCK(pub
, flags
);
13910 /* don't start the wd until fw is loaded */
13911 if (pub
->busstate
== DHD_BUS_DOWN
) {
13912 DHD_GENERAL_UNLOCK(pub
, flags
);
13915 DHD_OS_WD_WAKE_UNLOCK(pub
);
13917 #endif /* BCMSDIO */
13921 /* Totally stop the timer */
13922 if (!wdtick
&& dhd
->wd_timer_valid
== TRUE
) {
13923 dhd
->wd_timer_valid
= FALSE
;
13924 DHD_GENERAL_UNLOCK(pub
, flags
);
13925 del_timer_sync(&dhd
->timer
);
13927 DHD_OS_WD_WAKE_UNLOCK(pub
);
13928 #endif /* BCMSDIO */
13934 DHD_OS_WD_WAKE_LOCK(pub
);
13935 dhd_watchdog_ms
= (uint
)wdtick
;
13936 #endif /* BCMSDIO */
13937 /* Re arm the timer, at last watchdog period */
13938 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
13939 dhd
->wd_timer_valid
= TRUE
;
13941 DHD_GENERAL_UNLOCK(pub
, flags
);
#ifdef DHD_PCIE_RUNTIMEPM
/*
 * Start (tick != 0) or stop (tick == 0) the PCIe runtime-PM timer.
 * The general lock is released before del_timer_sync() to avoid deadlock
 * with the timer callback.
 */
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if its not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
		}
	} else {
		/* tick is zero, we have to stop the timer */
		/* Stop the timer only if its running, otherwise we don't have to do anything */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			/* we have already released the lock, so just go to exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;
}
#endif /* DHD_PCIE_RUNTIMEPM */
13995 dhd_os_open_image1(dhd_pub_t
*pub
, char *filename
)
14000 fp
= filp_open(filename
, O_RDONLY
, 0);
14002 * 2.6.11 (FC4) supports filp_open() but later revs don't?
14004 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
14012 if (!S_ISREG(file_inode(fp
)->i_mode
)) {
14013 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__
, filename
));
14018 size
= i_size_read(file_inode(fp
));
14020 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__
, filename
, size
));
14025 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__
, filename
, size
));
14032 dhd_os_get_image_block(char *buf
, int len
, void *image
)
14034 struct file
*fp
= (struct file
*)image
;
14042 size
= i_size_read(file_inode(fp
));
14043 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, MIN(len
, size
));
14045 if (len
>= size
&& size
!= rdlen
) {
14050 fp
->f_pos
+= rdlen
;
#if defined(BT_OVER_SDIO)
/*
 * Read one '\n'-terminated line from the image into str (at most len bytes).
 * Advances f_pos past the line and zeroes the tail of the buffer.
 * Returns the line length (0 if no newline found or image is NULL).
 */
int
dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rd_len;
	uint str_len = 0;
	char *str_end = NULL;

	if (!image)
		return 0;

	rd_len = kernel_read(fp, fp->f_pos, str, len);
	str_end = strnchr(str, len, '\n');
	if (str_end == NULL) {
		goto err;
	}
	str_len = (uint)(str_end - str);

	/* Advance file pointer past the string length */
	fp->f_pos += str_len + 1;
	bzero(str_end, rd_len - str_len);

err:
	return str_len;
}
#endif /* defined (BT_OVER_SDIO) */
/* Return the image file size in bytes, or 0 for a NULL handle. */
int
dhd_os_get_image_size(void *image)
{
	struct file *fp = (struct file *)image;
	int size;

	if (!image) {
		return 0;
	}

	size = i_size_read(file_inode(fp));

	return size;
}
14099 dhd_os_close_image1(dhd_pub_t
*pub
, void *image
)
14102 filp_close((struct file
*)image
, NULL
);
14107 dhd_os_sdlock(dhd_pub_t
*pub
)
14111 dhd
= (dhd_info_t
*)(pub
->info
);
14113 if (dhd_dpc_prio
>= 0)
14116 spin_lock_bh(&dhd
->sdlock
);
14120 dhd_os_sdunlock(dhd_pub_t
*pub
)
14124 dhd
= (dhd_info_t
*)(pub
->info
);
14126 if (dhd_dpc_prio
>= 0)
14129 spin_unlock_bh(&dhd
->sdlock
);
14133 dhd_os_sdlock_txq(dhd_pub_t
*pub
)
14137 dhd
= (dhd_info_t
*)(pub
->info
);
14138 spin_lock_bh(&dhd
->txqlock
);
14142 dhd_os_sdunlock_txq(dhd_pub_t
*pub
)
14146 dhd
= (dhd_info_t
*)(pub
->info
);
14147 spin_unlock_bh(&dhd
->txqlock
);
14151 dhd_os_sdlock_rxq(dhd_pub_t
*pub
)
14156 dhd_os_sdunlock_rxq(dhd_pub_t
*pub
)
14161 dhd_os_rxflock(dhd_pub_t
*pub
)
14165 dhd
= (dhd_info_t
*)(pub
->info
);
14166 spin_lock_bh(&dhd
->rxf_lock
);
14171 dhd_os_rxfunlock(dhd_pub_t
*pub
)
14175 dhd
= (dhd_info_t
*)(pub
->info
);
14176 spin_unlock_bh(&dhd
->rxf_lock
);
14179 #ifdef DHDTCPACK_SUPPRESS
14181 dhd_os_tcpacklock(dhd_pub_t
*pub
)
14184 unsigned long flags
= 0;
14186 dhd
= (dhd_info_t
*)(pub
->info
);
14190 spin_lock_bh(&dhd
->tcpack_lock
);
14192 spin_lock_irqsave(&dhd
->tcpack_lock
, flags
);
14193 #endif /* BCMSDIO */
14200 dhd_os_tcpackunlock(dhd_pub_t
*pub
, unsigned long flags
)
14205 BCM_REFERENCE(flags
);
14206 #endif /* BCMSDIO */
14208 dhd
= (dhd_info_t
*)(pub
->info
);
14212 spin_unlock_bh(&dhd
->tcpack_lock
);
14214 spin_unlock_irqrestore(&dhd
->tcpack_lock
, flags
);
14215 #endif /* BCMSDIO */
14218 #endif /* DHDTCPACK_SUPPRESS */
14220 uint8
* dhd_os_prealloc(dhd_pub_t
*dhdpub
, int section
, uint size
, bool kmalloc_if_fail
)
14223 gfp_t flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
14225 buf
= (uint8
*)wifi_platform_prealloc(dhdpub
->info
->adapter
, section
, size
);
14226 if (buf
== NULL
&& kmalloc_if_fail
)
14227 buf
= kmalloc(size
, flags
);
14232 void dhd_os_prefree(dhd_pub_t
*dhdpub
, void *addr
, uint size
)
#if defined(WL_WIRELESS_EXT)
/*
 * Wireless-extensions statistics callback; returns NULL when the interface
 * is down or the stats query fails.
 */
struct iw_statistics *
dhd_get_wireless_stats(struct net_device *dev)
{
	int res = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (!dhd->pub.up) {
		return NULL;
	}

	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);

	if (res == 0)
		return &dhd->iw.wstats;
	else
		return NULL;
}
#endif /* defined(WL_WIRELESS_EXT) */
14257 dhd_wl_host_event(dhd_info_t
*dhd
, int ifidx
, void *pktdata
, uint16 pktlen
,
14258 wl_event_msg_t
*event
, void **data
)
14262 unsigned long flags
= 0;
14263 #ifdef DYNAMIC_MUMIMO_CONTROL
14264 static uint32 reassoc_err
= 0;
14265 #endif /* DYNAMIC_MUMIMO_CONTROL */
14266 #endif /* WL_CFG80211 */
14267 ASSERT(dhd
!= NULL
);
14269 #ifdef SHOW_LOGTRACE
14270 bcmerror
= wl_process_host_event(&dhd
->pub
, &ifidx
, pktdata
, pktlen
, event
, data
,
14273 bcmerror
= wl_process_host_event(&dhd
->pub
, &ifidx
, pktdata
, pktlen
, event
, data
,
14275 #endif /* SHOW_LOGTRACE */
14276 if (unlikely(bcmerror
!= BCME_OK
)) {
14280 if (ntoh32(event
->event_type
) == WLC_E_IF
) {
14281 /* WLC_E_IF event types are consumed by wl_process_host_event.
14282 * For ifadd/del ops, the netdev ptr may not be valid at this
14283 * point. so return before invoking cfg80211/wext handlers.
14288 #if defined(WL_WIRELESS_EXT)
14289 if (event
->bsscfgidx
== 0) {
14291 * Wireless ext is on primary interface only
14293 ASSERT(dhd
->iflist
[ifidx
] != NULL
);
14294 ASSERT(dhd
->iflist
[ifidx
]->net
!= NULL
);
14296 if (dhd
->iflist
[ifidx
]->net
) {
14297 wl_iw_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14300 #endif /* defined(WL_WIRELESS_EXT) */
14303 if (dhd
->iflist
[ifidx
]->net
) {
14304 spin_lock_irqsave(&dhd
->pub
.up_lock
, flags
);
14306 wl_cfg80211_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14308 spin_unlock_irqrestore(&dhd
->pub
.up_lock
, flags
);
14310 #ifdef DYNAMIC_MUMIMO_CONTROL
14311 #define REASSOC_ERROR_RETRY_LIMIT 1
14312 if (dhd
->pub
.reassoc_mumimo_sw
) {
14313 uint event_type
= ntoh32(event
->event_type
);
14314 uint status
= ntoh32(event
->status
);
14316 if (event_type
== WLC_E_REASSOC
) {
14317 if (status
== WLC_E_STATUS_SUCCESS
) {
14323 if (reassoc_err
> REASSOC_ERROR_RETRY_LIMIT
) {
14324 dhd
->pub
.reassoc_mumimo_sw
= FALSE
;
14325 dhd
->pub
.murx_block_eapol
= FALSE
;
14326 DHD_ENABLE_RUNTIME_PM(&dhd
->pub
);
14327 dhd_txflowcontrol(&dhd
->pub
, ALL_INTERFACES
, OFF
);
14331 #undef REASSOC_ERROR_RETRY_LIMIT
14332 #endif /* DYNAMIC_MUMIMO_CONTROL */
14333 #endif /* defined(WL_CFG80211) */
14338 /* send up locally generated event */
14340 dhd_sendup_event(dhd_pub_t
*dhdp
, wl_event_msg_t
*event
, void *data
)
14342 switch (ntoh32(event
->event_type
)) {
14343 /* Handle error case or further events here */
#ifdef LOG_INTO_TCPDUMP
/*
 * Wrap a log buffer in a BRCM-ethertype ethernet frame and inject it up the
 * primary netdev so it shows up in tcpdump captures.
 * NOTE(review): reconstructed from extraction-damaged text (declarations,
 * netif_rx dispatch and skb->len save/restore restored per upstream).
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	} else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
	}
}
#endif /* LOG_INTO_TCPDUMP */
14408 void dhd_wait_for_event(dhd_pub_t
*dhd
, bool *lockvar
)
14410 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14411 struct dhd_info
*dhdinfo
= dhd
->info
;
14413 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
14414 int timeout
= msecs_to_jiffies(IOCTL_RESP_TIMEOUT
);
14416 int timeout
= (IOCTL_RESP_TIMEOUT
/ 1000) * HZ
;
14417 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
14419 dhd_os_sdunlock(dhd
);
14420 wait_event_timeout(dhdinfo
->ctrl_wait
, (*lockvar
== FALSE
), timeout
);
14421 dhd_os_sdlock(dhd
);
14422 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
14424 } /* dhd_init_static_strs_array */
14426 void dhd_wait_event_wakeup(dhd_pub_t
*dhd
)
14428 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14429 struct dhd_info
*dhdinfo
= dhd
->info
;
14430 if (waitqueue_active(&dhdinfo
->ctrl_wait
))
14431 wake_up(&dhdinfo
->ctrl_wait
);
14436 #if defined(BCMSDIO) || defined(BCMPCIE)
14438 dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
)
14442 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14444 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14445 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd
->pub
.bus
)) < 0)
14447 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14449 if (flag
== TRUE
) {
14450 /* Issue wl down command before resetting the chip */
14451 if (dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
14452 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__
));
14454 #ifdef PROP_TXSTATUS
14455 if (dhd
->pub
.wlfc_enabled
) {
14456 dhd_wlfc_deinit(&dhd
->pub
);
14458 #endif /* PROP_TXSTATUS */
14460 if (dhd
->pub
.pno_state
) {
14461 dhd_pno_deinit(&dhd
->pub
);
14465 if (dhd
->pub
.rtt_state
) {
14466 dhd_rtt_deinit(&dhd
->pub
);
14468 #endif /* RTT_SUPPORT */
14470 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
14471 dhd_os_dbg_detach_pkt_monitor(&dhd
->pub
);
14472 #endif /* DBG_PKT_MON */
14477 dhd_update_fw_nv_path(dhd
);
14478 /* update firmware and nvram path to sdio bus */
14479 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
14480 dhd
->fw_path
, dhd
->nv_path
);
14482 #endif /* BCMSDIO */
14484 ret
= dhd_bus_devreset(&dhd
->pub
, flag
);
14486 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14487 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd
->pub
.bus
));
14488 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd
->pub
.bus
));
14489 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14492 /* Clear some flags for recovery logic */
14493 dhd
->pub
.dongle_trap_occured
= 0;
14494 dhd
->pub
.iovar_timeout_occured
= 0;
14495 #ifdef PCIE_FULL_DONGLE
14496 dhd
->pub
.d3ack_timeout_occured
= 0;
14497 #endif /* PCIE_FULL_DONGLE */
14498 #ifdef DHD_MAP_LOGGING
14499 dhd
->pub
.smmu_fault_occurred
= 0;
14500 #endif /* DHD_MAP_LOGGING */
14504 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__
, ret
));
14512 dhd_net_bus_suspend(struct net_device
*dev
)
14514 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14515 return dhd_bus_suspend(&dhd
->pub
);
14519 dhd_net_bus_resume(struct net_device
*dev
, uint8 stage
)
14521 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14522 return dhd_bus_resume(&dhd
->pub
, stage
);
14525 #endif /* BCMSDIO */
14526 #endif /* BCMSDIO || BCMPCIE */
14528 int net_os_set_suspend_disable(struct net_device
*dev
, int val
)
14530 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14534 ret
= dhd
->pub
.suspend_disable_flag
;
14535 dhd
->pub
.suspend_disable_flag
= val
;
14540 int net_os_set_suspend(struct net_device
*dev
, int val
, int force
)
14543 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14546 #ifdef CONFIG_MACH_UNIVERSAL7420
14547 #if defined(ARGOS_RPS_CPU_CTL) && defined(DHD_LB_RXP)
14549 /* Force to set rps_cpus to specific CPU core */
14550 dhd_rps_cpus_enable(dev
, TRUE
);
14552 #endif /* ARGOS_RPS_CPU_CTL && DHD_LB_RXP */
14553 #endif /* CONFIG_MACH_UNIVERSAL7420 */
14554 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14555 ret
= dhd_set_suspend(val
, &dhd
->pub
);
14557 ret
= dhd_suspend_resume_helper(dhd
, val
, force
);
14560 wl_cfg80211_update_power_mode(dev
);
14566 int net_os_set_suspend_bcn_li_dtim(struct net_device
*dev
, int val
)
14568 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14571 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
14572 __FUNCTION__
, val
));
14573 dhd
->pub
.suspend_bcn_li_dtim
= val
;
14579 int net_os_set_max_dtim_enable(struct net_device
*dev
, int val
)
14581 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14584 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14585 __FUNCTION__
, (val
? "Enable" : "Disable")));
14587 dhd
->pub
.max_dtim_enable
= TRUE
;
14589 dhd
->pub
.max_dtim_enable
= FALSE
;
14599 net_os_set_disable_dtim_in_suspend(struct net_device
*dev
, int val
)
14601 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14604 DHD_ERROR(("%s: Disable bcn_li_dtim in suspend : %s\n",
14605 __FUNCTION__
, (val
? "Enable" : "Disable")));
14607 dhd
->pub
.disable_dtim_in_suspend
= TRUE
;
14609 dhd
->pub
.disable_dtim_in_suspend
= FALSE
;
14618 #ifdef PKT_FILTER_SUPPORT
14619 int net_os_rxfilter_add_remove(struct net_device
*dev
, int add_remove
, int num
)
14623 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14624 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14626 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__
, add_remove
, num
));
14627 if (!dhd
|| (num
== DHD_UNICAST_FILTER_NUM
)) {
14631 #ifdef BLOCK_IPV6_PACKET
14632 /* customer want to use NO IPV6 packets only */
14633 if (num
== DHD_MULTICAST6_FILTER_NUM
) {
14636 #endif /* BLOCK_IPV6_PACKET */
14638 if (num
>= dhd
->pub
.pktfilter_count
) {
14642 ret
= dhd_packet_filter_add_remove(&dhd
->pub
, add_remove
, num
);
14643 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14648 int dhd_os_enable_packet_filter(dhd_pub_t
*dhdp
, int val
)
14653 /* Packet filtering is set only if we still in early-suspend and
14654 * we need either to turn it ON or turn it OFF
14655 * We can always turn it OFF in case of early-suspend, but we turn it
14656 * back ON only if suspend_disable_flag was not set
14658 if (dhdp
&& dhdp
->up
) {
14659 if (dhdp
->in_suspend
) {
14660 if (!val
|| (val
&& !dhdp
->suspend_disable_flag
))
14661 dhd_enable_packet_filter(val
, dhdp
);
14667 /* function to enable/disable packet for Network device */
14668 int net_os_enable_packet_filter(struct net_device
*dev
, int val
)
14670 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14672 DHD_ERROR(("%s: val = %d\n", __FUNCTION__
, val
));
14673 return dhd_os_enable_packet_filter(&dhd
->pub
, val
);
14675 #endif /* PKT_FILTER_SUPPORT */
14678 dhd_dev_init_ioctl(struct net_device
*dev
)
14680 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14683 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0)
14691 dhd_dev_get_feature_set(struct net_device
*dev
)
14693 dhd_info_t
*ptr
= *(dhd_info_t
**)netdev_priv(dev
);
14694 dhd_pub_t
*dhd
= (&ptr
->pub
);
14695 int feature_set
= 0;
14697 if (FW_SUPPORTED(dhd
, sta
))
14698 feature_set
|= WIFI_FEATURE_INFRA
;
14699 if (FW_SUPPORTED(dhd
, dualband
))
14700 feature_set
|= WIFI_FEATURE_INFRA_5G
;
14701 if (FW_SUPPORTED(dhd
, p2p
))
14702 feature_set
|= WIFI_FEATURE_P2P
;
14703 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
)
14704 feature_set
|= WIFI_FEATURE_SOFT_AP
;
14705 if (FW_SUPPORTED(dhd
, tdls
))
14706 feature_set
|= WIFI_FEATURE_TDLS
;
14707 if (FW_SUPPORTED(dhd
, vsdb
))
14708 feature_set
|= WIFI_FEATURE_TDLS_OFFCHANNEL
;
14709 if (FW_SUPPORTED(dhd
, nan
)) {
14710 feature_set
|= WIFI_FEATURE_NAN
;
14711 /* NAN is essentail for d2d rtt */
14712 if (FW_SUPPORTED(dhd
, rttd2d
))
14713 feature_set
|= WIFI_FEATURE_D2D_RTT
;
14716 feature_set
|= WIFI_FEATURE_D2D_RTT
;
14717 feature_set
|= WIFI_FEATURE_D2AP_RTT
;
14718 #endif /* RTT_SUPPORT */
14719 #ifdef LINKSTAT_SUPPORT
14720 feature_set
|= WIFI_FEATURE_LINKSTAT
;
14721 #endif /* LINKSTAT_SUPPORT */
14723 #if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
14724 if (dhd_is_pno_supported(dhd
)) {
14725 feature_set
|= WIFI_FEATURE_PNO
;
14726 #ifdef GSCAN_SUPPORT
14727 feature_set
|= WIFI_FEATURE_GSCAN
;
14728 feature_set
|= WIFI_FEATURE_HAL_EPNO
;
14729 #endif /* GSCAN_SUPPORT */
14731 #endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
14732 #ifdef RSSI_MONITOR_SUPPORT
14733 if (FW_SUPPORTED(dhd
, rssi_mon
)) {
14734 feature_set
|= WIFI_FEATURE_RSSI_MONITOR
;
14736 #endif /* RSSI_MONITOR_SUPPORT */
14738 feature_set
|= WIFI_FEATURE_HOTSPOT
;
14740 #ifdef NDO_CONFIG_SUPPORT
14741 feature_set
|= WIFI_FEATURE_CONFIG_NDO
;
14742 #endif /* NDO_CONFIG_SUPPORT */
14744 feature_set
|= WIFI_FEATURE_MKEEP_ALIVE
;
14745 #endif /* KEEP_ALIVE */
14746 #ifdef SUPPORT_RANDOM_MAC_SCAN
14747 feature_set
|= WIFI_FEATURE_SCAN_RAND
;
14748 #endif /* SUPPORT_RANDOM_MAC_SCAN */
14750 if (FW_SUPPORTED(dhd
, fie
)) {
14751 feature_set
|= WIFI_FEATURE_FILTER_IE
;
14753 #endif /* FILTER_IE */
14754 #ifdef ROAMEXP_SUPPORT
14755 /* Check if the Android O roam feature is supported by FW */
14756 if (!(BCME_UNSUPPORTED
== dhd_dev_set_whitelist_ssid(dev
, NULL
, 0, true))) {
14757 feature_set
|= WIFI_FEATURE_CONTROL_ROAMING
;
14759 #endif /* ROAMEXP_SUPPORT */
14760 return feature_set
;
14764 dhd_dev_get_feature_set_matrix(struct net_device
*dev
, int num
)
14766 int feature_set_full
;
14769 feature_set_full
= dhd_dev_get_feature_set(dev
);
14771 /* Common feature set for all interface */
14772 ret
= (feature_set_full
& WIFI_FEATURE_INFRA
) |
14773 (feature_set_full
& WIFI_FEATURE_INFRA_5G
) |
14774 (feature_set_full
& WIFI_FEATURE_D2D_RTT
) |
14775 (feature_set_full
& WIFI_FEATURE_D2AP_RTT
) |
14776 (feature_set_full
& WIFI_FEATURE_RSSI_MONITOR
) |
14777 (feature_set_full
& WIFI_FEATURE_EPR
);
14779 /* Specific feature group for each interface */
14782 ret
|= (feature_set_full
& WIFI_FEATURE_P2P
) |
14783 /* Not supported yet */
14784 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14785 (feature_set_full
& WIFI_FEATURE_TDLS
) |
14786 (feature_set_full
& WIFI_FEATURE_PNO
) |
14787 (feature_set_full
& WIFI_FEATURE_HAL_EPNO
) |
14788 (feature_set_full
& WIFI_FEATURE_BATCH_SCAN
) |
14789 (feature_set_full
& WIFI_FEATURE_GSCAN
) |
14790 (feature_set_full
& WIFI_FEATURE_HOTSPOT
) |
14791 (feature_set_full
& WIFI_FEATURE_ADDITIONAL_STA
);
14795 ret
|= (feature_set_full
& WIFI_FEATURE_P2P
);
14796 /* Not yet verified NAN with P2P */
14797 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14801 ret
|= (feature_set_full
& WIFI_FEATURE_NAN
) |
14802 (feature_set_full
& WIFI_FEATURE_TDLS
) |
14803 (feature_set_full
& WIFI_FEATURE_TDLS_OFFCHANNEL
);
14807 ret
= WIFI_FEATURE_INVALID
;
14808 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__
, num
));
#ifdef CUSTOM_FORCE_NODFS_FLAG
/* Set/clear the platform no-DFS country flag and force a country refresh. */
int
dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (nodfs)
		dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
	else
		dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
	dhd->pub.force_country_change = TRUE;
	return 0;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */
14828 #ifdef NDO_CONFIG_SUPPORT
14830 dhd_dev_ndo_cfg(struct net_device
*dev
, u8 enable
)
14832 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14833 dhd_pub_t
*dhdp
= &dhd
->pub
;
14837 /* enable ND offload feature (will be enabled in FW on suspend) */
14838 dhdp
->ndo_enable
= TRUE
;
14840 /* Update changes of anycast address & DAD failed address */
14841 ret
= dhd_dev_ndo_update_inet6addr(dev
);
14842 if ((ret
< 0) && (ret
!= BCME_NORESOURCE
)) {
14843 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__
, ret
));
14847 /* disable ND offload feature */
14848 dhdp
->ndo_enable
= FALSE
;
14850 /* disable ND offload in FW */
14851 ret
= dhd_ndo_enable(dhdp
, FALSE
);
14853 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__
, ret
));
14859 /* #pragma used as a WAR to fix build failure,
14860 * ignore dropping of 'const' qualifier in 'list_entry' macro
14861 * this pragma disables the warning only for the following function
14863 #pragma GCC diagnostic push
14864 #pragma GCC diagnostic ignored "-Wcast-qual"
14867 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev
*inet6
)
14869 struct inet6_ifaddr
*ifa
;
14870 struct ifacaddr6
*acaddr
= NULL
;
14871 int addr_count
= 0;
14874 read_lock_bh(&inet6
->lock
);
14876 /* Count valid unicast address */
14877 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14878 if ((ifa
->flags
& IFA_F_DADFAILED
) == 0) {
14883 /* Count anycast address */
14884 acaddr
= inet6
->ac_list
;
14887 acaddr
= acaddr
->aca_next
;
14891 read_unlock_bh(&inet6
->lock
);
14897 dhd_dev_ndo_update_inet6addr(struct net_device
*dev
)
14901 struct inet6_dev
*inet6
;
14902 struct inet6_ifaddr
*ifa
;
14903 struct ifacaddr6
*acaddr
= NULL
;
14904 struct in6_addr
*ipv6_addr
= NULL
;
14909 * this function evaulates host ip address in struct inet6_dev
14910 * unicast addr in inet6_dev->addr_list
14911 * anycast addr in inet6_dev->ac_list
14912 * while evaluating inet6_dev, read_lock_bh() is required to prevent
14913 * access on null(freed) pointer.
14917 inet6
= dev
->ip6_ptr
;
14919 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__
));
14923 dhd
= DHD_DEV_INFO(dev
);
14925 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__
));
14930 if (dhd_net2idx(dhd
, dev
) != 0) {
14931 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__
));
14935 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__
));
14939 /* Check host IP overflow */
14940 cnt
= dhd_dev_ndo_get_valid_inet6addr_count(inet6
);
14941 if (cnt
> dhdp
->ndo_max_host_ip
) {
14942 if (!dhdp
->ndo_host_ip_overflow
) {
14943 dhdp
->ndo_host_ip_overflow
= TRUE
;
14944 /* Disable ND offload in FW */
14945 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__
));
14946 ret
= dhd_ndo_enable(dhdp
, FALSE
);
14953 * Allocate ipv6 addr buffer to store addresses to be added/removed.
14954 * driver need to lock inet6_dev while accessing structure. but, driver
14955 * cannot use ioctl while inet6_dev locked since it requires scheduling
14956 * hence, copy addresses to the buffer and do ioctl after unlock.
14958 ipv6_addr
= (struct in6_addr
*)MALLOC(dhdp
->osh
,
14959 sizeof(struct in6_addr
) * dhdp
->ndo_max_host_ip
);
14961 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__
));
14965 /* Find DAD failed unicast address to be removed */
14967 read_lock_bh(&inet6
->lock
);
14968 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14969 /* DAD failed unicast address */
14970 if ((ifa
->flags
& IFA_F_DADFAILED
) &&
14971 (cnt
< dhdp
->ndo_max_host_ip
)) {
14972 memcpy(&ipv6_addr
[cnt
], &ifa
->addr
, sizeof(struct in6_addr
));
14976 read_unlock_bh(&inet6
->lock
);
14978 /* Remove DAD failed unicast address */
14979 for (i
= 0; i
< cnt
; i
++) {
14980 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__
));
14981 ret
= dhd_ndo_remove_ip_by_addr(dhdp
, (char *)&ipv6_addr
[i
], 0);
14987 /* Remove all anycast address */
14988 ret
= dhd_ndo_remove_ip_by_type(dhdp
, WL_ND_IPV6_ADDR_TYPE_ANYCAST
, 0);
14994 * if ND offload was disabled due to host ip overflow,
14995 * attempt to add valid unicast address.
14997 if (dhdp
->ndo_host_ip_overflow
) {
14998 /* Find valid unicast address */
15000 read_lock_bh(&inet6
->lock
);
15001 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
15002 /* valid unicast address */
15003 if (!(ifa
->flags
& IFA_F_DADFAILED
) &&
15004 (cnt
< dhdp
->ndo_max_host_ip
)) {
15005 memcpy(&ipv6_addr
[cnt
], &ifa
->addr
,
15006 sizeof(struct in6_addr
));
15010 read_unlock_bh(&inet6
->lock
);
15012 /* Add valid unicast address */
15013 for (i
= 0; i
< cnt
; i
++) {
15014 ret
= dhd_ndo_add_ip_with_type(dhdp
,
15015 (char *)&ipv6_addr
[i
], WL_ND_IPV6_ADDR_TYPE_UNICAST
, 0);
15022 /* Find anycast address */
15024 read_lock_bh(&inet6
->lock
);
15025 acaddr
= inet6
->ac_list
;
15027 if (cnt
< dhdp
->ndo_max_host_ip
) {
15028 memcpy(&ipv6_addr
[cnt
], &acaddr
->aca_addr
, sizeof(struct in6_addr
));
15031 acaddr
= acaddr
->aca_next
;
15033 read_unlock_bh(&inet6
->lock
);
15035 /* Add anycast address */
15036 for (i
= 0; i
< cnt
; i
++) {
15037 ret
= dhd_ndo_add_ip_with_type(dhdp
,
15038 (char *)&ipv6_addr
[i
], WL_ND_IPV6_ADDR_TYPE_ANYCAST
, 0);
15044 /* Now All host IP addr were added successfully */
15045 if (dhdp
->ndo_host_ip_overflow
) {
15046 dhdp
->ndo_host_ip_overflow
= FALSE
;
15047 if (dhdp
->in_suspend
) {
15048 /* drvier is in (early) suspend state, need to enable ND offload in FW */
15049 DHD_INFO(("%s: enable NDO\n", __FUNCTION__
));
15050 ret
= dhd_ndo_enable(dhdp
, TRUE
);
15056 MFREE(dhdp
->osh
, ipv6_addr
, sizeof(struct in6_addr
) * dhdp
->ndo_max_host_ip
);
15061 #pragma GCC diagnostic pop
15063 #endif /* NDO_CONFIG_SUPPORT */
15066 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
15068 dhd_dev_pno_stop_for_ssid(struct net_device
*dev
)
15070 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15072 return (dhd_pno_stop_for_ssid(&dhd
->pub
));
15074 /* Linux wrapper to call common dhd_pno_set_for_ssid */
15076 dhd_dev_pno_set_for_ssid(struct net_device
*dev
, wlc_ssid_ext_t
* ssids_local
, int nssid
,
15077 uint16 scan_fr
, int pno_repeat
, int pno_freq_expo_max
, uint16
*channel_list
, int nchan
)
15079 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15081 return (dhd_pno_set_for_ssid(&dhd
->pub
, ssids_local
, nssid
, scan_fr
,
15082 pno_repeat
, pno_freq_expo_max
, channel_list
, nchan
));
15085 /* Linux wrapper to call common dhd_pno_enable */
15087 dhd_dev_pno_enable(struct net_device
*dev
, int enable
)
15089 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15091 return (dhd_pno_enable(&dhd
->pub
, enable
));
15094 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
15096 dhd_dev_pno_set_for_hotlist(struct net_device
*dev
, wl_pfn_bssid_t
*p_pfn_bssid
,
15097 struct dhd_pno_hotlist_params
*hotlist_params
)
15099 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15100 return (dhd_pno_set_for_hotlist(&dhd
->pub
, p_pfn_bssid
, hotlist_params
));
15102 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
15104 dhd_dev_pno_stop_for_batch(struct net_device
*dev
)
15106 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15107 return (dhd_pno_stop_for_batch(&dhd
->pub
));
15109 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
15111 dhd_dev_pno_set_for_batch(struct net_device
*dev
, struct dhd_pno_batch_params
*batch_params
)
15113 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15114 return (dhd_pno_set_for_batch(&dhd
->pub
, batch_params
));
15116 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
15118 dhd_dev_pno_get_for_batch(struct net_device
*dev
, char *buf
, int bufsize
)
15120 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15121 return (dhd_pno_get_for_batch(&dhd
->pub
, buf
, bufsize
, PNO_STATUS_NORMAL
));
15123 #endif /* PNO_SUPPORT */
15125 #if defined(PNO_SUPPORT)
15126 #ifdef GSCAN_SUPPORT
15128 dhd_dev_is_legacy_pno_enabled(struct net_device
*dev
)
15130 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15132 return (dhd_is_legacy_pno_enabled(&dhd
->pub
));
15136 dhd_dev_set_epno(struct net_device
*dev
)
15138 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15142 return dhd_pno_set_epno(&dhd
->pub
);
15145 dhd_dev_flush_fw_epno(struct net_device
*dev
)
15147 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15151 return dhd_pno_flush_fw_epno(&dhd
->pub
);
15154 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15156 dhd_dev_pno_set_cfg_gscan(struct net_device
*dev
, dhd_pno_gscan_cmd_cfg_t type
,
15157 void *buf
, bool flush
)
15159 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15161 return (dhd_pno_set_cfg_gscan(&dhd
->pub
, type
, buf
, flush
));
15164 /* Linux wrapper to call common dhd_wait_batch_results_complete */
15166 dhd_dev_wait_batch_results_complete(struct net_device
*dev
)
15168 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15170 return (dhd_wait_batch_results_complete(&dhd
->pub
));
15173 /* Linux wrapper to call common dhd_pno_lock_batch_results */
15175 dhd_dev_pno_lock_access_batch_results(struct net_device
*dev
)
15177 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15179 return (dhd_pno_lock_batch_results(&dhd
->pub
));
15181 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
15183 dhd_dev_pno_unlock_access_batch_results(struct net_device
*dev
)
15185 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15187 return (dhd_pno_unlock_batch_results(&dhd
->pub
));
15190 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
15192 dhd_dev_pno_run_gscan(struct net_device
*dev
, bool run
, bool flush
)
15194 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15196 return (dhd_pno_initiate_gscan_request(&dhd
->pub
, run
, flush
));
15199 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
15201 dhd_dev_pno_enable_full_scan_result(struct net_device
*dev
, bool real_time_flag
)
15203 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15205 return (dhd_pno_enable_full_scan_result(&dhd
->pub
, real_time_flag
));
15208 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
15210 dhd_dev_hotlist_scan_event(struct net_device
*dev
,
15211 const void *data
, int *send_evt_bytes
, hotlist_type_t type
, u32
*buf_len
)
15213 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15215 return (dhd_handle_hotlist_scan_evt(&dhd
->pub
, data
, send_evt_bytes
, type
, buf_len
));
15218 /* Linux wrapper to call common dhd_process_full_gscan_result */
15220 dhd_dev_process_full_gscan_result(struct net_device
*dev
,
15221 const void *data
, uint32 len
, int *send_evt_bytes
)
15223 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15225 return (dhd_process_full_gscan_result(&dhd
->pub
, data
, len
, send_evt_bytes
));
15229 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device
*dev
, hotlist_type_t type
)
15231 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15233 dhd_gscan_hotlist_cache_cleanup(&dhd
->pub
, type
);
15239 dhd_dev_gscan_batch_cache_cleanup(struct net_device
*dev
)
15241 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15243 return (dhd_gscan_batch_cache_cleanup(&dhd
->pub
));
15246 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
15248 dhd_dev_retrieve_batch_scan(struct net_device
*dev
)
15250 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15252 return (dhd_retreive_batch_scan_results(&dhd
->pub
));
15254 /* Linux wrapper to call common dhd_pno_process_epno_result */
15255 void * dhd_dev_process_epno_result(struct net_device
*dev
,
15256 const void *data
, uint32 event
, int *send_evt_bytes
)
15258 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15260 return (dhd_pno_process_epno_result(&dhd
->pub
, data
, event
, send_evt_bytes
));
15264 dhd_dev_set_lazy_roam_cfg(struct net_device
*dev
,
15265 wlc_roam_exp_params_t
*roam_param
)
15267 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15268 wl_roam_exp_cfg_t roam_exp_cfg
;
15272 return BCME_BADARG
;
15275 DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
15276 roam_param
->a_band_boost_threshold
, roam_param
->a_band_penalty_threshold
));
15277 DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
15278 roam_param
->a_band_boost_factor
, roam_param
->a_band_penalty_factor
,
15279 roam_param
->cur_bssid_boost
));
15280 DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
15281 roam_param
->alert_roam_trigger_threshold
, roam_param
->a_band_max_boost
));
15283 memcpy(&roam_exp_cfg
.params
, roam_param
, sizeof(*roam_param
));
15284 roam_exp_cfg
.version
= ROAM_EXP_CFG_VERSION
;
15285 roam_exp_cfg
.flags
= ROAM_EXP_CFG_PRESENT
;
15286 if (dhd
->pub
.lazy_roam_enable
) {
15287 roam_exp_cfg
.flags
|= ROAM_EXP_ENABLE_FLAG
;
15289 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_params",
15290 (char *)&roam_exp_cfg
, sizeof(roam_exp_cfg
), NULL
, 0,
15293 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__
, err
));
15299 dhd_dev_lazy_roam_enable(struct net_device
*dev
, uint32 enable
)
15302 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15303 wl_roam_exp_cfg_t roam_exp_cfg
;
15305 memset(&roam_exp_cfg
, 0, sizeof(roam_exp_cfg
));
15306 roam_exp_cfg
.version
= ROAM_EXP_CFG_VERSION
;
15308 roam_exp_cfg
.flags
= ROAM_EXP_ENABLE_FLAG
;
15311 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_params",
15312 (char *)&roam_exp_cfg
, sizeof(roam_exp_cfg
), NULL
, 0,
15315 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__
, err
));
15317 dhd
->pub
.lazy_roam_enable
= (enable
!= 0);
15322 dhd_dev_set_lazy_roam_bssid_pref(struct net_device
*dev
,
15323 wl_bssid_pref_cfg_t
*bssid_pref
, uint32 flush
)
15327 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15329 bssid_pref
->version
= BSSID_PREF_LIST_VERSION
;
15330 /* By default programming bssid pref flushes out old values */
15331 bssid_pref
->flags
= (flush
&& !bssid_pref
->count
) ? ROAM_EXP_CLEAR_BSSID_PREF
: 0;
15332 len
= sizeof(wl_bssid_pref_cfg_t
);
15333 if (bssid_pref
->count
) {
15334 len
+= (bssid_pref
->count
- 1) * sizeof(wl_bssid_pref_list_t
);
15336 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_bssid_pref",
15337 (char *)bssid_pref
, len
, NULL
, 0, TRUE
);
15338 if (err
!= BCME_OK
) {
15339 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__
, err
));
15343 #endif /* GSCAN_SUPPORT */
15344 #if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
15346 dhd_dev_set_blacklist_bssid(struct net_device
*dev
, maclist_t
*blacklist
,
15347 uint32 len
, uint32 flush
)
15350 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15354 err
= dhd_wl_ioctl_cmd(&(dhd
->pub
), WLC_SET_MACLIST
, (char *)blacklist
,
15356 if (err
!= BCME_OK
) {
15357 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__
, err
));
15361 /* By default programming blacklist flushes out old values */
15362 macmode
= (flush
&& !blacklist
) ? WLC_MACMODE_DISABLED
: WLC_MACMODE_DENY
;
15363 err
= dhd_wl_ioctl_cmd(&(dhd
->pub
), WLC_SET_MACMODE
, (char *)&macmode
,
15364 sizeof(macmode
), TRUE
, 0);
15365 if (err
!= BCME_OK
) {
15366 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__
, err
));
15371 dhd_dev_set_whitelist_ssid(struct net_device
*dev
, wl_ssid_whitelist_t
*ssid_whitelist
,
15372 uint32 len
, uint32 flush
)
15375 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15376 wl_ssid_whitelist_t whitelist_ssid_flush
;
15378 if (!ssid_whitelist
) {
15380 ssid_whitelist
= &whitelist_ssid_flush
;
15381 ssid_whitelist
->ssid_count
= 0;
15383 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__
));
15384 return BCME_BADARG
;
15387 ssid_whitelist
->version
= SSID_WHITELIST_VERSION
;
15388 ssid_whitelist
->flags
= flush
? ROAM_EXP_CLEAR_SSID_WHITELIST
: 0;
15389 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist
, len
, NULL
,
15391 if (err
!= BCME_OK
) {
15392 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__
, err
));
15396 #endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
15397 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15398 /* Linux wrapper to call common dhd_pno_get_gscan */
15400 dhd_dev_pno_get_gscan(struct net_device
*dev
, dhd_pno_gscan_cmd_cfg_t type
,
15401 void *info
, uint32
*len
)
15403 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15405 return (dhd_pno_get_gscan(&dhd
->pub
, type
, info
, len
));
15407 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15410 #ifdef RSSI_MONITOR_SUPPORT
15412 dhd_dev_set_rssi_monitor_cfg(struct net_device
*dev
, int start
,
15413 int8 max_rssi
, int8 min_rssi
)
15416 wl_rssi_monitor_cfg_t rssi_monitor
;
15417 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15419 rssi_monitor
.version
= RSSI_MONITOR_VERSION
;
15420 rssi_monitor
.max_rssi
= max_rssi
;
15421 rssi_monitor
.min_rssi
= min_rssi
;
15422 rssi_monitor
.flags
= start
? 0: RSSI_MONITOR_STOP
;
15423 err
= dhd_iovar(&dhd
->pub
, 0, "rssi_monitor", (char *)&rssi_monitor
, sizeof(rssi_monitor
),
15425 if (err
< 0 && err
!= BCME_UNSUPPORTED
) {
15426 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__
, err
));
15430 #endif /* RSSI_MONITOR_SUPPORT */
15432 #ifdef DHDTCPACK_SUPPRESS
15434 dhd_dev_set_tcpack_sup_mode_cfg(struct net_device
*dev
, uint8 enable
)
15437 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15439 err
= dhd_tcpack_suppress_set(&dhd
->pub
, enable
);
15440 if (err
!= BCME_OK
) {
15441 DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__
, err
));
15445 #endif /* DHDTCPACK_SUPPRESS */
15448 dhd_dev_cfg_rand_mac_oui(struct net_device
*dev
, uint8
*oui
)
15450 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15451 dhd_pub_t
*dhdp
= &dhd
->pub
;
15453 if (!dhdp
|| !oui
) {
15454 DHD_ERROR(("NULL POINTER : %s\n",
15458 if (ETHER_ISMULTI(oui
)) {
15459 DHD_ERROR(("Expected unicast OUI\n"));
15462 uint8
*rand_mac_oui
= dhdp
->rand_mac_oui
;
15463 memcpy(rand_mac_oui
, oui
, DOT11_OUI_LEN
);
15464 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG
"\n",
15465 MACOUI2STRDBG(rand_mac_oui
)));
15471 dhd_set_rand_mac_oui(dhd_pub_t
*dhd
)
15474 wl_pfn_macaddr_cfg_t wl_cfg
;
15475 uint8
*rand_mac_oui
= dhd
->rand_mac_oui
;
15477 memset(&wl_cfg
.macaddr
, 0, ETHER_ADDR_LEN
);
15478 memcpy(&wl_cfg
.macaddr
, rand_mac_oui
, DOT11_OUI_LEN
);
15479 wl_cfg
.version
= WL_PFN_MACADDR_CFG_VER
;
15480 if (ETHER_ISNULLADDR(&wl_cfg
.macaddr
)) {
15483 wl_cfg
.flags
= (WL_PFN_MAC_OUI_ONLY_MASK
| WL_PFN_SET_MAC_UNASSOC_MASK
);
15486 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG
"\n",
15487 MACOUI2STRDBG(rand_mac_oui
)));
15489 err
= dhd_iovar(dhd
, 0, "pfn_macaddr", (char *)&wl_cfg
, sizeof(wl_cfg
), NULL
, 0, TRUE
);
15491 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__
, err
));
15497 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15499 dhd_dev_rtt_set_cfg(struct net_device
*dev
, void *buf
)
15501 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15503 return (dhd_rtt_set_cfg(&dhd
->pub
, buf
));
15507 dhd_dev_rtt_cancel_cfg(struct net_device
*dev
, struct ether_addr
*mac_list
, int mac_cnt
)
15509 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15511 return (dhd_rtt_stop(&dhd
->pub
, mac_list
, mac_cnt
));
15515 dhd_dev_rtt_register_noti_callback(struct net_device
*dev
, void *ctx
, dhd_rtt_compl_noti_fn noti_fn
)
15517 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15519 return (dhd_rtt_register_noti_callback(&dhd
->pub
, ctx
, noti_fn
));
15523 dhd_dev_rtt_unregister_noti_callback(struct net_device
*dev
, dhd_rtt_compl_noti_fn noti_fn
)
15525 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15527 return (dhd_rtt_unregister_noti_callback(&dhd
->pub
, noti_fn
));
15531 dhd_dev_rtt_capability(struct net_device
*dev
, rtt_capabilities_t
*capa
)
15533 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15535 return (dhd_rtt_capability(&dhd
->pub
, capa
));
15539 dhd_dev_rtt_avail_channel(struct net_device
*dev
, wifi_channel_info
*channel_info
)
15541 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15542 return (dhd_rtt_avail_channel(&dhd
->pub
, channel_info
));
15546 dhd_dev_rtt_enable_responder(struct net_device
*dev
, wifi_channel_info
*channel_info
)
15548 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15549 return (dhd_rtt_enable_responder(&dhd
->pub
, channel_info
));
15552 int dhd_dev_rtt_cancel_responder(struct net_device
*dev
)
15554 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15555 return (dhd_rtt_cancel_responder(&dhd
->pub
));
15558 #endif /* RTT_SUPPORT */
15561 #define KA_TEMP_BUF_SIZE 512
15562 #define KA_FRAME_SIZE 300
15565 dhd_dev_start_mkeep_alive(dhd_pub_t
*dhd_pub
, uint8 mkeep_alive_id
, uint8
*ip_pkt
,
15566 uint16 ip_pkt_len
, uint8
* src_mac
, uint8
* dst_mac
, uint32 period_msec
)
15568 const int ETHERTYPE_LEN
= 2;
15571 wl_mkeep_alive_pkt_t mkeep_alive_pkt
;
15572 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
= NULL
;
15575 int res
= BCME_ERROR
;
15579 /* ether frame to have both max IP pkt (256 bytes) and ether header */
15580 char *pmac_frame
= NULL
;
15581 char *pmac_frame_begin
= NULL
;
15584 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15585 * dongle shall reject a mkeep_alive request.
15587 if (!dhd_support_sta_mode(dhd_pub
))
15590 DHD_TRACE(("%s execution\n", __FUNCTION__
));
15592 if ((pbuf
= MALLOCZ(dhd_pub
->osh
, KA_TEMP_BUF_SIZE
)) == NULL
) {
15593 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE
));
15598 if ((pmac_frame
= MALLOCZ(dhd_pub
->osh
, KA_FRAME_SIZE
)) == NULL
) {
15599 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE
));
15603 pmac_frame_begin
= pmac_frame
;
15606 * Get current mkeep-alive status.
15608 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive", &mkeep_alive_id
, sizeof(mkeep_alive_id
), pbuf
,
15609 KA_TEMP_BUF_SIZE
, FALSE
);
15611 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__
, res
));
15614 /* Check available ID whether it is occupied */
15615 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) pbuf
;
15616 if (dtoh32(mkeep_alive_pktp
->period_msec
!= 0)) {
15617 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15618 __FUNCTION__
, mkeep_alive_id
));
15620 /* Current occupied ID info */
15621 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__
));
15622 DHD_ERROR((" Id : %d\n"
15623 " Period: %d msec\n"
15626 mkeep_alive_pktp
->keep_alive_id
,
15627 dtoh32(mkeep_alive_pktp
->period_msec
),
15628 dtoh16(mkeep_alive_pktp
->len_bytes
)));
15630 for (i
= 0; i
< mkeep_alive_pktp
->len_bytes
; i
++) {
15631 DHD_ERROR(("%02x", mkeep_alive_pktp
->data
[i
]));
15635 res
= BCME_NOTFOUND
;
15640 /* Request the specified ID */
15641 memset(&mkeep_alive_pkt
, 0, sizeof(wl_mkeep_alive_pkt_t
));
15642 memset(pbuf
, 0, KA_TEMP_BUF_SIZE
);
15643 str
= "mkeep_alive";
15644 str_len
= strlen(str
);
15645 strncpy(pbuf
, str
, str_len
);
15646 pbuf
[str_len
] = '\0';
15648 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) (pbuf
+ str_len
+ 1);
15649 mkeep_alive_pkt
.period_msec
= htod32(period_msec
);
15650 buf_len
= str_len
+ 1;
15651 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
15652 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
15655 mkeep_alive_pkt
.keep_alive_id
= mkeep_alive_id
;
15657 buf_len
+= WL_MKEEP_ALIVE_FIXED_LEN
;
15660 * Build up Ethernet Frame
15663 /* Mapping dest mac addr */
15664 memcpy(pmac_frame
, dst_mac
, ETHER_ADDR_LEN
);
15665 pmac_frame
+= ETHER_ADDR_LEN
;
15667 /* Mapping src mac addr */
15668 memcpy(pmac_frame
, src_mac
, ETHER_ADDR_LEN
);
15669 pmac_frame
+= ETHER_ADDR_LEN
;
15671 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15672 *(pmac_frame
++) = 0x08;
15673 *(pmac_frame
++) = 0x00;
15675 /* Mapping IP pkt */
15676 memcpy(pmac_frame
, ip_pkt
, ip_pkt_len
);
15677 pmac_frame
+= ip_pkt_len
;
15680 * Length of ether frame (assume to be all hexa bytes)
15681 * = src mac + dst mac + ether type + ip pkt len
15683 len_bytes
= ETHER_ADDR_LEN
*2 + ETHERTYPE_LEN
+ ip_pkt_len
;
15684 memcpy(mkeep_alive_pktp
->data
, pmac_frame_begin
, len_bytes
);
15685 buf_len
+= len_bytes
;
15686 mkeep_alive_pkt
.len_bytes
= htod16(len_bytes
);
15689 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15690 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15691 * guarantee that the buffer is properly aligned.
15693 memcpy((char *)mkeep_alive_pktp
, &mkeep_alive_pkt
, WL_MKEEP_ALIVE_FIXED_LEN
);
15695 res
= dhd_wl_ioctl_cmd(dhd_pub
, WLC_SET_VAR
, pbuf
, buf_len
, TRUE
, 0);
15697 if (pmac_frame_begin
) {
15698 MFREE(dhd_pub
->osh
, pmac_frame_begin
, KA_FRAME_SIZE
);
15699 pmac_frame_begin
= NULL
;
15702 MFREE(dhd_pub
->osh
, pbuf
, KA_TEMP_BUF_SIZE
);
15709 dhd_dev_stop_mkeep_alive(dhd_pub_t
*dhd_pub
, uint8 mkeep_alive_id
)
15712 wl_mkeep_alive_pkt_t mkeep_alive_pkt
;
15713 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
= NULL
;
15714 int res
= BCME_ERROR
;
15718 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15719 * dongle shall reject a mkeep_alive request.
15721 if (!dhd_support_sta_mode(dhd_pub
))
15724 DHD_TRACE(("%s execution\n", __FUNCTION__
));
15727 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15729 if ((pbuf
= MALLOC(dhd_pub
->osh
, KA_TEMP_BUF_SIZE
)) == NULL
) {
15730 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE
));
15734 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive", &mkeep_alive_id
,
15735 sizeof(mkeep_alive_id
), pbuf
, KA_TEMP_BUF_SIZE
, FALSE
);
15737 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__
, res
));
15740 /* Check occupied ID */
15741 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) pbuf
;
15742 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__
));
15743 DHD_INFO((" Id : %d\n"
15744 " Period: %d msec\n"
15747 mkeep_alive_pktp
->keep_alive_id
,
15748 dtoh32(mkeep_alive_pktp
->period_msec
),
15749 dtoh16(mkeep_alive_pktp
->len_bytes
)));
15751 for (i
= 0; i
< mkeep_alive_pktp
->len_bytes
; i
++) {
15752 DHD_INFO(("%02x", mkeep_alive_pktp
->data
[i
]));
15757 /* Make it stop if available */
15758 if (dtoh32(mkeep_alive_pktp
->period_msec
!= 0)) {
15759 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id
));
15760 memset(&mkeep_alive_pkt
, 0, sizeof(wl_mkeep_alive_pkt_t
));
15762 mkeep_alive_pkt
.period_msec
= 0;
15763 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
15764 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
15765 mkeep_alive_pkt
.keep_alive_id
= mkeep_alive_id
;
15767 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive",
15768 (char *)&mkeep_alive_pkt
,
15769 WL_MKEEP_ALIVE_FIXED_LEN
, NULL
, 0, TRUE
);
15771 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__
, mkeep_alive_id
));
15772 res
= BCME_NOTFOUND
;
15776 MFREE(dhd_pub
->osh
, pbuf
, KA_TEMP_BUF_SIZE
);
15781 #endif /* KEEP_ALIVE */
15783 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15784 static void _dhd_apf_lock_local(dhd_info_t
*dhd
)
15786 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15788 mutex_lock(&dhd
->dhd_apf_mutex
);
15793 static void _dhd_apf_unlock_local(dhd_info_t
*dhd
)
15795 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15797 mutex_unlock(&dhd
->dhd_apf_mutex
);
15803 __dhd_apf_add_filter(struct net_device
*ndev
, uint32 filter_id
,
15804 u8
* program
, uint32 program_len
)
15806 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15807 dhd_pub_t
*dhdp
= &dhd
->pub
;
15808 wl_pkt_filter_t
* pkt_filterp
;
15809 wl_apf_program_t
*apf_program
;
15811 u32 cmd_len
, buf_len
;
15813 char cmd
[] = "pkt_filter_add";
15815 ifidx
= dhd_net2idx(dhd
, ndev
);
15816 if (ifidx
== DHD_BAD_IF
) {
15817 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15821 cmd_len
= sizeof(cmd
);
15823 /* Check if the program_len is more than the expected len
15824 * and if the program is NULL return from here.
15826 if ((program_len
> WL_APF_PROGRAM_MAX_SIZE
) || (program
== NULL
)) {
15827 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15828 __FUNCTION__
, program_len
, program
));
15831 buf_len
= cmd_len
+ WL_PKT_FILTER_FIXED_LEN
+
15832 WL_APF_PROGRAM_FIXED_LEN
+ program_len
;
15834 buf
= MALLOCZ(dhdp
->osh
, buf_len
);
15835 if (unlikely(!buf
)) {
15836 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__
, buf_len
));
15840 memcpy(buf
, cmd
, cmd_len
);
15842 pkt_filterp
= (wl_pkt_filter_t
*) (buf
+ cmd_len
);
15843 pkt_filterp
->id
= htod32(filter_id
);
15844 pkt_filterp
->negate_match
= htod32(FALSE
);
15845 pkt_filterp
->type
= htod32(WL_PKT_FILTER_TYPE_APF_MATCH
);
15847 apf_program
= &pkt_filterp
->u
.apf_program
;
15848 apf_program
->version
= htod16(WL_APF_INTERNAL_VERSION
);
15849 apf_program
->instr_len
= htod16(program_len
);
15850 memcpy(apf_program
->instrs
, program
, program_len
);
15852 ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, ifidx
);
15853 if (unlikely(ret
)) {
15854 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15855 __FUNCTION__
, filter_id
, ret
));
15859 MFREE(dhdp
->osh
, buf
, buf_len
);
15865 __dhd_apf_config_filter(struct net_device
*ndev
, uint32 filter_id
,
15866 uint32 mode
, uint32 enable
)
15868 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15869 dhd_pub_t
*dhdp
= &dhd
->pub
;
15870 wl_pkt_filter_enable_t
* pkt_filterp
;
15872 u32 cmd_len
, buf_len
;
15874 char cmd
[] = "pkt_filter_enable";
15876 ifidx
= dhd_net2idx(dhd
, ndev
);
15877 if (ifidx
== DHD_BAD_IF
) {
15878 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15882 cmd_len
= sizeof(cmd
);
15883 buf_len
= cmd_len
+ sizeof(*pkt_filterp
);
15885 buf
= MALLOCZ(dhdp
->osh
, buf_len
);
15886 if (unlikely(!buf
)) {
15887 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__
, buf_len
));
15891 memcpy(buf
, cmd
, cmd_len
);
15893 pkt_filterp
= (wl_pkt_filter_enable_t
*) (buf
+ cmd_len
);
15894 pkt_filterp
->id
= htod32(filter_id
);
15895 pkt_filterp
->enable
= htod32(enable
);
15897 ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, ifidx
);
15898 if (unlikely(ret
)) {
15899 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15900 __FUNCTION__
, filter_id
, ret
));
15904 ret
= dhd_wl_ioctl_set_intiovar(dhdp
, "pkt_filter_mode", dhd_master_mode
,
15905 WLC_SET_VAR
, TRUE
, ifidx
);
15906 if (unlikely(ret
)) {
15907 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15908 __FUNCTION__
, filter_id
, ret
));
15913 MFREE(dhdp
->osh
, buf
, buf_len
);
15919 __dhd_apf_delete_filter(struct net_device
*ndev
, uint32 filter_id
)
15921 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(ndev
);
15922 dhd_pub_t
*dhdp
= &dhd
->pub
;
15925 ifidx
= dhd_net2idx(dhd
, ndev
);
15926 if (ifidx
== DHD_BAD_IF
) {
15927 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15931 ret
= dhd_wl_ioctl_set_intiovar(dhdp
, "pkt_filter_delete",
15932 htod32(filter_id
), WLC_SET_VAR
, TRUE
, ifidx
);
15933 if (unlikely(ret
)) {
15934 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15935 __FUNCTION__
, filter_id
, ret
));
15941 void dhd_apf_lock(struct net_device
*dev
)
15943 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15944 _dhd_apf_lock_local(dhd
);
15947 void dhd_apf_unlock(struct net_device
*dev
)
15949 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15950 _dhd_apf_unlock_local(dhd
);
15954 dhd_dev_apf_get_version(struct net_device
*ndev
, uint32
*version
)
15956 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15957 dhd_pub_t
*dhdp
= &dhd
->pub
;
15960 if (!FW_SUPPORTED(dhdp
, apf
)) {
15961 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__
));
15964 * Notify Android framework that APF is not supported by setting
15971 ifidx
= dhd_net2idx(dhd
, ndev
);
15972 if (ifidx
== DHD_BAD_IF
) {
15973 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15977 ret
= dhd_wl_ioctl_get_intiovar(dhdp
, "apf_ver", version
,
15978 WLC_GET_VAR
, FALSE
, ifidx
);
15979 if (unlikely(ret
)) {
15980 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15981 __FUNCTION__
, ret
));
15988 dhd_dev_apf_get_max_len(struct net_device
*ndev
, uint32
*max_len
)
15990 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(ndev
);
15991 dhd_pub_t
*dhdp
= &dhd
->pub
;
15994 if (!FW_SUPPORTED(dhdp
, apf
)) {
15995 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__
));
16000 ifidx
= dhd_net2idx(dhd
, ndev
);
16001 if (ifidx
== DHD_BAD_IF
) {
16002 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
16006 ret
= dhd_wl_ioctl_get_intiovar(dhdp
, "apf_size_limit", max_len
,
16007 WLC_GET_VAR
, FALSE
, ifidx
);
16008 if (unlikely(ret
)) {
16009 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
16010 __FUNCTION__
, ret
));
16017 dhd_dev_apf_add_filter(struct net_device
*ndev
, u8
* program
,
16018 uint32 program_len
)
16020 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16021 dhd_pub_t
*dhdp
= &dhd
->pub
;
16024 DHD_APF_LOCK(ndev
);
16026 /* delete, if filter already exists */
16027 if (dhdp
->apf_set
) {
16028 ret
= __dhd_apf_delete_filter(ndev
, PKT_FILTER_APF_ID
);
16029 if (unlikely(ret
)) {
16032 dhdp
->apf_set
= FALSE
;
16035 ret
= __dhd_apf_add_filter(ndev
, PKT_FILTER_APF_ID
, program
, program_len
);
16039 dhdp
->apf_set
= TRUE
;
16041 if (dhdp
->in_suspend
&& dhdp
->apf_set
&& !(dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
)) {
16042 /* Driver is still in (early) suspend state, enable APF filter back */
16043 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
16044 PKT_FILTER_MODE_FORWARD_ON_MATCH
, TRUE
);
16047 DHD_APF_UNLOCK(ndev
);
16053 dhd_dev_apf_enable_filter(struct net_device
*ndev
)
16055 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16056 dhd_pub_t
*dhdp
= &dhd
->pub
;
16058 bool nan_dp_active
= false;
16060 DHD_APF_LOCK(ndev
);
16062 nan_dp_active
= wl_cfgnan_is_dp_active(ndev
);
16063 #endif /* WL_NAN */
16064 if (dhdp
->apf_set
&& (!(dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
) &&
16066 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
16067 PKT_FILTER_MODE_FORWARD_ON_MATCH
, TRUE
);
16070 DHD_APF_UNLOCK(ndev
);
16076 dhd_dev_apf_disable_filter(struct net_device
*ndev
)
16078 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16079 dhd_pub_t
*dhdp
= &dhd
->pub
;
16082 DHD_APF_LOCK(ndev
);
16084 if (dhdp
->apf_set
) {
16085 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
16086 PKT_FILTER_MODE_FORWARD_ON_MATCH
, FALSE
);
16089 DHD_APF_UNLOCK(ndev
);
16095 dhd_dev_apf_delete_filter(struct net_device
*ndev
)
16097 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16098 dhd_pub_t
*dhdp
= &dhd
->pub
;
16101 DHD_APF_LOCK(ndev
);
16103 if (dhdp
->apf_set
) {
16104 ret
= __dhd_apf_delete_filter(ndev
, PKT_FILTER_APF_ID
);
16106 dhdp
->apf_set
= FALSE
;
16110 DHD_APF_UNLOCK(ndev
);
16114 #endif /* PKT_FILTER_SUPPORT && APF */
16116 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16117 static void dhd_hang_process(struct work_struct
*work_data
)
16119 struct net_device
*dev
;
16120 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
16121 struct net_device
*ndev
;
16123 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16124 /* Ignore compiler warnings due to -Werror=cast-qual */
16125 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16126 #pragma GCC diagnostic push
16127 #pragma GCC diagnostic ignored "-Wcast-qual"
16129 struct dhd_info
*dhd
=
16130 container_of(work_data
, dhd_info_t
, dhd_hang_process_work
);
16131 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16132 #pragma GCC diagnostic pop
16135 dev
= dhd
->iflist
[0]->net
;
16138 #if defined(WL_WIRELESS_EXT)
16139 wl_iw_send_priv_event(dev
, "HANG");
16141 #if defined(WL_CFG80211)
16142 wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
16145 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
16147 * For HW2, dev_close need to be done to recover
16148 * from upper layer after hang. For Interposer skip
16149 * dev_close so that dhd iovars can be used to take
16150 * socramdump after crash, also skip for HW4 as
16151 * handling of hang event is different
16155 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
16156 ndev
= dhd
->iflist
[i
] ? dhd
->iflist
[i
]->net
: NULL
;
16157 if (ndev
&& (ndev
->flags
& IFF_UP
)) {
16158 DHD_ERROR(("ndev->name : %s dev close\n",
16164 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16167 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
16168 extern dhd_pub_t
*link_recovery
;
16169 void dhd_host_recover_link(void)
16171 DHD_ERROR(("****** %s ******\n", __FUNCTION__
));
16172 link_recovery
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
16173 dhd_bus_set_linkdown(link_recovery
, TRUE
);
16174 dhd_os_send_hang_message(link_recovery
);
16176 EXPORT_SYMBOL(dhd_host_recover_link
);
16177 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
16179 int dhd_os_send_hang_message(dhd_pub_t
*dhdp
)
16185 struct net_device
*primary_ndev
;
16186 struct bcm_cfg80211
*cfg
;
16188 primary_ndev
= dhd_linux_get_primary_netdev(dhdp
);
16189 if (!primary_ndev
) {
16190 DHD_ERROR(("%s: Cannot find primary netdev\n",
16195 cfg
= wl_get_cfg(primary_ndev
);
16197 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__
));
16201 /* Skip sending HANG event to framework if driver is not ready */
16202 if (!wl_get_drv_status(cfg
, READY
, primary_ndev
)) {
16203 DHD_ERROR(("%s: device is not ready\n", __FUNCTION__
));
16206 #endif /* WL_CFG80211 */
16208 #if defined(DHD_HANG_SEND_UP_TEST)
16209 if (dhdp
->req_hang_type
) {
16210 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
16211 __FUNCTION__
, dhdp
->req_hang_type
));
16212 dhdp
->req_hang_type
= 0;
16214 #endif /* DHD_HANG_SEND_UP_TEST */
16216 if (!dhdp
->hang_was_sent
) {
16217 #if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
16218 dhdp
->hang_counts
++;
16219 if (dhdp
->hang_counts
>= MAX_CONSECUTIVE_HANG_COUNTS
) {
16220 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
16221 __func__
, dhdp
->hang_counts
));
16224 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
16225 #ifdef DHD_DEBUG_UART
16226 /* If PCIe lane has broken, execute the debug uart application
16227 * to gether a ramdump data from dongle via uart
16229 if (!dhdp
->info
->duart_execute
) {
16230 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
16231 (void *)dhdp
, DHD_WQ_WORK_DEBUG_UART_DUMP
,
16232 dhd_debug_uart_exec_rd
, DHD_WQ_WORK_PRIORITY_HIGH
);
16234 #endif /* DHD_DEBUG_UART */
16235 dhdp
->hang_was_sent
= 1;
16236 #ifdef BT_OVER_SDIO
16237 dhdp
->is_bt_recovery_required
= TRUE
;
16239 schedule_work(&dhdp
->info
->dhd_hang_process_work
);
16246 int net_os_send_hang_message(struct net_device
*dev
)
16248 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16252 /* Report FW problem when enabled */
16253 if (dhd
->pub
.hang_report
) {
16254 #ifdef BT_OVER_SDIO
16255 if (netif_running(dev
)) {
16256 #endif /* BT_OVER_SDIO */
16257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16258 ret
= dhd_os_send_hang_message(&dhd
->pub
);
16260 ret
= wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
16262 #ifdef BT_OVER_SDIO
16264 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__
));
16265 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev
));
16266 #endif /* BT_OVER_SDIO */
16268 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
16275 int net_os_send_hang_message_reason(struct net_device
*dev
, const char *string_num
)
16277 dhd_info_t
*dhd
= NULL
;
16278 dhd_pub_t
*dhdp
= NULL
;
16281 dhd
= DHD_DEV_INFO(dev
);
16286 if (!dhd
|| !dhdp
) {
16290 reason
= bcm_strtoul(string_num
, NULL
, 0);
16291 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__
, reason
));
16293 if ((reason
<= HANG_REASON_MASK
) || (reason
>= HANG_REASON_MAX
)) {
16297 dhdp
->hang_reason
= reason
;
16299 return net_os_send_hang_message(dev
);
16301 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
16303 int dhd_net_wifi_platform_set_power(struct net_device
*dev
, bool on
, unsigned long delay_msec
)
16305 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16306 return wifi_platform_set_power(dhd
->adapter
, on
, delay_msec
);
16309 bool dhd_force_country_change(struct net_device
*dev
)
16311 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16313 if (dhd
&& dhd
->pub
.up
)
16314 return dhd
->pub
.force_country_change
;
16318 void dhd_get_customized_country_code(struct net_device
*dev
, char *country_iso_code
,
16319 wl_country_t
*cspec
)
16321 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16322 dhd_pub_t
*dhdp
= &dhd
->pub
;
16324 BCM_REFERENCE(dhdp
);
16325 if (!CHECK_IS_BLOB(dhdp
) || CHECK_IS_MULT_REGREV(dhdp
)) {
16326 #if defined(CUSTOM_COUNTRY_CODE)
16327 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
,
16328 dhd
->pub
.dhd_cflags
);
16330 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
);
16331 #endif /* CUSTOM_COUNTRY_CODE */
16333 #if !defined(CUSTOM_COUNTRY_CODE)
16335 /* Replace the ccode to XZ if ccode is undefined country */
16336 if (strncmp(country_iso_code
, "", WLC_CNTRY_BUF_SZ
) == 0) {
16337 strlcpy(country_iso_code
, "XZ", WLC_CNTRY_BUF_SZ
);
16338 strlcpy(cspec
->country_abbrev
, country_iso_code
, WLC_CNTRY_BUF_SZ
);
16339 strlcpy(cspec
->ccode
, country_iso_code
, WLC_CNTRY_BUF_SZ
);
16340 DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__
, country_iso_code
));
16343 #endif /* !CUSTOM_COUNTRY_CODE */
16345 #if defined(KEEP_KR_REGREV)
16346 if (strncmp(country_iso_code
, "KR", 3) == 0) {
16347 if (!CHECK_IS_BLOB(dhdp
) || CHECK_IS_MULT_REGREV(dhdp
)) {
16348 if (strncmp(dhd
->pub
.vars_ccode
, "KR", 3) == 0) {
16349 cspec
->rev
= dhd
->pub
.vars_regrev
;
16353 #endif /* KEEP_KR_REGREV */
16355 #ifdef KEEP_JP_REGREV
16356 if (strncmp(country_iso_code
, "JP", 3) == 0) {
16357 if (CHECK_IS_BLOB(dhdp
) && !CHECK_IS_MULT_REGREV(dhdp
)) {
16358 if (strncmp(dhd
->pub
.vars_ccode
, "J1", 3) == 0) {
16359 memcpy(cspec
->ccode
, dhd
->pub
.vars_ccode
,
16360 sizeof(dhd
->pub
.vars_ccode
));
16363 if (strncmp(dhd
->pub
.vars_ccode
, "JP", 3) == 0) {
16364 cspec
->rev
= dhd
->pub
.vars_regrev
;
16368 #endif /* KEEP_JP_REGREV */
16369 BCM_REFERENCE(dhd
);
16371 void dhd_bus_country_set(struct net_device
*dev
, wl_country_t
*cspec
, bool notify
)
16373 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16375 struct bcm_cfg80211
*cfg
= wl_get_cfg(dev
);
16378 if (dhd
&& dhd
->pub
.up
) {
16379 memcpy(&dhd
->pub
.dhd_cspec
, cspec
, sizeof(wl_country_t
));
16380 #ifdef DHD_DISABLE_VHTMODE
16381 dhd_disable_vhtmode(&dhd
->pub
);
16382 #endif /* DHD_DISABLE_VHTMODE */
16385 wl_update_wiphybands(cfg
, notify
);
16390 #ifdef DHD_DISABLE_VHTMODE
16392 dhd_disable_vhtmode(dhd_pub_t
*dhd
)
16395 uint32 vhtmode
= FALSE
;
16399 ret
= dhd_iovar(dhd
, 0, "vhtmode", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
16401 DHD_ERROR(("%s Get vhtmode Fail ret %d\n", __FUNCTION__
, ret
));
16404 memcpy(&vhtmode
, buf
, sizeof(uint32
));
16405 if (vhtmode
== 0) {
16406 DHD_ERROR(("%s Get vhtmode is 0\n", __FUNCTION__
));
16412 ret
= dhd_iovar(dhd
, 0, "vhtmode", (char *)&vhtmode
, sizeof(vhtmode
), NULL
, 0, TRUE
);
16414 DHD_ERROR(("%s Set vhtmode Success %d\n", __FUNCTION__
, vhtmode
));
16416 if (ret
== BCME_NOTDOWN
) {
16418 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
16419 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
16421 DHD_ERROR(("%s WL_DOWN Fail ret %d\n", __FUNCTION__
, ret
));
16425 ret
= dhd_iovar(dhd
, 0, "vhtmode", (char *)&vhtmode
,
16426 sizeof(vhtmode
), NULL
, 0, TRUE
);
16427 DHD_ERROR(("%s Set vhtmode %d, ret %d\n", __FUNCTION__
, vhtmode
, ret
));
16429 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_UP
,
16430 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
16432 DHD_ERROR(("%s WL_UP Fail ret %d\n", __FUNCTION__
, ret
));
16435 DHD_ERROR(("%s Set vhtmode 0 failed %d\n", __FUNCTION__
, ret
));
16439 #endif /* DHD_DISABLE_VHTMODE */
16441 void dhd_bus_band_set(struct net_device
*dev
, uint band
)
16443 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16445 struct bcm_cfg80211
*cfg
= wl_get_cfg(dev
);
16447 if (dhd
&& dhd
->pub
.up
) {
16449 wl_update_wiphybands(cfg
, true);
16454 int dhd_net_set_fw_path(struct net_device
*dev
, char *fw
)
16456 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16458 if (!fw
|| fw
[0] == '\0')
16461 strncpy(dhd
->fw_path
, fw
, sizeof(dhd
->fw_path
) - 1);
16462 dhd
->fw_path
[sizeof(dhd
->fw_path
)-1] = '\0';
16464 #if defined(SOFTAP)
16465 if (strstr(fw
, "apsta") != NULL
) {
16466 DHD_INFO(("GOT APSTA FIRMWARE\n"));
16467 ap_fw_loaded
= TRUE
;
16469 DHD_INFO(("GOT STA FIRMWARE\n"));
16470 ap_fw_loaded
= FALSE
;
/* Public wrapper: serialize net_device-level operations for this interface. */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}
/* Public wrapper: release the net_device-level serialization lock. */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
16488 static void dhd_net_if_lock_local(dhd_info_t
*dhd
)
16490 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16492 mutex_lock(&dhd
->dhd_net_if_mutex
);
16496 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
)
16498 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16500 mutex_unlock(&dhd
->dhd_net_if_mutex
);
16504 static void dhd_suspend_lock(dhd_pub_t
*pub
)
16506 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16507 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16509 mutex_lock(&dhd
->dhd_suspend_mutex
);
16513 static void dhd_suspend_unlock(dhd_pub_t
*pub
)
16515 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16516 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16518 mutex_unlock(&dhd
->dhd_suspend_mutex
);
16522 unsigned long dhd_os_general_spin_lock(dhd_pub_t
*pub
)
16524 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16525 unsigned long flags
= 0;
16528 spin_lock_irqsave(&dhd
->dhd_lock
, flags
);
16533 void dhd_os_general_spin_unlock(dhd_pub_t
*pub
, unsigned long flags
)
16535 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16538 spin_unlock_irqrestore(&dhd
->dhd_lock
, flags
);
16541 /* Linux specific multipurpose spinlock API */
16543 dhd_os_spin_lock_init(osl_t
*osh
)
16545 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
16546 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
16547 /* and this results in kernel asserts in internal builds */
16548 spinlock_t
* lock
= MALLOC(osh
, sizeof(spinlock_t
) + 4);
16550 spin_lock_init(lock
);
16551 return ((void *)lock
);
16554 dhd_os_spin_lock_deinit(osl_t
*osh
, void *lock
)
16557 MFREE(osh
, lock
, sizeof(spinlock_t
) + 4);
16560 dhd_os_spin_lock(void *lock
)
16562 unsigned long flags
= 0;
16565 spin_lock_irqsave((spinlock_t
*)lock
, flags
);
16570 dhd_os_spin_unlock(void *lock
, unsigned long flags
)
16573 spin_unlock_irqrestore((spinlock_t
*)lock
, flags
);
16577 dhd_os_dbgring_lock_init(osl_t
*osh
)
16579 struct mutex
*mtx
= NULL
;
16581 mtx
= MALLOCZ(osh
, sizeof(*mtx
));
16589 dhd_os_dbgring_lock_deinit(osl_t
*osh
, void *mtx
)
16592 mutex_destroy(mtx
);
16593 MFREE(osh
, mtx
, sizeof(struct mutex
));
16598 dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
)
16600 return (atomic_read(&dhd
->pend_8021x_cnt
));
16603 #define MAX_WAIT_FOR_8021X_TX 100
16606 dhd_wait_pend8021x(struct net_device
*dev
)
16608 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16609 int timeout
= msecs_to_jiffies(10);
16610 int ntimes
= MAX_WAIT_FOR_8021X_TX
;
16611 int pend
= dhd_get_pend_8021x_cnt(dhd
);
16613 while (ntimes
&& pend
) {
16615 set_current_state(TASK_INTERRUPTIBLE
);
16616 DHD_PERIM_UNLOCK(&dhd
->pub
);
16617 schedule_timeout(timeout
);
16618 DHD_PERIM_LOCK(&dhd
->pub
);
16619 set_current_state(TASK_RUNNING
);
16622 pend
= dhd_get_pend_8021x_cnt(dhd
);
16626 atomic_set(&dhd
->pend_8021x_cnt
, 0);
16627 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__
));
16632 #if defined(DHD_DEBUG)
16633 int write_file(const char * file_name
, uint32 flags
, uint8
*buf
, int size
)
16636 struct file
*fp
= NULL
;
16637 mm_segment_t old_fs
;
16639 /* change to KERNEL_DS address limit */
16643 /* open file to write */
16644 fp
= filp_open(file_name
, flags
, 0664);
16646 DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp
)));
16650 /* Write buf to file */
16651 ret
= vfs_write(fp
, buf
, size
, &pos
);
16653 DHD_ERROR(("write file error, err = %d\n", ret
));
16657 /* Sync file from filesystem to physical media */
16658 ret
= vfs_fsync(fp
, 0);
16660 DHD_ERROR(("sync file error, error = %d\n", ret
));
16666 /* close file before return */
16668 filp_close(fp
, current
->files
);
16670 /* restore previous address limit */
16679 dhd_convert_memdump_type_to_str(uint32 type
, char *buf
, int substr_type
)
16681 char *type_str
= NULL
;
16684 case DUMP_TYPE_RESUMED_ON_TIMEOUT
:
16685 type_str
= "resumed_on_timeout";
16687 case DUMP_TYPE_D3_ACK_TIMEOUT
:
16688 type_str
= "D3_ACK_timeout";
16690 case DUMP_TYPE_DONGLE_TRAP
:
16691 type_str
= "Dongle_Trap";
16693 case DUMP_TYPE_MEMORY_CORRUPTION
:
16694 type_str
= "Memory_Corruption";
16696 case DUMP_TYPE_PKTID_AUDIT_FAILURE
:
16697 type_str
= "PKTID_AUDIT_Fail";
16699 case DUMP_TYPE_PKTID_INVALID
:
16700 type_str
= "PKTID_INVALID";
16702 case DUMP_TYPE_SCAN_TIMEOUT
:
16703 type_str
= "SCAN_timeout";
16705 case DUMP_TYPE_SCAN_BUSY
:
16706 type_str
= "SCAN_Busy";
16708 case DUMP_TYPE_BY_SYSDUMP
:
16709 if (substr_type
== CMD_UNWANTED
) {
16710 type_str
= "BY_SYSDUMP_FORUSER_unwanted";
16711 } else if (substr_type
== CMD_DISCONNECTED
) {
16712 type_str
= "BY_SYSDUMP_FORUSER_disconnected";
16714 type_str
= "BY_SYSDUMP_FORUSER";
16717 case DUMP_TYPE_BY_LIVELOCK
:
16718 type_str
= "BY_LIVELOCK";
16720 case DUMP_TYPE_AP_LINKUP_FAILURE
:
16721 type_str
= "BY_AP_LINK_FAILURE";
16723 case DUMP_TYPE_AP_ABNORMAL_ACCESS
:
16724 type_str
= "INVALID_ACCESS";
16726 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX
:
16727 type_str
= "ERROR_RX_TIMED_OUT";
16729 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX
:
16730 type_str
= "ERROR_TX_TIMED_OUT";
16732 case DUMP_TYPE_CFG_VENDOR_TRIGGERED
:
16733 type_str
= "CFG_VENDOR_TRIGGERED";
16735 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR
:
16736 type_str
= "BY_INVALID_RING_RDWR";
16738 case DUMP_TYPE_IFACE_OP_FAILURE
:
16739 type_str
= "BY_IFACE_OP_FAILURE";
16741 case DUMP_TYPE_TRANS_ID_MISMATCH
:
16742 type_str
= "BY_TRANS_ID_MISMATCH";
16744 #ifdef DEBUG_DNGL_INIT_FAIL
16745 case DUMP_TYPE_DONGLE_INIT_FAILURE
:
16746 type_str
= "DONGLE_INIT_FAIL";
16748 #endif /* DEBUG_DNGL_INIT_FAIL */
16749 #ifdef SUPPORT_LINKDOWN_RECOVERY
16750 case DUMP_TYPE_READ_SHM_FAIL
:
16751 type_str
= "READ_SHM_FAIL";
16753 #endif /* SUPPORT_LINKDOWN_RECOVERY */
16754 case DUMP_TYPE_DONGLE_HOST_EVENT
:
16755 type_str
= "BY_DONGLE_HOST_EVENT";
16757 case DUMP_TYPE_SMMU_FAULT
:
16758 type_str
= "SMMU_FAULT";
16760 case DUMP_TYPE_BY_USER
:
16761 type_str
= "BY_USER";
16764 case DUMP_TYPE_DUE_TO_BT
:
16765 type_str
= "DUE_TO_BT";
16767 #endif /* DHD_ERPOM */
16768 case DUMP_TYPE_LOGSET_BEYOND_RANGE
:
16769 type_str
= "LOGSET_BEYOND_RANGE";
16771 case DUMP_TYPE_CTO_RECOVERY
:
16772 type_str
= "CTO_RECOVERY";
16774 case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR
:
16775 type_str
= "SEQUENTIAL_PRIVCMD_ERROR";
16778 type_str
= "Unknown_type";
16782 strncpy(buf
, type_str
, strlen(type_str
));
16783 buf
[strlen(type_str
)] = 0;
16787 write_dump_to_file(dhd_pub_t
*dhd
, uint8
*buf
, int size
, char *fname
)
16790 char memdump_path
[128];
16791 char memdump_type
[32];
16792 struct timeval curtime
;
16795 /* Init file name */
16796 memset(memdump_path
, 0, sizeof(memdump_path
));
16797 memset(memdump_type
, 0, sizeof(memdump_type
));
16798 do_gettimeofday(&curtime
);
16799 dhd_convert_memdump_type_to_str(dhd
->memdump_type
, memdump_type
, dhd
->debug_dump_subcmd
);
16800 #ifdef CUSTOMER_HW4_DEBUG
16801 get_debug_dump_time(dhd
->debug_dump_time_str
);
16802 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_" "%s",
16803 DHD_COMMON_DUMP_PATH
, fname
, memdump_type
, dhd
->debug_dump_time_str
);
16804 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16805 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
16806 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16807 "/data/misc/wifi/", fname
, memdump_type
,
16808 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16809 file_mode
= O_CREAT
| O_WRONLY
;
16811 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16812 "/installmedia/", fname
, memdump_type
,
16813 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16814 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
16815 * calling BUG_ON immediately after collecting the socram dump.
16816 * So the file write operation should directly write the contents into the
16817 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
16818 * instead of appending.
16820 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16822 struct file
*fp
= filp_open(memdump_path
, file_mode
, 0664);
16823 /* Check if it is live Brix image having /installmedia, else use /data */
16825 DHD_ERROR(("open file %s, try /data/\n", memdump_path
));
16826 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16827 "/data/", fname
, memdump_type
,
16828 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16830 filp_close(fp
, NULL
);
16833 #endif /* CUSTOMER_HW4_DEBUG */
16835 /* print SOCRAM dump file path */
16836 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__
, memdump_path
));
16838 #ifdef DHD_LOG_DUMP
16839 dhd_print_buf_addr(dhd
, "write_dump_to_file", buf
, size
);
16840 #endif /* DHD_LOG_DUMP */
16843 ret
= write_file(memdump_path
, file_mode
, buf
, size
);
16845 #ifdef DHD_DUMP_MNGR
16846 if (ret
== BCME_OK
) {
16847 dhd_dump_file_manage_enqueue(dhd
, memdump_path
, fname
);
16849 #endif /* DHD_DUMP_MNGR */
16853 #endif /* DHD_DEBUG */
16855 int dhd_os_wake_lock_timeout(dhd_pub_t
*pub
)
16857 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16858 unsigned long flags
;
16861 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16862 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16863 ret
= dhd
->wakelock_rx_timeout_enable
> dhd
->wakelock_ctrl_timeout_enable
?
16864 dhd
->wakelock_rx_timeout_enable
: dhd
->wakelock_ctrl_timeout_enable
;
16865 #ifdef CONFIG_HAS_WAKELOCK
16866 if (dhd
->wakelock_rx_timeout_enable
)
16867 wake_lock_timeout(&dhd
->wl_rxwake
,
16868 msecs_to_jiffies(dhd
->wakelock_rx_timeout_enable
));
16869 if (dhd
->wakelock_ctrl_timeout_enable
)
16870 wake_lock_timeout(&dhd
->wl_ctrlwake
,
16871 msecs_to_jiffies(dhd
->wakelock_ctrl_timeout_enable
));
16873 dhd
->wakelock_rx_timeout_enable
= 0;
16874 dhd
->wakelock_ctrl_timeout_enable
= 0;
16875 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16880 int net_os_wake_lock_timeout(struct net_device
*dev
)
16882 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16886 ret
= dhd_os_wake_lock_timeout(&dhd
->pub
);
16890 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t
*pub
, int val
)
16892 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16893 unsigned long flags
;
16895 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16896 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16897 if (val
> dhd
->wakelock_rx_timeout_enable
)
16898 dhd
->wakelock_rx_timeout_enable
= val
;
16899 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16904 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t
*pub
, int val
)
16906 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16907 unsigned long flags
;
16909 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16910 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16911 if (val
> dhd
->wakelock_ctrl_timeout_enable
)
16912 dhd
->wakelock_ctrl_timeout_enable
= val
;
16913 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16918 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t
*pub
)
16920 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16921 unsigned long flags
;
16923 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16924 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16925 dhd
->wakelock_ctrl_timeout_enable
= 0;
16926 #ifdef CONFIG_HAS_WAKELOCK
16927 if (wake_lock_active(&dhd
->wl_ctrlwake
))
16928 wake_unlock(&dhd
->wl_ctrlwake
);
16930 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16935 int net_os_wake_lock_rx_timeout_enable(struct net_device
*dev
, int val
)
16937 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16941 ret
= dhd_os_wake_lock_rx_timeout_enable(&dhd
->pub
, val
);
16945 int net_os_wake_lock_ctrl_timeout_enable(struct net_device
*dev
, int val
)
16947 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16951 ret
= dhd_os_wake_lock_ctrl_timeout_enable(&dhd
->pub
, val
);
16955 #if defined(DHD_TRACE_WAKE_LOCK)
16956 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16957 #include <linux/hashtable.h>
16959 #include <linux/hash.h>
16960 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16962 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16963 /* Define 2^5 = 32 bucket size hash table */
16964 DEFINE_HASHTABLE(wklock_history
, 5);
16966 /* Define 2^5 = 32 bucket size hash table */
16967 struct hlist_head wklock_history
[32] = { [0 ... 31] = HLIST_HEAD_INIT
};
16968 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16970 atomic_t trace_wklock_onoff
;
16971 typedef enum dhd_wklock_type
{
16978 struct wk_trace_record
{
16979 unsigned long addr
; /* Address of the instruction */
16980 dhd_wklock_t lock_type
; /* lock_type */
16981 unsigned long long counter
; /* counter information */
16982 struct hlist_node wklock_node
; /* hash node */
16985 static struct wk_trace_record
*find_wklock_entry(unsigned long addr
)
16987 struct wk_trace_record
*wklock_info
;
16988 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16989 hash_for_each_possible(wklock_history
, wklock_info
, wklock_node
, addr
)
16991 struct hlist_node
*entry
;
16992 int index
= hash_long(addr
, ilog2(ARRAY_SIZE(wklock_history
)));
16993 hlist_for_each_entry(wklock_info
, entry
, &wklock_history
[index
], wklock_node
)
16994 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16996 if (wklock_info
->addr
== addr
) {
16997 return wklock_info
;
17003 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17004 #define HASH_ADD(hashtable, node, key) \
17006 hash_add(hashtable, node, key); \
17009 #define HASH_ADD(hashtable, node, key) \
17011 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
17012 hlist_add_head(node, &hashtable[index]); \
17014 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
17016 #define STORE_WKLOCK_RECORD(wklock_type) \
17018 struct wk_trace_record *wklock_info = NULL; \
17019 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
17020 wklock_info = find_wklock_entry(func_addr); \
17021 if (wklock_info) { \
17022 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
17023 wklock_info->counter = dhd->wakelock_counter; \
17025 wklock_info->counter++; \
17028 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
17029 if (!wklock_info) {\
17030 printk("Can't allocate wk_trace_record \n"); \
17032 wklock_info->addr = func_addr; \
17033 wklock_info->lock_type = wklock_type; \
17034 if (wklock_type == DHD_WAIVE_LOCK || \
17035 wklock_type == DHD_RESTORE_LOCK) { \
17036 wklock_info->counter = dhd->wakelock_counter; \
17038 wklock_info->counter++; \
17040 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
17045 static inline void dhd_wk_lock_rec_dump(void)
17048 struct wk_trace_record
*wklock_info
;
17050 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17051 hash_for_each(wklock_history
, bkt
, wklock_info
, wklock_node
)
17053 struct hlist_node
*entry
= NULL
;
17054 int max_index
= ARRAY_SIZE(wklock_history
);
17055 for (bkt
= 0; bkt
< max_index
; bkt
++)
17056 hlist_for_each_entry(wklock_info
, entry
, &wklock_history
[bkt
], wklock_node
)
17057 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17059 switch (wklock_info
->lock_type
) {
17060 case DHD_WAKE_LOCK
:
17061 printk("wakelock lock : %pS lock_counter : %llu \n",
17062 (void *)wklock_info
->addr
, wklock_info
->counter
);
17064 case DHD_WAKE_UNLOCK
:
17065 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
17066 (void *)wklock_info
->addr
, wklock_info
->counter
);
17068 case DHD_WAIVE_LOCK
:
17069 printk("wakelock waive : %pS before_waive : %llu \n",
17070 (void *)wklock_info
->addr
, wklock_info
->counter
);
17072 case DHD_RESTORE_LOCK
:
17073 printk("wakelock restore : %pS, after_waive : %llu \n",
17074 (void *)wklock_info
->addr
, wklock_info
->counter
);
17080 static void dhd_wk_lock_trace_init(struct dhd_info
*dhd
)
17082 unsigned long flags
;
17083 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17085 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17087 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17088 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17089 hash_init(wklock_history
);
17091 for (i
= 0; i
< ARRAY_SIZE(wklock_history
); i
++)
17092 INIT_HLIST_HEAD(&wklock_history
[i
]);
17093 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17094 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17095 atomic_set(&trace_wklock_onoff
, 1);
17098 static void dhd_wk_lock_trace_deinit(struct dhd_info
*dhd
)
17101 struct wk_trace_record
*wklock_info
;
17102 struct hlist_node
*tmp
;
17103 unsigned long flags
;
17104 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17105 struct hlist_node
*entry
= NULL
;
17106 int max_index
= ARRAY_SIZE(wklock_history
);
17107 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17109 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17110 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17111 hash_for_each_safe(wklock_history
, bkt
, tmp
, wklock_info
, wklock_node
)
17113 for (bkt
= 0; bkt
< max_index
; bkt
++)
17114 hlist_for_each_entry_safe(wklock_info
, entry
, tmp
,
17115 &wklock_history
[bkt
], wklock_node
)
17116 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
17118 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17119 hash_del(&wklock_info
->wklock_node
);
17121 hlist_del_init(&wklock_info
->wklock_node
);
17122 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
17123 kfree(wklock_info
);
17125 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17128 void dhd_wk_lock_stats_dump(dhd_pub_t
*dhdp
)
17130 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
17131 unsigned long flags
;
17133 printk(KERN_ERR
"DHD Printing wl_wake Lock/Unlock Record \r\n");
17134 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17135 dhd_wk_lock_rec_dump();
17136 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17140 #define STORE_WKLOCK_RECORD(wklock_type)
17141 #endif /* ! DHD_TRACE_WAKE_LOCK */
17143 int dhd_os_wake_lock(dhd_pub_t
*pub
)
17145 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17146 unsigned long flags
;
17149 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
17150 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17151 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
17152 #ifdef CONFIG_HAS_WAKELOCK
17153 wake_lock(&dhd
->wl_wifi
);
17154 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17155 dhd_bus_dev_pm_stay_awake(pub
);
17158 #ifdef DHD_TRACE_WAKE_LOCK
17159 if (atomic_read(&trace_wklock_onoff
)) {
17160 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK
);
17162 #endif /* DHD_TRACE_WAKE_LOCK */
17163 dhd
->wakelock_counter
++;
17164 ret
= dhd
->wakelock_counter
;
17165 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17171 void dhd_event_wake_lock(dhd_pub_t
*pub
)
17173 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17176 #ifdef CONFIG_HAS_WAKELOCK
17177 wake_lock(&dhd
->wl_evtwake
);
17178 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17179 dhd_bus_dev_pm_stay_awake(pub
);
17185 dhd_pm_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17187 #ifdef CONFIG_HAS_WAKELOCK
17188 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17191 wake_lock_timeout(&dhd
->wl_pmwake
, msecs_to_jiffies(val
));
17193 #endif /* CONFIG_HAS_WAKE_LOCK */
17197 dhd_txfl_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17199 #ifdef CONFIG_HAS_WAKELOCK
17200 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17203 wake_lock_timeout(&dhd
->wl_txflwake
, msecs_to_jiffies(val
));
17205 #endif /* CONFIG_HAS_WAKE_LOCK */
17208 int net_os_wake_lock(struct net_device
*dev
)
17210 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
17214 ret
= dhd_os_wake_lock(&dhd
->pub
);
17218 int dhd_os_wake_unlock(dhd_pub_t
*pub
)
17220 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17221 unsigned long flags
;
17224 dhd_os_wake_lock_timeout(pub
);
17225 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
17226 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17228 if (dhd
->wakelock_counter
> 0) {
17229 dhd
->wakelock_counter
--;
17230 #ifdef DHD_TRACE_WAKE_LOCK
17231 if (atomic_read(&trace_wklock_onoff
)) {
17232 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK
);
17234 #endif /* DHD_TRACE_WAKE_LOCK */
17235 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
17236 #ifdef CONFIG_HAS_WAKELOCK
17237 wake_unlock(&dhd
->wl_wifi
);
17238 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17239 dhd_bus_dev_pm_relax(pub
);
17242 ret
= dhd
->wakelock_counter
;
17244 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17249 void dhd_event_wake_unlock(dhd_pub_t
*pub
)
17251 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17254 #ifdef CONFIG_HAS_WAKELOCK
17255 wake_unlock(&dhd
->wl_evtwake
);
17256 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17257 dhd_bus_dev_pm_relax(pub
);
17262 void dhd_pm_wake_unlock(dhd_pub_t
*pub
)
17264 #ifdef CONFIG_HAS_WAKELOCK
17265 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17268 /* if wl_pmwake is active, unlock it */
17269 if (wake_lock_active(&dhd
->wl_pmwake
)) {
17270 wake_unlock(&dhd
->wl_pmwake
);
17273 #endif /* CONFIG_HAS_WAKELOCK */
17276 void dhd_txfl_wake_unlock(dhd_pub_t
*pub
)
17278 #ifdef CONFIG_HAS_WAKELOCK
17279 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17282 /* if wl_txflwake is active, unlock it */
17283 if (wake_lock_active(&dhd
->wl_txflwake
)) {
17284 wake_unlock(&dhd
->wl_txflwake
);
17287 #endif /* CONFIG_HAS_WAKELOCK */
17290 int dhd_os_check_wakelock(dhd_pub_t
*pub
)
17292 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17293 KERNEL_VERSION(2, 6, 36)))
17298 dhd
= (dhd_info_t
*)(pub
->info
);
17299 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17301 #ifdef CONFIG_HAS_WAKELOCK
17302 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
17303 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
17304 (wake_lock_active(&dhd
->wl_wdwake
))))
17306 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17307 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
17314 dhd_os_check_wakelock_all(dhd_pub_t
*pub
)
17316 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17317 KERNEL_VERSION(2, 6, 36)))
17318 #if defined(CONFIG_HAS_WAKELOCK)
17319 int l1
, l2
, l3
, l4
, l7
, l8
, l9
;
17320 int l5
= 0, l6
= 0;
17321 int c
, lock_active
;
17322 #endif /* CONFIG_HAS_WAKELOCK */
17328 dhd
= (dhd_info_t
*)(pub
->info
);
17332 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17334 #ifdef CONFIG_HAS_WAKELOCK
17335 c
= dhd
->wakelock_counter
;
17336 l1
= wake_lock_active(&dhd
->wl_wifi
);
17337 l2
= wake_lock_active(&dhd
->wl_wdwake
);
17338 l3
= wake_lock_active(&dhd
->wl_rxwake
);
17339 l4
= wake_lock_active(&dhd
->wl_ctrlwake
);
17340 l7
= wake_lock_active(&dhd
->wl_evtwake
);
17341 #ifdef BCMPCIE_OOB_HOST_WAKE
17342 l5
= wake_lock_active(&dhd
->wl_intrwake
);
17343 #endif /* BCMPCIE_OOB_HOST_WAKE */
17344 #ifdef DHD_USE_SCAN_WAKELOCK
17345 l6
= wake_lock_active(&dhd
->wl_scanwake
);
17346 #endif /* DHD_USE_SCAN_WAKELOCK */
17347 l8
= wake_lock_active(&dhd
->wl_pmwake
);
17348 l9
= wake_lock_active(&dhd
->wl_txflwake
);
17349 lock_active
= (l1
|| l2
|| l3
|| l4
|| l5
|| l6
|| l7
|| l8
|| l9
);
17351 /* Indicate to the Host to avoid going to suspend if internal locks are up */
17353 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
17354 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
17355 __FUNCTION__
, c
, l1
, l2
, l3
, l4
, l5
, l6
, l7
, l8
, l9
));
17358 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17359 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
)) {
17362 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
17366 int net_os_wake_unlock(struct net_device
*dev
)
17368 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
17372 ret
= dhd_os_wake_unlock(&dhd
->pub
);
17376 int dhd_os_wd_wake_lock(dhd_pub_t
*pub
)
17378 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17379 unsigned long flags
;
17383 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17384 if (dhd
->wakelock_wd_counter
== 0 && !dhd
->waive_wakelock
) {
17385 #ifdef CONFIG_HAS_WAKELOCK
17386 /* if wakelock_wd_counter was never used : lock it at once */
17387 wake_lock(&dhd
->wl_wdwake
);
17390 dhd
->wakelock_wd_counter
++;
17391 ret
= dhd
->wakelock_wd_counter
;
17392 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17397 int dhd_os_wd_wake_unlock(dhd_pub_t
*pub
)
17399 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17400 unsigned long flags
;
17404 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17405 if (dhd
->wakelock_wd_counter
> 0) {
17406 dhd
->wakelock_wd_counter
= 0;
17407 if (!dhd
->waive_wakelock
) {
17408 #ifdef CONFIG_HAS_WAKELOCK
17409 wake_unlock(&dhd
->wl_wdwake
);
17413 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17418 #ifdef BCMPCIE_OOB_HOST_WAKE
17420 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17422 #ifdef CONFIG_HAS_WAKELOCK
17423 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17426 wake_lock_timeout(&dhd
->wl_intrwake
, msecs_to_jiffies(val
));
17428 #endif /* CONFIG_HAS_WAKELOCK */
17432 dhd_os_oob_irq_wake_unlock(dhd_pub_t
*pub
)
17434 #ifdef CONFIG_HAS_WAKELOCK
17435 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17438 /* if wl_intrwake is active, unlock it */
17439 if (wake_lock_active(&dhd
->wl_intrwake
)) {
17440 wake_unlock(&dhd
->wl_intrwake
);
17443 #endif /* CONFIG_HAS_WAKELOCK */
17445 #endif /* BCMPCIE_OOB_HOST_WAKE */
17447 #ifdef DHD_USE_SCAN_WAKELOCK
17449 dhd_os_scan_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17451 #ifdef CONFIG_HAS_WAKELOCK
17452 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17455 wake_lock_timeout(&dhd
->wl_scanwake
, msecs_to_jiffies(val
));
17457 #endif /* CONFIG_HAS_WAKELOCK */
17461 dhd_os_scan_wake_unlock(dhd_pub_t
*pub
)
17463 #ifdef CONFIG_HAS_WAKELOCK
17464 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17467 /* if wl_scanwake is active, unlock it */
17468 if (wake_lock_active(&dhd
->wl_scanwake
)) {
17469 wake_unlock(&dhd
->wl_scanwake
);
17472 #endif /* CONFIG_HAS_WAKELOCK */
17474 #endif /* DHD_USE_SCAN_WAKELOCK */
17476 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
17477 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
17479 int dhd_os_wake_lock_waive(dhd_pub_t
*pub
)
17481 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17482 unsigned long flags
;
17485 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
17486 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17488 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
17489 if (dhd
->waive_wakelock
== FALSE
) {
17490 #ifdef DHD_TRACE_WAKE_LOCK
17491 if (atomic_read(&trace_wklock_onoff
)) {
17492 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK
);
17494 #endif /* DHD_TRACE_WAKE_LOCK */
17495 /* record current lock status */
17496 dhd
->wakelock_before_waive
= dhd
->wakelock_counter
;
17497 dhd
->waive_wakelock
= TRUE
;
17499 ret
= dhd
->wakelock_wd_counter
;
17500 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17505 int dhd_os_wake_lock_restore(dhd_pub_t
*pub
)
17507 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17508 unsigned long flags
;
17513 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) == 0)
17516 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17518 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
17519 if (!dhd
->waive_wakelock
)
17522 dhd
->waive_wakelock
= FALSE
;
17523 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
17524 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
17525 * the lock in between, do the same by calling wake_unlock or pm_relax
17527 #ifdef DHD_TRACE_WAKE_LOCK
17528 if (atomic_read(&trace_wklock_onoff
)) {
17529 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK
);
17531 #endif /* DHD_TRACE_WAKE_LOCK */
17533 if (dhd
->wakelock_before_waive
== 0 && dhd
->wakelock_counter
> 0) {
17534 #ifdef CONFIG_HAS_WAKELOCK
17535 wake_lock(&dhd
->wl_wifi
);
17536 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17537 dhd_bus_dev_pm_stay_awake(&dhd
->pub
);
17539 } else if (dhd
->wakelock_before_waive
> 0 && dhd
->wakelock_counter
== 0) {
17540 #ifdef CONFIG_HAS_WAKELOCK
17541 wake_unlock(&dhd
->wl_wifi
);
17542 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17543 dhd_bus_dev_pm_relax(&dhd
->pub
);
17546 dhd
->wakelock_before_waive
= 0;
17548 ret
= dhd
->wakelock_wd_counter
;
17549 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17553 void dhd_os_wake_lock_init(struct dhd_info
*dhd
)
17555 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__
));
17556 dhd
->wakelock_counter
= 0;
17557 dhd
->wakelock_rx_timeout_enable
= 0;
17558 dhd
->wakelock_ctrl_timeout_enable
= 0;
17559 /* wakelocks prevent a system from going into a low power state */
17560 #ifdef CONFIG_HAS_WAKELOCK
17561 wake_lock_init(&dhd
->wl_wifi
, WAKE_LOCK_SUSPEND
, "wlan_wake");
17562 wake_lock_init(&dhd
->wl_rxwake
, WAKE_LOCK_SUSPEND
, "wlan_rx_wake");
17563 wake_lock_init(&dhd
->wl_ctrlwake
, WAKE_LOCK_SUSPEND
, "wlan_ctrl_wake");
17564 wake_lock_init(&dhd
->wl_evtwake
, WAKE_LOCK_SUSPEND
, "wlan_evt_wake");
17565 wake_lock_init(&dhd
->wl_pmwake
, WAKE_LOCK_SUSPEND
, "wlan_pm_wake");
17566 wake_lock_init(&dhd
->wl_txflwake
, WAKE_LOCK_SUSPEND
, "wlan_txfl_wake");
17567 #ifdef BCMPCIE_OOB_HOST_WAKE
17568 wake_lock_init(&dhd
->wl_intrwake
, WAKE_LOCK_SUSPEND
, "wlan_oob_irq_wake");
17569 #endif /* BCMPCIE_OOB_HOST_WAKE */
17570 #ifdef DHD_USE_SCAN_WAKELOCK
17571 wake_lock_init(&dhd
->wl_scanwake
, WAKE_LOCK_SUSPEND
, "wlan_scan_wake");
17572 #endif /* DHD_USE_SCAN_WAKELOCK */
17573 #endif /* CONFIG_HAS_WAKELOCK */
17574 #ifdef DHD_TRACE_WAKE_LOCK
17575 dhd_wk_lock_trace_init(dhd
);
17576 #endif /* DHD_TRACE_WAKE_LOCK */
/*
 * Tear down everything dhd_os_wake_lock_init() set up: reset the counters
 * and destroy each registered wake lock. Mirror image of the init routine;
 * called at detach.
 *
 * NOTE(review): opening/closing braces were lost in extraction and have
 * been restored; all statements are as visible in SOURCE.
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	wake_lock_destroy(&dhd->wl_wifi);
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
	wake_lock_destroy(&dhd->wl_pmwake);
	wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}
17604 bool dhd_os_check_if_up(dhd_pub_t
*pub
)
17611 #if defined(BCMSDIO) || defined(BCMPCIE)
17612 /* function to collect firmware, chip id and chip version info */
17613 void dhd_set_version_info(dhd_pub_t
*dhdp
, char *fw
)
17617 i
= snprintf(info_string
, sizeof(info_string
),
17618 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR
, fw
);
17623 i
= snprintf(&info_string
[i
], sizeof(info_string
) - i
,
17624 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp
),
17625 dhd_bus_chiprev_id(dhdp
), dhd_bus_chippkg_id(dhdp
));
17627 #endif /* BCMSDIO || BCMPCIE */
17628 int dhd_ioctl_entry_local(struct net_device
*net
, wl_ioctl_t
*ioc
, int cmd
)
17632 dhd_info_t
*dhd
= NULL
;
17634 if (!net
|| !DEV_PRIV(net
)) {
17635 DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
17636 __FUNCTION__
, net
, DEV_PRIV(net
)));
17640 dhd
= DHD_DEV_INFO(net
);
17644 ifidx
= dhd_net2idx(dhd
, net
);
17645 if (ifidx
== DHD_BAD_IF
) {
17646 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
17650 DHD_OS_WAKE_LOCK(&dhd
->pub
);
17651 DHD_PERIM_LOCK(&dhd
->pub
);
17653 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, ioc
, ioc
->buf
, ioc
->len
);
17654 dhd_check_hang(net
, &dhd
->pub
, ret
);
17656 DHD_PERIM_UNLOCK(&dhd
->pub
);
17657 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
17662 bool dhd_os_check_hang(dhd_pub_t
*dhdp
, int ifidx
, int ret
)
17664 struct net_device
*net
;
17666 net
= dhd_idx2net(dhdp
, ifidx
);
17668 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__
, ifidx
));
17672 return dhd_check_hang(net
, dhdp
, ret
);
17675 /* Return instance */
17676 int dhd_get_instance(dhd_pub_t
*dhdp
)
17678 return dhdp
->info
->unit
;
17681 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
17682 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
17683 int dhd_deepsleep(struct net_device
*dev
, int flag
)
17692 dhd
= DHD_DEV_INFO(dev
);
17696 case 1 : /* Deepsleep on */
17697 DHD_ERROR(("[WiFi] Deepsleep On\n"));
17698 /* give some time to sysioc_work before deepsleep */
17700 #ifdef PKT_FILTER_SUPPORT
17701 /* disable pkt filter */
17702 dhd_enable_packet_filter(0, dhdp
);
17703 #endif /* PKT_FILTER_SUPPORT */
17706 ret
= dhd_iovar(dhdp
, 0, "mpc", (char *)&powervar
, sizeof(powervar
), NULL
,
17709 /* Enable Deepsleep */
17711 ret
= dhd_iovar(dhdp
, 0, "deepsleep", (char *)&powervar
, sizeof(powervar
),
17715 case 0: /* Deepsleep Off */
17716 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
17718 /* Disable Deepsleep */
17719 for (cnt
= 0; cnt
< MAX_TRY_CNT
; cnt
++) {
17721 ret
= dhd_iovar(dhdp
, 0, "deepsleep", (char *)&powervar
,
17722 sizeof(powervar
), NULL
, 0, TRUE
);
17724 ret
= dhd_iovar(dhdp
, 0, "deepsleep", (char *)&powervar
,
17725 sizeof(powervar
), iovbuf
, sizeof(iovbuf
), FALSE
);
17727 DHD_ERROR(("the error of dhd deepsleep status"
17728 " ret value :%d\n", ret
));
17730 if (!(*(int *)iovbuf
)) {
17731 DHD_ERROR(("deepsleep mode is 0,"
17732 " count: %d\n", cnt
));
17740 ret
= dhd_iovar(dhdp
, 0, "mpc", (char *)&powervar
, sizeof(powervar
), NULL
,
17747 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
17749 #ifdef PROP_TXSTATUS
/*
 * Platform hook called when proptxstatus (wlfc) is enabled: switch the
 * SDIO function-2 block size to the non-legacy value when dynamic block
 * sizing is compiled in. No-op otherwise.
 */
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
/*
 * Platform hook called when proptxstatus (wlfc) is disabled: restore the
 * SDIO function-2 block size to the module-default sd_f2_blocksize when
 * dynamic block sizing is compiled in. No-op otherwise.
 */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
17767 bool dhd_wlfc_skip_fc(void * dhdp
, uint8 idx
)
17769 #ifdef SKIP_WLFC_ON_CONCURRENT
17772 struct net_device
* net
= dhd_idx2net((dhd_pub_t
*)dhdp
, idx
);
17774 /* enable flow control in vsdb mode */
17775 return !(wl_cfg80211_is_concurrent_mode(net
));
17777 return TRUE
; /* skip flow control */
17778 #endif /* WL_CFG80211 */
17782 #endif /* SKIP_WLFC_ON_CONCURRENT */
17785 #endif /* PROP_TXSTATUS */
17788 #include <linux/debugfs.h>
17790 typedef struct dhd_dbgfs
{
17791 struct dentry
*debugfs_dir
;
17792 struct dentry
*debugfs_mem
;
17797 dhd_dbgfs_t g_dbgfs
;
17799 extern uint32
dhd_readregl(void *bp
, uint32 addr
);
17800 extern uint32
dhd_writeregl(void *bp
, uint32 addr
, uint32 data
);
17803 dhd_dbg_state_open(struct inode
*inode
, struct file
*file
)
17805 file
->private_data
= inode
->i_private
;
17810 dhd_dbg_state_read(struct file
*file
, char __user
*ubuf
,
17811 size_t count
, loff_t
*ppos
)
17815 loff_t pos
= *ppos
;
17820 if (pos
>= g_dbgfs
.size
|| !count
)
17822 if (count
> g_dbgfs
.size
- pos
)
17823 count
= g_dbgfs
.size
- pos
;
17825 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
17826 tmp
= dhd_readregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3));
17828 ret
= copy_to_user(ubuf
, &tmp
, 4);
17833 *ppos
= pos
+ count
;
17840 dhd_debugfs_write(struct file
*file
, const char __user
*ubuf
, size_t count
, loff_t
*ppos
)
17842 loff_t pos
= *ppos
;
17848 if (pos
>= g_dbgfs
.size
|| !count
)
17850 if (count
> g_dbgfs
.size
- pos
)
17851 count
= g_dbgfs
.size
- pos
;
17853 ret
= copy_from_user(&buf
, ubuf
, sizeof(uint32
));
17857 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
17858 dhd_writeregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3), buf
);
17864 dhd_debugfs_lseek(struct file
*file
, loff_t off
, int whence
)
17873 pos
= file
->f_pos
+ off
;
17876 pos
= g_dbgfs
.size
- off
;
17878 return (pos
< 0 || pos
> g_dbgfs
.size
) ? -EINVAL
: (file
->f_pos
= pos
);
17881 static const struct file_operations dhd_dbg_state_ops
= {
17882 .read
= dhd_dbg_state_read
,
17883 .write
= dhd_debugfs_write
,
17884 .open
= dhd_dbg_state_open
,
17885 .llseek
= dhd_debugfs_lseek
17888 static void dhd_dbgfs_create(void)
17890 if (g_dbgfs
.debugfs_dir
) {
17891 g_dbgfs
.debugfs_mem
= debugfs_create_file("mem", 0644, g_dbgfs
.debugfs_dir
,
17892 NULL
, &dhd_dbg_state_ops
);
17896 void dhd_dbgfs_init(dhd_pub_t
*dhdp
)
17898 g_dbgfs
.dhdp
= dhdp
;
17899 g_dbgfs
.size
= 0x20000000; /* Allow access to various cores regs */
17901 g_dbgfs
.debugfs_dir
= debugfs_create_dir("dhd", 0);
17902 if (IS_ERR(g_dbgfs
.debugfs_dir
)) {
17903 g_dbgfs
.debugfs_dir
= NULL
;
17907 dhd_dbgfs_create();
17912 void dhd_dbgfs_remove(void)
17914 debugfs_remove(g_dbgfs
.debugfs_mem
);
17915 debugfs_remove(g_dbgfs
.debugfs_dir
);
17917 bzero((unsigned char *) &g_dbgfs
, sizeof(g_dbgfs
));
17919 #endif /* BCMDBGFS */
17921 #ifdef CUSTOM_SET_CPUCORE
17922 void dhd_set_cpucore(dhd_pub_t
*dhd
, int set
)
17924 int e_dpc
= 0, e_rxf
= 0, retry_set
= 0;
17926 if (!(dhd
->chan_isvht80
)) {
17927 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__
, dhd
->chan_isvht80
));
17934 e_dpc
= set_cpus_allowed_ptr(dhd
->current_dpc
,
17935 cpumask_of(DPC_CPUCORE
));
17937 e_dpc
= set_cpus_allowed_ptr(dhd
->current_dpc
,
17938 cpumask_of(PRIMARY_CPUCORE
));
17940 if (retry_set
++ > MAX_RETRY_SET_CPUCORE
) {
17941 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__
, e_dpc
));
17946 } while (e_dpc
< 0);
17951 e_rxf
= set_cpus_allowed_ptr(dhd
->current_rxf
,
17952 cpumask_of(RXF_CPUCORE
));
17954 e_rxf
= set_cpus_allowed_ptr(dhd
->current_rxf
,
17955 cpumask_of(PRIMARY_CPUCORE
));
17957 if (retry_set
++ > MAX_RETRY_SET_CPUCORE
) {
17958 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__
, e_rxf
));
17963 } while (e_rxf
< 0);
17965 #ifdef DHD_OF_SUPPORT
17966 interrupt_set_cpucore(set
, DPC_CPUCORE
, PRIMARY_CPUCORE
);
17967 #endif /* DHD_OF_SUPPORT */
17968 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__
, set
));
17972 #endif /* CUSTOM_SET_CPUCORE */
17974 #ifdef DHD_MCAST_REGEN
17975 /* Get interface specific ap_isolate configuration */
17976 int dhd_get_mcast_regen_bss_enable(dhd_pub_t
*dhdp
, uint32 idx
)
17978 dhd_info_t
*dhd
= dhdp
->info
;
17981 ASSERT(idx
< DHD_MAX_IFS
);
17983 ifp
= dhd
->iflist
[idx
];
17985 return ifp
->mcast_regen_bss_enable
;
17988 /* Set interface specific mcast_regen configuration */
17989 int dhd_set_mcast_regen_bss_enable(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
17991 dhd_info_t
*dhd
= dhdp
->info
;
17994 ASSERT(idx
< DHD_MAX_IFS
);
17996 ifp
= dhd
->iflist
[idx
];
17998 ifp
->mcast_regen_bss_enable
= val
;
18000 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
18003 dhd_update_rx_pkt_chainable_state(dhdp
, idx
);
18006 #endif /* DHD_MCAST_REGEN */
18008 /* Get interface specific ap_isolate configuration */
18009 int dhd_get_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
)
18011 dhd_info_t
*dhd
= dhdp
->info
;
18014 ASSERT(idx
< DHD_MAX_IFS
);
18016 ifp
= dhd
->iflist
[idx
];
18018 return ifp
->ap_isolate
;
18021 /* Set interface specific ap_isolate configuration */
18022 int dhd_set_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
18024 dhd_info_t
*dhd
= dhdp
->info
;
18027 ASSERT(idx
< DHD_MAX_IFS
);
18029 ifp
= dhd
->iflist
[idx
];
18032 ifp
->ap_isolate
= val
;
18037 #ifdef DHD_FW_COREDUMP
18038 void dhd_schedule_memdump(dhd_pub_t
*dhdp
, uint8
*buf
, uint32 size
)
18040 unsigned long flags
= 0;
18041 dhd_dump_t
*dump
= NULL
;
18042 dhd_info_t
*dhd_info
= NULL
;
18043 dhd_info
= (dhd_info_t
*)dhdp
->info
;
18044 dump
= (dhd_dump_t
*)MALLOC(dhdp
->osh
, sizeof(dhd_dump_t
));
18045 if (dump
== NULL
) {
18046 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__
));
18050 dump
->bufsize
= size
;
18051 #ifdef DHD_LOG_DUMP
18052 dhd_print_buf_addr(dhdp
, "memdump", buf
, size
);
18053 #endif /* DHD_LOG_DUMP */
18055 if (dhdp
->memdump_enabled
== DUMP_MEMONLY
) {
18059 #if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM)
18061 #if defined(DEBUG_DNGL_INIT_FAIL)
18062 (dhdp
->memdump_type
== DUMP_TYPE_DONGLE_INIT_FAILURE
) ||
18063 #endif /* DEBUG_DNGL_INIT_FAIL */
18065 (dhdp
->memdump_type
== DUMP_TYPE_DUE_TO_BT
) ||
18066 #endif /* DHD_ERPOM */
18069 #ifdef DHD_LOG_DUMP
18070 log_dump_type_t
*flush_type
= NULL
;
18072 dhd_info
->scheduled_memdump
= FALSE
;
18073 dhd_mem_dump((void *)dhdp
->info
, (void *)dump
, 0);
18074 /* for dongle init fail cases, 'dhd_mem_dump' does
18075 * not call 'dhd_log_dump', so call it here.
18077 #ifdef DHD_LOG_DUMP
18078 flush_type
= MALLOCZ(dhdp
->osh
,
18079 sizeof(log_dump_type_t
));
18081 *flush_type
= DLD_BUF_TYPE_ALL
;
18082 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__
));
18083 dhd_log_dump(dhdp
->info
, flush_type
, 0);
18085 #endif /* DHD_LOG_DUMP */
18088 #endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */
18090 dhd_info
->scheduled_memdump
= TRUE
;
18091 /* bus busy bit for mem dump will be cleared in mem dump
18092 * work item context, after mem dump file is written
18094 DHD_GENERAL_LOCK(dhdp
, flags
);
18095 DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp
);
18096 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18097 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__
));
18098 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dump
,
18099 DHD_WQ_WORK_SOC_RAM_DUMP
, dhd_mem_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
18102 dhd_mem_dump(void *handle
, void *event_info
, u8 event
)
18104 dhd_info_t
*dhd
= handle
;
18105 dhd_pub_t
*dhdp
= NULL
;
18106 dhd_dump_t
*dump
= event_info
;
18107 unsigned long flags
= 0;
18109 DHD_ERROR(("%s: ENTER \n", __FUNCTION__
));
18112 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
18118 DHD_GENERAL_LOCK(dhdp
, flags
);
18119 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
18120 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18121 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__
));
18124 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18126 #ifdef D2H_MINIDUMP
18127 /* dump minidump */
18128 if (dhd_bus_is_minidump_enabled(dhdp
)) {
18129 dhd_d2h_minidump(&dhd
->pub
);
18131 DHD_ERROR(("minidump is not enabled\n"));
18133 #endif /* D2H_MINIDUMP */
18136 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__
));
18140 if (write_dump_to_file(&dhd
->pub
, dump
->buf
, dump
->bufsize
, "mem_dump")) {
18141 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__
));
18142 #ifdef DHD_DEBUG_UART
18143 dhd
->pub
.memdump_success
= FALSE
;
18144 #endif /* DHD_DEBUG_UART */
18147 /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
18148 * context, no need to schedule another work queue for log dump. In case of
18149 * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
18150 * cfg layer is itself scheduling the log_dump work queue.
18151 * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
18152 * collect debug_dump as it may be called from non-sleepable context.
18154 #ifdef DHD_LOG_DUMP
18155 if (dhd
->scheduled_memdump
&&
18156 dhdp
->memdump_type
!= DUMP_TYPE_BY_SYSDUMP
) {
18157 log_dump_type_t
*flush_type
= MALLOCZ(dhdp
->osh
,
18158 sizeof(log_dump_type_t
));
18160 *flush_type
= DLD_BUF_TYPE_ALL
;
18161 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__
));
18162 dhd_log_dump(dhd
, flush_type
, 0);
18165 #endif /* DHD_LOG_DUMP */
18167 #ifdef DHD_PKT_LOGGING
18168 copy_debug_dump_time(dhdp
->debug_dump_time_pktlog_str
, dhdp
->debug_dump_time_str
);
18169 #endif /* DHD_PKT_LOGGING */
18170 clear_debug_dump_time(dhdp
->debug_dump_time_str
);
18172 /* before calling bug on, wait for other logs to be dumped.
18173 * we cannot wait in case dhd_mem_dump is called directly
18174 * as it may not be in a sleepable context
18176 if (dhd
->scheduled_memdump
) {
18179 #ifdef DHD_SSSR_DUMP
18180 bitmask
|= DHD_BUS_BUSY_IN_SSSRDUMP
;
18182 if (bitmask
!= 0) {
18183 timeleft
= dhd_os_busbusy_wait_bitmask(dhdp
,
18184 &dhdp
->dhd_bus_busy_state
, bitmask
, 0);
18185 if ((timeleft
== 0) || (timeleft
== 1)) {
18186 DHD_ERROR(("%s:Timed out on sssr dump,dhd_bus_busy_state=0x%x\n",
18187 __FUNCTION__
, dhdp
->dhd_bus_busy_state
));
18192 if (dhd
->pub
.memdump_enabled
== DUMP_MEMFILE_BUGON
&&
18193 #ifdef DHD_LOG_DUMP
18194 dhd
->pub
.memdump_type
!= DUMP_TYPE_BY_SYSDUMP
&&
18195 #endif /* DHD_LOG_DUMP */
18196 dhd
->pub
.memdump_type
!= DUMP_TYPE_BY_USER
&&
18197 #ifdef DHD_DEBUG_UART
18198 dhd
->pub
.memdump_success
== TRUE
&&
18199 #endif /* DHD_DEBUG_UART */
18200 #ifdef DNGL_EVENT_SUPPORT
18201 dhd
->pub
.memdump_type
!= DUMP_TYPE_DONGLE_HOST_EVENT
&&
18202 #endif /* DNGL_EVENT_SUPPORT */
18203 dhd
->pub
.memdump_type
!= DUMP_TYPE_CFG_VENDOR_TRIGGERED
) {
18205 #ifdef SHOW_LOGTRACE
18206 /* Wait till event_log_dispatcher_work finishes */
18207 cancel_delayed_work_sync(&dhd
->event_log_dispatcher_work
);
18208 #endif /* SHOW_LOGTRACE */
18215 MFREE(dhd
->pub
.osh
, dump
, sizeof(dhd_dump_t
));
18216 DHD_GENERAL_LOCK(dhdp
, flags
);
18217 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd
->pub
);
18218 dhd_os_busbusy_wake(dhdp
);
18219 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18220 dhd
->scheduled_memdump
= FALSE
;
18222 #endif /* DHD_FW_COREDUMP */
18224 #ifdef D2H_MINIDUMP
18226 dhd_d2h_minidump(dhd_pub_t
*dhdp
)
18228 char d2h_minidump
[128];
18229 dhd_dma_buf_t
*minidump_buf
;
18231 minidump_buf
= dhd_prot_get_minidump_buf(dhdp
);
18232 if (minidump_buf
->va
== NULL
) {
18233 DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__
));
18237 /* Init file name */
18238 memset(d2h_minidump
, 0, sizeof(d2h_minidump
));
18239 snprintf(d2h_minidump
, sizeof(d2h_minidump
), "%s", "d2h_minidump");
18241 if (write_dump_to_file(dhdp
, (uint8
*)minidump_buf
->va
,
18242 BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN
, d2h_minidump
)) {
18243 DHD_ERROR(("%s: failed to dump d2h_minidump to file\n",
18247 #endif /* D2H_MINIDUMP */
18249 #ifdef DHD_SSSR_DUMP
18252 dhd_sssr_dump(void *handle
, void *event_info
, u8 event
)
18254 dhd_info_t
*dhd
= handle
;
18257 char before_sr_dump
[128];
18258 char after_sr_dump
[128];
18259 unsigned long flags
= 0;
18260 uint dig_buf_size
= 0;
18262 DHD_ERROR(("%s: ENTER \n", __FUNCTION__
));
18265 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
18271 DHD_GENERAL_LOCK(dhdp
, flags
);
18272 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
18273 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18274 DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__
));
18277 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18279 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
18280 /* Init file name */
18281 memset(before_sr_dump
, 0, sizeof(before_sr_dump
));
18282 memset(after_sr_dump
, 0, sizeof(after_sr_dump
));
18284 snprintf(before_sr_dump
, sizeof(before_sr_dump
), "%s_%d_%s",
18285 "sssr_core", i
, "before_SR");
18286 snprintf(after_sr_dump
, sizeof(after_sr_dump
), "%s_%d_%s",
18287 "sssr_core", i
, "after_SR");
18289 if (dhdp
->sssr_d11_before
[i
] && dhdp
->sssr_d11_outofreset
[i
]) {
18290 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_d11_before
[i
],
18291 dhdp
->sssr_reg_info
.mac_regs
[i
].sr_size
, before_sr_dump
)) {
18292 DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
18296 if (dhdp
->sssr_d11_after
[i
] && dhdp
->sssr_d11_outofreset
[i
]) {
18297 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_d11_after
[i
],
18298 dhdp
->sssr_reg_info
.mac_regs
[i
].sr_size
, after_sr_dump
)) {
18299 DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
18305 if (dhdp
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
18306 dig_buf_size
= dhdp
->sssr_reg_info
.vasip_regs
.vasip_sr_size
;
18307 } else if ((dhdp
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
18308 dhdp
->sssr_reg_info
.dig_mem_info
.dig_sr_size
) {
18309 dig_buf_size
= dhdp
->sssr_reg_info
.dig_mem_info
.dig_sr_size
;
18312 if (dhdp
->sssr_dig_buf_before
) {
18313 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_dig_buf_before
,
18314 dig_buf_size
, "sssr_dig_before_SR")) {
18315 DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
18320 if (dhdp
->sssr_dig_buf_after
) {
18321 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_dig_buf_after
,
18322 dig_buf_size
, "sssr_dig_after_SR")) {
18323 DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
18329 DHD_GENERAL_LOCK(dhdp
, flags
);
18330 DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp
);
18331 dhd_os_busbusy_wake(dhdp
);
18332 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18336 dhd_schedule_sssr_dump(dhd_pub_t
*dhdp
)
18338 unsigned long flags
= 0;
18340 /* bus busy bit for sssr dump will be cleared in sssr dump
18341 * work item context, after sssr dump files are created
18343 DHD_GENERAL_LOCK(dhdp
, flags
);
18344 DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp
);
18345 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18347 if (dhdp
->info
->no_wq_sssrdump
) {
18348 dhd_sssr_dump(dhdp
->info
, 0, 0);
18352 DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__
));
18353 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, NULL
,
18354 DHD_WQ_WORK_SSSR_DUMP
, dhd_sssr_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
18356 #endif /* DHD_SSSR_DUMP */
18358 #ifdef DHD_LOG_DUMP
18360 dhd_log_dump(void *handle
, void *event_info
, u8 event
)
18362 dhd_info_t
*dhd
= handle
;
18363 log_dump_type_t
*type
= (log_dump_type_t
*)event_info
;
18365 if (!dhd
|| !type
) {
18366 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
18371 /* flush the fw side logs */
18372 wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd
->pub
),
18373 FW_LOGSET_MASK_ALL
);
18375 /* there are currently 3 possible contexts from which
18376 * log dump can be scheduled -
18377 * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
18378 * 3.HEALTH CHECK event
18379 * The concise debug info buffer is a shared resource
18380 * and in case a trap is one of the contexts then both the
18381 * scheduled work queues need to run because trap data is
18382 * essential for debugging. Hence a mutex lock is acquired
18383 * before calling do_dhd_log_dump().
18385 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__
));
18386 dhd_os_logdump_lock(&dhd
->pub
);
18387 DHD_OS_WAKE_LOCK(&dhd
->pub
);
18388 if (do_dhd_log_dump(&dhd
->pub
, type
) != BCME_OK
) {
18389 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__
));
18391 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
18392 dhd_os_logdump_unlock(&dhd
->pub
);
18395 void dhd_schedule_log_dump(dhd_pub_t
*dhdp
, void *type
)
18397 DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__
));
18398 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
18399 type
, DHD_WQ_WORK_DHD_LOG_DUMP
,
18400 dhd_log_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
18404 dhd_print_buf_addr(dhd_pub_t
*dhdp
, char *name
, void *buf
, unsigned int size
)
18406 if ((dhdp
->memdump_enabled
== DUMP_MEMONLY
) ||
18407 (dhdp
->memdump_enabled
== DUMP_MEMFILE_BUGON
)) {
18408 #if defined(CONFIG_ARM64)
18409 DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
18410 name
, (uint64
)buf
, (uint64
)__virt_to_phys((ulong
)buf
), size
));
18411 #elif defined(__ARM_ARCH_7A__)
18412 DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
18413 name
, (uint32
)buf
, (uint32
)__virt_to_phys((ulong
)buf
), size
));
18414 #endif /* __ARM_ARCH_7A__ */
18419 dhd_log_dump_buf_addr(dhd_pub_t
*dhdp
, log_dump_type_t
*type
)
18422 unsigned long wr_size
= 0;
18423 struct dhd_log_dump_buf
*dld_buf
= &g_dld_buf
[0];
18424 size_t log_size
= 0;
18425 char buf_name
[DHD_PRINT_BUF_NAME_LEN
];
18426 dhd_dbg_ring_t
*ring
= NULL
;
18428 BCM_REFERENCE(ring
);
18430 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
18431 dld_buf
= &g_dld_buf
[i
];
18432 log_size
= (unsigned long)dld_buf
->max
-
18433 (unsigned long)dld_buf
->buffer
;
18434 if (dld_buf
->wraparound
) {
18435 wr_size
= log_size
;
18437 wr_size
= (unsigned long)dld_buf
->present
-
18438 (unsigned long)dld_buf
->front
;
18440 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d]", i
);
18441 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
, dld_buf_size
[i
]);
18442 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d] buffer", i
);
18443 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
->buffer
, wr_size
);
18444 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d] present", i
);
18445 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
->present
, wr_size
);
18446 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d] front", i
);
18447 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
->front
, wr_size
);
18450 #ifdef DEBUGABILITY_ECNTRS_LOGGING
18451 /* periodic flushing of ecounters is NOT supported */
18452 if (*type
== DLD_BUF_TYPE_ALL
&&
18453 logdump_ecntr_enable
&&
18454 dhdp
->ecntr_dbg_ring
) {
18456 ring
= (dhd_dbg_ring_t
*)dhdp
->ecntr_dbg_ring
;
18457 dhd_print_buf_addr(dhdp
, "ecntr_dbg_ring", ring
, LOG_DUMP_ECNTRS_MAX_BUFSIZE
);
18458 dhd_print_buf_addr(dhdp
, "ecntr_dbg_ring ring_buf", ring
->ring_buf
,
18459 LOG_DUMP_ECNTRS_MAX_BUFSIZE
);
18461 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
18464 if (dhdp
->dongle_trap_occured
&& dhdp
->extended_trap_data
) {
18465 dhd_print_buf_addr(dhdp
, "extended_trap_data", dhdp
->extended_trap_data
,
18466 BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
18468 #endif /* BCMPCIE */
18470 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18471 /* if health check event was received */
18472 if (dhdp
->memdump_type
== DUMP_TYPE_DONGLE_HOST_EVENT
) {
18473 dhd_print_buf_addr(dhdp
, "health_chk_event_data", dhdp
->health_chk_event_data
,
18474 HEALTH_CHK_BUF_SIZE
);
18476 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18478 /* append the concise debug information */
18479 if (dhdp
->concise_dbg_buf
) {
18480 dhd_print_buf_addr(dhdp
, "concise_dbg_buf", dhdp
->concise_dbg_buf
,
18481 CONCISE_DUMP_BUFLEN
);
18485 #ifdef CUSTOMER_HW4_DEBUG
18487 dhd_log_dump_print_to_kmsg(char *bufptr
, unsigned long len
)
18489 char tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
+ 1];
18491 unsigned long plen
= 0;
18493 if (!bufptr
|| !len
)
18496 memset(tmp_buf
, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
);
18497 end
= bufptr
+ len
;
18498 while (bufptr
< end
) {
18499 if ((bufptr
+ DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
) < end
) {
18500 memcpy(tmp_buf
, bufptr
, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
);
18501 tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
] = '\0';
18502 printf("%s", tmp_buf
);
18503 bufptr
+= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
;
18505 plen
= (unsigned long)end
- (unsigned long)bufptr
;
18506 memcpy(tmp_buf
, bufptr
, plen
);
18507 tmp_buf
[plen
] = '\0';
18508 printf("%s", tmp_buf
);
18515 dhd_log_dump_print_tail(dhd_pub_t
*dhdp
,
18516 struct dhd_log_dump_buf
*dld_buf
,
18519 char *flush_ptr1
= NULL
, *flush_ptr2
= NULL
;
18520 unsigned long len_flush1
= 0, len_flush2
= 0;
18521 unsigned long flags
= 0;
18523 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18524 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18525 flush_ptr1
= dld_buf
->present
- tail_len
;
18526 if (flush_ptr1
>= dld_buf
->front
) {
18527 /* tail content is within the buffer */
18529 len_flush1
= tail_len
;
18530 } else if (dld_buf
->wraparound
) {
18531 /* tail content spans the buffer length i.e, wrap around */
18532 flush_ptr1
= dld_buf
->front
;
18533 len_flush1
= (unsigned long)dld_buf
->present
- (unsigned long)flush_ptr1
;
18534 len_flush2
= (unsigned long)tail_len
- len_flush1
;
18535 flush_ptr2
= (char *)((unsigned long)dld_buf
->max
-
18536 (unsigned long)len_flush2
);
18538 /* amt of logs in buffer is less than tail size */
18539 flush_ptr1
= dld_buf
->front
;
18541 len_flush1
= (unsigned long)dld_buf
->present
- (unsigned long)dld_buf
->front
;
18543 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18545 printf("\n================= LOG_DUMP tail =================\n");
18547 dhd_log_dump_print_to_kmsg(flush_ptr2
, len_flush2
);
18549 dhd_log_dump_print_to_kmsg(flush_ptr1
, len_flush1
);
18550 printf("\n===================================================\n");
18552 #endif /* CUSTOMER_HW4_DEBUG */
18554 /* Must hold 'dhd_os_logdump_lock' before calling this function ! */
18556 do_dhd_log_dump(dhd_pub_t
*dhdp
, log_dump_type_t
*type
)
18558 int ret
= 0, i
= 0;
18559 struct file
*fp
= NULL
;
18560 mm_segment_t old_fs
;
18562 unsigned int wr_size
= 0;
18563 char dump_path
[128];
18565 unsigned long flags
= 0;
18566 struct dhd_log_dump_buf
*dld_buf
= &g_dld_buf
[0];
18567 size_t log_size
= 0;
18568 size_t fspace_remain
= 0;
18570 char time_str
[128];
18572 uint32 remain_len
= 0;
18573 log_dump_section_hdr_t sec_hdr
;
18574 dhd_info_t
*dhd_info
= NULL
;
18576 DHD_ERROR(("%s: ENTER \n", __FUNCTION__
));
18578 /* if dhdp is null, its extremely unlikely that log dump will be scheduled
18579 * so not freeing 'type' here is ok, even if we want to free 'type'
18580 * we cannot do so, since 'dhdp->osh' is unavailable
18583 if (!dhdp
|| !type
) {
18585 DHD_GENERAL_LOCK(dhdp
, flags
);
18586 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp
);
18587 dhd_os_busbusy_wake(dhdp
);
18588 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18593 DHD_GENERAL_LOCK(dhdp
, flags
);
18594 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
18595 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp
);
18596 dhd_os_busbusy_wake(dhdp
);
18597 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18598 MFREE(dhdp
->osh
, type
, sizeof(*type
));
18599 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__
));
18602 DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp
);
18603 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18605 dhd_info
= (dhd_info_t
*)dhdp
->info
;
18606 BCM_REFERENCE(dhd_info
);
18608 /* in case of trap get preserve logs from ETD */
18609 #if defined(BCMPCIE) && defined(DEBUGABILITY_ETD_PRSRV_LOGS)
18610 if (dhdp
->dongle_trap_occured
&&
18611 dhdp
->extended_trap_data
) {
18612 dhdpcie_get_etd_preserve_logs(dhdp
, (uint8
*)dhdp
->extended_trap_data
,
18613 &dhd_info
->event_data
);
18615 #endif /* BCMPCIE */
18617 #ifdef SHOW_LOGTRACE
18618 /* flush the event work items to get any fw events/logs
18619 * flush_work is a blocking call
18621 flush_delayed_work(&dhd_info
->event_log_dispatcher_work
);
18622 #endif /* SHOW_LOGTRACE */
18624 #ifdef CUSTOMER_HW4_DEBUG
18625 /* print last 'x' KB of preserve buffer data to kmsg console
18626 * this is to address cases where debug_dump is not
18627 * available for debugging
18629 dhd_log_dump_print_tail(dhdp
,
18630 &g_dld_buf
[DLD_BUF_TYPE_PRESERVE
], logdump_prsrv_tailsize
);
18631 #endif /* CUSTOMER_HW4_DEBUG */
18633 /* change to KERNEL_DS address limit */
18637 /* Init file name */
18638 memset(dump_path
, 0, sizeof(dump_path
));
18639 switch (dhdp
->debug_dump_subcmd
) {
18641 snprintf(dump_path
, sizeof(dump_path
), "%s",
18642 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18643 DHD_DUMP_SUBSTR_UNWANTED
);
18645 case CMD_DISCONNECTED
:
18646 snprintf(dump_path
, sizeof(dump_path
), "%s",
18647 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18648 DHD_DUMP_SUBSTR_DISCONNECTED
);
18651 snprintf(dump_path
, sizeof(dump_path
), "%s",
18652 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
);
18655 if (!dhdp
->logdump_periodic_flush
) {
18656 get_debug_dump_time(dhdp
->debug_dump_time_str
);
18657 snprintf(dump_path
+ strlen(dump_path
),
18658 sizeof(dump_path
) - strlen(dump_path
),
18659 "_%s", dhdp
->debug_dump_time_str
);
18662 memset(time_str
, 0, sizeof(time_str
));
18663 ts
= dhd_log_dump_get_timestamp();
18664 snprintf(time_str
, sizeof(time_str
),
18665 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts
);
18667 DHD_ERROR(("DHD version: %s\n", dhd_version
));
18668 DHD_ERROR(("F/W version: %s\n", fw_version
));
18669 DHD_ERROR(("debug_dump_path = %s\n", dump_path
));
18671 dhd_log_dump_buf_addr(dhdp
, type
);
18673 /* if this is the first time after dhd is loaded,
18674 * or, if periodic flush is disabled, clear the log file
18676 if (!dhdp
->logdump_periodic_flush
|| dhdp
->last_file_posn
== 0)
18677 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
| O_TRUNC
;
18679 file_mode
= O_CREAT
| O_RDWR
| O_SYNC
;
18681 fp
= filp_open(dump_path
, file_mode
, 0664);
18683 /* If android installed image, try '/data' directory */
18684 #if defined(CONFIG_X86)
18685 DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n",
18687 snprintf(dump_path
, sizeof(dump_path
), "/data/" DHD_DEBUG_DUMP_TYPE
);
18688 if (!dhdp
->logdump_periodic_flush
) {
18689 snprintf(dump_path
+ strlen(dump_path
),
18690 sizeof(dump_path
) - strlen(dump_path
),
18691 "_%s", dhdp
->debug_dump_time_str
);
18693 fp
= filp_open(dump_path
, file_mode
, 0664);
18696 DHD_ERROR(("open file error, err = %d\n", ret
));
18699 DHD_ERROR(("debug_dump_path = %s\n", dump_path
));
18702 DHD_ERROR(("open file error, err = %d\n", ret
));
18704 #endif /* CONFIG_X86 && OEM_ANDROID */
18707 ret
= vfs_stat(dump_path
, &stat
);
18709 DHD_ERROR(("file stat error, err = %d\n", ret
));
18713 /* if some one else has changed the file */
18714 if (dhdp
->last_file_posn
!= 0 &&
18715 stat
.size
< dhdp
->last_file_posn
) {
18716 dhdp
->last_file_posn
= 0;
18719 if (dhdp
->logdump_periodic_flush
) {
18720 log_size
= strlen(time_str
) + strlen(DHD_DUMP_LOG_HDR
) + sizeof(sec_hdr
);
18721 /* calculate the amount of space required to dump all logs */
18722 for (i
= 0; i
< DLD_BUFFER_NUM
; ++i
) {
18723 if (*type
!= DLD_BUF_TYPE_ALL
&& i
!= *type
)
18726 if (g_dld_buf
[i
].wraparound
) {
18727 log_size
+= (unsigned long)g_dld_buf
[i
].max
18728 - (unsigned long)g_dld_buf
[i
].buffer
;
18730 spin_lock_irqsave(&g_dld_buf
[i
].lock
, flags
);
18731 log_size
+= (unsigned long)g_dld_buf
[i
].present
-
18732 (unsigned long)g_dld_buf
[i
].front
;
18733 spin_unlock_irqrestore(&g_dld_buf
[i
].lock
, flags
);
18735 log_size
+= strlen(dld_hdrs
[i
].hdr_str
) + sizeof(sec_hdr
);
18737 if (*type
!= DLD_BUF_TYPE_ALL
&& i
== *type
)
18741 ret
= generic_file_llseek(fp
, dhdp
->last_file_posn
, SEEK_CUR
);
18743 DHD_ERROR(("file seek last posn error ! err = %d \n", ret
));
18748 /* if the max file size is reached, wrap around to beginning of the file
18749 * we're treating the file as a large ring buffer
18751 fspace_remain
= logdump_max_filesize
- pos
;
18752 if (log_size
> fspace_remain
) {
18757 /* write the timestamp hdr to the file first */
18758 ret
= vfs_write(fp
, time_str
, strlen(time_str
), &pos
);
18760 DHD_ERROR(("write file error, err = %d\n", ret
));
18764 /* prep the section header */
18765 memset(&sec_hdr
, 0, sizeof(sec_hdr
));
18766 sec_hdr
.magic
= LOG_DUMP_MAGIC
;
18767 sec_hdr
.timestamp
= local_clock();
18769 for (i
= 0; i
< DLD_BUFFER_NUM
; ++i
) {
18770 unsigned int buf_size
= 0;
18772 if (*type
!= DLD_BUF_TYPE_ALL
&& i
!= *type
)
18775 /* calculate the length of the log */
18776 dld_buf
= &g_dld_buf
[i
];
18777 buf_size
= (unsigned long)dld_buf
->max
-
18778 (unsigned long)dld_buf
->buffer
;
18779 if (dld_buf
->wraparound
) {
18780 wr_size
= buf_size
;
18782 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18783 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18784 wr_size
= (unsigned long)dld_buf
->present
-
18785 (unsigned long)dld_buf
->front
;
18786 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18789 /* write the section header first */
18790 sec_hdr
.type
= dld_hdrs
[i
].sec_type
;
18791 sec_hdr
.length
= wr_size
;
18792 vfs_write(fp
, dld_hdrs
[i
].hdr_str
, strlen(dld_hdrs
[i
].hdr_str
), &pos
);
18793 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18794 /* write the log */
18795 ret
= vfs_write(fp
, dld_buf
->buffer
, wr_size
, &pos
);
18797 DHD_ERROR(("write file error, err = %d\n", ret
));
18801 /* re-init dhd_log_dump_buf structure */
18802 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18803 dld_buf
->wraparound
= 0;
18804 dld_buf
->present
= dld_buf
->front
;
18805 dld_buf
->remain
= buf_size
;
18806 bzero(dld_buf
->buffer
, buf_size
);
18807 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18809 if (*type
!= DLD_BUF_TYPE_ALL
)
18813 #ifdef DEBUGABILITY_ECNTRS_LOGGING
18814 /* periodic flushing of ecounters is NOT supported */
18815 if (*type
== DLD_BUF_TYPE_ALL
&&
18816 logdump_ecntr_enable
&&
18817 dhdp
->ecntr_dbg_ring
) {
18818 dhd_log_dump_ring_to_file(dhdp
, dhdp
->ecntr_dbg_ring
,
18819 fp
, (unsigned long *)&pos
, &sec_hdr
);
18821 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
18824 /* append extended trap data to the file in case of traps */
18825 if (dhdp
->dongle_trap_occured
&&
18826 dhdp
->extended_trap_data
) {
18827 /* write the section header first */
18828 vfs_write(fp
, EXT_TRAP_LOG_HDR
, strlen(EXT_TRAP_LOG_HDR
), &pos
);
18829 sec_hdr
.type
= LOG_DUMP_SECTION_EXT_TRAP
;
18830 sec_hdr
.length
= BCMPCIE_EXT_TRAP_DATA_MAXLEN
;
18831 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18832 /* write the log */
18833 ret
= vfs_write(fp
, (char *)dhdp
->extended_trap_data
,
18834 BCMPCIE_EXT_TRAP_DATA_MAXLEN
, &pos
);
18836 DHD_ERROR(("write file error of ext trap info,"
18837 " err = %d\n", ret
));
18841 #endif /* BCMPCIE */
18843 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18844 /* if health check event was received, dump to file */
18845 if (dhdp
->memdump_type
== DUMP_TYPE_DONGLE_HOST_EVENT
) {
18846 /* write the section header first */
18847 vfs_write(fp
, HEALTH_CHK_LOG_HDR
, strlen(HEALTH_CHK_LOG_HDR
), &pos
);
18848 sec_hdr
.type
= LOG_DUMP_SECTION_HEALTH_CHK
;
18849 sec_hdr
.length
= HEALTH_CHK_BUF_SIZE
;
18850 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18851 /* write the log */
18852 ret
= vfs_write(fp
, (char *)dhdp
->health_chk_event_data
,
18853 HEALTH_CHK_BUF_SIZE
, &pos
);
18855 DHD_ERROR(("write file error of health chk info,"
18856 " err = %d\n", ret
));
18860 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18862 #ifdef DHD_DUMP_PCIE_RINGS
18863 /* write the section header first */
18864 vfs_write(fp
, FLOWRING_DUMP_HDR
, strlen(FLOWRING_DUMP_HDR
), &pos
);
18865 /* Write the ring summary */
18866 ret
= vfs_write(fp
, dhdp
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
- remain_len
, &pos
);
18868 DHD_ERROR(("write file error of concise debug info,"
18869 " err = %d\n", ret
));
18872 sec_hdr
.type
= LOG_DUMP_SECTION_FLOWRING
;
18873 sec_hdr
.length
= ((H2DRING_TXPOST_ITEMSIZE
18874 * H2DRING_TXPOST_MAX_ITEM
)
18875 + (D2HRING_TXCMPLT_ITEMSIZE
18876 * D2HRING_TXCMPLT_MAX_ITEM
)
18877 + (H2DRING_RXPOST_ITEMSIZE
18878 * H2DRING_RXPOST_MAX_ITEM
)
18879 + (D2HRING_RXCMPLT_ITEMSIZE
18880 * D2HRING_RXCMPLT_MAX_ITEM
)
18881 + (H2DRING_CTRL_SUB_ITEMSIZE
18882 * H2DRING_CTRL_SUB_MAX_ITEM
)
18883 + (D2HRING_CTRL_CMPLT_ITEMSIZE
18884 * D2HRING_CTRL_CMPLT_MAX_ITEM
)
18885 + (H2DRING_INFO_BUFPOST_ITEMSIZE
18886 * H2DRING_DYNAMIC_INFO_MAX_ITEM
)
18887 + (D2HRING_INFO_BUFCMPLT_ITEMSIZE
18888 * D2HRING_DYNAMIC_INFO_MAX_ITEM
));
18889 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18890 /* write the log */
18891 ret
= dhd_d2h_h2d_ring_dump(dhdp
, fp
, (unsigned long *)&pos
);
18893 DHD_ERROR(("%s: error dumping ring data!\n",
18897 #endif /* DHD_DUMP_PCIE_RINGS */
18899 /* append the concise debug information to the file.
18900 * This is the information which is seen
18901 * when a 'dhd dump' iovar is fired
18903 if (dhdp
->concise_dbg_buf
) {
18904 remain_len
= dhd_dump(dhdp
, (char *)dhdp
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
);
18905 if (remain_len
<= 0) {
18906 DHD_ERROR(("%s: error getting concise debug info !\n",
18910 /* write the section header first */
18911 vfs_write(fp
, DHD_DUMP_LOG_HDR
, strlen(DHD_DUMP_LOG_HDR
), &pos
);
18912 sec_hdr
.type
= LOG_DUMP_SECTION_DHD_DUMP
;
18913 sec_hdr
.length
= CONCISE_DUMP_BUFLEN
- remain_len
;
18914 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18915 /* write the log */
18916 ret
= vfs_write(fp
, dhdp
->concise_dbg_buf
,
18917 CONCISE_DUMP_BUFLEN
- remain_len
, &pos
);
18919 DHD_ERROR(("write file error of concise debug info,"
18920 " err = %d\n", ret
));
18926 if (dhdp
->logdump_cookie
&& dhd_logdump_cookie_count(dhdp
) > 0) {
18927 ret
= dhd_log_dump_cookie_to_file(dhdp
, fp
, (unsigned long *)&pos
);
18929 DHD_ERROR(("write file error of cooke info, err = %d\n", ret
));
18934 if (dhdp
->logdump_periodic_flush
) {
18935 /* store the last position written to in the file for future use */
18936 dhdp
->last_file_posn
= pos
;
18940 MFREE(dhdp
->osh
, type
, sizeof(*type
));
18941 if (!IS_ERR(fp
) && fp
!= NULL
) {
18942 filp_close(fp
, NULL
);
18943 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
18944 __FUNCTION__
, dump_path
));
18947 DHD_GENERAL_LOCK(dhdp
, flags
);
18948 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp
);
18949 dhd_os_busbusy_wake(dhdp
);
18950 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18952 #ifdef DHD_DUMP_MNGR
18954 dhd_dump_file_manage_enqueue(dhdp
, dump_path
, DHD_DEBUG_DUMP_TYPE
);
18956 #endif /* DHD_DUMP_MNGR */
18958 return (ret
< 0) ? BCME_ERROR
: BCME_OK
;
18960 #endif /* DHD_LOG_DUMP */
18963 * This call is to get the memdump size so that,
18964 * halutil can alloc that much buffer in user space.
/*
 * dhd_os_socram_dump - trigger a dongle SoC RAM dump and report its size.
 * On BCME_OK from dhd_common_socram_dump() the dump length is returned
 * through *dump_size (reads dhdp->soc_ram_length). Refuses to run while
 * the bus is down or suspending.
 * NOTE(review): partial listing -- brace-only lines, declarations and the
 * final return (embedded line-number gaps) were dropped by extraction.
 */
18967 dhd_os_socram_dump(struct net_device
*dev
, uint32
*dump_size
)
18970 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
18971 dhd_pub_t
*dhdp
= &dhd
->pub
;
/* Bail out early if the bus is not usable. */
18973 if (dhdp
->busstate
== DHD_BUS_DOWN
) {
18974 DHD_ERROR(("%s: bus is down\n", __FUNCTION__
));
18978 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
18979 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18980 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
/* Wake the PCIe bus first when runtime PM is compiled in. */
18983 #ifdef DHD_PCIE_RUNTIMEPM
18984 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
18985 #endif /* DHD_PCIE_RUNTIMEPM */
18986 ret
= dhd_common_socram_dump(dhdp
);
18987 if (ret
== BCME_OK
) {
18988 *dump_size
= dhdp
->soc_ram_length
;
18994 * This is to get the actual memdup after getting the memdump size
/*
 * dhd_os_get_socram_dump - copy a previously captured SoC RAM dump into a
 * caller-supplied buffer, then zero the driver-side copy. Returns
 * BCME_BUFTOOSHORT when the caller's buffer (orig_len) is smaller than the
 * stored dump, BCME_NOTREADY when no dump has been captured yet.
 * NOTE(review): partial listing -- declarations/braces/return dropped by
 * extraction; orig_len presumably derives from *size -- verify in full source.
 */
18997 dhd_os_get_socram_dump(struct net_device
*dev
, char **buf
, uint32
*size
)
19001 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
19002 dhd_pub_t
*dhdp
= &dhd
->pub
;
19006 if (dhdp
->soc_ram
) {
19007 if (orig_len
>= dhdp
->soc_ram_length
) {
19008 memcpy(*buf
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
19009 /* reset the storage of dump */
19010 memset(dhdp
->soc_ram
, 0, dhdp
->soc_ram_length
);
19011 *size
= dhdp
->soc_ram_length
;
19013 ret
= BCME_BUFTOOSHORT
;
19014 DHD_ERROR(("The length of the buffer is too short"
19015 " to save the memory dump with %d\n", dhdp
->soc_ram_length
));
19018 DHD_ERROR(("socram_dump is not ready to get\n"));
19019 ret
= BCME_NOTREADY
;
/*
 * dhd_os_get_version - copy either the DHD driver version string
 * (dhd_ver == TRUE path, copies dhd_version) or the firmware version
 * (substring of info_string after "Firmware: ") into *buf.
 * Returns BCME_BADARG on an invalid argument (guard dropped by extraction).
 * NOTE(review): strncpy with size-1 relies on the preceding memset for NUL
 * termination; braces and some guards are missing from this listing.
 */
19025 dhd_os_get_version(struct net_device
*dev
, bool dhd_ver
, char **buf
, uint32 size
)
19030 return BCME_BADARG
;
19032 fw_str
= strstr(info_string
, "Firmware: ");
19033 if (fw_str
== NULL
) {
/* Zero the whole destination so the bounded copies below stay terminated. */
19037 memset(*buf
, 0, size
);
19039 strncpy(*buf
, dhd_version
, size
- 1);
19041 strncpy(*buf
, fw_str
, size
- 1);
/*
 * dhd_sta_associated - TRUE if a station entry for @mac exists on BSS
 * index @bssidx (thin wrapper over dhd_find_sta()).
 */
19046 bool dhd_sta_associated(dhd_pub_t
*dhdp
, uint32 bssidx
, uint8
*mac
)
19048 return dhd_find_sta(dhdp
, bssidx
, mac
) ? TRUE
: FALSE
;
19051 #ifdef DHD_L2_FILTER
/*
 * dhd_get_ifp_arp_table_handle - return the per-interface proxy-ARP table
 * handle (ifp->phnd_arp_table) for BSS index @bssidx.
 * NOTE(review): ifp declaration and any NULL guard were dropped by extraction.
 */
19053 dhd_get_ifp_arp_table_handle(dhd_pub_t
*dhdp
, uint32 bssidx
)
19055 dhd_info_t
*dhd
= dhdp
->info
;
19058 ASSERT(bssidx
< DHD_MAX_IFS
);
19060 ifp
= dhd
->iflist
[bssidx
];
19061 return ifp
->phnd_arp_table
;
/*
 * dhd_get_parp_status - read the proxy-ARP enable flag for interface @idx.
 * NOTE(review): ifp declaration / NULL-guard lines dropped by extraction
 * (gap between original lines 19071 and 19074).
 */
19064 int dhd_get_parp_status(dhd_pub_t
*dhdp
, uint32 idx
)
19066 dhd_info_t
*dhd
= dhdp
->info
;
19069 ASSERT(idx
< DHD_MAX_IFS
);
19071 ifp
= dhd
->iflist
[idx
];
19074 return ifp
->parp_enable
;
19079 /* Set interface specific proxy arp configuration */
/*
 * dhd_set_parp_status - enable/disable proxy ARP on interface @idx.
 * Sets parp_enable, parp_discard and parp_allnode together to @val, and
 * flushes the interface's ARP table when disabling.
 * NOTE(review): guards/braces/return dropped by extraction.
 */
19080 int dhd_set_parp_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19082 dhd_info_t
*dhd
= dhdp
->info
;
19084 ASSERT(idx
< DHD_MAX_IFS
);
19085 ifp
= dhd
->iflist
[idx
];
19090 /* At present all 3 variables are being
19093 ifp
->parp_enable
= val
;
19094 ifp
->parp_discard
= val
;
19095 ifp
->parp_allnode
= val
;
19097 /* Flush ARP entries when disabled */
19098 if (val
== FALSE
) {
19099 bcm_l2_filter_arp_table_update(dhdp
->osh
, ifp
->phnd_arp_table
, TRUE
, NULL
,
19100 FALSE
, dhdp
->tickcnt
);
/*
 * dhd_parp_discard_is_enabled - read the proxy-ARP discard flag for
 * interface @idx. NOTE(review): ifp declaration / NULL-guard dropped.
 */
19105 bool dhd_parp_discard_is_enabled(dhd_pub_t
*dhdp
, uint32 idx
)
19107 dhd_info_t
*dhd
= dhdp
->info
;
19110 ASSERT(idx
< DHD_MAX_IFS
);
19112 ifp
= dhd
->iflist
[idx
];
19115 return ifp
->parp_discard
;
/*
 * dhd_parp_allnode_is_enabled - read the proxy-ARP all-node flag for
 * interface @idx. NOTE(review): ifp declaration / NULL-guard dropped.
 */
19119 dhd_parp_allnode_is_enabled(dhd_pub_t
*dhdp
, uint32 idx
)
19121 dhd_info_t
*dhd
= dhdp
->info
;
19124 ASSERT(idx
< DHD_MAX_IFS
);
19126 ifp
= dhd
->iflist
[idx
];
19130 return ifp
->parp_allnode
;
/*
 * dhd_get_dhcp_unicast_status - read the DHCP-unicast conversion flag for
 * interface @idx. NOTE(review): ifp declaration / NULL-guard dropped.
 */
19133 int dhd_get_dhcp_unicast_status(dhd_pub_t
*dhdp
, uint32 idx
)
19135 dhd_info_t
*dhd
= dhdp
->info
;
19138 ASSERT(idx
< DHD_MAX_IFS
);
19140 ifp
= dhd
->iflist
[idx
];
19144 return ifp
->dhcp_unicast
;
/*
 * dhd_set_dhcp_unicast_status - set the DHCP-unicast conversion flag for
 * interface @idx to @val. NOTE(review): guard/return lines dropped.
 */
19147 int dhd_set_dhcp_unicast_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19149 dhd_info_t
*dhd
= dhdp
->info
;
19151 ASSERT(idx
< DHD_MAX_IFS
);
19152 ifp
= dhd
->iflist
[idx
];
19156 ifp
->dhcp_unicast
= val
;
/*
 * dhd_get_block_ping_status - read the block-ping filter flag for
 * interface @idx. NOTE(review): ifp declaration / NULL-guard dropped.
 */
19160 int dhd_get_block_ping_status(dhd_pub_t
*dhdp
, uint32 idx
)
19162 dhd_info_t
*dhd
= dhdp
->info
;
19165 ASSERT(idx
< DHD_MAX_IFS
);
19167 ifp
= dhd
->iflist
[idx
];
19171 return ifp
->block_ping
;
/*
 * dhd_set_block_ping_status - set the block-ping filter flag for interface
 * @idx, then refresh rx packet-chaining eligibility (chaining must be off
 * while ping filtering inspects individual packets).
 * NOTE(review): guard/return lines dropped by extraction.
 */
19174 int dhd_set_block_ping_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19176 dhd_info_t
*dhd
= dhdp
->info
;
19178 ASSERT(idx
< DHD_MAX_IFS
);
19179 ifp
= dhd
->iflist
[idx
];
19183 ifp
->block_ping
= val
;
19184 /* Disable rx_pkt_chain feature for interface if block_ping option is
19187 dhd_update_rx_pkt_chainable_state(dhdp
, idx
);
/*
 * dhd_get_grat_arp_status - read the gratuitous-ARP filter flag for
 * interface @idx. NOTE(review): ifp declaration / NULL-guard dropped.
 */
19191 int dhd_get_grat_arp_status(dhd_pub_t
*dhdp
, uint32 idx
)
19193 dhd_info_t
*dhd
= dhdp
->info
;
19196 ASSERT(idx
< DHD_MAX_IFS
);
19198 ifp
= dhd
->iflist
[idx
];
19202 return ifp
->grat_arp
;
/*
 * dhd_set_grat_arp_status - set the gratuitous-ARP filter flag for
 * interface @idx to @val. NOTE(review): guard/return lines dropped.
 */
19205 int dhd_set_grat_arp_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19207 dhd_info_t
*dhd
= dhdp
->info
;
19209 ASSERT(idx
< DHD_MAX_IFS
);
19210 ifp
= dhd
->iflist
[idx
];
19214 ifp
->grat_arp
= val
;
/*
 * dhd_get_block_tdls_status - read the block-TDLS filter flag for
 * interface @idx. NOTE(review): ifp declaration / NULL-guard dropped.
 */
19219 int dhd_get_block_tdls_status(dhd_pub_t
*dhdp
, uint32 idx
)
19221 dhd_info_t
*dhd
= dhdp
->info
;
19224 ASSERT(idx
< DHD_MAX_IFS
);
19226 ifp
= dhd
->iflist
[idx
];
19230 return ifp
->block_tdls
;
/*
 * dhd_set_block_tdls_status - set the block-TDLS filter flag for interface
 * @idx to @val. NOTE(review): guard/return lines dropped by extraction.
 */
19233 int dhd_set_block_tdls_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19235 dhd_info_t
*dhd
= dhdp
->info
;
19237 ASSERT(idx
< DHD_MAX_IFS
);
19238 ifp
= dhd
->iflist
[idx
];
19242 ifp
->block_tdls
= val
;
19246 #endif /* DHD_L2_FILTER */
19248 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
19249 int dhd_rps_cpus_enable(struct net_device
*net
, int enable
)
19251 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
19254 char * RPS_CPU_SETBUF
;
19256 ifidx
= dhd_net2idx(dhd
, net
);
19257 if (ifidx
== DHD_BAD_IF
) {
19258 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
19262 if (ifidx
== PRIMARY_INF
) {
19263 if (dhd
->pub
.op_mode
== DHD_FLAG_IBSS_MODE
) {
19264 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__
));
19265 RPS_CPU_SETBUF
= RPS_CPUS_MASK_IBSS
;
19267 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__
));
19268 RPS_CPU_SETBUF
= RPS_CPUS_MASK
;
19270 } else if (ifidx
== VIRTUAL_INF
) {
19271 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__
));
19272 RPS_CPU_SETBUF
= RPS_CPUS_MASK_P2P
;
19274 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__
, ifidx
));
19278 ifp
= dhd
->iflist
[ifidx
];
19281 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__
, RPS_CPU_SETBUF
));
19282 custom_rps_map_set(ifp
->net
->_rx
, RPS_CPU_SETBUF
, strlen(RPS_CPU_SETBUF
));
19284 custom_rps_map_clear(ifp
->net
->_rx
);
19287 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__
));
19293 int custom_rps_map_set(struct netdev_rx_queue
*queue
, char *buf
, size_t len
)
19295 struct rps_map
*old_map
, *map
;
19296 cpumask_var_t mask
;
19298 static DEFINE_SPINLOCK(rps_map_lock
);
19300 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
19302 if (!alloc_cpumask_var(&mask
, GFP_KERNEL
)) {
19303 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__
));
19307 err
= bitmap_parse(buf
, len
, cpumask_bits(mask
), nr_cpumask_bits
);
19309 free_cpumask_var(mask
);
19310 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__
));
19314 map
= kzalloc(max_t(unsigned int,
19315 RPS_MAP_SIZE(cpumask_weight(mask
)), L1_CACHE_BYTES
),
19318 free_cpumask_var(mask
);
19319 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__
));
19324 for_each_cpu(cpu
, mask
) {
19325 map
->cpus
[i
++] = cpu
;
19333 free_cpumask_var(mask
);
19334 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__
));
19338 spin_lock(&rps_map_lock
);
19339 old_map
= rcu_dereference_protected(queue
->rps_map
,
19340 lockdep_is_held(&rps_map_lock
));
19341 rcu_assign_pointer(queue
->rps_map
, map
);
19342 spin_unlock(&rps_map_lock
);
19345 static_key_slow_inc(&rps_needed
);
19348 kfree_rcu(old_map
, rcu
);
19349 static_key_slow_dec(&rps_needed
);
19351 free_cpumask_var(mask
);
19353 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__
, map
->len
));
/*
 * custom_rps_map_clear - detach and free the RPS cpu map of an rx queue.
 * Publishes NULL via RCU, then frees the old map after a grace period
 * (kfree_rcu). NOTE(review): the NULL-check around kfree_rcu (original
 * line 19364, dropped here) guards the map != NULL case.
 */
19357 void custom_rps_map_clear(struct netdev_rx_queue
*queue
)
19359 struct rps_map
*map
;
19361 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
19363 map
= rcu_dereference_protected(queue
->rps_map
, 1);
19365 RCU_INIT_POINTER(queue
->rps_map
, NULL
);
19366 kfree_rcu(map
, rcu
);
19367 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__
));
19370 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
19372 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
19373 defined(ARGOS_NOTIFY_CB)
19375 static int argos_status_notifier_wifi_cb(struct notifier_block
*notifier
,
19376 unsigned long speed
, void *v
);
19377 static int argos_status_notifier_p2p_cb(struct notifier_block
*notifier
,
19378 unsigned long speed
, void *v
);
19379 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19380 static int argos_status_notifier_config_mumimo_cb(struct notifier_block
*notifier
,
19381 unsigned long speed
, void *v
);
19382 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19384 #ifdef DYNAMIC_MUMIMO_CONTROL
19385 #define MUMIMO_CONTROL_TIMER_INTERVAL_MS 5000
/*
 * argos_config_mumimo_timer - legacy (pre-4.15) timer callback; defers the
 * actual MU-MIMO reconfiguration to workqueue context because it runs in
 * softirq context where iovar I/O cannot sleep.
 */
19388 argos_config_mumimo_timer(unsigned long data
)
19390 argos_mumimo_ctrl
*ctrl_data
= (argos_mumimo_ctrl
*)data
;
19392 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
19393 schedule_work(&ctrl_data
->mumimo_ctrl_work
);
19397 argos_config_mumimo_handler(struct work_struct
*work
)
19399 argos_mumimo_ctrl
*ctrl_data
;
19400 struct net_device
*dev
;
19404 ctrl_data
= container_of(work
, argos_mumimo_ctrl
, mumimo_ctrl_work
);
19406 dev
= ctrl_data
->dev
;
19412 new_cap
= ctrl_data
->cur_murx_bfe_cap
;
19413 err
= wl_set_murx_bfe_cap(dev
, new_cap
, TRUE
);
19415 DHD_ERROR(("%s: Failed to set murx_bfe_cap to %d, err=%d\n",
19416 __FUNCTION__
, new_cap
, err
));
19418 DHD_ERROR(("%s: Newly configured murx_bfe_cap = %d\n",
19419 __FUNCTION__
, new_cap
));
19424 argos_status_notifier_config_mumimo(struct notifier_block
*notifier
,
19425 unsigned long speed
, void *v
)
19427 struct net_device
*dev
;
19428 int prev_murx_bfe_cap
;
19432 dev
= argos_mumimo_ctrl_data
.dev
;
19437 dhd
= DHD_DEV_INFO(dev
);
19442 /* Check if STA reassociate with the AP after murx configuration */
19443 if (dhd
->pub
.reassoc_mumimo_sw
) {
19444 /* Cancel the MU-MIMO control timer */
19445 if (timer_pending(&argos_mumimo_ctrl_data
.config_timer
)) {
19446 del_timer_sync(&argos_mumimo_ctrl_data
.config_timer
);
19449 DHD_ERROR(("%s: Reassociation is in progress...\n", __FUNCTION__
));
19453 /* Check if current associated AP supports MU-MIMO capability
19454 * or current Tput meets the condition for MU-MIMO configuration
19456 if ((wl_check_bss_support_mumimo(dev
) <= 0) ||
19457 ((speed
< MUMIMO_TO_SUMIMO_TPUT_THRESHOLD
) &&
19458 (speed
>= SUMIMO_TO_MUMIMO_TPUT_THRESHOLD
))) {
19462 prev_murx_bfe_cap
= argos_mumimo_ctrl_data
.cur_murx_bfe_cap
;
19464 /* Check the TPut condition */
19465 if (speed
>= MUMIMO_TO_SUMIMO_TPUT_THRESHOLD
) {
19471 if (prev_murx_bfe_cap
!= cap
) {
19472 /* Cancel the MU-MIMO control timer */
19473 if (timer_pending(&argos_mumimo_ctrl_data
.config_timer
)) {
19474 del_timer_sync(&argos_mumimo_ctrl_data
.config_timer
);
19477 /* Update the new value */
19478 argos_mumimo_ctrl_data
.cur_murx_bfe_cap
= cap
;
19480 /* Arm the MU-MIMO control timer */
19481 mod_timer(&argos_mumimo_ctrl_data
.config_timer
,
19482 jiffies
+ msecs_to_jiffies(MUMIMO_CONTROL_TIMER_INTERVAL_MS
));
19484 DHD_ERROR(("%s: Arm the MU-MIMO control timer, cur_murx_bfe_cap=%d\n",
19485 __FUNCTION__
, cap
));
/*
 * argos_config_mumimo_init - set up the MU-MIMO control state: legacy
 * timer (init_timer/.data/.function triple implies a pre-4.15 kernel),
 * the deferred-work handler, the target netdev, and an "unset" (-1)
 * cached murx_bfe_cap value.
 */
19490 argos_config_mumimo_init(struct net_device
*dev
)
19492 init_timer(&argos_mumimo_ctrl_data
.config_timer
);
19493 argos_mumimo_ctrl_data
.config_timer
.data
= (unsigned long)&argos_mumimo_ctrl_data
;
19494 argos_mumimo_ctrl_data
.config_timer
.function
= argos_config_mumimo_timer
;
19495 argos_mumimo_ctrl_data
.dev
= dev
;
19496 INIT_WORK(&argos_mumimo_ctrl_data
.mumimo_ctrl_work
, argos_config_mumimo_handler
);
19497 argos_mumimo_ctrl_data
.cur_murx_bfe_cap
= -1;
/*
 * argos_config_mumimo_deinit - tear down MU-MIMO control: clear the netdev
 * reference, cancel a pending control timer, and wait for any in-flight
 * work item to finish (cancel_work_sync).
 */
19501 argos_config_mumimo_deinit(void)
19503 argos_mumimo_ctrl_data
.dev
= NULL
;
19504 if (timer_pending(&argos_mumimo_ctrl_data
.config_timer
)) {
19505 del_timer_sync(&argos_mumimo_ctrl_data
.config_timer
);
19508 cancel_work_sync(&argos_mumimo_ctrl_data
.mumimo_ctrl_work
);
/* argos_config_mumimo_reset - forget the cached murx_bfe_cap (-1 = unset)
 * so the next throughput notification re-evaluates the MU-MIMO setting. */
19512 argos_config_mumimo_reset(void)
19514 argos_mumimo_ctrl_data
.cur_murx_bfe_cap
= -1;
19516 #endif /* DYNAMIC_MUMIMO_CONTROL */
19519 argos_register_notifier_init(struct net_device
*net
)
19523 DHD_INFO(("DHD: %s: \n", __FUNCTION__
));
19524 argos_rps_ctrl_data
.wlan_primary_netdev
= net
;
19525 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
19526 #ifdef DYNAMIC_MUMIMO_CONTROL
19527 argos_config_mumimo_init(net
);
19528 #endif /* DYNAMIC_MUMIMO_CONTROL */
19530 if (argos_wifi
.notifier_call
== NULL
) {
19531 argos_wifi
.notifier_call
= argos_status_notifier_wifi_cb
;
19532 ret
= sec_argos_register_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19534 DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret
));
19539 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19540 if (argos_mimo
.notifier_call
== NULL
) {
19541 argos_mimo
.notifier_call
= argos_status_notifier_config_mumimo_cb
;
19542 ret
= sec_argos_register_notifier(&argos_mimo
, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL
);
19544 DHD_ERROR(("DHD:Failed to register WIFI for MIMO notifier, ret=%d\n", ret
));
19545 sec_argos_unregister_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19549 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19551 if (argos_p2p
.notifier_call
== NULL
) {
19552 argos_p2p
.notifier_call
= argos_status_notifier_p2p_cb
;
19553 ret
= sec_argos_register_notifier(&argos_p2p
, ARGOS_P2P_TABLE_LABEL
);
19555 DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret
));
19556 sec_argos_unregister_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19557 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19558 sec_argos_unregister_notifier(&argos_mimo
, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL
);
19559 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19567 if (argos_wifi
.notifier_call
) {
19568 argos_wifi
.notifier_call
= NULL
;
19571 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19572 if (argos_mimo
.notifier_call
) {
19573 argos_mimo
.notifier_call
= NULL
;
19575 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19577 if (argos_p2p
.notifier_call
) {
19578 argos_p2p
.notifier_call
= NULL
;
19585 argos_register_notifier_deinit(void)
19587 DHD_INFO(("DHD: %s: \n", __FUNCTION__
));
19589 if (argos_rps_ctrl_data
.wlan_primary_netdev
== NULL
) {
19590 DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__
));
19594 #ifdef DYNAMIC_MUMIMO_CONTROL
19595 argos_config_mumimo_deinit();
19596 #endif /* DYNAMIC_MUMIMO_CONTROL */
19598 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19599 custom_rps_map_clear(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
);
19600 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19602 if (argos_p2p
.notifier_call
) {
19603 sec_argos_unregister_notifier(&argos_p2p
, ARGOS_P2P_TABLE_LABEL
);
19604 argos_p2p
.notifier_call
= NULL
;
19607 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19608 if (argos_mimo
.notifier_call
) {
19609 sec_argos_unregister_notifier(&argos_mimo
, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL
);
19610 argos_mimo
.notifier_call
= NULL
;
19612 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19614 if (argos_wifi
.notifier_call
) {
19615 sec_argos_unregister_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19616 argos_wifi
.notifier_call
= NULL
;
19619 argos_rps_ctrl_data
.wlan_primary_netdev
= NULL
;
19620 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
19626 argos_status_notifier_cb(struct notifier_block
*notifier
,
19627 unsigned long speed
, void *v
)
19632 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19634 if (argos_rps_ctrl_data
.wlan_primary_netdev
== NULL
) {
19638 dhd
= DHD_DEV_INFO(argos_rps_ctrl_data
.wlan_primary_netdev
);
19644 if (dhdp
== NULL
|| !dhdp
->up
) {
19647 /* Check if reported TPut value is more than threshold value */
19648 if (speed
> RPS_TPUT_THRESHOLD
) {
19649 if (argos_rps_ctrl_data
.argos_rps_cpus_enabled
== 0) {
19650 /* It does not need to configre rps_cpus
19651 * if Load Balance is enabled
19653 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19656 if (cpu_online(RPS_CPUS_WLAN_CORE_ID
)) {
19657 err
= custom_rps_map_set(
19658 argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
,
19659 RPS_CPUS_MASK
, strlen(RPS_CPUS_MASK
));
19661 DHD_ERROR(("DHD: %s: RPS_Set fail,"
19662 " Core=%d Offline\n", __FUNCTION__
,
19663 RPS_CPUS_WLAN_CORE_ID
));
19668 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
19669 "speed=%ld, error=%d\n",
19670 __FUNCTION__
, speed
, err
));
19672 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19673 #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19674 if (dhdp
->tcpack_sup_mode
!= TCPACK_SUP_HOLD
) {
19675 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
19676 __FUNCTION__
, TCPACK_SUP_HOLD
));
19677 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_HOLD
);
19679 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19680 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 1;
19681 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19682 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
19683 __FUNCTION__
, speed
));
19685 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19688 if (argos_rps_ctrl_data
.argos_rps_cpus_enabled
== 1) {
19689 #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19690 if (dhdp
->tcpack_sup_mode
!= TCPACK_SUP_OFF
) {
19691 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
19693 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
19695 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19696 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19697 /* It does not need to configre rps_cpus
19698 * if Load Balance is enabled
19700 custom_rps_map_clear(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
);
19701 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__
, speed
));
19702 OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS
);
19703 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19704 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
/*
 * argos_status_notifier_wifi_cb - ARGOS throughput notifier for the WLAN
 * table: runs the common RPS/TCP-ACK handling, and (when MU-MIMO control is
 * not split onto its own table) also the MU-MIMO reconfiguration path.
 */
19713 argos_status_notifier_wifi_cb(struct notifier_block
*notifier
,
19714 unsigned long speed
, void *v
)
19716 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19717 argos_status_notifier_cb(notifier
, speed
, v
);
19718 #if !defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19719 argos_status_notifier_config_mumimo(notifier
, speed
, v
);
19720 #endif /* !CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19725 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
/*
 * argos_status_notifier_config_mumimo_cb - ARGOS notifier used when MU-MIMO
 * control has its own throughput table (CONFIG_SPLIT_ARGOS_SET); forwards
 * directly to the MU-MIMO reconfiguration logic.
 */
19727 argos_status_notifier_config_mumimo_cb(struct notifier_block
*notifier
,
19728 unsigned long speed
, void *v
)
19730 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19731 argos_status_notifier_config_mumimo(notifier
, speed
, v
);
19735 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
/*
 * argos_status_notifier_p2p_cb - ARGOS throughput notifier for the P2P
 * table; shares the common RPS/TCP-ACK handling with the WLAN callback.
 */
19738 argos_status_notifier_p2p_cb(struct notifier_block
*notifier
,
19739 unsigned long speed
, void *v
)
19741 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
))ллл;
19742 argos_status_notifier_cb(notifier
, speed
, v
);
19746 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
19748 #ifdef DHD_DEBUG_PAGEALLOC
/*
 * dhd_page_corrupt_cb - DHD_DEBUG_PAGEALLOC callback invoked when page
 * corruption is detected: logs and hex-dumps the corrupt region, dumps
 * driver state to the kernel log, and on PCIe builds captures a dongle
 * memory dump (DUMP_MEMONLY / DUMP_TYPE_MEMORY_CORRUPTION) while holding
 * a wake lock. Exported for the page-alloc debug hook to call.
 */
19751 dhd_page_corrupt_cb(void *handle
, void *addr_corrupt
, size_t len
)
19753 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
19755 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
19756 __FUNCTION__
, addr_corrupt
, (uint32
)len
));
19758 DHD_OS_WAKE_LOCK(dhdp
);
19759 prhex("Page Corruption:", addr_corrupt
, len
);
19760 dhd_dump_to_kernelog(dhdp
);
19761 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
19762 /* Load the dongle side dump to host memory and then BUG_ON() */
19763 dhdp
->memdump_enabled
= DUMP_MEMONLY
;
19764 dhdp
->memdump_type
= DUMP_TYPE_MEMORY_CORRUPTION
;
19765 dhd_bus_mem_dump(dhdp
);
19766 #endif /* BCMPCIE && DHD_FW_COREDUMP */
19767 DHD_OS_WAKE_UNLOCK(dhdp
);
19769 EXPORT_SYMBOL(dhd_page_corrupt_cb
);
19770 #endif /* DHD_DEBUG_PAGEALLOC */
19772 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
/*
 * dhd_pktid_error_handler - handle a PCIe packet-ID audit failure: log it,
 * capture a dongle memory dump (forcing DUMP_MEMFILE if dumps were
 * disabled), then raise a hang with reason HANG_REASON_PCIE_PKTID_ERROR so
 * the stack restarts the interface. Runs under a wake lock.
 */
19774 dhd_pktid_error_handler(dhd_pub_t
*dhdp
)
19776 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__
));
19777 DHD_OS_WAKE_LOCK(dhdp
);
19778 dhd_dump_to_kernelog(dhdp
);
19779 #ifdef DHD_FW_COREDUMP
19780 /* Load the dongle side dump to host memory */
19781 if (dhdp
->memdump_enabled
== DUMP_DISABLED
) {
19782 dhdp
->memdump_enabled
= DUMP_MEMFILE
;
19784 dhdp
->memdump_type
= DUMP_TYPE_PKTID_AUDIT_FAILURE
;
19785 dhd_bus_mem_dump(dhdp
);
19786 #endif /* DHD_FW_COREDUMP */
19787 dhdp
->hang_reason
= HANG_REASON_PCIE_PKTID_ERROR
;
19788 dhd_os_check_hang(dhdp
, 0, -EREMOTEIO
);
19789 DHD_OS_WAKE_UNLOCK(dhdp
);
19791 #endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
/*
 * dhd_linux_get_primary_netdev - return the primary interface's
 * net_device (iflist[0]->net) when present. The NULL-return fallback line
 * was dropped by extraction; presumably returns NULL otherwise -- verify
 * against full source.
 */
19793 struct net_device
*
19794 dhd_linux_get_primary_netdev(dhd_pub_t
*dhdp
)
19796 dhd_info_t
*dhd
= dhdp
->info
;
19798 if (dhd
->iflist
[0] && dhd
->iflist
[0]->net
)
19799 return dhd
->iflist
[0]->net
;
19804 #ifdef DHD_DHCP_DUMP
19806 dhd_dhcp_dump(char *ifname
, uint8
*pktdata
, bool tx
)
19808 struct bootp_fmt
*b
= (struct bootp_fmt
*) &pktdata
[ETHER_HDR_LEN
];
19809 struct iphdr
*h
= &b
->ip_header
;
19810 uint8
*ptr
, *opt
, *end
= (uint8
*) b
+ ntohs(b
->ip_header
.tot_len
);
19811 int dhcp_type
= 0, len
, opt_len
;
19813 /* check IP header */
19814 if (h
->ihl
!= 5 || h
->version
!= 4 || h
->protocol
!= IPPROTO_UDP
) {
19818 /* check UDP port for bootp (67, 68) */
19819 if (b
->udp_header
.source
!= htons(67) && b
->udp_header
.source
!= htons(68) &&
19820 b
->udp_header
.dest
!= htons(67) && b
->udp_header
.dest
!= htons(68)) {
19824 /* check header length */
19825 if (ntohs(h
->tot_len
) < ntohs(b
->udp_header
.len
) + sizeof(struct iphdr
)) {
19829 len
= ntohs(b
->udp_header
.len
) - sizeof(struct udphdr
);
19831 - (sizeof(*b
) - sizeof(struct iphdr
) - sizeof(struct udphdr
) - sizeof(b
->options
));
19833 /* parse bootp options */
19834 if (opt_len
>= 4 && !memcmp(b
->options
, bootp_magic_cookie
, 4)) {
19835 ptr
= &b
->options
[4];
19836 while (ptr
< end
&& *ptr
!= 0xff) {
19845 /* 53 is dhcp type */
19848 dhcp_type
= opt
[2];
19849 DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
19850 ifname
, dhcp_types
[dhcp_type
],
19851 tx
? "TX" : "RX", dhcp_ops
[b
->op
]));
19858 #endif /* DHD_DHCP_DUMP */
19860 #ifdef DHD_ICMP_DUMP
19862 dhd_icmp_dump(char *ifname
, uint8
*pktdata
, bool tx
)
19864 uint8
*pkt
= (uint8
*)&pktdata
[ETHER_HDR_LEN
];
19865 struct iphdr
*iph
= (struct iphdr
*)pkt
;
19866 struct icmphdr
*icmph
;
19868 /* check IP header */
19869 if (iph
->ihl
!= 5 || iph
->version
!= 4 || iph
->protocol
!= IP_PROT_ICMP
) {
19873 icmph
= (struct icmphdr
*)((uint8
*)pkt
+ sizeof(struct iphdr
));
19874 if (icmph
->type
== ICMP_ECHO
) {
19875 DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
19876 ifname
, tx
? "TX" : "RX", ntoh16(icmph
->un
.echo
.sequence
)));
19877 } else if (icmph
->type
== ICMP_ECHOREPLY
) {
19878 DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
19879 ifname
, tx
? "TX" : "RX", ntoh16(icmph
->un
.echo
.sequence
)));
19881 DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
19882 ifname
, tx
? "TX" : "RX", icmph
->type
, icmph
->code
));
19885 #endif /* DHD_ICMP_DUMP */
19887 #ifdef SHOW_LOGTRACE
19889 dhd_get_read_buf_ptr(dhd_pub_t
*dhd_pub
, trace_buf_info_t
*trace_buf_info
)
19891 dhd_dbg_ring_status_t ring_status
;
19893 #if defined(DEBUGABILITY)
19894 rlen
= dhd_dbg_pull_single_from_ring(dhd_pub
, FW_VERBOSE_RING_ID
, trace_buf_info
->buf
,
19895 TRACE_LOG_BUF_MAX_SIZE
, TRUE
);
19896 #elif defined(DEBUGABILITY_ECNTRS_LOGGING)
19897 rlen
= dhd_dbg_ring_pull_single(dhd_pub
->ecntr_dbg_ring
, trace_buf_info
->buf
,
19898 TRACE_LOG_BUF_MAX_SIZE
, TRUE
);
19901 #endif /* DEBUGABILITY */
19903 trace_buf_info
->size
= rlen
;
19904 trace_buf_info
->availability
= NEXT_BUF_NOT_AVAIL
;
19906 trace_buf_info
->availability
= BUF_NOT_AVAILABLE
;
19909 dhd_dbg_get_ring_status(dhd_pub
, FW_VERBOSE_RING_ID
, &ring_status
);
19910 if (ring_status
.written_bytes
!= ring_status
.read_bytes
) {
19911 trace_buf_info
->availability
= NEXT_BUF_AVAIL
;
19914 #endif /* SHOW_LOGTRACE */
19917 dhd_fw_download_status(dhd_pub_t
* dhd_pub
)
19919 return dhd_pub
->fw_download_done
;
19923 dhd_create_to_notifier_skt(void)
19925 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
19926 /* Kernel 3.7 onwards this API accepts only 3 arguments. */
19927 /* Kernel version 3.6 is a special case which accepts 4 arguments */
19928 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, &dhd_netlink_cfg
);
19929 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
19930 /* Kernel version 3.5 and below use this old API format */
19931 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, 0,
19932 dhd_process_daemon_msg
, NULL
, THIS_MODULE
);
19934 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, THIS_MODULE
,
19936 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
19937 if (!nl_to_event_sk
)
19939 printf("Error creating socket.\n");
19942 DHD_INFO(("nl_to socket created successfully...\n"));
19947 dhd_destroy_to_notifier_skt(void)
19949 DHD_INFO(("Destroying nl_to socket\n"));
19950 netlink_kernel_release(nl_to_event_sk
);
19954 dhd_recv_msg_from_daemon(struct sk_buff
*skb
)
19956 struct nlmsghdr
*nlh
;
19957 bcm_to_info_t
*cmd
;
19959 nlh
= (struct nlmsghdr
*)skb
->data
;
19960 cmd
= (bcm_to_info_t
*)nlmsg_data(nlh
);
19961 if ((cmd
->magic
== BCM_TO_MAGIC
) && (cmd
->reason
== REASON_DAEMON_STARTED
)) {
19962 sender_pid
= ((struct nlmsghdr
*)(skb
->data
))->nlmsg_pid
;
19963 DHD_INFO(("DHD Daemon Started\n"));
19968 dhd_send_msg_to_daemon(struct sk_buff
*skb
, void *data
, int size
)
19970 struct nlmsghdr
*nlh
;
19971 struct sk_buff
*skb_out
;
19973 BCM_REFERENCE(skb
);
19974 if (sender_pid
== 0) {
19975 DHD_INFO(("Invalid PID 0\n"));
19979 if ((skb_out
= nlmsg_new(size
, 0)) == NULL
) {
19980 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__
));
19983 nlh
= nlmsg_put(skb_out
, 0, 0, NLMSG_DONE
, size
, 0);
19984 NETLINK_CB(skb_out
).dst_group
= 0; /* Unicast */
19985 memcpy(nlmsg_data(nlh
), (char *)data
, size
);
19987 if ((nlmsg_unicast(nl_to_event_sk
, skb_out
, sender_pid
)) < 0) {
19988 DHD_INFO(("Error sending message\n"));
19994 dhd_process_daemon_msg(struct sk_buff
*skb
)
19996 bcm_to_info_t to_info
;
19998 to_info
.magic
= BCM_TO_MAGIC
;
19999 to_info
.reason
= REASON_DAEMON_STARTED
;
20000 to_info
.trap
= NO_TRAP
;
20002 dhd_recv_msg_from_daemon(skb
);
20003 dhd_send_msg_to_daemon(skb
, &to_info
, sizeof(to_info
));
20006 #ifdef DHD_LOG_DUMP
20008 dhd_log_dump_ecntr_enabled(void)
20010 return (bool)logdump_ecntr_enable
;
20014 dhd_log_dump_init(dhd_pub_t
*dhd
)
20016 struct dhd_log_dump_buf
*dld_buf
, *dld_buf_special
;
20018 uint8
*prealloc_buf
= NULL
, *bufptr
= NULL
;
20019 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20020 int prealloc_idx
= DHD_PREALLOC_DHD_LOG_DUMP_BUF
;
20021 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20023 dhd_dbg_ring_t
*ring
= NULL
;
20024 unsigned long flags
= 0;
20025 dhd_info_t
*dhd_info
= dhd
->info
;
20026 void *cookie_buf
= NULL
;
20028 BCM_REFERENCE(ret
);
20029 BCM_REFERENCE(ring
);
20030 BCM_REFERENCE(flags
);
20033 if (logdump_prsrv_tailsize
<= 0 ||
20034 logdump_prsrv_tailsize
> DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE
) {
20035 logdump_prsrv_tailsize
= DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE
;
20037 /* now adjust the preserve log flush size based on the
20038 * kernel printk log buffer size
20040 #ifdef CONFIG_LOG_BUF_SHIFT
20041 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
20042 " limit prsrv tail size to = %uKB\n",
20043 __FUNCTION__
, (1 << CONFIG_LOG_BUF_SHIFT
)/1024,
20044 logdump_prsrv_tailsize
/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE
/1024));
20046 if (logdump_prsrv_tailsize
> LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE
) {
20047 logdump_prsrv_tailsize
= LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE
;
20050 DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
20051 __FUNCTION__
, logdump_prsrv_tailsize
/1024);
20052 #endif /* CONFIG_LOG_BUF_SHIFT */
20054 mutex_init(&dhd_info
->logdump_lock
);
20056 /* initialize log dump buf structures */
20057 memset(g_dld_buf
, 0, sizeof(struct dhd_log_dump_buf
) * DLD_BUFFER_NUM
);
20059 /* set the log dump buffer size based on the module_param */
20060 if (logdump_max_bufsize
> LOG_DUMP_GENERAL_MAX_BUFSIZE
||
20061 logdump_max_bufsize
<= 0)
20062 dld_buf_size
[DLD_BUF_TYPE_GENERAL
] = LOG_DUMP_GENERAL_MAX_BUFSIZE
;
20064 dld_buf_size
[DLD_BUF_TYPE_GENERAL
] = logdump_max_bufsize
;
20066 /* pre-alloc the memory for the log buffers & 'special' buffer */
20067 dld_buf_special
= &g_dld_buf
[DLD_BUF_TYPE_SPECIAL
];
20068 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20069 DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
20070 __FUNCTION__
, LOG_DUMP_TOTAL_BUFSIZE
, LOG_DUMP_SPECIAL_MAX_BUFSIZE
));
20071 prealloc_buf
= DHD_OS_PREALLOC(dhd
, prealloc_idx
++, LOG_DUMP_TOTAL_BUFSIZE
);
20072 dld_buf_special
->buffer
= DHD_OS_PREALLOC(dhd
, prealloc_idx
++,
20073 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20075 prealloc_buf
= MALLOCZ(dhd
->osh
, LOG_DUMP_TOTAL_BUFSIZE
);
20076 dld_buf_special
->buffer
= MALLOCZ(dhd
->osh
, dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20077 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20078 if (!prealloc_buf
) {
20079 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
20082 if (!dld_buf_special
->buffer
) {
20083 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
20087 bufptr
= prealloc_buf
;
20088 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
20089 dld_buf
= &g_dld_buf
[i
];
20090 dld_buf
->dhd_pub
= dhd
;
20091 spin_lock_init(&dld_buf
->lock
);
20092 dld_buf
->wraparound
= 0;
20093 if (i
!= DLD_BUF_TYPE_SPECIAL
) {
20094 dld_buf
->buffer
= bufptr
;
20095 dld_buf
->max
= (unsigned long)dld_buf
->buffer
+ dld_buf_size
[i
];
20096 bufptr
= (uint8
*)dld_buf
->max
;
20098 dld_buf
->max
= (unsigned long)dld_buf
->buffer
+ dld_buf_size
[i
];
20100 dld_buf
->present
= dld_buf
->front
= dld_buf
->buffer
;
20101 dld_buf
->remain
= dld_buf_size
[i
];
20102 dld_buf
->enable
= 1;
20105 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20106 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
20107 dhd
->ecntr_dbg_ring
= MALLOCZ(dhd
->osh
, sizeof(dhd_dbg_ring_t
));
20108 if (!dhd
->ecntr_dbg_ring
)
20111 ring
= (dhd_dbg_ring_t
*)dhd
->ecntr_dbg_ring
;
20112 ret
= dhd_dbg_ring_init(dhd
, ring
, ECNTR_RING_ID
,
20113 ECNTR_RING_NAME
, LOG_DUMP_ECNTRS_MAX_BUFSIZE
,
20115 if (ret
!= BCME_OK
) {
20116 DHD_ERROR(("%s: unable to init ecntr ring !\n",
20120 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
20121 ring
->state
= RING_ACTIVE
;
20122 ring
->threshold
= 0;
20123 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
20125 bufptr
+= LOG_DUMP_ECNTRS_MAX_BUFSIZE
;
20126 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20128 /* Concise buffer is used as intermediate buffer for following purposes
20129 * a) pull ecounters records temporarily before
20130 * writing it to file
20131 * b) to store dhd dump data before putting it to file
20132 * It should have a size equal to
20133 * MAX(largest possible ecntr record, 'dhd dump' data size)
20135 dhd
->concise_dbg_buf
= MALLOC(dhd
->osh
, CONCISE_DUMP_BUFLEN
);
20136 if (!dhd
->concise_dbg_buf
) {
20137 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
20142 #if defined(DHD_EVENT_LOG_FILTER)
20143 ret
= dhd_event_log_filter_init(dhd
,
20145 LOG_DUMP_FILTER_MAX_BUFSIZE
);
20146 if (ret
!= BCME_OK
) {
20149 #endif /* DHD_EVENT_LOG_FILTER */
20151 cookie_buf
= MALLOC(dhd
->osh
, LOG_DUMP_COOKIE_BUFSIZE
);
20153 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
20157 ret
= dhd_logdump_cookie_init(dhd
, cookie_buf
, LOG_DUMP_COOKIE_BUFSIZE
);
20158 if (ret
!= BCME_OK
) {
20159 MFREE(dhd
->osh
, cookie_buf
, LOG_DUMP_COOKIE_BUFSIZE
);
20166 if (dhd
->logdump_cookie
) {
20167 dhd_logdump_cookie_deinit(dhd
);
20168 MFREE(dhd
->osh
, dhd
->logdump_cookie
, LOG_DUMP_COOKIE_BUFSIZE
);
20169 dhd
->logdump_cookie
= NULL
;
20171 #if defined(DHD_EVENT_LOG_FILTER)
20172 if (dhd
->event_log_filter
) {
20173 dhd_event_log_filter_deinit(dhd
);
20175 #endif /* DHD_EVENT_LOG_FILTER */
20177 if (dhd
->concise_dbg_buf
) {
20178 MFREE(dhd
->osh
, dhd
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
);
20181 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20182 if (dhd
->ecntr_dbg_ring
) {
20183 ring
= (dhd_dbg_ring_t
*)dhd
->ecntr_dbg_ring
;
20184 dhd_dbg_ring_deinit(dhd
, ring
);
20185 ring
->ring_buf
= NULL
;
20186 ring
->ring_size
= 0;
20187 MFREE(dhd
->osh
, ring
, sizeof(dhd_dbg_ring_t
));
20188 dhd
->ecntr_dbg_ring
= NULL
;
20190 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20192 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20193 if (prealloc_buf
) {
20194 DHD_OS_PREFREE(dhd
, prealloc_buf
, LOG_DUMP_TOTAL_BUFSIZE
);
20196 if (dld_buf_special
->buffer
) {
20197 DHD_OS_PREFREE(dhd
, dld_buf_special
->buffer
,
20198 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20201 if (prealloc_buf
) {
20202 MFREE(dhd
->osh
, prealloc_buf
, LOG_DUMP_TOTAL_BUFSIZE
);
20204 if (dld_buf_special
->buffer
) {
20205 MFREE(dhd
->osh
, dld_buf_special
->buffer
,
20206 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20208 #endif /* CONFIG_DHD_USE_STATIC_BUF */
20209 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
20210 dld_buf
= &g_dld_buf
[i
];
20211 dld_buf
->enable
= 0;
20212 dld_buf
->buffer
= NULL
;
20215 mutex_destroy(&dhd_info
->logdump_lock
);
20219 dhd_log_dump_deinit(dhd_pub_t
*dhd
)
20221 struct dhd_log_dump_buf
*dld_buf
= NULL
, *dld_buf_special
= NULL
;
20223 dhd_info_t
*dhd_info
= dhd
->info
;
20224 dhd_dbg_ring_t
*ring
= NULL
;
20226 BCM_REFERENCE(ring
);
20228 if (dhd
->concise_dbg_buf
) {
20229 MFREE(dhd
->osh
, dhd
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
);
20230 dhd
->concise_dbg_buf
= NULL
;
20233 if (dhd
->logdump_cookie
) {
20234 dhd_logdump_cookie_deinit(dhd
);
20235 MFREE(dhd
->osh
, dhd
->logdump_cookie
, LOG_DUMP_COOKIE_BUFSIZE
);
20236 dhd
->logdump_cookie
= NULL
;
20239 #if defined(DHD_EVENT_LOG_FILTER)
20240 if (dhd
->event_log_filter
) {
20241 dhd_event_log_filter_deinit(dhd
);
20243 #endif /* DHD_EVENT_LOG_FILTER */
20245 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20246 if (dhd
->ecntr_dbg_ring
) {
20247 ring
= (dhd_dbg_ring_t
*)dhd
->ecntr_dbg_ring
;
20248 dhd_dbg_ring_deinit(dhd
, ring
);
20249 ring
->ring_buf
= NULL
;
20250 ring
->ring_size
= 0;
20251 MFREE(dhd
->osh
, ring
, sizeof(dhd_dbg_ring_t
));
20252 dhd
->ecntr_dbg_ring
= NULL
;
20254 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20256 /* 'general' buffer points to start of the pre-alloc'd memory */
20257 dld_buf
= &g_dld_buf
[DLD_BUF_TYPE_GENERAL
];
20258 dld_buf_special
= &g_dld_buf
[DLD_BUF_TYPE_SPECIAL
];
20259 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20260 if (dld_buf
->buffer
) {
20261 DHD_OS_PREFREE(dhd
, dld_buf
->buffer
, LOG_DUMP_TOTAL_BUFSIZE
);
20263 if (dld_buf_special
->buffer
) {
20264 DHD_OS_PREFREE(dhd
, dld_buf_special
->buffer
,
20265 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20268 if (dld_buf
->buffer
) {
20269 MFREE(dhd
->osh
, dld_buf
->buffer
, LOG_DUMP_TOTAL_BUFSIZE
);
20271 if (dld_buf_special
->buffer
) {
20272 MFREE(dhd
->osh
, dld_buf_special
->buffer
,
20273 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20275 #endif /* CONFIG_DHD_USE_STATIC_BUF */
20276 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
20277 dld_buf
= &g_dld_buf
[i
];
20278 dld_buf
->enable
= 0;
20279 dld_buf
->buffer
= NULL
;
20282 mutex_destroy(&dhd_info
->logdump_lock
);
20286 dhd_log_dump_write(int type
, char *binary_data
,
20287 int binary_len
, const char *fmt
, ...)
20290 char tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
] = {0, };
20292 unsigned long flags
= 0;
20293 struct dhd_log_dump_buf
*dld_buf
= NULL
;
20294 bool flush_log
= FALSE
;
20296 if (type
< 0 || type
>= DLD_BUFFER_NUM
) {
20297 DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
20298 __FUNCTION__
, type
));
20302 dld_buf
= &g_dld_buf
[type
];
20304 if (dld_buf
->enable
!= 1) {
20308 va_start(args
, fmt
);
20309 len
= vsnprintf(tmp_buf
, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
, fmt
, args
);
20310 /* Non ANSI C99 compliant returns -1,
20311 * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
20318 if (len
>= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
) {
20319 len
= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
- 1;
20320 tmp_buf
[len
] = '\0';
20323 /* make a critical section to eliminate race conditions */
20324 spin_lock_irqsave(&dld_buf
->lock
, flags
);
20325 if (dld_buf
->remain
< len
) {
20326 dld_buf
->wraparound
= 1;
20327 dld_buf
->present
= dld_buf
->front
;
20328 dld_buf
->remain
= dld_buf_size
[type
];
20329 /* if wrap around happens, flush the ring buffer to the file */
20333 memcpy(dld_buf
->present
, tmp_buf
, len
);
20334 dld_buf
->remain
-= len
;
20335 dld_buf
->present
+= len
;
20336 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
20338 /* double check invalid memory operation */
20339 ASSERT((unsigned long)dld_buf
->present
<= dld_buf
->max
);
20341 if (dld_buf
->dhd_pub
) {
20342 dhd_pub_t
*dhdp
= (dhd_pub_t
*)dld_buf
->dhd_pub
;
20343 dhdp
->logdump_periodic_flush
=
20344 logdump_periodic_flush
;
20345 if (logdump_periodic_flush
&& flush_log
) {
20346 log_dump_type_t
*flush_type
= MALLOCZ(dhdp
->osh
,
20347 sizeof(log_dump_type_t
));
20349 *flush_type
= type
;
20350 dhd_schedule_log_dump(dld_buf
->dhd_pub
, flush_type
);
20357 dhd_log_dump_get_timestamp(void)
20359 static char buf
[16];
20361 unsigned long rem_nsec
;
20363 ts_nsec
= local_clock();
20364 rem_nsec
= DIV_AND_MOD_U64_BY_U32(ts_nsec
, NSEC_PER_SEC
);
20365 snprintf(buf
, sizeof(buf
), "%5lu.%06lu",
20366 (unsigned long)ts_nsec
, rem_nsec
/ NSEC_PER_USEC
);
20370 #endif /* DHD_LOG_DUMP */
20372 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
20374 dhd_flush_rx_tx_wq(dhd_pub_t
*dhdp
)
20381 flush_workqueue(dhd
->tx_wq
);
20382 flush_workqueue(dhd
->rx_wq
);
20388 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
20391 #define DHD_LB_TXBOUND 64
20393 * Function that performs the TX processing on a given CPU
20396 dhd_lb_tx_process(dhd_info_t
*dhd
)
20398 struct sk_buff
*skb
;
20400 struct net_device
*net
;
20402 bool resched
= FALSE
;
20404 DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__
));
20406 DHD_ERROR((" Null pointer DHD \r\n"));
20410 BCM_REFERENCE(net
);
20412 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txp_percpu_run_cnt
);
20414 /* Base Loop to perform the actual Tx */
20416 skb
= skb_dequeue(&dhd
->tx_pend_queue
);
20418 DHD_TRACE(("Dequeued a Null Packet \r\n"));
20423 net
= DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
));
20424 ifidx
= DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
));
20426 DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb
,
20429 __dhd_sendpkt(&dhd
->pub
, ifidx
, skb
);
20431 if (cnt
>= DHD_LB_TXBOUND
) {
20438 DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__
, cnt
));
20444 dhd_lb_tx_handler(unsigned long data
)
20446 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
20448 if (dhd_lb_tx_process(dhd
)) {
20449 dhd_tasklet_schedule(&dhd
->tx_tasklet
);
20453 #endif /* DHD_LB_TXP */
20455 #ifdef DHD_DEBUG_UART
20457 dhd_debug_uart_is_running(struct net_device
*dev
)
20459 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
20461 if (dhd
->duart_execute
) {
20469 dhd_debug_uart_exec_rd(void *handle
, void *event_info
, u8 event
)
20471 dhd_pub_t
*dhdp
= handle
;
20472 dhd_debug_uart_exec(dhdp
, "rd");
20476 dhd_debug_uart_exec(dhd_pub_t
*dhdp
, char *cmd
)
20480 char *argv
[] = {DHD_DEBUG_UART_EXEC_PATH
, cmd
, NULL
};
20481 char *envp
[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL
};
20483 #ifdef DHD_FW_COREDUMP
20484 if (dhdp
->memdump_enabled
== DUMP_MEMFILE_BUGON
)
20487 if (dhdp
->hang_reason
== HANG_REASON_PCIE_LINK_DOWN
||
20488 #ifdef DHD_FW_COREDUMP
20489 dhdp
->memdump_success
== FALSE
||
20492 dhdp
->info
->duart_execute
= TRUE
;
20493 DHD_ERROR(("DHD: %s - execute %s %s\n",
20494 __FUNCTION__
, DHD_DEBUG_UART_EXEC_PATH
, cmd
));
20495 ret
= call_usermodehelper(argv
[0], argv
, envp
, UMH_WAIT_PROC
);
20496 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
20497 __FUNCTION__
, DHD_DEBUG_UART_EXEC_PATH
, cmd
, ret
));
20498 dhdp
->info
->duart_execute
= FALSE
;
20500 #ifdef DHD_LOG_DUMP
20501 if (dhdp
->memdump_type
!= DUMP_TYPE_BY_SYSDUMP
)
20509 #endif /* DHD_DEBUG_UART */
20511 #if defined(DHD_BLOB_EXISTENCE_CHECK)
20513 dhd_set_blob_support(dhd_pub_t
*dhdp
, char *fw_path
)
20516 char *filepath
= VENDOR_PATH CONFIG_BCMDHD_CLM_PATH
;
20517 fp
= filp_open(filepath
, O_RDONLY
, 0);
20519 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__
,
20521 dhdp
->is_blob
= FALSE
;
20523 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__
, filepath
));
20524 dhdp
->is_blob
= TRUE
;
20525 #if defined(CONCATE_BLOB)
20526 strncat(fw_path
, "_blob", strlen("_blob"));
20528 BCM_REFERENCE(fw_path
);
20529 #endif /* SKIP_CONCATE_BLOB */
20530 filp_close(fp
, NULL
);
20533 #endif /* DHD_BLOB_EXISTENCE_CHECK */
20535 #if defined(PCIE_FULL_DONGLE)
20536 /** test / loopback */
20538 dmaxfer_free_dmaaddr_handler(void *handle
, void *event_info
, u8 event
)
20540 dmaxref_mem_map_t
*dmmap
= (dmaxref_mem_map_t
*)event_info
;
20541 dhd_info_t
*dhd_info
= (dhd_info_t
*)handle
;
20543 if (event
!= DHD_WQ_WORK_DMA_LB_MEM_REL
) {
20544 DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__
));
20547 if (dhd_info
== NULL
) {
20548 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__
));
20551 if (dmmap
== NULL
) {
20552 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__
));
20555 dmaxfer_free_prev_dmaaddr(&dhd_info
->pub
, dmmap
);
20559 dhd_schedule_dmaxfer_free(dhd_pub_t
*dhdp
, dmaxref_mem_map_t
*dmmap
)
20561 dhd_info_t
*dhd_info
= dhdp
->info
;
20563 dhd_deferred_schedule_work(dhd_info
->dhd_deferred_wq
, (void *)dmmap
,
20564 DHD_WQ_WORK_DMA_LB_MEM_REL
, dmaxfer_free_dmaaddr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
20566 #endif /* PCIE_FULL_DONGLE */
20567 /* ---------------------------- End of sysfs implementation ------------------------------------- */
20569 #ifdef SET_PCIE_IRQ_CPU_CORE
20571 dhd_set_irq_cpucore(dhd_pub_t
*dhdp
, int affinity_cmd
)
20573 unsigned int pcie_irq
= 0;
20576 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__
));
20581 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__
));
20585 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__
, affinity_cmd
));
20587 if (dhdpcie_get_pcieirq(dhdp
->bus
, &pcie_irq
)) {
20588 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__
));
20593 irq_set_affinity() assign dedicated CPU core PCIe interrupt
20594 If dedicated CPU core is not on-line,
20595 PCIe interrupt scheduled on CPU core 0
20597 switch (affinity_cmd
) {
20598 case PCIE_IRQ_AFFINITY_OFF
:
20600 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY
:
20601 irq_set_affinity(pcie_irq
, dhdp
->info
->cpumask_primary
);
20603 #ifdef CONFIG_SOC_EXYNOS9810
20604 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS
:
20605 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
20606 __FUNCTION__
, pcie_irq
, PCIE_IRQ_CPU_CORE
));
20607 irq_set_affinity(pcie_irq
, cpumask_of(PCIE_IRQ_CPU_CORE
));
20609 #endif /* CONFIG_SOC_EXYNOS9810 */
20611 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
20612 __FUNCTION__
, affinity_cmd
));
20615 #endif /* SET_PCIE_IRQ_CPU_CORE */
20618 dhd_write_file(const char *filepath
, char *buf
, int buf_len
)
20620 struct file
*fp
= NULL
;
20621 mm_segment_t old_fs
;
20624 /* change to KERNEL_DS address limit */
20628 /* File is always created. */
20629 fp
= filp_open(filepath
, O_RDWR
| O_CREAT
, 0664);
20631 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
20632 __FUNCTION__
, filepath
, PTR_ERR(fp
)));
20635 if (fp
->f_mode
& FMODE_WRITE
) {
20636 ret
= vfs_write(fp
, buf
, buf_len
, &fp
->f_pos
);
20638 DHD_ERROR(("%s: Couldn't write file '%s'\n",
20639 __FUNCTION__
, filepath
));
20645 filp_close(fp
, NULL
);
20648 /* restore previous address limit */
20655 dhd_read_file(const char *filepath
, char *buf
, int buf_len
)
20657 struct file
*fp
= NULL
;
20658 mm_segment_t old_fs
;
20661 /* change to KERNEL_DS address limit */
20665 fp
= filp_open(filepath
, O_RDONLY
, 0);
20668 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__
, filepath
));
20672 ret
= kernel_read(fp
, 0, buf
, buf_len
);
20673 filp_close(fp
, NULL
);
20675 /* restore previous address limit */
20678 /* Return the number of bytes read */
20680 /* Success to read */
20683 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
20684 __FUNCTION__
, filepath
, ret
));
20692 dhd_write_file_and_check(const char *filepath
, char *buf
, int buf_len
)
20696 ret
= dhd_write_file(filepath
, buf
, buf_len
);
20701 /* Read the file again and check if the file size is not zero */
20702 memset(buf
, 0, buf_len
);
20703 ret
= dhd_read_file(filepath
, buf
, buf_len
);
20709 int dhd_read_from_file(dhd_pub_t
*dhd
)
20711 int ret
= 0, nread
= 0;
20714 NULL_CHECK(dhd
, "dhd is NULL", ret
);
20716 buf
= MALLOCZ(dhd
->osh
, FILE_BLOCK_READ_SIZE
);
20718 DHD_ERROR(("error: failed to alllocate buf.\n"));
20722 /* open file to read */
20723 fd
= dhd_os_open_image1(dhd
, FILTER_IE_PATH
);
20725 DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH
));
20729 nread
= dhd_os_get_image_block(buf
, (FILE_BLOCK_READ_SIZE
- 1), fd
);
20732 if ((ret
= dhd_parse_filter_ie(dhd
, buf
)) < 0) {
20733 DHD_ERROR(("error: failed to parse filter ie\n"));
20736 DHD_ERROR(("error: zero length file.failed to read\n"));
20739 dhd_os_close_image1(dhd
, fd
);
20742 MFREE(dhd
->osh
, buf
, FILE_BLOCK_READ_SIZE
);
20748 int dhd_get_filter_ie_count(dhd_pub_t
*dhdp
, uint8
* buf
)
20751 int element_count
= 0;
20757 while (*pstr
!= '\0') {
20758 if (*pstr
== '\n') {
20764 * New line character must not be present after last line.
20765 * To count last line
20769 return element_count
;
20772 int dhd_parse_oui(dhd_pub_t
*dhd
, uint8
*inbuf
, uint8
*oui
, int len
)
20774 uint8 i
, j
, msb
, lsb
, oui_len
= 0;
20776 * OUI can vary from 3 bytes to 5 bytes.
20777 * While reading from file as ascii input it can
20778 * take maximum size of 14 bytes and minumum size of
20779 * 8 bytes including ":"
20780 * Example 5byte OUI <AB:DE:BE:CD:FA>
20781 * Example 3byte OUI <AB:DC:EF>
20784 if ((inbuf
== NULL
) || (len
< 8) || (len
> 14)) {
20785 DHD_ERROR(("error: failed to parse OUI \n"));
20789 for (j
= 0, i
= 0; i
< len
; i
+= 3, ++j
) {
20790 if (!bcm_isxdigit(inbuf
[i
]) || !bcm_isxdigit(inbuf
[i
+ 1])) {
20791 DHD_ERROR(("error: invalid OUI format \n"));
20794 msb
= inbuf
[i
] > '9' ? bcm_toupper(inbuf
[i
]) - 'A' + 10 : inbuf
[i
] - '0';
20795 lsb
= inbuf
[i
+ 1] > '9' ? bcm_toupper(inbuf
[i
+ 1]) -
20796 'A' + 10 : inbuf
[i
+ 1] - '0';
20797 oui
[j
] = (msb
<< 4) | lsb
;
20799 /* Size of oui.It can vary from 3/4/5 */
20805 int dhd_check_valid_ie(dhd_pub_t
*dhdp
, uint8
* buf
, int len
)
20810 if (!bcm_isdigit(buf
[i
])) {
20811 DHD_ERROR(("error: non digit value found in filter_ie \n"));
20816 if (bcm_atoi((char*)buf
) > 255) {
20817 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
20824 int dhd_parse_filter_ie(dhd_pub_t
*dhd
, uint8
*buf
)
20826 int element_count
= 0, i
= 0, oui_size
= 0, ret
= 0;
20827 uint16 bufsize
, buf_space_left
, id
= 0, len
= 0;
20828 uint16 filter_iovsize
, all_tlvsize
;
20829 wl_filter_ie_tlv_t
*p_ie_tlv
= NULL
;
20830 wl_filter_ie_iov_v1_t
*p_filter_iov
= (wl_filter_ie_iov_v1_t
*) NULL
;
20831 char *token
= NULL
, *ele_token
= NULL
, *oui_token
= NULL
, *type
= NULL
;
20834 element_count
= dhd_get_filter_ie_count(dhd
, buf
);
20835 DHD_INFO(("total element count %d \n", element_count
));
20836 /* Calculate the whole buffer size */
20837 filter_iovsize
= sizeof(wl_filter_ie_iov_v1_t
) + FILTER_IE_BUFSZ
;
20838 p_filter_iov
= MALLOCZ(dhd
->osh
, filter_iovsize
);
20840 if (p_filter_iov
== NULL
) {
20841 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize
));
20845 /* setup filter iovar header */
20846 p_filter_iov
->version
= WL_FILTER_IE_VERSION
;
20847 p_filter_iov
->len
= filter_iovsize
;
20848 p_filter_iov
->fixed_length
= p_filter_iov
->len
- FILTER_IE_BUFSZ
;
20849 p_filter_iov
->pktflag
= FC_PROBE_REQ
;
20850 p_filter_iov
->option
= WL_FILTER_IE_CHECK_SUB_OPTION
;
20852 bufsize
= filter_iovsize
- WL_FILTER_IE_IOV_HDR_SIZE
; /* adjust available size for TLVs */
20853 p_ie_tlv
= (wl_filter_ie_tlv_t
*)&p_filter_iov
->tlvs
[0];
20854 buf_space_left
= bufsize
;
20856 while ((i
< element_count
) && (buf
!= NULL
)) {
20858 /* token contains one line of input data */
20859 token
= bcmstrtok((char**)&buf
, "\n", NULL
);
20860 if (token
== NULL
) {
20863 if ((ele_token
= bcmstrstr(token
, ",")) == NULL
) {
20864 /* only element id is present */
20865 if (dhd_check_valid_ie(dhd
, token
, strlen(token
)) == BCME_ERROR
) {
20866 DHD_ERROR(("error: Invalid element id \n"));
20870 id
= bcm_atoi((char*)token
);
20871 data
[len
++] = WL_FILTER_IE_SET
;
20873 /* oui is present */
20874 ele_token
= bcmstrtok(&token
, ",", NULL
);
20875 if ((ele_token
== NULL
) || (dhd_check_valid_ie(dhd
, ele_token
,
20876 strlen(ele_token
)) == BCME_ERROR
)) {
20877 DHD_ERROR(("error: Invalid element id \n"));
20881 id
= bcm_atoi((char*)ele_token
);
20882 data
[len
++] = WL_FILTER_IE_SET
;
20883 if ((oui_token
= bcmstrstr(token
, ",")) == NULL
) {
20884 oui_size
= dhd_parse_oui(dhd
, token
, &(data
[len
]), strlen(token
));
20885 if (oui_size
== BCME_ERROR
) {
20886 DHD_ERROR(("error: Invalid OUI \n"));
20892 /* type is present */
20893 oui_token
= bcmstrtok(&token
, ",", NULL
);
20894 if ((oui_token
== NULL
) || ((oui_size
=
20895 dhd_parse_oui(dhd
, oui_token
,
20896 &(data
[len
]), strlen(oui_token
))) == BCME_ERROR
)) {
20897 DHD_ERROR(("error: Invalid OUI \n"));
20902 if ((type
= bcmstrstr(token
, ",")) == NULL
) {
20903 if (dhd_check_valid_ie(dhd
, token
,
20904 strlen(token
)) == BCME_ERROR
) {
20905 DHD_ERROR(("error: Invalid type \n"));
20909 data
[len
++] = bcm_atoi((char*)token
);
20911 /* subtype is present */
20912 type
= bcmstrtok(&token
, ",", NULL
);
20913 if ((type
== NULL
) || (dhd_check_valid_ie(dhd
, type
,
20914 strlen(type
)) == BCME_ERROR
)) {
20915 DHD_ERROR(("error: Invalid type \n"));
20919 data
[len
++] = bcm_atoi((char*)type
);
20920 /* subtype is last element */
20921 if ((token
== NULL
) || (*token
== '\0') ||
20922 (dhd_check_valid_ie(dhd
, token
,
20923 strlen(token
)) == BCME_ERROR
)) {
20924 DHD_ERROR(("error: Invalid subtype \n"));
20928 data
[len
++] = bcm_atoi((char*)token
);
20932 ret
= bcm_pack_xtlv_entry((uint8
**)&p_ie_tlv
,
20933 &buf_space_left
, id
, len
, data
, BCM_XTLV_OPTION_ALIGN32
);
20934 if (ret
!= BCME_OK
) {
20935 DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
20936 "status=%d\n", __FUNCTION__
, ret
));
20942 /* file is empty or first line is blank */
20943 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
20947 /* update the iov header, set len to include all TLVs + header */
20948 all_tlvsize
= (bufsize
- buf_space_left
);
20949 p_filter_iov
->len
= htol16(all_tlvsize
+ WL_FILTER_IE_IOV_HDR_SIZE
);
20950 ret
= dhd_iovar(dhd
, 0, "filter_ie", (void *)p_filter_iov
,
20951 p_filter_iov
->len
, NULL
, 0, TRUE
);
20952 if (ret
!= BCME_OK
) {
20953 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret
));
20957 if (p_filter_iov
) {
20958 MFREE(dhd
->osh
, p_filter_iov
, filter_iovsize
);
20959 p_filter_iov
= NULL
;
20963 #endif /* FILTER_IE */
20964 #ifdef DHD_WAKE_STATUS
20966 dhd_get_wakecount(dhd_pub_t
*dhdp
)
20968 return dhd_bus_get_wakecount(dhdp
);
20970 #endif /* DHD_WAKE_STATUS */
20973 dhd_get_random_bytes(uint8
*buf
, uint len
)
20976 get_random_bytes_arch(buf
, len
);
20977 #endif /* BCMPCIE */
20981 #if defined(DHD_HANG_SEND_UP_TEST)
20983 dhd_make_hang_with_reason(struct net_device
*dev
, const char *string_num
)
20985 dhd_info_t
*dhd
= NULL
;
20986 dhd_pub_t
*dhdp
= NULL
;
20987 uint reason
= HANG_REASON_MAX
;
20988 uint32 fw_test_code
= 0;
20989 dhd
= DHD_DEV_INFO(dev
);
20995 if (!dhd
|| !dhdp
) {
20999 reason
= (uint
) bcm_strtoul(string_num
, NULL
, 0);
21000 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__
, reason
));
21003 if (dhdp
->req_hang_type
) {
21004 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
21005 __FUNCTION__
, dhdp
->req_hang_type
));
21006 dhdp
->req_hang_type
= 0;
21009 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__
));
21012 } else if ((reason
<= HANG_REASON_MASK
) || (reason
>= HANG_REASON_MAX
)) {
21013 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason
));
21017 if (dhdp
->req_hang_type
!= 0) {
21018 DHD_ERROR(("Already HANG requested for test\n"));
21023 case HANG_REASON_IOCTL_RESP_TIMEOUT
:
21024 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason
));
21025 dhdp
->req_hang_type
= reason
;
21026 fw_test_code
= 102; /* resumed on timeour */
21027 dhd_wl_ioctl_set_intiovar(dhdp
, "bus:disconnect", fw_test_code
,
21028 WLC_SET_VAR
, TRUE
, 0);
21030 case HANG_REASON_DONGLE_TRAP
:
21031 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason
));
21032 dhdp
->req_hang_type
= reason
;
21033 fw_test_code
= 99; /* dongle trap */
21034 dhd_wl_ioctl_set_intiovar(dhdp
, "bus:disconnect", fw_test_code
,
21035 WLC_SET_VAR
, TRUE
, 0);
21037 case HANG_REASON_D3_ACK_TIMEOUT
:
21038 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason
));
21039 dhdp
->req_hang_type
= reason
;
21041 case HANG_REASON_BUS_DOWN
:
21042 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason
));
21043 dhdp
->req_hang_type
= reason
;
21045 case HANG_REASON_PCIE_LINK_DOWN
:
21046 case HANG_REASON_MSGBUF_LIVELOCK
:
21047 dhdp
->req_hang_type
= 0;
21048 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason
));
21050 case HANG_REASON_IFACE_DEL_FAILURE
:
21051 dhdp
->req_hang_type
= 0;
21052 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason
));
21054 case HANG_REASON_HT_AVAIL_ERROR
:
21055 dhdp
->req_hang_type
= 0;
21056 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason
));
21058 case HANG_REASON_PCIE_RC_LINK_UP_FAIL
:
21059 DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason
));
21060 dhdp
->req_hang_type
= reason
;
21063 dhdp
->req_hang_type
= 0;
21064 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason
));
21068 #endif /* DHD_HANG_SEND_UP_TEST */
21072 dhd_error_recovery(void *handle
, void *event_info
, u8 event
)
21074 dhd_info_t
*dhd
= handle
;
21079 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
21085 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
21086 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
21091 ret
= dhd_bus_perform_flr_with_quiesce(dhdp
);
21092 if (ret
!= BCME_DNGL_DEVRESET
) {
21093 DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
21094 "toggle REG_ON\n", __FUNCTION__
, ret
));
21095 /* toggle REG_ON */
21096 dhdp
->pom_toggle_reg_on(WLAN_FUNC_ID
, BY_WLAN_DUE_TO_WLAN
);
21102 dhd_schedule_reset(dhd_pub_t
*dhdp
)
21104 if (dhdp
->enable_erpom
) {
21105 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, NULL
,
21106 DHD_WQ_WORK_ERROR_RECOVERY
, dhd_error_recovery
, DHD_WQ_WORK_PRIORITY_HIGH
);
21109 #endif /* DHD_ERPOM */
21111 #ifdef DHD_PKT_LOGGING
21113 dhd_pktlog_dump(void *handle
, void *event_info
, u8 event
)
21115 dhd_info_t
*dhd
= handle
;
21118 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
21122 if (dhd_pktlog_write_file(&dhd
->pub
)) {
21123 DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__
));
21129 dhd_schedule_pktlog_dump(dhd_pub_t
*dhdp
)
21131 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
21132 (void*)NULL
, DHD_WQ_WORK_PKTLOG_DUMP
,
21133 dhd_pktlog_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
21135 #endif /* DHD_PKT_LOGGING */
21137 #ifdef BIGDATA_SOFTAP
/*
 * dhd_schedule_gather_ap_stadata - package a SoftAP station event into a
 * heap-allocated ap_sta_wq_data_t (event copy + dhdp/cfg/ndev pointers) and
 * queue wl_gather_ap_stadata() on the deferred workqueue at high priority.
 * Ownership of p_wq_data transfers to the work handler.
 * NOTE(review): extraction dropped braces/early-return lines; code left
 * byte-identical.
 */
21138 void dhd_schedule_gather_ap_stadata(void *bcm_cfg
, void *ndev
, const wl_event_msg_t
*e
)
21140 struct bcm_cfg80211
*cfg
;
21142 ap_sta_wq_data_t
*p_wq_data
;
/* Reject any missing input; all three are required below. */
21144 if (!bcm_cfg
|| !ndev
|| !e
) {
21145 WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg
, ndev
, e
));
21149 cfg
= (struct bcm_cfg80211
*)bcm_cfg
;
21150 dhdp
= (dhd_pub_t
*)cfg
->pub
;
21152 if (!dhdp
|| !cfg
->ap_sta_info
) {
21153 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp
, cfg
->ap_sta_info
));
21157 p_wq_data
= (ap_sta_wq_data_t
*)MALLOCZ(dhdp
->osh
, sizeof(ap_sta_wq_data_t
));
21158 if (unlikely(!p_wq_data
)) {
21159 DHD_ERROR(("%s(): could not allocate memory for - "
21160 "ap_sta_wq_data_t\n", __FUNCTION__
));
/* Serialize payload construction against other ap_sta_info users. */
21164 mutex_lock(&cfg
->ap_sta_info
->wq_data_sync
);
21166 memcpy(&p_wq_data
->e
, e
, sizeof(wl_event_msg_t
));
21167 p_wq_data
->dhdp
= dhdp
;
21168 p_wq_data
->bcm_cfg
= cfg
;
21169 p_wq_data
->ndev
= (struct net_device
*)ndev
;
21171 mutex_unlock(&cfg
->ap_sta_info
->wq_data_sync
);
21173 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
21174 p_wq_data
, DHD_WQ_WORK_GET_BIGDATA_AP
,
21175 wl_gather_ap_stadata
, DHD_WQ_WORK_PRIORITY_HIGH
);
21178 #endif /* BIGDATA_SOFTAP */
/*
 * get_debug_dump_time - format the current local wall-clock time into str as
 * a debug-dump timestamp (YYMMDDHHMMSS + sub-second field), but only when
 * str is still empty: a previously formatted timestamp is reused so that a
 * group of related dump files shares one timestamp.
 * @str: buffer of at least DEBUG_DUMP_TIME_BUF_LEN bytes, zero-filled when
 *       no timestamp has been cached yet (see clear_debug_dump_time()).
 */
21181 get_debug_dump_time(char *str
)
21183 struct timeval curtime
;
21184 unsigned long local_time
;
21185 struct rtc_time tm
;
21187 if (!strlen(str
)) {
21188 do_gettimeofday(&curtime
);
/* Convert UTC seconds to local time using the kernel timezone offset. */
21189 local_time
= (u32
)(curtime
.tv_sec
-
21190 (sys_tz
.tz_minuteswest
* DHD_LOG_DUMP_TS_MULTIPLIER_VALUE
));
21191 rtc_time_to_tm(local_time
, &tm
);
/* tm_year is years since 1900, hence -100 for a 2-digit year;
 * tm_mon is 0-based, hence +1. tv_usec/NSEC_PER_USEC yields the
 * millisecond field (NSEC_PER_USEC == 1000).
 */
21193 snprintf(str
, DEBUG_DUMP_TIME_BUF_LEN
, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS
,
21194 tm
.tm_year
- 100, tm
.tm_mon
+ 1, tm
.tm_mday
, tm
.tm_hour
, tm
.tm_min
,
21195 tm
.tm_sec
, (int)(curtime
.tv_usec
/NSEC_PER_USEC
));
21200 clear_debug_dump_time(char *str
)
21202 memset(str
, 0, DEBUG_DUMP_TIME_BUF_LEN
);
#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
/*
 * copy_debug_dump_time - duplicate a formatted debug-dump timestamp.
 * Copies the entire fixed-size buffer (DEBUG_DUMP_TIME_BUF_LEN bytes), not
 * just the NUL-terminated prefix, so @dest must be at least that large.
 */
void
copy_debug_dump_time(char *dest, char *src)
{
	memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
/* Scratch-buffer size for one formatted line of kernel IRQ statistics. */
21212 #define KIRQ_PRINT_BUF_LEN 256
/*
 * dhd_print_kirqstats - format and log the kernel's per-CPU statistics for
 * one IRQ line (counts per online CPU, irqchip name, hwirq number and
 * trigger type), mirroring /proc/interrupts for the DHD-owned interrupt.
 * Holds desc->lock (raw spinlock, IRQs off) while reading the counters.
 * Only compiled in for kernels >= 2.6.28 where irq_to_desc() exists.
 * NOTE(review): extraction dropped braces/early-return lines; code left
 * byte-identical.
 */
21215 dhd_print_kirqstats(dhd_pub_t
*dhd
, unsigned int irq_num
)
21217 unsigned long flags
= 0;
21218 struct irq_desc
*desc
;
21219 int i
; /* cpu iterator */
21220 struct bcmstrbuf strbuf
;
21221 char tmp_buf
[KIRQ_PRINT_BUF_LEN
];
21223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
21224 desc
= irq_to_desc(irq_num
);
21226 DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__
));
21229 bcm_binit(&strbuf
, tmp_buf
, KIRQ_PRINT_BUF_LEN
);
/* Lock out concurrent IRQ-descriptor updates while sampling counters. */
21230 raw_spin_lock_irqsave(&desc
->lock
, flags
);
21231 bcm_bprintf(&strbuf
, "dhd irq %u:", irq_num
);
21232 for_each_online_cpu(i
)
21233 bcm_bprintf(&strbuf
, "%10u ",
/* kstat_irqs may be NULL before the IRQ is set up; print 0 then. */
21234 desc
->kstat_irqs
? *per_cpu_ptr(desc
->kstat_irqs
, i
) : 0);
21235 if (desc
->irq_data
.chip
) {
21236 if (desc
->irq_data
.chip
->name
)
21237 bcm_bprintf(&strbuf
, " %8s", desc
->irq_data
.chip
->name
);
21239 bcm_bprintf(&strbuf
, " %8s", "-");
21241 bcm_bprintf(&strbuf
, " %8s", "None");
21243 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
21244 if (desc
->irq_data
.domain
)
21245 bcm_bprintf(&strbuf
, " %d", (int)desc
->irq_data
.hwirq
);
21246 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
21247 bcm_bprintf(&strbuf
, " %-8s", irqd_is_level_type(&desc
->irq_data
) ? "Level" : "Edge");
21249 #endif /* LINUX VERSION > 3.1.0 */
21252 bcm_bprintf(&strbuf
, "-%-8s", desc
->name
);
21254 DHD_ERROR(("%s\n", strbuf
.origbuf
));
21255 raw_spin_unlock_irqrestore(&desc
->lock
, flags
);
21256 #endif /* LINUX VERSION > 2.6.28 */
/*
 * dhd_show_kirqstats - dump kernel IRQ statistics for the DHD data-path
 * interrupt (PCIe MSI or SDIO adapter IRQ, chosen by build config) and,
 * when BCMPCIE_OOB_HOST_WAKE is enabled, for the out-of-band host-wake IRQ.
 * NOTE(review): the opening #ifdef BCMPCIE / #ifdef BCMSDIO lines were
 * dropped by the extraction (only the #endif markers remain); code left
 * byte-identical.
 */
21260 dhd_show_kirqstats(dhd_pub_t
*dhd
)
21262 unsigned int irq
= -1;
21264 dhdpcie_get_pcieirq(dhd
->bus
, &irq
);
21265 #endif /* BCMPCIE */
21267 irq
= ((wifi_adapter_info_t
*)dhd
->info
->adapter
)->irq_num
;
21268 #endif /* BCMSDIO */
21271 DHD_ERROR(("DUMP data kernel irq stats : \n"));
21272 #endif /* BCMPCIE */
21274 DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
21275 #endif /* BCMSDIO */
21276 dhd_print_kirqstats(dhd
, irq
);
21278 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Second pass for the dedicated out-of-band wake GPIO interrupt. */
21279 irq
= dhdpcie_get_oob_irq_num(dhd
->bus
);
21281 DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
21282 dhd_print_kirqstats(dhd
, irq
);
21284 #endif /* BCMPCIE_OOB_HOST_WAKE */
/*
 * dhd_print_tasklet_status - log the raw state bitmap of the DHD receive
 * tasklet (TASKLET_STATE_SCHED/RUN bits) for debugging stalled RX paths.
 * Guards against NULL dhd and NULL dhd->info before dereferencing.
 */
21288 dhd_print_tasklet_status(dhd_pub_t
*dhd
)
21290 dhd_info_t
*dhdinfo
;
21293 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__
));
21297 dhdinfo
= dhd
->info
;
21300 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__
));
21304 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo
->tasklet
.state
))
;
/* Ring-buffer logging helpers: callers pass a parenthesized arg list,
 * e.g. DHD_RING_ERR(("fmt", args)), which the *_INTERNAL macro expands.
 */
21310 #define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
21311 #define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
21313 #define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
21314 #define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
/* Magic cookie validating a dhd_ring_info_t, and the sentinel index used
 * for "empty ring" / "no lock point".
 */
21316 #define DHD_RING_MAGIC 0x20170910
21317 #define DHD_RING_IDX_INVALID 0xffffffff
/* Fixed-size circular buffer bookkeeping (indices are element indices,
 * DHD_RING_IDX_INVALID when the ring is empty).
 */
21322 uint32 write_idx
; /* next write index, DHD_RING_IDX_INVALID : not started */
21323 uint32 read_idx
; /* next read index, DHD_RING_IDX_INVALID : not started */
21325 /* protected elements during serialization */
21326 int lock_idx
; /* start index of locked range; locked elements are not overwritten */
21327 int lock_count
; /* number of locked elements, starting from lock_idx */
21329 /* saved data elements */
21331 } dhd_fixed_ring_info_t
;
/* Top-level ring descriptor: magic + type tag + mutex + fixed-ring state. */
21336 struct mutex ring_sync
; /* serializes all dhd_ring_* entry points */
21338 dhd_fixed_ring_info_t fixed
;
/*
 * dhd_ring_get_hdr_size - size of the ring bookkeeping header that precedes
 * the element storage inside a buffer handed to dhd_ring_init().
 */
21343 dhd_ring_get_hdr_size(void)
21345 return sizeof(dhd_ring_info_t
);
/*
 * dhd_ring_init - initialize a fixed-size ring inside caller-provided memory.
 * @buf:       backing storage; the dhd_ring_info_t header lives at the start,
 *             elements follow immediately after.
 * @buf_size:  total size of @buf; must cover header + elem_size * elem_cnt.
 * @elem_size: size of one element in bytes.
 * @elem_cnt:  capacity in elements.
 * Returns the ring handle (== @buf) on success; rejects a NULL buffer or an
 * undersized one. All indices start at DHD_RING_IDX_INVALID (empty ring) and
 * the magic cookie is stamped last.
 * NOTE(review): extraction dropped braces/return lines; code byte-identical.
 */
21349 dhd_ring_init(uint8
*buf
, uint32 buf_size
, uint32 elem_size
, uint32 elem_cnt
)
21351 dhd_ring_info_t
*ret_ring
;
21354 DHD_RING_ERR(("NO RING BUFFER\n"));
21357 if (buf_size
< dhd_ring_get_hdr_size() + elem_size
* elem_cnt
) {
21358 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
21362 ret_ring
= (dhd_ring_info_t
*)buf
;
21363 ret_ring
->type
= DHD_RING_TYPE_FIXED
;
21364 mutex_init(&ret_ring
->ring_sync
);
21365 ret_ring
->fixed
.read_idx
= DHD_RING_IDX_INVALID
;
21366 ret_ring
->fixed
.write_idx
= DHD_RING_IDX_INVALID
;
21367 ret_ring
->fixed
.lock_idx
= DHD_RING_IDX_INVALID
;
/* Element storage starts right after the header. */
21368 ret_ring
->fixed
.elem
= buf
+ sizeof(dhd_ring_info_t
);
21369 ret_ring
->fixed
.elem_size
= elem_size
;
21370 ret_ring
->fixed
.elem_cnt
= elem_cnt
;
/* Stamp the magic last so a partially initialized ring never validates. */
21371 ret_ring
->magic
= DHD_RING_MAGIC
;
/*
 * dhd_ring_deinit - tear down a ring created by dhd_ring_init(): destroy the
 * mutex, wipe the element storage, and zero the geometry so later accidental
 * use fails validation. The backing buffer itself is owned by the caller.
 * NOTE(review): extraction dropped braces/return and the magic-reset lines;
 * code byte-identical.
 */
21376 dhd_ring_deinit(void *_ring
)
21378 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21379 dhd_fixed_ring_info_t
*fixed
;
21384 if (ring
->magic
!= DHD_RING_MAGIC
) {
21388 mutex_destroy(&ring
->ring_sync
);
21389 fixed
= &ring
->fixed
;
21390 memset(fixed
->elem
, 0, fixed
->elem_size
* fixed
->elem_cnt
);
21391 fixed
->elem_size
= fixed
->elem_cnt
= 0;
21397 /* get counts between two indexes of ring buffer (internal only) */
/*
 * __dhd_fixed_ring_get_count - number of elements from @start to @end
 * inclusive, walking forward with wrap-around. Both indices invalid means
 * the ring is empty (the dropped early-return handles that case).
 * Caller must hold ring_sync.
 */
21399 __dhd_fixed_ring_get_count(dhd_fixed_ring_info_t
*ring
, int start
, int end
)
21401 if (start
== DHD_RING_IDX_INVALID
|| end
== DHD_RING_IDX_INVALID
) {
/* Modular distance plus one, since both endpoints are counted. */
21405 return (ring
->elem_cnt
+ end
- start
) % ring
->elem_cnt
+ 1;
/*
 * __dhd_fixed_ring_get_cur_size - current number of stored elements
 * (read_idx..write_idx inclusive). Caller must hold ring_sync.
 */
21409 __dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t
*ring
)
21411 return __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, ring
->write_idx
);
/*
 * __dhd_fixed_ring_get_first - pointer to the oldest element, or NULL when
 * the ring is empty (the dropped branch body returns NULL).
 * Caller must hold ring_sync.
 */
21414 static inline void *
21415 __dhd_fixed_ring_get_first(dhd_fixed_ring_info_t
*ring
)
21417 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21420 return (uint8
*)ring
->elem
+ (ring
->elem_size
* ring
->read_idx
);
/*
 * __dhd_fixed_ring_free_first - discard the oldest element. If it was the
 * only element (read_idx == write_idx) the ring becomes empty; otherwise the
 * read index advances with wrap-around. Caller must hold ring_sync.
 */
21424 __dhd_fixed_ring_free_first(dhd_fixed_ring_info_t
*ring
)
21428 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21429 DHD_RING_ERR(("EMPTY RING\n"));
21433 next_idx
= (ring
->read_idx
+ 1) % ring
->elem_cnt
;
21434 if (ring
->read_idx
== ring
->write_idx
) {
/* Last element consumed: mark ring empty. */
21436 ring
->read_idx
= ring
->write_idx
= DHD_RING_IDX_INVALID
;
21440 ring
->read_idx
= next_idx
;
/*
 * __dhd_fixed_ring_get_last - pointer to the newest element (write_idx), or
 * NULL when the ring is empty. Caller must hold ring_sync.
 */
21444 static inline void *
21445 __dhd_fixed_ring_get_last(dhd_fixed_ring_info_t
*ring
)
21447 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21450 return (uint8
*)ring
->elem
+ (ring
->elem_size
* ring
->write_idx
);
/*
 * __dhd_fixed_ring_get_empty - reserve and return the next writable slot.
 * On an empty ring this is slot 0. Otherwise the write index advances; if
 * it catches up with the read index, the oldest unread element is dropped
 * (overwritten). Returns NULL when the next slot is lock-protected.
 * Caller must hold ring_sync.
 */
21453 static inline void *
21454 __dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t
*ring
)
21458 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21459 ring
->read_idx
= ring
->write_idx
= 0;
21460 return (uint8
*)ring
->elem
;
/* Locked elements must never be overwritten; refuse the write instead. */
21463 /* check next index is not locked */
21464 tmp_idx
= (ring
->write_idx
+ 1) % ring
->elem_cnt
;
21465 if (ring
->lock_idx
== tmp_idx
) {
21469 ring
->write_idx
= tmp_idx
;
21470 if (ring
->write_idx
== ring
->read_idx
) {
21471 /* record is full, drop oldest one */
21472 ring
->read_idx
= (ring
->read_idx
+ 1) % ring
->elem_cnt
;
21475 return (uint8
*)ring
->elem
+ (ring
->elem_size
* ring
->write_idx
);
/*
 * __dhd_fixed_ring_ptr2idx - convert an element pointer back into its ring
 * index, validating that it lies inside the element array, is element-size
 * aligned, and is below elem_cnt. Returns DHD_RING_IDX_INVALID (with an
 * error log tagged by @sig, the caller's name) on any violation.
 * Caller must hold ring_sync.
 */
21478 static inline uint32
21479 __dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t
*ring
, void *ptr
, char *sig
)
21482 uint32 ret_idx
= (uint32
)DHD_RING_IDX_INVALID
;
21484 if (ptr
< ring
->elem
) {
21485 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig
, ptr
, ring
->elem
));
21488 diff
= (uint32
)((uint8
*)ptr
- (uint8
*)ring
->elem
);
/* A valid element pointer is a whole multiple of elem_size from base. */
21489 if (diff
% ring
->elem_size
!= 0) {
21490 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig
, ptr
, ring
->elem
));
21493 ret_idx
= diff
/ ring
->elem_size
;
21494 if (ret_idx
>= ring
->elem_cnt
) {
21495 DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring
->elem_cnt
, ret_idx
));
/*
 * __dhd_fixed_ring_get_next - element following @prev in read order, or NULL
 * when the ring is empty, @prev is invalid, or @prev is already the newest
 * element. Caller must hold ring_sync.
 */
21500 static inline void *
21501 __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t
*ring
, void *prev
)
21505 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21506 DHD_RING_ERR(("EMPTY RING\n"));
21510 cur_idx
= __dhd_fixed_ring_ptr2idx(ring
, prev
, "NEXT");
21511 if (cur_idx
>= ring
->elem_cnt
) {
21515 if (cur_idx
== ring
->write_idx
) {
21516 /* no more new record */
21520 cur_idx
= (cur_idx
+ 1) % ring
->elem_cnt
;
21521 return (uint8
*)ring
->elem
+ ring
->elem_size
* cur_idx
;
/*
 * __dhd_fixed_ring_get_prev - element preceding @prev in read order, or NULL
 * when the ring is empty, @prev is invalid, or @prev is already the oldest
 * element. Caller must hold ring_sync.
 */
21524 static inline void *
21525 __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t
*ring
, void *prev
)
21529 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21530 DHD_RING_ERR(("EMPTY RING\n"));
21533 cur_idx
= __dhd_fixed_ring_ptr2idx(ring
, prev
, "PREV");
21534 if (cur_idx
>= ring
->elem_cnt
) {
21537 if (cur_idx
== ring
->read_idx
) {
21538 /* no more new record */
/* Step backwards with wrap-around (+elem_cnt avoids negative modulo). */
21542 cur_idx
= (cur_idx
+ ring
->elem_cnt
- 1) % ring
->elem_cnt
;
21543 return (uint8
*)ring
->elem
+ ring
->elem_size
* cur_idx
;
/*
 * __dhd_fixed_ring_lock - protect the element range [first_ptr, last_ptr]
 * from being overwritten by future writes. A NULL/invalid first_ptr falls
 * back to the oldest element and an invalid last_ptr to the newest (the
 * dropped else-branches perform those fallbacks). Both endpoints are
 * validated to lie inside the currently-filled region before lock_idx /
 * lock_count are set. Caller must hold ring_sync.
 */
21547 __dhd_fixed_ring_lock(dhd_fixed_ring_info_t
*ring
, void *first_ptr
, void *last_ptr
)
21551 uint32 ring_filled_cnt
;
21554 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21555 DHD_RING_ERR(("EMPTY RING\n"));
21560 first_idx
= __dhd_fixed_ring_ptr2idx(ring
, first_ptr
, "LCK FIRST");
21561 if (first_idx
>= ring
->elem_cnt
) {
21565 first_idx
= ring
->read_idx
;
21569 last_idx
= __dhd_fixed_ring_ptr2idx(ring
, last_ptr
, "LCK LAST");
21570 if (last_idx
>= ring
->elem_cnt
) {
21574 last_idx
= ring
->write_idx
;
/* Reject lock endpoints that point past the filled portion of the ring. */
21577 ring_filled_cnt
= __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, ring
->write_idx
);
21578 tmp_cnt
= __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, first_idx
);
21579 if (tmp_cnt
> ring_filled_cnt
) {
21580 DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21581 ring
->write_idx
, ring
->read_idx
, first_idx
));
21585 tmp_cnt
= __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, last_idx
);
21586 if (tmp_cnt
> ring_filled_cnt
) {
21587 DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21588 ring
->write_idx
, ring
->read_idx
, last_idx
));
21592 ring
->lock_idx
= first_idx
;
21593 ring
->lock_count
= __dhd_fixed_ring_get_count(ring
, first_idx
, last_idx
);
/*
 * __dhd_fixed_ring_lock_free - release the whole locked range so writes may
 * again overwrite those elements. Caller must hold ring_sync.
 */
21598 __dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t
*ring
)
21600 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21601 DHD_RING_ERR(("EMPTY RING\n"));
21605 ring
->lock_idx
= DHD_RING_IDX_INVALID
;
21606 ring
->lock_count
= 0;
/*
 * __dhd_fixed_ring_lock_get_first - pointer to the first element of the
 * locked range, or NULL when the ring is empty or nothing is locked.
 * Caller must hold ring_sync.
 */
21609 static inline void *
21610 __dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t
*ring
)
21612 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21613 DHD_RING_ERR(("EMPTY RING\n"));
21616 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21617 DHD_RING_ERR(("NO LOCK POINT\n"));
21620 return (uint8
*)ring
->elem
+ ring
->elem_size
* ring
->lock_idx
;
/*
 * __dhd_fixed_ring_lock_get_last - pointer to the last element of the locked
 * range (lock_idx + lock_count - 1, with wrap-around), or NULL when the ring
 * is empty or nothing is locked. Caller must hold ring_sync.
 */
21623 static inline void *
21624 __dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t
*ring
)
21627 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21628 DHD_RING_ERR(("EMPTY RING\n"));
21631 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21632 DHD_RING_ERR(("NO LOCK POINT\n"));
21636 lock_last_idx
= (ring
->lock_idx
+ ring
->lock_count
- 1) % ring
->elem_cnt
;
21637 return (uint8
*)ring
->elem
+ ring
->elem_size
* lock_last_idx
;
/*
 * __dhd_fixed_ring_lock_get_count - number of currently locked elements;
 * error-logged early exits cover the empty-ring and no-lock cases.
 * Caller must hold ring_sync.
 */
21641 __dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t
*ring
)
21643 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21644 DHD_RING_ERR(("EMPTY RING\n"));
21647 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21648 DHD_RING_ERR(("NO LOCK POINT\n"));
21651 return ring
->lock_count
;
/*
 * __dhd_fixed_ring_lock_free_first - shrink the locked range by one from the
 * front: decrement lock_count and either clear the lock entirely (count
 * exhausted) or advance lock_idx with wrap-around. Caller must hold
 * ring_sync.
 */
21655 __dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t
*ring
)
21657 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21658 DHD_RING_ERR(("EMPTY RING\n"));
21661 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21662 DHD_RING_ERR(("NO LOCK POINT\n"));
21666 ring
->lock_count
--;
21667 if (ring
->lock_count
<= 0) {
21668 ring
->lock_idx
= DHD_RING_IDX_INVALID
;
21670 ring
->lock_idx
= (ring
->lock_idx
+ 1) % ring
->elem_cnt
;
21675 /* Get first element : oldest element */
/*
 * dhd_ring_get_first - public wrapper: validate the ring handle (magic),
 * take ring_sync, and return the oldest element or NULL.
 */
21677 dhd_ring_get_first(void *_ring
)
21679 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21682 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21683 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21687 mutex_lock(&ring
->ring_sync
);
21688 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21689 ret
= __dhd_fixed_ring_get_first(&ring
->fixed
);
21691 mutex_unlock(&ring
->ring_sync
);
21695 /* Free first element : oldest element */
/*
 * dhd_ring_free_first - public wrapper: validate the handle, take ring_sync,
 * and discard the oldest element.
 */
21697 dhd_ring_free_first(void *_ring
)
21699 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21701 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21702 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21706 mutex_lock(&ring
->ring_sync
);
21707 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21708 __dhd_fixed_ring_free_first(&ring
->fixed
);
21710 mutex_unlock(&ring
->ring_sync
);
21714 /* Get latest element */
/*
 * dhd_ring_get_last - public wrapper: validate the handle, take ring_sync,
 * and return the newest element or NULL.
 */
21716 dhd_ring_get_last(void *_ring
)
21718 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21721 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21722 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21726 mutex_lock(&ring
->ring_sync
);
21727 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21728 ret
= __dhd_fixed_ring_get_last(&ring
->fixed
);
21730 mutex_unlock(&ring
->ring_sync
);
21734 /* Get next point can be written
21735 * will overwrite which doesn't read
21736 * will return NULL if next pointer is locked
/*
 * dhd_ring_get_empty - public wrapper: validate the handle, take ring_sync,
 * and reserve the next writable slot (possibly dropping the oldest unread
 * element); NULL when the next slot is lock-protected.
 */
21739 dhd_ring_get_empty(void *_ring
)
21741 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21744 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21745 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21749 mutex_lock(&ring
->ring_sync
);
21750 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21751 ret
= __dhd_fixed_ring_get_empty(&ring
->fixed
);
21753 mutex_unlock(&ring
->ring_sync
);
/*
 * dhd_ring_get_next - public wrapper: validate the handle, take ring_sync,
 * and return the element after @cur in read order (NULL at the end).
 */
21758 dhd_ring_get_next(void *_ring
, void *cur
)
21760 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21763 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21764 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21768 mutex_lock(&ring
->ring_sync
);
21769 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21770 ret
= __dhd_fixed_ring_get_next(&ring
->fixed
, cur
);
21772 mutex_unlock(&ring
->ring_sync
);
/*
 * dhd_ring_get_prev - public wrapper: validate the handle, take ring_sync,
 * and return the element before @cur in read order (NULL at the start).
 */
21777 dhd_ring_get_prev(void *_ring
, void *cur
)
21779 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21782 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21783 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21787 mutex_lock(&ring
->ring_sync
);
21788 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21789 ret
= __dhd_fixed_ring_get_prev(&ring
->fixed
, cur
);
21791 mutex_unlock(&ring
->ring_sync
);
/*
 * dhd_ring_get_cur_size - public wrapper: validate the handle, take
 * ring_sync, and return the current element count.
 */
21796 dhd_ring_get_cur_size(void *_ring
)
21798 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21801 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21802 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21806 mutex_lock(&ring
->ring_sync
);
21807 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21808 cnt
= __dhd_fixed_ring_get_cur_size(&ring
->fixed
);
21810 mutex_unlock(&ring
->ring_sync
);
21814 /* protect element between lock_ptr and write_idx */
/*
 * dhd_ring_lock - public wrapper: validate the handle, take ring_sync, and
 * lock the element range [first_ptr, last_ptr] against overwrite.
 */
21816 dhd_ring_lock(void *_ring
, void *first_ptr
, void *last_ptr
)
21818 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21820 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21821 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21825 mutex_lock(&ring
->ring_sync
);
21826 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21827 __dhd_fixed_ring_lock(&ring
->fixed
, first_ptr
, last_ptr
);
21829 mutex_unlock(&ring
->ring_sync
);
21833 /* free all lock */
/*
 * dhd_ring_lock_free - public wrapper: validate the handle, take ring_sync,
 * and release the entire locked range.
 */
21835 dhd_ring_lock_free(void *_ring
)
21837 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21839 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21840 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21844 mutex_lock(&ring
->ring_sync
);
21845 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21846 __dhd_fixed_ring_lock_free(&ring
->fixed
);
21848 mutex_unlock(&ring
->ring_sync
);
/*
 * dhd_ring_lock_get_first - public wrapper: validate the handle, take
 * ring_sync, and return the first locked element or NULL.
 */
21853 dhd_ring_lock_get_first(void *_ring
)
21855 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21858 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21859 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21863 mutex_lock(&ring
->ring_sync
);
21864 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21865 ret
= __dhd_fixed_ring_lock_get_first(&ring
->fixed
);
21867 mutex_unlock(&ring
->ring_sync
);
/*
 * dhd_ring_lock_get_last - public wrapper: validate the handle, take
 * ring_sync, and return the last locked element or NULL.
 */
21872 dhd_ring_lock_get_last(void *_ring
)
21874 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21877 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21878 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21882 mutex_lock(&ring
->ring_sync
);
21883 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21884 ret
= __dhd_fixed_ring_lock_get_last(&ring
->fixed
);
21886 mutex_unlock(&ring
->ring_sync
);
/*
 * dhd_ring_lock_get_count - public wrapper: validate the handle, take
 * ring_sync, and return the locked-element count (BCME_ERROR for an
 * invalid handle or non-fixed ring type).
 */
21891 dhd_ring_lock_get_count(void *_ring
)
21893 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21894 int ret
= BCME_ERROR
;
21896 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21897 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21901 mutex_lock(&ring
->ring_sync
);
21902 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21903 ret
= __dhd_fixed_ring_lock_get_count(&ring
->fixed
);
21905 mutex_unlock(&ring
->ring_sync
);
21909 /* free first locked element */
/*
 * dhd_ring_lock_free_first - public wrapper: validate the handle, take
 * ring_sync, and release just the first element of the locked range.
 */
21911 dhd_ring_lock_free_first(void *_ring
)
21913 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21915 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21916 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21920 mutex_lock(&ring
->ring_sync
);
21921 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21922 __dhd_fixed_ring_lock_free_first(&ring
->fixed
);
21924 mutex_unlock(&ring
->ring_sync
);
21928 #ifdef DHD_DUMP_MNGR
/* Kernel-version compatibility shims: dentry->d_inode became d_inode()
 * in 3.19, and vfs_unlink() gained a delegated-inode parameter in 3.13.
 */
21929 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
21930 #define DHD_VFS_INODE(dir) (dir->d_inode)
21932 #define DHD_VFS_INODE(dir) d_inode(dir)
21933 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
21935 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
21936 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
21938 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
21939 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
/*
 * dhd_file_delete - unlink a dump file at @path from kernel context:
 * resolve the path, verify it names a regular existing file (on kernels
 * that provide d_is_file/d_really_is_negative), then vfs_unlink() it via
 * its parent dentry. Logs and returns the error on failure.
 * NOTE(review): extraction dropped braces/returns; code byte-identical.
 */
21942 dhd_file_delete(char *path
)
21944 struct path file_path
;
21946 struct dentry
*dir
;
21948 err
= kern_path(path
, 0, &file_path
);
21954 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
21955 !d_is_file(file_path
.dentry
) ||
21956 #if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
21957 d_really_is_negative(file_path
.dentry
)
21958 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
21959 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
/* dget_parent() takes a reference on the parent dentry for the unlink. */
21964 dir
= dget_parent(file_path
.dentry
);
21966 if (!IS_ERR(dir
)) {
21967 err
= DHD_VFS_UNLINK(dir
, file_path
.dentry
, NULL
);
21970 err
= PTR_ERR(dir
);
/* Drop the reference taken by kern_path(). */
21974 path_put(&file_path
);
21977 DHD_ERROR(("Failed to delete file: %s error: %d\n", path
, err
));
/*
 * dhd_dump_file_manage_idx - find (or allocate) the file-manager slot for a
 * dump-type name: scan elems[] for a matching type_name, remembering the
 * first empty slot; if @fname is new, claim that empty slot and initialize
 * its name and file_idx. Returns the slot index, or -1 when the table is
 * full (the dropped lines track and return fm_idx).
 */
21984 dhd_dump_file_manage_idx(dhd_dump_file_manage_t
*fm_ptr
, char *fname
)
21989 for (i
= 0; i
< DHD_DUMP_TYPE_COUNT_MAX
; i
++) {
/* Empty slot: remember it as a candidate for a new type name. */
21990 if (strlen(fm_ptr
->elems
[i
].type_name
) == 0) {
21994 if (!(strncmp(fname
, fm_ptr
->elems
[i
].type_name
, strlen(fname
)))) {
22000 if (fm_idx
== -1) {
/* Claiming a fresh slot: copy the name (forced NUL) and reset index. */
22004 if (strlen(fm_ptr
->elems
[fm_idx
].type_name
) == 0) {
22005 strncpy(fm_ptr
->elems
[fm_idx
].type_name
, fname
, DHD_DUMP_TYPE_NAME_SIZE
);
22006 fm_ptr
->elems
[fm_idx
].type_name
[DHD_DUMP_TYPE_NAME_SIZE
- 1] = '\0';
22007 fm_ptr
->elems
[fm_idx
].file_idx
= 0;
/*
22014 * dhd_dump_file_manage_enqueue - enqueue dump file path
22015 * and delete oldest file if file count is max.
 *
 * Looks up the per-dump-type slot, deletes the file previously stored at
 * the slot's rotating file index, records @dump_path there (bounded copy,
 * forced NUL), and advances file_idx modulo DHD_DUMP_FILE_COUNT_MAX.
 */
22018 dhd_dump_file_manage_enqueue(dhd_pub_t
*dhd
, char *dump_path
, char *fname
)
22022 dhd_dump_file_manage_t
*fm_ptr
;
22025 if (!dhd
|| !dhd
->dump_file_manage
) {
22026 DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
22027 __FUNCTION__
, dhd
, (dhd
? dhd
->dump_file_manage
: NULL
)));
22031 fm_ptr
= dhd
->dump_file_manage
;
22033 /* find file_manage idx */
22034 DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__
, fname
, dump_path
));
22035 if ((fm_idx
= dhd_dump_file_manage_idx(fm_ptr
, fname
)) < 0) {
22036 DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
22037 __FUNCTION__
, fname
));
22041 elem
= &fm_ptr
->elems
[fm_idx
];
22042 fp_idx
= elem
->file_idx
;
22043 DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
22044 __FUNCTION__
, fm_idx
, fp_idx
, elem
->file_path
[fp_idx
]));
/* Rotation: the slot we are about to reuse holds the oldest file. */
22046 /* delete oldest file */
22047 if (strlen(elem
->file_path
[fp_idx
]) != 0) {
22048 if (dhd_file_delete(elem
->file_path
[fp_idx
]) < 0) {
22049 DHD_ERROR(("%s(): Failed to delete file: %s\n",
22050 __FUNCTION__
, elem
->file_path
[fp_idx
]));
22052 DHD_ERROR(("%s(): Successed to delete file: %s\n",
22053 __FUNCTION__
, elem
->file_path
[fp_idx
]));
22057 /* save dump file path */
22058 strncpy(elem
->file_path
[fp_idx
], dump_path
, DHD_DUMP_FILE_PATH_SIZE
);
22059 elem
->file_path
[fp_idx
][DHD_DUMP_FILE_PATH_SIZE
- 1] = '\0';
22061 /* change file index to next file index */
22062 elem
->file_idx
= (elem
->file_idx
+ 1) % DHD_DUMP_FILE_COUNT_MAX
;
22064 #endif /* DHD_DUMP_MNGR */
22066 #ifdef DHD_MAP_LOGGING
22067 /* Will be called from SMMU fault handler */
/*
 * dhd_debug_info_dump - capture debug state when an SMMU fault implicates
 * the WLAN device: mark the fault, mask the PCIe IRQ, then (under a wake
 * lock) print protocol debug info, dump DMA mappings and packet-ID logs,
 * and pull a dongle memory dump (type SMMU_FAULT). Exported because the
 * platform SMMU fault handler lives outside this module.
 */
22069 dhd_debug_info_dump(void)
22071 dhd_pub_t
*dhdp
= (dhd_pub_t
*)g_dhd_pub
;
22072 uint32 irq
= (uint32
)-1;
22074 DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__
));
22075 dhdp
->smmu_fault_occurred
= TRUE
;
/* Stop further device DMA/interrupt activity while we collect state. */
22077 /* Disable PCIe IRQ */
22078 dhdpcie_get_pcieirq(dhdp
->bus
, &irq
);
22079 if (irq
!= (uint32
)-1) {
22080 disable_irq_nosync(irq
);
22083 DHD_OS_WAKE_LOCK(dhdp
);
22084 dhd_prot_debug_info_print(dhdp
);
22085 osl_dma_map_dump(dhdp
->osh
);
22086 #ifdef DHD_MAP_PKTID_LOGGING
22087 dhd_pktid_logging_dump(dhdp
);
22088 #endif /* DHD_MAP_PKTID_LOGGING */
22089 #ifdef DHD_FW_COREDUMP
22090 /* Load the dongle side dump to host memory */
22091 dhdp
->memdump_enabled
= DUMP_MEMONLY
;
22092 dhdp
->memdump_type
= DUMP_TYPE_SMMU_FAULT
;
22093 dhd_bus_mem_dump(dhdp
);
22094 #endif /* DHD_FW_COREDUMP */
22095 DHD_OS_WAKE_UNLOCK(dhdp
);
22097 EXPORT_SYMBOL(dhd_debug_info_dump
);
22098 #endif /* DHD_MAP_LOGGING */
/*
 * dhd_get_host_whitelist_region - fill @buf with the host DMA whitelist
 * region: start address from the wlreg_l/wlreg_h globals, end address
 * computed as 64-bit start + length (length split across wlreg_len_l /
 * wlreg_len_h). A zero length in both halves means no region is configured
 * (the dropped early-return covers that). @len is the caller's buffer size.
 * NOTE(review): assumes buf is at least sizeof(dma_wl_addr_region_host_t)
 * — the visible code does not check @len; verify against callers.
 */
22100 dhd_get_host_whitelist_region(void *buf
, uint len
)
22102 dma_wl_addr_region_host_t
*host_reg
;
22105 if ((wlreg_len_h
== 0) && (wlreg_len_l
== 0)) {
22109 host_reg
= (dma_wl_addr_region_host_t
*)buf
;
/* Assemble the 64-bit end address: high 32 bits then add the low half. */
22110 wl_end
= wlreg_len_h
+ wlreg_h
;
22111 wl_end
= (wl_end
& MASK_32_BITS
) << 32;
22113 wl_end
+= wlreg_len_l
;
22114 /* Now write whitelist region(s) */
22115 host_reg
->hreg_start
.addr_low
= wlreg_l
;
22116 host_reg
->hreg_start
.addr_high
= wlreg_h
;
22117 host_reg
->hreg_end
.addr_low
= EXTRACT_LOW32(wl_end
);
22118 host_reg
->hreg_end
.addr_high
= EXTRACT_HIGH32(wl_end
);
22122 #ifdef SUPPORT_SET_TID
/*
22124 * Set custom TID value for UDP frame based on UID value.
22125 * This will be triggered by android private command below.
22126 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
22127 * Mode 0(SET_TID_OFF) : Disable changing TID
22128 * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames
22129 * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
 *
 * Only best-effort (PRIO_8021D_BE) UDP packets are retagged; packets whose
 * priority is already the target, or is not BE, are left alone.
 */
22132 dhd_set_tid_based_on_uid(dhd_pub_t
*dhdp
, void *pkt
)
22134 struct ether_header
*eh
= NULL
;
22135 struct sock
*sk
= NULL
;
22136 uint8
*pktdata
= NULL
;
22137 uint8
*ip_hdr
= NULL
;
22142 if (dhdp
->tid_mode
== SET_TID_OFF
) {
22146 pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pkt
);
22147 eh
= (struct ether_header
*) pktdata
;
22148 ip_hdr
= (uint8
*)eh
+ ETHER_HDR_LEN
;
/* Only UDP frames are candidates for retagging. */
22150 if (IPV4_PROT(ip_hdr
) != IP_PROT_UDP
) {
22154 cur_prio
= PKTPRIO(pkt
);
22155 prio
= dhdp
->target_tid
;
22156 uid
= dhdp
->target_uid
;
/* Skip if already tagged, or if the packet is not best-effort. */
22158 if ((cur_prio
== prio
) ||
22159 (cur_prio
!= PRIO_8021D_BE
)) {
22163 sk
= ((struct sk_buff
*)(pkt
))->sk
;
/* Mode ALL_UDP retags unconditionally; mode BASED_ON_UID requires the
 * owning socket's UID to match the configured target UID.
 */
22165 if ((dhdp
->tid_mode
== SET_TID_ALL_UDP
) ||
22166 (sk
&& (uid
== __kuid_val(sock_i_uid(sk
))))) {
22167 PKTSETPRIO(pkt
, prio
);
22170 #endif /* SUPPORT_SET_TID */
22171 #ifdef DHDTCPSYNC_FLOOD_BLK
/*
 * dhd_blk_tsfl_handler - workqueue handler run when a TCP SYN flood is
 * detected on an interface: in AP/P2P-GO mode it disassociates all
 * stations; in STA/P2P-GC mode it disconnects from the AP.
 */
22172 static void dhd_blk_tsfl_handler(struct work_struct
* work
)
22174 dhd_if_t
*ifp
= NULL
;
22175 dhd_pub_t
*dhdp
= NULL
;
22176 /* Ignore compiler warnings due to -Werror=cast-qual */
22177 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22178 #pragma GCC diagnostic push
22179 #pragma GCC diagnostic ignored "-Wcast-qual"
22180 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
/* Recover the owning interface from the embedded work item. */
22181 ifp
= container_of(work
, dhd_if_t
, blk_tsfl_work
);
22182 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22183 #pragma GCC diagnostic pop
22184 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22186 dhdp
= &ifp
->info
->pub
;
22188 if ((dhdp
->op_mode
& DHD_FLAG_P2P_GO_MODE
)||
22189 (dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
)) {
22190 DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
22191 wl_cfg80211_del_all_sta(ifp
->net
, WLAN_REASON_UNSPECIFIED
);
22192 } else if ((dhdp
->op_mode
& DHD_FLAG_P2P_GC_MODE
)||
22193 (dhdp
->op_mode
& DHD_FLAG_STA_MODE
)) {
22194 DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
22195 wl_cfg80211_disassoc(ifp
->net
);
22200 void dhd_reset_tcpsync_info_by_ifp(dhd_if_t
*ifp
)
22202 ifp
->tsync_rcvd
= 0;
22203 ifp
->tsyncack_txed
= 0;
22204 ifp
->last_sync
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
/*
 * dhd_reset_tcpsync_info_by_dev - same reset as
 * dhd_reset_tcpsync_info_by_ifp(), but resolving the interface from a
 * net_device first.
 * NOTE(review): the extraction appears to have dropped the NULL guard
 * between resolving ifp and using it; code left byte-identical.
 */
22206 void dhd_reset_tcpsync_info_by_dev(struct net_device
*dev
)
22208 dhd_if_t
*ifp
= NULL
;
22210 ifp
= DHD_DEV_IFP(dev
);
22213 ifp
->tsync_rcvd
= 0;
22214 ifp
->tsyncack_txed
= 0;
22215 ifp
->last_sync
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
22218 #endif /* DHDTCPSYNC_FLOOD_BLK */