2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2019, Broadcom.
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
26 * <<Broadcom-WL-IPTag/Open:>>
28 * $Id: dhd_linux.c 796863 2018-12-27 07:39:27Z $
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
39 #ifdef PCIE_FULL_DONGLE
40 #include <bcmmsgbuf.h>
41 #endif /* PCIE_FULL_DONGLE */
43 #include <linux/init.h>
44 #include <linux/kernel.h>
45 #include <linux/slab.h>
46 #include <linux/skbuff.h>
47 #include <linux/netdevice.h>
48 #include <linux/inetdevice.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/etherdevice.h>
51 #include <linux/random.h>
52 #include <linux/spinlock.h>
53 #include <linux/ethtool.h>
54 #include <linux/fcntl.h>
57 #include <linux/reboot.h>
58 #include <linux/notifier.h>
59 #include <linux/irq.h>
60 #include <net/addrconf.h>
61 #ifdef ENABLE_ADAPTIVE_SCHED
62 #include <linux/cpufreq.h>
63 #endif /* ENABLE_ADAPTIVE_SCHED */
64 #include <linux/rtc.h>
66 #include <linux/namei.h>
67 #endif /* DHD_DUMP_MNGR */
68 #include <asm/uaccess.h>
69 #include <asm/unaligned.h>
70 #include <dhd_linux_priv.h>
74 #include <bcmendian.h>
83 #include <dhd_linux_wq.h>
85 #include <dhd_linux.h>
89 #ifdef PCIE_FULL_DONGLE
90 #include <dhd_flowring.h>
93 #include <dhd_proto.h>
95 #include <dhd_dbg_ring.h>
96 #include <dhd_debug.h>
97 #ifdef CONFIG_HAS_WAKELOCK
98 #include <linux/wakelock.h>
100 #if defined(WL_CFG80211)
101 #include <wl_cfg80211.h>
105 #endif /* WL_CFG80211 */
114 #include <linux/compat.h>
117 #if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
118 #include <linux/exynos-pci-ctrl.h>
119 #endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
123 #include <bcm_l2_filter.h>
124 #include <dhd_l2_filter.h>
125 #endif /* DHD_L2_FILTER */
128 #include <dhd_psta.h>
129 #endif /* DHD_PSTA */
131 #ifdef AMPDU_VO_ENABLE
133 #endif /* AMPDU_VO_ENABLE */
135 #if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
137 #endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
138 #include <dhd_daemon.h>
139 #ifdef DHD_PKT_LOGGING
140 #include <dhd_pktlog.h>
141 #endif /* DHD_PKT_LOGGING */
142 #ifdef DHD_DEBUG_PAGEALLOC
143 typedef void (*page_corrupt_cb_t
)(void *handle
, void *addr_corrupt
, size_t len
);
144 void dhd_page_corrupt_cb(void *handle
, void *addr_corrupt
, size_t len
);
145 extern void register_page_corrupt_cb(page_corrupt_cb_t cb
, void* handle
);
146 #endif /* DHD_DEBUG_PAGEALLOC */
148 #define IP_PROT_RESERVED 0xFF
150 #ifdef DHDTCPSYNC_FLOOD_BLK
151 static void dhd_blk_tsfl_handler(struct work_struct
* work
);
152 #endif /* DHDTCPSYNC_FLOOD_BLK */
155 #include <dhd_linux_nfct.h>
156 #endif /* WL_NATOE */
159 extern bool ap_cfg_running
;
160 extern bool ap_fw_loaded
;
164 #if defined(DHD_LB_RXP)
165 static void dhd_rx_napi_dispatcher_fn(struct work_struct
* work
);
166 #endif /* DHD_LB_RXP */
167 #if defined(DHD_LB_TXP)
168 static void dhd_lb_tx_handler(unsigned long data
);
169 static void dhd_tx_dispatcher_work(struct work_struct
* work
);
170 static void dhd_tx_dispatcher_fn(dhd_pub_t
*dhdp
);
171 static void dhd_lb_tx_dispatch(dhd_pub_t
*dhdp
);
172 #endif /* DHD_LB_TXP */
175 #ifdef FIX_CPU_MIN_CLOCK
176 #include <linux/pm_qos.h>
177 #endif /* FIX_CPU_MIN_CLOCK */
179 #ifdef SET_RANDOM_MAC_SOFTAP
180 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
181 #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
183 static u32 vendor_oui
= CONFIG_DHD_SET_RANDOM_MAC_VAL
;
184 #endif /* SET_RANDOM_MAC_SOFTAP */
186 #ifdef ENABLE_ADAPTIVE_SCHED
187 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
188 #ifndef CUSTOM_CPUFREQ_THRESH
189 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
190 #endif /* CUSTOM_CPUFREQ_THRESH */
191 #endif /* ENABLE_ADAPTIVE_SCHED */
193 /* enable HOSTIP cache update from the host side when an eth0:N is up */
194 #define AOE_IP_ALIAS_SUPPORT 1
197 #include <wlfc_proto.h>
198 #include <dhd_wlfc.h>
201 #include <wl_android.h>
203 /* Maximum STA per radio */
204 #define DHD_MAX_STA 32
206 #ifdef DHD_EVENT_LOG_FILTER
207 #include <dhd_event_log_filter.h>
208 #endif /* DHD_EVENT_LOG_FILTER */
211 * Start of Host DMA whitelist region.
215 module_param(wlreg_l
, uint
, 0644);
216 module_param(wlreg_h
, uint
, 0644);
219 * Sizeof whitelist region. The dongle will allow DMA to only wlreg to wlreg+wlreg_len.
220 * If length of whitelist region is zero, host will not program whitelist region to dongle.
222 uint32 wlreg_len_h
= 0;
223 uint32 wlreg_len_l
= 0;
225 module_param(wlreg_len_l
, uint
, 0644);
226 module_param(wlreg_len_h
, uint
, 0644);
228 const uint8 wme_fifo2ac
[] = { 0, 1, 2, 3, 1, 1 };
229 const uint8 prio2fifo
[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
230 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
232 #ifdef ARP_OFFLOAD_SUPPORT
233 void aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
);
234 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
235 unsigned long event
, void *ptr
);
236 static struct notifier_block dhd_inetaddr_notifier
= {
237 .notifier_call
= dhd_inetaddr_notifier_call
239 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
240 * created in kernel notifier link list (with 'next' pointing to itself)
242 static bool dhd_inetaddr_notifier_registered
= FALSE
;
243 #endif /* ARP_OFFLOAD_SUPPORT */
245 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
246 int dhd_inet6addr_notifier_call(struct notifier_block
*this,
247 unsigned long event
, void *ptr
);
248 static struct notifier_block dhd_inet6addr_notifier
= {
249 .notifier_call
= dhd_inet6addr_notifier_call
251 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
252 * created in kernel notifier link list (with 'next' pointing to itself)
254 static bool dhd_inet6addr_notifier_registered
= FALSE
;
255 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
258 #include <linux/suspend.h>
259 volatile bool dhd_mmc_suspend
= FALSE
;
260 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait
);
261 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
263 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
264 extern void dhd_enable_oob_intr(struct dhd_bus
*bus
, bool enable
);
265 #endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
266 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
267 static void dhd_hang_process(struct work_struct
*work_data
);
269 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
270 MODULE_LICENSE("GPL and additional rights");
271 #endif /* LinuxVer */
273 #ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
274 #define MAX_CONSECUTIVE_HANG_COUNTS 5
275 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
283 #ifndef PROP_TXSTATUS
284 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
286 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
290 extern bool dhd_wlfc_skip_fc(void * dhdp
, uint8 idx
);
291 extern void dhd_wlfc_plat_init(void *dhd
);
292 extern void dhd_wlfc_plat_deinit(void *dhd
);
293 #endif /* PROP_TXSTATUS */
294 #ifdef USE_DYNAMIC_F2_BLKSIZE
295 extern uint sd_f2_blocksize
;
296 extern int dhdsdio_func_blocksize(dhd_pub_t
*dhd
, int function_num
, int block_size
);
297 #endif /* USE_DYNAMIC_F2_BLKSIZE */
299 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
305 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
307 /* Linux wireless extension support */
308 #if defined(WL_WIRELESS_EXT)
310 extern wl_iw_extra_params_t g_wl_iw_params
;
311 #endif /* defined(WL_WIRELESS_EXT) */
313 #ifdef CONFIG_PARTIALSUSPEND_SLP
314 #include <linux/partialsuspend_slp.h>
315 #define CONFIG_HAS_EARLYSUSPEND
316 #define DHD_USE_EARLYSUSPEND
317 #define register_early_suspend register_pre_suspend
318 #define unregister_early_suspend unregister_pre_suspend
319 #define early_suspend pre_suspend
320 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
322 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
323 #include <linux/earlysuspend.h>
324 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
325 #endif /* CONFIG_PARTIALSUSPEND_SLP */
327 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
328 #include <linux/nl80211.h>
329 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
331 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
332 static int __dhd_apf_add_filter(struct net_device
*ndev
, uint32 filter_id
,
333 u8
* program
, uint32 program_len
);
334 static int __dhd_apf_config_filter(struct net_device
*ndev
, uint32 filter_id
,
335 uint32 mode
, uint32 enable
);
336 static int __dhd_apf_delete_filter(struct net_device
*ndev
, uint32 filter_id
);
337 #endif /* PKT_FILTER_SUPPORT && APF */
339 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
340 defined(ARGOS_NOTIFY_CB)
341 /* ARGOS notifier data */
342 static struct notifier_block argos_wifi
; /* STA */
343 static struct notifier_block argos_p2p
; /* P2P */
344 argos_rps_ctrl argos_rps_ctrl_data
;
345 #ifdef DYNAMIC_MUMIMO_CONTROL
346 argos_mumimo_ctrl argos_mumimo_ctrl_data
;
347 #ifdef CONFIG_SPLIT_ARGOS_SET
348 static struct notifier_block argos_mimo
; /* STA */
349 #endif /* CONFIG_SPLIT_ARGOS_SET */
350 #endif /* DYNAMIC_MUMIMO_CONTROL */
351 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
353 #ifdef DHD_FW_COREDUMP
354 static void dhd_mem_dump(void *dhd_info
, void *event_info
, u8 event
);
355 #endif /* DHD_FW_COREDUMP */
359 struct dhd_log_dump_buf g_dld_buf
[DLD_BUFFER_NUM
];
361 /* Only header for log dump buffers is stored in array
362 * header for sections like 'dhd dump', 'ext trap'
363 * etc, is not in the array, because they are not log
366 dld_hdr_t dld_hdrs
[DLD_BUFFER_NUM
] = {
367 {GENERAL_LOG_HDR
, LOG_DUMP_SECTION_GENERAL
},
368 {PRESERVE_LOG_HDR
, LOG_DUMP_SECTION_PRESERVE
},
369 {SPECIAL_LOG_HDR
, LOG_DUMP_SECTION_SPECIAL
}
372 static int dld_buf_size
[DLD_BUFFER_NUM
] = {
373 LOG_DUMP_GENERAL_MAX_BUFSIZE
, /* DLD_BUF_TYPE_GENERAL */
374 LOG_DUMP_PRESERVE_MAX_BUFSIZE
, /* DLD_BUF_TYPE_PRESERVE */
375 LOG_DUMP_SPECIAL_MAX_BUFSIZE
, /* DLD_BUF_TYPE_SPECIAL */
377 static void dhd_log_dump_init(dhd_pub_t
*dhd
);
378 static void dhd_log_dump_deinit(dhd_pub_t
*dhd
);
379 static void dhd_log_dump(void *handle
, void *event_info
, u8 event
);
380 static int do_dhd_log_dump(dhd_pub_t
*dhdp
, log_dump_type_t
*type
);
381 static void dhd_print_buf_addr(dhd_pub_t
*dhdp
, char *name
, void *buf
, unsigned int size
);
382 #endif /* DHD_LOG_DUMP */
384 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
385 #include <linux/workqueue.h>
386 #include <linux/pm_runtime.h>
387 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
389 #ifdef DHD_DEBUG_UART
390 #include <linux/kmod.h>
391 #define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
392 static void dhd_debug_uart_exec_rd(void *handle
, void *event_info
, u8 event
);
393 static void dhd_debug_uart_exec(dhd_pub_t
*dhdp
, char *cmd
);
394 #endif /* DHD_DEBUG_UART */
396 static int dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
);
397 static struct notifier_block dhd_reboot_notifier
= {
398 .notifier_call
= dhd_reboot_callback
,
403 static int is_reboot
= 0;
406 dhd_pub_t
*g_dhd_pub
= NULL
;
408 #if defined(BT_OVER_SDIO)
409 #include "dhd_bt_interface.h"
410 #endif /* defined (BT_OVER_SDIO) */
413 static int dhd_trace_open_proc(struct inode
*inode
, struct file
*file
);
414 ssize_t
dhd_trace_read_proc(struct file
*file
, char *buffer
, size_t tt
, loff_t
*loff
);
416 static const struct file_operations proc_file_fops
= {
417 .read
= dhd_trace_read_proc
,
418 .open
= dhd_trace_open_proc
,
419 .release
= seq_release
,
424 bool dhd_is_static_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
);
425 #endif /* WL_STATIC_IF */
427 atomic_t exit_in_progress
= ATOMIC_INIT(0);
429 static void dhd_process_daemon_msg(struct sk_buff
*skb
);
430 static void dhd_destroy_to_notifier_skt(void);
431 static int dhd_create_to_notifier_skt(void);
432 static struct sock
*nl_to_event_sk
= NULL
;
435 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
436 struct netlink_kernel_cfg dhd_netlink_cfg
= {
438 .input
= dhd_process_daemon_msg
,
440 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
442 #if defined(BT_OVER_SDIO)
443 /* Flag to indicate if driver is initialized */
444 uint dhd_driver_init_done
= TRUE
;
446 /* Flag to indicate if driver is initialized */
447 uint dhd_driver_init_done
= FALSE
;
449 /* Flag to indicate if we should download firmware on driver load */
450 uint dhd_download_fw_on_driverload
= TRUE
;
452 /* Definitions to provide path to the firmware and nvram
453 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
455 char firmware_path
[MOD_PARAM_PATHLEN
];
456 char nvram_path
[MOD_PARAM_PATHLEN
];
457 char clm_path
[MOD_PARAM_PATHLEN
];
458 #ifdef DHD_UCODE_DOWNLOAD
459 char ucode_path
[MOD_PARAM_PATHLEN
];
460 #endif /* DHD_UCODE_DOWNLOAD */
462 module_param_string(clm_path
, clm_path
, MOD_PARAM_PATHLEN
, 0660);
464 /* backup buffer for firmware and nvram path */
465 char fw_bak_path
[MOD_PARAM_PATHLEN
];
466 char nv_bak_path
[MOD_PARAM_PATHLEN
];
468 /* information string to keep firmware, chip, chip-revision version info visible from log */
469 char info_string
[MOD_PARAM_INFOLEN
];
470 module_param_string(info_string
, info_string
, MOD_PARAM_INFOLEN
, 0444);
472 int disable_proptx
= 0;
473 module_param(op_mode
, int, 0644);
474 extern int wl_control_wl_start(struct net_device
*dev
);
475 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
476 struct semaphore dhd_registration_sem
;
477 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
480 int logdump_max_filesize
= LOG_DUMP_MAX_FILESIZE
;
481 module_param(logdump_max_filesize
, int, 0644);
482 int logdump_max_bufsize
= LOG_DUMP_GENERAL_MAX_BUFSIZE
;
483 module_param(logdump_max_bufsize
, int, 0644);
484 int logdump_prsrv_tailsize
= DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE
;
485 int logdump_periodic_flush
= FALSE
;
486 module_param(logdump_periodic_flush
, int, 0644);
487 #ifdef DEBUGABILITY_ECNTRS_LOGGING
488 int logdump_ecntr_enable
= TRUE
;
490 int logdump_ecntr_enable
= FALSE
;
491 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
492 module_param(logdump_ecntr_enable
, int, 0644);
493 #endif /* DHD_LOG_DUMP */
495 /* deferred handlers */
496 static void dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
);
497 static void dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
);
498 static void dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
);
499 static void dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
);
501 static void dhd_natoe_ct_event_hanlder(void *handle
, void *event_info
, u8 event
);
502 static void dhd_natoe_ct_ioctl_handler(void *handle
, void *event_info
, uint8 event
);
503 #endif /* WL_NATOE */
505 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
506 static void dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
);
507 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
509 extern void dhd_netdev_free(struct net_device
*ndev
);
510 #endif /* WL_CFG80211 */
511 static dhd_if_t
* dhd_get_ifp_by_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
);
513 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
514 /* update rx_pkt_chainable state of dhd interface */
515 static void dhd_update_rx_pkt_chainable_state(dhd_pub_t
* dhdp
, uint32 idx
);
516 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
519 module_param(dhd_msg_level
, int, 0);
521 #ifdef ARP_OFFLOAD_SUPPORT
522 /* ARP offload enable */
523 uint dhd_arp_enable
= TRUE
;
524 module_param(dhd_arp_enable
, uint
, 0);
526 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
528 #ifdef ENABLE_ARP_SNOOP_MODE
529 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
| ARP_OL_SNOOP
| ARP_OL_HOST_AUTO_REPLY
;
531 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
;
532 #endif /* ENABLE_ARP_SNOOP_MODE */
534 module_param(dhd_arp_mode
, uint
, 0);
535 #endif /* ARP_OFFLOAD_SUPPORT */
537 /* Disable Prop tx */
538 module_param(disable_proptx
, int, 0644);
539 /* load firmware and/or nvram values from the filesystem */
540 module_param_string(firmware_path
, firmware_path
, MOD_PARAM_PATHLEN
, 0660);
541 module_param_string(nvram_path
, nvram_path
, MOD_PARAM_PATHLEN
, 0660);
542 #ifdef DHD_UCODE_DOWNLOAD
543 module_param_string(ucode_path
, ucode_path
, MOD_PARAM_PATHLEN
, 0660);
544 #endif /* DHD_UCODE_DOWNLOAD */
546 /* wl event forwarding */
548 uint wl_event_enable
= true;
550 uint wl_event_enable
= false;
551 #endif /* WL_EVENT_ENAB */
552 module_param(wl_event_enable
, uint
, 0660);
554 /* wl event forwarding */
555 #ifdef LOGTRACE_PKT_SENDUP
556 uint logtrace_pkt_sendup
= true;
558 uint logtrace_pkt_sendup
= false;
559 #endif /* LOGTRACE_PKT_SENDUP */
560 module_param(logtrace_pkt_sendup
, uint
, 0660);
562 /* Watchdog interval */
563 /* extend watchdog expiration to 2 seconds when DPC is running */
564 #define WATCHDOG_EXTEND_INTERVAL (2000)
566 uint dhd_watchdog_ms
= CUSTOM_DHD_WATCHDOG_MS
;
567 module_param(dhd_watchdog_ms
, uint
, 0);
569 #ifdef DHD_PCIE_RUNTIMEPM
570 uint dhd_runtimepm_ms
= CUSTOM_DHD_RUNTIME_MS
;
571 #endif /* DHD_PCIE_RUNTIMEPMT */
572 #if defined(DHD_DEBUG)
573 /* Console poll interval */
574 uint dhd_console_ms
= 0;
575 module_param(dhd_console_ms
, uint
, 0644);
577 uint dhd_console_ms
= 0;
578 #endif /* DHD_DEBUG */
580 uint dhd_slpauto
= TRUE
;
581 module_param(dhd_slpauto
, uint
, 0);
583 #ifdef PKT_FILTER_SUPPORT
584 /* Global Pkt filter enable control */
585 uint dhd_pkt_filter_enable
= TRUE
;
586 module_param(dhd_pkt_filter_enable
, uint
, 0);
589 /* Pkt filter init setup */
590 uint dhd_pkt_filter_init
= 0;
591 module_param(dhd_pkt_filter_init
, uint
, 0);
593 /* Pkt filter mode control */
594 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
595 uint dhd_master_mode
= FALSE
;
597 uint dhd_master_mode
= TRUE
;
598 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
599 module_param(dhd_master_mode
, uint
, 0);
601 int dhd_watchdog_prio
= 0;
602 module_param(dhd_watchdog_prio
, int, 0);
604 /* DPC thread priority */
605 int dhd_dpc_prio
= CUSTOM_DPC_PRIO_SETTING
;
606 module_param(dhd_dpc_prio
, int, 0);
608 /* RX frame thread priority */
609 int dhd_rxf_prio
= CUSTOM_RXF_PRIO_SETTING
;
610 module_param(dhd_rxf_prio
, int, 0);
612 #if !defined(BCMDHDUSB)
613 extern int dhd_dongle_ramsize
;
614 module_param(dhd_dongle_ramsize
, int, 0);
615 #endif /* BCMDHDUSB */
618 int passive_channel_skip
= 0;
619 module_param(passive_channel_skip
, int, (S_IRUSR
|S_IWUSR
));
620 #endif /* WL_CFG80211 */
622 #ifdef DHD_MSI_SUPPORT
623 uint enable_msi
= TRUE
;
624 module_param(enable_msi
, uint
, 0);
625 #endif /* PCIE_FULL_DONGLE */
627 /* Keep track of number of instances */
628 static int dhd_found
= 0;
629 static int instance_base
= 0; /* Starting instance number */
630 module_param(instance_base
, int, 0644);
632 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
633 static int dhd_napi_weight
= 32;
634 module_param(dhd_napi_weight
, int, 0644);
635 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
637 #ifdef PCIE_FULL_DONGLE
638 extern int h2d_max_txpost
;
639 module_param(h2d_max_txpost
, int, 0644);
641 extern uint dma_ring_indices
;
642 module_param(dma_ring_indices
, uint
, 0644);
644 extern bool h2d_phase
;
645 module_param(h2d_phase
, bool, 0644);
646 extern bool force_trap_bad_h2d_phase
;
647 module_param(force_trap_bad_h2d_phase
, bool, 0644);
648 #endif /* PCIE_FULL_DONGLE */
652 struct iphdr ip_header
;
653 struct udphdr udp_header
;
658 uint32 transaction_id
;
665 uint8 hw_address
[16];
666 uint8 server_name
[64];
667 uint8 file_name
[128];
671 static const uint8 bootp_magic_cookie
[4] = { 99, 130, 83, 99 };
672 static const char dhcp_ops
[][10] = {
673 "NA", "REQUEST", "REPLY"
675 static const char dhcp_types
[][10] = {
676 "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
678 static void dhd_dhcp_dump(char *ifname
, uint8
*pktdata
, bool tx
);
679 #endif /* DHD_DHCP_DUMP */
682 #include <net/icmp.h>
683 static void dhd_icmp_dump(char *ifname
, uint8
*pktdata
, bool tx
);
684 #endif /* DHD_ICMP_DUMP */
687 #if defined(CUSTOMER_HW4_DEBUG)
688 static char *logstrs_path
= PLATFORM_PATH
"logstrs.bin";
689 char *st_str_file_path
= PLATFORM_PATH
"rtecdc.bin";
690 static char *map_file_path
= PLATFORM_PATH
"rtecdc.map";
691 static char *rom_st_str_file_path
= PLATFORM_PATH
"roml.bin";
692 static char *rom_map_file_path
= PLATFORM_PATH
"roml.map";
694 static char *logstrs_path
= "/installmedia/logstrs.bin";
695 char *st_str_file_path
= "/installmedia/rtecdc.bin";
696 static char *map_file_path
= "/installmedia/rtecdc.map";
697 static char *rom_st_str_file_path
= "/installmedia/roml.bin";
698 static char *rom_map_file_path
= "/installmedia/roml.map";
700 static char *ram_file_str
= "rtecdc";
701 static char *rom_file_str
= "roml";
703 module_param(logstrs_path
, charp
, S_IRUGO
);
704 module_param(st_str_file_path
, charp
, S_IRUGO
);
705 module_param(map_file_path
, charp
, S_IRUGO
);
706 module_param(rom_st_str_file_path
, charp
, S_IRUGO
);
707 module_param(rom_map_file_path
, charp
, S_IRUGO
);
709 static int dhd_init_logstrs_array(osl_t
*osh
, dhd_event_log_t
*temp
);
710 static int dhd_read_map(osl_t
*osh
, char *fname
, uint32
*ramstart
, uint32
*rodata_start
,
712 static int dhd_init_static_strs_array(osl_t
*osh
, dhd_event_log_t
*temp
, char *str_file
,
714 #endif /* SHOW_LOGTRACE */
717 void dhd_d2h_minidump(dhd_pub_t
*dhdp
);
718 #endif /* D2H_MINIDUMP */
720 #ifdef DHDTCPSYNC_FLOOD_BLK
721 extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t
*ifp
);
722 #endif /* DHDTCPSYNC_FLOOD_BLK */
727 dhd_lb_set_default_cpus(dhd_info_t
*dhd
)
729 /* Default CPU allocation for the jobs */
730 atomic_set(&dhd
->rx_napi_cpu
, 1);
731 atomic_set(&dhd
->rx_compl_cpu
, 2);
732 atomic_set(&dhd
->tx_compl_cpu
, 2);
733 atomic_set(&dhd
->tx_cpu
, 2);
734 atomic_set(&dhd
->net_tx_cpu
, 0);
738 dhd_cpumasks_deinit(dhd_info_t
*dhd
)
740 free_cpumask_var(dhd
->cpumask_curr_avail
);
741 free_cpumask_var(dhd
->cpumask_primary
);
742 free_cpumask_var(dhd
->cpumask_primary_new
);
743 free_cpumask_var(dhd
->cpumask_secondary
);
744 free_cpumask_var(dhd
->cpumask_secondary_new
);
748 dhd_cpumasks_init(dhd_info_t
*dhd
)
751 uint32 cpus
, num_cpus
= num_possible_cpus();
754 DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__
,
755 DHD_LB_PRIMARY_CPUS
, DHD_LB_SECONDARY_CPUS
));
757 if (!alloc_cpumask_var(&dhd
->cpumask_curr_avail
, GFP_KERNEL
) ||
758 !alloc_cpumask_var(&dhd
->cpumask_primary
, GFP_KERNEL
) ||
759 !alloc_cpumask_var(&dhd
->cpumask_primary_new
, GFP_KERNEL
) ||
760 !alloc_cpumask_var(&dhd
->cpumask_secondary
, GFP_KERNEL
) ||
761 !alloc_cpumask_var(&dhd
->cpumask_secondary_new
, GFP_KERNEL
)) {
762 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__
));
767 cpumask_copy(dhd
->cpumask_curr_avail
, cpu_online_mask
);
768 cpumask_clear(dhd
->cpumask_primary
);
769 cpumask_clear(dhd
->cpumask_secondary
);
772 DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__
, num_cpus
));
776 cpus
= DHD_LB_PRIMARY_CPUS
;
777 for (id
= 0; id
< num_cpus
; id
++) {
778 if (isset(&cpus
, id
))
779 cpumask_set_cpu(id
, dhd
->cpumask_primary
);
782 cpus
= DHD_LB_SECONDARY_CPUS
;
783 for (id
= 0; id
< num_cpus
; id
++) {
784 if (isset(&cpus
, id
))
785 cpumask_set_cpu(id
, dhd
->cpumask_secondary
);
790 dhd_cpumasks_deinit(dhd
);
795 * The CPU Candidacy Algorithm
796 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
797 * The available CPUs for selection are divided into two groups
798 * Primary Set - A CPU mask that carries the First Choice CPUs
799 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
801 * There are two types of Job, that needs to be assigned to
802 * the CPUs, from one of the above mentioned CPU group. The Jobs are
803 * 1) Rx Packet Processing - napi_cpu
804 * 2) Completion Processing (Tx, RX) - compl_cpu
806 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
807 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
808 * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
809 * If there are more processors free, it assigns one to compl_cpu.
810 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
811 * CPU, as much as possible.
813 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
814 * would allow Tx completion skb's to be released into a local free pool from
815 * which the rx buffer posts could have been serviced. It is important to note
816 * that a Tx packet may not have a large enough buffer for rx posting.
818 void dhd_select_cpu_candidacy(dhd_info_t
*dhd
)
820 uint32 primary_available_cpus
; /* count of primary available cpus */
821 uint32 secondary_available_cpus
; /* count of secondary available cpus */
822 uint32 napi_cpu
= 0; /* cpu selected for napi rx processing */
823 uint32 compl_cpu
= 0; /* cpu selected for completion jobs */
824 uint32 tx_cpu
= 0; /* cpu selected for tx processing job */
826 cpumask_clear(dhd
->cpumask_primary_new
);
827 cpumask_clear(dhd
->cpumask_secondary_new
);
830 * Now select from the primary mask. Even if a Job is
831 * already running on a CPU in secondary group, we still move
832 * to primary CPU. So no conditional checks.
834 cpumask_and(dhd
->cpumask_primary_new
, dhd
->cpumask_primary
,
835 dhd
->cpumask_curr_avail
);
837 cpumask_and(dhd
->cpumask_secondary_new
, dhd
->cpumask_secondary
,
838 dhd
->cpumask_curr_avail
);
840 primary_available_cpus
= cpumask_weight(dhd
->cpumask_primary_new
);
842 if (primary_available_cpus
> 0) {
843 napi_cpu
= cpumask_first(dhd
->cpumask_primary_new
);
845 /* If no further CPU is available,
846 * cpumask_next returns >= nr_cpu_ids
848 tx_cpu
= cpumask_next(napi_cpu
, dhd
->cpumask_primary_new
);
849 if (tx_cpu
>= nr_cpu_ids
)
852 /* In case there are no more CPUs, do completions & Tx in same CPU */
853 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_primary_new
);
854 if (compl_cpu
>= nr_cpu_ids
)
858 DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
859 __FUNCTION__
, napi_cpu
, compl_cpu
, tx_cpu
));
861 /* -- Now check for the CPUs from the secondary mask -- */
862 secondary_available_cpus
= cpumask_weight(dhd
->cpumask_secondary_new
);
864 DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
865 __FUNCTION__
, secondary_available_cpus
, nr_cpu_ids
));
867 if (secondary_available_cpus
> 0) {
868 /* At this point if napi_cpu is unassigned it means no CPU
869 * is online from Primary Group
872 napi_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
873 tx_cpu
= cpumask_next(napi_cpu
, dhd
->cpumask_secondary_new
);
874 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_secondary_new
);
875 } else if (tx_cpu
== 0) {
876 tx_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
877 compl_cpu
= cpumask_next(tx_cpu
, dhd
->cpumask_secondary_new
);
878 } else if (compl_cpu
== 0) {
879 compl_cpu
= cpumask_first(dhd
->cpumask_secondary_new
);
882 /* If no CPU was available for tx processing, choose CPU 0 */
883 if (tx_cpu
>= nr_cpu_ids
)
886 /* If no CPU was available for completion, choose CPU 0 */
887 if (compl_cpu
>= nr_cpu_ids
)
890 if ((primary_available_cpus
== 0) &&
891 (secondary_available_cpus
== 0)) {
892 /* No CPUs available from primary or secondary mask */
898 DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
899 __FUNCTION__
, napi_cpu
, compl_cpu
, tx_cpu
));
901 ASSERT(napi_cpu
< nr_cpu_ids
);
902 ASSERT(compl_cpu
< nr_cpu_ids
);
903 ASSERT(tx_cpu
< nr_cpu_ids
);
905 atomic_set(&dhd
->rx_napi_cpu
, napi_cpu
);
906 atomic_set(&dhd
->tx_compl_cpu
, compl_cpu
);
907 atomic_set(&dhd
->rx_compl_cpu
, compl_cpu
);
908 atomic_set(&dhd
->tx_cpu
, tx_cpu
);
914 * Function to handle CPU Hotplug notifications.
915 * One of the tasks it does is to trigger the CPU Candidacy algorithm
916 * for load balancing.
919 dhd_cpu_callback(struct notifier_block
*nfb
, unsigned long action
, void *hcpu
)
921 unsigned long int cpu
= (unsigned long int)hcpu
;
923 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
924 #pragma GCC diagnostic push
925 #pragma GCC diagnostic ignored "-Wcast-qual"
927 dhd_info_t
*dhd
= container_of(nfb
, dhd_info_t
, cpu_notifier
);
928 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
929 #pragma GCC diagnostic pop
932 if (!dhd
|| !(dhd
->dhd_state
& DHD_ATTACH_STATE_LB_ATTACH_DONE
)) {
933 DHD_INFO(("%s(): LB data is not initialized yet.\n",
941 case CPU_ONLINE_FROZEN
:
942 DHD_LB_STATS_INCR(dhd
->cpu_online_cnt
[cpu
]);
943 cpumask_set_cpu(cpu
, dhd
->cpumask_curr_avail
);
944 dhd_select_cpu_candidacy(dhd
);
947 case CPU_DOWN_PREPARE
:
948 case CPU_DOWN_PREPARE_FROZEN
:
949 DHD_LB_STATS_INCR(dhd
->cpu_offline_cnt
[cpu
]);
950 cpumask_clear_cpu(cpu
, dhd
->cpumask_curr_avail
);
951 dhd_select_cpu_candidacy(dhd
);
960 #if defined(DHD_LB_STATS)
961 void dhd_lb_stats_init(dhd_pub_t
*dhdp
)
964 int i
, j
, num_cpus
= num_possible_cpus();
965 int alloc_size
= sizeof(uint32
) * num_cpus
;
968 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
975 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
979 DHD_LB_STATS_CLR(dhd
->dhd_dpc_cnt
);
980 DHD_LB_STATS_CLR(dhd
->napi_sched_cnt
);
982 dhd
->napi_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
983 if (!dhd
->napi_percpu_run_cnt
) {
984 DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
988 for (i
= 0; i
< num_cpus
; i
++)
989 DHD_LB_STATS_CLR(dhd
->napi_percpu_run_cnt
[i
]);
991 DHD_LB_STATS_CLR(dhd
->rxc_sched_cnt
);
993 dhd
->rxc_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
994 if (!dhd
->rxc_percpu_run_cnt
) {
995 DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
999 for (i
= 0; i
< num_cpus
; i
++)
1000 DHD_LB_STATS_CLR(dhd
->rxc_percpu_run_cnt
[i
]);
1002 DHD_LB_STATS_CLR(dhd
->txc_sched_cnt
);
1004 dhd
->txc_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1005 if (!dhd
->txc_percpu_run_cnt
) {
1006 DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
1010 for (i
= 0; i
< num_cpus
; i
++)
1011 DHD_LB_STATS_CLR(dhd
->txc_percpu_run_cnt
[i
]);
1013 dhd
->cpu_online_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1014 if (!dhd
->cpu_online_cnt
) {
1015 DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
1019 for (i
= 0; i
< num_cpus
; i
++)
1020 DHD_LB_STATS_CLR(dhd
->cpu_online_cnt
[i
]);
1022 dhd
->cpu_offline_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1023 if (!dhd
->cpu_offline_cnt
) {
1024 DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
1028 for (i
= 0; i
< num_cpus
; i
++)
1029 DHD_LB_STATS_CLR(dhd
->cpu_offline_cnt
[i
]);
1031 dhd
->txp_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1032 if (!dhd
->txp_percpu_run_cnt
) {
1033 DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
1037 for (i
= 0; i
< num_cpus
; i
++)
1038 DHD_LB_STATS_CLR(dhd
->txp_percpu_run_cnt
[i
]);
1040 dhd
->tx_start_percpu_run_cnt
= (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1041 if (!dhd
->tx_start_percpu_run_cnt
) {
1042 DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
1046 for (i
= 0; i
< num_cpus
; i
++)
1047 DHD_LB_STATS_CLR(dhd
->tx_start_percpu_run_cnt
[i
]);
1049 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1050 dhd
->napi_rx_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1051 if (!dhd
->napi_rx_hist
[j
]) {
1052 DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
1056 for (i
= 0; i
< num_cpus
; i
++) {
1057 DHD_LB_STATS_CLR(dhd
->napi_rx_hist
[j
][i
]);
1061 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1062 dhd
->txc_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1063 if (!dhd
->txc_hist
[j
]) {
1064 DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
1068 for (i
= 0; i
< num_cpus
; i
++) {
1069 DHD_LB_STATS_CLR(dhd
->txc_hist
[j
][i
]);
1072 #endif /* DHD_LB_TXC */
1074 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1075 dhd
->rxc_hist
[j
] = (uint32
*)MALLOC(dhdp
->osh
, alloc_size
);
1076 if (!dhd
->rxc_hist
[j
]) {
1077 DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
1081 for (i
= 0; i
< num_cpus
; i
++) {
1082 DHD_LB_STATS_CLR(dhd
->rxc_hist
[j
][i
]);
1085 #endif /* DHD_LB_RXC */
1089 void dhd_lb_stats_deinit(dhd_pub_t
*dhdp
)
1092 int j
, num_cpus
= num_possible_cpus();
1093 int alloc_size
= sizeof(uint32
) * num_cpus
;
1096 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
1103 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1107 if (dhd
->napi_percpu_run_cnt
) {
1108 MFREE(dhdp
->osh
, dhd
->napi_percpu_run_cnt
, alloc_size
);
1109 dhd
->napi_percpu_run_cnt
= NULL
;
1111 if (dhd
->rxc_percpu_run_cnt
) {
1112 MFREE(dhdp
->osh
, dhd
->rxc_percpu_run_cnt
, alloc_size
);
1113 dhd
->rxc_percpu_run_cnt
= NULL
;
1115 if (dhd
->txc_percpu_run_cnt
) {
1116 MFREE(dhdp
->osh
, dhd
->txc_percpu_run_cnt
, alloc_size
);
1117 dhd
->txc_percpu_run_cnt
= NULL
;
1119 if (dhd
->cpu_online_cnt
) {
1120 MFREE(dhdp
->osh
, dhd
->cpu_online_cnt
, alloc_size
);
1121 dhd
->cpu_online_cnt
= NULL
;
1123 if (dhd
->cpu_offline_cnt
) {
1124 MFREE(dhdp
->osh
, dhd
->cpu_offline_cnt
, alloc_size
);
1125 dhd
->cpu_offline_cnt
= NULL
;
1128 if (dhd
->txp_percpu_run_cnt
) {
1129 MFREE(dhdp
->osh
, dhd
->txp_percpu_run_cnt
, alloc_size
);
1130 dhd
->txp_percpu_run_cnt
= NULL
;
1132 if (dhd
->tx_start_percpu_run_cnt
) {
1133 MFREE(dhdp
->osh
, dhd
->tx_start_percpu_run_cnt
, alloc_size
);
1134 dhd
->tx_start_percpu_run_cnt
= NULL
;
1137 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1138 if (dhd
->napi_rx_hist
[j
]) {
1139 MFREE(dhdp
->osh
, dhd
->napi_rx_hist
[j
], alloc_size
);
1140 dhd
->napi_rx_hist
[j
] = NULL
;
1143 if (dhd
->txc_hist
[j
]) {
1144 MFREE(dhdp
->osh
, dhd
->txc_hist
[j
], alloc_size
);
1145 dhd
->txc_hist
[j
] = NULL
;
1147 #endif /* DHD_LB_TXC */
1149 if (dhd
->rxc_hist
[j
]) {
1150 MFREE(dhdp
->osh
, dhd
->rxc_hist
[j
], alloc_size
);
1151 dhd
->rxc_hist
[j
] = NULL
;
1153 #endif /* DHD_LB_RXC */
1159 static void dhd_lb_stats_dump_histo(dhd_pub_t
*dhdp
,
1160 struct bcmstrbuf
*strbuf
, uint32
**hist
)
1163 uint32
*per_cpu_total
;
1165 uint32 num_cpus
= num_possible_cpus();
1167 per_cpu_total
= (uint32
*)MALLOC(dhdp
->osh
, sizeof(uint32
) * num_cpus
);
1168 if (!per_cpu_total
) {
1169 DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__
));
1172 bzero(per_cpu_total
, sizeof(uint32
) * num_cpus
);
1174 bcm_bprintf(strbuf
, "CPU: \t\t");
1175 for (i
= 0; i
< num_cpus
; i
++)
1176 bcm_bprintf(strbuf
, "%d\t", i
);
1177 bcm_bprintf(strbuf
, "\nBin\n");
1179 for (i
= 0; i
< HIST_BIN_SIZE
; i
++) {
1180 bcm_bprintf(strbuf
, "%d:\t\t", 1<<i
);
1181 for (j
= 0; j
< num_cpus
; j
++) {
1182 bcm_bprintf(strbuf
, "%d\t", hist
[i
][j
]);
1184 bcm_bprintf(strbuf
, "\n");
1186 bcm_bprintf(strbuf
, "Per CPU Total \t");
1188 for (i
= 0; i
< num_cpus
; i
++) {
1189 for (j
= 0; j
< HIST_BIN_SIZE
; j
++) {
1190 per_cpu_total
[i
] += (hist
[j
][i
] * (1<<j
));
1192 bcm_bprintf(strbuf
, "%d\t", per_cpu_total
[i
]);
1193 total
+= per_cpu_total
[i
];
1195 bcm_bprintf(strbuf
, "\nTotal\t\t%d \n", total
);
1197 if (per_cpu_total
) {
1198 MFREE(dhdp
->osh
, per_cpu_total
, sizeof(uint32
) * num_cpus
);
1199 per_cpu_total
= NULL
;
1204 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf
*strbuf
, uint32
*p
)
1206 int i
, num_cpus
= num_possible_cpus();
1208 bcm_bprintf(strbuf
, "CPU: \t");
1209 for (i
= 0; i
< num_cpus
; i
++)
1210 bcm_bprintf(strbuf
, "%d\t", i
);
1211 bcm_bprintf(strbuf
, "\n");
1213 bcm_bprintf(strbuf
, "Val: \t");
1214 for (i
= 0; i
< num_cpus
; i
++)
1215 bcm_bprintf(strbuf
, "%u\t", *(p
+i
));
1216 bcm_bprintf(strbuf
, "\n");
1220 void dhd_lb_stats_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
1224 if (dhdp
== NULL
|| strbuf
== NULL
) {
1225 DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
1226 __FUNCTION__
, dhdp
, strbuf
));
1232 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__
));
1236 bcm_bprintf(strbuf
, "\ncpu_online_cnt:\n");
1237 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->cpu_online_cnt
);
1239 bcm_bprintf(strbuf
, "\ncpu_offline_cnt:\n");
1240 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->cpu_offline_cnt
);
1242 bcm_bprintf(strbuf
, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
1243 dhd
->dhd_dpc_cnt
, dhd
->napi_sched_cnt
, dhd
->rxc_sched_cnt
,
1244 dhd
->txc_sched_cnt
);
1247 bcm_bprintf(strbuf
, "\nnapi_percpu_run_cnt:\n");
1248 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->napi_percpu_run_cnt
);
1249 bcm_bprintf(strbuf
, "\nNAPI Packets Received Histogram:\n");
1250 dhd_lb_stats_dump_histo(dhdp
, strbuf
, dhd
->napi_rx_hist
);
1251 #endif /* DHD_LB_RXP */
1254 bcm_bprintf(strbuf
, "\nrxc_percpu_run_cnt:\n");
1255 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->rxc_percpu_run_cnt
);
1256 bcm_bprintf(strbuf
, "\nRX Completions (Buffer Post) Histogram:\n");
1257 dhd_lb_stats_dump_histo(dhdp
, strbuf
, dhd
->rxc_hist
);
1258 #endif /* DHD_LB_RXC */
1261 bcm_bprintf(strbuf
, "\ntxc_percpu_run_cnt:\n");
1262 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->txc_percpu_run_cnt
);
1263 bcm_bprintf(strbuf
, "\nTX Completions (Buffer Free) Histogram:\n");
1264 dhd_lb_stats_dump_histo(dhdp
, strbuf
, dhd
->txc_hist
);
1265 #endif /* DHD_LB_TXC */
1268 bcm_bprintf(strbuf
, "\ntxp_percpu_run_cnt:\n");
1269 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->txp_percpu_run_cnt
);
1271 bcm_bprintf(strbuf
, "\ntx_start_percpu_run_cnt:\n");
1272 dhd_lb_stats_dump_cpu_array(strbuf
, dhd
->tx_start_percpu_run_cnt
);
1273 #endif /* DHD_LB_TXP */
1276 /* Given a number 'n' returns 'm' that is next larger power of 2 after n */
1277 static inline uint32
next_larger_power2(uint32 num
)
1289 static void dhd_lb_stats_update_histo(uint32
**bin
, uint32 count
, uint32 cpu
)
1293 bin_power
= next_larger_power2(count
);
1295 switch (bin_power
) {
1296 case 1: p
= bin
[0] + cpu
; break;
1297 case 2: p
= bin
[1] + cpu
; break;
1298 case 4: p
= bin
[2] + cpu
; break;
1299 case 8: p
= bin
[3] + cpu
; break;
1300 case 16: p
= bin
[4] + cpu
; break;
1301 case 32: p
= bin
[5] + cpu
; break;
1302 case 64: p
= bin
[6] + cpu
; break;
1303 case 128: p
= bin
[7] + cpu
; break;
1304 default : p
= bin
[8] + cpu
; break;
1311 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t
*dhdp
, uint32 count
)
1314 dhd_info_t
*dhd
= dhdp
->info
;
1318 dhd_lb_stats_update_histo(dhd
->napi_rx_hist
, count
, cpu
);
1323 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t
*dhdp
, uint32 count
)
1326 dhd_info_t
*dhd
= dhdp
->info
;
1330 dhd_lb_stats_update_histo(dhd
->txc_hist
, count
, cpu
);
1335 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t
*dhdp
, uint32 count
)
1338 dhd_info_t
*dhd
= dhdp
->info
;
1342 dhd_lb_stats_update_histo(dhd
->rxc_hist
, count
, cpu
);
1347 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t
*dhdp
)
1349 dhd_info_t
*dhd
= dhdp
->info
;
1350 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txc_percpu_run_cnt
);
1353 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t
*dhdp
)
1355 dhd_info_t
*dhd
= dhdp
->info
;
1356 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->rxc_percpu_run_cnt
);
1358 #endif /* DHD_LB_STATS */
1362 #ifdef USE_WFA_CERT_CONF
1363 int g_frameburst
= 1;
1364 #endif /* USE_WFA_CERT_CONF */
1366 static int dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
);
1368 /* DHD Perimeter lock only used in router with bypass forwarding. */
1369 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
1370 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
1371 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
1373 #ifdef PCIE_FULL_DONGLE
1374 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1375 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1376 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1377 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1378 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1380 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1381 static struct list_head
* dhd_sta_list_snapshot(dhd_info_t
*dhd
, dhd_if_t
*ifp
,
1382 struct list_head
*snapshot_list
);
1383 static void dhd_sta_list_snapshot_free(dhd_info_t
*dhd
, struct list_head
*snapshot_list
);
1384 #define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1385 #define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1386 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1387 #endif /* PCIE_FULL_DONGLE */
1389 /* Control fw roaming */
1391 uint dhd_roam_disable
= 0;
1393 uint dhd_roam_disable
= 0;
1397 extern void dhd_dbgfs_init(dhd_pub_t
*dhdp
);
1398 extern void dhd_dbgfs_remove(void);
1401 static uint pcie_txs_metadata_enable
= 0; /* Enable TX status metadta report */
1402 module_param(pcie_txs_metadata_enable
, int, 0);
1404 /* Control radio state */
1405 uint dhd_radio_up
= 1;
1407 /* Network interface name */
1408 char iface_name
[IFNAMSIZ
] = {'\0'};
1409 module_param_string(iface_name
, iface_name
, IFNAMSIZ
, 0);
1411 /* The following are specific to the SDIO dongle */
1413 /* IOCTL response timeout */
1414 int dhd_ioctl_timeout_msec
= IOCTL_RESP_TIMEOUT
;
1416 /* DS Exit response timeout */
1417 int ds_exit_timeout_msec
= DS_EXIT_TIMEOUT
;
1419 /* Idle timeout for backplane clock */
1420 int dhd_idletime
= DHD_IDLETIME_TICKS
;
1421 module_param(dhd_idletime
, int, 0);
1424 uint dhd_poll
= FALSE
;
1425 module_param(dhd_poll
, uint
, 0);
1427 /* Use interrupts */
1428 uint dhd_intr
= TRUE
;
1429 module_param(dhd_intr
, uint
, 0);
1431 /* SDIO Drive Strength (in milliamps) */
1432 uint dhd_sdiod_drive_strength
= 6;
1433 module_param(dhd_sdiod_drive_strength
, uint
, 0);
1437 extern uint dhd_txbound
;
1438 extern uint dhd_rxbound
;
1439 module_param(dhd_txbound
, uint
, 0);
1440 module_param(dhd_rxbound
, uint
, 0);
1442 /* Deferred transmits */
1443 extern uint dhd_deferred_tx
;
1444 module_param(dhd_deferred_tx
, uint
, 0);
1446 #endif /* BCMSDIO */
1449 /* Echo packet generator (pkts/s) */
1450 uint dhd_pktgen
= 0;
1451 module_param(dhd_pktgen
, uint
, 0);
1453 /* Echo packet len (0 => sawtooth, max 2040) */
1454 uint dhd_pktgen_len
= 0;
1455 module_param(dhd_pktgen_len
, uint
, 0);
1458 #if defined(BCMSUP_4WAY_HANDSHAKE)
1459 /* Use in dongle supplicant for 4-way handshake */
1460 #if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
1461 /* Enable idsup by default (if supported in fw) */
1462 uint dhd_use_idsup
= 1;
1464 uint dhd_use_idsup
= 0;
1465 #endif /* WLFBT || WL_ENABLE_IDSUP */
1466 module_param(dhd_use_idsup
, uint
, 0);
1467 #endif /* BCMSUP_4WAY_HANDSHAKE */
1469 /* Allow delayed firmware download for debug purpose */
1470 int allow_delay_fwdl
= FALSE
;
1471 module_param(allow_delay_fwdl
, int, 0);
1473 #ifdef ECOUNTER_PERIODIC_DISABLE
1474 uint enable_ecounter
= FALSE
;
1476 uint enable_ecounter
= TRUE
;
1478 module_param(enable_ecounter
, uint
, 0);
1480 extern char dhd_version
[];
1481 extern char fw_version
[];
1482 extern char clm_version
[];
1484 int dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
);
1485 static void dhd_net_if_lock_local(dhd_info_t
*dhd
);
1486 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
);
1487 static void dhd_suspend_lock(dhd_pub_t
*dhdp
);
1488 static void dhd_suspend_unlock(dhd_pub_t
*dhdp
);
1490 /* Monitor interface */
1491 int dhd_monitor_init(void *dhd_pub
);
1492 int dhd_monitor_uninit(void);
1494 #ifdef DHD_PM_CONTROL_FROM_FILE
1496 #ifdef DHD_EXPORT_CNTL_FILE
1498 #endif /* DHD_EXPORT_CNTL_FILE */
1499 void sec_control_pm(dhd_pub_t
*dhd
, uint
*);
1500 #endif /* DHD_PM_CONTROL_FROM_FILE */
1502 #if defined(WL_WIRELESS_EXT)
1503 struct iw_statistics
*dhd_get_wireless_stats(struct net_device
*dev
);
1504 #endif /* defined(WL_WIRELESS_EXT) */
1506 static void dhd_dpc(ulong data
);
1508 extern int dhd_wait_pend8021x(struct net_device
*dev
);
1509 void dhd_os_wd_timer_extend(void *bus
, bool extend
);
1513 #error TOE requires BDC
1515 static int dhd_toe_get(dhd_info_t
*dhd
, int idx
, uint32
*toe_ol
);
1516 static int dhd_toe_set(dhd_info_t
*dhd
, int idx
, uint32 toe_ol
);
1519 static int dhd_wl_host_event(dhd_info_t
*dhd
, int ifidx
, void *pktdata
, uint16 pktlen
,
1520 wl_event_msg_t
*event_ptr
, void **data_ptr
);
1522 #if defined(CONFIG_PM_SLEEP)
1523 static int dhd_pm_callback(struct notifier_block
*nfb
, unsigned long action
, void *ignored
)
1525 int ret
= NOTIFY_DONE
;
1526 bool suspend
= FALSE
;
1528 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1529 #pragma GCC diagnostic push
1530 #pragma GCC diagnostic ignored "-Wcast-qual"
1532 dhd_info_t
*dhdinfo
= (dhd_info_t
*)container_of(nfb
, struct dhd_info
, pm_notifier
);
1533 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1534 #pragma GCC diagnostic pop
1537 BCM_REFERENCE(dhdinfo
);
1538 BCM_REFERENCE(suspend
);
1541 case PM_HIBERNATION_PREPARE
:
1542 case PM_SUSPEND_PREPARE
:
1546 case PM_POST_HIBERNATION
:
1547 case PM_POST_SUSPEND
:
1552 #if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
1554 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo
->pub
);
1555 dhd_wlfc_suspend(&dhdinfo
->pub
);
1556 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo
->pub
);
1558 dhd_wlfc_resume(&dhdinfo
->pub
);
1560 #endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
1562 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
1563 KERNEL_VERSION(2, 6, 39))
1564 dhd_mmc_suspend
= suspend
;
1571 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
1572 * created in kernel notifier link list (with 'next' pointing to itself)
1574 static bool dhd_pm_notifier_registered
= FALSE
;
1576 extern int register_pm_notifier(struct notifier_block
*nb
);
1577 extern int unregister_pm_notifier(struct notifier_block
*nb
);
1578 #endif /* CONFIG_PM_SLEEP */
1580 /* Request scheduling of the bus rx frame */
1581 static void dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
);
1582 static void dhd_os_rxflock(dhd_pub_t
*pub
);
1583 static void dhd_os_rxfunlock(dhd_pub_t
*pub
);
1585 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1586 typedef struct dhd_dev_priv
{
1587 dhd_info_t
* dhd
; /* cached pointer to dhd_info in netdevice priv */
1588 dhd_if_t
* ifp
; /* cached pointer to dhd_if in netdevice priv */
1589 int ifidx
; /* interface index */
1593 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
1594 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
1595 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1596 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1597 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1598 #define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
1600 #if defined(DHD_OF_SUPPORT)
1601 extern int dhd_wlan_init(void);
1602 #endif /* defined(DHD_OF_SUPPORT) */
1603 /** Clear the dhd net_device's private structure. */
1605 dhd_dev_priv_clear(struct net_device
* dev
)
1607 dhd_dev_priv_t
* dev_priv
;
1608 ASSERT(dev
!= (struct net_device
*)NULL
);
1609 dev_priv
= DHD_DEV_PRIV(dev
);
1610 dev_priv
->dhd
= (dhd_info_t
*)NULL
;
1611 dev_priv
->ifp
= (dhd_if_t
*)NULL
;
1612 dev_priv
->ifidx
= DHD_BAD_IF
;
1613 dev_priv
->lkup
= (void *)NULL
;
1616 /** Setup the dhd net_device's private structure. */
1618 dhd_dev_priv_save(struct net_device
* dev
, dhd_info_t
* dhd
, dhd_if_t
* ifp
,
1621 dhd_dev_priv_t
* dev_priv
;
1622 ASSERT(dev
!= (struct net_device
*)NULL
);
1623 dev_priv
= DHD_DEV_PRIV(dev
);
1624 dev_priv
->dhd
= dhd
;
1625 dev_priv
->ifp
= ifp
;
1626 dev_priv
->ifidx
= ifidx
;
1629 /* Return interface pointer */
1630 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
)
1632 ASSERT(ifidx
< DHD_MAX_IFS
);
1634 if (ifidx
>= DHD_MAX_IFS
)
1637 return dhdp
->info
->iflist
[ifidx
];
1640 #ifdef PCIE_FULL_DONGLE
1642 /** Dummy objects are defined with state representing bad|down.
1643 * Performance gains from reducing branch conditionals, instruction parallelism,
1644 * dual issue, reducing load shadows, avail of larger pipelines.
1645 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
1646 * is accessed via the dhd_sta_t.
1649 /* Dummy dhd_info object */
1650 dhd_info_t dhd_info_null
= {
1652 .info
= &dhd_info_null
,
1653 #ifdef DHDTCPACK_SUPPRESS
1654 .tcpack_sup_mode
= TCPACK_SUP_REPLACE
,
1655 #endif /* DHDTCPACK_SUPPRESS */
1657 .busstate
= DHD_BUS_DOWN
1660 #define DHD_INFO_NULL (&dhd_info_null)
1661 #define DHD_PUB_NULL (&dhd_info_null.pub)
1663 /* Dummy netdevice object */
1664 struct net_device dhd_net_dev_null
= {
1665 .reg_state
= NETREG_UNREGISTERED
1667 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1669 /* Dummy dhd_if object */
1670 dhd_if_t dhd_if_null
= {
1672 .wmf
= { .wmf_enable
= TRUE
},
1674 .info
= DHD_INFO_NULL
,
1675 .net
= DHD_NET_DEV_NULL
,
1678 #define DHD_IF_NULL (&dhd_if_null)
1680 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
1682 /** Interface STA list management. */
1684 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1685 static void dhd_sta_free(dhd_pub_t
*pub
, dhd_sta_t
*sta
);
1686 static dhd_sta_t
* dhd_sta_alloc(dhd_pub_t
* dhdp
);
1688 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1689 static void dhd_if_del_sta_list(dhd_if_t
* ifp
);
1690 static void dhd_if_flush_sta(dhd_if_t
* ifp
);
1692 /* Construct/Destruct a sta pool. */
1693 static int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
);
1694 static void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
);
1695 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1696 static void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
);
1698 /** Reset a dhd_sta object and free into the dhd pool. */
1700 dhd_sta_free(dhd_pub_t
* dhdp
, dhd_sta_t
* sta
)
1704 ASSERT((sta
!= DHD_STA_NULL
) && (sta
->idx
!= ID16_INVALID
));
1706 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
1709 * Flush and free all packets in all flowring's queues belonging to sta.
1710 * Packets in flow ring will be flushed later.
1712 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
1713 uint16 flowid
= sta
->flowid
[prio
];
1715 if (flowid
!= FLOWID_INVALID
) {
1716 unsigned long flags
;
1717 flow_ring_node_t
* flow_ring_node
;
1719 #ifdef DHDTCPACK_SUPPRESS
1720 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
1721 * when there is a newly coming packet from network stack.
1723 dhd_tcpack_info_tbl_clean(dhdp
);
1724 #endif /* DHDTCPACK_SUPPRESS */
1726 flow_ring_node
= dhd_flow_ring_node(dhdp
, flowid
);
1727 if (flow_ring_node
) {
1728 flow_queue_t
*queue
= &flow_ring_node
->queue
;
1730 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
1731 flow_ring_node
->status
= FLOW_RING_STATUS_STA_FREEING
;
1733 if (!DHD_FLOW_QUEUE_EMPTY(queue
)) {
1735 while ((pkt
= dhd_flow_queue_dequeue(dhdp
, queue
)) !=
1737 PKTFREE(dhdp
->osh
, pkt
, TRUE
);
1741 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
1742 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
1746 sta
->flowid
[prio
] = FLOWID_INVALID
;
1749 id16_map_free(dhdp
->staid_allocator
, sta
->idx
);
1750 DHD_CUMM_CTR_INIT(&sta
->cumm_ctr
);
1751 sta
->ifp
= DHD_IF_NULL
; /* dummy dhd_if object */
1752 sta
->ifidx
= DHD_BAD_IF
;
1753 bzero(sta
->ea
.octet
, ETHER_ADDR_LEN
);
1754 INIT_LIST_HEAD(&sta
->list
);
1755 sta
->idx
= ID16_INVALID
; /* implying free */
1758 /** Allocate a dhd_sta object from the dhd pool. */
1760 dhd_sta_alloc(dhd_pub_t
* dhdp
)
1764 dhd_sta_pool_t
* sta_pool
;
1766 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
1768 idx
= id16_map_alloc(dhdp
->staid_allocator
);
1769 if (idx
== ID16_INVALID
) {
1770 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__
));
1771 return DHD_STA_NULL
;
1774 sta_pool
= (dhd_sta_pool_t
*)(dhdp
->sta_pool
);
1775 sta
= &sta_pool
[idx
];
1777 ASSERT((sta
->idx
== ID16_INVALID
) &&
1778 (sta
->ifp
== DHD_IF_NULL
) && (sta
->ifidx
== DHD_BAD_IF
));
1780 DHD_CUMM_CTR_INIT(&sta
->cumm_ctr
);
1782 sta
->idx
= idx
; /* implying allocated */
1787 /** Delete all STAs in an interface's STA list. */
1789 dhd_if_del_sta_list(dhd_if_t
*ifp
)
1791 dhd_sta_t
*sta
, *next
;
1792 unsigned long flags
;
1794 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1795 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1796 #pragma GCC diagnostic push
1797 #pragma GCC diagnostic ignored "-Wcast-qual"
1799 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1800 list_del(&sta
->list
);
1801 dhd_sta_free(&ifp
->info
->pub
, sta
);
1803 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1804 #pragma GCC diagnostic pop
1806 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1811 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1813 dhd_if_flush_sta(dhd_if_t
* ifp
)
1817 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1819 dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
)
1821 int idx
, prio
, sta_pool_memsz
;
1823 dhd_sta_pool_t
* sta_pool
;
1824 void * staid_allocator
;
1826 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
1827 ASSERT((dhdp
->staid_allocator
== NULL
) && (dhdp
->sta_pool
== NULL
));
1829 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1830 staid_allocator
= id16_map_init(dhdp
->osh
, max_sta
, 1);
1831 if (staid_allocator
== NULL
) {
1832 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__
));
1836 /* Pre allocate a pool of dhd_sta objects (one extra). */
1837 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
)); /* skip idx 0 */
1838 sta_pool
= (dhd_sta_pool_t
*)MALLOC(dhdp
->osh
, sta_pool_memsz
);
1839 if (sta_pool
== NULL
) {
1840 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__
));
1841 id16_map_fini(dhdp
->osh
, staid_allocator
);
1845 dhdp
->sta_pool
= sta_pool
;
1846 dhdp
->staid_allocator
= staid_allocator
;
1848 /* Initialize all sta(s) for the pre-allocated free pool. */
1849 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1850 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1851 sta
= &sta_pool
[idx
];
1852 sta
->idx
= id16_map_alloc(staid_allocator
);
1853 ASSERT(sta
->idx
<= max_sta
);
1856 /* Now place them into the pre-allocated free pool. */
1857 for (idx
= 1; idx
<= max_sta
; idx
++) {
1858 sta
= &sta_pool
[idx
];
1859 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
1860 sta
->flowid
[prio
] = FLOWID_INVALID
; /* Flow rings do not exist */
1862 dhd_sta_free(dhdp
, sta
);
1868 /** Destruct the pool of dhd_sta_t objects.
1869 * Caller must ensure that no STA objects are currently associated with an if.
1872 dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
)
1874 dhd_sta_pool_t
* sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1878 int sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1879 for (idx
= 1; idx
<= max_sta
; idx
++) {
1880 ASSERT(sta_pool
[idx
].ifp
== DHD_IF_NULL
);
1881 ASSERT(sta_pool
[idx
].idx
== ID16_INVALID
);
1883 MFREE(dhdp
->osh
, dhdp
->sta_pool
, sta_pool_memsz
);
1884 dhdp
->sta_pool
= NULL
;
1887 id16_map_fini(dhdp
->osh
, dhdp
->staid_allocator
);
1888 dhdp
->staid_allocator
= NULL
;
1891 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1893 dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
)
1895 int idx
, prio
, sta_pool_memsz
;
1897 dhd_sta_pool_t
* sta_pool
;
1898 void *staid_allocator
;
1901 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
1905 sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1906 staid_allocator
= dhdp
->staid_allocator
;
1909 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__
));
1913 if (!staid_allocator
) {
1914 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__
));
1918 /* clear free pool */
1919 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1920 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1922 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1923 id16_map_clear(staid_allocator
, max_sta
, 1);
1925 /* Initialize all sta(s) for the pre-allocated free pool. */
1926 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1927 sta
= &sta_pool
[idx
];
1928 sta
->idx
= id16_map_alloc(staid_allocator
);
1929 ASSERT(sta
->idx
<= max_sta
);
1931 /* Now place them into the pre-allocated free pool. */
1932 for (idx
= 1; idx
<= max_sta
; idx
++) {
1933 sta
= &sta_pool
[idx
];
1934 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++) {
1935 sta
->flowid
[prio
] = FLOWID_INVALID
; /* Flow rings do not exist */
1937 dhd_sta_free(dhdp
, sta
);
1941 /** Find STA with MAC address ea in an interface's STA list. */
1943 dhd_find_sta(void *pub
, int ifidx
, void *ea
)
1947 unsigned long flags
;
1950 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1952 return DHD_STA_NULL
;
1954 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1955 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1956 #pragma GCC diagnostic push
1957 #pragma GCC diagnostic ignored "-Wcast-qual"
1959 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
1960 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
1961 DHD_INFO(("%s: Found STA " MACDBG
"\n",
1962 __FUNCTION__
, MAC2STRDBG((char *)ea
)));
1963 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1967 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1968 #pragma GCC diagnostic pop
1970 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1972 return DHD_STA_NULL
;
1975 /** Add STA into the interface's STA list. */
1977 dhd_add_sta(void *pub
, int ifidx
, void *ea
)
1981 unsigned long flags
;
1984 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1986 return DHD_STA_NULL
;
1988 if (!memcmp(ifp
->net
->dev_addr
, ea
, ETHER_ADDR_LEN
)) {
1989 DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__
, ea
));
1990 return DHD_STA_NULL
;
1993 sta
= dhd_sta_alloc((dhd_pub_t
*)pub
);
1994 if (sta
== DHD_STA_NULL
) {
1995 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__
));
1996 return DHD_STA_NULL
;
1999 memcpy(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
);
2001 /* link the sta and the dhd interface */
2004 INIT_LIST_HEAD(&sta
->list
);
2006 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2008 list_add_tail(&sta
->list
, &ifp
->sta_list
);
2010 DHD_ERROR(("%s: Adding STA " MACDBG
"\n",
2011 __FUNCTION__
, MAC2STRDBG((char *)ea
)));
2013 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2018 /** Delete all STAs from the interface's STA list. */
2020 dhd_del_all_sta(void *pub
, int ifidx
)
2022 dhd_sta_t
*sta
, *next
;
2024 unsigned long flags
;
2026 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2030 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2031 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2032 #pragma GCC diagnostic push
2033 #pragma GCC diagnostic ignored "-Wcast-qual"
2035 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2037 list_del(&sta
->list
);
2038 dhd_sta_free(&ifp
->info
->pub
, sta
);
2039 #ifdef DHD_L2_FILTER
2040 if (ifp
->parp_enable
) {
2041 /* clear Proxy ARP cache of specific Ethernet Address */
2042 bcm_l2_filter_arp_table_update(((dhd_pub_t
*)pub
)->osh
,
2043 ifp
->phnd_arp_table
, FALSE
,
2044 sta
->ea
.octet
, FALSE
, ((dhd_pub_t
*)pub
)->tickcnt
);
2046 #endif /* DHD_L2_FILTER */
2048 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2049 #pragma GCC diagnostic pop
2051 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2056 /** Delete STA from the interface's STA list. */
2058 dhd_del_sta(void *pub
, int ifidx
, void *ea
)
2060 dhd_sta_t
*sta
, *next
;
2062 unsigned long flags
;
2065 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
2069 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2070 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2071 #pragma GCC diagnostic push
2072 #pragma GCC diagnostic ignored "-Wcast-qual"
2074 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
2075 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
2076 DHD_ERROR(("%s: Deleting STA " MACDBG
"\n",
2077 __FUNCTION__
, MAC2STRDBG(sta
->ea
.octet
)));
2078 list_del(&sta
->list
);
2079 dhd_sta_free(&ifp
->info
->pub
, sta
);
2082 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2083 #pragma GCC diagnostic pop
2085 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2086 #ifdef DHD_L2_FILTER
2087 if (ifp
->parp_enable
) {
2088 /* clear Proxy ARP cache of specific Ethernet Address */
2089 bcm_l2_filter_arp_table_update(((dhd_pub_t
*)pub
)->osh
, ifp
->phnd_arp_table
, FALSE
,
2090 ea
, FALSE
, ((dhd_pub_t
*)pub
)->tickcnt
);
2092 #endif /* DHD_L2_FILTER */
2096 /** Add STA if it doesn't exist. Not reentrant. */
2098 dhd_findadd_sta(void *pub
, int ifidx
, void *ea
)
2102 sta
= dhd_find_sta(pub
, ifidx
, ea
);
2106 sta
= dhd_add_sta(pub
, ifidx
, ea
);
2112 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2113 static struct list_head
*
2114 dhd_sta_list_snapshot(dhd_info_t
*dhd
, dhd_if_t
*ifp
, struct list_head
*snapshot_list
)
2116 unsigned long flags
;
2117 dhd_sta_t
*sta
, *snapshot
;
2119 INIT_LIST_HEAD(snapshot_list
);
2121 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2123 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
2124 /* allocate one and add to snapshot */
2125 snapshot
= (dhd_sta_t
*)MALLOC(dhd
->pub
.osh
, sizeof(dhd_sta_t
));
2126 if (snapshot
== NULL
) {
2127 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__
));
2131 memcpy(snapshot
->ea
.octet
, sta
->ea
.octet
, ETHER_ADDR_LEN
);
2133 INIT_LIST_HEAD(&snapshot
->list
);
2134 list_add_tail(&snapshot
->list
, snapshot_list
);
2137 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2139 return snapshot_list
;
2143 dhd_sta_list_snapshot_free(dhd_info_t
*dhd
, struct list_head
*snapshot_list
)
2145 dhd_sta_t
*sta
, *next
;
2147 list_for_each_entry_safe(sta
, next
, snapshot_list
, list
) {
2148 list_del(&sta
->list
);
2149 MFREE(dhd
->pub
.osh
, sta
, sizeof(dhd_sta_t
));
2152 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
/* PCIE_FULL_DONGLE not defined: STA management collapses to no-op stubs
 * so callers compile unchanged; lookups report "no station" (NULL).
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2163 #endif /* PCIE_FULL_DONGLE */
2167 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \
2170 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2171 * CPU and schedules a tasklet.
2172 * @tasklet: opaque pointer to the tasklet
/* Runs in IPI context on the destination CPU (smp_call_func_t shape:
 * void fn(void *)); simply schedules the tasklet passed as opaque data.
 */
static void
dhd_tasklet_schedule(void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
}
2180 * dhd_tasklet_schedule_on - Executes the passed tasklet in a given CPU
2181 * @tasklet: tasklet to be scheduled
2182 * @on_cpu: cpu core id
2184 * If the requested cpu is online, then an IPI is sent to this cpu via the
2185 * smp_call_function_single with no wait and the tasklet_schedule function
2186 * will be invoked to schedule the specified tasklet on the requested CPU.
2189 dhd_tasklet_schedule_on(struct tasklet_struct
*tasklet
, int on_cpu
)
2192 smp_call_function_single(on_cpu
,
2193 dhd_tasklet_schedule
, (void *)tasklet
, wait
);
/**
 * dhd_work_schedule_on - Executes the passed work item on a given CPU
 * @work: work to be scheduled
 * @on_cpu: cpu core id
 *
 * Queues @work on the system workqueue bound to @on_cpu via
 * schedule_work_on(); the work function then runs on that CPU.
 *
 * NOTE(review): extraction artifact — the return-type line is missing;
 * code tokens are unchanged.
 */
dhd_work_schedule_on(struct work_struct *work, int on_cpu)
	schedule_work_on(on_cpu, work);
2211 #endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */
2213 #if defined(DHD_LB_TXC)
/**
 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
 * freeing the packets placed in the tx_compl workq.
 *
 * NOTE(review): extraction artifact — the early return after the NULL check,
 * the put_cpu() pairing the get_cpu(), the "else" before schedule_work(),
 * and brace-only lines are missing from this chunk; code tokens unchanged.
 */
dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	/* load-balancing infrastructure not initialised yet — bail out */
	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));

	DHD_LB_STATS_INCR(dhd->txc_sched_cnt);

	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	on_cpu = atomic_read(&dhd->tx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		/* run the completion tasklet locally */
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
		/* hand off to the chosen CPU through the dispatcher work item */
		schedule_work(&dhd->tx_compl_dispatcher_work);
/*
 * Workqueue callback: schedule the tx-completion tasklet on the CPU chosen
 * by the load balancer (dhd->tx_compl_cpu), falling back to the local CPU
 * when that CPU is offline.
 *
 * NOTE(review): extraction artifact — the "int cpu;" declaration, the
 * get_online_cpus()/put_online_cpus() bracketing, the "else" before the
 * schedule_on call, and brace-only lines are missing; code tokens unchanged.
 */
static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_compl_dispatcher_work);

	cpu = atomic_read(&dhd->tx_compl_cpu);
	if (!cpu_online(cpu))
		/* chosen CPU offline: run locally */
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
		dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2261 #endif /* DHD_LB_TXC */
2263 #if defined(DHD_LB_RXC)
/**
 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
 * placed in the rx_compl workq.
 *
 * @dhdp: pointer to dhd_pub object
 *
 * NOTE(review): extraction artifact — mirrors dhd_lb_tx_compl_dispatch above;
 * the early return, put_cpu(), the "else" before schedule_work(), and
 * brace-only lines are missing; code tokens unchanged.
 */
dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	/* load-balancing infrastructure not initialised yet — bail out */
	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));

	DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);

	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	on_cpu = atomic_read(&dhd->rx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		/* run the completion tasklet locally */
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
		/* hand off to the chosen CPU through the dispatcher work item */
		schedule_work(&dhd->rx_compl_dispatcher_work);
/*
 * Workqueue callback: schedule the rx-completion tasklet on the CPU chosen
 * by the load balancer (dhd->rx_compl_cpu), falling back to the local CPU
 * when that CPU is offline.
 *
 * NOTE(review): extraction artifact — the "int cpu;" declaration, the
 * online-cpus bracketing, the "else", and brace-only lines are missing;
 * code tokens unchanged.
 */
static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, rx_compl_dispatcher_work);

	cpu = atomic_read(&dhd->rx_compl_cpu);
	if (!cpu_online(cpu))
		/* chosen CPU offline: run locally */
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
		dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2314 #endif /* DHD_LB_RXC */
2316 #if defined(DHD_LB_TXP)
/*
 * Workqueue callback: runs on the CPU selected by dhd_tx_dispatcher_fn()
 * and schedules the tx tasklet there.  The -Wcast-qual pragma bracketing
 * silences container_of()'s cast for STRICT_GCC_WARNINGS builds.
 *
 * NOTE(review): extraction artifact — the matching "#endif" lines of the
 * pragma blocks and brace-only lines are missing; code tokens unchanged.
 */
static void dhd_tx_dispatcher_work(struct work_struct * work)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
	dhd_tasklet_schedule(&dhd->tx_tasklet);
/*
 * Pick the CPU that should run the tx tasklet: normally dhd->tx_cpu, but if
 * the network stack submitted on that same CPU, divert to tx_compl_cpu; if
 * the chosen CPU is offline, run locally instead.
 *
 * NOTE(review): extraction artifact — the declarations of "cpu" and
 * "net_tx_cpu", the "else" before dhd_work_schedule_on(), and brace-only
 * lines are missing; code tokens unchanged.
 */
static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
	dhd_info_t *dhd = dhdp->info;

	cpu = atomic_read(&dhd->tx_cpu);
	net_tx_cpu = atomic_read(&dhd->net_tx_cpu);

	/*
	 * Now if the NET_TX has pushed the packet in the same
	 * CPU that is chosen for Tx processing, seperate it out
	 * i.e run the TX processing tasklet in compl_cpu
	 */
	if (net_tx_cpu == cpu)
		cpu = atomic_read(&dhd->tx_compl_cpu);

	if (!cpu_online(cpu)) {
		/*
		 * Ooohh... but the Chosen CPU is not online,
		 * Do the job in the current CPU itself.
		 */
		dhd_tasklet_schedule(&dhd->tx_tasklet);
		/*
		 * Schedule tx_dispatcher_work to on the cpu which
		 * in turn will schedule tx_tasklet.
		 */
		dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
/**
 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
 * on another cpu. The tx_tasklet will take care of actually putting
 * the skbs into appropriate flow ring and ringing H2D interrupt
 *
 * @dhdp: pointer to dhd_pub object
 *
 * NOTE(review): extraction artifact — the "int curr_cpu;" declaration and
 * the put_cpu() pairing the get_cpu() are missing; code tokens unchanged.
 */
dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
	dhd_info_t *dhd = dhdp->info;

	curr_cpu = get_cpu();

	/* Record the CPU in which the TX request from Network stack came */
	atomic_set(&dhd->net_tx_cpu, curr_cpu);

	/* Schedule the work to dispatch ... */
	dhd_tx_dispatcher_fn(dhdp);
2387 #endif /* DHD_LB_TXP */
2389 #if defined(DHD_LB_RXP)
/**
 * dhd_napi_poll - Load balance napi poll function to process received
 * packets and send up the network stack using netif_receive_skb()
 *
 * @napi: napi object in which context this poll function is invoked
 * @budget: number of packets to be processed.
 *
 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
 * packet tag and sendup.
 *
 * NOTE(review): extraction artifact — the declarations of "ifid",
 * "processed" and "chan", the processed counter update, the loop's closing
 * brace, the pragma "#endif" lines, and the final return statement are
 * missing from this chunk; code tokens are unchanged.
 */
dhd_napi_poll(struct napi_struct *napi, int budget)
	const int pkt_count = 1;
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	struct sk_buff_head rx_process_queue;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop

	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/* sendup each packet, one at a time (pkt_count == 1) */
	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);
		/* interface index was stashed in the packet tag at enqueue time */
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));
		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	napi_complete(napi);
/**
 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
 * poll list. This function may be invoked via the smp_call_function_single
 * from a remote CPU.
 *
 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
 * after the napi_struct is added to the softnet data's poll_list
 *
 * @info: pointer to a dhd_info struct
 *
 * NOTE(review): extraction artifact — the return-type line and closing
 * braces are missing; code tokens are unchanged.
 */
dhd_napi_schedule(void *info)
	dhd_info_t *dhd = (dhd_info_t *)info;

	DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
		__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));

	/* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
	if (napi_schedule_prep(&dhd->rx_napi_struct)) {
		__napi_schedule(&dhd->rx_napi_struct);
		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
		raise_softirq(NET_RX_SOFTIRQ);
#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */

	/*
	 * If the rx_napi_struct was already running, then we let it complete
	 * processing all its packets. The rx_napi_struct may only run on one
	 * core at a time, to avoid out-of-order handling.
	 */
/**
 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
 * action after placing the dhd's rx_process napi object in the remote CPU's
 * softnet data's poll_list.
 *
 * @dhd: dhd_info which has the rx_process napi object
 * @on_cpu: desired remote CPU id
 *
 * NOTE(review): extraction artifact — the return-type line and closing
 * braces are missing; code tokens are unchanged.
 */
dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
	int wait = 0; /* asynchronous IPI */

	DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
		__FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));

	/* fire-and-forget IPI; only log when delivery itself fails */
	if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
		DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
			__FUNCTION__, on_cpu));

	DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2512 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2513 * Why should we do this?
2514 * The candidacy algorithm is run from the call back function
2515 * registered to CPU hotplug notifier. This call back happens from Worker
2516 * context. The dhd_napi_schedule_on is also from worker context.
2517 * Note that both of this can run on two different CPUs at the same time.
2518 * So we can possibly have a window where a given CPUn is being brought
2519 * down from CPUm while we try to run a function on CPUn.
2520 * To prevent this its better have the whole code to execute an SMP
2521 * function under get_online_cpus.
2522 * This function call ensures that hotplug mechanism does not kick-in
2523 * until we are done dealing with online CPUs
2524 * If the hotplug worker is already running, no worries because the
2525 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2527 * The below mentioned code structure is proposed in
2528 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2530 * Q: I need to ensure that a particular cpu is not removed when there is some
2531 * work specific to this cpu is in progress
2533 * According to the documentation calling get_online_cpus is NOT required, if
2534 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2535 * run from Work Queue context we have to call these functions
/*
 * Workqueue callback: runs on the CPU the dispatcher work was queued to and
 * schedules the rx napi poll there via dhd_napi_schedule().  See the long
 * comment above regarding get_online_cpus() and CPU-hotplug safety.
 *
 * NOTE(review): extraction artifact — the pragma "#endif" lines, any
 * get_online_cpus()/put_online_cpus() calls, and brace-only lines are
 * missing; code tokens unchanged.
 */
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, rx_napi_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
	dhd_napi_schedule(dhd);
/**
 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
 * the packets enqueued into the rx_napi_queue and sendup.
 * The producer's rx packet queue is appended to the rx_napi_queue before
 * dispatching the rx_napi_struct.
 *
 * NOTE(review): extraction artifact — the declarations of "curr_cpu",
 * "on_cpu" and (for DHD_LB_IRQSET) "cpus", the "#else" between the two
 * alternative if-conditions, the "else" before the remote dispatch, the
 * early return after the NULL check, put_cpu(), and brace-only lines are
 * missing from this chunk; code tokens are unchanged.
 */
dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
	unsigned long flags;
	dhd_info_t *dhd = dhdp->info;
#ifdef DHD_LB_IRQSET
#endif /* DHD_LB_IRQSET */

	/* load-balancing infrastructure not initialised yet — bail out */
	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));

	DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
		skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));

	/* append the producer's queue of packets to the napi's rx process queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();

	on_cpu = atomic_read(&dhd->rx_napi_cpu);
#ifdef DHD_LB_IRQSET
	/* IRQSET builds: also run locally when current CPU is in the primary mask */
	if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
		(!cpu_online(on_cpu))) {
	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
#endif /* DHD_LB_IRQSET */
		DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
			curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
		/* run napi poll on the current CPU */
		dhd_napi_schedule(dhd);
		DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
			__FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
		/* dispatch to the chosen CPU through the dispatcher work item */
		dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
/**
 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
 * (dhd->rx_pend_queue); the interface index is stashed in the packet tag so
 * dhd_napi_poll() can recover it at sendup time.
 *
 * NOTE(review): extraction artifact — the return-type line and closing
 * brace are missing; code tokens are unchanged.
 */
dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
	dhd_info_t *dhd = dhdp->info;

	DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
		pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
	/* __ variant: caller context is already serialised; no queue lock taken */
	__skb_queue_tail(&dhd->rx_pend_queue, pkt);
2622 #endif /* DHD_LB_RXP */
2624 #ifdef DHD_LB_IRQSET
/*
 * Pin the PCIe interrupt of this dongle to the driver's primary CPU mask.
 *
 * NOTE(review): extraction artifact — the return-type line, the "if" guards
 * around the two NULL-check error messages (presumably "if (!dhdp)" /
 * "if (!dhdp->bus)" with returns), the "int err;" declaration, and the
 * "if (err)" guard around the final error message are missing from this
 * chunk; code tokens are unchanged.
 */
dhd_irq_set_affinity(dhd_pub_t *dhdp)
	unsigned int irq = (unsigned int)-1;

	DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));

	DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));

	/* look up the PCIe IRQ number for this bus, then set its affinity */
	dhdpcie_get_pcieirq(dhdp->bus, &irq);
	err = irq_set_affinity(irq, dhdp->info->cpumask_primary);
	DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
		__FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary)));
2647 #endif /* DHD_LB_IRQSET */
/** Returns dhd iflist index corresponding to the bssidx provided by apps */
/*
 * NOTE(review): extraction artifact — the declarations of "i" and "ifp",
 * the loop's "break"/return-of-i, the final return value, and brace-only
 * lines are missing from this chunk; code tokens are unchanged.
 */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
	dhd_info_t *dhd = dhdp->info;

	ASSERT(bssidx < DHD_MAX_IFS);

	/* linear scan of the interface list for a matching bss index */
	for (i = 0; i < DHD_MAX_IFS; i++) {
		ifp = dhd->iflist[i];
		if (ifp && (ifp->bssidx == bssidx)) {
			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
				ifp->name, bssidx, i));
/*
 * Push one skb into the fixed-size rx-frame ring buffer (dhdp->skbbuf,
 * MAXSKBPEND entries, power-of-two indexing).  Fails when the store slot is
 * still occupied, i.e. the rx frame thread has not consumed it yet.
 *
 * NOTE(review): extraction artifact — the local declarations of
 * "store_idx"/"sent_idx", the "if (!skb)" guard around the NULL error
 * message, the error/success return statements, the WAIT_DEQUEUE body and
 * its "#endif", and brace-only lines are missing; code tokens unchanged.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
	DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)

	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* publish the skb, then advance the store index with wrap-around */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);
/*
 * Pop one skb from the rx-frame ring buffer (counterpart of
 * dhd_rxf_enqueue above).  Returns the dequeued skb; an empty sent slot is
 * reported and the queue state left untouched.
 *
 * NOTE(review): extraction artifact — the local declarations
 * ("store_idx"/"sent_idx"/"skb"), the "if (skb == NULL)" guard around the
 * first unlock+error path, the return statements, and brace-only lines are
 * missing from this chunk; code tokens are unchanged.
 */
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	dhd_os_rxfunlock(dhdp);
	DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
		store_idx, sent_idx));

	/* clear the consumed slot, then advance sent index with wrap-around */
	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",

	dhd_os_rxfunlock(dhdp);
/*
 * Module CID/MAC handling around firmware download.
 * prepost == TRUE: before download — read module CID and MAC, and apply a
 * MAC address from file if provided.
 * prepost == FALSE: after download — write the MAC into the dongle and clear
 * the cached CIS data.
 *
 * NOTE(review): extraction artifact — opening/closing braces and the return
 * statement are missing from this chunk; code tokens are unchanged.
 */
int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
	if (prepost) { /* pre process */
		dhd_check_module_cid(dhdp);
		dhd_check_module_mac(dhdp);
		dhd_set_macaddr_from_file(dhdp);
	} else { /* post process */
		dhd_write_macaddr(&dhdp->mac);
		dhd_clear_cis(dhdp);
2751 #ifdef PKT_FILTER_SUPPORT
2752 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
/*
 * Decide whether the ARP allow filter should be installed for the current
 * operating mode: IBSS, P2P GC and P2P GO modes need ARP packets passed up
 * to the host.
 *
 * NOTE(review): extraction artifact — the return-type line (presumably
 * "static bool"), the "_apply = TRUE; goto ..." bodies of the two ifs, and
 * the final "return _apply;" are missing; code tokens are unchanged.
 */
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
	bool _apply = FALSE;
	/* In case of IBSS mode, apply arp pkt filter */
	if (op_mode_param & DHD_FLAG_IBSS_MODE) {

	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
2771 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
/*
 * Push every configured packet filter program (dhd->pktfilter[0..count-1])
 * down to the firmware.  Gated on the module-level dhd_pkt_filter_enable
 * switch.
 *
 * NOTE(review): extraction artifact — the return-type line, the "int i;"
 * declaration and closing braces are missing; code tokens are unchanged.
 */
dhd_set_packet_filter(dhd_pub_t *dhd)
	DHD_TRACE(("%s: enter\n", __FUNCTION__));
	if (dhd_pkt_filter_enable) {
		for (i = 0; i < dhd->pktfilter_count; i++) {
			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
/*
 * Enable (value == 1) or disable (value == 0) all configured packet filters
 * in firmware.  Filters are never enabled in HOSTAP mode, nor while a DHCP
 * exchange is in progress (unicast-only filtering would break DHCP).
 * The ARP filter slot is additionally gated by _turn_on_arp_filter().
 *
 * NOTE(review): extraction artifact — the return-type line, the "int i;"
 * declaration, the early return in the HOSTAP branch, the "continue" after
 * the ARP-filter trace, and brace-only lines are missing from this chunk;
 * code tokens are unchanged.
 */
dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));

	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
		(dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))

		for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
			/* ARP slot: only enable when the op mode wants ARP passed up */
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
/*
 * Install or remove one of the fixed packet-filter programs identified by
 * "num".  Filter strings are "<id> <polarity> <type> <offset> <mask>
 * <pattern>"; IDs 101..106 cover broadcast, IPv4/IPv6 multicast, mDNS, ARP
 * and broadcast-ARP.  For the multicast slots, pf6-capable firmware gets a
 * DISCARD_* program (after deleting any previously installed one) instead of
 * the legacy pattern.
 *
 * NOTE(review): extraction artifact — the return-type line, the enclosing
 * "switch (num)"/"if (add_remove)" structure, "break" statements, the
 * "filter_id" derivation, default case, return statement and brace-only
 * lines are missing from this chunk; code tokens are unchanged.
 */
dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
	char *filterp = NULL;

	case DHD_BROADCAST_FILTER_NUM:
		filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
	case DHD_MULTICAST4_FILTER_NUM:
		if (FW_SUPPORTED((dhdp), pf6)) {
			/* replace any legacy filter with the pf6 discard program */
			if (dhdp->pktfilter[num] != NULL) {
				dhd_pktfilter_offload_delete(dhdp, filter_id);
				dhdp->pktfilter[num] = NULL;
			filterp = DISCARD_IPV4_MCAST;
		filterp = "102 0 0 0 0xFFFFFF 0x01005E";
	case DHD_MULTICAST6_FILTER_NUM:
		if (FW_SUPPORTED((dhdp), pf6)) {
			/* replace any legacy filter with the pf6 discard program */
			if (dhdp->pktfilter[num] != NULL) {
				dhd_pktfilter_offload_delete(dhdp, filter_id);
				dhdp->pktfilter[num] = NULL;
			filterp = DISCARD_IPV6_MCAST;
		filterp = "103 0 0 0 0xFFFF 0x3333";
	case DHD_MDNS_FILTER_NUM:
		filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
	case DHD_ARP_FILTER_NUM:
		filterp = "105 0 0 12 0xFFFF 0x0806";
	case DHD_BROADCAST_ARP_FILTER_NUM:
		filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
			" 0xFFFFFFFFFFFF0000000000000806";

	/* Add filter: record the program and push it to firmware */
	dhdp->pktfilter[num] = filterp;
	dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
	} else { /* Delete filter */
		if (dhdp->pktfilter[num]) {
			dhd_pktfilter_offload_delete(dhdp, filter_id);
			dhdp->pktfilter[num] = NULL;
2888 #endif /* PKT_FILTER_SUPPORT */
/*
 * Apply (value != 0) or revert (value == 0) the driver's low-power suspend
 * configuration: PM mode, packet filters, multicast pass-through, DTIM skip
 * (bcn_li_dtim), roaming parameters, ND offload / IPv6 RA filtering, and
 * assorted vendor knobs, all under dhd_suspend_lock().  The suspend branch
 * runs only when the kernel has signalled suspend (dhd->in_suspend).
 *
 * NOTE(review): this chunk is a lossy extraction — many original lines are
 * missing throughout, including: several local declarations (roamvar,
 * bcn_li_bcn, allmulti, i, ret, intr_width, lpas, bcn_to_dly), "#else" and
 * "#endif" lines of some conditional groups, "if (ret < 0)" guards around
 * most DHD_ERROR calls, iovar argument tails, the else of the main
 * if/else, brace-only lines, and the final return.  Only comments were
 * added/edited; all surviving code tokens are unchanged.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
#ifndef SUPPORT_PM2_ONLY
	int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
	/* wl_pkt_filter_enable_t enable_parm; */
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
	int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int roam_time_thresh = 0; /* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint nd_ra_filter = 0;
#ifdef ENABLE_IPMCAST_FILTER
	int ipmcast_l2filter;
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef CUSTOM_EVENT_PM_WAKE
	uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
#endif /* CUSTOM_EVENT_PM_WAKE */
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
	struct dhd_info *dhdinfo;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */
#if defined(BCMPCIE)
	int dtim_period = 0;
	int bcn_interval = 0;
#if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
	int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
#endif /* OEM_ANDROID && BCMPCIE */

#ifdef PASS_ALL_MCAST_PKTS
	dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
	/* ---- suspend branch ---- */
	if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
		dhd->early_suspended = 1;
		/* Kernel suspended */
		DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
		dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
			sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

#ifdef PKT_FILTER_SUPPORT
		/* Enable packet filter,
		 * only allow unicast packet to send up
		 */
		dhd_enable_packet_filter(1, dhd);
		dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* PKT_FILTER_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
		/* disable allmulti on every interface while suspended */
		for (i = 0; i < DHD_MAX_IFS; i++) {
			if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
				ret = dhd_iovar(dhd, i, "allmulti",
				DHD_ERROR(("%s allmulti failed %d\n",
					__FUNCTION__, ret));
#endif /* PASS_ALL_MCAST_PKTS */

		/* If DTIM skip is set up as default, force it to wake
		 * each third DTIM for better power savings. Note that
		 * one side effect is a chance to miss BC/MC packet.
		 */
		/* Do not set bcn_li_ditm on WFD mode */
		if (dhd->tdls_mode) {
#if defined(BCMPCIE)
		bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
		ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
			sizeof(bcn_li_dtim), NULL, 0, TRUE);
		DHD_ERROR(("%s bcn_li_dtim failed %d\n",
			__FUNCTION__, ret));
		/* extend roaming tolerance when the DTIM sleep span is long */
		if ((bcn_li_dtim * dtim_period * bcn_interval) >=
			MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
			/*
			 * Increase max roaming threshold from 2 secs to 8 secs
			 * the real roam threshold is MIN(max_roam_threshold,
			 */
			ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
			DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__,
			/*
			 * if bcn_to_dly is 1, the real roam threshold is
			 * MIN(max_roam_threshold, bcn_timeout -1);
			 * notify link down event after roaming procedure complete
			 * if we hit bcn_timeout while we are in roaming progress.
			 */
			ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
				sizeof(bcn_to_dly), NULL, 0, TRUE);
			DHD_ERROR(("%s bcn_to_dly failed %d\n",
				__FUNCTION__, ret));
			/* Increase beacon timeout to 6 secs or use bigger one */
			bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			DHD_ERROR(("%s bcn_timeout failed %d\n",
				__FUNCTION__, ret));
		/* non-PCIE path: simple DTIM-skip setting */
		bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
		if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
			sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
			DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
#endif /* OEM_ANDROID && BCMPCIE */

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
		bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
		ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
			sizeof(bcn_timeout), NULL, 0, TRUE);
		DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
		roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
		ret = dhd_iovar(dhd, 0, "roam_time_thresh",
			(char *)&roam_time_thresh,
			sizeof(roam_time_thresh), NULL, 0, TRUE);
		DHD_ERROR(("%s roam_time_thresh failed %d\n",
			__FUNCTION__, ret));
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
		/* Disable firmware roaming during suspend */
		ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
			sizeof(roamvar), NULL, 0, TRUE);
		DHD_ERROR(("%s roam_off failed %d\n",
			__FUNCTION__, ret));
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
		ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
			sizeof(bcn_li_bcn), NULL, 0, TRUE);
		DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef NDO_CONFIG_SUPPORT
		if (dhd->ndo_enable) {
			if (!dhd->ndo_host_ip_overflow) {
				/* enable ND offload on suspend */
				ret = dhd_ndo_enable(dhd, TRUE);
				DHD_ERROR(("%s: failed to enable NDO\n",
				DHD_INFO(("%s: NDO disabled on suspend due to"
					"HW capacity\n", __FUNCTION__));
#endif /* NDO_CONFIG_SUPPORT */
		if (FW_SUPPORTED(dhd, ndoe)) {
		if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
			/* enable IPv6 RA filter in firmware during suspend */
			ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
				(char *)&nd_ra_filter, sizeof(nd_ra_filter),
			DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
		dhd_os_suppress_logging(dhd, TRUE);
#ifdef ENABLE_IPMCAST_FILTER
		ipmcast_l2filter = 1;
		ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
			(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
		DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef DYNAMIC_SWOOB_DURATION
		intr_width = CUSTOM_INTR_WIDTH;
		ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
			sizeof(intr_width), NULL, 0, TRUE);
		DHD_ERROR(("failed to set intr_width (%d)\n", ret));
#endif /* DYNAMIC_SWOOB_DURATION */
#ifdef CUSTOM_EVENT_PM_WAKE
		/* quadruple the PM-awake event threshold while suspended */
		pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
		ret = dhd_iovar(dhd, 0, "const_awake_thresh",
			(char *)&pm_awake_thresh,
			sizeof(pm_awake_thresh), NULL, 0, TRUE);
		DHD_ERROR(("%s set const_awake_thresh failed %d\n",
			__FUNCTION__, ret));
#endif /* CUSTOM_EVENT_PM_WAKE */
#endif /* DHD_USE_EARLYSUSPEND */
	/* ---- resume branch: undo everything set above ---- */
#ifdef PKT_FILTER_SUPPORT
		dhd->early_suspended = 0;
		/* Kernel resumed */
		DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
#ifdef DYNAMIC_SWOOB_DURATION
		ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
			sizeof(intr_width), NULL, 0, TRUE);
		DHD_ERROR(("failed to set intr_width (%d)\n", ret));
#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
		power_mode = PM_FAST;
		dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
			sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
		/* disable pkt filter */
		dhd_enable_packet_filter(0, dhd);
		dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
		/* re-enable allmulti on every interface */
		for (i = 0; i < DHD_MAX_IFS; i++) {
			if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
				ret = dhd_iovar(dhd, i, "allmulti",
					sizeof(allmulti), NULL,
				DHD_ERROR(("%s: allmulti failed:%d\n",
					__FUNCTION__, ret));
#endif /* PASS_ALL_MCAST_PKTS */
#if defined(BCMPCIE)
		/* restore pre-suspend setting */
		ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
			sizeof(bcn_li_dtim), NULL, 0, TRUE);
		DHD_ERROR(("%s:bcn_li_ditm failed:%d\n",
			__FUNCTION__, ret));
		ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
		DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
		ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
			sizeof(bcn_to_dly), NULL, 0, TRUE);
		DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
		ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
			sizeof(bcn_timeout), NULL, 0, TRUE);
		DHD_ERROR(("%s:bcn_timeout failed:%d\n",
			__FUNCTION__, ret));
		/* restore pre-suspend setting for dtim_skip */
		ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
			sizeof(bcn_li_dtim), NULL, 0, TRUE);
		DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
#endif /* OEM_ANDROID && BCMPCIE */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
		bcn_timeout = CUSTOM_BCN_TIMEOUT;
		ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
			sizeof(bcn_timeout), NULL, 0, TRUE);
		DHD_ERROR(("%s:bcn_timeout failed:%d\n",
			__FUNCTION__, ret));
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
		roam_time_thresh = 2000;
		ret = dhd_iovar(dhd, 0, "roam_time_thresh",
			(char *)&roam_time_thresh,
			sizeof(roam_time_thresh), NULL, 0, TRUE);
		DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
			__FUNCTION__, ret));
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
		roamvar = dhd_roam_disable;
		ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
			sizeof(roamvar), NULL, 0, TRUE);
		DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
		ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
			sizeof(bcn_li_bcn), NULL, 0, TRUE);
		DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
			__FUNCTION__, ret));
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef NDO_CONFIG_SUPPORT
		if (dhd->ndo_enable) {
			/* Disable ND offload on resume */
			ret = dhd_ndo_enable(dhd, FALSE);
			DHD_ERROR(("%s: failed to disable NDO\n",
#endif /* NDO_CONFIG_SUPPORT */
		if (FW_SUPPORTED(dhd, ndoe)) {
		if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
			/* disable IPv6 RA filter in firmware during suspend */
			ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
				(char *)&nd_ra_filter, sizeof(nd_ra_filter),
			DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
		dhd_os_suppress_logging(dhd, FALSE);
#ifdef ENABLE_IPMCAST_FILTER
		ipmcast_l2filter = 0;
		ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
			(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
		DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef CUSTOM_EVENT_PM_WAKE
		ret = dhd_iovar(dhd, 0, "const_awake_thresh",
			(char *)&pm_awake_thresh,
			sizeof(pm_awake_thresh), NULL, 0, TRUE);
		DHD_ERROR(("%s set const_awake_thresh failed %d\n",
			__FUNCTION__, ret));
#endif /* CUSTOM_EVENT_PM_WAKE */
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef DHD_LB_IRQSET
		dhd_irq_set_affinity(dhd);
#endif /* DHD_LB_IRQSET */

	dhd_suspend_unlock(dhd);
/*
 * Common helper for the early-suspend / late-resume callbacks: record the
 * suspend state and, unless suspend handling is disabled (and not forced),
 * apply it via dhd_set_suspend() while holding the wake lock and perimeter
 * lock.
 *
 * NOTE(review): extraction artifact — the "int ret" declaration/initial
 * value, brace-only lines and the return statement are missing from this
 * chunk; code tokens are unchanged.
 */
static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
	dhd_pub_t *dhdp = &dhd->pub;

	DHD_OS_WAKE_LOCK(dhdp);
	DHD_PERIM_LOCK(dhdp);

	/* Set flag when early suspend was called */
	dhdp->in_suspend = val;
	if ((force || !dhdp->suspend_disable_flag) &&
		dhd_support_sta_mode(dhdp))

		ret = dhd_set_suspend(val, dhdp);

	DHD_PERIM_UNLOCK(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);
3341 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3342 static void dhd_early_suspend(struct early_suspend
*h
)
3344 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
3345 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
3348 dhd_suspend_resume_helper(dhd
, 1, 0);
3351 static void dhd_late_resume(struct early_suspend
*h
)
3353 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
3354 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
3357 dhd_suspend_resume_helper(dhd
, 0, 0);
3359 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3362 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3363 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3365 * dhd_timeout_start(&tmo, usec);
3366 * while (!dhd_timeout_expired(&tmo))
3367 * if (poll_something())
3369 * if (dhd_timeout_expired(&tmo))
3374 dhd_timeout_start(dhd_timeout_t
*tmo
, uint usec
)
3379 tmo
->tick
= jiffies_to_usecs(1);
3383 dhd_timeout_expired(dhd_timeout_t
*tmo
)
3385 /* Does nothing the first call */
3386 if (tmo
->increment
== 0) {
3391 if (tmo
->elapsed
>= tmo
->limit
)
3394 /* Add the delay that's about to take place */
3395 tmo
->elapsed
+= tmo
->increment
;
3397 if ((!CAN_SLEEP()) || tmo
->increment
< tmo
->tick
) {
3398 OSL_DELAY(tmo
->increment
);
3399 tmo
->increment
*= 2;
3400 if (tmo
->increment
> tmo
->tick
)
3401 tmo
->increment
= tmo
->tick
;
3404 * OSL_SLEEP() is corresponding to usleep_range(). In non-atomic
3405 * context where the exact wakeup time is flexible, it would be good
3406 * to use usleep_range() instead of udelay(). It takes a few advantages
3407 * such as improving responsiveness and reducing power.
3409 OSL_SLEEP(jiffies_to_msecs(1));
3416 dhd_net2idx(dhd_info_t
*dhd
, struct net_device
*net
)
3421 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__
));
3425 while (i
< DHD_MAX_IFS
) {
3426 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->net
&& (dhd
->iflist
[i
]->net
== net
))
3434 struct net_device
* dhd_idx2net(void *pub
, int ifidx
)
3436 struct dhd_pub
*dhd_pub
= (struct dhd_pub
*)pub
;
3437 struct dhd_info
*dhd_info
;
3439 if (!dhd_pub
|| ifidx
< 0 || ifidx
>= DHD_MAX_IFS
)
3441 dhd_info
= dhd_pub
->info
;
3442 if (dhd_info
&& dhd_info
->iflist
[ifidx
])
3443 return dhd_info
->iflist
[ifidx
]->net
;
3448 dhd_ifname2idx(dhd_info_t
*dhd
, char *name
)
3450 int i
= DHD_MAX_IFS
;
3454 if (name
== NULL
|| *name
== '\0')
3458 if (dhd
->iflist
[i
] && !strncmp(dhd
->iflist
[i
]->dngl_name
, name
, IFNAMSIZ
))
3461 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__
, i
, name
));
3463 return i
; /* default - the primary interface */
3467 dhd_ifname(dhd_pub_t
*dhdp
, int ifidx
)
3469 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
3473 if (ifidx
< 0 || ifidx
>= DHD_MAX_IFS
) {
3474 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__
, ifidx
));
3478 if (dhd
->iflist
[ifidx
] == NULL
) {
3479 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__
, ifidx
));
3483 if (dhd
->iflist
[ifidx
]->net
)
3484 return dhd
->iflist
[ifidx
]->net
->name
;
3490 dhd_bssidx2bssid(dhd_pub_t
*dhdp
, int idx
)
3493 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
;
3496 for (i
= 0; i
< DHD_MAX_IFS
; i
++)
3497 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->bssidx
== idx
)
3498 return dhd
->iflist
[i
]->mac_addr
;
3504 _dhd_set_multicast_list(dhd_info_t
*dhd
, int ifidx
)
3506 struct net_device
*dev
;
3507 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3508 struct netdev_hw_addr
*ha
;
3510 struct dev_mc_list
*mclist
;
3512 uint32 allmulti
, cnt
;
3519 #ifdef MCAST_LIST_ACCUMULATION
3521 uint32 cnt_iface
[DHD_MAX_IFS
];
3525 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3526 if (dhd
->iflist
[i
]) {
3527 dev
= dhd
->iflist
[i
]->net
;
3530 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3531 netif_addr_lock_bh(dev
);
3532 #endif /* LINUX >= 2.6.27 */
3533 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3534 cnt_iface
[i
] = netdev_mc_count(dev
);
3535 cnt
+= cnt_iface
[i
];
3537 cnt
+= dev
->mc_count
;
3538 #endif /* LINUX >= 2.6.35 */
3539 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3540 netif_addr_unlock_bh(dev
);
3541 #endif /* LINUX >= 2.6.27 */
3543 /* Determine initial value of allmulti flag */
3544 allmulti
|= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
3547 #else /* !MCAST_LIST_ACCUMULATION */
3548 if (!dhd
->iflist
[ifidx
]) {
3549 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__
, ifidx
));
3552 dev
= dhd
->iflist
[ifidx
]->net
;
3555 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3556 netif_addr_lock_bh(dev
);
3557 #endif /* LINUX >= 2.6.27 */
3558 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3559 cnt
= netdev_mc_count(dev
);
3561 cnt
= dev
->mc_count
;
3562 #endif /* LINUX >= 2.6.35 */
3563 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3564 netif_addr_unlock_bh(dev
);
3565 #endif /* LINUX >= 2.6.27 */
3567 /* Determine initial value of allmulti flag */
3568 allmulti
= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
3569 #endif /* MCAST_LIST_ACCUMULATION */
3571 #ifdef PASS_ALL_MCAST_PKTS
3572 #ifdef PKT_FILTER_SUPPORT
3573 if (!dhd
->pub
.early_suspended
)
3574 #endif /* PKT_FILTER_SUPPORT */
3576 #endif /* PASS_ALL_MCAST_PKTS */
3578 /* Send down the multicast list first. */
3580 buflen
= sizeof("mcast_list") + sizeof(cnt
) + (cnt
* ETHER_ADDR_LEN
);
3581 if (!(bufp
= buf
= MALLOC(dhd
->pub
.osh
, buflen
))) {
3582 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3583 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
3587 strncpy(bufp
, "mcast_list", buflen
- 1);
3588 bufp
[buflen
- 1] = '\0';
3589 bufp
+= strlen("mcast_list") + 1;
3592 memcpy(bufp
, &cnt
, sizeof(cnt
));
3593 bufp
+= sizeof(cnt
);
3595 #ifdef MCAST_LIST_ACCUMULATION
3596 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3597 if (dhd
->iflist
[i
]) {
3598 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i
));
3599 dev
= dhd
->iflist
[i
]->net
;
3601 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3602 netif_addr_lock_bh(dev
);
3603 #endif /* LINUX >= 2.6.27 */
3604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3605 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3606 #pragma GCC diagnostic push
3607 #pragma GCC diagnostic ignored "-Wcast-qual"
3609 netdev_for_each_mc_addr(ha
, dev
) {
3610 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3611 #pragma GCC diagnostic pop
3615 memcpy(bufp
, ha
->addr
, ETHER_ADDR_LEN
);
3616 bufp
+= ETHER_ADDR_LEN
;
3617 DHD_TRACE(("_dhd_set_multicast_list: cnt "
3619 cnt_iface
[i
], MAC2STRDBG(ha
->addr
)));
3622 #else /* LINUX < 2.6.35 */
3623 for (mclist
= dev
->mc_list
; (mclist
&& (cnt_iface
[i
] > 0));
3624 cnt_iface
[i
]--, mclist
= mclist
->next
) {
3625 memcpy(bufp
, (void *)mclist
->dmi_addr
, ETHER_ADDR_LEN
);
3626 bufp
+= ETHER_ADDR_LEN
;
3628 #endif /* LINUX >= 2.6.35 */
3629 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3630 netif_addr_unlock_bh(dev
);
3631 #endif /* LINUX >= 2.6.27 */
3634 #else /* !MCAST_LIST_ACCUMULATION */
3635 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3636 netif_addr_lock_bh(dev
);
3637 #endif /* LINUX >= 2.6.27 */
3638 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3639 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3640 #pragma GCC diagnostic push
3641 #pragma GCC diagnostic ignored "-Wcast-qual"
3643 netdev_for_each_mc_addr(ha
, dev
) {
3646 memcpy(bufp
, ha
->addr
, ETHER_ADDR_LEN
);
3647 bufp
+= ETHER_ADDR_LEN
;
3650 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3651 #pragma GCC diagnostic pop
3653 #else /* LINUX < 2.6.35 */
3654 for (mclist
= dev
->mc_list
; (mclist
&& (cnt
> 0));
3655 cnt
--, mclist
= mclist
->next
) {
3656 memcpy(bufp
, (void *)mclist
->dmi_addr
, ETHER_ADDR_LEN
);
3657 bufp
+= ETHER_ADDR_LEN
;
3659 #endif /* LINUX >= 2.6.35 */
3660 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3661 netif_addr_unlock_bh(dev
);
3662 #endif /* LINUX >= 2.6.27 */
3663 #endif /* MCAST_LIST_ACCUMULATION */
3665 memset(&ioc
, 0, sizeof(ioc
));
3666 ioc
.cmd
= WLC_SET_VAR
;
3671 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
3673 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3674 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
3675 allmulti
= cnt
? TRUE
: allmulti
;
3678 MFREE(dhd
->pub
.osh
, buf
, buflen
);
3680 /* Now send the allmulti setting. This is based on the setting in the
3681 * net_device flags, but might be modified above to be turned on if we
3682 * were trying to set some addresses and dongle rejected it...
3685 allmulti
= htol32(allmulti
);
3686 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "allmulti", (char *)&allmulti
,
3687 sizeof(allmulti
), NULL
, 0, TRUE
);
3689 DHD_ERROR(("%s: set allmulti %d failed\n",
3690 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
3693 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3695 #ifdef MCAST_LIST_ACCUMULATION
3697 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3698 if (dhd
->iflist
[i
]) {
3699 dev
= dhd
->iflist
[i
]->net
;
3700 allmulti
|= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
3704 allmulti
= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
3705 #endif /* MCAST_LIST_ACCUMULATION */
3707 allmulti
= htol32(allmulti
);
3709 memset(&ioc
, 0, sizeof(ioc
));
3710 ioc
.cmd
= WLC_SET_PROMISC
;
3711 ioc
.buf
= &allmulti
;
3712 ioc
.len
= sizeof(allmulti
);
3715 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
3717 DHD_ERROR(("%s: set promisc %d failed\n",
3718 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
3723 _dhd_set_mac_address(dhd_info_t
*dhd
, int ifidx
, uint8
*addr
)
3727 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "cur_etheraddr", (char *)addr
,
3728 ETHER_ADDR_LEN
, NULL
, 0, TRUE
);
3730 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd
->pub
, ifidx
)));
3732 memcpy(dhd
->iflist
[ifidx
]->net
->dev_addr
, addr
, ETHER_ADDR_LEN
);
3734 memcpy(dhd
->pub
.mac
.octet
, addr
, ETHER_ADDR_LEN
);
3741 extern struct net_device
*ap_net_dev
;
3742 extern tsk_ctl_t ap_eth_ctl
; /* ap netdev heper thread ctl */
3746 /* Get psta/psr configuration configuration */
3747 int dhd_get_psta_mode(dhd_pub_t
*dhdp
)
3749 dhd_info_t
*dhd
= dhdp
->info
;
3750 return (int)dhd
->psta_mode
;
3752 /* Set psta/psr configuration configuration */
3753 int dhd_set_psta_mode(dhd_pub_t
*dhdp
, uint32 val
)
3755 dhd_info_t
*dhd
= dhdp
->info
;
3756 dhd
->psta_mode
= val
;
3759 #endif /* DHD_PSTA */
3761 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
3763 dhd_update_rx_pkt_chainable_state(dhd_pub_t
* dhdp
, uint32 idx
)
3765 dhd_info_t
*dhd
= dhdp
->info
;
3768 ASSERT(idx
< DHD_MAX_IFS
);
3770 ifp
= dhd
->iflist
[idx
];
3773 #ifdef DHD_L2_FILTER
3774 (ifp
->block_ping
) ||
3779 #ifdef DHD_MCAST_REGEN
3780 (ifp
->mcast_regen_bss_enable
) ||
3783 ifp
->rx_pkt_chainable
= FALSE
;
3786 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
3789 /* Get wet configuration configuration */
3790 int dhd_get_wet_mode(dhd_pub_t
*dhdp
)
3792 dhd_info_t
*dhd
= dhdp
->info
;
3793 return (int)dhd
->wet_mode
;
3796 /* Set wet configuration configuration */
3797 int dhd_set_wet_mode(dhd_pub_t
*dhdp
, uint32 val
)
3799 dhd_info_t
*dhd
= dhdp
->info
;
3800 dhd
->wet_mode
= val
;
3801 dhd_update_rx_pkt_chainable_state(dhdp
, 0);
3804 #endif /* DHD_WET */
3806 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3807 int32
dhd_role_to_nl80211_iftype(int32 role
)
3810 case WLC_E_IF_ROLE_STA
:
3811 return NL80211_IFTYPE_STATION
;
3812 case WLC_E_IF_ROLE_AP
:
3813 return NL80211_IFTYPE_AP
;
3814 case WLC_E_IF_ROLE_WDS
:
3815 return NL80211_IFTYPE_WDS
;
3816 case WLC_E_IF_ROLE_P2P_GO
:
3817 return NL80211_IFTYPE_P2P_GO
;
3818 case WLC_E_IF_ROLE_P2P_CLIENT
:
3819 return NL80211_IFTYPE_P2P_CLIENT
;
3820 case WLC_E_IF_ROLE_IBSS
:
3821 case WLC_E_IF_ROLE_NAN
:
3822 return NL80211_IFTYPE_ADHOC
;
3824 return NL80211_IFTYPE_UNSPECIFIED
;
3827 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3830 dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
)
3832 dhd_info_t
*dhd
= handle
;
3833 dhd_if_event_t
*if_event
= event_info
;
3836 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3837 struct wl_if_event_info info
;
3839 struct net_device
*ndev
;
3840 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3843 if (event
!= DHD_WQ_WORK_IF_ADD
) {
3844 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
3849 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
3854 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
3858 dhd_net_if_lock_local(dhd
);
3859 DHD_OS_WAKE_LOCK(&dhd
->pub
);
3860 DHD_PERIM_LOCK(&dhd
->pub
);
3862 ifidx
= if_event
->event
.ifidx
;
3863 bssidx
= if_event
->event
.bssidx
;
3864 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__
, ifidx
));
3866 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3867 if (if_event
->event
.ifidx
> 0) {
3869 bzero(&info
, sizeof(info
));
3871 info
.bssidx
= bssidx
;
3872 info
.role
= if_event
->event
.role
;
3873 strncpy(info
.name
, if_event
->name
, IFNAMSIZ
);
3874 if (is_valid_ether_addr(if_event
->mac
)) {
3875 mac_addr
= if_event
->mac
;
3880 if (wl_cfg80211_post_ifcreate(dhd
->pub
.info
->iflist
[0]->net
,
3881 &info
, mac_addr
, NULL
, true) == NULL
) {
3882 /* Do the post interface create ops */
3883 DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
3888 /* This path is for non-android case */
3889 /* The interface name in host and in event msg are same */
3890 /* if name in event msg is used to create dongle if list on host */
3891 ndev
= dhd_allocate_if(&dhd
->pub
, ifidx
, if_event
->name
,
3892 if_event
->mac
, bssidx
, TRUE
, if_event
->name
);
3894 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__
));
3898 DHD_PERIM_UNLOCK(&dhd
->pub
);
3899 ret
= dhd_register_if(&dhd
->pub
, ifidx
, TRUE
);
3900 DHD_PERIM_LOCK(&dhd
->pub
);
3901 if (ret
!= BCME_OK
) {
3902 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__
));
3903 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
3906 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3908 #ifndef PCIE_FULL_DONGLE
3909 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3910 if (FW_SUPPORTED((&dhd
->pub
), ap
) && (if_event
->event
.role
!= WLC_E_IF_ROLE_STA
)) {
3912 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "ap_isolate", (char *)&var_int
, sizeof(var_int
),
3914 if (ret
!= BCME_OK
) {
3915 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__
));
3916 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
3919 #endif /* PCIE_FULL_DONGLE */
3922 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
3924 DHD_PERIM_UNLOCK(&dhd
->pub
);
3925 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3926 dhd_net_if_unlock_local(dhd
);
3930 dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
)
3932 dhd_info_t
*dhd
= handle
;
3934 dhd_if_event_t
*if_event
= event_info
;
3936 if (event
!= DHD_WQ_WORK_IF_DEL
) {
3937 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
3942 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
3947 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
3951 dhd_net_if_lock_local(dhd
);
3952 DHD_OS_WAKE_LOCK(&dhd
->pub
);
3953 DHD_PERIM_LOCK(&dhd
->pub
);
3955 ifidx
= if_event
->event
.ifidx
;
3956 DHD_TRACE(("Removing interface with idx %d\n", ifidx
));
3958 DHD_PERIM_UNLOCK(&dhd
->pub
);
3959 if (!dhd
->pub
.info
->iflist
[ifidx
]) {
3960 /* No matching netdev found */
3961 DHD_ERROR(("Netdev not found! Do nothing.\n"));
3964 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3965 if (if_event
->event
.ifidx
> 0) {
3966 /* Do the post interface del ops */
3967 if (wl_cfg80211_post_ifdel(dhd
->pub
.info
->iflist
[ifidx
]->net
,
3968 true, if_event
->event
.ifidx
) != 0) {
3969 DHD_TRACE(("Post ifdel ops failed. Returning \n"));
3974 /* For non-cfg80211 drivers */
3975 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
3976 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3979 DHD_PERIM_LOCK(&dhd
->pub
);
3980 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
3981 DHD_PERIM_UNLOCK(&dhd
->pub
);
3982 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3983 dhd_net_if_unlock_local(dhd
);
3987 dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
)
3989 dhd_info_t
*dhd
= handle
;
3990 dhd_if_t
*ifp
= event_info
;
3992 if (event
!= DHD_WQ_WORK_SET_MAC
) {
3993 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
3997 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4001 dhd_net_if_lock_local(dhd
);
4002 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4003 DHD_PERIM_LOCK(&dhd
->pub
);
4007 unsigned long flags
;
4009 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4010 in_ap
= (ap_net_dev
!= NULL
);
4011 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4014 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
4021 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4022 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4026 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__
));
4027 ifp
->set_macaddress
= FALSE
;
4028 if (_dhd_set_mac_address(dhd
, ifp
->idx
, ifp
->mac_addr
) == 0)
4029 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__
));
4031 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__
));
4034 DHD_PERIM_UNLOCK(&dhd
->pub
);
4035 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4036 dhd_net_if_unlock_local(dhd
);
4040 dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
)
4042 dhd_info_t
*dhd
= handle
;
4043 int ifidx
= (int)((long int)event_info
);
4044 dhd_if_t
*ifp
= NULL
;
4046 if (event
!= DHD_WQ_WORK_SET_MCAST_LIST
) {
4047 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
4052 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
4056 dhd_net_if_lock_local(dhd
);
4057 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4058 DHD_PERIM_LOCK(&dhd
->pub
);
4060 ifp
= dhd
->iflist
[ifidx
];
4062 if (ifp
== NULL
|| !dhd
->pub
.up
) {
4063 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
4070 unsigned long flags
;
4071 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4072 in_ap
= (ap_net_dev
!= NULL
);
4073 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4076 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
4078 ifp
->set_multicast
= FALSE
;
4086 #ifdef MCAST_LIST_ACCUMULATION
4088 #endif /* MCAST_LIST_ACCUMULATION */
4090 _dhd_set_multicast_list(dhd
, ifidx
);
4091 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__
, ifidx
));
4094 DHD_PERIM_UNLOCK(&dhd
->pub
);
4095 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4096 dhd_net_if_unlock_local(dhd
);
4100 dhd_set_mac_address(struct net_device
*dev
, void *addr
)
4104 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
4105 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
4109 ifidx
= dhd_net2idx(dhd
, dev
);
4110 if (ifidx
== DHD_BAD_IF
)
4113 dhdif
= dhd
->iflist
[ifidx
];
4115 dhd_net_if_lock_local(dhd
);
4116 memcpy(dhdif
->mac_addr
, sa
->sa_data
, ETHER_ADDR_LEN
);
4117 dhdif
->set_macaddress
= TRUE
;
4118 dhd_net_if_unlock_local(dhd
);
4119 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)dhdif
, DHD_WQ_WORK_SET_MAC
,
4120 dhd_set_mac_addr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
4125 dhd_set_multicast_list(struct net_device
*dev
)
4127 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
4130 ifidx
= dhd_net2idx(dhd
, dev
);
4131 if (ifidx
== DHD_BAD_IF
)
4134 dhd
->iflist
[ifidx
]->set_multicast
= TRUE
;
4135 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)((long int)ifidx
),
4136 DHD_WQ_WORK_SET_MCAST_LIST
, dhd_set_mcast_list_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
4139 #ifdef DHD_UCODE_DOWNLOAD
4140 /* Get ucode path */
4142 dhd_get_ucode_path(dhd_pub_t
*dhdp
)
4144 dhd_info_t
*dhd
= dhdp
->info
;
4145 return dhd
->uc_path
;
4147 #endif /* DHD_UCODE_DOWNLOAD */
4149 #ifdef PROP_TXSTATUS
4151 dhd_os_wlfc_block(dhd_pub_t
*pub
)
4153 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
4155 spin_lock_bh(&di
->wlfc_spinlock
);
4160 dhd_os_wlfc_unblock(dhd_pub_t
*pub
)
4162 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
4165 spin_unlock_bh(&di
->wlfc_spinlock
);
4169 #endif /* PROP_TXSTATUS */
4171 /* This routine do not support Packet chain feature, Currently tested for
4174 int dhd_sendup(dhd_pub_t
*dhdp
, int ifidx
, void *p
)
4176 struct sk_buff
*skb
;
4177 void *skbhead
= NULL
;
4178 void *skbprev
= NULL
;
4180 ASSERT(!PKTISCHAINED(p
));
4181 skb
= PKTTONATIVE(dhdp
->osh
, p
);
4183 ifp
= dhdp
->info
->iflist
[ifidx
];
4184 skb
->dev
= ifp
->net
;
4186 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
4188 if (in_interrupt()) {
4189 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4190 __FUNCTION__
, __LINE__
);
4193 if (dhdp
->info
->rxthread_enabled
) {
4197 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
4201 /* If the receive is not processed inside an ISR,
4202 * the softirqd must be woken explicitly to service
4203 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4204 * by netif_rx_ni(), but in earlier kernels, we need
4205 * to do it manually.
4207 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4208 __FUNCTION__
, __LINE__
);
4209 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4214 local_irq_save(flags
);
4216 local_irq_restore(flags
);
4217 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4221 if (dhdp
->info
->rxthread_enabled
&& skbhead
)
4222 dhd_sched_rxf(dhdp
, skbhead
);
4228 __dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
4231 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
4232 struct ether_header
*eh
= NULL
;
4233 #if defined(DHD_L2_FILTER)
4234 dhd_if_t
*ifp
= dhd_get_ifp(dhdp
, ifidx
);
4237 /* Reject if down */
4238 if (!dhdp
->up
|| (dhdp
->busstate
== DHD_BUS_DOWN
)) {
4239 /* free the packet here since the caller won't */
4240 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4244 #ifdef PCIE_FULL_DONGLE
4245 if (dhdp
->busstate
== DHD_BUS_SUSPEND
) {
4246 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
4247 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4248 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4251 return NETDEV_TX_BUSY
;
4252 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
4254 #endif /* PCIE_FULL_DONGLE */
4256 /* Reject if pktlen > MAX_MTU_SZ */
4257 if (PKTLEN(dhdp
->osh
, pktbuf
) > MAX_MTU_SZ
) {
4258 /* free the packet here since the caller won't */
4259 dhdp
->tx_big_packets
++;
4260 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4264 #ifdef DHD_L2_FILTER
4265 /* if dhcp_unicast is enabled, we need to convert the */
4266 /* broadcast DHCP ACK/REPLY packets to Unicast. */
4267 if (ifp
->dhcp_unicast
) {
4269 uint8
* ehptr
= NULL
;
4271 ret
= bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp
->osh
, pktbuf
, ifidx
, &mac_addr
);
4272 if (ret
== BCME_OK
) {
4273 /* if given mac address having valid entry in sta list
4274 * copy the given mac address, and return with BCME_OK
4276 if (dhd_find_sta(dhdp
, ifidx
, mac_addr
)) {
4277 ehptr
= PKTDATA(dhdp
->osh
, pktbuf
);
4278 bcopy(mac_addr
, ehptr
+ ETHER_DEST_OFFSET
, ETHER_ADDR_LEN
);
4283 if (ifp
->grat_arp
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
4284 if (bcm_l2_filter_gratuitous_arp(dhdp
->osh
, pktbuf
) == BCME_OK
) {
4285 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4290 if (ifp
->parp_enable
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
4291 ret
= dhd_l2_filter_pkt_handle(dhdp
, ifidx
, pktbuf
, TRUE
);
4293 /* Drop the packets if l2 filter has processed it already
4294 * otherwise continue with the normal path
4296 if (ret
== BCME_OK
) {
4297 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4301 #endif /* DHD_L2_FILTER */
4302 /* Update multicast statistic */
4303 if (PKTLEN(dhdp
->osh
, pktbuf
) >= ETHER_HDR_LEN
) {
4304 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
4305 eh
= (struct ether_header
*)pktdata
;
4307 if (ETHER_ISMULTI(eh
->ether_dhost
))
4308 dhdp
->tx_multicast
++;
4309 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_802_1X
) {
4310 #ifdef DHD_LOSSLESS_ROAMING
4311 uint8 prio
= (uint8
)PKTPRIO(pktbuf
);
4313 /* back up 802.1x's priority */
4314 dhdp
->prio_8021x
= prio
;
4315 #endif /* DHD_LOSSLESS_ROAMING */
4316 DBG_EVENT_LOG(dhdp
, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED
);
4317 atomic_inc(&dhd
->pend_8021x_cnt
);
4318 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
4319 wl_handle_wps_states(dhd_idx2net(dhdp
, ifidx
),
4320 pktdata
, PKTLEN(dhdp
->osh
, pktbuf
), TRUE
);
4321 #endif /* WL_CFG80211 && WL_WPS_SYNC */
4322 #if defined(DHD_8021X_DUMP)
4323 dhd_dump_eapol_4way_message(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4324 #endif /* DHD_8021X_DUMP */
4327 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) {
4328 #ifdef DHD_DHCP_DUMP
4329 dhd_dhcp_dump(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4330 #endif /* DHD_DHCP_DUMP */
4331 #ifdef DHD_ICMP_DUMP
4332 dhd_icmp_dump(dhd_ifname(dhdp
, ifidx
), pktdata
, TRUE
);
4333 #endif /* DHD_ICMP_DUMP */
4336 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4341 /* Look into the packet and update the packet priority */
4342 #ifndef PKTPRIO_OVERRIDE
4343 if (PKTPRIO(pktbuf
) == 0)
4344 #endif /* !PKTPRIO_OVERRIDE */
4346 #if defined(QOS_MAP_SET)
4347 pktsetprio_qms(pktbuf
, wl_get_up_table(dhdp
, ifidx
), FALSE
);
4349 pktsetprio(pktbuf
, FALSE
);
4350 #endif /* QOS_MAP_SET */
4352 #ifndef PKTPRIO_OVERRIDE
4354 /* Some protocols like OZMO use priority values from 256..263.
4355 * these are magic values to indicate a specific 802.1d priority.
4356 * make sure that priority field is in range of 0..7
4358 PKTSETPRIO(pktbuf
, PKTPRIO(pktbuf
) & 0x7);
4360 #endif /* !PKTPRIO_OVERRIDE */
4363 #ifdef SUPPORT_SET_TID
4364 dhd_set_tid_based_on_uid(dhdp
, pktbuf
);
4365 #endif /* SUPPORT_SET_TID */
4367 #ifdef PCIE_FULL_DONGLE
4369 * Lkup the per interface hash table, for a matching flowring. If one is not
4370 * available, allocate a unique flowid and add a flowring entry.
4371 * The found or newly created flowid is placed into the pktbuf's tag.
4373 ret
= dhd_flowid_update(dhdp
, ifidx
, dhdp
->flow_prio_map
[(PKTPRIO(pktbuf
))], pktbuf
);
4374 if (ret
!= BCME_OK
) {
4375 PKTCFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
4380 #ifdef PROP_TXSTATUS
4381 if (dhd_wlfc_is_supported(dhdp
)) {
4382 /* store the interface ID */
4383 DHD_PKTTAG_SETIF(PKTTAG(pktbuf
), ifidx
);
4385 /* store destination MAC in the tag as well */
4386 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf
), eh
->ether_dhost
);
4388 /* decide which FIFO this packet belongs to */
4389 if (ETHER_ISMULTI(eh
->ether_dhost
))
4390 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
4391 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), AC_COUNT
);
4393 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), WME_PRIO2AC(PKTPRIO(pktbuf
)));
4395 #endif /* PROP_TXSTATUS */
4397 /* If the protocol uses a data header, apply it */
4398 dhd_prot_hdrpush(dhdp
, ifidx
, pktbuf
);
4401 /* Use bus module to send data frame */
4402 #ifdef DYNAMIC_MUMIMO_CONTROL
4403 if (dhdp
->reassoc_mumimo_sw
&&
4404 dhd_check_eapol_4way_message(PKTDATA(dhdp
->osh
, pktbuf
)) == EAPOL_4WAY_M4
) {
4405 dhdp
->reassoc_mumimo_sw
= 0;
4406 DHD_ENABLE_RUNTIME_PM(dhdp
);
4408 #endif /* DYNAMIC_MUMIMO_CONTROL */
4409 #ifdef PROP_TXSTATUS
4411 if (dhd_wlfc_commit_packets(dhdp
, (f_commitpkt_t
)dhd_bus_txdata
,
4412 dhdp
->bus
, pktbuf
, TRUE
) == WLFC_UNSUPPORTED
) {
4413 /* non-proptxstatus way */
4415 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
4417 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
4418 #endif /* BCMPCIE */
4423 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
4425 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
4426 #endif /* BCMPCIE */
4427 #endif /* PROP_TXSTATUS */
4433 dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
4436 unsigned long flags
;
4439 DHD_GENERAL_LOCK(dhdp
, flags
);
4440 ifp
= dhd_get_ifp(dhdp
, ifidx
);
4441 if (!ifp
|| ifp
->del_in_progress
) {
4442 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
4443 __FUNCTION__
, ifp
, ifp
? ifp
->del_in_progress
: 0));
4444 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4445 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4448 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
4449 DHD_ERROR(("%s: returning as busstate=%d\n",
4450 __FUNCTION__
, dhdp
->busstate
));
4451 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4452 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4455 DHD_IF_SET_TX_ACTIVE(ifp
, DHD_TX_SEND_PKT
);
4456 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp
);
4457 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4459 #ifdef DHD_PCIE_RUNTIMEPM
4460 if (dhdpcie_runtime_bus_wake(dhdp
, FALSE
, __builtin_return_address(0))) {
4461 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
4462 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4466 #endif /* DHD_PCIE_RUNTIMEPM */
4468 DHD_GENERAL_LOCK(dhdp
, flags
);
4469 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
4470 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4471 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
4472 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp
);
4473 DHD_IF_CLR_TX_ACTIVE(ifp
, DHD_TX_SEND_PKT
);
4474 dhd_os_tx_completion_wake(dhdp
);
4475 dhd_os_busbusy_wake(dhdp
);
4476 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4477 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
4480 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4482 ret
= __dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
4484 #ifdef DHD_PCIE_RUNTIMEPM
4487 DHD_GENERAL_LOCK(dhdp
, flags
);
4488 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp
);
4489 DHD_IF_CLR_TX_ACTIVE(ifp
, DHD_TX_SEND_PKT
);
4490 dhd_os_tx_completion_wake(dhdp
);
4491 dhd_os_busbusy_wake(dhdp
);
4492 DHD_GENERAL_UNLOCK(dhdp
, flags
);
4496 #if defined(DHD_LB_TXP)
4499 dhd_lb_sendpkt(dhd_info_t
*dhd
, struct net_device
*net
,
4500 int ifidx
, void *skb
)
4502 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->tx_start_percpu_run_cnt
);
4504 /* If the feature is disabled run-time do TX from here */
4505 if (atomic_read(&dhd
->lb_txp_active
) == 0) {
4506 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txp_percpu_run_cnt
);
4507 return __dhd_sendpkt(&dhd
->pub
, ifidx
, skb
);
4510 /* Store the address of net device and interface index in the Packet tag */
4511 DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
), net
);
4512 DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
), ifidx
);
4514 /* Enqueue the skb into tx_pend_queue */
4515 skb_queue_tail(&dhd
->tx_pend_queue
, skb
);
4517 DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__
, skb
, net
));
4519 /* Dispatch the Tx job to be processed by the tx_tasklet */
4520 dhd_lb_tx_dispatch(&dhd
->pub
);
4522 return NETDEV_TX_OK
;
4524 #endif /* DHD_LB_TXP */
4527 dhd_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
4532 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4533 dhd_if_t
*ifp
= NULL
;
4535 unsigned long flags
;
4536 uint8 htsfdlystat_sz
= 0;
4538 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4540 if (dhd_query_bus_erros(&dhd
->pub
)) {
4544 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4545 DHD_BUS_BUSY_SET_IN_TX(&dhd
->pub
);
4546 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4548 #ifdef DHD_PCIE_RUNTIMEPM
4549 if (dhdpcie_runtime_bus_wake(&dhd
->pub
, FALSE
, dhd_start_xmit
)) {
4550 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
4551 /* stop the network queue temporarily until resume done */
4552 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4553 if (!dhdpcie_is_resume_done(&dhd
->pub
)) {
4554 dhd_bus_stop_queue(dhd
->pub
.bus
);
4556 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4557 dhd_os_busbusy_wake(&dhd
->pub
);
4558 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4559 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4562 return NETDEV_TX_BUSY
;
4565 #endif /* DHD_PCIE_RUNTIMEPM */
4567 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4568 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
4569 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4570 __FUNCTION__
, dhd
->pub
.busstate
, dhd
->pub
.dhd_bus_busy_state
));
4571 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4572 #ifdef PCIE_FULL_DONGLE
4573 /* Stop tx queues if suspend is in progress */
4574 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd
->pub
)) {
4575 dhd_bus_stop_queue(dhd
->pub
.bus
);
4577 #endif /* PCIE_FULL_DONGLE */
4578 dhd_os_busbusy_wake(&dhd
->pub
);
4579 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4580 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4583 return NETDEV_TX_BUSY
;
4587 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4588 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4590 #if defined(DHD_HANG_SEND_UP_TEST)
4591 if (dhd
->pub
.req_hang_type
== HANG_REASON_BUS_DOWN
) {
4592 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
4594 #endif /* DHD_HANG_SEND_UP_TEST */
4596 /* Reject if down */
4597 if (dhd
->pub
.hang_was_sent
|| DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd
->pub
)) {
4598 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
4599 __FUNCTION__
, dhd
->pub
.up
, dhd
->pub
.busstate
));
4600 netif_stop_queue(net
);
4601 /* Send Event when bus down detected during data session */
4602 if (dhd
->pub
.up
&& !dhd
->pub
.hang_was_sent
) {
4603 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__
));
4604 dhd
->pub
.hang_reason
= HANG_REASON_BUS_DOWN
;
4605 net_os_send_hang_message(net
);
4607 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4608 dhd_os_busbusy_wake(&dhd
->pub
);
4609 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4610 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4611 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4612 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4615 return NETDEV_TX_BUSY
;
4619 ifp
= DHD_DEV_IFP(net
);
4620 ifidx
= DHD_DEV_IFIDX(net
);
4621 if (!ifp
|| (ifidx
== DHD_BAD_IF
) ||
4622 ifp
->del_in_progress
) {
4623 DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
4624 __FUNCTION__
, ifidx
, ifp
, (ifp
? ifp
->del_in_progress
: 0)));
4625 netif_stop_queue(net
);
4626 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4627 dhd_os_busbusy_wake(&dhd
->pub
);
4628 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4629 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4630 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4631 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4634 return NETDEV_TX_BUSY
;
4638 DHD_IF_SET_TX_ACTIVE(ifp
, DHD_TX_START_XMIT
);
4639 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4641 ASSERT(ifidx
== dhd_net2idx(dhd
, net
));
4642 ASSERT((ifp
!= NULL
) && ((ifidx
< DHD_MAX_IFS
) && (ifp
== dhd
->iflist
[ifidx
])));
4644 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
4646 /* re-align socket buffer if "skb->data" is odd address */
4647 if (((unsigned long)(skb
->data
)) & 0x1) {
4648 unsigned char *data
= skb
->data
;
4649 uint32 length
= skb
->len
;
4650 PKTPUSH(dhd
->pub
.osh
, skb
, 1);
4651 memmove(skb
->data
, data
, length
);
4652 PKTSETLEN(dhd
->pub
.osh
, skb
, length
);
4655 datalen
= PKTLEN(dhd
->pub
.osh
, skb
);
4657 /* Make sure there's enough room for any header */
4658 if (skb_headroom(skb
) < dhd
->pub
.hdrlen
+ htsfdlystat_sz
) {
4659 struct sk_buff
*skb2
;
4661 DHD_INFO(("%s: insufficient headroom\n",
4662 dhd_ifname(&dhd
->pub
, ifidx
)));
4663 dhd
->pub
.tx_realloc
++;
4665 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
4666 skb2
= skb_realloc_headroom(skb
, dhd
->pub
.hdrlen
+ htsfdlystat_sz
);
4669 if ((skb
= skb2
) == NULL
) {
4670 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4671 dhd_ifname(&dhd
->pub
, ifidx
)));
4675 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
4678 /* Convert to packet */
4679 if (!(pktbuf
= PKTFRMNATIVE(dhd
->pub
.osh
, skb
))) {
4680 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4681 dhd_ifname(&dhd
->pub
, ifidx
)));
4682 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
4683 dev_kfree_skb_any(skb
);
4689 /* wet related packet proto manipulation should be done in DHD
4690 since dongle doesn't have complete payload
4692 if (WET_ENABLED(&dhd
->pub
) &&
4693 (dhd_wet_send_proc(dhd
->pub
.wet_info
, pktbuf
, &pktbuf
) < 0)) {
4694 DHD_INFO(("%s:%s: wet send proc failed\n",
4695 __FUNCTION__
, dhd_ifname(&dhd
->pub
, ifidx
)));
4696 PKTFREE(dhd
->pub
.osh
, pktbuf
, FALSE
);
4700 #endif /* DHD_WET */
4703 /* PSR related packet proto manipulation should be done in DHD
4704 * since dongle doesn't have complete payload
4706 if (PSR_ENABLED(&dhd
->pub
) &&
4707 (dhd_psta_proc(&dhd
->pub
, ifidx
, &pktbuf
, TRUE
) < 0)) {
4709 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__
,
4710 dhd_ifname(&dhd
->pub
, ifidx
)));
4712 #endif /* DHD_PSTA */
4714 #ifdef DHDTCPSYNC_FLOOD_BLK
4715 if (dhd_tcpdata_get_flag(&dhd
->pub
, pktbuf
) == FLAG_SYNCACK
) {
4716 ifp
->tsyncack_txed
++;
4718 #endif /* DHDTCPSYNC_FLOOD_BLK */
4720 #ifdef DHDTCPACK_SUPPRESS
4721 if (dhd
->pub
.tcpack_sup_mode
== TCPACK_SUP_HOLD
) {
4722 /* If this packet has been hold or got freed, just return */
4723 if (dhd_tcpack_hold(&dhd
->pub
, pktbuf
, ifidx
)) {
4728 /* If this packet has replaced another packet and got freed, just return */
4729 if (dhd_tcpack_suppress(&dhd
->pub
, pktbuf
)) {
4734 #endif /* DHDTCPACK_SUPPRESS */
4737 * If Load Balance is enabled queue the packet
4738 * else send directly from here.
4740 #if defined(DHD_LB_TXP)
4741 ret
= dhd_lb_sendpkt(dhd
, net
, ifidx
, pktbuf
);
4743 ret
= __dhd_sendpkt(&dhd
->pub
, ifidx
, pktbuf
);
4748 ifp
->stats
.tx_dropped
++;
4749 dhd
->pub
.tx_dropped
++;
4751 #ifdef PROP_TXSTATUS
4752 /* tx_packets counter can counted only when wlfc is disabled */
4753 if (!dhd_wlfc_is_supported(&dhd
->pub
))
4756 dhd
->pub
.tx_packets
++;
4757 ifp
->stats
.tx_packets
++;
4758 ifp
->stats
.tx_bytes
+= datalen
;
4762 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
4763 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd
->pub
);
4764 DHD_IF_CLR_TX_ACTIVE(ifp
, DHD_TX_START_XMIT
);
4765 dhd_os_tx_completion_wake(&dhd
->pub
);
4766 dhd_os_busbusy_wake(&dhd
->pub
);
4767 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
4768 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), lock_taken
);
4769 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4770 /* Return ok: we always eat the packet */
4771 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4774 return NETDEV_TX_OK
;
4778 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4779 void dhd_rx_wq_wakeup(struct work_struct
*ptr
)
4781 struct dhd_rx_tx_work
*work
;
4782 struct dhd_pub
* pub
;
4784 work
= container_of(ptr
, struct dhd_rx_tx_work
, work
);
4788 DHD_RPM(("%s: ENTER. \n", __FUNCTION__
));
4790 if (atomic_read(&pub
->block_bus
) || pub
->busstate
== DHD_BUS_DOWN
) {
4794 DHD_OS_WAKE_LOCK(pub
);
4795 if (pm_runtime_get_sync(dhd_bus_to_dev(pub
->bus
)) >= 0) {
4797 // do nothing but wakeup the bus.
4798 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub
->bus
));
4799 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub
->bus
));
4801 DHD_OS_WAKE_UNLOCK(pub
);
4805 void dhd_start_xmit_wq_adapter(struct work_struct
*ptr
)
4807 struct dhd_rx_tx_work
*work
;
4810 struct dhd_bus
* bus
;
4812 work
= container_of(ptr
, struct dhd_rx_tx_work
, work
);
4814 dhd
= DHD_DEV_INFO(work
->net
);
4818 if (atomic_read(&dhd
->pub
.block_bus
)) {
4819 kfree_skb(work
->skb
);
4821 dhd_netif_start_queue(bus
);
4825 if (pm_runtime_get_sync(dhd_bus_to_dev(bus
)) >= 0) {
4826 ret
= dhd_start_xmit(work
->skb
, work
->net
);
4827 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
4828 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
4831 dhd_netif_start_queue(bus
);
4834 netdev_err(work
->net
,
4835 "error: dhd_start_xmit():%d\n", ret
);
4839 dhd_start_xmit_wrapper(struct sk_buff
*skb
, struct net_device
*net
)
4841 struct dhd_rx_tx_work
*start_xmit_work
;
4843 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4845 if (dhd
->pub
.busstate
== DHD_BUS_SUSPEND
) {
4846 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__
));
4848 dhd_netif_stop_queue(dhd
->pub
.bus
);
4850 start_xmit_work
= (struct dhd_rx_tx_work
*)
4851 kmalloc(sizeof(*start_xmit_work
), GFP_ATOMIC
);
4853 if (!start_xmit_work
) {
4855 "error: failed to alloc start_xmit_work\n");
4860 INIT_WORK(&start_xmit_work
->work
, dhd_start_xmit_wq_adapter
);
4861 start_xmit_work
->skb
= skb
;
4862 start_xmit_work
->net
= net
;
4863 queue_work(dhd
->tx_wq
, &start_xmit_work
->work
);
4864 ret
= NET_XMIT_SUCCESS
;
4866 } else if (dhd
->pub
.busstate
== DHD_BUS_DATA
) {
4867 ret
= dhd_start_xmit(skb
, net
);
4869 /* when bus is down */
4877 dhd_bus_wakeup_work(dhd_pub_t
*dhdp
)
4879 struct dhd_rx_tx_work
*rx_work
;
4880 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
4882 rx_work
= kmalloc(sizeof(*rx_work
), GFP_ATOMIC
);
4884 DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__
));
4888 INIT_WORK(&rx_work
->work
, dhd_rx_wq_wakeup
);
4889 rx_work
->pub
= dhdp
;
4890 queue_work(dhd
->rx_wq
, &rx_work
->work
);
4893 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4895 dhd_txflowcontrol(dhd_pub_t
*dhdp
, int ifidx
, bool state
)
4897 struct net_device
*net
;
4898 dhd_info_t
*dhd
= dhdp
->info
;
4901 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4905 #ifdef DHD_LOSSLESS_ROAMING
4906 /* block flowcontrol during roaming */
4907 if ((dhdp
->dequeue_prec_map
== 1 << PRIO_8021D_NC
) && state
== ON
) {
4912 if (ifidx
== ALL_INTERFACES
) {
4913 /* Flow control on all active interfaces */
4914 dhdp
->txoff
= state
;
4915 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
4916 if (dhd
->iflist
[i
]) {
4917 net
= dhd
->iflist
[i
]->net
;
4919 netif_stop_queue(net
);
4921 netif_wake_queue(net
);
4925 if (dhd
->iflist
[ifidx
]) {
4926 net
= dhd
->iflist
[ifidx
]->net
;
4928 netif_stop_queue(net
);
4930 netif_wake_queue(net
);
4941 static const PKTTYPE_INFO packet_type_info
[] =
4943 { ETHER_TYPE_IP
, "IP" },
4944 { ETHER_TYPE_ARP
, "ARP" },
4945 { ETHER_TYPE_BRCM
, "BRCM" },
4946 { ETHER_TYPE_802_1X
, "802.1X" },
4948 { ETHER_TYPE_WAI
, "WAPI" },
4949 #endif /* BCMWAPI_WAI */
4953 static const char *_get_packet_type_str(uint16 type
)
4956 int n
= sizeof(packet_type_info
)/sizeof(packet_type_info
[1]) - 1;
4958 for (i
= 0; i
< n
; i
++) {
4959 if (packet_type_info
[i
].type
== type
)
4960 return packet_type_info
[i
].str
;
4963 return packet_type_info
[n
].str
;
4965 #endif /* DHD_RX_DUMP */
4967 #ifdef DHD_MCAST_REGEN
4969 * Description: This function is called to do the reverse translation
4971 * Input eh - pointer to the ethernet header
4974 dhd_mcast_reverse_translation(struct ether_header
*eh
)
4979 iph
= (uint8
*)eh
+ ETHER_HDR_LEN
;
4980 dest_ip
= ntoh32(*((uint32
*)(iph
+ IPV4_DEST_IP_OFFSET
)));
4982 /* Only IP packets are handled */
4983 if (eh
->ether_type
!= hton16(ETHER_TYPE_IP
))
4986 /* Non-IPv4 multicast packets are not handled */
4987 if (IP_VER(iph
) != IP_VER_4
)
4991 * The packet has a multicast IP and unicast MAC. That means
4992 * we have to do the reverse translation
4994 if (IPV4_ISMULTI(dest_ip
) && !ETHER_ISMULTI(&eh
->ether_dhost
)) {
4995 ETHER_FILL_MCAST_ADDR_FROM_IP(eh
->ether_dhost
, dest_ip
);
5001 #endif /* MCAST_REGEN */
5003 #ifdef SHOW_LOGTRACE
5005 dhd_netif_rx_ni(struct sk_buff
* skb
)
5007 /* Do not call netif_recieve_skb as this workqueue scheduler is
5008 * not from NAPI Also as we are not in INTR context, do not call
5009 * netif_rx, instead call netif_rx_ni (for kerenl >= 2.6) which
5010 * does netif_rx, disables irq, raise NET_IF_RX softirq and
5011 * enables interrupts back
5013 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5018 local_irq_save(flags
);
5020 local_irq_restore(flags
);
5021 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5025 dhd_event_logtrace_pkt_process(dhd_pub_t
*dhdp
, struct sk_buff
* skb
)
5027 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5030 bcm_event_msg_u_t evu
;
5032 void *pktdata
= NULL
;
5033 bcm_event_t
*pvt_data
;
5036 DHD_TRACE(("%s:Enter\n", __FUNCTION__
));
5038 /* In dhd_rx_frame, header is stripped using skb_pull
5039 * of size ETH_HLEN, so adjust pktlen accordingly
5041 pktlen
= skb
->len
+ ETH_HLEN
;
5043 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5044 pktdata
= (void *)skb_mac_header(skb
);
5046 pktdata
= (void *)skb
->mac
.raw
;
5047 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5049 ret
= wl_host_event_get_data(pktdata
, pktlen
, &evu
);
5051 if (ret
!= BCME_OK
) {
5052 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5053 __FUNCTION__
, ret
));
5057 datalen
= ntoh32(evu
.event
.datalen
);
5059 pvt_data
= (bcm_event_t
*)pktdata
;
5060 data
= &pvt_data
[1];
5062 dhd_dbg_trace_evnt_handler(dhdp
, data
, &dhd
->event_data
, datalen
);
5068 #define DHD_EVENT_LOGTRACE_BOUND 12
5069 #define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 1
5072 dhd_event_logtrace_process(struct work_struct
* work
)
5074 /* Ignore compiler warnings due to -Werror=cast-qual */
5075 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5076 #pragma GCC diagnostic push
5077 #pragma GCC diagnostic ignored "-Wcast-qual"
5079 struct delayed_work
*dw
= to_delayed_work(work
);
5080 struct dhd_info
*dhd
=
5081 container_of(dw
, struct dhd_info
, event_log_dispatcher_work
);
5082 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5083 #pragma GCC diagnostic pop
5087 struct sk_buff
*skb
;
5092 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
5099 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__
));
5103 qlen
= skb_queue_len(&dhd
->evt_trace_queue
);
5104 process_len
= MIN(qlen
, DHD_EVENT_LOGTRACE_BOUND
);
5106 /* Run while loop till bound is reached or skb queue is empty */
5107 while (process_len
--) {
5109 skb
= skb_dequeue(&dhd
->evt_trace_queue
);
5111 DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
5115 BCM_REFERENCE(ifid
);
5116 #ifdef PCIE_FULL_DONGLE
5117 /* Check if pkt is from INFO ring or WLC_E_TRACE */
5118 ifid
= DHD_PKTTAG_IFID((dhd_pkttag_fr_t
*)PKTTAG(skb
));
5119 if (ifid
== DHD_DUMMY_INFO_IF
) {
5120 /* Process logtrace from info rings */
5121 dhd_event_logtrace_infobuf_pkt_process(dhdp
, skb
, &dhd
->event_data
);
5123 #endif /* PCIE_FULL_DONGLE */
5125 /* Processing WLC_E_TRACE case OR non PCIE PCIE_FULL_DONGLE case */
5126 dhd_event_logtrace_pkt_process(dhdp
, skb
);
5129 /* Send packet up if logtrace_pkt_sendup is TRUE */
5130 if (dhdp
->logtrace_pkt_sendup
) {
5131 #ifdef DHD_USE_STATIC_CTRLBUF
5132 /* If bufs are allocated via static buf pool
5133 * and logtrace_pkt_sendup enabled, make a copy,
5134 * free the local one and send the copy up.
5136 void *npkt
= PKTDUP(dhdp
->osh
, skb
);
5137 /* Clone event and send it up */
5138 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5142 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
5143 /* Packet is already freed, go to next packet */
5146 #endif /* DHD_USE_STATIC_CTRLBUF */
5147 #ifdef PCIE_FULL_DONGLE
5148 /* For infobuf packets as if is DHD_DUMMY_INFO_IF,
5149 * to send skb to network layer, assign skb->dev with
5150 * Primary interface n/w device
5152 if (ifid
== DHD_DUMMY_INFO_IF
) {
5153 skb
= PKTTONATIVE(dhdp
->osh
, skb
);
5154 skb
->dev
= dhd
->iflist
[0]->net
;
5156 #endif /* PCIE_FULL_DONGLE */
5158 dhd_netif_rx_ni(skb
);
5160 /* Don't send up. Free up the packet. */
5161 #ifdef DHD_USE_STATIC_CTRLBUF
5162 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5164 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5165 #endif /* DHD_USE_STATIC_CTRLBUF */
5169 /* Reschedule the workqueue if more packets to be processed */
5170 if (qlen
>= DHD_EVENT_LOGTRACE_BOUND
) {
5171 schedule_delayed_work(&dhd
->event_log_dispatcher_work
,
5172 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS
));
5177 dhd_event_logtrace_enqueue(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
5179 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5181 #ifdef PCIE_FULL_DONGLE
5182 /* Add ifidx in the PKTTAG */
5183 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t
*)PKTTAG(pktbuf
), ifidx
);
5184 #endif /* PCIE_FULL_DONGLE */
5185 skb_queue_tail(&dhd
->evt_trace_queue
, pktbuf
);
5187 schedule_delayed_work(&dhd
->event_log_dispatcher_work
, 0);
5191 dhd_event_logtrace_flush_queue(dhd_pub_t
*dhdp
)
5193 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5194 struct sk_buff
*skb
;
5196 while ((skb
= skb_dequeue(&dhd
->evt_trace_queue
)) != NULL
) {
5197 #ifdef DHD_USE_STATIC_CTRLBUF
5198 PKTFREE_STATIC(dhdp
->osh
, skb
, FALSE
);
5200 PKTFREE(dhdp
->osh
, skb
, FALSE
);
5201 #endif /* DHD_USE_STATIC_CTRLBUF */
5204 #endif /* SHOW_LOGTRACE */
5206 /** Called when a frame is received by the dongle on interface 'ifidx' */
5208 dhd_rx_frame(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
, int numpkt
, uint8 chan
)
5210 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5211 struct sk_buff
*skb
;
5214 void *data
, *pnext
= NULL
;
5217 wl_event_msg_t event
;
5220 void *skbhead
= NULL
;
5221 void *skbprev
= NULL
;
5223 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5224 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5225 unsigned char *dump_data
;
5226 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5227 #ifdef DHD_MCAST_REGEN
5228 uint8 interface_role
;
5229 if_flow_lkup_t
*if_flow_lkup
;
5230 unsigned long flags
;
5232 #ifdef DHD_WAKE_STATUS
5234 wake_counts_t
*wcp
= NULL
;
5235 #endif /* DHD_WAKE_STATUS */
5237 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5239 for (i
= 0; pktbuf
&& i
< numpkt
; i
++, pktbuf
= pnext
) {
5240 struct ether_header
*eh
;
5242 pnext
= PKTNEXT(dhdp
->osh
, pktbuf
);
5243 PKTSETNEXT(dhdp
->osh
, pktbuf
, NULL
);
5245 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5246 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
5247 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5249 if (ifidx
== DHD_DUMMY_INFO_IF
) {
5250 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5251 * context in case of PCIe FD, in case of other bus this will be from
5252 * DPC context. If we get bunch of events from Dongle then printing all
5253 * of them from Tasklet/DPC context that too in data path is costly.
5254 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5255 * events with type WLC_E_TRACE.
5256 * We'll print this console logs from the WorkQueue context by enqueing SKB
5257 * here and Dequeuing will be done in WorkQueue and will be freed only if
5258 * logtrace_pkt_sendup is TRUE
5260 #ifdef SHOW_LOGTRACE
5261 dhd_event_logtrace_enqueue(dhdp
, ifidx
, pktbuf
);
5262 #else /* !SHOW_LOGTRACE */
5263 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
5264 * free the PKT here itself
5266 #ifdef DHD_USE_STATIC_CTRLBUF
5267 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5269 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5270 #endif /* DHD_USE_STATIC_CTRLBUF */
5271 #endif /* SHOW_LOGTRACE */
5274 #ifdef DHD_WAKE_STATUS
5275 pkt_wake
= dhd_bus_get_bus_wake(dhdp
);
5276 wcp
= dhd_bus_get_wakecount(dhdp
);
5278 /* If wakeinfo count buffer is null do not update wake count values */
5281 #endif /* DHD_WAKE_STATUS */
5283 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
5285 if (ifidx
>= DHD_MAX_IFS
) {
5286 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
5287 __FUNCTION__
, ifidx
));
5288 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_BRCM
) {
5289 #ifdef DHD_USE_STATIC_CTRLBUF
5290 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5292 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5293 #endif /* DHD_USE_STATIC_CTRLBUF */
5295 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5300 ifp
= dhd
->iflist
[ifidx
];
5302 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5304 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_BRCM
) {
5305 #ifdef DHD_USE_STATIC_CTRLBUF
5306 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5308 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5309 #endif /* DHD_USE_STATIC_CTRLBUF */
5311 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5316 /* Dropping only data packets before registering net device to avoid kernel panic */
5317 #ifndef PROP_TXSTATUS_VSDB
5318 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
) &&
5319 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
5321 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
|| !dhd
->pub
.up
) &&
5322 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
5323 #endif /* PROP_TXSTATUS_VSDB */
5325 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5327 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5331 #ifdef PROP_TXSTATUS
5332 if (dhd_wlfc_is_header_only_pkt(dhdp
, pktbuf
)) {
5333 /* WLFC may send header only packet when
5334 there is an urgent message but no packet to
5337 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5341 #ifdef DHD_L2_FILTER
5342 /* If block_ping is enabled drop the ping packet */
5343 if (ifp
->block_ping
) {
5344 if (bcm_l2_filter_block_ping(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5345 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5349 if (ifp
->grat_arp
&& DHD_IF_ROLE_STA(dhdp
, ifidx
)) {
5350 if (bcm_l2_filter_gratuitous_arp(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5351 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5355 if (ifp
->parp_enable
&& DHD_IF_ROLE_AP(dhdp
, ifidx
)) {
5356 int ret
= dhd_l2_filter_pkt_handle(dhdp
, ifidx
, pktbuf
, FALSE
);
5358 /* Drop the packets if l2 filter has processed it already
5359 * otherwise continue with the normal path
5361 if (ret
== BCME_OK
) {
5362 PKTCFREE(dhdp
->osh
, pktbuf
, TRUE
);
5366 if (ifp
->block_tdls
) {
5367 if (bcm_l2_filter_block_tdls(dhdp
->osh
, pktbuf
) == BCME_OK
) {
5368 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
5372 #endif /* DHD_L2_FILTER */
5374 #ifdef DHD_MCAST_REGEN
5375 DHD_FLOWID_LOCK(dhdp
->flowid_lock
, flags
);
5376 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
5377 ASSERT(if_flow_lkup
);
5379 interface_role
= if_flow_lkup
[ifidx
].role
;
5380 DHD_FLOWID_UNLOCK(dhdp
->flowid_lock
, flags
);
5382 if (ifp
->mcast_regen_bss_enable
&& (interface_role
!= WLC_E_IF_ROLE_WDS
) &&
5383 !DHD_IF_ROLE_AP(dhdp
, ifidx
) &&
5384 ETHER_ISUCAST(eh
->ether_dhost
)) {
5385 if (dhd_mcast_reverse_translation(eh
) == BCME_OK
) {
5387 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5388 if ((dhd_get_psta_mode(dhdp
) == DHD_MODE_PSTA
) ||
5389 (dhd_get_psta_mode(dhdp
) == DHD_MODE_PSR
)) {
5391 /* Let the primary in PSTA interface handle this
5392 * frame after unicast to Multicast conversion
5394 ifp
= dhd_get_ifp(dhdp
, 0);
5401 #endif /* MCAST_REGEN */
5403 #ifdef DHDTCPSYNC_FLOOD_BLK
5404 if (dhd_tcpdata_get_flag(dhdp
, pktbuf
) == FLAG_SYNC
) {
5408 u64 curr_time
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
5410 delta_sync
= ifp
->tsync_rcvd
- ifp
->tsyncack_txed
;
5411 delta_sec
= curr_time
- ifp
->last_sync
;
5412 if (delta_sec
> 1) {
5413 sync_per_sec
= delta_sync
/delta_sec
;
5414 if (sync_per_sec
> TCP_SYNC_FLOOD_LIMIT
) {
5415 schedule_work(&ifp
->blk_tsfl_work
);
5416 DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
5417 "sync recvied %d pkt/sec \n",
5418 ifidx
, sync_per_sec
));
5420 dhd_reset_tcpsync_info_by_ifp(ifp
);
5424 #endif /* DHDTCPSYNC_FLOOD_BLK */
5426 #ifdef DHDTCPACK_SUPPRESS
5427 dhd_tcpdata_info_get(dhdp
, pktbuf
);
5429 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
5432 skb
->dev
= ifp
->net
;
5434 /* wet related packet proto manipulation should be done in DHD
5435 * since dongle doesn't have complete payload
5437 if (WET_ENABLED(&dhd
->pub
) && (dhd_wet_recv_proc(dhd
->pub
.wet_info
,
5439 DHD_INFO(("%s:%s: wet recv proc failed\n",
5440 __FUNCTION__
, dhd_ifname(dhdp
, ifidx
)));
5442 #endif /* DHD_WET */
5445 if (PSR_ENABLED(dhdp
) &&
5446 (dhd_psta_proc(dhdp
, ifidx
, &pktbuf
, FALSE
) < 0)) {
5447 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__
,
5448 dhd_ifname(dhdp
, ifidx
)));
5450 #endif /* DHD_PSTA */
5452 #ifdef PCIE_FULL_DONGLE
5453 if ((DHD_IF_ROLE_AP(dhdp
, ifidx
) || DHD_IF_ROLE_P2PGO(dhdp
, ifidx
)) &&
5454 (!ifp
->ap_isolate
)) {
5455 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
5456 if (ETHER_ISUCAST(eh
->ether_dhost
)) {
5457 if (dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_dhost
)) {
5458 dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
5462 void *npktbuf
= NULL
;
5463 if ((ntoh16(eh
->ether_type
) != ETHER_TYPE_IAPP_L2_UPDATE
) &&
5464 (npktbuf
= PKTDUP(dhdp
->osh
, pktbuf
)) != NULL
) {
5465 dhd_sendpkt(dhdp
, ifidx
, npktbuf
);
5469 #endif /* PCIE_FULL_DONGLE */
5470 #ifdef DYNAMIC_MUMIMO_CONTROL
5471 if (dhdp
->reassoc_mumimo_sw
&& dhdp
->murx_block_eapol
&&
5472 dhd_check_eapol_4way_message((void *)(skb
->data
)) == EAPOL_4WAY_M1
) {
5473 DHD_ERROR(("%s: Reassoc is in progress..."
5474 " drop EAPOL M1 frame\n", __FUNCTION__
));
5475 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5478 #endif /* DYNAMIC_MUMIMO_CONTROL */
5480 /* Get the protocol, maintain skb around eth_type_trans()
5481 * The main reason for this hack is for the limitation of
5482 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
5483 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
5484 * coping of the packet coming from the network stack to add
5485 * BDC, Hardware header etc, during network interface registration
5486 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
5487 * for BDC, Hardware header etc. and not just the ETH_HLEN
5492 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5493 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5494 dump_data
= skb
->data
;
5495 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5497 protocol
= (skb
->data
[12] << 8) | skb
->data
[13];
5498 if (protocol
== ETHER_TYPE_802_1X
) {
5499 DBG_EVENT_LOG(dhdp
, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED
);
5500 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
5501 wl_handle_wps_states(ifp
->net
, dump_data
, len
, FALSE
);
5502 #endif /* WL_CFG80211 && WL_WPS_SYNC */
5503 #ifdef DHD_8021X_DUMP
5504 dhd_dump_eapol_4way_message(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5505 #endif /* DHD_8021X_DUMP */
5508 if (protocol
!= ETHER_TYPE_BRCM
&& protocol
== ETHER_TYPE_IP
) {
5509 #ifdef DHD_DHCP_DUMP
5510 dhd_dhcp_dump(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5511 #endif /* DHD_DHCP_DUMP */
5512 #ifdef DHD_ICMP_DUMP
5513 dhd_icmp_dump(dhd_ifname(dhdp
, ifidx
), dump_data
, FALSE
);
5514 #endif /* DHD_ICMP_DUMP */
5517 DHD_ERROR(("RX DUMP[%s] - %s\n",
5518 dhd_ifname(dhdp
, ifidx
), _get_packet_type_str(protocol
)));
5519 if (protocol
!= ETHER_TYPE_BRCM
) {
5520 if (dump_data
[0] == 0xFF) {
5521 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__
));
5523 if ((dump_data
[12] == 8) &&
5524 (dump_data
[13] == 6)) {
5525 DHD_ERROR(("%s: ARP %d\n",
5526 __FUNCTION__
, dump_data
[0x15]));
5528 } else if (dump_data
[0] & 1) {
5529 DHD_ERROR(("%s: MULTICAST: " MACDBG
"\n",
5530 __FUNCTION__
, MAC2STRDBG(dump_data
)));
5532 #ifdef DHD_RX_FULL_DUMP
5535 for (k
= 0; k
< skb
->len
; k
++) {
5536 DHD_ERROR(("%02X ", dump_data
[k
]));
5542 #endif /* DHD_RX_FULL_DUMP */
5544 #endif /* DHD_RX_DUMP */
5546 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5548 prhex("[wakepkt_dump]", (char*)dump_data
, MIN(len
, 32));
5550 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5552 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
5554 if (skb
->pkt_type
== PACKET_MULTICAST
) {
5555 dhd
->pub
.rx_multicast
++;
5556 ifp
->stats
.multicast
++;
5562 DHD_DBG_PKT_MON_RX(dhdp
, skb
);
5563 #ifdef DHD_PKT_LOGGING
5564 DHD_PKTLOG_RX(dhdp
, skb
);
5565 #endif /* DHD_PKT_LOGGING */
5566 /* Strip header, count, deliver upward */
5567 skb_pull(skb
, ETH_HLEN
);
5569 /* Process special event packets and then discard them */
5570 memset(&event
, 0, sizeof(event
));
5572 if (ntoh16(skb
->protocol
) == ETHER_TYPE_BRCM
) {
5573 bcm_event_msg_u_t evu
;
5577 ret_event
= wl_host_event_get_data(
5578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5579 skb_mac_header(skb
),
5582 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5585 if (ret_event
!= BCME_OK
) {
5586 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5587 __FUNCTION__
, ret_event
));
5588 #ifdef DHD_USE_STATIC_CTRLBUF
5589 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5591 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5596 memcpy(&event
, &evu
.event
, sizeof(wl_event_msg_t
));
5597 event_type
= ntoh32_ua((void *)&event
.event_type
);
5598 #ifdef SHOW_LOGTRACE
5599 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5600 * context in case of PCIe FD, in case of other bus this will be from
5601 * DPC context. If we get bunch of events from Dongle then printing all
5602 * of them from Tasklet/DPC context that too in data path is costly.
5603 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5604 * events with type WLC_E_TRACE.
5605 * We'll print this console logs from the WorkQueue context by enqueing SKB
5606 * here and Dequeuing will be done in WorkQueue and will be freed only if
5607 * logtrace_pkt_sendup is true
5609 if (event_type
== WLC_E_TRACE
) {
5610 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__
));
5611 dhd_event_logtrace_enqueue(dhdp
, ifidx
, pktbuf
);
5614 #endif /* SHOW_LOGTRACE */
5616 ret_event
= dhd_wl_host_event(dhd
, ifidx
,
5617 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5618 skb_mac_header(skb
),
5621 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5622 len
, &event
, &data
);
5624 wl_event_to_host_order(&event
);
5626 tout_ctrl
= DHD_PACKET_TIMEOUT_MS
;
5628 #if defined(PNO_SUPPORT)
5629 if (event_type
== WLC_E_PFN_NET_FOUND
) {
5630 /* enforce custom wake lock to garantee that Kernel not suspended */
5631 tout_ctrl
= CUSTOM_PNO_EVENT_LOCK_xTIME
* DHD_PACKET_TIMEOUT_MS
;
5633 #endif /* PNO_SUPPORT */
5635 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
5639 #ifdef DHD_WAKE_STATUS
5640 if (unlikely(pkt_wake
)) {
5641 #ifdef DHD_WAKE_EVENT_STATUS
5642 if (event
.event_type
< WLC_E_LAST
) {
5643 wcp
->rc_event
[event
.event_type
]++;
5647 #endif /* DHD_WAKE_EVENT_STATUS */
5649 #endif /* DHD_WAKE_STATUS */
5651 /* For delete virtual interface event, wl_host_event returns positive
5652 * i/f index, do not proceed. just free the pkt.
5654 if ((event_type
== WLC_E_IF
) && (ret_event
> 0)) {
5655 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
5657 #ifdef DHD_USE_STATIC_CTRLBUF
5658 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5660 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5666 * For the event packets, there is a possibility
5667 * of ifidx getting modifed.Thus update the ifp
5670 ASSERT(ifidx
< DHD_MAX_IFS
&& dhd
->iflist
[ifidx
]);
5671 ifp
= dhd
->iflist
[ifidx
];
5672 #ifndef PROP_TXSTATUS_VSDB
5673 if (!(ifp
&& ifp
->net
&& (ifp
->net
->reg_state
== NETREG_REGISTERED
)))
5675 if (!(ifp
&& ifp
->net
&& (ifp
->net
->reg_state
== NETREG_REGISTERED
) &&
5677 #endif /* PROP_TXSTATUS_VSDB */
5679 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
5681 #ifdef DHD_USE_STATIC_CTRLBUF
5682 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5684 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5689 if (dhdp
->wl_event_enabled
) {
5690 #ifdef DHD_USE_STATIC_CTRLBUF
5691 /* If event bufs are allocated via static buf pool
5692 * and wl events are enabled, make a copy, free the
5693 * local one and send the copy up.
5695 void *npkt
= PKTDUP(dhdp
->osh
, skb
);
5696 /* Clone event and send it up */
5697 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5701 DHD_ERROR(("skb clone failed. dropping event.\n"));
5704 #endif /* DHD_USE_STATIC_CTRLBUF */
5706 /* If event enabled not explictly set, drop events */
5707 #ifdef DHD_USE_STATIC_CTRLBUF
5708 PKTFREE_STATIC(dhdp
->osh
, pktbuf
, FALSE
);
5710 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
5711 #endif /* DHD_USE_STATIC_CTRLBUF */
5715 tout_rx
= DHD_PACKET_TIMEOUT_MS
;
5717 #ifdef PROP_TXSTATUS
5718 dhd_wlfc_save_rxpath_ac_time(dhdp
, (uint8
)PKTPRIO(skb
));
5719 #endif /* PROP_TXSTATUS */
5721 #ifdef DHD_WAKE_STATUS
5722 if (unlikely(pkt_wake
)) {
5724 #ifdef DHD_WAKE_RX_STATUS
5725 #define ETHER_ICMP6_HEADER 20
5726 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
5727 #define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
5728 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
5730 if (ntoh16(skb
->protocol
) == ETHER_TYPE_ARP
) /* ARP */
5732 if (dump_data
[0] == 0xFF) { /* Broadcast */
5734 } else if (dump_data
[0] & 0x01) { /* Multicast */
5736 if (ntoh16(skb
->protocol
) == ETHER_TYPE_IPV6
) {
5737 wcp
->rx_multi_ipv6
++;
5738 if ((skb
->len
> ETHER_ICMP6_HEADER
) &&
5739 (dump_data
[ETHER_ICMP6_HEADER
] == IPPROTO_ICMPV6
)) {
5741 if (skb
->len
> ETHER_ICMPV6_TYPE
) {
5742 switch (dump_data
[ETHER_ICMPV6_TYPE
]) {
5743 case NDISC_ROUTER_ADVERTISEMENT
:
5744 wcp
->rx_icmpv6_ra
++;
5746 case NDISC_NEIGHBOUR_ADVERTISEMENT
:
5747 wcp
->rx_icmpv6_na
++;
5749 case NDISC_NEIGHBOUR_SOLICITATION
:
5750 wcp
->rx_icmpv6_ns
++;
5755 } else if (dump_data
[2] == 0x5E) {
5756 wcp
->rx_multi_ipv4
++;
5758 wcp
->rx_multi_other
++;
5760 } else { /* Unicast */
5763 #undef ETHER_ICMP6_HEADER
5764 #undef ETHER_IPV6_SADDR
5765 #undef ETHER_IPV6_DAADR
5766 #undef ETHER_ICMPV6_TYPE
5767 #endif /* DHD_WAKE_RX_STATUS */
5770 #endif /* DHD_WAKE_STATUS */
5773 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
5774 ifp
->net
->last_rx
= jiffies
;
5775 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
5777 if (ntoh16(skb
->protocol
) != ETHER_TYPE_BRCM
) {
5778 dhdp
->dstats
.rx_bytes
+= skb
->len
;
5779 dhdp
->rx_packets
++; /* Local count */
5780 ifp
->stats
.rx_bytes
+= skb
->len
;
5781 ifp
->stats
.rx_packets
++;
5784 if (in_interrupt()) {
5785 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
5786 __FUNCTION__
, __LINE__
);
5787 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5788 #if defined(DHD_LB_RXP)
5789 netif_receive_skb(skb
);
5790 #else /* !defined(DHD_LB_RXP) */
5792 #endif /* !defined(DHD_LB_RXP) */
5793 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5795 if (dhd
->rxthread_enabled
) {
5799 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
5803 /* If the receive is not processed inside an ISR,
5804 * the softirqd must be woken explicitly to service
5805 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5806 * by netif_rx_ni(), but in earlier kernels, we need
5807 * to do it manually.
5809 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
5810 __FUNCTION__
, __LINE__
);
5812 #if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
5813 defined(ARGOS_NOTIFY_CB)
5814 argos_register_notifier_deinit();
5815 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
5816 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
5817 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
5818 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
5819 #if defined(DHD_LB_RXP)
5820 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5821 netif_receive_skb(skb
);
5822 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5823 #else /* !defined(DHD_LB_RXP) */
5824 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5825 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5827 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5830 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5832 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
5833 local_irq_save(flags
);
5835 local_irq_restore(flags
);
5836 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5837 #endif /* !defined(DHD_LB_RXP) */
5842 if (dhd
->rxthread_enabled
&& skbhead
)
5843 dhd_sched_rxf(dhdp
, skbhead
);
5845 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp
, tout_rx
);
5846 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp
, tout_ctrl
);
/*
 * dhd_event() - OS hook invoked when a dongle event packet arrives.
 * On Linux there is nothing to do here; events are dispatched elsewhere.
 * NOTE(review): reconstructed from extraction-garbled source text; the
 * `void` return type is inferred from the upstream dhd_linux.c signature.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Linux version has nothing to do */
	return;
}
5857 dhd_txcomplete(dhd_pub_t
*dhdp
, void *txp
, bool success
)
5859 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
5860 struct ether_header
*eh
;
5863 dhd_prot_hdrpull(dhdp
, NULL
, txp
, NULL
, NULL
);
5865 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, txp
);
5866 type
= ntoh16(eh
->ether_type
);
5868 if (type
== ETHER_TYPE_802_1X
) {
5869 atomic_dec(&dhd
->pend_8021x_cnt
);
5872 #ifdef PROP_TXSTATUS
5873 if (dhdp
->wlfc_state
&& (dhdp
->proptxstatus_mode
!= WLFC_FCMODE_NONE
)) {
5874 dhd_if_t
*ifp
= dhd
->iflist
[DHD_PKTTAG_IF(PKTTAG(txp
))];
5875 uint datalen
= PKTLEN(dhd
->pub
.osh
, txp
);
5878 dhd
->pub
.tx_packets
++;
5879 ifp
->stats
.tx_packets
++;
5880 ifp
->stats
.tx_bytes
+= datalen
;
5882 ifp
->stats
.tx_dropped
++;
5889 static struct net_device_stats
*
5890 dhd_get_stats(struct net_device
*net
)
5892 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
5895 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5898 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__
));
5902 ifp
= dhd_get_ifp_by_ndev(&dhd
->pub
, net
);
5904 /* return empty stats */
5905 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__
));
5910 /* Use the protocol to get dongle stats */
5911 dhd_prot_dstats(&dhd
->pub
);
5916 memset(&net
->stats
, 0, sizeof(net
->stats
));
5921 dhd_watchdog_thread(void *data
)
5923 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
5924 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
5925 /* This thread doesn't need any user-level access,
5926 * so get rid of all our resources
5928 if (dhd_watchdog_prio
> 0) {
5929 struct sched_param param
;
5930 param
.sched_priority
= (dhd_watchdog_prio
< MAX_RT_PRIO
)?
5931 dhd_watchdog_prio
:(MAX_RT_PRIO
-1);
5932 setScheduler(current
, SCHED_FIFO
, ¶m
);
5936 if (down_interruptible (&tsk
->sema
) == 0) {
5937 unsigned long flags
;
5938 unsigned long jiffies_at_start
= jiffies
;
5939 unsigned long time_lapse
;
5941 DHD_OS_WD_WAKE_LOCK(&dhd
->pub
);
5942 #endif /* BCMPCIE */
5944 SMP_RD_BARRIER_DEPENDS();
5945 if (tsk
->terminated
) {
5947 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
5948 #endif /* BCMPCIE */
5952 if (dhd
->pub
.dongle_reset
== FALSE
) {
5953 DHD_TIMER(("%s:\n", __FUNCTION__
));
5954 dhd_bus_watchdog(&dhd
->pub
);
5956 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5957 /* Count the tick for reference */
5959 #ifdef DHD_L2_FILTER
5960 dhd_l2_filter_watchdog(&dhd
->pub
);
5961 #endif /* DHD_L2_FILTER */
5962 time_lapse
= jiffies
- jiffies_at_start
;
5964 /* Reschedule the watchdog */
5965 if (dhd
->wd_timer_valid
) {
5966 mod_timer(&dhd
->timer
,
5968 msecs_to_jiffies(dhd_watchdog_ms
) -
5969 min(msecs_to_jiffies(dhd_watchdog_ms
), time_lapse
));
5971 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5974 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
5975 #endif /* BCMPCIE */
5981 complete_and_exit(&tsk
->completed
, 0);
5984 static void dhd_watchdog(ulong data
)
5986 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
5987 unsigned long flags
;
5989 if (dhd
->pub
.dongle_reset
) {
5993 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
5994 up(&dhd
->thr_wdt_ctl
.sema
);
5999 DHD_OS_WD_WAKE_LOCK(&dhd
->pub
);
6000 #endif /* BCMPCIE */
6001 /* Call the bus module watchdog */
6002 dhd_bus_watchdog(&dhd
->pub
);
6004 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6005 /* Count the tick for reference */
6008 #ifdef DHD_L2_FILTER
6009 dhd_l2_filter_watchdog(&dhd
->pub
);
6010 #endif /* DHD_L2_FILTER */
6011 /* Reschedule the watchdog */
6012 if (dhd
->wd_timer_valid
)
6013 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
6014 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6016 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
6017 #endif /* BCMPCIE */
6020 #ifdef DHD_PCIE_RUNTIMEPM
6022 dhd_rpm_state_thread(void *data
)
6024 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6025 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6028 if (down_interruptible (&tsk
->sema
) == 0) {
6029 unsigned long flags
;
6030 unsigned long jiffies_at_start
= jiffies
;
6031 unsigned long time_lapse
;
6033 SMP_RD_BARRIER_DEPENDS();
6034 if (tsk
->terminated
) {
6038 if (dhd
->pub
.dongle_reset
== FALSE
) {
6039 DHD_TIMER(("%s:\n", __FUNCTION__
));
6041 dhd_runtimepm_state(&dhd
->pub
);
6044 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6045 time_lapse
= jiffies
- jiffies_at_start
;
6047 /* Reschedule the watchdog */
6048 if (dhd
->rpm_timer_valid
) {
6049 mod_timer(&dhd
->rpm_timer
,
6051 msecs_to_jiffies(dhd_runtimepm_ms
) -
6052 min(msecs_to_jiffies(dhd_runtimepm_ms
),
6055 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6062 complete_and_exit(&tsk
->completed
, 0);
6065 static void dhd_runtimepm(ulong data
)
6067 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
6069 if (dhd
->pub
.dongle_reset
) {
6073 if (dhd
->thr_rpm_ctl
.thr_pid
>= 0) {
6074 up(&dhd
->thr_rpm_ctl
.sema
);
6079 void dhd_runtime_pm_disable(dhd_pub_t
*dhdp
)
6081 dhd_os_runtimepm_timer(dhdp
, 0);
6082 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
6083 DHD_ERROR(("DHD Runtime PM Disabled \n"));
6086 void dhd_runtime_pm_enable(dhd_pub_t
*dhdp
)
6088 if (dhd_get_idletime(dhdp
)) {
6089 dhd_os_runtimepm_timer(dhdp
, dhd_runtimepm_ms
);
6090 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6094 #endif /* DHD_PCIE_RUNTIMEPM */
6096 #ifdef ENABLE_ADAPTIVE_SCHED
6098 dhd_sched_policy(int prio
)
6100 struct sched_param param
;
6101 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH
) {
6102 param
.sched_priority
= 0;
6103 setScheduler(current
, SCHED_NORMAL
, ¶m
);
6105 if (get_scheduler_policy(current
) != SCHED_FIFO
) {
6106 param
.sched_priority
= (prio
< MAX_RT_PRIO
)? prio
: (MAX_RT_PRIO
-1);
6107 setScheduler(current
, SCHED_FIFO
, ¶m
);
6111 #endif /* ENABLE_ADAPTIVE_SCHED */
6112 #ifdef DEBUG_CPU_FREQ
6113 static int dhd_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
, void *data
)
6115 dhd_info_t
*dhd
= container_of(nb
, struct dhd_info
, freq_trans
);
6116 struct cpufreq_freqs
*freq
= data
;
6120 if (val
== CPUFREQ_POSTCHANGE
) {
6121 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6122 freq
->new, freq
->cpu
));
6123 *per_cpu_ptr(dhd
->new_freq
, freq
->cpu
) = freq
->new;
6129 #endif /* DEBUG_CPU_FREQ */
6131 dhd_dpc_thread(void *data
)
6133 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6134 !defined(CONFIG_SOC_EXYNOS7870)
6136 unsigned long flags
;
6137 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6138 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6139 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6141 /* This thread doesn't need any user-level access,
6142 * so get rid of all our resources
6144 if (dhd_dpc_prio
> 0)
6146 struct sched_param param
;
6147 param
.sched_priority
= (dhd_dpc_prio
< MAX_RT_PRIO
)?dhd_dpc_prio
:(MAX_RT_PRIO
-1);
6148 setScheduler(current
, SCHED_FIFO
, ¶m
);
6151 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6152 !defined(CONFIG_SOC_EXYNOS7870)
6153 if (!zalloc_cpumask_var(&dhd
->pub
.default_cpu_mask
, GFP_KERNEL
)) {
6154 DHD_ERROR(("dpc_thread, zalloc_cpumask_var error\n"));
6155 dhd
->pub
.affinity_isdpc
= FALSE
;
6157 if (!zalloc_cpumask_var(&dhd
->pub
.dpc_affinity_cpu_mask
, GFP_KERNEL
)) {
6158 DHD_ERROR(("dpc_thread, dpc_affinity_cpu_mask error\n"));
6159 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
6160 dhd
->pub
.affinity_isdpc
= FALSE
;
6162 cpumask_copy(dhd
->pub
.default_cpu_mask
, &hmp_slow_cpu_mask
);
6163 cpumask_or(dhd
->pub
.dpc_affinity_cpu_mask
,
6164 dhd
->pub
.dpc_affinity_cpu_mask
, cpumask_of(DPC_CPUCORE
));
6166 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6167 if ((ret
= argos_task_affinity_setup_label(current
, "WIFI",
6168 dhd
->pub
.dpc_affinity_cpu_mask
,
6169 dhd
->pub
.default_cpu_mask
)) < 0) {
6170 DHD_ERROR(("Failed to add CPU affinity(dpc) error=%d\n",
6172 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
6173 free_cpumask_var(dhd
->pub
.dpc_affinity_cpu_mask
);
6174 dhd
->pub
.affinity_isdpc
= FALSE
;
6176 unsigned int irq
= -1;
6178 if (dhdpcie_get_pcieirq(dhd
->pub
.bus
, &irq
)) {
6179 DHD_ERROR(("%s : Can't get interrupt number\n",
6182 #endif /* BCMPCIE */
6184 wifi_adapter_info_t
*adapter
= dhd
->adapter
;
6185 irq
= adapter
->irq_num
;
6186 #endif /* BCMSDIO */
6187 DHD_ERROR(("Argos set Completed : dpcthread\n"));
6188 set_irq_cpucore(irq
, dhd
->pub
.default_cpu_mask
,
6189 dhd
->pub
.dpc_affinity_cpu_mask
);
6190 dhd
->pub
.affinity_isdpc
= TRUE
;
6192 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6195 #else /* ARGOS_CPU_SCHEDULER */
6196 #ifdef CUSTOM_DPC_CPUCORE
6197 set_cpus_allowed_ptr(current
, cpumask_of(CUSTOM_DPC_CPUCORE
));
6199 #ifdef CUSTOM_SET_CPUCORE
6200 dhd
->pub
.current_dpc
= current
;
6201 #endif /* CUSTOM_SET_CPUCORE */
6202 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6203 /* Run until signal received */
6205 if (!binary_sema_down(tsk
)) {
6206 #ifdef ENABLE_ADAPTIVE_SCHED
6207 dhd_sched_policy(dhd_dpc_prio
);
6208 #endif /* ENABLE_ADAPTIVE_SCHED */
6209 SMP_RD_BARRIER_DEPENDS();
6210 if (tsk
->terminated
) {
6214 /* Call bus dpc unless it indicated down (then clean stop) */
6215 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
6216 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6217 int resched_cnt
= 0;
6218 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6219 dhd_os_wd_timer_extend(&dhd
->pub
, TRUE
);
6220 while (dhd_bus_dpc(dhd
->pub
.bus
)) {
6221 /* process all data */
6222 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6224 if (resched_cnt
> MAX_RESCHED_CNT
) {
6225 DHD_INFO(("%s Calling msleep to"
6226 "let other processes run. \n",
6228 dhd
->pub
.dhd_bug_on
= true;
6232 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6234 dhd_os_wd_timer_extend(&dhd
->pub
, FALSE
);
6235 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
6238 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
6239 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
6245 #ifdef ARGOS_CPU_SCHEDULER
6246 if (dhd
->pub
.affinity_isdpc
== TRUE
) {
6247 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
6248 free_cpumask_var(dhd
->pub
.dpc_affinity_cpu_mask
);
6249 dhd
->pub
.affinity_isdpc
= FALSE
;
6251 #endif /* ARGOS_CPU_SCHEDULER */
6252 complete_and_exit(&tsk
->completed
, 0);
6256 dhd_rxf_thread(void *data
)
6258 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
6259 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
6260 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6261 !defined(CONFIG_SOC_EXYNOS7870)
6263 unsigned long flags
;
6264 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && CONFIG_SOC_EXYNOS7870 */
6265 #if defined(WAIT_DEQUEUE)
6266 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
6267 ulong watchdogTime
= OSL_SYSUPTIME(); /* msec */
6269 dhd_pub_t
*pub
= &dhd
->pub
;
6271 /* This thread doesn't need any user-level access,
6272 * so get rid of all our resources
6274 if (dhd_rxf_prio
> 0)
6276 struct sched_param param
;
6277 param
.sched_priority
= (dhd_rxf_prio
< MAX_RT_PRIO
)?dhd_rxf_prio
:(MAX_RT_PRIO
-1);
6278 setScheduler(current
, SCHED_FIFO
, ¶m
);
6281 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6282 !defined(CONFIG_SOC_EXYNOS7870)
6283 if (!zalloc_cpumask_var(&dhd
->pub
.rxf_affinity_cpu_mask
, GFP_KERNEL
)) {
6284 DHD_ERROR(("rxthread zalloc_cpumask_var error\n"));
6285 dhd
->pub
.affinity_isrxf
= FALSE
;
6287 cpumask_or(dhd
->pub
.rxf_affinity_cpu_mask
, dhd
->pub
.rxf_affinity_cpu_mask
,
6288 cpumask_of(RXF_CPUCORE
));
6290 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
6291 if ((ret
= argos_task_affinity_setup_label(current
, "WIFI",
6292 dhd
->pub
.rxf_affinity_cpu_mask
, dhd
->pub
.default_cpu_mask
)) < 0) {
6293 DHD_ERROR(("Failed to add CPU affinity(rxf) error=%d\n", ret
));
6294 dhd
->pub
.affinity_isrxf
= FALSE
;
6295 free_cpumask_var(dhd
->pub
.rxf_affinity_cpu_mask
);
6297 DHD_ERROR(("RXthread affinity completed\n"));
6298 dhd
->pub
.affinity_isrxf
= TRUE
;
6300 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
6302 #else /* ARGOS_CPU_SCHEDULER */
6303 #ifdef CUSTOM_SET_CPUCORE
6304 dhd
->pub
.current_rxf
= current
;
6305 #endif /* CUSTOM_SET_CPUCORE */
6306 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6307 /* Run until signal received */
6309 if (down_interruptible(&tsk
->sema
) == 0) {
6311 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
6314 #ifdef ENABLE_ADAPTIVE_SCHED
6315 dhd_sched_policy(dhd_rxf_prio
);
6316 #endif /* ENABLE_ADAPTIVE_SCHED */
6318 SMP_RD_BARRIER_DEPENDS();
6320 if (tsk
->terminated
) {
6323 skb
= dhd_rxf_dequeue(pub
);
6329 void *skbnext
= PKTNEXT(pub
->osh
, skb
);
6330 PKTSETNEXT(pub
->osh
, skb
, NULL
);
6331 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6332 __FUNCTION__
, __LINE__
);
6333 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6337 local_irq_save(flags
);
6339 local_irq_restore(flags
);
6344 #if defined(WAIT_DEQUEUE)
6345 if (OSL_SYSUPTIME() - watchdogTime
> RXF_WATCHDOG_TIME
) {
6347 watchdogTime
= OSL_SYSUPTIME();
6351 DHD_OS_WAKE_UNLOCK(pub
);
6356 #ifdef ARGOS_CPU_SCHEDULER
6357 if (dhd
->pub
.affinity_isrxf
== TRUE
) {
6358 free_cpumask_var(dhd
->pub
.rxf_affinity_cpu_mask
);
6359 dhd
->pub
.affinity_isrxf
= FALSE
;
6361 #endif /* ARGOS_CPU_SCHEDULER */
6362 complete_and_exit(&tsk
->completed
, 0);
6366 void dhd_dpc_enable(dhd_pub_t
*dhdp
)
6368 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
6371 if (!dhdp
|| !dhdp
->info
)
6374 #endif /* DHD_LB_RXP || DHD_LB_TXP */
6377 __skb_queue_head_init(&dhd
->rx_pend_queue
);
6378 #endif /* DHD_LB_RXP */
6381 skb_queue_head_init(&dhd
->tx_pend_queue
);
6382 #endif /* DHD_LB_TXP */
6384 #endif /* BCMPCIE */
6388 dhd_dpc_kill(dhd_pub_t
*dhdp
)
6402 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
6403 tasklet_kill(&dhd
->tasklet
);
6404 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__
));
6409 cancel_work_sync(&dhd
->rx_napi_dispatcher_work
);
6410 __skb_queue_purge(&dhd
->rx_pend_queue
);
6411 #endif /* DHD_LB_RXP */
6413 cancel_work_sync(&dhd
->tx_dispatcher_work
);
6414 skb_queue_purge(&dhd
->tx_pend_queue
);
6415 #endif /* DHD_LB_TXP */
6417 /* Kill the Load Balancing Tasklets */
6418 #if defined(DHD_LB_TXC)
6419 tasklet_kill(&dhd
->tx_compl_tasklet
);
6420 #endif /* DHD_LB_TXC */
6421 #if defined(DHD_LB_RXC)
6422 tasklet_kill(&dhd
->rx_compl_tasklet
);
6423 #endif /* DHD_LB_RXC */
6424 #if defined(DHD_LB_TXP)
6425 tasklet_kill(&dhd
->tx_tasklet
);
6426 #endif /* DHD_LB_TXP */
6431 dhd_dpc_tasklet_kill(dhd_pub_t
*dhdp
)
6445 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
6446 tasklet_kill(&dhd
->tasklet
);
6449 #endif /* BCMPCIE */
6456 dhd
= (dhd_info_t
*)data
;
6458 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
6459 * down below , wake lock is set,
6460 * the tasklet is initialized in dhd_attach()
6462 /* Call bus dpc unless it indicated down (then clean stop) */
6463 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
6464 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6465 DHD_LB_STATS_INCR(dhd
->dhd_dpc_cnt
);
6466 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6467 if (dhd_bus_dpc(dhd
->pub
.bus
)) {
6468 tasklet_schedule(&dhd
->tasklet
);
6471 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
6476 dhd_sched_dpc(dhd_pub_t
*dhdp
)
6478 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6480 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
6481 DHD_OS_WAKE_LOCK(dhdp
);
6482 /* If the semaphore does not get up,
6483 * wake unlock should be done here
6485 if (!binary_sema_up(&dhd
->thr_dpc_ctl
)) {
6486 DHD_OS_WAKE_UNLOCK(dhdp
);
6490 dhd_bus_set_dpc_sched_time(dhdp
);
6491 tasklet_schedule(&dhd
->tasklet
);
6496 dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
)
6498 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6500 DHD_OS_WAKE_LOCK(dhdp
);
6502 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6504 if (dhd_rxf_enqueue(dhdp
, skb
) == BCME_OK
)
6507 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
6508 up(&dhd
->thr_rxf_ctl
.sema
);
6513 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6514 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6517 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6519 dhd_toe_get(dhd_info_t
*dhd
, int ifidx
, uint32
*toe_ol
)
6524 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe_ol", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
6528 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd
->pub
,
6533 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6537 memcpy(toe_ol
, buf
, sizeof(uint32
));
6541 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6543 dhd_toe_set(dhd_info_t
*dhd
, int ifidx
, uint32 toe_ol
)
6547 /* Set toe_ol as requested */
6548 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe_ol", (char *)&toe_ol
, sizeof(toe_ol
), NULL
, 0, TRUE
);
6550 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6551 dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6555 /* Enable toe globally only if any components are enabled. */
6556 toe
= (toe_ol
!= 0);
6557 ret
= dhd_iovar(&dhd
->pub
, ifidx
, "toe", (char *)&toe
, sizeof(toe
), NULL
, 0, TRUE
);
6559 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
6567 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6568 void dhd_set_scb_probe(dhd_pub_t
*dhd
)
6570 wl_scb_probe_t scb_probe
;
6571 char iovbuf
[WL_EVENTING_MASK_LEN
+ sizeof(wl_scb_probe_t
)];
6574 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
6578 ret
= dhd_iovar(dhd
, 0, "scb_probe", NULL
, 0, iovbuf
, sizeof(iovbuf
), FALSE
);
6580 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__
));
6583 memcpy(&scb_probe
, iovbuf
, sizeof(wl_scb_probe_t
));
6585 scb_probe
.scb_max_probe
= NUM_SCB_MAX_PROBE
;
6587 ret
= dhd_iovar(dhd
, 0, "scb_probe", (char *)&scb_probe
, sizeof(wl_scb_probe_t
), NULL
, 0,
6590 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__
));
6594 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6596 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6598 dhd_ethtool_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*info
)
6600 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
6602 snprintf(info
->driver
, sizeof(info
->driver
), "wl");
6603 snprintf(info
->version
, sizeof(info
->version
), "%lu", dhd
->pub
.drv_version
);
6606 struct ethtool_ops dhd_ethtool_ops
= {
6607 .get_drvinfo
= dhd_ethtool_get_drvinfo
6609 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6611 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
6613 dhd_ethtool(dhd_info_t
*dhd
, void *uaddr
)
6615 struct ethtool_drvinfo info
;
6616 char drvname
[sizeof(info
.driver
)];
6619 struct ethtool_value edata
;
6620 uint32 toe_cmpnt
, csum_dir
;
6624 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
6626 /* all ethtool calls start with a cmd word */
6627 if (copy_from_user(&cmd
, uaddr
, sizeof (uint32
)))
6631 case ETHTOOL_GDRVINFO
:
6632 /* Copy out any request driver name */
6633 if (copy_from_user(&info
, uaddr
, sizeof(info
)))
6635 strncpy(drvname
, info
.driver
, sizeof(info
.driver
));
6636 drvname
[sizeof(info
.driver
)-1] = '\0';
6638 /* clear struct for return */
6639 memset(&info
, 0, sizeof(info
));
6642 /* if dhd requested, identify ourselves */
6643 if (strcmp(drvname
, "?dhd") == 0) {
6644 snprintf(info
.driver
, sizeof(info
.driver
), "dhd");
6645 strncpy(info
.version
, EPI_VERSION_STR
, sizeof(info
.version
) - 1);
6646 info
.version
[sizeof(info
.version
) - 1] = '\0';
6649 /* otherwise, require dongle to be up */
6650 else if (!dhd
->pub
.up
) {
6651 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__
));
6655 /* finally, report dongle driver type */
6656 else if (dhd
->pub
.iswl
)
6657 snprintf(info
.driver
, sizeof(info
.driver
), "wl");
6659 snprintf(info
.driver
, sizeof(info
.driver
), "xx");
6661 snprintf(info
.version
, sizeof(info
.version
), "%lu", dhd
->pub
.drv_version
);
6662 if (copy_to_user(uaddr
, &info
, sizeof(info
)))
6664 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__
,
6665 (int)sizeof(drvname
), drvname
, info
.driver
));
6669 /* Get toe offload components from dongle */
6670 case ETHTOOL_GRXCSUM
:
6671 case ETHTOOL_GTXCSUM
:
6672 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
6675 csum_dir
= (cmd
== ETHTOOL_GTXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
6678 edata
.data
= (toe_cmpnt
& csum_dir
) ? 1 : 0;
6680 if (copy_to_user(uaddr
, &edata
, sizeof(edata
)))
6684 /* Set toe offload components in dongle */
6685 case ETHTOOL_SRXCSUM
:
6686 case ETHTOOL_STXCSUM
:
6687 if (copy_from_user(&edata
, uaddr
, sizeof(edata
)))
6690 /* Read the current settings, update and write back */
6691 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
6694 csum_dir
= (cmd
== ETHTOOL_STXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
6696 if (edata
.data
!= 0)
6697 toe_cmpnt
|= csum_dir
;
6699 toe_cmpnt
&= ~csum_dir
;
6701 if ((ret
= dhd_toe_set(dhd
, 0, toe_cmpnt
)) < 0)
6704 /* If setting TX checksum mode, tell Linux the new mode */
6705 if (cmd
== ETHTOOL_STXCSUM
) {
6707 dhd
->iflist
[0]->net
->features
|= NETIF_F_IP_CSUM
;
6709 dhd
->iflist
[0]->net
->features
&= ~NETIF_F_IP_CSUM
;
6721 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
6723 static bool dhd_check_hang(struct net_device
*net
, dhd_pub_t
*dhdp
, int error
)
6726 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
6733 #if !defined(BCMPCIE)
6734 if (dhdp
->info
->thr_dpc_ctl
.thr_pid
< 0) {
6735 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__
));
6740 if ((error
== -ETIMEDOUT
) || (error
== -EREMOTEIO
) ||
6741 ((dhdp
->busstate
== DHD_BUS_DOWN
) && (!dhdp
->dongle_reset
))) {
6743 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
6744 __FUNCTION__
, dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
,
6745 dhdp
->d3ackcnt_timeout
, error
, dhdp
->busstate
));
6747 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__
,
6748 dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
, error
, dhdp
->busstate
));
6749 #endif /* BCMPCIE */
6750 if (dhdp
->hang_reason
== 0) {
6751 if (dhdp
->dongle_trap_occured
) {
6752 dhdp
->hang_reason
= HANG_REASON_DONGLE_TRAP
;
6754 } else if (dhdp
->d3ackcnt_timeout
) {
6755 dhdp
->hang_reason
= dhdp
->is_sched_error
?
6756 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR
:
6757 HANG_REASON_D3_ACK_TIMEOUT
;
6758 #endif /* BCMPCIE */
6760 dhdp
->hang_reason
= dhdp
->is_sched_error
?
6761 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR
:
6762 HANG_REASON_IOCTL_RESP_TIMEOUT
;
6765 net_os_send_hang_message(net
);
6773 dhd_monitor_enabled(dhd_pub_t
*dhd
, int ifidx
)
6775 return (dhd
->info
->monitor_type
!= 0);
6779 dhd_rx_mon_pkt(dhd_pub_t
*dhdp
, host_rxbuf_cmpl_t
* msg
, void *pkt
, int ifidx
)
6781 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6783 uint8 amsdu_flag
= (msg
->flags
& BCMPCIE_PKT_FLAGS_MONITOR_MASK
) >>
6784 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT
;
6785 switch (amsdu_flag
) {
6786 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU
:
6788 if (!dhd
->monitor_skb
) {
6789 if ((dhd
->monitor_skb
= PKTTONATIVE(dhdp
->osh
, pkt
))
6793 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
6794 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
6796 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6797 dhd
->monitor_skb
= NULL
;
6800 dhd
->monitor_skb
->protocol
=
6801 eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
6802 dhd
->monitor_len
= 0;
6805 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT
:
6806 if (!dhd
->monitor_skb
) {
6807 if ((dhd
->monitor_skb
= dev_alloc_skb(MAX_MON_PKT_SIZE
))
6810 dhd
->monitor_len
= 0;
6812 if (dhd
->monitor_type
&& dhd
->monitor_dev
)
6813 dhd
->monitor_skb
->dev
= dhd
->monitor_dev
;
6815 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6816 dev_kfree_skb(dhd
->monitor_skb
);
6819 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
),
6820 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
6821 dhd
->monitor_len
= PKTLEN(dhdp
->osh
, pkt
);
6822 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6825 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT
:
6826 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
) + dhd
->monitor_len
,
6827 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
6828 dhd
->monitor_len
+= PKTLEN(dhdp
->osh
, pkt
);
6829 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6832 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT
:
6833 memcpy(PKTDATA(dhdp
->osh
, dhd
->monitor_skb
) + dhd
->monitor_len
,
6834 PKTDATA(dhdp
->osh
, pkt
), PKTLEN(dhdp
->osh
, pkt
));
6835 dhd
->monitor_len
+= PKTLEN(dhdp
->osh
, pkt
);
6836 PKTFREE(dhdp
->osh
, pkt
, FALSE
);
6837 skb_put(dhd
->monitor_skb
, dhd
->monitor_len
);
6838 dhd
->monitor_skb
->protocol
=
6839 eth_type_trans(dhd
->monitor_skb
, dhd
->monitor_skb
->dev
);
6840 dhd
->monitor_len
= 0;
6845 if (in_interrupt()) {
6846 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
6847 __FUNCTION__
, __LINE__
);
6848 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6849 netif_rx(dhd
->monitor_skb
);
6850 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6852 /* If the receive is not processed inside an ISR,
6853 * the softirqd must be woken explicitly to service
6854 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6855 * by netif_rx_ni(), but in earlier kernels, we need
6856 * to do it manually.
6858 bcm_object_trace_opr(dhd
->monitor_skb
, BCM_OBJDBG_REMOVE
,
6859 __FUNCTION__
, __LINE__
);
6861 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6862 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6863 netif_rx_ni(dhd
->monitor_skb
);
6864 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6867 DHD_PERIM_UNLOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6868 netif_rx(dhd
->monitor_skb
);
6869 DHD_PERIM_LOCK_ALL((dhd
->fwder_unit
% FWDER_MAX_UNIT
));
6870 local_irq_save(flags
);
6872 local_irq_restore(flags
);
6873 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6876 dhd
->monitor_skb
= NULL
;
6879 typedef struct dhd_mon_dev_priv
{
6880 struct net_device_stats stats
;
6881 } dhd_mon_dev_priv_t
;
6883 #define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
6884 #define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
6885 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
6888 dhd_monitor_start(struct sk_buff
*skb
, struct net_device
*dev
)
6890 PKTFREE(NULL
, skb
, FALSE
);
6894 #if defined(BT_OVER_SDIO)
6897 dhdsdio_bus_usr_cnt_inc(dhd_pub_t
*dhdp
)
6899 dhdp
->info
->bus_user_count
++;
6903 dhdsdio_bus_usr_cnt_dec(dhd_pub_t
*dhdp
)
6905 dhdp
->info
->bus_user_count
--;
6909 * Success: Returns 0
6910 * Failure: Returns -1 or errono code
6913 dhd_bus_get(wlan_bt_handle_t handle
, bus_owner_t owner
)
6915 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
6916 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6919 mutex_lock(&dhd
->bus_user_lock
);
6920 ++dhd
->bus_user_count
;
6921 if (dhd
->bus_user_count
< 0) {
6922 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__
));
6927 if (dhd
->bus_user_count
== 1) {
6929 dhd
->pub
.hang_was_sent
= 0;
6931 /* First user, turn on WL_REG, start the bus */
6932 DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__
));
6934 if (!wifi_platform_set_power(dhd
->adapter
, TRUE
, WIFI_TURNON_DELAY
)) {
6936 ret
= dhd_bus_resume(dhdp
, 0);
6938 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
6939 __FUNCTION__
, ret
));
6944 dhd_update_fw_nv_path(dhd
);
6945 /* update firmware and nvram path to sdio bus */
6946 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
6947 dhd
->fw_path
, dhd
->nv_path
);
6948 /* download the firmware, Enable F2 */
6949 /* TODO: Should be done only in case of FW switch */
6950 ret
= dhd_bus_devreset(dhdp
, FALSE
);
6951 dhd_bus_resume(dhdp
, 1);
6953 if (dhd_sync_with_dongle(&dhd
->pub
) < 0) {
6954 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__
));
6958 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__
, ret
));
6961 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
6962 __FUNCTION__
, dhd
->bus_user_count
));
6965 mutex_unlock(&dhd
->bus_user_lock
);
6968 EXPORT_SYMBOL(dhd_bus_get
);
6971 * Success: Returns 0
6972 * Failure: Returns -1 or errono code
6975 dhd_bus_put(wlan_bt_handle_t handle
, bus_owner_t owner
)
6977 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
6978 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
6980 BCM_REFERENCE(owner
);
6982 mutex_lock(&dhd
->bus_user_lock
);
6983 --dhd
->bus_user_count
;
6984 if (dhd
->bus_user_count
< 0) {
6985 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__
));
6986 dhd
->bus_user_count
= 0;
6991 if (dhd
->bus_user_count
== 0) {
6992 /* Last user, stop the bus and turn Off WL_REG */
6993 DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
6995 #ifdef PROP_TXSTATUS
6996 if (dhd
->pub
.wlfc_enabled
) {
6997 dhd_wlfc_deinit(&dhd
->pub
);
6999 #endif /* PROP_TXSTATUS */
7001 if (dhd
->pub
.pno_state
) {
7002 dhd_pno_deinit(&dhd
->pub
);
7004 #endif /* PNO_SUPPORT */
7006 if (dhd
->pub
.rtt_state
) {
7007 dhd_rtt_deinit(&dhd
->pub
);
7009 #endif /* RTT_SUPPORT */
7010 ret
= dhd_bus_devreset(dhdp
, TRUE
);
7012 dhd_bus_suspend(dhdp
);
7013 wifi_platform_set_power(dhd
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
7016 DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
7017 __FUNCTION__
, dhd
->bus_user_count
));
7020 mutex_unlock(&dhd
->bus_user_lock
);
7023 EXPORT_SYMBOL(dhd_bus_put
);
7026 dhd_net_bus_get(struct net_device
*dev
)
7028 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
7029 return dhd_bus_get(&dhd
->pub
, WLAN_MODULE
);
7033 dhd_net_bus_put(struct net_device
*dev
)
7035 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
7036 return dhd_bus_put(&dhd
->pub
, WLAN_MODULE
);
7040 * Function to enable the Bus Clock
7041 * Returns BCME_OK on success and BCME_xxx on failure
7043 * This function is not callable from non-sleepable context
7045 int dhd_bus_clk_enable(wlan_bt_handle_t handle
, bus_owner_t owner
)
7047 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7051 dhd_os_sdlock(dhdp
);
7053 * The second argument is TRUE, that means, we expect
7054 * the function to "wait" until the clocks are really
7057 ret
= __dhdsdio_clk_enable(dhdp
->bus
, owner
, TRUE
);
7058 dhd_os_sdunlock(dhdp
);
7062 EXPORT_SYMBOL(dhd_bus_clk_enable
);
7065 * Function to disable the Bus Clock
7066 * Returns BCME_OK on success and BCME_xxx on failure
7068 * This function is not callable from non-sleepable context
7070 int dhd_bus_clk_disable(wlan_bt_handle_t handle
, bus_owner_t owner
)
7072 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7076 dhd_os_sdlock(dhdp
);
7078 * The second argument is TRUE, that means, we expect
7079 * the function to "wait" until the clocks are really
7082 ret
= __dhdsdio_clk_disable(dhdp
->bus
, owner
, TRUE
);
7083 dhd_os_sdunlock(dhdp
);
7087 EXPORT_SYMBOL(dhd_bus_clk_disable
);
7090 * Function to reset bt_use_count counter to zero.
7092 * This function is not callable from non-sleepable context
7094 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle
)
7096 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7098 /* take the lock and reset bt use count */
7099 dhd_os_sdlock(dhdp
);
7100 dhdsdio_reset_bt_use_count(dhdp
->bus
);
7101 dhd_os_sdunlock(dhdp
);
7103 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count
);
7105 void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle
)
7107 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
7108 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7110 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7111 dhdp
->hang_was_sent
= 0;
7113 dhd_os_send_hang_message(&dhd
->pub
);
7115 DHD_ERROR(("%s: unsupported\n", __FUNCTION__
));
7118 EXPORT_SYMBOL(dhd_bus_retry_hang_recovery
);
7120 #endif /* BT_OVER_SDIO */
7123 dhd_monitor_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
7128 static struct net_device_stats
*
7129 dhd_monitor_get_stats(struct net_device
*dev
)
7131 return &DHD_MON_DEV_STATS(dev
);
7134 static const struct net_device_ops netdev_monitor_ops
=
7136 .ndo_start_xmit
= dhd_monitor_start
,
7137 .ndo_get_stats
= dhd_monitor_get_stats
,
7138 .ndo_do_ioctl
= dhd_monitor_ioctl
7142 dhd_add_monitor_if(dhd_info_t
*dhd
)
7144 struct net_device
*dev
;
7146 uint32 scan_suppress
= FALSE
;
7150 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
7154 if (dhd
->monitor_dev
) {
7155 DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__
));
7159 dev
= alloc_etherdev(DHD_MON_DEV_PRIV_SIZE
);
7161 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__
));
7165 devname
= "radiotap";
7167 snprintf(dev
->name
, sizeof(dev
->name
), "%s%u", devname
, dhd
->unit
);
7169 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7170 #define ARPHRD_IEEE80211_PRISM 802
7173 #ifndef ARPHRD_IEEE80211_RADIOTAP
7174 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7175 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7177 dev
->type
= ARPHRD_IEEE80211_RADIOTAP
;
7179 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7180 dev
->hard_start_xmit
= dhd_monitor_start
;
7181 dev
->do_ioctl
= dhd_monitor_ioctl
;
7182 dev
->get_stats
= dhd_monitor_get_stats
;
7184 dev
->netdev_ops
= &netdev_monitor_ops
;
7187 if (register_netdevice(dev
)) {
7188 DHD_ERROR(("%s, register_netdev failed for %s\n",
7189 __FUNCTION__
, dev
->name
));
7193 if (FW_SUPPORTED((&dhd
->pub
), monitor
)) {
7194 #ifdef DHD_PCIE_RUNTIMEPM
7195 /* Disable RuntimePM in monitor mode */
7196 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
7197 DHD_ERROR(("%s : Disable RuntimePM in Monitor Mode\n", __FUNCTION__
));
7198 #endif /* DHD_PCIE_RUNTIME_PM */
7199 scan_suppress
= TRUE
;
7200 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
7201 ret
= dhd_iovar(&dhd
->pub
, 0, "scansuppress", (char *)&scan_suppress
,
7202 sizeof(scan_suppress
), NULL
, 0, TRUE
);
7204 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__
, ret
));
7208 dhd
->monitor_dev
= dev
;
7212 dhd_del_monitor_if(dhd_info_t
*dhd
)
7216 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
7220 if (!dhd
->monitor_dev
) {
7221 DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__
));
7225 if (dhd
->monitor_dev
) {
7226 if (dhd
->monitor_dev
->reg_state
== NETREG_UNINITIALIZED
) {
7227 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7228 MFREE(dhd
->osh
, dhd
->monitor_dev
->priv
, DHD_MON_DEV_PRIV_SIZE
);
7229 MFREE(dhd
->osh
, dhd
->monitor_dev
, sizeof(struct net_device
));
7231 free_netdev(dhd
->monitor_dev
);
7234 unregister_netdevice(dhd
->monitor_dev
);
7236 dhd
->monitor_dev
= NULL
;
7241 dhd_set_monitor(dhd_pub_t
*pub
, int ifidx
, int val
)
7243 dhd_info_t
*dhd
= pub
->info
;
7245 DHD_TRACE(("%s: val %d\n", __FUNCTION__
, val
));
7247 dhd_net_if_lock_local(dhd
);
7249 /* Delete monitor */
7250 dhd_del_monitor_if(dhd
);
7253 dhd_add_monitor_if(dhd
);
7255 dhd
->monitor_type
= val
;
7256 dhd_net_if_unlock_local(dhd
);
7258 #endif /* WL_MONITOR */
7260 int dhd_ioctl_process(dhd_pub_t
*pub
, int ifidx
, dhd_ioctl_t
*ioc
, void *data_buf
)
7262 int bcmerror
= BCME_OK
;
7264 struct net_device
*net
;
7266 net
= dhd_idx2net(pub
, ifidx
);
7268 bcmerror
= BCME_BADARG
;
7270 * The netdev pointer is bad means the DHD can't communicate
7271 * to higher layers, so just return from here
7276 /* check for local dhd ioctl and handle it */
7277 if (ioc
->driver
== DHD_IOCTL_MAGIC
) {
7278 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
7280 buflen
= MIN(ioc
->len
, DHD_IOCTL_MAXLEN
);
7281 bcmerror
= dhd_ioctl((void *)pub
, ioc
, data_buf
, buflen
);
7283 pub
->bcmerror
= bcmerror
;
7287 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
7289 buflen
= MIN(ioc
->len
, WLC_IOCTL_MAXLEN
);
7291 /* send to dongle (must be up, and wl). */
7292 if (pub
->busstate
== DHD_BUS_DOWN
|| pub
->busstate
== DHD_BUS_LOAD
) {
7293 if ((!pub
->dongle_trap_occured
) && allow_delay_fwdl
) {
7295 if (atomic_read(&exit_in_progress
)) {
7296 DHD_ERROR(("%s module exit in progress\n", __func__
));
7297 bcmerror
= BCME_DONGLE_DOWN
;
7300 ret
= dhd_bus_start(pub
);
7302 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
7303 bcmerror
= BCME_DONGLE_DOWN
;
7307 bcmerror
= BCME_DONGLE_DOWN
;
7313 bcmerror
= BCME_DONGLE_DOWN
;
7318 * Flush the TX queue if required for proper message serialization:
7319 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
7320 * prevent M4 encryption and
7321 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
7322 * prevent disassoc frame being sent before WPS-DONE frame.
7324 if (ioc
->cmd
== WLC_SET_KEY
||
7325 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
7326 strncmp("wsec_key", data_buf
, 9) == 0) ||
7327 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
7328 strncmp("bsscfg:wsec_key", data_buf
, 15) == 0) ||
7329 ioc
->cmd
== WLC_DISASSOC
)
7330 dhd_wait_pend8021x(net
);
7332 if ((ioc
->cmd
== WLC_SET_VAR
|| ioc
->cmd
== WLC_GET_VAR
) &&
7333 data_buf
!= NULL
&& strncmp("rpc_", data_buf
, 4) == 0) {
7334 bcmerror
= BCME_UNSUPPORTED
;
7337 bcmerror
= dhd_wl_ioctl(pub
, ifidx
, (wl_ioctl_t
*)ioc
, data_buf
, buflen
);
7340 /* Intercept monitor ioctl here, add/del monitor if */
7341 if (bcmerror
== BCME_OK
&& ioc
->cmd
== WLC_SET_MONITOR
) {
7343 if (data_buf
!= NULL
&& buflen
!= 0) {
7345 val
= *(int*)data_buf
;
7346 } else if (buflen
>= 2) {
7347 val
= *(short*)data_buf
;
7349 val
= *(char*)data_buf
;
7352 dhd_set_monitor(pub
, ifidx
, val
);
7354 #endif /* WL_MONITOR */
7357 dhd_check_hang(net
, pub
, bcmerror
);
7363 * Called by the OS (optionally via a wrapper function).
7364 * @param net Linux per dongle instance
7365 * @param ifr Linux request structure
7366 * @param cmd e.g. SIOCETHTOOL
7369 dhd_ioctl_entry(struct net_device
*net
, struct ifreq
*ifr
, int cmd
)
7371 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7376 void *local_buf
= NULL
; /**< buffer in kernel space */
7377 void __user
*ioc_buf_user
= NULL
; /**< buffer in user space */
7380 if (atomic_read(&exit_in_progress
)) {
7381 DHD_ERROR(("%s module exit in progress\n", __func__
));
7382 bcmerror
= BCME_DONGLE_DOWN
;
7383 return OSL_ERROR(bcmerror
);
7386 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7387 DHD_PERIM_LOCK(&dhd
->pub
);
7389 /* Interface up check for built-in type */
7390 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== FALSE
) {
7391 DHD_TRACE(("%s: Interface is down \n", __FUNCTION__
));
7392 DHD_PERIM_UNLOCK(&dhd
->pub
);
7393 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7394 return OSL_ERROR(BCME_NOTUP
);
7397 ifidx
= dhd_net2idx(dhd
, net
);
7398 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__
, ifidx
, cmd
));
7400 #if defined(WL_STATIC_IF)
7401 /* skip for static ndev when it is down */
7402 if (dhd_is_static_ndev(&dhd
->pub
, net
) && !(net
->flags
& IFF_UP
)) {
7403 DHD_PERIM_UNLOCK(&dhd
->pub
);
7404 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7407 #endif /* WL_STATIC_iF */
7409 if (ifidx
== DHD_BAD_IF
) {
7410 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__
));
7411 DHD_PERIM_UNLOCK(&dhd
->pub
);
7412 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7416 #if defined(WL_WIRELESS_EXT)
7417 /* linux wireless extensions */
7418 if ((cmd
>= SIOCIWFIRST
) && (cmd
<= SIOCIWLAST
)) {
7419 /* may recurse, do NOT lock */
7420 ret
= wl_iw_ioctl(net
, ifr
, cmd
);
7421 DHD_PERIM_UNLOCK(&dhd
->pub
);
7422 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7425 #endif /* defined(WL_WIRELESS_EXT) */
7427 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
7428 if (cmd
== SIOCETHTOOL
) {
7429 ret
= dhd_ethtool(dhd
, (void*)ifr
->ifr_data
);
7430 DHD_PERIM_UNLOCK(&dhd
->pub
);
7431 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7434 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7436 if (cmd
== SIOCDEVPRIVATE
+1) {
7437 ret
= wl_android_priv_cmd(net
, ifr
);
7438 dhd_check_hang(net
, &dhd
->pub
, ret
);
7439 DHD_PERIM_UNLOCK(&dhd
->pub
);
7440 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7444 if (cmd
!= SIOCDEVPRIVATE
) {
7445 DHD_PERIM_UNLOCK(&dhd
->pub
);
7446 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7450 memset(&ioc
, 0, sizeof(ioc
));
7452 #ifdef CONFIG_COMPAT
7453 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7454 if (in_compat_syscall()) {
7456 if (is_compat_task()) {
7457 #endif /* LINUX_VER >= 4.6 */
7458 compat_wl_ioctl_t compat_ioc
;
7459 if (copy_from_user(&compat_ioc
, ifr
->ifr_data
, sizeof(compat_wl_ioctl_t
))) {
7460 bcmerror
= BCME_BADADDR
;
7463 ioc
.cmd
= compat_ioc
.cmd
;
7464 ioc
.buf
= compat_ptr(compat_ioc
.buf
);
7465 ioc
.len
= compat_ioc
.len
;
7466 ioc
.set
= compat_ioc
.set
;
7467 ioc
.used
= compat_ioc
.used
;
7468 ioc
.needed
= compat_ioc
.needed
;
7469 /* To differentiate between wl and dhd read 4 more byes */
7470 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(compat_wl_ioctl_t
),
7471 sizeof(uint
)) != 0)) {
7472 bcmerror
= BCME_BADADDR
;
7476 #endif /* CONFIG_COMPAT */
7478 /* Copy the ioc control structure part of ioctl request */
7479 if (copy_from_user(&ioc
, ifr
->ifr_data
, sizeof(wl_ioctl_t
))) {
7480 bcmerror
= BCME_BADADDR
;
7484 /* To differentiate between wl and dhd read 4 more byes */
7485 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(wl_ioctl_t
),
7486 sizeof(uint
)) != 0)) {
7487 bcmerror
= BCME_BADADDR
;
7492 if (!capable(CAP_NET_ADMIN
)) {
7493 bcmerror
= BCME_EPERM
;
7497 /* Take backup of ioc.buf and restore later */
7498 ioc_buf_user
= ioc
.buf
;
7501 buflen
= MIN(ioc
.len
, DHD_IOCTL_MAXLEN
);
7502 if (!(local_buf
= MALLOC(dhd
->pub
.osh
, buflen
+1))) {
7503 bcmerror
= BCME_NOMEM
;
7507 DHD_PERIM_UNLOCK(&dhd
->pub
);
7508 if (copy_from_user(local_buf
, ioc
.buf
, buflen
)) {
7509 DHD_PERIM_LOCK(&dhd
->pub
);
7510 bcmerror
= BCME_BADADDR
;
7513 DHD_PERIM_LOCK(&dhd
->pub
);
7515 *((char *)local_buf
+ buflen
) = '\0';
7517 /* For some platforms accessing userspace memory
7518 * of ioc.buf is causing kernel panic, so to avoid that
7519 * make ioc.buf pointing to kernel space memory local_buf
7521 ioc
.buf
= local_buf
;
7524 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7525 if (ioc
.driver
!= DHD_IOCTL_MAGIC
&& dhd
->pub
.hang_was_sent
) {
7526 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__
));
7527 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd
->pub
, DHD_EVENT_TIMEOUT_MS
);
7528 bcmerror
= BCME_DONGLE_DOWN
;
7532 bcmerror
= dhd_ioctl_process(&dhd
->pub
, ifidx
, &ioc
, local_buf
);
7534 /* Restore back userspace pointer to ioc.buf */
7535 ioc
.buf
= ioc_buf_user
;
7537 if (!bcmerror
&& buflen
&& local_buf
&& ioc
.buf
) {
7538 DHD_PERIM_UNLOCK(&dhd
->pub
);
7539 if (copy_to_user(ioc
.buf
, local_buf
, buflen
))
7541 DHD_PERIM_LOCK(&dhd
->pub
);
7546 MFREE(dhd
->pub
.osh
, local_buf
, buflen
+1);
7548 DHD_PERIM_UNLOCK(&dhd
->pub
);
7549 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7551 return OSL_ERROR(bcmerror
);
7554 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
7555 /* Flags to indicate if we distingish power off policy when
7556 * user set the memu "Keep Wi-Fi on during sleep" to "Never"
7558 int trigger_deep_sleep
= 0;
7559 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
7561 #ifdef FIX_CPU_MIN_CLOCK
7562 static int dhd_init_cpufreq_fix(dhd_info_t
*dhd
)
7565 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7566 mutex_init(&dhd
->cpufreq_fix
);
7568 dhd
->cpufreq_fix_status
= FALSE
;
7573 static void dhd_fix_cpu_freq(dhd_info_t
*dhd
)
7575 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7576 mutex_lock(&dhd
->cpufreq_fix
);
7578 if (dhd
&& !dhd
->cpufreq_fix_status
) {
7579 pm_qos_add_request(&dhd
->dhd_cpu_qos
, PM_QOS_CPU_FREQ_MIN
, 300000);
7580 #ifdef FIX_BUS_MIN_CLOCK
7581 pm_qos_add_request(&dhd
->dhd_bus_qos
, PM_QOS_BUS_THROUGHPUT
, 400000);
7582 #endif /* FIX_BUS_MIN_CLOCK */
7583 DHD_ERROR(("pm_qos_add_requests called\n"));
7585 dhd
->cpufreq_fix_status
= TRUE
;
7587 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7588 mutex_unlock(&dhd
->cpufreq_fix
);
7592 static void dhd_rollback_cpu_freq(dhd_info_t
*dhd
)
7594 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7595 mutex_lock(&dhd
->cpufreq_fix
);
7597 if (dhd
&& dhd
->cpufreq_fix_status
!= TRUE
) {
7598 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7599 mutex_unlock(&dhd
->cpufreq_fix
);
7604 pm_qos_remove_request(&dhd
->dhd_cpu_qos
);
7605 #ifdef FIX_BUS_MIN_CLOCK
7606 pm_qos_remove_request(&dhd
->dhd_bus_qos
);
7607 #endif /* FIX_BUS_MIN_CLOCK */
7608 DHD_ERROR(("pm_qos_add_requests called\n"));
7610 dhd
->cpufreq_fix_status
= FALSE
;
7611 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7612 mutex_unlock(&dhd
->cpufreq_fix
);
7615 #endif /* FIX_CPU_MIN_CLOCK */
7617 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7619 dhd_ioctl_entry_wrapper(struct net_device
*net
, struct ifreq
*ifr
, int cmd
)
7622 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7624 if (atomic_read(&dhd
->pub
.block_bus
))
7627 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd
->pub
.bus
)) < 0)
7630 error
= dhd_ioctl_entry(net
, ifr
, cmd
);
7632 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd
->pub
.bus
));
7633 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd
->pub
.bus
));
7637 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7640 dhd_stop(struct net_device
*net
)
7643 bool skip_reset
= false;
7644 #if defined(WL_CFG80211)
7645 unsigned long flags
= 0;
7647 struct bcm_cfg80211
*cfg
= wl_get_cfg(net
);
7648 #endif /* WL_STATIC_IF */
7649 #endif /* WL_CFG80211 */
7650 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7651 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7652 DHD_PERIM_LOCK(&dhd
->pub
);
7653 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__
, net
));
7654 dhd
->pub
.rxcnt_timeout
= 0;
7655 dhd
->pub
.txcnt_timeout
= 0;
7658 dhd
->pub
.d3ackcnt_timeout
= 0;
7659 #endif /* BCMPCIE */
7661 mutex_lock(&dhd
->pub
.ndev_op_sync
);
7663 if (dhd
->pub
.up
== 0) {
7666 #if defined(DHD_HANG_SEND_UP_TEST)
7667 if (dhd
->pub
.req_hang_type
) {
7668 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
7669 __FUNCTION__
, dhd
->pub
.req_hang_type
));
7670 dhd
->pub
.req_hang_type
= 0;
7672 #endif /* DHD_HANG_SEND_UP_TEST */
7674 dhd_if_flush_sta(DHD_DEV_IFP(net
));
7676 #ifdef FIX_CPU_MIN_CLOCK
7677 if (dhd_get_fw_mode(dhd
) == DHD_FLAG_HOSTAP_MODE
)
7678 dhd_rollback_cpu_freq(dhd
);
7679 #endif /* FIX_CPU_MIN_CLOCK */
7681 ifidx
= dhd_net2idx(dhd
, net
);
7682 BCM_REFERENCE(ifidx
);
7684 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7685 /* If static if is operational, don't reset the chip */
7686 if (IS_CFG80211_STATIC_IF_ACTIVE(cfg
)) {
7687 DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n"));
7688 wl_cfg80211_sta_ifdown(net
);
7692 #endif /* WL_STATIC_IF && WL_CFG80211 */
7695 /* Disable Runtime PM before interface down */
7696 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
7698 spin_lock_irqsave(&dhd
->pub
.up_lock
, flags
);
7700 spin_unlock_irqrestore(&dhd
->pub
.up_lock
, flags
);
7703 #endif /* WL_CFG80211 */
7708 wl_cfg80211_down(net
);
7710 ifp
= dhd
->iflist
[0];
7712 * For CFG80211: Clean up all the left over virtual interfaces
7713 * when the primary Interface is brought down. [ifconfig wlan0 down]
7715 if (!dhd_download_fw_on_driverload
) {
7716 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) &&
7717 (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
7719 #ifdef WL_CFG80211_P2P_DEV_IF
7720 wl_cfg80211_del_p2p_wdev(net
);
7721 #endif /* WL_CFG80211_P2P_DEV_IF */
7723 dhd_net_if_lock_local(dhd
);
7724 for (i
= 1; i
< DHD_MAX_IFS
; i
++)
7725 dhd_remove_if(&dhd
->pub
, i
, FALSE
);
7727 if (ifp
&& ifp
->net
) {
7728 dhd_if_del_sta_list(ifp
);
7730 #ifdef ARP_OFFLOAD_SUPPORT
7731 if (dhd_inetaddr_notifier_registered
) {
7732 dhd_inetaddr_notifier_registered
= FALSE
;
7733 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
7735 #endif /* ARP_OFFLOAD_SUPPORT */
7736 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7737 if (dhd_inet6addr_notifier_registered
) {
7738 dhd_inet6addr_notifier_registered
= FALSE
;
7739 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
7741 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7742 dhd_net_if_unlock_local(dhd
);
7744 cancel_work_sync(dhd
->dhd_deferred_wq
);
7746 #ifdef SHOW_LOGTRACE
7747 /* Wait till event_log_dispatcher_work finishes */
7748 cancel_delayed_work_sync(&dhd
->event_log_dispatcher_work
);
7749 #endif /* SHOW_LOGTRACE */
7751 #if defined(DHD_LB_RXP)
7752 __skb_queue_purge(&dhd
->rx_pend_queue
);
7753 #endif /* DHD_LB_RXP */
7755 #if defined(DHD_LB_TXP)
7756 skb_queue_purge(&dhd
->tx_pend_queue
);
7757 #endif /* DHD_LB_TXP */
7760 #if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
7761 defined(ARGOS_NOTIFY_CB)
7762 argos_register_notifier_deinit();
7763 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
7764 #ifdef DHDTCPACK_SUPPRESS
7765 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
7766 #endif /* DHDTCPACK_SUPPRESS */
7767 #if defined(DHD_LB_RXP)
7768 if (ifp
&& ifp
->net
== dhd
->rx_napi_netdev
) {
7769 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
7770 __FUNCTION__
, &dhd
->rx_napi_struct
, net
, net
->name
));
7771 skb_queue_purge(&dhd
->rx_napi_queue
);
7772 napi_disable(&dhd
->rx_napi_struct
);
7773 netif_napi_del(&dhd
->rx_napi_struct
);
7774 dhd
->rx_napi_netdev
= NULL
;
7776 #endif /* DHD_LB_RXP */
7778 #endif /* WL_CFG80211 */
7780 DHD_SSSR_DUMP_DEINIT(&dhd
->pub
);
7782 #ifdef PROP_TXSTATUS
7783 dhd_wlfc_cleanup(&dhd
->pub
, NULL
, 0);
7785 #ifdef SHOW_LOGTRACE
7786 if (!dhd_download_fw_on_driverload
) {
7787 /* Release the skbs from queue for WLC_E_TRACE event */
7788 dhd_event_logtrace_flush_queue(&dhd
->pub
);
7789 if (dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
) {
7790 if (dhd
->event_data
.fmts
) {
7791 MFREE(dhd
->pub
.osh
, dhd
->event_data
.fmts
,
7792 dhd
->event_data
.fmts_size
);
7793 dhd
->event_data
.fmts
= NULL
;
7795 if (dhd
->event_data
.raw_fmts
) {
7796 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_fmts
,
7797 dhd
->event_data
.raw_fmts_size
);
7798 dhd
->event_data
.raw_fmts
= NULL
;
7800 if (dhd
->event_data
.raw_sstr
) {
7801 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_sstr
,
7802 dhd
->event_data
.raw_sstr_size
);
7803 dhd
->event_data
.raw_sstr
= NULL
;
7805 if (dhd
->event_data
.rom_raw_sstr
) {
7806 MFREE(dhd
->pub
.osh
, dhd
->event_data
.rom_raw_sstr
,
7807 dhd
->event_data
.rom_raw_sstr_size
);
7808 dhd
->event_data
.rom_raw_sstr
= NULL
;
7810 dhd
->dhd_state
&= ~DHD_ATTACH_LOGTRACE_INIT
;
7813 #endif /* SHOW_LOGTRACE */
7815 dhd_dev_apf_delete_filter(net
);
7818 /* Stop the protocol module */
7819 dhd_prot_stop(&dhd
->pub
);
7821 OLD_MOD_DEC_USE_COUNT
;
7823 if (skip_reset
== false) {
7824 #if defined(WL_CFG80211)
7825 if (ifidx
== 0 && !dhd_download_fw_on_driverload
) {
7826 #if defined(BT_OVER_SDIO)
7827 dhd_bus_put(&dhd
->pub
, WLAN_MODULE
);
7828 wl_android_set_wifi_on_flag(FALSE
);
7830 wl_android_wifi_off(net
, TRUE
);
7831 #endif /* BT_OVER_SDIO */
7833 #ifdef SUPPORT_DEEP_SLEEP
7835 /* CSP#505233: Flags to indicate if we distingish
7836 * power off policy when user set the memu
7837 * "Keep Wi-Fi on during sleep" to "Never"
7839 if (trigger_deep_sleep
) {
7840 dhd_deepsleep(net
, 1);
7841 trigger_deep_sleep
= 0;
7844 #endif /* SUPPORT_DEEP_SLEEP */
7846 dhd
->pub
.hang_was_sent
= 0;
7848 /* Clear country spec for for built-in type driver */
7849 if (!dhd_download_fw_on_driverload
) {
7850 dhd
->pub
.dhd_cspec
.country_abbrev
[0] = 0x00;
7851 dhd
->pub
.dhd_cspec
.rev
= 0;
7852 dhd
->pub
.dhd_cspec
.ccode
[0] = 0x00;
7860 DHD_PERIM_UNLOCK(&dhd
->pub
);
7861 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
7863 /* Destroy wakelock */
7864 if (!dhd_download_fw_on_driverload
&&
7865 (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) &&
7866 (skip_reset
== false)) {
7867 DHD_OS_WAKE_LOCK_DESTROY(dhd
);
7868 dhd
->dhd_state
&= ~DHD_ATTACH_STATE_WAKELOCKS_INIT
;
7871 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
7875 #if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
7876 defined(USE_INITIAL_SHORT_DWELL_TIME))
7877 extern bool g_first_broadcast_scan
;
7878 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
7881 static int dhd_interworking_enable(dhd_pub_t
*dhd
)
7883 uint32 enable
= true;
7886 ret
= dhd_iovar(dhd
, 0, "interworking", (char *)&enable
, sizeof(enable
), NULL
, 0, TRUE
);
7888 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__
, ret
));
7896 dhd_open(struct net_device
*net
)
7898 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
7905 #if defined(PREVENT_REOPEN_DURING_HANG)
7906 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
7907 if (dhd
->pub
.hang_was_sent
== 1) {
7908 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__
));
7909 /* Force to bring down WLAN interface in case dhd_stop() is not called
7910 * from the upper layer when HANG event is triggered.
7912 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== 1) {
7913 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__
));
7919 #endif /* PREVENT_REOPEN_DURING_HANG */
7921 mutex_lock(&dhd
->pub
.ndev_op_sync
);
7923 if (dhd
->pub
.up
== 1) {
7925 DHD_ERROR(("Primary net_device is already up \n"));
7926 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
7930 if (!dhd_download_fw_on_driverload
) {
7931 if (!dhd_driver_init_done
) {
7932 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__
));
7933 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
7937 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
7938 DHD_OS_WAKE_LOCK_INIT(dhd
);
7939 dhd
->dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
7942 #ifdef SHOW_LOGTRACE
7943 skb_queue_head_init(&dhd
->evt_trace_queue
);
7945 if (!(dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
)) {
7946 ret
= dhd_init_logstrs_array(dhd
->pub
.osh
, &dhd
->event_data
);
7947 if (ret
== BCME_OK
) {
7948 dhd_init_static_strs_array(dhd
->pub
.osh
, &dhd
->event_data
,
7949 st_str_file_path
, map_file_path
);
7950 dhd_init_static_strs_array(dhd
->pub
.osh
, &dhd
->event_data
,
7951 rom_st_str_file_path
, rom_map_file_path
);
7952 dhd
->dhd_state
|= DHD_ATTACH_LOGTRACE_INIT
;
7955 #endif /* SHOW_LOGTRACE */
7958 #if defined(MULTIPLE_SUPPLICANT)
7959 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
7960 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) != 0) {
7961 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__
));
7963 mutex_lock(&_dhd_sdio_mutex_lock_
);
7965 #endif /* MULTIPLE_SUPPLICANT */
7967 DHD_OS_WAKE_LOCK(&dhd
->pub
);
7968 DHD_PERIM_LOCK(&dhd
->pub
);
7969 dhd
->pub
.dongle_trap_occured
= 0;
7970 dhd
->pub
.hang_was_sent
= 0;
7971 dhd
->pub
.hang_reason
= 0;
7972 dhd
->pub
.iovar_timeout_occured
= 0;
7973 #ifdef PCIE_FULL_DONGLE
7974 dhd
->pub
.d3ack_timeout_occured
= 0;
7975 #endif /* PCIE_FULL_DONGLE */
7976 #ifdef DHD_MAP_LOGGING
7977 dhd
->pub
.smmu_fault_occurred
= 0;
7978 #endif /* DHD_MAP_LOGGING */
7980 #ifdef DHD_LOSSLESS_ROAMING
7981 dhd
->pub
.dequeue_prec_map
= ALLPRIO
;
7984 #if !defined(WL_CFG80211)
7986 * Force start if ifconfig_up gets called before START command
7987 * We keep WEXT's wl_control_wl_start to provide backward compatibility
7988 * This should be removed in the future
7990 ret
= wl_control_wl_start(net
);
7992 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
7999 ifidx
= dhd_net2idx(dhd
, net
);
8000 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
8003 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__
));
8008 if (!dhd
->iflist
[ifidx
]) {
8009 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__
));
8015 atomic_set(&dhd
->pend_8021x_cnt
, 0);
8016 #if defined(WL_CFG80211)
8017 if (!dhd_download_fw_on_driverload
) {
8018 DHD_ERROR(("\n%s\n", dhd_version
));
8019 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8020 g_first_broadcast_scan
= TRUE
;
8021 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8022 #if defined(BT_OVER_SDIO)
8023 ret
= dhd_bus_get(&dhd
->pub
, WLAN_MODULE
);
8024 wl_android_set_wifi_on_flag(TRUE
);
8026 ret
= wl_android_wifi_on(net
);
8027 #endif /* BT_OVER_SDIO */
8029 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
8030 __FUNCTION__
, ret
));
8035 #ifdef SUPPORT_DEEP_SLEEP
8037 /* Flags to indicate if we distingish
8038 * power off policy when user set the memu
8039 * "Keep Wi-Fi on during sleep" to "Never"
8041 if (trigger_deep_sleep
) {
8042 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8043 g_first_broadcast_scan
= TRUE
;
8044 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8045 dhd_deepsleep(net
, 0);
8046 trigger_deep_sleep
= 0;
8049 #endif /* SUPPORT_DEEP_SLEEP */
8050 #ifdef FIX_CPU_MIN_CLOCK
8051 if (dhd_get_fw_mode(dhd
) == DHD_FLAG_HOSTAP_MODE
) {
8052 dhd_init_cpufreq_fix(dhd
);
8053 dhd_fix_cpu_freq(dhd
);
8055 #endif /* FIX_CPU_MIN_CLOCK */
8058 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
8060 /* try to bring up bus */
8061 DHD_PERIM_UNLOCK(&dhd
->pub
);
8063 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8064 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd
->pub
.bus
)) >= 0) {
8065 ret
= dhd_bus_start(&dhd
->pub
);
8066 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd
->pub
.bus
));
8067 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd
->pub
.bus
));
8070 ret
= dhd_bus_start(&dhd
->pub
);
8071 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8073 DHD_PERIM_LOCK(&dhd
->pub
);
8075 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
8083 if (dhd
->pub
.is_bt_recovery_required
) {
8084 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__
));
8085 bcmsdh_btsdio_process_dhd_hang_notification(TRUE
);
8087 dhd
->pub
.is_bt_recovery_required
= FALSE
;
8090 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
8091 memcpy(net
->dev_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
8094 /* Get current TOE mode from dongle */
8095 if (dhd_toe_get(dhd
, ifidx
, &toe_ol
) >= 0 && (toe_ol
& TOE_TX_CSUM_OL
) != 0) {
8096 dhd
->iflist
[ifidx
]->net
->features
|= NETIF_F_IP_CSUM
;
8098 dhd
->iflist
[ifidx
]->net
->features
&= ~NETIF_F_IP_CSUM
;
8102 #if defined(DHD_LB_RXP)
8103 __skb_queue_head_init(&dhd
->rx_pend_queue
);
8104 if (dhd
->rx_napi_netdev
== NULL
) {
8105 dhd
->rx_napi_netdev
= dhd
->iflist
[ifidx
]->net
;
8106 memset(&dhd
->rx_napi_struct
, 0, sizeof(struct napi_struct
));
8107 netif_napi_add(dhd
->rx_napi_netdev
, &dhd
->rx_napi_struct
,
8108 dhd_napi_poll
, dhd_napi_weight
);
8109 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
8110 __FUNCTION__
, &dhd
->rx_napi_struct
, net
, net
->name
));
8111 napi_enable(&dhd
->rx_napi_struct
);
8112 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__
));
8113 skb_queue_head_init(&dhd
->rx_napi_queue
);
8114 } /* rx_napi_netdev == NULL */
8115 #endif /* DHD_LB_RXP */
8116 #ifdef DHD_LB_IRQSET
8117 dhd_irq_set_affinity(&dhd
->pub
);
8118 #endif /* DHD_LB_IRQSET */
8120 #if defined(DHD_LB_TXP)
8121 /* Use the variant that uses locks */
8122 skb_queue_head_init(&dhd
->tx_pend_queue
);
8123 #endif /* DHD_LB_TXP */
8125 #if defined(WL_CFG80211)
8126 if (unlikely(wl_cfg80211_up(net
))) {
8127 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__
));
8131 if (!dhd_download_fw_on_driverload
) {
8132 #ifdef ARP_OFFLOAD_SUPPORT
8133 dhd
->pend_ipaddr
= 0;
8134 if (!dhd_inetaddr_notifier_registered
) {
8135 dhd_inetaddr_notifier_registered
= TRUE
;
8136 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
8138 #endif /* ARP_OFFLOAD_SUPPORT */
8139 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8140 if (!dhd_inet6addr_notifier_registered
) {
8141 dhd_inet6addr_notifier_registered
= TRUE
;
8142 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
8144 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8147 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
8148 defined(ARGOS_NOTIFY_CB)
8149 argos_register_notifier_init(net
);
8150 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
8151 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
8152 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
8153 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
8155 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
8156 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
8157 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
8158 #if defined(NUM_SCB_MAX_PROBE)
8159 dhd_set_scb_probe(&dhd
->pub
);
8160 #endif /* NUM_SCB_MAX_PROBE */
8161 #endif /* WL_CFG80211 */
8166 if (wl_event_enable
) {
8167 /* For wl utility to receive events */
8168 dhd
->pub
.wl_event_enabled
= true;
8170 dhd
->pub
.wl_event_enabled
= false;
8173 if (logtrace_pkt_sendup
) {
8174 /* For any deamon to recieve logtrace */
8175 dhd
->pub
.logtrace_pkt_sendup
= true;
8177 dhd
->pub
.logtrace_pkt_sendup
= false;
8180 OLD_MOD_INC_USE_COUNT
;
8183 dhd_dbgfs_init(&dhd
->pub
);
8187 mutex_unlock(&dhd
->pub
.ndev_op_sync
);
8192 DHD_PERIM_UNLOCK(&dhd
->pub
);
8193 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
8195 #if defined(MULTIPLE_SUPPLICANT)
8196 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8197 mutex_unlock(&_dhd_sdio_mutex_lock_
);
8199 #endif /* MULTIPLE_SUPPLICANT */
8205 * ndo_start handler for primary ndev
8208 dhd_pri_open(struct net_device
*net
)
8212 ret
= dhd_open(net
);
8213 if (unlikely(ret
)) {
8214 DHD_ERROR(("Failed to open primary dev ret %d\n", ret
));
8218 /* Allow transmit calls */
8219 netif_start_queue(net
);
8220 DHD_ERROR(("[%s] tx queue started\n", net
->name
));
/*
 * dhd_pri_stop: ndo_stop handler for the primary net_device.
 * Stops the TX queue first (so no new frames arrive during teardown),
 * then calls dhd_stop(net) and logs any failure.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8225 * ndo_stop handler for primary ndev
8228 dhd_pri_stop(struct net_device
*net
)
8233 netif_stop_queue(net
);
8234 DHD_ERROR(("[%s] tx queue stopped\n", net
->name
));
8236 ret
= dhd_stop(net
);
8237 if (unlikely(ret
)) {
8238 DHD_ERROR(("dhd_stop failed: %d\n", ret
));
/*
 * dhd_static_if_open: ndo_open for a statically-registered virtual I/F
 * (WL_STATIC_IF && WL_CFG80211 only). Firmware-side interface creation
 * is deferred to this IFF_UP context: ensure the primary dev is open
 * (dhd_open on the primary netdev is idempotent per the comment below),
 * then ask cfg80211 to open the static I/F and enable its TX queue.
 * Non-static interfaces fall through with only a trace.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8245 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
8247 * For static I/Fs, the firmware interface init
8248 * is done from the IFF_UP context.
8251 dhd_static_if_open(struct net_device
*net
)
8254 struct bcm_cfg80211
*cfg
;
8255 struct net_device
*primary_netdev
= NULL
;
8257 cfg
= wl_get_cfg(net
);
8258 primary_netdev
= bcmcfg_to_prmry_ndev(cfg
);
8260 if (!IS_CFG80211_STATIC_IF(cfg
, net
)) {
8261 DHD_TRACE(("non-static interface (%s)..do nothing \n", net
->name
));
8266 DHD_INFO(("[%s][STATIC_IF] Enter \n", net
->name
));
8267 /* Ensure fw is initialized. If it is already initialized,
8268 * dhd_open will return success.
8270 ret
= dhd_open(primary_netdev
);
8271 if (unlikely(ret
)) {
8272 DHD_ERROR(("Failed to open primary dev ret %d\n", ret
));
8276 ret
= wl_cfg80211_static_if_open(net
);
8278 /* Allow transmit calls */
8279 netif_start_queue(net
);
/*
 * dhd_static_if_stop: ndo_stop for a static virtual I/F.
 * Disables TX, closes the cfg80211 static I/F, and — if firmware is up
 * and the primary (STA) netdev is not IFF_UP — also invokes dhd_stop()
 * on the primary dev so the bus can be shut down.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8286 dhd_static_if_stop(struct net_device
*net
)
8288 struct bcm_cfg80211
*cfg
;
8289 struct net_device
*primary_netdev
= NULL
;
8291 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
8293 DHD_INFO(("[%s][STATIC_IF] Enter \n", net
->name
));
8295 /* Ensure queue is disabled */
8296 netif_tx_disable(net
);
8298 cfg
= wl_get_cfg(net
);
8299 if (!IS_CFG80211_STATIC_IF(cfg
, net
)) {
8300 DHD_TRACE(("non-static interface (%s)..do nothing \n", net
->name
));
8304 ret
= wl_cfg80211_static_if_close(net
);
/* Firmware already down: nothing more to tear down here. */
8306 if (dhd
->pub
.up
== 0) {
8307 /* If fw is down, return */
8308 DHD_ERROR(("fw down\n"));
8311 /* If STA iface is not in operational, invoke dhd_close from this
8314 primary_netdev
= bcmcfg_to_prmry_ndev(cfg
);
8315 if (!(primary_netdev
->flags
& IFF_UP
)) {
8316 ret
= dhd_stop(primary_netdev
);
8318 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
/* NOTE(review): comment says "WL_CF80211" — typo for WL_CFG80211 in upstream. */
8323 #endif /* WL_STATIC_IF && WL_CF80211 */
/*
 * dhd_do_driver_init: bring the driver up on demand for @net.
 * Bails out if the primary interface is not initialized, guards against
 * a concurrent dhdsdio_probe (MULTIPLE_SUPPLICANT + BCMSDIO), is a
 * no-op when the bus is already in DHD_BUS_DATA state, and otherwise
 * calls dhd_open(net).
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8325 int dhd_do_driver_init(struct net_device
*net
)
8327 dhd_info_t
*dhd
= NULL
;
8330 DHD_ERROR(("Primary Interface not initialized \n"));
8334 #ifdef MULTIPLE_SUPPLICANT
8335 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8336 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) != 0) {
8337 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__
));
8340 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
8341 #endif /* MULTIPLE_SUPPLICANT */
8343 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
8344 dhd
= DHD_DEV_INFO(net
);
8346 /* If driver is already initialized, do nothing
8348 if (dhd
->pub
.busstate
== DHD_BUS_DATA
) {
8349 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
8353 if (dhd_open(net
) < 0) {
8354 DHD_ERROR(("Driver Init Failed \n"));
/*
 * dhd_event_ifadd: handle a firmware WLC_E_IF "add" event.
 * First offers the event to cfg80211 (wl_cfg80211_notify_ifadd); if
 * cfg80211 consumes it (BCME_OK) the early path applies (the return is
 * among the dropped lines). Otherwise, for a virtual ifidx (> 0), a
 * dhd_if_event_t copy is allocated and handed to the deferred workqueue
 * so the heavy interface-add work runs outside DPC context.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8362 dhd_event_ifadd(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8366 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8367 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
, ifevent
->role
) == BCME_OK
)
8371 /* handle IF event caused by wl commands, SoftAP, WEXT and
8372 * anything else. This has to be done asynchronously otherwise
8373 * DPC will be blocked (and iovars will timeout as DPC has no chance
8374 * to read the response back)
8376 if (ifevent
->ifidx
> 0) {
8377 dhd_if_event_t
*if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8378 if (if_event
== NULL
) {
8379 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8380 MALLOCED(dhdinfo
->pub
.osh
)));
8384 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8385 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
/* strncpy may leave the buffer unterminated; the explicit NUL below covers that. */
8386 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8387 if_event
->name
[IFNAMSIZ
- 1] = '\0';
/* if_event ownership passes to the work handler, which must free it. */
8388 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
,
8389 DHD_WQ_WORK_IF_ADD
, dhd_ifadd_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
/*
 * dhd_event_ifdel: handle a firmware WLC_E_IF "delete" event.
 * Mirror of dhd_event_ifadd: let cfg80211 claim the event first; if it
 * does not, clone the event into a dhd_if_event_t and defer the actual
 * interface deletion to the workqueue (DPC must not block).
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8396 dhd_event_ifdel(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8398 dhd_if_event_t
*if_event
;
8401 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8402 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
8404 #endif /* WL_CFG80211 */
8406 /* handle IF event caused by wl commands, SoftAP, WEXT and
8409 if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
8410 if (if_event
== NULL
) {
8411 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8412 MALLOCED(dhdinfo
->pub
.osh
)));
8415 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
8416 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
/* strncpy + explicit NUL below guarantees termination. */
8417 strncpy(if_event
->name
, name
, IFNAMSIZ
);
8418 if_event
->name
[IFNAMSIZ
- 1] = '\0';
/* if_event ownership passes to dhd_ifdel_event_handler. */
8419 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
, DHD_WQ_WORK_IF_DEL
,
8420 dhd_ifdel_event_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
/*
 * dhd_event_ifchange: handle a firmware WLC_E_IF "change" event by
 * forwarding it to cfg80211 (WL_CFG80211 builds only). Unlike
 * ifadd/ifdel there is no deferred-work fallback path visible here.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8426 dhd_event_ifchange(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
8429 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo
->pub
),
8430 ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
);
8431 #endif /* WL_CFG80211 */
/*
 * dhd_natoe_ct_event_hanlder: deferred-work handler for NATOE
 * enable/disable events (WL_NATOE). Validates the work-item event type,
 * then: if NATOE became active with a sane port range, rebinds the
 * netlink conntrack subscriptions and requests a conntrack dump; if it
 * became inactive, rebinds with CT_NULL_SUBSCRIPTION to stop
 * notifications.
 * NOTE(review): "hanlder" is the upstream symbol name (typo kept —
 * renaming would break the scheduling call sites). Extracted text is
 * line-mangled with dropped lines; code left byte-identical.
 */
8436 /* Handler to update natoe info and bind with new subscriptions if there is change in config */
8438 dhd_natoe_ct_event_hanlder(void *handle
, void *event_info
, u8 event
)
8440 dhd_info_t
*dhd
= handle
;
8441 wl_event_data_natoe_t
*natoe
= event_info
;
8442 dhd_nfct_info_t
*nfct
= dhd
->pub
.nfct
;
8444 if (event
!= DHD_WQ_WORK_NATOE_EVENT
) {
8445 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
8450 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
8453 if (natoe
->natoe_active
&& natoe
->sta_ip
&& natoe
->start_port
&& natoe
->end_port
&&
8454 (natoe
->start_port
< natoe
->end_port
)) {
8455 /* Rebind subscriptions to start receiving notifications from groups */
8456 if (dhd_ct_nl_bind(nfct
, nfct
->subscriptions
) < 0) {
8459 dhd_ct_send_dump_req(nfct
);
8460 } else if (!natoe
->natoe_active
) {
8461 /* Rebind subscriptions to stop receiving notifications from groups */
8462 if (dhd_ct_nl_bind(nfct
, CT_NULL_SUBSCRIPTION
) < 0) {
/*
 * dhd_natoe_ct_event: receive a NATOE event from firmware (tasklet
 * context). Copies the event payload into nfct->natoe_info under
 * nfct_lock (spin_lock_bh), and if the active flag changed, schedules
 * dhd_natoe_ct_event_hanlder on the deferred workqueue — the netlink
 * bind it performs may sleep and so cannot run here.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8468 /* As NATOE enable/disbale event is received, we have to bind with new NL subscriptions.
8469 * Scheduling workq to switch from tasklet context as bind call may sleep in handler
8472 dhd_natoe_ct_event(dhd_pub_t
*dhd
, char *data
)
8474 wl_event_data_natoe_t
*event_data
= (wl_event_data_natoe_t
*)data
;
8477 wl_event_data_natoe_t
*natoe
= dhd
->nfct
->natoe_info
;
8478 uint8 prev_enable
= natoe
->natoe_active
;
8480 spin_lock_bh(&dhd
->nfct_lock
);
8481 memcpy(natoe
, event_data
, sizeof(*event_data
));
8482 spin_unlock_bh(&dhd
->nfct_lock
);
8484 if (prev_enable
!= event_data
->natoe_active
) {
8485 dhd_deferred_schedule_work(dhd
->info
->dhd_deferred_wq
,
8486 (void *)natoe
, DHD_WQ_WORK_NATOE_EVENT
,
8487 dhd_natoe_ct_event_hanlder
, DHD_WQ_WORK_PRIORITY_LOW
);
8491 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__
));
/*
 * dhd_natoe_ct_ioctl_handler: deferred-work handler that sends a NATOE
 * exception-port ioctl to the dongle. Validates the work-item event
 * type and dhd handle, then forwards ct_ioc via
 * dhd_natoe_prep_send_exception_port_ioctl().
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
8495 /* Handler to send natoe ioctl to dongle */
8497 dhd_natoe_ct_ioctl_handler(void *handle
, void *event_info
, uint8 event
)
8499 dhd_info_t
*dhd
= handle
;
8500 dhd_ct_ioc_t
*ct_ioc
= event_info
;
8502 if (event
!= DHD_WQ_WORK_NATOE_IOCTL
) {
8503 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
8508 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
8512 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd
->pub
, ct_ioc
) < 0) {
8513 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__
));
/*
 * dhd_natoe_ct_ioctl_schedule_work: schedule the NATOE ioctl handler on
 * the deferred workqueue at high priority. Needed because the caller
 * runs in softirq/tasklet context where the ioctl path cannot sleep.
 * NOTE(review): extracted text is line-mangled with dropped lines;
 * code left byte-identical.
 */
8517 /* When Netlink message contains port collision info, the info must be sent to dongle FW
8518 * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl
8521 dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t
*dhd
, dhd_ct_ioc_t
*ioc
)
8524 dhd_deferred_schedule_work(dhd
->info
->dhd_deferred_wq
, (void *)ioc
,
8525 DHD_WQ_WORK_NATOE_IOCTL
, dhd_natoe_ct_ioctl_handler
,
8526 DHD_WQ_WORK_PRIORITY_HIGH
);
8528 #endif /* WL_NATOE */
/*
 * dhd_get_ifp_by_ndev: map a net_device back to its dhd_if_t slot,
 * scanning the iflist (including static-IF slots beyond DHD_MAX_IFS
 * when WL_STATIC_IF is enabled). The loop construct around the ifidx
 * walk is among the dropped lines; what remains is the per-slot match
 * test. Returns the matching ifp, or logs "no entry found" (return of
 * NULL presumably follows — the line is missing from this view).
 * NOTE(review): extracted text is line-mangled with dropped lines;
 * code left byte-identical.
 */
8530 /* This API maps ndev to ifp inclusive of static IFs */
8532 dhd_get_ifp_by_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
)
8534 dhd_if_t
*ifp
= NULL
;
/* Start index: last static-IF slot when WL_STATIC_IF, else last normal slot. */
8536 u32 ifidx
= (DHD_MAX_IFS
+ DHD_MAX_STATIC_IFS
- 1);
8538 u32 ifidx
= (DHD_MAX_IFS
- 1);
8539 #endif /* WL_STATIC_IF */
8541 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdp
->info
;
8543 ifp
= dhdinfo
->iflist
[ifidx
];
8544 if (ifp
&& (ifp
->net
== ndev
)) {
8545 DHD_TRACE(("match found for %s. ifidx:%d\n",
8546 ndev
->name
, ifidx
));
8551 DHD_ERROR(("no entry found for %s\n", ndev
->name
));
/*
 * dhd_is_static_ndev: true iff @ndev resolves to an iflist entry marked
 * as a static interface. NULL arguments are rejected with an error log.
 * NOTE(review): extracted text is line-mangled with dropped lines;
 * code left byte-identical.
 */
8556 dhd_is_static_ndev(dhd_pub_t
*dhdp
, struct net_device
*ndev
)
8558 dhd_if_t
*ifp
= NULL
;
8560 if (!dhdp
|| !ndev
) {
8561 DHD_ERROR(("wrong input\n"));
8566 ifp
= dhd_get_ifp_by_ndev(dhdp
, ndev
);
8567 return (ifp
&& (ifp
->static_if
== true));
/*
 * dhd_update_iflist_info (WL_STATIC_IF): late-bind the real ifidx,
 * bssidx, MAC and dongle name onto an already-registered (static)
 * interface once firmware reports them. Looks up the ifp by ndev,
 * refuses to overwrite a different ifp already occupying the target
 * ifidx slot (treated as a fatal inconsistency: hang message is sent),
 * cleans up first on FW_IF_DELETED/FAILED, then moves the ifp from its
 * current slot (cur_idx — declared in a dropped line) to the new slot
 * and updates its fields.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns, cur_idx initialization); code left byte-identical.
 */
8571 /* In some cases, while registering I/F, the actual ifidx, bssidx and dngl_name
8572 * are not known. For e.g: static i/f case. This function lets to update it once
8576 dhd_update_iflist_info(dhd_pub_t
*dhdp
, struct net_device
*ndev
, int ifidx
,
8577 uint8
*mac
, uint8 bssidx
, const char *dngl_name
, int if_state
)
8579 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdp
->info
;
8580 dhd_if_t
*ifp
, *ifp_new
;
8582 dhd_dev_priv_t
* dev_priv
;
8584 DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
8587 ASSERT(dhdinfo
&& (ifidx
< (DHD_MAX_IFS
+ DHD_MAX_STATIC_IFS
)));
8589 if ((ifp
= dhd_get_ifp_by_ndev(dhdp
, ndev
)) == NULL
) {
8594 if (if_state
== NDEV_STATE_OS_IF_CREATED
) {
8595 /* mark static if */
8596 ifp
->static_if
= TRUE
;
8600 ifp_new
= dhdinfo
->iflist
[ifidx
];
8601 if (ifp_new
&& (ifp_new
!= ifp
)) {
8602 /* There should be only one entry for a given ifidx. */
8603 DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx
));
/* Duplicate slot is unrecoverable: escalate via the Android hang path. */
8605 dhdp
->hang_reason
= HANG_REASON_IFACE_ADD_FAILURE
;
8606 net_os_send_hang_message(ifp
->net
);
8610 /* For static if delete case, cleanup the if before ifidx update */
8611 if ((if_state
== NDEV_STATE_FW_IF_DELETED
) ||
8612 (if_state
== NDEV_STATE_FW_IF_FAILED
)) {
8613 dhd_cleanup_if(ifp
->net
);
8614 dev_priv
= DHD_DEV_PRIV(ndev
);
8615 dev_priv
->ifidx
= ifidx
;
8618 /* update the iflist ifidx slot with cached info */
8619 dhdinfo
->iflist
[ifidx
] = ifp
;
8620 dhdinfo
->iflist
[cur_idx
] = NULL
;
8622 /* update the values */
8624 ifp
->bssidx
= bssidx
;
8626 if (if_state
== NDEV_STATE_FW_IF_CREATED
) {
8627 dhd_dev_priv_save(ndev
, dhdinfo
, ifp
, ifidx
);
8628 /* initialize the dongle provided if name */
/* strlcpy guarantees NUL termination (unlike the strncpy used elsewhere). */
8630 strlcpy(ifp
->dngl_name
, dngl_name
, IFNAMSIZ
);
8631 } else if (ndev
->name
[0] != '\0') {
8632 strlcpy(ifp
->dngl_name
, ndev
->name
, IFNAMSIZ
);
8635 memcpy(&ifp
->mac_addr
, mac
, ETHER_ADDR_LEN
);
8637 DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
8638 ifidx
, cur_idx
, if_state
));
8641 #endif /* WL_STATIC_IF */
/*
 * dhd_allocate_if: (re)populate iflist[ifidx] with a fresh dhd_if_t and
 * an allocated (but NOT registered) net_device. If the slot already
 * holds an interface with a live netdev, it is unregistered/freed first
 * (unregister_netdev vs unregister_netdevice chosen by need_rtnl_lock —
 * the branch line is dropped from this view). The new ifp is
 * zero-filled, linked to dhdinfo, given bssidx/MAC/name, its netdev
 * private data is saved, the destructor is wired so unregister_netdev
 * frees the netdev, and per-feature state (STA list, L2 filter, TCP-SYN
 * flood work) is initialized. The trailing section starting at 8765 is
 * the error-unwind path: tear down NAPI linkage if this netdev was the
 * RX-NAPI device, free the netdev and the ifp, and clear the slot.
 * NOTE(review): extracted text is line-mangled with many dropped lines
 * (braces, returns, error-path labels); code left byte-identical.
 */
8643 /* unregister and free the existing net_device interface (if any) in iflist and
8644 * allocate a new one. the slot is reused. this function does NOT register the
8645 * new interface to linux kernel. dhd_register_if does the job
8648 dhd_allocate_if(dhd_pub_t
*dhdpub
, int ifidx
, const char *name
,
8649 uint8
*mac
, uint8 bssidx
, bool need_rtnl_lock
, const char *dngl_name
)
8651 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
8654 ASSERT(dhdinfo
&& (ifidx
< (DHD_MAX_IFS
+ DHD_MAX_STATIC_IFS
)));
8656 ifp
= dhdinfo
->iflist
[ifidx
];
8659 if (ifp
->net
!= NULL
) {
8660 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8661 __FUNCTION__
, ifp
->net
->name
, ifidx
));
8664 /* For primary ifidx (0), there shouldn't be
8665 * any netdev present already.
8667 DHD_ERROR(("Primary ifidx populated already\n"));
8672 dhd_dev_priv_clear(ifp
->net
); /* clear net_device private */
8674 /* in unregister_netdev case, the interface gets freed by net->destructor
8675 * (which is set to free_netdev)
8677 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
8678 free_netdev(ifp
->net
);
8680 netif_stop_queue(ifp
->net
);
8682 unregister_netdev(ifp
->net
);
8684 unregister_netdevice(ifp
->net
);
/* Allocate and zero the new interface slot. */
8689 ifp
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_t
));
8691 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__
, sizeof(dhd_if_t
)));
8696 memset(ifp
, 0, sizeof(dhd_if_t
));
8697 ifp
->info
= dhdinfo
;
8699 ifp
->bssidx
= bssidx
;
8700 #ifdef DHD_MCAST_REGEN
8701 ifp
->mcast_regen_bss_enable
= FALSE
;
8703 /* set to TRUE rx_pkt_chainable at alloc time */
8704 ifp
->rx_pkt_chainable
= TRUE
;
8707 memcpy(&ifp
->mac_addr
, mac
, ETHER_ADDR_LEN
);
8709 /* Allocate etherdev, including space for private structure */
8710 ifp
->net
= alloc_etherdev(DHD_DEV_PRIV_SIZE
);
8711 if (ifp
->net
== NULL
) {
/* NOTE(review): sizeof(dhdinfo) here is sizeof a pointer, not the
 * allocation size — log-cosmetic only, matches upstream. */
8712 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__
, sizeof(dhdinfo
)));
8716 /* Setup the dhd interface's netdevice private structure. */
8717 dhd_dev_priv_save(ifp
->net
, dhdinfo
, ifp
, ifidx
);
8719 if (name
&& name
[0]) {
/* strncpy is followed by explicit NUL termination. */
8720 strncpy(ifp
->net
->name
, name
, IFNAMSIZ
);
8721 ifp
->net
->name
[IFNAMSIZ
- 1] = '\0';
/* Destructor selection — the surrounding #if/#else lines are dropped;
 * the dhd_netdev_free variant corresponds to the WL_CFG80211 build. */
8726 ifp
->net
->destructor
= free_netdev
;
8728 ifp
->net
->destructor
= dhd_netdev_free
;
8730 ifp
->net
->destructor
= free_netdev
;
8731 #endif /* WL_CFG80211 */
8732 strncpy(ifp
->name
, ifp
->net
->name
, IFNAMSIZ
);
8733 ifp
->name
[IFNAMSIZ
- 1] = '\0';
8734 dhdinfo
->iflist
[ifidx
] = ifp
;
8736 /* initialize the dongle provided if name */
8738 strncpy(ifp
->dngl_name
, dngl_name
, IFNAMSIZ
);
8740 strncpy(ifp
->dngl_name
, name
, IFNAMSIZ
);
8743 #ifdef PCIE_FULL_DONGLE
8744 /* Initialize STA info list */
8745 INIT_LIST_HEAD(&ifp
->sta_list
);
8746 DHD_IF_STA_LIST_LOCK_INIT(ifp
);
8747 #endif /* PCIE_FULL_DONGLE */
8749 #ifdef DHD_L2_FILTER
8750 ifp
->phnd_arp_table
= init_l2_filter_arp_table(dhdpub
->osh
);
8751 ifp
->parp_allnode
= TRUE
;
8752 #endif /* DHD_L2_FILTER */
8754 DHD_CUMM_CTR_INIT(&ifp
->cumm_ctr
);
8756 #ifdef DHDTCPSYNC_FLOOD_BLK
8757 INIT_WORK(&ifp
->blk_tsfl_work
, dhd_blk_tsfl_handler
);
8758 dhd_reset_tcpsync_info_by_ifp(ifp
);
8759 #endif /* DHDTCPSYNC_FLOOD_BLK */
/* ---- error-unwind path (label lines dropped): free netdev + ifp ---- */
8765 if (ifp
->net
!= NULL
) {
8766 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8767 if (ifp
->net
== dhdinfo
->rx_napi_netdev
) {
8768 napi_disable(&dhdinfo
->rx_napi_struct
);
8769 netif_napi_del(&dhdinfo
->rx_napi_struct
);
8770 skb_queue_purge(&dhdinfo
->rx_napi_queue
);
8771 dhdinfo
->rx_napi_netdev
= NULL
;
8773 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8774 dhd_dev_priv_clear(ifp
->net
);
8775 free_netdev(ifp
->net
);
8778 MFREE(dhdinfo
->pub
.osh
, ifp
, sizeof(*ifp
));
8782 dhdinfo
->iflist
[ifidx
] = NULL
;
/*
 * dhd_cleanup_ifp: release per-interface resources that do not depend
 * on the netdev: L2-filter ARP table, per-IF STA list, and (PCIe
 * full-dongle) the interface's flowrings — except for AP-role
 * interfaces, whose flowrings are handled elsewhere.
 * NOTE(review): the DHD_L2_FILTER section references "dhdpub" although
 * this function's parameter is "dhdp" — likely relies on an outer-scope
 * symbol or is an upstream rename inconsistency; left untouched.
 * "ifidx" in the flowring branch is declared in a dropped line.
 * Extracted text is line-mangled; code left byte-identical.
 */
8787 dhd_cleanup_ifp(dhd_pub_t
*dhdp
, dhd_if_t
*ifp
)
8789 #ifdef PCIE_FULL_DONGLE
8791 if_flow_lkup_t
*if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
8792 #endif /* PCIE_FULL_DONGLE */
8795 if ((ifp
->idx
< 0) || (ifp
->idx
>= DHD_MAX_IFS
)) {
8796 DHD_ERROR(("Wrong idx:%d \n", ifp
->idx
));
8800 #ifdef DHD_L2_FILTER
8801 bcm_l2_filter_arp_table_update(dhdpub
->osh
, ifp
->phnd_arp_table
, TRUE
,
8802 NULL
, FALSE
, dhdpub
->tickcnt
);
8803 deinit_l2_filter_arp_table(dhdpub
->osh
, ifp
->phnd_arp_table
);
8804 ifp
->phnd_arp_table
= NULL
;
8805 #endif /* DHD_L2_FILTER */
8807 dhd_if_del_sta_list(ifp
);
8808 #ifdef PCIE_FULL_DONGLE
8809 /* Delete flowrings of virtual interface */
8811 if ((ifidx
!= 0) && (if_flow_lkup
[ifidx
].role
!= WLC_E_IF_ROLE_AP
)) {
8812 dhd_flow_rings_delete(dhdp
, ifidx
);
8814 #endif /* PCIE_FULL_DONGLE */
/*
 * dhd_cleanup_if: netdev-facing wrapper around dhd_cleanup_ifp().
 * Resolves @net to its ifp, validates the index range, and delegates.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (ifp declaration, braces/returns); code left byte-identical.
 */
8819 dhd_cleanup_if(struct net_device
*net
)
8821 dhd_info_t
*dhdinfo
= DHD_DEV_INFO(net
);
8822 dhd_pub_t
*dhdp
= &dhdinfo
->pub
;
8825 if (!(ifp
= dhd_get_ifp_by_ndev(dhdp
, net
)) ||
8826 (ifp
->idx
>= DHD_MAX_IFS
)) {
8827 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp
, ifp
? ifp
->idx
: -1));
8832 dhd_cleanup_ifp(dhdp
, ifp
);
/*
 * dhd_remove_if: unregister and free the net_device in iflist[ifidx],
 * then free the slot itself. Static IFs are skipped (handled at
 * detach). Deletion is flagged via ifp->del_in_progress under the
 * general lock; if transmit paths are still active the function waits
 * up to DHD_TX_COMPLETION_TIMEOUT ms on tx_completion_wait before
 * proceeding. The netdev is then unregistered (unregister_netdev vs
 * unregister_netdevice chosen by need_rtnl_lock — branch line dropped),
 * per-IF resources are cleaned via dhd_cleanup_ifp(), and the ifp
 * memory is MFREE'd.
 * NOTE(review): extracted text is line-mangled with many dropped lines
 * (braces/returns, "timeout" declaration); code left byte-identical.
 */
8835 /* unregister and free the the net_device interface associated with the indexed
8836 * slot, also free the slot memory and set the slot pointer to NULL
8838 #define DHD_TX_COMPLETION_TIMEOUT 5000
8840 dhd_remove_if(dhd_pub_t
*dhdpub
, int ifidx
, bool need_rtnl_lock
)
8842 dhd_info_t
*dhdinfo
= (dhd_info_t
*)dhdpub
->info
;
8844 unsigned long flags
;
8847 ifp
= dhdinfo
->iflist
[ifidx
];
8850 #ifdef DHDTCPSYNC_FLOOD_BLK
8851 cancel_work_sync(&ifp
->blk_tsfl_work
);
8852 #endif /* DHDTCPSYNC_FLOOD_BLK */
8854 /* static IF will be handled in detach */
8855 if (ifp
->static_if
) {
8856 DHD_TRACE(("Skip del iface for static interface\n"));
8859 #endif /* WL_STATIC_IF */
8860 if (ifp
->net
!= NULL
) {
8861 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp
->net
->name
, ifp
->idx
));
/* Mark deletion in progress so concurrent TX paths can observe it. */
8863 DHD_GENERAL_LOCK(dhdpub
, flags
);
8864 ifp
->del_in_progress
= true;
8865 DHD_GENERAL_UNLOCK(dhdpub
, flags
);
8867 /* If TX is in progress, hold the if del */
8868 if (DHD_IF_IS_TX_ACTIVE(ifp
)) {
8869 DHD_INFO(("TX in progress. Wait for it to be complete."));
8870 timeout
= wait_event_timeout(dhdpub
->tx_completion_wait
,
8871 ((ifp
->tx_paths_active
& DHD_TX_CONTEXT_MASK
) == 0),
8872 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT
));
8874 /* Tx completion timeout. Attempt proceeding ahead */
8875 DHD_ERROR(("Tx completion timed out!\n"));
8879 DHD_TRACE(("No outstanding TX!\n"));
8881 dhdinfo
->iflist
[ifidx
] = NULL
;
8882 /* in unregister_netdev case, the interface gets freed by net->destructor
8883 * (which is set to free_netdev)
8885 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
8886 free_netdev(ifp
->net
);
8888 netif_tx_disable(ifp
->net
);
8890 #if defined(SET_RPS_CPUS)
8891 custom_rps_map_clear(ifp
->net
->_rx
);
8892 #endif /* SET_RPS_CPUS */
8893 #if (defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL))
8894 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
8895 dhd_tcpack_suppress_set(dhdpub
, TCPACK_SUP_OFF
);
8896 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
8897 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
8899 unregister_netdev(ifp
->net
);
8901 unregister_netdevice(ifp
->net
);
8904 DHD_GENERAL_LOCK(dhdpub
, flags
);
8905 ifp
->del_in_progress
= false;
8906 DHD_GENERAL_UNLOCK(dhdpub
, flags
);
8908 dhd_cleanup_ifp(dhdpub
, ifp
);
8909 DHD_CUMM_CTR_INIT(&ifp
->cumm_ctr
);
8911 MFREE(dhdinfo
->pub
.osh
, ifp
, sizeof(*ifp
));
/*
 * net_device_ops tables (kernels >= 2.6.31):
 *  - dhd_ops_pri:  primary interface; open/stop go through
 *    dhd_pri_open/dhd_pri_stop.
 *  - dhd_ops_virt: virtual interfaces; with WL_CFG80211+WL_STATIC_IF
 *    open/stop are the static-IF handlers.
 * ioctl/xmit entries are the runtime-PM wrappers when
 * DHD_PCIE_NATIVE_RUNTIMEPM is set, plain entries otherwise; the
 * multicast hook name depends on the kernel version (#else lines are
 * dropped from this extracted view).
 * NOTE(review): extracted text is line-mangled; code left
 * byte-identical.
 */
8918 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
8919 static struct net_device_ops dhd_ops_pri
= {
8920 .ndo_open
= dhd_pri_open
,
8921 .ndo_stop
= dhd_pri_stop
,
8922 .ndo_get_stats
= dhd_get_stats
,
8923 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8924 .ndo_do_ioctl
= dhd_ioctl_entry_wrapper
,
8925 .ndo_start_xmit
= dhd_start_xmit_wrapper
,
8927 .ndo_do_ioctl
= dhd_ioctl_entry
,
8928 .ndo_start_xmit
= dhd_start_xmit
,
8929 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8930 .ndo_set_mac_address
= dhd_set_mac_address
,
8931 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8932 .ndo_set_rx_mode
= dhd_set_multicast_list
,
8934 .ndo_set_multicast_list
= dhd_set_multicast_list
,
8938 static struct net_device_ops dhd_ops_virt
= {
8939 #if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8940 .ndo_open
= dhd_static_if_open
,
8941 .ndo_stop
= dhd_static_if_stop
,
8943 .ndo_get_stats
= dhd_get_stats
,
8944 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8945 .ndo_do_ioctl
= dhd_ioctl_entry_wrapper
,
8946 .ndo_start_xmit
= dhd_start_xmit_wrapper
,
8948 .ndo_do_ioctl
= dhd_ioctl_entry
,
8949 .ndo_start_xmit
= dhd_start_xmit
,
8950 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8951 .ndo_set_mac_address
= dhd_set_mac_address
,
8952 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8953 .ndo_set_rx_mode
= dhd_set_multicast_list
,
8955 .ndo_set_multicast_list
= dhd_set_multicast_list
,
8958 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
/*
 * dhd_os_write_file_posn: write @buflen bytes of @buf to kernel file
 * @fp at position *@posn via vfs_write. Rejects NULL fp/buf and a zero
 * length. The position update-back into *posn and the return values
 * are among the dropped lines in this extracted view.
 * NOTE(review): code left byte-identical.
 */
8961 dhd_os_write_file_posn(void *fp
, unsigned long *posn
, void *buf
,
8962 unsigned long buflen
)
8964 loff_t wr_posn
= *posn
;
8966 if (!fp
|| !buf
|| buflen
== 0)
8969 if (vfs_write((struct file
*)fp
, buf
, buflen
, &wr_posn
) < 0)
/*
 * dhd_os_read_file (SHOW_LOGTRACE): read up to @size bytes into @buf
 * from kernel file handle @file, advancing the file's own f_pos.
 * Returns the vfs_read result (bytes read or negative errno).
 * NOTE(review): extracted text is line-mangled; code left
 * byte-identical.
 */
8976 #ifdef SHOW_LOGTRACE
8978 dhd_os_read_file(void *file
, char *buf
, uint32 size
)
8980 struct file
*filep
= (struct file
*)file
;
8985 return vfs_read(filep
, buf
, size
, &filep
->f_pos
);
/*
 * dhd_os_seek_file: relative seek on a kernel file handle — adds
 * @offset (may be negative) directly to filep->f_pos. No bounds check
 * is visible in this extracted view.
 * NOTE(review): code left byte-identical.
 */
8989 dhd_os_seek_file(void *file
, int64 offset
)
8991 struct file
*filep
= (struct file
*)file
;
8995 /* offset can be -ve */
8996 filep
->f_pos
= filep
->f_pos
+ offset
;
/*
 * dhd_init_logstrs_array (SHOW_LOGTRACE): load the firmware logstrs
 * file (logstrs_path — defined outside this view) into a MALLOC'd
 * buffer and hand it to dhd_parse_logstrs_file() to populate @temp.
 * Error paths close the file and free raw_fmts (the goto labels are
 * among the dropped lines). Ownership of raw_fmts transfers to @temp
 * on successful parse.
 * NOTE(review): extracted text is line-mangled with dropped lines;
 * code left byte-identical.
 */
9002 dhd_init_logstrs_array(osl_t
*osh
, dhd_event_log_t
*temp
)
9004 struct file
*filep
= NULL
;
9007 char *raw_fmts
= NULL
;
9008 int logstrs_size
= 0;
9014 filep
= filp_open(logstrs_path
, O_RDONLY
, 0);
9016 if (IS_ERR(filep
)) {
9017 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__
, logstrs_path
));
9020 error
= vfs_stat(logstrs_path
, &stat
);
9022 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__
, logstrs_path
));
9025 logstrs_size
= (int) stat
.size
;
9027 if (logstrs_size
== 0) {
9028 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__
));
9032 raw_fmts
= MALLOC(osh
, logstrs_size
);
9033 if (raw_fmts
== NULL
) {
9034 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__
));
/* Short read is treated as a failure. */
9038 if (vfs_read(filep
, raw_fmts
, logstrs_size
, &filep
->f_pos
) != logstrs_size
) {
9039 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__
, logstrs_path
));
9043 if (dhd_parse_logstrs_file(osh
, raw_fmts
, logstrs_size
, temp
)
9045 filp_close(filep
, NULL
);
/* ---- error path: free buffer and close file ---- */
9052 MFREE(osh
, raw_fmts
, logstrs_size
);
9058 filp_close(filep
, NULL
);
/*
 * dhd_read_map: open the firmware .map file @fname and parse out
 * ramstart / rodata_start / rodata_end via dhd_parse_map_file().
 * Returns BCME_ERROR (err's initial value) for a NULL fname or open
 * failure; otherwise err carries the parse result. The file is closed
 * on the path visible at the end (intermediate goto labels dropped).
 * NOTE(review): extracted text is line-mangled; the rodata_end
 * parameter line is among those dropped. Code left byte-identical.
 */
9066 dhd_read_map(osl_t
*osh
, char *fname
, uint32
*ramstart
, uint32
*rodata_start
,
9069 struct file
*filep
= NULL
;
9071 int err
= BCME_ERROR
;
9073 if (fname
== NULL
) {
9074 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__
));
9081 filep
= filp_open(fname
, O_RDONLY
, 0);
9082 if (IS_ERR(filep
)) {
9083 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__
, fname
));
9087 if ((err
= dhd_parse_map_file(osh
, filep
, ramstart
,
9088 rodata_start
, rodata_end
)) < 0)
9093 filp_close(filep
, NULL
);
/*
 * dhd_init_static_strs_array: load the rodata (static log strings)
 * region of a firmware image into memory for event-log decoding.
 * Flow: read the .map file for ramstart/rodata bounds; on map failure,
 * clear the matching raw_sstr/rom_raw_sstr pointer in @temp (RAM vs
 * ROM chosen by substring match of @str_file against ram_file_str /
 * rom_file_str). Otherwise open @str_file, seek to the rodata offset
 * within the image (logfilebase = rodata_start - ramstart), read
 * exactly logstrs_size bytes, and publish buffer + bounds into the
 * RAM or ROM fields of @temp. The trailing section is the error path:
 * free raw_fmts, close the file, and NULL out the relevant pointer.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns/goto labels, "error" declaration); code left
 * byte-identical. Ownership of raw_fmts transfers to @temp on success.
 */
9101 dhd_init_static_strs_array(osl_t
*osh
, dhd_event_log_t
*temp
, char *str_file
, char *map_file
)
9103 struct file
*filep
= NULL
;
9105 char *raw_fmts
= NULL
;
9106 uint32 logstrs_size
= 0;
9108 uint32 ramstart
= 0;
9109 uint32 rodata_start
= 0;
9110 uint32 rodata_end
= 0;
9111 uint32 logfilebase
= 0;
9113 error
= dhd_read_map(osh
, map_file
, &ramstart
, &rodata_start
, &rodata_end
);
9114 if (error
!= BCME_OK
) {
9115 DHD_ERROR(("readmap Error!! \n"));
9116 /* don't do event log parsing in actual case */
9117 if (strstr(str_file
, ram_file_str
) != NULL
) {
9118 temp
->raw_sstr
= NULL
;
9119 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9120 temp
->rom_raw_sstr
= NULL
;
9124 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9125 ramstart
, rodata_start
, rodata_end
));
9130 filep
= filp_open(str_file
, O_RDONLY
, 0);
9131 if (IS_ERR(filep
)) {
9132 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__
, str_file
));
9137 /* Full file size is huge. Just read required part */
9138 logstrs_size
= rodata_end
- rodata_start
;
9139 logfilebase
= rodata_start
- ramstart
;
9142 if (logstrs_size
== 0) {
9143 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__
));
9147 raw_fmts
= MALLOC(osh
, logstrs_size
);
9148 if (raw_fmts
== NULL
) {
9149 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__
));
9154 error
= generic_file_llseek(filep
, logfilebase
, SEEK_SET
);
9156 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__
, str_file
, error
));
/* Short read is a failure. */
9161 error
= vfs_read(filep
, raw_fmts
, logstrs_size
, (&filep
->f_pos
));
9162 if (error
!= logstrs_size
) {
9163 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__
, str_file
, error
));
/* Success: publish buffer and rodata bounds to RAM or ROM fields. */
9167 if (strstr(str_file
, ram_file_str
) != NULL
) {
9168 temp
->raw_sstr
= raw_fmts
;
9169 temp
->raw_sstr_size
= logstrs_size
;
9170 temp
->rodata_start
= rodata_start
;
9171 temp
->rodata_end
= rodata_end
;
9172 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9173 temp
->rom_raw_sstr
= raw_fmts
;
9174 temp
->rom_raw_sstr_size
= logstrs_size
;
9175 temp
->rom_rodata_start
= rodata_start
;
9176 temp
->rom_rodata_end
= rodata_end
;
9179 filp_close(filep
, NULL
);
/* ---- error path ---- */
9186 MFREE(osh
, raw_fmts
, logstrs_size
);
9192 filp_close(filep
, NULL
);
9196 if (strstr(str_file
, ram_file_str
) != NULL
) {
9197 temp
->raw_sstr
= NULL
;
9198 } else if (strstr(str_file
, rom_file_str
) != NULL
) {
9199 temp
->rom_raw_sstr
= NULL
;
9203 } /* dhd_init_static_strs_array */
/*
 * dhd_trace_open_proc: procfs open handler for the DHD trace entry —
 * plain single_open with no show callback or private data.
 * NOTE(review): code left byte-identical.
 */
9206 dhd_trace_open_proc(struct inode
*inode
, struct file
*file
)
9208 return single_open(file
, 0, NULL
);
/*
 * dhd_trace_read_proc: procfs read handler for the DHD trace buffer.
 * Under g_dhd_pub->dhd_trace_lock, allocates a trace_buf_info_t,
 * fills it from the driver's ring via dhd_get_read_buf_ptr(), and
 * copies MIN(size, tt) bytes to userspace. Returns the chunk size, or
 * BUF_NOT_AVAILABLE when the ring has no more data; allocation failure
 * leaves ret at BCME_ERROR. The buffer is always freed and the mutex
 * released before return.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
9212 dhd_trace_read_proc(struct file
*file
, char __user
*buffer
, size_t tt
, loff_t
*loff
)
9214 trace_buf_info_t
*trace_buf_info
;
9215 int ret
= BCME_ERROR
;
9218 mutex_lock(&g_dhd_pub
->dhd_trace_lock
);
9219 trace_buf_info
= (trace_buf_info_t
*)MALLOC(g_dhd_pub
->osh
,
9220 sizeof(trace_buf_info_t
));
9221 if (trace_buf_info
) {
9222 dhd_get_read_buf_ptr(g_dhd_pub
, trace_buf_info
);
/* Copy no more than the caller's buffer length. */
9223 if (copy_to_user(buffer
, (void*)trace_buf_info
->buf
, MIN(trace_buf_info
->size
, tt
)))
9228 if (trace_buf_info
->availability
== BUF_NOT_AVAILABLE
)
9229 ret
= BUF_NOT_AVAILABLE
;
9231 ret
= trace_buf_info
->size
;
9233 DHD_ERROR(("Memory allocation Failed\n"));
9236 if (trace_buf_info
) {
9237 MFREE(g_dhd_pub
->osh
, trace_buf_info
, sizeof(trace_buf_info_t
));
9239 mutex_unlock(&g_dhd_pub
->dhd_trace_lock
);
9242 #endif /* SHOW_LOGTRACE */
/*
 * enable_erpom: module parameter enabling the external-radio power
 * management (ERPOM) callbacks below (DHD_ERPOM builds).
 *
 * dhd_wlan_power_off_handler: ERPOM callback invoked before WLAN power
 * is cut. When the power-off is BT-initiated, optionally captures SSSR
 * and firmware core dumps first (memdump_enabled). Then stops all data
 * queues and performs a devreset(1) with dongle_isolation temporarily
 * forced TRUE so the reset path skips a second FLR; the original
 * isolation value is restored afterwards.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
9245 uint enable_erpom
= 0;
9246 module_param(enable_erpom
, int, 0);
9249 dhd_wlan_power_off_handler(void *handler
, unsigned char reason
)
9251 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handler
;
9252 bool dongle_isolation
= dhdp
->dongle_isolation
;
9254 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__
, reason
));
9256 if ((reason
== BY_BT_DUE_TO_BT
) || (reason
== BY_BT_DUE_TO_WLAN
)) {
9257 #if defined(DHD_FW_COREDUMP)
9258 /* save core dump to a file */
9259 if (dhdp
->memdump_enabled
) {
9260 #ifdef DHD_SSSR_DUMP
9261 if (dhdp
->sssr_inited
) {
/* no_wq_sssrdump forces the SSSR dump inline rather than via workqueue. */
9262 dhdp
->info
->no_wq_sssrdump
= TRUE
;
9263 dhd_bus_sssr_dump(dhdp
);
9264 dhdp
->info
->no_wq_sssrdump
= FALSE
;
9266 #endif /* DHD_SSSR_DUMP */
9267 dhdp
->memdump_type
= DUMP_TYPE_DUE_TO_BT
;
9268 dhd_bus_mem_dump(dhdp
);
9270 #endif /* DHD_FW_COREDUMP */
9273 /* pause data on all the interfaces */
9274 dhd_bus_stop_queue(dhdp
->bus
);
9276 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
9277 dhdp
->dongle_isolation
= TRUE
;
9278 dhd_bus_devreset(dhdp
, 1); /* DHD structure cleanup */
9279 dhdp
->dongle_isolation
= dongle_isolation
; /* Restore the old value */
/*
 * dhd_wlan_power_on_handler: ERPOM callback invoked after WLAN power
 * returns. Mirror of the power-off handler: devreset(0) re-initializes
 * the DHD structures with dongle_isolation temporarily forced TRUE (to
 * skip a redundant FLR), the old isolation value is restored, and data
 * queues are resumed on all interfaces.
 * NOTE(review): extracted text is line-mangled with dropped lines
 * (braces/returns); code left byte-identical.
 */
9284 dhd_wlan_power_on_handler(void *handler
, unsigned char reason
)
9286 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handler
;
9287 bool dongle_isolation
= dhdp
->dongle_isolation
;
9289 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__
, reason
));
9290 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
9291 dhdp
->dongle_isolation
= TRUE
;
9292 dhd_bus_devreset(dhdp
, 0); /* DHD structure re-init */
9293 dhdp
->dongle_isolation
= dongle_isolation
; /* Restore the old value */
9294 /* resume data on all the interfaces */
9295 dhd_bus_start_queue(dhdp
->bus
);
9300 #endif /* DHD_ERPOM */
9301 /** Called once for each hardware (dongle) instance that this DHD manages */
9303 dhd_attach(osl_t
*osh
, struct dhd_bus
*bus
, uint bus_hdrlen
)
9305 dhd_info_t
*dhd
= NULL
;
9306 struct net_device
*net
= NULL
;
9307 char if_name
[IFNAMSIZ
] = {'\0'};
9308 uint32 bus_type
= -1;
9309 uint32 bus_num
= -1;
9310 uint32 slot_num
= -1;
9311 #ifdef SHOW_LOGTRACE
9313 #endif /* SHOW_LOGTRACE */
9315 pom_func_handler_t
*pom_handler
;
9316 #endif /* DHD_ERPOM */
9317 wifi_adapter_info_t
*adapter
= NULL
;
9319 dhd_attach_states_t dhd_state
= DHD_ATTACH_STATE_INIT
;
9320 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
9322 #ifdef PCIE_FULL_DONGLE
9323 ASSERT(sizeof(dhd_pkttag_fd_t
) <= OSL_PKTTAG_SZ
);
9324 ASSERT(sizeof(dhd_pkttag_fr_t
) <= OSL_PKTTAG_SZ
);
9325 #endif /* PCIE_FULL_DONGLE */
9327 /* will implement get_ids for DBUS later */
9328 #if defined(BCMSDIO)
9329 dhd_bus_get_ids(bus
, &bus_type
, &bus_num
, &slot_num
);
9331 adapter
= dhd_wifi_platform_get_adapter(bus_type
, bus_num
, slot_num
);
9333 /* Allocate primary dhd_info */
9334 dhd
= wifi_platform_prealloc(adapter
, DHD_PREALLOC_DHD_INFO
, sizeof(dhd_info_t
));
9336 dhd
= MALLOC(osh
, sizeof(dhd_info_t
));
9338 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__
));
9342 memset(dhd
, 0, sizeof(dhd_info_t
));
9343 dhd_state
|= DHD_ATTACH_STATE_DHD_ALLOC
;
9345 dhd
->unit
= dhd_found
+ instance_base
; /* do not increment dhd_found, yet */
9348 #ifdef DUMP_IOCTL_IOV_LIST
9349 dll_init(&(dhd
->pub
.dump_iovlist_head
));
9350 #endif /* DUMP_IOCTL_IOV_LIST */
9351 dhd
->adapter
= adapter
;
9353 dhd
->pub
.is_bt_recovery_required
= FALSE
;
9354 mutex_init(&dhd
->bus_user_lock
);
9355 #endif /* BT_OVER_SDIO */
9358 dll_init(&(dhd
->pub
.mw_list_head
));
9359 #endif /* DHD_DEBUG */
9361 #ifdef GET_CUSTOM_MAC_ENABLE
9362 wifi_platform_get_mac_addr(dhd
->adapter
, dhd
->pub
.mac
.octet
);
9363 #endif /* GET_CUSTOM_MAC_ENABLE */
9364 #ifdef CUSTOM_FORCE_NODFS_FLAG
9365 dhd
->pub
.dhd_cflags
|= WLAN_PLAT_NODFS_FLAG
;
9366 dhd
->pub
.force_country_change
= TRUE
;
9367 #endif /* CUSTOM_FORCE_NODFS_FLAG */
9368 #ifdef CUSTOM_COUNTRY_CODE
9369 get_customized_country_code(dhd
->adapter
,
9370 dhd
->pub
.dhd_cspec
.country_abbrev
, &dhd
->pub
.dhd_cspec
,
9371 dhd
->pub
.dhd_cflags
);
9372 #endif /* CUSTOM_COUNTRY_CODE */
9373 dhd
->thr_dpc_ctl
.thr_pid
= DHD_PID_KT_TL_INVALID
;
9374 dhd
->thr_wdt_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
9376 dhd
->pub
.wet_info
= dhd_get_wet_info(&dhd
->pub
);
9377 #endif /* DHD_WET */
9378 /* Initialize thread based operation and lock */
9379 sema_init(&dhd
->sdsem
, 1);
9381 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
9382 * This is indeed a hack but we have to make it work properly before we have a better
9385 dhd_update_fw_nv_path(dhd
);
9386 dhd
->pub
.pcie_txs_metadata_enable
= pcie_txs_metadata_enable
;
9388 /* Link to info module */
9389 dhd
->pub
.info
= dhd
;
9391 /* Link to bus module */
9393 dhd
->pub
.hdrlen
= bus_hdrlen
;
9395 /* Set network interface name if it was provided as module parameter */
9396 if (iface_name
[0]) {
9399 strncpy(if_name
, iface_name
, IFNAMSIZ
);
9400 if_name
[IFNAMSIZ
- 1] = 0;
9401 len
= strlen(if_name
);
9402 ch
= if_name
[len
- 1];
9403 if ((ch
> '9' || ch
< '0') && (len
< IFNAMSIZ
- 2))
9404 strncat(if_name
, "%d", 2);
9407 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
9408 net
= dhd_allocate_if(&dhd
->pub
, 0, if_name
, NULL
, 0, TRUE
, NULL
);
9412 mutex_init(&dhd
->pub
.ndev_op_sync
);
9413 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
9414 /* Init ARGOS notifier data */
9415 argos_wifi
.notifier_call
= NULL
;
9416 argos_p2p
.notifier_call
= NULL
;
9417 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
9419 dhd_state
|= DHD_ATTACH_STATE_ADD_IF
;
9420 #ifdef DHD_L2_FILTER
9421 /* initialize the l2_filter_cnt */
9422 dhd
->pub
.l2_filter_cnt
= 0;
9424 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9427 net
->netdev_ops
= NULL
;
9430 mutex_init(&dhd
->dhd_iovar_mutex
);
9431 sema_init(&dhd
->proto_sem
, 1);
9433 if (!(dhd_ulp_init(osh
, &dhd
->pub
)))
9435 #endif /* DHD_ULP */
9437 #if defined(DHD_HANG_SEND_UP_TEST)
9438 dhd
->pub
.req_hang_type
= 0;
9439 #endif /* DHD_HANG_SEND_UP_TEST */
9441 #ifdef PROP_TXSTATUS
9442 spin_lock_init(&dhd
->wlfc_spinlock
);
9444 dhd
->pub
.skip_fc
= dhd_wlfc_skip_fc
;
9445 dhd
->pub
.plat_init
= dhd_wlfc_plat_init
;
9446 dhd
->pub
.plat_deinit
= dhd_wlfc_plat_deinit
;
9448 #ifdef DHD_WLFC_THREAD
9449 init_waitqueue_head(&dhd
->pub
.wlfc_wqhead
);
9450 dhd
->pub
.wlfc_thread
= kthread_create(dhd_wlfc_transfer_packets
, &dhd
->pub
, "wlfc-thread");
9451 if (IS_ERR(dhd
->pub
.wlfc_thread
)) {
9452 DHD_ERROR(("create wlfc thread failed\n"));
9455 wake_up_process(dhd
->pub
.wlfc_thread
);
9457 #endif /* DHD_WLFC_THREAD */
9458 #endif /* PROP_TXSTATUS */
9460 /* Initialize other structure content */
9461 init_waitqueue_head(&dhd
->ioctl_resp_wait
);
9462 init_waitqueue_head(&dhd
->d3ack_wait
);
9463 init_waitqueue_head(&dhd
->ctrl_wait
);
9464 init_waitqueue_head(&dhd
->dhd_bus_busy_state_wait
);
9465 init_waitqueue_head(&dhd
->dmaxfer_wait
);
9466 init_waitqueue_head(&dhd
->pub
.tx_completion_wait
);
9467 dhd
->pub
.dhd_bus_busy_state
= 0;
9469 /* Initialize the spinlocks */
9470 spin_lock_init(&dhd
->sdlock
);
9471 spin_lock_init(&dhd
->txqlock
);
9472 spin_lock_init(&dhd
->dhd_lock
);
9473 spin_lock_init(&dhd
->rxf_lock
);
9475 spin_lock_init(&dhd
->pub
.tdls_lock
);
9477 #if defined(RXFRAME_THREAD)
9478 dhd
->rxthread_enabled
= TRUE
;
9479 #endif /* defined(RXFRAME_THREAD) */
9481 #ifdef DHDTCPACK_SUPPRESS
9482 spin_lock_init(&dhd
->tcpack_lock
);
9483 #endif /* DHDTCPACK_SUPPRESS */
9485 /* Initialize Wakelock stuff */
9486 spin_lock_init(&dhd
->wakelock_spinlock
);
9487 spin_lock_init(&dhd
->wakelock_evt_spinlock
);
9488 DHD_OS_WAKE_LOCK_INIT(dhd
);
9489 dhd
->wakelock_counter
= 0;
9490 /* wakelocks prevent a system from going into a low power state */
9491 #ifdef CONFIG_HAS_WAKELOCK
9492 wake_lock_init(&dhd
->wl_wdwake
, WAKE_LOCK_SUSPEND
, "wlan_wd_wake");
9493 #endif /* CONFIG_HAS_WAKELOCK */
9495 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
9496 mutex_init(&dhd
->dhd_net_if_mutex
);
9497 mutex_init(&dhd
->dhd_suspend_mutex
);
9498 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
9499 mutex_init(&dhd
->dhd_apf_mutex
);
9500 #endif /* PKT_FILTER_SUPPORT && APF */
9502 dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
9504 /* Attach and link in the protocol */
9505 if (dhd_prot_attach(&dhd
->pub
) != 0) {
9506 DHD_ERROR(("dhd_prot_attach failed\n"));
9509 dhd_state
|= DHD_ATTACH_STATE_PROT_ATTACH
;
9512 spin_lock_init(&dhd
->pub
.up_lock
);
9513 /* Attach and link in the cfg80211 */
9514 if (unlikely(wl_cfg80211_attach(net
, &dhd
->pub
))) {
9515 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9519 dhd_monitor_init(&dhd
->pub
);
9520 dhd_state
|= DHD_ATTACH_STATE_CFG80211
;
9523 #if defined(WL_WIRELESS_EXT)
9524 /* Attach and link in the iw */
9525 if (!(dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
9526 if (wl_iw_attach(net
, (void *)&dhd
->pub
) != 0) {
9527 DHD_ERROR(("wl_iw_attach failed\n"));
9530 dhd_state
|= DHD_ATTACH_STATE_WL_ATTACH
;
9532 #endif /* defined(WL_WIRELESS_EXT) */
9534 #ifdef SHOW_LOGTRACE
9535 ret
= dhd_init_logstrs_array(osh
, &dhd
->event_data
);
9536 if (ret
== BCME_OK
) {
9537 dhd_init_static_strs_array(osh
, &dhd
->event_data
, st_str_file_path
, map_file_path
);
9538 dhd_init_static_strs_array(osh
, &dhd
->event_data
, rom_st_str_file_path
,
9540 dhd_state
|= DHD_ATTACH_LOGTRACE_INIT
;
9542 #endif /* SHOW_LOGTRACE */
9545 /* attach debug if support */
9546 if (dhd_os_dbg_attach(&dhd
->pub
)) {
9547 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__
));
9551 #if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
9552 /* enable verbose ring to support dump_trace_buf */
9553 dhd_os_start_logging(&dhd
->pub
, FW_VERBOSE_RING_NAME
, 3, 0, 0, 0);
9554 #endif /* SHOW_LOGTRACE */
9557 dhd
->pub
.dbg
->pkt_mon_lock
= dhd_os_spin_lock_init(dhd
->pub
.osh
);
9558 #ifdef DBG_PKT_MON_INIT_DEFAULT
9559 dhd_os_dbg_attach_pkt_monitor(&dhd
->pub
);
9560 #endif /* DBG_PKT_MON_INIT_DEFAULT */
9561 #endif /* DBG_PKT_MON */
9562 #endif /* DEBUGABILITY */
9565 dhd_log_dump_init(&dhd
->pub
);
9566 #endif /* DHD_LOG_DUMP */
9568 #ifdef DHD_PKT_LOGGING
9569 dhd_os_attach_pktlog(&dhd
->pub
);
9570 #endif /* DHD_PKT_LOGGING */
9571 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9572 dhd
->pub
.hang_info
= MALLOCZ(osh
, VENDOR_SEND_HANG_EXT_INFO_LEN
);
9573 if (dhd
->pub
.hang_info
== NULL
) {
9574 DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__
));
9576 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
9577 if (dhd_sta_pool_init(&dhd
->pub
, DHD_MAX_STA
) != BCME_OK
) {
9578 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__
, DHD_MAX_STA
));
9582 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9583 dhd
->tx_wq
= alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI
| WQ_UNBOUND
| WQ_MEM_RECLAIM
, 1);
9585 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__
));
9588 dhd
->rx_wq
= alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI
| WQ_UNBOUND
| WQ_MEM_RECLAIM
, 1);
9590 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__
));
9591 destroy_workqueue(dhd
->tx_wq
);
9595 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9597 /* Set up the watchdog timer */
9598 init_timer(&dhd
->timer
);
9599 dhd
->timer
.data
= (ulong
)dhd
;
9600 dhd
->timer
.function
= dhd_watchdog
;
9601 dhd
->default_wd_interval
= dhd_watchdog_ms
;
9603 if (dhd_watchdog_prio
>= 0) {
9604 /* Initialize watchdog thread */
9605 PROC_START(dhd_watchdog_thread
, dhd
, &dhd
->thr_wdt_ctl
, 0, "dhd_watchdog_thread");
9606 if (dhd
->thr_wdt_ctl
.thr_pid
< 0) {
9611 dhd
->thr_wdt_ctl
.thr_pid
= -1;
9614 #ifdef DHD_PCIE_RUNTIMEPM
9615 /* Setup up the runtime PM Idlecount timer */
9616 init_timer(&dhd
->rpm_timer
);
9617 dhd
->rpm_timer
.data
= (ulong
)dhd
;
9618 dhd
->rpm_timer
.function
= dhd_runtimepm
;
9619 dhd
->rpm_timer_valid
= FALSE
;
9621 dhd
->thr_rpm_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
9622 PROC_START(dhd_rpm_state_thread
, dhd
, &dhd
->thr_rpm_ctl
, 0, "dhd_rpm_state_thread");
9623 if (dhd
->thr_rpm_ctl
.thr_pid
< 0) {
9626 #endif /* DHD_PCIE_RUNTIMEPM */
9628 #ifdef SHOW_LOGTRACE
9629 skb_queue_head_init(&dhd
->evt_trace_queue
);
9630 if (proc_create("dhd_trace", S_IRUSR
, NULL
, &proc_file_fops
) == NULL
)
9631 DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
9632 mutex_init(&dhd
->pub
.dhd_trace_lock
);
9633 #endif /* SHOW_LOGTRACE */
9635 /* Set up the bottom half handler */
9636 if (dhd_dpc_prio
>= 0) {
9637 /* Initialize DPC thread */
9638 PROC_START(dhd_dpc_thread
, dhd
, &dhd
->thr_dpc_ctl
, 0, "dhd_dpc");
9639 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
9643 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_DPC_TASKLET_CTL) && \
9644 !defined(DHD_LB_IRQSET)
9645 if (!zalloc_cpumask_var(&dhd
->pub
.default_cpu_mask
, GFP_KERNEL
)) {
9646 DHD_ERROR(("dpc tasklet, zalloc_cpumask_var error\n"));
9647 dhd
->pub
.affinity_isdpc
= FALSE
;
9649 if (!zalloc_cpumask_var(&dhd
->pub
.dpc_affinity_cpu_mask
, GFP_KERNEL
)) {
9650 DHD_ERROR(("dpc thread, dpc_affinity_cpu_mask error\n"));
9651 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
9652 dhd
->pub
.affinity_isdpc
= FALSE
;
9654 unsigned int irq
= -1;
9656 if (dhdpcie_get_pcieirq(bus
, &irq
)) {
9657 DHD_ERROR(("%s : Can't get interrupt number\n",
9661 #endif /* BCMPCIE */
9663 irq
= adapter
->irq_num
;
9664 #endif /* BCMSDIO */
9666 cpumask_copy(dhd
->pub
.default_cpu_mask
, &hmp_slow_cpu_mask
);
9667 cpumask_or(dhd
->pub
.dpc_affinity_cpu_mask
,
9668 dhd
->pub
.dpc_affinity_cpu_mask
,
9669 cpumask_of(TASKLET_CPUCORE
));
9671 set_irq_cpucore(irq
, dhd
->pub
.default_cpu_mask
,
9672 dhd
->pub
.dpc_affinity_cpu_mask
);
9673 dhd
->pub
.affinity_isdpc
= TRUE
;
9676 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_DPC_TASKLET_CTL && !DHD_LB_IRQSET */
9677 /* use tasklet for dpc */
9678 tasklet_init(&dhd
->tasklet
, dhd_dpc
, (ulong
)dhd
);
9679 dhd
->thr_dpc_ctl
.thr_pid
= -1;
9682 if (dhd
->rxthread_enabled
) {
9683 bzero(&dhd
->pub
.skbbuf
[0], sizeof(void *) * MAXSKBPEND
);
9684 /* Initialize RXF thread */
9685 PROC_START(dhd_rxf_thread
, dhd
, &dhd
->thr_rxf_ctl
, 0, "dhd_rxf");
9686 if (dhd
->thr_rxf_ctl
.thr_pid
< 0) {
9691 dhd_state
|= DHD_ATTACH_STATE_THREADS_CREATED
;
9693 #if defined(CONFIG_PM_SLEEP)
9694 if (!dhd_pm_notifier_registered
) {
9695 dhd_pm_notifier_registered
= TRUE
;
9696 dhd
->pm_notifier
.notifier_call
= dhd_pm_callback
;
9697 dhd
->pm_notifier
.priority
= 10;
9698 register_pm_notifier(&dhd
->pm_notifier
);
9701 #endif /* CONFIG_PM_SLEEP */
9703 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9704 dhd
->early_suspend
.level
= EARLY_SUSPEND_LEVEL_BLANK_SCREEN
+ 20;
9705 dhd
->early_suspend
.suspend
= dhd_early_suspend
;
9706 dhd
->early_suspend
.resume
= dhd_late_resume
;
9707 register_early_suspend(&dhd
->early_suspend
);
9708 dhd_state
|= DHD_ATTACH_STATE_EARLYSUSPEND_DONE
;
9709 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9711 #ifdef ARP_OFFLOAD_SUPPORT
9712 dhd
->pend_ipaddr
= 0;
9713 if (!dhd_inetaddr_notifier_registered
) {
9714 dhd_inetaddr_notifier_registered
= TRUE
;
9715 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
9717 #endif /* ARP_OFFLOAD_SUPPORT */
9719 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9720 if (!dhd_inet6addr_notifier_registered
) {
9721 dhd_inet6addr_notifier_registered
= TRUE
;
9722 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
9724 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9725 dhd
->dhd_deferred_wq
= dhd_deferred_work_init((void *)dhd
);
9726 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9727 INIT_WORK(&dhd
->dhd_hang_process_work
, dhd_hang_process
);
9729 #ifdef DEBUG_CPU_FREQ
9730 dhd
->new_freq
= alloc_percpu(int);
9731 dhd
->freq_trans
.notifier_call
= dhd_cpufreq_notifier
;
9732 cpufreq_register_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
9734 #ifdef DHDTCPACK_SUPPRESS
9736 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_DELAYTX
);
9737 #elif defined(BCMPCIE)
9738 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_HOLD
);
9740 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
9741 #endif /* BCMSDIO */
9742 #endif /* DHDTCPACK_SUPPRESS */
9744 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9745 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9747 #ifdef DHD_DEBUG_PAGEALLOC
9748 register_page_corrupt_cb(dhd_page_corrupt_cb
, &dhd
->pub
);
9749 #endif /* DHD_DEBUG_PAGEALLOC */
9753 dhd_lb_set_default_cpus(dhd
);
9755 /* Initialize the CPU Masks */
9756 if (dhd_cpumasks_init(dhd
) == 0) {
9757 /* Now we have the current CPU maps, run through candidacy */
9758 dhd_select_cpu_candidacy(dhd
);
9760 * If we are able to initialize CPU masks, lets register to the
9761 * CPU Hotplug framework to change the CPU for each job dynamically
9762 * using candidacy algorithm.
9764 dhd
->cpu_notifier
.notifier_call
= dhd_cpu_callback
;
9765 register_hotcpu_notifier(&dhd
->cpu_notifier
); /* Register a callback */
9768 * We are unable to initialize CPU masks, so candidacy algorithm
9769 * won't run, but still Load Balancing will be honoured based
9770 * on the CPUs allocated for a given job statically during init
9772 dhd
->cpu_notifier
.notifier_call
= NULL
;
9773 DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
9778 #ifdef DHD_LB_TXP_DEFAULT_ENAB
9779 /* Trun ON the feature by default */
9780 atomic_set(&dhd
->lb_txp_active
, 1);
9782 /* Trun OFF the feature by default */
9783 atomic_set(&dhd
->lb_txp_active
, 0);
9784 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
9785 #endif /* DHD_LB_TXP */
9787 DHD_LB_STATS_INIT(&dhd
->pub
);
9789 /* Initialize the Load Balancing Tasklets and Napi object */
9790 #if defined(DHD_LB_TXC)
9791 tasklet_init(&dhd
->tx_compl_tasklet
,
9792 dhd_lb_tx_compl_handler
, (ulong
)(&dhd
->pub
));
9793 INIT_WORK(&dhd
->tx_compl_dispatcher_work
, dhd_tx_compl_dispatcher_fn
);
9794 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__
));
9795 #endif /* DHD_LB_TXC */
9797 #if defined(DHD_LB_RXC)
9798 tasklet_init(&dhd
->rx_compl_tasklet
,
9799 dhd_lb_rx_compl_handler
, (ulong
)(&dhd
->pub
));
9800 INIT_WORK(&dhd
->rx_compl_dispatcher_work
, dhd_rx_compl_dispatcher_fn
);
9801 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__
));
9802 #endif /* DHD_LB_RXC */
9804 #if defined(DHD_LB_RXP)
9805 __skb_queue_head_init(&dhd
->rx_pend_queue
);
9806 skb_queue_head_init(&dhd
->rx_napi_queue
);
9807 /* Initialize the work that dispatches NAPI job to a given core */
9808 INIT_WORK(&dhd
->rx_napi_dispatcher_work
, dhd_rx_napi_dispatcher_fn
);
9809 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__
));
9810 #endif /* DHD_LB_RXP */
9812 #if defined(DHD_LB_TXP)
9813 INIT_WORK(&dhd
->tx_dispatcher_work
, dhd_tx_dispatcher_work
);
9814 skb_queue_head_init(&dhd
->tx_pend_queue
);
9815 /* Initialize the work that dispatches TX job to a given core */
9816 tasklet_init(&dhd
->tx_tasklet
,
9817 dhd_lb_tx_handler
, (ulong
)(dhd
));
9818 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__
));
9819 #endif /* DHD_LB_TXP */
9821 dhd_state
|= DHD_ATTACH_STATE_LB_ATTACH_DONE
;
9824 #if defined(BCMPCIE)
9825 dhd
->pub
.extended_trap_data
= MALLOCZ(osh
, BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
9826 if (dhd
->pub
.extended_trap_data
== NULL
) {
9827 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__
));
9829 #endif /* BCMPCIE && ETD */
9831 #ifdef SHOW_LOGTRACE
9832 INIT_DELAYED_WORK(&dhd
->event_log_dispatcher_work
, dhd_event_logtrace_process
);
9833 #endif /* SHOW_LOGTRACE */
9835 DHD_INFO(("%s: sssr mempool init\n", __FUNCTION__
));
9836 DHD_SSSR_MEMPOOL_INIT(&dhd
->pub
);
9838 (void)dhd_sysfs_init(dhd
);
9841 /* Open Netlink socket for NF_CONNTRACK notifications */
9842 dhd
->pub
.nfct
= dhd_ct_open(&dhd
->pub
, NFNL_SUBSYS_CTNETLINK
| NFNL_SUBSYS_CTNETLINK_EXP
,
9844 #endif /* WL_NATOE */
9846 dhd_state
|= DHD_ATTACH_STATE_DONE
;
9847 dhd
->dhd_state
= dhd_state
;
9851 g_dhd_pub
= &dhd
->pub
;
9853 #ifdef DHD_DUMP_MNGR
9854 dhd
->pub
.dump_file_manage
=
9855 (dhd_dump_file_manage_t
*)MALLOCZ(dhd
->pub
.osh
, sizeof(dhd_dump_file_manage_t
));
9856 if (unlikely(!dhd
->pub
.dump_file_manage
)) {
9857 DHD_ERROR(("%s(): could not allocate memory for - "
9858 "dhd_dump_file_manage_t\n", __FUNCTION__
));
9860 #endif /* DHD_DUMP_MNGR */
9861 #ifdef DHD_FW_COREDUMP
9862 /* Set memdump default values */
9863 #ifdef CUSTOMER_HW4_DEBUG
9864 dhd
->pub
.memdump_enabled
= DUMP_DISABLED
;
9866 dhd
->pub
.memdump_enabled
= DUMP_MEMFILE_BUGON
;
9867 #endif /* CUSTOMER_HW4_DEBUG */
9868 /* Check the memdump capability */
9869 dhd_get_memdump_info(&dhd
->pub
);
9870 #endif /* DHD_FW_COREDUMP */
9874 pom_handler
= &dhd
->pub
.pom_wlan_handler
;
9875 pom_handler
->func_id
= WLAN_FUNC_ID
;
9876 pom_handler
->handler
= (void *)g_dhd_pub
;
9877 pom_handler
->power_off
= dhd_wlan_power_off_handler
;
9878 pom_handler
->power_on
= dhd_wlan_power_on_handler
;
9880 dhd
->pub
.pom_func_register
= NULL
;
9881 dhd
->pub
.pom_func_deregister
= NULL
;
9882 dhd
->pub
.pom_toggle_reg_on
= NULL
;
9884 dhd
->pub
.pom_func_register
= symbol_get(pom_func_register
);
9885 dhd
->pub
.pom_func_deregister
= symbol_get(pom_func_deregister
);
9886 dhd
->pub
.pom_toggle_reg_on
= symbol_get(pom_toggle_reg_on
);
9888 symbol_put(pom_func_register
);
9889 symbol_put(pom_func_deregister
);
9890 symbol_put(pom_toggle_reg_on
);
9892 if (!dhd
->pub
.pom_func_register
||
9893 !dhd
->pub
.pom_func_deregister
||
9894 !dhd
->pub
.pom_toggle_reg_on
) {
9895 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
9896 "POM is not loaded\n", __FUNCTION__
));
9900 dhd
->pub
.pom_func_register(pom_handler
);
9901 dhd
->pub
.enable_erpom
= TRUE
;
9904 #endif /* DHD_ERPOM */
9908 if (dhd_state
>= DHD_ATTACH_STATE_DHD_ALLOC
) {
9909 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9910 __FUNCTION__
, dhd_state
, &dhd
->pub
));
9911 dhd
->dhd_state
= dhd_state
;
9912 dhd_detach(&dhd
->pub
);
9913 dhd_free(&dhd
->pub
);
9920 int dhd_get_fw_mode(dhd_info_t
*dhdinfo
)
9922 if (strstr(dhdinfo
->fw_path
, "_apsta") != NULL
)
9923 return DHD_FLAG_HOSTAP_MODE
;
9924 if (strstr(dhdinfo
->fw_path
, "_p2p") != NULL
)
9925 return DHD_FLAG_P2P_MODE
;
9926 if (strstr(dhdinfo
->fw_path
, "_ibss") != NULL
)
9927 return DHD_FLAG_IBSS_MODE
;
9928 if (strstr(dhdinfo
->fw_path
, "_mfg") != NULL
)
9929 return DHD_FLAG_MFG_MODE
;
9931 return DHD_FLAG_STA_MODE
;
9934 int dhd_bus_get_fw_mode(dhd_pub_t
*dhdp
)
9936 return dhd_get_fw_mode(dhdp
->info
);
9939 extern char * nvram_get(const char *name
);
/*
 * dhd_update_fw_nv_path() - refresh dhdinfo->fw_path and dhdinfo->nv_path
 * (and uc_path under DHD_UCODE_DOWNLOAD) from, in priority order: Kconfig
 * defaults, adapter platform data, and the firmware_path/nvram_path module
 * parameters.  Module-parameter values are consumed (cleared) after being
 * copied when dhd_download_fw_on_driverload is set.
 * NOTE(review): several lines of this function were lost in extraction
 * (early returns on error paths, some braces, the module-parameter copy
 * statements); comments below describe only the statements visible here.
 */
9940 bool dhd_update_fw_nv_path(dhd_info_t
*dhdinfo
)
/* Candidate path pointers; remain NULL when no source provides a value. */
9944 const char *fw
= NULL
;
9945 const char *nv
= NULL
;
9946 #ifdef DHD_UCODE_DOWNLOAD
9948 const char *uc
= NULL
;
9949 #endif /* DHD_UCODE_DOWNLOAD */
9950 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
/* Destination buffer capacities used to bound the copies below. */
9951 int fw_path_len
= sizeof(dhdinfo
->fw_path
)
;
9952 int nv_path_len
= sizeof(dhdinfo
->nv_path
)
;
9954 /* Update firmware and nvram path. The path may be from adapter info or module parameter
9955 * The path from adapter info is used for initialization only (as it won't change).
9957 * The firmware_path/nvram_path module parameter may be changed by the system at run
9958 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
9959 * command may change dhdinfo->fw_path. As such we need to clear the path info in
9960 * module parameter after it is copied. We won't update the path until the module parameter
9961 * is changed again (first character is not '\0')
9964 /* set default firmware and nvram path for built-in type driver */
9965 if (!dhd_download_fw_on_driverload
) {
9966 #ifdef CONFIG_BCMDHD_FW_PATH
9967 fw
= VENDOR_PATH CONFIG_BCMDHD_FW_PATH
;
9968 #endif /* CONFIG_BCMDHD_FW_PATH */
9969 #ifdef CONFIG_BCMDHD_NVRAM_PATH
9970 nv
= VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH
;
9971 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
9974 /* check if we need to initialize the path */
/* Seed from adapter platform data only while the stored path is empty. */
9975 if (dhdinfo
->fw_path
[0] == '\0') {
9976 if (adapter
&& adapter
->fw_path
&& adapter
->fw_path
[0] != '\0')
9977 fw
= adapter
->fw_path
;
9979 if (dhdinfo
->nv_path
[0] == '\0') {
9980 if (adapter
&& adapter
->nv_path
&& adapter
->nv_path
[0] != '\0')
9981 nv
= adapter
->nv_path
;
9984 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9986 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
/* NOTE(review): the assignments that take fw/nv/uc from the module
 * parameters were dropped by extraction; only the guards remain visible. */
9988 if (firmware_path
[0] != '\0')
9991 if (nvram_path
[0] != '\0')
9994 #ifdef DHD_UCODE_DOWNLOAD
9995 if (ucode_path
[0] != '\0')
9997 #endif /* DHD_UCODE_DOWNLOAD */
/* Commit the firmware path, trimming a trailing newline if present. */
9999 if (fw
&& fw
[0] != '\0') {
10000 fw_len
= strlen(fw
);
10001 if (fw_len
>= fw_path_len
) {
10002 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
/* NOTE(review): presumably returns FALSE here - line lost in extraction. */
10005 strncpy(dhdinfo
->fw_path
, fw
, fw_path_len
);
10006 if (dhdinfo
->fw_path
[fw_len
-1] == '\n')
10007 dhdinfo
->fw_path
[fw_len
-1] = '\0';
/* Commit the nvram path; buffer is pre-zeroed so the strncpy result is
 * always NUL-terminated. */
10009 if (nv
&& nv
[0] != '\0') {
10010 nv_len
= strlen(nv
);
10011 if (nv_len
>= nv_path_len
) {
10012 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
10015 memset(dhdinfo
->nv_path
, 0, nv_path_len
);
10016 strncpy(dhdinfo
->nv_path
, nv
, nv_path_len
);
10017 dhdinfo
->nv_path
[nv_len
] = '\0';
10018 #ifdef DHD_USE_SINGLE_NVRAM_FILE
10019 /* Remove "_net" or "_mfg" tag from current nvram path */
10021 char *nvram_tag
= "nvram_";
10022 char *ext_tag
= ".txt";
10023 char *sp_nvram
= strnstr(dhdinfo
->nv_path
, nvram_tag
, nv_path_len
);
/* valid_buf: there is room for "nvram_" plus ".txt" within nv_path. */
10024 bool valid_buf
= sp_nvram
&& ((uint32
)(sp_nvram
+ strlen(nvram_tag
) +
10025 strlen(ext_tag
) - dhdinfo
->nv_path
) <= nv_path_len
);
/* Truncate after "nvram" and re-append the ".txt" extension. */
10027 char *sp
= sp_nvram
+ strlen(nvram_tag
) - 1;
10028 uint32 padding_size
= (uint32
)(dhdinfo
->nv_path
+
10030 memset(sp
, 0, padding_size
);
10031 strncat(dhdinfo
->nv_path
, ext_tag
, strlen(ext_tag
));
10032 nv_len
= strlen(dhdinfo
->nv_path
);
10033 DHD_INFO(("%s: new nvram path = %s\n",
10034 __FUNCTION__
, dhdinfo
->nv_path
));
10035 } else if (sp_nvram
) {
10036 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
10040 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10041 " nvram path = %s\n", __FUNCTION__
, dhdinfo
->nv_path
));
10044 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
10045 if (dhdinfo
->nv_path
[nv_len
-1] == '\n')
10046 dhdinfo
->nv_path
[nv_len
-1] = '\0';
/* Commit the ucode path the same way (bounded copy, newline trim). */
10048 #ifdef DHD_UCODE_DOWNLOAD
10049 if (uc
&& uc
[0] != '\0') {
10050 uc_len
= strlen(uc
);
10051 if (uc_len
>= sizeof(dhdinfo
->uc_path
)) {
10052 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10055 strncpy(dhdinfo
->uc_path
, uc
, sizeof(dhdinfo
->uc_path
));
10056 if (dhdinfo
->uc_path
[uc_len
-1] == '\n')
10057 dhdinfo
->uc_path
[uc_len
-1] = '\0';
10059 #endif /* DHD_UCODE_DOWNLOAD */
10061 /* clear the path in module parameter */
/* Consume the module parameters so later calls only re-read them after
 * userspace writes them again. */
10062 if (dhd_download_fw_on_driverload
) {
10063 firmware_path
[0] = '\0';
10064 nvram_path
[0] = '\0';
10066 #ifdef DHD_UCODE_DOWNLOAD
10067 ucode_path
[0] = '\0';
10068 DHD_ERROR(("ucode path: %s\n", dhdinfo
->uc_path
));
10069 #endif /* DHD_UCODE_DOWNLOAD */
10071 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10072 if (dhdinfo
->fw_path
[0] == '\0') {
10073 DHD_ERROR(("firmware path not found\n"));
10076 if (dhdinfo
->nv_path
[0] == '\0') {
10077 DHD_ERROR(("nvram path not found\n"));
10084 #if defined(BT_OVER_SDIO)
/*
 * dhd_update_btfw_path() - refresh dhdinfo->btfw_path from the Kconfig
 * default, adapter platform data, or the btfw_path module parameter
 * (consumed after copy).  Returns whether a BT firmware path is available.
 * NOTE(review): extraction dropped several lines of this function
 * (early returns, braces); comments cover only the visible statements.
 */
10085 extern bool dhd_update_btfw_path(dhd_info_t
*dhdinfo
, char* btfw_path
)
10088 const char *fw
= NULL
;
10089 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
10091 /* Update bt firmware path. The path may be from adapter info or module parameter
10092 * The path from adapter info is used for initialization only (as it won't change).
10094 * The btfw_path module parameter may be changed by the system at run
10095 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
10096 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
10097 * module parameter after it is copied. We won't update the path until the module parameter
10098 * is changed again (first character is not '\0')
10101 /* set default firmware and nvram path for built-in type driver */
10102 if (!dhd_download_fw_on_driverload
) {
10103 #ifdef CONFIG_BCMDHD_BTFW_PATH
10104 fw
= CONFIG_BCMDHD_BTFW_PATH
;
10105 #endif /* CONFIG_BCMDHD_FW_PATH */
10108 /* check if we need to initialize the path */
/* Seed from adapter platform data only while the stored path is empty. */
10109 if (dhdinfo
->btfw_path
[0] == '\0') {
10110 if (adapter
&& adapter
->btfw_path
&& adapter
->btfw_path
[0] != '\0')
10111 fw
= adapter
->btfw_path
;
10114 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
/* NOTE(review): the assignment taking fw from the btfw_path module
 * parameter was dropped by extraction; only the guard remains. */
10116 if (btfw_path
[0] != '\0')
/* Bounded copy into btfw_path, trimming a trailing newline if present. */
10119 if (fw
&& fw
[0] != '\0') {
10120 fw_len
= strlen(fw
);
10121 if (fw_len
>= sizeof(dhdinfo
->btfw_path
)) {
10122 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10125 strncpy(dhdinfo
->btfw_path
, fw
, sizeof(dhdinfo
->btfw_path
));
10126 if (dhdinfo
->btfw_path
[fw_len
-1] == '\n')
10127 dhdinfo
->btfw_path
[fw_len
-1] = '\0';
10130 /* clear the path in module parameter */
10131 btfw_path
[0] = '\0';
10133 if (dhdinfo
->btfw_path
[0] == '\0') {
10134 DHD_ERROR(("bt firmware path not found\n"));
10140 #endif /* defined (BT_OVER_SDIO) */
10142 #ifdef CUSTOMER_HW4_DEBUG
/*
 * dhd_validate_chipid() - sanity-check that the chip id probed from the
 * bus matches the chip this driver build was configured for.  The expected
 * id is chosen at compile time from the BCMxxxx_CHIP defines; a few chip
 * families that share silicon are accepted as equivalent further below.
 * NOTE(review): extraction dropped the bodies of the equivalence 'if'
 * blocks (presumably 'return TRUE;') and the unknown-chip return path.
 */
10143 bool dhd_validate_chipid(dhd_pub_t
*dhdp
)
10145 uint chipid
= dhd_bus_chip_id(dhdp
);
10146 uint config_chipid
;
/* Compile-time selection of the expected chip id. */
10148 #ifdef BCM4375_CHIP
10149 config_chipid
= BCM4375_CHIP_ID
;
10150 #elif defined(BCM4361_CHIP)
10151 config_chipid
= BCM4361_CHIP_ID
;
10152 #elif defined(BCM4359_CHIP)
10153 config_chipid
= BCM4359_CHIP_ID
;
10154 #elif defined(BCM4358_CHIP)
10155 config_chipid
= BCM4358_CHIP_ID
;
10156 #elif defined(BCM4354_CHIP)
10157 config_chipid
= BCM4354_CHIP_ID
;
10158 #elif defined(BCM4339_CHIP)
10159 config_chipid
= BCM4339_CHIP_ID
;
10160 #elif defined(BCM4335_CHIP)
10161 config_chipid
= BCM4335_CHIP_ID
;
10162 #elif defined(BCM43430_CHIP)
10163 config_chipid
= BCM43430_CHIP_ID
;
10164 #elif defined(BCM43018_CHIP)
10165 config_chipid
= BCM43018_CHIP_ID
;
10166 #elif defined(BCM43455_CHIP) || defined(BCM43456_CHIP)
10167 config_chipid
= BCM4345_CHIP_ID
;
10168 #elif defined(BCM43454_CHIP)
10169 config_chipid
= BCM43454_CHIP_ID
;
10170 #elif defined(BCM43012_CHIP_)
10171 config_chipid
= BCM43012_CHIP_ID
;
/* No matching BCMxxxx_CHIP define: complain (dropped return follows). */
10173 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
10174 " please add CONFIG_BCMXXXX into the Kernel and"
10175 " BCMXXXX_CHIP definition into the DHD driver\n",
10180 #endif /* BCM4354_CHIP */
/* Chip families treated as interchangeable for this validation. */
10182 #ifdef SUPPORT_MULTIPLE_CHIP_4345X
10183 if (config_chipid
== BCM43454_CHIP_ID
|| config_chipid
== BCM4345_CHIP_ID
) {
10186 #endif /* SUPPORT_MULTIPLE_CHIP_4345X */
10187 #if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10188 if (chipid
== BCM4350_CHIP_ID
&& config_chipid
== BCM4354_CHIP_ID
) {
10191 #endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
10192 #if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10193 if (chipid
== BCM43569_CHIP_ID
&& config_chipid
== BCM4358_CHIP_ID
) {
10196 #endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
10197 #if defined(BCM4359_CHIP)
10198 if (chipid
== BCM4355_CHIP_ID
&& config_chipid
== BCM4359_CHIP_ID
) {
10201 #endif /* BCM4359_CHIP */
10202 #if defined(BCM4361_CHIP)
10203 if (chipid
== BCM4347_CHIP_ID
&& config_chipid
== BCM4361_CHIP_ID
) {
10206 #endif /* BCM4361_CHIP */
/* Default verdict: exact match between probed and configured chip id. */
10208 return config_chipid
== chipid
;
10210 #endif /* CUSTOMER_HW4_DEBUG */
10212 #if defined(BT_OVER_SDIO)
10213 wlan_bt_handle_t
dhd_bt_get_pub_hndl(void)
10215 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__
, g_dhd_pub
));
10216 /* assuming that dhd_pub_t type pointer is available from a global variable */
10217 return (wlan_bt_handle_t
) g_dhd_pub
;
10218 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl
);
/*
 * dhd_download_btfw() - download the BT firmware image to the dongle once
 * the WLAN bus is up (DHD_BUS_DATA) and a valid BT firmware path has been
 * resolved via dhd_update_btfw_path().
 * NOTE(review): the declaration/initialization of 'ret', the failure
 * check after the download, and the final return were dropped by the
 * extraction of this file.
 */
10220 int dhd_download_btfw(wlan_bt_handle_t handle
, char* btfw_path
)
10223 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
10224 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10226 /* Download BT firmware image to the dongle */
10227 if (dhd
->pub
.busstate
== DHD_BUS_DATA
&& dhd_update_btfw_path(dhd
, btfw_path
)) {
10228 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__
, dhd
->btfw_path
));
10229 ret
= dhd_bus_download_btfw(dhd
->pub
.bus
, dhd
->pub
.osh
, dhd
->btfw_path
);
/* NOTE(review): failure check on 'ret' dropped; error log is below. */
10231 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10232 __FUNCTION__
, dhd
->btfw_path
));
10237 } EXPORT_SYMBOL(dhd_download_btfw
);
/*
 * dhd_bus_start() - bring the dongle fully up: download firmware/nvram,
 * start the watchdog, initialize the bus, register OOB interrupts,
 * create PCIe flow rings, run protocol init and sync with the dongle.
 * Runs under DHD_PERIM_LOCK; every visible error path unlocks it before
 * leaving.
 * NOTE(review): extraction dropped many lines of this function (return
 * statements, some braces and failure checks); comments below describe
 * only the statements that are visible here.
 */
10241 dhd_bus_start(dhd_pub_t
*dhdp
)
10244 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10245 unsigned long flags
;
10247 #if defined(DHD_DEBUG) && defined(BCMSDIO)
/* Timestamps for measuring FW download and F2 sync duration (SDIO debug). */
10248 int fw_download_start
= 0, fw_download_end
= 0, f2_sync_start
= 0, f2_sync_end
= 0;
10249 #endif /* DHD_DEBUG && BCMSDIO */
10252 DHD_TRACE(("Enter %s:\n", __FUNCTION__
));
/* Clear sticky error flags left over from any previous session. */
10253 dhdp
->dongle_trap_occured
= 0;
10254 dhdp
->iovar_timeout_occured
= 0;
10255 #ifdef PCIE_FULL_DONGLE
10256 dhdp
->d3ack_timeout_occured
= 0;
10257 #endif /* PCIE_FULL_DONGLE */
10258 #ifdef DHD_MAP_LOGGING
10259 dhdp
->smmu_fault_occurred
= 0;
10260 #endif /* DHD_MAP_LOGGING */
10262 DHD_PERIM_LOCK(dhdp
);
10263 /* try to download image and nvram to the dongle */
10264 if (dhd
->pub
.busstate
== DHD_BUS_DOWN
&& dhd_update_fw_nv_path(dhd
)) {
10265 /* Indicate FW Download has not yet done */
10266 dhd
->pub
.fw_download_done
= FALSE
;
10267 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__
, dhd
->fw_path
, dhd
->nv_path
));
10268 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10269 fw_download_start
= OSL_SYSUPTIME();
10270 #endif /* DHD_DEBUG && BCMSDIO */
10271 ret
= dhd_bus_download_firmware(dhd
->pub
.bus
, dhd
->pub
.osh
,
10272 dhd
->fw_path
, dhd
->nv_path
);
10273 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10274 fw_download_end
= OSL_SYSUPTIME();
10275 #endif /* DHD_DEBUG && BCMSDIO */
/* NOTE(review): failure check on the download result was dropped here. */
10277 DHD_ERROR(("%s: failed to download firmware %s\n",
10278 __FUNCTION__
, dhd
->fw_path
));
10279 DHD_PERIM_UNLOCK(dhdp
);
10282 /* Indicate FW Download has succeeded */
10283 dhd
->pub
.fw_download_done
= TRUE
;
/* The bus must be in LOAD state at this point to proceed. */
10285 if (dhd
->pub
.busstate
!= DHD_BUS_LOAD
) {
10286 DHD_PERIM_UNLOCK(dhdp
);
10291 dhd_os_sdlock(dhdp
);
10292 #endif /* BCMSDIO */
10294 /* Start the watchdog timer */
10295 dhd
->pub
.tickcnt
= 0;
10296 dhd_os_wd_timer(&dhd
->pub
, dhd_watchdog_ms
);
10298 /* Bring up the bus */
10299 if ((ret
= dhd_bus_init(&dhd
->pub
, FALSE
)) != 0) {
10301 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__
, ret
));
10303 dhd_os_sdunlock(dhdp
);
10304 #endif /* BCMSDIO */
10305 DHD_PERIM_UNLOCK(dhdp
);
10309 DHD_ENABLE_RUNTIME_PM(&dhd
->pub
);
10312 dhd_ulp_set_ulp_state(dhdp
, DHD_ULP_DISABLED
);
10313 #endif /* DHD_ULP */
10314 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
10315 /* Host registration for OOB interrupt */
10316 if (dhd_bus_oob_intr_register(dhdp
)) {
10317 /* deactivate timer and wait for the handler to finish */
10318 #if !defined(BCMPCIE_OOB_HOST_WAKE)
10319 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10320 dhd
->wd_timer_valid
= FALSE
;
10321 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10322 del_timer_sync(&dhd
->timer
);
10324 #endif /* !BCMPCIE_OOB_HOST_WAKE */
10325 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
10326 DHD_PERIM_UNLOCK(dhdp
);
10327 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__
));
10328 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10332 #if defined(BCMPCIE_OOB_HOST_WAKE)
10333 dhd_bus_oob_intr_set(dhdp
, TRUE
);
10335 /* Enable oob at firmware */
10336 dhd_enable_oob_intr(dhd
->pub
.bus
, TRUE
);
10337 #endif /* BCMPCIE_OOB_HOST_WAKE */
10338 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
10339 #ifdef PCIE_FULL_DONGLE
10341 /* max_h2d_rings includes H2D common rings */
10342 uint32 max_h2d_rings
= dhd_bus_max_h2d_queues(dhd
->pub
.bus
);
10344 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__
,
10346 if ((ret
= dhd_flow_rings_init(&dhd
->pub
, max_h2d_rings
)) != BCME_OK
) {
10348 dhd_os_sdunlock(dhdp
);
10349 #endif /* BCMSDIO */
10350 DHD_PERIM_UNLOCK(dhdp
);
10354 #endif /* PCIE_FULL_DONGLE */
10356 /* Do protocol initialization necessary for IOCTL/IOVAR */
10357 ret
= dhd_prot_init(&dhd
->pub
);
10358 if (unlikely(ret
) != BCME_OK
) {
10359 DHD_PERIM_UNLOCK(dhdp
);
10360 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10364 /* If bus is not ready, can't come up */
10365 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
/* Stop the watchdog before bailing out. */
10366 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10367 dhd
->wd_timer_valid
= FALSE
;
10368 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10369 del_timer_sync(&dhd
->timer
);
10370 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__
));
10371 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
10373 dhd_os_sdunlock(dhdp
);
10374 #endif /* BCMSDIO */
10375 DHD_PERIM_UNLOCK(dhdp
);
10376 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10381 dhd_os_sdunlock(dhdp
);
10382 #endif /* BCMSDIO */
10384 /* Bus is ready, query any dongle information */
10385 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10386 f2_sync_start
= OSL_SYSUPTIME();
10387 #endif /* DHD_DEBUG && BCMSDIO */
10388 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0) {
10389 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
10390 dhd
->wd_timer_valid
= FALSE
;
10391 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
10392 del_timer_sync(&dhd
->timer
);
10393 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__
));
10394 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
10395 DHD_PERIM_UNLOCK(dhdp
);
10398 #if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
10399 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__
));
10400 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI
);
10401 #endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
10403 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10404 f2_sync_end
= OSL_SYSUPTIME();
10405 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10406 (fw_download_end
- fw_download_start
) + (f2_sync_end
- f2_sync_start
)));
10407 #endif /* DHD_DEBUG && BCMSDIO */
10409 #ifdef ARP_OFFLOAD_SUPPORT
/* Flush any host IP address that arrived before the bus came up. */
10410 if (dhd
->pend_ipaddr
) {
10411 #ifdef AOE_IP_ALIAS_SUPPORT
10412 aoe_update_host_ipv4_table(&dhd
->pub
, dhd
->pend_ipaddr
, TRUE
, 0);
10413 #endif /* AOE_IP_ALIAS_SUPPORT */
10414 dhd
->pend_ipaddr
= 0;
10416 #endif /* ARP_OFFLOAD_SUPPORT */
10418 DHD_PERIM_UNLOCK(dhdp
);
/*
 * _dhd_tdls_enable() - enable/disable TDLS in firmware via the
 * "tdls_enable" iovar and, when auto mode is requested, program the
 * auto-mode trigger thresholds (idle time, RSSI high/low, packet-count
 * high/low) from the CUSTOM_TDLS_* tuning macros.
 * NOTE(review): extraction dropped the 'ret' declaration, early returns
 * and goto/exit paths; comments cover only the visible statements.
 */
10423 int _dhd_tdls_enable(dhd_pub_t
*dhd
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
10425 uint32 tdls
= tdls_on
;
10427 uint32 tdls_auto_op
= 0;
/* Auto-mode tuning values, all compile-time customizable. */
10428 uint32 tdls_idle_time
= CUSTOM_TDLS_IDLE_MODE_SETTING
;
10429 int32 tdls_rssi_high
= CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
;
10430 int32 tdls_rssi_low
= CUSTOM_TDLS_RSSI_THRESHOLD_LOW
;
10431 uint32 tdls_pktcnt_high
= CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH
;
10432 uint32 tdls_pktcnt_low
= CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW
;
10434 BCM_REFERENCE(mac
);
/* Bail out early if firmware lacks TDLS or state already matches. */
10435 if (!FW_SUPPORTED(dhd
, tdls
))
10438 if (dhd
->tdls_enable
== tdls_on
)
10440 ret
= dhd_iovar(dhd
, 0, "tdls_enable", (char *)&tdls
, sizeof(tdls
), NULL
, 0, TRUE
);
10442 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__
, tdls
, ret
));
/* Cache the new state so repeat calls can short-circuit above. */
10445 dhd
->tdls_enable
= tdls_on
;
10448 tdls_auto_op
= auto_on
;
10449 ret
= dhd_iovar(dhd
, 0, "tdls_auto_op", (char *)&tdls_auto_op
, sizeof(tdls_auto_op
), NULL
,
10452 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__
, ret
));
/* Program the auto-mode trigger thresholds only when auto mode is on. */
10456 if (tdls_auto_op
) {
10457 ret
= dhd_iovar(dhd
, 0, "tdls_idle_time", (char *)&tdls_idle_time
,
10458 sizeof(tdls_idle_time
), NULL
, 0, TRUE
);
10460 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__
, ret
));
10463 ret
= dhd_iovar(dhd
, 0, "tdls_rssi_high", (char *)&tdls_rssi_high
,
10464 sizeof(tdls_rssi_high
), NULL
, 0, TRUE
);
10466 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__
, ret
));
10469 ret
= dhd_iovar(dhd
, 0, "tdls_rssi_low", (char *)&tdls_rssi_low
,
10470 sizeof(tdls_rssi_low
), NULL
, 0, TRUE
);
10472 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__
, ret
));
10475 ret
= dhd_iovar(dhd
, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high
,
10476 sizeof(tdls_pktcnt_high
), NULL
, 0, TRUE
);
10478 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__
, ret
));
10481 ret
= dhd_iovar(dhd
, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low
,
10482 sizeof(tdls_pktcnt_low
), NULL
, 0, TRUE
);
10484 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__
, ret
));
10492 int dhd_tdls_enable(struct net_device
*dev
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
10494 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
10497 ret
= _dhd_tdls_enable(&dhd
->pub
, tdls_on
, auto_on
, mac
);
10503 dhd_tdls_set_mode(dhd_pub_t
*dhd
, bool wfd_mode
)
10506 bool auto_on
= false;
10507 uint32 mode
= wfd_mode
;
10509 #ifdef ENABLE_TDLS_AUTO_MODE
10517 #endif /* ENABLE_TDLS_AUTO_MODE */
10518 ret
= _dhd_tdls_enable(dhd
, false, auto_on
, NULL
);
10520 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret
));
10524 ret
= dhd_iovar(dhd
, 0, "tdls_wfd_mode", (char *)&mode
, sizeof(mode
), NULL
, 0, TRUE
);
10525 if ((ret
< 0) && (ret
!= BCME_UNSUPPORTED
)) {
10526 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__
, ret
));
10530 ret
= _dhd_tdls_enable(dhd
, true, auto_on
, NULL
);
10532 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret
));
10536 dhd
->tdls_mode
= mode
;
#ifdef PCIE_FULL_DONGLE
/*
 * Maintain the host-side TDLS peer table from firmware link events.
 * WLC_E_TDLS_PEER_CONNECTED adds the peer MAC to the singly linked
 * peer_tbl list; WLC_E_TDLS_PEER_DISCONNECTED tears down the peer's flow
 * rings and unlinks/frees its node.  List mutation is protected by
 * DHD_TDLS_LOCK.  Reconstructed from a corrupted extraction: the
 * connect/disconnect branch structure, list-walk advancement and return
 * paths were missing.
 *
 * Returns BCME_OK on success, BCME_ERROR for unknown events, a bad
 * interface index, a duplicate peer, or allocation failure.
 */
int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
{
	dhd_pub_t *dhd_pub = dhdp;
	tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
	tdls_peer_node_t *new = NULL, *prev = NULL;
	int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
	uint8 *da = (uint8 *)&event->addr.octet[0];
	bool connect = FALSE;
	uint32 reason = ntoh32(event->reason);
	unsigned long flags;

	if (reason == WLC_E_TDLS_PEER_CONNECTED)
		connect = TRUE;
	else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
		connect = FALSE;
	else {
		DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (ifindex == DHD_BAD_IF)
		return BCME_ERROR;

	if (connect) {
		/* Reject duplicates: a peer may only appear once in the table. */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
					__FUNCTION__, __LINE__));
				return BCME_ERROR;
			}
			cur = cur->next;
		}

		new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
		if (new == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
			return BCME_ERROR;
		}
		memcpy(new->addr, da, ETHER_ADDR_LEN);
		/* Push the new peer at the head of the list under the TDLS lock. */
		DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
		new->next = dhd_pub->peer_tbl.node;
		dhd_pub->peer_tbl.node = new;
		dhd_pub->peer_tbl.tdls_peer_count++;
		DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	} else {
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				/* Drop the peer's flow rings before freeing its node. */
				dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
				DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
				if (prev)
					prev->next = cur->next;
				else
					dhd_pub->peer_tbl.node = cur->next;
				MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
				dhd_pub->peer_tbl.tdls_peer_count--;
				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
				return BCME_OK;
			}
			prev = cur;
			cur = cur->next;
		}
		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
	}
	return BCME_OK;
}
#endif /* PCIE_FULL_DONGLE */
10609 bool dhd_is_concurrent_mode(dhd_pub_t
*dhd
)
10614 if (dhd
->op_mode
& DHD_FLAG_CONCURR_MULTI_CHAN_MODE
)
10616 else if ((dhd
->op_mode
& DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
) ==
10617 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
)
#if !defined(AP) && defined(WLP2P)
/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
 * would still be named as fw_bcmdhd_apsta.
 */
/*
 * Probe the firmware for P2P/VSDB/RSDB support and build the concurrency
 * capability flag set (DHD_FLAG_CONCURR_*).  Reconstructed from a
 * corrupted extraction: the p2p-probe branch structure and return paths
 * were missing; restored to the canonical bcmdhd layout.
 *
 * Returns 0 when concurrency must stay off (HOSTAP/MFG mode, no p2p in
 * firmware, or no P2P interface support compiled in), otherwise a
 * bitwise OR of DHD_FLAG_* capability flags.
 */
uint32
dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
{
	int32 ret = 0;
	char buf[WLC_IOCTL_SMLEN];
	bool mchan_supported = FALSE;
	/* if dhd->op_mode is already set for HOSTAP and Manufacturing
	 * test mode, that means we only will use the mode as it is
	 */
	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
		return 0;
	if (FW_SUPPORTED(dhd, vsdb)) {
		mchan_supported = TRUE;
	}
	if (!FW_SUPPORTED(dhd, p2p)) {
		DHD_TRACE(("Chip does not support p2p\n"));
		return 0;
	} else {
		/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
		memset(buf, 0, sizeof(buf));
		ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
				sizeof(buf), FALSE);
		if (ret < 0) {
			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
			return 0;
		} else {
			if (buf[0] == 1) {
				/* By default, chip supports single chan concurrency,
				 * now lets check for mchan
				 */
				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
				if (mchan_supported)
					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
				if (FW_SUPPORTED(dhd, rsdb)) {
					ret |= DHD_FLAG_RSDB_MODE;
				}
#ifdef WL_SUPPORT_MULTIP2P
				if (FW_SUPPORTED(dhd, mp2p)) {
					ret |= DHD_FLAG_MP2P_MODE;
				}
#endif /* WL_SUPPORT_MULTIP2P */
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
				return ret;
#else
				return 0;
#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
			}
		}
	}
	return 0;
}
#endif /* !defined(AP) && defined(WLP2P) */
#ifdef WLAIBSS
/*
 * Apply AIBSS (advanced IBSS) pre-init configuration: enable the aibss
 * feature, optionally its power-save/ATIM mode, program the beacon force
 * parameters and IBSS coalescing policy, then mark IBSS mode in op_mode.
 * Reconstructed from a corrupted extraction: local declarations and the
 * per-iovar error-return branches were missing.
 *
 * Returns BCME_OK on success (including firmware without aibss support),
 * a negative BCME_* error otherwise.
 */
int
dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
{
	int ret = BCME_OK;
	aibss_bcn_force_config_t bcn_config;
	uint32 aibss = 1;
	uint32 ibss_coalesce;
#ifdef WLAIBSS_PS
	uint32 aibss_ps = 1;
	s32 atim;
#endif /* WLAIBSS_PS */

	ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
	if (ret < 0) {
		if (ret == BCME_UNSUPPORTED) {
			/* Not an error: firmware simply lacks the AIBSS feature. */
			DHD_ERROR(("%s aibss is not supported\n",
				__FUNCTION__));
			return BCME_OK;
		} else {
			DHD_ERROR(("%s Set aibss to %d failed %d\n",
				__FUNCTION__, aibss, ret));
			return ret;
		}
	}

#ifdef WLAIBSS_PS
	ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
			__FUNCTION__, aibss, ret));
		return ret;
	}

	/* NOTE(review): ATIM window value assumed to come from a custom
	 * define in the dropped lines — confirm against the full source.
	 */
	atim = 10;
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
		(char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
		DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
			__FUNCTION__, ret));
		return ret;
	}
#endif /* WLAIBSS_PS */

	memset(&bcn_config, 0, sizeof(bcn_config));
	bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
	bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
	bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
	bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
	bcn_config.len = sizeof(bcn_config);

	ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
		sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
			__FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
			AIBSS_BCN_FLOOD_DUR, ret));
		return ret;
	}

	ibss_coalesce = IBSS_COALESCE_DEFAULT;
	ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
		sizeof(ibss_coalesce), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
			__FUNCTION__, ret));
		return ret;
	}

	dhd->op_mode |= DHD_FLAG_IBSS_MODE;
	return BCME_OK;
}
#endif /* WLAIBSS */
10756 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10759 dhd_check_adps_bad_ap(dhd_pub_t
*dhd
)
10761 struct net_device
*ndev
;
10762 struct bcm_cfg80211
*cfg
;
10763 struct wl_profile
*profile
;
10764 struct ether_addr bssid
;
10766 if (!dhd_is_associated(dhd
, 0, NULL
)) {
10767 DHD_ERROR(("%s - not associated\n", __FUNCTION__
));
10771 ndev
= dhd_linux_get_primary_netdev(dhd
);
10773 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__
));
10777 cfg
= wl_get_cfg(ndev
);
10779 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__
));
10783 profile
= wl_get_profile_by_netdev(cfg
, ndev
);
10784 memcpy(bssid
.octet
, profile
->bssid
, ETHER_ADDR_LEN
);
10785 if (wl_adps_bad_ap_check(cfg
, &bssid
)) {
10786 if (wl_adps_enabled(cfg
, ndev
)) {
10787 wl_adps_set_suspend(cfg
, ndev
, ADPS_SUSPEND
);
10793 #endif /* WL_BAM */
10796 dhd_enable_adps(dhd_pub_t
*dhd
, uint8 on
)
10802 bcm_iov_buf_t
*iov_buf
= NULL
;
10803 wl_adps_params_v1_t
*data
= NULL
;
10805 len
= OFFSETOF(bcm_iov_buf_t
, data
) + sizeof(*data
);
10806 iov_buf
= MALLOC(dhd
->osh
, len
);
10807 if (iov_buf
== NULL
) {
10808 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__
, len
));
10813 iov_buf
->version
= WL_ADPS_IOV_VER
;
10814 iov_buf
->len
= sizeof(*data
);
10815 iov_buf
->id
= WL_ADPS_IOV_MODE
;
10817 data
= (wl_adps_params_v1_t
*)iov_buf
->data
;
10818 data
->version
= ADPS_SUB_IOV_VERSION_1
;
10819 data
->length
= sizeof(*data
);
10822 for (i
= 1; i
<= MAX_BANDS
; i
++) {
10824 ret
= dhd_iovar(dhd
, 0, "adps", (char *)iov_buf
, len
, NULL
, 0, TRUE
);
10826 if (ret
== BCME_UNSUPPORTED
) {
10827 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__
));
10832 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10833 __FUNCTION__
, on
? "On" : "Off", i
, ret
));
10841 dhd_check_adps_bad_ap(dhd
);
10843 #endif /* WL_BAM */
10847 MFREE(dhd
->osh
, iov_buf
, len
);
10852 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
10855 dhd_preinit_ioctls(dhd_pub_t
*dhd
)
10858 char eventmask
[WL_EVENTING_MASK_LEN
];
10859 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12]; /* Room for "event_msgs" + '\0' + bitvec */
10860 uint32 buf_key_b4_m4
= 1;
10862 eventmsgs_ext_t
*eventmask_msg
= NULL
;
10863 char* iov_buf
= NULL
;
10865 uint32 wnm_cap
= 0;
10866 #if defined(BCMSUP_4WAY_HANDSHAKE)
10867 uint32 sup_wpa
= 1;
10868 #endif /* BCMSUP_4WAY_HANDSHAKE */
10869 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
10870 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
10871 uint32 ampdu_ba_wsize
= 0;
10872 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
10873 #if defined(CUSTOM_AMPDU_MPDU)
10874 int32 ampdu_mpdu
= 0;
10876 #if defined(CUSTOM_AMPDU_RELEASE)
10877 int32 ampdu_release
= 0;
10879 #if defined(CUSTOM_AMSDU_AGGSF)
10880 int32 amsdu_aggsf
= 0;
10883 #if defined(BCMSDIO)
10884 #ifdef PROP_TXSTATUS
10885 int wlfc_enable
= TRUE
;
10886 #ifndef DISABLE_11N
10887 uint32 hostreorder
= 1;
10888 #endif /* DISABLE_11N */
10889 #endif /* PROP_TXSTATUS */
10891 #ifndef PCIE_FULL_DONGLE
10892 uint32 wl_ap_isolate
;
10893 #endif /* PCIE_FULL_DONGLE */
10894 uint32 frameburst
= CUSTOM_FRAMEBURST_SET
;
10895 uint wnm_bsstrans_resp
= 0;
10896 #ifdef SUPPORT_SET_CAC
10897 #ifdef SUPPORT_CUSTOM_SET_CAC
10901 #endif /* SUPPORT_CUSTOM_SET_CAC */
10902 #endif /* SUPPORT_SET_CAC */
10904 #if defined(DHD_NON_DMA_M2M_CORRUPTION)
10905 dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk
;
10906 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
10908 #ifdef DHD_ENABLE_LPC
10910 #endif /* DHD_ENABLE_LPC */
10911 uint power_mode
= PM_FAST
;
10912 #if defined(BCMSDIO)
10913 uint32 dongle_align
= DHD_SDALIGN
;
10914 uint32 glom
= CUSTOM_GLOM_SETTING
;
10915 #endif /* defined(BCMSDIO) */
10916 uint bcn_timeout
= CUSTOM_BCN_TIMEOUT
;
10917 uint scancache_enab
= TRUE
;
10918 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10919 uint32 bcn_li_bcn
= 1;
10920 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10921 uint retry_max
= CUSTOM_ASSOC_RETRY_MAX
;
10922 #if defined(ARP_OFFLOAD_SUPPORT)
10925 int scan_assoc_time
= DHD_SCAN_ASSOC_ACTIVE_TIME
;
10926 int scan_unassoc_time
= DHD_SCAN_UNASSOC_ACTIVE_TIME
;
10927 int scan_passive_time
= DHD_SCAN_PASSIVE_TIME
;
10928 char buf
[WLC_IOCTL_SMLEN
];
10930 uint32 listen_interval
= CUSTOM_LISTEN_INTERVAL
; /* Default Listen Interval in Beacons */
10931 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10932 wl_el_tag_params_t
*el_tag
= NULL
;
10933 #endif /* DHD_8021X_DUMP */
10936 int roam_trigger
[2] = {CUSTOM_ROAM_TRIGGER_SETTING
, WLC_BAND_ALL
};
10937 int roam_scan_period
[2] = {10, WLC_BAND_ALL
};
10938 int roam_delta
[2] = {CUSTOM_ROAM_DELTA_SETTING
, WLC_BAND_ALL
};
10939 #ifdef ROAM_AP_ENV_DETECTION
10940 int roam_env_mode
= AP_ENV_INDETERMINATE
;
10941 #endif /* ROAM_AP_ENV_DETECTION */
10942 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10943 int roam_fullscan_period
= 60;
10944 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10945 int roam_fullscan_period
= 120;
10946 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10947 #ifdef DISABLE_BCNLOSS_ROAM
10948 uint roam_bcnloss_off
= 1;
10949 #endif /* DISABLE_BCNLOSS_ROAM */
10951 #ifdef DISABLE_BUILTIN_ROAM
10953 #endif /* DISABLE_BUILTIN_ROAM */
10954 #endif /* ROAM_ENABLE */
10956 #if defined(SOFTAP)
10959 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10960 struct ether_addr p2p_ea
;
10965 #ifdef SOFTAP_UAPSD_OFF
10966 uint32 wme_apsd
= 0;
10967 #endif /* SOFTAP_UAPSD_OFF */
10968 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10969 uint32 apsta
= 1; /* Enable APSTA mode */
10970 #elif defined(SOFTAP_AND_GC)
10973 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10974 #ifdef GET_CUSTOM_MAC_ENABLE
10975 struct ether_addr ea_addr
;
10976 #endif /* GET_CUSTOM_MAC_ENABLE */
10983 #endif /* DISABLE_11N */
10987 #endif /* USE_WL_TXBF */
10988 #ifdef DISABLE_TXBFR
10989 uint32 txbf_bfr_cap
= 0;
10990 #endif /* DISABLE_TXBFR */
10991 #ifdef AMPDU_VO_ENABLE
10992 struct ampdu_tid_control tid
;
10994 #if defined(PROP_TXSTATUS)
10995 #ifdef USE_WFA_CERT_CONF
10997 #endif /* USE_WFA_CERT_CONF */
10998 #endif /* PROP_TXSTATUS */
10999 #ifdef DHD_SET_FW_HIGHSPEED
11000 uint32 ack_ratio
= 250;
11001 uint32 ack_ratio_depth
= 64;
11002 #endif /* DHD_SET_FW_HIGHSPEED */
11003 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11004 uint32 vht_features
= 0; /* init to 0, will be set based on each support */
11005 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11006 #ifdef DISABLE_11N_PROPRIETARY_RATES
11007 uint32 ht_features
= 0;
11008 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11009 #ifdef CUSTOM_EVENT_PM_WAKE
11010 uint32 pm_awake_thresh
= CUSTOM_EVENT_PM_WAKE
;
11011 #endif /* CUSTOM_EVENT_PM_WAKE */
11012 #ifdef DISABLE_PRUNED_SCAN
11013 uint32 scan_features
= 0;
11014 #endif /* DISABLE_PRUNED_SCAN */
11015 #ifdef DHD_2G_ONLY_SUPPORT
11016 uint band
= WLC_BAND_2G
;
11017 #endif /* DHD_2G_ONLY_SUPPORT */
11018 #ifdef BCMPCIE_OOB_HOST_WAKE
11019 uint32 hostwake_oob
= 0;
11020 #endif /* BCMPCIE_OOB_HOST_WAKE */
11021 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
11022 uint32 btmdelta
= WBTEXT_BTMDELTA
;
11023 #endif /* WBTEXT && WBTEXT_BTMDELTA */
11025 #ifdef PKT_FILTER_SUPPORT
11026 dhd_pkt_filter_enable
= TRUE
;
11028 dhd
->apf_set
= FALSE
;
11030 #endif /* PKT_FILTER_SUPPORT */
11031 dhd
->suspend_bcn_li_dtim
= CUSTOM_SUSPEND_BCN_LI_DTIM
;
11032 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
11033 dhd
->max_dtim_enable
= TRUE
;
11035 dhd
->max_dtim_enable
= FALSE
;
11036 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
11037 #ifdef CUSTOM_SET_OCLOFF
11038 dhd
->ocl_off
= FALSE
;
11039 #endif /* CUSTOM_SET_OCLOFF */
11040 #ifdef SUPPORT_SET_TID
11041 dhd
->tid_mode
= SET_TID_OFF
;
11042 dhd
->target_uid
= 0;
11043 dhd
->target_tid
= 0;
11044 #endif /* SUPPORT_SET_TID */
11045 DHD_TRACE(("Enter %s\n", __FUNCTION__
));
11048 #ifdef CUSTOMER_HW4_DEBUG
11049 if (!dhd_validate_chipid(dhd
)) {
11050 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
11051 __FUNCTION__
, dhd_bus_chip_id(dhd
)));
11052 #ifndef SUPPORT_MULTIPLE_CHIPS
11055 #endif /* !SUPPORT_MULTIPLE_CHIPS */
11057 #endif /* CUSTOMER_HW4_DEBUG */
11058 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
11059 (op_mode
== DHD_FLAG_MFG_MODE
)) {
11060 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
11061 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11062 /* disable runtimePM by default in MFG mode. */
11063 pm_runtime_disable(dhd_bus_to_dev(dhd
->bus
));
11064 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11065 #ifdef DHD_PCIE_RUNTIMEPM
11066 /* Disable RuntimePM in mfg mode */
11067 DHD_DISABLE_RUNTIME_PM(dhd
);
11068 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__
));
11069 #endif /* DHD_PCIE_RUNTIME_PM */
11070 /* Check and adjust IOCTL response timeout for Manufactring firmware */
11071 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT
);
11072 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
11075 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT
);
11076 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__
));
11078 #ifdef BCMPCIE_OOB_HOST_WAKE
11079 ret
= dhd_iovar(dhd
, 0, "bus:hostwake_oob", NULL
, 0, (char *)&hostwake_oob
,
11080 sizeof(hostwake_oob
), FALSE
);
11082 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__
));
11084 if (hostwake_oob
== 0) {
11085 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
11087 ret
= BCME_UNSUPPORTED
;
11090 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__
));
11093 #endif /* BCMPCIE_OOB_HOST_WAKE */
11094 #ifdef GET_CUSTOM_MAC_ENABLE
11095 ret
= wifi_platform_get_mac_addr(dhd
->info
->adapter
, ea_addr
.octet
);
11097 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", (char *)&ea_addr
, ETHER_ADDR_LEN
, NULL
, 0,
11100 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
11104 memcpy(dhd
->mac
.octet
, ea_addr
.octet
, ETHER_ADDR_LEN
);
11106 #endif /* GET_CUSTOM_MAC_ENABLE */
11107 /* Get the default device MAC address directly from firmware */
11108 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
11110 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__
, ret
));
11114 /* Update public MAC address after reading from Firmware */
11115 memcpy(dhd
->mac
.octet
, buf
, ETHER_ADDR_LEN
);
11117 #ifdef GET_CUSTOM_MAC_ENABLE
11119 #endif /* GET_CUSTOM_MAC_ENABLE */
11121 #ifdef DHD_USE_CLMINFO_PARSER
11122 if ((ret
= dhd_get_clminfo(dhd
, clm_path
)) < 0) {
11123 if (dhd
->is_clm_mult_regrev
) {
11124 DHD_ERROR(("%s: CLM Information load failed. Abort initialization.\n",
11129 #endif /* DHD_USE_CLMINFO_PARSER */
11130 if ((ret
= dhd_apply_default_clm(dhd
, clm_path
)) < 0) {
11131 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__
));
11135 /* get a capabilities from firmware */
11137 uint32 cap_buf_size
= sizeof(dhd
->fw_capabilities
);
11138 memset(dhd
->fw_capabilities
, 0, cap_buf_size
);
11139 ret
= dhd_iovar(dhd
, 0, "cap", NULL
, 0, dhd
->fw_capabilities
, (cap_buf_size
- 1),
11142 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11143 __FUNCTION__
, ret
));
11147 memmove(&dhd
->fw_capabilities
[1], dhd
->fw_capabilities
, (cap_buf_size
- 1));
11148 dhd
->fw_capabilities
[0] = ' ';
11149 dhd
->fw_capabilities
[cap_buf_size
- 2] = ' ';
11150 dhd
->fw_capabilities
[cap_buf_size
- 1] = '\0';
11153 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_HOSTAP_MODE
) ||
11154 (op_mode
== DHD_FLAG_HOSTAP_MODE
)) {
11155 #ifdef SET_RANDOM_MAC_SOFTAP
11157 #endif /* SET_RANDOM_MAC_SOFTAP */
11158 dhd
->op_mode
= DHD_FLAG_HOSTAP_MODE
;
11159 #if defined(ARP_OFFLOAD_SUPPORT)
11162 #ifdef PKT_FILTER_SUPPORT
11163 dhd_pkt_filter_enable
= FALSE
;
11165 #ifdef SET_RANDOM_MAC_SOFTAP
11166 SRANDOM32((uint
)jiffies
);
11167 rand_mac
= RANDOM32();
11168 iovbuf
[0] = (unsigned char)(vendor_oui
>> 16) | 0x02; /* local admin bit */
11169 iovbuf
[1] = (unsigned char)(vendor_oui
>> 8);
11170 iovbuf
[2] = (unsigned char)vendor_oui
;
11171 iovbuf
[3] = (unsigned char)(rand_mac
& 0x0F) | 0xF0;
11172 iovbuf
[4] = (unsigned char)(rand_mac
>> 8);
11173 iovbuf
[5] = (unsigned char)(rand_mac
>> 16);
11175 ret
= dhd_iovar(dhd
, 0, "cur_etheraddr", (char *)&iovbuf
, ETHER_ADDR_LEN
, NULL
, 0,
11178 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
11180 memcpy(dhd
->mac
.octet
, iovbuf
, ETHER_ADDR_LEN
);
11181 #endif /* SET_RANDOM_MAC_SOFTAP */
11182 #ifdef USE_DYNAMIC_F2_BLKSIZE
11183 dhdsdio_func_blocksize(dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
11184 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11185 #ifdef SOFTAP_UAPSD_OFF
11186 ret
= dhd_iovar(dhd
, 0, "wme_apsd", (char *)&wme_apsd
, sizeof(wme_apsd
), NULL
, 0,
11189 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11190 __FUNCTION__
, ret
));
11192 #endif /* SOFTAP_UAPSD_OFF */
11193 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
11194 (op_mode
== DHD_FLAG_MFG_MODE
)) {
11195 #if defined(ARP_OFFLOAD_SUPPORT)
11197 #endif /* ARP_OFFLOAD_SUPPORT */
11198 #ifdef PKT_FILTER_SUPPORT
11199 dhd_pkt_filter_enable
= FALSE
;
11200 #endif /* PKT_FILTER_SUPPORT */
11201 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
11202 #ifdef USE_DYNAMIC_F2_BLKSIZE
11203 dhdsdio_func_blocksize(dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
11204 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11205 #ifndef CUSTOM_SET_ANTNPM
11206 if (FW_SUPPORTED(dhd
, rsdb
)) {
11207 wl_config_t rsdb_mode
;
11208 memset(&rsdb_mode
, 0, sizeof(rsdb_mode
));
11209 ret
= dhd_iovar(dhd
, 0, "rsdb_mode", (char *)&rsdb_mode
, sizeof(rsdb_mode
),
11212 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
11213 __FUNCTION__
, ret
));
11216 #endif /* !CUSTOM_SET_ANTNPM */
11218 uint32 concurrent_mode
= 0;
11219 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_P2P_MODE
) ||
11220 (op_mode
== DHD_FLAG_P2P_MODE
)) {
11221 #if defined(ARP_OFFLOAD_SUPPORT)
11224 #ifdef PKT_FILTER_SUPPORT
11225 dhd_pkt_filter_enable
= FALSE
;
11227 dhd
->op_mode
= DHD_FLAG_P2P_MODE
;
11228 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_IBSS_MODE
) ||
11229 (op_mode
== DHD_FLAG_IBSS_MODE
)) {
11230 dhd
->op_mode
= DHD_FLAG_IBSS_MODE
;
11232 dhd
->op_mode
= DHD_FLAG_STA_MODE
;
11233 #if !defined(AP) && defined(WLP2P)
11234 if (dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
&&
11235 (concurrent_mode
= dhd_get_concurrent_capabilites(dhd
))) {
11236 #if defined(ARP_OFFLOAD_SUPPORT)
11239 dhd
->op_mode
|= concurrent_mode
;
11242 /* Check if we are enabling p2p */
11243 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
11244 ret
= dhd_iovar(dhd
, 0, "apsta", (char *)&apsta
, sizeof(apsta
), NULL
, 0,
11247 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__
, ret
));
11249 #if defined(SOFTAP_AND_GC)
11250 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_AP
,
11251 (char *)&ap_mode
, sizeof(ap_mode
), TRUE
, 0)) < 0) {
11252 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__
, ret
));
11255 memcpy(&p2p_ea
, &dhd
->mac
, ETHER_ADDR_LEN
);
11256 ETHER_SET_LOCALADDR(&p2p_ea
);
11257 ret
= dhd_iovar(dhd
, 0, "p2p_da_override", (char *)&p2p_ea
, sizeof(p2p_ea
),
11260 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__
, ret
));
11262 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11265 (void)concurrent_mode
;
11269 #ifdef DISABLE_PRUNED_SCAN
11270 if (FW_SUPPORTED(dhd
, rsdb
)) {
11271 ret
= dhd_iovar(dhd
, 0, "scan_features", (char *)&scan_features
,
11272 sizeof(scan_features
), iovbuf
, sizeof(iovbuf
), FALSE
);
11274 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
11275 __FUNCTION__
, ret
));
11277 memcpy(&scan_features
, iovbuf
, 4);
11278 scan_features
&= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM
;
11279 ret
= dhd_iovar(dhd
, 0, "scan_features", (char *)&scan_features
,
11280 sizeof(scan_features
), NULL
, 0, TRUE
);
11282 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
11283 __FUNCTION__
, ret
));
11287 #endif /* DISABLE_PRUNED_SCAN */
11289 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG
"\n",
11290 dhd
->op_mode
, MAC2STRDBG(dhd
->mac
.octet
)));
11292 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11293 if (dhd
->op_mode
== DHD_FLAG_HOSTAP_MODE
)
11294 dhd
->info
->rxthread_enabled
= FALSE
;
11296 dhd
->info
->rxthread_enabled
= TRUE
;
11298 /* Set Country code */
11299 if (dhd
->dhd_cspec
.ccode
[0] != 0) {
11300 ret
= dhd_iovar(dhd
, 0, "country", (char *)&dhd
->dhd_cspec
, sizeof(wl_country_t
),
11303 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__
));
11306 #ifdef DHD_2G_ONLY_SUPPORT
11307 DHD_ERROR(("Enabled DHD 2G only support!!\n"));
11308 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_BAND
, (char *)&band
, sizeof(band
), TRUE
, 0);
11310 DHD_ERROR(("%s Set Band B failed %d\n", __FUNCTION__
, ret
));
11312 #endif /* DHD_2G_ONLY_SUPPORT */
11314 /* Set Listen Interval */
11315 ret
= dhd_iovar(dhd
, 0, "assoc_listen", (char *)&listen_interval
, sizeof(listen_interval
),
11318 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__
, ret
));
11320 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11321 #ifdef USE_WFA_CERT_CONF
11322 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_ROAMOFF
, &roamvar
) == BCME_OK
) {
11323 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__
, roamvar
));
11325 #endif /* USE_WFA_CERT_CONF */
11326 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
11327 ret
= dhd_iovar(dhd
, 0, "roam_off", (char *)&roamvar
, sizeof(roamvar
), NULL
, 0, TRUE
);
11328 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11329 #if defined(ROAM_ENABLE)
11330 #ifdef DISABLE_BCNLOSS_ROAM
11331 ret
= dhd_iovar(dhd
, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off
,
11332 sizeof(roam_bcnloss_off
), NULL
, 0, TRUE
);
11333 #endif /* DISABLE_BCNLOSS_ROAM */
11334 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, roam_trigger
,
11335 sizeof(roam_trigger
), TRUE
, 0)) < 0)
11336 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__
, ret
));
11337 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
, roam_scan_period
,
11338 sizeof(roam_scan_period
), TRUE
, 0)) < 0)
11339 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__
, ret
));
11340 if ((dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, roam_delta
,
11341 sizeof(roam_delta
), TRUE
, 0)) < 0)
11342 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__
, ret
));
11343 ret
= dhd_iovar(dhd
, 0, "fullroamperiod", (char *)&roam_fullscan_period
,
11344 sizeof(roam_fullscan_period
), NULL
, 0, TRUE
);
11346 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__
, ret
));
11347 #ifdef ROAM_AP_ENV_DETECTION
11348 if (roam_trigger
[0] == WL_AUTO_ROAM_TRIGGER
) {
11349 if (dhd_iovar(dhd
, 0, "roam_env_detection", (char *)&roam_env_mode
,
11350 sizeof(roam_env_mode
), NULL
, 0, TRUE
) == BCME_OK
)
11351 dhd
->roam_env_detection
= TRUE
;
11353 dhd
->roam_env_detection
= FALSE
;
11355 #endif /* ROAM_AP_ENV_DETECTION */
11356 #endif /* ROAM_ENABLE */
11358 #ifdef CUSTOM_EVENT_PM_WAKE
11359 ret
= dhd_iovar(dhd
, 0, "const_awake_thresh", (char *)&pm_awake_thresh
,
11360 sizeof(pm_awake_thresh
), NULL
, 0, TRUE
);
11362 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__
, ret
));
11364 #endif /* CUSTOM_EVENT_PM_WAKE */
11366 ret
= dhd_iovar(dhd
, 0, "okc_enable", (char *)&okc
, sizeof(okc
), NULL
, 0, TRUE
);
11369 ret
= dhd_iovar(dhd
, 0, "ccx_enable", (char *)&ccx
, sizeof(ccx
), NULL
, 0, TRUE
);
11370 #endif /* BCMCCX */
11373 dhd
->tdls_enable
= FALSE
;
11374 dhd_tdls_set_mode(dhd
, false);
11375 #endif /* WLTDLS */
11377 #ifdef DHD_ENABLE_LPC
11379 ret
= dhd_iovar(dhd
, 0, "lpc", (char *)&lpc
, sizeof(lpc
), NULL
, 0, TRUE
);
11381 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
11383 if (ret
== BCME_NOTDOWN
) {
11385 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
11386 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11387 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__
, ret
, lpc
));
11389 ret
= dhd_iovar(dhd
, 0, "lpc", (char *)&lpc
, sizeof(lpc
), NULL
, 0, TRUE
);
11390 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__
, ret
));
11393 #endif /* DHD_ENABLE_LPC */
11396 if (dhd
->op_mode
& DHD_FLAG_STA_MODE
) {
11397 if ((ret
= dhd_enable_adps(dhd
, ADPS_ENABLE
)) != BCME_OK
) {
11398 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11399 __FUNCTION__
, ret
));
11402 #endif /* WLADPS */
11404 #ifdef DHD_PM_CONTROL_FROM_FILE
11405 sec_control_pm(dhd
, &power_mode
);
11407 /* Set PowerSave mode */
11408 (void) dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
, sizeof(power_mode
), TRUE
, 0);
11409 #endif /* DHD_PM_CONTROL_FROM_FILE */
11411 #if defined(BCMSDIO)
11412 /* Match Host and Dongle rx alignment */
11413 ret
= dhd_iovar(dhd
, 0, "bus:txglomalign", (char *)&dongle_align
, sizeof(dongle_align
),
11416 #ifdef USE_WFA_CERT_CONF
11417 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_BUS_TXGLOM_MODE
, &glom
) == BCME_OK
) {
11418 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__
, glom
));
11420 #endif /* USE_WFA_CERT_CONF */
11421 if (glom
!= DEFAULT_GLOM_VALUE
) {
11422 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__
, glom
));
11423 ret
= dhd_iovar(dhd
, 0, "bus:txglom", (char *)&glom
, sizeof(glom
), NULL
, 0, TRUE
);
11425 #endif /* defined(BCMSDIO) */
11427 /* Setup timeout if Beacons are lost and roam is off to report link down */
11428 ret
= dhd_iovar(dhd
, 0, "bcn_timeout", (char *)&bcn_timeout
, sizeof(bcn_timeout
), NULL
, 0,
11431 /* Setup assoc_retry_max count to reconnect target AP in dongle */
11432 ret
= dhd_iovar(dhd
, 0, "assoc_retry_max", (char *)&retry_max
, sizeof(retry_max
), NULL
, 0,
11435 #if defined(AP) && !defined(WLP2P)
11436 ret
= dhd_iovar(dhd
, 0, "apsta", (char *)&apsta
, sizeof(apsta
), NULL
, 0, TRUE
);
11438 #endif /* defined(AP) && !defined(WLP2P) */
11440 #ifdef MIMO_ANT_SETTING
11441 dhd_sel_ant_from_file(dhd
);
11442 #endif /* MIMO_ANT_SETTING */
11444 #if defined(SOFTAP)
11445 if (ap_fw_loaded
== TRUE
) {
11446 dhd_wl_ioctl_cmd(dhd
, WLC_SET_DTIMPRD
, (char *)&dtim
, sizeof(dtim
), TRUE
, 0);
11450 #if defined(KEEP_ALIVE)
11452 /* Set Keep Alive : be sure to use FW with -keepalive */
11455 #if defined(SOFTAP)
11456 if (ap_fw_loaded
== FALSE
)
11458 if (!(dhd
->op_mode
&
11459 (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))) {
11460 if ((res
= dhd_keep_alive_onoff(dhd
)) < 0)
11461 DHD_ERROR(("%s set keeplive failed %d\n",
11462 __FUNCTION__
, res
));
11465 #endif /* defined(KEEP_ALIVE) */
11468 ret
= dhd_iovar(dhd
, 0, "txbf", (char *)&txbf
, sizeof(txbf
), NULL
, 0, TRUE
);
11470 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__
, ret
));
11472 #endif /* USE_WL_TXBF */
11474 ret
= dhd_iovar(dhd
, 0, "scancache", (char *)&scancache_enab
, sizeof(scancache_enab
), NULL
,
11477 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__
, ret
));
11480 #ifdef DISABLE_TXBFR
11481 ret
= dhd_iovar(dhd
, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap
, sizeof(txbf_bfr_cap
), NULL
,
11484 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__
, ret
));
11486 #endif /* DISABLE_TXBFR */
11488 #ifdef USE_WFA_CERT_CONF
11489 #ifdef USE_WL_FRAMEBURST
11490 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_FRAMEBURST
, &frameburst
) == BCME_OK
) {
11491 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__
, frameburst
));
11493 #endif /* USE_WL_FRAMEBURST */
11494 g_frameburst
= frameburst
;
11495 #endif /* USE_WFA_CERT_CONF */
11496 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11497 /* Disable Framebursting for SofAP */
11498 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
11501 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11502 /* Set frameburst to value */
11503 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_FAKEFRAG
, (char *)&frameburst
,
11504 sizeof(frameburst
), TRUE
, 0)) < 0) {
11505 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__
, ret
));
11507 #ifdef DHD_SET_FW_HIGHSPEED
11508 /* Set ack_ratio */
11509 ret
= dhd_iovar(dhd
, 0, "ack_ratio", (char *)&ack_ratio
, sizeof(ack_ratio
), NULL
, 0, TRUE
);
11511 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__
, ret
));
11514 /* Set ack_ratio_depth */
11515 ret
= dhd_iovar(dhd
, 0, "ack_ratio_depth", (char *)&ack_ratio_depth
,
11516 sizeof(ack_ratio_depth
), NULL
, 0, TRUE
);
11518 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__
, ret
));
11520 #endif /* DHD_SET_FW_HIGHSPEED */
11522 iov_buf
= (char*)MALLOC(dhd
->osh
, WLC_IOCTL_SMLEN
);
11523 if (iov_buf
== NULL
) {
11524 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN
));
11530 /* Apply AIBSS configurations */
11531 if ((ret
= dhd_preinit_aibss_ioctls(dhd
, iov_buf
)) != BCME_OK
) {
11532 DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
11533 __FUNCTION__
, ret
));
11536 #endif /* WLAIBSS */
11538 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
11539 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
11540 /* Set ampdu ba wsize to 64 or 16 */
11541 #ifdef CUSTOM_AMPDU_BA_WSIZE
11542 ampdu_ba_wsize
= CUSTOM_AMPDU_BA_WSIZE
;
11544 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
11545 if (dhd
->op_mode
== DHD_FLAG_IBSS_MODE
)
11546 ampdu_ba_wsize
= CUSTOM_IBSS_AMPDU_BA_WSIZE
;
11547 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
11548 if (ampdu_ba_wsize
!= 0) {
11549 ret
= dhd_iovar(dhd
, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize
,
11550 sizeof(ampdu_ba_wsize
), NULL
, 0, TRUE
);
11552 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11553 __FUNCTION__
, ampdu_ba_wsize
, ret
));
11556 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
11558 #if defined(CUSTOM_AMPDU_MPDU)
11559 ampdu_mpdu
= CUSTOM_AMPDU_MPDU
;
11560 if (ampdu_mpdu
!= 0 && (ampdu_mpdu
<= ampdu_ba_wsize
)) {
11561 ret
= dhd_iovar(dhd
, 0, "ampdu_mpdu", (char *)&du_mpdu
, sizeof(ampdu_mpdu
),
11564 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11565 __FUNCTION__
, CUSTOM_AMPDU_MPDU
, ret
));
11568 #endif /* CUSTOM_AMPDU_MPDU */
11570 #if defined(CUSTOM_AMPDU_RELEASE)
11571 ampdu_release
= CUSTOM_AMPDU_RELEASE
;
11572 if (ampdu_release
!= 0 && (ampdu_release
<= ampdu_ba_wsize
)) {
11573 ret
= dhd_iovar(dhd
, 0, "ampdu_release", (char *)&du_release
,
11574 sizeof(ampdu_release
), NULL
, 0, TRUE
);
11576 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11577 __FUNCTION__
, CUSTOM_AMPDU_RELEASE
, ret
));
11580 #endif /* CUSTOM_AMPDU_RELEASE */
11582 #if defined(CUSTOM_AMSDU_AGGSF)
11583 amsdu_aggsf
= CUSTOM_AMSDU_AGGSF
;
11584 if (amsdu_aggsf
!= 0) {
11585 ret
= dhd_iovar(dhd
, 0, "amsdu_aggsf", (char *)&amsdu_aggsf
, sizeof(amsdu_aggsf
),
11588 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11589 __FUNCTION__
, CUSTOM_AMSDU_AGGSF
, ret
));
11592 #endif /* CUSTOM_AMSDU_AGGSF */
11594 #if defined(BCMSUP_4WAY_HANDSHAKE)
11595 /* Read 4-way handshake requirements */
11596 if (dhd_use_idsup
== 1) {
11597 ret
= dhd_iovar(dhd
, 0, "sup_wpa", (char *)&sup_wpa
, sizeof(sup_wpa
),
11598 (char *)&iovbuf
, sizeof(iovbuf
), FALSE
);
11599 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
11600 * in-dongle supplicant.
11602 if (ret
>= 0 || ret
== BCME_NOTREADY
)
11603 dhd
->fw_4way_handshake
= TRUE
;
11604 DHD_TRACE(("4-way handshake mode is: %d\n", dhd
->fw_4way_handshake
));
11606 #endif /* BCMSUP_4WAY_HANDSHAKE */
11607 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11608 ret
= dhd_iovar(dhd
, 0, "vht_features", NULL
, 0,
11609 (char *)&vht_features
, sizeof(vht_features
), FALSE
);
11611 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__
, ret
));
11614 #ifdef SUPPORT_2G_VHT
11615 vht_features
|= 0x3; /* 2G support */
11616 #endif /* SUPPORT_2G_VHT */
11617 #ifdef SUPPORT_5G_1024QAM_VHT
11618 vht_features
|= 0x6; /* 5G 1024 QAM support */
11619 #endif /* SUPPORT_5G_1024QAM_VHT */
11621 if (vht_features
) {
11622 ret
= dhd_iovar(dhd
, 0, "vht_features", (char *)&vht_features
, sizeof(vht_features
),
11625 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__
, ret
));
11627 if (ret
== BCME_NOTDOWN
) {
11629 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
,
11630 (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
11631 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11632 " vht_features = 0x%x\n",
11633 __FUNCTION__
, ret
, vht_features
));
11635 ret
= dhd_iovar(dhd
, 0, "vht_features", (char *)&vht_features
,
11636 sizeof(vht_features
), NULL
, 0, TRUE
);
11638 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__
, ret
));
11642 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11643 #ifdef DISABLE_11N_PROPRIETARY_RATES
11644 ret
= dhd_iovar(dhd
, 0, "ht_features", (char *)&ht_features
, sizeof(ht_features
), NULL
, 0,
11647 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__
, ret
));
11649 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11650 #ifdef DHD_DISABLE_VHTMODE
11651 dhd_disable_vhtmode(dhd
);
11652 #endif /* DHD_DISABLE_VHTMODE */
11654 ret
= dhd_iovar(dhd
, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4
, sizeof(buf_key_b4_m4
),
11657 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__
, ret
));
11659 #ifdef SUPPORT_SET_CAC
11660 ret
= dhd_iovar(dhd
, 0, "cac", (char *)&cac
, sizeof(cac
), NULL
, 0, TRUE
);
11662 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__
, cac
, ret
));
11664 #endif /* SUPPORT_SET_CAC */
11666 /* Get the required details from dongle during preinit ioctl */
11667 dhd_ulp_preinit(dhd
);
11668 #endif /* DHD_ULP */
11670 /* Read event_msgs mask */
11671 ret
= dhd_iovar(dhd
, 0, "event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
,
11672 sizeof(iovbuf
), FALSE
);
11674 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__
, ret
));
11677 bcopy(iovbuf
, eventmask
, WL_EVENTING_MASK_LEN
);
11679 /* Setup event_msgs */
11680 setbit(eventmask
, WLC_E_SET_SSID
);
11681 setbit(eventmask
, WLC_E_PRUNE
);
11682 setbit(eventmask
, WLC_E_AUTH
);
11683 setbit(eventmask
, WLC_E_AUTH_IND
);
11684 setbit(eventmask
, WLC_E_ASSOC
);
11685 setbit(eventmask
, WLC_E_REASSOC
);
11686 setbit(eventmask
, WLC_E_REASSOC_IND
);
11687 if (!(dhd
->op_mode
& DHD_FLAG_IBSS_MODE
))
11688 setbit(eventmask
, WLC_E_DEAUTH
);
11689 setbit(eventmask
, WLC_E_DEAUTH_IND
);
11690 setbit(eventmask
, WLC_E_DISASSOC_IND
);
11691 setbit(eventmask
, WLC_E_DISASSOC
);
11692 setbit(eventmask
, WLC_E_JOIN
);
11693 setbit(eventmask
, WLC_E_START
);
11694 setbit(eventmask
, WLC_E_ASSOC_IND
);
11695 setbit(eventmask
, WLC_E_PSK_SUP
);
11696 setbit(eventmask
, WLC_E_LINK
);
11697 setbit(eventmask
, WLC_E_MIC_ERROR
);
11698 setbit(eventmask
, WLC_E_ASSOC_REQ_IE
);
11699 setbit(eventmask
, WLC_E_ASSOC_RESP_IE
);
11700 #ifdef LIMIT_BORROW
11701 setbit(eventmask
, WLC_E_ALLOW_CREDIT_BORROW
);
11703 #ifndef WL_CFG80211
11704 setbit(eventmask
, WLC_E_PMKID_CACHE
);
11705 setbit(eventmask
, WLC_E_TXFAIL
);
11707 setbit(eventmask
, WLC_E_JOIN_START
);
11708 setbit(eventmask
, WLC_E_SCAN_COMPLETE
);
11710 setbit(eventmask
, WLC_E_SCAN_CONFIRM_IND
);
11713 setbit(eventmask
, WLC_E_PFN_NET_FOUND
);
11714 setbit(eventmask
, WLC_E_PFN_BEST_BATCHING
);
11715 setbit(eventmask
, WLC_E_PFN_BSSID_NET_FOUND
);
11716 setbit(eventmask
, WLC_E_PFN_BSSID_NET_LOST
);
11717 #endif /* PNO_SUPPORT */
11718 /* enable dongle roaming event */
11720 setbit(eventmask
, WLC_E_ROAM
);
11721 setbit(eventmask
, WLC_E_BSSID
);
11722 #endif /* WL_CFG80211 */
11724 setbit(eventmask
, WLC_E_ADDTS_IND
);
11725 setbit(eventmask
, WLC_E_DELTS_IND
);
11726 #endif /* BCMCCX */
11728 setbit(eventmask
, WLC_E_TDLS_PEER_EVENT
);
11729 #endif /* WLTDLS */
11731 setbit(eventmask
, WLC_E_PROXD
);
11732 #endif /* RTT_SUPPORT */
11734 setbit(eventmask
, WLC_E_ESCAN_RESULT
);
11735 setbit(eventmask
, WLC_E_AP_STARTED
);
11736 setbit(eventmask
, WLC_E_ACTION_FRAME_RX
);
11737 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
11738 setbit(eventmask
, WLC_E_P2P_DISC_LISTEN_COMPLETE
);
11740 #endif /* WL_CFG80211 */
11742 setbit(eventmask
, WLC_E_AIBSS_TXFAIL
);
11743 #endif /* WLAIBSS */
11745 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11746 if (dhd_logtrace_from_file(dhd
)) {
11747 setbit(eventmask
, WLC_E_TRACE
);
11749 clrbit(eventmask
, WLC_E_TRACE
);
11751 #elif defined(SHOW_LOGTRACE)
11752 setbit(eventmask
, WLC_E_TRACE
);
11754 clrbit(eventmask
, WLC_E_TRACE
);
11755 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11757 setbit(eventmask
, WLC_E_CSA_COMPLETE_IND
);
11758 #ifdef CUSTOM_EVENT_PM_WAKE
11759 setbit(eventmask
, WLC_E_EXCESS_PM_WAKE_EVENT
);
11760 #endif /* CUSTOM_EVENT_PM_WAKE */
11761 #ifdef DHD_LOSSLESS_ROAMING
11762 setbit(eventmask
, WLC_E_ROAM_PREP
);
11765 setbit(eventmask
, WLC_E_NAN
);
11766 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11767 dhd_update_flow_prio_map(dhd
, DHD_FLOW_PRIO_LLR_MAP
);
11768 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11770 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11771 dhd_update_flow_prio_map(dhd
, DHD_FLOW_PRIO_LLR_MAP
);
11772 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11774 /* Write updated Event mask */
11775 ret
= dhd_iovar(dhd
, 0, "event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, NULL
, 0, TRUE
);
11777 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__
, ret
));
11781 /* make up event mask ext message iovar for event larger than 128 */
11782 msglen
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
+ EVENTMSGS_EXT_STRUCT_SIZE
;
11783 eventmask_msg
= (eventmsgs_ext_t
*)MALLOC(dhd
->osh
, msglen
);
11784 if (eventmask_msg
== NULL
) {
11785 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen
));
11789 bzero(eventmask_msg
, msglen
);
11790 eventmask_msg
->ver
= EVENTMSGS_VER
;
11791 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
11793 /* Read event_msgs_ext mask */
11794 ret2
= dhd_iovar(dhd
, 0, "event_msgs_ext", (char *)eventmask_msg
, msglen
, iov_buf
,
11795 WLC_IOCTL_SMLEN
, FALSE
);
11797 if (ret2
== 0) { /* event_msgs_ext must be supported */
11798 bcopy(iov_buf
, eventmask_msg
, msglen
);
11799 #ifdef RSSI_MONITOR_SUPPORT
11800 setbit(eventmask_msg
->mask
, WLC_E_RSSI_LQM
);
11801 #endif /* RSSI_MONITOR_SUPPORT */
11802 #ifdef GSCAN_SUPPORT
11803 setbit(eventmask_msg
->mask
, WLC_E_PFN_GSCAN_FULL_RESULT
);
11804 setbit(eventmask_msg
->mask
, WLC_E_PFN_SCAN_COMPLETE
);
11805 setbit(eventmask_msg
->mask
, WLC_E_PFN_SSID_EXT
);
11806 setbit(eventmask_msg
->mask
, WLC_E_ROAM_EXP_EVENT
);
11807 #endif /* GSCAN_SUPPORT */
11808 setbit(eventmask_msg
->mask
, WLC_E_RSSI_LQM
);
11809 #ifdef BT_WIFI_HANDOVER
11810 setbit(eventmask_msg
->mask
, WLC_E_BT_WIFI_HANDOVER_REQ
);
11811 #endif /* BT_WIFI_HANDOVER */
11813 setbit(eventmask_msg
->mask
, WLC_E_ROAM_PREP
);
11814 #endif /* DBG_PKT_MON */
11816 setbit(eventmask_msg
->mask
, WLC_E_ULP
);
11819 setbit(eventmask_msg
->mask
, WLC_E_NATOE_NFCT
);
11820 #endif /* WL_NATOE */
11822 setbit(eventmask_msg
->mask
, WLC_E_SLOTTED_BSS_PEER_OP
);
11823 #endif /* WL_NAN */
11824 #ifdef SUPPORT_EVT_SDB_LOG
11825 setbit(eventmask_msg
->mask
, WLC_E_SDB_TRANSITION
);
11826 #endif /* SUPPORT_EVT_SDB_LOG */
11827 /* Write updated Event mask */
11828 eventmask_msg
->ver
= EVENTMSGS_VER
;
11829 eventmask_msg
->command
= EVENTMSGS_SET_MASK
;
11830 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
11831 ret
= dhd_iovar(dhd
, 0, "event_msgs_ext", (char *)eventmask_msg
, msglen
, NULL
, 0,
11834 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__
, ret
));
11837 } else if (ret2
== BCME_UNSUPPORTED
|| ret2
== BCME_VERSION
) {
11838 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11839 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
11840 __FUNCTION__
, ret2
));
11842 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__
, ret2
));
11847 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11848 /* Enabling event log trace for EAP events */
11849 el_tag
= (wl_el_tag_params_t
*)MALLOC(dhd
->osh
, sizeof(wl_el_tag_params_t
));
11850 if (el_tag
== NULL
) {
11851 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
11852 (int)sizeof(wl_el_tag_params_t
)));
11856 el_tag
->tag
= EVENT_LOG_TAG_4WAYHANDSHAKE
;
11858 el_tag
->flags
= EVENT_LOG_TAG_FLAG_LOG
;
11859 ret
= dhd_iovar(dhd
, 0, "event_log_tag_control", (char *)el_tag
, sizeof(*el_tag
), NULL
, 0,
11861 #endif /* DHD_8021X_DUMP */
11863 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_CHANNEL_TIME
, (char *)&scan_assoc_time
,
11864 sizeof(scan_assoc_time
), TRUE
, 0);
11865 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_UNASSOC_TIME
, (char *)&scan_unassoc_time
,
11866 sizeof(scan_unassoc_time
), TRUE
, 0);
11867 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_PASSIVE_TIME
, (char *)&scan_passive_time
,
11868 sizeof(scan_passive_time
), TRUE
, 0);
11870 #ifdef ARP_OFFLOAD_SUPPORT
11871 /* Set and enable ARP offload feature for STA only */
11872 #if defined(SOFTAP)
11873 if (arpoe
&& !ap_fw_loaded
) {
11877 dhd_arp_offload_enable(dhd
, TRUE
);
11878 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
11880 dhd_arp_offload_enable(dhd
, FALSE
);
11881 dhd_arp_offload_set(dhd
, 0);
11883 dhd_arp_enable
= arpoe
;
11884 #endif /* ARP_OFFLOAD_SUPPORT */
11886 #ifdef PKT_FILTER_SUPPORT
11887 /* Setup default defintions for pktfilter , enable in suspend */
11888 dhd
->pktfilter_count
= 6;
11889 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = NULL
;
11890 if (!FW_SUPPORTED(dhd
, pf6
)) {
11891 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = NULL
;
11892 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
11894 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11895 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = DISCARD_IPV4_MCAST
;
11896 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = DISCARD_IPV6_MCAST
;
11898 /* apply APP pktfilter */
11899 dhd
->pktfilter
[DHD_ARP_FILTER_NUM
] = "105 0 0 12 0xFFFF 0x0806";
11901 #ifdef BLOCK_IPV6_PACKET
11902 /* Setup filter to allow only IPv4 unicast frames */
11903 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 "
11904 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
11906 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR
;
11908 /* Setup filter to allow only unicast */
11909 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0x01 0x00";
11910 #endif /* BLOCK_IPV6_PACKET */
11912 #ifdef PASS_IPV4_SUSPEND
11913 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = "104 0 0 0 0xFFFFFF 0x01005E";
11915 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
11916 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = NULL
;
11917 #endif /* PASS_IPV4_SUSPEND */
11918 if (FW_SUPPORTED(dhd
, pf6
)) {
11919 /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */
11920 dhd
->pktfilter
[DHD_IP4BCAST_DROP_FILTER_NUM
] = DISCARD_IPV4_BCAST
;
11921 /* Immediately pkt filter TYPE 6 Dicard Cisco STP packet */
11922 dhd
->pktfilter
[DHD_LLC_STP_DROP_FILTER_NUM
] = DISCARD_LLC_STP
;
11923 /* Immediately pkt filter TYPE 6 Dicard Cisco XID protocol */
11924 dhd
->pktfilter
[DHD_LLC_XID_DROP_FILTER_NUM
] = DISCARD_LLC_XID
;
11925 dhd
->pktfilter_count
= 10;
11928 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11929 dhd
->pktfilter_count
= 4;
11930 /* Setup filter to block broadcast and NAT Keepalive packets */
11931 /* discard all broadcast packets */
11932 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0xffffff 0xffffff";
11933 /* discard NAT Keepalive packets */
11934 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = "102 0 0 36 0xffffffff 0x11940009";
11935 /* discard NAT Keepalive packets */
11936 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = "104 0 0 38 0xffffffff 0x11940009";
11937 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
11938 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11940 #if defined(SOFTAP)
11941 if (ap_fw_loaded
) {
11942 dhd_enable_packet_filter(0, dhd
);
11944 #endif /* defined(SOFTAP) */
11945 dhd_set_packet_filter(dhd
);
11946 #endif /* PKT_FILTER_SUPPORT */
11948 ret
= dhd_iovar(dhd
, 0, "nmode", (char *)&nmode
, sizeof(nmode
), NULL
, 0, TRUE
);
11950 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__
, ret
));
11951 #endif /* DISABLE_11N */
11953 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11954 ret
= dhd_iovar(dhd
, 0, "bcn_li_bcn", (char *)&bcn_li_bcn
, sizeof(bcn_li_bcn
), NULL
, 0,
11956 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11957 #ifdef AMPDU_VO_ENABLE
11958 tid
.tid
= PRIO_8021D_VO
; /* Enable TID(6) for voice */
11960 ret
= dhd_iovar(dhd
, 0, "ampdu_tid", (char *)&tid
, sizeof(tid
), NULL
, 0, TRUE
);
11962 tid
.tid
= PRIO_8021D_NC
; /* Enable TID(7) for voice */
11964 ret
= dhd_iovar(dhd
, 0, "ampdu_tid", (char *)&tid
, sizeof(tid
), NULL
, 0, TRUE
);
11966 /* query for 'clmver' to get clm version info from firmware */
11967 memset(buf
, 0, sizeof(buf
));
11968 ret
= dhd_iovar(dhd
, 0, "clmver", NULL
, 0, buf
, sizeof(buf
), FALSE
);
11970 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
11972 char *ver_temp_buf
= NULL
;
11974 if ((ver_temp_buf
= bcmstrstr(buf
, "Data:")) == NULL
) {
11975 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11977 ptr
= (ver_temp_buf
+ strlen("Data:"));
11978 if ((ver_temp_buf
= bcmstrtok(&ptr
, "\n", 0)) == NULL
) {
11979 DHD_ERROR(("Couldn't find New line character\n"));
11981 memset(clm_version
, 0, CLM_VER_STR_LEN
);
11982 strncpy(clm_version
, ver_temp_buf
,
11983 MIN(strlen(ver_temp_buf
) + 1, CLM_VER_STR_LEN
- 1));
11984 DHD_INFO(("CLM version = %s\n", clm_version
));
11988 #if defined(CUSTOMER_HW4_DEBUG)
11989 if ((ver_temp_buf
= bcmstrstr(ptr
, "Customization:")) == NULL
) {
11990 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
11993 ptr
= (ver_temp_buf
+ strlen("Customization:"));
11994 if ((ver_temp_buf
= bcmstrtok(&ptr
, "(\n", &tokenlim
)) == NULL
) {
11995 DHD_ERROR(("Couldn't find project blob version"
11996 "or New line character\n"));
11997 } else if (tokenlim
== '(') {
11998 snprintf(clm_version
,
11999 CLM_VER_STR_LEN
- 1, "%s, Blob ver = Major : %s minor : ",
12000 clm_version
, ver_temp_buf
);
12001 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version
));
12002 if ((ver_temp_buf
= bcmstrtok(&ptr
, "\n", &tokenlim
)) == NULL
) {
12003 DHD_ERROR(("Couldn't find New line character\n"));
12005 snprintf(clm_version
,
12006 strlen(clm_version
) + strlen(ver_temp_buf
),
12007 "%s%s", clm_version
, ver_temp_buf
);
12008 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
12012 } else if (tokenlim
== '\n') {
12013 snprintf(clm_version
,
12014 strlen(clm_version
) + strlen(", Blob ver = Major : ") + 1,
12015 "%s, Blob ver = Major : ", clm_version
);
12016 snprintf(clm_version
,
12017 strlen(clm_version
) + strlen(ver_temp_buf
) + 1,
12018 "%s%s", clm_version
, ver_temp_buf
);
12019 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version
));
12022 #endif /* CUSTOMER_HW4_DEBUG */
12023 if (strlen(clm_version
)) {
12024 DHD_ERROR(("CLM version = %s\n", clm_version
));
12026 DHD_ERROR(("Couldn't find CLM version!\n"));
12030 /* query for 'ver' to get version info from firmware */
12031 memset(buf
, 0, sizeof(buf
));
12033 ret
= dhd_iovar(dhd
, 0, "ver", NULL
, 0, (char *)&buf
, sizeof(buf
), FALSE
);
12035 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
12037 bcmstrtok(&ptr
, "\n", 0);
12038 /* Print fw version info */
12039 DHD_ERROR(("Firmware version = %s\n", buf
));
12040 strncpy(fw_version
, buf
, FW_VER_STR_LEN
);
12041 fw_version
[FW_VER_STR_LEN
-1] = '\0';
12042 #if defined(BCMSDIO) || defined(BCMPCIE)
12043 dhd_set_version_info(dhd
, buf
);
12044 #endif /* BCMSDIO || BCMPCIE */
12045 #ifdef WRITE_WLANINFO
12046 sec_save_wlinfo(buf
, EPI_VERSION_STR
, dhd
->info
->nv_path
, clm_version
);
12047 #endif /* WRITE_WLANINFO */
12049 #ifdef GEN_SOFTAP_INFO_FILE
12050 sec_save_softap_info();
12051 #endif /* GEN_SOFTAP_INFO_FILE */
12053 #if defined(BCMSDIO)
12054 dhd_txglom_enable(dhd
, TRUE
);
12055 #endif /* defined(BCMSDIO) */
12057 #if defined(BCMSDIO)
12058 #ifdef PROP_TXSTATUS
12059 if (disable_proptx
||
12060 #ifdef PROP_TXSTATUS_VSDB
12061 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
12062 (dhd
->op_mode
!= DHD_FLAG_HOSTAP_MODE
&&
12063 dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
) ||
12064 #endif /* PROP_TXSTATUS_VSDB */
12066 wlfc_enable
= FALSE
;
12069 #if defined(PROP_TXSTATUS)
12070 #ifdef USE_WFA_CERT_CONF
12071 if (sec_get_param_wfa_cert(dhd
, SET_PARAM_PROPTX
, &proptx
) == BCME_OK
) {
12072 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__
, proptx
));
12073 wlfc_enable
= proptx
;
12075 #endif /* USE_WFA_CERT_CONF */
12076 #endif /* PROP_TXSTATUS */
12078 #ifndef DISABLE_11N
12079 ret2
= dhd_iovar(dhd
, 0, "ampdu_hostreorder", (char *)&hostreorder
, sizeof(hostreorder
),
12082 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__
, ret2
));
12083 if (ret2
!= BCME_UNSUPPORTED
)
12086 if (ret
== BCME_NOTDOWN
) {
12088 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
,
12089 sizeof(wl_down
), TRUE
, 0);
12090 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
12091 __FUNCTION__
, ret2
, hostreorder
));
12093 ret2
= dhd_iovar(dhd
, 0, "ampdu_hostreorder", (char *)&hostreorder
,
12094 sizeof(hostreorder
), NULL
, 0, TRUE
);
12095 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__
, ret2
));
12096 if (ret2
!= BCME_UNSUPPORTED
)
12099 if (ret2
!= BCME_OK
)
12102 #endif /* DISABLE_11N */
12105 dhd_wlfc_init(dhd
);
12106 #ifndef DISABLE_11N
12107 else if (hostreorder
)
12108 dhd_wlfc_hostreorder_init(dhd
);
12109 #endif /* DISABLE_11N */
12111 #endif /* PROP_TXSTATUS */
12112 #endif /* BCMSDIO || BCMBUS */
12113 #ifndef PCIE_FULL_DONGLE
12114 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
12115 if (FW_SUPPORTED(dhd
, ap
)) {
12116 wl_ap_isolate
= AP_ISOLATE_SENDUP_ALL
;
12117 ret
= dhd_iovar(dhd
, 0, "ap_isolate", (char *)&wl_ap_isolate
, sizeof(wl_ap_isolate
),
12120 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
12122 #endif /* PCIE_FULL_DONGLE */
12124 if (!dhd
->pno_state
) {
12129 if (!dhd
->rtt_state
) {
12130 ret
= dhd_rtt_init(dhd
);
12132 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__
));
12137 /* Failure to configure filter IE is not a fatal error, ignore it. */
12138 if (!(dhd
->op_mode
& (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
)))
12139 dhd_read_from_file(dhd
);
12140 #endif /* FILTER_IE */
12142 dhd_interworking_enable(dhd
);
12145 #ifdef NDO_CONFIG_SUPPORT
12146 dhd
->ndo_enable
= FALSE
;
12147 dhd
->ndo_host_ip_overflow
= FALSE
;
12148 dhd
->ndo_max_host_ip
= NDO_MAX_HOST_IP_ENTRIES
;
12149 #endif /* NDO_CONFIG_SUPPORT */
12151 /* ND offload version supported */
12152 dhd
->ndo_version
= dhd_ndo_get_version(dhd
);
12153 if (dhd
->ndo_version
> 0) {
12154 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__
, dhd
->ndo_version
));
12156 #ifdef NDO_CONFIG_SUPPORT
12157 /* enable Unsolicited NA filter */
12158 ret
= dhd_ndo_unsolicited_na_filter_enable(dhd
, 1);
12160 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__
));
12162 #endif /* NDO_CONFIG_SUPPORT */
12165 /* check dongle supports wbtext (product policy) or not */
12166 dhd
->wbtext_support
= FALSE
;
12167 if (dhd_wl_ioctl_get_intiovar(dhd
, "wnm_bsstrans_resp", &wnm_bsstrans_resp
,
12168 WLC_GET_VAR
, FALSE
, 0) != BCME_OK
) {
12169 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12171 dhd
->wbtext_policy
= wnm_bsstrans_resp
;
12172 if (dhd
->wbtext_policy
== WL_BSSTRANS_POLICY_PRODUCT_WBTEXT
) {
12173 dhd
->wbtext_support
= TRUE
;
12176 /* driver can turn off wbtext feature through makefile */
12177 if (dhd
->wbtext_support
) {
12178 if (dhd_wl_ioctl_set_intiovar(dhd
, "wnm_bsstrans_resp",
12179 WL_BSSTRANS_POLICY_ROAM_ALWAYS
,
12180 WLC_SET_VAR
, FALSE
, 0) != BCME_OK
) {
12181 DHD_ERROR(("failed to disable WBTEXT\n"));
12184 #endif /* !WBTEXT */
12186 #if defined(DHD_NON_DMA_M2M_CORRUPTION)
12187 /* check pcie non dma loopback */
12188 if (dhd
->op_mode
== DHD_FLAG_MFG_MODE
) {
12189 memset(&pcie_dmaxfer_lpbk
, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t
));
12190 pcie_dmaxfer_lpbk
.u
.length
= PCIE_DMAXFER_LPBK_LENGTH
;
12191 pcie_dmaxfer_lpbk
.lpbkmode
= M2M_NON_DMA_LPBK
;
12192 pcie_dmaxfer_lpbk
.wait
= TRUE
;
12194 if ((ret
= dhd_bus_iovar_op(dhd
, "pcie_dmaxfer", NULL
, 0,
12195 (char *)&pcie_dmaxfer_lpbk
, sizeof(dhd_pcie_dmaxfer_lpbk_t
),
12197 DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d\n",
12202 if (pcie_dmaxfer_lpbk
.u
.status
!= BCME_OK
) {
12203 DHD_ERROR(("failed to check PCIe Non DMA Loopback Test!!! Reason : %d"
12204 " Status : %d\n", ret
, pcie_dmaxfer_lpbk
.u
.status
));
12209 DHD_ERROR(("successful to check PCIe Non DMA Loopback Test\n"));
12212 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
12214 /* WNM capabilities */
12217 | WL_WNM_BSSTRANS
| WL_WNM_NOTIF
12220 | WL_WNM_BSSTRANS
| WL_WNM_MAXIDLE
12223 if (dhd_iovar(dhd
, 0, "wnm", (char *)&wnm_cap
, sizeof(wnm_cap
), NULL
, 0, TRUE
) < 0) {
12224 DHD_ERROR(("failed to set WNM capabilities\n"));
12227 if (FW_SUPPORTED(dhd
, ecounters
) && enable_ecounter
) {
12228 if (dhd_start_ecounters(dhd
) != BCME_OK
) {
12229 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__
));
12230 } else if (dhd_start_event_ecounters(dhd
) != BCME_OK
) {
12231 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__
));
12236 /* store the preserve log set numbers */
12237 if (dhd_get_preserve_log_numbers(dhd
, &dhd
->logset_prsrv_mask
)
12239 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__
));
12242 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
12243 if (dhd_iovar(dhd
, 0, "wnm_btmdelta", (char *)&btmdelta
, sizeof(btmdelta
),
12244 NULL
, 0, TRUE
) < 0) {
12245 DHD_ERROR(("failed to set BTM delta\n"));
12247 #endif /* WBTEXT && WBTEXT_BTMDELTA */
12250 if (FW_SUPPORTED(dhd
, monitor
)) {
12251 dhd
->monitor_enable
= TRUE
;
12252 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__
));
12254 dhd
->monitor_enable
= FALSE
;
12255 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__
));
12257 #endif /* WL_MONITOR */
12261 if (eventmask_msg
) {
12262 MFREE(dhd
->osh
, eventmask_msg
, msglen
);
12263 eventmask_msg
= NULL
;
12266 MFREE(dhd
->osh
, iov_buf
, WLC_IOCTL_SMLEN
);
12269 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12271 MFREE(dhd
->osh
, el_tag
, sizeof(wl_el_tag_params_t
));
12274 #endif /* DHD_8021X_DUMP */
12279 dhd_iovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *param_buf
, uint param_len
, char *res_buf
,
12280 uint res_len
, int set
)
12287 if (res_len
> WLC_IOCTL_MAXLEN
|| param_len
> WLC_IOCTL_MAXLEN
)
12288 return BCME_BADARG
;
12290 input_len
= strlen(name
) + 1 + param_len
;
12291 if (input_len
> WLC_IOCTL_MAXLEN
)
12292 return BCME_BADARG
;
12296 if (res_buf
|| res_len
!= 0) {
12297 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__
));
12301 buf
= MALLOCZ(pub
->osh
, input_len
);
12303 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
12307 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
12313 ioc
.cmd
= WLC_SET_VAR
;
12315 ioc
.len
= input_len
;
12318 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12320 if (!res_buf
|| !res_len
) {
12321 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__
));
12326 if (res_len
< input_len
) {
12327 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__
,
12328 res_len
, input_len
));
12329 buf
= MALLOCZ(pub
->osh
, input_len
);
12331 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__
));
12335 ret
= bcm_mkiovar(name
, param_buf
, param_len
, buf
, input_len
);
12341 ioc
.cmd
= WLC_GET_VAR
;
12343 ioc
.len
= input_len
;
12346 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12348 if (ret
== BCME_OK
) {
12349 memcpy(res_buf
, buf
, res_len
);
12352 memset(res_buf
, 0, res_len
);
12353 ret
= bcm_mkiovar(name
, param_buf
, param_len
, res_buf
, res_len
);
12359 ioc
.cmd
= WLC_GET_VAR
;
12364 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12369 MFREE(pub
->osh
, buf
, input_len
);
12376 dhd_getiovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *cmd_buf
,
12377 uint cmd_len
, char **resptr
, uint resp_len
)
12379 int len
= resp_len
;
12381 char *buf
= *resptr
;
12383 if (resp_len
> WLC_IOCTL_MAXLEN
)
12384 return BCME_BADARG
;
12386 memset(buf
, 0, resp_len
);
12388 ret
= bcm_mkiovar(name
, cmd_buf
, cmd_len
, buf
, len
);
12390 return BCME_BUFTOOSHORT
;
12393 memset(&ioc
, 0, sizeof(ioc
));
12395 ioc
.cmd
= WLC_GET_VAR
;
12400 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
12405 int dhd_change_mtu(dhd_pub_t
*dhdp
, int new_mtu
, int ifidx
)
12407 struct dhd_info
*dhd
= dhdp
->info
;
12408 struct net_device
*dev
= NULL
;
12410 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
12411 dev
= dhd
->iflist
[ifidx
]->net
;
12414 if (netif_running(dev
)) {
12415 DHD_ERROR(("%s: Must be down to change its MTU", dev
->name
));
12416 return BCME_NOTDOWN
;
12419 #define DHD_MIN_MTU 1500
12420 #define DHD_MAX_MTU 1752
12422 if ((new_mtu
< DHD_MIN_MTU
) || (new_mtu
> DHD_MAX_MTU
)) {
12423 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__
, new_mtu
));
12424 return BCME_BADARG
;
12427 dev
->mtu
= new_mtu
;
12431 #ifdef ARP_OFFLOAD_SUPPORT
12432 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
12434 aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
)
12436 u32 ipv4_buf
[MAX_IPV4_ENTRIES
]; /* temp save for AOE host_ip table */
12440 bzero(ipv4_buf
, sizeof(ipv4_buf
));
12442 /* display what we've got */
12443 ret
= dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
12444 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__
));
12446 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
12448 /* now we saved hoste_ip table, clr it in the dongle AOE */
12449 dhd_aoe_hostip_clr(dhd_pub
, idx
);
12452 DHD_ERROR(("%s failed\n", __FUNCTION__
));
12456 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
12457 if (add
&& (ipv4_buf
[i
] == 0)) {
12459 add
= FALSE
; /* added ipa to local table */
12460 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12462 } else if (ipv4_buf
[i
] == ipa
) {
12464 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12465 __FUNCTION__
, ipa
, i
));
12468 if (ipv4_buf
[i
] != 0) {
12469 /* add back host_ip entries from our local cache */
12470 dhd_arp_offload_add_ip(dhd_pub
, ipv4_buf
[i
], idx
);
12471 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12472 __FUNCTION__
, ipv4_buf
[i
], i
));
12476 /* see the resulting hostip table */
12477 dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
12478 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__
));
12479 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
12484 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
12485 * whenever there is an event related to an IP address.
12486 * ptr : kernel provided pointer to IP address that has changed
12488 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
12489 unsigned long event
,
12492 struct in_ifaddr
*ifa
= (struct in_ifaddr
*)ptr
;
12495 dhd_pub_t
*dhd_pub
;
12498 if (!dhd_arp_enable
)
12499 return NOTIFY_DONE
;
12500 if (!ifa
|| !(ifa
->ifa_dev
->dev
))
12501 return NOTIFY_DONE
;
12503 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12504 /* Filter notifications meant for non Broadcom devices */
12505 if ((ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_pri
) &&
12506 (ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_virt
)) {
12507 #if defined(WL_ENABLE_P2P_IF)
12508 if (!wl_cfgp2p_is_ifops(ifa
->ifa_dev
->dev
->netdev_ops
))
12509 #endif /* WL_ENABLE_P2P_IF */
12510 return NOTIFY_DONE
;
12512 #endif /* LINUX_VERSION_CODE */
12514 dhd
= DHD_DEV_INFO(ifa
->ifa_dev
->dev
);
12516 return NOTIFY_DONE
;
12518 dhd_pub
= &dhd
->pub
;
12520 if (dhd_pub
->arp_version
== 1) {
12523 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
12524 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
== ifa
->ifa_dev
->dev
)
12527 if (idx
< DHD_MAX_IFS
)
12528 DHD_TRACE(("ifidx : %p %s %d\n", dhd
->iflist
[idx
]->net
,
12529 dhd
->iflist
[idx
]->name
, dhd
->iflist
[idx
]->idx
));
12531 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa
->ifa_label
));
12538 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
12539 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
12542 * Skip if Bus is not in a state to transport the IOVAR
12543 * (or) the Dongle is not ready.
12545 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd
->pub
) ||
12546 dhd
->pub
.busstate
== DHD_BUS_LOAD
) {
12547 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
12548 __FUNCTION__
, dhd
->pub
.busstate
));
12549 if (dhd
->pend_ipaddr
) {
12550 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
12551 __FUNCTION__
, dhd
->pend_ipaddr
));
12553 dhd
->pend_ipaddr
= ifa
->ifa_address
;
12557 #ifdef AOE_IP_ALIAS_SUPPORT
12558 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
12560 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, TRUE
, idx
);
12561 #endif /* AOE_IP_ALIAS_SUPPORT */
12565 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
12566 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
12567 dhd
->pend_ipaddr
= 0;
12568 #ifdef AOE_IP_ALIAS_SUPPORT
12569 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
12571 if ((dhd_pub
->op_mode
& DHD_FLAG_HOSTAP_MODE
) ||
12572 (ifa
->ifa_dev
->dev
!= dhd_linux_get_primary_netdev(dhd_pub
))) {
12573 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, FALSE
, idx
);
12575 #endif /* AOE_IP_ALIAS_SUPPORT */
12577 dhd_aoe_hostip_clr(&dhd
->pub
, idx
);
12578 dhd_aoe_arp_clr(&dhd
->pub
, idx
);
12583 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
12584 __func__
, ifa
->ifa_label
, event
));
12587 return NOTIFY_DONE
;
12589 #endif /* ARP_OFFLOAD_SUPPORT */
12591 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12592 /* Neighbor Discovery Offload: defered handler */
12594 dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
)
12596 struct ipv6_work_info_t
*ndo_work
= (struct ipv6_work_info_t
*)event_data
;
12597 dhd_info_t
*dhd
= (dhd_info_t
*)dhd_info
;
12602 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__
));
12607 if (event
!= DHD_WQ_WORK_IPV6_NDO
) {
12608 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__
));
12613 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__
));
12617 switch (ndo_work
->event
) {
12619 #ifndef NDO_CONFIG_SUPPORT
12620 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__
));
12621 ret
= dhd_ndo_enable(dhdp
, TRUE
);
12623 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__
, ret
));
12625 #endif /* !NDO_CONFIG_SUPPORT */
12626 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__
));
12627 if (dhdp
->ndo_version
> 0) {
12628 /* inet6 addr notifier called only for unicast address */
12629 ret
= dhd_ndo_add_ip_with_type(dhdp
, &ndo_work
->ipv6_addr
[0],
12630 WL_ND_IPV6_ADDR_TYPE_UNICAST
, ndo_work
->if_idx
);
12632 ret
= dhd_ndo_add_ip(dhdp
, &ndo_work
->ipv6_addr
[0],
12636 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
12637 __FUNCTION__
, ret
));
12641 if (dhdp
->ndo_version
> 0) {
12642 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__
));
12643 ret
= dhd_ndo_remove_ip_by_addr(dhdp
,
12644 &ndo_work
->ipv6_addr
[0], ndo_work
->if_idx
);
12646 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__
));
12647 ret
= dhd_ndo_remove_ip(dhdp
, ndo_work
->if_idx
);
12650 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
12651 __FUNCTION__
, ret
));
12654 #ifdef NDO_CONFIG_SUPPORT
12655 if (dhdp
->ndo_host_ip_overflow
) {
12656 ret
= dhd_dev_ndo_update_inet6addr(
12657 dhd_idx2net(dhdp
, ndo_work
->if_idx
));
12658 if ((ret
< 0) && (ret
!= BCME_NORESOURCE
)) {
12659 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
12660 __FUNCTION__
, ret
));
12664 #else /* !NDO_CONFIG_SUPPORT */
12665 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__
));
12666 ret
= dhd_ndo_enable(dhdp
, FALSE
);
12668 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__
, ret
));
12671 #endif /* NDO_CONFIG_SUPPORT */
12675 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__
));
12680 /* free ndo_work. alloced while scheduling the work */
12686 } /* dhd_init_logstrs_array */
12689 * Neighbor Discovery Offload: Called when an interface
12690 * is assigned with ipv6 address.
12691 * Handles only primary interface
12693 int dhd_inet6addr_notifier_call(struct notifier_block
*this, unsigned long event
, void *ptr
)
12697 struct inet6_ifaddr
*inet6_ifa
= ptr
;
12698 struct ipv6_work_info_t
*ndo_info
;
12701 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12702 /* Filter notifications meant for non Broadcom devices */
12703 if (inet6_ifa
->idev
->dev
->netdev_ops
!= &dhd_ops_pri
) {
12704 return NOTIFY_DONE
;
12706 #endif /* LINUX_VERSION_CODE */
12708 dhd
= DHD_DEV_INFO(inet6_ifa
->idev
->dev
);
12710 return NOTIFY_DONE
;
12714 /* Supports only primary interface */
12715 idx
= dhd_net2idx(dhd
, inet6_ifa
->idev
->dev
);
12717 return NOTIFY_DONE
;
12720 /* FW capability */
12721 if (!FW_SUPPORTED(dhdp
, ndoe
)) {
12722 return NOTIFY_DONE
;
12725 ndo_info
= (struct ipv6_work_info_t
*)kzalloc(sizeof(struct ipv6_work_info_t
), GFP_ATOMIC
);
12727 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__
));
12728 return NOTIFY_DONE
;
12731 /* fill up ndo_info */
12732 ndo_info
->event
= event
;
12733 ndo_info
->if_idx
= idx
;
12734 memcpy(ndo_info
->ipv6_addr
, &inet6_ifa
->addr
, IPV6_ADDR_LEN
);
12736 /* defer the work to thread as it may block kernel */
12737 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)ndo_info
, DHD_WQ_WORK_IPV6_NDO
,
12738 dhd_inet6_work_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
12739 return NOTIFY_DONE
;
12741 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12743 /* Network attach to be invoked from the bus probe handlers */
/*
 * NOTE(review): this extraction is corrupted -- each original source line is
 * split across several text lines and original lines 12744/12746/12749/
 * 12752-12754/12760-12765 (return type, braces, error-path returns) are
 * missing. Only comments are added here; the code text is left untouched.
 *
 * dhd_attach_net: register the primary net device (ifidx 0) with the kernel
 * via dhd_register_if(); when WL_CFG80211 is compiled in, also attach it to
 * cfg80211, and on attach failure remove the interface via dhd_remove_if().
 */
12745 dhd_attach_net(dhd_pub_t
*dhdp
, bool need_rtnl_lock
)
12747 struct net_device
*primary_ndev
;
/* keep the variable "used" when WL_CFG80211 is compiled out */
12748 BCM_REFERENCE(primary_ndev
);
12750 /* Register primary net device */
/* need_rtnl_lock selects register_netdev() vs register_netdevice() inside */
12751 if (dhd_register_if(dhdp
, 0, need_rtnl_lock
) != 0) {
12755 #if defined(WL_CFG80211)
12756 primary_ndev
= dhd_linux_get_primary_netdev(dhdp
);
12757 if (wl_cfg80211_net_attach(primary_ndev
) < 0) {
12758 /* fail the init */
12759 dhd_remove_if(dhdp
, 0, TRUE
);
12762 #endif /* WL_CFG80211 */
12767 dhd_register_if(dhd_pub_t
*dhdp
, int ifidx
, bool need_rtnl_lock
)
12769 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
12771 struct net_device
*net
= NULL
;
12773 uint8 temp_addr
[ETHER_ADDR_LEN
] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12775 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
12777 if (dhd
== NULL
|| dhd
->iflist
[ifidx
] == NULL
) {
12778 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__
));
12782 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
12783 ifp
= dhd
->iflist
[ifidx
];
12785 ASSERT(net
&& (ifp
->idx
== ifidx
));
12787 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12788 ASSERT(!net
->open
);
12789 net
->get_stats
= dhd_get_stats
;
12790 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
12791 net
->do_ioctl
= dhd_ioctl_entry_wrapper
;
12792 net
->hard_start_xmit
= dhd_start_xmit_wrapper
;
12794 net
->do_ioctl
= dhd_ioctl_entry
;
12795 net
->hard_start_xmit
= dhd_start_xmit
;
12796 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
12798 net
->set_mac_address
= dhd_set_mac_address
;
12799 net
->set_multicast_list
= dhd_set_multicast_list
;
12800 net
->open
= net
->stop
= NULL
;
12802 ASSERT(!net
->netdev_ops
);
12803 net
->netdev_ops
= &dhd_ops_virt
;
12804 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12806 /* Ok, link into the network layer... */
12809 * device functions for the primary interface only
12811 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12812 net
->open
= dhd_pri_open
;
12813 net
->stop
= dhd_pri_stop
;
12815 net
->netdev_ops
= &dhd_ops_pri
;
12816 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12817 if (!ETHER_ISNULLADDR(dhd
->pub
.mac
.octet
))
12818 memcpy(temp_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
12821 * We have to use the primary MAC for virtual interfaces
12823 memcpy(temp_addr
, ifp
->mac_addr
, ETHER_ADDR_LEN
);
12825 * Android sets the locally administered bit to indicate that this is a
12826 * portable hotspot. This will not work in simultaneous AP/STA mode,
12827 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
12829 if (!memcmp(temp_addr
, dhd
->iflist
[0]->mac_addr
,
12831 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12832 __func__
, net
->name
));
12833 temp_addr
[0] |= 0x02;
12837 net
->hard_header_len
= ETH_HLEN
+ dhd
->pub
.hdrlen
;
12838 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
12839 net
->ethtool_ops
= &dhd_ethtool_ops
;
12840 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
12842 #if defined(WL_WIRELESS_EXT)
12843 #if WIRELESS_EXT < 19
12844 net
->get_wireless_stats
= dhd_get_wireless_stats
;
12845 #endif /* WIRELESS_EXT < 19 */
12846 #if WIRELESS_EXT > 12
12847 net
->wireless_handlers
= &wl_iw_handler_def
;
12848 #endif /* WIRELESS_EXT > 12 */
12849 #endif /* defined(WL_WIRELESS_EXT) */
12851 dhd
->pub
.rxsz
= DBUS_RX_BUFFER_SIZE_DHD(net
);
12853 memcpy(net
->dev_addr
, temp_addr
, ETHER_ADDR_LEN
);
12856 printf("%s\n", dhd_version
);
12858 if (need_rtnl_lock
)
12859 err
= register_netdev(net
);
12861 err
= register_netdevice(net
);
12864 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net
->name
, err
));
12868 printf("Register interface [%s] MAC: "MACDBG
"\n\n", net
->name
,
12869 #if defined(CUSTOMER_HW4_DEBUG)
12870 MAC2STRDBG(dhd
->pub
.mac
.octet
));
12872 MAC2STRDBG(net
->dev_addr
));
12873 #endif /* CUSTOMER_HW4_DEBUG */
12875 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
12876 wl_iw_iscan_set_scan_broadcast_prep(net
, 1);
12879 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
12880 KERNEL_VERSION(2, 6, 27))))
12883 up(&dhd_registration_sem
);
12884 #endif /* BCMLXSDMMC */
12885 if (!dhd_download_fw_on_driverload
) {
12887 wl_terminate_event_handler(net
);
12888 #endif /* WL_CFG80211 */
12889 #if defined(DHD_LB_RXP)
12890 __skb_queue_purge(&dhd
->rx_pend_queue
);
12891 #endif /* DHD_LB_RXP */
12893 #if defined(DHD_LB_TXP)
12894 skb_queue_purge(&dhd
->tx_pend_queue
);
12895 #endif /* DHD_LB_TXP */
12897 #ifdef SHOW_LOGTRACE
12898 /* Release the skbs from queue for WLC_E_TRACE event */
12899 dhd_event_logtrace_flush_queue(dhdp
);
12900 #endif /* SHOW_LOGTRACE */
12902 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
12903 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
12904 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
12905 dhd_net_bus_devreset(net
, TRUE
);
12907 dhd_net_bus_suspend(net
);
12908 #endif /* BCMLXSDMMC */
12909 wifi_platform_set_power(dhdp
->info
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
12910 #if defined(BT_OVER_SDIO)
12911 dhd
->bus_user_count
--;
12912 #endif /* BT_OVER_SDIO */
12915 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
12919 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
12922 net
->netdev_ops
= NULL
;
/*
 * dhd_bus_detach: stop the protocol and bus modules (skipped when the bus is
 * already DHD_BUS_DOWN) and unregister the OOB host-wake interrupt where
 * configured.
 * NOTE(review): extraction dropped original lines 12929-12931/12933-12934/
 * 12936-12938/12941/12945/12948-12949 (braces, guards, blank lines); the
 * visible code text is reproduced untouched.
 */
12928 dhd_bus_detach(dhd_pub_t
*dhdp
)
12932 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
12935 dhd
= (dhd_info_t
*)dhdp
->info
;
12939 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
12940 * calling stop again will cause SD read/write errors.
12942 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
12943 /* Stop the protocol module */
12944 dhd_prot_stop(&dhd
->pub
);
12946 /* Stop the bus module */
/* second arg TRUE -- presumably "enforce down"; confirm against dhd_bus_stop() */
12947 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
12950 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12951 dhd_bus_oob_intr_unregister(dhdp
);
12952 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12957 void dhd_detach(dhd_pub_t
*dhdp
)
12960 unsigned long flags
;
12961 int timer_valid
= FALSE
;
12962 struct net_device
*dev
;
12964 struct bcm_cfg80211
*cfg
= NULL
;
12969 dhd
= (dhd_info_t
*)dhdp
->info
;
12973 dev
= dhd
->iflist
[0]->net
;
12977 if (dev
->flags
& IFF_UP
) {
12978 /* If IFF_UP is still up, it indicates that
12979 * "ifconfig wlan0 down" hasn't been called.
12980 * So invoke dev_close explicitly here to
12981 * bring down the interface.
12983 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12989 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__
, dhd
->dhd_state
));
12992 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
12993 /* Give sufficient time for threads to start running in case
12994 * dhd_attach() has failed
12999 dhd_free_wet_info(&dhd
->pub
, dhd
->pub
.wet_info
);
13000 #endif /* DHD_WET */
13001 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
13002 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
13004 #ifdef PROP_TXSTATUS
13005 #ifdef DHD_WLFC_THREAD
13006 if (dhd
->pub
.wlfc_thread
) {
13007 kthread_stop(dhd
->pub
.wlfc_thread
);
13008 dhdp
->wlfc_thread_go
= TRUE
;
13009 wake_up_interruptible(&dhdp
->wlfc_wqhead
);
13011 dhd
->pub
.wlfc_thread
= NULL
;
13012 #endif /* DHD_WLFC_THREAD */
13013 #endif /* PROP_TXSTATUS */
13017 wl_cfg80211_down(dev
);
13018 #endif /* WL_CFG80211 */
13020 if (dhd
->dhd_state
& DHD_ATTACH_STATE_PROT_ATTACH
) {
13022 dhd_bus_detach(dhdp
);
13024 if (is_reboot
== SYS_RESTART
) {
13025 extern bcmdhd_wifi_platdata_t
*dhd_wifi_platdata
;
13026 if (dhd_wifi_platdata
&& !dhdp
->dongle_reset
) {
13027 dhdpcie_bus_clock_stop(dhdp
->bus
);
13028 wifi_platform_set_power(dhd_wifi_platdata
->adapters
,
13029 FALSE
, WIFI_TURNOFF_DELAY
);
13032 #endif /* BCMPCIE */
13033 #ifndef PCIE_FULL_DONGLE
13035 dhd_prot_detach(dhdp
);
13036 #endif /* !PCIE_FULL_DONGLE */
13039 #ifdef ARP_OFFLOAD_SUPPORT
13040 if (dhd_inetaddr_notifier_registered
) {
13041 dhd_inetaddr_notifier_registered
= FALSE
;
13042 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
13044 #endif /* ARP_OFFLOAD_SUPPORT */
13045 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
13046 if (dhd_inet6addr_notifier_registered
) {
13047 dhd_inet6addr_notifier_registered
= FALSE
;
13048 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
13050 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
13051 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
13052 if (dhd
->dhd_state
& DHD_ATTACH_STATE_EARLYSUSPEND_DONE
) {
13053 if (dhd
->early_suspend
.suspend
)
13054 unregister_early_suspend(&dhd
->early_suspend
);
13056 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
13058 #if defined(WL_WIRELESS_EXT)
13059 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WL_ATTACH
) {
13060 /* Detatch and unlink in the iw */
13063 #endif /* defined(WL_WIRELESS_EXT) */
13066 dhd_ulp_deinit(dhd
->pub
.osh
, dhdp
);
13067 #endif /* DHD_ULP */
13069 /* delete all interfaces, start with virtual */
13070 if (dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) {
13074 /* Cleanup virtual interfaces */
13075 dhd_net_if_lock_local(dhd
);
13076 for (i
= 1; i
< DHD_MAX_IFS
; i
++) {
13077 if (dhd
->iflist
[i
]) {
13078 dhd_remove_if(&dhd
->pub
, i
, TRUE
);
13081 dhd_net_if_unlock_local(dhd
);
13083 /* delete primary interface 0 */
13084 ifp
= dhd
->iflist
[0];
13085 if (ifp
&& ifp
->net
) {
13088 cfg
= wl_get_cfg(ifp
->net
);
13090 /* in unregister_netdev case, the interface gets freed by net->destructor
13091 * (which is set to free_netdev)
13093 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
) {
13094 free_netdev(ifp
->net
);
13096 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
13097 defined(ARGOS_NOTIFY_CB)
13098 argos_register_notifier_deinit();
13099 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
13100 #ifdef SET_RPS_CPUS
13101 custom_rps_map_clear(ifp
->net
->_rx
);
13102 #endif /* SET_RPS_CPUS */
13103 netif_tx_disable(ifp
->net
);
13104 unregister_netdev(ifp
->net
);
13106 #ifdef PCIE_FULL_DONGLE
13107 ifp
->net
= DHD_NET_DEV_NULL
;
13110 #endif /* PCIE_FULL_DONGLE */
13112 #ifdef DHD_L2_FILTER
13113 bcm_l2_filter_arp_table_update(dhdp
->osh
, ifp
->phnd_arp_table
, TRUE
,
13114 NULL
, FALSE
, dhdp
->tickcnt
);
13115 deinit_l2_filter_arp_table(dhdp
->osh
, ifp
->phnd_arp_table
);
13116 ifp
->phnd_arp_table
= NULL
;
13117 #endif /* DHD_L2_FILTER */
13119 dhd_if_del_sta_list(ifp
);
13121 MFREE(dhd
->pub
.osh
, ifp
, sizeof(*ifp
));
13122 dhd
->iflist
[0] = NULL
;
13126 /* Clear the watchdog timer */
13127 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
13128 timer_valid
= dhd
->wd_timer_valid
;
13129 dhd
->wd_timer_valid
= FALSE
;
13130 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
13132 del_timer_sync(&dhd
->timer
);
13133 DHD_DISABLE_RUNTIME_PM(&dhd
->pub
);
13135 if (dhd
->dhd_state
& DHD_ATTACH_STATE_THREADS_CREATED
) {
13136 #ifdef DHD_PCIE_RUNTIMEPM
13137 if (dhd
->thr_rpm_ctl
.thr_pid
>= 0) {
13138 PROC_STOP(&dhd
->thr_rpm_ctl
);
13140 #endif /* DHD_PCIE_RUNTIMEPM */
13141 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
13142 PROC_STOP(&dhd
->thr_wdt_ctl
);
13145 if (dhd
->rxthread_enabled
&& dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
13146 PROC_STOP(&dhd
->thr_rxf_ctl
);
13149 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
13150 PROC_STOP(&dhd
->thr_dpc_ctl
);
13153 tasklet_kill(&dhd
->tasklet
);
13158 if (dhd
->pub
.nfct
) {
13159 dhd_ct_close(dhd
->pub
.nfct
);
13161 #endif /* WL_NATOE */
13164 if (dhd
->dhd_state
& DHD_ATTACH_STATE_LB_ATTACH_DONE
) {
13165 /* Clear the flag first to avoid calling the cpu notifier */
13166 dhd
->dhd_state
&= ~DHD_ATTACH_STATE_LB_ATTACH_DONE
;
13168 /* Kill the Load Balancing Tasklets */
13170 cancel_work_sync(&dhd
->rx_napi_dispatcher_work
);
13171 __skb_queue_purge(&dhd
->rx_pend_queue
);
13172 #endif /* DHD_LB_RXP */
13174 cancel_work_sync(&dhd
->tx_dispatcher_work
);
13175 tasklet_kill(&dhd
->tx_tasklet
);
13176 __skb_queue_purge(&dhd
->tx_pend_queue
);
13177 #endif /* DHD_LB_TXP */
13179 cancel_work_sync(&dhd
->tx_compl_dispatcher_work
);
13180 tasklet_kill(&dhd
->tx_compl_tasklet
);
13181 #endif /* DHD_LB_TXC */
13183 tasklet_kill(&dhd
->rx_compl_tasklet
);
13184 #endif /* DHD_LB_RXC */
13186 if (dhd
->cpu_notifier
.notifier_call
!= NULL
) {
13187 unregister_cpu_notifier(&dhd
->cpu_notifier
);
13189 dhd_cpumasks_deinit(dhd
);
13190 DHD_LB_STATS_DEINIT(&dhd
->pub
);
13192 #endif /* DHD_LB */
13194 DHD_SSSR_MEMPOOL_DEINIT(&dhd
->pub
);
13197 if (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
) {
13199 DHD_ERROR(("cfg NULL!\n"));
13202 wl_cfg80211_detach(cfg
);
13203 dhd_monitor_uninit();
13208 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13209 destroy_workqueue(dhd
->tx_wq
);
13211 destroy_workqueue(dhd
->rx_wq
);
13213 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13214 #ifdef DEBUGABILITY
13217 dhd_os_dbg_detach_pkt_monitor(dhdp
);
13218 dhd_os_spin_lock_deinit(dhd
->pub
.osh
, dhd
->pub
.dbg
->pkt_mon_lock
);
13219 #endif /* DBG_PKT_MON */
13220 dhd_os_dbg_detach(dhdp
);
13222 #endif /* DEBUGABILITY */
13223 #ifdef DHD_PKT_LOGGING
13224 dhd_os_detach_pktlog(dhdp
);
13225 #endif /* DHD_PKT_LOGGING */
13226 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13227 if (dhd
->pub
.hang_info
) {
13228 MFREE(dhd
->pub
.osh
, dhd
->pub
.hang_info
, VENDOR_SEND_HANG_EXT_INFO_LEN
);
13230 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13231 #ifdef SHOW_LOGTRACE
13232 /* Release the skbs from queue for WLC_E_TRACE event */
13233 dhd_event_logtrace_flush_queue(dhdp
);
13235 if (dhd
->dhd_state
& DHD_ATTACH_LOGTRACE_INIT
) {
13236 if (dhd
->event_data
.fmts
) {
13237 MFREE(dhd
->pub
.osh
, dhd
->event_data
.fmts
,
13238 dhd
->event_data
.fmts_size
);
13239 dhd
->event_data
.fmts
= NULL
;
13241 if (dhd
->event_data
.raw_fmts
) {
13242 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_fmts
,
13243 dhd
->event_data
.raw_fmts_size
);
13244 dhd
->event_data
.raw_fmts
= NULL
;
13246 if (dhd
->event_data
.raw_sstr
) {
13247 MFREE(dhd
->pub
.osh
, dhd
->event_data
.raw_sstr
,
13248 dhd
->event_data
.raw_sstr_size
);
13249 dhd
->event_data
.raw_sstr
= NULL
;
13251 if (dhd
->event_data
.rom_raw_sstr
) {
13252 MFREE(dhd
->pub
.osh
, dhd
->event_data
.rom_raw_sstr
,
13253 dhd
->event_data
.rom_raw_sstr_size
);
13254 dhd
->event_data
.rom_raw_sstr
= NULL
;
13256 dhd
->dhd_state
&= ~DHD_ATTACH_LOGTRACE_INIT
;
13258 #endif /* SHOW_LOGTRACE */
13260 if (dhdp
->pno_state
)
13261 dhd_pno_deinit(dhdp
);
13264 if (dhdp
->rtt_state
) {
13265 dhd_rtt_deinit(dhdp
);
13268 #if defined(CONFIG_PM_SLEEP)
13269 if (dhd_pm_notifier_registered
) {
13270 unregister_pm_notifier(&dhd
->pm_notifier
);
13271 dhd_pm_notifier_registered
= FALSE
;
13273 #endif /* CONFIG_PM_SLEEP */
13275 #ifdef DEBUG_CPU_FREQ
13277 free_percpu(dhd
->new_freq
);
13278 dhd
->new_freq
= NULL
;
13279 cpufreq_unregister_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
13281 #ifdef CONFIG_HAS_WAKELOCK
13282 dhd
->wakelock_wd_counter
= 0;
13283 wake_lock_destroy(&dhd
->wl_wdwake
);
13284 #endif /* CONFIG_HAS_WAKELOCK */
13285 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) {
13286 DHD_TRACE(("wd wakelock count:%d\n", dhd
->wakelock_wd_counter
));
13287 DHD_OS_WAKE_LOCK_DESTROY(dhd
);
13290 #ifdef ARGOS_CPU_SCHEDULER
13291 if (dhd
->pub
.affinity_isdpc
== TRUE
) {
13292 free_cpumask_var(dhd
->pub
.default_cpu_mask
);
13293 free_cpumask_var(dhd
->pub
.dpc_affinity_cpu_mask
);
13294 dhd
->pub
.affinity_isdpc
= FALSE
;
13296 #endif /* ARGOS_CPU_SCHEDULER */
13298 #ifdef DHDTCPACK_SUPPRESS
13299 /* This will free all MEM allocated for TCPACK SUPPRESS */
13300 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
13301 #endif /* DHDTCPACK_SUPPRESS */
13303 #ifdef PCIE_FULL_DONGLE
13304 dhd_flow_rings_deinit(dhdp
);
13306 dhd_prot_detach(dhdp
);
13309 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13310 dhd_free_tdls_peer_list(dhdp
);
13313 #ifdef DUMP_IOCTL_IOV_LIST
13314 dhd_iov_li_delete(dhdp
, &(dhdp
->dump_iovlist_head
));
13315 #endif /* DUMP_IOCTL_IOV_LIST */
13317 /* memory waste feature list initilization */
13318 dhd_mw_list_delete(dhdp
, &(dhdp
->mw_list_head
));
13319 #endif /* DHD_DEBUG */
13321 dhd_del_monitor_if(dhd
);
13322 #endif /* WL_MONITOR */
13325 if (dhdp
->enable_erpom
) {
13326 dhdp
->pom_func_deregister(&dhdp
->pom_wlan_handler
);
13328 #endif /* DHD_ERPOM */
13330 cancel_work_sync(&dhd
->dhd_hang_process_work
);
13332 /* Prefer adding de-init code above this comment unless necessary.
13333 * The idea is to cancel work queue, sysfs and flags at the end.
13335 dhd_deferred_work_deinit(dhd
->dhd_deferred_wq
);
13336 dhd
->dhd_deferred_wq
= NULL
;
13338 /* log dump related buffers should be freed after wq is purged */
13339 #ifdef DHD_LOG_DUMP
13340 dhd_log_dump_deinit(&dhd
->pub
);
13341 #endif /* DHD_LOG_DUMP */
13342 #if defined(BCMPCIE)
13343 if (dhdp
->extended_trap_data
)
13345 MFREE(dhdp
->osh
, dhdp
->extended_trap_data
, BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
13346 dhdp
->extended_trap_data
= NULL
;
13348 #endif /* BCMPCIE */
13350 #ifdef SHOW_LOGTRACE
13351 /* Wait till event_log_dispatcher_work finishes */
13352 cancel_delayed_work_sync(&dhd
->event_log_dispatcher_work
);
13353 mutex_lock(&dhd
->pub
.dhd_trace_lock
);
13354 remove_proc_entry("dhd_trace", NULL
);
13355 mutex_unlock(&dhd
->pub
.dhd_trace_lock
);
13356 #endif /* SHOW_LOGTRACE */
13358 #ifdef DHD_DUMP_MNGR
13359 if (dhd
->pub
.dump_file_manage
) {
13360 MFREE(dhd
->pub
.osh
, dhd
->pub
.dump_file_manage
,
13361 sizeof(dhd_dump_file_manage_t
));
13363 #endif /* DHD_DUMP_MNGR */
13364 dhd_sysfs_exit(dhd
);
13365 dhd
->pub
.fw_download_done
= FALSE
;
13367 #if defined(BT_OVER_SDIO)
13368 mutex_destroy(&dhd
->bus_user_lock
);
13369 #endif /* BT_OVER_SDIO */
/*
 * dhd_free: final teardown of dhd_pub_t-owned allocations -- per-flow rx
 * reorder buffers, the STA pool, the soc_ram dump buffer (static-buf aware),
 * and the dhd_info_t itself unless it came from dhd_os_prealloc().
 * NOTE(review): corrupted extraction -- original lines 13375-13376/13378-13380/
 * 13385/13387/13391/13394-13396/13398/13403/13407-13409/13414+ (declarations,
 * braces, NULL guards) are missing; code text below is untouched.
 */
13374 dhd_free(dhd_pub_t
*dhdp
)
13377 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
/* free every allocated rx packet-reorder buffer */
13381 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
13382 if (dhdp
->reorder_bufs
[i
]) {
13383 reorder_info_t
*ptr
;
13384 uint32 buf_size
= sizeof(struct reorder_info
);
13386 ptr
= dhdp
->reorder_bufs
[i
];
/* buffer holds the header plus (max_idx + 1) packet-pointer slots */
13388 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
13389 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13390 i
, ptr
->max_idx
, buf_size
));
13392 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
13393 dhdp
->reorder_bufs
[i
] = NULL
;
13397 dhd_sta_pool_fini(dhdp
, DHD_MAX_STA
);
13399 dhd
= (dhd_info_t
*)dhdp
->info
;
/* release the socram dump buffer; static-prealloc builds use PREFREE */
13400 if (dhdp
->soc_ram
) {
13401 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13402 DHD_OS_PREFREE(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13404 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13405 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13406 dhdp
->soc_ram
= NULL
;
13410 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
13411 if (dhd
!= (dhd_info_t
*)dhd_os_prealloc(dhdp
,
13412 DHD_PREALLOC_DHD_INFO
, 0, FALSE
))
13413 MFREE(dhd
->pub
.osh
, dhd
, sizeof(*dhd
));
/*
 * dhd_clear: reset dhd_pub_t-owned state without tearing the driver down --
 * clean pending TCPACK-suppress entries, free the rx reorder buffers, clear
 * (not fini) the STA pool, and release the soc_ram dump buffer. Mirrors
 * dhd_free() except it uses dhd_sta_pool_clear() and keeps dhd_info_t alive.
 * NOTE(review): corrupted extraction -- original lines 13421/13423-13425/
 * 13434/13436/13440/13443-13445/13447/13451/13455+ (braces, guards) are
 * missing; code text below is untouched.
 */
13420 dhd_clear(dhd_pub_t
*dhdp
)
13422 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13426 #ifdef DHDTCPACK_SUPPRESS
13427 /* Clean up timer/data structure for any remaining/pending packet or timer. */
13428 dhd_tcpack_info_tbl_clean(dhdp
);
13429 #endif /* DHDTCPACK_SUPPRESS */
/* free every allocated rx packet-reorder buffer (same layout as dhd_free) */
13430 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
13431 if (dhdp
->reorder_bufs
[i
]) {
13432 reorder_info_t
*ptr
;
13433 uint32 buf_size
= sizeof(struct reorder_info
);
13435 ptr
= dhdp
->reorder_bufs
[i
];
13437 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
13438 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13439 i
, ptr
->max_idx
, buf_size
));
13441 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
13442 dhdp
->reorder_bufs
[i
] = NULL
;
13446 dhd_sta_pool_clear(dhdp
, DHD_MAX_STA
);
/* release the socram dump buffer; static-prealloc builds use PREFREE */
13448 if (dhdp
->soc_ram
) {
13449 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13450 DHD_OS_PREFREE(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13452 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
13453 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13454 dhdp
->soc_ram
= NULL
;
/*
 * dhd_module_cleanup: module unload helper -- unregister the bus driver,
 * then the wifi platform driver. (Original lines 13461/13463/13465-13467/
 * 13469+ are missing from this extraction.)
 */
13460 dhd_module_cleanup(void)
13462 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13464 dhd_bus_unregister();
13468 dhd_wifi_platform_unregister_drv();
/*
 * dhd_module_exit: module_exit handler. Sets the exit_in_progress flag so
 * concurrent paths can bail out, runs dhd_module_cleanup(), then unregisters
 * the reboot notifier and destroys the driver's notifier socket.
 */
13472 dhd_module_exit(void)
13474 atomic_set(&exit_in_progress
, 1);
13475 dhd_module_cleanup();
13476 unregister_reboot_notifier(&dhd_reboot_notifier
);
13477 dhd_destroy_to_notifier_skt();
/*
 * dhd_module_init: module load entry. Backs up the firmware_path/nvram_path
 * module parameters (strncpy plus explicit NUL termination), registers the
 * wifi platform driver with retries up to POWERUP_MAX_RETRY, restoring the
 * backed-up paths before each retry, registers the reboot notifier and the
 * driver notifier socket, and marks init done when firmware is not to be
 * downloaded at driver load.
 * NOTE(review): corrupted extraction -- the retry-loop construct, braces,
 * and return statements (original lines 13482-13483/13485/13487/13489/
 * 13493-13494/13498-13500/13502/13504-13505/13512-13514/13516-13517/13519/
 * 13522-13524/13526+) are missing; code text below is untouched.
 */
13481 dhd_module_init(void)
13484 int retry
= POWERUP_MAX_RETRY
;
13486 DHD_ERROR(("%s in\n", __FUNCTION__
));
13488 DHD_PERIM_RADIO_INIT();
/* snapshot module-param paths so a failed probe can restore them */
13490 if (firmware_path
[0] != '\0') {
13491 strncpy(fw_bak_path
, firmware_path
, MOD_PARAM_PATHLEN
);
13492 fw_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13495 if (nvram_path
[0] != '\0') {
13496 strncpy(nv_bak_path
, nvram_path
, MOD_PARAM_PATHLEN
);
13497 nv_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13501 err
= dhd_wifi_platform_register_drv();
13503 register_reboot_notifier(&dhd_reboot_notifier
);
/* failure path: log the attempt count and restore the saved paths */
13506 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
13507 __FUNCTION__
, retry
));
13508 strncpy(firmware_path
, fw_bak_path
, MOD_PARAM_PATHLEN
);
13509 firmware_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13510 strncpy(nvram_path
, nv_bak_path
, MOD_PARAM_PATHLEN
);
13511 nvram_path
[MOD_PARAM_PATHLEN
-1] = '\0';
13515 dhd_create_to_notifier_skt();
13518 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__
));
13520 if (!dhd_download_fw_on_driverload
) {
13521 dhd_driver_init_done
= TRUE
;
13525 DHD_ERROR(("%s out\n", __FUNCTION__
));
/*
 * dhd_reboot_callback: reboot-notifier handler. On SYS_RESTART it performs a
 * BCMPCIE-conditional action (the body, original lines 13535-13536, is
 * missing from this extraction); always returns NOTIFY_DONE so the notifier
 * chain continues.
 */
13531 dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
)
13533 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__
, code
));
13534 if (code
== SYS_RESTART
) {
13537 #endif /* BCMPCIE */
13539 return NOTIFY_DONE
;
/*
 * Module entry-point registration. Selection ladder:
 *   - CONFIG_DEFERRED_INITCALLS (non-EXYNOS-PCIE-patch builds): use the
 *     deferred initcall, synchronous on the listed SoC/arch platforms;
 *   - USE_LATE_INITCALL_SYNC: late_initcall_sync, else late_initcall;
 *   - otherwise a plain module_init.
 * (The intervening #else lines are missing from this extraction.)
 */
13542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
13543 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
13544 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
13545 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
13546 defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
13547 defined(CONFIG_ARCH_SDM845)
13548 deferred_module_init_sync(dhd_module_init
);
13550 deferred_module_init(dhd_module_init
);
13551 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
13552 * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
13553 * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845
13555 #elif defined(USE_LATE_INITCALL_SYNC)
13556 late_initcall_sync(dhd_module_init
);
13558 late_initcall(dhd_module_init
);
13559 #endif /* USE_LATE_INITCALL_SYNC */
13561 module_init(dhd_module_init
);
13562 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
13564 module_exit(dhd_module_exit
);
13567 * OS specific functions required to implement DHD driver in OS independent way
/*
 * dhd_os_proto_block: serialize protocol-layer access by taking
 * dhd->proto_sem. The PERIM lock is dropped around the down() because the
 * semaphore acquisition may sleep. (Guard/return lines are missing from
 * this extraction.)
 */
13570 dhd_os_proto_block(dhd_pub_t
*pub
)
13572 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13575 DHD_PERIM_UNLOCK(pub
);
13577 down(&dhd
->proto_sem
);
13579 DHD_PERIM_LOCK(pub
);
/*
 * dhd_os_proto_unblock: release the protocol serialization taken by
 * dhd_os_proto_block() (up on dhd->proto_sem).
 */
13587 dhd_os_proto_unblock(dhd_pub_t
*pub
)
13589 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13592 up(&dhd
->proto_sem
);
/* dhd_os_dhdiovar_lock: serialize DHD iovar handling via dhd_iovar_mutex. */
13600 dhd_os_dhdiovar_lock(dhd_pub_t
*pub
)
13602 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13605 mutex_lock(&dhd
->dhd_iovar_mutex
);
/* dhd_os_dhdiovar_unlock: release the iovar mutex taken by the lock above. */
13610 dhd_os_dhdiovar_unlock(dhd_pub_t
*pub
)
13612 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13615 mutex_unlock(&dhd
->dhd_iovar_mutex
);
/*
 * dhd_os_logdump_lock: take the log-dump mutex. (The NULL-pub guard between
 * original lines 13622 and 13627 is missing from this extraction.)
 */
13620 dhd_os_logdump_lock(dhd_pub_t
*pub
)
13622 dhd_info_t
*dhd
= NULL
;
13627 dhd
= (dhd_info_t
*)(pub
->info
);
13630 mutex_lock(&dhd
->logdump_lock
);
/*
 * dhd_os_logdump_unlock: release the log-dump mutex. (The NULL-pub guard
 * between original lines 13637 and 13642 is missing from this extraction.)
 */
13635 dhd_os_logdump_unlock(dhd_pub_t
*pub
)
13637 dhd_info_t
*dhd
= NULL
;
13642 dhd
= (dhd_info_t
*)(pub
->info
);
13645 mutex_unlock(&dhd
->logdump_lock
);
/* dhd_os_dbgring_lock: opaque debug-ring lock; the void* is a struct mutex. */
13650 dhd_os_dbgring_lock(void *lock
)
13655 mutex_lock((struct mutex
*)lock
);
/*
 * dhd_os_dbgring_unlock: opaque debug-ring unlock. The flags argument is
 * unused here (BCM_REFERENCE) -- presumably kept for spinlock-style API
 * symmetry with other platforms; confirm against the dbgring callers.
 */
13661 dhd_os_dbgring_unlock(void *lock
, unsigned long flags
)
13663 BCM_REFERENCE(flags
);
13668 mutex_unlock((struct mutex
*)lock
);
/* dhd_os_get_ioctl_resp_timeout: current ioctl response timeout in ms. */
13672 dhd_os_get_ioctl_resp_timeout(void)
13674 return ((unsigned int)dhd_ioctl_timeout_msec
);
/* dhd_os_set_ioctl_resp_timeout: set the ioctl response timeout (ms). */
13678 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec
)
13680 dhd_ioctl_timeout_msec
= (int)timeout_msec
;
/*
 * dhd_os_ioctl_resp_wait: block (non-interruptible, with timeout of
 * dhd_ioctl_timeout_msec) on dhd->ioctl_resp_wait until *condition becomes
 * true. The PERIM lock is released across the sleep.
 */
13684 dhd_os_ioctl_resp_wait(dhd_pub_t
*pub
, uint
*condition
)
13686 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13689 /* Convert timeout in millisecond to jiffies */
13690 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13691 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
13693 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
13696 DHD_PERIM_UNLOCK(pub
);
/* 0 on timeout, else the remaining jiffies */
13698 timeout
= wait_event_timeout(dhd
->ioctl_resp_wait
, (*condition
), timeout
);
13700 DHD_PERIM_LOCK(pub
);
/* dhd_os_ioctl_resp_wake: wake waiters blocked in dhd_os_ioctl_resp_wait(). */
13706 dhd_os_ioctl_resp_wake(dhd_pub_t
*pub
)
13708 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13710 wake_up(&dhd
->ioctl_resp_wait
);
/*
 * dhd_os_d3ack_wait: block on dhd->d3ack_wait (timeout D3_ACK_RESP_TIMEOUT)
 * until *condition becomes true; PERIM lock released across the sleep.
 * Same pattern as dhd_os_ioctl_resp_wait() with a different queue/timeout.
 */
13715 dhd_os_d3ack_wait(dhd_pub_t
*pub
, uint
*condition
)
13717 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13720 /* Convert timeout in millisecond to jiffies */
13721 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13722 timeout
= msecs_to_jiffies(D3_ACK_RESP_TIMEOUT
);
13724 timeout
= D3_ACK_RESP_TIMEOUT
* HZ
/ 1000;
13727 DHD_PERIM_UNLOCK(pub
);
13729 timeout
= wait_event_timeout(dhd
->d3ack_wait
, (*condition
), timeout
);
13731 DHD_PERIM_LOCK(pub
);
/* dhd_os_d3ack_wake: wake waiters blocked in dhd_os_d3ack_wait(). */
13737 dhd_os_d3ack_wake(dhd_pub_t
*pub
)
13739 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13741 wake_up(&dhd
->d3ack_wait
);
/*
 * dhd_os_busbusy_wait_negation: wait (up to DHD_BUS_BUSY_TIMEOUT) on
 * dhd_bus_busy_state_wait until *condition becomes FALSE -- i.e. until all
 * bus-usage contexts have exited.
 */
13746 dhd_os_busbusy_wait_negation(dhd_pub_t
*pub
, uint
*condition
)
13748 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13751 /* Wait for bus usage contexts to gracefully exit within some timeout value
13752 * Set time out to little higher than dhd_ioctl_timeout_msec,
13753 * so that IOCTL timeout should not get affected.
13755 /* Convert timeout in millisecond to jiffies */
13756 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13757 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13759 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13762 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
, !(*condition
), timeout
);
13768 * Wait until the condition *var == condition is met.
13769 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13770 * Returns 1 if the @condition evaluated to true
/* Timeout is DHD_BUS_BUSY_TIMEOUT; queue is dhd_bus_busy_state_wait. */
13773 dhd_os_busbusy_wait_condition(dhd_pub_t
*pub
, uint
*var
, uint condition
)
13775 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13778 /* Convert timeout in millisecond to jiffies */
13779 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13780 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13782 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13785 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
, (*var
== condition
), timeout
);
13791 * Wait until the '(*var & bitmask) == condition' is met.
13792 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13793 * Returns 1 if the @condition evaluated to true
13796 dhd_os_busbusy_wait_bitmask(dhd_pub_t
*pub
, uint
*var
,
13797 uint bitmask
, uint condition
)
13799 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13802 /* Convert timeout in millsecond to jiffies */
13803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13804 timeout
= msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT
);
13806 timeout
= DHD_BUS_BUSY_TIMEOUT
* HZ
/ 1000;
13809 timeout
= wait_event_timeout(dhd
->dhd_bus_busy_state_wait
,
13810 ((*var
& bitmask
) == condition
), timeout
);
13816 dhd_os_dmaxfer_wait(dhd_pub_t
*pub
, uint
*condition
)
13819 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
13821 DHD_PERIM_UNLOCK(pub
);
13822 ret
= wait_event_interruptible(dhd
->dmaxfer_wait
, (*condition
));
13823 DHD_PERIM_LOCK(pub
);
13830 dhd_os_dmaxfer_wake(dhd_pub_t
*pub
)
13832 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13834 wake_up(&dhd
->dmaxfer_wait
);
13839 dhd_os_tx_completion_wake(dhd_pub_t
*dhd
)
13841 /* Call wmb() to make sure before waking up the other event value gets updated */
13843 wake_up(&dhd
->tx_completion_wait
);
13846 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
13847 /* Fix compilation error for FC11 */
13851 dhd_os_busbusy_wake(dhd_pub_t
*pub
)
13853 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
13854 /* Call wmb() to make sure before waking up the other event value gets updated */
13856 wake_up(&dhd
->dhd_bus_busy_state_wait
);
13861 dhd_os_wd_timer_extend(void *bus
, bool extend
)
13863 dhd_pub_t
*pub
= bus
;
13864 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13867 dhd_os_wd_timer(bus
, WATCHDOG_EXTEND_INTERVAL
);
13869 dhd_os_wd_timer(bus
, dhd
->default_wd_interval
);
13873 dhd_os_wd_timer(void *bus
, uint wdtick
)
13875 dhd_pub_t
*pub
= bus
;
13876 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13877 unsigned long flags
;
13879 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13882 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
13886 DHD_GENERAL_LOCK(pub
, flags
);
13888 /* don't start the wd until fw is loaded */
13889 if (pub
->busstate
== DHD_BUS_DOWN
) {
13890 DHD_GENERAL_UNLOCK(pub
, flags
);
13893 DHD_OS_WD_WAKE_UNLOCK(pub
);
13895 #endif /* BCMSDIO */
13899 /* Totally stop the timer */
13900 if (!wdtick
&& dhd
->wd_timer_valid
== TRUE
) {
13901 dhd
->wd_timer_valid
= FALSE
;
13902 DHD_GENERAL_UNLOCK(pub
, flags
);
13903 del_timer_sync(&dhd
->timer
);
13905 DHD_OS_WD_WAKE_UNLOCK(pub
);
13906 #endif /* BCMSDIO */
13912 DHD_OS_WD_WAKE_LOCK(pub
);
13913 dhd_watchdog_ms
= (uint
)wdtick
;
13914 #endif /* BCMSDIO */
13915 /* Re arm the timer, at last watchdog period */
13916 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
13917 dhd
->wd_timer_valid
= TRUE
;
13919 DHD_GENERAL_UNLOCK(pub
, flags
);
13922 #ifdef DHD_PCIE_RUNTIMEPM
13924 dhd_os_runtimepm_timer(void *bus
, uint tick
)
13926 dhd_pub_t
*pub
= bus
;
13927 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
13928 unsigned long flags
;
13930 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
13933 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
13937 DHD_GENERAL_LOCK(pub
, flags
);
13939 /* don't start the RPM until fw is loaded */
13940 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub
)) {
13941 DHD_GENERAL_UNLOCK(pub
, flags
);
13945 /* If tick is non-zero, the request is to start the timer */
13947 /* Start the timer only if its not already running */
13948 if (dhd
->rpm_timer_valid
== FALSE
) {
13949 mod_timer(&dhd
->rpm_timer
, jiffies
+ msecs_to_jiffies(dhd_runtimepm_ms
));
13950 dhd
->rpm_timer_valid
= TRUE
;
13953 /* tick is zero, we have to stop the timer */
13954 /* Stop the timer only if its running, otherwise we don't have to do anything */
13955 if (dhd
->rpm_timer_valid
== TRUE
) {
13956 dhd
->rpm_timer_valid
= FALSE
;
13957 DHD_GENERAL_UNLOCK(pub
, flags
);
13958 del_timer_sync(&dhd
->rpm_timer
);
13959 /* we have already released the lock, so just go to exit */
13964 DHD_GENERAL_UNLOCK(pub
, flags
);
13970 #endif /* DHD_PCIE_RUNTIMEPM */
13973 dhd_os_open_image1(dhd_pub_t
*pub
, char *filename
)
13978 fp
= filp_open(filename
, O_RDONLY
, 0);
13980 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13982 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13990 if (!S_ISREG(file_inode(fp
)->i_mode
)) {
13991 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__
, filename
));
13996 size
= i_size_read(file_inode(fp
));
13998 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__
, filename
, size
));
14003 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__
, filename
, size
));
14010 dhd_os_get_image_block(char *buf
, int len
, void *image
)
14012 struct file
*fp
= (struct file
*)image
;
14020 size
= i_size_read(file_inode(fp
));
14021 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, MIN(len
, size
));
14023 if (len
>= size
&& size
!= rdlen
) {
14028 fp
->f_pos
+= rdlen
;
14034 #if defined(BT_OVER_SDIO)
14036 dhd_os_gets_image(dhd_pub_t
*pub
, char *str
, int len
, void *image
)
14038 struct file
*fp
= (struct file
*)image
;
14041 char *str_end
= NULL
;
14046 rd_len
= kernel_read(fp
, fp
->f_pos
, str
, len
);
14047 str_end
= strnchr(str
, len
, '\n');
14048 if (str_end
== NULL
) {
14051 str_len
= (uint
)(str_end
- str
);
14053 /* Advance file pointer past the string length */
14054 fp
->f_pos
+= str_len
+ 1;
14055 bzero(str_end
, rd_len
- str_len
);
14060 #endif /* defined (BT_OVER_SDIO) */
14063 dhd_os_get_image_size(void *image
)
14065 struct file
*fp
= (struct file
*)image
;
14071 size
= i_size_read(file_inode(fp
));
14077 dhd_os_close_image1(dhd_pub_t
*pub
, void *image
)
14080 filp_close((struct file
*)image
, NULL
);
14085 dhd_os_sdlock(dhd_pub_t
*pub
)
14089 dhd
= (dhd_info_t
*)(pub
->info
);
14091 if (dhd_dpc_prio
>= 0)
14094 spin_lock_bh(&dhd
->sdlock
);
14098 dhd_os_sdunlock(dhd_pub_t
*pub
)
14102 dhd
= (dhd_info_t
*)(pub
->info
);
14104 if (dhd_dpc_prio
>= 0)
14107 spin_unlock_bh(&dhd
->sdlock
);
14111 dhd_os_sdlock_txq(dhd_pub_t
*pub
)
14115 dhd
= (dhd_info_t
*)(pub
->info
);
14116 spin_lock_bh(&dhd
->txqlock
);
14120 dhd_os_sdunlock_txq(dhd_pub_t
*pub
)
14124 dhd
= (dhd_info_t
*)(pub
->info
);
14125 spin_unlock_bh(&dhd
->txqlock
);
14129 dhd_os_sdlock_rxq(dhd_pub_t
*pub
)
14134 dhd_os_sdunlock_rxq(dhd_pub_t
*pub
)
14139 dhd_os_rxflock(dhd_pub_t
*pub
)
14143 dhd
= (dhd_info_t
*)(pub
->info
);
14144 spin_lock_bh(&dhd
->rxf_lock
);
14149 dhd_os_rxfunlock(dhd_pub_t
*pub
)
14153 dhd
= (dhd_info_t
*)(pub
->info
);
14154 spin_unlock_bh(&dhd
->rxf_lock
);
14157 #ifdef DHDTCPACK_SUPPRESS
14159 dhd_os_tcpacklock(dhd_pub_t
*pub
)
14162 unsigned long flags
= 0;
14164 dhd
= (dhd_info_t
*)(pub
->info
);
14168 spin_lock_bh(&dhd
->tcpack_lock
);
14170 spin_lock_irqsave(&dhd
->tcpack_lock
, flags
);
14171 #endif /* BCMSDIO */
14178 dhd_os_tcpackunlock(dhd_pub_t
*pub
, unsigned long flags
)
14183 BCM_REFERENCE(flags
);
14184 #endif /* BCMSDIO */
14186 dhd
= (dhd_info_t
*)(pub
->info
);
14190 spin_unlock_bh(&dhd
->tcpack_lock
);
14192 spin_unlock_irqrestore(&dhd
->tcpack_lock
, flags
);
14193 #endif /* BCMSDIO */
14196 #endif /* DHDTCPACK_SUPPRESS */
14198 uint8
* dhd_os_prealloc(dhd_pub_t
*dhdpub
, int section
, uint size
, bool kmalloc_if_fail
)
14201 gfp_t flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
14203 buf
= (uint8
*)wifi_platform_prealloc(dhdpub
->info
->adapter
, section
, size
);
14204 if (buf
== NULL
&& kmalloc_if_fail
)
14205 buf
= kmalloc(size
, flags
);
14210 void dhd_os_prefree(dhd_pub_t
*dhdpub
, void *addr
, uint size
)
14214 #if defined(WL_WIRELESS_EXT)
14215 struct iw_statistics
*
14216 dhd_get_wireless_stats(struct net_device
*dev
)
14219 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14221 if (!dhd
->pub
.up
) {
14225 res
= wl_iw_get_wireless_stats(dev
, &dhd
->iw
.wstats
);
14228 return &dhd
->iw
.wstats
;
14232 #endif /* defined(WL_WIRELESS_EXT) */
14235 dhd_wl_host_event(dhd_info_t
*dhd
, int ifidx
, void *pktdata
, uint16 pktlen
,
14236 wl_event_msg_t
*event
, void **data
)
14240 unsigned long flags
= 0;
14241 #ifdef DYNAMIC_MUMIMO_CONTROL
14242 static uint32 reassoc_err
= 0;
14243 #endif /* DYNAMIC_MUMIMO_CONTROL */
14244 #endif /* WL_CFG80211 */
14245 ASSERT(dhd
!= NULL
);
14247 #ifdef SHOW_LOGTRACE
14248 bcmerror
= wl_process_host_event(&dhd
->pub
, &ifidx
, pktdata
, pktlen
, event
, data
,
14251 bcmerror
= wl_process_host_event(&dhd
->pub
, &ifidx
, pktdata
, pktlen
, event
, data
,
14253 #endif /* SHOW_LOGTRACE */
14254 if (unlikely(bcmerror
!= BCME_OK
)) {
14258 if (ntoh32(event
->event_type
) == WLC_E_IF
) {
14259 /* WLC_E_IF event types are consumed by wl_process_host_event.
14260 * For ifadd/del ops, the netdev ptr may not be valid at this
14261 * point. so return before invoking cfg80211/wext handlers.
14266 #if defined(WL_WIRELESS_EXT)
14267 if (event
->bsscfgidx
== 0) {
14269 * Wireless ext is on primary interface only
14271 ASSERT(dhd
->iflist
[ifidx
] != NULL
);
14272 ASSERT(dhd
->iflist
[ifidx
]->net
!= NULL
);
14274 if (dhd
->iflist
[ifidx
]->net
) {
14275 wl_iw_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14278 #endif /* defined(WL_WIRELESS_EXT) */
14281 if (dhd
->iflist
[ifidx
]->net
) {
14282 spin_lock_irqsave(&dhd
->pub
.up_lock
, flags
);
14284 wl_cfg80211_event(dhd
->iflist
[ifidx
]->net
, event
, *data
);
14286 spin_unlock_irqrestore(&dhd
->pub
.up_lock
, flags
);
14288 #ifdef DYNAMIC_MUMIMO_CONTROL
14289 #define REASSOC_ERROR_RETRY_LIMIT 1
14290 if (dhd
->pub
.reassoc_mumimo_sw
) {
14291 uint event_type
= ntoh32(event
->event_type
);
14292 uint status
= ntoh32(event
->status
);
14294 if (event_type
== WLC_E_REASSOC
) {
14295 if (status
== WLC_E_STATUS_SUCCESS
) {
14301 if (reassoc_err
> REASSOC_ERROR_RETRY_LIMIT
) {
14302 dhd
->pub
.reassoc_mumimo_sw
= FALSE
;
14303 dhd
->pub
.murx_block_eapol
= FALSE
;
14304 DHD_ENABLE_RUNTIME_PM(&dhd
->pub
);
14305 dhd_txflowcontrol(&dhd
->pub
, ALL_INTERFACES
, OFF
);
14309 #undef REASSOC_ERROR_RETRY_LIMIT
14310 #endif /* DYNAMIC_MUMIMO_CONTROL */
14311 #endif /* defined(WL_CFG80211) */
14316 /* send up locally generated event */
14318 dhd_sendup_event(dhd_pub_t
*dhdp
, wl_event_msg_t
*event
, void *data
)
14320 switch (ntoh32(event
->event_type
)) {
14321 /* Handle error case or further events here */
14327 #ifdef LOG_INTO_TCPDUMP
14329 dhd_sendup_log(dhd_pub_t
*dhdp
, void *data
, int data_len
)
14331 struct sk_buff
*p
, *skb
;
14338 struct ether_header eth
;
14340 pktlen
= sizeof(eth
) + data_len
;
14343 if ((p
= PKTGET(dhdp
->osh
, pktlen
, FALSE
))) {
14344 ASSERT(ISALIGNED((uintptr
)PKTDATA(dhdp
->osh
, p
), sizeof(uint32
)));
14346 bcopy(&dhdp
->mac
, ð
.ether_dhost
, ETHER_ADDR_LEN
);
14347 bcopy(&dhdp
->mac
, ð
.ether_shost
, ETHER_ADDR_LEN
);
14348 ETHER_TOGGLE_LOCALADDR(ð
.ether_shost
);
14349 eth
.ether_type
= hton16(ETHER_TYPE_BRCM
);
14351 bcopy((void *)ð
, PKTDATA(dhdp
->osh
, p
), sizeof(eth
));
14352 bcopy(data
, PKTDATA(dhdp
->osh
, p
) + sizeof(eth
), data_len
);
14353 skb
= PKTTONATIVE(dhdp
->osh
, p
);
14354 skb_data
= skb
->data
;
14357 ifidx
= dhd_ifname2idx(dhd
, "wlan0");
14358 ifp
= dhd
->iflist
[ifidx
];
14360 ifp
= dhd
->iflist
[0];
14363 skb
->dev
= ifp
->net
;
14364 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
14365 skb
->data
= skb_data
;
14368 /* Strip header, count, deliver upward */
14369 skb_pull(skb
, ETH_HLEN
);
14371 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
14372 __FUNCTION__
, __LINE__
);
14373 /* Send the packet */
14374 if (in_interrupt()) {
14380 /* Could not allocate a sk_buf */
14381 DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__
));
14384 #endif /* LOG_INTO_TCPDUMP */
14386 void dhd_wait_for_event(dhd_pub_t
*dhd
, bool *lockvar
)
14388 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14389 struct dhd_info
*dhdinfo
= dhd
->info
;
14391 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
14392 int timeout
= msecs_to_jiffies(IOCTL_RESP_TIMEOUT
);
14394 int timeout
= (IOCTL_RESP_TIMEOUT
/ 1000) * HZ
;
14395 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
14397 dhd_os_sdunlock(dhd
);
14398 wait_event_timeout(dhdinfo
->ctrl_wait
, (*lockvar
== FALSE
), timeout
);
14399 dhd_os_sdlock(dhd
);
14400 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
14402 } /* dhd_init_static_strs_array */
14404 void dhd_wait_event_wakeup(dhd_pub_t
*dhd
)
14406 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14407 struct dhd_info
*dhdinfo
= dhd
->info
;
14408 if (waitqueue_active(&dhdinfo
->ctrl_wait
))
14409 wake_up(&dhdinfo
->ctrl_wait
);
14414 #if defined(BCMSDIO) || defined(BCMPCIE)
14416 dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
)
14420 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14422 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14423 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd
->pub
.bus
)) < 0)
14425 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14427 if (flag
== TRUE
) {
14428 /* Issue wl down command before resetting the chip */
14429 if (dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
14430 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__
));
14432 #ifdef PROP_TXSTATUS
14433 if (dhd
->pub
.wlfc_enabled
) {
14434 dhd_wlfc_deinit(&dhd
->pub
);
14436 #endif /* PROP_TXSTATUS */
14438 if (dhd
->pub
.pno_state
) {
14439 dhd_pno_deinit(&dhd
->pub
);
14443 if (dhd
->pub
.rtt_state
) {
14444 dhd_rtt_deinit(&dhd
->pub
);
14446 #endif /* RTT_SUPPORT */
14448 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
14449 dhd_os_dbg_detach_pkt_monitor(&dhd
->pub
);
14450 #endif /* DBG_PKT_MON */
14455 dhd_update_fw_nv_path(dhd
);
14456 /* update firmware and nvram path to sdio bus */
14457 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
14458 dhd
->fw_path
, dhd
->nv_path
);
14460 #endif /* BCMSDIO */
14462 ret
= dhd_bus_devreset(&dhd
->pub
, flag
);
14464 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14465 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd
->pub
.bus
));
14466 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd
->pub
.bus
));
14467 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14470 /* Clear some flags for recovery logic */
14471 dhd
->pub
.dongle_trap_occured
= 0;
14472 dhd
->pub
.iovar_timeout_occured
= 0;
14473 #ifdef PCIE_FULL_DONGLE
14474 dhd
->pub
.d3ack_timeout_occured
= 0;
14475 #endif /* PCIE_FULL_DONGLE */
14476 #ifdef DHD_MAP_LOGGING
14477 dhd
->pub
.smmu_fault_occurred
= 0;
14478 #endif /* DHD_MAP_LOGGING */
14482 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__
, ret
));
14490 dhd_net_bus_suspend(struct net_device
*dev
)
14492 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14493 return dhd_bus_suspend(&dhd
->pub
);
14497 dhd_net_bus_resume(struct net_device
*dev
, uint8 stage
)
14499 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14500 return dhd_bus_resume(&dhd
->pub
, stage
);
14503 #endif /* BCMSDIO */
14504 #endif /* BCMSDIO || BCMPCIE */
14506 int net_os_set_suspend_disable(struct net_device
*dev
, int val
)
14508 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14512 ret
= dhd
->pub
.suspend_disable_flag
;
14513 dhd
->pub
.suspend_disable_flag
= val
;
14518 int net_os_set_suspend(struct net_device
*dev
, int val
, int force
)
14521 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14524 #ifdef CONFIG_MACH_UNIVERSAL7420
14525 #if defined(ARGOS_RPS_CPU_CTL) && defined(DHD_LB_RXP)
14527 /* Force to set rps_cpus to specific CPU core */
14528 dhd_rps_cpus_enable(dev
, TRUE
);
14530 #endif /* ARGOS_RPS_CPU_CTL && DHD_LB_RXP */
14531 #endif /* CONFIG_MACH_UNIVERSAL7420 */
14532 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14533 ret
= dhd_set_suspend(val
, &dhd
->pub
);
14535 ret
= dhd_suspend_resume_helper(dhd
, val
, force
);
14538 wl_cfg80211_update_power_mode(dev
);
14544 int net_os_set_suspend_bcn_li_dtim(struct net_device
*dev
, int val
)
14546 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14549 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
14550 __FUNCTION__
, val
));
14551 dhd
->pub
.suspend_bcn_li_dtim
= val
;
14557 int net_os_set_max_dtim_enable(struct net_device
*dev
, int val
)
14559 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14562 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14563 __FUNCTION__
, (val
? "Enable" : "Disable")));
14565 dhd
->pub
.max_dtim_enable
= TRUE
;
14567 dhd
->pub
.max_dtim_enable
= FALSE
;
14576 #ifdef PKT_FILTER_SUPPORT
14577 int net_os_rxfilter_add_remove(struct net_device
*dev
, int add_remove
, int num
)
14581 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14582 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14584 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__
, add_remove
, num
));
14585 if (!dhd
|| (num
== DHD_UNICAST_FILTER_NUM
)) {
14589 #ifdef BLOCK_IPV6_PACKET
14590 /* customer want to use NO IPV6 packets only */
14591 if (num
== DHD_MULTICAST6_FILTER_NUM
) {
14594 #endif /* BLOCK_IPV6_PACKET */
14596 if (num
>= dhd
->pub
.pktfilter_count
) {
14600 ret
= dhd_packet_filter_add_remove(&dhd
->pub
, add_remove
, num
);
14601 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14606 int dhd_os_enable_packet_filter(dhd_pub_t
*dhdp
, int val
)
14611 /* Packet filtering is set only if we still in early-suspend and
14612 * we need either to turn it ON or turn it OFF
14613 * We can always turn it OFF in case of early-suspend, but we turn it
14614 * back ON only if suspend_disable_flag was not set
14616 if (dhdp
&& dhdp
->up
) {
14617 if (dhdp
->in_suspend
) {
14618 if (!val
|| (val
&& !dhdp
->suspend_disable_flag
))
14619 dhd_enable_packet_filter(val
, dhdp
);
14625 /* function to enable/disable packet for Network device */
14626 int net_os_enable_packet_filter(struct net_device
*dev
, int val
)
14628 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14630 DHD_ERROR(("%s: val = %d\n", __FUNCTION__
, val
));
14631 return dhd_os_enable_packet_filter(&dhd
->pub
, val
);
14633 #endif /* PKT_FILTER_SUPPORT */
14636 dhd_dev_init_ioctl(struct net_device
*dev
)
14638 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14641 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0)
14649 dhd_dev_get_feature_set(struct net_device
*dev
)
14651 dhd_info_t
*ptr
= *(dhd_info_t
**)netdev_priv(dev
);
14652 dhd_pub_t
*dhd
= (&ptr
->pub
);
14653 int feature_set
= 0;
14655 if (FW_SUPPORTED(dhd
, sta
))
14656 feature_set
|= WIFI_FEATURE_INFRA
;
14657 if (FW_SUPPORTED(dhd
, dualband
))
14658 feature_set
|= WIFI_FEATURE_INFRA_5G
;
14659 if (FW_SUPPORTED(dhd
, p2p
))
14660 feature_set
|= WIFI_FEATURE_P2P
;
14661 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
)
14662 feature_set
|= WIFI_FEATURE_SOFT_AP
;
14663 if (FW_SUPPORTED(dhd
, tdls
))
14664 feature_set
|= WIFI_FEATURE_TDLS
;
14665 if (FW_SUPPORTED(dhd
, vsdb
))
14666 feature_set
|= WIFI_FEATURE_TDLS_OFFCHANNEL
;
14667 if (FW_SUPPORTED(dhd
, nan
)) {
14668 feature_set
|= WIFI_FEATURE_NAN
;
14669 /* NAN is essentail for d2d rtt */
14670 if (FW_SUPPORTED(dhd
, rttd2d
))
14671 feature_set
|= WIFI_FEATURE_D2D_RTT
;
14674 feature_set
|= WIFI_FEATURE_D2D_RTT
;
14675 feature_set
|= WIFI_FEATURE_D2AP_RTT
;
14676 #endif /* RTT_SUPPORT */
14677 #ifdef LINKSTAT_SUPPORT
14678 feature_set
|= WIFI_FEATURE_LINKSTAT
;
14679 #endif /* LINKSTAT_SUPPORT */
14681 #if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
14682 if (dhd_is_pno_supported(dhd
)) {
14683 feature_set
|= WIFI_FEATURE_PNO
;
14684 #ifdef GSCAN_SUPPORT
14685 feature_set
|= WIFI_FEATURE_GSCAN
;
14686 feature_set
|= WIFI_FEATURE_HAL_EPNO
;
14687 #endif /* GSCAN_SUPPORT */
14689 #endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
14690 #ifdef RSSI_MONITOR_SUPPORT
14691 if (FW_SUPPORTED(dhd
, rssi_mon
)) {
14692 feature_set
|= WIFI_FEATURE_RSSI_MONITOR
;
14694 #endif /* RSSI_MONITOR_SUPPORT */
14696 feature_set
|= WIFI_FEATURE_HOTSPOT
;
14698 #ifdef NDO_CONFIG_SUPPORT
14699 feature_set
|= WIFI_FEATURE_CONFIG_NDO
;
14700 #endif /* NDO_CONFIG_SUPPORT */
14702 feature_set
|= WIFI_FEATURE_MKEEP_ALIVE
;
14703 #endif /* KEEP_ALIVE */
14704 #ifdef SUPPORT_RANDOM_MAC_SCAN
14705 feature_set
|= WIFI_FEATURE_SCAN_RAND
;
14706 #endif /* SUPPORT_RANDOM_MAC_SCAN */
14708 if (FW_SUPPORTED(dhd
, fie
)) {
14709 feature_set
|= WIFI_FEATURE_FILTER_IE
;
14711 #endif /* FILTER_IE */
14712 #ifdef ROAMEXP_SUPPORT
14713 /* Check if the Android O roam feature is supported by FW */
14714 if (!(BCME_UNSUPPORTED
== dhd_dev_set_whitelist_ssid(dev
, NULL
, 0, true))) {
14715 feature_set
|= WIFI_FEATURE_CONTROL_ROAMING
;
14717 #endif /* ROAMEXP_SUPPORT */
14718 return feature_set
;
14722 dhd_dev_get_feature_set_matrix(struct net_device
*dev
, int num
)
14724 int feature_set_full
;
14727 feature_set_full
= dhd_dev_get_feature_set(dev
);
14729 /* Common feature set for all interface */
14730 ret
= (feature_set_full
& WIFI_FEATURE_INFRA
) |
14731 (feature_set_full
& WIFI_FEATURE_INFRA_5G
) |
14732 (feature_set_full
& WIFI_FEATURE_D2D_RTT
) |
14733 (feature_set_full
& WIFI_FEATURE_D2AP_RTT
) |
14734 (feature_set_full
& WIFI_FEATURE_RSSI_MONITOR
) |
14735 (feature_set_full
& WIFI_FEATURE_EPR
);
14737 /* Specific feature group for each interface */
14740 ret
|= (feature_set_full
& WIFI_FEATURE_P2P
) |
14741 /* Not supported yet */
14742 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14743 (feature_set_full
& WIFI_FEATURE_TDLS
) |
14744 (feature_set_full
& WIFI_FEATURE_PNO
) |
14745 (feature_set_full
& WIFI_FEATURE_HAL_EPNO
) |
14746 (feature_set_full
& WIFI_FEATURE_BATCH_SCAN
) |
14747 (feature_set_full
& WIFI_FEATURE_GSCAN
) |
14748 (feature_set_full
& WIFI_FEATURE_HOTSPOT
) |
14749 (feature_set_full
& WIFI_FEATURE_ADDITIONAL_STA
);
14753 ret
|= (feature_set_full
& WIFI_FEATURE_P2P
);
14754 /* Not yet verified NAN with P2P */
14755 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14759 ret
|= (feature_set_full
& WIFI_FEATURE_NAN
) |
14760 (feature_set_full
& WIFI_FEATURE_TDLS
) |
14761 (feature_set_full
& WIFI_FEATURE_TDLS_OFFCHANNEL
);
14765 ret
= WIFI_FEATURE_INVALID
;
14766 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__
, num
));
14772 #ifdef CUSTOM_FORCE_NODFS_FLAG
14774 dhd_dev_set_nodfs(struct net_device
*dev
, u32 nodfs
)
14776 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
14779 dhd
->pub
.dhd_cflags
|= WLAN_PLAT_NODFS_FLAG
;
14781 dhd
->pub
.dhd_cflags
&= ~WLAN_PLAT_NODFS_FLAG
;
14782 dhd
->pub
.force_country_change
= TRUE
;
14785 #endif /* CUSTOM_FORCE_NODFS_FLAG */
14786 #ifdef NDO_CONFIG_SUPPORT
14788 dhd_dev_ndo_cfg(struct net_device
*dev
, u8 enable
)
14790 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
14791 dhd_pub_t
*dhdp
= &dhd
->pub
;
14795 /* enable ND offload feature (will be enabled in FW on suspend) */
14796 dhdp
->ndo_enable
= TRUE
;
14798 /* Update changes of anycast address & DAD failed address */
14799 ret
= dhd_dev_ndo_update_inet6addr(dev
);
14800 if ((ret
< 0) && (ret
!= BCME_NORESOURCE
)) {
14801 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__
, ret
));
14805 /* disable ND offload feature */
14806 dhdp
->ndo_enable
= FALSE
;
14808 /* disable ND offload in FW */
14809 ret
= dhd_ndo_enable(dhdp
, FALSE
);
14811 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__
, ret
));
14817 /* #pragma used as a WAR to fix build failure,
14818 * ignore dropping of 'const' qualifier in 'list_entry' macro
14819 * this pragma disables the warning only for the following function
14821 #pragma GCC diagnostic push
14822 #pragma GCC diagnostic ignored "-Wcast-qual"
14825 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev
*inet6
)
14827 struct inet6_ifaddr
*ifa
;
14828 struct ifacaddr6
*acaddr
= NULL
;
14829 int addr_count
= 0;
14832 read_lock_bh(&inet6
->lock
);
14834 /* Count valid unicast address */
14835 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14836 if ((ifa
->flags
& IFA_F_DADFAILED
) == 0) {
14841 /* Count anycast address */
14842 acaddr
= inet6
->ac_list
;
14845 acaddr
= acaddr
->aca_next
;
14849 read_unlock_bh(&inet6
->lock
);
14855 dhd_dev_ndo_update_inet6addr(struct net_device
*dev
)
14859 struct inet6_dev
*inet6
;
14860 struct inet6_ifaddr
*ifa
;
14861 struct ifacaddr6
*acaddr
= NULL
;
14862 struct in6_addr
*ipv6_addr
= NULL
;
14867 * this function evaulates host ip address in struct inet6_dev
14868 * unicast addr in inet6_dev->addr_list
14869 * anycast addr in inet6_dev->ac_list
14870 * while evaluating inet6_dev, read_lock_bh() is required to prevent
14871 * access on null(freed) pointer.
14875 inet6
= dev
->ip6_ptr
;
14877 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__
));
14881 dhd
= DHD_DEV_INFO(dev
);
14883 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__
));
14888 if (dhd_net2idx(dhd
, dev
) != 0) {
14889 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__
));
14893 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__
));
14897 /* Check host IP overflow */
14898 cnt
= dhd_dev_ndo_get_valid_inet6addr_count(inet6
);
14899 if (cnt
> dhdp
->ndo_max_host_ip
) {
14900 if (!dhdp
->ndo_host_ip_overflow
) {
14901 dhdp
->ndo_host_ip_overflow
= TRUE
;
14902 /* Disable ND offload in FW */
14903 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__
));
14904 ret
= dhd_ndo_enable(dhdp
, FALSE
);
14911 * Allocate ipv6 addr buffer to store addresses to be added/removed.
14912 * driver need to lock inet6_dev while accessing structure. but, driver
14913 * cannot use ioctl while inet6_dev locked since it requires scheduling
14914 * hence, copy addresses to the buffer and do ioctl after unlock.
14916 ipv6_addr
= (struct in6_addr
*)MALLOC(dhdp
->osh
,
14917 sizeof(struct in6_addr
) * dhdp
->ndo_max_host_ip
);
14919 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__
));
14923 /* Find DAD failed unicast address to be removed */
14925 read_lock_bh(&inet6
->lock
);
14926 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14927 /* DAD failed unicast address */
14928 if ((ifa
->flags
& IFA_F_DADFAILED
) &&
14929 (cnt
< dhdp
->ndo_max_host_ip
)) {
14930 memcpy(&ipv6_addr
[cnt
], &ifa
->addr
, sizeof(struct in6_addr
));
14934 read_unlock_bh(&inet6
->lock
);
14936 /* Remove DAD failed unicast address */
14937 for (i
= 0; i
< cnt
; i
++) {
14938 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__
));
14939 ret
= dhd_ndo_remove_ip_by_addr(dhdp
, (char *)&ipv6_addr
[i
], 0);
14945 /* Remove all anycast address */
14946 ret
= dhd_ndo_remove_ip_by_type(dhdp
, WL_ND_IPV6_ADDR_TYPE_ANYCAST
, 0);
14952 * if ND offload was disabled due to host ip overflow,
14953 * attempt to add valid unicast address.
14955 if (dhdp
->ndo_host_ip_overflow
) {
14956 /* Find valid unicast address */
14958 read_lock_bh(&inet6
->lock
);
14959 list_for_each_entry(ifa
, &inet6
->addr_list
, if_list
) {
14960 /* valid unicast address */
14961 if (!(ifa
->flags
& IFA_F_DADFAILED
) &&
14962 (cnt
< dhdp
->ndo_max_host_ip
)) {
14963 memcpy(&ipv6_addr
[cnt
], &ifa
->addr
,
14964 sizeof(struct in6_addr
));
14968 read_unlock_bh(&inet6
->lock
);
14970 /* Add valid unicast address */
14971 for (i
= 0; i
< cnt
; i
++) {
14972 ret
= dhd_ndo_add_ip_with_type(dhdp
,
14973 (char *)&ipv6_addr
[i
], WL_ND_IPV6_ADDR_TYPE_UNICAST
, 0);
14980 /* Find anycast address */
14982 read_lock_bh(&inet6
->lock
);
14983 acaddr
= inet6
->ac_list
;
14985 if (cnt
< dhdp
->ndo_max_host_ip
) {
14986 memcpy(&ipv6_addr
[cnt
], &acaddr
->aca_addr
, sizeof(struct in6_addr
));
14989 acaddr
= acaddr
->aca_next
;
14991 read_unlock_bh(&inet6
->lock
);
14993 /* Add anycast address */
14994 for (i
= 0; i
< cnt
; i
++) {
14995 ret
= dhd_ndo_add_ip_with_type(dhdp
,
14996 (char *)&ipv6_addr
[i
], WL_ND_IPV6_ADDR_TYPE_ANYCAST
, 0);
15002 /* Now All host IP addr were added successfully */
15003 if (dhdp
->ndo_host_ip_overflow
) {
15004 dhdp
->ndo_host_ip_overflow
= FALSE
;
15005 if (dhdp
->in_suspend
) {
15006 /* drvier is in (early) suspend state, need to enable ND offload in FW */
15007 DHD_INFO(("%s: enable NDO\n", __FUNCTION__
));
15008 ret
= dhd_ndo_enable(dhdp
, TRUE
);
15014 MFREE(dhdp
->osh
, ipv6_addr
, sizeof(struct in6_addr
) * dhdp
->ndo_max_host_ip
);
15019 #pragma GCC diagnostic pop
15021 #endif /* NDO_CONFIG_SUPPORT */
15024 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
15026 dhd_dev_pno_stop_for_ssid(struct net_device
*dev
)
15028 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15030 return (dhd_pno_stop_for_ssid(&dhd
->pub
));
15032 /* Linux wrapper to call common dhd_pno_set_for_ssid */
15034 dhd_dev_pno_set_for_ssid(struct net_device
*dev
, wlc_ssid_ext_t
* ssids_local
, int nssid
,
15035 uint16 scan_fr
, int pno_repeat
, int pno_freq_expo_max
, uint16
*channel_list
, int nchan
)
15037 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15039 return (dhd_pno_set_for_ssid(&dhd
->pub
, ssids_local
, nssid
, scan_fr
,
15040 pno_repeat
, pno_freq_expo_max
, channel_list
, nchan
));
15043 /* Linux wrapper to call common dhd_pno_enable */
15045 dhd_dev_pno_enable(struct net_device
*dev
, int enable
)
15047 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15049 return (dhd_pno_enable(&dhd
->pub
, enable
));
15052 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
15054 dhd_dev_pno_set_for_hotlist(struct net_device
*dev
, wl_pfn_bssid_t
*p_pfn_bssid
,
15055 struct dhd_pno_hotlist_params
*hotlist_params
)
15057 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15058 return (dhd_pno_set_for_hotlist(&dhd
->pub
, p_pfn_bssid
, hotlist_params
));
15060 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
15062 dhd_dev_pno_stop_for_batch(struct net_device
*dev
)
15064 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15065 return (dhd_pno_stop_for_batch(&dhd
->pub
));
15067 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
15069 dhd_dev_pno_set_for_batch(struct net_device
*dev
, struct dhd_pno_batch_params
*batch_params
)
15071 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15072 return (dhd_pno_set_for_batch(&dhd
->pub
, batch_params
));
15074 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
15076 dhd_dev_pno_get_for_batch(struct net_device
*dev
, char *buf
, int bufsize
)
15078 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15079 return (dhd_pno_get_for_batch(&dhd
->pub
, buf
, bufsize
, PNO_STATUS_NORMAL
));
15081 #endif /* PNO_SUPPORT */
15083 #if defined(PNO_SUPPORT)
15084 #ifdef GSCAN_SUPPORT
15086 dhd_dev_is_legacy_pno_enabled(struct net_device
*dev
)
15088 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15090 return (dhd_is_legacy_pno_enabled(&dhd
->pub
));
15094 dhd_dev_set_epno(struct net_device
*dev
)
15096 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15100 return dhd_pno_set_epno(&dhd
->pub
);
15103 dhd_dev_flush_fw_epno(struct net_device
*dev
)
15105 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15109 return dhd_pno_flush_fw_epno(&dhd
->pub
);
15112 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15114 dhd_dev_pno_set_cfg_gscan(struct net_device
*dev
, dhd_pno_gscan_cmd_cfg_t type
,
15115 void *buf
, bool flush
)
15117 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15119 return (dhd_pno_set_cfg_gscan(&dhd
->pub
, type
, buf
, flush
));
15122 /* Linux wrapper to call common dhd_wait_batch_results_complete */
15124 dhd_dev_wait_batch_results_complete(struct net_device
*dev
)
15126 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15128 return (dhd_wait_batch_results_complete(&dhd
->pub
));
15131 /* Linux wrapper to call common dhd_pno_lock_batch_results */
15133 dhd_dev_pno_lock_access_batch_results(struct net_device
*dev
)
15135 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15137 return (dhd_pno_lock_batch_results(&dhd
->pub
));
15139 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
15141 dhd_dev_pno_unlock_access_batch_results(struct net_device
*dev
)
15143 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15145 return (dhd_pno_unlock_batch_results(&dhd
->pub
));
15148 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
15150 dhd_dev_pno_run_gscan(struct net_device
*dev
, bool run
, bool flush
)
15152 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15154 return (dhd_pno_initiate_gscan_request(&dhd
->pub
, run
, flush
));
15157 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
15159 dhd_dev_pno_enable_full_scan_result(struct net_device
*dev
, bool real_time_flag
)
15161 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15163 return (dhd_pno_enable_full_scan_result(&dhd
->pub
, real_time_flag
));
15166 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
15168 dhd_dev_hotlist_scan_event(struct net_device
*dev
,
15169 const void *data
, int *send_evt_bytes
, hotlist_type_t type
, u32
*buf_len
)
15171 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15173 return (dhd_handle_hotlist_scan_evt(&dhd
->pub
, data
, send_evt_bytes
, type
, buf_len
));
15176 /* Linux wrapper to call common dhd_process_full_gscan_result */
15178 dhd_dev_process_full_gscan_result(struct net_device
*dev
,
15179 const void *data
, uint32 len
, int *send_evt_bytes
)
15181 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15183 return (dhd_process_full_gscan_result(&dhd
->pub
, data
, len
, send_evt_bytes
));
15187 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device
*dev
, hotlist_type_t type
)
15189 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15191 dhd_gscan_hotlist_cache_cleanup(&dhd
->pub
, type
);
15197 dhd_dev_gscan_batch_cache_cleanup(struct net_device
*dev
)
15199 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15201 return (dhd_gscan_batch_cache_cleanup(&dhd
->pub
));
15204 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
15206 dhd_dev_retrieve_batch_scan(struct net_device
*dev
)
15208 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15210 return (dhd_retreive_batch_scan_results(&dhd
->pub
));
15212 /* Linux wrapper to call common dhd_pno_process_epno_result */
15213 void * dhd_dev_process_epno_result(struct net_device
*dev
,
15214 const void *data
, uint32 event
, int *send_evt_bytes
)
15216 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15218 return (dhd_pno_process_epno_result(&dhd
->pub
, data
, event
, send_evt_bytes
));
15222 dhd_dev_set_lazy_roam_cfg(struct net_device
*dev
,
15223 wlc_roam_exp_params_t
*roam_param
)
15225 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15226 wl_roam_exp_cfg_t roam_exp_cfg
;
15230 return BCME_BADARG
;
15233 DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
15234 roam_param
->a_band_boost_threshold
, roam_param
->a_band_penalty_threshold
));
15235 DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
15236 roam_param
->a_band_boost_factor
, roam_param
->a_band_penalty_factor
,
15237 roam_param
->cur_bssid_boost
));
15238 DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
15239 roam_param
->alert_roam_trigger_threshold
, roam_param
->a_band_max_boost
));
15241 memcpy(&roam_exp_cfg
.params
, roam_param
, sizeof(*roam_param
));
15242 roam_exp_cfg
.version
= ROAM_EXP_CFG_VERSION
;
15243 roam_exp_cfg
.flags
= ROAM_EXP_CFG_PRESENT
;
15244 if (dhd
->pub
.lazy_roam_enable
) {
15245 roam_exp_cfg
.flags
|= ROAM_EXP_ENABLE_FLAG
;
15247 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_params",
15248 (char *)&roam_exp_cfg
, sizeof(roam_exp_cfg
), NULL
, 0,
15251 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__
, err
));
15257 dhd_dev_lazy_roam_enable(struct net_device
*dev
, uint32 enable
)
15260 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15261 wl_roam_exp_cfg_t roam_exp_cfg
;
15263 memset(&roam_exp_cfg
, 0, sizeof(roam_exp_cfg
));
15264 roam_exp_cfg
.version
= ROAM_EXP_CFG_VERSION
;
15266 roam_exp_cfg
.flags
= ROAM_EXP_ENABLE_FLAG
;
15269 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_params",
15270 (char *)&roam_exp_cfg
, sizeof(roam_exp_cfg
), NULL
, 0,
15273 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__
, err
));
15275 dhd
->pub
.lazy_roam_enable
= (enable
!= 0);
15280 dhd_dev_set_lazy_roam_bssid_pref(struct net_device
*dev
,
15281 wl_bssid_pref_cfg_t
*bssid_pref
, uint32 flush
)
15285 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15287 bssid_pref
->version
= BSSID_PREF_LIST_VERSION
;
15288 /* By default programming bssid pref flushes out old values */
15289 bssid_pref
->flags
= (flush
&& !bssid_pref
->count
) ? ROAM_EXP_CLEAR_BSSID_PREF
: 0;
15290 len
= sizeof(wl_bssid_pref_cfg_t
);
15291 if (bssid_pref
->count
) {
15292 len
+= (bssid_pref
->count
- 1) * sizeof(wl_bssid_pref_list_t
);
15294 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_bssid_pref",
15295 (char *)bssid_pref
, len
, NULL
, 0, TRUE
);
15296 if (err
!= BCME_OK
) {
15297 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__
, err
));
15301 #endif /* GSCAN_SUPPORT */
15302 #if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
15304 dhd_dev_set_blacklist_bssid(struct net_device
*dev
, maclist_t
*blacklist
,
15305 uint32 len
, uint32 flush
)
15308 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15312 err
= dhd_wl_ioctl_cmd(&(dhd
->pub
), WLC_SET_MACLIST
, (char *)blacklist
,
15314 if (err
!= BCME_OK
) {
15315 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__
, err
));
15319 /* By default programming blacklist flushes out old values */
15320 macmode
= (flush
&& !blacklist
) ? WLC_MACMODE_DISABLED
: WLC_MACMODE_DENY
;
15321 err
= dhd_wl_ioctl_cmd(&(dhd
->pub
), WLC_SET_MACMODE
, (char *)&macmode
,
15322 sizeof(macmode
), TRUE
, 0);
15323 if (err
!= BCME_OK
) {
15324 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__
, err
));
15329 dhd_dev_set_whitelist_ssid(struct net_device
*dev
, wl_ssid_whitelist_t
*ssid_whitelist
,
15330 uint32 len
, uint32 flush
)
15333 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15334 wl_ssid_whitelist_t whitelist_ssid_flush
;
15336 if (!ssid_whitelist
) {
15338 ssid_whitelist
= &whitelist_ssid_flush
;
15339 ssid_whitelist
->ssid_count
= 0;
15341 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__
));
15342 return BCME_BADARG
;
15345 ssid_whitelist
->version
= SSID_WHITELIST_VERSION
;
15346 ssid_whitelist
->flags
= flush
? ROAM_EXP_CLEAR_SSID_WHITELIST
: 0;
15347 err
= dhd_iovar(&dhd
->pub
, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist
, len
, NULL
,
15349 if (err
!= BCME_OK
) {
15350 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__
, err
));
15354 #endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
15355 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15356 /* Linux wrapper to call common dhd_pno_get_gscan */
15358 dhd_dev_pno_get_gscan(struct net_device
*dev
, dhd_pno_gscan_cmd_cfg_t type
,
15359 void *info
, uint32
*len
)
15361 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15363 return (dhd_pno_get_gscan(&dhd
->pub
, type
, info
, len
));
15365 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15368 #ifdef RSSI_MONITOR_SUPPORT
15370 dhd_dev_set_rssi_monitor_cfg(struct net_device
*dev
, int start
,
15371 int8 max_rssi
, int8 min_rssi
)
15374 wl_rssi_monitor_cfg_t rssi_monitor
;
15375 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15377 rssi_monitor
.version
= RSSI_MONITOR_VERSION
;
15378 rssi_monitor
.max_rssi
= max_rssi
;
15379 rssi_monitor
.min_rssi
= min_rssi
;
15380 rssi_monitor
.flags
= start
? 0: RSSI_MONITOR_STOP
;
15381 err
= dhd_iovar(&dhd
->pub
, 0, "rssi_monitor", (char *)&rssi_monitor
, sizeof(rssi_monitor
),
15383 if (err
< 0 && err
!= BCME_UNSUPPORTED
) {
15384 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__
, err
));
15388 #endif /* RSSI_MONITOR_SUPPORT */
15390 #ifdef DHDTCPACK_SUPPRESS
15392 dhd_dev_set_tcpack_sup_mode_cfg(struct net_device
*dev
, uint8 enable
)
15395 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15397 err
= dhd_tcpack_suppress_set(&dhd
->pub
, enable
);
15398 if (err
!= BCME_OK
) {
15399 DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__
, err
));
15403 #endif /* DHDTCPACK_SUPPRESS */
15406 dhd_dev_cfg_rand_mac_oui(struct net_device
*dev
, uint8
*oui
)
15408 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15409 dhd_pub_t
*dhdp
= &dhd
->pub
;
15411 if (!dhdp
|| !oui
) {
15412 DHD_ERROR(("NULL POINTER : %s\n",
15416 if (ETHER_ISMULTI(oui
)) {
15417 DHD_ERROR(("Expected unicast OUI\n"));
15420 uint8
*rand_mac_oui
= dhdp
->rand_mac_oui
;
15421 memcpy(rand_mac_oui
, oui
, DOT11_OUI_LEN
);
15422 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG
"\n",
15423 MACOUI2STRDBG(rand_mac_oui
)));
15429 dhd_set_rand_mac_oui(dhd_pub_t
*dhd
)
15432 wl_pfn_macaddr_cfg_t wl_cfg
;
15433 uint8
*rand_mac_oui
= dhd
->rand_mac_oui
;
15435 memset(&wl_cfg
.macaddr
, 0, ETHER_ADDR_LEN
);
15436 memcpy(&wl_cfg
.macaddr
, rand_mac_oui
, DOT11_OUI_LEN
);
15437 wl_cfg
.version
= WL_PFN_MACADDR_CFG_VER
;
15438 if (ETHER_ISNULLADDR(&wl_cfg
.macaddr
)) {
15441 wl_cfg
.flags
= (WL_PFN_MAC_OUI_ONLY_MASK
| WL_PFN_SET_MAC_UNASSOC_MASK
);
15444 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG
"\n",
15445 MACOUI2STRDBG(rand_mac_oui
)));
15447 err
= dhd_iovar(dhd
, 0, "pfn_macaddr", (char *)&wl_cfg
, sizeof(wl_cfg
), NULL
, 0, TRUE
);
15449 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__
, err
));
15455 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15457 dhd_dev_rtt_set_cfg(struct net_device
*dev
, void *buf
)
15459 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15461 return (dhd_rtt_set_cfg(&dhd
->pub
, buf
));
15465 dhd_dev_rtt_cancel_cfg(struct net_device
*dev
, struct ether_addr
*mac_list
, int mac_cnt
)
15467 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15469 return (dhd_rtt_stop(&dhd
->pub
, mac_list
, mac_cnt
));
15473 dhd_dev_rtt_register_noti_callback(struct net_device
*dev
, void *ctx
, dhd_rtt_compl_noti_fn noti_fn
)
15475 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15477 return (dhd_rtt_register_noti_callback(&dhd
->pub
, ctx
, noti_fn
));
15481 dhd_dev_rtt_unregister_noti_callback(struct net_device
*dev
, dhd_rtt_compl_noti_fn noti_fn
)
15483 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15485 return (dhd_rtt_unregister_noti_callback(&dhd
->pub
, noti_fn
));
15489 dhd_dev_rtt_capability(struct net_device
*dev
, rtt_capabilities_t
*capa
)
15491 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15493 return (dhd_rtt_capability(&dhd
->pub
, capa
));
15497 dhd_dev_rtt_avail_channel(struct net_device
*dev
, wifi_channel_info
*channel_info
)
15499 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15500 return (dhd_rtt_avail_channel(&dhd
->pub
, channel_info
));
15504 dhd_dev_rtt_enable_responder(struct net_device
*dev
, wifi_channel_info
*channel_info
)
15506 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15507 return (dhd_rtt_enable_responder(&dhd
->pub
, channel_info
));
15510 int dhd_dev_rtt_cancel_responder(struct net_device
*dev
)
15512 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
15513 return (dhd_rtt_cancel_responder(&dhd
->pub
));
15516 #endif /* RTT_SUPPORT */
15519 #define KA_TEMP_BUF_SIZE 512
15520 #define KA_FRAME_SIZE 300
15523 dhd_dev_start_mkeep_alive(dhd_pub_t
*dhd_pub
, uint8 mkeep_alive_id
, uint8
*ip_pkt
,
15524 uint16 ip_pkt_len
, uint8
* src_mac
, uint8
* dst_mac
, uint32 period_msec
)
15526 const int ETHERTYPE_LEN
= 2;
15529 wl_mkeep_alive_pkt_t mkeep_alive_pkt
;
15530 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
= NULL
;
15533 int res
= BCME_ERROR
;
15537 /* ether frame to have both max IP pkt (256 bytes) and ether header */
15538 char *pmac_frame
= NULL
;
15539 char *pmac_frame_begin
= NULL
;
15542 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15543 * dongle shall reject a mkeep_alive request.
15545 if (!dhd_support_sta_mode(dhd_pub
))
15548 DHD_TRACE(("%s execution\n", __FUNCTION__
));
15550 if ((pbuf
= MALLOCZ(dhd_pub
->osh
, KA_TEMP_BUF_SIZE
)) == NULL
) {
15551 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE
));
15556 if ((pmac_frame
= MALLOCZ(dhd_pub
->osh
, KA_FRAME_SIZE
)) == NULL
) {
15557 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE
));
15561 pmac_frame_begin
= pmac_frame
;
15564 * Get current mkeep-alive status.
15566 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive", &mkeep_alive_id
, sizeof(mkeep_alive_id
), pbuf
,
15567 KA_TEMP_BUF_SIZE
, FALSE
);
15569 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__
, res
));
15572 /* Check available ID whether it is occupied */
15573 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) pbuf
;
15574 if (dtoh32(mkeep_alive_pktp
->period_msec
!= 0)) {
15575 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15576 __FUNCTION__
, mkeep_alive_id
));
15578 /* Current occupied ID info */
15579 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__
));
15580 DHD_ERROR((" Id : %d\n"
15581 " Period: %d msec\n"
15584 mkeep_alive_pktp
->keep_alive_id
,
15585 dtoh32(mkeep_alive_pktp
->period_msec
),
15586 dtoh16(mkeep_alive_pktp
->len_bytes
)));
15588 for (i
= 0; i
< mkeep_alive_pktp
->len_bytes
; i
++) {
15589 DHD_ERROR(("%02x", mkeep_alive_pktp
->data
[i
]));
15593 res
= BCME_NOTFOUND
;
15598 /* Request the specified ID */
15599 memset(&mkeep_alive_pkt
, 0, sizeof(wl_mkeep_alive_pkt_t
));
15600 memset(pbuf
, 0, KA_TEMP_BUF_SIZE
);
15601 str
= "mkeep_alive";
15602 str_len
= strlen(str
);
15603 strncpy(pbuf
, str
, str_len
);
15604 pbuf
[str_len
] = '\0';
15606 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) (pbuf
+ str_len
+ 1);
15607 mkeep_alive_pkt
.period_msec
= htod32(period_msec
);
15608 buf_len
= str_len
+ 1;
15609 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
15610 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
15613 mkeep_alive_pkt
.keep_alive_id
= mkeep_alive_id
;
15615 buf_len
+= WL_MKEEP_ALIVE_FIXED_LEN
;
15618 * Build up Ethernet Frame
15621 /* Mapping dest mac addr */
15622 memcpy(pmac_frame
, dst_mac
, ETHER_ADDR_LEN
);
15623 pmac_frame
+= ETHER_ADDR_LEN
;
15625 /* Mapping src mac addr */
15626 memcpy(pmac_frame
, src_mac
, ETHER_ADDR_LEN
);
15627 pmac_frame
+= ETHER_ADDR_LEN
;
15629 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15630 *(pmac_frame
++) = 0x08;
15631 *(pmac_frame
++) = 0x00;
15633 /* Mapping IP pkt */
15634 memcpy(pmac_frame
, ip_pkt
, ip_pkt_len
);
15635 pmac_frame
+= ip_pkt_len
;
15638 * Length of ether frame (assume to be all hexa bytes)
15639 * = src mac + dst mac + ether type + ip pkt len
15641 len_bytes
= ETHER_ADDR_LEN
*2 + ETHERTYPE_LEN
+ ip_pkt_len
;
15642 memcpy(mkeep_alive_pktp
->data
, pmac_frame_begin
, len_bytes
);
15643 buf_len
+= len_bytes
;
15644 mkeep_alive_pkt
.len_bytes
= htod16(len_bytes
);
15647 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15648 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15649 * guarantee that the buffer is properly aligned.
15651 memcpy((char *)mkeep_alive_pktp
, &mkeep_alive_pkt
, WL_MKEEP_ALIVE_FIXED_LEN
);
15653 res
= dhd_wl_ioctl_cmd(dhd_pub
, WLC_SET_VAR
, pbuf
, buf_len
, TRUE
, 0);
15655 if (pmac_frame_begin
) {
15656 MFREE(dhd_pub
->osh
, pmac_frame_begin
, KA_FRAME_SIZE
);
15657 pmac_frame_begin
= NULL
;
15660 MFREE(dhd_pub
->osh
, pbuf
, KA_TEMP_BUF_SIZE
);
15667 dhd_dev_stop_mkeep_alive(dhd_pub_t
*dhd_pub
, uint8 mkeep_alive_id
)
15670 wl_mkeep_alive_pkt_t mkeep_alive_pkt
;
15671 wl_mkeep_alive_pkt_t
*mkeep_alive_pktp
= NULL
;
15672 int res
= BCME_ERROR
;
15676 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15677 * dongle shall reject a mkeep_alive request.
15679 if (!dhd_support_sta_mode(dhd_pub
))
15682 DHD_TRACE(("%s execution\n", __FUNCTION__
));
15685 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15687 if ((pbuf
= MALLOC(dhd_pub
->osh
, KA_TEMP_BUF_SIZE
)) == NULL
) {
15688 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE
));
15692 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive", &mkeep_alive_id
,
15693 sizeof(mkeep_alive_id
), pbuf
, KA_TEMP_BUF_SIZE
, FALSE
);
15695 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__
, res
));
15698 /* Check occupied ID */
15699 mkeep_alive_pktp
= (wl_mkeep_alive_pkt_t
*) pbuf
;
15700 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__
));
15701 DHD_INFO((" Id : %d\n"
15702 " Period: %d msec\n"
15705 mkeep_alive_pktp
->keep_alive_id
,
15706 dtoh32(mkeep_alive_pktp
->period_msec
),
15707 dtoh16(mkeep_alive_pktp
->len_bytes
)));
15709 for (i
= 0; i
< mkeep_alive_pktp
->len_bytes
; i
++) {
15710 DHD_INFO(("%02x", mkeep_alive_pktp
->data
[i
]));
15715 /* Make it stop if available */
15716 if (dtoh32(mkeep_alive_pktp
->period_msec
!= 0)) {
15717 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id
));
15718 memset(&mkeep_alive_pkt
, 0, sizeof(wl_mkeep_alive_pkt_t
));
15720 mkeep_alive_pkt
.period_msec
= 0;
15721 mkeep_alive_pkt
.version
= htod16(WL_MKEEP_ALIVE_VERSION
);
15722 mkeep_alive_pkt
.length
= htod16(WL_MKEEP_ALIVE_FIXED_LEN
);
15723 mkeep_alive_pkt
.keep_alive_id
= mkeep_alive_id
;
15725 res
= dhd_iovar(dhd_pub
, 0, "mkeep_alive",
15726 (char *)&mkeep_alive_pkt
,
15727 WL_MKEEP_ALIVE_FIXED_LEN
, NULL
, 0, TRUE
);
15729 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__
, mkeep_alive_id
));
15730 res
= BCME_NOTFOUND
;
15734 MFREE(dhd_pub
->osh
, pbuf
, KA_TEMP_BUF_SIZE
);
15739 #endif /* KEEP_ALIVE */
15741 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15742 static void _dhd_apf_lock_local(dhd_info_t
*dhd
)
15744 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15746 mutex_lock(&dhd
->dhd_apf_mutex
);
15751 static void _dhd_apf_unlock_local(dhd_info_t
*dhd
)
15753 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15755 mutex_unlock(&dhd
->dhd_apf_mutex
);
15761 __dhd_apf_add_filter(struct net_device
*ndev
, uint32 filter_id
,
15762 u8
* program
, uint32 program_len
)
15764 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15765 dhd_pub_t
*dhdp
= &dhd
->pub
;
15766 wl_pkt_filter_t
* pkt_filterp
;
15767 wl_apf_program_t
*apf_program
;
15769 u32 cmd_len
, buf_len
;
15771 char cmd
[] = "pkt_filter_add";
15773 ifidx
= dhd_net2idx(dhd
, ndev
);
15774 if (ifidx
== DHD_BAD_IF
) {
15775 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15779 cmd_len
= sizeof(cmd
);
15781 /* Check if the program_len is more than the expected len
15782 * and if the program is NULL return from here.
15784 if ((program_len
> WL_APF_PROGRAM_MAX_SIZE
) || (program
== NULL
)) {
15785 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15786 __FUNCTION__
, program_len
, program
));
15789 buf_len
= cmd_len
+ WL_PKT_FILTER_FIXED_LEN
+
15790 WL_APF_PROGRAM_FIXED_LEN
+ program_len
;
15792 buf
= MALLOCZ(dhdp
->osh
, buf_len
);
15793 if (unlikely(!buf
)) {
15794 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__
, buf_len
));
15798 memcpy(buf
, cmd
, cmd_len
);
15800 pkt_filterp
= (wl_pkt_filter_t
*) (buf
+ cmd_len
);
15801 pkt_filterp
->id
= htod32(filter_id
);
15802 pkt_filterp
->negate_match
= htod32(FALSE
);
15803 pkt_filterp
->type
= htod32(WL_PKT_FILTER_TYPE_APF_MATCH
);
15805 apf_program
= &pkt_filterp
->u
.apf_program
;
15806 apf_program
->version
= htod16(WL_APF_INTERNAL_VERSION
);
15807 apf_program
->instr_len
= htod16(program_len
);
15808 memcpy(apf_program
->instrs
, program
, program_len
);
15810 ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, ifidx
);
15811 if (unlikely(ret
)) {
15812 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15813 __FUNCTION__
, filter_id
, ret
));
15817 MFREE(dhdp
->osh
, buf
, buf_len
);
15823 __dhd_apf_config_filter(struct net_device
*ndev
, uint32 filter_id
,
15824 uint32 mode
, uint32 enable
)
15826 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15827 dhd_pub_t
*dhdp
= &dhd
->pub
;
15828 wl_pkt_filter_enable_t
* pkt_filterp
;
15830 u32 cmd_len
, buf_len
;
15832 char cmd
[] = "pkt_filter_enable";
15834 ifidx
= dhd_net2idx(dhd
, ndev
);
15835 if (ifidx
== DHD_BAD_IF
) {
15836 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15840 cmd_len
= sizeof(cmd
);
15841 buf_len
= cmd_len
+ sizeof(*pkt_filterp
);
15843 buf
= MALLOCZ(dhdp
->osh
, buf_len
);
15844 if (unlikely(!buf
)) {
15845 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__
, buf_len
));
15849 memcpy(buf
, cmd
, cmd_len
);
15851 pkt_filterp
= (wl_pkt_filter_enable_t
*) (buf
+ cmd_len
);
15852 pkt_filterp
->id
= htod32(filter_id
);
15853 pkt_filterp
->enable
= htod32(enable
);
15855 ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, buf
, buf_len
, TRUE
, ifidx
);
15856 if (unlikely(ret
)) {
15857 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15858 __FUNCTION__
, filter_id
, ret
));
15862 ret
= dhd_wl_ioctl_set_intiovar(dhdp
, "pkt_filter_mode", dhd_master_mode
,
15863 WLC_SET_VAR
, TRUE
, ifidx
);
15864 if (unlikely(ret
)) {
15865 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15866 __FUNCTION__
, filter_id
, ret
));
15871 MFREE(dhdp
->osh
, buf
, buf_len
);
15877 __dhd_apf_delete_filter(struct net_device
*ndev
, uint32 filter_id
)
15879 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(ndev
);
15880 dhd_pub_t
*dhdp
= &dhd
->pub
;
15883 ifidx
= dhd_net2idx(dhd
, ndev
);
15884 if (ifidx
== DHD_BAD_IF
) {
15885 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15889 ret
= dhd_wl_ioctl_set_intiovar(dhdp
, "pkt_filter_delete",
15890 htod32(filter_id
), WLC_SET_VAR
, TRUE
, ifidx
);
15891 if (unlikely(ret
)) {
15892 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15893 __FUNCTION__
, filter_id
, ret
));
15899 void dhd_apf_lock(struct net_device
*dev
)
15901 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15902 _dhd_apf_lock_local(dhd
);
15905 void dhd_apf_unlock(struct net_device
*dev
)
15907 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
15908 _dhd_apf_unlock_local(dhd
);
15912 dhd_dev_apf_get_version(struct net_device
*ndev
, uint32
*version
)
15914 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15915 dhd_pub_t
*dhdp
= &dhd
->pub
;
15918 if (!FW_SUPPORTED(dhdp
, apf
)) {
15919 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__
));
15922 * Notify Android framework that APF is not supported by setting
15929 ifidx
= dhd_net2idx(dhd
, ndev
);
15930 if (ifidx
== DHD_BAD_IF
) {
15931 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__
));
15935 ret
= dhd_wl_ioctl_get_intiovar(dhdp
, "apf_ver", version
,
15936 WLC_GET_VAR
, FALSE
, ifidx
);
15937 if (unlikely(ret
)) {
15938 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15939 __FUNCTION__
, ret
));
15946 dhd_dev_apf_get_max_len(struct net_device
*ndev
, uint32
*max_len
)
15948 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(ndev
);
15949 dhd_pub_t
*dhdp
= &dhd
->pub
;
15952 if (!FW_SUPPORTED(dhdp
, apf
)) {
15953 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__
));
15958 ifidx
= dhd_net2idx(dhd
, ndev
);
15959 if (ifidx
== DHD_BAD_IF
) {
15960 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
15964 ret
= dhd_wl_ioctl_get_intiovar(dhdp
, "apf_size_limit", max_len
,
15965 WLC_GET_VAR
, FALSE
, ifidx
);
15966 if (unlikely(ret
)) {
15967 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
15968 __FUNCTION__
, ret
));
15975 dhd_dev_apf_add_filter(struct net_device
*ndev
, u8
* program
,
15976 uint32 program_len
)
15978 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
15979 dhd_pub_t
*dhdp
= &dhd
->pub
;
15982 DHD_APF_LOCK(ndev
);
15984 /* delete, if filter already exists */
15985 if (dhdp
->apf_set
) {
15986 ret
= __dhd_apf_delete_filter(ndev
, PKT_FILTER_APF_ID
);
15987 if (unlikely(ret
)) {
15990 dhdp
->apf_set
= FALSE
;
15993 ret
= __dhd_apf_add_filter(ndev
, PKT_FILTER_APF_ID
, program
, program_len
);
15997 dhdp
->apf_set
= TRUE
;
15999 if (dhdp
->in_suspend
&& dhdp
->apf_set
&& !(dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
)) {
16000 /* Driver is still in (early) suspend state, enable APF filter back */
16001 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
16002 PKT_FILTER_MODE_FORWARD_ON_MATCH
, TRUE
);
16005 DHD_APF_UNLOCK(ndev
);
16011 dhd_dev_apf_enable_filter(struct net_device
*ndev
)
16013 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16014 dhd_pub_t
*dhdp
= &dhd
->pub
;
16016 bool nan_dp_active
= false;
16018 DHD_APF_LOCK(ndev
);
16020 nan_dp_active
= wl_cfgnan_is_dp_active(ndev
);
16021 #endif /* WL_NAN */
16022 if (dhdp
->apf_set
&& (!(dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
) &&
16024 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
16025 PKT_FILTER_MODE_FORWARD_ON_MATCH
, TRUE
);
16028 DHD_APF_UNLOCK(ndev
);
16034 dhd_dev_apf_disable_filter(struct net_device
*ndev
)
16036 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16037 dhd_pub_t
*dhdp
= &dhd
->pub
;
16040 DHD_APF_LOCK(ndev
);
16042 if (dhdp
->apf_set
) {
16043 ret
= __dhd_apf_config_filter(ndev
, PKT_FILTER_APF_ID
,
16044 PKT_FILTER_MODE_FORWARD_ON_MATCH
, FALSE
);
16047 DHD_APF_UNLOCK(ndev
);
16053 dhd_dev_apf_delete_filter(struct net_device
*ndev
)
16055 dhd_info_t
*dhd
= DHD_DEV_INFO(ndev
);
16056 dhd_pub_t
*dhdp
= &dhd
->pub
;
16059 DHD_APF_LOCK(ndev
);
16061 if (dhdp
->apf_set
) {
16062 ret
= __dhd_apf_delete_filter(ndev
, PKT_FILTER_APF_ID
);
16064 dhdp
->apf_set
= FALSE
;
16068 DHD_APF_UNLOCK(ndev
);
16072 #endif /* PKT_FILTER_SUPPORT && APF */
16074 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16075 static void dhd_hang_process(struct work_struct
*work_data
)
16077 struct net_device
*dev
;
16078 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
16079 struct net_device
*ndev
;
16081 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16082 /* Ignore compiler warnings due to -Werror=cast-qual */
16083 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16084 #pragma GCC diagnostic push
16085 #pragma GCC diagnostic ignored "-Wcast-qual"
16087 struct dhd_info
*dhd
=
16088 container_of(work_data
, dhd_info_t
, dhd_hang_process_work
);
16089 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16090 #pragma GCC diagnostic pop
16093 dev
= dhd
->iflist
[0]->net
;
16096 #if defined(WL_WIRELESS_EXT)
16097 wl_iw_send_priv_event(dev
, "HANG");
16099 #if defined(WL_CFG80211)
16100 wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
16103 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
16105 * For HW2, dev_close need to be done to recover
16106 * from upper layer after hang. For Interposer skip
16107 * dev_close so that dhd iovars can be used to take
16108 * socramdump after crash, also skip for HW4 as
16109 * handling of hang event is different
16113 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
16114 ndev
= dhd
->iflist
[i
] ? dhd
->iflist
[i
]->net
: NULL
;
16115 if (ndev
&& (ndev
->flags
& IFF_UP
)) {
16116 DHD_ERROR(("ndev->name : %s dev close\n",
16122 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16125 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
16126 extern dhd_pub_t
*link_recovery
;
16127 void dhd_host_recover_link(void)
16129 DHD_ERROR(("****** %s ******\n", __FUNCTION__
));
16130 link_recovery
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
16131 dhd_bus_set_linkdown(link_recovery
, TRUE
);
16132 dhd_os_send_hang_message(link_recovery
);
16134 EXPORT_SYMBOL(dhd_host_recover_link
);
16135 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
16137 int dhd_os_send_hang_message(dhd_pub_t
*dhdp
)
16143 struct net_device
*primary_ndev
;
16144 struct bcm_cfg80211
*cfg
;
16146 primary_ndev
= dhd_linux_get_primary_netdev(dhdp
);
16147 if (!primary_ndev
) {
16148 DHD_ERROR(("%s: Cannot find primary netdev\n",
16153 cfg
= wl_get_cfg(primary_ndev
);
16155 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__
));
16159 /* Skip sending HANG event to framework if driver is not ready */
16160 if (!wl_get_drv_status(cfg
, READY
, primary_ndev
)) {
16161 DHD_ERROR(("%s: device is not ready\n", __FUNCTION__
));
16164 #endif /* WL_CFG80211 */
16166 #if defined(DHD_HANG_SEND_UP_TEST)
16167 if (dhdp
->req_hang_type
) {
16168 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
16169 __FUNCTION__
, dhdp
->req_hang_type
));
16170 dhdp
->req_hang_type
= 0;
16172 #endif /* DHD_HANG_SEND_UP_TEST */
16174 if (!dhdp
->hang_was_sent
) {
16175 #if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
16176 dhdp
->hang_counts
++;
16177 if (dhdp
->hang_counts
>= MAX_CONSECUTIVE_HANG_COUNTS
) {
16178 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
16179 __func__
, dhdp
->hang_counts
));
16182 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
16183 #ifdef DHD_DEBUG_UART
16184 /* If PCIe lane has broken, execute the debug uart application
16185 * to gether a ramdump data from dongle via uart
16187 if (!dhdp
->info
->duart_execute
) {
16188 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
16189 (void *)dhdp
, DHD_WQ_WORK_DEBUG_UART_DUMP
,
16190 dhd_debug_uart_exec_rd
, DHD_WQ_WORK_PRIORITY_HIGH
);
16192 #endif /* DHD_DEBUG_UART */
16193 dhdp
->hang_was_sent
= 1;
16194 #ifdef BT_OVER_SDIO
16195 dhdp
->is_bt_recovery_required
= TRUE
;
16197 schedule_work(&dhdp
->info
->dhd_hang_process_work
);
16204 int net_os_send_hang_message(struct net_device
*dev
)
16206 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16210 /* Report FW problem when enabled */
16211 if (dhd
->pub
.hang_report
) {
16212 #ifdef BT_OVER_SDIO
16213 if (netif_running(dev
)) {
16214 #endif /* BT_OVER_SDIO */
16215 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16216 ret
= dhd_os_send_hang_message(&dhd
->pub
);
16218 ret
= wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
16220 #ifdef BT_OVER_SDIO
16222 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__
));
16223 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev
));
16224 #endif /* BT_OVER_SDIO */
16226 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
16233 int net_os_send_hang_message_reason(struct net_device
*dev
, const char *string_num
)
16235 dhd_info_t
*dhd
= NULL
;
16236 dhd_pub_t
*dhdp
= NULL
;
16239 dhd
= DHD_DEV_INFO(dev
);
16244 if (!dhd
|| !dhdp
) {
16248 reason
= bcm_strtoul(string_num
, NULL
, 0);
16249 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__
, reason
));
16251 if ((reason
<= HANG_REASON_MASK
) || (reason
>= HANG_REASON_MAX
)) {
16255 dhdp
->hang_reason
= reason
;
16257 return net_os_send_hang_message(dev
);
16259 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
16261 int dhd_net_wifi_platform_set_power(struct net_device
*dev
, bool on
, unsigned long delay_msec
)
16263 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16264 return wifi_platform_set_power(dhd
->adapter
, on
, delay_msec
);
16267 bool dhd_force_country_change(struct net_device
*dev
)
16269 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16271 if (dhd
&& dhd
->pub
.up
)
16272 return dhd
->pub
.force_country_change
;
16276 void dhd_get_customized_country_code(struct net_device
*dev
, char *country_iso_code
,
16277 wl_country_t
*cspec
)
16279 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16280 dhd_pub_t
*dhdp
= &dhd
->pub
;
16282 BCM_REFERENCE(dhdp
);
16283 if (!CHECK_IS_BLOB(dhdp
) || CHECK_IS_MULT_REGREV(dhdp
)) {
16284 #if defined(CUSTOM_COUNTRY_CODE)
16285 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
,
16286 dhd
->pub
.dhd_cflags
);
16288 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
);
16289 #endif /* CUSTOM_COUNTRY_CODE */
16291 #if !defined(CUSTOM_COUNTRY_CODE)
16293 /* Replace the ccode to XZ if ccode is undefined country */
16294 if (strncmp(country_iso_code
, "", WLC_CNTRY_BUF_SZ
) == 0) {
16295 strlcpy(country_iso_code
, "XZ", WLC_CNTRY_BUF_SZ
);
16296 strlcpy(cspec
->country_abbrev
, country_iso_code
, WLC_CNTRY_BUF_SZ
);
16297 strlcpy(cspec
->ccode
, country_iso_code
, WLC_CNTRY_BUF_SZ
);
16298 DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__
, country_iso_code
));
16301 #endif /* !CUSTOM_COUNTRY_CODE */
16303 #if defined(KEEP_KR_REGREV)
16304 if (strncmp(country_iso_code
, "KR", 3) == 0) {
16305 if (!CHECK_IS_BLOB(dhdp
) || CHECK_IS_MULT_REGREV(dhdp
)) {
16306 if (strncmp(dhd
->pub
.vars_ccode
, "KR", 3) == 0) {
16307 cspec
->rev
= dhd
->pub
.vars_regrev
;
16311 #endif /* KEEP_KR_REGREV */
16313 #ifdef KEEP_JP_REGREV
16314 if (strncmp(country_iso_code
, "JP", 3) == 0) {
16315 if (CHECK_IS_BLOB(dhdp
) && !CHECK_IS_MULT_REGREV(dhdp
)) {
16316 if (strncmp(dhd
->pub
.vars_ccode
, "J1", 3) == 0) {
16317 memcpy(cspec
->ccode
, dhd
->pub
.vars_ccode
,
16318 sizeof(dhd
->pub
.vars_ccode
));
16321 if (strncmp(dhd
->pub
.vars_ccode
, "JP", 3) == 0) {
16322 cspec
->rev
= dhd
->pub
.vars_regrev
;
16326 #endif /* KEEP_JP_REGREV */
16327 BCM_REFERENCE(dhd
);
16329 void dhd_bus_country_set(struct net_device
*dev
, wl_country_t
*cspec
, bool notify
)
16331 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16333 struct bcm_cfg80211
*cfg
= wl_get_cfg(dev
);
16336 if (dhd
&& dhd
->pub
.up
) {
16337 memcpy(&dhd
->pub
.dhd_cspec
, cspec
, sizeof(wl_country_t
));
16338 #ifdef DHD_DISABLE_VHTMODE
16339 dhd_disable_vhtmode(&dhd
->pub
);
16340 #endif /* DHD_DISABLE_VHTMODE */
16343 wl_update_wiphybands(cfg
, notify
);
#ifdef DHD_DISABLE_VHTMODE
/* Force the firmware "vhtmode" iovar to 0. If the set fails with BCME_NOTDOWN,
 * bounce the interface (WLC_DOWN, set, WLC_UP) and retry.
 */
void
dhd_disable_vhtmode(dhd_pub_t *dhd)
{
	int ret = 0;
	uint32 vhtmode = FALSE;
	char buf[32];

	/* Get vhtmode */
	ret = dhd_iovar(dhd, 0, "vhtmode", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s Get vhtmode Fail ret %d\n", __FUNCTION__, ret));
		return;
	}
	memcpy(&vhtmode, buf, sizeof(uint32));
	if (vhtmode == 0) {
		DHD_ERROR(("%s Get vhtmode is 0\n", __FUNCTION__));
		return;
	}
	vhtmode = FALSE;

	/* Set vhtmode */
	ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode, sizeof(vhtmode), NULL, 0, TRUE);
	if (ret == 0) {
		DHD_ERROR(("%s Set vhtmode Success %d\n", __FUNCTION__, vhtmode));
	} else {
		if (ret == BCME_NOTDOWN) {
			uint32 wl_down = 1;
			/* iovar requires the interface down: bounce it and retry */
			ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
				(char *)&wl_down, sizeof(wl_down), TRUE, 0);
			if (ret) {
				DHD_ERROR(("%s WL_DOWN Fail ret %d\n", __FUNCTION__, ret));
				return;
			}

			ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode,
				sizeof(vhtmode), NULL, 0, TRUE);
			DHD_ERROR(("%s Set vhtmode %d, ret %d\n", __FUNCTION__, vhtmode, ret));

			ret = dhd_wl_ioctl_cmd(dhd, WLC_UP,
				(char *)&wl_down, sizeof(wl_down), TRUE, 0);
			if (ret) {
				DHD_ERROR(("%s WL_UP Fail ret %d\n", __FUNCTION__, ret));
			}
		} else {
			DHD_ERROR(("%s Set vhtmode 0 failed %d\n", __FUNCTION__, ret));
		}
	}
}
#endif /* DHD_DISABLE_VHTMODE */
16399 void dhd_bus_band_set(struct net_device
*dev
, uint band
)
16401 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16403 struct bcm_cfg80211
*cfg
= wl_get_cfg(dev
);
16405 if (dhd
&& dhd
->pub
.up
) {
16407 wl_update_wiphybands(cfg
, true);
16412 int dhd_net_set_fw_path(struct net_device
*dev
, char *fw
)
16414 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16416 if (!fw
|| fw
[0] == '\0')
16419 strncpy(dhd
->fw_path
, fw
, sizeof(dhd
->fw_path
) - 1);
16420 dhd
->fw_path
[sizeof(dhd
->fw_path
)-1] = '\0';
16422 #if defined(SOFTAP)
16423 if (strstr(fw
, "apsta") != NULL
) {
16424 DHD_INFO(("GOT APSTA FIRMWARE\n"));
16425 ap_fw_loaded
= TRUE
;
16427 DHD_INFO(("GOT STA FIRMWARE\n"));
16428 ap_fw_loaded
= FALSE
;
16434 void dhd_net_if_lock(struct net_device
*dev
)
16436 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16437 dhd_net_if_lock_local(dhd
);
16440 void dhd_net_if_unlock(struct net_device
*dev
)
16442 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16443 dhd_net_if_unlock_local(dhd
);
16446 static void dhd_net_if_lock_local(dhd_info_t
*dhd
)
16448 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16450 mutex_lock(&dhd
->dhd_net_if_mutex
);
16454 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
)
16456 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16458 mutex_unlock(&dhd
->dhd_net_if_mutex
);
16462 static void dhd_suspend_lock(dhd_pub_t
*pub
)
16464 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16465 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16467 mutex_lock(&dhd
->dhd_suspend_mutex
);
16471 static void dhd_suspend_unlock(dhd_pub_t
*pub
)
16473 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16474 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16476 mutex_unlock(&dhd
->dhd_suspend_mutex
);
16480 unsigned long dhd_os_general_spin_lock(dhd_pub_t
*pub
)
16482 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16483 unsigned long flags
= 0;
16486 spin_lock_irqsave(&dhd
->dhd_lock
, flags
);
16491 void dhd_os_general_spin_unlock(dhd_pub_t
*pub
, unsigned long flags
)
16493 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16496 spin_unlock_irqrestore(&dhd
->dhd_lock
, flags
);
16499 /* Linux specific multipurpose spinlock API */
16501 dhd_os_spin_lock_init(osl_t
*osh
)
16503 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
16504 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
16505 /* and this results in kernel asserts in internal builds */
16506 spinlock_t
* lock
= MALLOC(osh
, sizeof(spinlock_t
) + 4);
16508 spin_lock_init(lock
);
16509 return ((void *)lock
);
16512 dhd_os_spin_lock_deinit(osl_t
*osh
, void *lock
)
16515 MFREE(osh
, lock
, sizeof(spinlock_t
) + 4);
16518 dhd_os_spin_lock(void *lock
)
16520 unsigned long flags
= 0;
16523 spin_lock_irqsave((spinlock_t
*)lock
, flags
);
16528 dhd_os_spin_unlock(void *lock
, unsigned long flags
)
16531 spin_unlock_irqrestore((spinlock_t
*)lock
, flags
);
16535 dhd_os_dbgring_lock_init(osl_t
*osh
)
16537 struct mutex
*mtx
= NULL
;
16539 mtx
= MALLOCZ(osh
, sizeof(*mtx
));
16547 dhd_os_dbgring_lock_deinit(osl_t
*osh
, void *mtx
)
16550 mutex_destroy(mtx
);
16551 MFREE(osh
, mtx
, sizeof(struct mutex
));
16556 dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
)
16558 return (atomic_read(&dhd
->pend_8021x_cnt
));
16561 #define MAX_WAIT_FOR_8021X_TX 100
16564 dhd_wait_pend8021x(struct net_device
*dev
)
16566 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16567 int timeout
= msecs_to_jiffies(10);
16568 int ntimes
= MAX_WAIT_FOR_8021X_TX
;
16569 int pend
= dhd_get_pend_8021x_cnt(dhd
);
16571 while (ntimes
&& pend
) {
16573 set_current_state(TASK_INTERRUPTIBLE
);
16574 DHD_PERIM_UNLOCK(&dhd
->pub
);
16575 schedule_timeout(timeout
);
16576 DHD_PERIM_LOCK(&dhd
->pub
);
16577 set_current_state(TASK_RUNNING
);
16580 pend
= dhd_get_pend_8021x_cnt(dhd
);
16584 atomic_set(&dhd
->pend_8021x_cnt
, 0);
16585 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__
));
16590 #if defined(DHD_DEBUG)
16591 int write_file(const char * file_name
, uint32 flags
, uint8
*buf
, int size
)
16594 struct file
*fp
= NULL
;
16595 mm_segment_t old_fs
;
16597 /* change to KERNEL_DS address limit */
16601 /* open file to write */
16602 fp
= filp_open(file_name
, flags
, 0664);
16604 DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp
)));
16608 /* Write buf to file */
16609 ret
= vfs_write(fp
, buf
, size
, &pos
);
16611 DHD_ERROR(("write file error, err = %d\n", ret
));
16615 /* Sync file from filesystem to physical media */
16616 ret
= vfs_fsync(fp
, 0);
16618 DHD_ERROR(("sync file error, error = %d\n", ret
));
16624 /* close file before return */
16626 filp_close(fp
, current
->files
);
16628 /* restore previous address limit */
16637 dhd_convert_memdump_type_to_str(uint32 type
, char *buf
, int substr_type
)
16639 char *type_str
= NULL
;
16642 case DUMP_TYPE_RESUMED_ON_TIMEOUT
:
16643 type_str
= "resumed_on_timeout";
16645 case DUMP_TYPE_D3_ACK_TIMEOUT
:
16646 type_str
= "D3_ACK_timeout";
16648 case DUMP_TYPE_DONGLE_TRAP
:
16649 type_str
= "Dongle_Trap";
16651 case DUMP_TYPE_MEMORY_CORRUPTION
:
16652 type_str
= "Memory_Corruption";
16654 case DUMP_TYPE_PKTID_AUDIT_FAILURE
:
16655 type_str
= "PKTID_AUDIT_Fail";
16657 case DUMP_TYPE_PKTID_INVALID
:
16658 type_str
= "PKTID_INVALID";
16660 case DUMP_TYPE_SCAN_TIMEOUT
:
16661 type_str
= "SCAN_timeout";
16663 case DUMP_TYPE_SCAN_BUSY
:
16664 type_str
= "SCAN_Busy";
16666 case DUMP_TYPE_BY_SYSDUMP
:
16667 if (substr_type
== CMD_UNWANTED
) {
16668 type_str
= "BY_SYSDUMP_FORUSER_unwanted";
16669 } else if (substr_type
== CMD_DISCONNECTED
) {
16670 type_str
= "BY_SYSDUMP_FORUSER_disconnected";
16672 type_str
= "BY_SYSDUMP_FORUSER";
16675 case DUMP_TYPE_BY_LIVELOCK
:
16676 type_str
= "BY_LIVELOCK";
16678 case DUMP_TYPE_AP_LINKUP_FAILURE
:
16679 type_str
= "BY_AP_LINK_FAILURE";
16681 case DUMP_TYPE_AP_ABNORMAL_ACCESS
:
16682 type_str
= "INVALID_ACCESS";
16684 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX
:
16685 type_str
= "ERROR_RX_TIMED_OUT";
16687 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX
:
16688 type_str
= "ERROR_TX_TIMED_OUT";
16690 case DUMP_TYPE_CFG_VENDOR_TRIGGERED
:
16691 type_str
= "CFG_VENDOR_TRIGGERED";
16693 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR
:
16694 type_str
= "BY_INVALID_RING_RDWR";
16696 case DUMP_TYPE_IFACE_OP_FAILURE
:
16697 type_str
= "BY_IFACE_OP_FAILURE";
16699 case DUMP_TYPE_TRANS_ID_MISMATCH
:
16700 type_str
= "BY_TRANS_ID_MISMATCH";
16702 #ifdef DEBUG_DNGL_INIT_FAIL
16703 case DUMP_TYPE_DONGLE_INIT_FAILURE
:
16704 type_str
= "DONGLE_INIT_FAIL";
16706 #endif /* DEBUG_DNGL_INIT_FAIL */
16707 #ifdef SUPPORT_LINKDOWN_RECOVERY
16708 case DUMP_TYPE_READ_SHM_FAIL
:
16709 type_str
= "READ_SHM_FAIL";
16711 #endif /* SUPPORT_LINKDOWN_RECOVERY */
16712 case DUMP_TYPE_DONGLE_HOST_EVENT
:
16713 type_str
= "BY_DONGLE_HOST_EVENT";
16715 case DUMP_TYPE_SMMU_FAULT
:
16716 type_str
= "SMMU_FAULT";
16718 case DUMP_TYPE_BY_USER
:
16719 type_str
= "BY_USER";
16722 case DUMP_TYPE_DUE_TO_BT
:
16723 type_str
= "DUE_TO_BT";
16725 #endif /* DHD_ERPOM */
16727 type_str
= "Unknown_type";
16731 strncpy(buf
, type_str
, strlen(type_str
));
16732 buf
[strlen(type_str
)] = 0;
16736 write_dump_to_file(dhd_pub_t
*dhd
, uint8
*buf
, int size
, char *fname
)
16739 char memdump_path
[128];
16740 char memdump_type
[32];
16741 struct timeval curtime
;
16744 /* Init file name */
16745 memset(memdump_path
, 0, sizeof(memdump_path
));
16746 memset(memdump_type
, 0, sizeof(memdump_type
));
16747 do_gettimeofday(&curtime
);
16748 dhd_convert_memdump_type_to_str(dhd
->memdump_type
, memdump_type
, dhd
->debug_dump_subcmd
);
16749 #ifdef CUSTOMER_HW4_DEBUG
16750 get_debug_dump_time(dhd
->debug_dump_time_str
);
16751 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_" "%s",
16752 DHD_COMMON_DUMP_PATH
, fname
, memdump_type
, dhd
->debug_dump_time_str
);
16753 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16754 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
16755 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16756 "/data/misc/wifi/", fname
, memdump_type
,
16757 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16758 file_mode
= O_CREAT
| O_WRONLY
;
16760 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16761 "/installmedia/", fname
, memdump_type
,
16762 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16763 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
16764 * calling BUG_ON immediately after collecting the socram dump.
16765 * So the file write operation should directly write the contents into the
16766 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
16767 * instead of appending.
16769 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
;
16771 struct file
*fp
= filp_open(memdump_path
, file_mode
, 0664);
16772 /* Check if it is live Brix image having /installmedia, else use /data */
16774 DHD_ERROR(("open file %s, try /data/\n", memdump_path
));
16775 snprintf(memdump_path
, sizeof(memdump_path
), "%s%s_%s_%ld.%ld",
16776 "/data/", fname
, memdump_type
,
16777 (unsigned long)curtime
.tv_sec
, (unsigned long)curtime
.tv_usec
);
16779 filp_close(fp
, NULL
);
16782 #endif /* CUSTOMER_HW4_DEBUG */
16784 /* print SOCRAM dump file path */
16785 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__
, memdump_path
));
16787 #ifdef DHD_LOG_DUMP
16788 dhd_print_buf_addr(dhd
, "write_dump_to_file", buf
, size
);
16789 #endif /* DHD_LOG_DUMP */
16792 ret
= write_file(memdump_path
, file_mode
, buf
, size
);
16794 #ifdef DHD_DUMP_MNGR
16795 if (ret
== BCME_OK
) {
16796 dhd_dump_file_manage_enqueue(dhd
, memdump_path
, fname
);
16798 #endif /* DHD_DUMP_MNGR */
16802 #endif /* DHD_DEBUG */
16804 int dhd_os_wake_lock_timeout(dhd_pub_t
*pub
)
16806 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16807 unsigned long flags
;
16810 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16811 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16812 ret
= dhd
->wakelock_rx_timeout_enable
> dhd
->wakelock_ctrl_timeout_enable
?
16813 dhd
->wakelock_rx_timeout_enable
: dhd
->wakelock_ctrl_timeout_enable
;
16814 #ifdef CONFIG_HAS_WAKELOCK
16815 if (dhd
->wakelock_rx_timeout_enable
)
16816 wake_lock_timeout(&dhd
->wl_rxwake
,
16817 msecs_to_jiffies(dhd
->wakelock_rx_timeout_enable
));
16818 if (dhd
->wakelock_ctrl_timeout_enable
)
16819 wake_lock_timeout(&dhd
->wl_ctrlwake
,
16820 msecs_to_jiffies(dhd
->wakelock_ctrl_timeout_enable
));
16822 dhd
->wakelock_rx_timeout_enable
= 0;
16823 dhd
->wakelock_ctrl_timeout_enable
= 0;
16824 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16829 int net_os_wake_lock_timeout(struct net_device
*dev
)
16831 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16835 ret
= dhd_os_wake_lock_timeout(&dhd
->pub
);
16839 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t
*pub
, int val
)
16841 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16842 unsigned long flags
;
16844 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16845 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16846 if (val
> dhd
->wakelock_rx_timeout_enable
)
16847 dhd
->wakelock_rx_timeout_enable
= val
;
16848 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16853 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t
*pub
, int val
)
16855 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16856 unsigned long flags
;
16858 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16859 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16860 if (val
> dhd
->wakelock_ctrl_timeout_enable
)
16861 dhd
->wakelock_ctrl_timeout_enable
= val
;
16862 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16867 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t
*pub
)
16869 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
16870 unsigned long flags
;
16872 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
16873 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
16874 dhd
->wakelock_ctrl_timeout_enable
= 0;
16875 #ifdef CONFIG_HAS_WAKELOCK
16876 if (wake_lock_active(&dhd
->wl_ctrlwake
))
16877 wake_unlock(&dhd
->wl_ctrlwake
);
16879 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
16884 int net_os_wake_lock_rx_timeout_enable(struct net_device
*dev
, int val
)
16886 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16890 ret
= dhd_os_wake_lock_rx_timeout_enable(&dhd
->pub
, val
);
16894 int net_os_wake_lock_ctrl_timeout_enable(struct net_device
*dev
, int val
)
16896 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
16900 ret
= dhd_os_wake_lock_ctrl_timeout_enable(&dhd
->pub
, val
);
16904 #if defined(DHD_TRACE_WAKE_LOCK)
16905 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16906 #include <linux/hashtable.h>
16908 #include <linux/hash.h>
16909 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16911 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16912 /* Define 2^5 = 32 bucket size hash table */
16913 DEFINE_HASHTABLE(wklock_history
, 5);
16915 /* Define 2^5 = 32 bucket size hash table */
16916 struct hlist_head wklock_history
[32] = { [0 ... 31] = HLIST_HEAD_INIT
};
16917 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16919 atomic_t trace_wklock_onoff
;
16920 typedef enum dhd_wklock_type
{
16927 struct wk_trace_record
{
16928 unsigned long addr
; /* Address of the instruction */
16929 dhd_wklock_t lock_type
; /* lock_type */
16930 unsigned long long counter
; /* counter information */
16931 struct hlist_node wklock_node
; /* hash node */
16934 static struct wk_trace_record
*find_wklock_entry(unsigned long addr
)
16936 struct wk_trace_record
*wklock_info
;
16937 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16938 hash_for_each_possible(wklock_history
, wklock_info
, wklock_node
, addr
)
16940 struct hlist_node
*entry
;
16941 int index
= hash_long(addr
, ilog2(ARRAY_SIZE(wklock_history
)));
16942 hlist_for_each_entry(wklock_info
, entry
, &wklock_history
[index
], wklock_node
)
16943 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16945 if (wklock_info
->addr
== addr
) {
16946 return wklock_info
;
16952 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16953 #define HASH_ADD(hashtable, node, key) \
16955 hash_add(hashtable, node, key); \
16958 #define HASH_ADD(hashtable, node, key) \
16960 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
16961 hlist_add_head(node, &hashtable[index]); \
16963 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
16965 #define STORE_WKLOCK_RECORD(wklock_type) \
16967 struct wk_trace_record *wklock_info = NULL; \
16968 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
16969 wklock_info = find_wklock_entry(func_addr); \
16970 if (wklock_info) { \
16971 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
16972 wklock_info->counter = dhd->wakelock_counter; \
16974 wklock_info->counter++; \
16977 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
16978 if (!wklock_info) {\
16979 printk("Can't allocate wk_trace_record \n"); \
16981 wklock_info->addr = func_addr; \
16982 wklock_info->lock_type = wklock_type; \
16983 if (wklock_type == DHD_WAIVE_LOCK || \
16984 wklock_type == DHD_RESTORE_LOCK) { \
16985 wklock_info->counter = dhd->wakelock_counter; \
16987 wklock_info->counter++; \
16989 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
16994 static inline void dhd_wk_lock_rec_dump(void)
16997 struct wk_trace_record
*wklock_info
;
16999 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17000 hash_for_each(wklock_history
, bkt
, wklock_info
, wklock_node
)
17002 struct hlist_node
*entry
= NULL
;
17003 int max_index
= ARRAY_SIZE(wklock_history
);
17004 for (bkt
= 0; bkt
< max_index
; bkt
++)
17005 hlist_for_each_entry(wklock_info
, entry
, &wklock_history
[bkt
], wklock_node
)
17006 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17008 switch (wklock_info
->lock_type
) {
17009 case DHD_WAKE_LOCK
:
17010 printk("wakelock lock : %pS lock_counter : %llu \n",
17011 (void *)wklock_info
->addr
, wklock_info
->counter
);
17013 case DHD_WAKE_UNLOCK
:
17014 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
17015 (void *)wklock_info
->addr
, wklock_info
->counter
);
17017 case DHD_WAIVE_LOCK
:
17018 printk("wakelock waive : %pS before_waive : %llu \n",
17019 (void *)wklock_info
->addr
, wklock_info
->counter
);
17021 case DHD_RESTORE_LOCK
:
17022 printk("wakelock restore : %pS, after_waive : %llu \n",
17023 (void *)wklock_info
->addr
, wklock_info
->counter
);
17029 static void dhd_wk_lock_trace_init(struct dhd_info
*dhd
)
17031 unsigned long flags
;
17032 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17034 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17036 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17037 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17038 hash_init(wklock_history
);
17040 for (i
= 0; i
< ARRAY_SIZE(wklock_history
); i
++)
17041 INIT_HLIST_HEAD(&wklock_history
[i
]);
17042 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17043 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17044 atomic_set(&trace_wklock_onoff
, 1);
17047 static void dhd_wk_lock_trace_deinit(struct dhd_info
*dhd
)
17050 struct wk_trace_record
*wklock_info
;
17051 struct hlist_node
*tmp
;
17052 unsigned long flags
;
17053 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17054 struct hlist_node
*entry
= NULL
;
17055 int max_index
= ARRAY_SIZE(wklock_history
);
17056 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17058 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17059 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17060 hash_for_each_safe(wklock_history
, bkt
, tmp
, wklock_info
, wklock_node
)
17062 for (bkt
= 0; bkt
< max_index
; bkt
++)
17063 hlist_for_each_entry_safe(wklock_info
, entry
, tmp
,
17064 &wklock_history
[bkt
], wklock_node
)
17065 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
17067 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17068 hash_del(&wklock_info
->wklock_node
);
17070 hlist_del_init(&wklock_info
->wklock_node
);
17071 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
17072 kfree(wklock_info
);
17074 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17077 void dhd_wk_lock_stats_dump(dhd_pub_t
*dhdp
)
17079 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
17080 unsigned long flags
;
17082 printk(KERN_ERR
"DHD Printing wl_wake Lock/Unlock Record \r\n");
17083 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17084 dhd_wk_lock_rec_dump();
17085 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17089 #define STORE_WKLOCK_RECORD(wklock_type)
17090 #endif /* ! DHD_TRACE_WAKE_LOCK */
17092 int dhd_os_wake_lock(dhd_pub_t
*pub
)
17094 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17095 unsigned long flags
;
17098 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
17099 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17100 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
17101 #ifdef CONFIG_HAS_WAKELOCK
17102 wake_lock(&dhd
->wl_wifi
);
17103 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17104 dhd_bus_dev_pm_stay_awake(pub
);
17107 #ifdef DHD_TRACE_WAKE_LOCK
17108 if (atomic_read(&trace_wklock_onoff
)) {
17109 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK
);
17111 #endif /* DHD_TRACE_WAKE_LOCK */
17112 dhd
->wakelock_counter
++;
17113 ret
= dhd
->wakelock_counter
;
17114 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17120 void dhd_event_wake_lock(dhd_pub_t
*pub
)
17122 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17125 #ifdef CONFIG_HAS_WAKELOCK
17126 wake_lock(&dhd
->wl_evtwake
);
17127 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17128 dhd_bus_dev_pm_stay_awake(pub
);
17134 dhd_pm_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17136 #ifdef CONFIG_HAS_WAKELOCK
17137 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17140 wake_lock_timeout(&dhd
->wl_pmwake
, msecs_to_jiffies(val
));
17142 #endif /* CONFIG_HAS_WAKE_LOCK */
17146 dhd_txfl_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17148 #ifdef CONFIG_HAS_WAKELOCK
17149 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17152 wake_lock_timeout(&dhd
->wl_txflwake
, msecs_to_jiffies(val
));
17154 #endif /* CONFIG_HAS_WAKE_LOCK */
17157 int net_os_wake_lock(struct net_device
*dev
)
17159 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
17163 ret
= dhd_os_wake_lock(&dhd
->pub
);
17167 int dhd_os_wake_unlock(dhd_pub_t
*pub
)
17169 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17170 unsigned long flags
;
17173 dhd_os_wake_lock_timeout(pub
);
17174 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
17175 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17177 if (dhd
->wakelock_counter
> 0) {
17178 dhd
->wakelock_counter
--;
17179 #ifdef DHD_TRACE_WAKE_LOCK
17180 if (atomic_read(&trace_wklock_onoff
)) {
17181 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK
);
17183 #endif /* DHD_TRACE_WAKE_LOCK */
17184 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
17185 #ifdef CONFIG_HAS_WAKELOCK
17186 wake_unlock(&dhd
->wl_wifi
);
17187 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17188 dhd_bus_dev_pm_relax(pub
);
17191 ret
= dhd
->wakelock_counter
;
17193 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17198 void dhd_event_wake_unlock(dhd_pub_t
*pub
)
17200 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17203 #ifdef CONFIG_HAS_WAKELOCK
17204 wake_unlock(&dhd
->wl_evtwake
);
17205 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17206 dhd_bus_dev_pm_relax(pub
);
17211 void dhd_pm_wake_unlock(dhd_pub_t
*pub
)
17213 #ifdef CONFIG_HAS_WAKELOCK
17214 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17217 /* if wl_pmwake is active, unlock it */
17218 if (wake_lock_active(&dhd
->wl_pmwake
)) {
17219 wake_unlock(&dhd
->wl_pmwake
);
17222 #endif /* CONFIG_HAS_WAKELOCK */
17225 void dhd_txfl_wake_unlock(dhd_pub_t
*pub
)
17227 #ifdef CONFIG_HAS_WAKELOCK
17228 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17231 /* if wl_txflwake is active, unlock it */
17232 if (wake_lock_active(&dhd
->wl_txflwake
)) {
17233 wake_unlock(&dhd
->wl_txflwake
);
17236 #endif /* CONFIG_HAS_WAKELOCK */
17239 int dhd_os_check_wakelock(dhd_pub_t
*pub
)
17241 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17242 KERNEL_VERSION(2, 6, 36)))
17247 dhd
= (dhd_info_t
*)(pub
->info
);
17248 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17250 #ifdef CONFIG_HAS_WAKELOCK
17251 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
17252 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
17253 (wake_lock_active(&dhd
->wl_wdwake
))))
17255 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17256 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
17263 dhd_os_check_wakelock_all(dhd_pub_t
*pub
)
17265 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17266 KERNEL_VERSION(2, 6, 36)))
17267 #if defined(CONFIG_HAS_WAKELOCK)
17268 int l1
, l2
, l3
, l4
, l7
, l8
, l9
;
17269 int l5
= 0, l6
= 0;
17270 int c
, lock_active
;
17271 #endif /* CONFIG_HAS_WAKELOCK */
17277 dhd
= (dhd_info_t
*)(pub
->info
);
17281 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17283 #ifdef CONFIG_HAS_WAKELOCK
17284 c
= dhd
->wakelock_counter
;
17285 l1
= wake_lock_active(&dhd
->wl_wifi
);
17286 l2
= wake_lock_active(&dhd
->wl_wdwake
);
17287 l3
= wake_lock_active(&dhd
->wl_rxwake
);
17288 l4
= wake_lock_active(&dhd
->wl_ctrlwake
);
17289 l7
= wake_lock_active(&dhd
->wl_evtwake
);
17290 #ifdef BCMPCIE_OOB_HOST_WAKE
17291 l5
= wake_lock_active(&dhd
->wl_intrwake
);
17292 #endif /* BCMPCIE_OOB_HOST_WAKE */
17293 #ifdef DHD_USE_SCAN_WAKELOCK
17294 l6
= wake_lock_active(&dhd
->wl_scanwake
);
17295 #endif /* DHD_USE_SCAN_WAKELOCK */
17296 l8
= wake_lock_active(&dhd
->wl_pmwake
);
17297 l9
= wake_lock_active(&dhd
->wl_txflwake
);
17298 lock_active
= (l1
|| l2
|| l3
|| l4
|| l5
|| l6
|| l7
|| l8
|| l9
);
17300 /* Indicate to the Host to avoid going to suspend if internal locks are up */
17302 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
17303 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
17304 __FUNCTION__
, c
, l1
, l2
, l3
, l4
, l5
, l6
, l7
, l8
, l9
));
17307 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17308 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
)) {
17311 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
17315 int net_os_wake_unlock(struct net_device
*dev
)
17317 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
17321 ret
= dhd_os_wake_unlock(&dhd
->pub
);
17325 int dhd_os_wd_wake_lock(dhd_pub_t
*pub
)
17327 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17328 unsigned long flags
;
17332 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17333 if (dhd
->wakelock_wd_counter
== 0 && !dhd
->waive_wakelock
) {
17334 #ifdef CONFIG_HAS_WAKELOCK
17335 /* if wakelock_wd_counter was never used : lock it at once */
17336 wake_lock(&dhd
->wl_wdwake
);
17339 dhd
->wakelock_wd_counter
++;
17340 ret
= dhd
->wakelock_wd_counter
;
17341 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17346 int dhd_os_wd_wake_unlock(dhd_pub_t
*pub
)
17348 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17349 unsigned long flags
;
17353 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17354 if (dhd
->wakelock_wd_counter
> 0) {
17355 dhd
->wakelock_wd_counter
= 0;
17356 if (!dhd
->waive_wakelock
) {
17357 #ifdef CONFIG_HAS_WAKELOCK
17358 wake_unlock(&dhd
->wl_wdwake
);
17362 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17367 #ifdef BCMPCIE_OOB_HOST_WAKE
17369 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17371 #ifdef CONFIG_HAS_WAKELOCK
17372 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17375 wake_lock_timeout(&dhd
->wl_intrwake
, msecs_to_jiffies(val
));
17377 #endif /* CONFIG_HAS_WAKELOCK */
17381 dhd_os_oob_irq_wake_unlock(dhd_pub_t
*pub
)
17383 #ifdef CONFIG_HAS_WAKELOCK
17384 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17387 /* if wl_intrwake is active, unlock it */
17388 if (wake_lock_active(&dhd
->wl_intrwake
)) {
17389 wake_unlock(&dhd
->wl_intrwake
);
17392 #endif /* CONFIG_HAS_WAKELOCK */
17394 #endif /* BCMPCIE_OOB_HOST_WAKE */
17396 #ifdef DHD_USE_SCAN_WAKELOCK
17398 dhd_os_scan_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
17400 #ifdef CONFIG_HAS_WAKELOCK
17401 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17404 wake_lock_timeout(&dhd
->wl_scanwake
, msecs_to_jiffies(val
));
17406 #endif /* CONFIG_HAS_WAKELOCK */
17410 dhd_os_scan_wake_unlock(dhd_pub_t
*pub
)
17412 #ifdef CONFIG_HAS_WAKELOCK
17413 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17416 /* if wl_scanwake is active, unlock it */
17417 if (wake_lock_active(&dhd
->wl_scanwake
)) {
17418 wake_unlock(&dhd
->wl_scanwake
);
17421 #endif /* CONFIG_HAS_WAKELOCK */
17423 #endif /* DHD_USE_SCAN_WAKELOCK */
17425 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
17426 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
17428 int dhd_os_wake_lock_waive(dhd_pub_t
*pub
)
17430 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17431 unsigned long flags
;
17434 if (dhd
&& (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
)) {
17435 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17437 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
17438 if (dhd
->waive_wakelock
== FALSE
) {
17439 #ifdef DHD_TRACE_WAKE_LOCK
17440 if (atomic_read(&trace_wklock_onoff
)) {
17441 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK
);
17443 #endif /* DHD_TRACE_WAKE_LOCK */
17444 /* record current lock status */
17445 dhd
->wakelock_before_waive
= dhd
->wakelock_counter
;
17446 dhd
->waive_wakelock
= TRUE
;
17448 ret
= dhd
->wakelock_wd_counter
;
17449 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17454 int dhd_os_wake_lock_restore(dhd_pub_t
*pub
)
17456 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
17457 unsigned long flags
;
17462 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) == 0)
17465 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
17467 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
17468 if (!dhd
->waive_wakelock
)
17471 dhd
->waive_wakelock
= FALSE
;
17472 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
17473 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
17474 * the lock in between, do the same by calling wake_unlock or pm_relax
17476 #ifdef DHD_TRACE_WAKE_LOCK
17477 if (atomic_read(&trace_wklock_onoff
)) {
17478 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK
);
17480 #endif /* DHD_TRACE_WAKE_LOCK */
17482 if (dhd
->wakelock_before_waive
== 0 && dhd
->wakelock_counter
> 0) {
17483 #ifdef CONFIG_HAS_WAKELOCK
17484 wake_lock(&dhd
->wl_wifi
);
17485 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17486 dhd_bus_dev_pm_stay_awake(&dhd
->pub
);
17488 } else if (dhd
->wakelock_before_waive
> 0 && dhd
->wakelock_counter
== 0) {
17489 #ifdef CONFIG_HAS_WAKELOCK
17490 wake_unlock(&dhd
->wl_wifi
);
17491 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17492 dhd_bus_dev_pm_relax(&dhd
->pub
);
17495 dhd
->wakelock_before_waive
= 0;
17497 ret
= dhd
->wakelock_wd_counter
;
17498 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
17502 void dhd_os_wake_lock_init(struct dhd_info
*dhd
)
17504 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__
));
17505 dhd
->wakelock_counter
= 0;
17506 dhd
->wakelock_rx_timeout_enable
= 0;
17507 dhd
->wakelock_ctrl_timeout_enable
= 0;
17508 /* wakelocks prevent a system from going into a low power state */
17509 #ifdef CONFIG_HAS_WAKELOCK
17510 wake_lock_init(&dhd
->wl_wifi
, WAKE_LOCK_SUSPEND
, "wlan_wake");
17511 wake_lock_init(&dhd
->wl_rxwake
, WAKE_LOCK_SUSPEND
, "wlan_rx_wake");
17512 wake_lock_init(&dhd
->wl_ctrlwake
, WAKE_LOCK_SUSPEND
, "wlan_ctrl_wake");
17513 wake_lock_init(&dhd
->wl_evtwake
, WAKE_LOCK_SUSPEND
, "wlan_evt_wake");
17514 wake_lock_init(&dhd
->wl_pmwake
, WAKE_LOCK_SUSPEND
, "wlan_pm_wake");
17515 wake_lock_init(&dhd
->wl_txflwake
, WAKE_LOCK_SUSPEND
, "wlan_txfl_wake");
17516 #ifdef BCMPCIE_OOB_HOST_WAKE
17517 wake_lock_init(&dhd
->wl_intrwake
, WAKE_LOCK_SUSPEND
, "wlan_oob_irq_wake");
17518 #endif /* BCMPCIE_OOB_HOST_WAKE */
17519 #ifdef DHD_USE_SCAN_WAKELOCK
17520 wake_lock_init(&dhd
->wl_scanwake
, WAKE_LOCK_SUSPEND
, "wlan_scan_wake");
17521 #endif /* DHD_USE_SCAN_WAKELOCK */
17522 #endif /* CONFIG_HAS_WAKELOCK */
17523 #ifdef DHD_TRACE_WAKE_LOCK
17524 dhd_wk_lock_trace_init(dhd
);
17525 #endif /* DHD_TRACE_WAKE_LOCK */
/* Tear down all OS wakelocks and reset the counters (mirror of init). */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	wake_lock_destroy(&dhd->wl_wifi);
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
	wake_lock_destroy(&dhd->wl_pmwake);
	wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}
17553 bool dhd_os_check_if_up(dhd_pub_t
*pub
)
17560 #if defined(BCMSDIO) || defined(BCMPCIE)
17561 /* function to collect firmware, chip id and chip version info */
17562 void dhd_set_version_info(dhd_pub_t
*dhdp
, char *fw
)
17566 i
= snprintf(info_string
, sizeof(info_string
),
17567 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR
, fw
);
17572 i
= snprintf(&info_string
[i
], sizeof(info_string
) - i
,
17573 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp
),
17574 dhd_bus_chiprev_id(dhdp
), dhd_bus_chippkg_id(dhdp
));
17576 #endif /* BCMSDIO || BCMPCIE */
17577 int dhd_ioctl_entry_local(struct net_device
*net
, wl_ioctl_t
*ioc
, int cmd
)
17581 dhd_info_t
*dhd
= NULL
;
17583 if (!net
|| !DEV_PRIV(net
)) {
17584 DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
17585 __FUNCTION__
, net
, DEV_PRIV(net
)));
17589 dhd
= DHD_DEV_INFO(net
);
17593 ifidx
= dhd_net2idx(dhd
, net
);
17594 if (ifidx
== DHD_BAD_IF
) {
17595 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
17599 DHD_OS_WAKE_LOCK(&dhd
->pub
);
17600 DHD_PERIM_LOCK(&dhd
->pub
);
17602 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, ioc
, ioc
->buf
, ioc
->len
);
17603 dhd_check_hang(net
, &dhd
->pub
, ret
);
17605 DHD_PERIM_UNLOCK(&dhd
->pub
);
17606 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
/*
 * Resolve the interface index to its net_device and run the hang check on
 * it (dhd_check_hang inspects 'ret' / bus state to decide whether to raise
 * a HANG event).
 *
 * NOTE(review): the guard condition and its early return were elided in
 * extraction; reconstructed as the conventional !net check that matches the
 * visible "Invalid index" error print — confirm against upstream.
 */
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
/* Return instance (adapter unit number) of this DHD instance */
int dhd_get_instance(dhd_pub_t *dhdp)
{
	return dhdp->info->unit;
}
17630 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
17631 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
17632 int dhd_deepsleep(struct net_device
*dev
, int flag
)
17641 dhd
= DHD_DEV_INFO(dev
);
17645 case 1 : /* Deepsleep on */
17646 DHD_ERROR(("[WiFi] Deepsleep On\n"));
17647 /* give some time to sysioc_work before deepsleep */
17649 #ifdef PKT_FILTER_SUPPORT
17650 /* disable pkt filter */
17651 dhd_enable_packet_filter(0, dhdp
);
17652 #endif /* PKT_FILTER_SUPPORT */
17655 ret
= dhd_iovar(dhdp
, 0, "mpc", (char *)&powervar
, sizeof(powervar
), NULL
,
17658 /* Enable Deepsleep */
17660 ret
= dhd_iovar(dhdp
, 0, "deepsleep", (char *)&powervar
, sizeof(powervar
),
17664 case 0: /* Deepsleep Off */
17665 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
17667 /* Disable Deepsleep */
17668 for (cnt
= 0; cnt
< MAX_TRY_CNT
; cnt
++) {
17670 ret
= dhd_iovar(dhdp
, 0, "deepsleep", (char *)&powervar
,
17671 sizeof(powervar
), NULL
, 0, TRUE
);
17673 ret
= dhd_iovar(dhdp
, 0, "deepsleep", (char *)&powervar
,
17674 sizeof(powervar
), iovbuf
, sizeof(iovbuf
), FALSE
);
17676 DHD_ERROR(("the error of dhd deepsleep status"
17677 " ret value :%d\n", ret
));
17679 if (!(*(int *)iovbuf
)) {
17680 DHD_ERROR(("deepsleep mode is 0,"
17681 " count: %d\n", cnt
));
17689 ret
= dhd_iovar(dhdp
, 0, "mpc", (char *)&powervar
, sizeof(powervar
), NULL
,
17696 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
17698 #ifdef PROP_TXSTATUS
17700 void dhd_wlfc_plat_init(void *dhd
)
17702 #ifdef USE_DYNAMIC_F2_BLKSIZE
17703 dhdsdio_func_blocksize((dhd_pub_t
*)dhd
, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY
);
17704 #endif /* USE_DYNAMIC_F2_BLKSIZE */
/*
 * Platform hook invoked when proptxstatus (wlfc) is deinitialized.
 * Restores the SDIO function-2 block size to the module default
 * (sd_f2_blocksize); a no-op without USE_DYNAMIC_F2_BLKSIZE.
 */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
}
17716 bool dhd_wlfc_skip_fc(void * dhdp
, uint8 idx
)
17718 #ifdef SKIP_WLFC_ON_CONCURRENT
17721 struct net_device
* net
= dhd_idx2net((dhd_pub_t
*)dhdp
, idx
);
17723 /* enable flow control in vsdb mode */
17724 return !(wl_cfg80211_is_concurrent_mode(net
));
17726 return TRUE
; /* skip flow control */
17727 #endif /* WL_CFG80211 */
17731 #endif /* SKIP_WLFC_ON_CONCURRENT */
17734 #endif /* PROP_TXSTATUS */
17737 #include <linux/debugfs.h>
/*
 * State for the "dhd" debugfs interface: the directory, the "mem" file
 * under it, and the register window it exposes.
 *
 * NOTE(review): the dhdp/size members were elided in extraction and are
 * reconstructed from their uses below (g_dbgfs.dhdp->bus in the read/write
 * handlers, g_dbgfs.size set in dhd_dbgfs_init) — confirm against upstream.
 */
typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;	/* debugfs "dhd" directory */
	struct dentry	*debugfs_mem;	/* "mem" register-access file */
	dhd_pub_t	*dhdp;		/* bus handle used by read/write ops */
	uint32		size;		/* size of the accessible register window */
} dhd_dbgfs_t;

/* single global instance: only one debugfs interface per driver load */
dhd_dbgfs_t g_dbgfs;
17748 extern uint32
dhd_readregl(void *bp
, uint32 addr
);
17749 extern uint32
dhd_writeregl(void *bp
, uint32 addr
, uint32 data
);
/*
 * debugfs open handler: stash the inode's private pointer so read/write
 * handlers can retrieve it via file->private_data.
 * NOTE(review): return type and 'return 0' were elided in extraction;
 * reconstructed per the file_operations .open contract.
 */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
17759 dhd_dbg_state_read(struct file
*file
, char __user
*ubuf
,
17760 size_t count
, loff_t
*ppos
)
17764 loff_t pos
= *ppos
;
17769 if (pos
>= g_dbgfs
.size
|| !count
)
17771 if (count
> g_dbgfs
.size
- pos
)
17772 count
= g_dbgfs
.size
- pos
;
17774 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
17775 tmp
= dhd_readregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3));
17777 ret
= copy_to_user(ubuf
, &tmp
, 4);
17782 *ppos
= pos
+ count
;
17789 dhd_debugfs_write(struct file
*file
, const char __user
*ubuf
, size_t count
, loff_t
*ppos
)
17791 loff_t pos
= *ppos
;
17797 if (pos
>= g_dbgfs
.size
|| !count
)
17799 if (count
> g_dbgfs
.size
- pos
)
17800 count
= g_dbgfs
.size
- pos
;
17802 ret
= copy_from_user(&buf
, ubuf
, sizeof(uint32
));
17806 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
17807 dhd_writeregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3), buf
);
17813 dhd_debugfs_lseek(struct file
*file
, loff_t off
, int whence
)
17822 pos
= file
->f_pos
+ off
;
17825 pos
= g_dbgfs
.size
- off
;
17827 return (pos
< 0 || pos
> g_dbgfs
.size
) ? -EINVAL
: (file
->f_pos
= pos
);
/* File operations backing the debugfs "mem" node created in
 * dhd_dbgfs_create(): 4-byte aligned register peek/poke plus seek.
 */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write  = dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek = dhd_debugfs_lseek
};
/*
 * Create the "mem" file inside the already-created debugfs directory.
 * 0644: world-readable, root-writable. i_private is left NULL — the
 * handlers use the global g_dbgfs instead.
 */
static void dhd_dbgfs_create(void)
{
	if (g_dbgfs.debugfs_dir) {
		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
			NULL, &dhd_dbg_state_ops);
	}
}
/*
 * Set up the debugfs "dhd" directory and its "mem" file.
 * Bails out silently if debugfs directory creation fails (e.g. debugfs
 * not mounted / compiled out), leaving debugfs_dir NULL so
 * dhd_dbgfs_remove() is safe to call regardless.
 *
 * NOTE(review): the early 'return' inside the IS_ERR branch was elided in
 * extraction and is reconstructed — confirm against upstream.
 */
void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
	g_dbgfs.dhdp = dhdp;
	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */

	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
	if (IS_ERR(g_dbgfs.debugfs_dir)) {
		g_dbgfs.debugfs_dir = NULL;
		return;
	}

	dhd_dbgfs_create();
}
/*
 * Remove the debugfs entries and clear the global state so a later
 * dhd_dbgfs_init() starts clean. debugfs_remove(NULL) is a no-op, so this
 * is safe even if init failed partway.
 */
void dhd_dbgfs_remove(void)
{
	debugfs_remove(g_dbgfs.debugfs_mem);
	debugfs_remove(g_dbgfs.debugfs_dir);

	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
17868 #endif /* BCMDBGFS */
17870 #ifdef CUSTOM_SET_CPUCORE
17871 void dhd_set_cpucore(dhd_pub_t
*dhd
, int set
)
17873 int e_dpc
= 0, e_rxf
= 0, retry_set
= 0;
17875 if (!(dhd
->chan_isvht80
)) {
17876 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__
, dhd
->chan_isvht80
));
17883 e_dpc
= set_cpus_allowed_ptr(dhd
->current_dpc
,
17884 cpumask_of(DPC_CPUCORE
));
17886 e_dpc
= set_cpus_allowed_ptr(dhd
->current_dpc
,
17887 cpumask_of(PRIMARY_CPUCORE
));
17889 if (retry_set
++ > MAX_RETRY_SET_CPUCORE
) {
17890 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__
, e_dpc
));
17895 } while (e_dpc
< 0);
17900 e_rxf
= set_cpus_allowed_ptr(dhd
->current_rxf
,
17901 cpumask_of(RXF_CPUCORE
));
17903 e_rxf
= set_cpus_allowed_ptr(dhd
->current_rxf
,
17904 cpumask_of(PRIMARY_CPUCORE
));
17906 if (retry_set
++ > MAX_RETRY_SET_CPUCORE
) {
17907 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__
, e_rxf
));
17912 } while (e_rxf
< 0);
17914 #ifdef DHD_OF_SUPPORT
17915 interrupt_set_cpucore(set
, DPC_CPUCORE
, PRIMARY_CPUCORE
);
17916 #endif /* DHD_OF_SUPPORT */
17917 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__
, set
));
17921 #endif /* CUSTOM_SET_CPUCORE */
17923 #ifdef DHD_MCAST_REGEN
/* Get interface specific mcast_regen_bss_enable configuration
 * (original header comment said "ap_isolate" — copy/paste slip)
 */
int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	return ifp->mcast_regen_bss_enable;
}
/* Set interface specific mcast_regen configuration */
int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ifp->mcast_regen_bss_enable = val;

	/* Disable rx_pkt_chain feature for interface, if mcast_regen feature
	 * is enabled
	 */
	dhd_update_rx_pkt_chainable_state(dhdp, idx);
	/* NOTE(review): tail of function elided in extraction; a BCME_OK
	 * return is reconstructed to satisfy the int return type — confirm
	 * against upstream.
	 */
	return BCME_OK;
}
/* Get interface specific ap_isolate configuration */
int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	return ifp->ap_isolate;
}
/* Set interface specific ap_isolate configuration */
int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	/* NOTE(review): lines between the iflist lookup and the assignment
	 * were elided in extraction; an 'if (ifp)' guard and a 0 return are
	 * reconstructed (unlike the getter, the extraction gap here suggests
	 * a NULL check) — confirm against upstream.
	 */
	if (ifp)
		ifp->ap_isolate = val;

	return 0;
}
17986 #ifdef DHD_FW_COREDUMP
17987 void dhd_schedule_memdump(dhd_pub_t
*dhdp
, uint8
*buf
, uint32 size
)
17989 unsigned long flags
= 0;
17990 dhd_dump_t
*dump
= NULL
;
17991 dhd_info_t
*dhd_info
= NULL
;
17992 dhd_info
= (dhd_info_t
*)dhdp
->info
;
17993 dump
= (dhd_dump_t
*)MALLOC(dhdp
->osh
, sizeof(dhd_dump_t
));
17994 if (dump
== NULL
) {
17995 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__
));
17999 dump
->bufsize
= size
;
18000 #ifdef DHD_LOG_DUMP
18001 dhd_print_buf_addr(dhdp
, "memdump", buf
, size
);
18002 #endif /* DHD_LOG_DUMP */
18004 if (dhdp
->memdump_enabled
== DUMP_MEMONLY
) {
18008 #if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM)
18010 #if defined(DEBUG_DNGL_INIT_FAIL)
18011 (dhdp
->memdump_type
== DUMP_TYPE_DONGLE_INIT_FAILURE
) ||
18012 #endif /* DEBUG_DNGL_INIT_FAIL */
18014 (dhdp
->memdump_type
== DUMP_TYPE_DUE_TO_BT
) ||
18015 #endif /* DHD_ERPOM */
18018 #ifdef DHD_LOG_DUMP
18019 log_dump_type_t
*flush_type
= NULL
;
18021 dhd_info
->scheduled_memdump
= FALSE
;
18022 dhd_mem_dump((void *)dhdp
->info
, (void *)dump
, 0);
18023 /* for dongle init fail cases, 'dhd_mem_dump' does
18024 * not call 'dhd_log_dump', so call it here.
18026 #ifdef DHD_LOG_DUMP
18027 flush_type
= MALLOCZ(dhdp
->osh
,
18028 sizeof(log_dump_type_t
));
18030 *flush_type
= DLD_BUF_TYPE_ALL
;
18031 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__
));
18032 dhd_log_dump(dhdp
->info
, flush_type
, 0);
18034 #endif /* DHD_LOG_DUMP */
18037 #endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */
18039 dhd_info
->scheduled_memdump
= TRUE
;
18040 /* bus busy bit for mem dump will be cleared in mem dump
18041 * work item context, after mem dump file is written
18043 DHD_GENERAL_LOCK(dhdp
, flags
);
18044 DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp
);
18045 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18046 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__
));
18047 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dump
,
18048 DHD_WQ_WORK_SOC_RAM_DUMP
, dhd_mem_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
18051 dhd_mem_dump(void *handle
, void *event_info
, u8 event
)
18053 dhd_info_t
*dhd
= handle
;
18054 dhd_pub_t
*dhdp
= NULL
;
18055 dhd_dump_t
*dump
= event_info
;
18056 unsigned long flags
= 0;
18058 DHD_ERROR(("%s: ENTER \n", __FUNCTION__
));
18061 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
18067 DHD_GENERAL_LOCK(dhdp
, flags
);
18068 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
18069 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18070 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__
));
18073 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18075 #ifdef D2H_MINIDUMP
18076 /* dump minidump */
18077 if (dhd_bus_is_minidump_enabled(dhdp
)) {
18078 dhd_d2h_minidump(&dhd
->pub
);
18080 DHD_ERROR(("minidump is not enabled\n"));
18082 #endif /* D2H_MINIDUMP */
18085 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__
));
18089 if (write_dump_to_file(&dhd
->pub
, dump
->buf
, dump
->bufsize
, "mem_dump")) {
18090 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__
));
18091 #ifdef DHD_DEBUG_UART
18092 dhd
->pub
.memdump_success
= FALSE
;
18093 #endif /* DHD_DEBUG_UART */
18096 /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
18097 * context, no need to schedule another work queue for log dump. In case of
18098 * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
18099 * cfg layer is itself scheduling the log_dump work queue.
18100 * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
18101 * collect debug_dump as it may be called from non-sleepable context.
18103 #ifdef DHD_LOG_DUMP
18104 if (dhd
->scheduled_memdump
&&
18105 dhdp
->memdump_type
!= DUMP_TYPE_BY_SYSDUMP
) {
18106 log_dump_type_t
*flush_type
= MALLOCZ(dhdp
->osh
,
18107 sizeof(log_dump_type_t
));
18109 *flush_type
= DLD_BUF_TYPE_ALL
;
18110 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__
));
18111 dhd_log_dump(dhd
, flush_type
, 0);
18114 #endif /* DHD_LOG_DUMP */
18116 #ifdef DHD_PKT_LOGGING
18117 copy_debug_dump_time(dhdp
->debug_dump_time_pktlog_str
, dhdp
->debug_dump_time_str
);
18118 #endif /* DHD_PKT_LOGGING */
18119 clear_debug_dump_time(dhdp
->debug_dump_time_str
);
18121 /* before calling bug on, wait for other logs to be dumped.
18122 * we cannot wait in case dhd_mem_dump is called directly
18123 * as it may not be in a sleepable context
18125 if (dhd
->scheduled_memdump
) {
18128 #ifdef DHD_SSSR_DUMP
18129 bitmask
|= DHD_BUS_BUSY_IN_SSSRDUMP
;
18131 if (bitmask
!= 0) {
18132 timeleft
= dhd_os_busbusy_wait_bitmask(dhdp
,
18133 &dhdp
->dhd_bus_busy_state
, bitmask
, 0);
18134 if ((timeleft
== 0) || (timeleft
== 1)) {
18135 DHD_ERROR(("%s:Timed out on sssr dump,dhd_bus_busy_state=0x%x\n",
18136 __FUNCTION__
, dhdp
->dhd_bus_busy_state
));
18141 if (dhd
->pub
.memdump_enabled
== DUMP_MEMFILE_BUGON
&&
18142 #ifdef DHD_LOG_DUMP
18143 dhd
->pub
.memdump_type
!= DUMP_TYPE_BY_SYSDUMP
&&
18144 #endif /* DHD_LOG_DUMP */
18145 dhd
->pub
.memdump_type
!= DUMP_TYPE_BY_USER
&&
18146 #ifdef DHD_DEBUG_UART
18147 dhd
->pub
.memdump_success
== TRUE
&&
18148 #endif /* DHD_DEBUG_UART */
18149 #ifdef DNGL_EVENT_SUPPORT
18150 dhd
->pub
.memdump_type
!= DUMP_TYPE_DONGLE_HOST_EVENT
&&
18151 #endif /* DNGL_EVENT_SUPPORT */
18152 dhd
->pub
.memdump_type
!= DUMP_TYPE_CFG_VENDOR_TRIGGERED
) {
18154 #ifdef SHOW_LOGTRACE
18155 /* Wait till event_log_dispatcher_work finishes */
18156 cancel_delayed_work_sync(&dhd
->event_log_dispatcher_work
);
18157 #endif /* SHOW_LOGTRACE */
18164 MFREE(dhd
->pub
.osh
, dump
, sizeof(dhd_dump_t
));
18165 DHD_GENERAL_LOCK(dhdp
, flags
);
18166 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd
->pub
);
18167 dhd_os_busbusy_wake(dhdp
);
18168 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18169 dhd
->scheduled_memdump
= FALSE
;
18171 #endif /* DHD_FW_COREDUMP */
18173 #ifdef D2H_MINIDUMP
18175 dhd_d2h_minidump(dhd_pub_t
*dhdp
)
18177 char d2h_minidump
[128];
18178 dhd_dma_buf_t
*minidump_buf
;
18180 minidump_buf
= dhd_prot_get_minidump_buf(dhdp
);
18181 if (minidump_buf
->va
== NULL
) {
18182 DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__
));
18186 /* Init file name */
18187 memset(d2h_minidump
, 0, sizeof(d2h_minidump
));
18188 snprintf(d2h_minidump
, sizeof(d2h_minidump
), "%s", "d2h_minidump");
18190 if (write_dump_to_file(dhdp
, (uint8
*)minidump_buf
->va
,
18191 BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN
, d2h_minidump
)) {
18192 DHD_ERROR(("%s: failed to dump d2h_minidump to file\n",
18196 #endif /* D2H_MINIDUMP */
18198 #ifdef DHD_SSSR_DUMP
18201 dhd_sssr_dump(void *handle
, void *event_info
, u8 event
)
18203 dhd_info_t
*dhd
= handle
;
18206 char before_sr_dump
[128];
18207 char after_sr_dump
[128];
18208 unsigned long flags
= 0;
18209 uint dig_buf_size
= 0;
18211 DHD_ERROR(("%s: ENTER \n", __FUNCTION__
));
18214 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
18220 DHD_GENERAL_LOCK(dhdp
, flags
);
18221 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
18222 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18223 DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__
));
18226 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18228 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
18229 /* Init file name */
18230 memset(before_sr_dump
, 0, sizeof(before_sr_dump
));
18231 memset(after_sr_dump
, 0, sizeof(after_sr_dump
));
18233 snprintf(before_sr_dump
, sizeof(before_sr_dump
), "%s_%d_%s",
18234 "sssr_core", i
, "before_SR");
18235 snprintf(after_sr_dump
, sizeof(after_sr_dump
), "%s_%d_%s",
18236 "sssr_core", i
, "after_SR");
18238 if (dhdp
->sssr_d11_before
[i
] && dhdp
->sssr_d11_outofreset
[i
]) {
18239 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_d11_before
[i
],
18240 dhdp
->sssr_reg_info
.mac_regs
[i
].sr_size
, before_sr_dump
)) {
18241 DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
18245 if (dhdp
->sssr_d11_after
[i
] && dhdp
->sssr_d11_outofreset
[i
]) {
18246 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_d11_after
[i
],
18247 dhdp
->sssr_reg_info
.mac_regs
[i
].sr_size
, after_sr_dump
)) {
18248 DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
18254 if (dhdp
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
18255 dig_buf_size
= dhdp
->sssr_reg_info
.vasip_regs
.vasip_sr_size
;
18256 } else if ((dhdp
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
18257 dhdp
->sssr_reg_info
.dig_mem_info
.dig_sr_size
) {
18258 dig_buf_size
= dhdp
->sssr_reg_info
.dig_mem_info
.dig_sr_size
;
18261 if (dhdp
->sssr_dig_buf_before
) {
18262 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_dig_buf_before
,
18263 dig_buf_size
, "sssr_dig_before_SR")) {
18264 DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
18269 if (dhdp
->sssr_dig_buf_after
) {
18270 if (write_dump_to_file(dhdp
, (uint8
*)dhdp
->sssr_dig_buf_after
,
18271 dig_buf_size
, "sssr_dig_after_SR")) {
18272 DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
18278 DHD_GENERAL_LOCK(dhdp
, flags
);
18279 DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp
);
18280 dhd_os_busbusy_wake(dhdp
);
18281 DHD_GENERAL_UNLOCK(dhdp
, flags
);
/*
 * Kick off an SSSR (silicon save/restore) dump. Marks the bus busy first;
 * the busy bit is cleared by dhd_sssr_dump() itself when the dump files
 * have been written. If no_wq_sssrdump is set (caller context cannot
 * tolerate deferral), the dump runs synchronously; otherwise it is queued
 * on the deferred work queue at high priority.
 *
 * NOTE(review): return type and the early 'return' after the synchronous
 * call were elided in extraction and are reconstructed — confirm upstream.
 */
void
dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
{
	unsigned long flags = 0;

	/* bus busy bit for sssr dump will be cleared in sssr dump
	 * work item context, after sssr dump files are created
	 */
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

	if (dhdp->info->no_wq_sssrdump) {
		dhd_sssr_dump(dhdp->info, 0, 0);
		return;
	}

	DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
		DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
18305 #endif /* DHD_SSSR_DUMP */
18307 #ifdef DHD_LOG_DUMP
/*
 * Deferred-work handler for DHD_WQ_WORK_DHD_LOG_DUMP: flush firmware-side
 * logs, then write the debug dump to file via do_dhd_log_dump() under the
 * logdump mutex and a wakelock.
 * handle      - dhd_info_t* of the adapter
 * event_info  - log_dump_type_t* selecting which buffers to dump
 *               (ownership note: freed downstream, not here)
 *
 * NOTE(review): return type (static void per the deferred-work callback
 * signature) and the early-return braces were elided in extraction and
 * are reconstructed — confirm against upstream.
 */
static void
dhd_log_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	log_dump_type_t *type = (log_dump_type_t *)event_info;

	if (!dhd || !type) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	/* flush the fw side logs */
	wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
		FW_LOGSET_MASK_ALL);

	/* there are currently 3 possible contexts from which
	 * log dump can be scheduled -
	 * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
	 * 3.HEALTH CHECK event
	 * The concise debug info buffer is a shared resource
	 * and in case a trap is one of the contexts then both the
	 * scheduled work queues need to run because trap data is
	 * essential for debugging. Hence a mutex lock is acquired
	 * before calling do_dhd_log_dump().
	 */
	DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
	dhd_os_logdump_lock(&dhd->pub);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
		DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
	}
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_os_logdump_unlock(&dhd->pub);
}
/*
 * Queue a debug log dump on the deferred work queue at high priority.
 * 'type' (a log_dump_type_t*, passed through as the work item payload)
 * selects which log buffers dhd_log_dump() will flush to file.
 */
void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
{
	DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		type, DHD_WQ_WORK_DHD_LOG_DUMP,
		dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
/*
 * Log the virtual and physical address plus size of a debug buffer, so the
 * buffer can be located in a subsequent RAM dump. Only emitted when a
 * memdump mode is enabled (DUMP_MEMONLY / DUMP_MEMFILE_BUGON), and only on
 * ARM64 / ARMv7 where __virt_to_phys() is available; a no-op elsewhere.
 *
 * NOTE(review): return type was elided in extraction; reconstructed as
 * void since no caller in this file consumes a result.
 */
void
dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
{
	if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
		(dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)) {
#if defined(CONFIG_ARM64)
		DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
			name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
		DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
			name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	}
}
18368 dhd_log_dump_buf_addr(dhd_pub_t
*dhdp
, log_dump_type_t
*type
)
18371 unsigned long wr_size
= 0;
18372 struct dhd_log_dump_buf
*dld_buf
= &g_dld_buf
[0];
18373 size_t log_size
= 0;
18374 char buf_name
[DHD_PRINT_BUF_NAME_LEN
];
18375 dhd_dbg_ring_t
*ring
= NULL
;
18377 BCM_REFERENCE(ring
);
18379 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
18380 dld_buf
= &g_dld_buf
[i
];
18381 log_size
= (unsigned long)dld_buf
->max
-
18382 (unsigned long)dld_buf
->buffer
;
18383 if (dld_buf
->wraparound
) {
18384 wr_size
= log_size
;
18386 wr_size
= (unsigned long)dld_buf
->present
-
18387 (unsigned long)dld_buf
->front
;
18389 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d]", i
);
18390 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
, dld_buf_size
[i
]);
18391 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d] buffer", i
);
18392 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
->buffer
, wr_size
);
18393 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d] present", i
);
18394 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
->present
, wr_size
);
18395 scnprintf(buf_name
, sizeof(buf_name
), "dlb_buf[%d] front", i
);
18396 dhd_print_buf_addr(dhdp
, buf_name
, dld_buf
->front
, wr_size
);
18399 #ifdef DEBUGABILITY_ECNTRS_LOGGING
18400 /* periodic flushing of ecounters is NOT supported */
18401 if (*type
== DLD_BUF_TYPE_ALL
&&
18402 logdump_ecntr_enable
&&
18403 dhdp
->ecntr_dbg_ring
) {
18405 ring
= (dhd_dbg_ring_t
*)dhdp
->ecntr_dbg_ring
;
18406 dhd_print_buf_addr(dhdp
, "ecntr_dbg_ring", ring
, LOG_DUMP_ECNTRS_MAX_BUFSIZE
);
18407 dhd_print_buf_addr(dhdp
, "ecntr_dbg_ring ring_buf", ring
->ring_buf
,
18408 LOG_DUMP_ECNTRS_MAX_BUFSIZE
);
18410 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
18413 if (dhdp
->dongle_trap_occured
&& dhdp
->extended_trap_data
) {
18414 dhd_print_buf_addr(dhdp
, "extended_trap_data", dhdp
->extended_trap_data
,
18415 BCMPCIE_EXT_TRAP_DATA_MAXLEN
);
18417 #endif /* BCMPCIE */
18419 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18420 /* if health check event was received */
18421 if (dhdp
->memdump_type
== DUMP_TYPE_DONGLE_HOST_EVENT
) {
18422 dhd_print_buf_addr(dhdp
, "health_chk_event_data", dhdp
->health_chk_event_data
,
18423 HEALTH_CHK_BUF_SIZE
);
18425 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18427 /* append the concise debug information */
18428 if (dhdp
->concise_dbg_buf
) {
18429 dhd_print_buf_addr(dhdp
, "concise_dbg_buf", dhdp
->concise_dbg_buf
,
18430 CONCISE_DUMP_BUFLEN
);
18434 #ifdef CUSTOMER_HW4_DEBUG
18436 dhd_log_dump_print_to_kmsg(char *bufptr
, unsigned long len
)
18438 char tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
+ 1];
18440 unsigned long plen
= 0;
18442 if (!bufptr
|| !len
)
18445 memset(tmp_buf
, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
);
18446 end
= bufptr
+ len
;
18447 while (bufptr
< end
) {
18448 if ((bufptr
+ DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
) < end
) {
18449 memcpy(tmp_buf
, bufptr
, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
);
18450 tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
] = '\0';
18451 printf("%s", tmp_buf
);
18452 bufptr
+= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
;
18454 plen
= (unsigned long)end
- (unsigned long)bufptr
;
18455 memcpy(tmp_buf
, bufptr
, plen
);
18456 tmp_buf
[plen
] = '\0';
18457 printf("%s", tmp_buf
);
18464 dhd_log_dump_print_tail(dhd_pub_t
*dhdp
,
18465 struct dhd_log_dump_buf
*dld_buf
,
18468 char *flush_ptr1
= NULL
, *flush_ptr2
= NULL
;
18469 unsigned long len_flush1
= 0, len_flush2
= 0;
18470 unsigned long flags
= 0;
18472 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18473 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18474 flush_ptr1
= dld_buf
->present
- tail_len
;
18475 if (flush_ptr1
>= dld_buf
->front
) {
18476 /* tail content is within the buffer */
18478 len_flush1
= tail_len
;
18479 } else if (dld_buf
->wraparound
) {
18480 /* tail content spans the buffer length i.e, wrap around */
18481 flush_ptr1
= dld_buf
->front
;
18482 len_flush1
= (unsigned long)dld_buf
->present
- (unsigned long)flush_ptr1
;
18483 len_flush2
= (unsigned long)tail_len
- len_flush1
;
18484 flush_ptr2
= (char *)((unsigned long)dld_buf
->max
-
18485 (unsigned long)len_flush2
);
18487 /* amt of logs in buffer is less than tail size */
18488 flush_ptr1
= dld_buf
->front
;
18490 len_flush1
= (unsigned long)dld_buf
->present
- (unsigned long)dld_buf
->front
;
18492 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18494 printf("\n================= LOG_DUMP tail =================\n");
18496 dhd_log_dump_print_to_kmsg(flush_ptr2
, len_flush2
);
18498 dhd_log_dump_print_to_kmsg(flush_ptr1
, len_flush1
);
18499 printf("\n===================================================\n");
18501 #endif /* CUSTOMER_HW4_DEBUG */
18503 /* Must hold 'dhd_os_logdump_lock' before calling this function ! */
18505 do_dhd_log_dump(dhd_pub_t
*dhdp
, log_dump_type_t
*type
)
18507 int ret
= 0, i
= 0;
18508 struct file
*fp
= NULL
;
18509 mm_segment_t old_fs
;
18511 unsigned int wr_size
= 0;
18512 char dump_path
[128];
18514 unsigned long flags
= 0;
18515 struct dhd_log_dump_buf
*dld_buf
= &g_dld_buf
[0];
18516 size_t log_size
= 0;
18517 size_t fspace_remain
= 0;
18519 char time_str
[128];
18521 uint32 remain_len
= 0;
18522 log_dump_section_hdr_t sec_hdr
;
18523 dhd_info_t
*dhd_info
= NULL
;
18525 DHD_ERROR(("%s: ENTER \n", __FUNCTION__
));
18527 /* if dhdp is null, its extremely unlikely that log dump will be scheduled
18528 * so not freeing 'type' here is ok, even if we want to free 'type'
18529 * we cannot do so, since 'dhdp->osh' is unavailable
18532 if (!dhdp
|| !type
) {
18534 DHD_GENERAL_LOCK(dhdp
, flags
);
18535 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp
);
18536 dhd_os_busbusy_wake(dhdp
);
18537 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18542 DHD_GENERAL_LOCK(dhdp
, flags
);
18543 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
18544 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp
);
18545 dhd_os_busbusy_wake(dhdp
);
18546 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18547 MFREE(dhdp
->osh
, type
, sizeof(*type
));
18548 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__
));
18551 DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp
);
18552 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18554 dhd_info
= (dhd_info_t
*)dhdp
->info
;
18555 BCM_REFERENCE(dhd_info
);
18557 /* in case of trap get preserve logs from ETD */
18558 #if defined(BCMPCIE) && defined(DEBUGABILITY_ETD_PRSRV_LOGS)
18559 if (dhdp
->dongle_trap_occured
&&
18560 dhdp
->extended_trap_data
) {
18561 dhdpcie_get_etd_preserve_logs(dhdp
, (uint8
*)dhdp
->extended_trap_data
,
18562 &dhd_info
->event_data
);
18564 #endif /* BCMPCIE */
18566 #ifdef SHOW_LOGTRACE
18567 /* flush the event work items to get any fw events/logs
18568 * flush_work is a blocking call
18570 flush_delayed_work(&dhd_info
->event_log_dispatcher_work
);
18571 #endif /* SHOW_LOGTRACE */
18573 #ifdef CUSTOMER_HW4_DEBUG
18574 /* print last 'x' KB of preserve buffer data to kmsg console
18575 * this is to address cases where debug_dump is not
18576 * available for debugging
18578 dhd_log_dump_print_tail(dhdp
,
18579 &g_dld_buf
[DLD_BUF_TYPE_PRESERVE
], logdump_prsrv_tailsize
);
18580 #endif /* CUSTOMER_HW4_DEBUG */
18582 /* change to KERNEL_DS address limit */
18586 /* Init file name */
18587 memset(dump_path
, 0, sizeof(dump_path
));
18588 switch (dhdp
->debug_dump_subcmd
) {
18590 snprintf(dump_path
, sizeof(dump_path
), "%s",
18591 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18592 DHD_DUMP_SUBSTR_UNWANTED
);
18594 case CMD_DISCONNECTED
:
18595 snprintf(dump_path
, sizeof(dump_path
), "%s",
18596 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18597 DHD_DUMP_SUBSTR_DISCONNECTED
);
18600 snprintf(dump_path
, sizeof(dump_path
), "%s",
18601 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
);
18604 if (!dhdp
->logdump_periodic_flush
) {
18605 get_debug_dump_time(dhdp
->debug_dump_time_str
);
18606 snprintf(dump_path
+ strlen(dump_path
),
18607 sizeof(dump_path
) - strlen(dump_path
),
18608 "_%s", dhdp
->debug_dump_time_str
);
18611 memset(time_str
, 0, sizeof(time_str
));
18612 ts
= dhd_log_dump_get_timestamp();
18613 snprintf(time_str
, sizeof(time_str
),
18614 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts
);
18616 DHD_ERROR(("DHD version: %s\n", dhd_version
));
18617 DHD_ERROR(("F/W version: %s\n", fw_version
));
18618 DHD_ERROR(("debug_dump_path = %s\n", dump_path
));
18620 dhd_log_dump_buf_addr(dhdp
, type
);
18622 /* if this is the first time after dhd is loaded,
18623 * or, if periodic flush is disabled, clear the log file
18625 if (!dhdp
->logdump_periodic_flush
|| dhdp
->last_file_posn
== 0)
18626 file_mode
= O_CREAT
| O_WRONLY
| O_SYNC
| O_TRUNC
;
18628 file_mode
= O_CREAT
| O_RDWR
| O_SYNC
;
18630 fp
= filp_open(dump_path
, file_mode
, 0664);
18632 /* If android installed image, try '/data' directory */
18633 #if defined(CONFIG_X86)
18634 DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n",
18636 snprintf(dump_path
, sizeof(dump_path
), "/data/" DHD_DEBUG_DUMP_TYPE
);
18637 if (!dhdp
->logdump_periodic_flush
) {
18638 snprintf(dump_path
+ strlen(dump_path
),
18639 sizeof(dump_path
) - strlen(dump_path
),
18640 "_%s", dhdp
->debug_dump_time_str
);
18642 fp
= filp_open(dump_path
, file_mode
, 0664);
18645 DHD_ERROR(("open file error, err = %d\n", ret
));
18648 DHD_ERROR(("debug_dump_path = %s\n", dump_path
));
18651 DHD_ERROR(("open file error, err = %d\n", ret
));
18653 #endif /* CONFIG_X86 && OEM_ANDROID */
18656 ret
= vfs_stat(dump_path
, &stat
);
18658 DHD_ERROR(("file stat error, err = %d\n", ret
));
18662 /* if some one else has changed the file */
18663 if (dhdp
->last_file_posn
!= 0 &&
18664 stat
.size
< dhdp
->last_file_posn
) {
18665 dhdp
->last_file_posn
= 0;
18668 if (dhdp
->logdump_periodic_flush
) {
18669 log_size
= strlen(time_str
) + strlen(DHD_DUMP_LOG_HDR
) + sizeof(sec_hdr
);
18670 /* calculate the amount of space required to dump all logs */
18671 for (i
= 0; i
< DLD_BUFFER_NUM
; ++i
) {
18672 if (*type
!= DLD_BUF_TYPE_ALL
&& i
!= *type
)
18675 if (g_dld_buf
[i
].wraparound
) {
18676 log_size
+= (unsigned long)g_dld_buf
[i
].max
18677 - (unsigned long)g_dld_buf
[i
].buffer
;
18679 spin_lock_irqsave(&g_dld_buf
[i
].lock
, flags
);
18680 log_size
+= (unsigned long)g_dld_buf
[i
].present
-
18681 (unsigned long)g_dld_buf
[i
].front
;
18682 spin_unlock_irqrestore(&g_dld_buf
[i
].lock
, flags
);
18684 log_size
+= strlen(dld_hdrs
[i
].hdr_str
) + sizeof(sec_hdr
);
18686 if (*type
!= DLD_BUF_TYPE_ALL
&& i
== *type
)
18690 ret
= generic_file_llseek(fp
, dhdp
->last_file_posn
, SEEK_CUR
);
18692 DHD_ERROR(("file seek last posn error ! err = %d \n", ret
));
18697 /* if the max file size is reached, wrap around to beginning of the file
18698 * we're treating the file as a large ring buffer
18700 fspace_remain
= logdump_max_filesize
- pos
;
18701 if (log_size
> fspace_remain
) {
18706 /* write the timestamp hdr to the file first */
18707 ret
= vfs_write(fp
, time_str
, strlen(time_str
), &pos
);
18709 DHD_ERROR(("write file error, err = %d\n", ret
));
18713 /* prep the section header */
18714 memset(&sec_hdr
, 0, sizeof(sec_hdr
));
18715 sec_hdr
.magic
= LOG_DUMP_MAGIC
;
18716 sec_hdr
.timestamp
= local_clock();
18718 for (i
= 0; i
< DLD_BUFFER_NUM
; ++i
) {
18719 unsigned int buf_size
= 0;
18721 if (*type
!= DLD_BUF_TYPE_ALL
&& i
!= *type
)
18724 /* calculate the length of the log */
18725 dld_buf
= &g_dld_buf
[i
];
18726 buf_size
= (unsigned long)dld_buf
->max
-
18727 (unsigned long)dld_buf
->buffer
;
18728 if (dld_buf
->wraparound
) {
18729 wr_size
= buf_size
;
18731 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18732 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18733 wr_size
= (unsigned long)dld_buf
->present
-
18734 (unsigned long)dld_buf
->front
;
18735 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18738 /* write the section header first */
18739 sec_hdr
.type
= dld_hdrs
[i
].sec_type
;
18740 sec_hdr
.length
= wr_size
;
18741 vfs_write(fp
, dld_hdrs
[i
].hdr_str
, strlen(dld_hdrs
[i
].hdr_str
), &pos
);
18742 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18743 /* write the log */
18744 ret
= vfs_write(fp
, dld_buf
->buffer
, wr_size
, &pos
);
18746 DHD_ERROR(("write file error, err = %d\n", ret
));
18750 /* re-init dhd_log_dump_buf structure */
18751 spin_lock_irqsave(&dld_buf
->lock
, flags
);
18752 dld_buf
->wraparound
= 0;
18753 dld_buf
->present
= dld_buf
->front
;
18754 dld_buf
->remain
= buf_size
;
18755 bzero(dld_buf
->buffer
, buf_size
);
18756 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
18758 if (*type
!= DLD_BUF_TYPE_ALL
)
18762 #ifdef DEBUGABILITY_ECNTRS_LOGGING
18763 /* periodic flushing of ecounters is NOT supported */
18764 if (*type
== DLD_BUF_TYPE_ALL
&&
18765 logdump_ecntr_enable
&&
18766 dhdp
->ecntr_dbg_ring
) {
18767 dhd_log_dump_ring_to_file(dhdp
, dhdp
->ecntr_dbg_ring
,
18768 fp
, (unsigned long *)&pos
, &sec_hdr
);
18770 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
18773 /* append extended trap data to the file in case of traps */
18774 if (dhdp
->dongle_trap_occured
&&
18775 dhdp
->extended_trap_data
) {
18776 /* write the section header first */
18777 vfs_write(fp
, EXT_TRAP_LOG_HDR
, strlen(EXT_TRAP_LOG_HDR
), &pos
);
18778 sec_hdr
.type
= LOG_DUMP_SECTION_EXT_TRAP
;
18779 sec_hdr
.length
= BCMPCIE_EXT_TRAP_DATA_MAXLEN
;
18780 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18781 /* write the log */
18782 ret
= vfs_write(fp
, (char *)dhdp
->extended_trap_data
,
18783 BCMPCIE_EXT_TRAP_DATA_MAXLEN
, &pos
);
18785 DHD_ERROR(("write file error of ext trap info,"
18786 " err = %d\n", ret
));
18790 #endif /* BCMPCIE */
18792 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18793 /* if health check event was received, dump to file */
18794 if (dhdp
->memdump_type
== DUMP_TYPE_DONGLE_HOST_EVENT
) {
18795 /* write the section header first */
18796 vfs_write(fp
, HEALTH_CHK_LOG_HDR
, strlen(HEALTH_CHK_LOG_HDR
), &pos
);
18797 sec_hdr
.type
= LOG_DUMP_SECTION_HEALTH_CHK
;
18798 sec_hdr
.length
= HEALTH_CHK_BUF_SIZE
;
18799 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18800 /* write the log */
18801 ret
= vfs_write(fp
, (char *)dhdp
->health_chk_event_data
,
18802 HEALTH_CHK_BUF_SIZE
, &pos
);
18804 DHD_ERROR(("write file error of health chk info,"
18805 " err = %d\n", ret
));
18809 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18811 #ifdef DHD_DUMP_PCIE_RINGS
18812 /* write the section header first */
18813 vfs_write(fp
, FLOWRING_DUMP_HDR
, strlen(FLOWRING_DUMP_HDR
), &pos
);
18814 /* Write the ring summary */
18815 ret
= vfs_write(fp
, dhdp
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
- remain_len
, &pos
);
18817 DHD_ERROR(("write file error of concise debug info,"
18818 " err = %d\n", ret
));
18821 sec_hdr
.type
= LOG_DUMP_SECTION_FLOWRING
;
18822 sec_hdr
.length
= ((H2DRING_TXPOST_ITEMSIZE
18823 * H2DRING_TXPOST_MAX_ITEM
)
18824 + (D2HRING_TXCMPLT_ITEMSIZE
18825 * D2HRING_TXCMPLT_MAX_ITEM
)
18826 + (H2DRING_RXPOST_ITEMSIZE
18827 * H2DRING_RXPOST_MAX_ITEM
)
18828 + (D2HRING_RXCMPLT_ITEMSIZE
18829 * D2HRING_RXCMPLT_MAX_ITEM
)
18830 + (H2DRING_CTRL_SUB_ITEMSIZE
18831 * H2DRING_CTRL_SUB_MAX_ITEM
)
18832 + (D2HRING_CTRL_CMPLT_ITEMSIZE
18833 * D2HRING_CTRL_CMPLT_MAX_ITEM
)
18834 + (H2DRING_INFO_BUFPOST_ITEMSIZE
18835 * H2DRING_DYNAMIC_INFO_MAX_ITEM
)
18836 + (D2HRING_INFO_BUFCMPLT_ITEMSIZE
18837 * D2HRING_DYNAMIC_INFO_MAX_ITEM
));
18838 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18839 /* write the log */
18840 ret
= dhd_d2h_h2d_ring_dump(dhdp
, fp
, (unsigned long *)&pos
);
18842 DHD_ERROR(("%s: error dumping ring data!\n",
18846 #endif /* DHD_DUMP_PCIE_RINGS */
18848 /* append the concise debug information to the file.
18849 * This is the information which is seen
18850 * when a 'dhd dump' iovar is fired
18852 if (dhdp
->concise_dbg_buf
) {
18853 remain_len
= dhd_dump(dhdp
, (char *)dhdp
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
);
18854 if (remain_len
<= 0) {
18855 DHD_ERROR(("%s: error getting concise debug info !\n",
18859 /* write the section header first */
18860 vfs_write(fp
, DHD_DUMP_LOG_HDR
, strlen(DHD_DUMP_LOG_HDR
), &pos
);
18861 sec_hdr
.type
= LOG_DUMP_SECTION_DHD_DUMP
;
18862 sec_hdr
.length
= CONCISE_DUMP_BUFLEN
- remain_len
;
18863 vfs_write(fp
, (char *)&sec_hdr
, sizeof(sec_hdr
), &pos
);
18864 /* write the log */
18865 ret
= vfs_write(fp
, dhdp
->concise_dbg_buf
,
18866 CONCISE_DUMP_BUFLEN
- remain_len
, &pos
);
18868 DHD_ERROR(("write file error of concise debug info,"
18869 " err = %d\n", ret
));
18875 if (dhdp
->logdump_cookie
&& dhd_logdump_cookie_count(dhdp
) > 0) {
18876 ret
= dhd_log_dump_cookie_to_file(dhdp
, fp
, (unsigned long *)&pos
);
18878 DHD_ERROR(("write file error of cooke info, err = %d\n", ret
));
18883 if (dhdp
->logdump_periodic_flush
) {
18884 /* store the last position written to in the file for future use */
18885 dhdp
->last_file_posn
= pos
;
18889 MFREE(dhdp
->osh
, type
, sizeof(*type
));
18890 if (!IS_ERR(fp
) && fp
!= NULL
) {
18891 filp_close(fp
, NULL
);
18892 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
18893 __FUNCTION__
, dump_path
));
18896 DHD_GENERAL_LOCK(dhdp
, flags
);
18897 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp
);
18898 dhd_os_busbusy_wake(dhdp
);
18899 DHD_GENERAL_UNLOCK(dhdp
, flags
);
18901 #ifdef DHD_DUMP_MNGR
18903 dhd_dump_file_manage_enqueue(dhdp
, dump_path
, DHD_DEBUG_DUMP_TYPE
);
18905 #endif /* DHD_DUMP_MNGR */
18907 return (ret
< 0) ? BCME_ERROR
: BCME_OK
;
18909 #endif /* DHD_LOG_DUMP */
/*
 * NOTE(review): this whole region was mechanically re-wrapped by an
 * extraction tool -- one logical source line spans several physical lines
 * and some structural lines (braces, returns, declarations) are missing
 * from this view.  Only comments are added below; no code token is changed.
 */
18912 * This call is to get the memdump size so that,
18913 * halutil can alloc that much buffer in user space.
/*
 * dhd_os_socram_dump: trigger a SOC RAM dump via dhd_common_socram_dump()
 * and, on BCME_OK, report the captured length through *dump_size.
 * Bails out early when the bus is down or suspend is in progress.
 */
18916 dhd_os_socram_dump(struct net_device
*dev
, uint32
*dump_size
)
18919 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
18920 dhd_pub_t
*dhdp
= &dhd
->pub
;
/* Refuse to dump while the bus is unusable. */
18922 if (dhdp
->busstate
== DHD_BUS_DOWN
) {
18923 DHD_ERROR(("%s: bus is down\n", __FUNCTION__
));
18927 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
18928 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18929 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
))
;
/* Wake the PCIe bus first when runtime PM is compiled in. */
18932 #ifdef DHD_PCIE_RUNTIMEPM
18933 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, __builtin_return_address(0));
18934 #endif /* DHD_PCIE_RUNTIMEPM */
18935 ret
= dhd_common_socram_dump(dhdp
);
18936 if (ret
== BCME_OK
) {
18937 *dump_size
= dhdp
->soc_ram_length
;
18943 * This is to get the actual memdup after getting the memdump size
/*
 * dhd_os_get_socram_dump: copy the previously captured SOC RAM dump into
 * *buf when the caller's buffer (orig_len) is large enough, zero the stored
 * copy afterwards, and report the copied length through *size.  Returns
 * BCME_BUFTOOSHORT / BCME_NOTREADY in the visible error paths.
 */
18946 dhd_os_get_socram_dump(struct net_device
*dev
, char **buf
, uint32
*size
)
18950 dhd_info_t
*dhd
= *(dhd_info_t
**)netdev_priv(dev
);
18951 dhd_pub_t
*dhdp
= &dhd
->pub
;
18955 if (dhdp
->soc_ram
) {
18956 if (orig_len
>= dhdp
->soc_ram_length
) {
18957 memcpy(*buf
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
18958 /* reset the storage of dump */
18959 memset(dhdp
->soc_ram
, 0, dhdp
->soc_ram_length
);
18960 *size
= dhdp
->soc_ram_length
;
18962 ret
= BCME_BUFTOOSHORT
;
18963 DHD_ERROR(("The length of the buffer is too short"
18964 " to save the memory dump with %d\n", dhdp
->soc_ram_length
));
18967 DHD_ERROR(("socram_dump is not ready to get\n"));
18968 ret
= BCME_NOTREADY
;
/*
 * dhd_os_get_version: copy either the DHD driver version string or the
 * "Firmware: " portion of info_string into *buf (selected by dhd_ver).
 * The buffer is zeroed first and strncpy is bounded to size - 1, so the
 * result stays NUL-terminated.
 */
18974 dhd_os_get_version(struct net_device
*dev
, bool dhd_ver
, char **buf
, uint32 size
)
18979 return BCME_BADARG
;
18981 fw_str
= strstr(info_string
, "Firmware: ");
18982 if (fw_str
== NULL
) {
18986 memset(*buf
, 0, size
);
18988 strncpy(*buf
, dhd_version
, size
- 1);
18990 strncpy(*buf
, fw_str
, size
- 1);
/* TRUE iff a station entry for (bssidx, mac) exists in the STA list. */
18995 bool dhd_sta_associated(dhd_pub_t
*dhdp
, uint32 bssidx
, uint8
*mac
)
18997 return dhd_find_sta(dhdp
, bssidx
, mac
) ? TRUE
: FALSE
;
/*
 * DHD_L2_FILTER accessors: per-interface getters/setters for the L2 filter
 * features (proxy ARP, DHCP unicast conversion, ping blocking, gratuitous
 * ARP handling, TDLS blocking).  Each looks up the interface in
 * dhd->iflist[idx] after ASSERTing the index is below DHD_MAX_IFS.
 * (Extraction artifact: logical lines are split across physical lines;
 * comments only were added.)
 */
19000 #ifdef DHD_L2_FILTER
/* Return the proxy-ARP table handle attached to interface 'bssidx'. */
19002 dhd_get_ifp_arp_table_handle(dhd_pub_t
*dhdp
, uint32 bssidx
)
19004 dhd_info_t
*dhd
= dhdp
->info
;
19007 ASSERT(bssidx
< DHD_MAX_IFS
);
19009 ifp
= dhd
->iflist
[bssidx
];
19010 return ifp
->phnd_arp_table
;
/* Get the proxy-ARP enable flag for interface 'idx'. */
19013 int dhd_get_parp_status(dhd_pub_t
*dhdp
, uint32 idx
)
19015 dhd_info_t
*dhd
= dhdp
->info
;
19018 ASSERT(idx
< DHD_MAX_IFS
);
19020 ifp
= dhd
->iflist
[idx
];
19023 return ifp
->parp_enable
;
19028 /* Set interface specific proxy arp configuration */
19029 int dhd_set_parp_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19031 dhd_info_t
*dhd
= dhdp
->info
;
19033 ASSERT(idx
< DHD_MAX_IFS
);
19034 ifp
= dhd
->iflist
[idx
];
19039 /* At present all 3 variables are being
/* enable/discard/allnode are set together from the single 'val'. */
19042 ifp
->parp_enable
= val
;
19043 ifp
->parp_discard
= val
;
19044 ifp
->parp_allnode
= val
;
19046 /* Flush ARP entries when disabled */
19047 if (val
== FALSE
) {
19048 bcm_l2_filter_arp_table_update(dhdp
->osh
, ifp
->phnd_arp_table
, TRUE
, NULL
,
19049 FALSE
, dhdp
->tickcnt
);
/* Whether proxy-ARP 'discard' is enabled on interface 'idx'. */
19054 bool dhd_parp_discard_is_enabled(dhd_pub_t
*dhdp
, uint32 idx
)
19056 dhd_info_t
*dhd
= dhdp
->info
;
19059 ASSERT(idx
< DHD_MAX_IFS
);
19061 ifp
= dhd
->iflist
[idx
];
19064 return ifp
->parp_discard
;
/* Whether proxy-ARP 'allnode' is enabled on interface 'idx'. */
19068 dhd_parp_allnode_is_enabled(dhd_pub_t
*dhdp
, uint32 idx
)
19070 dhd_info_t
*dhd
= dhdp
->info
;
19073 ASSERT(idx
< DHD_MAX_IFS
);
19075 ifp
= dhd
->iflist
[idx
];
19079 return ifp
->parp_allnode
;
/* Get the DHCP-unicast conversion flag for interface 'idx'. */
19082 int dhd_get_dhcp_unicast_status(dhd_pub_t
*dhdp
, uint32 idx
)
19084 dhd_info_t
*dhd
= dhdp
->info
;
19087 ASSERT(idx
< DHD_MAX_IFS
);
19089 ifp
= dhd
->iflist
[idx
];
19093 return ifp
->dhcp_unicast
;
/* Set the DHCP-unicast conversion flag for interface 'idx'. */
19096 int dhd_set_dhcp_unicast_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19098 dhd_info_t
*dhd
= dhdp
->info
;
19100 ASSERT(idx
< DHD_MAX_IFS
);
19101 ifp
= dhd
->iflist
[idx
];
19105 ifp
->dhcp_unicast
= val
;
/* Get the ping-blocking flag for interface 'idx'. */
19109 int dhd_get_block_ping_status(dhd_pub_t
*dhdp
, uint32 idx
)
19111 dhd_info_t
*dhd
= dhdp
->info
;
19114 ASSERT(idx
< DHD_MAX_IFS
);
19116 ifp
= dhd
->iflist
[idx
];
19120 return ifp
->block_ping
;
/* Set the ping-blocking flag; also refreshes rx packet-chaining state. */
19123 int dhd_set_block_ping_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19125 dhd_info_t
*dhd
= dhdp
->info
;
19127 ASSERT(idx
< DHD_MAX_IFS
);
19128 ifp
= dhd
->iflist
[idx
];
19132 ifp
->block_ping
= val
;
19133 /* Disable rx_pkt_chain feature for interface if block_ping option is
19136 dhd_update_rx_pkt_chainable_state(dhdp
, idx
);
/* Get the gratuitous-ARP handling flag for interface 'idx'. */
19140 int dhd_get_grat_arp_status(dhd_pub_t
*dhdp
, uint32 idx
)
19142 dhd_info_t
*dhd
= dhdp
->info
;
19145 ASSERT(idx
< DHD_MAX_IFS
);
19147 ifp
= dhd
->iflist
[idx
];
19151 return ifp
->grat_arp
;
/* Set the gratuitous-ARP handling flag for interface 'idx'. */
19154 int dhd_set_grat_arp_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19156 dhd_info_t
*dhd
= dhdp
->info
;
19158 ASSERT(idx
< DHD_MAX_IFS
);
19159 ifp
= dhd
->iflist
[idx
];
19163 ifp
->grat_arp
= val
;
/* Get the TDLS-blocking flag for interface 'idx'. */
19168 int dhd_get_block_tdls_status(dhd_pub_t
*dhdp
, uint32 idx
)
19170 dhd_info_t
*dhd
= dhdp
->info
;
19173 ASSERT(idx
< DHD_MAX_IFS
);
19175 ifp
= dhd
->iflist
[idx
];
19179 return ifp
->block_tdls
;
/* Set the TDLS-blocking flag for interface 'idx'. */
19182 int dhd_set_block_tdls_status(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
19184 dhd_info_t
*dhd
= dhdp
->info
;
19186 ASSERT(idx
< DHD_MAX_IFS
);
19187 ifp
= dhd
->iflist
[idx
];
19191 ifp
->block_tdls
= val
;
19195 #endif /* DHD_L2_FILTER */
/*
 * RPS (Receive Packet Steering) helpers.  Comments only added; code tokens
 * untouched (extraction artifact: logical lines span physical lines).
 */
19197 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
/*
 * dhd_rps_cpus_enable: choose an RPS CPU-mask string by interface role
 * (primary in IBSS vs BSS mode, or the P2P virtual interface) and either
 * apply it to the netdev's rx queues (enable) or clear the current map.
 */
19198 int dhd_rps_cpus_enable(struct net_device
*net
, int enable
)
19200 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
19203 char * RPS_CPU_SETBUF
;
19205 ifidx
= dhd_net2idx(dhd
, net
);
19206 if (ifidx
== DHD_BAD_IF
) {
19207 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
/* Mask selection: primary interface differs for IBSS vs BSS. */
19211 if (ifidx
== PRIMARY_INF
) {
19212 if (dhd
->pub
.op_mode
== DHD_FLAG_IBSS_MODE
) {
19213 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__
));
19214 RPS_CPU_SETBUF
= RPS_CPUS_MASK_IBSS
;
19216 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__
));
19217 RPS_CPU_SETBUF
= RPS_CPUS_MASK
;
19219 } else if (ifidx
== VIRTUAL_INF
) {
19220 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__
));
19221 RPS_CPU_SETBUF
= RPS_CPUS_MASK_P2P
;
19223 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__
, ifidx
));
19227 ifp
= dhd
->iflist
[ifidx
];
19230 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__
, RPS_CPU_SETBUF
));
19231 custom_rps_map_set(ifp
->net
->_rx
, RPS_CPU_SETBUF
, strlen(RPS_CPU_SETBUF
));
19233 custom_rps_map_clear(ifp
->net
->_rx
);
19236 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__
));
/*
 * custom_rps_map_set: parse 'buf' (a hex CPU bitmap string) into a cpumask,
 * build an rps_map from the online CPUs in that mask, and publish it on the
 * rx queue with RCU (old map freed via kfree_rcu).  Adjusts the
 * 'rps_needed' static key to match whether a map is installed.
 */
19242 int custom_rps_map_set(struct netdev_rx_queue
*queue
, char *buf
, size_t len
)
19244 struct rps_map
*old_map
, *map
;
19245 cpumask_var_t mask
;
19247 static DEFINE_SPINLOCK(rps_map_lock
);
19249 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
19251 if (!alloc_cpumask_var(&mask
, GFP_KERNEL
)) {
19252 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__
));
19256 err
= bitmap_parse(buf
, len
, cpumask_bits(mask
), nr_cpumask_bits
);
19258 free_cpumask_var(mask
);
19259 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__
));
/* Allocation is at least one cache line to avoid false sharing. */
19263 map
= kzalloc(max_t(unsigned int,
19264 RPS_MAP_SIZE(cpumask_weight(mask
)), L1_CACHE_BYTES
),
19267 free_cpumask_var(mask
);
19268 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__
));
19273 for_each_cpu(cpu
, mask
) {
19274 map
->cpus
[i
++] = cpu
;
19282 free_cpumask_var(mask
);
19283 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__
));
/* Swap in the new map under the lock; readers use RCU. */
19287 spin_lock(&rps_map_lock
);
19288 old_map
= rcu_dereference_protected(queue
->rps_map
,
19289 lockdep_is_held(&rps_map_lock
));
19290 rcu_assign_pointer(queue
->rps_map
, map
);
19291 spin_unlock(&rps_map_lock
);
19294 static_key_slow_inc(&rps_needed
);
19297 kfree_rcu(old_map
, rcu
);
19298 static_key_slow_dec(&rps_needed
);
19300 free_cpumask_var(mask
);
19302 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__
, map
->len
));
/*
 * custom_rps_map_clear: detach the rx queue's rps_map (if any) and free it
 * after a grace period via kfree_rcu.
 */
19306 void custom_rps_map_clear(struct netdev_rx_queue
*queue
)
19308 struct rps_map
*map
;
19310 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
19312 map
= rcu_dereference_protected(queue
->rps_map
, 1);
19314 RCU_INIT_POINTER(queue
->rps_map
, NULL
);
19315 kfree_rcu(map
, rcu
);
19316 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__
));
19319 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
/*
 * ARGOS throughput-notifier plumbing and (under DYNAMIC_MUMIMO_CONTROL)
 * MU-MIMO RX beamformee-capability control driven by reported throughput.
 * Comments only added; code tokens untouched.
 */
19321 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
19322 defined(ARGOS_NOTIFY_CB)
/* Forward declarations for the notifier callbacks registered below. */
19324 static int argos_status_notifier_wifi_cb(struct notifier_block
*notifier
,
19325 unsigned long speed
, void *v
);
19326 static int argos_status_notifier_p2p_cb(struct notifier_block
*notifier
,
19327 unsigned long speed
, void *v
);
19328 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19329 static int argos_status_notifier_config_mumimo_cb(struct notifier_block
*notifier
,
19330 unsigned long speed
, void *v
);
19331 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19333 #ifdef DYNAMIC_MUMIMO_CONTROL
/* Debounce interval before a murx_bfe_cap change is actually applied. */
19334 #define MUMIMO_CONTROL_TIMER_INTERVAL_MS 5000
/* Timer callback: defer the actual iovar work to process context. */
19337 argos_config_mumimo_timer(unsigned long data
)
19339 argos_mumimo_ctrl
*ctrl_data
= (argos_mumimo_ctrl
*)data
;
19341 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
19342 schedule_work(&ctrl_data
->mumimo_ctrl_work
);
/* Workqueue handler: push cur_murx_bfe_cap to firmware via wl iovar. */
19346 argos_config_mumimo_handler(struct work_struct
*work
)
19348 argos_mumimo_ctrl
*ctrl_data
;
19349 struct net_device
*dev
;
19353 ctrl_data
= container_of(work
, argos_mumimo_ctrl
, mumimo_ctrl_work
);
19355 dev
= ctrl_data
->dev
;
19361 new_cap
= ctrl_data
->cur_murx_bfe_cap
;
19362 err
= wl_set_murx_bfe_cap(dev
, new_cap
, TRUE
);
19364 DHD_ERROR(("%s: Failed to set murx_bfe_cap to %d, err=%d\n",
19365 __FUNCTION__
, new_cap
, err
));
19367 DHD_ERROR(("%s: Newly configured murx_bfe_cap = %d\n",
19368 __FUNCTION__
, new_cap
));
/*
 * Throughput notifier: decide the desired murx_bfe_cap from the reported
 * 'speed' against the MUMIMO/SUMIMO thresholds; when the value changes,
 * (re)arm the debounce timer that eventually runs the work handler above.
 * Skipped while a reassociation is in progress.
 */
19373 argos_status_notifier_config_mumimo(struct notifier_block
*notifier
,
19374 unsigned long speed
, void *v
)
19376 struct net_device
*dev
;
19377 int prev_murx_bfe_cap
;
19381 dev
= argos_mumimo_ctrl_data
.dev
;
19386 dhd
= DHD_DEV_INFO(dev
);
19391 /* Check if STA reassociate with the AP after murx configuration */
19392 if (dhd
->pub
.reassoc_mumimo_sw
) {
19393 /* Cancel the MU-MIMO control timer */
19394 if (timer_pending(&argos_mumimo_ctrl_data
.config_timer
)) {
19395 del_timer_sync(&argos_mumimo_ctrl_data
.config_timer
);
19398 DHD_ERROR(("%s: Reassociation is in progress...\n", __FUNCTION__
));
19402 /* Check if current associated AP supports MU-MIMO capability
19403 * or current Tput meets the condition for MU-MIMO configuration
19405 if ((wl_check_bss_support_mumimo(dev
) <= 0) ||
19406 ((speed
< MUMIMO_TO_SUMIMO_TPUT_THRESHOLD
) &&
19407 (speed
>= SUMIMO_TO_MUMIMO_TPUT_THRESHOLD
))) {
19411 prev_murx_bfe_cap
= argos_mumimo_ctrl_data
.cur_murx_bfe_cap
;
19413 /* Check the TPut condition */
19414 if (speed
>= MUMIMO_TO_SUMIMO_TPUT_THRESHOLD
) {
19420 if (prev_murx_bfe_cap
!= cap
) {
19421 /* Cancel the MU-MIMO control timer */
19422 if (timer_pending(&argos_mumimo_ctrl_data
.config_timer
)) {
19423 del_timer_sync(&argos_mumimo_ctrl_data
.config_timer
);
19426 /* Update the new value */
19427 argos_mumimo_ctrl_data
.cur_murx_bfe_cap
= cap
;
19429 /* Arm the MU-MIMO control timer */
19430 mod_timer(&argos_mumimo_ctrl_data
.config_timer
,
19431 jiffies
+ msecs_to_jiffies(MUMIMO_CONTROL_TIMER_INTERVAL_MS
));
19433 DHD_ERROR(("%s: Arm the MU-MIMO control timer, cur_murx_bfe_cap=%d\n",
19434 __FUNCTION__
, cap
));
/* Initialise the control timer/work and reset state (cap = -1 = unset). */
19439 argos_config_mumimo_init(struct net_device
*dev
)
19441 init_timer(&argos_mumimo_ctrl_data
.config_timer
);
19442 argos_mumimo_ctrl_data
.config_timer
.data
= (unsigned long)&argos_mumimo_ctrl_data
;
19443 argos_mumimo_ctrl_data
.config_timer
.function
= argos_config_mumimo_timer
;
19444 argos_mumimo_ctrl_data
.dev
= dev
;
19445 INIT_WORK(&argos_mumimo_ctrl_data
.mumimo_ctrl_work
, argos_config_mumimo_handler
);
19446 argos_mumimo_ctrl_data
.cur_murx_bfe_cap
= -1;
/* Tear down: drop the dev reference, stop the timer, flush the work. */
19450 argos_config_mumimo_deinit(void)
19452 argos_mumimo_ctrl_data
.dev
= NULL
;
19453 if (timer_pending(&argos_mumimo_ctrl_data
.config_timer
)) {
19454 del_timer_sync(&argos_mumimo_ctrl_data
.config_timer
);
19457 cancel_work_sync(&argos_mumimo_ctrl_data
.mumimo_ctrl_work
);
/* Forget the last configured cap so the next notification re-applies it. */
19461 argos_config_mumimo_reset(void)
19463 argos_mumimo_ctrl_data
.cur_murx_bfe_cap
= -1;
19465 #endif /* DYNAMIC_MUMIMO_CONTROL */
/*
 * argos_register_notifier_init: remember 'net' as the primary WLAN netdev,
 * reset the RPS-enabled flag, and register the ARGOS throughput notifiers
 * (WIFI, optionally MIMO, and P2P tables).  On a later registration
 * failure the earlier registrations are rolled back and all
 * notifier_call pointers are cleared.  Comments only added.
 */
19468 argos_register_notifier_init(struct net_device
*net
)
19472 DHD_INFO(("DHD: %s: \n", __FUNCTION__
));
19473 argos_rps_ctrl_data
.wlan_primary_netdev
= net
;
19474 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
19475 #ifdef DYNAMIC_MUMIMO_CONTROL
19476 argos_config_mumimo_init(net
);
19477 #endif /* DYNAMIC_MUMIMO_CONTROL */
/* Register the WIFI-table notifier once (notifier_call doubles as flag). */
19479 if (argos_wifi
.notifier_call
== NULL
) {
19480 argos_wifi
.notifier_call
= argos_status_notifier_wifi_cb
;
19481 ret
= sec_argos_register_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19483 DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret
));
19488 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19489 if (argos_mimo
.notifier_call
== NULL
) {
19490 argos_mimo
.notifier_call
= argos_status_notifier_config_mumimo_cb
;
19491 ret
= sec_argos_register_notifier(&argos_mimo
, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL
);
19493 DHD_ERROR(("DHD:Failed to register WIFI for MIMO notifier, ret=%d\n", ret
));
/* Roll back the WIFI registration on MIMO failure. */
19494 sec_argos_unregister_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19498 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19500 if (argos_p2p
.notifier_call
== NULL
) {
19501 argos_p2p
.notifier_call
= argos_status_notifier_p2p_cb
;
19502 ret
= sec_argos_register_notifier(&argos_p2p
, ARGOS_P2P_TABLE_LABEL
);
19504 DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret
));
/* Roll back every earlier registration on P2P failure. */
19505 sec_argos_unregister_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19506 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19507 sec_argos_unregister_notifier(&argos_mimo
, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL
);
19508 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
/* Error path: clear all notifier_call pointers so a retry re-registers. */
19516 if (argos_wifi
.notifier_call
) {
19517 argos_wifi
.notifier_call
= NULL
;
19520 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19521 if (argos_mimo
.notifier_call
) {
19522 argos_mimo
.notifier_call
= NULL
;
19524 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19526 if (argos_p2p
.notifier_call
) {
19527 argos_p2p
.notifier_call
= NULL
;
/*
 * argos_register_notifier_deinit: undo everything init did -- deinit the
 * MU-MIMO control, clear the RPS map (when RPS control is compiled in),
 * unregister all notifiers that are still registered, and forget the
 * primary netdev.
 */
19534 argos_register_notifier_deinit(void)
19536 DHD_INFO(("DHD: %s: \n", __FUNCTION__
));
19538 if (argos_rps_ctrl_data
.wlan_primary_netdev
== NULL
) {
19539 DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__
));
19543 #ifdef DYNAMIC_MUMIMO_CONTROL
19544 argos_config_mumimo_deinit();
19545 #endif /* DYNAMIC_MUMIMO_CONTROL */
19547 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19548 custom_rps_map_clear(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
);
19549 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19551 if (argos_p2p
.notifier_call
) {
19552 sec_argos_unregister_notifier(&argos_p2p
, ARGOS_P2P_TABLE_LABEL
);
19553 argos_p2p
.notifier_call
= NULL
;
19556 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19557 if (argos_mimo
.notifier_call
) {
19558 sec_argos_unregister_notifier(&argos_mimo
, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL
);
19559 argos_mimo
.notifier_call
= NULL
;
19561 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19563 if (argos_wifi
.notifier_call
) {
19564 sec_argos_unregister_notifier(&argos_wifi
, ARGOS_WIFI_TABLE_LABEL
);
19565 argos_wifi
.notifier_call
= NULL
;
19568 argos_rps_ctrl_data
.wlan_primary_netdev
= NULL
;
19569 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
/*
 * argos_status_notifier_cb: common throughput notification handler.
 * When the reported 'speed' exceeds RPS_TPUT_THRESHOLD and RPS is not yet
 * enabled: install the RPS CPU map (unless DHD_LB load balancing is built
 * in) and switch TCP-ACK suppression to TCPACK_SUP_HOLD.  Below threshold:
 * restore TCPACK_SUP_OFF and clear the RPS map.  State is tracked in
 * argos_rps_ctrl_data.argos_rps_cpus_enabled.  Comments only added.
 */
19575 argos_status_notifier_cb(struct notifier_block
*notifier
,
19576 unsigned long speed
, void *v
)
19581 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19583 if (argos_rps_ctrl_data
.wlan_primary_netdev
== NULL
) {
19587 dhd
= DHD_DEV_INFO(argos_rps_ctrl_data
.wlan_primary_netdev
);
19593 if (dhdp
== NULL
|| !dhdp
->up
) {
19596 /* Check if reported TPut value is more than threshold value */
19597 if (speed
> RPS_TPUT_THRESHOLD
) {
19598 if (argos_rps_ctrl_data
.argos_rps_cpus_enabled
== 0) {
19599 /* It does not need to configre rps_cpus
19600 * if Load Balance is enabled
19602 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
/* Only steer to the WLAN core if it is actually online. */
19605 if (cpu_online(RPS_CPUS_WLAN_CORE_ID
)) {
19606 err
= custom_rps_map_set(
19607 argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
,
19608 RPS_CPUS_MASK
, strlen(RPS_CPUS_MASK
));
19610 DHD_ERROR(("DHD: %s: RPS_Set fail,"
19611 " Core=%d Offline\n", __FUNCTION__
,
19612 RPS_CPUS_WLAN_CORE_ID
));
19617 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
19618 "speed=%ld, error=%d\n",
19619 __FUNCTION__
, speed
, err
));
19621 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19622 #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
/* Hold TCP ACKs while in the high-throughput regime. */
19623 if (dhdp
->tcpack_sup_mode
!= TCPACK_SUP_HOLD
) {
19624 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
19625 __FUNCTION__
, TCPACK_SUP_HOLD
));
19626 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_HOLD
);
19628 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19629 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 1;
19630 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19631 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
19632 __FUNCTION__
, speed
));
19634 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
/* Low-throughput path: undo the high-throughput configuration. */
19637 if (argos_rps_ctrl_data
.argos_rps_cpus_enabled
== 1) {
19638 #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19639 if (dhdp
->tcpack_sup_mode
!= TCPACK_SUP_OFF
) {
19640 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
19642 dhd_tcpack_suppress_set(dhdp
, TCPACK_SUP_OFF
);
19644 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19645 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19646 /* It does not need to configre rps_cpus
19647 * if Load Balance is enabled
19649 custom_rps_map_clear(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
);
19650 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__
, speed
));
19651 OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS
);
19652 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19653 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
/* Thin wrapper: WIFI-table notifications go to the common handler (and,
 * without split tables, also drive the MU-MIMO configuration). */
19662 argos_status_notifier_wifi_cb(struct notifier_block
*notifier
,
19663 unsigned long speed
, void *v
)
19665 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19666 argos_status_notifier_cb(notifier
, speed
, v
);
19667 #if !defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19668 argos_status_notifier_config_mumimo(notifier
, speed
, v
);
19669 #endif /* !CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19674 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
/* Thin wrapper: dedicated MIMO-table notifications (split-table builds). */
19676 argos_status_notifier_config_mumimo_cb(struct notifier_block
*notifier
,
19677 unsigned long speed
, void *v
)
19679 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19680 argos_status_notifier_config_mumimo(notifier
, speed
, v
);
19684 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
/* Thin wrapper: P2P-table notifications go to the common handler. */
19687 argos_status_notifier_p2p_cb(struct notifier_block
*notifier
,
19688 unsigned long speed
, void *v
)
19690 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__
, speed
));
19691 argos_status_notifier_cb(notifier
, speed
, v
);
19695 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
19697 #ifdef DHD_DEBUG_PAGEALLOC
/*
 * dhd_page_corrupt_cb: page-allocator corruption callback.  Hex-dumps the
 * corrupted region, mirrors logs to the kernel log, and (on PCIe builds
 * with coredump support) captures a dongle memory dump, holding a wake
 * lock for the duration.  'handle' is the dhd_pub_t registered with the
 * debug facility.
 */
19700 dhd_page_corrupt_cb(void *handle
, void *addr_corrupt
, size_t len
)
19702 dhd_pub_t
*dhdp
= (dhd_pub_t
*)handle
;
19704 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
19705 __FUNCTION__
, addr_corrupt
, (uint32
)len
));
19707 DHD_OS_WAKE_LOCK(dhdp
);
19708 prhex("Page Corruption:", addr_corrupt
, len
);
19709 dhd_dump_to_kernelog(dhdp
);
19710 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
19711 /* Load the dongle side dump to host memory and then BUG_ON() */
19712 dhdp
->memdump_enabled
= DUMP_MEMONLY
;
19713 dhdp
->memdump_type
= DUMP_TYPE_MEMORY_CORRUPTION
;
19714 dhd_bus_mem_dump(dhdp
);
19715 #endif /* BCMPCIE && DHD_FW_COREDUMP */
19716 DHD_OS_WAKE_UNLOCK(dhdp
);
19718 EXPORT_SYMBOL(dhd_page_corrupt_cb
);
19719 #endif /* DHD_DEBUG_PAGEALLOC */
19721 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
/*
 * dhd_pktid_error_handler: invoked on a packet-ID audit failure.  Dumps
 * logs, captures a dongle memory dump (enabling DUMP_MEMFILE if dumping
 * was disabled), then reports a PCIe pktid hang via dhd_os_check_hang()
 * with -EREMOTEIO.
 */
19723 dhd_pktid_error_handler(dhd_pub_t
*dhdp
)
19725 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__
));
19726 DHD_OS_WAKE_LOCK(dhdp
);
19727 dhd_dump_to_kernelog(dhdp
);
19728 #ifdef DHD_FW_COREDUMP
19729 /* Load the dongle side dump to host memory */
19730 if (dhdp
->memdump_enabled
== DUMP_DISABLED
) {
19731 dhdp
->memdump_enabled
= DUMP_MEMFILE
;
19733 dhdp
->memdump_type
= DUMP_TYPE_PKTID_AUDIT_FAILURE
;
19734 dhd_bus_mem_dump(dhdp
);
19735 #endif /* DHD_FW_COREDUMP */
19736 dhdp
->hang_reason
= HANG_REASON_PCIE_PKTID_ERROR
;
19737 dhd_os_check_hang(dhdp
, 0, -EREMOTEIO
);
19738 DHD_OS_WAKE_UNLOCK(dhdp
);
19740 #endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
/*
 * dhd_linux_get_primary_netdev: return the net_device of interface 0 (the
 * primary interface) when it exists and has a netdev attached.
 */
19742 struct net_device
*
19743 dhd_linux_get_primary_netdev(dhd_pub_t
*dhdp
)
19745 dhd_info_t
*dhd
= dhdp
->info
;
19747 if (dhd
->iflist
[0] && dhd
->iflist
[0]->net
)
19748 return dhd
->iflist
[0]->net
;
19753 #ifdef DHD_DHCP_DUMP
/*
 * dhd_dhcp_dump: log DHCP packets passing through 'ifname'.  Validates the
 * IP header (IHL 5, IPv4, UDP), the bootp UDP ports (67/68), and the
 * length fields, then walks the bootp options past the magic cookie to
 * find the DHCP message type (option 53) and prints type/direction/op.
 */
19755 dhd_dhcp_dump(char *ifname
, uint8
*pktdata
, bool tx
)
19757 struct bootp_fmt
*b
= (struct bootp_fmt
*) &pktdata
[ETHER_HDR_LEN
];
19758 struct iphdr
*h
= &b
->ip_header
;
19759 uint8
*ptr
, *opt
, *end
= (uint8
*) b
+ ntohs(b
->ip_header
.tot_len
);
19760 int dhcp_type
= 0, len
, opt_len
;
19762 /* check IP header */
19763 if (h
->ihl
!= 5 || h
->version
!= 4 || h
->protocol
!= IPPROTO_UDP
) {
19767 /* check UDP port for bootp (67, 68) */
19768 if (b
->udp_header
.source
!= htons(67) && b
->udp_header
.source
!= htons(68) &&
19769 b
->udp_header
.dest
!= htons(67) && b
->udp_header
.dest
!= htons(68)) {
19773 /* check header length */
19774 if (ntohs(h
->tot_len
) < ntohs(b
->udp_header
.len
) + sizeof(struct iphdr
)) {
/* Remaining option bytes = UDP payload minus fixed bootp fields. */
19778 len
= ntohs(b
->udp_header
.len
) - sizeof(struct udphdr
);
19780 - (sizeof(*b
) - sizeof(struct iphdr
) - sizeof(struct udphdr
) - sizeof(b
->options
));
19782 /* parse bootp options */
19783 if (opt_len
>= 4 && !memcmp(b
->options
, bootp_magic_cookie
, 4)) {
19784 ptr
= &b
->options
[4];
/* 0xff terminates the option list. */
19785 while (ptr
< end
&& *ptr
!= 0xff) {
19794 /* 53 is dhcp type */
19797 dhcp_type
= opt
[2];
19798 DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
19799 ifname
, dhcp_types
[dhcp_type
],
19800 tx
? "TX" : "RX", dhcp_ops
[b
->op
]));
19807 #endif /* DHD_DHCP_DUMP */
19809 #ifdef DHD_ICMP_DUMP
/*
 * dhd_icmp_dump: log ICMP packets on 'ifname'.  Validates the IP header
 * (IHL 5, IPv4, ICMP protocol), then prints echo request/reply sequence
 * numbers, or type/code for any other ICMP message.
 */
19811 dhd_icmp_dump(char *ifname
, uint8
*pktdata
, bool tx
)
19813 uint8
*pkt
= (uint8
*)&pktdata
[ETHER_HDR_LEN
];
19814 struct iphdr
*iph
= (struct iphdr
*)pkt
;
19815 struct icmphdr
*icmph
;
19817 /* check IP header */
19818 if (iph
->ihl
!= 5 || iph
->version
!= 4 || iph
->protocol
!= IP_PROT_ICMP
) {
19822 icmph
= (struct icmphdr
*)((uint8
*)pkt
+ sizeof(struct iphdr
));
19823 if (icmph
->type
== ICMP_ECHO
) {
19824 DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
19825 ifname
, tx
? "TX" : "RX", ntoh16(icmph
->un
.echo
.sequence
)));
19826 } else if (icmph
->type
== ICMP_ECHOREPLY
) {
19827 DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
19828 ifname
, tx
? "TX" : "RX", ntoh16(icmph
->un
.echo
.sequence
)));
19830 DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
19831 ifname
, tx
? "TX" : "RX", icmph
->type
, icmph
->code
));
19834 #endif /* DHD_ICMP_DUMP */
19836 #ifdef SHOW_LOGTRACE
/*
 * dhd_get_read_buf_ptr: pull one entry from the firmware verbose trace
 * ring (DEBUGABILITY) or the ecounters debug ring
 * (DEBUGABILITY_ECNTRS_LOGGING) into trace_buf_info->buf, record the
 * pulled size, and flag whether more data remains
 * (NEXT_BUF_AVAIL / NEXT_BUF_NOT_AVAIL / BUF_NOT_AVAILABLE).
 */
19838 dhd_get_read_buf_ptr(dhd_pub_t
*dhd_pub
, trace_buf_info_t
*trace_buf_info
)
19840 dhd_dbg_ring_status_t ring_status
;
19842 #if defined(DEBUGABILITY)
19843 rlen
= dhd_dbg_pull_single_from_ring(dhd_pub
, FW_VERBOSE_RING_ID
, trace_buf_info
->buf
,
19844 TRACE_LOG_BUF_MAX_SIZE
, TRUE
);
19845 #elif defined(DEBUGABILITY_ECNTRS_LOGGING)
19846 rlen
= dhd_dbg_ring_pull_single(dhd_pub
->ecntr_dbg_ring
, trace_buf_info
->buf
,
19847 TRACE_LOG_BUF_MAX_SIZE
, TRUE
);
19850 #endif /* DEBUGABILITY */
19852 trace_buf_info
->size
= rlen
;
19853 trace_buf_info
->availability
= NEXT_BUF_NOT_AVAIL
;
19855 trace_buf_info
->availability
= BUF_NOT_AVAILABLE
;
/* More unread bytes in the ring => tell the caller to come back. */
19858 dhd_dbg_get_ring_status(dhd_pub
, FW_VERBOSE_RING_ID
, &ring_status
);
19859 if (ring_status
.written_bytes
!= ring_status
.read_bytes
) {
19860 trace_buf_info
->availability
= NEXT_BUF_AVAIL
;
19863 #endif /* SHOW_LOGTRACE */
/* Report whether firmware download has completed. */
19866 dhd_fw_download_status(dhd_pub_t
* dhd_pub
)
19868 return dhd_pub
->fw_download_done
;
/*
 * dhd_create_to_notifier_skt: create the kernel-side netlink socket
 * (BCM_NL_USER protocol) used to talk to the user-space DHD daemon,
 * selecting the netlink_kernel_create() signature by kernel version.
 */
19872 dhd_create_to_notifier_skt(void)
19874 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
19875 /* Kernel 3.7 onwards this API accepts only 3 arguments. */
19876 /* Kernel version 3.6 is a special case which accepts 4 arguments */
19877 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, &dhd_netlink_cfg
);
19878 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
19879 /* Kernel version 3.5 and below use this old API format */
19880 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, 0,
19881 dhd_process_daemon_msg
, NULL
, THIS_MODULE
);
19883 nl_to_event_sk
= netlink_kernel_create(&init_net
, BCM_NL_USER
, THIS_MODULE
,
19885 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
19886 if (!nl_to_event_sk
)
19888 printf("Error creating socket.\n");
19891 DHD_INFO(("nl_to socket created successfully...\n"));
/* Release the daemon netlink socket created above. */
19896 dhd_destroy_to_notifier_skt(void)
19898 DHD_INFO(("Destroying nl_to socket\n"));
19899 netlink_kernel_release(nl_to_event_sk
);
/*
 * dhd_recv_msg_from_daemon: parse an incoming netlink message; when it is
 * the daemon's REASON_DAEMON_STARTED handshake (BCM_TO_MAGIC), latch the
 * sender's PID so dhd_send_msg_to_daemon() can unicast back to it.
 */
19903 dhd_recv_msg_from_daemon(struct sk_buff
*skb
)
19905 struct nlmsghdr
*nlh
;
19906 bcm_to_info_t
*cmd
;
19908 nlh
= (struct nlmsghdr
*)skb
->data
;
19909 cmd
= (bcm_to_info_t
*)nlmsg_data(nlh
);
19910 if ((cmd
->magic
== BCM_TO_MAGIC
) && (cmd
->reason
== REASON_DAEMON_STARTED
)) {
19911 sender_pid
= ((struct nlmsghdr
*)(skb
->data
))->nlmsg_pid
;
19912 DHD_INFO(("DHD Daemon Started\n"));
/*
 * dhd_send_msg_to_daemon: unicast 'size' bytes of 'data' to the daemon PID
 * latched by dhd_recv_msg_from_daemon().  No-op (with a log) when no
 * daemon has registered yet (sender_pid == 0).
 */
19917 dhd_send_msg_to_daemon(struct sk_buff
*skb
, void *data
, int size
)
19919 struct nlmsghdr
*nlh
;
19920 struct sk_buff
*skb_out
;
19922 BCM_REFERENCE(skb
);
19923 if (sender_pid
== 0) {
19924 DHD_INFO(("Invalid PID 0\n"));
19928 if ((skb_out
= nlmsg_new(size
, 0)) == NULL
) {
19929 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__
));
/* NOTE(review): nlmsg_put() can return NULL on insufficient tailroom and
 * no NULL check is visible here before nlmsg_data(nlh) -- confirm against
 * the full source. */
19932 nlh
= nlmsg_put(skb_out
, 0, 0, NLMSG_DONE
, size
, 0);
19933 NETLINK_CB(skb_out
).dst_group
= 0; /* Unicast */
19934 memcpy(nlmsg_data(nlh
), (char *)data
, size
);
19936 if ((nlmsg_unicast(nl_to_event_sk
, skb_out
, sender_pid
)) < 0) {
19937 DHD_INFO(("Error sending message\n"));
/*
 * dhd_process_daemon_msg: netlink input handler -- record the daemon's PID
 * from the incoming message and acknowledge with a DAEMON_STARTED reply.
 */
19943 dhd_process_daemon_msg(struct sk_buff
*skb
)
19945 bcm_to_info_t to_info
;
19947 to_info
.magic
= BCM_TO_MAGIC
;
19948 to_info
.reason
= REASON_DAEMON_STARTED
;
19949 to_info
.trap
= NO_TRAP
;
19951 dhd_recv_msg_from_daemon(skb
);
19952 dhd_send_msg_to_daemon(skb
, &to_info
, sizeof(to_info
));
19955 #ifdef DHD_LOG_DUMP
19957 dhd_log_dump_ecntr_enabled(void)
19959 return (bool)logdump_ecntr_enable
;
19963 dhd_log_dump_init(dhd_pub_t
*dhd
)
19965 struct dhd_log_dump_buf
*dld_buf
, *dld_buf_special
;
19967 uint8
*prealloc_buf
= NULL
, *bufptr
= NULL
;
19968 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
19969 int prealloc_idx
= DHD_PREALLOC_DHD_LOG_DUMP_BUF
;
19970 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
19972 dhd_dbg_ring_t
*ring
= NULL
;
19973 unsigned long flags
= 0;
19974 dhd_info_t
*dhd_info
= dhd
->info
;
19975 void *cookie_buf
= NULL
;
19977 BCM_REFERENCE(ret
);
19978 BCM_REFERENCE(ring
);
19979 BCM_REFERENCE(flags
);
19982 if (logdump_prsrv_tailsize
<= 0 ||
19983 logdump_prsrv_tailsize
> DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE
) {
19984 logdump_prsrv_tailsize
= DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE
;
19986 /* now adjust the preserve log flush size based on the
19987 * kernel printk log buffer size
19989 #ifdef CONFIG_LOG_BUF_SHIFT
19990 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
19991 " limit prsrv tail size to = %uKB\n",
19992 __FUNCTION__
, (1 << CONFIG_LOG_BUF_SHIFT
)/1024,
19993 logdump_prsrv_tailsize
/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE
/1024));
19995 if (logdump_prsrv_tailsize
> LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE
) {
19996 logdump_prsrv_tailsize
= LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE
;
19999 DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
20000 __FUNCTION__
, logdump_prsrv_tailsize
/1024);
20001 #endif /* CONFIG_LOG_BUF_SHIFT */
20003 mutex_init(&dhd_info
->logdump_lock
);
20005 /* initialize log dump buf structures */
20006 memset(g_dld_buf
, 0, sizeof(struct dhd_log_dump_buf
) * DLD_BUFFER_NUM
);
20008 /* set the log dump buffer size based on the module_param */
20009 if (logdump_max_bufsize
> LOG_DUMP_GENERAL_MAX_BUFSIZE
||
20010 logdump_max_bufsize
<= 0)
20011 dld_buf_size
[DLD_BUF_TYPE_GENERAL
] = LOG_DUMP_GENERAL_MAX_BUFSIZE
;
20013 dld_buf_size
[DLD_BUF_TYPE_GENERAL
] = logdump_max_bufsize
;
20015 /* pre-alloc the memory for the log buffers & 'special' buffer */
20016 dld_buf_special
= &g_dld_buf
[DLD_BUF_TYPE_SPECIAL
];
20017 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20018 DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
20019 __FUNCTION__
, LOG_DUMP_TOTAL_BUFSIZE
, LOG_DUMP_SPECIAL_MAX_BUFSIZE
));
20020 prealloc_buf
= DHD_OS_PREALLOC(dhd
, prealloc_idx
++, LOG_DUMP_TOTAL_BUFSIZE
);
20021 dld_buf_special
->buffer
= DHD_OS_PREALLOC(dhd
, prealloc_idx
++,
20022 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20024 prealloc_buf
= MALLOCZ(dhd
->osh
, LOG_DUMP_TOTAL_BUFSIZE
);
20025 dld_buf_special
->buffer
= MALLOCZ(dhd
->osh
, dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20026 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20027 if (!prealloc_buf
) {
20028 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
20031 if (!dld_buf_special
->buffer
) {
20032 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
20036 bufptr
= prealloc_buf
;
20037 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
20038 dld_buf
= &g_dld_buf
[i
];
20039 dld_buf
->dhd_pub
= dhd
;
20040 spin_lock_init(&dld_buf
->lock
);
20041 dld_buf
->wraparound
= 0;
20042 if (i
!= DLD_BUF_TYPE_SPECIAL
) {
20043 dld_buf
->buffer
= bufptr
;
20044 dld_buf
->max
= (unsigned long)dld_buf
->buffer
+ dld_buf_size
[i
];
20045 bufptr
= (uint8
*)dld_buf
->max
;
20047 dld_buf
->max
= (unsigned long)dld_buf
->buffer
+ dld_buf_size
[i
];
20049 dld_buf
->present
= dld_buf
->front
= dld_buf
->buffer
;
20050 dld_buf
->remain
= dld_buf_size
[i
];
20051 dld_buf
->enable
= 1;
20054 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20055 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
20056 dhd
->ecntr_dbg_ring
= MALLOCZ(dhd
->osh
, sizeof(dhd_dbg_ring_t
));
20057 if (!dhd
->ecntr_dbg_ring
)
20060 ring
= (dhd_dbg_ring_t
*)dhd
->ecntr_dbg_ring
;
20061 ret
= dhd_dbg_ring_init(dhd
, ring
, ECNTR_RING_ID
,
20062 ECNTR_RING_NAME
, LOG_DUMP_ECNTRS_MAX_BUFSIZE
,
20064 if (ret
!= BCME_OK
) {
20065 DHD_ERROR(("%s: unable to init ecntr ring !\n",
20069 DHD_DBG_RING_LOCK(ring
->lock
, flags
);
20070 ring
->state
= RING_ACTIVE
;
20071 ring
->threshold
= 0;
20072 DHD_DBG_RING_UNLOCK(ring
->lock
, flags
);
20074 bufptr
+= LOG_DUMP_ECNTRS_MAX_BUFSIZE
;
20075 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20077 /* Concise buffer is used as intermediate buffer for following purposes
20078 * a) pull ecounters records temporarily before
20079 * writing it to file
20080 * b) to store dhd dump data before putting it to file
20081 * It should have a size equal to
20082 * MAX(largest possible ecntr record, 'dhd dump' data size)
20084 dhd
->concise_dbg_buf
= MALLOC(dhd
->osh
, CONCISE_DUMP_BUFLEN
);
20085 if (!dhd
->concise_dbg_buf
) {
20086 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
20091 #if defined(DHD_EVENT_LOG_FILTER)
20092 ret
= dhd_event_log_filter_init(dhd
,
20094 LOG_DUMP_FILTER_MAX_BUFSIZE
);
20095 if (ret
!= BCME_OK
) {
20098 #endif /* DHD_EVENT_LOG_FILTER */
20100 cookie_buf
= MALLOC(dhd
->osh
, LOG_DUMP_COOKIE_BUFSIZE
);
20102 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
20106 ret
= dhd_logdump_cookie_init(dhd
, cookie_buf
, LOG_DUMP_COOKIE_BUFSIZE
);
20107 if (ret
!= BCME_OK
) {
20108 MFREE(dhd
->osh
, cookie_buf
, LOG_DUMP_COOKIE_BUFSIZE
);
20115 if (dhd
->logdump_cookie
) {
20116 dhd_logdump_cookie_deinit(dhd
);
20117 MFREE(dhd
->osh
, dhd
->logdump_cookie
, LOG_DUMP_COOKIE_BUFSIZE
);
20118 dhd
->logdump_cookie
= NULL
;
20120 #if defined(DHD_EVENT_LOG_FILTER)
20121 if (dhd
->event_log_filter
) {
20122 dhd_event_log_filter_deinit(dhd
);
20124 #endif /* DHD_EVENT_LOG_FILTER */
20126 if (dhd
->concise_dbg_buf
) {
20127 MFREE(dhd
->osh
, dhd
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
);
20130 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20131 if (dhd
->ecntr_dbg_ring
) {
20132 ring
= (dhd_dbg_ring_t
*)dhd
->ecntr_dbg_ring
;
20133 dhd_dbg_ring_deinit(dhd
, ring
);
20134 ring
->ring_buf
= NULL
;
20135 ring
->ring_size
= 0;
20136 MFREE(dhd
->osh
, ring
, sizeof(dhd_dbg_ring_t
));
20137 dhd
->ecntr_dbg_ring
= NULL
;
20139 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20141 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20142 if (prealloc_buf
) {
20143 DHD_OS_PREFREE(dhd
, prealloc_buf
, LOG_DUMP_TOTAL_BUFSIZE
);
20145 if (dld_buf_special
->buffer
) {
20146 DHD_OS_PREFREE(dhd
, dld_buf_special
->buffer
,
20147 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20150 if (prealloc_buf
) {
20151 MFREE(dhd
->osh
, prealloc_buf
, LOG_DUMP_TOTAL_BUFSIZE
);
20153 if (dld_buf_special
->buffer
) {
20154 MFREE(dhd
->osh
, dld_buf_special
->buffer
,
20155 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20157 #endif /* CONFIG_DHD_USE_STATIC_BUF */
20158 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
20159 dld_buf
= &g_dld_buf
[i
];
20160 dld_buf
->enable
= 0;
20161 dld_buf
->buffer
= NULL
;
20164 mutex_destroy(&dhd_info
->logdump_lock
);
20168 dhd_log_dump_deinit(dhd_pub_t
*dhd
)
20170 struct dhd_log_dump_buf
*dld_buf
= NULL
, *dld_buf_special
= NULL
;
20172 dhd_info_t
*dhd_info
= dhd
->info
;
20173 dhd_dbg_ring_t
*ring
= NULL
;
20175 BCM_REFERENCE(ring
);
20177 if (dhd
->concise_dbg_buf
) {
20178 MFREE(dhd
->osh
, dhd
->concise_dbg_buf
, CONCISE_DUMP_BUFLEN
);
20179 dhd
->concise_dbg_buf
= NULL
;
20182 if (dhd
->logdump_cookie
) {
20183 dhd_logdump_cookie_deinit(dhd
);
20184 MFREE(dhd
->osh
, dhd
->logdump_cookie
, LOG_DUMP_COOKIE_BUFSIZE
);
20185 dhd
->logdump_cookie
= NULL
;
20188 #if defined(DHD_EVENT_LOG_FILTER)
20189 if (dhd
->event_log_filter
) {
20190 dhd_event_log_filter_deinit(dhd
);
20192 #endif /* DHD_EVENT_LOG_FILTER */
20194 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20195 if (dhd
->ecntr_dbg_ring
) {
20196 ring
= (dhd_dbg_ring_t
*)dhd
->ecntr_dbg_ring
;
20197 dhd_dbg_ring_deinit(dhd
, ring
);
20198 ring
->ring_buf
= NULL
;
20199 ring
->ring_size
= 0;
20200 MFREE(dhd
->osh
, ring
, sizeof(dhd_dbg_ring_t
));
20201 dhd
->ecntr_dbg_ring
= NULL
;
20203 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20205 /* 'general' buffer points to start of the pre-alloc'd memory */
20206 dld_buf
= &g_dld_buf
[DLD_BUF_TYPE_GENERAL
];
20207 dld_buf_special
= &g_dld_buf
[DLD_BUF_TYPE_SPECIAL
];
20208 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20209 if (dld_buf
->buffer
) {
20210 DHD_OS_PREFREE(dhd
, dld_buf
->buffer
, LOG_DUMP_TOTAL_BUFSIZE
);
20212 if (dld_buf_special
->buffer
) {
20213 DHD_OS_PREFREE(dhd
, dld_buf_special
->buffer
,
20214 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20217 if (dld_buf
->buffer
) {
20218 MFREE(dhd
->osh
, dld_buf
->buffer
, LOG_DUMP_TOTAL_BUFSIZE
);
20220 if (dld_buf_special
->buffer
) {
20221 MFREE(dhd
->osh
, dld_buf_special
->buffer
,
20222 dld_buf_size
[DLD_BUF_TYPE_SPECIAL
]);
20224 #endif /* CONFIG_DHD_USE_STATIC_BUF */
20225 for (i
= 0; i
< DLD_BUFFER_NUM
; i
++) {
20226 dld_buf
= &g_dld_buf
[i
];
20227 dld_buf
->enable
= 0;
20228 dld_buf
->buffer
= NULL
;
20231 mutex_destroy(&dhd_info
->logdump_lock
);
20235 dhd_log_dump_write(int type
, char *binary_data
,
20236 int binary_len
, const char *fmt
, ...)
20239 char tmp_buf
[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
] = {0, };
20241 unsigned long flags
= 0;
20242 struct dhd_log_dump_buf
*dld_buf
= NULL
;
20243 bool flush_log
= FALSE
;
20245 if (type
< 0 || type
>= DLD_BUFFER_NUM
) {
20246 DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
20247 __FUNCTION__
, type
));
20251 dld_buf
= &g_dld_buf
[type
];
20253 if (dld_buf
->enable
!= 1) {
20257 va_start(args
, fmt
);
20258 len
= vsnprintf(tmp_buf
, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
, fmt
, args
);
20259 /* Non ANSI C99 compliant returns -1,
20260 * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
20267 if (len
>= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
) {
20268 len
= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
- 1;
20269 tmp_buf
[len
] = '\0';
20272 /* make a critical section to eliminate race conditions */
20273 spin_lock_irqsave(&dld_buf
->lock
, flags
);
20274 if (dld_buf
->remain
< len
) {
20275 dld_buf
->wraparound
= 1;
20276 dld_buf
->present
= dld_buf
->front
;
20277 dld_buf
->remain
= dld_buf_size
[type
];
20278 /* if wrap around happens, flush the ring buffer to the file */
20282 memcpy(dld_buf
->present
, tmp_buf
, len
);
20283 dld_buf
->remain
-= len
;
20284 dld_buf
->present
+= len
;
20285 spin_unlock_irqrestore(&dld_buf
->lock
, flags
);
20287 /* double check invalid memory operation */
20288 ASSERT((unsigned long)dld_buf
->present
<= dld_buf
->max
);
20290 if (dld_buf
->dhd_pub
) {
20291 dhd_pub_t
*dhdp
= (dhd_pub_t
*)dld_buf
->dhd_pub
;
20292 dhdp
->logdump_periodic_flush
=
20293 logdump_periodic_flush
;
20294 if (logdump_periodic_flush
&& flush_log
) {
20295 log_dump_type_t
*flush_type
= MALLOCZ(dhdp
->osh
,
20296 sizeof(log_dump_type_t
));
20298 *flush_type
= type
;
20299 dhd_schedule_log_dump(dld_buf
->dhd_pub
, flush_type
);
20306 dhd_log_dump_get_timestamp(void)
20308 static char buf
[16];
20310 unsigned long rem_nsec
;
20312 ts_nsec
= local_clock();
20313 rem_nsec
= DIV_AND_MOD_U64_BY_U32(ts_nsec
, NSEC_PER_SEC
);
20314 snprintf(buf
, sizeof(buf
), "%5lu.%06lu",
20315 (unsigned long)ts_nsec
, rem_nsec
/ NSEC_PER_USEC
);
20319 #endif /* DHD_LOG_DUMP */
20321 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
20323 dhd_flush_rx_tx_wq(dhd_pub_t
*dhdp
)
20330 flush_workqueue(dhd
->tx_wq
);
20331 flush_workqueue(dhd
->rx_wq
);
20337 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
20340 #define DHD_LB_TXBOUND 64
20342 * Function that performs the TX processing on a given CPU
20345 dhd_lb_tx_process(dhd_info_t
*dhd
)
20347 struct sk_buff
*skb
;
20349 struct net_device
*net
;
20351 bool resched
= FALSE
;
20353 DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__
));
20355 DHD_ERROR((" Null pointer DHD \r\n"));
20359 BCM_REFERENCE(net
);
20361 DHD_LB_STATS_PERCPU_ARR_INCR(dhd
->txp_percpu_run_cnt
);
20363 /* Base Loop to perform the actual Tx */
20365 skb
= skb_dequeue(&dhd
->tx_pend_queue
);
20367 DHD_TRACE(("Dequeued a Null Packet \r\n"));
20372 net
= DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
));
20373 ifidx
= DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t
*)PKTTAG(skb
));
20375 DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb
,
20378 __dhd_sendpkt(&dhd
->pub
, ifidx
, skb
);
20380 if (cnt
>= DHD_LB_TXBOUND
) {
20387 DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__
, cnt
));
20393 dhd_lb_tx_handler(unsigned long data
)
20395 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
20397 if (dhd_lb_tx_process(dhd
)) {
20398 dhd_tasklet_schedule(&dhd
->tx_tasklet
);
20402 #endif /* DHD_LB_TXP */
20404 #ifdef DHD_DEBUG_UART
20406 dhd_debug_uart_is_running(struct net_device
*dev
)
20408 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
20410 if (dhd
->duart_execute
) {
20418 dhd_debug_uart_exec_rd(void *handle
, void *event_info
, u8 event
)
20420 dhd_pub_t
*dhdp
= handle
;
20421 dhd_debug_uart_exec(dhdp
, "rd");
20425 dhd_debug_uart_exec(dhd_pub_t
*dhdp
, char *cmd
)
20429 char *argv
[] = {DHD_DEBUG_UART_EXEC_PATH
, cmd
, NULL
};
20430 char *envp
[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL
};
20432 #ifdef DHD_FW_COREDUMP
20433 if (dhdp
->memdump_enabled
== DUMP_MEMFILE_BUGON
)
20436 if (dhdp
->hang_reason
== HANG_REASON_PCIE_LINK_DOWN
||
20437 #ifdef DHD_FW_COREDUMP
20438 dhdp
->memdump_success
== FALSE
||
20441 dhdp
->info
->duart_execute
= TRUE
;
20442 DHD_ERROR(("DHD: %s - execute %s %s\n",
20443 __FUNCTION__
, DHD_DEBUG_UART_EXEC_PATH
, cmd
));
20444 ret
= call_usermodehelper(argv
[0], argv
, envp
, UMH_WAIT_PROC
);
20445 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
20446 __FUNCTION__
, DHD_DEBUG_UART_EXEC_PATH
, cmd
, ret
));
20447 dhdp
->info
->duart_execute
= FALSE
;
20449 #ifdef DHD_LOG_DUMP
20450 if (dhdp
->memdump_type
!= DUMP_TYPE_BY_SYSDUMP
)
20458 #endif /* DHD_DEBUG_UART */
20460 #if defined(DHD_BLOB_EXISTENCE_CHECK)
20462 dhd_set_blob_support(dhd_pub_t
*dhdp
, char *fw_path
)
20465 char *filepath
= VENDOR_PATH CONFIG_BCMDHD_CLM_PATH
;
20466 fp
= filp_open(filepath
, O_RDONLY
, 0);
20468 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__
,
20470 dhdp
->is_blob
= FALSE
;
20472 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__
, filepath
));
20473 dhdp
->is_blob
= TRUE
;
20474 #if defined(CONCATE_BLOB)
20475 strncat(fw_path
, "_blob", strlen("_blob"));
20477 BCM_REFERENCE(fw_path
);
20478 #endif /* SKIP_CONCATE_BLOB */
20479 filp_close(fp
, NULL
);
20482 #endif /* DHD_BLOB_EXISTENCE_CHECK */
20484 #if defined(PCIE_FULL_DONGLE)
20485 /** test / loopback */
20487 dmaxfer_free_dmaaddr_handler(void *handle
, void *event_info
, u8 event
)
20489 dmaxref_mem_map_t
*dmmap
= (dmaxref_mem_map_t
*)event_info
;
20490 dhd_info_t
*dhd_info
= (dhd_info_t
*)handle
;
20492 if (event
!= DHD_WQ_WORK_DMA_LB_MEM_REL
) {
20493 DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__
));
20496 if (dhd_info
== NULL
) {
20497 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__
));
20500 if (dmmap
== NULL
) {
20501 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__
));
20504 dmaxfer_free_prev_dmaaddr(&dhd_info
->pub
, dmmap
);
20508 dhd_schedule_dmaxfer_free(dhd_pub_t
*dhdp
, dmaxref_mem_map_t
*dmmap
)
20510 dhd_info_t
*dhd_info
= dhdp
->info
;
20512 dhd_deferred_schedule_work(dhd_info
->dhd_deferred_wq
, (void *)dmmap
,
20513 DHD_WQ_WORK_DMA_LB_MEM_REL
, dmaxfer_free_dmaaddr_handler
, DHD_WQ_WORK_PRIORITY_LOW
);
20515 #endif /* PCIE_FULL_DONGLE */
20516 /* ---------------------------- End of sysfs implementation ------------------------------------- */
20518 #ifdef SET_PCIE_IRQ_CPU_CORE
20520 dhd_set_irq_cpucore(dhd_pub_t
*dhdp
, int affinity_cmd
)
20522 unsigned int pcie_irq
= 0;
20525 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__
));
20530 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__
));
20534 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__
, affinity_cmd
));
20536 if (dhdpcie_get_pcieirq(dhdp
->bus
, &pcie_irq
)) {
20537 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__
));
20542 irq_set_affinity() assign dedicated CPU core PCIe interrupt
20543 If dedicated CPU core is not on-line,
20544 PCIe interrupt scheduled on CPU core 0
20546 switch (affinity_cmd
) {
20547 case PCIE_IRQ_AFFINITY_OFF
:
20549 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY
:
20550 irq_set_affinity(pcie_irq
, dhdp
->info
->cpumask_primary
);
20552 #ifdef CONFIG_SOC_EXYNOS9810
20553 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS
:
20554 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
20555 __FUNCTION__
, pcie_irq
, PCIE_IRQ_CPU_CORE
));
20556 irq_set_affinity(pcie_irq
, cpumask_of(PCIE_IRQ_CPU_CORE
));
20558 #endif /* CONFIG_SOC_EXYNOS9810 */
20560 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
20561 __FUNCTION__
, affinity_cmd
));
20564 #endif /* SET_PCIE_IRQ_CPU_CORE */
20567 dhd_write_file(const char *filepath
, char *buf
, int buf_len
)
20569 struct file
*fp
= NULL
;
20570 mm_segment_t old_fs
;
20573 /* change to KERNEL_DS address limit */
20577 /* File is always created. */
20578 fp
= filp_open(filepath
, O_RDWR
| O_CREAT
, 0664);
20580 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
20581 __FUNCTION__
, filepath
, PTR_ERR(fp
)));
20584 if (fp
->f_mode
& FMODE_WRITE
) {
20585 ret
= vfs_write(fp
, buf
, buf_len
, &fp
->f_pos
);
20587 DHD_ERROR(("%s: Couldn't write file '%s'\n",
20588 __FUNCTION__
, filepath
));
20594 filp_close(fp
, NULL
);
20597 /* restore previous address limit */
20604 dhd_read_file(const char *filepath
, char *buf
, int buf_len
)
20606 struct file
*fp
= NULL
;
20607 mm_segment_t old_fs
;
20610 /* change to KERNEL_DS address limit */
20614 fp
= filp_open(filepath
, O_RDONLY
, 0);
20617 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__
, filepath
));
20621 ret
= kernel_read(fp
, 0, buf
, buf_len
);
20622 filp_close(fp
, NULL
);
20624 /* restore previous address limit */
20627 /* Return the number of bytes read */
20629 /* Success to read */
20632 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
20633 __FUNCTION__
, filepath
, ret
));
/*
 * Write buf to filepath, then read it back to verify the write stuck
 * (i.e. the resulting file is non-empty). Note: buf is clobbered by the
 * verification read.
 */
int
dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
{
	int ret;

	ret = dhd_write_file(filepath, buf, buf_len);
	if (ret < 0) {
		return ret;
	}

	/* Read the file again and check if the file size is not zero */
	memset(buf, 0, buf_len);
	ret = dhd_read_file(filepath, buf, buf_len);

	return ret;
}
20658 int dhd_read_from_file(dhd_pub_t
*dhd
)
20660 int ret
= 0, nread
= 0;
20663 NULL_CHECK(dhd
, "dhd is NULL", ret
);
20665 buf
= MALLOCZ(dhd
->osh
, FILE_BLOCK_READ_SIZE
);
20667 DHD_ERROR(("error: failed to alllocate buf.\n"));
20671 /* open file to read */
20672 fd
= dhd_os_open_image1(dhd
, FILTER_IE_PATH
);
20674 DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH
));
20678 nread
= dhd_os_get_image_block(buf
, (FILE_BLOCK_READ_SIZE
- 1), fd
);
20681 if ((ret
= dhd_parse_filter_ie(dhd
, buf
)) < 0) {
20682 DHD_ERROR(("error: failed to parse filter ie\n"));
20685 DHD_ERROR(("error: zero length file.failed to read\n"));
20688 dhd_os_close_image1(dhd
, fd
);
20691 MFREE(dhd
->osh
, buf
, FILE_BLOCK_READ_SIZE
);
20697 int dhd_get_filter_ie_count(dhd_pub_t
*dhdp
, uint8
* buf
)
20700 int element_count
= 0;
20706 while (*pstr
!= '\0') {
20707 if (*pstr
== '\n') {
20713 * New line character must not be present after last line.
20714 * To count last line
20718 return element_count
;
20721 int dhd_parse_oui(dhd_pub_t
*dhd
, uint8
*inbuf
, uint8
*oui
, int len
)
20723 uint8 i
, j
, msb
, lsb
, oui_len
= 0;
20725 * OUI can vary from 3 bytes to 5 bytes.
20726 * While reading from file as ascii input it can
20727 * take maximum size of 14 bytes and minumum size of
20728 * 8 bytes including ":"
20729 * Example 5byte OUI <AB:DE:BE:CD:FA>
20730 * Example 3byte OUI <AB:DC:EF>
20733 if ((inbuf
== NULL
) || (len
< 8) || (len
> 14)) {
20734 DHD_ERROR(("error: failed to parse OUI \n"));
20738 for (j
= 0, i
= 0; i
< len
; i
+= 3, ++j
) {
20739 if (!bcm_isxdigit(inbuf
[i
]) || !bcm_isxdigit(inbuf
[i
+ 1])) {
20740 DHD_ERROR(("error: invalid OUI format \n"));
20743 msb
= inbuf
[i
] > '9' ? bcm_toupper(inbuf
[i
]) - 'A' + 10 : inbuf
[i
] - '0';
20744 lsb
= inbuf
[i
+ 1] > '9' ? bcm_toupper(inbuf
[i
+ 1]) -
20745 'A' + 10 : inbuf
[i
+ 1] - '0';
20746 oui
[j
] = (msb
<< 4) | lsb
;
20748 /* Size of oui.It can vary from 3/4/5 */
20754 int dhd_check_valid_ie(dhd_pub_t
*dhdp
, uint8
* buf
, int len
)
20759 if (!bcm_isdigit(buf
[i
])) {
20760 DHD_ERROR(("error: non digit value found in filter_ie \n"));
20765 if (bcm_atoi((char*)buf
) > 255) {
20766 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
20773 int dhd_parse_filter_ie(dhd_pub_t
*dhd
, uint8
*buf
)
20775 int element_count
= 0, i
= 0, oui_size
= 0, ret
= 0;
20776 uint16 bufsize
, buf_space_left
, id
= 0, len
= 0;
20777 uint16 filter_iovsize
, all_tlvsize
;
20778 wl_filter_ie_tlv_t
*p_ie_tlv
= NULL
;
20779 wl_filter_ie_iov_v1_t
*p_filter_iov
= (wl_filter_ie_iov_v1_t
*) NULL
;
20780 char *token
= NULL
, *ele_token
= NULL
, *oui_token
= NULL
, *type
= NULL
;
20783 element_count
= dhd_get_filter_ie_count(dhd
, buf
);
20784 DHD_INFO(("total element count %d \n", element_count
));
20785 /* Calculate the whole buffer size */
20786 filter_iovsize
= sizeof(wl_filter_ie_iov_v1_t
) + FILTER_IE_BUFSZ
;
20787 p_filter_iov
= MALLOCZ(dhd
->osh
, filter_iovsize
);
20789 if (p_filter_iov
== NULL
) {
20790 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize
));
20794 /* setup filter iovar header */
20795 p_filter_iov
->version
= WL_FILTER_IE_VERSION
;
20796 p_filter_iov
->len
= filter_iovsize
;
20797 p_filter_iov
->fixed_length
= p_filter_iov
->len
- FILTER_IE_BUFSZ
;
20798 p_filter_iov
->pktflag
= FC_PROBE_REQ
;
20799 p_filter_iov
->option
= WL_FILTER_IE_CHECK_SUB_OPTION
;
20801 bufsize
= filter_iovsize
- WL_FILTER_IE_IOV_HDR_SIZE
; /* adjust available size for TLVs */
20802 p_ie_tlv
= (wl_filter_ie_tlv_t
*)&p_filter_iov
->tlvs
[0];
20803 buf_space_left
= bufsize
;
20805 while ((i
< element_count
) && (buf
!= NULL
)) {
20807 /* token contains one line of input data */
20808 token
= bcmstrtok((char**)&buf
, "\n", NULL
);
20809 if (token
== NULL
) {
20812 if ((ele_token
= bcmstrstr(token
, ",")) == NULL
) {
20813 /* only element id is present */
20814 if (dhd_check_valid_ie(dhd
, token
, strlen(token
)) == BCME_ERROR
) {
20815 DHD_ERROR(("error: Invalid element id \n"));
20819 id
= bcm_atoi((char*)token
);
20820 data
[len
++] = WL_FILTER_IE_SET
;
20822 /* oui is present */
20823 ele_token
= bcmstrtok(&token
, ",", NULL
);
20824 if ((ele_token
== NULL
) || (dhd_check_valid_ie(dhd
, ele_token
,
20825 strlen(ele_token
)) == BCME_ERROR
)) {
20826 DHD_ERROR(("error: Invalid element id \n"));
20830 id
= bcm_atoi((char*)ele_token
);
20831 data
[len
++] = WL_FILTER_IE_SET
;
20832 if ((oui_token
= bcmstrstr(token
, ",")) == NULL
) {
20833 oui_size
= dhd_parse_oui(dhd
, token
, &(data
[len
]), strlen(token
));
20834 if (oui_size
== BCME_ERROR
) {
20835 DHD_ERROR(("error: Invalid OUI \n"));
20841 /* type is present */
20842 oui_token
= bcmstrtok(&token
, ",", NULL
);
20843 if ((oui_token
== NULL
) || ((oui_size
=
20844 dhd_parse_oui(dhd
, oui_token
,
20845 &(data
[len
]), strlen(oui_token
))) == BCME_ERROR
)) {
20846 DHD_ERROR(("error: Invalid OUI \n"));
20851 if ((type
= bcmstrstr(token
, ",")) == NULL
) {
20852 if (dhd_check_valid_ie(dhd
, token
,
20853 strlen(token
)) == BCME_ERROR
) {
20854 DHD_ERROR(("error: Invalid type \n"));
20858 data
[len
++] = bcm_atoi((char*)token
);
20860 /* subtype is present */
20861 type
= bcmstrtok(&token
, ",", NULL
);
20862 if ((type
== NULL
) || (dhd_check_valid_ie(dhd
, type
,
20863 strlen(type
)) == BCME_ERROR
)) {
20864 DHD_ERROR(("error: Invalid type \n"));
20868 data
[len
++] = bcm_atoi((char*)type
);
20869 /* subtype is last element */
20870 if ((token
== NULL
) || (*token
== '\0') ||
20871 (dhd_check_valid_ie(dhd
, token
,
20872 strlen(token
)) == BCME_ERROR
)) {
20873 DHD_ERROR(("error: Invalid subtype \n"));
20877 data
[len
++] = bcm_atoi((char*)token
);
20881 ret
= bcm_pack_xtlv_entry((uint8
**)&p_ie_tlv
,
20882 &buf_space_left
, id
, len
, data
, BCM_XTLV_OPTION_ALIGN32
);
20883 if (ret
!= BCME_OK
) {
20884 DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
20885 "status=%d\n", __FUNCTION__
, ret
));
20891 /* file is empty or first line is blank */
20892 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
20896 /* update the iov header, set len to include all TLVs + header */
20897 all_tlvsize
= (bufsize
- buf_space_left
);
20898 p_filter_iov
->len
= htol16(all_tlvsize
+ WL_FILTER_IE_IOV_HDR_SIZE
);
20899 ret
= dhd_iovar(dhd
, 0, "filter_ie", (void *)p_filter_iov
,
20900 p_filter_iov
->len
, NULL
, 0, TRUE
);
20901 if (ret
!= BCME_OK
) {
20902 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret
));
20906 if (p_filter_iov
) {
20907 MFREE(dhd
->osh
, p_filter_iov
, filter_iovsize
);
20908 p_filter_iov
= NULL
;
20912 #endif /* FILTER_IE */
20913 #ifdef DHD_WAKE_STATUS
20915 dhd_get_wakecount(dhd_pub_t
*dhdp
)
20917 return dhd_bus_get_wakecount(dhdp
);
20919 #endif /* DHD_WAKE_STATUS */
20922 dhd_get_random_bytes(uint8
*buf
, uint len
)
20925 get_random_bytes_arch(buf
, len
);
20926 #endif /* BCMPCIE */
20930 #if defined(DHD_HANG_SEND_UP_TEST)
20932 dhd_make_hang_with_reason(struct net_device
*dev
, const char *string_num
)
20934 dhd_info_t
*dhd
= NULL
;
20935 dhd_pub_t
*dhdp
= NULL
;
20936 uint reason
= HANG_REASON_MAX
;
20937 uint32 fw_test_code
= 0;
20938 dhd
= DHD_DEV_INFO(dev
);
20944 if (!dhd
|| !dhdp
) {
20948 reason
= (uint
) bcm_strtoul(string_num
, NULL
, 0);
20949 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__
, reason
));
20952 if (dhdp
->req_hang_type
) {
20953 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
20954 __FUNCTION__
, dhdp
->req_hang_type
));
20955 dhdp
->req_hang_type
= 0;
20958 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__
));
20961 } else if ((reason
<= HANG_REASON_MASK
) || (reason
>= HANG_REASON_MAX
)) {
20962 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason
));
20966 if (dhdp
->req_hang_type
!= 0) {
20967 DHD_ERROR(("Already HANG requested for test\n"));
20972 case HANG_REASON_IOCTL_RESP_TIMEOUT
:
20973 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason
));
20974 dhdp
->req_hang_type
= reason
;
20975 fw_test_code
= 102; /* resumed on timeour */
20976 dhd_wl_ioctl_set_intiovar(dhdp
, "bus:disconnect", fw_test_code
,
20977 WLC_SET_VAR
, TRUE
, 0);
20979 case HANG_REASON_DONGLE_TRAP
:
20980 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason
));
20981 dhdp
->req_hang_type
= reason
;
20982 fw_test_code
= 99; /* dongle trap */
20983 dhd_wl_ioctl_set_intiovar(dhdp
, "bus:disconnect", fw_test_code
,
20984 WLC_SET_VAR
, TRUE
, 0);
20986 case HANG_REASON_D3_ACK_TIMEOUT
:
20987 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason
));
20988 dhdp
->req_hang_type
= reason
;
20990 case HANG_REASON_BUS_DOWN
:
20991 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason
));
20992 dhdp
->req_hang_type
= reason
;
20994 case HANG_REASON_PCIE_LINK_DOWN
:
20995 case HANG_REASON_MSGBUF_LIVELOCK
:
20996 dhdp
->req_hang_type
= 0;
20997 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason
));
20999 case HANG_REASON_IFACE_DEL_FAILURE
:
21000 dhdp
->req_hang_type
= 0;
21001 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason
));
21003 case HANG_REASON_HT_AVAIL_ERROR
:
21004 dhdp
->req_hang_type
= 0;
21005 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason
));
21007 case HANG_REASON_PCIE_RC_LINK_UP_FAIL
:
21008 DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason
));
21009 dhdp
->req_hang_type
= reason
;
21012 dhdp
->req_hang_type
= 0;
21013 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason
));
21017 #endif /* DHD_HANG_SEND_UP_TEST */
21021 dhd_error_recovery(void *handle
, void *event_info
, u8 event
)
21023 dhd_info_t
*dhd
= handle
;
21028 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
21034 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
21035 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
21040 ret
= dhd_bus_perform_flr_with_quiesce(dhdp
);
21041 if (ret
!= BCME_DNGL_DEVRESET
) {
21042 DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
21043 "toggle REG_ON\n", __FUNCTION__
, ret
));
21044 /* toggle REG_ON */
21045 dhdp
->pom_toggle_reg_on(WLAN_FUNC_ID
, BY_WLAN_DUE_TO_WLAN
);
21051 dhd_schedule_reset(dhd_pub_t
*dhdp
)
21053 if (dhdp
->enable_erpom
) {
21054 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, NULL
,
21055 DHD_WQ_WORK_ERROR_RECOVERY
, dhd_error_recovery
, DHD_WQ_WORK_PRIORITY_HIGH
);
21058 #endif /* DHD_ERPOM */
21060 #ifdef DHD_PKT_LOGGING
21062 dhd_pktlog_dump(void *handle
, void *event_info
, u8 event
)
21064 dhd_info_t
*dhd
= handle
;
21067 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
21071 if (dhd_pktlog_write_file(&dhd
->pub
)) {
21072 DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__
));
21078 dhd_schedule_pktlog_dump(dhd_pub_t
*dhdp
)
21080 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
21081 (void*)NULL
, DHD_WQ_WORK_PKTLOG_DUMP
,
21082 dhd_pktlog_dump
, DHD_WQ_WORK_PRIORITY_HIGH
);
21084 #endif /* DHD_PKT_LOGGING */
21086 #ifdef BIGDATA_SOFTAP
21087 void dhd_schedule_gather_ap_stadata(void *bcm_cfg
, void *ndev
, const wl_event_msg_t
*e
)
21089 struct bcm_cfg80211
*cfg
;
21091 ap_sta_wq_data_t
*p_wq_data
;
21093 if (!bcm_cfg
|| !ndev
|| !e
) {
21094 WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg
, ndev
, e
));
21098 cfg
= (struct bcm_cfg80211
*)bcm_cfg
;
21099 dhdp
= (dhd_pub_t
*)cfg
->pub
;
21101 if (!dhdp
|| !cfg
->ap_sta_info
) {
21102 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp
, cfg
->ap_sta_info
));
21106 p_wq_data
= (ap_sta_wq_data_t
*)MALLOCZ(dhdp
->osh
, sizeof(ap_sta_wq_data_t
));
21107 if (unlikely(!p_wq_data
)) {
21108 DHD_ERROR(("%s(): could not allocate memory for - "
21109 "ap_sta_wq_data_t\n", __FUNCTION__
));
21113 mutex_lock(&cfg
->ap_sta_info
->wq_data_sync
);
21115 memcpy(&p_wq_data
->e
, e
, sizeof(wl_event_msg_t
));
21116 p_wq_data
->dhdp
= dhdp
;
21117 p_wq_data
->bcm_cfg
= cfg
;
21118 p_wq_data
->ndev
= (struct net_device
*)ndev
;
21120 mutex_unlock(&cfg
->ap_sta_info
->wq_data_sync
);
21122 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
,
21123 p_wq_data
, DHD_WQ_WORK_GET_BIGDATA_AP
,
21124 wl_gather_ap_stadata
, DHD_WQ_WORK_PRIORITY_HIGH
);
21127 #endif /* BIGDATA_SOFTAP */
21130 get_debug_dump_time(char *str
)
21132 struct timeval curtime
;
21133 unsigned long local_time
;
21134 struct rtc_time tm
;
21136 if (!strlen(str
)) {
21137 do_gettimeofday(&curtime
);
21138 local_time
= (u32
)(curtime
.tv_sec
-
21139 (sys_tz
.tz_minuteswest
* DHD_LOG_DUMP_TS_MULTIPLIER_VALUE
));
21140 rtc_time_to_tm(local_time
, &tm
);
21142 snprintf(str
, DEBUG_DUMP_TIME_BUF_LEN
, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS
,
21143 tm
.tm_year
- 100, tm
.tm_mon
+ 1, tm
.tm_mday
, tm
.tm_hour
, tm
.tm_min
,
21144 tm
.tm_sec
, (int)(curtime
.tv_usec
/NSEC_PER_USEC
));
21149 clear_debug_dump_time(char *str
)
21151 memset(str
, 0, DEBUG_DUMP_TIME_BUF_LEN
);
21153 #if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
21155 copy_debug_dump_time(char *dest
, char *src
)
21157 memcpy(dest
, src
, DEBUG_DUMP_TIME_BUF_LEN
);
21159 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
21161 #define KIRQ_PRINT_BUF_LEN 256
21164 dhd_print_kirqstats(dhd_pub_t
*dhd
, unsigned int irq_num
)
21166 unsigned long flags
= 0;
21167 struct irq_desc
*desc
;
21168 int i
; /* cpu iterator */
21169 struct bcmstrbuf strbuf
;
21170 char tmp_buf
[KIRQ_PRINT_BUF_LEN
];
21172 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
21173 desc
= irq_to_desc(irq_num
);
21175 DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__
));
21178 bcm_binit(&strbuf
, tmp_buf
, KIRQ_PRINT_BUF_LEN
);
21179 raw_spin_lock_irqsave(&desc
->lock
, flags
);
21180 bcm_bprintf(&strbuf
, "dhd irq %u:", irq_num
);
21181 for_each_online_cpu(i
)
21182 bcm_bprintf(&strbuf
, "%10u ",
21183 desc
->kstat_irqs
? *per_cpu_ptr(desc
->kstat_irqs
, i
) : 0);
21184 if (desc
->irq_data
.chip
) {
21185 if (desc
->irq_data
.chip
->name
)
21186 bcm_bprintf(&strbuf
, " %8s", desc
->irq_data
.chip
->name
);
21188 bcm_bprintf(&strbuf
, " %8s", "-");
21190 bcm_bprintf(&strbuf
, " %8s", "None");
21192 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
21193 if (desc
->irq_data
.domain
)
21194 bcm_bprintf(&strbuf
, " %d", (int)desc
->irq_data
.hwirq
);
21195 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
21196 bcm_bprintf(&strbuf
, " %-8s", irqd_is_level_type(&desc
->irq_data
) ? "Level" : "Edge");
21198 #endif /* LINUX VERSION > 3.1.0 */
21201 bcm_bprintf(&strbuf
, "-%-8s", desc
->name
);
21203 DHD_ERROR(("%s\n", strbuf
.origbuf
));
21204 raw_spin_unlock_irqrestore(&desc
->lock
, flags
);
21205 #endif /* LINUX VERSION > 2.6.28 */
21209 dhd_show_kirqstats(dhd_pub_t
*dhd
)
21211 unsigned int irq
= -1;
21213 dhdpcie_get_pcieirq(dhd
->bus
, &irq
);
21214 #endif /* BCMPCIE */
21216 irq
= ((wifi_adapter_info_t
*)dhd
->info
->adapter
)->irq_num
;
21217 #endif /* BCMSDIO */
21220 DHD_ERROR(("DUMP data kernel irq stats : \n"));
21221 #endif /* BCMPCIE */
21223 DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
21224 #endif /* BCMSDIO */
21225 dhd_print_kirqstats(dhd
, irq
);
21227 #ifdef BCMPCIE_OOB_HOST_WAKE
21228 irq
= dhdpcie_get_oob_irq_num(dhd
->bus
);
21230 DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
21231 dhd_print_kirqstats(dhd
, irq
);
21233 #endif /* BCMPCIE_OOB_HOST_WAKE */
21237 dhd_print_tasklet_status(dhd_pub_t
*dhd
)
21239 dhd_info_t
*dhdinfo
;
21242 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__
));
21246 dhdinfo
= dhd
->info
;
21249 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__
));
21253 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo
->tasklet
.state
));
21259 #define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
21260 #define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
21262 #define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
21263 #define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
21265 #define DHD_RING_MAGIC 0x20170910
21266 #define DHD_RING_IDX_INVALID 0xffffffff
21271 uint32 write_idx
; /* next write index, -1 : not started */
21272 uint32 read_idx
; /* next read index, -1 : not start */
21274 /* protected elements during serialization */
21275 int lock_idx
; /* start index of locked, element will not be overried */
21276 int lock_count
; /* number of locked, from lock idx */
21278 /* saved data elements */
21280 } dhd_fixed_ring_info_t
;
21285 struct mutex ring_sync
; /* pointer to mutex */
21287 dhd_fixed_ring_info_t fixed
;
21292 dhd_ring_get_hdr_size(void)
21294 return sizeof(dhd_ring_info_t
);
21298 dhd_ring_init(uint8
*buf
, uint32 buf_size
, uint32 elem_size
, uint32 elem_cnt
)
21300 dhd_ring_info_t
*ret_ring
;
21303 DHD_RING_ERR(("NO RING BUFFER\n"));
21306 if (buf_size
< dhd_ring_get_hdr_size() + elem_size
* elem_cnt
) {
21307 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
21311 ret_ring
= (dhd_ring_info_t
*)buf
;
21312 ret_ring
->type
= DHD_RING_TYPE_FIXED
;
21313 mutex_init(&ret_ring
->ring_sync
);
21314 ret_ring
->fixed
.read_idx
= DHD_RING_IDX_INVALID
;
21315 ret_ring
->fixed
.write_idx
= DHD_RING_IDX_INVALID
;
21316 ret_ring
->fixed
.lock_idx
= DHD_RING_IDX_INVALID
;
21317 ret_ring
->fixed
.elem
= buf
+ sizeof(dhd_ring_info_t
);
21318 ret_ring
->fixed
.elem_size
= elem_size
;
21319 ret_ring
->fixed
.elem_cnt
= elem_cnt
;
21320 ret_ring
->magic
= DHD_RING_MAGIC
;
21325 dhd_ring_deinit(void *_ring
)
21327 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21328 dhd_fixed_ring_info_t
*fixed
;
21333 if (ring
->magic
!= DHD_RING_MAGIC
) {
21337 mutex_destroy(&ring
->ring_sync
);
21338 fixed
= &ring
->fixed
;
21339 memset(fixed
->elem
, 0, fixed
->elem_size
* fixed
->elem_cnt
);
21340 fixed
->elem_size
= fixed
->elem_cnt
= 0;
21346 /* get counts between two indexes of ring buffer (internal only) */
21348 __dhd_fixed_ring_get_count(dhd_fixed_ring_info_t
*ring
, int start
, int end
)
21350 if (start
== DHD_RING_IDX_INVALID
|| end
== DHD_RING_IDX_INVALID
) {
21354 return (ring
->elem_cnt
+ end
- start
) % ring
->elem_cnt
+ 1;
21358 __dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t
*ring
)
21360 return __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, ring
->write_idx
);
21363 static inline void *
21364 __dhd_fixed_ring_get_first(dhd_fixed_ring_info_t
*ring
)
21366 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21369 return (uint8
*)ring
->elem
+ (ring
->elem_size
* ring
->read_idx
);
21373 __dhd_fixed_ring_free_first(dhd_fixed_ring_info_t
*ring
)
21377 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21378 DHD_RING_ERR(("EMPTY RING\n"));
21382 next_idx
= (ring
->read_idx
+ 1) % ring
->elem_cnt
;
21383 if (ring
->read_idx
== ring
->write_idx
) {
21385 ring
->read_idx
= ring
->write_idx
= DHD_RING_IDX_INVALID
;
21389 ring
->read_idx
= next_idx
;
21393 static inline void *
21394 __dhd_fixed_ring_get_last(dhd_fixed_ring_info_t
*ring
)
21396 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21399 return (uint8
*)ring
->elem
+ (ring
->elem_size
* ring
->write_idx
);
21402 static inline void *
21403 __dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t
*ring
)
21407 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21408 ring
->read_idx
= ring
->write_idx
= 0;
21409 return (uint8
*)ring
->elem
;
21412 /* check next index is not locked */
21413 tmp_idx
= (ring
->write_idx
+ 1) % ring
->elem_cnt
;
21414 if (ring
->lock_idx
== tmp_idx
) {
21418 ring
->write_idx
= tmp_idx
;
21419 if (ring
->write_idx
== ring
->read_idx
) {
21420 /* record is full, drop oldest one */
21421 ring
->read_idx
= (ring
->read_idx
+ 1) % ring
->elem_cnt
;
21424 return (uint8
*)ring
->elem
+ (ring
->elem_size
* ring
->write_idx
);
21427 static inline uint32
21428 __dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t
*ring
, void *ptr
, char *sig
)
21431 uint32 ret_idx
= (uint32
)DHD_RING_IDX_INVALID
;
21433 if (ptr
< ring
->elem
) {
21434 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig
, ptr
, ring
->elem
));
21437 diff
= (uint32
)((uint8
*)ptr
- (uint8
*)ring
->elem
);
21438 if (diff
% ring
->elem_size
!= 0) {
21439 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig
, ptr
, ring
->elem
));
21442 ret_idx
= diff
/ ring
->elem_size
;
21443 if (ret_idx
>= ring
->elem_cnt
) {
21444 DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring
->elem_cnt
, ret_idx
));
21449 static inline void *
21450 __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t
*ring
, void *prev
)
21454 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21455 DHD_RING_ERR(("EMPTY RING\n"));
21459 cur_idx
= __dhd_fixed_ring_ptr2idx(ring
, prev
, "NEXT");
21460 if (cur_idx
>= ring
->elem_cnt
) {
21464 if (cur_idx
== ring
->write_idx
) {
21465 /* no more new record */
21469 cur_idx
= (cur_idx
+ 1) % ring
->elem_cnt
;
21470 return (uint8
*)ring
->elem
+ ring
->elem_size
* cur_idx
;
21473 static inline void *
21474 __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t
*ring
, void *prev
)
21478 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21479 DHD_RING_ERR(("EMPTY RING\n"));
21482 cur_idx
= __dhd_fixed_ring_ptr2idx(ring
, prev
, "PREV");
21483 if (cur_idx
>= ring
->elem_cnt
) {
21486 if (cur_idx
== ring
->read_idx
) {
21487 /* no more new record */
21491 cur_idx
= (cur_idx
+ ring
->elem_cnt
- 1) % ring
->elem_cnt
;
21492 return (uint8
*)ring
->elem
+ ring
->elem_size
* cur_idx
;
21496 __dhd_fixed_ring_lock(dhd_fixed_ring_info_t
*ring
, void *first_ptr
, void *last_ptr
)
21500 uint32 ring_filled_cnt
;
21503 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21504 DHD_RING_ERR(("EMPTY RING\n"));
21509 first_idx
= __dhd_fixed_ring_ptr2idx(ring
, first_ptr
, "LCK FIRST");
21510 if (first_idx
>= ring
->elem_cnt
) {
21514 first_idx
= ring
->read_idx
;
21518 last_idx
= __dhd_fixed_ring_ptr2idx(ring
, last_ptr
, "LCK LAST");
21519 if (last_idx
>= ring
->elem_cnt
) {
21523 last_idx
= ring
->write_idx
;
21526 ring_filled_cnt
= __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, ring
->write_idx
);
21527 tmp_cnt
= __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, first_idx
);
21528 if (tmp_cnt
> ring_filled_cnt
) {
21529 DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21530 ring
->write_idx
, ring
->read_idx
, first_idx
));
21534 tmp_cnt
= __dhd_fixed_ring_get_count(ring
, ring
->read_idx
, last_idx
);
21535 if (tmp_cnt
> ring_filled_cnt
) {
21536 DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21537 ring
->write_idx
, ring
->read_idx
, last_idx
));
21541 ring
->lock_idx
= first_idx
;
21542 ring
->lock_count
= __dhd_fixed_ring_get_count(ring
, first_idx
, last_idx
);
21547 __dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t
*ring
)
21549 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21550 DHD_RING_ERR(("EMPTY RING\n"));
21554 ring
->lock_idx
= DHD_RING_IDX_INVALID
;
21555 ring
->lock_count
= 0;
21558 static inline void *
21559 __dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t
*ring
)
21561 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21562 DHD_RING_ERR(("EMPTY RING\n"));
21565 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21566 DHD_RING_ERR(("NO LOCK POINT\n"));
21569 return (uint8
*)ring
->elem
+ ring
->elem_size
* ring
->lock_idx
;
21572 static inline void *
21573 __dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t
*ring
)
21576 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21577 DHD_RING_ERR(("EMPTY RING\n"));
21580 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21581 DHD_RING_ERR(("NO LOCK POINT\n"));
21585 lock_last_idx
= (ring
->lock_idx
+ ring
->lock_count
- 1) % ring
->elem_cnt
;
21586 return (uint8
*)ring
->elem
+ ring
->elem_size
* lock_last_idx
;
21590 __dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t
*ring
)
21592 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21593 DHD_RING_ERR(("EMPTY RING\n"));
21596 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21597 DHD_RING_ERR(("NO LOCK POINT\n"));
21600 return ring
->lock_count
;
21604 __dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t
*ring
)
21606 if (ring
->read_idx
== DHD_RING_IDX_INVALID
) {
21607 DHD_RING_ERR(("EMPTY RING\n"));
21610 if (ring
->lock_idx
== DHD_RING_IDX_INVALID
) {
21611 DHD_RING_ERR(("NO LOCK POINT\n"));
21615 ring
->lock_count
--;
21616 if (ring
->lock_count
<= 0) {
21617 ring
->lock_idx
= DHD_RING_IDX_INVALID
;
21619 ring
->lock_idx
= (ring
->lock_idx
+ 1) % ring
->elem_cnt
;
21624 /* Get first element : oldest element */
21626 dhd_ring_get_first(void *_ring
)
21628 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21631 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21632 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21636 mutex_lock(&ring
->ring_sync
);
21637 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21638 ret
= __dhd_fixed_ring_get_first(&ring
->fixed
);
21640 mutex_unlock(&ring
->ring_sync
);
21644 /* Free first element : oldest element */
21646 dhd_ring_free_first(void *_ring
)
21648 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21650 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21651 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21655 mutex_lock(&ring
->ring_sync
);
21656 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21657 __dhd_fixed_ring_free_first(&ring
->fixed
);
21659 mutex_unlock(&ring
->ring_sync
);
21663 /* Get latest element */
21665 dhd_ring_get_last(void *_ring
)
21667 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21670 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21671 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21675 mutex_lock(&ring
->ring_sync
);
21676 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21677 ret
= __dhd_fixed_ring_get_last(&ring
->fixed
);
21679 mutex_unlock(&ring
->ring_sync
);
21683 /* Get next point can be written
21684 * will overwrite which doesn't read
21685 * will return NULL if next pointer is locked
21688 dhd_ring_get_empty(void *_ring
)
21690 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21693 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21694 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21698 mutex_lock(&ring
->ring_sync
);
21699 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21700 ret
= __dhd_fixed_ring_get_empty(&ring
->fixed
);
21702 mutex_unlock(&ring
->ring_sync
);
21707 dhd_ring_get_next(void *_ring
, void *cur
)
21709 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21712 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21713 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21717 mutex_lock(&ring
->ring_sync
);
21718 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21719 ret
= __dhd_fixed_ring_get_next(&ring
->fixed
, cur
);
21721 mutex_unlock(&ring
->ring_sync
);
21726 dhd_ring_get_prev(void *_ring
, void *cur
)
21728 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21731 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21732 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21736 mutex_lock(&ring
->ring_sync
);
21737 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21738 ret
= __dhd_fixed_ring_get_prev(&ring
->fixed
, cur
);
21740 mutex_unlock(&ring
->ring_sync
);
21745 dhd_ring_get_cur_size(void *_ring
)
21747 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21750 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21751 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21755 mutex_lock(&ring
->ring_sync
);
21756 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21757 cnt
= __dhd_fixed_ring_get_cur_size(&ring
->fixed
);
21759 mutex_unlock(&ring
->ring_sync
);
21763 /* protect element between lock_ptr and write_idx */
21765 dhd_ring_lock(void *_ring
, void *first_ptr
, void *last_ptr
)
21767 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21769 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21770 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21774 mutex_lock(&ring
->ring_sync
);
21775 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21776 __dhd_fixed_ring_lock(&ring
->fixed
, first_ptr
, last_ptr
);
21778 mutex_unlock(&ring
->ring_sync
);
21782 /* free all lock */
21784 dhd_ring_lock_free(void *_ring
)
21786 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21788 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21789 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21793 mutex_lock(&ring
->ring_sync
);
21794 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21795 __dhd_fixed_ring_lock_free(&ring
->fixed
);
21797 mutex_unlock(&ring
->ring_sync
);
21802 dhd_ring_lock_get_first(void *_ring
)
21804 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21807 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21808 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21812 mutex_lock(&ring
->ring_sync
);
21813 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21814 ret
= __dhd_fixed_ring_lock_get_first(&ring
->fixed
);
21816 mutex_unlock(&ring
->ring_sync
);
21821 dhd_ring_lock_get_last(void *_ring
)
21823 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21826 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21827 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21831 mutex_lock(&ring
->ring_sync
);
21832 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21833 ret
= __dhd_fixed_ring_lock_get_last(&ring
->fixed
);
21835 mutex_unlock(&ring
->ring_sync
);
21840 dhd_ring_lock_get_count(void *_ring
)
21842 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21843 int ret
= BCME_ERROR
;
21845 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21846 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21850 mutex_lock(&ring
->ring_sync
);
21851 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21852 ret
= __dhd_fixed_ring_lock_get_count(&ring
->fixed
);
21854 mutex_unlock(&ring
->ring_sync
);
21858 /* free first locked element */
21860 dhd_ring_lock_free_first(void *_ring
)
21862 dhd_ring_info_t
*ring
= (dhd_ring_info_t
*)_ring
;
21864 if (!ring
|| ring
->magic
!= DHD_RING_MAGIC
) {
21865 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__
));
21869 mutex_lock(&ring
->ring_sync
);
21870 if (ring
->type
== DHD_RING_TYPE_FIXED
) {
21871 __dhd_fixed_ring_lock_free_first(&ring
->fixed
);
21873 mutex_unlock(&ring
->ring_sync
);
21877 #ifdef DHD_DUMP_MNGR
21878 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
21879 #define DHD_VFS_INODE(dir) (dir->d_inode)
21881 #define DHD_VFS_INODE(dir) d_inode(dir)
21882 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
21884 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
21885 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
21887 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
21888 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
21891 dhd_file_delete(char *path
)
21893 struct path file_path
;
21895 struct dentry
*dir
;
21897 err
= kern_path(path
, 0, &file_path
);
21903 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
21904 !d_is_file(file_path
.dentry
) ||
21905 #if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
21906 d_really_is_negative(file_path
.dentry
)
21907 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
21908 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
21913 dir
= dget_parent(file_path
.dentry
);
21915 if (!IS_ERR(dir
)) {
21916 err
= DHD_VFS_UNLINK(dir
, file_path
.dentry
, NULL
);
21919 err
= PTR_ERR(dir
);
21923 path_put(&file_path
);
21926 DHD_ERROR(("Failed to delete file: %s error: %d\n", path
, err
));
21933 dhd_dump_file_manage_idx(dhd_dump_file_manage_t
*fm_ptr
, char *fname
)
21938 for (i
= 0; i
< DHD_DUMP_TYPE_COUNT_MAX
; i
++) {
21939 if (strlen(fm_ptr
->elems
[i
].type_name
) == 0) {
21943 if (!(strncmp(fname
, fm_ptr
->elems
[i
].type_name
, strlen(fname
)))) {
21949 if (fm_idx
== -1) {
21953 if (strlen(fm_ptr
->elems
[fm_idx
].type_name
) == 0) {
21954 strncpy(fm_ptr
->elems
[fm_idx
].type_name
, fname
, DHD_DUMP_TYPE_NAME_SIZE
);
21955 fm_ptr
->elems
[fm_idx
].type_name
[DHD_DUMP_TYPE_NAME_SIZE
- 1] = '\0';
21956 fm_ptr
->elems
[fm_idx
].file_idx
= 0;
21963 * dhd_dump_file_manage_enqueue - enqueue dump file path
21964 * and delete odest file if file count is max.
21967 dhd_dump_file_manage_enqueue(dhd_pub_t
*dhd
, char *dump_path
, char *fname
)
21971 dhd_dump_file_manage_t
*fm_ptr
;
21974 if (!dhd
|| !dhd
->dump_file_manage
) {
21975 DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
21976 __FUNCTION__
, dhd
, (dhd
? dhd
->dump_file_manage
: NULL
)));
21980 fm_ptr
= dhd
->dump_file_manage
;
21982 /* find file_manage idx */
21983 DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__
, fname
, dump_path
));
21984 if ((fm_idx
= dhd_dump_file_manage_idx(fm_ptr
, fname
)) < 0) {
21985 DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
21986 __FUNCTION__
, fname
));
21990 elem
= &fm_ptr
->elems
[fm_idx
];
21991 fp_idx
= elem
->file_idx
;
21992 DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
21993 __FUNCTION__
, fm_idx
, fp_idx
, elem
->file_path
[fp_idx
]));
21995 /* delete oldest file */
21996 if (strlen(elem
->file_path
[fp_idx
]) != 0) {
21997 if (dhd_file_delete(elem
->file_path
[fp_idx
]) < 0) {
21998 DHD_ERROR(("%s(): Failed to delete file: %s\n",
21999 __FUNCTION__
, elem
->file_path
[fp_idx
]));
22001 DHD_ERROR(("%s(): Successed to delete file: %s\n",
22002 __FUNCTION__
, elem
->file_path
[fp_idx
]));
22006 /* save dump file path */
22007 strncpy(elem
->file_path
[fp_idx
], dump_path
, DHD_DUMP_FILE_PATH_SIZE
);
22008 elem
->file_path
[fp_idx
][DHD_DUMP_FILE_PATH_SIZE
- 1] = '\0';
22010 /* change file index to next file index */
22011 elem
->file_idx
= (elem
->file_idx
+ 1) % DHD_DUMP_FILE_COUNT_MAX
;
22013 #endif /* DHD_DUMP_MNGR */
22015 #ifdef DHD_MAP_LOGGING
22016 /* Will be called from SMMU fault handler */
22018 dhd_debug_info_dump(void)
22020 dhd_pub_t
*dhdp
= (dhd_pub_t
*)g_dhd_pub
;
22021 uint32 irq
= (uint32
)-1;
22023 DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__
));
22024 dhdp
->smmu_fault_occurred
= TRUE
;
22026 /* Disable PCIe IRQ */
22027 dhdpcie_get_pcieirq(dhdp
->bus
, &irq
);
22028 if (irq
!= (uint32
)-1) {
22029 disable_irq_nosync(irq
);
22032 DHD_OS_WAKE_LOCK(dhdp
);
22033 dhd_prot_debug_info_print(dhdp
);
22034 osl_dma_map_dump(dhdp
->osh
);
22035 #ifdef DHD_MAP_PKTID_LOGGING
22036 dhd_pktid_logging_dump(dhdp
);
22037 #endif /* DHD_MAP_PKTID_LOGGING */
22038 #ifdef DHD_FW_COREDUMP
22039 /* Load the dongle side dump to host memory */
22040 dhdp
->memdump_enabled
= DUMP_MEMONLY
;
22041 dhdp
->memdump_type
= DUMP_TYPE_SMMU_FAULT
;
22042 dhd_bus_mem_dump(dhdp
);
22043 #endif /* DHD_FW_COREDUMP */
22044 DHD_OS_WAKE_UNLOCK(dhdp
);
22046 EXPORT_SYMBOL(dhd_debug_info_dump
);
22047 #endif /* DHD_MAP_LOGGING */
22049 dhd_get_host_whitelist_region(void *buf
, uint len
)
22051 dma_wl_addr_region_host_t
*host_reg
;
22054 if ((wlreg_len_h
== 0) && (wlreg_len_l
== 0)) {
22058 host_reg
= (dma_wl_addr_region_host_t
*)buf
;
22059 wl_end
= wlreg_len_h
+ wlreg_h
;
22060 wl_end
= (wl_end
& MASK_32_BITS
) << 32;
22062 wl_end
+= wlreg_len_l
;
22063 /* Now write whitelist region(s) */
22064 host_reg
->hreg_start
.addr_low
= wlreg_l
;
22065 host_reg
->hreg_start
.addr_high
= wlreg_h
;
22066 host_reg
->hreg_end
.addr_low
= EXTRACT_LOW32(wl_end
);
22067 host_reg
->hreg_end
.addr_high
= EXTRACT_HIGH32(wl_end
);
22071 #ifdef SUPPORT_SET_TID
22073 * Set custom TID value for UDP frame based on UID value.
22074 * This will be triggered by android private command below.
22075 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
22076 * Mode 0(SET_TID_OFF) : Disable changing TID
22077 * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames
22078 * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
22081 dhd_set_tid_based_on_uid(dhd_pub_t
*dhdp
, void *pkt
)
22083 struct ether_header
*eh
= NULL
;
22084 struct sock
*sk
= NULL
;
22085 uint8
*pktdata
= NULL
;
22086 uint8
*ip_hdr
= NULL
;
22091 if (dhdp
->tid_mode
== SET_TID_OFF
) {
22095 pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pkt
);
22096 eh
= (struct ether_header
*) pktdata
;
22097 ip_hdr
= (uint8
*)eh
+ ETHER_HDR_LEN
;
22099 if (IPV4_PROT(ip_hdr
) != IP_PROT_UDP
) {
22103 cur_prio
= PKTPRIO(pkt
);
22104 prio
= dhdp
->target_tid
;
22105 uid
= dhdp
->target_uid
;
22107 if ((cur_prio
== prio
) ||
22108 (cur_prio
!= PRIO_8021D_BE
)) {
22112 sk
= ((struct sk_buff
*)(pkt
))->sk
;
22114 if ((dhdp
->tid_mode
== SET_TID_ALL_UDP
) ||
22115 (sk
&& (uid
== __kuid_val(sock_i_uid(sk
))))) {
22116 PKTSETPRIO(pkt
, prio
);
22119 #endif /* SUPPORT_SET_TID */
22120 #ifdef DHDTCPSYNC_FLOOD_BLK
22121 static void dhd_blk_tsfl_handler(struct work_struct
* work
)
22123 dhd_if_t
*ifp
= NULL
;
22124 dhd_pub_t
*dhdp
= NULL
;
22125 /* Ignore compiler warnings due to -Werror=cast-qual */
22126 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22127 #pragma GCC diagnostic push
22128 #pragma GCC diagnostic ignored "-Wcast-qual"
22129 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22130 ifp
= container_of(work
, dhd_if_t
, blk_tsfl_work
);
22131 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22132 #pragma GCC diagnostic pop
22133 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22135 dhdp
= &ifp
->info
->pub
;
22137 if ((dhdp
->op_mode
& DHD_FLAG_P2P_GO_MODE
)||
22138 (dhdp
->op_mode
& DHD_FLAG_HOSTAP_MODE
)) {
22139 DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
22140 wl_cfg80211_del_all_sta(ifp
->net
, WLAN_REASON_UNSPECIFIED
);
22141 } else if ((dhdp
->op_mode
& DHD_FLAG_P2P_GC_MODE
)||
22142 (dhdp
->op_mode
& DHD_FLAG_STA_MODE
)) {
22143 DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
22144 wl_cfg80211_disassoc(ifp
->net
);
22149 void dhd_reset_tcpsync_info_by_ifp(dhd_if_t
*ifp
)
22151 ifp
->tsync_rcvd
= 0;
22152 ifp
->tsyncack_txed
= 0;
22153 ifp
->last_sync
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
22155 void dhd_reset_tcpsync_info_by_dev(struct net_device
*dev
)
22157 dhd_if_t
*ifp
= NULL
;
22159 ifp
= DHD_DEV_IFP(dev
);
22162 ifp
->tsync_rcvd
= 0;
22163 ifp
->tsyncack_txed
= 0;
22164 ifp
->last_sync
= DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC
);
22167 #endif /* DHDTCPSYNC_FLOOD_BLK */