wireless: fix all kinds of warnings
[exynos8895/android_kernel_samsung_universal8895] drivers/net/wireless/bcmdhd4361/dhd_linux.c
/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 796863 2018-12-27 07:39:27Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
#ifdef DHD_DUMP_MNGR
#include <linux/namei.h>
#endif /* DHD_DUMP_MNGR */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <dhd_linux_priv.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <bcmiov.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>

#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif // endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif // endif
#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
#ifdef WL_BAM
#include <wl_bam.h>
#endif /* WL_BAM */
#endif /* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif // endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif // endif

#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */

#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */

#ifdef AMPDU_VO_ENABLE
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */

#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */

#define IP_PROT_RESERVED 0xFF

#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct * work);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */

#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif // endif

#if defined(DHD_LB)
#if defined(DHD_LB_RXP)
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
#endif /* DHD_LB_RXP */
#if defined(DHD_LB_TXP)
static void dhd_lb_tx_handler(unsigned long data);
static void dhd_tx_dispatcher_work(struct work_struct * work);
static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
#endif /* DHD_LB_TXP */
#endif /* DHD_LB */

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
#endif // endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */

#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif // endif

#include <wl_android.h>

/* Maximum STA per radio */
#define DHD_MAX_STA 32

#ifdef DHD_EVENT_LOG_FILTER
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */

/*
 * Start of Host DMA whitelist region.
 */
uint32 wlreg_l = 0;
uint32 wlreg_h = 0;
module_param(wlreg_l, uint, 0644);
module_param(wlreg_h, uint, 0644);

/*
 * Size of the whitelist region. The dongle will allow DMA only within wlreg to
 * wlreg+wlreg_len. If the length of the whitelist region is zero, the host
 * will not program the whitelist region into the dongle.
 */
uint32 wlreg_len_h = 0;
uint32 wlreg_len_l = 0;

module_param(wlreg_len_l, uint, 0644);
module_param(wlreg_len_h, uint, 0644);
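/* Illustrative usage (values hypothetical, assuming the _l/_h pairs are the
 * low/high 32-bit halves of 64-bit base and length values):
 *   insmod dhd.ko wlreg_l=0x00000000 wlreg_h=0x8 wlreg_len_l=0x00100000 wlreg_len_h=0
 * would describe a 1 MB DMA whitelist window starting at the 64-bit address
 * formed from wlreg_h:wlreg_l.
 */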

const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
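/* Worked example (standard 802.1D/WMM numbering assumed): priority 6 (voice)
 * maps via prio2fifo[6] == 3 to wme_fifo2ac[3] == 3 (AC_VO), while priority 0
 * (best effort) maps via prio2fifo[0] == 1 to wme_fifo2ac[1] == 1 (AC_BE).
 */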

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* Make sure we don't register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* Make sure we don't register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(struct work_struct *work_data);
#endif // endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */

#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */

#include <dhd_bus.h>

#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */

#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif // endif
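/* Worked example (numbers illustrative): with net->mtu == 1500,
 * net->hard_header_len == 14 (Ethernet) and a hypothetical bus header of
 * dhd->pub.hdrlen == 12, the Rx buffer is 1526 bytes, or 1654 bytes when
 * PROP_TXSTATUS reserves an extra 128 bytes for wlfc signaling.
 */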

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
	return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend		register_pre_suspend
#define unregister_early_suspend	unregister_pre_suspend
#define early_suspend			pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN	50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
#include <linux/nl80211.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */

#if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
	defined(ARGOS_NOTIFY_CB)
/* ARGOS notifier data */
static struct notifier_block argos_wifi; /* STA */
static struct notifier_block argos_p2p; /* P2P */
argos_rps_ctrl argos_rps_ctrl_data;
#ifdef DYNAMIC_MUMIMO_CONTROL
argos_mumimo_ctrl argos_mumimo_ctrl_data;
#ifdef CONFIG_SPLIT_ARGOS_SET
static struct notifier_block argos_mimo; /* STA */
#endif /* CONFIG_SPLIT_ARGOS_SET */
#endif /* DYNAMIC_MUMIMO_CONTROL */
#endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */

#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */

#ifdef DHD_LOG_DUMP

struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];

/* Only the headers for the log dump ring buffers are stored in this array;
 * headers for sections like 'dhd dump' and 'ext trap' are not, because those
 * sections are not log ring buffers.
 */
dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
	{GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
	{PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
	{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};

static int dld_buf_size[DLD_BUFFER_NUM] = {
	LOG_DUMP_GENERAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_GENERAL */
	LOG_DUMP_PRESERVE_MAX_BUFSIZE,	/* DLD_BUF_TYPE_PRESERVE */
	LOG_DUMP_SPECIAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_SPECIAL */
};
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH	"/system/bin/wldu"
static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
#endif /* DHD_DEBUG_UART */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,
};

#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */

dhd_pub_t *g_dhd_pub = NULL;

#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
#endif /* defined(BT_OVER_SDIO) */

#ifdef SHOW_LOGTRACE
static int dhd_trace_open_proc(struct inode *inode, struct file *file);
ssize_t dhd_trace_read_proc(struct file *file, char *buffer, size_t tt, loff_t *loff);

static const struct file_operations proc_file_fops = {
	.read = dhd_trace_read_proc,
	.open = dhd_trace_open_proc,
	.release = seq_release,
};
#endif // endif

#ifdef WL_STATIC_IF
bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#endif /* WL_STATIC_IF */

atomic_t exit_in_progress = ATOMIC_INIT(0);

static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
static struct sock *nl_to_event_sk = NULL;
int sender_pid = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
struct netlink_kernel_cfg dhd_netlink_cfg = {
	.groups = 1,
	.input = dhd_process_daemon_msg,
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
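/* Note (sketch of intent): on kernels >= 3.6, netlink_kernel_create() takes a
 * struct netlink_kernel_cfg; dhd_netlink_cfg is expected to be consumed by the
 * to-notifier socket setup (dhd_create_to_notifier_skt(), later in this file),
 * so dhd_process_daemon_msg() becomes the input handler for every netlink
 * message the dhd daemon sends. The protocol number is driver-specific.
 */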

#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif // endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware, chip and chip revision info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) */

#ifdef DHD_LOG_DUMP
int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
module_param(logdump_max_filesize, int, 0644);
int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
module_param(logdump_max_bufsize, int, 0644);
int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
int logdump_periodic_flush = FALSE;
module_param(logdump_periodic_flush, int, 0644);
#ifdef DEBUGABILITY_ECNTRS_LOGGING
int logdump_ecntr_enable = TRUE;
#else
int logdump_ecntr_enable = FALSE;
#endif /* DEBUGABILITY_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
#endif /* DHD_LOG_DUMP */

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef WL_NATOE
static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
#endif /* WL_NATOE */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

/* Error bits */
module_param(dhd_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */
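/* Illustrative module load (paths hypothetical):
 *   insmod dhd.ko firmware_path=/vendor/firmware/fw_bcmdhd.bin \
 *                 nvram_path=/vendor/etc/wifi/nvram.txt
 * When left empty, the driver typically falls back to its compiled-in
 * default paths.
 */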

/* wl event forwarding */
#ifdef WL_EVENT_ENAB
uint wl_event_enable = true;
#else
uint wl_event_enable = false;
#endif /* WL_EVENT_ENAB */
module_param(wl_event_enable, uint, 0660);

/* logtrace packet forwarding */
#ifdef LOGTRACE_PKT_SENDUP
uint logtrace_pkt_sendup = true;
#else
uint logtrace_pkt_sendup = false;
#endif /* LOGTRACE_PKT_SENDUP */
module_param(logtrace_pkt_sendup, uint, 0660);

/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif // endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = TRUE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */

#ifdef DHD_MSI_SUPPORT
uint enable_msi = TRUE;
module_param(enable_msi, uint, 0);
#endif /* DHD_MSI_SUPPORT */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);

extern uint dma_ring_indices;
module_param(dma_ring_indices, uint, 0644);

extern bool h2d_phase;
module_param(h2d_phase, bool, 0644);
extern bool force_trap_bad_h2d_phase;
module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_DHCP_DUMP
struct bootp_fmt {
	struct iphdr ip_header;
	struct udphdr udp_header;
	uint8 op;
	uint8 htype;
	uint8 hlen;
	uint8 hops;
	uint32 transaction_id;
	uint16 secs;
	uint16 flags;
	uint32 client_ip;
	uint32 assigned_ip;
	uint32 server_ip;
	uint32 relay_ip;
	uint8 hw_address[16];
	uint8 server_name[64];
	uint8 file_name[128];
	uint8 options[312];
};

static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
static const char dhcp_ops[][10] = {
	"NA", "REQUEST", "REPLY"
};
static const char dhcp_types[][10] = {
	"NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
};
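/* These tables follow RFC 951/2131 numbering: the BOOTP 'op' field
 * (1 = BOOTREQUEST, 2 = BOOTREPLY) indexes dhcp_ops, and the value of DHCP
 * option 53 (message type, 1 = DISCOVER ... 8 = INFORM), located after the
 * 4-byte magic cookie 99.130.83.99 in options[], indexes dhcp_types.
 */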
static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_DHCP_DUMP */

#ifdef DHD_ICMP_DUMP
#include <net/icmp.h>
static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_ICMP_DUMP */

#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#else
static char *logstrs_path = "/installmedia/logstrs.bin";
char *st_str_file_path = "/installmedia/rtecdc.bin";
static char *map_file_path = "/installmedia/rtecdc.map";
static char *rom_st_str_file_path = "/installmedia/roml.bin";
static char *rom_map_file_path = "/installmedia/roml.map";
#endif // endif
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */

#ifdef D2H_MINIDUMP
void dhd_d2h_minidump(dhd_pub_t *dhdp);
#endif /* D2H_MINIDUMP */

#ifdef DHDTCPSYNC_FLOOD_BLK
extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#if defined(DHD_LB)

static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
	/* Default CPU allocation for the jobs */
	atomic_set(&dhd->rx_napi_cpu, 1);
	atomic_set(&dhd->rx_compl_cpu, 2);
	atomic_set(&dhd->tx_compl_cpu, 2);
	atomic_set(&dhd->tx_cpu, 2);
	atomic_set(&dhd->net_tx_cpu, 0);
}

static void
dhd_cpumasks_deinit(dhd_info_t *dhd)
{
	free_cpumask_var(dhd->cpumask_curr_avail);
	free_cpumask_var(dhd->cpumask_primary);
	free_cpumask_var(dhd->cpumask_primary_new);
	free_cpumask_var(dhd->cpumask_secondary);
	free_cpumask_var(dhd->cpumask_secondary_new);
}

static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
	int id;
	uint32 cpus, num_cpus = num_possible_cpus();
	int ret = 0;

	DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));

	if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
		!alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
		!alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
		!alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
		!alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
		DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
		ret = -ENOMEM;
		goto fail;
	}

	cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
	cpumask_clear(dhd->cpumask_primary);
	cpumask_clear(dhd->cpumask_secondary);

	if (num_cpus > 32) {
		DHD_ERROR(("%s number of cpus must not exceed 32, %d is too big\n",
			__FUNCTION__, num_cpus));
		ASSERT(0);
	}

	cpus = DHD_LB_PRIMARY_CPUS;
	for (id = 0; id < num_cpus; id++) {
		if (isset(&cpus, id))
			cpumask_set_cpu(id, dhd->cpumask_primary);
	}

	cpus = DHD_LB_SECONDARY_CPUS;
	for (id = 0; id < num_cpus; id++) {
		if (isset(&cpus, id))
			cpumask_set_cpu(id, dhd->cpumask_secondary);
	}

	return ret;
fail:
	dhd_cpumasks_deinit(dhd);
	return ret;
}

/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups:
 * Primary Set - A CPU mask that carries the First Choice CPUs.
 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of jobs that need to be assigned to the CPUs,
 * from one of the above mentioned CPU groups. The jobs are:
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, Rx) - compl_cpu
 *
 * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line, the CPU candidacy algorithm is triggered. The candidacy
 * algorithm tries to pick the first available non-boot CPU (i.e. not CPU0)
 * for napi_cpu. If more processors are free, it assigns one to compl_cpu.
 * It also tries to ensure, as much as possible, that napi_cpu and compl_cpu
 * are not on the same CPU.
 *
 * By design, both Tx and Rx completion jobs run on the same CPU core, as this
 * allows Tx completion skbs to be released into a local free pool from which
 * the Rx buffer posts can be serviced. It is important to note that a Tx
 * packet may not have a large enough buffer for Rx posting.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
	uint32 tx_cpu = 0; /* cpu selected for tx processing job */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* In case there are no more CPUs, do completions & Tx in same CPU */
		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = tx_cpu;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (tx_cpu == 0) {
			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for tx processing, choose CPU 0 */
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		napi_cpu = 1;
		compl_cpu = 0;
		tx_cpu = 2;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);
	ASSERT(tx_cpu < nr_cpu_ids);

	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	atomic_set(&dhd->tx_cpu, tx_cpu);

	return;
}

/*
 * Function to handle CPU Hotplug notifications.
 * One of the tasks it does is to trigger the CPU Candidacy algorithm
 * for load balancing.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned long int cpu = (unsigned long int)hcpu;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
		DHD_INFO(("%s(): LB data is not initialized yet.\n",
			__FUNCTION__));
		return NOTIFY_BAD;
	}

	switch (action)
	{
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;

		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;
		default:
			break;
	}

	return NOTIFY_OK;
}

#if defined(DHD_LB_STATS)
void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	int i, j, num_cpus = num_possible_cpus();
	int alloc_size = sizeof(uint32) * num_cpus;

	if (dhdp == NULL) {
		DHD_ERROR(("%s(): Invalid argument, dhd pub pointer is NULL\n",
			__FUNCTION__));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);

	dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->napi_percpu_run_cnt) {
		DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);

	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);

	dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->rxc_percpu_run_cnt) {
		DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);

	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);

	dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->txc_percpu_run_cnt) {
		DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);

	dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->cpu_online_cnt) {
		DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);

	dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->cpu_offline_cnt) {
		DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);

	dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->txp_percpu_run_cnt) {
		DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);

	dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->tx_start_percpu_run_cnt) {
		DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);

	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->napi_rx_hist[j]) {
			DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
		}
	}
#ifdef DHD_LB_TXC
	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->txc_hist[j]) {
			DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
		}
	}
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->rxc_hist[j]) {
			DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
		}
	}
#endif /* DHD_LB_RXC */
	return;
}

void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	int j, num_cpus = num_possible_cpus();
	int alloc_size = sizeof(uint32) * num_cpus;

	if (dhdp == NULL) {
		DHD_ERROR(("%s(): Invalid argument, dhd pub pointer is NULL\n",
			__FUNCTION__));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	if (dhd->napi_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
		dhd->napi_percpu_run_cnt = NULL;
	}
	if (dhd->rxc_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
		dhd->rxc_percpu_run_cnt = NULL;
	}
	if (dhd->txc_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
		dhd->txc_percpu_run_cnt = NULL;
	}
	if (dhd->cpu_online_cnt) {
		MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
		dhd->cpu_online_cnt = NULL;
	}
	if (dhd->cpu_offline_cnt) {
		MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
		dhd->cpu_offline_cnt = NULL;
	}

	if (dhd->txp_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
		dhd->txp_percpu_run_cnt = NULL;
	}
	if (dhd->tx_start_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
		dhd->tx_start_percpu_run_cnt = NULL;
	}

	for (j = 0; j < HIST_BIN_SIZE; j++) {
		if (dhd->napi_rx_hist[j]) {
			MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
			dhd->napi_rx_hist[j] = NULL;
		}
#ifdef DHD_LB_TXC
		if (dhd->txc_hist[j]) {
			MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
			dhd->txc_hist[j] = NULL;
		}
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
		if (dhd->rxc_hist[j]) {
			MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
			dhd->rxc_hist[j] = NULL;
		}
#endif /* DHD_LB_RXC */
	}

	return;
}

static void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
	struct bcmstrbuf *strbuf, uint32 **hist)
{
	int i, j;
	uint32 *per_cpu_total;
	uint32 total = 0;
	uint32 num_cpus = num_possible_cpus();

	per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
	if (!per_cpu_total) {
		DHD_ERROR(("%s(): per_cpu_total malloc failed \n", __FUNCTION__));
		return;
	}
	bzero(per_cpu_total, sizeof(uint32) * num_cpus);

	bcm_bprintf(strbuf, "CPU: \t\t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%d\t", i);
	bcm_bprintf(strbuf, "\nBin\n");

	for (i = 0; i < HIST_BIN_SIZE; i++) {
		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
		for (j = 0; j < num_cpus; j++) {
			bcm_bprintf(strbuf, "%d\t", hist[i][j]);
		}
		bcm_bprintf(strbuf, "\n");
	}
	bcm_bprintf(strbuf, "Per CPU Total \t");
	total = 0;
	for (i = 0; i < num_cpus; i++) {
		for (j = 0; j < HIST_BIN_SIZE; j++) {
			per_cpu_total[i] += (hist[j][i] * (1<<j));
		}
		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
		total += per_cpu_total[i];
	}
	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);

	if (per_cpu_total) {
		MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
		per_cpu_total = NULL;
	}
	return;
}
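/* Note on the totals above: bin j counts events whose per-event count rounded
 * up to 2^j, so per_cpu_total[i] = sum over j of hist[j][i] * 2^j is an
 * upper-bound estimate (within a factor of two) of the packets processed on
 * CPU i, not an exact count.
 */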

static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
{
	int i, num_cpus = num_possible_cpus();

	bcm_bprintf(strbuf, "CPU: \t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%d\t", i);
	bcm_bprintf(strbuf, "\n");

	bcm_bprintf(strbuf, "Val: \t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%u\t", *(p+i));
	bcm_bprintf(strbuf, "\n");
	return;
}
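/* Illustrative output for a hypothetical 4-CPU system:
 *   CPU: 	0	1	2	3
 *   Val: 	12	340	0	7
 */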

void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

	bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);

#ifdef DHD_LB_RXP
	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */

#ifdef DHD_LB_TXC
	bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */

#ifdef DHD_LB_TXP
	bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);

	bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
#endif /* DHD_LB_TXP */
}

/* Given a number 'n', returns 'm', the smallest power of 2 that is >= n
 * (n itself when n is already a power of 2).
 */
static inline uint32 next_larger_power2(uint32 num)
{
	num--;
	num |= (num >> 1);
	num |= (num >> 2);
	num |= (num >> 4);
	num |= (num >> 8);
	num |= (num >> 16);

	return (num + 1);
}
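/* Worked example of the bit-smearing trick above: for num = 33, num-- gives
 * 32 (0b100000); the shift/OR cascade propagates the top set bit into every
 * lower position, yielding 0b111111 = 63; adding 1 gives 64. For an exact
 * power of 2 such as 32, the decrement first makes it 31, so 32 is returned.
 */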

static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
{
	uint32 bin_power;
	uint32 *p;
	bin_power = next_larger_power2(count);

	switch (bin_power) {
		case 1: p = bin[0] + cpu; break;
		case 2: p = bin[1] + cpu; break;
		case 4: p = bin[2] + cpu; break;
		case 8: p = bin[3] + cpu; break;
		case 16: p = bin[4] + cpu; break;
		case 32: p = bin[5] + cpu; break;
		case 64: p = bin[6] + cpu; break;
		case 128: p = bin[7] + cpu; break;
		default : p = bin[8] + cpu; break;
	}

	*p = *p + 1;
	return;
}
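/* Example: count = 12 rounds up to bin_power = 16, so the tally for bin[4]
 * on the calling CPU is incremented; any count above 128 lands in the
 * catch-all bin[8]. This assumes HIST_BIN_SIZE is at least 9.
 */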

extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}

extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}
#endif /* DHD_LB_STATS */

#endif /* DHD_LB */

#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT()		do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)	do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)	do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
#else
uint dhd_roam_disable = 0;
#endif /* BCMCCX */

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif // endif

static uint pcie_txs_metadata_enable = 0;	/* Enable TX status metadata report */
module_param(pcie_txs_metadata_enable, int, 0);

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */

#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */

#if defined(BCMSUP_4WAY_HANDSHAKE)
/* Use the in-dongle supplicant for the 4-way handshake */
#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
/* Enable idsup by default (if supported in fw) */
uint dhd_use_idsup = 1;
#else
uint dhd_use_idsup = 0;
#endif /* WLFBT || WL_ENABLE_IDSUP */
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */

/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);

#ifdef ECOUNTER_PERIODIC_DISABLE
uint enable_ecounter = FALSE;
#else
uint enable_ecounter = TRUE;
#endif // endif
module_param(enable_ecounter, uint, 0);

extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);

#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
#ifdef DHD_EXPORT_CNTL_FILE
int pmmode_val;
#endif /* DHD_EXPORT_CNTL_FILE */
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* DHD_PM_CONTROL_FROM_FILE */

#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		dhd_wlfc_resume(&dhdinfo->pub);
	}
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif // endif

	return ret;
}

/* Make sure we don't register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

/** priv_link is the link between netdev and the dhd_if and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
	int ifidx; /* interface index */
	void * lkup;
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE	(sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)	((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
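/* Typical accessor usage (sketch): a netdev callback recovers the driver
 * state cached in the net_device private area, e.g.
 *   dhd_info_t *dhd = DHD_DEV_INFO(net);
 *   int ifidx = DHD_DEV_IFIDX(net);
 * after dhd_dev_priv_save() (below) has populated the fields.
 */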

#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
#endif /* defined(DHD_OF_SUPPORT) */
/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
	int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}

/* Return interface pointer */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);

	if (ifidx >= DHD_MAX_IFS)
		return NULL;

	return dhdp->info->iflist[ifidx];
}

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains come from reducing branch conditionals, instruction
 * parallelism, dual issue, reduced load shadows, and availability of larger
 * pipelines. Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object
 * pointer is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif // endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);

/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flow rings' queues belonging to this sta.
	 * Packets already in the flow ring itself will be flushed later.
	 */
1712 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1713 uint16 flowid = sta->flowid[prio];
1714
1715 if (flowid != FLOWID_INVALID) {
1716 unsigned long flags;
1717 flow_ring_node_t * flow_ring_node;
1718
1719#ifdef DHDTCPACK_SUPPRESS
1720			/* Clean tcp_ack_info_tbl in order to prevent access to a flushed
1721			 * packet when a new packet comes in from the network stack.
1722			 */
1723 dhd_tcpack_info_tbl_clean(dhdp);
1724#endif /* DHDTCPACK_SUPPRESS */
1725
1726 flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1727 if (flow_ring_node) {
1728 flow_queue_t *queue = &flow_ring_node->queue;
1729
1730 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1731 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1732
1733 if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1734 void * pkt;
1735 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
1736 NULL) {
1737 PKTFREE(dhdp->osh, pkt, TRUE);
1738 }
1739 }
1740
1741 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1742 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1743 }
1744 }
1745
1746 sta->flowid[prio] = FLOWID_INVALID;
1747 }
1748
1749 id16_map_free(dhdp->staid_allocator, sta->idx);
1750 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1751 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1752 sta->ifidx = DHD_BAD_IF;
1753 bzero(sta->ea.octet, ETHER_ADDR_LEN);
1754 INIT_LIST_HEAD(&sta->list);
1755 sta->idx = ID16_INVALID; /* implying free */
1756}
1757
1758/** Allocate a dhd_sta object from the dhd pool. */
1759static dhd_sta_t *
1760dhd_sta_alloc(dhd_pub_t * dhdp)
1761{
1762 uint16 idx;
1763 dhd_sta_t * sta;
1764 dhd_sta_pool_t * sta_pool;
1765
1766 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1767
1768 idx = id16_map_alloc(dhdp->staid_allocator);
1769 if (idx == ID16_INVALID) {
1770 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1771 return DHD_STA_NULL;
1772 }
1773
1774 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1775 sta = &sta_pool[idx];
1776
1777 ASSERT((sta->idx == ID16_INVALID) &&
1778 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1779
1780 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1781
1782 sta->idx = idx; /* implying allocated */
1783
1784 return sta;
1785}
1786
1787/** Delete all STAs in an interface's STA list. */
1788static void
1789dhd_if_del_sta_list(dhd_if_t *ifp)
1790{
1791 dhd_sta_t *sta, *next;
1792 unsigned long flags;
1793
1794 DHD_IF_STA_LIST_LOCK(ifp, flags);
1795#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1796#pragma GCC diagnostic push
1797#pragma GCC diagnostic ignored "-Wcast-qual"
1798#endif // endif
1799 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1800 list_del(&sta->list);
1801 dhd_sta_free(&ifp->info->pub, sta);
1802 }
1803#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1804#pragma GCC diagnostic pop
1805#endif // endif
1806 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1807
1808 return;
1809}
1810
1811/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1812static void
1813dhd_if_flush_sta(dhd_if_t * ifp)
1814{
1815}
1816
1817/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1818static int
1819dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1820{
1821 int idx, prio, sta_pool_memsz;
1822 dhd_sta_t * sta;
1823 dhd_sta_pool_t * sta_pool;
1824 void * staid_allocator;
1825
1826 ASSERT(dhdp != (dhd_pub_t *)NULL);
1827 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1828
1829 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1830 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1831 if (staid_allocator == NULL) {
1832 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1833 return BCME_ERROR;
1834 }
1835
1836	/* Pre-allocate a pool of dhd_sta objects (one extra). */
1837 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1838 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1839 if (sta_pool == NULL) {
1840 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1841 id16_map_fini(dhdp->osh, staid_allocator);
1842 return BCME_ERROR;
1843 }
1844
1845 dhdp->sta_pool = sta_pool;
1846 dhdp->staid_allocator = staid_allocator;
1847
1848 /* Initialize all sta(s) for the pre-allocated free pool. */
1849 bzero((uchar *)sta_pool, sta_pool_memsz);
1850 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1851 sta = &sta_pool[idx];
1852 sta->idx = id16_map_alloc(staid_allocator);
1853 ASSERT(sta->idx <= max_sta);
1854 }
1855
1856 /* Now place them into the pre-allocated free pool. */
1857 for (idx = 1; idx <= max_sta; idx++) {
1858 sta = &sta_pool[idx];
1859 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1860 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1861 }
1862 dhd_sta_free(dhdp, sta);
1863 }
1864
1865 return BCME_OK;
1866}
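/* Editorial note on the two loops above: the first loop drains the id16
 * allocator so that every pool entry briefly holds a valid station id; the
 * second loop then runs each entry through dhd_sta_free(), which returns
 * the id and leaves the entry in the canonical free state (idx ==
 * ID16_INVALID, ifp == DHD_IF_NULL, all flowids FLOWID_INVALID).
 */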
1867
1868/** Destruct the pool of dhd_sta_t objects.
1869 * Caller must ensure that no STA objects are currently associated with an if.
1870 */
1871static void
1872dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1873{
1874 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1875
1876 if (sta_pool) {
1877 int idx;
1878 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1879 for (idx = 1; idx <= max_sta; idx++) {
1880 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1881 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1882 }
1883 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1884 dhdp->sta_pool = NULL;
1885 }
1886
1887 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1888 dhdp->staid_allocator = NULL;
1889}
1890
1891/* Clear the pool of dhd_sta_t objects for built-in type driver */
1892static void
1893dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1894{
1895 int idx, prio, sta_pool_memsz;
1896 dhd_sta_t * sta;
1897 dhd_sta_pool_t * sta_pool;
1898 void *staid_allocator;
1899
1900 if (!dhdp) {
1901 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1902 return;
1903 }
1904
1905 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1906 staid_allocator = dhdp->staid_allocator;
1907
1908 if (!sta_pool) {
1909 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1910 return;
1911 }
1912
1913 if (!staid_allocator) {
1914 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1915 return;
1916 }
1917
1918 /* clear free pool */
1919 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1920 bzero((uchar *)sta_pool, sta_pool_memsz);
1921
1922 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1923 id16_map_clear(staid_allocator, max_sta, 1);
1924
1925 /* Initialize all sta(s) for the pre-allocated free pool. */
1926 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1927 sta = &sta_pool[idx];
1928 sta->idx = id16_map_alloc(staid_allocator);
1929 ASSERT(sta->idx <= max_sta);
1930 }
1931 /* Now place them into the pre-allocated free pool. */
1932 for (idx = 1; idx <= max_sta; idx++) {
1933 sta = &sta_pool[idx];
1934 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1935 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1936 }
1937 dhd_sta_free(dhdp, sta);
1938 }
1939}
1940
1941/** Find STA with MAC address ea in an interface's STA list. */
1942dhd_sta_t *
1943dhd_find_sta(void *pub, int ifidx, void *ea)
1944{
1945 dhd_sta_t *sta;
1946 dhd_if_t *ifp;
1947 unsigned long flags;
1948
1949 ASSERT(ea != NULL);
1950 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1951 if (ifp == NULL)
1952 return DHD_STA_NULL;
1953
1954 DHD_IF_STA_LIST_LOCK(ifp, flags);
1955#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1956#pragma GCC diagnostic push
1957#pragma GCC diagnostic ignored "-Wcast-qual"
1958#endif // endif
1959 list_for_each_entry(sta, &ifp->sta_list, list) {
1960 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1961 DHD_INFO(("%s: Found STA " MACDBG "\n",
1962 __FUNCTION__, MAC2STRDBG((char *)ea)));
1963 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1964 return sta;
1965 }
1966 }
1967#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1968#pragma GCC diagnostic pop
1969#endif // endif
1970 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1971
1972 return DHD_STA_NULL;
1973}
1974
1975/** Add STA into the interface's STA list. */
1976dhd_sta_t *
1977dhd_add_sta(void *pub, int ifidx, void *ea)
1978{
1979 dhd_sta_t *sta;
1980 dhd_if_t *ifp;
1981 unsigned long flags;
1982
1983 ASSERT(ea != NULL);
1984 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1985 if (ifp == NULL)
1986 return DHD_STA_NULL;
1987
1988 if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
1989		DHD_ERROR(("%s: Serious FAILURE, received own MAC %pM !!\n", __FUNCTION__, ea));
1990 return DHD_STA_NULL;
1991 }
1992
1993 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1994 if (sta == DHD_STA_NULL) {
1995 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1996 return DHD_STA_NULL;
1997 }
1998
1999 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
2000
2001 /* link the sta and the dhd interface */
2002 sta->ifp = ifp;
2003 sta->ifidx = ifidx;
2004 INIT_LIST_HEAD(&sta->list);
2005
2006 DHD_IF_STA_LIST_LOCK(ifp, flags);
2007
2008 list_add_tail(&sta->list, &ifp->sta_list);
2009
2010 DHD_ERROR(("%s: Adding STA " MACDBG "\n",
2011 __FUNCTION__, MAC2STRDBG((char *)ea)));
2012
2013 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2014
2015 return sta;
2016}
2017
2018/** Delete all STAs from the interface's STA list. */
2019void
2020dhd_del_all_sta(void *pub, int ifidx)
2021{
2022 dhd_sta_t *sta, *next;
2023 dhd_if_t *ifp;
2024 unsigned long flags;
2025
2026 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2027 if (ifp == NULL)
2028 return;
2029
2030 DHD_IF_STA_LIST_LOCK(ifp, flags);
2031#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2032#pragma GCC diagnostic push
2033#pragma GCC diagnostic ignored "-Wcast-qual"
2034#endif // endif
2035 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
2036
2037 list_del(&sta->list);
2038 dhd_sta_free(&ifp->info->pub, sta);
2039#ifdef DHD_L2_FILTER
2040 if (ifp->parp_enable) {
2041 /* clear Proxy ARP cache of specific Ethernet Address */
2042 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
2043 ifp->phnd_arp_table, FALSE,
2044 sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
2045 }
2046#endif /* DHD_L2_FILTER */
2047 }
2048#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2049#pragma GCC diagnostic pop
2050#endif // endif
2051 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2052
2053 return;
2054}
2055
2056/** Delete STA from the interface's STA list. */
2057void
2058dhd_del_sta(void *pub, int ifidx, void *ea)
2059{
2060 dhd_sta_t *sta, *next;
2061 dhd_if_t *ifp;
2062 unsigned long flags;
2063
2064 ASSERT(ea != NULL);
2065 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2066 if (ifp == NULL)
2067 return;
2068
2069 DHD_IF_STA_LIST_LOCK(ifp, flags);
2070#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2071#pragma GCC diagnostic push
2072#pragma GCC diagnostic ignored "-Wcast-qual"
2073#endif // endif
2074 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
2075 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
2076 DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
2077 __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
2078 list_del(&sta->list);
2079 dhd_sta_free(&ifp->info->pub, sta);
2080 }
2081 }
2082#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2083#pragma GCC diagnostic pop
2084#endif // endif
2085 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2086#ifdef DHD_L2_FILTER
2087 if (ifp->parp_enable) {
2088 /* clear Proxy ARP cache of specific Ethernet Address */
2089 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
2090 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
2091 }
2092#endif /* DHD_L2_FILTER */
2093 return;
2094}
2095
2096/** Add STA if it doesn't exist. Not reentrant. */
2097dhd_sta_t*
2098dhd_findadd_sta(void *pub, int ifidx, void *ea)
2099{
2100 dhd_sta_t *sta;
2101
2102 sta = dhd_find_sta(pub, ifidx, ea);
2103
2104 if (!sta) {
2105 /* Add entry */
2106 sta = dhd_add_sta(pub, ifidx, ea);
2107 }
2108
2109 return sta;
2110}
2111
2112#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2113static struct list_head *
2114dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
2115{
2116 unsigned long flags;
2117 dhd_sta_t *sta, *snapshot;
2118
2119 INIT_LIST_HEAD(snapshot_list);
2120
2121 DHD_IF_STA_LIST_LOCK(ifp, flags);
2122
2123 list_for_each_entry(sta, &ifp->sta_list, list) {
2124 /* allocate one and add to snapshot */
2125 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
2126 if (snapshot == NULL) {
2127 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
2128 continue;
2129 }
2130
2131 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
2132
2133 INIT_LIST_HEAD(&snapshot->list);
2134 list_add_tail(&snapshot->list, snapshot_list);
2135 }
2136
2137 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2138
2139 return snapshot_list;
2140}
2141
2142static void
2143dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
2144{
2145 dhd_sta_t *sta, *next;
2146
2147 list_for_each_entry_safe(sta, next, snapshot_list, list) {
2148 list_del(&sta->list);
2149 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
2150 }
2151}
2152#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2153
2154#else
2155static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
2156static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
2157static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
2158static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
2159static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
2160dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
2161dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
2162void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2163#endif /* PCIE_FULL_DONGLE */
2164
2165#if defined(DHD_LB)
2166
2167#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \
2168 defined(DHD_LB_RXP)
2169/**
2170 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2171 * CPU and schedules a tasklet.
2172 * @tasklet: opaque pointer to the tasklet
2173 */
2174INLINE void
2175dhd_tasklet_schedule(void *tasklet)
2176{
2177 tasklet_schedule((struct tasklet_struct *)tasklet);
2178}
2179/**
2180 * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
2181 * @tasklet: tasklet to be scheduled
2182 * @on_cpu: cpu core id
2183 *
2184 * If the requested cpu is online, then an IPI is sent to this cpu via the
2185 * smp_call_function_single with no wait and the tasklet_schedule function
2186 * will be invoked to schedule the specified tasklet on the requested CPU.
2187 */
2188INLINE void
2189dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2190{
2191 const int wait = 0;
2192 smp_call_function_single(on_cpu,
2193 dhd_tasklet_schedule, (void *)tasklet, wait);
2194}
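/* Usage sketch (editorial; mirrors the dispatcher functions below):
 * schedule a tasklet on a chosen CPU if it is online, else run locally:
 *
 *	cpu = atomic_read(&dhd->tx_compl_cpu);
 *	if (cpu_online(cpu))
 *		dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
 *	else
 *		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
 */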
2195
2196/**
2197 * dhd_work_schedule_on - Executes the passed work on a given CPU
2198 * @work: work to be scheduled
2199 * @on_cpu: cpu core id
2200 *
2201 * If the requested cpu is online, the work item is queued on that cpu's
2202 * workqueue via schedule_work_on() and the work function
2203 * will be invoked on the requested CPU.
2204 */
2205
2206INLINE void
2207dhd_work_schedule_on(struct work_struct *work, int on_cpu)
2208{
2209 schedule_work_on(on_cpu, work);
2210}
2211#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */
2212
2213#if defined(DHD_LB_TXC)
2214/**
2215 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2216 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2217 * freeing the packets placed in the tx_compl workq
2218 */
2219void
2220dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2221{
2222 dhd_info_t *dhd = dhdp->info;
2223 int curr_cpu, on_cpu;
2224
2225 if (dhd->rx_napi_netdev == NULL) {
2226 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2227 return;
2228 }
2229
2230 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2231 /*
2232	 * If the destination CPU is NOT online or is the same as the current
2233	 * CPU, there is no need to schedule the work.
2234 */
2235 curr_cpu = get_cpu();
2236 put_cpu();
2237
2238 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2239
2240 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2241 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2242 } else {
2243 schedule_work(&dhd->tx_compl_dispatcher_work);
2244 }
2245}
2246
2247static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2248{
2249 struct dhd_info *dhd =
2250 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2251 int cpu;
2252
2253 get_online_cpus();
2254 cpu = atomic_read(&dhd->tx_compl_cpu);
2255 if (!cpu_online(cpu))
2256 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2257 else
2258 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2259 put_online_cpus();
2260}
2261#endif /* DHD_LB_TXC */
2262
2263#if defined(DHD_LB_RXC)
2264/**
2265 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2266 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2267 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2268 * placed in the rx_compl workq.
2269 *
2270 * @dhdp: pointer to dhd_pub object
2271 */
2272void
2273dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2274{
2275 dhd_info_t *dhd = dhdp->info;
2276 int curr_cpu, on_cpu;
2277
2278 if (dhd->rx_napi_netdev == NULL) {
2279 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2280 return;
2281 }
2282
2283 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2284 /*
2285	 * If the destination CPU is NOT online or is the same as the current
2286	 * CPU, there is no need to schedule the work.
2287 */
2288 curr_cpu = get_cpu();
2289 put_cpu();
2290 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2291
2292 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2293 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2294 } else {
2295 schedule_work(&dhd->rx_compl_dispatcher_work);
2296 }
2297}
2298
2299static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2300{
2301 struct dhd_info *dhd =
2302 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2303 int cpu;
2304
2305 get_online_cpus();
2306 cpu = atomic_read(&dhd->rx_compl_cpu);
2307 if (!cpu_online(cpu))
2308 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2309 else {
2310 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2311 }
2312 put_online_cpus();
2313}
2314#endif /* DHD_LB_RXC */
2315
2316#if defined(DHD_LB_TXP)
2317static void dhd_tx_dispatcher_work(struct work_struct * work)
2318{
2319#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2320#pragma GCC diagnostic push
2321#pragma GCC diagnostic ignored "-Wcast-qual"
2322#endif // endif
2323 struct dhd_info *dhd =
2324 container_of(work, struct dhd_info, tx_dispatcher_work);
2325#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2326#pragma GCC diagnostic pop
2327#endif // endif
2328 dhd_tasklet_schedule(&dhd->tx_tasklet);
2329}
2330
2331static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
2332{
2333 int cpu;
2334 int net_tx_cpu;
2335 dhd_info_t *dhd = dhdp->info;
2336
2337 preempt_disable();
2338 cpu = atomic_read(&dhd->tx_cpu);
2339 net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
2340
2341 /*
2342	 * Now if the NET_TX has pushed the packet on the same
2343	 * CPU that is chosen for Tx processing, separate them out,
2344	 * i.e. run the TX processing tasklet on the tx_compl_cpu
2345 */
2346 if (net_tx_cpu == cpu)
2347 cpu = atomic_read(&dhd->tx_compl_cpu);
2348
2349 if (!cpu_online(cpu)) {
2350 /*
2351		 * The chosen CPU is not online, so do the
2352		 * job on the current CPU instead.
2353 */
2354 dhd_tasklet_schedule(&dhd->tx_tasklet);
2355 } else {
2356 /*
2357		 * Schedule tx_dispatcher_work on that cpu, which
2358		 * in turn will schedule the tx_tasklet.
2359 */
2360 dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
2361 }
2362 preempt_enable();
2363}
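/* Editorial summary of the dispatch decision above:
 *	net_tx_cpu == tx_cpu	-> retarget to tx_compl_cpu, so the network
 *				   stack and driver TX stay on separate cores
 *	chosen cpu offline	-> run tx_tasklet on the current CPU
 *	otherwise		-> queue tx_dispatcher_work on the chosen CPU,
 *				   which then schedules tx_tasklet there
 */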
2364
2365/**
2366 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
2367 * on another cpu. The tx_tasklet will take care of actually putting
2368 * the skbs into appropriate flow ring and ringing H2D interrupt
2369 *
2370 * @dhdp: pointer to dhd_pub object
2371 */
2372static void
2373dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
2374{
2375 dhd_info_t *dhd = dhdp->info;
2376 int curr_cpu;
2377
2378 curr_cpu = get_cpu();
2379 put_cpu();
2380
2381 /* Record the CPU in which the TX request from Network stack came */
2382 atomic_set(&dhd->net_tx_cpu, curr_cpu);
2383
2384 /* Schedule the work to dispatch ... */
2385 dhd_tx_dispatcher_fn(dhdp);
2386}
2387#endif /* DHD_LB_TXP */
2388
2389#if defined(DHD_LB_RXP)
2390/**
2391 * dhd_napi_poll - Load balance napi poll function to process received
2392 * packets and send up the network stack using netif_receive_skb()
2393 *
2394 * @napi: napi object in which context this poll function is invoked
2395 * @budget: number of packets to be processed.
2396 *
2397 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2398 * rx_napi_queue into a local rx_process_queue (lock, splice, unlock).
2399 * Dequeue each packet from the head of rx_process_queue, fetch the ifid
2400 * from the packet tag, and send it up.
2401 */
2402static int
2403dhd_napi_poll(struct napi_struct *napi, int budget)
2404{
2405 int ifid;
2406 const int pkt_count = 1;
2407 const int chan = 0;
2408 struct sk_buff * skb;
2409 unsigned long flags;
2410 struct dhd_info *dhd;
2411 int processed = 0;
2412 struct sk_buff_head rx_process_queue;
2413
2414#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2415#pragma GCC diagnostic push
2416#pragma GCC diagnostic ignored "-Wcast-qual"
2417#endif // endif
2418 dhd = container_of(napi, struct dhd_info, rx_napi_struct);
2419#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2420#pragma GCC diagnostic pop
2421#endif // endif
2422
2423 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2424 __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
2425 __skb_queue_head_init(&rx_process_queue);
2426
2427 /* extract the entire rx_napi_queue into local rx_process_queue */
2428 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2429 skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
2430 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2431
2432 while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
2433 OSL_PREFETCH(skb->data);
2434
2435 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
2436
2437 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2438 __FUNCTION__, skb, ifid));
2439
2440 dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
2441 processed++;
2442 }
2443
2444 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
2445
2446 DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
2447 napi_complete(napi);
2448
2449 return budget - 1;
2450}
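/* Editorial note: per the NAPI contract, a poll that has consumed all of
 * its work calls napi_complete() and returns a value smaller than budget.
 * This poll drains the spliced queue in a single pass and therefore always
 * completes, returning budget - 1 regardless of the processed count.
 */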
2451
2452/**
2453 * dhd_napi_schedule - Place the napi struct into the current cpu's softnet
2454 * napi poll list. This function may be invoked via smp_call_function_single
2455 * from a remote CPU.
2456 *
2457 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2458 * after the napi_struct is added to the softnet data's poll_list
2459 *
2460 * @info: pointer to a dhd_info struct
2461 */
2462static void
2463dhd_napi_schedule(void *info)
2464{
2465 dhd_info_t *dhd = (dhd_info_t *)info;
2466
2467 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2468 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2469
2470 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2471 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2472 __napi_schedule(&dhd->rx_napi_struct);
2473 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2474#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
2475 raise_softirq(NET_RX_SOFTIRQ);
2476#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
2477 }
2478
2479 /*
2480 * If the rx_napi_struct was already running, then we let it complete
2481 * processing all its packets. The rx_napi_struct may only run on one
2482 * core at a time, to avoid out-of-order handling.
2483 */
2484}
2485
2486/**
2487 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2488 * action after placing the dhd's rx_process napi object in the remote CPU's
2489 * softnet data's poll_list.
2490 *
2491 * @dhd: dhd_info which has the rx_process napi object
2492 * @on_cpu: desired remote CPU id
2493 */
2494static INLINE int
2495dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2496{
2497 int wait = 0; /* asynchronous IPI */
2498 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2499 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2500
2501 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2502 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2503 __FUNCTION__, on_cpu));
2504 }
2505
2506 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2507
2508 return 0;
2509}
2510
2511/*
2512 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
2513 * Why should we do this?
2514 * The candidacy algorithm is run from the callback function
2515 * registered to the CPU hotplug notifier. This callback runs in worker
2516 * context. dhd_napi_schedule_on also runs in worker context.
2517 * Note that both of these can run on two different CPUs at the same time.
2518 * So we can possibly have a window where a given CPUn is being brought
2519 * down from CPUm while we try to run a function on CPUn.
2520 * To prevent this, it is better to execute the whole SMP function
2521 * call under get_online_cpus.
2522 * This call ensures that the hotplug mechanism does not kick in
2523 * until we are done dealing with online CPUs.
2524 * If the hotplug worker is already running, no harm is done, because the
2525 * candidacy algorithm will then reflect the change in dhd->rx_napi_cpu.
2526 *
2527 * The code structure below is the one proposed in
2528 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2529 * for the question
2530 * Q: I need to ensure that a particular cpu is not removed when some
2531 * work specific to this cpu is in progress.
2532 *
2533 * According to the documentation, calling get_online_cpus is NOT required
2534 * if we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn
2535 * can run from workqueue context, we have to call these functions.
2536 */
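/* Minimal sketch of the pattern described above (editorial; compare the
 * tx_compl/rx_compl dispatcher functions):
 *
 *	get_online_cpus();			hold off CPU hotplug
 *	cpu = atomic_read(&dhd->rx_napi_cpu);
 *	if (cpu_online(cpu))
 *		dhd_napi_schedule_on(dhd, cpu);
 *	else
 *		dhd_napi_schedule(dhd);		fall back to this CPU
 *	put_online_cpus();			let hotplug resume
 */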
2537static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2538{
2539#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2540#pragma GCC diagnostic push
2541#pragma GCC diagnostic ignored "-Wcast-qual"
2542#endif // endif
2543 struct dhd_info *dhd =
2544 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2545#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2546#pragma GCC diagnostic pop
2547#endif // endif
2548
2549 dhd_napi_schedule(dhd);
2550}
2551
2552/**
2553 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2554 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2555 * the packets enqueued into the rx_napi_queue and send them up.
2556 * The producer's rx packet queue is appended to the rx_napi_queue before
2557 * dispatching the rx_napi_struct.
2558 */
2559void
2560dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2561{
2562 unsigned long flags;
2563 dhd_info_t *dhd = dhdp->info;
2564 int curr_cpu;
2565 int on_cpu;
2566#ifdef DHD_LB_IRQSET
2567 cpumask_t cpus;
2568#endif /* DHD_LB_IRQSET */
2569
2570 if (dhd->rx_napi_netdev == NULL) {
2571 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2572 return;
2573 }
2574
2575 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2576 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2577
2578 /* append the producer's queue of packets to the napi's rx process queue */
2579 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2580 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2581 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2582
2583 /*
2584	 * If the destination CPU is NOT online or is the same as the current
2585	 * CPU, there is no need to schedule the work.
2586 */
2587 curr_cpu = get_cpu();
2588 put_cpu();
2589
2590 preempt_disable();
2591 on_cpu = atomic_read(&dhd->rx_napi_cpu);
2592#ifdef DHD_LB_IRQSET
2593 if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
2594 (!cpu_online(on_cpu))) {
2595#else
2596 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2597#endif /* DHD_LB_IRQSET */
2598 DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
2599 curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
2600 dhd_napi_schedule(dhd);
2601 } else {
2602 DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
2603 __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
2604 dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
2605 }
2606 preempt_enable();
2607}
2608
2609/**
2610 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2611 */
2612void
2613dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2614{
2615 dhd_info_t *dhd = dhdp->info;
2616
2617 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2618 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2619 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2620 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2621}
2622#endif /* DHD_LB_RXP */
2623
2624#ifdef DHD_LB_IRQSET
2625void
2626dhd_irq_set_affinity(dhd_pub_t *dhdp)
2627{
2628 unsigned int irq = (unsigned int)-1;
2629 int err = BCME_OK;
2630
2631 if (!dhdp) {
2632 DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
2633 return;
2634 }
2635
2636 if (!dhdp->bus) {
2637 DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
2638 return;
2639 }
2640
2641 dhdpcie_get_pcieirq(dhdp->bus, &irq);
2642 err = irq_set_affinity(irq, dhdp->info->cpumask_primary);
2643 if (err)
2644		DHD_ERROR(("%s : failed to set irq affinity, cpu:0x%lx\n",
2645 __FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary)));
2646}
2647#endif /* DHD_LB_IRQSET */
2648#endif /* DHD_LB */
2649
2650/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
2651int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2652{
2653 dhd_if_t *ifp;
2654 dhd_info_t *dhd = dhdp->info;
2655 int i;
2656
2657 ASSERT(bssidx < DHD_MAX_IFS);
2658 ASSERT(dhdp);
2659
2660 for (i = 0; i < DHD_MAX_IFS; i++) {
2661 ifp = dhd->iflist[i];
2662 if (ifp && (ifp->bssidx == bssidx)) {
2663 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2664 ifp->name, bssidx, i));
2665 break;
2666 }
2667 }
2668 return i;
2669}
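/* Editorial note: if no interface matches, the loop above falls through
 * and DHD_MAX_IFS is returned; callers must treat that value as "not
 * found" rather than using it to index iflist[].
 */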
2670
2671static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
2672{
2673 uint32 store_idx;
2674 uint32 sent_idx;
2675
2676 if (!skb) {
2677 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2678 return BCME_ERROR;
2679 }
2680
2681 dhd_os_rxflock(dhdp);
2682 store_idx = dhdp->store_idx;
2683 sent_idx = dhdp->sent_idx;
2684 if (dhdp->skbbuf[store_idx] != NULL) {
2685 /* Make sure the previous packets are processed */
2686 dhd_os_rxfunlock(dhdp);
2687 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2688 skb, store_idx, sent_idx));
2689		/* msleep was removed here; wait_event_timeout should be used if we
2690		 * want to give the rx frame thread a chance to run
2691		 */
2692#if defined(WAIT_DEQUEUE)
2693 OSL_SLEEP(1);
2694#endif // endif
2695 return BCME_ERROR;
2696 }
2697 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2698 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
2699 dhdp->skbbuf[store_idx] = skb;
2700 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
2701 dhd_os_rxfunlock(dhdp);
2702
2703 return BCME_OK;
2704}
2705
2706static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2707{
2708 uint32 store_idx;
2709 uint32 sent_idx;
2710 void *skb;
2711
2712 dhd_os_rxflock(dhdp);
2713
2714 store_idx = dhdp->store_idx;
2715 sent_idx = dhdp->sent_idx;
2716 skb = dhdp->skbbuf[sent_idx];
2717
2718 if (skb == NULL) {
2719 dhd_os_rxfunlock(dhdp);
2720 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2721 store_idx, sent_idx));
2722 return NULL;
2723 }
2724
2725 dhdp->skbbuf[sent_idx] = NULL;
2726 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2727
2728 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2729 skb, sent_idx));
2730
2731 dhd_os_rxfunlock(dhdp);
2732
2733 return skb;
2734}
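/* Editorial note on the queue above: store_idx/sent_idx form a single-
 * producer ring over the MAXSKBPEND skbbuf slots, so MAXSKBPEND must be a
 * power of two for the wrap-around to be correct:
 *
 *	next = (idx + 1) & (MAXSKBPEND - 1);	cheap modulo-MAXSKBPEND
 */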
2735
2736int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2737{
2738 if (prepost) { /* pre process */
2739 dhd_read_cis(dhdp);
2740 dhd_check_module_cid(dhdp);
2741 dhd_check_module_mac(dhdp);
2742 dhd_set_macaddr_from_file(dhdp);
2743 } else { /* post process */
2744 dhd_write_macaddr(&dhdp->mac);
2745 dhd_clear_cis(dhdp);
2746 }
2747
2748 return 0;
2749}
2750
2751#ifdef PKT_FILTER_SUPPORT
2752#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2753static bool
2754_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
2755{
2756 bool _apply = FALSE;
2757 /* In case of IBSS mode, apply arp pkt filter */
2758 if (op_mode_param & DHD_FLAG_IBSS_MODE) {
2759 _apply = TRUE;
2760 goto exit;
2761 }
2762 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2763 if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
2764 _apply = TRUE;
2765 goto exit;
2766 }
2767
2768exit:
2769 return _apply;
2770}
2771#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2772
2773void
2774dhd_set_packet_filter(dhd_pub_t *dhd)
2775{
2776 int i;
2777
2778 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2779 if (dhd_pkt_filter_enable) {
2780 for (i = 0; i < dhd->pktfilter_count; i++) {
2781 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2782 }
2783 }
2784}
2785
2786void
2787dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2788{
2789 int i;
2790
2791 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2792 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2793 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2794 return;
2795 }
2796 /* 1 - Enable packet filter, only allow unicast packet to send up */
2797 /* 0 - Disable packet filter */
2798 if (dhd_pkt_filter_enable && (!value ||
2799 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2800 {
2801 for (i = 0; i < dhd->pktfilter_count; i++) {
2802#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2803 if (value && (i == DHD_ARP_FILTER_NUM) &&
2804 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2805					DHD_TRACE(("Do not turn on ARP white list pkt filter: "
2806 "val %d, cnt %d, op_mode 0x%x\n",
2807 value, i, dhd->op_mode));
2808 continue;
2809 }
2810#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2811 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2812 value, dhd_master_mode);
2813 }
2814 }
2815}
2816
2817int
2818dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
2819{
2820 char *filterp = NULL;
2821 int filter_id = 0;
2822
2823 switch (num) {
2824 case DHD_BROADCAST_FILTER_NUM:
2825 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
2826 filter_id = 101;
2827 break;
2828 case DHD_MULTICAST4_FILTER_NUM:
2829 filter_id = 102;
2830 if (FW_SUPPORTED((dhdp), pf6)) {
2831 if (dhdp->pktfilter[num] != NULL) {
2832 dhd_pktfilter_offload_delete(dhdp, filter_id);
2833 dhdp->pktfilter[num] = NULL;
2834 }
2835 if (!add_remove) {
2836 filterp = DISCARD_IPV4_MCAST;
2837 add_remove = 1;
2838 break;
2839 }
2840 }
2841 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
2842 break;
2843 case DHD_MULTICAST6_FILTER_NUM:
2844 filter_id = 103;
2845 if (FW_SUPPORTED((dhdp), pf6)) {
2846 if (dhdp->pktfilter[num] != NULL) {
2847 dhd_pktfilter_offload_delete(dhdp, filter_id);
2848 dhdp->pktfilter[num] = NULL;
2849 }
2850 if (!add_remove) {
2851 filterp = DISCARD_IPV6_MCAST;
2852 add_remove = 1;
2853 break;
2854 }
2855 }
2856 filterp = "103 0 0 0 0xFFFF 0x3333";
2857 break;
2858 case DHD_MDNS_FILTER_NUM:
2859 filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
2860 filter_id = 104;
2861 break;
2862 case DHD_ARP_FILTER_NUM:
2863 filterp = "105 0 0 12 0xFFFF 0x0806";
2864 filter_id = 105;
2865 break;
2866 case DHD_BROADCAST_ARP_FILTER_NUM:
2867 filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
2868 " 0xFFFFFFFFFFFF0000000000000806";
2869 filter_id = 106;
2870 break;
2871 default:
2872 return -EINVAL;
2873 }
2874
2875 /* Add filter */
2876 if (add_remove) {
2877 dhdp->pktfilter[num] = filterp;
2878 dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
2879 } else { /* Delete filter */
2880 if (dhdp->pktfilter[num]) {
2881 dhd_pktfilter_offload_delete(dhdp, filter_id);
2882 dhdp->pktfilter[num] = NULL;
2883 }
2884 }
2885
2886 return 0;
2887}
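/* Editorial note on the filter strings above (assuming the Broadcom
 * pkt_filter_add syntax): each string reads
 *	"<id> <polarity> <type> <offset> <bitmask> <pattern>"
 * e.g. "105 0 0 12 0xFFFF 0x0806" pattern-matches the 16-bit field at
 * byte offset 12 (the Ethernet type) against 0x0806, i.e. ARP frames.
 */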
2888#endif /* PKT_FILTER_SUPPORT */
2889
2890static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2891{
2892#ifndef SUPPORT_PM2_ONLY
2893 int power_mode = PM_MAX;
2894#endif /* SUPPORT_PM2_ONLY */
2895 /* wl_pkt_filter_enable_t enable_parm; */
2896 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2897 int ret = 0;
2898#ifdef DHD_USE_EARLYSUSPEND
2899#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2900 int bcn_timeout = 0;
2901#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2902#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2903 int roam_time_thresh = 0; /* (ms) */
2904#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2905#ifndef ENABLE_FW_ROAM_SUSPEND
2906 uint roamvar = 1;
2907#endif /* ENABLE_FW_ROAM_SUSPEND */
2908#ifdef ENABLE_BCN_LI_BCN_WAKEUP
2909 int bcn_li_bcn;
2910#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2911 uint nd_ra_filter = 0;
2912#ifdef ENABLE_IPMCAST_FILTER
2913 int ipmcast_l2filter;
2914#endif /* ENABLE_IPMCAST_FILTER */
2915#ifdef CUSTOM_EVENT_PM_WAKE
2916 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
2917#endif /* CUSTOM_EVENT_PM_WAKE */
2918#endif /* DHD_USE_EARLYSUSPEND */
2919#ifdef PASS_ALL_MCAST_PKTS
2920 struct dhd_info *dhdinfo;
2921 uint32 allmulti;
2922 uint i;
2923#endif /* PASS_ALL_MCAST_PKTS */
2924#ifdef DYNAMIC_SWOOB_DURATION
2925#ifndef CUSTOM_INTR_WIDTH
2926#define CUSTOM_INTR_WIDTH 100
2927 int intr_width = 0;
2928#endif /* CUSTOM_INTR_WIDTH */
2929#endif /* DYNAMIC_SWOOB_DURATION */
2930
2931#if defined(BCMPCIE)
2932 int lpas = 0;
2933 int dtim_period = 0;
2934 int bcn_interval = 0;
2935 int bcn_to_dly = 0;
2936#if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2937 bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
2938#else
2939 int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
2940#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
2941#endif /* BCMPCIE */
2942
2943 if (!dhd)
2944 return -ENODEV;
2945
2946#ifdef PASS_ALL_MCAST_PKTS
2947 dhdinfo = dhd->info;
2948#endif /* PASS_ALL_MCAST_PKTS */
2949
2950 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2951 __FUNCTION__, value, dhd->in_suspend));
2952
2953 dhd_suspend_lock(dhd);
2954
2955#ifdef CUSTOM_SET_CPUCORE
2956 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2957 /* set specific cpucore */
2958 dhd_set_cpucore(dhd, TRUE);
2959#endif /* CUSTOM_SET_CPUCORE */
2960 if (dhd->up) {
2961 if (value && dhd->in_suspend) {
2962#ifdef PKT_FILTER_SUPPORT
2963 dhd->early_suspended = 1;
2964#endif // endif
2965 /* Kernel suspended */
2966 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
2967
2968#ifndef SUPPORT_PM2_ONLY
2969 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2970 sizeof(power_mode), TRUE, 0);
2971#endif /* SUPPORT_PM2_ONLY */
2972
2973#ifdef PKT_FILTER_SUPPORT
2974 /* Enable packet filter,
2975 * only allow unicast packet to send up
2976 */
2977 dhd_enable_packet_filter(1, dhd);
2978#ifdef APF
2979 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
2980#endif /* APF */
2981#endif /* PKT_FILTER_SUPPORT */
2982
2983#ifdef PASS_ALL_MCAST_PKTS
2984 allmulti = 0;
2985 for (i = 0; i < DHD_MAX_IFS; i++) {
2986				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
2987					ret = dhd_iovar(dhd, i, "allmulti",
2988						(char *)&allmulti,
2989						sizeof(allmulti), NULL, 0, TRUE);
2990					if (ret < 0) {
2991						DHD_ERROR(("%s allmulti failed %d\n",
2992							__FUNCTION__, ret));
2993					}
2994				}
2995 }
2996#endif /* PASS_ALL_MCAST_PKTS */
2997
2998 /* If DTIM skip is set up as default, force it to wake
2999			 * on every third DTIM for better power savings. Note that
3000			 * one side effect is a chance of missing BC/MC packets.
3001 */
3002#ifdef WLTDLS
3003			/* Do not set bcn_li_dtim in WFD mode */
3004 if (dhd->tdls_mode) {
3005 bcn_li_dtim = 0;
3006 } else
3007#endif /* WLTDLS */
3008#if defined(BCMPCIE)
3009 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
3010 &bcn_interval);
3011 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3012 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3013 if (ret < 0) {
3014 DHD_ERROR(("%s bcn_li_dtim failed %d\n",
3015 __FUNCTION__, ret));
3016 }
3017 if ((bcn_li_dtim * dtim_period * bcn_interval) >=
3018 MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
3019 /*
3020 * Increase max roaming threshold from 2 secs to 8 secs
3021 * the real roam threshold is MIN(max_roam_threshold,
3022 * bcn_timeout/2)
3023 */
3024 lpas = 1;
3025 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
3026 NULL, 0, TRUE);
3027 if (ret < 0) {
3028 DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__,
3029 ret));
3030 }
3031 bcn_to_dly = 1;
3032 /*
3033 * if bcn_to_dly is 1, the real roam threshold is
3034 * MIN(max_roam_threshold, bcn_timeout -1);
3035				 * the link down event is notified after the roaming procedure
3036				 * completes if we hit bcn_timeout while roaming is in progress.
3037 */
3038 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
3039 sizeof(bcn_to_dly), NULL, 0, TRUE);
3040 if (ret < 0) {
3041 DHD_ERROR(("%s bcn_to_dly failed %d\n",
3042 __FUNCTION__, ret));
3043 }
3044				/* Increase beacon timeout to 6 secs, or keep the bigger value */
3045 bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
3046 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3047 sizeof(bcn_timeout), NULL, 0, TRUE);
3048 if (ret < 0) {
3049 DHD_ERROR(("%s bcn_timeout failed %d\n",
3050 __FUNCTION__, ret));
3051 }
3052 }
3053#else
3054 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
3055 if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3056 sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
3057 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
3058#endif /* BCMPCIE */
3059
3060#ifdef DHD_USE_EARLYSUSPEND
3061#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3062 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
3063 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3064 sizeof(bcn_timeout), NULL, 0, TRUE);
3065 if (ret < 0) {
3066 DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
3067 ret));
3068 }
3069#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3070#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3071 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
3072 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
3073 (char *)&roam_time_thresh,
3074 sizeof(roam_time_thresh), NULL, 0, TRUE);
3075 if (ret < 0) {
3076 DHD_ERROR(("%s roam_time_thresh failed %d\n",
3077 __FUNCTION__, ret));
3078 }
3079#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3080#ifndef ENABLE_FW_ROAM_SUSPEND
3081 /* Disable firmware roaming during suspend */
3082 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
3083 sizeof(roamvar), NULL, 0, TRUE);
3084 if (ret < 0) {
3085 DHD_ERROR(("%s roam_off failed %d\n",
3086 __FUNCTION__, ret));
3087 }
3088#endif /* ENABLE_FW_ROAM_SUSPEND */
3089#ifdef ENABLE_BCN_LI_BCN_WAKEUP
3090 bcn_li_bcn = 0;
3091 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
3092 sizeof(bcn_li_bcn), NULL, 0, TRUE);
3093 if (ret < 0) {
3094 DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
3095 }
3096#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3097#ifdef NDO_CONFIG_SUPPORT
3098 if (dhd->ndo_enable) {
3099 if (!dhd->ndo_host_ip_overflow) {
3100 /* enable ND offload on suspend */
3101 ret = dhd_ndo_enable(dhd, TRUE);
3102 if (ret < 0) {
3103 DHD_ERROR(("%s: failed to enable NDO\n",
3104 __FUNCTION__));
3105 }
3106 } else {
3107					DHD_INFO(("%s: NDO disabled on suspend due to "
3108						"HW capacity\n", __FUNCTION__));
3109 }
3110 }
3111#endif /* NDO_CONFIG_SUPPORT */
3112#ifndef APF
3113 if (FW_SUPPORTED(dhd, ndoe)) {
3114#else
3115 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
3116#endif /* APF */
3117 /* enable IPv6 RA filter in firmware during suspend */
3118 nd_ra_filter = 1;
3119 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
3120 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
3121 NULL, 0, TRUE);
3122 if (ret < 0)
3123 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3124 ret));
3125 }
3126 dhd_os_suppress_logging(dhd, TRUE);
3127#ifdef ENABLE_IPMCAST_FILTER
3128 ipmcast_l2filter = 1;
3129 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
3130 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
3131 NULL, 0, TRUE);
3132 if (ret < 0) {
3133 DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
3134 }
3135#endif /* ENABLE_IPMCAST_FILTER */
3136#ifdef DYNAMIC_SWOOB_DURATION
3137 intr_width = CUSTOM_INTR_WIDTH;
3138 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
3139 sizeof(intr_width), NULL, 0, TRUE);
3140 if (ret < 0) {
3141 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
3142 }
3143#endif /* DYNAMIC_SWOOB_DURATION */
3144#ifdef CUSTOM_EVENT_PM_WAKE
3145 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
3146 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
3147 (char *)&pm_awake_thresh,
3148 sizeof(pm_awake_thresh), NULL, 0, TRUE);
3149 if (ret < 0) {
3150 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
3151 __FUNCTION__, ret));
3152 }
3153#endif /* CUSTOM_EVENT_PM_WAKE */
3154#endif /* DHD_USE_EARLYSUSPEND */
3155 } else {
3156#ifdef PKT_FILTER_SUPPORT
3157 dhd->early_suspended = 0;
3158#endif // endif
3159 /* Kernel resumed */
3160 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
3161#ifdef DYNAMIC_SWOOB_DURATION
3162 intr_width = 0;
3163 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
3164 sizeof(intr_width), NULL, 0, TRUE);
3165 if (ret < 0) {
3166 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
3167 }
3168#endif /* DYNAMIC_SWOOB_DURATION */
3169#ifndef SUPPORT_PM2_ONLY
3170 power_mode = PM_FAST;
3171 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
3172 sizeof(power_mode), TRUE, 0);
3173#endif /* SUPPORT_PM2_ONLY */
3174#ifdef PKT_FILTER_SUPPORT
3175 /* disable pkt filter */
3176 dhd_enable_packet_filter(0, dhd);
3177#ifdef APF
3178 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
3179#endif /* APF */
3180#endif /* PKT_FILTER_SUPPORT */
3181#ifdef PASS_ALL_MCAST_PKTS
3182 allmulti = 1;
3183 for (i = 0; i < DHD_MAX_IFS; i++) {
3184				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
3185					ret = dhd_iovar(dhd, i, "allmulti",
3186						(char *)&allmulti,
3187						sizeof(allmulti), NULL, 0, TRUE);
3188					if (ret < 0) {
3189						DHD_ERROR(("%s: allmulti failed:%d\n",
3190							__FUNCTION__, ret));
3191					}
3192				}
3193 }
3194#endif /* PASS_ALL_MCAST_PKTS */
3195#if defined(BCMPCIE)
3196 /* restore pre-suspend setting */
3197 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3198 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3199 if (ret < 0) {
3200				DHD_ERROR(("%s:bcn_li_dtim failed:%d\n",
3201 __FUNCTION__, ret));
3202 }
3203 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
3204 0, TRUE);
3205 if (ret < 0) {
3206 DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
3207 }
3208 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
3209 sizeof(bcn_to_dly), NULL, 0, TRUE);
3210 if (ret < 0) {
3211 DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
3212 }
3213 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3214 sizeof(bcn_timeout), NULL, 0, TRUE);
3215 if (ret < 0) {
3216 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
3217 __FUNCTION__, ret));
3218 }
3219#else
3220 /* restore pre-suspend setting for dtim_skip */
3221 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3222 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3223 if (ret < 0) {
3224				DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
3225 }
3226#endif /* BCMPCIE */
3227#ifdef DHD_USE_EARLYSUSPEND
3228#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3229 bcn_timeout = CUSTOM_BCN_TIMEOUT;
3230 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3231 sizeof(bcn_timeout), NULL, 0, TRUE);
3232 if (ret < 0) {
3233 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
3234 __FUNCTION__, ret));
3235 }
3236#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3237#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3238 roam_time_thresh = 2000;
3239 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
3240 (char *)&roam_time_thresh,
3241 sizeof(roam_time_thresh), NULL, 0, TRUE);
3242 if (ret < 0) {
3243 DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
3244 __FUNCTION__, ret));
3245 }
3246
3247#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3248#ifndef ENABLE_FW_ROAM_SUSPEND
3249 roamvar = dhd_roam_disable;
3250 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
3251 sizeof(roamvar), NULL, 0, TRUE);
3252 if (ret < 0) {
3253 DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
3254 }
3255#endif /* ENABLE_FW_ROAM_SUSPEND */
3256#ifdef ENABLE_BCN_LI_BCN_WAKEUP
3257 bcn_li_bcn = 1;
3258 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
3259 sizeof(bcn_li_bcn), NULL, 0, TRUE);
3260 if (ret < 0) {
3261 DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
3262 __FUNCTION__, ret));
3263 }
3264#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3265#ifdef NDO_CONFIG_SUPPORT
3266 if (dhd->ndo_enable) {
3267 /* Disable ND offload on resume */
3268 ret = dhd_ndo_enable(dhd, FALSE);
3269 if (ret < 0) {
3270 DHD_ERROR(("%s: failed to disable NDO\n",
3271 __FUNCTION__));
3272 }
3273 }
3274#endif /* NDO_CONFIG_SUPPORT */
3275#ifndef APF
3276 if (FW_SUPPORTED(dhd, ndoe)) {
3277#else
3278 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
3279#endif /* APF */
3280 /* disable IPv6 RA filter in firmware during suspend */
3281 nd_ra_filter = 0;
3282 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
3283 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
3284 NULL, 0, TRUE);
3285 if (ret < 0) {
3286 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3287 ret));
3288 }
3289 }
3290 dhd_os_suppress_logging(dhd, FALSE);
3291#ifdef ENABLE_IPMCAST_FILTER
3292 ipmcast_l2filter = 0;
3293 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
3294 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
3295 NULL, 0, TRUE);
3296 if (ret < 0) {
3297				DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d\n", ret));
3298 }
3299#endif /* ENABLE_IPMCAST_FILTER */
3300#ifdef CUSTOM_EVENT_PM_WAKE
3301 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
3302 (char *)&pm_awake_thresh,
3303 sizeof(pm_awake_thresh), NULL, 0, TRUE);
3304 if (ret < 0) {
3305 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
3306 __FUNCTION__, ret));
3307 }
3308#endif /* CUSTOM_EVENT_PM_WAKE */
3309#endif /* DHD_USE_EARLYSUSPEND */
3310#ifdef DHD_LB_IRQSET
3311 dhd_irq_set_affinity(dhd);
3312#endif /* DHD_LB_IRQSET */
3313 }
3314 }
3315 dhd_suspend_unlock(dhd);
3316
3317 return 0;
3318}
3319
3320static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
3321{
3322 dhd_pub_t *dhdp = &dhd->pub;
3323 int ret = 0;
3324
3325 DHD_OS_WAKE_LOCK(dhdp);
3326 DHD_PERIM_LOCK(dhdp);
3327
3328 /* Set flag when early suspend was called */
3329 dhdp->in_suspend = val;
3330 if ((force || !dhdp->suspend_disable_flag) &&
3331 dhd_support_sta_mode(dhdp))
3332 {
3333 ret = dhd_set_suspend(val, dhdp);
3334 }
3335
3336 DHD_PERIM_UNLOCK(dhdp);
3337 DHD_OS_WAKE_UNLOCK(dhdp);
3338 return ret;
3339}
3340
3341#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3342static void dhd_early_suspend(struct early_suspend *h)
3343{
3344 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3345 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3346
3347 if (dhd)
3348 dhd_suspend_resume_helper(dhd, 1, 0);
3349}
3350
3351static void dhd_late_resume(struct early_suspend *h)
3352{
3353 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3354 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3355
3356 if (dhd)
3357 dhd_suspend_resume_helper(dhd, 0, 0);
3358}
3359#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3360
3361/*
3362 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3363 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3364 *
3365 * dhd_timeout_start(&tmo, usec);
3366 * while (!dhd_timeout_expired(&tmo))
3367 * if (poll_something())
3368 * break;
3369 * if (dhd_timeout_expired(&tmo))
3370 * fatal();
3371 */
3372
3373void
3374dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
3375{
3376 tmo->limit = usec;
3377 tmo->increment = 0;
3378 tmo->elapsed = 0;
3379 tmo->tick = jiffies_to_usecs(1);
3380}
3381
3382int
3383dhd_timeout_expired(dhd_timeout_t *tmo)
3384{
3385	/* Does nothing on the first call */
3386 if (tmo->increment == 0) {
3387 tmo->increment = 1;
3388 return 0;
3389 }
3390
3391 if (tmo->elapsed >= tmo->limit)
3392 return 1;
3393
3394 /* Add the delay that's about to take place */
3395 tmo->elapsed += tmo->increment;
3396
3397 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
3398 OSL_DELAY(tmo->increment);
3399 tmo->increment *= 2;
3400 if (tmo->increment > tmo->tick)
3401 tmo->increment = tmo->tick;
3402 } else {
3403 /*
3404		 * OSL_SLEEP() corresponds to usleep_range(). In non-atomic
3405		 * context, where the exact wakeup time is flexible, it is better
3406		 * to use usleep_range() instead of udelay(). It brings advantages
3407		 * such as improved responsiveness and reduced power consumption.
3408 */
3409 OSL_SLEEP(jiffies_to_msecs(1));
3410 }
3411
3412 return 0;
3413}
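/* Editorial example, assuming HZ == 250 so tick == jiffies_to_usecs(1)
 * == 4000 us: successive dhd_timeout_expired() calls spin-delay 1, 2, 4,
 * ... 2048 us (doubling, capped at tick); once increment reaches tick and
 * sleeping is allowed, each call instead sleeps about one jiffy.
 */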
3414
3415int
3416dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
3417{
3418 int i = 0;
3419
3420 if (!dhd) {
3421 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
3422 return DHD_BAD_IF;
3423 }
3424
3425 while (i < DHD_MAX_IFS) {
3426 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
3427 return i;
3428 i++;
3429 }
3430
3431 return DHD_BAD_IF;
3432}
3433
3434struct net_device * dhd_idx2net(void *pub, int ifidx)
3435{
3436 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
3437 struct dhd_info *dhd_info;
3438
3439 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
3440 return NULL;
3441 dhd_info = dhd_pub->info;
3442 if (dhd_info && dhd_info->iflist[ifidx])
3443 return dhd_info->iflist[ifidx]->net;
3444 return NULL;
3445}
3446
3447int
3448dhd_ifname2idx(dhd_info_t *dhd, char *name)
3449{
3450 int i = DHD_MAX_IFS;
3451
3452 ASSERT(dhd);
3453
3454 if (name == NULL || *name == '\0')
3455 return 0;
3456
3457 while (--i > 0)
3458 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
3459 break;
3460
3461 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
3462
3463 return i; /* default - the primary interface */
3464}
3465
3466char *
3467dhd_ifname(dhd_pub_t *dhdp, int ifidx)
3468{
3469 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3470
3471 ASSERT(dhd);
3472
3473 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
3474 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
3475 return "<if_bad>";
3476 }
3477
3478 if (dhd->iflist[ifidx] == NULL) {
3479 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
3480 return "<if_null>";
3481 }
3482
3483 if (dhd->iflist[ifidx]->net)
3484 return dhd->iflist[ifidx]->net->name;
3485
3486 return "<if_none>";
3487}
3488
3489uint8 *
3490dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
3491{
3492 int i;
3493	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3494
3495 ASSERT(dhd);
3496 for (i = 0; i < DHD_MAX_IFS; i++)
3497 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
3498 return dhd->iflist[i]->mac_addr;
3499
3500 return NULL;
3501}
3502
3503static void
3504_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
3505{
3506 struct net_device *dev;
3507#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3508 struct netdev_hw_addr *ha;
3509#else
3510 struct dev_mc_list *mclist;
3511#endif // endif
3512 uint32 allmulti, cnt;
3513
3514 wl_ioctl_t ioc;
3515 char *buf, *bufp;
3516 uint buflen;
3517 int ret;
3518
3519#ifdef MCAST_LIST_ACCUMULATION
3520 int i;
3521 uint32 cnt_iface[DHD_MAX_IFS];
3522 cnt = 0;
3523 allmulti = 0;
3524
3525 for (i = 0; i < DHD_MAX_IFS; i++) {
3526 if (dhd->iflist[i]) {
3527 dev = dhd->iflist[i]->net;
3528 if (!dev)
3529 continue;
3530#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3531 netif_addr_lock_bh(dev);
3532#endif /* LINUX >= 2.6.27 */
3533#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3534 cnt_iface[i] = netdev_mc_count(dev);
3535 cnt += cnt_iface[i];
3536#else
3537 cnt += dev->mc_count;
3538#endif /* LINUX >= 2.6.35 */
3539#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3540 netif_addr_unlock_bh(dev);
3541#endif /* LINUX >= 2.6.27 */
3542
3543 /* Determine initial value of allmulti flag */
3544 allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3545 }
3546 }
3547#else /* !MCAST_LIST_ACCUMULATION */
3548 if (!dhd->iflist[ifidx]) {
3549 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3550 return;
3551 }
3552 dev = dhd->iflist[ifidx]->net;
3553 if (!dev)
3554 return;
3555#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3556 netif_addr_lock_bh(dev);
3557#endif /* LINUX >= 2.6.27 */
3558#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3559 cnt = netdev_mc_count(dev);
3560#else
3561 cnt = dev->mc_count;
3562#endif /* LINUX >= 2.6.35 */
3563#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3564 netif_addr_unlock_bh(dev);
3565#endif /* LINUX >= 2.6.27 */
3566
3567 /* Determine initial value of allmulti flag */
3568 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3569#endif /* MCAST_LIST_ACCUMULATION */
3570
3571#ifdef PASS_ALL_MCAST_PKTS
3572#ifdef PKT_FILTER_SUPPORT
3573 if (!dhd->pub.early_suspended)
3574#endif /* PKT_FILTER_SUPPORT */
3575 allmulti = TRUE;
3576#endif /* PASS_ALL_MCAST_PKTS */
3577
3578 /* Send down the multicast list first. */
3579
3580 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3581 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3582 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3583 dhd_ifname(&dhd->pub, ifidx), cnt));
3584 return;
3585 }
3586
3587 strncpy(bufp, "mcast_list", buflen - 1);
3588 bufp[buflen - 1] = '\0';
3589 bufp += strlen("mcast_list") + 1;
3590
3591 cnt = htol32(cnt);
3592 memcpy(bufp, &cnt, sizeof(cnt));
3593 bufp += sizeof(cnt);
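	/* At this point the iovar buffer is laid out as:
	 *   "mcast_list\0" | uint32 count (little-endian) | count * ETHER_ADDR_LEN bytes.
	 * e.g. for a count of two, the payload after the NUL-terminated name would be
	 * 02 00 00 00 | 01 00 5e 01 02 03 | 01 00 5e 04 05 06 (illustrative addresses).
	 */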
3594
3595#ifdef MCAST_LIST_ACCUMULATION
3596 for (i = 0; i < DHD_MAX_IFS; i++) {
3597 if (dhd->iflist[i]) {
3598 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
3599 dev = dhd->iflist[i]->net;
3600
3601#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3602 netif_addr_lock_bh(dev);
3603#endif /* LINUX >= 2.6.27 */
3604#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3605#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3606#pragma GCC diagnostic push
3607#pragma GCC diagnostic ignored "-Wcast-qual"
3608#endif // endif
3609 netdev_for_each_mc_addr(ha, dev) {
3610#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3611#pragma GCC diagnostic pop
3612#endif // endif
3613 if (!cnt_iface[i])
3614 break;
3615 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3616 bufp += ETHER_ADDR_LEN;
3617 DHD_TRACE(("_dhd_set_multicast_list: cnt "
3618 "%d " MACDBG "\n",
3619 cnt_iface[i], MAC2STRDBG(ha->addr)));
3620 cnt_iface[i]--;
3621 }
3622#else /* LINUX < 2.6.35 */
3623 for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0));
3624 cnt_iface[i]--, mclist = mclist->next) {
3625 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3626 bufp += ETHER_ADDR_LEN;
3627 }
3628#endif /* LINUX >= 2.6.35 */
3629#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3630 netif_addr_unlock_bh(dev);
3631#endif /* LINUX >= 2.6.27 */
3632 }
3633 }
3634#else /* !MCAST_LIST_ACCUMULATION */
3635#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3636 netif_addr_lock_bh(dev);
3637#endif /* LINUX >= 2.6.27 */
3638#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3639#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3640#pragma GCC diagnostic push
3641#pragma GCC diagnostic ignored "-Wcast-qual"
3642#endif // endif
3643 netdev_for_each_mc_addr(ha, dev) {
3644 if (!cnt)
3645 break;
3646 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3647 bufp += ETHER_ADDR_LEN;
3648 cnt--;
3649 }
3650#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3651#pragma GCC diagnostic pop
3652#endif // endif
3653#else /* LINUX < 2.6.35 */
3654 for (mclist = dev->mc_list; (mclist && (cnt > 0));
3655 cnt--, mclist = mclist->next) {
3656 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3657 bufp += ETHER_ADDR_LEN;
3658 }
3659#endif /* LINUX >= 2.6.35 */
3660#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3661 netif_addr_unlock_bh(dev);
3662#endif /* LINUX >= 2.6.27 */
3663#endif /* MCAST_LIST_ACCUMULATION */
3664
3665 memset(&ioc, 0, sizeof(ioc));
3666 ioc.cmd = WLC_SET_VAR;
3667 ioc.buf = buf;
3668 ioc.len = buflen;
3669 ioc.set = TRUE;
3670
3671 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3672 if (ret < 0) {
3673 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3674 dhd_ifname(&dhd->pub, ifidx), cnt));
3675 allmulti = cnt ? TRUE : allmulti;
3676 }
3677
3678 MFREE(dhd->pub.osh, buf, buflen);
3679
3680 /* Now send the allmulti setting. This is based on the setting in the
3681 * net_device flags, but might be modified above to be turned on if we
3682 * were trying to set some addresses and dongle rejected it...
3683 */
3684
3685 allmulti = htol32(allmulti);
3686 ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
3687 sizeof(allmulti), NULL, 0, TRUE);
3688 if (ret < 0) {
3689 DHD_ERROR(("%s: set allmulti %d failed\n",
3690 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3691 }
3692
3693 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3694
3695#ifdef MCAST_LIST_ACCUMULATION
3696 allmulti = 0;
3697 for (i = 0; i < DHD_MAX_IFS; i++) {
3698 if (dhd->iflist[i]) {
3699 dev = dhd->iflist[i]->net;
3700 allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3701 }
3702 }
3703#else
3704 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3705#endif /* MCAST_LIST_ACCUMULATION */
3706
3707 allmulti = htol32(allmulti);
3708
3709 memset(&ioc, 0, sizeof(ioc));
3710 ioc.cmd = WLC_SET_PROMISC;
3711 ioc.buf = &allmulti;
3712 ioc.len = sizeof(allmulti);
3713 ioc.set = TRUE;
3714
3715 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3716 if (ret < 0) {
3717 DHD_ERROR(("%s: set promisc %d failed\n",
3718 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3719 }
3720}
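/* To summarize, _dhd_set_multicast_list() pushes three settings to the dongle:
 * the explicit "mcast_list" iovar, the "allmulti" iovar (forced on when the
 * list could not be programmed), and finally WLC_SET_PROMISC mirroring the
 * IFF_PROMISC flag, much as a NIC driver would.
 */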
3721
3722int
3723_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3724{
3725 int ret;
3726
3727 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
3728 ETHER_ADDR_LEN, NULL, 0, TRUE);
3729 if (ret < 0) {
3730 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3731 } else {
3732 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3733 if (ifidx == 0)
3734 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3735 }
3736
3737 return ret;
3738}
3739
3740#ifdef SOFTAP
3741extern struct net_device *ap_net_dev;
3742extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
3743#endif // endif
3744
3745#ifdef DHD_PSTA
3746/* Get psta/psr configuration */
3747int dhd_get_psta_mode(dhd_pub_t *dhdp)
3748{
3749 dhd_info_t *dhd = dhdp->info;
3750 return (int)dhd->psta_mode;
3751}
3752/* Set psta/psr configuration */
3753int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3754{
3755 dhd_info_t *dhd = dhdp->info;
3756 dhd->psta_mode = val;
3757 return 0;
3758}
3759#endif /* DHD_PSTA */
3760
3761#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
3762static void
3763dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
3764{
3765 dhd_info_t *dhd = dhdp->info;
3766 dhd_if_t *ifp;
3767
3768 ASSERT(idx < DHD_MAX_IFS);
3769
3770 ifp = dhd->iflist[idx];
3771
3772 if (
3773#ifdef DHD_L2_FILTER
3774 (ifp->block_ping) ||
3775#endif // endif
3776#ifdef DHD_WET
3777 (dhd->wet_mode) ||
3778#endif // endif
3779#ifdef DHD_MCAST_REGEN
3780 (ifp->mcast_regen_bss_enable) ||
3781#endif // endif
3782 FALSE) {
3783 ifp->rx_pkt_chainable = FALSE;
3784 }
3785}
3786#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
3787
3788#ifdef DHD_WET
3789/* Get wet configuration */
3790int dhd_get_wet_mode(dhd_pub_t *dhdp)
3791{
3792 dhd_info_t *dhd = dhdp->info;
3793 return (int)dhd->wet_mode;
3794}
3795
3796/* Set wet configuration */
3797int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
3798{
3799 dhd_info_t *dhd = dhdp->info;
3800 dhd->wet_mode = val;
3801 dhd_update_rx_pkt_chainable_state(dhdp, 0);
3802 return 0;
3803}
3804#endif /* DHD_WET */
3805
3806#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3807int32 dhd_role_to_nl80211_iftype(int32 role)
3808{
3809 switch (role) {
3810 case WLC_E_IF_ROLE_STA:
3811 return NL80211_IFTYPE_STATION;
3812 case WLC_E_IF_ROLE_AP:
3813 return NL80211_IFTYPE_AP;
3814 case WLC_E_IF_ROLE_WDS:
3815 return NL80211_IFTYPE_WDS;
3816 case WLC_E_IF_ROLE_P2P_GO:
3817 return NL80211_IFTYPE_P2P_GO;
3818 case WLC_E_IF_ROLE_P2P_CLIENT:
3819 return NL80211_IFTYPE_P2P_CLIENT;
3820 case WLC_E_IF_ROLE_IBSS:
3821 case WLC_E_IF_ROLE_NAN:
3822 return NL80211_IFTYPE_ADHOC;
3823 default:
3824 return NL80211_IFTYPE_UNSPECIFIED;
3825 }
3826}
3827#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3828
3829static void
3830dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3831{
3832 dhd_info_t *dhd = handle;
3833 dhd_if_event_t *if_event = event_info;
3834 int ifidx, bssidx;
3835 int ret;
3836#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3837 struct wl_if_event_info info;
3838#else
3839 struct net_device *ndev;
3840#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3841
3842 BCM_REFERENCE(ret);
3843 if (event != DHD_WQ_WORK_IF_ADD) {
3844 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3845 return;
3846 }
3847
3848 if (!dhd) {
3849 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3850 return;
3851 }
3852
3853 if (!if_event) {
3854 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3855 return;
3856 }
3857
3858 dhd_net_if_lock_local(dhd);
3859 DHD_OS_WAKE_LOCK(&dhd->pub);
3860 DHD_PERIM_LOCK(&dhd->pub);
3861
3862 ifidx = if_event->event.ifidx;
3863 bssidx = if_event->event.bssidx;
3864 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3865
3866#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3867 if (if_event->event.ifidx > 0) {
3868 u8 *mac_addr;
3869 bzero(&info, sizeof(info));
3870 info.ifidx = ifidx;
3871 info.bssidx = bssidx;
3872 info.role = if_event->event.role;
3873 strncpy(info.name, if_event->name, IFNAMSIZ);
3874 if (is_valid_ether_addr(if_event->mac)) {
3875 mac_addr = if_event->mac;
3876 } else {
3877 mac_addr = NULL;
3878 }
3879
3880		/* Do the post interface create ops */
3881		if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
3882			&info, mac_addr, NULL, true) == NULL) {
3883 DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
3884 goto done;
3885 }
3886 }
3887#else
3888	/* This path is for the non-Android case */
3889	/* The interface name on the host and in the event msg are the same */
3890	/* The i/f name in the event msg is used to create the dongle i/f list on the host */
3891 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3892 if_event->mac, bssidx, TRUE, if_event->name);
3893 if (!ndev) {
3894 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
3895 goto done;
3896 }
3897
3898 DHD_PERIM_UNLOCK(&dhd->pub);
3899 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3900 DHD_PERIM_LOCK(&dhd->pub);
3901 if (ret != BCME_OK) {
3902 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3903 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3904 goto done;
3905 }
3906#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3907
3908#ifndef PCIE_FULL_DONGLE
3909 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3910 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3911 uint32 var_int = 1;
3912 ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
3913 NULL, 0, TRUE);
3914 if (ret != BCME_OK) {
3915 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3916 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3917 }
3918 }
3919#endif /* PCIE_FULL_DONGLE */
3920
3921done:
3922 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3923
3924 DHD_PERIM_UNLOCK(&dhd->pub);
3925 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3926 dhd_net_if_unlock_local(dhd);
3927}
3928
3929static void
3930dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3931{
3932 dhd_info_t *dhd = handle;
3933 int ifidx;
3934 dhd_if_event_t *if_event = event_info;
3935
3936 if (event != DHD_WQ_WORK_IF_DEL) {
3937 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3938 return;
3939 }
3940
3941 if (!dhd) {
3942 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3943 return;
3944 }
3945
3946 if (!if_event) {
3947 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3948 return;
3949 }
3950
3951 dhd_net_if_lock_local(dhd);
3952 DHD_OS_WAKE_LOCK(&dhd->pub);
3953 DHD_PERIM_LOCK(&dhd->pub);
3954
3955 ifidx = if_event->event.ifidx;
3956 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3957
3958 DHD_PERIM_UNLOCK(&dhd->pub);
3959 if (!dhd->pub.info->iflist[ifidx]) {
3960 /* No matching netdev found */
3961 DHD_ERROR(("Netdev not found! Do nothing.\n"));
3962 goto done;
3963 }
3964#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3965 if (if_event->event.ifidx > 0) {
3966 /* Do the post interface del ops */
3967 if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
3968 true, if_event->event.ifidx) != 0) {
3969 DHD_TRACE(("Post ifdel ops failed. Returning \n"));
3970 goto done;
3971 }
3972 }
3973#else
3974 /* For non-cfg80211 drivers */
3975 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3976#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3977
3978done:
3979 DHD_PERIM_LOCK(&dhd->pub);
3980 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3981 DHD_PERIM_UNLOCK(&dhd->pub);
3982 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3983 dhd_net_if_unlock_local(dhd);
3984}
3985
3986static void
3987dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3988{
3989 dhd_info_t *dhd = handle;
3990 dhd_if_t *ifp = event_info;
3991
3992 if (event != DHD_WQ_WORK_SET_MAC) {
3993 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3994		return;
	}
3995
3996 if (!dhd) {
3997 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3998 return;
3999 }
4000
4001 dhd_net_if_lock_local(dhd);
4002 DHD_OS_WAKE_LOCK(&dhd->pub);
4003 DHD_PERIM_LOCK(&dhd->pub);
4004
4005#ifdef SOFTAP
4006 {
4007 unsigned long flags;
4008 bool in_ap = FALSE;
4009 DHD_GENERAL_LOCK(&dhd->pub, flags);
4010 in_ap = (ap_net_dev != NULL);
4011 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4012
4013 if (in_ap) {
4014 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
4015 ifp->net->name));
4016 goto done;
4017 }
4018 }
4019#endif /* SOFTAP */
4020
4021 if (ifp == NULL || !dhd->pub.up) {
4022 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4023 goto done;
4024 }
4025
4026 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
4027 ifp->set_macaddress = FALSE;
4028 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
4029 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
4030 else
4031 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
4032
4033done:
4034 DHD_PERIM_UNLOCK(&dhd->pub);
4035 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4036 dhd_net_if_unlock_local(dhd);
4037}
4038
4039static void
4040dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
4041{
4042 dhd_info_t *dhd = handle;
4043 int ifidx = (int)((long int)event_info);
4044 dhd_if_t *ifp = NULL;
4045
4046 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
4047 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4048 return;
4049 }
4050
4051 if (!dhd) {
4052 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4053 return;
4054 }
4055
4056 dhd_net_if_lock_local(dhd);
4057 DHD_OS_WAKE_LOCK(&dhd->pub);
4058 DHD_PERIM_LOCK(&dhd->pub);
4059
4060 ifp = dhd->iflist[ifidx];
4061
4062 if (ifp == NULL || !dhd->pub.up) {
4063 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4064 goto done;
4065 }
4066
4067#ifdef SOFTAP
4068 {
4069 bool in_ap = FALSE;
4070 unsigned long flags;
4071 DHD_GENERAL_LOCK(&dhd->pub, flags);
4072 in_ap = (ap_net_dev != NULL);
4073 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4074
4075 if (in_ap) {
4076 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
4077 ifp->net->name));
4078 ifp->set_multicast = FALSE;
4079 goto done;
4080 }
4081 }
4082#endif /* SOFTAP */
4083
4084 ifidx = ifp->idx;
4085
4086#ifdef MCAST_LIST_ACCUMULATION
4087 ifidx = 0;
4088#endif /* MCAST_LIST_ACCUMULATION */
4089
4090 _dhd_set_multicast_list(dhd, ifidx);
4091 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
4092
4093done:
4094 DHD_PERIM_UNLOCK(&dhd->pub);
4095 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4096 dhd_net_if_unlock_local(dhd);
4097}
4098
4099static int
4100dhd_set_mac_address(struct net_device *dev, void *addr)
4101{
4102 int ret = 0;
4103
4104 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4105 struct sockaddr *sa = (struct sockaddr *)addr;
4106 int ifidx;
4107 dhd_if_t *dhdif;
4108
4109 ifidx = dhd_net2idx(dhd, dev);
4110 if (ifidx == DHD_BAD_IF)
4111 return -1;
4112
4113 dhdif = dhd->iflist[ifidx];
4114
4115 dhd_net_if_lock_local(dhd);
4116 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
4117 dhdif->set_macaddress = TRUE;
4118 dhd_net_if_unlock_local(dhd);
4119 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
4120 dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
4121 return ret;
4122}
4123
4124static void
4125dhd_set_multicast_list(struct net_device *dev)
4126{
4127 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4128 int ifidx;
4129
4130 ifidx = dhd_net2idx(dhd, dev);
4131 if (ifidx == DHD_BAD_IF)
4132 return;
4133
4134 dhd->iflist[ifidx]->set_multicast = TRUE;
4135 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
4136 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
4137}
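/* Note: both dhd_set_mac_address() and dhd_set_multicast_list() above only
 * record the request and defer the actual firmware iovar to the DHD deferred
 * work queue. The ioctl path may sleep, while (for example) .ndo_set_rx_mode
 * is invoked in atomic (BH) context, so the work cannot be done inline here.
 */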
4138
4139#ifdef DHD_UCODE_DOWNLOAD
4140/* Get ucode path */
4141char *
4142dhd_get_ucode_path(dhd_pub_t *dhdp)
4143{
4144 dhd_info_t *dhd = dhdp->info;
4145 return dhd->uc_path;
4146}
4147#endif /* DHD_UCODE_DOWNLOAD */
4148
4149#ifdef PROP_TXSTATUS
4150int
4151dhd_os_wlfc_block(dhd_pub_t *pub)
4152{
4153 dhd_info_t *di = (dhd_info_t *)(pub->info);
4154 ASSERT(di != NULL);
4155 spin_lock_bh(&di->wlfc_spinlock);
4156 return 1;
4157}
4158
4159int
4160dhd_os_wlfc_unblock(dhd_pub_t *pub)
4161{
4162 dhd_info_t *di = (dhd_info_t *)(pub->info);
4163
4164 ASSERT(di != NULL);
4165 spin_unlock_bh(&di->wlfc_spinlock);
4166 return 1;
4167}
4168
4169#endif /* PROP_TXSTATUS */
4170
4171/* This routine does not support the packet chain feature; currently tested
4172 * for the proxy ARP feature
4173 */
4174int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
4175{
4176 struct sk_buff *skb;
4177 void *skbhead = NULL;
4178 void *skbprev = NULL;
4179 dhd_if_t *ifp;
4180 ASSERT(!PKTISCHAINED(p));
4181 skb = PKTTONATIVE(dhdp->osh, p);
4182
4183 ifp = dhdp->info->iflist[ifidx];
4184 skb->dev = ifp->net;
4185
4186 skb->protocol = eth_type_trans(skb, skb->dev);
4187
4188 if (in_interrupt()) {
4189 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4190 __FUNCTION__, __LINE__);
4191 netif_rx(skb);
4192 } else {
4193 if (dhdp->info->rxthread_enabled) {
4194 if (!skbhead) {
4195 skbhead = skb;
4196 } else {
4197 PKTSETNEXT(dhdp->osh, skbprev, skb);
4198 }
4199 skbprev = skb;
4200 } else {
4201 /* If the receive is not processed inside an ISR,
4202 * the softirqd must be woken explicitly to service
4203 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4204 * by netif_rx_ni(), but in earlier kernels, we need
4205 * to do it manually.
4206 */
4207 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4208 __FUNCTION__, __LINE__);
4209#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4210 netif_rx_ni(skb);
4211#else
4212 ulong flags;
4213 netif_rx(skb);
4214 local_irq_save(flags);
4215 RAISE_RX_SOFTIRQ();
4216 local_irq_restore(flags);
4217#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4218 }
4219 }
4220
4221 if (dhdp->info->rxthread_enabled && skbhead)
4222 dhd_sched_rxf(dhdp, skbhead);
4223
4224 return BCME_OK;
4225}
4226
4227int BCMFASTPATH
4228__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4229{
4230 int ret = BCME_OK;
4231 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4232 struct ether_header *eh = NULL;
4233#if defined(DHD_L2_FILTER)
4234 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
4235#endif // endif
4236
4237 /* Reject if down */
4238 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
4239 /* free the packet here since the caller won't */
4240 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4241 return -ENODEV;
4242 }
4243
4244#ifdef PCIE_FULL_DONGLE
4245 if (dhdp->busstate == DHD_BUS_SUSPEND) {
4246 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
4247 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4248#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4249 return -ENODEV;
4250#else
4251 return NETDEV_TX_BUSY;
4252#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
4253 }
4254#endif /* PCIE_FULL_DONGLE */
4255
4256 /* Reject if pktlen > MAX_MTU_SZ */
4257 if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
4258 /* free the packet here since the caller won't */
4259 dhdp->tx_big_packets++;
4260 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4261 return BCME_ERROR;
4262 }
4263
4264#ifdef DHD_L2_FILTER
4265 /* if dhcp_unicast is enabled, we need to convert the */
4266 /* broadcast DHCP ACK/REPLY packets to Unicast. */
4267 if (ifp->dhcp_unicast) {
4268 uint8* mac_addr;
4269 uint8* ehptr = NULL;
4270 int ret;
4271 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
4272 if (ret == BCME_OK) {
4273			/* if the given mac address has a valid entry in the sta list,
4274			 * rewrite the frame's destination with it and return with BCME_OK
4275			 */
4276 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
4277 ehptr = PKTDATA(dhdp->osh, pktbuf);
4278 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
4279 }
4280 }
4281 }
4282
4283 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4284 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4285 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4286 return BCME_ERROR;
4287 }
4288 }
4289
4290 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4291 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
4292
4293 /* Drop the packets if l2 filter has processed it already
4294 * otherwise continue with the normal path
4295 */
4296 if (ret == BCME_OK) {
4297 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4298 return BCME_ERROR;
4299 }
4300 }
4301#endif /* DHD_L2_FILTER */
4302 /* Update multicast statistic */
4303 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
4304 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
4305 eh = (struct ether_header *)pktdata;
4306
4307 if (ETHER_ISMULTI(eh->ether_dhost))
4308 dhdp->tx_multicast++;
4309 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
4310#ifdef DHD_LOSSLESS_ROAMING
4311 uint8 prio = (uint8)PKTPRIO(pktbuf);
4312
4313 /* back up 802.1x's priority */
4314 dhdp->prio_8021x = prio;
4315#endif /* DHD_LOSSLESS_ROAMING */
4316 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
4317 atomic_inc(&dhd->pend_8021x_cnt);
4318#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
4319 wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
4320 pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
4321#endif /* WL_CFG80211 && WL_WPS_SYNC */
4322#if defined(DHD_8021X_DUMP)
4323 dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4324#endif /* DHD_8021X_DUMP */
4325 }
4326
4327 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
4328#ifdef DHD_DHCP_DUMP
4329 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4330#endif /* DHD_DHCP_DUMP */
4331#ifdef DHD_ICMP_DUMP
4332 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4333#endif /* DHD_ICMP_DUMP */
4334 }
4335 } else {
4336 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4337 return BCME_ERROR;
4338 }
4339
4340 {
4341 /* Look into the packet and update the packet priority */
4342#ifndef PKTPRIO_OVERRIDE
4343 if (PKTPRIO(pktbuf) == 0)
4344#endif /* !PKTPRIO_OVERRIDE */
4345 {
4346#if defined(QOS_MAP_SET)
4347 pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
4348#else
4349 pktsetprio(pktbuf, FALSE);
4350#endif /* QOS_MAP_SET */
4351 }
4352#ifndef PKTPRIO_OVERRIDE
4353 else {
4354 /* Some protocols like OZMO use priority values from 256..263.
4355 * these are magic values to indicate a specific 802.1d priority.
4356 * make sure that priority field is in range of 0..7
4357 */
4358 PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
4359 }
4360#endif /* !PKTPRIO_OVERRIDE */
4361 }
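	/* Worked example: a protocol priority of 261 (0x105) is masked to
	 * 261 & 0x7 = 5, i.e. 802.1d priority VI, keeping the field within 0..7.
	 */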
4362
4363#ifdef SUPPORT_SET_TID
4364 dhd_set_tid_based_on_uid(dhdp, pktbuf);
4365#endif /* SUPPORT_SET_TID */
4366
4367#ifdef PCIE_FULL_DONGLE
4368 /*
4369	 * Look up the per-interface hash table for a matching flowring. If one is not
4370 * available, allocate a unique flowid and add a flowring entry.
4371 * The found or newly created flowid is placed into the pktbuf's tag.
4372 */
4373 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
4374 if (ret != BCME_OK) {
4375 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
4376 return ret;
4377 }
4378#endif // endif
4379
4380#ifdef PROP_TXSTATUS
4381 if (dhd_wlfc_is_supported(dhdp)) {
4382 /* store the interface ID */
4383 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
4384
4385 /* store destination MAC in the tag as well */
4386 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
4387
4388 /* decide which FIFO this packet belongs to */
4389 if (ETHER_ISMULTI(eh->ether_dhost))
4390 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
4391 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
4392 else
4393 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
4394 } else
4395#endif /* PROP_TXSTATUS */
4396 {
4397 /* If the protocol uses a data header, apply it */
4398 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
4399 }
4400
4401 /* Use bus module to send data frame */
4402#ifdef DYNAMIC_MUMIMO_CONTROL
4403 if (dhdp->reassoc_mumimo_sw &&
4404 dhd_check_eapol_4way_message(PKTDATA(dhdp->osh, pktbuf)) == EAPOL_4WAY_M4) {
4405 dhdp->reassoc_mumimo_sw = 0;
4406 DHD_ENABLE_RUNTIME_PM(dhdp);
4407 }
4408#endif /* DYNAMIC_MUMIMO_CONTROL */
4409#ifdef PROP_TXSTATUS
4410 {
4411 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
4412 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
4413 /* non-proptxstatus way */
4414#ifdef BCMPCIE
4415 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
4416#else
4417 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
4418#endif /* BCMPCIE */
4419 }
4420 }
4421#else
4422#ifdef BCMPCIE
4423 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
4424#else
4425 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
4426#endif /* BCMPCIE */
4427#endif /* PROP_TXSTATUS */
4428
4429 return ret;
4430}
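/* __dhd_sendpkt() is thus the common TX funnel: it validates bus state and
 * packet length, optionally rewrites the frame (L2 filter, DHCP unicast),
 * updates TX statistics, assigns an 802.1d priority and (for PCIe full
 * dongle) a flow ring, and finally hands the packet to proptxstatus or
 * directly to the bus layer.
 */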
4431
4432int BCMFASTPATH
4433dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4434{
4435 int ret = 0;
4436 unsigned long flags;
4437 dhd_if_t *ifp;
4438
4439 DHD_GENERAL_LOCK(dhdp, flags);
4440 ifp = dhd_get_ifp(dhdp, ifidx);
4441 if (!ifp || ifp->del_in_progress) {
4442 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
4443 __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
4444 DHD_GENERAL_UNLOCK(dhdp, flags);
4445 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4446 return -ENODEV;
4447 }
4448 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
4449 DHD_ERROR(("%s: returning as busstate=%d\n",
4450 __FUNCTION__, dhdp->busstate));
4451 DHD_GENERAL_UNLOCK(dhdp, flags);
4452 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4453 return -ENODEV;
4454 }
4455 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
4456 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
4457 DHD_GENERAL_UNLOCK(dhdp, flags);
4458
4459#ifdef DHD_PCIE_RUNTIMEPM
4460 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
4461 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
4462 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4463 ret = -EBUSY;
4464 goto exit;
4465 }
4466#endif /* DHD_PCIE_RUNTIMEPM */
4467
4468 DHD_GENERAL_LOCK(dhdp, flags);
4469 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
4470 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4471 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
4472 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
4473 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
4474 dhd_os_tx_completion_wake(dhdp);
4475 dhd_os_busbusy_wake(dhdp);
4476 DHD_GENERAL_UNLOCK(dhdp, flags);
4477 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4478 return -ENODEV;
4479 }
4480 DHD_GENERAL_UNLOCK(dhdp, flags);
4481
4482 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
4483
4484#ifdef DHD_PCIE_RUNTIMEPM
4485exit:
4486#endif // endif
4487 DHD_GENERAL_LOCK(dhdp, flags);
4488 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
4489 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
4490 dhd_os_tx_completion_wake(dhdp);
4491 dhd_os_busbusy_wake(dhdp);
4492 DHD_GENERAL_UNLOCK(dhdp, flags);
4493 return ret;
4494}
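/* dhd_sendpkt() brackets __dhd_sendpkt() with the IN_SEND_PKT bus-busy flag
 * and per-interface TX-active accounting, so that suspend and interface
 * teardown paths waiting on dhd_os_busbusy_wake()/dhd_os_tx_completion_wake()
 * can tell when in-flight transmits have drained.
 */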
4495
4496#if defined(DHD_LB_TXP)
4497
4498int BCMFASTPATH
4499dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
4500 int ifidx, void *skb)
4501{
4502 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
4503
4504 /* If the feature is disabled run-time do TX from here */
4505 if (atomic_read(&dhd->lb_txp_active) == 0) {
4506 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
4507 return __dhd_sendpkt(&dhd->pub, ifidx, skb);
4508 }
4509
4510 /* Store the address of net device and interface index in the Packet tag */
4511 DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
4512 DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
4513
4514 /* Enqueue the skb into tx_pend_queue */
4515 skb_queue_tail(&dhd->tx_pend_queue, skb);
4516
4517 DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
4518
4519 /* Dispatch the Tx job to be processed by the tx_tasklet */
4520 dhd_lb_tx_dispatch(&dhd->pub);
4521
4522 return NETDEV_TX_OK;
4523}
4524#endif /* DHD_LB_TXP */
4525
4526int BCMFASTPATH
4527dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
4528{
4529 int ret;
4530 uint datalen;
4531 void *pktbuf;
4532 dhd_info_t *dhd = DHD_DEV_INFO(net);
4533 dhd_if_t *ifp = NULL;
4534 int ifidx;
4535 unsigned long flags;
4536 uint8 htsfdlystat_sz = 0;
4537
4538 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4539
4540 if (dhd_query_bus_erros(&dhd->pub)) {
4541 return -ENODEV;
4542 }
4543
4544 DHD_GENERAL_LOCK(&dhd->pub, flags);
4545 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
4546 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4547
4548#ifdef DHD_PCIE_RUNTIMEPM
4549 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
4550		/* In order to avoid pkt loss, return NETDEV_TX_BUSY until runtime resume is done. */
4551		/* Stop the network queue temporarily until resume is done. */
4552 DHD_GENERAL_LOCK(&dhd->pub, flags);
4553 if (!dhdpcie_is_resume_done(&dhd->pub)) {
4554 dhd_bus_stop_queue(dhd->pub.bus);
4555 }
4556 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4557 dhd_os_busbusy_wake(&dhd->pub);
4558 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4559#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4560 return -ENODEV;
4561#else
4562 return NETDEV_TX_BUSY;
4563#endif // endif
4564 }
4565#endif /* DHD_PCIE_RUNTIMEPM */
4566
4567 DHD_GENERAL_LOCK(&dhd->pub, flags);
4568 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
4569 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4570 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
4571 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4572#ifdef PCIE_FULL_DONGLE
4573 /* Stop tx queues if suspend is in progress */
4574 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
4575 dhd_bus_stop_queue(dhd->pub.bus);
4576 }
4577#endif /* PCIE_FULL_DONGLE */
4578 dhd_os_busbusy_wake(&dhd->pub);
4579 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4580#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4581 return -ENODEV;
4582#else
4583 return NETDEV_TX_BUSY;
4584#endif // endif
4585 }
4586
4587 DHD_OS_WAKE_LOCK(&dhd->pub);
4588 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4589
4590#if defined(DHD_HANG_SEND_UP_TEST)
4591 if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
4592 dhd->pub.busstate = DHD_BUS_DOWN;
4593 }
4594#endif /* DHD_HANG_SEND_UP_TEST */
4595
4596 /* Reject if down */
4597 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
4598 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
4599 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
4600 netif_stop_queue(net);
4601 /* Send Event when bus down detected during data session */
4602 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
4603 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
4604 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
4605 net_os_send_hang_message(net);
4606 }
4607 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4608 dhd_os_busbusy_wake(&dhd->pub);
4609 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4610 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4611 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4612#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4613 return -ENODEV;
4614#else
4615 return NETDEV_TX_BUSY;
4616#endif // endif
4617 }
4618
4619 ifp = DHD_DEV_IFP(net);
4620 ifidx = DHD_DEV_IFIDX(net);
4621 if (!ifp || (ifidx == DHD_BAD_IF) ||
4622 ifp->del_in_progress) {
4623 DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
4624 __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
4625 netif_stop_queue(net);
4626 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4627 dhd_os_busbusy_wake(&dhd->pub);
4628 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4629 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4630 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4631#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4632 return -ENODEV;
4633#else
4634 return NETDEV_TX_BUSY;
4635#endif // endif
4636 }
4637
4638 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
4639 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4640
4641 ASSERT(ifidx == dhd_net2idx(dhd, net));
4642 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4643
4644 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4645
4646 /* re-align socket buffer if "skb->data" is odd address */
4647 if (((unsigned long)(skb->data)) & 0x1) {
4648 unsigned char *data = skb->data;
4649 uint32 length = skb->len;
4650 PKTPUSH(dhd->pub.osh, skb, 1);
4651 memmove(skb->data, data, length);
4652 PKTSETLEN(dhd->pub.osh, skb, length);
4653 }
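	/* Example: if skb->data were 0x...d3 (odd), PKTPUSH moves the data pointer
	 * to 0x...d2 and the memmove shifts the payload down one byte, so the rest
	 * of the TX path sees a 2-byte-aligned start address; the length is then
	 * restored with PKTSETLEN.
	 */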
4654
4655 datalen = PKTLEN(dhd->pub.osh, skb);
4656
4657 /* Make sure there's enough room for any header */
4658 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4659 struct sk_buff *skb2;
4660
4661 DHD_INFO(("%s: insufficient headroom\n",
4662 dhd_ifname(&dhd->pub, ifidx)));
4663 dhd->pub.tx_realloc++;
4664
4665 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4666 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4667
4668 dev_kfree_skb(skb);
4669 if ((skb = skb2) == NULL) {
4670 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4671 dhd_ifname(&dhd->pub, ifidx)));
4672 ret = -ENOMEM;
4673 goto done;
4674 }
4675 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4676 }
4677
4678 /* Convert to packet */
4679 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4680 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4681 dhd_ifname(&dhd->pub, ifidx)));
4682 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4683 dev_kfree_skb_any(skb);
4684 ret = -ENOMEM;
4685 goto done;
4686 }
4687
4688#ifdef DHD_WET
4689 /* wet related packet proto manipulation should be done in DHD
4690 since dongle doesn't have complete payload
4691 */
4692 if (WET_ENABLED(&dhd->pub) &&
4693 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
4694 DHD_INFO(("%s:%s: wet send proc failed\n",
4695 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
4696 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
4697 ret = -EFAULT;
4698 goto done;
4699 }
4700#endif /* DHD_WET */
4701
4702#ifdef DHD_PSTA
4703 /* PSR related packet proto manipulation should be done in DHD
4704 * since dongle doesn't have complete payload
4705 */
4706 if (PSR_ENABLED(&dhd->pub) &&
4707 (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
4708
4709 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4710 dhd_ifname(&dhd->pub, ifidx)));
4711 }
4712#endif /* DHD_PSTA */
4713
4714#ifdef DHDTCPSYNC_FLOOD_BLK
4715 if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
4716		ifp->tsyncack_txed++;
4717 }
4718#endif /* DHDTCPSYNC_FLOOD_BLK */
4719
4720#ifdef DHDTCPACK_SUPPRESS
4721 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4722		/* If this packet has been held or was freed, just return */
4723 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4724 ret = 0;
4725 goto done;
4726 }
4727 } else {
4728 /* If this packet has replaced another packet and got freed, just return */
4729 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4730 ret = 0;
4731 goto done;
4732 }
4733 }
4734#endif /* DHDTCPACK_SUPPRESS */
4735
4736 /*
4737 * If Load Balance is enabled queue the packet
4738 * else send directly from here.
4739 */
4740#if defined(DHD_LB_TXP)
4741 ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
4742#else
4743 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4744#endif // endif
4745
4746done:
4747 if (ret) {
4748 ifp->stats.tx_dropped++;
4749 dhd->pub.tx_dropped++;
4750 } else {
4751#ifdef PROP_TXSTATUS
4752		/* tx_packets counter can be counted only when wlfc is disabled */
4753 if (!dhd_wlfc_is_supported(&dhd->pub))
4754#endif // endif
4755 {
4756 dhd->pub.tx_packets++;
4757 ifp->stats.tx_packets++;
4758 ifp->stats.tx_bytes += datalen;
4759 }
4760 }
4761
4762 DHD_GENERAL_LOCK(&dhd->pub, flags);
4763 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4764 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
4765 dhd_os_tx_completion_wake(&dhd->pub);
4766 dhd_os_busbusy_wake(&dhd->pub);
4767 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4768 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4769 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4770 /* Return ok: we always eat the packet */
4771#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4772 return 0;
4773#else
4774 return NETDEV_TX_OK;
4775#endif // endif
4776}
4777
4778#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4779void dhd_rx_wq_wakeup(struct work_struct *ptr)
4780{
4781 struct dhd_rx_tx_work *work;
4782 struct dhd_pub * pub;
4783
4784 work = container_of(ptr, struct dhd_rx_tx_work, work);
4785
4786 pub = work->pub;
4787
4788 DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
4789
4790 if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
4791 return;
4792 }
4793
4794 DHD_OS_WAKE_LOCK(pub);
4795 if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
4796
4797 // do nothing but wakeup the bus.
4798 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
4799 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
4800 }
4801 DHD_OS_WAKE_UNLOCK(pub);
4802 kfree(work);
4803}
4804
4805void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
4806{
4807 struct dhd_rx_tx_work *work;
4808	int ret = 0; /* only assigned when pm_runtime_get_sync() succeeds below */
4809 dhd_info_t *dhd;
4810 struct dhd_bus * bus;
4811
4812 work = container_of(ptr, struct dhd_rx_tx_work, work);
4813
4814 dhd = DHD_DEV_INFO(work->net);
4815
4816 bus = dhd->pub.bus;
4817
4818 if (atomic_read(&dhd->pub.block_bus)) {
4819 kfree_skb(work->skb);
4820 kfree(work);
4821 dhd_netif_start_queue(bus);
4822 return;
4823 }
4824
4825 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
4826 ret = dhd_start_xmit(work->skb, work->net);
4827 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
4828 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
4829 }
4830 kfree(work);
4831 dhd_netif_start_queue(bus);
4832
4833 if (ret)
4834 netdev_err(work->net,
4835 "error: dhd_start_xmit():%d\n", ret);
4836}
4837
4838int BCMFASTPATH
4839dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
4840{
4841 struct dhd_rx_tx_work *start_xmit_work;
4842 int ret;
4843 dhd_info_t *dhd = DHD_DEV_INFO(net);
4844
4845 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
4846 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
4847
4848 dhd_netif_stop_queue(dhd->pub.bus);
4849
4850 start_xmit_work = (struct dhd_rx_tx_work*)
4851 kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
4852
4853 if (!start_xmit_work) {
4854 netdev_err(net,
4855 "error: failed to alloc start_xmit_work\n");
4856 ret = -ENOMEM;
4857 goto exit;
4858 }
4859
4860 INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
4861 start_xmit_work->skb = skb;
4862 start_xmit_work->net = net;
4863 queue_work(dhd->tx_wq, &start_xmit_work->work);
4864 ret = NET_XMIT_SUCCESS;
4865
4866 } else if (dhd->pub.busstate == DHD_BUS_DATA) {
4867 ret = dhd_start_xmit(skb, net);
4868 } else {
4869 /* when bus is down */
4870 ret = -ENODEV;
4871 }
4872
4873exit:
4874 return ret;
4875}
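/* When the bus is runtime-suspended, .ndo_start_xmit must not block waiting
 * for resume, so dhd_start_xmit_wrapper() (above) stops the netif queue and
 * defers both the bus wakeup and the actual dhd_start_xmit() call to tx_wq.
 */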
4876void
4877dhd_bus_wakeup_work(dhd_pub_t *dhdp)
4878{
4879 struct dhd_rx_tx_work *rx_work;
4880 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4881
4882 rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
4883 if (!rx_work) {
4884 DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
4885 return;
4886 }
4887
4888 INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
4889 rx_work->pub = dhdp;
4890 queue_work(dhd->rx_wq, &rx_work->work);
4891
4892}
4893#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4894void
4895dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4896{
4897 struct net_device *net;
4898 dhd_info_t *dhd = dhdp->info;
4899 int i;
4900
4901 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4902
4903 ASSERT(dhd);
4904
4905#ifdef DHD_LOSSLESS_ROAMING
4906 /* block flowcontrol during roaming */
4907 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4908 return;
4909 }
4910#endif // endif
4911
4912 if (ifidx == ALL_INTERFACES) {
4913 /* Flow control on all active interfaces */
4914 dhdp->txoff = state;
4915 for (i = 0; i < DHD_MAX_IFS; i++) {
4916 if (dhd->iflist[i]) {
4917 net = dhd->iflist[i]->net;
4918 if (state == ON)
4919 netif_stop_queue(net);
4920 else
4921 netif_wake_queue(net);
4922 }
4923 }
4924 } else {
4925 if (dhd->iflist[ifidx]) {
4926 net = dhd->iflist[ifidx]->net;
4927 if (state == ON)
4928 netif_stop_queue(net);
4929 else
4930 netif_wake_queue(net);
4931 }
4932 }
4933}
4934
4935#ifdef DHD_RX_DUMP
4936typedef struct {
4937 uint16 type;
4938 const char *str;
4939} PKTTYPE_INFO;
4940
4941static const PKTTYPE_INFO packet_type_info[] =
4942{
4943 { ETHER_TYPE_IP, "IP" },
4944 { ETHER_TYPE_ARP, "ARP" },
4945 { ETHER_TYPE_BRCM, "BRCM" },
4946 { ETHER_TYPE_802_1X, "802.1X" },
4947#ifdef BCMWAPI_WAI
4948 { ETHER_TYPE_WAI, "WAPI" },
4949#endif /* BCMWAPI_WAI */
4950 { 0, ""}
4951};
4952
4953static const char *_get_packet_type_str(uint16 type)
4954{
4955 int i;
4956	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
4957
4958 for (i = 0; i < n; i++) {
4959 if (packet_type_info[i].type == type)
4960 return packet_type_info[i].str;
4961 }
4962
4963 return packet_type_info[n].str;
4964}
4965#endif /* DHD_RX_DUMP */
4966
4967#ifdef DHD_MCAST_REGEN
4968/*
4969 * Description: This function is called to do the reverse translation
4970 *
4971 * Input eh - pointer to the ethernet header
4972 */
4973int32
4974dhd_mcast_reverse_translation(struct ether_header *eh)
4975{
4976 uint8 *iph;
4977 uint32 dest_ip;
4978
4979	iph = (uint8 *)eh + ETHER_HDR_LEN;
4980
4981	/* Only IP packets are handled */
4982	if (eh->ether_type != hton16(ETHER_TYPE_IP))
4983		return BCME_ERROR;
4984
4985	/* Non-IPv4 multicast packets are not handled */
4986	if (IP_VER(iph) != IP_VER_4)
4987		return BCME_ERROR;
4988	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); /* read only after the header checks */
4989
4990 /*
4991 * The packet has a multicast IP and unicast MAC. That means
4992 * we have to do the reverse translation
4993 */
4994 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
4995 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
4996 return BCME_OK;
4997 }
4998
4999 return BCME_ERROR;
5000}
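/* Worked example of the reverse translation above (assuming
 * ETHER_FILL_MCAST_ADDR_FROM_IP implements the usual RFC 1112 mapping):
 * a frame with destination IP 239.1.2.3 but a unicast MAC gets its
 * destination MAC rewritten to 01:00:5e:01:02:03, i.e. the low 23 bits of
 * the IP address placed into the IPv4 multicast MAC prefix.
 */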
5001#endif /* DHD_MCAST_REGEN */
5002
5003#ifdef SHOW_LOGTRACE
5004static void
5005dhd_netif_rx_ni(struct sk_buff * skb)
5006{
5007	/* Do not call netif_receive_skb, as this workqueue scheduler is
5008	 * not invoked from NAPI. Also, as we are not in interrupt context, do
5009	 * not call netif_rx; instead call netif_rx_ni (for kernel >= 2.6),
5010	 * which calls netif_rx, disables irqs, raises the NET_RX_SOFTIRQ and
5011	 * re-enables interrupts.
5012	 */
5013#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5014 netif_rx_ni(skb);
5015#else
5016 ulong flags;
5017 netif_rx(skb);
5018 local_irq_save(flags);
5019 RAISE_RX_SOFTIRQ();
5020 local_irq_restore(flags);
5021#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5022}
5023
5024static int
5025dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
5026{
5027 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5028 int ret = BCME_OK;
5029 uint datalen;
5030 bcm_event_msg_u_t evu;
5031 void *data = NULL;
5032 void *pktdata = NULL;
5033 bcm_event_t *pvt_data;
5034 uint pktlen;
5035
5036 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
5037
5038 /* In dhd_rx_frame, header is stripped using skb_pull
5039 * of size ETH_HLEN, so adjust pktlen accordingly
5040 */
5041 pktlen = skb->len + ETH_HLEN;
5042
5043#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5044 pktdata = (void *)skb_mac_header(skb);
5045#else
5046 pktdata = (void *)skb->mac.raw;
5047#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5048
5049 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5050
5051 if (ret != BCME_OK) {
5052 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5053 __FUNCTION__, ret));
5054 goto exit;
5055 }
5056
5057 datalen = ntoh32(evu.event.datalen);
5058
5059 pvt_data = (bcm_event_t *)pktdata;
5060 data = &pvt_data[1];
5061
5062 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
5063
5064exit:
5065 return ret;
5066}
5067
5068#define DHD_EVENT_LOGTRACE_BOUND 12
5069#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 1
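/* Each run of dhd_event_logtrace_process() below drains at most
 * DHD_EVENT_LOGTRACE_BOUND packets from evt_trace_queue and, if a backlog
 * remains, reschedules itself after a 1 ms delay, so that a burst of log
 * events cannot monopolize the shared workqueue.
 */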
5070
5071static void
5072dhd_event_logtrace_process(struct work_struct * work)
5073{
5074/* Ignore compiler warnings due to -Werror=cast-qual */
5075#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5076#pragma GCC diagnostic push
5077#pragma GCC diagnostic ignored "-Wcast-qual"
5078#endif // endif
5079 struct delayed_work *dw = to_delayed_work(work);
5080 struct dhd_info *dhd =
5081 container_of(dw, struct dhd_info, event_log_dispatcher_work);
5082#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5083#pragma GCC diagnostic pop
5084#endif // endif
5085
5086 dhd_pub_t *dhdp;
5087 struct sk_buff *skb;
5088 uint32 qlen;
5089 uint32 process_len;
5090
5091 if (!dhd) {
5092 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
5093 return;
5094 }
5095
5096 dhdp = &dhd->pub;
5097
5098 if (!dhdp) {
5099 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
5100 return;
5101 }
5102
5103 qlen = skb_queue_len(&dhd->evt_trace_queue);
5104 process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
5105
5106 /* Run while loop till bound is reached or skb queue is empty */
5107 while (process_len--) {
5108 int ifid = 0;
5109 skb = skb_dequeue(&dhd->evt_trace_queue);
5110 if (skb == NULL) {
5111 DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
5112 __FUNCTION__));
5113 break;
5114 }
5115 BCM_REFERENCE(ifid);
5116#ifdef PCIE_FULL_DONGLE
5117 /* Check if pkt is from INFO ring or WLC_E_TRACE */
5118 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
5119 if (ifid == DHD_DUMMY_INFO_IF) {
5120 /* Process logtrace from info rings */
5121 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
5122 } else
5123#endif /* PCIE_FULL_DONGLE */
5124 {
5125			/* Processing WLC_E_TRACE case OR the non-PCIE_FULL_DONGLE case */
5126 dhd_event_logtrace_pkt_process(dhdp, skb);
5127 }
5128
5129 /* Send packet up if logtrace_pkt_sendup is TRUE */
5130 if (dhdp->logtrace_pkt_sendup) {
5131#ifdef DHD_USE_STATIC_CTRLBUF
5132 /* If bufs are allocated via static buf pool
5133 * and logtrace_pkt_sendup enabled, make a copy,
5134 * free the local one and send the copy up.
5135 */
5136 void *npkt = PKTDUP(dhdp->osh, skb);
5137 /* Clone event and send it up */
5138 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5139 if (npkt) {
5140 skb = npkt;
5141 } else {
5142 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
5143 /* Packet is already freed, go to next packet */
5144 continue;
5145 }
5146#endif /* DHD_USE_STATIC_CTRLBUF */
5147#ifdef PCIE_FULL_DONGLE
5148			/* For infobuf packets the ifidx is DHD_DUMMY_INFO_IF, so to
5149			 * send the skb to the network layer, assign skb->dev to the
5150			 * primary interface's n/w device
5151			 */
5152 if (ifid == DHD_DUMMY_INFO_IF) {
5153 skb = PKTTONATIVE(dhdp->osh, skb);
5154 skb->dev = dhd->iflist[0]->net;
5155 }
5156#endif /* PCIE_FULL_DONGLE */
5157 /* Send pkt UP */
5158 dhd_netif_rx_ni(skb);
5159 } else {
5160 /* Don't send up. Free up the packet. */
5161#ifdef DHD_USE_STATIC_CTRLBUF
5162 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5163#else
5164 PKTFREE(dhdp->osh, skb, FALSE);
5165#endif /* DHD_USE_STATIC_CTRLBUF */
5166 }
5167 }
5168
5169 /* Reschedule the workqueue if more packets to be processed */
5170 if (qlen >= DHD_EVENT_LOGTRACE_BOUND) {
5171 schedule_delayed_work(&dhd->event_log_dispatcher_work,
5172 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
5173 }
5174}
5175
5176void
5177dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
5178{
5179 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5180
5181#ifdef PCIE_FULL_DONGLE
5182 /* Add ifidx in the PKTTAG */
5183 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
5184#endif /* PCIE_FULL_DONGLE */
5185 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
5186
5187 schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
5188}
5189
5190void
5191dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
5192{
5193 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5194 struct sk_buff *skb;
5195
5196 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
5197#ifdef DHD_USE_STATIC_CTRLBUF
5198 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5199#else
5200 PKTFREE(dhdp->osh, skb, FALSE);
5201#endif /* DHD_USE_STATIC_CTRLBUF */
5202 }
5203}
5204#endif /* SHOW_LOGTRACE */
5205
5206/** Called when a frame is received by the dongle on interface 'ifidx' */
5207void
5208dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
5209{
5210 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5211 struct sk_buff *skb;
5212 uchar *eth;
5213 uint len;
5214 void *data, *pnext = NULL;
5215 int i;
5216 dhd_if_t *ifp;
5217 wl_event_msg_t event;
5218 int tout_rx = 0;
5219 int tout_ctrl = 0;
5220 void *skbhead = NULL;
5221 void *skbprev = NULL;
5222 uint16 protocol;
5223#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5224 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5225 unsigned char *dump_data;
5226#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5227#ifdef DHD_MCAST_REGEN
5228 uint8 interface_role;
5229 if_flow_lkup_t *if_flow_lkup;
5230 unsigned long flags;
5231#endif // endif
5232#ifdef DHD_WAKE_STATUS
5233 int pkt_wake = 0;
5234 wake_counts_t *wcp = NULL;
5235#endif /* DHD_WAKE_STATUS */
5236
5237 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5238
5239 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
5240 struct ether_header *eh;
5241
5242 pnext = PKTNEXT(dhdp->osh, pktbuf);
5243 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
5244
5245		/* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
5246		 * special ifidx of DHD_DUMMY_INFO_IF. This is internal to dhd, used only to get
5247		 * the data from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5248		 */
5249 if (ifidx == DHD_DUMMY_INFO_IF) {
5250			/* Event msg printing is called from dhd_rx_frame, which runs in
5251			 * tasklet context in the PCIe FD case and in DPC context for other
5252			 * buses. If we get a bunch of events from the dongle, printing all
5253			 * of them from tasklet/DPC context, in the data path no less, is costly.
5254			 * Also, in the newer dongle SW (4359, 4355 onwards) console prints
5255			 * also come as events of type WLC_E_TRACE.
5256			 * We print these console logs from workqueue context by enqueueing the
5257			 * SKB here; it is dequeued in the workqueue and sent up only if
5258			 * logtrace_pkt_sendup is TRUE, otherwise it is freed there.
5259			 */
5260#ifdef SHOW_LOGTRACE
5261 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5262#else /* !SHOW_LOGTRACE */
5263 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
5264 * free the PKT here itself
5265 */
5266#ifdef DHD_USE_STATIC_CTRLBUF
5267 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5268#else
5269 PKTFREE(dhdp->osh, pktbuf, FALSE);
5270#endif /* DHD_USE_STATIC_CTRLBUF */
5271#endif /* SHOW_LOGTRACE */
5272 continue;
5273 }
5274#ifdef DHD_WAKE_STATUS
5275 pkt_wake = dhd_bus_get_bus_wake(dhdp);
5276 wcp = dhd_bus_get_wakecount(dhdp);
5277 if (wcp == NULL) {
5278 /* If wakeinfo count buffer is null do not update wake count values */
5279 pkt_wake = 0;
5280 }
5281#endif /* DHD_WAKE_STATUS */
5282
5283 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5284
5285 if (ifidx >= DHD_MAX_IFS) {
5286 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
5287 __FUNCTION__, ifidx));
5288 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
5289#ifdef DHD_USE_STATIC_CTRLBUF
5290 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5291#else
5292 PKTFREE(dhdp->osh, pktbuf, FALSE);
5293#endif /* DHD_USE_STATIC_CTRLBUF */
5294 } else {
5295 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5296 }
5297 continue;
5298 }
5299
5300 ifp = dhd->iflist[ifidx];
5301 if (ifp == NULL) {
5302 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5303 __FUNCTION__));
5304 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
5305#ifdef DHD_USE_STATIC_CTRLBUF
5306 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5307#else
5308 PKTFREE(dhdp->osh, pktbuf, FALSE);
5309#endif /* DHD_USE_STATIC_CTRLBUF */
5310 } else {
5311 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5312 }
5313 continue;
5314 }
5315
5316 /* Dropping only data packets before registering net device to avoid kernel panic */
5317#ifndef PROP_TXSTATUS_VSDB
5318 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
5319 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5320#else
5321 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
5322 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5323#endif /* PROP_TXSTATUS_VSDB */
5324 {
5325 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5326 __FUNCTION__));
5327 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5328 continue;
5329 }
5330
5331#ifdef PROP_TXSTATUS
5332 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
5333 /* WLFC may send header only packet when
5334 there is an urgent message but no packet to
5335 piggy-back on
5336 */
5337 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5338 continue;
5339 }
5340#endif // endif
5341#ifdef DHD_L2_FILTER
5342 /* If block_ping is enabled drop the ping packet */
5343 if (ifp->block_ping) {
5344 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
5345 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5346 continue;
5347 }
5348 }
5349 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
5350 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
5351 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5352 continue;
5353 }
5354 }
5355 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
5356 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
5357
5358 /* Drop the packets if l2 filter has processed it already
5359 * otherwise continue with the normal path
5360 */
5361 if (ret == BCME_OK) {
5362 PKTCFREE(dhdp->osh, pktbuf, TRUE);
5363 continue;
5364 }
5365 }
5366 if (ifp->block_tdls) {
5367 if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
5368 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5369 continue;
5370 }
5371 }
5372#endif /* DHD_L2_FILTER */
5373
5374#ifdef DHD_MCAST_REGEN
5375 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
5376 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
5377 ASSERT(if_flow_lkup);
5378
5379 interface_role = if_flow_lkup[ifidx].role;
5380 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
5381
5382 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
5383 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
5384 ETHER_ISUCAST(eh->ether_dhost)) {
5385 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
5386#ifdef DHD_PSTA
5387 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5388 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
5389 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
5390 if (ifidx != 0) {
5391					/* Let the primary PSTA interface handle this
5392					 * frame after the unicast-to-multicast conversion
5393					 */
5394 ifp = dhd_get_ifp(dhdp, 0);
5395 ASSERT(ifp);
5396 }
5397 }
5398#endif /* DHD_PSTA */
5399		}
5400	}
5401#endif /* DHD_MCAST_REGEN */
5402
5403#ifdef DHDTCPSYNC_FLOOD_BLK
5404 if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
5405 int delta_sec;
5406 int delta_sync;
5407 int sync_per_sec;
5408 u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
5409			ifp->tsync_rcvd++;
5410 delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
5411 delta_sec = curr_time - ifp->last_sync;
5412 if (delta_sec > 1) {
5413 sync_per_sec = delta_sync/delta_sec;
5414 if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
5415 schedule_work(&ifp->blk_tsfl_work);
5416					DHD_ERROR(("ifidx %d TCP SYNC Flood attack suspected! "
5417						"sync received %d pkt/sec \n",
5418 ifidx, sync_per_sec));
5419 }
5420 dhd_reset_tcpsync_info_by_ifp(ifp);
5421 }
5422
5423 }
5424#endif /* DHDTCPSYNC_FLOOD_BLK */
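		/* Example of the rate check above: if 300 SYNs arrive with no SYN-ACKs
		 * sent over a 2-second window, sync_per_sec = 300 / 2 = 150; if that
		 * exceeds TCP_SYNC_FLOOD_LIMIT, blk_tsfl_work is scheduled to handle
		 * the suspected flood.
		 */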
5425
5426#ifdef DHDTCPACK_SUPPRESS
5427 dhd_tcpdata_info_get(dhdp, pktbuf);
5428#endif // endif
5429 skb = PKTTONATIVE(dhdp->osh, pktbuf);
5430
5431 ASSERT(ifp);
5432 skb->dev = ifp->net;
5433#ifdef DHD_WET
5434		/* WET-related packet protocol manipulation should be done in DHD,
5435		 * since the dongle does not have the complete payload
5436		 */
5437 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
5438 pktbuf) < 0)) {
5439 DHD_INFO(("%s:%s: wet recv proc failed\n",
5440 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
5441 }
5442#endif /* DHD_WET */
5443
5444#ifdef DHD_PSTA
5445 if (PSR_ENABLED(dhdp) &&
5446 (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
5447 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
5448 dhd_ifname(dhdp, ifidx)));
5449 }
5450#endif /* DHD_PSTA */
5451
5452#ifdef PCIE_FULL_DONGLE
5453 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
5454 (!ifp->ap_isolate)) {
5455 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5456 if (ETHER_ISUCAST(eh->ether_dhost)) {
5457 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
5458 dhd_sendpkt(dhdp, ifidx, pktbuf);
5459 continue;
5460 }
5461 } else {
5462 void *npktbuf = NULL;
5463 if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
5464 (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) {
5465 dhd_sendpkt(dhdp, ifidx, npktbuf);
5466 }
5467 }
5468 }
5469#endif /* PCIE_FULL_DONGLE */
5470#ifdef DYNAMIC_MUMIMO_CONTROL
5471 if (dhdp->reassoc_mumimo_sw && dhdp->murx_block_eapol &&
5472 dhd_check_eapol_4way_message((void *)(skb->data)) == EAPOL_4WAY_M1) {
5473 DHD_ERROR(("%s: Reassoc is in progress..."
5474 " drop EAPOL M1 frame\n", __FUNCTION__));
5475 PKTFREE(dhdp->osh, pktbuf, FALSE);
5476 continue;
5477 }
5478#endif /* DYNAMIC_MUMIMO_CONTROL */
5479
5480		/* Get the protocol, maintaining skb->data/len around eth_type_trans().
5481		 * The main reason for this hack is a limitation of Linux 2.4:
5482		 * eth_type_trans() performs its internal skb_pull() using
5483		 * net->hard_header_len rather than ETH_HLEN. To avoid copying
5484		 * packets coming from the network stack when prepending the BDC,
5485		 * hardware header etc., we set net->hard_header_len during network
5486		 * interface registration to ETH_HLEN plus the extra space required
5487		 * for those headers, not just ETH_HLEN.
5488		 */
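		/* The resulting pattern is: save skb->data/skb->len here, let
		 * eth_type_trans() below pull hard_header_len (which may exceed
		 * ETH_HLEN), restore the saved pointers, then do the explicit
		 * skb_pull(skb, ETH_HLEN) so exactly one Ethernet header is
		 * stripped before delivery.
		 */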
5489 eth = skb->data;
5490 len = skb->len;
5491
5492#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5493 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5494 dump_data = skb->data;
5495#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5496
5497 protocol = (skb->data[12] << 8) | skb->data[13];
5498 if (protocol == ETHER_TYPE_802_1X) {
5499 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
5500#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
5501 wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
5502#endif /* WL_CFG80211 && WL_WPS_SYNC */
5503#ifdef DHD_8021X_DUMP
5504 dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5505#endif /* DHD_8021X_DUMP */
5506 }
5507
5508 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
5509#ifdef DHD_DHCP_DUMP
5510 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5511#endif /* DHD_DHCP_DUMP */
5512#ifdef DHD_ICMP_DUMP
5513 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5514#endif /* DHD_ICMP_DUMP */
5515 }
5516#ifdef DHD_RX_DUMP
5517 DHD_ERROR(("RX DUMP[%s] - %s\n",
5518 dhd_ifname(dhdp, ifidx), _get_packet_type_str(protocol)));
5519 if (protocol != ETHER_TYPE_BRCM) {
5520 if (dump_data[0] == 0xFF) {
5521 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
5522
5523 if ((dump_data[12] == 8) &&
5524 (dump_data[13] == 6)) {
5525 DHD_ERROR(("%s: ARP %d\n",
5526 __FUNCTION__, dump_data[0x15]));
5527 }
5528 } else if (dump_data[0] & 1) {
5529 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
5530 __FUNCTION__, MAC2STRDBG(dump_data)));
5531 }
5532#ifdef DHD_RX_FULL_DUMP
5533 {
5534 int k;
5535 for (k = 0; k < skb->len; k++) {
5536 DHD_ERROR(("%02X ", dump_data[k]));
5537 if ((k & 15) == 15)
5538 DHD_ERROR(("\n"));
5539 }
5540 DHD_ERROR(("\n"));
5541 }
5542#endif /* DHD_RX_FULL_DUMP */
5543 }
5544#endif /* DHD_RX_DUMP */
5545
5546#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5547 if (pkt_wake) {
5548 prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
5549 }
5550#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5551
5552 skb->protocol = eth_type_trans(skb, skb->dev);
5553
5554 if (skb->pkt_type == PACKET_MULTICAST) {
5555 dhd->pub.rx_multicast++;
5556 ifp->stats.multicast++;
5557 }
5558
5559 skb->data = eth;
5560 skb->len = len;
5561
5562 DHD_DBG_PKT_MON_RX(dhdp, skb);
5563#ifdef DHD_PKT_LOGGING
5564 DHD_PKTLOG_RX(dhdp, skb);
5565#endif /* DHD_PKT_LOGGING */
5566 /* Strip header, count, deliver upward */
5567 skb_pull(skb, ETH_HLEN);
5568
5569 /* Process special event packets and then discard them */
5570 memset(&event, 0, sizeof(event));
5571
5572 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
5573 bcm_event_msg_u_t evu;
5574 int ret_event;
5575 int event_type;
5576
5577 ret_event = wl_host_event_get_data(
5578#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5579 skb_mac_header(skb),
5580#else
5581 skb->mac.raw,
5582#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5583 len, &evu);
5584
5585 if (ret_event != BCME_OK) {
5586 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5587 __FUNCTION__, ret_event));
5588#ifdef DHD_USE_STATIC_CTRLBUF
5589 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5590#else
5591 PKTFREE(dhdp->osh, pktbuf, FALSE);
5592#endif // endif
5593 continue;
5594 }
5595
5596 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5597 event_type = ntoh32_ua((void *)&event.event_type);
5598#ifdef SHOW_LOGTRACE
5599			/* Event message printing is called from dhd_rx_frame, which runs
5600			 * in tasklet context for PCIe FD and in DPC context for other
5601			 * buses. If the dongle sends a bunch of events, printing all of
5602			 * them from tasklet/DPC context, in the data path no less, is
5603			 * costly. Also, in newer dongle SW (4359, 4355 onwards) console
5604			 * prints too arrive as events of type WLC_E_TRACE.
5605			 * We therefore print these console logs from workqueue context:
5606			 * the SKB is enqueued here, dequeued in the workqueue, and freed
5607			 * only if logtrace_pkt_sendup is true.
5608			 */
5609 if (event_type == WLC_E_TRACE) {
5610 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
5611 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5612 continue;
5613 }
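			/* Deferred-logging sketch: the enqueue above pairs with a
			 * workqueue consumer, roughly
			 *
			 *	dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf); (here)
			 *	...
			 *	(workqueue) dequeue -> print -> free/send up
			 *
			 * The consumer shape is illustrative; the actual worker
			 * lives in the SHOW_LOGTRACE support code.
			 */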
5614#endif /* SHOW_LOGTRACE */
5615
5616 ret_event = dhd_wl_host_event(dhd, ifidx,
5617#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5618 skb_mac_header(skb),
5619#else
5620 skb->mac.raw,
5621#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5622 len, &event, &data);
5623
5624 wl_event_to_host_order(&event);
5625 if (!tout_ctrl)
5626 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
5627
5628#if defined(PNO_SUPPORT)
5629 if (event_type == WLC_E_PFN_NET_FOUND) {
5630				/* enforce a custom wake lock to guarantee that the kernel is not suspended */
5631 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
5632 }
5633#endif /* PNO_SUPPORT */
5634 if (numpkt != 1) {
5635 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
5636 __FUNCTION__));
5637 }
5638
5639#ifdef DHD_WAKE_STATUS
5640 if (unlikely(pkt_wake)) {
5641#ifdef DHD_WAKE_EVENT_STATUS
5642 if (event.event_type < WLC_E_LAST) {
5643 wcp->rc_event[event.event_type]++;
5644 wcp->rcwake++;
5645 pkt_wake = 0;
5646 }
5647#endif /* DHD_WAKE_EVENT_STATUS */
5648 }
5649#endif /* DHD_WAKE_STATUS */
5650
5651			/* For a virtual interface delete event, wl_host_event returns a
5652			 * positive i/f index; do not proceed, just free the packet.
5653			 */
5654 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
5655 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
5656 __FUNCTION__));
5657#ifdef DHD_USE_STATIC_CTRLBUF
5658 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5659#else
5660 PKTFREE(dhdp->osh, pktbuf, FALSE);
5661#endif // endif
5662 continue;
5663 }
5664
5665			/*
5666			 * For event packets, there is a possibility of
5667			 * ifidx getting modified. Thus update the ifp
5668			 * once again.
5669			 */
5670 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
5671 ifp = dhd->iflist[ifidx];
5672#ifndef PROP_TXSTATUS_VSDB
5673 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
5674#else
5675 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
5676 dhd->pub.up))
5677#endif /* PROP_TXSTATUS_VSDB */
5678 {
5679 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
5680 __FUNCTION__));
5681#ifdef DHD_USE_STATIC_CTRLBUF
5682 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5683#else
5684 PKTFREE(dhdp->osh, pktbuf, FALSE);
5685#endif // endif
5686 continue;
5687 }
5688
5689 if (dhdp->wl_event_enabled) {
5690#ifdef DHD_USE_STATIC_CTRLBUF
5691 /* If event bufs are allocated via static buf pool
5692 * and wl events are enabled, make a copy, free the
5693 * local one and send the copy up.
5694 */
5695 void *npkt = PKTDUP(dhdp->osh, skb);
5696 /* Clone event and send it up */
5697 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5698 if (npkt) {
5699 skb = npkt;
5700 } else {
5701 DHD_ERROR(("skb clone failed. dropping event.\n"));
5702 continue;
5703 }
5704#endif /* DHD_USE_STATIC_CTRLBUF */
5705 } else {
5706				/* If event forwarding is not explicitly enabled, drop events */
5707#ifdef DHD_USE_STATIC_CTRLBUF
5708 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5709#else
5710 PKTFREE(dhdp->osh, pktbuf, FALSE);
5711#endif /* DHD_USE_STATIC_CTRLBUF */
5712 continue;
5713 }
5714 } else {
5715 tout_rx = DHD_PACKET_TIMEOUT_MS;
5716
5717#ifdef PROP_TXSTATUS
5718 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
5719#endif /* PROP_TXSTATUS */
5720
5721#ifdef DHD_WAKE_STATUS
5722 if (unlikely(pkt_wake)) {
5723 wcp->rxwake++;
5724#ifdef DHD_WAKE_RX_STATUS
5725#define ETHER_ICMP6_HEADER	20
5726#define ETHER_IPV6_SADDR	(ETHER_ICMP6_HEADER + 2)
5727#define ETHER_IPV6_DADDR	(ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
5728#define ETHER_ICMPV6_TYPE	(ETHER_IPV6_DADDR + IPV6_ADDR_LEN)
5729
5730 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
5731 wcp->rx_arp++;
5732 if (dump_data[0] == 0xFF) { /* Broadcast */
5733 wcp->rx_bcast++;
5734 } else if (dump_data[0] & 0x01) { /* Multicast */
5735 wcp->rx_mcast++;
5736 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
5737 wcp->rx_multi_ipv6++;
5738 if ((skb->len > ETHER_ICMP6_HEADER) &&
5739 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
5740 wcp->rx_icmpv6++;
5741 if (skb->len > ETHER_ICMPV6_TYPE) {
5742 switch (dump_data[ETHER_ICMPV6_TYPE]) {
5743 case NDISC_ROUTER_ADVERTISEMENT:
5744 wcp->rx_icmpv6_ra++;
5745 break;
5746 case NDISC_NEIGHBOUR_ADVERTISEMENT:
5747 wcp->rx_icmpv6_na++;
5748 break;
5749 case NDISC_NEIGHBOUR_SOLICITATION:
5750 wcp->rx_icmpv6_ns++;
5751 break;
5752 }
5753 }
5754 }
5755 } else if (dump_data[2] == 0x5E) {
5756 wcp->rx_multi_ipv4++;
5757 } else {
5758 wcp->rx_multi_other++;
5759 }
5760 } else { /* Unicast */
5761 wcp->rx_ucast++;
5762 }
5763#undef ETHER_ICMP6_HEADER
5764#undef ETHER_IPV6_SADDR
5765#undef ETHER_IPV6_DADDR
5766#undef ETHER_ICMPV6_TYPE
5767#endif /* DHD_WAKE_RX_STATUS */
5768 pkt_wake = 0;
5769 }
5770#endif /* DHD_WAKE_STATUS */
5771 }
5772
5773#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
5774 ifp->net->last_rx = jiffies;
5775#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
5776
5777 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
5778 dhdp->dstats.rx_bytes += skb->len;
5779 dhdp->rx_packets++; /* Local count */
5780 ifp->stats.rx_bytes += skb->len;
5781 ifp->stats.rx_packets++;
5782 }
5783
5784 if (in_interrupt()) {
5785 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5786 __FUNCTION__, __LINE__);
5787 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5788#if defined(DHD_LB_RXP)
5789 netif_receive_skb(skb);
5790#else /* !defined(DHD_LB_RXP) */
5791 netif_rx(skb);
5792#endif /* !defined(DHD_LB_RXP) */
5793 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5794 } else {
5795 if (dhd->rxthread_enabled) {
5796 if (!skbhead)
5797 skbhead = skb;
5798 else
5799 PKTSETNEXT(dhdp->osh, skbprev, skb);
5800 skbprev = skb;
5801 } else {
5802
5803 /* If the receive is not processed inside an ISR,
5804 * the softirqd must be woken explicitly to service
5805 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5806 * by netif_rx_ni(), but in earlier kernels, we need
5807 * to do it manually.
5808 */
5809 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5810 __FUNCTION__, __LINE__);
5811
5812#if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
5813 defined(ARGOS_NOTIFY_CB)
5814 argos_register_notifier_deinit();
5815#endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
5816#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
5817 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5818#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
5819#if defined(DHD_LB_RXP)
5820 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5821 netif_receive_skb(skb);
5822 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5823#else /* !defined(DHD_LB_RXP) */
5824#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5825 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5826 netif_rx_ni(skb);
5827 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5828#else
5829 ulong flags;
5830 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5831 netif_rx(skb);
5832 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5833 local_irq_save(flags);
5834 RAISE_RX_SOFTIRQ();
5835 local_irq_restore(flags);
5836#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5837#endif /* !defined(DHD_LB_RXP) */
5838 }
5839 }
5840 }
5841
5842 if (dhd->rxthread_enabled && skbhead)
5843 dhd_sched_rxf(dhdp, skbhead);
5844
5845 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
5846 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
5847}
5848
5849void
5850dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
5851{
5852	/* Nothing to do in the Linux version */
5853 return;
5854}
5855
5856void
5857dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
5858{
5859 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
5860 struct ether_header *eh;
5861 uint16 type;
5862
5863 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
5864
5865 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
5866 type = ntoh16(eh->ether_type);
5867
5868 if (type == ETHER_TYPE_802_1X) {
5869 atomic_dec(&dhd->pend_8021x_cnt);
5870 }
5871
5872#ifdef PROP_TXSTATUS
5873 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
5874 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
5875 uint datalen = PKTLEN(dhd->pub.osh, txp);
5876 if (ifp != NULL) {
5877 if (success) {
5878 dhd->pub.tx_packets++;
5879 ifp->stats.tx_packets++;
5880 ifp->stats.tx_bytes += datalen;
5881 } else {
5882 ifp->stats.tx_dropped++;
5883 }
5884 }
5885 }
5886#endif // endif
5887}
5888
5889static struct net_device_stats *
5890dhd_get_stats(struct net_device *net)
5891{
5892 dhd_info_t *dhd = DHD_DEV_INFO(net);
5893 dhd_if_t *ifp;
5894
5895 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5896
5897 if (!dhd) {
5898 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
5899 goto error;
5900 }
5901
5902 ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
5903 if (!ifp) {
5904 /* return empty stats */
5905 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
5906 goto error;
5907 }
5908
5909 if (dhd->pub.up) {
5910 /* Use the protocol to get dongle stats */
5911 dhd_prot_dstats(&dhd->pub);
5912 }
5913 return &ifp->stats;
5914
5915error:
5916 memset(&net->stats, 0, sizeof(net->stats));
5917 return &net->stats;
5918}
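/* A minimal sketch of how a stats handler like dhd_get_stats() is hooked
 * into the kernel (assuming net_device_ops, i.e. kernel >= 2.6.31); the
 * ops-table name below is hypothetical, the real tables are defined
 * elsewhere in this file:
 *
 *	static const struct net_device_ops example_ops = {
 *		.ndo_get_stats = dhd_get_stats,
 *	};
 *	net->netdev_ops = &example_ops;
 */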
5919
5920static int
5921dhd_watchdog_thread(void *data)
5922{
5923 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5924 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5925 /* This thread doesn't need any user-level access,
5926 * so get rid of all our resources
5927 */
5928 if (dhd_watchdog_prio > 0) {
5929 struct sched_param param;
5930 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
5931 dhd_watchdog_prio:(MAX_RT_PRIO-1);
5932 setScheduler(current, SCHED_FIFO, &param);
5933 }
5934
5935 while (1) {
5936 if (down_interruptible (&tsk->sema) == 0) {
5937 unsigned long flags;
5938 unsigned long jiffies_at_start = jiffies;
5939 unsigned long time_lapse;
5940#ifdef BCMPCIE
5941 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5942#endif /* BCMPCIE */
5943
5944 SMP_RD_BARRIER_DEPENDS();
5945 if (tsk->terminated) {
5946#ifdef BCMPCIE
5947 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5948#endif /* BCMPCIE */
5949 break;
5950 }
5951
5952 if (dhd->pub.dongle_reset == FALSE) {
5953 DHD_TIMER(("%s:\n", __FUNCTION__));
5954 dhd_bus_watchdog(&dhd->pub);
5955
5956 DHD_GENERAL_LOCK(&dhd->pub, flags);
5957 /* Count the tick for reference */
5958 dhd->pub.tickcnt++;
5959#ifdef DHD_L2_FILTER
5960 dhd_l2_filter_watchdog(&dhd->pub);
5961#endif /* DHD_L2_FILTER */
5962 time_lapse = jiffies - jiffies_at_start;
5963
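				/* Reschedule compensation, worked example (illustrative
				 * numbers): with dhd_watchdog_ms = 10 and time_lapse
				 * equal to 3 ms of jiffies, the timer below is re-armed
				 * 10 - 3 = 7 ms out, keeping the watchdog period steady;
				 * if this pass took 10 ms or more, min() clamps the
				 * extra delay to zero so the timer fires immediately.
				 */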
5964 /* Reschedule the watchdog */
5965 if (dhd->wd_timer_valid) {
5966 mod_timer(&dhd->timer,
5967 jiffies +
5968 msecs_to_jiffies(dhd_watchdog_ms) -
5969 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
5970 }
5971 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5972 }
5973#ifdef BCMPCIE
5974 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5975#endif /* BCMPCIE */
5976 } else {
5977 break;
5978 }
5979 }
5980
5981 complete_and_exit(&tsk->completed, 0);
5982}
5983
5984static void dhd_watchdog(ulong data)
5985{
5986 dhd_info_t *dhd = (dhd_info_t *)data;
5987 unsigned long flags;
5988
5989 if (dhd->pub.dongle_reset) {
5990 return;
5991 }
5992
5993 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
5994 up(&dhd->thr_wdt_ctl.sema);
5995 return;
5996 }
5997
5998#ifdef BCMPCIE
5999 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6000#endif /* BCMPCIE */
6001 /* Call the bus module watchdog */
6002 dhd_bus_watchdog(&dhd->pub);
6003
6004 DHD_GENERAL_LOCK(&dhd->pub, flags);
6005 /* Count the tick for reference */
6006 dhd->pub.tickcnt++;
6007
6008#ifdef DHD_L2_FILTER
6009 dhd_l2_filter_watchdog(&dhd->pub);
6010#endif /* DHD_L2_FILTER */
6011 /* Reschedule the watchdog */
6012 if (dhd->wd_timer_valid)
6013 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
6014 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6015#ifdef BCMPCIE
6016 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6017#endif /* BCMPCIE */
6018}
6019
6020#ifdef DHD_PCIE_RUNTIMEPM
6021static int
6022dhd_rpm_state_thread(void *data)
6023{
6024 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6025 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6026
6027 while (1) {
6028 if (down_interruptible (&tsk->sema) == 0) {
6029 unsigned long flags;
6030 unsigned long jiffies_at_start = jiffies;
6031 unsigned long time_lapse;
6032
6033 SMP_RD_BARRIER_DEPENDS();
6034 if (tsk->terminated) {
6035 break;
6036 }
6037
6038 if (dhd->pub.dongle_reset == FALSE) {
6039 DHD_TIMER(("%s:\n", __FUNCTION__));
6040 if (dhd->pub.up) {
6041 dhd_runtimepm_state(&dhd->pub);
6042 }
6043
6044 DHD_GENERAL_LOCK(&dhd->pub, flags);
6045 time_lapse = jiffies - jiffies_at_start;
6046
6047 /* Reschedule the watchdog */
6048 if (dhd->rpm_timer_valid) {
6049 mod_timer(&dhd->rpm_timer,
6050 jiffies +
6051 msecs_to_jiffies(dhd_runtimepm_ms) -
6052 min(msecs_to_jiffies(dhd_runtimepm_ms),
6053 time_lapse));
6054 }
6055 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6056 }
6057 } else {
6058 break;
6059 }
6060 }
6061
6062 complete_and_exit(&tsk->completed, 0);
6063}
6064
6065static void dhd_runtimepm(ulong data)
6066{
6067 dhd_info_t *dhd = (dhd_info_t *)data;
6068
6069 if (dhd->pub.dongle_reset) {
6070 return;
6071 }
6072
6073 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
6074 up(&dhd->thr_rpm_ctl.sema);
6075 return;
6076 }
6077}
6078
6079void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
6080{
6081 dhd_os_runtimepm_timer(dhdp, 0);
6082 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
6083 DHD_ERROR(("DHD Runtime PM Disabled \n"));
6084}
6085
6086void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
6087{
6088 if (dhd_get_idletime(dhdp)) {
6089 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
6090 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6091 }
6092}
6093
6094#endif /* DHD_PCIE_RUNTIMEPM */
6095
6096#ifdef ENABLE_ADAPTIVE_SCHED
6097static void
6098dhd_sched_policy(int prio)
6099{
6100 struct sched_param param;
6101 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
6102 param.sched_priority = 0;
6103 setScheduler(current, SCHED_NORMAL, &param);
6104 } else {
6105 if (get_scheduler_policy(current) != SCHED_FIFO) {
6106 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
6107 setScheduler(current, SCHED_FIFO, &param);
6108 }
6109 }
6110}
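/* Usage sketch: the DPC/RXF threads below call dhd_sched_policy() at the
 * top of each loop iteration, e.g.
 *
 *	while (1) {
 *		if (!binary_sema_down(tsk)) {
 *			dhd_sched_policy(dhd_dpc_prio);
 *			...
 *		}
 *	}
 *
 * so a thread drops to SCHED_NORMAL while the CPU runs at or below
 * CUSTOM_CPUFREQ_THRESH and is promoted back to SCHED_FIFO afterwards.
 */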
6111#endif /* ENABLE_ADAPTIVE_SCHED */
6112#ifdef DEBUG_CPU_FREQ
6113static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
6114{
6115 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
6116 struct cpufreq_freqs *freq = data;
6117 if (dhd) {
6118 if (!dhd->new_freq)
6119 goto exit;
6120 if (val == CPUFREQ_POSTCHANGE) {
6121 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6122 freq->new, freq->cpu));
6123 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
6124 }
6125 }
6126exit:
6127 return 0;
6128}
6129#endif /* DEBUG_CPU_FREQ */
6130static int
6131dhd_dpc_thread(void *data)
6132{
6133#if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6134 !defined(CONFIG_SOC_EXYNOS7870)
6135 int ret = 0;
6136 unsigned long flags;
6137#endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6138 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6139 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6140
6141 /* This thread doesn't need any user-level access,
6142 * so get rid of all our resources
6143 */
6144 if (dhd_dpc_prio > 0)
6145 {
6146 struct sched_param param;
6147 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
6148 setScheduler(current, SCHED_FIFO, &param);
6149 }
6150
6151#if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6152 !defined(CONFIG_SOC_EXYNOS7870)
6153 if (!zalloc_cpumask_var(&dhd->pub.default_cpu_mask, GFP_KERNEL)) {
6154 DHD_ERROR(("dpc_thread, zalloc_cpumask_var error\n"));
6155 dhd->pub.affinity_isdpc = FALSE;
6156 } else {
6157 if (!zalloc_cpumask_var(&dhd->pub.dpc_affinity_cpu_mask, GFP_KERNEL)) {
6158 DHD_ERROR(("dpc_thread, dpc_affinity_cpu_mask error\n"));
6159 free_cpumask_var(dhd->pub.default_cpu_mask);
6160 dhd->pub.affinity_isdpc = FALSE;
6161 } else {
6162 cpumask_copy(dhd->pub.default_cpu_mask, &hmp_slow_cpu_mask);
6163 cpumask_or(dhd->pub.dpc_affinity_cpu_mask,
6164 dhd->pub.dpc_affinity_cpu_mask, cpumask_of(DPC_CPUCORE));
6165
6166 DHD_GENERAL_LOCK(&dhd->pub, flags);
6167 if ((ret = argos_task_affinity_setup_label(current, "WIFI",
6168 dhd->pub.dpc_affinity_cpu_mask,
6169 dhd->pub.default_cpu_mask)) < 0) {
6170 DHD_ERROR(("Failed to add CPU affinity(dpc) error=%d\n",
6171 ret));
6172 free_cpumask_var(dhd->pub.default_cpu_mask);
6173 free_cpumask_var(dhd->pub.dpc_affinity_cpu_mask);
6174 dhd->pub.affinity_isdpc = FALSE;
6175 } else {
6176 unsigned int irq = -1;
6177#ifdef BCMPCIE
6178 if (dhdpcie_get_pcieirq(dhd->pub.bus, &irq)) {
6179 DHD_ERROR(("%s : Can't get interrupt number\n",
6180 __FUNCTION__));
6181 }
6182#endif /* BCMPCIE */
6183#ifdef BCMSDIO
6184 wifi_adapter_info_t *adapter = dhd->adapter;
6185 irq = adapter->irq_num;
6186#endif /* BCMSDIO */
6187 DHD_ERROR(("Argos set Completed : dpcthread\n"));
6188 set_irq_cpucore(irq, dhd->pub.default_cpu_mask,
6189 dhd->pub.dpc_affinity_cpu_mask);
6190 dhd->pub.affinity_isdpc = TRUE;
6191 }
6192 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6193 }
6194 }
6195#else /* ARGOS_CPU_SCHEDULER */
6196#ifdef CUSTOM_DPC_CPUCORE
6197 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
6198#endif // endif
6199#ifdef CUSTOM_SET_CPUCORE
6200 dhd->pub.current_dpc = current;
6201#endif /* CUSTOM_SET_CPUCORE */
6202#endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6203 /* Run until signal received */
6204 while (1) {
6205 if (!binary_sema_down(tsk)) {
6206#ifdef ENABLE_ADAPTIVE_SCHED
6207 dhd_sched_policy(dhd_dpc_prio);
6208#endif /* ENABLE_ADAPTIVE_SCHED */
6209 SMP_RD_BARRIER_DEPENDS();
6210 if (tsk->terminated) {
6211 break;
6212 }
6213
6214 /* Call bus dpc unless it indicated down (then clean stop) */
6215 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6216#ifdef DEBUG_DPC_THREAD_WATCHDOG
6217 int resched_cnt = 0;
6218#endif /* DEBUG_DPC_THREAD_WATCHDOG */
6219 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
6220 while (dhd_bus_dpc(dhd->pub.bus)) {
6221 /* process all data */
6222#ifdef DEBUG_DPC_THREAD_WATCHDOG
6223 resched_cnt++;
6224 if (resched_cnt > MAX_RESCHED_CNT) {
6225						DHD_INFO(("%s: calling msleep to "
6226							"let other processes run\n",
6227							__FUNCTION__));
6228 dhd->pub.dhd_bug_on = true;
6229 resched_cnt = 0;
6230 OSL_SLEEP(1);
6231 }
6232#endif /* DEBUG_DPC_THREAD_WATCHDOG */
6233 }
6234 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
6235 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6236 } else {
6237 if (dhd->pub.up)
6238 dhd_bus_stop(dhd->pub.bus, TRUE);
6239 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6240 }
6241 } else {
6242 break;
6243 }
6244 }
6245#ifdef ARGOS_CPU_SCHEDULER
6246 if (dhd->pub.affinity_isdpc == TRUE) {
6247 free_cpumask_var(dhd->pub.default_cpu_mask);
6248 free_cpumask_var(dhd->pub.dpc_affinity_cpu_mask);
6249 dhd->pub.affinity_isdpc = FALSE;
6250 }
6251#endif /* ARGOS_CPU_SCHEDULER */
6252 complete_and_exit(&tsk->completed, 0);
6253}
6254
6255static int
6256dhd_rxf_thread(void *data)
6257{
6258 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6259 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6260#if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6261 !defined(CONFIG_SOC_EXYNOS7870)
6262 int ret = 0;
6263 unsigned long flags;
6264#endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6265#if defined(WAIT_DEQUEUE)
6266#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
6267 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
6268#endif // endif
6269 dhd_pub_t *pub = &dhd->pub;
6270
6271 /* This thread doesn't need any user-level access,
6272 * so get rid of all our resources
6273 */
6274 if (dhd_rxf_prio > 0)
6275 {
6276 struct sched_param param;
6277 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
6278 setScheduler(current, SCHED_FIFO, &param);
6279 }
6280
6281#if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6282 !defined(CONFIG_SOC_EXYNOS7870)
6283 if (!zalloc_cpumask_var(&dhd->pub.rxf_affinity_cpu_mask, GFP_KERNEL)) {
6284 DHD_ERROR(("rxthread zalloc_cpumask_var error\n"));
6285 dhd->pub.affinity_isrxf = FALSE;
6286 } else {
6287 cpumask_or(dhd->pub.rxf_affinity_cpu_mask, dhd->pub.rxf_affinity_cpu_mask,
6288 cpumask_of(RXF_CPUCORE));
6289
6290 DHD_GENERAL_LOCK(&dhd->pub, flags);
6291 if ((ret = argos_task_affinity_setup_label(current, "WIFI",
6292 dhd->pub.rxf_affinity_cpu_mask, dhd->pub.default_cpu_mask)) < 0) {
6293 DHD_ERROR(("Failed to add CPU affinity(rxf) error=%d\n", ret));
6294 dhd->pub.affinity_isrxf = FALSE;
6295 free_cpumask_var(dhd->pub.rxf_affinity_cpu_mask);
6296 } else {
6297 DHD_ERROR(("RXthread affinity completed\n"));
6298 dhd->pub.affinity_isrxf = TRUE;
6299 }
6300 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6301 }
6302#else /* ARGOS_CPU_SCHEDULER */
6303#ifdef CUSTOM_SET_CPUCORE
6304 dhd->pub.current_rxf = current;
6305#endif /* CUSTOM_SET_CPUCORE */
6306#endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6307 /* Run until signal received */
6308 while (1) {
6309 if (down_interruptible(&tsk->sema) == 0) {
6310 void *skb;
6311#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
6312 ulong flags;
6313#endif // endif
6314#ifdef ENABLE_ADAPTIVE_SCHED
6315 dhd_sched_policy(dhd_rxf_prio);
6316#endif /* ENABLE_ADAPTIVE_SCHED */
6317
6318 SMP_RD_BARRIER_DEPENDS();
6319
6320 if (tsk->terminated) {
6321 break;
6322 }
6323 skb = dhd_rxf_dequeue(pub);
6324
6325 if (skb == NULL) {
6326 continue;
6327 }
6328 while (skb) {
6329 void *skbnext = PKTNEXT(pub->osh, skb);
6330 PKTSETNEXT(pub->osh, skb, NULL);
6331 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6332 __FUNCTION__, __LINE__);
6333#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6334 netif_rx_ni(skb);
6335#else
6336 netif_rx(skb);
6337 local_irq_save(flags);
6338 RAISE_RX_SOFTIRQ();
6339 local_irq_restore(flags);
6340
6341#endif // endif
6342 skb = skbnext;
6343 }
6344#if defined(WAIT_DEQUEUE)
6345 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
6346 OSL_SLEEP(1);
6347 watchdogTime = OSL_SYSUPTIME();
6348 }
6349#endif // endif
6350
6351 DHD_OS_WAKE_UNLOCK(pub);
6352 } else {
6353 break;
6354 }
6355 }
6356#ifdef ARGOS_CPU_SCHEDULER
6357 if (dhd->pub.affinity_isrxf == TRUE) {
6358 free_cpumask_var(dhd->pub.rxf_affinity_cpu_mask);
6359 dhd->pub.affinity_isrxf = FALSE;
6360 }
6361#endif /* ARGOS_CPU_SCHEDULER */
6362 complete_and_exit(&tsk->completed, 0);
6363}
6364
6365#ifdef BCMPCIE
6366void dhd_dpc_enable(dhd_pub_t *dhdp)
6367{
6368#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
6369 dhd_info_t *dhd;
6370
6371 if (!dhdp || !dhdp->info)
6372 return;
6373 dhd = dhdp->info;
6374#endif /* DHD_LB_RXP || DHD_LB_TXP */
6375
6376#ifdef DHD_LB_RXP
6377 __skb_queue_head_init(&dhd->rx_pend_queue);
6378#endif /* DHD_LB_RXP */
6379
6380#ifdef DHD_LB_TXP
6381 skb_queue_head_init(&dhd->tx_pend_queue);
6382#endif /* DHD_LB_TXP */
6383}
6384#endif /* BCMPCIE */
6385
6386#ifdef BCMPCIE
6387void
6388dhd_dpc_kill(dhd_pub_t *dhdp)
6389{
6390 dhd_info_t *dhd;
6391
6392 if (!dhdp) {
6393 return;
6394 }
6395
6396 dhd = dhdp->info;
6397
6398 if (!dhd) {
6399 return;
6400 }
6401
6402 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6403 tasklet_kill(&dhd->tasklet);
6404 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
6405 }
6406
6407#ifdef DHD_LB
6408#ifdef DHD_LB_RXP
6409 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
6410 __skb_queue_purge(&dhd->rx_pend_queue);
6411#endif /* DHD_LB_RXP */
6412#ifdef DHD_LB_TXP
6413 cancel_work_sync(&dhd->tx_dispatcher_work);
6414 skb_queue_purge(&dhd->tx_pend_queue);
6415#endif /* DHD_LB_TXP */
6416
6417 /* Kill the Load Balancing Tasklets */
6418#if defined(DHD_LB_TXC)
6419 tasklet_kill(&dhd->tx_compl_tasklet);
6420#endif /* DHD_LB_TXC */
6421#if defined(DHD_LB_RXC)
6422 tasklet_kill(&dhd->rx_compl_tasklet);
6423#endif /* DHD_LB_RXC */
6424#if defined(DHD_LB_TXP)
6425 tasklet_kill(&dhd->tx_tasklet);
6426#endif /* DHD_LB_TXP */
6427#endif /* DHD_LB */
6428}
6429
6430void
6431dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
6432{
6433 dhd_info_t *dhd;
6434
6435 if (!dhdp) {
6436 return;
6437 }
6438
6439 dhd = dhdp->info;
6440
6441 if (!dhd) {
6442 return;
6443 }
6444
6445 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6446 tasklet_kill(&dhd->tasklet);
6447 }
6448}
6449#endif /* BCMPCIE */
6450
6451static void
6452dhd_dpc(ulong data)
6453{
6454 dhd_info_t *dhd;
6455
6456 dhd = (dhd_info_t *)data;
6457
6458	/* This tasklet is scheduled from dhd_sched_dpc() below, with the
6459	 * wake lock already held; the tasklet itself is initialized in
6460	 * dhd_attach().
6461	 */
6462 /* Call bus dpc unless it indicated down (then clean stop) */
6463 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6464#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6465 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
6466#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6467 if (dhd_bus_dpc(dhd->pub.bus)) {
6468 tasklet_schedule(&dhd->tasklet);
6469 }
6470 } else {
6471 dhd_bus_stop(dhd->pub.bus, TRUE);
6472 }
6473}
6474
6475void
6476dhd_sched_dpc(dhd_pub_t *dhdp)
6477{
6478 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6479
6480 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
6481 DHD_OS_WAKE_LOCK(dhdp);
6482		/* If the semaphore up fails, the wake
6483		 * unlock must be done here
6484		 */
6485 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
6486 DHD_OS_WAKE_UNLOCK(dhdp);
6487 }
6488 return;
6489 } else {
6490 dhd_bus_set_dpc_sched_time(dhdp);
6491 tasklet_schedule(&dhd->tasklet);
6492 }
6493}
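/* Wake-lock pairing sketch for the threaded DPC path: dhd_sched_dpc()
 * takes DHD_OS_WAKE_LOCK() before binary_sema_up(); the matching
 * DHD_OS_WAKE_UNLOCK() is issued in dhd_dpc_thread() once dhd_bus_dpc()
 * reports no more work, or immediately above when binary_sema_up() finds
 * the semaphore already posted.
 */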
6494
6495static void
6496dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
6497{
6498 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6499
6500 DHD_OS_WAKE_LOCK(dhdp);
6501
6502 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6503 do {
6504 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
6505 break;
6506 } while (1);
6507 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6508 up(&dhd->thr_rxf_ctl.sema);
6509 }
6510 return;
6511}
6512
6513#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6514#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6515
6516#ifdef TOE
6517/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6518static int
6519dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
6520{
6521 char buf[32];
6522 int ret;
6523
6524 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6525
6526 if (ret < 0) {
6527 if (ret == -EIO) {
6528 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
6529 ifidx)));
6530 return -EOPNOTSUPP;
6531 }
6532
6533 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6534 return ret;
6535 }
6536
6537 memcpy(toe_ol, buf, sizeof(uint32));
6538 return 0;
6539}
6540
6541/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6542static int
6543dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
6544{
6545 int toe, ret;
6546
6547 /* Set toe_ol as requested */
6548 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
6549 if (ret < 0) {
6550 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6551 dhd_ifname(&dhd->pub, ifidx), ret));
6552 return ret;
6553 }
6554
6555 /* Enable toe globally only if any components are enabled. */
6556 toe = (toe_ol != 0);
6557 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
6558 if (ret < 0) {
6559 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6560 return ret;
6561 }
6562
6563 return 0;
6564}
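/* Read-modify-write sketch for the TOE enables, mirroring what the
 * ETHTOOL_S{R,T}XCSUM handlers in dhd_ethtool() below do:
 *
 *	uint32 toe_cmpnt;
 *	int ret;
 *	if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
 *		return ret;
 *	toe_cmpnt |= TOE_TX_CSUM_OL;	(or &= ~TOE_TX_CSUM_OL to clear)
 *	if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
 *		return ret;
 */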
6565#endif /* TOE */
6566
6567#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6568void dhd_set_scb_probe(dhd_pub_t *dhd)
6569{
6570 wl_scb_probe_t scb_probe;
6571 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
6572 int ret;
6573
6574 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6575 return;
6576 }
6577
6578 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
6579 if (ret < 0) {
6580 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
6581 }
6582
6583 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
6584
6585 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
6586
6587 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
6588 TRUE);
6589 if (ret < 0) {
6590 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
6591 return;
6592 }
6593}
6594#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6595
6596#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6597static void
6598dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
6599{
6600 dhd_info_t *dhd = DHD_DEV_INFO(net);
6601
6602 snprintf(info->driver, sizeof(info->driver), "wl");
6603 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
6604}
6605
6606struct ethtool_ops dhd_ethtool_ops = {
6607 .get_drvinfo = dhd_ethtool_get_drvinfo
6608};
6609#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6610
6611#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
6612static int
6613dhd_ethtool(dhd_info_t *dhd, void *uaddr)
6614{
6615 struct ethtool_drvinfo info;
6616 char drvname[sizeof(info.driver)];
6617 uint32 cmd;
6618#ifdef TOE
6619 struct ethtool_value edata;
6620 uint32 toe_cmpnt, csum_dir;
6621 int ret;
6622#endif // endif
6623
6624 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6625
6626 /* all ethtool calls start with a cmd word */
6627 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
6628 return -EFAULT;
6629
6630 switch (cmd) {
6631 case ETHTOOL_GDRVINFO:
6632			/* Copy out any requested driver name */
6633 if (copy_from_user(&info, uaddr, sizeof(info)))
6634 return -EFAULT;
6635 strncpy(drvname, info.driver, sizeof(info.driver));
6636 drvname[sizeof(info.driver)-1] = '\0';
6637
6638 /* clear struct for return */
6639 memset(&info, 0, sizeof(info));
6640 info.cmd = cmd;
6641
6642 /* if dhd requested, identify ourselves */
6643 if (strcmp(drvname, "?dhd") == 0) {
6644 snprintf(info.driver, sizeof(info.driver), "dhd");
6645 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
6646 info.version[sizeof(info.version) - 1] = '\0';
6647 }
6648
6649 /* otherwise, require dongle to be up */
6650 else if (!dhd->pub.up) {
6651 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
6652 return -ENODEV;
6653 }
6654
6655 /* finally, report dongle driver type */
6656 else if (dhd->pub.iswl)
6657 snprintf(info.driver, sizeof(info.driver), "wl");
6658 else
6659 snprintf(info.driver, sizeof(info.driver), "xx");
6660
6661 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
6662 if (copy_to_user(uaddr, &info, sizeof(info)))
6663 return -EFAULT;
6664 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
6665 (int)sizeof(drvname), drvname, info.driver));
6666 break;
6667
6668#ifdef TOE
6669 /* Get toe offload components from dongle */
6670 case ETHTOOL_GRXCSUM:
6671 case ETHTOOL_GTXCSUM:
6672 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
6673 return ret;
6674
6675 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
6676
6677 edata.cmd = cmd;
6678 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
6679
6680 if (copy_to_user(uaddr, &edata, sizeof(edata)))
6681 return -EFAULT;
6682 break;
6683
6684 /* Set toe offload components in dongle */
6685 case ETHTOOL_SRXCSUM:
6686 case ETHTOOL_STXCSUM:
6687 if (copy_from_user(&edata, uaddr, sizeof(edata)))
6688 return -EFAULT;
6689
6690 /* Read the current settings, update and write back */
6691 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
6692 return ret;
6693
6694 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
6695
6696 if (edata.data != 0)
6697 toe_cmpnt |= csum_dir;
6698 else
6699 toe_cmpnt &= ~csum_dir;
6700
6701 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
6702 return ret;
6703
6704 /* If setting TX checksum mode, tell Linux the new mode */
6705 if (cmd == ETHTOOL_STXCSUM) {
6706 if (edata.data)
6707 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
6708 else
6709 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
6710 }
6711
6712 break;
6713#endif /* TOE */
6714
6715 default:
6716 return -EOPNOTSUPP;
6717 }
6718
6719 return 0;
6720}
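/* Example round trip (illustrative): "ethtool -i wlan0" issues SIOCETHTOOL
 * with cmd = ETHTOOL_GDRVINFO; dhd_ethtool() above then reports driver
 * "dhd" when probed with the name "?dhd", "wl" for a WL dongle, or "xx"
 * otherwise, along with the version strings filled in above.
 */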
6721#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
6722
6723static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
6724{
6725 if (!dhdp) {
6726 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
6727 return FALSE;
6728 }
6729
6730 if (!dhdp->up)
6731 return FALSE;
6732
6733#if !defined(BCMPCIE)
6734 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
6735 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
6736 return FALSE;
6737 }
6738#endif // endif
6739
6740 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
6741 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
6742#ifdef BCMPCIE
6743 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
6744 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
6745 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
6746#else
6747 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
6748 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
6749#endif /* BCMPCIE */
6750 if (dhdp->hang_reason == 0) {
6751 if (dhdp->dongle_trap_occured) {
6752 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
6753#ifdef BCMPCIE
6754 } else if (dhdp->d3ackcnt_timeout) {
6755 dhdp->hang_reason = dhdp->is_sched_error ?
6756 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
6757 HANG_REASON_D3_ACK_TIMEOUT;
6758#endif /* BCMPCIE */
6759 } else {
6760 dhdp->hang_reason = dhdp->is_sched_error ?
6761 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
6762 HANG_REASON_IOCTL_RESP_TIMEOUT;
6763 }
6764 }
6765 net_os_send_hang_message(net);
6766 return TRUE;
6767 }
6768 return FALSE;
6769}
6770
6771#ifdef WL_MONITOR
6772bool
6773dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
6774{
6775 return (dhd->info->monitor_type != 0);
6776}
6777
6778void
6779dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
6780{
6781 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6782 {
6783 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
6784 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
6785 switch (amsdu_flag) {
6786 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
6787 default:
6788 if (!dhd->monitor_skb) {
6789 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
6790 == NULL)
6791 return;
6792 }
6793 if (dhd->monitor_type && dhd->monitor_dev)
6794 dhd->monitor_skb->dev = dhd->monitor_dev;
6795 else {
6796 PKTFREE(dhdp->osh, pkt, FALSE);
6797 dhd->monitor_skb = NULL;
6798 return;
6799 }
6800 dhd->monitor_skb->protocol =
6801 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6802 dhd->monitor_len = 0;
6803 break;
6804
6805 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
6806 if (!dhd->monitor_skb) {
6807 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
6808 == NULL)
6809 return;
6810 dhd->monitor_len = 0;
6811 }
6812 if (dhd->monitor_type && dhd->monitor_dev)
6813 dhd->monitor_skb->dev = dhd->monitor_dev;
6814			else {
6815				PKTFREE(dhdp->osh, pkt, FALSE);
6816				dev_kfree_skb(dhd->monitor_skb);
6817				/* clear the dangling pointer to the freed skb */
6818				dhd->monitor_skb = NULL;
6819				return;
6820			}
6819 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
6820 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6821 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
6822 PKTFREE(dhdp->osh, pkt, FALSE);
6823 return;
6824
6825 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
6826 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
6827 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6828 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
6829 PKTFREE(dhdp->osh, pkt, FALSE);
6830 return;
6831
6832 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
6833 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
6834 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6835 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
6836 PKTFREE(dhdp->osh, pkt, FALSE);
6837 skb_put(dhd->monitor_skb, dhd->monitor_len);
6838 dhd->monitor_skb->protocol =
6839 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6840 dhd->monitor_len = 0;
6841 break;
6842 }
6843 }
6844
6845 if (in_interrupt()) {
6846		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
6847			__FUNCTION__, __LINE__);
6848 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6849 netif_rx(dhd->monitor_skb);
6850 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6851 } else {
6852 /* If the receive is not processed inside an ISR,
6853 * the softirqd must be woken explicitly to service
6854 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6855 * by netif_rx_ni(), but in earlier kernels, we need
6856 * to do it manually.
6857 */
6858 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
6859 __FUNCTION__, __LINE__);
6860
6861#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6862 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6863 netif_rx_ni(dhd->monitor_skb);
6864 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6865#else
6866 ulong flags;
6867 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6868 netif_rx(dhd->monitor_skb);
6869 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6870 local_irq_save(flags);
6871 RAISE_RX_SOFTIRQ();
6872 local_irq_restore(flags);
6873#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6874 }
6875
6876 dhd->monitor_skb = NULL;
6877}
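/* AMSDU reassembly sketch for the monitor path above; a typical sequence
 * is
 *
 *	FIRST_PKT -> allocate monitor_skb, copy the fragment, note length
 *	INTER_PKT -> append fragment(s) at the monitor_len offset
 *	LAST_PKT  -> append the final fragment, skb_put() the total length,
 *	             eth_type_trans(), then deliver through netif_rx*()
 *
 * NO_AMSDU frames skip the accumulation and are delivered directly.
 */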
6878
6879typedef struct dhd_mon_dev_priv {
6880 struct net_device_stats stats;
6881} dhd_mon_dev_priv_t;
6882
6883#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
6884#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
6885#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
6886
6887static int
6888dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
6889{
6890 PKTFREE(NULL, skb, FALSE);
6891 return 0;
6892}
6893
6894#if defined(BT_OVER_SDIO)
6895
6896void
6897dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
6898{
6899 dhdp->info->bus_user_count++;
6900}
6901
6902void
6903dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
6904{
6905 dhdp->info->bus_user_count--;
6906}
6907
6908/* Return values:
6909 * Success: Returns 0
6910 * Failure: Returns -1 or an errno code
6911 */
6912int
6913dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
6914{
6915 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6916 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6917 int ret = 0;
6918
6919 mutex_lock(&dhd->bus_user_lock);
6920 ++dhd->bus_user_count;
6921 if (dhd->bus_user_count < 0) {
6922 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6923 ret = -1;
6924 goto exit;
6925 }
6926
6927 if (dhd->bus_user_count == 1) {
6928
6929 dhd->pub.hang_was_sent = 0;
6930
6931 /* First user, turn on WL_REG, start the bus */
6932		DHD_ERROR(("%s(): first user, turn on WL_REG and start the bus\n", __FUNCTION__));
6933
6934 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
6935 /* Enable F1 */
6936 ret = dhd_bus_resume(dhdp, 0);
6937 if (ret) {
6938 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
6939 __FUNCTION__, ret));
6940 goto exit;
6941 }
6942 }
6943
6944 dhd_update_fw_nv_path(dhd);
6945 /* update firmware and nvram path to sdio bus */
6946 dhd_bus_update_fw_nv_path(dhd->pub.bus,
6947 dhd->fw_path, dhd->nv_path);
6948 /* download the firmware, Enable F2 */
6949 /* TODO: Should be done only in case of FW switch */
6950 ret = dhd_bus_devreset(dhdp, FALSE);
6951 dhd_bus_resume(dhdp, 1);
6952 if (!ret) {
6953 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
6954 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
6955 ret = -EFAULT;
6956 }
6957 } else {
6958 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
6959 }
6960 } else {
6961		DHD_ERROR(("%s(): bus is already acquired, just increase the count %d\n",
6962			__FUNCTION__, dhd->bus_user_count));
6963 }
6964exit:
6965 mutex_unlock(&dhd->bus_user_lock);
6966 return ret;
6967}
6968EXPORT_SYMBOL(dhd_bus_get);
6969
6970/* Return values:
6971 * Success: Returns 0
6972 * Failure: Returns -1 or an errno code
6973 */
6974int
6975dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
6976{
6977 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6978 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6979 int ret = 0;
6980 BCM_REFERENCE(owner);
6981
6982 mutex_lock(&dhd->bus_user_lock);
6983 --dhd->bus_user_count;
6984 if (dhd->bus_user_count < 0) {
6985 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6986 dhd->bus_user_count = 0;
6987 ret = -1;
6988 goto exit;
6989 }
6990
6991 if (dhd->bus_user_count == 0) {
6992 /* Last user, stop the bus and turn Off WL_REG */
6993		DHD_ERROR(("%s(): there are no owners left; turn off WL_REG and stop the bus\n",
6994			__FUNCTION__));
6995#ifdef PROP_TXSTATUS
6996 if (dhd->pub.wlfc_enabled) {
6997 dhd_wlfc_deinit(&dhd->pub);
6998 }
6999#endif /* PROP_TXSTATUS */
7000#ifdef PNO_SUPPORT
7001 if (dhd->pub.pno_state) {
7002 dhd_pno_deinit(&dhd->pub);
7003 }
7004#endif /* PNO_SUPPORT */
7005#ifdef RTT_SUPPORT
7006 if (dhd->pub.rtt_state) {
7007 dhd_rtt_deinit(&dhd->pub);
7008 }
7009#endif /* RTT_SUPPORT */
7010 ret = dhd_bus_devreset(dhdp, TRUE);
7011 if (!ret) {
7012 dhd_bus_suspend(dhdp);
7013 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
7014 }
7015 } else {
7016		DHD_ERROR(("%s(): other owners are using the bus, decrease the count %d\n",
7017			__FUNCTION__, dhd->bus_user_count));
7018 }
7019exit:
7020 mutex_unlock(&dhd->bus_user_lock);
7021 return ret;
7022}
7023EXPORT_SYMBOL(dhd_bus_put);
7024
7025int
7026dhd_net_bus_get(struct net_device *dev)
7027{
7028 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7029 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
7030}
7031
7032int
7033dhd_net_bus_put(struct net_device *dev)
7034{
7035 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7036 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
7037}
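/* Reference-count usage sketch for the shared SDIO bus: callers bracket
 * their bus usage as
 *
 *	if (dhd_bus_get(handle, owner) == 0) {
 *		... use the bus ...
 *		dhd_bus_put(handle, owner);
 *	}
 *
 * The first dhd_bus_get() powers WL_REG and downloads firmware; the last
 * dhd_bus_put() resets the dongle and powers the bus off again.
 */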
7038
7039/*
7040 * Function to enable the Bus Clock
7041 * Returns BCME_OK on success and BCME_xxx on failure
7042 *
7043 * This function is not callable from non-sleepable context
7044 */
7045int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
7046{
7047 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7048
7049 int ret;
7050
7051 dhd_os_sdlock(dhdp);
7052	/*
7053	 * The second argument is TRUE, meaning we expect the
7054	 * function to wait until the clocks are actually
7055	 * available
7056	 */
7057 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
7058 dhd_os_sdunlock(dhdp);
7059
7060 return ret;
7061}
7062EXPORT_SYMBOL(dhd_bus_clk_enable);
7063
7064/*
7065 * Function to disable the Bus Clock
7066 * Returns BCME_OK on success and BCME_xxx on failure
7067 *
7068 * This function is not callable from non-sleepable context
7069 */
7070int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
7071{
7072 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7073
7074 int ret;
7075
7076 dhd_os_sdlock(dhdp);
7077	/*
7078	 * The second argument is TRUE, meaning we expect the
7079	 * function to wait until the clocks are actually
7080	 * disabled
7081	 */
7082 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
7083 dhd_os_sdunlock(dhdp);
7084
7085 return ret;
7086}
7087EXPORT_SYMBOL(dhd_bus_clk_disable);
7088
7089/*
7090 * Function to reset bt_use_count counter to zero.
7091 *
7092 * This function is not callable from non-sleepable context
7093 */
7094void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
7095{
7096 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7097
7098 /* take the lock and reset bt use count */
7099 dhd_os_sdlock(dhdp);
7100 dhdsdio_reset_bt_use_count(dhdp->bus);
7101 dhd_os_sdunlock(dhdp);
7102}
7103EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
7104
7105void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
7106{
7107 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7108 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7109
7110#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7111 dhdp->hang_was_sent = 0;
7112
7113 dhd_os_send_hang_message(&dhd->pub);
7114#else
7115 DHD_ERROR(("%s: unsupported\n", __FUNCTION__));
7116#endif // endif
7117}
7118EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
7119
7120#endif /* BT_OVER_SDIO */
7121
7122static int
7123dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7124{
7125 return 0;
7126}
7127
7128static struct net_device_stats*
7129dhd_monitor_get_stats(struct net_device *dev)
7130{
7131 return &DHD_MON_DEV_STATS(dev);
7132}
7133
7134static const struct net_device_ops netdev_monitor_ops =
7135{
7136 .ndo_start_xmit = dhd_monitor_start,
7137 .ndo_get_stats = dhd_monitor_get_stats,
7138 .ndo_do_ioctl = dhd_monitor_ioctl
7139};
7140
7141static void
7142dhd_add_monitor_if(dhd_info_t *dhd)
7143{
7144 struct net_device *dev;
7145 char *devname;
7146 uint32 scan_suppress = FALSE;
7147 int ret = BCME_OK;
7148
7149 if (!dhd) {
7150 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7151 return;
7152 }
7153
7154 if (dhd->monitor_dev) {
7155		DHD_ERROR(("%s: monitor i/f already exists\n", __FUNCTION__));
7156 return;
7157 }
7158
7159 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
7160 if (!dev) {
7161 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
7162 return;
7163 }
7164
7165 devname = "radiotap";
7166
7167 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
7168
7169#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7170#define ARPHRD_IEEE80211_PRISM 802
7171#endif // endif
7172
7173#ifndef ARPHRD_IEEE80211_RADIOTAP
7174#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7175#endif /* ARPHRD_IEEE80211_RADIOTAP */
7176
7177 dev->type = ARPHRD_IEEE80211_RADIOTAP;
7178
7179#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7180 dev->hard_start_xmit = dhd_monitor_start;
7181 dev->do_ioctl = dhd_monitor_ioctl;
7182 dev->get_stats = dhd_monitor_get_stats;
7183#else
7184 dev->netdev_ops = &netdev_monitor_ops;
7185#endif // endif
7186
7187	if (register_netdevice(dev)) {
7188		DHD_ERROR(("%s: register_netdevice failed for %s\n",
7189			__FUNCTION__, dev->name));
7190		free_netdev(dev);
7191		return;
7192	}
7192
7193 if (FW_SUPPORTED((&dhd->pub), monitor)) {
7194#ifdef DHD_PCIE_RUNTIMEPM
7195 /* Disable RuntimePM in monitor mode */
7196 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7197 DHD_ERROR(("%s : Disable RuntimePM in Monitor Mode\n", __FUNCTION__));
7198#endif /* DHD_PCIE_RUNTIMEPM */
7199 scan_suppress = TRUE;
7200 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
7201 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
7202 sizeof(scan_suppress), NULL, 0, TRUE);
7203 if (ret < 0) {
7204 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
7205 }
7206 }
7207
7208 dhd->monitor_dev = dev;
7209}
7210
7211static void
7212dhd_del_monitor_if(dhd_info_t *dhd)
7213{
7214
7215 if (!dhd) {
7216 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7217 return;
7218 }
7219
7220 if (!dhd->monitor_dev) {
7221		DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__));
7222 return;
7223 }
7224
7225	/* the check above guarantees monitor_dev is non-NULL here */
7226	if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
7227#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7228		MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
7229		MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
7230#else
7231		free_netdev(dhd->monitor_dev);
7232#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
7233	} else {
7234		unregister_netdevice(dhd->monitor_dev);
7235	}
7236	dhd->monitor_dev = NULL;
7238}
7239
7240static void
7241dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
7242{
7243 dhd_info_t *dhd = pub->info;
7244
7245 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
7246
7247 dhd_net_if_lock_local(dhd);
7248 if (!val) {
7249 /* Delete monitor */
7250 dhd_del_monitor_if(dhd);
7251 } else {
7252 /* Add monitor */
7253 dhd_add_monitor_if(dhd);
7254 }
7255 dhd->monitor_type = val;
7256 dhd_net_if_unlock_local(dhd);
7257}
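/* Control-flow sketch: a WLC_SET_MONITOR ioctl handled in
 * dhd_ioctl_process() below lands here; val = 1 creates and registers
 * "radiotap<unit>" via dhd_add_monitor_if(), while val = 0 tears the
 * interface down via dhd_del_monitor_if().
 */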
7258#endif /* WL_MONITOR */
7259
7260int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
7261{
7262 int bcmerror = BCME_OK;
7263 int buflen = 0;
7264 struct net_device *net;
7265
7266 net = dhd_idx2net(pub, ifidx);
7267 if (!net) {
7268 bcmerror = BCME_BADARG;
7269		/*
7270		 * A bad netdev pointer means DHD cannot communicate
7271		 * with higher layers, so just return from here
7272		 */
7273 return bcmerror;
7274 }
7275
7276 /* check for local dhd ioctl and handle it */
7277 if (ioc->driver == DHD_IOCTL_MAGIC) {
7278 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
7279 if (data_buf)
7280 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
7281 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
7282 if (bcmerror)
7283 pub->bcmerror = bcmerror;
7284 goto done;
7285 }
7286
7287 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
7288 if (data_buf)
7289 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
7290
7291 /* send to dongle (must be up, and wl). */
7292 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
7293 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
7294 int ret;
7295 if (atomic_read(&exit_in_progress)) {
7296 DHD_ERROR(("%s module exit in progress\n", __func__));
7297 bcmerror = BCME_DONGLE_DOWN;
7298 goto done;
7299 }
7300 ret = dhd_bus_start(pub);
7301 if (ret != 0) {
7302 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7303 bcmerror = BCME_DONGLE_DOWN;
7304 goto done;
7305 }
7306 } else {
7307 bcmerror = BCME_DONGLE_DOWN;
7308 goto done;
7309 }
7310 }
7311
7312 if (!pub->iswl) {
7313 bcmerror = BCME_DONGLE_DOWN;
7314 goto done;
7315 }
7316
7317 /*
7318 * Flush the TX queue if required for proper message serialization:
7319 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
7320 * prevent M4 encryption and
7321 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
7322 * prevent disassoc frame being sent before WPS-DONE frame.
7323 */
7324 if (ioc->cmd == WLC_SET_KEY ||
7325 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
7326 strncmp("wsec_key", data_buf, 9) == 0) ||
7327 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
7328 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
7329 ioc->cmd == WLC_DISASSOC)
7330 dhd_wait_pend8021x(net);
7331
7332 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
7333 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
7334 bcmerror = BCME_UNSUPPORTED;
7335 goto done;
7336 }
7337 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
7338
7339#ifdef WL_MONITOR
7340 /* Intercept monitor ioctl here, add/del monitor if */
7341 if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
7342 int val = 0;
7343 if (data_buf != NULL && buflen != 0) {
7344 if (buflen >= 4) {
7345 val = *(int*)data_buf;
7346 } else if (buflen >= 2) {
7347 val = *(short*)data_buf;
7348 } else {
7349 val = *(char*)data_buf;
7350 }
7351 }
7352 dhd_set_monitor(pub, ifidx, val);
7353 }
7354#endif /* WL_MONITOR */
7355
7356done:
7357 dhd_check_hang(net, pub, bcmerror);
7358
7359 return bcmerror;
7360}
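/*
 * Illustrative sketch (hypothetical userspace caller, compiled out with
 * #if 0, not part of the driver build): roughly how a tool reaches the
 * WLC_SET_MONITOR interception above. The dhd_ioctl_t layout is inferred
 * from the compat copy in dhd_ioctl_entry() below (wl_ioctl_t plus a
 * trailing driver word); WLC_SET_MONITOR comes from wlioctl.h.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int example_set_monitor(int sock, const char *ifname, int enable)
{
	struct ifreq ifr;
	dhd_ioctl_t ioc;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_MONITOR;	/* intercepted in dhd_ioctl_process() */
	ioc.buf = &enable;		/* 4-byte value; 2- and 1-byte widths also parsed */
	ioc.len = sizeof(enable);
	ioc.set = 1;
	ioc.driver = 0;			/* not DHD_IOCTL_MAGIC => routed as a WL ioctl */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ioc;
	return ioctl(sock, SIOCDEVPRIVATE, &ifr);
}
#endif /* 0 - illustrative only */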
7361
7362/**
7363 * Called by the OS (optionally via a wrapper function).
7364 * @param net Linux per dongle instance
7365 * @param ifr Linux request structure
7366 * @param cmd e.g. SIOCETHTOOL
7367 */
7368static int
7369dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
7370{
7371 dhd_info_t *dhd = DHD_DEV_INFO(net);
7372 dhd_ioctl_t ioc;
7373 int bcmerror = 0;
7374 int ifidx;
7375 int ret;
7376 void *local_buf = NULL; /**< buffer in kernel space */
7377 void __user *ioc_buf_user = NULL; /**< buffer in user space */
7378 u16 buflen = 0;
7379
7380 if (atomic_read(&exit_in_progress)) {
7381 DHD_ERROR(("%s module exit in progress\n", __func__));
7382 bcmerror = BCME_DONGLE_DOWN;
7383 return OSL_ERROR(bcmerror);
7384 }
7385
7386 DHD_OS_WAKE_LOCK(&dhd->pub);
7387 DHD_PERIM_LOCK(&dhd->pub);
7388
7389 /* Interface up check for built-in type */
7390 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
7391 DHD_TRACE(("%s: Interface is down \n", __FUNCTION__));
7392 DHD_PERIM_UNLOCK(&dhd->pub);
7393 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7394 return OSL_ERROR(BCME_NOTUP);
7395 }
7396
7397 ifidx = dhd_net2idx(dhd, net);
7398 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
7399
7400#if defined(WL_STATIC_IF)
7401 /* skip for static ndev when it is down */
7402 if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
7403 DHD_PERIM_UNLOCK(&dhd->pub);
7404 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7405 return -1;
7406 }
7407#endif /* WL_STATIC_IF */
7408
7409 if (ifidx == DHD_BAD_IF) {
7410 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
7411 DHD_PERIM_UNLOCK(&dhd->pub);
7412 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7413 return -1;
7414 }
7415
7416#if defined(WL_WIRELESS_EXT)
7417 /* linux wireless extensions */
7418 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
7419 /* may recurse, do NOT lock */
7420 ret = wl_iw_ioctl(net, ifr, cmd);
7421 DHD_PERIM_UNLOCK(&dhd->pub);
7422 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7423 return ret;
7424 }
7425#endif /* defined(WL_WIRELESS_EXT) */
7426
7427#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
7428 if (cmd == SIOCETHTOOL) {
7429 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
7430 DHD_PERIM_UNLOCK(&dhd->pub);
7431 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7432 return ret;
7433 }
7434#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7435
7436 if (cmd == SIOCDEVPRIVATE+1) {
7437 ret = wl_android_priv_cmd(net, ifr);
7438 dhd_check_hang(net, &dhd->pub, ret);
7439 DHD_PERIM_UNLOCK(&dhd->pub);
7440 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7441 return ret;
7442 }
7443
7444 if (cmd != SIOCDEVPRIVATE) {
7445 DHD_PERIM_UNLOCK(&dhd->pub);
7446 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7447 return -EOPNOTSUPP;
7448 }
7449
7450 memset(&ioc, 0, sizeof(ioc));
7451
7452#ifdef CONFIG_COMPAT
7453#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7454 if (in_compat_syscall()) {
7455#else
7456 if (is_compat_task()) {
7457#endif /* LINUX_VER >= 4.6 */
7458 compat_wl_ioctl_t compat_ioc;
7459 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
7460 bcmerror = BCME_BADADDR;
7461 goto done;
7462 }
7463 ioc.cmd = compat_ioc.cmd;
7464 ioc.buf = compat_ptr(compat_ioc.buf);
7465 ioc.len = compat_ioc.len;
7466 ioc.set = compat_ioc.set;
7467 ioc.used = compat_ioc.used;
7468 ioc.needed = compat_ioc.needed;
7469		/* To differentiate between wl and dhd read 4 more bytes */
7470 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
7471 sizeof(uint)) != 0)) {
7472 bcmerror = BCME_BADADDR;
7473 goto done;
7474 }
7475 } else
7476#endif /* CONFIG_COMPAT */
7477 {
7478 /* Copy the ioc control structure part of ioctl request */
7479 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
7480 bcmerror = BCME_BADADDR;
7481 goto done;
7482 }
7483
7484		/* To differentiate between wl and dhd read 4 more bytes */
7485 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
7486 sizeof(uint)) != 0)) {
7487 bcmerror = BCME_BADADDR;
7488 goto done;
7489 }
7490 }
7491
7492 if (!capable(CAP_NET_ADMIN)) {
7493 bcmerror = BCME_EPERM;
7494 goto done;
7495 }
7496
7497 /* Take backup of ioc.buf and restore later */
7498 ioc_buf_user = ioc.buf;
7499
7500 if (ioc.len > 0) {
7501 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
7502 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
7503 bcmerror = BCME_NOMEM;
7504 goto done;
7505 }
7506
7507 DHD_PERIM_UNLOCK(&dhd->pub);
7508 if (copy_from_user(local_buf, ioc.buf, buflen)) {
7509 DHD_PERIM_LOCK(&dhd->pub);
7510 bcmerror = BCME_BADADDR;
7511 goto done;
7512 }
7513 DHD_PERIM_LOCK(&dhd->pub);
7514
7515 *((char *)local_buf + buflen) = '\0';
7516
7517		/* On some platforms, accessing the userspace memory behind
7518		 * ioc.buf causes a kernel panic, so point ioc.buf at the
7519		 * kernel-space buffer local_buf to avoid that
7520		 */
7521 ioc.buf = local_buf;
7522 }
7523
7524 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7525 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
7526 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
7527 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
7528 bcmerror = BCME_DONGLE_DOWN;
7529 goto done;
7530 }
7531
7532 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
7533
7534 /* Restore back userspace pointer to ioc.buf */
7535 ioc.buf = ioc_buf_user;
7536
7537 if (!bcmerror && buflen && local_buf && ioc.buf) {
7538 DHD_PERIM_UNLOCK(&dhd->pub);
7539 if (copy_to_user(ioc.buf, local_buf, buflen))
7540 bcmerror = -EFAULT;
7541 DHD_PERIM_LOCK(&dhd->pub);
7542 }
7543
7544done:
7545 if (local_buf)
7546 MFREE(dhd->pub.osh, local_buf, buflen+1);
7547
7548 DHD_PERIM_UNLOCK(&dhd->pub);
7549 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7550
7551 return OSL_ERROR(bcmerror);
7552}
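/*
 * Minimal sketch of the user-buffer bounce pattern used above (generic
 * handler under #if 0, not the driver's actual code): copy the control
 * struct in, bounce ioc.buf through kernel memory, then copy results
 * back out before restoring the saved user pointer.
 */
#if 0
static int example_bounce_user_buf(void __user *ubuf, size_t len)
{
	void *kbuf;
	int err = 0;

	kbuf = kmalloc(len + 1, GFP_KERNEL);	/* +1 for the NUL, as above */
	if (!kbuf)
		return -ENOMEM;
	if (copy_from_user(kbuf, ubuf, len)) {
		err = -EFAULT;
		goto out;
	}
	((char *)kbuf)[len] = '\0';
	/* ... process kbuf entirely in kernel space ... */
	if (copy_to_user(ubuf, kbuf, len))
		err = -EFAULT;
out:
	kfree(kbuf);
	return err;
}
#endif /* 0 - illustrative only */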
7553
7554#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
7555/* Flag to indicate whether we apply a distinct power-off policy
7556 * when the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
7557 */
7558int trigger_deep_sleep = 0;
7559#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
7560
7561#ifdef FIX_CPU_MIN_CLOCK
7562static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
7563{
7564 if (dhd) {
7565#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7566 mutex_init(&dhd->cpufreq_fix);
7567#endif // endif
7568 dhd->cpufreq_fix_status = FALSE;
7569 }
7570 return 0;
7571}
7572
7573static void dhd_fix_cpu_freq(dhd_info_t *dhd)
7574{
7575	/* validate dhd before it is dereferenced by mutex_lock() below */
7576	if (!dhd)
7577		return;
7578#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7579	mutex_lock(&dhd->cpufreq_fix);
7580#endif // endif
7581	if (!dhd->cpufreq_fix_status) {
7579 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
7580#ifdef FIX_BUS_MIN_CLOCK
7581 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
7582#endif /* FIX_BUS_MIN_CLOCK */
7583 DHD_ERROR(("pm_qos_add_requests called\n"));
7584
7585 dhd->cpufreq_fix_status = TRUE;
7586 }
7587#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7588 mutex_unlock(&dhd->cpufreq_fix);
7589#endif // endif
7590}
7591
7592static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
7593{
7594	/* validate dhd before it is dereferenced by mutex_lock() below */
7595	if (!dhd)
7596		return;
7597#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7598	mutex_lock(&dhd->cpufreq_fix);
7599#endif // endif
7600	if (dhd->cpufreq_fix_status != TRUE) {
7598#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7599 mutex_unlock(&dhd->cpufreq_fix);
7600#endif // endif
7601 return;
7602 }
7603
7604 pm_qos_remove_request(&dhd->dhd_cpu_qos);
7605#ifdef FIX_BUS_MIN_CLOCK
7606 pm_qos_remove_request(&dhd->dhd_bus_qos);
7607#endif /* FIX_BUS_MIN_CLOCK */
7608	DHD_ERROR(("pm_qos_remove_requests called\n"));
7609
7610 dhd->cpufreq_fix_status = FALSE;
7611#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7612 mutex_unlock(&dhd->cpufreq_fix);
7613#endif // endif
7614}
7615#endif /* FIX_CPU_MIN_CLOCK */
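/*
 * The PM QoS pattern used above, in one place (a sketch under #if 0,
 * assuming the legacy pre-5.7 pm_qos API this kernel provides;
 * PM_QOS_CPU_FREQ_MIN and PM_QOS_BUS_THROUGHPUT are vendor classes,
 * not mainline ones). Each add must be balanced by exactly one remove.
 */
#if 0
static struct pm_qos_request example_qos;

static void example_qos_hold(void)
{
	/* pin the CPU frequency floor at 300 MHz while the request is active */
	pm_qos_add_request(&example_qos, PM_QOS_CPU_FREQ_MIN, 300000);
}

static void example_qos_release(void)
{
	/* drops the floor; must not be called on a request never added */
	pm_qos_remove_request(&example_qos);
}
#endif /* 0 - illustrative only */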
7616
7617#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7618static int
7619dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
7620{
7621 int error;
7622 dhd_info_t *dhd = DHD_DEV_INFO(net);
7623
7624 if (atomic_read(&dhd->pub.block_bus))
7625 return -EHOSTDOWN;
7626
7627 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
7628 return BCME_ERROR;
7629
7630 error = dhd_ioctl_entry(net, ifr, cmd);
7631
7632 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7633 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7634
7635 return error;
7636}
7637#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
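/*
 * The wrapper above follows the stock runtime-PM autosuspend idiom; a
 * minimal sketch against a plain struct device (hypothetical callback
 * under #if 0, not driver code). The sketch also drops the usage count
 * on a failed pm_runtime_get_sync(), which the stock idiom requires.
 */
#if 0
static int example_rpm_guarded_op(struct device *dev)
{
	int err;

	err = pm_runtime_get_sync(dev);		/* resume the device; may sleep */
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return err;
	}
	err = 0; /* ... perform the bus operation here ... */
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);
	return err;
}
#endif /* 0 - illustrative only */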
7638
7639static int
7640dhd_stop(struct net_device *net)
7641{
7642 int ifidx = 0;
7643 bool skip_reset = false;
7644#if defined(WL_CFG80211)
7645 unsigned long flags = 0;
7646#ifdef WL_STATIC_IF
7647 struct bcm_cfg80211 *cfg = wl_get_cfg(net);
7648#endif /* WL_STATIC_IF */
7649#endif /* WL_CFG80211 */
7650 dhd_info_t *dhd = DHD_DEV_INFO(net);
7651 DHD_OS_WAKE_LOCK(&dhd->pub);
7652 DHD_PERIM_LOCK(&dhd->pub);
7653 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
7654 dhd->pub.rxcnt_timeout = 0;
7655 dhd->pub.txcnt_timeout = 0;
7656
7657#ifdef BCMPCIE
7658 dhd->pub.d3ackcnt_timeout = 0;
7659#endif /* BCMPCIE */
7660
7661 mutex_lock(&dhd->pub.ndev_op_sync);
7662
7663 if (dhd->pub.up == 0) {
7664 goto exit;
7665 }
7666#if defined(DHD_HANG_SEND_UP_TEST)
7667 if (dhd->pub.req_hang_type) {
7668 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
7669 __FUNCTION__, dhd->pub.req_hang_type));
7670 dhd->pub.req_hang_type = 0;
7671 }
7672#endif /* DHD_HANG_SEND_UP_TEST */
7673
7674 dhd_if_flush_sta(DHD_DEV_IFP(net));
7675
7676#ifdef FIX_CPU_MIN_CLOCK
7677 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
7678 dhd_rollback_cpu_freq(dhd);
7679#endif /* FIX_CPU_MIN_CLOCK */
7680
7681 ifidx = dhd_net2idx(dhd, net);
7682 BCM_REFERENCE(ifidx);
7683
7684#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7685 /* If static if is operational, don't reset the chip */
7686 if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
7687 DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n"));
7688 wl_cfg80211_sta_ifdown(net);
7689 skip_reset = true;
7690 goto exit;
7691 }
7692#endif /* WL_STATIC_IF && WL_CFG80211 */
7693#ifdef WL_CFG80211
7694
7695 /* Disable Runtime PM before interface down */
7696 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7697
7698 spin_lock_irqsave(&dhd->pub.up_lock, flags);
7699 dhd->pub.up = 0;
7700 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
7701#else
7702 dhd->pub.up = 0;
7703#endif /* WL_CFG80211 */
7704
7705#ifdef WL_CFG80211
7706 if (ifidx == 0) {
7707 dhd_if_t *ifp;
7708 wl_cfg80211_down(net);
7709
7710 ifp = dhd->iflist[0];
7711 /*
7712 * For CFG80211: Clean up all the left over virtual interfaces
7713 * when the primary Interface is brought down. [ifconfig wlan0 down]
7714 */
7715 if (!dhd_download_fw_on_driverload) {
7716 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
7717 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7718 int i;
7719#ifdef WL_CFG80211_P2P_DEV_IF
7720 wl_cfg80211_del_p2p_wdev(net);
7721#endif /* WL_CFG80211_P2P_DEV_IF */
7722
7723 dhd_net_if_lock_local(dhd);
7724 for (i = 1; i < DHD_MAX_IFS; i++)
7725 dhd_remove_if(&dhd->pub, i, FALSE);
7726
7727 if (ifp && ifp->net) {
7728 dhd_if_del_sta_list(ifp);
7729 }
7730#ifdef ARP_OFFLOAD_SUPPORT
7731 if (dhd_inetaddr_notifier_registered) {
7732 dhd_inetaddr_notifier_registered = FALSE;
7733 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
7734 }
7735#endif /* ARP_OFFLOAD_SUPPORT */
7736#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7737 if (dhd_inet6addr_notifier_registered) {
7738 dhd_inet6addr_notifier_registered = FALSE;
7739 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
7740 }
7741#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7742 dhd_net_if_unlock_local(dhd);
7743 }
7744 cancel_work_sync(dhd->dhd_deferred_wq);
7745
7746#ifdef SHOW_LOGTRACE
7747 /* Wait till event_log_dispatcher_work finishes */
7748 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
7749#endif /* SHOW_LOGTRACE */
7750
7751#if defined(DHD_LB_RXP)
7752 __skb_queue_purge(&dhd->rx_pend_queue);
7753#endif /* DHD_LB_RXP */
7754
7755#if defined(DHD_LB_TXP)
7756 skb_queue_purge(&dhd->tx_pend_queue);
7757#endif /* DHD_LB_TXP */
7758 }
7759
7760#if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
7761 defined(ARGOS_NOTIFY_CB)
7762 argos_register_notifier_deinit();
7763#endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
7764#ifdef DHDTCPACK_SUPPRESS
7765 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7766#endif /* DHDTCPACK_SUPPRESS */
7767#if defined(DHD_LB_RXP)
7768 if (ifp && ifp->net == dhd->rx_napi_netdev) {
7769 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
7770 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7771 skb_queue_purge(&dhd->rx_napi_queue);
7772 napi_disable(&dhd->rx_napi_struct);
7773 netif_napi_del(&dhd->rx_napi_struct);
7774 dhd->rx_napi_netdev = NULL;
7775 }
7776#endif /* DHD_LB_RXP */
7777 }
7778#endif /* WL_CFG80211 */
7779
7780 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
7781
7782#ifdef PROP_TXSTATUS
7783 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
7784#endif // endif
7785#ifdef SHOW_LOGTRACE
7786 if (!dhd_download_fw_on_driverload) {
7787 /* Release the skbs from queue for WLC_E_TRACE event */
7788 dhd_event_logtrace_flush_queue(&dhd->pub);
7789 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
7790 if (dhd->event_data.fmts) {
7791 MFREE(dhd->pub.osh, dhd->event_data.fmts,
7792 dhd->event_data.fmts_size);
7793 dhd->event_data.fmts = NULL;
7794 }
7795 if (dhd->event_data.raw_fmts) {
7796 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
7797 dhd->event_data.raw_fmts_size);
7798 dhd->event_data.raw_fmts = NULL;
7799 }
7800 if (dhd->event_data.raw_sstr) {
7801 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
7802 dhd->event_data.raw_sstr_size);
7803 dhd->event_data.raw_sstr = NULL;
7804 }
7805 if (dhd->event_data.rom_raw_sstr) {
7806 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
7807 dhd->event_data.rom_raw_sstr_size);
7808 dhd->event_data.rom_raw_sstr = NULL;
7809 }
7810 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
7811 }
7812 }
7813#endif /* SHOW_LOGTRACE */
7814#ifdef APF
7815 dhd_dev_apf_delete_filter(net);
7816#endif /* APF */
7817
7818 /* Stop the protocol module */
7819 dhd_prot_stop(&dhd->pub);
7820
7821 OLD_MOD_DEC_USE_COUNT;
7822exit:
7823 if (skip_reset == false) {
7824#if defined(WL_CFG80211)
7825 if (ifidx == 0 && !dhd_download_fw_on_driverload) {
7826#if defined(BT_OVER_SDIO)
7827 dhd_bus_put(&dhd->pub, WLAN_MODULE);
7828 wl_android_set_wifi_on_flag(FALSE);
7829#else
7830 wl_android_wifi_off(net, TRUE);
7831#endif /* BT_OVER_SDIO */
7832 }
7833#ifdef SUPPORT_DEEP_SLEEP
7834 else {
7835			/* CSP#505233: Flag to indicate whether we apply a
7836			 * distinct power-off policy when the user sets the menu
7837			 * "Keep Wi-Fi on during sleep" to "Never"
7838			 */
7839 if (trigger_deep_sleep) {
7840 dhd_deepsleep(net, 1);
7841 trigger_deep_sleep = 0;
7842 }
7843 }
7844#endif /* SUPPORT_DEEP_SLEEP */
7845#endif // endif
7846 dhd->pub.hang_was_sent = 0;
7847
7848		/* Clear country spec for built-in type driver */
7849 if (!dhd_download_fw_on_driverload) {
7850 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
7851 dhd->pub.dhd_cspec.rev = 0;
7852 dhd->pub.dhd_cspec.ccode[0] = 0x00;
7853 }
7854
7855#ifdef BCMDBGFS
7856 dhd_dbgfs_remove();
7857#endif // endif
7858 }
7859
7860 DHD_PERIM_UNLOCK(&dhd->pub);
7861 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7862
7863 /* Destroy wakelock */
7864 if (!dhd_download_fw_on_driverload &&
7865 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
7866 (skip_reset == false)) {
7867 DHD_OS_WAKE_LOCK_DESTROY(dhd);
7868 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
7869 }
7870
7871 mutex_unlock(&dhd->pub.ndev_op_sync);
7872 return 0;
7873}
7874
7875#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
7876 defined(USE_INITIAL_SHORT_DWELL_TIME))
7877extern bool g_first_broadcast_scan;
7878#endif /* WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
7879
7880#ifdef WL11U
7881static int dhd_interworking_enable(dhd_pub_t *dhd)
7882{
7883 uint32 enable = true;
7884 int ret = BCME_OK;
7885
7886 ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
7887 if (ret < 0) {
7888		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
7889 }
7890
7891 return ret;
7892}
7893#endif /* WL11U */
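/*
 * dhd_interworking_enable() above shows the SET form of dhd_iovar().
 * A GET looks like the sketch below (parameter meanings inferred from
 * the SET call: no param buffer, a response buffer with its length,
 * and set=FALSE; guarded out with #if 0 as an illustration only).
 */
#if 0
static int example_get_interworking(dhd_pub_t *dhd, uint32 *enabled)
{
	/* query the "interworking" iovar on the primary interface */
	return dhd_iovar(dhd, 0, "interworking", NULL, 0,
		(char *)enabled, sizeof(*enabled), FALSE);
}
#endif /* 0 - illustrative only */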
7894
7895static int
7896dhd_open(struct net_device *net)
7897{
7898 dhd_info_t *dhd = DHD_DEV_INFO(net);
7899#ifdef TOE
7900 uint32 toe_ol;
7901#endif // endif
7902 int ifidx;
7903 int32 ret = 0;
7904
7905#if defined(PREVENT_REOPEN_DURING_HANG)
7906 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
7907 if (dhd->pub.hang_was_sent == 1) {
7908 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
7909 /* Force to bring down WLAN interface in case dhd_stop() is not called
7910 * from the upper layer when HANG event is triggered.
7911 */
7912 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
7913 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
7914 dhd_stop(net);
7915 } else {
7916 return -1;
7917 }
7918 }
7919#endif /* PREVENT_REOPEN_DURING_HANG */
7920
7921 mutex_lock(&dhd->pub.ndev_op_sync);
7922
7923 if (dhd->pub.up == 1) {
7924 /* already up */
7925 DHD_ERROR(("Primary net_device is already up \n"));
7926 mutex_unlock(&dhd->pub.ndev_op_sync);
7927 return BCME_OK;
7928 }
7929
7930 if (!dhd_download_fw_on_driverload) {
7931 if (!dhd_driver_init_done) {
7932 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
7933 mutex_unlock(&dhd->pub.ndev_op_sync);
7934 return -1;
7935 }
7936 /* Init wakelock */
7937 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
7938 DHD_OS_WAKE_LOCK_INIT(dhd);
7939 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7940 }
7941
7942#ifdef SHOW_LOGTRACE
7943 skb_queue_head_init(&dhd->evt_trace_queue);
7944
7945 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
7946 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
7947 if (ret == BCME_OK) {
7948 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7949 st_str_file_path, map_file_path);
7950 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7951 rom_st_str_file_path, rom_map_file_path);
7952 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
7953 }
7954 }
7955#endif /* SHOW_LOGTRACE */
7956 }
7957
7958#if defined(MULTIPLE_SUPPLICANT)
7959#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
7960 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
7961 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
7962 }
7963 mutex_lock(&_dhd_sdio_mutex_lock_);
7964#endif // endif
7965#endif /* MULTIPLE_SUPPLICANT */
7966
7967 DHD_OS_WAKE_LOCK(&dhd->pub);
7968 DHD_PERIM_LOCK(&dhd->pub);
7969 dhd->pub.dongle_trap_occured = 0;
7970 dhd->pub.hang_was_sent = 0;
7971 dhd->pub.hang_reason = 0;
7972 dhd->pub.iovar_timeout_occured = 0;
7973#ifdef PCIE_FULL_DONGLE
7974 dhd->pub.d3ack_timeout_occured = 0;
7975#endif /* PCIE_FULL_DONGLE */
7976#ifdef DHD_MAP_LOGGING
7977 dhd->pub.smmu_fault_occurred = 0;
7978#endif /* DHD_MAP_LOGGING */
7979
7980#ifdef DHD_LOSSLESS_ROAMING
7981 dhd->pub.dequeue_prec_map = ALLPRIO;
7982#endif // endif
7983
7984#if !defined(WL_CFG80211)
7985 /*
7986 * Force start if ifconfig_up gets called before START command
7987 * We keep WEXT's wl_control_wl_start to provide backward compatibility
7988 * This should be removed in the future
7989 */
7990 ret = wl_control_wl_start(net);
7991 if (ret != 0) {
7992 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7993 ret = -1;
7994 goto exit;
7995 }
7996
7997#endif // endif
7998
7999 ifidx = dhd_net2idx(dhd, net);
8000 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
8001
8002 if (ifidx < 0) {
8003 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
8004 ret = -1;
8005 goto exit;
8006 }
8007
8008 if (!dhd->iflist[ifidx]) {
8009 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
8010 ret = -1;
8011 goto exit;
8012 }
8013
8014 if (ifidx == 0) {
8015 atomic_set(&dhd->pend_8021x_cnt, 0);
8016#if defined(WL_CFG80211)
8017 if (!dhd_download_fw_on_driverload) {
8018 DHD_ERROR(("\n%s\n", dhd_version));
8019#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8020 g_first_broadcast_scan = TRUE;
8021#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8022#if defined(BT_OVER_SDIO)
8023 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
8024 wl_android_set_wifi_on_flag(TRUE);
8025#else
8026 ret = wl_android_wifi_on(net);
8027#endif /* BT_OVER_SDIO */
8028 if (ret != 0) {
8029 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
8030 __FUNCTION__, ret));
8031 ret = -1;
8032 goto exit;
8033 }
8034 }
8035#ifdef SUPPORT_DEEP_SLEEP
8036 else {
8037			/* Flag to indicate whether we apply a distinct
8038			 * power-off policy when the user sets the menu
8039			 * "Keep Wi-Fi on during sleep" to "Never"
8040			 */
8041 if (trigger_deep_sleep) {
8042#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8043 g_first_broadcast_scan = TRUE;
8044#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8045 dhd_deepsleep(net, 0);
8046 trigger_deep_sleep = 0;
8047 }
8048 }
8049#endif /* SUPPORT_DEEP_SLEEP */
8050#ifdef FIX_CPU_MIN_CLOCK
8051 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
8052 dhd_init_cpufreq_fix(dhd);
8053 dhd_fix_cpu_freq(dhd);
8054 }
8055#endif /* FIX_CPU_MIN_CLOCK */
8056#endif // endif
8057
8058 if (dhd->pub.busstate != DHD_BUS_DATA) {
8059
8060 /* try to bring up bus */
8061 DHD_PERIM_UNLOCK(&dhd->pub);
8062
8063#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8064 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
8065 ret = dhd_bus_start(&dhd->pub);
8066 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
8067 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
8068 }
8069#else
8070 ret = dhd_bus_start(&dhd->pub);
8071#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8072
8073 DHD_PERIM_LOCK(&dhd->pub);
8074 if (ret) {
8075 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8076 ret = -1;
8077 goto exit;
8078 }
8079
8080 }
8081
8082#ifdef BT_OVER_SDIO
8083 if (dhd->pub.is_bt_recovery_required) {
8084 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
8085 bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
8086 }
8087 dhd->pub.is_bt_recovery_required = FALSE;
8088#endif // endif
8089
8090 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
8091 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
8092
8093#ifdef TOE
8094 /* Get current TOE mode from dongle */
8095 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
8096 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
8097 } else {
8098 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
8099 }
8100#endif /* TOE */
8101
8102#if defined(DHD_LB_RXP)
8103 __skb_queue_head_init(&dhd->rx_pend_queue);
8104 if (dhd->rx_napi_netdev == NULL) {
8105 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
8106 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
8107 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
8108 dhd_napi_poll, dhd_napi_weight);
8109 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
8110 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
8111 napi_enable(&dhd->rx_napi_struct);
8112 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
8113 skb_queue_head_init(&dhd->rx_napi_queue);
8114 } /* rx_napi_netdev == NULL */
8115#endif /* DHD_LB_RXP */
8116#ifdef DHD_LB_IRQSET
8117 dhd_irq_set_affinity(&dhd->pub);
8118#endif /* DHD_LB_IRQSET */
8119
8120#if defined(DHD_LB_TXP)
8121 /* Use the variant that uses locks */
8122 skb_queue_head_init(&dhd->tx_pend_queue);
8123#endif /* DHD_LB_TXP */
8124
8125#if defined(WL_CFG80211)
8126 if (unlikely(wl_cfg80211_up(net))) {
8127 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
8128 ret = -1;
8129 goto exit;
8130 }
8131 if (!dhd_download_fw_on_driverload) {
8132#ifdef ARP_OFFLOAD_SUPPORT
8133 dhd->pend_ipaddr = 0;
8134 if (!dhd_inetaddr_notifier_registered) {
8135 dhd_inetaddr_notifier_registered = TRUE;
8136 register_inetaddr_notifier(&dhd_inetaddr_notifier);
8137 }
8138#endif /* ARP_OFFLOAD_SUPPORT */
8139#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8140 if (!dhd_inet6addr_notifier_registered) {
8141 dhd_inet6addr_notifier_registered = TRUE;
8142 register_inet6addr_notifier(&dhd_inet6addr_notifier);
8143 }
8144#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8145 }
8146
8147#if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
8148 defined(ARGOS_NOTIFY_CB)
8149 argos_register_notifier_init(net);
8150#endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
8151#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
8152		/* both the RPS and non-RPS configurations start with suppression off */
8153		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8157#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
8158#if defined(NUM_SCB_MAX_PROBE)
8159 dhd_set_scb_probe(&dhd->pub);
8160#endif /* NUM_SCB_MAX_PROBE */
8161#endif /* WL_CFG80211 */
8162 }
8163
8164 dhd->pub.up = 1;
8165
8166 if (wl_event_enable) {
8167 /* For wl utility to receive events */
8168 dhd->pub.wl_event_enabled = true;
8169 } else {
8170 dhd->pub.wl_event_enabled = false;
8171 }
8172
8173 if (logtrace_pkt_sendup) {
8174		/* For any daemon to receive logtrace */
8175 dhd->pub.logtrace_pkt_sendup = true;
8176 } else {
8177 dhd->pub.logtrace_pkt_sendup = false;
8178 }
8179
8180 OLD_MOD_INC_USE_COUNT;
8181
8182#ifdef BCMDBGFS
8183 dhd_dbgfs_init(&dhd->pub);
8184#endif // endif
8185
8186exit:
8187 mutex_unlock(&dhd->pub.ndev_op_sync);
8188 if (ret) {
8189 dhd_stop(net);
8190 }
8191
8192 DHD_PERIM_UNLOCK(&dhd->pub);
8193 DHD_OS_WAKE_UNLOCK(&dhd->pub);
8194
8195#if defined(MULTIPLE_SUPPLICANT)
8196#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8197 mutex_unlock(&_dhd_sdio_mutex_lock_);
8198#endif // endif
8199#endif /* MULTIPLE_SUPPLICANT */
8200
8201 return ret;
8202}
8203
8204/*
8205 * ndo_start handler for primary ndev
8206 */
8207static int
8208dhd_pri_open(struct net_device *net)
8209{
8210 s32 ret;
8211
8212 ret = dhd_open(net);
8213 if (unlikely(ret)) {
8214 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
8215 return ret;
8216 }
8217
8218 /* Allow transmit calls */
8219 netif_start_queue(net);
8220 DHD_ERROR(("[%s] tx queue started\n", net->name));
8221 return ret;
8222}
8223
8224/*
8225 * ndo_stop handler for primary ndev
8226 */
8227static int
8228dhd_pri_stop(struct net_device *net)
8229{
8230 s32 ret;
8231
8232 /* stop tx queue */
8233 netif_stop_queue(net);
8234 DHD_ERROR(("[%s] tx queue stopped\n", net->name));
8235
8236 ret = dhd_stop(net);
8237 if (unlikely(ret)) {
8238 DHD_ERROR(("dhd_stop failed: %d\n", ret));
8239 return ret;
8240 }
8241
8242 return ret;
8243}
8244
8245#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
8246/*
8247 * For static I/Fs, the firmware interface init
8248 * is done from the IFF_UP context.
8249 */
8250static int
8251dhd_static_if_open(struct net_device *net)
8252{
8253 s32 ret = 0;
8254 struct bcm_cfg80211 *cfg;
8255 struct net_device *primary_netdev = NULL;
8256
8257 cfg = wl_get_cfg(net);
8258 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
8259
8260 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
8261 DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
8262 ret = BCME_OK;
8263 goto done;
8264 }
8265
8266 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
8267 /* Ensure fw is initialized. If it is already initialized,
8268 * dhd_open will return success.
8269 */
8270 ret = dhd_open(primary_netdev);
8271 if (unlikely(ret)) {
8272 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
8273 goto done;
8274 }
8275
8276 ret = wl_cfg80211_static_if_open(net);
8277 if (!ret) {
8278 /* Allow transmit calls */
8279 netif_start_queue(net);
8280 }
8281done:
8282 return ret;
8283}
8284
8285static int
8286dhd_static_if_stop(struct net_device *net)
8287{
8288 struct bcm_cfg80211 *cfg;
8289 struct net_device *primary_netdev = NULL;
8290 int ret = BCME_OK;
8291 dhd_info_t *dhd = DHD_DEV_INFO(net);
8292
8293 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
8294
8295 /* Ensure queue is disabled */
8296 netif_tx_disable(net);
8297
8298 cfg = wl_get_cfg(net);
8299 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
8300 DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
8301 return BCME_OK;
8302 }
8303
8304 ret = wl_cfg80211_static_if_close(net);
8305
8306 if (dhd->pub.up == 0) {
8307 /* If fw is down, return */
8308 DHD_ERROR(("fw down\n"));
8309 return BCME_OK;
8310 }
8311	/* If the STA iface is not operational, invoke dhd_stop from this
8312	 * context.
8313	 */
8314 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
8315 if (!(primary_netdev->flags & IFF_UP)) {
8316 ret = dhd_stop(primary_netdev);
8317 } else {
8318 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
8319 }
8320
8321 return ret;
8322}
8323#endif /* WL_STATIC_IF && WL_CFG80211 */
8324
8325int dhd_do_driver_init(struct net_device *net)
8326{
8327 dhd_info_t *dhd = NULL;
8328
8329 if (!net) {
8330 DHD_ERROR(("Primary Interface not initialized \n"));
8331 return -EINVAL;
8332 }
8333
8334#ifdef MULTIPLE_SUPPLICANT
8335#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8336 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
8337 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
8338 return 0;
8339 }
8340#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
8341#endif /* MULTIPLE_SUPPLICANT */
8342
8344 dhd = DHD_DEV_INFO(net);
8345
8346 /* If driver is already initialized, do nothing
8347 */
8348 if (dhd->pub.busstate == DHD_BUS_DATA) {
8349		DHD_TRACE(("Driver already initialized. Nothing to do"));
8350 return 0;
8351 }
8352
8353 if (dhd_open(net) < 0) {
8354 DHD_ERROR(("Driver Init Failed \n"));
8355 return -1;
8356 }
8357
8358 return 0;
8359}
8360
8361int
8362dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8363{
8364
8365#ifdef WL_CFG80211
8366 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8367 ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
8368 return BCME_OK;
8369#endif // endif
8370
8371 /* handle IF event caused by wl commands, SoftAP, WEXT and
8372 * anything else. This has to be done asynchronously otherwise
8373 * DPC will be blocked (and iovars will timeout as DPC has no chance
8374 * to read the response back)
8375 */
8376 if (ifevent->ifidx > 0) {
8377 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8378 if (if_event == NULL) {
8379 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8380 MALLOCED(dhdinfo->pub.osh)));
8381 return BCME_NOMEM;
8382 }
8383
8384 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8385 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8386 strncpy(if_event->name, name, IFNAMSIZ);
8387 if_event->name[IFNAMSIZ - 1] = '\0';
8388 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
8389 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8390 }
8391
8392 return BCME_OK;
8393}
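/*
 * Why the deferral above matters, as a generic sketch (#if 0,
 * hypothetical payload handling, not the driver's wq plumbing): IF
 * events arrive in DPC/tasklet context, where interface add/del work
 * may sleep, so the event payload is copied and handed to a workqueue.
 * The stock kernel idiom:
 */
#if 0
struct example_evt_work {
	struct work_struct work;
	dhd_if_event_t evt;		/* private copy; the event data is transient */
};

static void example_evt_fn(struct work_struct *work)
{
	struct example_evt_work *w = container_of(work, struct example_evt_work, work);

	/* ... sleepable interface add/del handling using w->evt ... */
	kfree(w);
}

static int example_evt_defer(const dhd_if_event_t *evt)
{
	/* GFP_ATOMIC: we may be called from tasklet context */
	struct example_evt_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return -ENOMEM;
	w->evt = *evt;			/* copy before the caller's buffer goes away */
	INIT_WORK(&w->work, example_evt_fn);
	schedule_work(&w->work);	/* runs later in process context */
	return 0;
}
#endif /* 0 - illustrative only */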
8394
8395int
8396dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8397{
8398 dhd_if_event_t *if_event;
8399
8400#ifdef WL_CFG80211
8401 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8402 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8403 return BCME_OK;
8404#endif /* WL_CFG80211 */
8405
8406 /* handle IF event caused by wl commands, SoftAP, WEXT and
8407 * anything else
8408 */
8409 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8410 if (if_event == NULL) {
8411 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8412 MALLOCED(dhdinfo->pub.osh)));
8413 return BCME_NOMEM;
8414 }
8415 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8416 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8417 strncpy(if_event->name, name, IFNAMSIZ);
8418 if_event->name[IFNAMSIZ - 1] = '\0';
8419 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
8420 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8421
8422 return BCME_OK;
8423}
8424
8425int
8426dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8427{
8428#ifdef WL_CFG80211
8429 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8430 ifevent->ifidx, name, mac, ifevent->bssidx);
8431#endif /* WL_CFG80211 */
8432 return BCME_OK;
8433}
8434
8435#ifdef WL_NATOE
8436/* Handler to update natoe info and bind with new subscriptions if there is a change in config */
8437static void
8438dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event)
8439{
8440	dhd_info_t *dhd = handle;
8441	wl_event_data_natoe_t *natoe = event_info;
8442	dhd_nfct_info_t *nfct;
8443
8444	if (event != DHD_WQ_WORK_NATOE_EVENT) {
8445		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8446		return;
8447	}
8448
8449	/* validate dhd before it is dereferenced for the nfct lookup below */
8450	if (!dhd) {
8451		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8452		return;
8453	}
8454	nfct = dhd->pub.nfct;
8453 if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
8454 (natoe->start_port < natoe->end_port)) {
8455 /* Rebind subscriptions to start receiving notifications from groups */
8456 if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
8457 dhd_ct_close(nfct);
8458 }
8459 dhd_ct_send_dump_req(nfct);
8460 } else if (!natoe->natoe_active) {
8461 /* Rebind subscriptions to stop receiving notifications from groups */
8462 if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
8463 dhd_ct_close(nfct);
8464 }
8465 }
8466}
8467
8468/* As a NATOE enable/disable event is received, we have to bind with new NL subscriptions.
8469 * Schedule a workq to switch out of tasklet context, as the bind call may sleep in the handler
8470 */
8471int
8472dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
8473{
8474 wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
8475
8476 if (dhd->nfct) {
8477 wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
8478 uint8 prev_enable = natoe->natoe_active;
8479
8480 spin_lock_bh(&dhd->nfct_lock);
8481 memcpy(natoe, event_data, sizeof(*event_data));
8482 spin_unlock_bh(&dhd->nfct_lock);
8483
8484 if (prev_enable != event_data->natoe_active) {
8485 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
8486 (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
8487 dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW);
8488 }
8489 return BCME_OK;
8490 }
8491 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
8492 return BCME_ERROR;
8493}
8494
8495/* Handler to send natoe ioctl to dongle */
8496static void
8497dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
8498{
8499 dhd_info_t *dhd = handle;
8500 dhd_ct_ioc_t *ct_ioc = event_info;
8501
8502 if (event != DHD_WQ_WORK_NATOE_IOCTL) {
8503 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8504 return;
8505 }
8506
8507 if (!dhd) {
8508 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8509 return;
8510 }
8511
8512 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
8513 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
8514 }
8515}
8516
8517/* When a Netlink message contains port-collision info, the info must be sent to the dongle FW.
8518 * For that we switch context out of softirq/tasklet by scheduling a workq for the natoe_ct ioctl
8519 */
8520void
8521dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
8522{
8523
8524 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
8525 DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
8526 DHD_WQ_WORK_PRIORITY_HIGH);
8527}
8528#endif /* WL_NATOE */
8529
8530/* This API maps ndev to ifp inclusive of static IFs */
8531static dhd_if_t *
8532dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
8533{
8534 dhd_if_t *ifp = NULL;
8535#ifdef WL_STATIC_IF
8536 u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
8537#else
8538 u32 ifidx = (DHD_MAX_IFS - 1);
8539#endif /* WL_STATIC_IF */
8540
8541 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
8542 do {
8543 ifp = dhdinfo->iflist[ifidx];
8544 if (ifp && (ifp->net == ndev)) {
8545 DHD_TRACE(("match found for %s. ifidx:%d\n",
8546 ndev->name, ifidx));
8547 return ifp;
8548 }
8549 } while (ifidx--);
8550
8551 DHD_ERROR(("no entry found for %s\n", ndev->name));
8552 return NULL;
8553}
8554
8555bool
8556dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
8557{
8558 dhd_if_t *ifp = NULL;
8559
8560 if (!dhdp || !ndev) {
8561 DHD_ERROR(("wrong input\n"));
8562 ASSERT(0);
8563 return false;
8564 }
8565
8566 ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
8567 return (ifp && (ifp->static_if == true));
8568}
8569
8570#ifdef WL_STATIC_IF
8571/* In some cases, while registering an I/F, the actual ifidx, bssidx and dngl_name
8572 * are not known, e.g. in the static i/f case. This function allows updating them
8573 * once they are known.
8574 */
8575s32
8576dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
8577 uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
8578{
8579 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
8580 dhd_if_t *ifp, *ifp_new;
8581 s32 cur_idx;
8582 dhd_dev_priv_t * dev_priv;
8583
8584 DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
8585 if_state, ifidx));
8586
8587 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
8588
8589 if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
8590 return -ENODEV;
8591 }
8592 cur_idx = ifp->idx;
8593
8594 if (if_state == NDEV_STATE_OS_IF_CREATED) {
8595 /* mark static if */
8596 ifp->static_if = TRUE;
8597 return BCME_OK;
8598 }
8599
8600 ifp_new = dhdinfo->iflist[ifidx];
8601 if (ifp_new && (ifp_new != ifp)) {
8602 /* There should be only one entry for a given ifidx. */
8603 DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
8604 ASSERT(0);
8605 dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
8606 net_os_send_hang_message(ifp->net);
8607 return -EINVAL;
8608 }
8609
8610 /* For static if delete case, cleanup the if before ifidx update */
8611 if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
8612 (if_state == NDEV_STATE_FW_IF_FAILED)) {
8613 dhd_cleanup_if(ifp->net);
8614 dev_priv = DHD_DEV_PRIV(ndev);
8615 dev_priv->ifidx = ifidx;
8616 }
8617
8618 /* update the iflist ifidx slot with cached info */
8619 dhdinfo->iflist[ifidx] = ifp;
8620 dhdinfo->iflist[cur_idx] = NULL;
8621
8622 /* update the values */
8623 ifp->idx = ifidx;
8624 ifp->bssidx = bssidx;
8625
8626 if (if_state == NDEV_STATE_FW_IF_CREATED) {
8627 dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
8628 /* initialize the dongle provided if name */
8629 if (dngl_name) {
8630 strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8631		} else if (ndev->name[0] != '\0') {
8632			strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
8633 }
8634 if (mac != NULL)
8635 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8636 }
8637 DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
8638 ifidx, cur_idx, if_state));
8639 return BCME_OK;
8640}
8641#endif /* WL_STATIC_IF */
8642
8643/* Unregister and free the existing net_device interface (if any) in iflist and
8644 * allocate a new one; the slot is reused. This function does NOT register the
8645 * new interface with the Linux kernel; dhd_register_if does that job.
8646 */
8647struct net_device*
8648dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
8649 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
8650{
8651 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8652 dhd_if_t *ifp;
8653
8654 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
8655
8656 ifp = dhdinfo->iflist[ifidx];
8657
8658 if (ifp != NULL) {
8659 if (ifp->net != NULL) {
8660 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8661 __FUNCTION__, ifp->net->name, ifidx));
8662
8663 if (ifidx == 0) {
8664 /* For primary ifidx (0), there shouldn't be
8665 * any netdev present already.
8666 */
8667 DHD_ERROR(("Primary ifidx populated already\n"));
8668 ASSERT(0);
8669 return NULL;
8670 }
8671
8672 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
8673
8674 /* in unregister_netdev case, the interface gets freed by net->destructor
8675 * (which is set to free_netdev)
8676 */
8677 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8678 free_netdev(ifp->net);
8679 } else {
8680 netif_stop_queue(ifp->net);
8681 if (need_rtnl_lock)
8682 unregister_netdev(ifp->net);
8683 else
8684 unregister_netdevice(ifp->net);
8685 }
8686 ifp->net = NULL;
8687 }
8688 } else {
8689 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
8690 if (ifp == NULL) {
8691 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
8692 return NULL;
8693 }
8694 }
8695
8696 memset(ifp, 0, sizeof(dhd_if_t));
8697 ifp->info = dhdinfo;
8698 ifp->idx = ifidx;
8699 ifp->bssidx = bssidx;
8700#ifdef DHD_MCAST_REGEN
8701 ifp->mcast_regen_bss_enable = FALSE;
8702#endif // endif
8703 /* set to TRUE rx_pkt_chainable at alloc time */
8704 ifp->rx_pkt_chainable = TRUE;
8705
8706 if (mac != NULL)
8707 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8708
8709 /* Allocate etherdev, including space for private structure */
8710 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
8711 if (ifp->net == NULL) {
8712		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, (size_t)DHD_DEV_PRIV_SIZE));
8713 goto fail;
8714 }
8715
8716 /* Setup the dhd interface's netdevice private structure. */
8717 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
8718
8719 if (name && name[0]) {
8720 strncpy(ifp->net->name, name, IFNAMSIZ);
8721 ifp->net->name[IFNAMSIZ - 1] = '\0';
8722 }
8723
8724#ifdef WL_CFG80211
8725 if (ifidx == 0)
8726 ifp->net->destructor = free_netdev;
8727 else
8728 ifp->net->destructor = dhd_netdev_free;
8729#else
8730 ifp->net->destructor = free_netdev;
8731#endif /* WL_CFG80211 */
8732 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
8733 ifp->name[IFNAMSIZ - 1] = '\0';
8734 dhdinfo->iflist[ifidx] = ifp;
8735
8736 /* initialize the dongle provided if name */
8737 if (dngl_name) {
8738 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8739 } else if (name) {
8740 strncpy(ifp->dngl_name, name, IFNAMSIZ);
8741 }
8742
8743#ifdef PCIE_FULL_DONGLE
8744 /* Initialize STA info list */
8745 INIT_LIST_HEAD(&ifp->sta_list);
8746 DHD_IF_STA_LIST_LOCK_INIT(ifp);
8747#endif /* PCIE_FULL_DONGLE */
8748
8749#ifdef DHD_L2_FILTER
8750 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
8751 ifp->parp_allnode = TRUE;
8752#endif /* DHD_L2_FILTER */
8753
8754 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8755
8756#ifdef DHDTCPSYNC_FLOOD_BLK
8757 INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
8758 dhd_reset_tcpsync_info_by_ifp(ifp);
8759#endif /* DHDTCPSYNC_FLOOD_BLK */
8760
8761 return ifp->net;
8762
8763fail:
8764 if (ifp != NULL) {
8765 if (ifp->net != NULL) {
8766#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8767 if (ifp->net == dhdinfo->rx_napi_netdev) {
8768 napi_disable(&dhdinfo->rx_napi_struct);
8769 netif_napi_del(&dhdinfo->rx_napi_struct);
8770 skb_queue_purge(&dhdinfo->rx_napi_queue);
8771 dhdinfo->rx_napi_netdev = NULL;
8772 }
8773#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8774 dhd_dev_priv_clear(ifp->net);
8775 free_netdev(ifp->net);
8776 ifp->net = NULL;
8777 }
8778 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8779 ifp = NULL;
8780 }
8781
8782 dhdinfo->iflist[ifidx] = NULL;
8783 return NULL;
8784}
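/*
 * The allocation above uses the standard etherdev-with-private-area
 * pattern; in miniature (#if 0 sketch with a hypothetical private
 * struct, not the driver's dhd_dev_priv_t):
 */
#if 0
struct example_priv {
	int ifidx;
};

static struct net_device *example_alloc(void)
{
	/* reserves sizeof(struct example_priv) behind the net_device */
	struct net_device *net = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!net)
		return NULL;
	priv = netdev_priv(net);	/* the region DHD_DEV_PRIV() style macros resolve */
	priv->ifidx = 0;
	return net;			/* caller must register it or call free_netdev() */
}
#endif /* 0 - illustrative only */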
8785
8786static void
8787dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
8788{
8789#ifdef PCIE_FULL_DONGLE
8790 s32 ifidx = 0;
8791 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8792#endif /* PCIE_FULL_DONGLE */
8793
8794 if (ifp != NULL) {
8795 if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
8796 DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
8797 ASSERT(0);
8798 return;
8799 }
8800#ifdef DHD_L2_FILTER
8801 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
8802 NULL, FALSE, dhdpub->tickcnt);
8803 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
8804 ifp->phnd_arp_table = NULL;
8805#endif /* DHD_L2_FILTER */
8806
8807 dhd_if_del_sta_list(ifp);
8808#ifdef PCIE_FULL_DONGLE
8809 /* Delete flowrings of virtual interface */
8810 ifidx = ifp->idx;
8811 if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
8812 dhd_flow_rings_delete(dhdp, ifidx);
8813 }
8814#endif /* PCIE_FULL_DONGLE */
8815 }
8816}
8817
8818void
8819dhd_cleanup_if(struct net_device *net)
8820{
8821 dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
8822 dhd_pub_t *dhdp = &dhdinfo->pub;
8823 dhd_if_t *ifp;
8824
8825 if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
8826 (ifp->idx >= DHD_MAX_IFS)) {
8827 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
8828 ASSERT(0);
8829 return;
8830 }
8831
8832 dhd_cleanup_ifp(dhdp, ifp);
8833}
8834
8835/* Unregister and free the net_device interface associated with the indexed
8836 * slot, also free the slot memory and set the slot pointer to NULL
8837 */
8838#define DHD_TX_COMPLETION_TIMEOUT 5000
8839int
8840dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
8841{
8842 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8843 dhd_if_t *ifp;
8844 unsigned long flags;
8845 long timeout;
8846
8847 ifp = dhdinfo->iflist[ifidx];
8848
8849 if (ifp != NULL) {
8850#ifdef DHDTCPSYNC_FLOOD_BLK
8851 cancel_work_sync(&ifp->blk_tsfl_work);
8852#endif /* DHDTCPSYNC_FLOOD_BLK */
8853#ifdef WL_STATIC_IF
8854 /* static IF will be handled in detach */
8855 if (ifp->static_if) {
8856 DHD_TRACE(("Skip del iface for static interface\n"));
8857 return BCME_OK;
8858 }
8859#endif /* WL_STATIC_IF */
8860 if (ifp->net != NULL) {
8861 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
8862
8863 DHD_GENERAL_LOCK(dhdpub, flags);
8864 ifp->del_in_progress = true;
8865 DHD_GENERAL_UNLOCK(dhdpub, flags);
8866
8867 /* If TX is in progress, hold the if del */
8868 if (DHD_IF_IS_TX_ACTIVE(ifp)) {
8869				DHD_INFO(("TX in progress. Waiting for it to complete.\n"));
8870 timeout = wait_event_timeout(dhdpub->tx_completion_wait,
8871 ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
8872 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
8873 if (!timeout) {
8874 /* Tx completion timeout. Attempt proceeding ahead */
8875 DHD_ERROR(("Tx completion timed out!\n"));
8876 ASSERT(0);
8877 }
8878 } else {
8879 DHD_TRACE(("No outstanding TX!\n"));
8880 }
8881 dhdinfo->iflist[ifidx] = NULL;
8882 /* in unregister_netdev case, the interface gets freed by net->destructor
8883 * (which is set to free_netdev)
8884 */
8885 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8886 free_netdev(ifp->net);
8887 } else {
8888 netif_tx_disable(ifp->net);
8889
8890#if defined(SET_RPS_CPUS)
8891 custom_rps_map_clear(ifp->net->_rx);
8892#endif /* SET_RPS_CPUS */
8893#if (defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL))
8894#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
8895 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
8896#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
8897#endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
8898 if (need_rtnl_lock)
8899 unregister_netdev(ifp->net);
8900 else
8901 unregister_netdevice(ifp->net);
8902 }
8903 ifp->net = NULL;
8904 DHD_GENERAL_LOCK(dhdpub, flags);
8905 ifp->del_in_progress = false;
8906 DHD_GENERAL_UNLOCK(dhdpub, flags);
8907 }
8908 dhd_cleanup_ifp(dhdpub, ifp);
8909 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8910
8911 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8912 ifp = NULL;
8913 }
8914
8915 return BCME_OK;
8916}
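/*
 * The TX-drain wait above in isolation (#if 0 sketch; field names as
 * used in this file): wait_event_timeout() returns 0 only if the
 * condition stayed false for the whole interval, so a zero return
 * means TX was still active when the timeout expired.
 */
#if 0
static bool example_wait_tx_drain(dhd_pub_t *dhdp, dhd_if_t *ifp)
{
	long t = wait_event_timeout(dhdp->tx_completion_wait,
		(ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0,
		msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));

	return t != 0;	/* false => timed out with TX still in flight */
}
#endif /* 0 - illustrative only */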
8917
8918#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
8919static struct net_device_ops dhd_ops_pri = {
8920 .ndo_open = dhd_pri_open,
8921 .ndo_stop = dhd_pri_stop,
8922 .ndo_get_stats = dhd_get_stats,
8923#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8924 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8925 .ndo_start_xmit = dhd_start_xmit_wrapper,
8926#else
8927 .ndo_do_ioctl = dhd_ioctl_entry,
8928 .ndo_start_xmit = dhd_start_xmit,
8929#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8930 .ndo_set_mac_address = dhd_set_mac_address,
8931#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8932 .ndo_set_rx_mode = dhd_set_multicast_list,
8933#else
8934 .ndo_set_multicast_list = dhd_set_multicast_list,
8935#endif // endif
8936};
8937
8938static struct net_device_ops dhd_ops_virt = {
8939#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8940 .ndo_open = dhd_static_if_open,
8941 .ndo_stop = dhd_static_if_stop,
8942#endif // endif
8943 .ndo_get_stats = dhd_get_stats,
8944#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8945 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8946 .ndo_start_xmit = dhd_start_xmit_wrapper,
8947#else
8948 .ndo_do_ioctl = dhd_ioctl_entry,
8949 .ndo_start_xmit = dhd_start_xmit,
8950#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8951 .ndo_set_mac_address = dhd_set_mac_address,
8952#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8953 .ndo_set_rx_mode = dhd_set_multicast_list,
8954#else
8955 .ndo_set_multicast_list = dhd_set_multicast_list,
8956#endif // endif
8957};
8958#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
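/*
 * These ops tables are attached in the usual way before registration;
 * a sketch of the wiring (#if 0; in this driver the actual attach is
 * presumably done by dhd_register_if, outside this excerpt):
 */
#if 0
static int example_register(struct net_device *net, bool primary)
{
	net->netdev_ops = primary ? &dhd_ops_pri : &dhd_ops_virt;
	return register_netdev(net);	/* takes rtnl_lock internally */
}
#endif /* 0 - illustrative only */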
8959
8960int
8961dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
8962 unsigned long buflen)
8963{
8964 loff_t wr_posn = *posn;
8965
8966 if (!fp || !buf || buflen == 0)
8967 return -1;
8968
8969 if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
8970 return -1;
8971
8972 *posn = wr_posn;
8973 return 0;
8974}
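/*
 * Callers of the helper above are expected to hold a KERNEL_DS segment
 * override while passing kernel buffers to vfs_write(), as the
 * SHOW_LOGTRACE readers below do for vfs_read(); an #if 0 sketch of a
 * full open/write/close cycle under that legacy (pre-5.10) API:
 */
#if 0
static int example_write_file(const char *path, void *buf, unsigned long len)
{
	struct file *fp;
	unsigned long pos = 0;
	mm_segment_t fs = get_fs();
	int err;

	set_fs(KERNEL_DS);		/* allow kernel buffers in vfs_write() */
	fp = filp_open(path, O_WRONLY | O_CREAT, 0664);
	if (IS_ERR(fp)) {
		set_fs(fs);
		return PTR_ERR(fp);
	}
	err = dhd_os_write_file_posn(fp, &pos, buf, len);	/* 0 on success, -1 on error */
	filp_close(fp, NULL);
	set_fs(fs);			/* always restore the old segment */
	return err;
}
#endif /* 0 - illustrative only */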
8975
8976#ifdef SHOW_LOGTRACE
8977int
8978dhd_os_read_file(void *file, char *buf, uint32 size)
8979{
8980 struct file *filep = (struct file *)file;
8981
8982 if (!file || !buf)
8983 return -1;
8984
8985 return vfs_read(filep, buf, size, &filep->f_pos);
8986}
8987
8988int
8989dhd_os_seek_file(void *file, int64 offset)
8990{
8991 struct file *filep = (struct file *)file;
8992 if (!file)
8993 return -1;
8994
8995 /* offset can be -ve */
8996 filep->f_pos = filep->f_pos + offset;
8997
8998 return 0;
8999}
9000
9001static int
9002dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
9003{
9004 struct file *filep = NULL;
9005 struct kstat stat;
9006 mm_segment_t fs;
9007 char *raw_fmts = NULL;
9008 int logstrs_size = 0;
9009 int error = 0;
9010
9011 fs = get_fs();
9012 set_fs(KERNEL_DS);
9013
9014 filep = filp_open(logstrs_path, O_RDONLY, 0);
9015
9016 if (IS_ERR(filep)) {
9017 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
9018 goto fail;
9019 }
9020 error = vfs_stat(logstrs_path, &stat);
9021 if (error) {
9022 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
9023 goto fail;
9024 }
9025 logstrs_size = (int) stat.size;
9026
9027 if (logstrs_size == 0) {
9028 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9029 goto fail1;
9030 }
9031
9032 raw_fmts = MALLOC(osh, logstrs_size);
9033 if (raw_fmts == NULL) {
9034 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
9035 goto fail;
9036 }
9037
9038 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
9039 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
9040 goto fail;
9041 }
9042
9043 if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
9044 == BCME_OK) {
9045 filp_close(filep, NULL);
9046 set_fs(fs);
9047 return BCME_OK;
9048 }
9049
9050fail:
9051 if (raw_fmts) {
9052 MFREE(osh, raw_fmts, logstrs_size);
9053 raw_fmts = NULL;
9054 }
9055
9056fail1:
9057 if (!IS_ERR(filep))
9058 filp_close(filep, NULL);
9059
9060 set_fs(fs);
9061 temp->fmts = NULL;
9062 return BCME_ERROR;
9063}
9064
9065static int
9066dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
9067 uint32 *rodata_end)
9068{
9069 struct file *filep = NULL;
9070 mm_segment_t fs;
9071 int err = BCME_ERROR;
9072
9073 if (fname == NULL) {
9074 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
9075 return BCME_ERROR;
9076 }
9077
9078 fs = get_fs();
9079 set_fs(KERNEL_DS);
9080
9081 filep = filp_open(fname, O_RDONLY, 0);
9082 if (IS_ERR(filep)) {
9083 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
9084 goto fail;
9085 }
9086
9087 if ((err = dhd_parse_map_file(osh, filep, ramstart,
9088 rodata_start, rodata_end)) < 0)
9089 goto fail;
9090
9091fail:
9092 if (!IS_ERR(filep))
9093 filp_close(filep, NULL);
9094
9095 set_fs(fs);
9096
9097 return err;
9098}
9099
9100static int
9101dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
9102{
9103 struct file *filep = NULL;
9104 mm_segment_t fs;
9105 char *raw_fmts = NULL;
9106 uint32 logstrs_size = 0;
9107 int error = 0;
9108 uint32 ramstart = 0;
9109 uint32 rodata_start = 0;
9110 uint32 rodata_end = 0;
9111 uint32 logfilebase = 0;
9112
9113 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
9114 if (error != BCME_OK) {
9115 DHD_ERROR(("readmap Error!! \n"));
9116		/* skip event log parsing when the map file cannot be read */
9117 if (strstr(str_file, ram_file_str) != NULL) {
9118 temp->raw_sstr = NULL;
9119 } else if (strstr(str_file, rom_file_str) != NULL) {
9120 temp->rom_raw_sstr = NULL;
9121 }
9122 return error;
9123 }
9124 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9125 ramstart, rodata_start, rodata_end));
9126
9127 fs = get_fs();
9128 set_fs(KERNEL_DS);
9129
9130 filep = filp_open(str_file, O_RDONLY, 0);
9131 if (IS_ERR(filep)) {
9132 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
9133 goto fail;
9134 }
9135
9136	/* Full file size is huge. Just read the required part */
9137	logstrs_size = rodata_end - rodata_start;
9138	logfilebase = rodata_start - ramstart;
9141
9142 if (logstrs_size == 0) {
9143 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9144 goto fail1;
9145 }
9146
9147 raw_fmts = MALLOC(osh, logstrs_size);
9148 if (raw_fmts == NULL) {
9149 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
9150 goto fail;
9151 }
9152
9153	error = generic_file_llseek(filep, logfilebase, SEEK_SET);
9154	if (error < 0) {
9155		DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
9156		goto fail;
9157	}
9160
9161 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
9162 if (error != logstrs_size) {
9163 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
9164 goto fail;
9165 }
9166
9167 if (strstr(str_file, ram_file_str) != NULL) {
9168 temp->raw_sstr = raw_fmts;
9169 temp->raw_sstr_size = logstrs_size;
9170 temp->rodata_start = rodata_start;
9171 temp->rodata_end = rodata_end;
9172 } else if (strstr(str_file, rom_file_str) != NULL) {
9173 temp->rom_raw_sstr = raw_fmts;
9174 temp->rom_raw_sstr_size = logstrs_size;
9175 temp->rom_rodata_start = rodata_start;
9176 temp->rom_rodata_end = rodata_end;
9177 }
9178
9179 filp_close(filep, NULL);
9180 set_fs(fs);
9181
9182 return BCME_OK;
9183
9184fail:
9185 if (raw_fmts) {
9186 MFREE(osh, raw_fmts, logstrs_size);
9187 raw_fmts = NULL;
9188 }
9189
9190fail1:
9191 if (!IS_ERR(filep))
9192 filp_close(filep, NULL);
9193
9194 set_fs(fs);
9195
9196 if (strstr(str_file, ram_file_str) != NULL) {
9197 temp->raw_sstr = NULL;
9198 } else if (strstr(str_file, rom_file_str) != NULL) {
9199 temp->rom_raw_sstr = NULL;
9200 }
9201
9202 return error;
9203} /* dhd_init_static_strs_array */
9204
9205static int
9206dhd_trace_open_proc(struct inode *inode, struct file *file)
9207{
9208	return single_open(file, NULL, NULL);
9209}
9210
9211ssize_t
9212dhd_trace_read_proc(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
9213{
9214 trace_buf_info_t *trace_buf_info;
9215 int ret = BCME_ERROR;
9216
9217 ASSERT(g_dhd_pub);
9218 mutex_lock(&g_dhd_pub->dhd_trace_lock);
9219 trace_buf_info = (trace_buf_info_t *)MALLOC(g_dhd_pub->osh,
9220 sizeof(trace_buf_info_t));
9221 if (trace_buf_info) {
9222 dhd_get_read_buf_ptr(g_dhd_pub, trace_buf_info);
9223 if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
9224 {
9225 ret = -EFAULT;
9226 goto exit;
9227 }
9228 if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
9229 ret = BUF_NOT_AVAILABLE;
9230 else
9231 ret = trace_buf_info->size;
9232 } else
9233 DHD_ERROR(("Memory allocation Failed\n"));
9234
9235exit:
9236 if (trace_buf_info) {
9237 MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
9238 }
9239 mutex_unlock(&g_dhd_pub->dhd_trace_lock);
9240 return ret;
9241}
9242#endif /* SHOW_LOGTRACE */
9243
9244#ifdef DHD_ERPOM
9245uint enable_erpom = 0;
9246module_param(enable_erpom, uint, 0);
9247
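/* These two handlers are registered with the platform power on/off manager
 * (POM) further down in dhd_attach() when enable_erpom is set. On a power-off
 * request coming from the BT side they dump WLAN state (SSSR/core dump) while
 * it is still available, pause the tx queues, and reset the bus with
 * dongle_isolation held so the device reset skips a second FLR; power-on
 * mirrors this by re-initializing the bus and restarting the queues.
 */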
9248int
9249dhd_wlan_power_off_handler(void *handler, unsigned char reason)
9250{
9251 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
9252 bool dongle_isolation = dhdp->dongle_isolation;
9253
9254 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
9255
9256 if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
9257#if defined(DHD_FW_COREDUMP)
9258 /* save core dump to a file */
9259 if (dhdp->memdump_enabled) {
9260#ifdef DHD_SSSR_DUMP
9261 if (dhdp->sssr_inited) {
9262 dhdp->info->no_wq_sssrdump = TRUE;
9263 dhd_bus_sssr_dump(dhdp);
9264 dhdp->info->no_wq_sssrdump = FALSE;
9265 }
9266#endif /* DHD_SSSR_DUMP */
9267 dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
9268 dhd_bus_mem_dump(dhdp);
9269 }
9270#endif /* DHD_FW_COREDUMP */
9271 }
9272
9273 /* pause data on all the interfaces */
9274 dhd_bus_stop_queue(dhdp->bus);
9275
9276 /* dhd_bus_devreset() would perform FLR again; set dongle_isolation to skip it */
9277 dhdp->dongle_isolation = TRUE;
9278 dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
9279 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
9280 return 0;
9281}
9282
9283int
9284dhd_wlan_power_on_handler(void *handler, unsigned char reason)
9285{
9286 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
9287 bool dongle_isolation = dhdp->dongle_isolation;
9288
9289 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
9290 /* dhd_bus_devreset() would perform FLR again; set dongle_isolation to skip it */
9291 dhdp->dongle_isolation = TRUE;
9292 dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
9293 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
9294 /* resume data on all the interfaces */
9295 dhd_bus_start_queue(dhdp->bus);
9296 return 0;
9297
9298}
9299
9300#endif /* DHD_ERPOM */
9301/** Called once for each hardware (dongle) instance that this DHD manages */
9302dhd_pub_t *
9303dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
9304{
9305 dhd_info_t *dhd = NULL;
9306 struct net_device *net = NULL;
9307 char if_name[IFNAMSIZ] = {'\0'};
9308 uint32 bus_type = -1;
9309 uint32 bus_num = -1;
9310 uint32 slot_num = -1;
9311#ifdef SHOW_LOGTRACE
9312 int ret;
9313#endif /* SHOW_LOGTRACE */
9314#ifdef DHD_ERPOM
9315 pom_func_handler_t *pom_handler;
9316#endif /* DHD_ERPOM */
9317 wifi_adapter_info_t *adapter = NULL;
9318
9319 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
9320 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9321
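 /* Rough shape of dhd_attach(): allocate dhd_info, create the primary
  * net_device, attach the protocol layer and cfg80211 (or wext), spawn the
  * watchdog/DPC/RXF threads, then register the PM and inetaddr notifiers.
  * Each completed phase is recorded in dhd_state so a failure can be unwound
  * through dhd_detach() from the fail: label at the bottom.
  */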
9322#ifdef PCIE_FULL_DONGLE
9323 ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
9324 ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
9325#endif /* PCIE_FULL_DONGLE */
9326
9327 /* will implement get_ids for DBUS later */
9328#if defined(BCMSDIO)
9329 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
9330#endif // endif
9331 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
9332
9333 /* Allocate primary dhd_info */
9334 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
9335 if (dhd == NULL) {
9336 dhd = MALLOC(osh, sizeof(dhd_info_t));
9337 if (dhd == NULL) {
9338 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
9339 goto dhd_null_flag;
9340 }
9341 }
9342 memset(dhd, 0, sizeof(dhd_info_t));
9343 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
9344
9345 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
9346
9347 dhd->pub.osh = osh;
9348#ifdef DUMP_IOCTL_IOV_LIST
9349 dll_init(&(dhd->pub.dump_iovlist_head));
9350#endif /* DUMP_IOCTL_IOV_LIST */
9351 dhd->adapter = adapter;
9352#ifdef BT_OVER_SDIO
9353 dhd->pub.is_bt_recovery_required = FALSE;
9354 mutex_init(&dhd->bus_user_lock);
9355#endif /* BT_OVER_SDIO */
9356
9357#ifdef DHD_DEBUG
9358 dll_init(&(dhd->pub.mw_list_head));
9359#endif /* DHD_DEBUG */
9360
9361#ifdef GET_CUSTOM_MAC_ENABLE
9362 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
9363#endif /* GET_CUSTOM_MAC_ENABLE */
9364#ifdef CUSTOM_FORCE_NODFS_FLAG
9365 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
9366 dhd->pub.force_country_change = TRUE;
9367#endif /* CUSTOM_FORCE_NODFS_FLAG */
9368#ifdef CUSTOM_COUNTRY_CODE
9369 get_customized_country_code(dhd->adapter,
9370 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
9371 dhd->pub.dhd_cflags);
9372#endif /* CUSTOM_COUNTRY_CODE */
9373 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
9374 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
9375#ifdef DHD_WET
9376 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
9377#endif /* DHD_WET */
9378 /* Initialize thread based operation and lock */
9379 sema_init(&dhd->sdsem, 1);
9380
9381 /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware
9382 * name. This is indeed a hack, but we have to make it work properly until we have a
9383 * better solution.
9384 */
9385 dhd_update_fw_nv_path(dhd);
9386 dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
9387
9388 /* Link to info module */
9389 dhd->pub.info = dhd;
9390
9391 /* Link to bus module */
9392 dhd->pub.bus = bus;
9393 dhd->pub.hdrlen = bus_hdrlen;
9394
9395 /* Set network interface name if it was provided as module parameter */
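 /* If the supplied name does not already end in a digit, a "%d" template is
  * appended (when there is room) so the kernel assigns the unit number when
  * the interface is registered; dev_alloc_name() expands the "%d".
  */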
9396 if (iface_name[0]) {
9397 int len;
9398 char ch;
9399 strncpy(if_name, iface_name, IFNAMSIZ);
9400 if_name[IFNAMSIZ - 1] = 0;
9401 len = strlen(if_name);
9402 ch = if_name[len - 1];
9403 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
9404 strncat(if_name, "%d", 2);
9405 }
9406
9407 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
9408 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
9409 if (net == NULL) {
9410 goto fail;
9411 }
9412 mutex_init(&dhd->pub.ndev_op_sync);
9413#if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
9414 /* Init ARGOS notifier data */
9415 argos_wifi.notifier_call = NULL;
9416 argos_p2p.notifier_call = NULL;
9417#endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
9418
9419 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
9420#ifdef DHD_L2_FILTER
9421 /* initialize the l2_filter_cnt */
9422 dhd->pub.l2_filter_cnt = 0;
9423#endif // endif
9424#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9425 net->open = NULL;
9426#else
9427 net->netdev_ops = NULL;
9428#endif // endif
9429
9430 mutex_init(&dhd->dhd_iovar_mutex);
9431 sema_init(&dhd->proto_sem, 1);
9432#ifdef DHD_ULP
9433 if (!(dhd_ulp_init(osh, &dhd->pub)))
9434 goto fail;
9435#endif /* DHD_ULP */
9436
9437#if defined(DHD_HANG_SEND_UP_TEST)
9438 dhd->pub.req_hang_type = 0;
9439#endif /* DHD_HANG_SEND_UP_TEST */
9440
9441#ifdef PROP_TXSTATUS
9442 spin_lock_init(&dhd->wlfc_spinlock);
9443
9444 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
9445 dhd->pub.plat_init = dhd_wlfc_plat_init;
9446 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
9447
9448#ifdef DHD_WLFC_THREAD
9449 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
9450 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
9451 if (IS_ERR(dhd->pub.wlfc_thread)) {
9452 DHD_ERROR(("create wlfc thread failed\n"));
9453 goto fail;
9454 } else {
9455 wake_up_process(dhd->pub.wlfc_thread);
9456 }
9457#endif /* DHD_WLFC_THREAD */
9458#endif /* PROP_TXSTATUS */
9459
9460 /* Initialize other structure content */
9461 init_waitqueue_head(&dhd->ioctl_resp_wait);
9462 init_waitqueue_head(&dhd->d3ack_wait);
9463 init_waitqueue_head(&dhd->ctrl_wait);
9464 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
9465 init_waitqueue_head(&dhd->dmaxfer_wait);
9466 init_waitqueue_head(&dhd->pub.tx_completion_wait);
9467 dhd->pub.dhd_bus_busy_state = 0;
9468
9469 /* Initialize the spinlocks */
9470 spin_lock_init(&dhd->sdlock);
9471 spin_lock_init(&dhd->txqlock);
9472 spin_lock_init(&dhd->dhd_lock);
9473 spin_lock_init(&dhd->rxf_lock);
9474#ifdef WLTDLS
9475 spin_lock_init(&dhd->pub.tdls_lock);
9476#endif /* WLTDLS */
9477#if defined(RXFRAME_THREAD)
9478 dhd->rxthread_enabled = TRUE;
9479#endif /* defined(RXFRAME_THREAD) */
9480
9481#ifdef DHDTCPACK_SUPPRESS
9482 spin_lock_init(&dhd->tcpack_lock);
9483#endif /* DHDTCPACK_SUPPRESS */
9484
9485 /* Initialize Wakelock stuff */
9486 spin_lock_init(&dhd->wakelock_spinlock);
9487 spin_lock_init(&dhd->wakelock_evt_spinlock);
9488 DHD_OS_WAKE_LOCK_INIT(dhd);
9489 dhd->wakelock_counter = 0;
9490 /* wakelocks prevent a system from going into a low power state */
9491#ifdef CONFIG_HAS_WAKELOCK
9492 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
9493#endif /* CONFIG_HAS_WAKELOCK */
9494
9495#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
9496 mutex_init(&dhd->dhd_net_if_mutex);
9497 mutex_init(&dhd->dhd_suspend_mutex);
9498#if defined(PKT_FILTER_SUPPORT) && defined(APF)
9499 mutex_init(&dhd->dhd_apf_mutex);
9500#endif /* PKT_FILTER_SUPPORT && APF */
9501#endif // endif
9502 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
9503
9504 /* Attach and link in the protocol */
9505 if (dhd_prot_attach(&dhd->pub) != 0) {
9506 DHD_ERROR(("dhd_prot_attach failed\n"));
9507 goto fail;
9508 }
9509 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
9510
9511#ifdef WL_CFG80211
9512 spin_lock_init(&dhd->pub.up_lock);
9513 /* Attach and link in the cfg80211 */
9514 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
9515 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9516 goto fail;
9517 }
9518
9519 dhd_monitor_init(&dhd->pub);
9520 dhd_state |= DHD_ATTACH_STATE_CFG80211;
9521#endif // endif
9522
9523#if defined(WL_WIRELESS_EXT)
9524 /* Attach and link in the iw */
9525 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
9526 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
9527 DHD_ERROR(("wl_iw_attach failed\n"));
9528 goto fail;
9529 }
9530 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
9531 }
9532#endif /* defined(WL_WIRELESS_EXT) */
9533
9534#ifdef SHOW_LOGTRACE
9535 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
9536 if (ret == BCME_OK) {
9537 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
9538 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
9539 rom_map_file_path);
9540 dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
9541 }
9542#endif /* SHOW_LOGTRACE */
9543
9544#ifdef DEBUGABILITY
9545 /* attach debugability support */
9546 if (dhd_os_dbg_attach(&dhd->pub)) {
9547 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
9548 goto fail;
9549 }
9550
9551#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
9552 /* enable verbose ring to support dump_trace_buf */
9553 dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
9554#endif /* SHOW_LOGTRACE && DBG_RING_LOG_INIT_DEFAULT */
9555
9556#ifdef DBG_PKT_MON
9557 dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
9558#ifdef DBG_PKT_MON_INIT_DEFAULT
9559 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
9560#endif /* DBG_PKT_MON_INIT_DEFAULT */
9561#endif /* DBG_PKT_MON */
9562#endif /* DEBUGABILITY */
9563
9564#ifdef DHD_LOG_DUMP
9565 dhd_log_dump_init(&dhd->pub);
9566#endif /* DHD_LOG_DUMP */
9567
9568#ifdef DHD_PKT_LOGGING
9569 dhd_os_attach_pktlog(&dhd->pub);
9570#endif /* DHD_PKT_LOGGING */
9571#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9572 dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
9573 if (dhd->pub.hang_info == NULL) {
9574 DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
9575 }
9576#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
9577 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
9578 DHD_ERROR(("%s: dhd_sta_pool_init(%u) failed\n", __FUNCTION__, DHD_MAX_STA));
9579 goto fail;
9580 }
9581
9582#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9583 dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
9584 if (!dhd->tx_wq) {
9585 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
9586 goto fail;
9587 }
9588 dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
9589 if (!dhd->rx_wq) {
9590 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
9591 destroy_workqueue(dhd->tx_wq);
9592 dhd->tx_wq = NULL;
9593 goto fail;
9594 }
9595#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9596
9597 /* Set up the watchdog timer */
9598 init_timer(&dhd->timer);
9599 dhd->timer.data = (ulong)dhd;
9600 dhd->timer.function = dhd_watchdog;
9601 dhd->default_wd_interval = dhd_watchdog_ms;
9602
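 /* The watchdog runs either in a dedicated thread (when dhd_watchdog_prio is
  * a usable priority) or from the timer callback alone (thr_pid stays -1);
  * in both cases dhd->timer paces it at dhd_watchdog_ms intervals.
  */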
9603 if (dhd_watchdog_prio >= 0) {
9604 /* Initialize watchdog thread */
9605 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
9606 if (dhd->thr_wdt_ctl.thr_pid < 0) {
9607 goto fail;
9608 }
9609
9610 } else {
9611 dhd->thr_wdt_ctl.thr_pid = -1;
9612 }
9613
9614#ifdef DHD_PCIE_RUNTIMEPM
9615 /* Set up the runtime PM idle-count timer */
9616 init_timer(&dhd->rpm_timer);
9617 dhd->rpm_timer.data = (ulong)dhd;
9618 dhd->rpm_timer.function = dhd_runtimepm;
9619 dhd->rpm_timer_valid = FALSE;
9620
9621 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
9622 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
9623 if (dhd->thr_rpm_ctl.thr_pid < 0) {
9624 goto fail;
9625 }
9626#endif /* DHD_PCIE_RUNTIMEPM */
9627
9628#ifdef SHOW_LOGTRACE
9629 skb_queue_head_init(&dhd->evt_trace_queue);
9630 if (proc_create("dhd_trace", S_IRUSR, NULL, &proc_file_fops) == NULL)
9631 DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
9632 mutex_init(&dhd->pub.dhd_trace_lock);
9633#endif /* SHOW_LOGTRACE */
9634
9635 /* Set up the bottom half handler */
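 /* Like the watchdog, the DPC bottom half is either a real-time thread
  * (dhd_dpc_prio >= 0) or a tasklet; in the tasklet case the ARGOS block
  * below may additionally pin the bus interrupt and tasklet to specific
  * CPU cores.
  */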
9636 if (dhd_dpc_prio >= 0) {
9637 /* Initialize DPC thread */
9638 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
9639 if (dhd->thr_dpc_ctl.thr_pid < 0) {
9640 goto fail;
9641 }
9642 } else {
9643#if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_DPC_TASKLET_CTL) && \
9644 !defined(DHD_LB_IRQSET)
9645 if (!zalloc_cpumask_var(&dhd->pub.default_cpu_mask, GFP_KERNEL)) {
9646 DHD_ERROR(("dpc tasklet, zalloc_cpumask_var error\n"));
9647 dhd->pub.affinity_isdpc = FALSE;
9648 } else {
9649 if (!zalloc_cpumask_var(&dhd->pub.dpc_affinity_cpu_mask, GFP_KERNEL)) {
9650 DHD_ERROR(("dpc thread, dpc_affinity_cpu_mask error\n"));
9651 free_cpumask_var(dhd->pub.default_cpu_mask);
9652 dhd->pub.affinity_isdpc = FALSE;
9653 } else {
9654 unsigned int irq = -1;
9655#ifdef BCMPCIE
9656 if (dhdpcie_get_pcieirq(bus, &irq)) {
9657 DHD_ERROR(("%s : Can't get interrupt number\n",
9658 __FUNCTION__));
9659 goto fail;
9660 }
9661#endif /* BCMPCIE */
9662#ifdef BCMSDIO
9663 irq = adapter->irq_num;
9664#endif /* BCMSDIO */
9665
9666 cpumask_copy(dhd->pub.default_cpu_mask, &hmp_slow_cpu_mask);
9667 cpumask_or(dhd->pub.dpc_affinity_cpu_mask,
9668 dhd->pub.dpc_affinity_cpu_mask,
9669 cpumask_of(TASKLET_CPUCORE));
9670
9671 set_irq_cpucore(irq, dhd->pub.default_cpu_mask,
9672 dhd->pub.dpc_affinity_cpu_mask);
9673 dhd->pub.affinity_isdpc = TRUE;
9674 }
9675 }
9676#endif /* ARGOS_CPU_SCHEDULER && ARGOS_DPC_TASKLET_CTL && !DHD_LB_IRQSET */
9677 /* use tasklet for dpc */
9678 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
9679 dhd->thr_dpc_ctl.thr_pid = -1;
9680 }
9681
9682 if (dhd->rxthread_enabled) {
9683 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
9684 /* Initialize RXF thread */
9685 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
9686 if (dhd->thr_rxf_ctl.thr_pid < 0) {
9687 goto fail;
9688 }
9689 }
9690
9691 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
9692
9693#if defined(CONFIG_PM_SLEEP)
9694 if (!dhd_pm_notifier_registered) {
9695 dhd_pm_notifier_registered = TRUE;
9696 dhd->pm_notifier.notifier_call = dhd_pm_callback;
9697 dhd->pm_notifier.priority = 10;
9698 register_pm_notifier(&dhd->pm_notifier);
9699 }
9700
9701#endif /* CONFIG_PM_SLEEP */
9702
9703#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9704 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
9705 dhd->early_suspend.suspend = dhd_early_suspend;
9706 dhd->early_suspend.resume = dhd_late_resume;
9707 register_early_suspend(&dhd->early_suspend);
9708 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
9709#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9710
9711#ifdef ARP_OFFLOAD_SUPPORT
9712 dhd->pend_ipaddr = 0;
9713 if (!dhd_inetaddr_notifier_registered) {
9714 dhd_inetaddr_notifier_registered = TRUE;
9715 register_inetaddr_notifier(&dhd_inetaddr_notifier);
9716 }
9717#endif /* ARP_OFFLOAD_SUPPORT */
9718
9719#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9720 if (!dhd_inet6addr_notifier_registered) {
9721 dhd_inet6addr_notifier_registered = TRUE;
9722 register_inet6addr_notifier(&dhd_inet6addr_notifier);
9723 }
9724#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9725 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
9726#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9727 INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
9728#endif // endif
9729#ifdef DEBUG_CPU_FREQ
9730 dhd->new_freq = alloc_percpu(int);
9731 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
9732 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9733#endif // endif
9734#ifdef DHDTCPACK_SUPPRESS
9735#ifdef BCMSDIO
9736 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
9737#elif defined(BCMPCIE)
9738 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
9739#else
9740 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
9741#endif /* BCMSDIO */
9742#endif /* DHDTCPACK_SUPPRESS */
9743
9744#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9745#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9746
9747#ifdef DHD_DEBUG_PAGEALLOC
9748 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
9749#endif /* DHD_DEBUG_PAGEALLOC */
9750
9751#if defined(DHD_LB)
9752
9753 dhd_lb_set_default_cpus(dhd);
9754
9755 /* Initialize the CPU Masks */
9756 if (dhd_cpumasks_init(dhd) == 0) {
9757 /* Now we have the current CPU maps, run through candidacy */
9758 dhd_select_cpu_candidacy(dhd);
9759 /*
9760 * If we are able to initialize CPU masks, register with the
9761 * CPU hotplug framework so the CPU for each job can be changed
9762 * dynamically using the candidacy algorithm.
9763 */
9764 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
9765 register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
9766 } else {
9767 /*
9768 * We are unable to initialize CPU masks, so the candidacy algorithm
9769 * won't run; load balancing will still be honoured based on the
9770 * CPUs statically allocated to each job at init time.
9771 */
9772 dhd->cpu_notifier.notifier_call = NULL;
9773 DHD_ERROR(("%s(): dhd_cpumasks_init failed; CPU assignment will be static\n",
9774 __FUNCTION__));
9775 }
9776
9777#ifdef DHD_LB_TXP
9778#ifdef DHD_LB_TXP_DEFAULT_ENAB
9779 /* Turn ON the feature by default */
9780 atomic_set(&dhd->lb_txp_active, 1);
9781#else
9782 /* Turn OFF the feature by default */
9783 atomic_set(&dhd->lb_txp_active, 0);
9784#endif /* DHD_LB_TXP_DEFAULT_ENAB */
9785#endif /* DHD_LB_TXP */
9786
9787 DHD_LB_STATS_INIT(&dhd->pub);
9788
9789 /* Initialize the Load Balancing Tasklets and Napi object */
9790#if defined(DHD_LB_TXC)
9791 tasklet_init(&dhd->tx_compl_tasklet,
9792 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
9793 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
9794 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
9795#endif /* DHD_LB_TXC */
9796
9797#if defined(DHD_LB_RXC)
9798 tasklet_init(&dhd->rx_compl_tasklet,
9799 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
9800 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
9801 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
9802#endif /* DHD_LB_RXC */
9803
9804#if defined(DHD_LB_RXP)
9805 __skb_queue_head_init(&dhd->rx_pend_queue);
9806 skb_queue_head_init(&dhd->rx_napi_queue);
9807 /* Initialize the work that dispatches NAPI job to a given core */
9808 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
9809 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
9810#endif /* DHD_LB_RXP */
9811
9812#if defined(DHD_LB_TXP)
9813 INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
9814 skb_queue_head_init(&dhd->tx_pend_queue);
9815 /* Initialize the work that dispatches TX job to a given core */
9816 tasklet_init(&dhd->tx_tasklet,
9817 dhd_lb_tx_handler, (ulong)(dhd));
9818 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
9819#endif /* DHD_LB_TXP */
9820
9821 dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
9822#endif /* DHD_LB */
9823
9824#if defined(BCMPCIE)
9825 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
9826 if (dhd->pub.extended_trap_data == NULL) {
9827 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
9828 }
9829#endif /* BCMPCIE */
9830
9831#ifdef SHOW_LOGTRACE
9832 INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
9833#endif /* SHOW_LOGTRACE */
9834
9835 DHD_INFO(("%s: sssr mempool init\n", __FUNCTION__));
9836 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
9837
9838 (void)dhd_sysfs_init(dhd);
9839
9840#ifdef WL_NATOE
9841 /* Open Netlink socket for NF_CONNTRACK notifications */
9842 dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
9843 CT_ALL);
9844#endif /* WL_NATOE */
9845
9846 dhd_state |= DHD_ATTACH_STATE_DONE;
9847 dhd->dhd_state = dhd_state;
9848
9849 dhd_found++;
9850
9851 g_dhd_pub = &dhd->pub;
9852
9853#ifdef DHD_DUMP_MNGR
9854 dhd->pub.dump_file_manage =
9855 (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
9856 if (unlikely(!dhd->pub.dump_file_manage)) {
9857 DHD_ERROR(("%s(): could not allocate memory for "
9858 "dhd_dump_file_manage_t\n", __FUNCTION__));
9859 }
9860#endif /* DHD_DUMP_MNGR */
9861#ifdef DHD_FW_COREDUMP
9862 /* Set memdump default values */
9863#ifdef CUSTOMER_HW4_DEBUG
9864 dhd->pub.memdump_enabled = DUMP_DISABLED;
9865#else
9866 dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
9867#endif /* CUSTOMER_HW4_DEBUG */
9868 /* Check the memdump capability */
9869 dhd_get_memdump_info(&dhd->pub);
9870#endif /* DHD_FW_COREDUMP */
9871
9872#ifdef DHD_ERPOM
9873 if (enable_erpom) {
9874 pom_handler = &dhd->pub.pom_wlan_handler;
9875 pom_handler->func_id = WLAN_FUNC_ID;
9876 pom_handler->handler = (void *)g_dhd_pub;
9877 pom_handler->power_off = dhd_wlan_power_off_handler;
9878 pom_handler->power_on = dhd_wlan_power_on_handler;
9879
9880 dhd->pub.pom_func_register = NULL;
9881 dhd->pub.pom_func_deregister = NULL;
9882 dhd->pub.pom_toggle_reg_on = NULL;
9883
9884 dhd->pub.pom_func_register = symbol_get(pom_func_register);
9885 dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
9886 dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
9887
9888 symbol_put(pom_func_register);
9889 symbol_put(pom_func_deregister);
9890 symbol_put(pom_toggle_reg_on);
9891
9892 if (!dhd->pub.pom_func_register ||
9893 !dhd->pub.pom_func_deregister ||
9894 !dhd->pub.pom_toggle_reg_on) {
9895 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
9896 "POM is not loaded\n", __FUNCTION__));
9897 ASSERT(0);
9898 goto fail;
9899 }
9900 dhd->pub.pom_func_register(pom_handler);
9901 dhd->pub.enable_erpom = TRUE;
9902
9903 }
9904#endif /* DHD_ERPOM */
9905 return &dhd->pub;
9906
9907fail:
9908 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
9909 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9910 __FUNCTION__, dhd_state, &dhd->pub));
9911 dhd->dhd_state = dhd_state;
9912 dhd_detach(&dhd->pub);
9913 dhd_free(&dhd->pub);
9914 }
9915
9916dhd_null_flag:
9917 return NULL;
9918}
9919
9920int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9921{
9922 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9923 return DHD_FLAG_HOSTAP_MODE;
9924 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9925 return DHD_FLAG_P2P_MODE;
9926 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9927 return DHD_FLAG_IBSS_MODE;
9928 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9929 return DHD_FLAG_MFG_MODE;
9930
9931 return DHD_FLAG_STA_MODE;
9932}
9933
9934int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9935{
9936 return dhd_get_fw_mode(dhdp->info);
9937}
9938
9939extern char * nvram_get(const char *name);
9940bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
9941{
9942 int fw_len;
9943 int nv_len;
9944 const char *fw = NULL;
9945 const char *nv = NULL;
9946#ifdef DHD_UCODE_DOWNLOAD
9947 int uc_len;
9948 const char *uc = NULL;
9949#endif /* DHD_UCODE_DOWNLOAD */
9950 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9951 int fw_path_len = sizeof(dhdinfo->fw_path);
9952 int nv_path_len = sizeof(dhdinfo->nv_path);
9953
9954 /* Update the firmware and nvram paths. A path may come from adapter info or from a
9955 * module parameter; the adapter-info path is used for initialization only (it won't change).
9956 *
9957 * The firmware_path/nvram_path module parameters may be changed by the system at run
9958 * time. When one changes we need to copy it to dhdinfo->fw_path. An Android private
9959 * command may also change dhdinfo->fw_path. As such we clear the path info in the
9960 * module parameter after it is copied, and won't update the path until the module
9961 * parameter is changed again (first character is not '\0').
9962 */
9963
9964 /* set default firmware and nvram path for built-in type driver */
9965 if (!dhd_download_fw_on_driverload) {
9966#ifdef CONFIG_BCMDHD_FW_PATH
9967 fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
9968#endif /* CONFIG_BCMDHD_FW_PATH */
9969#ifdef CONFIG_BCMDHD_NVRAM_PATH
9970 nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
9971#endif /* CONFIG_BCMDHD_NVRAM_PATH */
9972 }
9973
9974 /* check if we need to initialize the path */
9975 if (dhdinfo->fw_path[0] == '\0') {
9976 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9977 fw = adapter->fw_path;
9978 }
9979 if (dhdinfo->nv_path[0] == '\0') {
9980 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9981 nv = adapter->nv_path;
9982 }
9983
9984 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9985 *
9986 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9987 */
9988 if (firmware_path[0] != '\0')
9989 fw = firmware_path;
9990
9991 if (nvram_path[0] != '\0')
9992 nv = nvram_path;
9993
9994#ifdef DHD_UCODE_DOWNLOAD
9995 if (ucode_path[0] != '\0')
9996 uc = ucode_path;
9997#endif /* DHD_UCODE_DOWNLOAD */
9998
9999 if (fw && fw[0] != '\0') {
10000 fw_len = strlen(fw);
10001 if (fw_len >= fw_path_len) {
10002 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
10003 return FALSE;
10004 }
10005 strncpy(dhdinfo->fw_path, fw, fw_path_len);
10006 if (dhdinfo->fw_path[fw_len-1] == '\n')
10007 dhdinfo->fw_path[fw_len-1] = '\0';
10008 }
10009 if (nv && nv[0] != '\0') {
10010 nv_len = strlen(nv);
10011 if (nv_len >= nv_path_len) {
10012 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
10013 return FALSE;
10014 }
10015 memset(dhdinfo->nv_path, 0, nv_path_len);
10016 strncpy(dhdinfo->nv_path, nv, nv_path_len);
10017 dhdinfo->nv_path[nv_len] = '\0';
10018#ifdef DHD_USE_SINGLE_NVRAM_FILE
10019 /* Remove "_net" or "_mfg" tag from current nvram path */
10020 {
10021 char *nvram_tag = "nvram_";
10022 char *ext_tag = ".txt";
10023 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
10024 bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
10025 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
10026 if (valid_buf) {
10027 char *sp = sp_nvram + strlen(nvram_tag) - 1;
10028 uint32 padding_size = (uint32)(dhdinfo->nv_path +
10029 nv_path_len - sp);
10030 memset(sp, 0, padding_size);
10031 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
10032 nv_len = strlen(dhdinfo->nv_path);
10033 DHD_INFO(("%s: new nvram path = %s\n",
10034 __FUNCTION__, dhdinfo->nv_path));
10035 } else if (sp_nvram) {
10036 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
10037 __FUNCTION__));
10038 return FALSE;
10039 } else {
10040 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10041 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
10042 }
10043 }
10044#endif /* DHD_USE_SINGLE_NVRAM_FILE */
10045 if (dhdinfo->nv_path[nv_len-1] == '\n')
10046 dhdinfo->nv_path[nv_len-1] = '\0';
10047 }
10048#ifdef DHD_UCODE_DOWNLOAD
10049 if (uc && uc[0] != '\0') {
10050 uc_len = strlen(uc);
10051 if (uc_len >= sizeof(dhdinfo->uc_path)) {
10052 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10053 return FALSE;
10054 }
10055 strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
10056 if (dhdinfo->uc_path[uc_len-1] == '\n')
10057 dhdinfo->uc_path[uc_len-1] = '\0';
10058 }
10059#endif /* DHD_UCODE_DOWNLOAD */
10060
10061 /* clear the path in module parameter */
10062 if (dhd_download_fw_on_driverload) {
10063 firmware_path[0] = '\0';
10064 nvram_path[0] = '\0';
10065 }
10066#ifdef DHD_UCODE_DOWNLOAD
10067 ucode_path[0] = '\0';
10068 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
10069#endif /* DHD_UCODE_DOWNLOAD */
10070
10071 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10072 if (dhdinfo->fw_path[0] == '\0') {
10073 DHD_ERROR(("firmware path not found\n"));
10074 return FALSE;
10075 }
10076 if (dhdinfo->nv_path[0] == '\0') {
10077 DHD_ERROR(("nvram path not found\n"));
10078 return FALSE;
10079 }
10080
10081 return TRUE;
10082}
10083
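/* Usage sketch (the paths here are illustrative, not taken from this tree):
 *
 *   insmod dhd.ko firmware_path=/vendor/firmware/fw_bcmdhd.bin \
 *                 nvram_path=/vendor/firmware/nvram.txt
 *
 * dhd_bus_start() re-runs dhd_update_fw_nv_path() before each firmware
 * download, so a firmware_path/nvram_path written at run time is picked up on
 * the next download and then cleared as described above.
 */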
10084#if defined(BT_OVER_SDIO)
10085extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
10086{
10087 int fw_len;
10088 const char *fw = NULL;
10089 wifi_adapter_info_t *adapter = dhdinfo->adapter;
10090
10091 /* Update the BT firmware path. The path may come from adapter info or from a
10092 * module parameter; the adapter-info path is used for initialization only (it won't change).
10093 *
10094 * The btfw_path module parameter may be changed by the system at run time. When it
10095 * changes we need to copy it to dhdinfo->btfw_path. An Android private command may
10096 * also change dhdinfo->btfw_path. As such we clear the path info in the module
10097 * parameter after it is copied, and won't update the path until the module
10098 * parameter is changed again (first character is not '\0').
10099 */
10100
10101 /* set default firmware and nvram path for built-in type driver */
10102 if (!dhd_download_fw_on_driverload) {
10103#ifdef CONFIG_BCMDHD_BTFW_PATH
10104 fw = CONFIG_BCMDHD_BTFW_PATH;
10105#endif /* CONFIG_BCMDHD_BTFW_PATH */
10106 }
10107
10108 /* check if we need to initialize the path */
10109 if (dhdinfo->btfw_path[0] == '\0') {
10110 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
10111 fw = adapter->btfw_path;
10112 }
10113
10114 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10115 */
10116 if (btfw_path[0] != '\0')
10117 fw = btfw_path;
10118
10119 if (fw && fw[0] != '\0') {
10120 fw_len = strlen(fw);
10121 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
10122 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10123 return FALSE;
10124 }
10125 strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
10126 if (dhdinfo->btfw_path[fw_len-1] == '\n')
10127 dhdinfo->btfw_path[fw_len-1] = '\0';
10128 }
10129
10130 /* clear the path in module parameter */
10131 btfw_path[0] = '\0';
10132
10133 if (dhdinfo->btfw_path[0] == '\0') {
10134 DHD_ERROR(("bt firmware path not found\n"));
10135 return FALSE;
10136 }
10137
10138 return TRUE;
10139}
10140#endif /* defined (BT_OVER_SDIO) */
10141
10142#ifdef CUSTOMER_HW4_DEBUG
10143bool dhd_validate_chipid(dhd_pub_t *dhdp)
10144{
10145 uint chipid = dhd_bus_chip_id(dhdp);
10146 uint config_chipid;
10147
10148#ifdef BCM4375_CHIP
10149 config_chipid = BCM4375_CHIP_ID;
10150#elif defined(BCM4361_CHIP)
10151 config_chipid = BCM4361_CHIP_ID;
10152#elif defined(BCM4359_CHIP)
10153 config_chipid = BCM4359_CHIP_ID;
10154#elif defined(BCM4358_CHIP)
10155 config_chipid = BCM4358_CHIP_ID;
10156#elif defined(BCM4354_CHIP)
10157 config_chipid = BCM4354_CHIP_ID;
10158#elif defined(BCM4339_CHIP)
10159 config_chipid = BCM4339_CHIP_ID;
10160#elif defined(BCM4335_CHIP)
10161 config_chipid = BCM4335_CHIP_ID;
10162#elif defined(BCM43430_CHIP)
10163 config_chipid = BCM43430_CHIP_ID;
10164#elif defined(BCM43018_CHIP)
10165 config_chipid = BCM43018_CHIP_ID;
10166#elif defined(BCM43455_CHIP) || defined(BCM43456_CHIP)
10167 config_chipid = BCM4345_CHIP_ID;
10168#elif defined(BCM43454_CHIP)
10169 config_chipid = BCM43454_CHIP_ID;
10170#elif defined(BCM43012_CHIP_)
10171 config_chipid = BCM43012_CHIP_ID;
10172#else
10173 DHD_ERROR(("%s: Unknown chip id; if you use a new chipset,"
10174 " please add CONFIG_BCMXXXX to the kernel config and a"
10175 " BCMXXXX_CHIP definition to the DHD driver\n",
10176 __FUNCTION__));
10177 config_chipid = 0;
10178
10179 return FALSE;
10180#endif /* BCM4375_CHIP */
10181
10182#ifdef SUPPORT_MULTIPLE_CHIP_4345X
10183 if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) {
10184 return TRUE;
10185 }
10186#endif /* SUPPORT_MULTIPLE_CHIP_4345X */
10187#if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10188 if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
10189 return TRUE;
10190 }
10191#endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
10192#if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10193 if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
10194 return TRUE;
10195 }
10196#endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
10197#if defined(BCM4359_CHIP)
10198 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
10199 return TRUE;
10200 }
10201#endif /* BCM4359_CHIP */
10202#if defined(BCM4361_CHIP)
10203 if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
10204 return TRUE;
10205 }
10206#endif /* BCM4361_CHIP */
10207
10208 return config_chipid == chipid;
10209}
10210#endif /* CUSTOMER_HW4_DEBUG */
10211
10212#if defined(BT_OVER_SDIO)
10213wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
10214{
10215 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
10216 /* assuming that dhd_pub_t type pointer is available from a global variable */
10217 return (wlan_bt_handle_t) g_dhd_pub;
10218} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
10219
10220int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
10221{
10222 int ret = -1;
10223 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
10224 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10225
10226 /* Download BT firmware image to the dongle */
10227 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
10228 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
10229 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
10230 if (ret < 0) {
10231 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10232 __FUNCTION__, dhd->btfw_path));
10233 return ret;
10234 }
10235 }
10236 return ret;
10237} EXPORT_SYMBOL(dhd_download_btfw);
10238#endif /* defined (BT_OVER_SDIO) */
10239
10240int
10241dhd_bus_start(dhd_pub_t *dhdp)
10242{
10243 int ret = -1;
10244 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10245 unsigned long flags;
10246
10247#if defined(DHD_DEBUG) && defined(BCMSDIO)
10248 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
10249#endif /* DHD_DEBUG && BCMSDIO */
10250 ASSERT(dhd);
10251
10252 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
10253 dhdp->dongle_trap_occured = 0;
10254 dhdp->iovar_timeout_occured = 0;
10255#ifdef PCIE_FULL_DONGLE
10256 dhdp->d3ack_timeout_occured = 0;
10257#endif /* PCIE_FULL_DONGLE */
10258#ifdef DHD_MAP_LOGGING
10259 dhdp->smmu_fault_occurred = 0;
10260#endif /* DHD_MAP_LOGGING */
10261
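 /* Bring-up sequence: download firmware/nvram if the bus is down, start the
  * watchdog, initialize the bus, register the OOB host-wake interrupt where
  * configured, set up the PCIe flow rings, run protocol init, and finally
  * sync IOCTL state with the now-running dongle.
  */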
10262 DHD_PERIM_LOCK(dhdp);
10263 /* try to download image and nvram to the dongle */
10264 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
10265 /* Indicate FW Download has not yet done */
10266 dhd->pub.fw_download_done = FALSE;
10267 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
10268#if defined(DHD_DEBUG) && defined(BCMSDIO)
10269 fw_download_start = OSL_SYSUPTIME();
10270#endif /* DHD_DEBUG && BCMSDIO */
10271 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
10272 dhd->fw_path, dhd->nv_path);
10273#if defined(DHD_DEBUG) && defined(BCMSDIO)
10274 fw_download_end = OSL_SYSUPTIME();
10275#endif /* DHD_DEBUG && BCMSDIO */
10276 if (ret < 0) {
10277 DHD_ERROR(("%s: failed to download firmware %s\n",
10278 __FUNCTION__, dhd->fw_path));
10279 DHD_PERIM_UNLOCK(dhdp);
10280 return ret;
10281 }
10282 /* Indicate FW Download has succeeded */
10283 dhd->pub.fw_download_done = TRUE;
10284 }
10285 if (dhd->pub.busstate != DHD_BUS_LOAD) {
10286 DHD_PERIM_UNLOCK(dhdp);
10287 return -ENETDOWN;
10288 }
10289
10290#ifdef BCMSDIO
10291 dhd_os_sdlock(dhdp);
10292#endif /* BCMSDIO */
10293
10294 /* Start the watchdog timer */
10295 dhd->pub.tickcnt = 0;
10296 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
10297
10298 /* Bring up the bus */
10299 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
10300
10301 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
10302#ifdef BCMSDIO
10303 dhd_os_sdunlock(dhdp);
10304#endif /* BCMSDIO */
10305 DHD_PERIM_UNLOCK(dhdp);
10306 return ret;
10307 }
10308
10309 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
10310
10311#ifdef DHD_ULP
10312 dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
10313#endif /* DHD_ULP */
10314#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
10315 /* Host registration for OOB interrupt */
10316 if (dhd_bus_oob_intr_register(dhdp)) {
10317 /* deactivate timer and wait for the handler to finish */
10318#if !defined(BCMPCIE_OOB_HOST_WAKE)
10319 DHD_GENERAL_LOCK(&dhd->pub, flags);
10320 dhd->wd_timer_valid = FALSE;
10321 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10322 del_timer_sync(&dhd->timer);
10323
10324#endif /* !BCMPCIE_OOB_HOST_WAKE */
10325 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10326 DHD_PERIM_UNLOCK(dhdp);
10327 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
10328 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10329 return -ENODEV;
10330 }
10331
10332#if defined(BCMPCIE_OOB_HOST_WAKE)
10333 dhd_bus_oob_intr_set(dhdp, TRUE);
10334#else
10335 /* Enable oob at firmware */
10336 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10337#endif /* BCMPCIE_OOB_HOST_WAKE */
10338#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
10339#ifdef PCIE_FULL_DONGLE
10340 {
10341 /* max_h2d_rings includes H2D common rings */
10342 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
10343
10344 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
10345 max_h2d_rings));
10346 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
10347#ifdef BCMSDIO
10348 dhd_os_sdunlock(dhdp);
10349#endif /* BCMSDIO */
10350 DHD_PERIM_UNLOCK(dhdp);
10351 return ret;
10352 }
10353 }
10354#endif /* PCIE_FULL_DONGLE */
10355
10356 /* Do protocol initialization necessary for IOCTL/IOVAR */
10357 ret = dhd_prot_init(&dhd->pub);
10358 if (unlikely(ret != BCME_OK)) {
10359 DHD_PERIM_UNLOCK(dhdp);
10360 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10361 return ret;
10362 }
10363
10364 /* If bus is not ready, can't come up */
10365 if (dhd->pub.busstate != DHD_BUS_DATA) {
10366 DHD_GENERAL_LOCK(&dhd->pub, flags);
10367 dhd->wd_timer_valid = FALSE;
10368 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10369 del_timer_sync(&dhd->timer);
10370 DHD_ERROR(("%s: failed, bus is not ready\n", __FUNCTION__));
10371 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10372#ifdef BCMSDIO
10373 dhd_os_sdunlock(dhdp);
10374#endif /* BCMSDIO */
10375 DHD_PERIM_UNLOCK(dhdp);
10376 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10377 return -ENODEV;
10378 }
10379
10380#ifdef BCMSDIO
10381 dhd_os_sdunlock(dhdp);
10382#endif /* BCMSDIO */
10383
10384 /* Bus is ready, query any dongle information */
10385#if defined(DHD_DEBUG) && defined(BCMSDIO)
10386 f2_sync_start = OSL_SYSUPTIME();
10387#endif /* DHD_DEBUG && BCMSDIO */
10388 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
10389 DHD_GENERAL_LOCK(&dhd->pub, flags);
10390 dhd->wd_timer_valid = FALSE;
10391 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10392 del_timer_sync(&dhd->timer);
10393 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
10394 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10395 DHD_PERIM_UNLOCK(dhdp);
10396 return ret;
10397 }
10398#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
10399 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
10400 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
10401#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
10402
10403#if defined(DHD_DEBUG) && defined(BCMSDIO)
10404 f2_sync_end = OSL_SYSUPTIME();
10405 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10406 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
10407#endif /* DHD_DEBUG && BCMSDIO */
10408
10409#ifdef ARP_OFFLOAD_SUPPORT
10410 if (dhd->pend_ipaddr) {
10411#ifdef AOE_IP_ALIAS_SUPPORT
10412 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
10413#endif /* AOE_IP_ALIAS_SUPPORT */
10414 dhd->pend_ipaddr = 0;
10415 }
10416#endif /* ARP_OFFLOAD_SUPPORT */
10417
10418 DHD_PERIM_UNLOCK(dhdp);
10419
10420 return 0;
10421}
10422#ifdef WLTDLS
10423int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
10424{
10425 uint32 tdls = tdls_on;
10426 int ret = 0;
10427 uint32 tdls_auto_op = 0;
10428 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
10429 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
10430 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
10431 uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
10432 uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
10433
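 /* In auto mode the firmware decides when to set up or tear down a TDLS
  * link; the thresholds pushed below (idle time, high/low RSSI, high/low
  * trigger packet counts) appear to steer those decisions.
  */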
10434 BCM_REFERENCE(mac);
10435 if (!FW_SUPPORTED(dhd, tdls))
10436 return BCME_ERROR;
10437
10438 if (dhd->tdls_enable == tdls_on)
10439 goto auto_mode;
10440 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
10441 if (ret < 0) {
10442 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
10443 goto exit;
10444 }
10445 dhd->tdls_enable = tdls_on;
10446auto_mode:
10447
10448 tdls_auto_op = auto_on;
10449 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
10450 0, TRUE);
10451 if (ret < 0) {
10452 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
10453 goto exit;
10454 }
10455
10456 if (tdls_auto_op) {
10457 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
10458 sizeof(tdls_idle_time), NULL, 0, TRUE);
10459 if (ret < 0) {
10460 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
10461 goto exit;
10462 }
10463 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
10464 sizeof(tdls_rssi_high), NULL, 0, TRUE);
10465 if (ret < 0) {
10466 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
10467 goto exit;
10468 }
10469 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
10470 sizeof(tdls_rssi_low), NULL, 0, TRUE);
10471 if (ret < 0) {
10472 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
10473 goto exit;
10474 }
10475 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
10476 sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
10477 if (ret < 0) {
10478 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
10479 goto exit;
10480 }
10481 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
10482 sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
10483 if (ret < 0) {
10484 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
10485 goto exit;
10486 }
10487 }
10488
10489exit:
10490 return ret;
10491}
10492int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
10493{
10494 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10495 int ret = 0;
10496 if (dhd)
10497 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
10498 else
10499 ret = BCME_ERROR;
10500 return ret;
10501}
10502int
10503dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
10504{
10505 int ret = 0;
10506 bool auto_on = false;
10507 uint32 mode = wfd_mode;
10508
10509#ifdef ENABLE_TDLS_AUTO_MODE
10510 if (wfd_mode) {
10511 auto_on = false;
10512 } else {
10513 auto_on = true;
10514 }
10515#else
10516 auto_on = false;
10517#endif /* ENABLE_TDLS_AUTO_MODE */
10518 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
10519 if (ret < 0) {
10520 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
10521 return ret;
10522 }
10523
10524 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
10525 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
10526 DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
10527 return ret;
10528 }
10529
10530 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
10531 if (ret < 0) {
10532 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
10533 return ret;
10534 }
10535
10536 dhd->tdls_mode = mode;
10537 return ret;
10538}
10539#ifdef PCIE_FULL_DONGLE
10540int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
10541{
10542 dhd_pub_t *dhd_pub = dhdp;
10543 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
10544 tdls_peer_node_t *new = NULL, *prev = NULL;
10545 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
10546 uint8 *da = (uint8 *)&event->addr.octet[0];
10547 bool connect = FALSE;
10548 uint32 reason = ntoh32(event->reason);
10549 unsigned long flags;
10550
10551 if (reason == WLC_E_TDLS_PEER_CONNECTED)
10552 connect = TRUE;
10553 else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
10554 connect = FALSE;
10555 else
10556 {
10557 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
10558 return BCME_ERROR;
10559 }
10560 if (ifindex == DHD_BAD_IF)
10561 return BCME_ERROR;
10562
10563 if (connect) {
10564 while (cur != NULL) {
10565 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10566 DHD_ERROR(("%s: TDLS peer already exists, line %d\n",
10567 __FUNCTION__, __LINE__));
10568 return BCME_ERROR;
10569 }
10570 cur = cur->next;
10571 }
10572
10573 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
10574 if (new == NULL) {
10575 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
10576 return BCME_ERROR;
10577 }
10578 memcpy(new->addr, da, ETHER_ADDR_LEN);
10579 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10580 new->next = dhd_pub->peer_tbl.node;
10581 dhd_pub->peer_tbl.node = new;
10582 dhd_pub->peer_tbl.tdls_peer_count++;
10583 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10584
10585 } else {
10586 while (cur != NULL) {
10587 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10588 dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
10589 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10590 if (prev)
10591 prev->next = cur->next;
10592 else
10593 dhd_pub->peer_tbl.node = cur->next;
10594 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
10595 dhd_pub->peer_tbl.tdls_peer_count--;
10596 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10597 return BCME_OK;
10598 }
10599 prev = cur;
10600 cur = cur->next;
10601 }
10602 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
10603 }
10604 return BCME_OK;
10605}
10606#endif /* PCIE_FULL_DONGLE */
10607#endif // endif
10608
10609bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
10610{
10611 if (!dhd)
10612 return FALSE;
10613
10614 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
10615 return TRUE;
10616 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
10617 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
10618 return TRUE;
10619 else
10620 return FALSE;
10621}
10622#if !defined(AP) && defined(WLP2P)
10623/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the
10624 * firmware is named fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the
10625 * STA firmware and accordingly enable concurrent mode (apply P2P settings). SoftAP
10626 * firmware is still named fw_bcmdhd_apsta.
10627 */
10628uint32
10629dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
10630{
10631 int32 ret = 0;
10632 char buf[WLC_IOCTL_SMLEN];
10633 bool mchan_supported = FALSE;
10634 /* If dhd->op_mode is already set for HOSTAP or manufacturing
10635 * test mode, we will keep using that mode as it is.
10636 */
10637 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
10638 return 0;
10639 if (FW_SUPPORTED(dhd, vsdb)) {
10640 mchan_supported = TRUE;
10641 }
10642 if (!FW_SUPPORTED(dhd, p2p)) {
10643 DHD_TRACE(("Chip does not support p2p\n"));
10644 return 0;
10645 } else {
10646 /* The chip supports p2p, but check whether p2p is really implemented in the firmware */
10647 memset(buf, 0, sizeof(buf));
10648 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
10649 sizeof(buf), FALSE);
10650 if (ret < 0) {
10651 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
10652 return 0;
10653 } else {
10654 if (buf[0] == 1) {
10655 /* By default the chip supports single-channel concurrency;
10656 * now let's check for mchan
10657 */
10658 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
10659 if (mchan_supported)
10660 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
10661 if (FW_SUPPORTED(dhd, rsdb)) {
10662 ret |= DHD_FLAG_RSDB_MODE;
10663 }
10664#ifdef WL_SUPPORT_MULTIP2P
10665 if (FW_SUPPORTED(dhd, mp2p)) {
10666 ret |= DHD_FLAG_MP2P_MODE;
10667 }
10668#endif /* WL_SUPPORT_MULTIP2P */
10669#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10670 return ret;
10671#else
10672 return 0;
10673#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10674 }
10675 }
10676 }
10677 return 0;
10678}
10679#endif // endif
10680
10681#ifdef WLAIBSS
10682int
10683dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
10684{
10685 int ret = BCME_OK;
10686 aibss_bcn_force_config_t bcn_config;
10687 uint32 aibss;
10688#ifdef WLAIBSS_PS
10689 uint32 aibss_ps;
10690 s32 atim;
10691#endif /* WLAIBSS_PS */
10692 int ibss_coalesce;
10693
10694 aibss = 1;
10695 ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
10696 if (ret < 0) {
10697 if (ret == BCME_UNSUPPORTED) {
10698 DHD_ERROR(("%s aibss is not supported\n",
10699 __FUNCTION__));
10700 return BCME_OK;
10701 } else {
10702 DHD_ERROR(("%s Set aibss to %d failed %d\n",
10703 __FUNCTION__, aibss, ret));
10704 return ret;
10705 }
10706 }
10707
10708#ifdef WLAIBSS_PS
10709 aibss_ps = 1;
10710 ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
10711 if (ret < 0) {
10712 DHD_ERROR(("%s Set aibss_ps to %d failed %d\n",
10713 __FUNCTION__, aibss_ps, ret));
10714 return ret;
10715 }
10716
10717 atim = 10;
10718 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
10719 (char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
10720 DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
10721 __FUNCTION__, ret));
10722 return ret;
10723 }
10724#endif /* WLAIBSS_PS */
10725
10726 memset(&bcn_config, 0, sizeof(bcn_config));
10727 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
10728 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
10729 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
10730 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
10731 bcn_config.len = sizeof(bcn_config);
10732
10733 ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
10734 sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
10735 if (ret < 0) {
10736 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
10737 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
10738 AIBSS_BCN_FLOOD_DUR, ret));
10739 return ret;
10740 }
10741
10742 ibss_coalesce = IBSS_COALESCE_DEFAULT;
10743 ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
10744 sizeof(ibss_coalesce), NULL, 0, TRUE);
10745 if (ret < 0) {
10746 DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
10747 __FUNCTION__, ret));
10748 return ret;
10749 }
10750
10751 dhd->op_mode |= DHD_FLAG_IBSS_MODE;
10752 return BCME_OK;
10753}
10754#endif /* WLAIBSS */
10755
10756#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10757#ifdef WL_BAM
10758static int
10759dhd_check_adps_bad_ap(dhd_pub_t *dhd)
10760{
10761 struct net_device *ndev;
10762 struct bcm_cfg80211 *cfg;
10763 struct wl_profile *profile;
10764 struct ether_addr bssid;
10765
10766 if (!dhd_is_associated(dhd, 0, NULL)) {
10767 DHD_ERROR(("%s - not associated\n", __FUNCTION__));
10768 return BCME_OK;
10769 }
10770
10771 ndev = dhd_linux_get_primary_netdev(dhd);
10772 if (!ndev) {
10773 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
10774 return -ENODEV;
10775 }
10776
10777 cfg = wl_get_cfg(ndev);
10778 if (!cfg) {
10779 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
10780 return -EINVAL;
10781 }
10782
10783 profile = wl_get_profile_by_netdev(cfg, ndev);
10784 memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
10785 if (wl_adps_bad_ap_check(cfg, &bssid)) {
10786 if (wl_adps_enabled(cfg, ndev)) {
10787 wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
10788 }
10789 }
10790
10791 return BCME_OK;
10792}
10793#endif /* WL_BAM */
10794
10795int
10796dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
10797{
10798 int i;
10799 int len;
10800 int ret = BCME_OK;
10801
10802 bcm_iov_buf_t *iov_buf = NULL;
10803 wl_adps_params_v1_t *data = NULL;
10804
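 /* The "adps" iovar payload is a bcm_iov_buf_t header followed in the same
  * buffer by a wl_adps_params_v1_t record; the same on/off mode is applied
  * to every band by resending the buffer with data->band = 1..MAX_BANDS.
  */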
10805 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
10806 iov_buf = MALLOC(dhd->osh, len);
10807 if (iov_buf == NULL) {
10808 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
10809 ret = BCME_NOMEM;
10810 goto exit;
10811 }
10812
10813 iov_buf->version = WL_ADPS_IOV_VER;
10814 iov_buf->len = sizeof(*data);
10815 iov_buf->id = WL_ADPS_IOV_MODE;
10816
10817 data = (wl_adps_params_v1_t *)iov_buf->data;
10818 data->version = ADPS_SUB_IOV_VERSION_1;
10819 data->length = sizeof(*data);
10820 data->mode = on;
10821
10822 for (i = 1; i <= MAX_BANDS; i++) {
10823 data->band = i;
10824 ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
10825 if (ret < 0) {
10826 if (ret == BCME_UNSUPPORTED) {
10827 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
10828 ret = BCME_OK;
10829 goto exit;
10830 }
10831 else {
10832 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10833 __FUNCTION__, on ? "On" : "Off", i, ret));
10834 goto exit;
10835 }
10836 }
10837 }
10838
10839#ifdef WL_BAM
10840 if (on) {
10841 dhd_check_adps_bad_ap(dhd);
10842 }
10843#endif /* WL_BAM */
10844
10845exit:
10846 if (iov_buf) {
10847 MFREE(dhd->osh, iov_buf, len);
10848 iov_buf = NULL;
10849 }
10850 return ret;
10851}
10852#endif /* WLADPS || WLADPS_PRIVATE_CMD */
10853
10854int
10855dhd_preinit_ioctls(dhd_pub_t *dhd)
10856{
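 /* Pushes the initial IOCTL/iovar configuration (power save, roaming, event
  * mask, aggregation tunables, and so on) to the dongle right after firmware
  * download; most of the knobs are compile-time selected via the #ifdef'd
  * locals below.
  */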
10857 int ret = 0;
10858 char eventmask[WL_EVENTING_MASK_LEN];
10859 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10860 uint32 buf_key_b4_m4 = 1;
10861 uint8 msglen;
10862 eventmsgs_ext_t *eventmask_msg = NULL;
10863 char* iov_buf = NULL;
10864 int ret2 = 0;
10865 uint32 wnm_cap = 0;
10866#if defined(BCMSUP_4WAY_HANDSHAKE)
10867 uint32 sup_wpa = 1;
10868#endif /* BCMSUP_4WAY_HANDSHAKE */
10869#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
10870 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
10871 uint32 ampdu_ba_wsize = 0;
10872#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
10873#if defined(CUSTOM_AMPDU_MPDU)
10874 int32 ampdu_mpdu = 0;
10875#endif // endif
10876#if defined(CUSTOM_AMPDU_RELEASE)
10877 int32 ampdu_release = 0;
10878#endif // endif
10879#if defined(CUSTOM_AMSDU_AGGSF)
10880 int32 amsdu_aggsf = 0;
10881#endif // endif
10882
10883#if defined(BCMSDIO)
10884#ifdef PROP_TXSTATUS
10885 int wlfc_enable = TRUE;
10886#ifndef DISABLE_11N
10887 uint32 hostreorder = 1;
10888#endif /* DISABLE_11N */
10889#endif /* PROP_TXSTATUS */
10890#endif // endif
10891#ifndef PCIE_FULL_DONGLE
10892 uint32 wl_ap_isolate;
10893#endif /* PCIE_FULL_DONGLE */
10894 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
10895 uint wnm_bsstrans_resp = 0;
10896#ifdef SUPPORT_SET_CAC
10897#ifdef SUPPORT_CUSTOM_SET_CAC
10898 uint32 cac = 0;
10899#else
10900 uint32 cac = 1;
10901#endif /* SUPPORT_CUSTOM_SET_CAC */
10902#endif /* SUPPORT_SET_CAC */
10903
10904#if defined(DHD_NON_DMA_M2M_CORRUPTION)
10905 dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk;
10906#endif /* DHD_NON_DMA_M2M_CORRUPTION */
10907
10908#ifdef DHD_ENABLE_LPC
10909 uint32 lpc = 1;
10910#endif /* DHD_ENABLE_LPC */
10911 uint power_mode = PM_FAST;
10912#if defined(BCMSDIO)
10913 uint32 dongle_align = DHD_SDALIGN;
10914 uint32 glom = CUSTOM_GLOM_SETTING;
10915#endif /* defined(BCMSDIO) */
10916 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
10917 uint scancache_enab = TRUE;
10918#ifdef ENABLE_BCN_LI_BCN_WAKEUP
10919 uint32 bcn_li_bcn = 1;
10920#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10921 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
10922#if defined(ARP_OFFLOAD_SUPPORT)
10923 int arpoe = 1;
10924#endif // endif
10925 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
10926 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
10927 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
10928 char buf[WLC_IOCTL_SMLEN];
10929 char *ptr;
10930 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
10931#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10932 wl_el_tag_params_t *el_tag = NULL;
10933#endif /* DHD_8021X_DUMP */
10934#ifdef ROAM_ENABLE
10935 uint roamvar = 0;
10936 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
10937 int roam_scan_period[2] = {10, WLC_BAND_ALL};
10938 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
10939#ifdef ROAM_AP_ENV_DETECTION
10940 int roam_env_mode = AP_ENV_INDETERMINATE;
10941#endif /* ROAM_AP_ENV_DETECTION */
10942#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10943 int roam_fullscan_period = 60;
10944#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10945 int roam_fullscan_period = 120;
10946#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10947#ifdef DISABLE_BCNLOSS_ROAM
10948 uint roam_bcnloss_off = 1;
10949#endif /* DISABLE_BCNLOSS_ROAM */
10950#else
10951#ifdef DISABLE_BUILTIN_ROAM
10952 uint roamvar = 1;
10953#endif /* DISABLE_BUILTIN_ROAM */
10954#endif /* ROAM_ENABLE */
10955
10956#if defined(SOFTAP)
10957 uint dtim = 1;
10958#endif // endif
10959#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10960 struct ether_addr p2p_ea;
10961#endif // endif
10962#ifdef BCMCCX
10963 uint32 ccx = 1;
10964#endif // endif
10965#ifdef SOFTAP_UAPSD_OFF
10966 uint32 wme_apsd = 0;
10967#endif /* SOFTAP_UAPSD_OFF */
10968#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10969 uint32 apsta = 1; /* Enable APSTA mode */
10970#elif defined(SOFTAP_AND_GC)
10971 uint32 apsta = 0;
10972 int ap_mode = 1;
10973#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10974#ifdef GET_CUSTOM_MAC_ENABLE
10975 struct ether_addr ea_addr;
10976#endif /* GET_CUSTOM_MAC_ENABLE */
10977#ifdef OKC_SUPPORT
10978 uint32 okc = 1;
10979#endif // endif
10980
10981#ifdef DISABLE_11N
10982 uint32 nmode = 0;
10983#endif /* DISABLE_11N */
10984
10985#ifdef USE_WL_TXBF
10986 uint32 txbf = 1;
10987#endif /* USE_WL_TXBF */
10988#ifdef DISABLE_TXBFR
10989 uint32 txbf_bfr_cap = 0;
10990#endif /* DISABLE_TXBFR */
10991#ifdef AMPDU_VO_ENABLE
10992 struct ampdu_tid_control tid;
10993#endif // endif
10994#if defined(PROP_TXSTATUS)
10995#ifdef USE_WFA_CERT_CONF
10996 uint32 proptx = 0;
10997#endif /* USE_WFA_CERT_CONF */
10998#endif /* PROP_TXSTATUS */
10999#ifdef DHD_SET_FW_HIGHSPEED
11000 uint32 ack_ratio = 250;
11001 uint32 ack_ratio_depth = 64;
11002#endif /* DHD_SET_FW_HIGHSPEED */
11003#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11004 uint32 vht_features = 0; /* init to 0, will be set based on each support */
11005#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11006#ifdef DISABLE_11N_PROPRIETARY_RATES
11007 uint32 ht_features = 0;
11008#endif /* DISABLE_11N_PROPRIETARY_RATES */
11009#ifdef CUSTOM_EVENT_PM_WAKE
11010 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
11011#endif /* CUSTOM_EVENT_PM_WAKE */
11012#ifdef DISABLE_PRUNED_SCAN
11013 uint32 scan_features = 0;
11014#endif /* DISABLE_PRUNED_SCAN */
11015#ifdef DHD_2G_ONLY_SUPPORT
11016 uint band = WLC_BAND_2G;
11017#endif /* DHD_2G_ONLY_SUPPORT */
11018#ifdef BCMPCIE_OOB_HOST_WAKE
11019 uint32 hostwake_oob = 0;
11020#endif /* BCMPCIE_OOB_HOST_WAKE */
11021#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
11022 uint32 btmdelta = WBTEXT_BTMDELTA;
11023#endif /* WBTEXT && WBTEXT_BTMDELTA */
11024
11025#ifdef PKT_FILTER_SUPPORT
11026 dhd_pkt_filter_enable = TRUE;
11027#ifdef APF
11028 dhd->apf_set = FALSE;
11029#endif /* APF */
11030#endif /* PKT_FILTER_SUPPORT */
11031 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
11032#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
11033 dhd->max_dtim_enable = TRUE;
11034#else
11035 dhd->max_dtim_enable = FALSE;
11036#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
11037#ifdef CUSTOM_SET_OCLOFF
11038 dhd->ocl_off = FALSE;
11039#endif /* CUSTOM_SET_OCLOFF */
11040#ifdef SUPPORT_SET_TID
11041 dhd->tid_mode = SET_TID_OFF;
11042 dhd->target_uid = 0;
11043 dhd->target_tid = 0;
11044#endif /* SUPPORT_SET_TID */
11045 DHD_TRACE(("Enter %s\n", __FUNCTION__));
11046 dhd->op_mode = 0;
11047
11048#ifdef CUSTOMER_HW4_DEBUG
11049 if (!dhd_validate_chipid(dhd)) {
11050		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) do not match\n",
11051 __FUNCTION__, dhd_bus_chip_id(dhd)));
11052#ifndef SUPPORT_MULTIPLE_CHIPS
11053 ret = BCME_BADARG;
11054 goto done;
11055#endif /* !SUPPORT_MULTIPLE_CHIPS */
11056 }
11057#endif /* CUSTOMER_HW4_DEBUG */
11058 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11059 (op_mode == DHD_FLAG_MFG_MODE)) {
11060 dhd->op_mode = DHD_FLAG_MFG_MODE;
11061#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11062 /* disable runtimePM by default in MFG mode. */
11063 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
11064#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11065#ifdef DHD_PCIE_RUNTIMEPM
11066 /* Disable RuntimePM in mfg mode */
11067 DHD_DISABLE_RUNTIME_PM(dhd);
11068		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
11069#endif /* DHD_PCIE_RUNTIMEPM */
11070		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
11071		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
11072		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
11073 __FUNCTION__));
11074 } else {
11075 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
11076 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
11077 }
11078#ifdef BCMPCIE_OOB_HOST_WAKE
11079 ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
11080 sizeof(hostwake_oob), FALSE);
11081 if (ret < 0) {
11082 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
11083 } else {
11084 if (hostwake_oob == 0) {
11085 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
11086 __FUNCTION__));
11087 ret = BCME_UNSUPPORTED;
11088 goto done;
11089 } else {
11090 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
11091 }
11092 }
11093#endif /* BCMPCIE_OOB_HOST_WAKE */
11094#ifdef GET_CUSTOM_MAC_ENABLE
11095 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
11096 if (!ret) {
11097 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
11098 TRUE);
11099 if (ret < 0) {
11100			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
11101 ret = BCME_NOTUP;
11102 goto done;
11103 }
11104 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
11105 } else {
11106#endif /* GET_CUSTOM_MAC_ENABLE */
11107 /* Get the default device MAC address directly from firmware */
11108 ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11109 if (ret < 0) {
11110			DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
11111 ret = BCME_NOTUP;
11112 goto done;
11113 }
11114 /* Update public MAC address after reading from Firmware */
11115 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
11116
11117#ifdef GET_CUSTOM_MAC_ENABLE
11118 }
11119#endif /* GET_CUSTOM_MAC_ENABLE */
11120
11121#ifdef DHD_USE_CLMINFO_PARSER
11122 if ((ret = dhd_get_clminfo(dhd, clm_path)) < 0) {
11123 if (dhd->is_clm_mult_regrev) {
11124 DHD_ERROR(("%s: CLM Information load failed. Abort initialization.\n",
11125 __FUNCTION__));
11126 goto done;
11127 }
11128 }
11129#endif /* DHD_USE_CLMINFO_PARSER */
11130 if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
11131 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
11132 goto done;
11133 }
11134
11135	/* get capabilities from firmware */
11136 {
11137 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
11138 memset(dhd->fw_capabilities, 0, cap_buf_size);
11139 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
11140 FALSE);
11141 if (ret < 0) {
11142 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11143 __FUNCTION__, ret));
11144 return 0;
11145 }
11146
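		/* Wrap the capability string in leading/trailing spaces and
		 * NUL-terminate it so that space-delimited token lookups such
		 * as FW_SUPPORTED(dhd, rsdb) match whole words only.
		 */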
11147 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
11148 dhd->fw_capabilities[0] = ' ';
11149 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
11150 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
11151 }
11152
11153 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
11154 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
11155#ifdef SET_RANDOM_MAC_SOFTAP
11156 uint rand_mac;
11157#endif /* SET_RANDOM_MAC_SOFTAP */
11158 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
11159#if defined(ARP_OFFLOAD_SUPPORT)
11160 arpoe = 0;
11161#endif // endif
11162#ifdef PKT_FILTER_SUPPORT
11163 dhd_pkt_filter_enable = FALSE;
11164#endif // endif
11165#ifdef SET_RANDOM_MAC_SOFTAP
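		/* Compose a random, locally-administered MAC for SoftAP:
		 * bytes 0-2 carry the vendor OUI with the locally-administered
		 * bit set, bytes 3-5 are taken from the PRNG.
		 */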
11166 SRANDOM32((uint)jiffies);
11167 rand_mac = RANDOM32();
11168 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
11169 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
11170 iovbuf[2] = (unsigned char)vendor_oui;
11171 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
11172 iovbuf[4] = (unsigned char)(rand_mac >> 8);
11173 iovbuf[5] = (unsigned char)(rand_mac >> 16);
11174
11175 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
11176 TRUE);
11177 if (ret < 0) {
11178			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
11179 } else
11180 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
11181#endif /* SET_RANDOM_MAC_SOFTAP */
11182#ifdef USE_DYNAMIC_F2_BLKSIZE
11183 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11184#endif /* USE_DYNAMIC_F2_BLKSIZE */
11185#ifdef SOFTAP_UAPSD_OFF
11186 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
11187 TRUE);
11188 if (ret < 0) {
11189 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11190 __FUNCTION__, ret));
11191 }
11192#endif /* SOFTAP_UAPSD_OFF */
11193 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11194 (op_mode == DHD_FLAG_MFG_MODE)) {
11195#if defined(ARP_OFFLOAD_SUPPORT)
11196 arpoe = 0;
11197#endif /* ARP_OFFLOAD_SUPPORT */
11198#ifdef PKT_FILTER_SUPPORT
11199 dhd_pkt_filter_enable = FALSE;
11200#endif /* PKT_FILTER_SUPPORT */
11201 dhd->op_mode = DHD_FLAG_MFG_MODE;
11202#ifdef USE_DYNAMIC_F2_BLKSIZE
11203 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11204#endif /* USE_DYNAMIC_F2_BLKSIZE */
11205#ifndef CUSTOM_SET_ANTNPM
11206 if (FW_SUPPORTED(dhd, rsdb)) {
11207 wl_config_t rsdb_mode;
11208 memset(&rsdb_mode, 0, sizeof(rsdb_mode));
11209 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
11210 NULL, 0, TRUE);
11211 if (ret < 0) {
11212				DHD_ERROR(("%s: disabling rsdb_mode failed, ret=%d\n",
11213					__FUNCTION__, ret));
11214 }
11215 }
11216#endif /* !CUSTOM_SET_ANTNPM */
11217 } else {
11218 uint32 concurrent_mode = 0;
11219 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
11220 (op_mode == DHD_FLAG_P2P_MODE)) {
11221#if defined(ARP_OFFLOAD_SUPPORT)
11222 arpoe = 0;
11223#endif // endif
11224#ifdef PKT_FILTER_SUPPORT
11225 dhd_pkt_filter_enable = FALSE;
11226#endif // endif
11227 dhd->op_mode = DHD_FLAG_P2P_MODE;
11228 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
11229 (op_mode == DHD_FLAG_IBSS_MODE)) {
11230 dhd->op_mode = DHD_FLAG_IBSS_MODE;
11231 } else
11232 dhd->op_mode = DHD_FLAG_STA_MODE;
11233#if !defined(AP) && defined(WLP2P)
11234 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
11235 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
11236#if defined(ARP_OFFLOAD_SUPPORT)
11237 arpoe = 1;
11238#endif // endif
11239 dhd->op_mode |= concurrent_mode;
11240 }
11241
11242 /* Check if we are enabling p2p */
11243 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11244 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
11245 TRUE);
11246 if (ret < 0)
11247 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
11248
11249#if defined(SOFTAP_AND_GC)
11250 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
11251 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
11252 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
11253 }
11254#endif // endif
11255 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
11256 ETHER_SET_LOCALADDR(&p2p_ea);
11257 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
11258 NULL, 0, TRUE);
11259 if (ret < 0)
11260 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
11261 else
11262 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11263 }
11264#else
11265 (void)concurrent_mode;
11266#endif // endif
11267 }
11268
11269#ifdef DISABLE_PRUNED_SCAN
11270 if (FW_SUPPORTED(dhd, rsdb)) {
11271 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11272 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
11273 if (ret < 0) {
11274			DHD_ERROR(("%s: get scan_features failed, ret=%d\n",
11275				__FUNCTION__, ret));
11276 } else {
11277 memcpy(&scan_features, iovbuf, 4);
11278 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
11279 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11280 sizeof(scan_features), NULL, 0, TRUE);
11281 if (ret < 0) {
11282				DHD_ERROR(("%s: set scan_features failed, ret=%d\n",
11283					__FUNCTION__, ret));
11284 }
11285 }
11286 }
11287#endif /* DISABLE_PRUNED_SCAN */
11288
11289 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
11290 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
11291
11292#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11293 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
11294 dhd->info->rxthread_enabled = FALSE;
11295 else
11296 dhd->info->rxthread_enabled = TRUE;
11297#endif // endif
11298 /* Set Country code */
11299 if (dhd->dhd_cspec.ccode[0] != 0) {
11300 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
11301 NULL, 0, TRUE);
11302 if (ret < 0)
11303 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
11304 }
11305
11306#ifdef DHD_2G_ONLY_SUPPORT
11307 DHD_ERROR(("Enabled DHD 2G only support!!\n"));
11308 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, (char *)&band, sizeof(band), TRUE, 0);
11309 if (ret < 0) {
11310 DHD_ERROR(("%s Set Band B failed %d\n", __FUNCTION__, ret));
11311 }
11312#endif /* DHD_2G_ONLY_SUPPORT */
11313
11314 /* Set Listen Interval */
11315 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
11316 NULL, 0, TRUE);
11317 if (ret < 0)
11318 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
11319
11320#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11321#ifdef USE_WFA_CERT_CONF
11322 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
11323 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
11324 }
11325#endif /* USE_WFA_CERT_CONF */
11326	/* Disable built-in roaming so the external supplicant can take care of roaming */
11327 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
11328#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11329#if defined(ROAM_ENABLE)
11330#ifdef DISABLE_BCNLOSS_ROAM
11331 ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
11332 sizeof(roam_bcnloss_off), NULL, 0, TRUE);
11333#endif /* DISABLE_BCNLOSS_ROAM */
11334 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
11335 sizeof(roam_trigger), TRUE, 0)) < 0)
11336 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
11337 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
11338 sizeof(roam_scan_period), TRUE, 0)) < 0)
11339 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
11340	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
11341 sizeof(roam_delta), TRUE, 0)) < 0)
11342 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
11343 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
11344 sizeof(roam_fullscan_period), NULL, 0, TRUE);
11345 if (ret < 0)
11346 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
11347#ifdef ROAM_AP_ENV_DETECTION
11348 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
11349 if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
11350 sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
11351 dhd->roam_env_detection = TRUE;
11352 else
11353 dhd->roam_env_detection = FALSE;
11354 }
11355#endif /* ROAM_AP_ENV_DETECTION */
11356#endif /* ROAM_ENABLE */
11357
11358#ifdef CUSTOM_EVENT_PM_WAKE
11359 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
11360 sizeof(pm_awake_thresh), NULL, 0, TRUE);
11361 if (ret < 0) {
11362 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
11363 }
11364#endif /* CUSTOM_EVENT_PM_WAKE */
11365#ifdef OKC_SUPPORT
11366 ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
11367#endif // endif
11368#ifdef BCMCCX
11369 ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
11370#endif /* BCMCCX */
11371
11372#ifdef WLTDLS
11373 dhd->tdls_enable = FALSE;
11374 dhd_tdls_set_mode(dhd, false);
11375#endif /* WLTDLS */
11376
11377#ifdef DHD_ENABLE_LPC
11378 /* Set lpc 1 */
11379 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11380 if (ret < 0) {
11381 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
11382
11383 if (ret == BCME_NOTDOWN) {
11384 uint wl_down = 1;
11385 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11386 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11387 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
11388
11389 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11390 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
11391 }
11392 }
11393#endif /* DHD_ENABLE_LPC */
11394
11395#ifdef WLADPS
11396 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
11397 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
11398 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11399 __FUNCTION__, ret));
11400 }
11401 }
11402#endif /* WLADPS */
11403
11404#ifdef DHD_PM_CONTROL_FROM_FILE
11405 sec_control_pm(dhd, &power_mode);
11406#else
11407 /* Set PowerSave mode */
11408 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
11409#endif /* DHD_PM_CONTROL_FROM_FILE */
11410
11411#if defined(BCMSDIO)
11412 /* Match Host and Dongle rx alignment */
11413 ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
11414 NULL, 0, TRUE);
11415
11416#ifdef USE_WFA_CERT_CONF
11417 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
11418 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
11419 }
11420#endif /* USE_WFA_CERT_CONF */
11421 if (glom != DEFAULT_GLOM_VALUE) {
11422 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
11423 ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
11424 }
11425#endif /* defined(BCMSDIO) */
11426
11427	/* Set up a timeout to report link-down when beacons are lost and roaming is off */
11428 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0,
11429 TRUE);
11430
11431	/* Set up assoc_retry_max, the dongle's reconnect-attempt count for the target AP */
11432 ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0,
11433 TRUE);
11434
11435#if defined(AP) && !defined(WLP2P)
11436 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
11437
11438#endif /* defined(AP) && !defined(WLP2P) */
11439
11440#ifdef MIMO_ANT_SETTING
11441 dhd_sel_ant_from_file(dhd);
11442#endif /* MIMO_ANT_SETTING */
11443
11444#if defined(SOFTAP)
11445 if (ap_fw_loaded == TRUE) {
11446 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
11447 }
11448#endif // endif
11449
11450#if defined(KEEP_ALIVE)
11451 {
11452 /* Set Keep Alive : be sure to use FW with -keepalive */
11453 int res;
11454
11455#if defined(SOFTAP)
11456 if (ap_fw_loaded == FALSE)
11457#endif // endif
11458 if (!(dhd->op_mode &
11459 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
11460 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
11461				DHD_ERROR(("%s set keepalive failed %d\n",
11462 __FUNCTION__, res));
11463 }
11464 }
11465#endif /* defined(KEEP_ALIVE) */
11466
11467#ifdef USE_WL_TXBF
11468 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
11469 if (ret < 0)
11470 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
11471
11472#endif /* USE_WL_TXBF */
11473
11474 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
11475 0, TRUE);
11476 if (ret < 0) {
11477 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
11478 }
11479
11480#ifdef DISABLE_TXBFR
11481 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
11482 0, TRUE);
11483 if (ret < 0) {
11484 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
11485 }
11486#endif /* DISABLE_TXBFR */
11487
11488#ifdef USE_WFA_CERT_CONF
11489#ifdef USE_WL_FRAMEBURST
11490 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
11491 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
11492 }
11493#endif /* USE_WL_FRAMEBURST */
11494 g_frameburst = frameburst;
11495#endif /* USE_WFA_CERT_CONF */
11496#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11497	/* Disable framebursting for SoftAP */
11498 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
11499 frameburst = 0;
11500 }
11501#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11502	/* Set frameburst to the configured value */
11503 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
11504 sizeof(frameburst), TRUE, 0)) < 0) {
11505 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
11506 }
11507#ifdef DHD_SET_FW_HIGHSPEED
11508 /* Set ack_ratio */
11509 ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
11510 if (ret < 0) {
11511 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
11512 }
11513
11514 /* Set ack_ratio_depth */
11515 ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
11516 sizeof(ack_ratio_depth), NULL, 0, TRUE);
11517 if (ret < 0) {
11518 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
11519 }
11520#endif /* DHD_SET_FW_HIGHSPEED */
11521
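	/* Scratch buffer reused below for the AIBSS preinit (when compiled
	 * in) and for the event_msgs_ext read.
	 */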
11522 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
11523 if (iov_buf == NULL) {
11524 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
11525 ret = BCME_NOMEM;
11526 goto done;
11527 }
11528
11529#ifdef WLAIBSS
11530 /* Apply AIBSS configurations */
11531 if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
11532 DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
11533 __FUNCTION__, ret));
11534 goto done;
11535 }
11536#endif /* WLAIBSS */
11537
11538#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
11539 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
11540 /* Set ampdu ba wsize to 64 or 16 */
11541#ifdef CUSTOM_AMPDU_BA_WSIZE
11542 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
11543#endif // endif
11544#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
11545 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
11546 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
11547#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
11548 if (ampdu_ba_wsize != 0) {
11549 ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
11550 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
11551 if (ret < 0) {
11552 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11553 __FUNCTION__, ampdu_ba_wsize, ret));
11554 }
11555 }
11556#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
11557
11558#if defined(CUSTOM_AMPDU_MPDU)
11559 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
11560 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
11561 ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
11562 NULL, 0, TRUE);
11563 if (ret < 0) {
11564 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11565 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
11566 }
11567 }
11568#endif /* CUSTOM_AMPDU_MPDU */
11569
11570#if defined(CUSTOM_AMPDU_RELEASE)
11571 ampdu_release = CUSTOM_AMPDU_RELEASE;
11572 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
11573 ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
11574 sizeof(ampdu_release), NULL, 0, TRUE);
11575 if (ret < 0) {
11576 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11577 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
11578 }
11579 }
11580#endif /* CUSTOM_AMPDU_RELEASE */
11581
11582#if defined(CUSTOM_AMSDU_AGGSF)
11583 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
11584 if (amsdu_aggsf != 0) {
11585 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
11586 NULL, 0, TRUE);
11587 if (ret < 0) {
11588 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11589 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
11590 }
11591 }
11592#endif /* CUSTOM_AMSDU_AGGSF */
11593
11594#if defined(BCMSUP_4WAY_HANDSHAKE)
11595 /* Read 4-way handshake requirements */
11596 if (dhd_use_idsup == 1) {
11597 ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
11598 (char *)&iovbuf, sizeof(iovbuf), FALSE);
11599 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
11600 * in-dongle supplicant.
11601 */
11602 if (ret >= 0 || ret == BCME_NOTREADY)
11603 dhd->fw_4way_handshake = TRUE;
11604 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
11605 }
11606#endif /* BCMSUP_4WAY_HANDSHAKE */
11607#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11608 ret = dhd_iovar(dhd, 0, "vht_features", NULL, 0,
11609 (char *)&vht_features, sizeof(vht_features), FALSE);
11610 if (ret < 0) {
11611 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
11612 vht_features = 0;
11613 } else {
11614#ifdef SUPPORT_2G_VHT
11615 vht_features |= 0x3; /* 2G support */
11616#endif /* SUPPORT_2G_VHT */
11617#ifdef SUPPORT_5G_1024QAM_VHT
11618 vht_features |= 0x6; /* 5G 1024 QAM support */
11619#endif /* SUPPORT_5G_1024QAM_VHT */
11620 }
11621 if (vht_features) {
11622 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
11623 NULL, 0, TRUE);
11624 if (ret < 0) {
11625 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
11626
11627 if (ret == BCME_NOTDOWN) {
11628 uint wl_down = 1;
11629 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11630 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11631 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11632 " vht_features = 0x%x\n",
11633 __FUNCTION__, ret, vht_features));
11634
11635 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
11636 sizeof(vht_features), NULL, 0, TRUE);
11637
11638 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
11639 }
11640 }
11641 }
11642#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11643#ifdef DISABLE_11N_PROPRIETARY_RATES
11644 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
11645 TRUE);
11646 if (ret < 0) {
11647 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
11648 }
11649#endif /* DISABLE_11N_PROPRIETARY_RATES */
11650#ifdef DHD_DISABLE_VHTMODE
11651 dhd_disable_vhtmode(dhd);
11652#endif /* DHD_DISABLE_VHTMODE */
11653
11654 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
11655 NULL, 0, TRUE);
11656 if (ret < 0) {
11657 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
11658 }
11659#ifdef SUPPORT_SET_CAC
11660 ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
11661 if (ret < 0) {
11662 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
11663 }
11664#endif /* SUPPORT_SET_CAC */
11665#ifdef DHD_ULP
11666 /* Get the required details from dongle during preinit ioctl */
11667 dhd_ulp_preinit(dhd);
11668#endif /* DHD_ULP */
11669
11670 /* Read event_msgs mask */
11671 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
11672 sizeof(iovbuf), FALSE);
11673 if (ret < 0) {
11674 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
11675 goto done;
11676 }
11677 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
11678
11679 /* Setup event_msgs */
11680 setbit(eventmask, WLC_E_SET_SSID);
11681 setbit(eventmask, WLC_E_PRUNE);
11682 setbit(eventmask, WLC_E_AUTH);
11683 setbit(eventmask, WLC_E_AUTH_IND);
11684 setbit(eventmask, WLC_E_ASSOC);
11685 setbit(eventmask, WLC_E_REASSOC);
11686 setbit(eventmask, WLC_E_REASSOC_IND);
11687 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
11688 setbit(eventmask, WLC_E_DEAUTH);
11689 setbit(eventmask, WLC_E_DEAUTH_IND);
11690 setbit(eventmask, WLC_E_DISASSOC_IND);
11691 setbit(eventmask, WLC_E_DISASSOC);
11692 setbit(eventmask, WLC_E_JOIN);
11693 setbit(eventmask, WLC_E_START);
11694 setbit(eventmask, WLC_E_ASSOC_IND);
11695 setbit(eventmask, WLC_E_PSK_SUP);
11696 setbit(eventmask, WLC_E_LINK);
11697 setbit(eventmask, WLC_E_MIC_ERROR);
11698 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
11699 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
11700#ifdef LIMIT_BORROW
11701 setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
11702#endif // endif
11703#ifndef WL_CFG80211
11704 setbit(eventmask, WLC_E_PMKID_CACHE);
11705 setbit(eventmask, WLC_E_TXFAIL);
11706#endif // endif
11707 setbit(eventmask, WLC_E_JOIN_START);
11708 setbit(eventmask, WLC_E_SCAN_COMPLETE);
11709#ifdef DHD_DEBUG
11710 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
11711#endif // endif
11712#ifdef PNO_SUPPORT
11713 setbit(eventmask, WLC_E_PFN_NET_FOUND);
11714 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
11715 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
11716 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
11717#endif /* PNO_SUPPORT */
11718 /* enable dongle roaming event */
11719#ifdef WL_CFG80211
11720 setbit(eventmask, WLC_E_ROAM);
11721 setbit(eventmask, WLC_E_BSSID);
11722#endif /* WL_CFG80211 */
11723#ifdef BCMCCX
11724 setbit(eventmask, WLC_E_ADDTS_IND);
11725 setbit(eventmask, WLC_E_DELTS_IND);
11726#endif /* BCMCCX */
11727#ifdef WLTDLS
11728 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
11729#endif /* WLTDLS */
11730#ifdef RTT_SUPPORT
11731 setbit(eventmask, WLC_E_PROXD);
11732#endif /* RTT_SUPPORT */
11733#ifdef WL_CFG80211
11734 setbit(eventmask, WLC_E_ESCAN_RESULT);
11735 setbit(eventmask, WLC_E_AP_STARTED);
11736 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
11737 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11738 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
11739 }
11740#endif /* WL_CFG80211 */
11741#ifdef WLAIBSS
11742 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
11743#endif /* WLAIBSS */
11744
11745#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11746 if (dhd_logtrace_from_file(dhd)) {
11747 setbit(eventmask, WLC_E_TRACE);
11748 } else {
11749 clrbit(eventmask, WLC_E_TRACE);
11750 }
11751#elif defined(SHOW_LOGTRACE)
11752 setbit(eventmask, WLC_E_TRACE);
11753#else
11754 clrbit(eventmask, WLC_E_TRACE);
11755#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11756
11757 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
11758#ifdef CUSTOM_EVENT_PM_WAKE
11759 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
11760#endif /* CUSTOM_EVENT_PM_WAKE */
11761#ifdef DHD_LOSSLESS_ROAMING
11762 setbit(eventmask, WLC_E_ROAM_PREP);
11763#endif // endif
11764 /* nan events */
11765 setbit(eventmask, WLC_E_NAN);
11766#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11767 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11768#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11769
11770#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11771 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11772#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11773
11774 /* Write updated Event mask */
11775 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
11776 if (ret < 0) {
11777 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
11778 goto done;
11779 }
11780
11781	/* Build the event_msgs_ext iovar for events numbered above 128 */
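	/* ROUNDUP(WLC_E_LAST, NBBY)/NBBY is the number of bytes needed to
	 * hold one bit per event (NBBY = bits per byte).
	 */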
11782 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
11783 eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
11784 if (eventmask_msg == NULL) {
11785 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
11786 ret = BCME_NOMEM;
11787 goto done;
11788 }
11789 bzero(eventmask_msg, msglen);
11790 eventmask_msg->ver = EVENTMSGS_VER;
11791 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11792
11793 /* Read event_msgs_ext mask */
11794 ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
11795 WLC_IOCTL_SMLEN, FALSE);
11796
11797 if (ret2 == 0) { /* event_msgs_ext must be supported */
11798 bcopy(iov_buf, eventmask_msg, msglen);
11799#ifdef RSSI_MONITOR_SUPPORT
11800 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11801#endif /* RSSI_MONITOR_SUPPORT */
11802#ifdef GSCAN_SUPPORT
11803 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
11804 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
11805 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
11806 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
11807#endif /* GSCAN_SUPPORT */
11808 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11809#ifdef BT_WIFI_HANDOVER
11810 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
11811#endif /* BT_WIFI_HANDOVER */
11812#ifdef DBG_PKT_MON
11813 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
11814#endif /* DBG_PKT_MON */
11815#ifdef DHD_ULP
11816 setbit(eventmask_msg->mask, WLC_E_ULP);
11817#endif // endif
11818#ifdef WL_NATOE
11819 setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
11820#endif /* WL_NATOE */
11821#ifdef WL_NAN
11822 setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
11823#endif /* WL_NAN */
11824#ifdef SUPPORT_EVT_SDB_LOG
11825 setbit(eventmask_msg->mask, WLC_E_SDB_TRANSITION);
11826#endif /* SUPPORT_EVT_SDB_LOG */
11827 /* Write updated Event mask */
11828 eventmask_msg->ver = EVENTMSGS_VER;
11829 eventmask_msg->command = EVENTMSGS_SET_MASK;
11830 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11831 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
11832 TRUE);
11833 if (ret < 0) {
11834 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
11835 goto done;
11836 }
11837 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
11838 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11839		DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n",
11840 __FUNCTION__, ret2));
11841 } else {
11842 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
11843 ret = ret2;
11844 goto done;
11845 }
11846
11847#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11848	/* Enable event log tracing for EAP events */
11849 el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
11850 if (el_tag == NULL) {
11851		DHD_ERROR(("failed to allocate %d bytes for el_tag\n",
11852 (int)sizeof(wl_el_tag_params_t)));
11853 ret = BCME_NOMEM;
11854 goto done;
11855 }
11856 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
11857 el_tag->set = 1;
11858 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
11859 ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL, 0,
11860 TRUE);
11861#endif /* DHD_8021X_DUMP */
11862
11863 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
11864 sizeof(scan_assoc_time), TRUE, 0);
11865 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
11866 sizeof(scan_unassoc_time), TRUE, 0);
11867 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
11868 sizeof(scan_passive_time), TRUE, 0);
11869
11870#ifdef ARP_OFFLOAD_SUPPORT
11871 /* Set and enable ARP offload feature for STA only */
11872#if defined(SOFTAP)
11873 if (arpoe && !ap_fw_loaded) {
11874#else
11875 if (arpoe) {
11876#endif // endif
11877 dhd_arp_offload_enable(dhd, TRUE);
11878 dhd_arp_offload_set(dhd, dhd_arp_mode);
11879 } else {
11880 dhd_arp_offload_enable(dhd, FALSE);
11881 dhd_arp_offload_set(dhd, 0);
11882 }
11883 dhd_arp_enable = arpoe;
11884#endif /* ARP_OFFLOAD_SUPPORT */
11885
11886#ifdef PKT_FILTER_SUPPORT
11887	/* Set up default definitions for pktfilter, enabled in suspend */
11888 dhd->pktfilter_count = 6;
11889 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11890 if (!FW_SUPPORTED(dhd, pf6)) {
11891 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11892 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11893 } else {
11894 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11895 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11896 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11897 }
11898 /* apply APP pktfilter */
11899 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
11900
11901#ifdef BLOCK_IPV6_PACKET
11902 /* Setup filter to allow only IPv4 unicast frames */
11903 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
11904 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
11905 " "
11906 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
11907#else
11908 /* Setup filter to allow only unicast */
11909 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11910#endif /* BLOCK_IPV6_PACKET */
11911
11912#ifdef PASS_IPV4_SUSPEND
11913 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
11914#else
11915	/* Add filter to pass multicast DNS packets rather than filtering them out as broadcast */
11916 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11917#endif /* PASS_IPV4_SUSPEND */
11918 if (FW_SUPPORTED(dhd, pf6)) {
11919		/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
11920		dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
11921		/* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
11922		dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
11923		/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
11924 dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
11925 dhd->pktfilter_count = 10;
11926 }
11927
11928#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11929 dhd->pktfilter_count = 4;
11930 /* Setup filter to block broadcast and NAT Keepalive packets */
11931 /* discard all broadcast packets */
11932 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11933 /* discard NAT Keepalive packets */
11934 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11935 /* discard NAT Keepalive packets */
11936 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11937 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11938#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11939
11940#if defined(SOFTAP)
11941 if (ap_fw_loaded) {
11942 dhd_enable_packet_filter(0, dhd);
11943 }
11944#endif /* defined(SOFTAP) */
11945 dhd_set_packet_filter(dhd);
11946#endif /* PKT_FILTER_SUPPORT */
11947#ifdef DISABLE_11N
11948 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
11949 if (ret < 0)
11950 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
11951#endif /* DISABLE_11N */
11952
11953#ifdef ENABLE_BCN_LI_BCN_WAKEUP
11954 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0,
11955 TRUE);
11956#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11957#ifdef AMPDU_VO_ENABLE
11958 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
11959 tid.enable = TRUE;
11960 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11961
11962	tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
11963 tid.enable = TRUE;
11964 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11965#endif // endif
11966 /* query for 'clmver' to get clm version info from firmware */
11967 memset(buf, 0, sizeof(buf));
11968 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11969 if (ret < 0)
11970 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11971 else {
11972 char *ver_temp_buf = NULL;
11973
11974 if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11975 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11976 } else {
11977 ptr = (ver_temp_buf + strlen("Data:"));
11978 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
11979				DHD_ERROR(("Couldn't find newline character\n"));
11980 } else {
11981 memset(clm_version, 0, CLM_VER_STR_LEN);
11982 strncpy(clm_version, ver_temp_buf,
11983 MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN - 1));
11984 DHD_INFO(("CLM version = %s\n", clm_version));
11985 }
11986 }
11987
11988#if defined(CUSTOMER_HW4_DEBUG)
11989 if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
11990 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
11991 } else {
11992 char tokenlim;
11993 ptr = (ver_temp_buf + strlen("Customization:"));
11994 if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
11995				DHD_ERROR(("Couldn't find project blob version "
11996					"or newline character\n"));
11997 } else if (tokenlim == '(') {
11998 snprintf(clm_version,
11999 CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
12000 clm_version, ver_temp_buf);
12001 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
12002 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
12003					DHD_ERROR(("Couldn't find newline character\n"));
12004 } else {
12005 snprintf(clm_version,
12006 strlen(clm_version) + strlen(ver_temp_buf),
12007 "%s%s", clm_version, ver_temp_buf);
12008 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
12009 clm_version));
12010
12011 }
12012 } else if (tokenlim == '\n') {
12013 snprintf(clm_version,
12014 strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
12015 "%s, Blob ver = Major : ", clm_version);
12016 snprintf(clm_version,
12017 strlen(clm_version) + strlen(ver_temp_buf) + 1,
12018 "%s%s", clm_version, ver_temp_buf);
12019 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
12020 }
12021 }
12022#endif /* CUSTOMER_HW4_DEBUG */
12023 if (strlen(clm_version)) {
12024 DHD_ERROR(("CLM version = %s\n", clm_version));
12025 } else {
12026 DHD_ERROR(("Couldn't find CLM version!\n"));
12027 }
12028 }
12029
12030 /* query for 'ver' to get version info from firmware */
12031 memset(buf, 0, sizeof(buf));
12032 ptr = buf;
12033 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
12034 if (ret < 0)
12035 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
12036 else {
12037 bcmstrtok(&ptr, "\n", 0);
12038 /* Print fw version info */
12039 DHD_ERROR(("Firmware version = %s\n", buf));
12040 strncpy(fw_version, buf, FW_VER_STR_LEN);
12041 fw_version[FW_VER_STR_LEN-1] = '\0';
12042#if defined(BCMSDIO) || defined(BCMPCIE)
12043 dhd_set_version_info(dhd, buf);
12044#endif /* BCMSDIO || BCMPCIE */
12045#ifdef WRITE_WLANINFO
12046 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
12047#endif /* WRITE_WLANINFO */
12048 }
12049#ifdef GEN_SOFTAP_INFO_FILE
12050 sec_save_softap_info();
12051#endif /* GEN_SOFTAP_INFO_FILE */
12052
12053#if defined(BCMSDIO)
12054 dhd_txglom_enable(dhd, TRUE);
12055#endif /* defined(BCMSDIO) */
12056
12057#if defined(BCMSDIO)
12058#ifdef PROP_TXSTATUS
12059 if (disable_proptx ||
12060#ifdef PROP_TXSTATUS_VSDB
12061 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
12062 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
12063 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
12064#endif /* PROP_TXSTATUS_VSDB */
12065 FALSE) {
12066 wlfc_enable = FALSE;
12067 }
12068
12069#if defined(PROP_TXSTATUS)
12070#ifdef USE_WFA_CERT_CONF
12071 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
12072 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
12073 wlfc_enable = proptx;
12074 }
12075#endif /* USE_WFA_CERT_CONF */
12076#endif /* PROP_TXSTATUS */
12077
12078#ifndef DISABLE_11N
12079 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
12080 NULL, 0, TRUE);
12081 if (ret2 < 0) {
12082 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
12083 if (ret2 != BCME_UNSUPPORTED)
12084 ret = ret2;
12085
12086 if (ret == BCME_NOTDOWN) {
12087 uint wl_down = 1;
12088 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
12089 sizeof(wl_down), TRUE, 0);
12090 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
12091 __FUNCTION__, ret2, hostreorder));
12092
12093 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
12094 sizeof(hostreorder), NULL, 0, TRUE);
12095 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
12096 if (ret2 != BCME_UNSUPPORTED)
12097 ret = ret2;
12098 }
12099 if (ret2 != BCME_OK)
12100 hostreorder = 0;
12101 }
12102#endif /* DISABLE_11N */
12103
12104 if (wlfc_enable)
12105 dhd_wlfc_init(dhd);
12106#ifndef DISABLE_11N
12107 else if (hostreorder)
12108 dhd_wlfc_hostreorder_init(dhd);
12109#endif /* DISABLE_11N */
12110
12111#endif /* PROP_TXSTATUS */
12112#endif /* BCMSDIO */
12113#ifndef PCIE_FULL_DONGLE
12114 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
12115 if (FW_SUPPORTED(dhd, ap)) {
12116 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
12117 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
12118 NULL, 0, TRUE);
12119 if (ret < 0)
12120 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
12121 }
12122#endif /* PCIE_FULL_DONGLE */
12123#ifdef PNO_SUPPORT
12124 if (!dhd->pno_state) {
12125 dhd_pno_init(dhd);
12126 }
12127#endif // endif
12128#ifdef RTT_SUPPORT
12129 if (!dhd->rtt_state) {
12130 ret = dhd_rtt_init(dhd);
12131 if (ret < 0) {
12132 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
12133 }
12134 }
12135#endif // endif
12136#ifdef FILTER_IE
12137 /* Failure to configure filter IE is not a fatal error, ignore it. */
12138 if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
12139 dhd_read_from_file(dhd);
12140#endif /* FILTER_IE */
12141#ifdef WL11U
12142 dhd_interworking_enable(dhd);
12143#endif /* WL11U */
12144
12145#ifdef NDO_CONFIG_SUPPORT
12146 dhd->ndo_enable = FALSE;
12147 dhd->ndo_host_ip_overflow = FALSE;
12148 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
12149#endif /* NDO_CONFIG_SUPPORT */
12150
12151 /* ND offload version supported */
12152 dhd->ndo_version = dhd_ndo_get_version(dhd);
12153 if (dhd->ndo_version > 0) {
12154 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
12155
12156#ifdef NDO_CONFIG_SUPPORT
12157 /* enable Unsolicited NA filter */
12158 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
12159 if (ret < 0) {
12160 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
12161 }
12162#endif /* NDO_CONFIG_SUPPORT */
12163 }
12164
12165	/* check whether the dongle supports wbtext (product policy) */
12166 dhd->wbtext_support = FALSE;
12167 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
12168 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
12169 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12170 }
12171 dhd->wbtext_policy = wnm_bsstrans_resp;
12172 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
12173 dhd->wbtext_support = TRUE;
12174 }
12175#ifndef WBTEXT
12176 /* driver can turn off wbtext feature through makefile */
12177 if (dhd->wbtext_support) {
12178 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
12179 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
12180 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
12181 DHD_ERROR(("failed to disable WBTEXT\n"));
12182 }
12183 }
12184#endif /* !WBTEXT */
12185
12186#if defined(DHD_NON_DMA_M2M_CORRUPTION)
12187 /* check pcie non dma loopback */
12188 if (dhd->op_mode == DHD_FLAG_MFG_MODE) {
12189 memset(&pcie_dmaxfer_lpbk, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t));
12190 pcie_dmaxfer_lpbk.u.length = PCIE_DMAXFER_LPBK_LENGTH;
12191 pcie_dmaxfer_lpbk.lpbkmode = M2M_NON_DMA_LPBK;
12192 pcie_dmaxfer_lpbk.wait = TRUE;
12193
12194 if ((ret = dhd_bus_iovar_op(dhd, "pcie_dmaxfer", NULL, 0,
12195 (char *)&pcie_dmaxfer_lpbk, sizeof(dhd_pcie_dmaxfer_lpbk_t),
12196 IOV_SET)) < 0) {
12197			DHD_ERROR(("PCIe Non DMA Loopback Test could not be run, reason: %d\n",
12198				ret));
12199 goto done;
12200 }
12201
12202 if (pcie_dmaxfer_lpbk.u.status != BCME_OK) {
12203			DHD_ERROR(("PCIe Non DMA Loopback Test failed, reason: %d,"
12204				" status: %d\n", ret, pcie_dmaxfer_lpbk.u.status));
12205 ret = BCME_ERROR;
12206 goto done;
12207		} else {
12209			DHD_ERROR(("PCIe Non DMA Loopback Test passed\n"));
12210 }
12211 }
12212#endif /* DHD_NON_DMA_M2M_CORRUPTION */
12213
12214 /* WNM capabilities */
12215 wnm_cap = 0
12216#ifdef WL11U
12217 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
12218#endif // endif
12219#ifdef WBTEXT
12220 | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
12221#endif // endif
12222 ;
12223 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
12224 DHD_ERROR(("failed to set WNM capabilities\n"));
12225 }
12226
12227 if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
12228 if (dhd_start_ecounters(dhd) != BCME_OK) {
12229 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
12230 } else if (dhd_start_event_ecounters(dhd) != BCME_OK) {
12231 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
12232 }
12233
12234 }
12235
12236 /* store the preserve log set numbers */
12237 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
12238 != BCME_OK) {
12239 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
12240 }
12241
12242#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
12243 if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
12244 NULL, 0, TRUE) < 0) {
12245 DHD_ERROR(("failed to set BTM delta\n"));
12246 }
12247#endif /* WBTEXT && WBTEXT_BTMDELTA */
12248
12249#ifdef WL_MONITOR
12250 if (FW_SUPPORTED(dhd, monitor)) {
12251 dhd->monitor_enable = TRUE;
12252 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
12253 } else {
12254 dhd->monitor_enable = FALSE;
12255 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
12256 }
12257#endif /* WL_MONITOR */
12258
12259done:
12260
12261 if (eventmask_msg) {
12262 MFREE(dhd->osh, eventmask_msg, msglen);
12263 eventmask_msg = NULL;
12264 }
12265 if (iov_buf) {
12266 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
12267 iov_buf = NULL;
12268 }
12269#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12270 if (el_tag) {
12271 MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
12272 el_tag = NULL;
12273 }
12274#endif /* DHD_8021X_DUMP */
12275 return ret;
12276}
12277
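/*
 * dhd_iovar() - serialize and issue a named IOVAR to the firmware.
 * SET (set=TRUE): 'param_buf'/'param_len' carry the value; 'res_buf' must
 * be NULL and 'res_len' 0. GET (set=FALSE): the response is returned in
 * 'res_buf' (up to 'res_len' bytes); a temporary buffer is used when the
 * caller's buffer is too small to also hold the serialized "name\0params"
 * input. Typical SET usage, as seen throughout this file:
 *	ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
 *		sizeof(roamvar), NULL, 0, TRUE);
 */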
12278int
12279dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
12280 uint res_len, int set)
12281{
12282 char *buf = NULL;
12283 int input_len;
12284 wl_ioctl_t ioc;
12285 int ret;
12286
12287 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
12288 return BCME_BADARG;
12289
12290 input_len = strlen(name) + 1 + param_len;
12291 if (input_len > WLC_IOCTL_MAXLEN)
12292 return BCME_BADARG;
12293
12294 buf = NULL;
12295 if (set) {
12296 if (res_buf || res_len != 0) {
12297			DHD_ERROR(("%s: SET: wrong arguments\n", __FUNCTION__));
12298 ret = BCME_BADARG;
12299 goto exit;
12300 }
12301 buf = MALLOCZ(pub->osh, input_len);
12302 if (!buf) {
12303 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12304 ret = BCME_NOMEM;
12305 goto exit;
12306 }
12307 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12308 if (!ret) {
12309 ret = BCME_NOMEM;
12310 goto exit;
12311 }
12312
12313 ioc.cmd = WLC_SET_VAR;
12314 ioc.buf = buf;
12315 ioc.len = input_len;
12316 ioc.set = set;
12317
12318 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12319 } else {
12320 if (!res_buf || !res_len) {
12321 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
12322 ret = BCME_BADARG;
12323 goto exit;
12324 }
12325
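		/* bcm_mkiovar() must first write the "name\0params" request
		 * into the target buffer; when the caller's response buffer
		 * is smaller than that request, bounce through a temporary
		 * allocation and copy res_len bytes of the response back.
		 */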
12326 if (res_len < input_len) {
12327 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
12328 res_len, input_len));
12329 buf = MALLOCZ(pub->osh, input_len);
12330 if (!buf) {
12331 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12332 ret = BCME_NOMEM;
12333 goto exit;
12334 }
12335 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12336 if (!ret) {
12337 ret = BCME_NOMEM;
12338 goto exit;
12339 }
12340
12341 ioc.cmd = WLC_GET_VAR;
12342 ioc.buf = buf;
12343 ioc.len = input_len;
12344 ioc.set = set;
12345
12346 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12347
12348 if (ret == BCME_OK) {
12349 memcpy(res_buf, buf, res_len);
12350 }
12351 } else {
12352 memset(res_buf, 0, res_len);
12353 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
12354 if (!ret) {
12355 ret = BCME_NOMEM;
12356 goto exit;
12357 }
12358
12359 ioc.cmd = WLC_GET_VAR;
12360 ioc.buf = res_buf;
12361 ioc.len = res_len;
12362 ioc.set = set;
12363
12364 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12365 }
12366 }
12367exit:
12368 if (buf) {
12369 MFREE(pub->osh, buf, input_len);
12370 buf = NULL;
12371 }
12372 return ret;
12373}
12374
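/*
 * dhd_getiovar() - GET-only convenience wrapper: issues WLC_GET_VAR for
 * 'name' and returns the response in the caller-supplied buffer '*resptr'.
 */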
12375int
12376dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
12377 uint cmd_len, char **resptr, uint resp_len)
12378{
12379 int len = resp_len;
12380 int ret;
12381 char *buf = *resptr;
12382 wl_ioctl_t ioc;
12383 if (resp_len > WLC_IOCTL_MAXLEN)
12384 return BCME_BADARG;
12385
12386 memset(buf, 0, resp_len);
12387
12388 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
12389 if (ret == 0) {
12390 return BCME_BUFTOOSHORT;
12391 }
12392
12393 memset(&ioc, 0, sizeof(ioc));
12394
12395 ioc.cmd = WLC_GET_VAR;
12396 ioc.buf = buf;
12397 ioc.len = len;
12398 ioc.set = 0;
12399
12400 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12401
12402 return ret;
12403}
12404
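/* Change the MTU of interface 'ifidx'; the netdev must be down and the
 * new value must lie within [DHD_MIN_MTU, DHD_MAX_MTU].
 */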
12405int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
12406{
12407 struct dhd_info *dhd = dhdp->info;
12408 struct net_device *dev = NULL;
12409
12410 ASSERT(dhd && dhd->iflist[ifidx]);
12411 dev = dhd->iflist[ifidx]->net;
12412 ASSERT(dev);
12413
12414 if (netif_running(dev)) {
12415		DHD_ERROR(("%s: must be down to change its MTU\n", dev->name));
12416 return BCME_NOTDOWN;
12417 }
12418
12419#define DHD_MIN_MTU 1500
12420#define DHD_MAX_MTU 1752
12421
12422 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
12423 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
12424 return BCME_BADARG;
12425 }
12426
12427 dev->mtu = new_mtu;
12428 return 0;
12429}
12430
12431#ifdef ARP_OFFLOAD_SUPPORT
12432/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
12433void
12434aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
12435{
12436 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
12437 int i;
12438 int ret;
12439
12440 bzero(ipv4_buf, sizeof(ipv4_buf));
12441
12442 /* display what we've got */
12443 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12444 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
12445#ifdef AOE_DBG
12446 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12447#endif // endif
12448	/* now that the host_ip table is saved, clear it in the dongle AOE */
12449 dhd_aoe_hostip_clr(dhd_pub, idx);
12450
12451 if (ret) {
12452 DHD_ERROR(("%s failed\n", __FUNCTION__));
12453 return;
12454 }
12455
12456 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
12457 if (add && (ipv4_buf[i] == 0)) {
12458 ipv4_buf[i] = ipa;
12459 add = FALSE; /* added ipa to local table */
12460 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12461 __FUNCTION__, i));
12462 } else if (ipv4_buf[i] == ipa) {
12463 ipv4_buf[i] = 0;
12464 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12465 __FUNCTION__, ipa, i));
12466 }
12467
12468 if (ipv4_buf[i] != 0) {
12469 /* add back host_ip entries from our local cache */
12470 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
12471 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12472 __FUNCTION__, ipv4_buf[i], i));
12473 }
12474 }
12475#ifdef AOE_DBG
12476 /* see the resulting hostip table */
12477 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12478 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
12479 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12480#endif // endif
12481}
12482
12483/*
12484 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
12485 * whenever there is an event related to an IP address.
12486 * ptr : kernel provided pointer to IP address that has changed
12487 */
12488static int dhd_inetaddr_notifier_call(struct notifier_block *this,
12489 unsigned long event,
12490 void *ptr)
12491{
12492 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
12493
12494 dhd_info_t *dhd;
12495 dhd_pub_t *dhd_pub;
12496 int idx;
12497
12498 if (!dhd_arp_enable)
12499 return NOTIFY_DONE;
12500 if (!ifa || !(ifa->ifa_dev->dev))
12501 return NOTIFY_DONE;
12502
12503#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12504 /* Filter notifications meant for non-Broadcom devices */
12505 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
12506 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
12507#if defined(WL_ENABLE_P2P_IF)
12508 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
12509#endif /* WL_ENABLE_P2P_IF */
12510 return NOTIFY_DONE;
12511 }
12512#endif /* LINUX_VERSION_CODE */
12513
12514 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
12515 if (!dhd)
12516 return NOTIFY_DONE;
12517
12518 dhd_pub = &dhd->pub;
12519
12520 if (dhd_pub->arp_version == 1) {
12521 idx = 0;
12522 } else {
12523 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
12524 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
12525 break;
12526 }
12527 if (idx < DHD_MAX_IFS)
12528 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
12529 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
12530 else {
12531 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
12532 idx = 0;
12533 }
12534 }
12535
12536 switch (event) {
12537 case NETDEV_UP:
12538 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
12539 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12540
12541 /*
12542 * Skip if the bus is not in a state to transport the IOVAR,
12543 * or the dongle is not ready.
12544 */
12545 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
12546 dhd->pub.busstate == DHD_BUS_LOAD) {
12547 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
12548 __FUNCTION__, dhd->pub.busstate));
12549 if (dhd->pend_ipaddr) {
12550 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
12551 __FUNCTION__, dhd->pend_ipaddr));
12552 }
12553 dhd->pend_ipaddr = ifa->ifa_address;
12554 break;
12555 }
12556
12557#ifdef AOE_IP_ALIAS_SUPPORT
12558 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
12559 __FUNCTION__));
12560 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
12561#endif /* AOE_IP_ALIAS_SUPPORT */
12562 break;
12563
12564 case NETDEV_DOWN:
12565 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
12566 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12567 dhd->pend_ipaddr = 0;
12568#ifdef AOE_IP_ALIAS_SUPPORT
12569 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
12570 __FUNCTION__));
12571 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
12572 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
12573 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
12574 } else
12575#endif /* AOE_IP_ALIAS_SUPPORT */
12576 {
12577 dhd_aoe_hostip_clr(&dhd->pub, idx);
12578 dhd_aoe_arp_clr(&dhd->pub, idx);
12579 }
12580 break;
12581
12582 default:
12583 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
12584 __func__, ifa->ifa_label, event));
12585 break;
12586 }
12587 return NOTIFY_DONE;
12588}
12589#endif /* ARP_OFFLOAD_SUPPORT */
12590
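/*
 * How the callback above is delivered (sketch; the driver's real
 * notifier_block, dhd_inetaddr_notifier, is defined elsewhere in this
 * file): it is hooked into the kernel's IPv4 address notifier chain.
 */
#if 0
static struct notifier_block example_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call,
};

/* at attach time */
register_inetaddr_notifier(&example_inetaddr_notifier);
/* at detach time (see dhd_detach below) */
unregister_inetaddr_notifier(&example_inetaddr_notifier);
#endif
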
12591#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12592/* Neighbor Discovery Offload: deferred handler */
12593static void
12594dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
12595{
12596 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
12597 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
12598 dhd_pub_t *dhdp;
12599 int ret;
12600
12601 if (!dhd) {
12602 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
12603 goto done;
12604 }
12605 dhdp = &dhd->pub;
12606
12607 if (event != DHD_WQ_WORK_IPV6_NDO) {
12608 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
12609 goto done;
12610 }
12611
12612 if (!ndo_work) {
12613 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
12614 return;
12615 }
12616
12617 switch (ndo_work->event) {
12618 case NETDEV_UP:
12619#ifndef NDO_CONFIG_SUPPORT
12620 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
12621 ret = dhd_ndo_enable(dhdp, TRUE);
12622 if (ret < 0) {
12623 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
12624 }
12625#endif /* !NDO_CONFIG_SUPPORT */
12626 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
12627 if (dhdp->ndo_version > 0) {
12628 /* inet6 addr notifier called only for unicast address */
12629 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
12630 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
12631 } else {
12632 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
12633 ndo_work->if_idx);
12634 }
12635 if (ret < 0) {
12636 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
12637 __FUNCTION__, ret));
12638 }
12639 break;
12640 case NETDEV_DOWN:
12641 if (dhdp->ndo_version > 0) {
12642 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
12643 ret = dhd_ndo_remove_ip_by_addr(dhdp,
12644 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
12645 } else {
12646 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
12647 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
12648 }
12649 if (ret < 0) {
12650 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
12651 __FUNCTION__, ret));
12652 goto done;
12653 }
12654#ifdef NDO_CONFIG_SUPPORT
12655 if (dhdp->ndo_host_ip_overflow) {
12656 ret = dhd_dev_ndo_update_inet6addr(
12657 dhd_idx2net(dhdp, ndo_work->if_idx));
12658 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
12659 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
12660 __FUNCTION__, ret));
12661 goto done;
12662 }
12663 }
12664#else /* !NDO_CONFIG_SUPPORT */
12665 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
12666 ret = dhd_ndo_enable(dhdp, FALSE);
12667 if (ret < 0) {
12668 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
12669 goto done;
12670 }
12671#endif /* NDO_CONFIG_SUPPORT */
12672 break;
12673
12674 default:
12675 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
12676 break;
12677 }
12678done:
12679
12680 /* free ndo_work, allocated while scheduling the work */
12681 if (ndo_work) {
12682 kfree(ndo_work);
12683 }
12684
12685 return;
12686} /* dhd_inet6_work_handler */
12687
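/*
 * Plain-workqueue equivalent of the deferred scheme above (sketch only; the
 * driver actually routes this through its own dhd_deferred_wq so that the
 * dongle IOVAR, which may sleep, never runs in the atomic notifier context):
 */
#if 0
struct example_ndo_work {
	struct work_struct work;
	struct in6_addr addr;
};

static void example_ndo_worker(struct work_struct *work)
{
	struct example_ndo_work *w =
		container_of(work, struct example_ndo_work, work);
	/* ... program the dongle here; sleeping is allowed ... */
	kfree(w);
}

/* from the (atomic) notifier callback: */
	struct example_ndo_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);
	INIT_WORK(&w->work, example_ndo_worker);
	schedule_work(&w->work);
#endif
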
12688/*
12689 * Neighbor Discovery Offload: Called when an interface
12690 * is assigned an IPv6 address.
12691 * Handles only primary interface
12692 */
12693int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
12694{
12695 dhd_info_t *dhd;
12696 dhd_pub_t *dhdp;
12697 struct inet6_ifaddr *inet6_ifa = ptr;
12698 struct ipv6_work_info_t *ndo_info;
12699 int idx;
12700
12701#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12702 /* Filter notifications meant for non-Broadcom devices */
12703 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
12704 return NOTIFY_DONE;
12705 }
12706#endif /* LINUX_VERSION_CODE */
12707
12708 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
12709 if (!dhd) {
12710 return NOTIFY_DONE;
12711 }
12712 dhdp = &dhd->pub;
12713
12714 /* Supports only primary interface */
12715 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
12716 if (idx != 0) {
12717 return NOTIFY_DONE;
12718 }
12719
12720 /* FW capability */
12721 if (!FW_SUPPORTED(dhdp, ndoe)) {
12722 return NOTIFY_DONE;
12723 }
12724
12725 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
12726 if (!ndo_info) {
12727 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
12728 return NOTIFY_DONE;
12729 }
12730
12731 /* fill up ndo_info */
12732 ndo_info->event = event;
12733 ndo_info->if_idx = idx;
12734 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
12735
12736 /* defer the work to a thread as it may block the kernel */
12737 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
12738 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
12739 return NOTIFY_DONE;
12740}
12741#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12742
12743/* Network attach to be invoked from the bus probe handlers */
12744int
12745dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
12746{
12747 struct net_device *primary_ndev;
12748 BCM_REFERENCE(primary_ndev);
12749
12750 /* Register primary net device */
12751 if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
12752 return BCME_ERROR;
12753 }
12754
12755#if defined(WL_CFG80211)
12756 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
12757 if (wl_cfg80211_net_attach(primary_ndev) < 0) {
12758 /* fail the init */
12759 dhd_remove_if(dhdp, 0, TRUE);
12760 return BCME_ERROR;
12761 }
12762#endif /* WL_CFG80211 */
12763 return BCME_OK;
12764}
12765
12766int
12767dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
12768{
12769 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12770 dhd_if_t *ifp;
12771 struct net_device *net = NULL;
12772 int err = 0;
12773 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12774
12775 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
12776
12777 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
12778 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
12779 return BCME_ERROR;
12780 }
12781
12782 ASSERT(dhd && dhd->iflist[ifidx]);
12783 ifp = dhd->iflist[ifidx];
12784 net = ifp->net;
12785 ASSERT(net && (ifp->idx == ifidx));
12786
12787#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12788 ASSERT(!net->open);
12789 net->get_stats = dhd_get_stats;
12790#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
12791 net->do_ioctl = dhd_ioctl_entry_wrapper;
12792 net->hard_start_xmit = dhd_start_xmit_wrapper;
12793#else
12794 net->do_ioctl = dhd_ioctl_entry;
12795 net->hard_start_xmit = dhd_start_xmit;
12796#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
12797
12798 net->set_mac_address = dhd_set_mac_address;
12799 net->set_multicast_list = dhd_set_multicast_list;
12800 net->open = net->stop = NULL;
12801#else
12802 ASSERT(!net->netdev_ops);
12803 net->netdev_ops = &dhd_ops_virt;
12804#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12805
12806 /* Ok, link into the network layer... */
12807 if (ifidx == 0) {
12808 /*
12809 * device functions for the primary interface only
12810 */
12811#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12812 net->open = dhd_pri_open;
12813 net->stop = dhd_pri_stop;
12814#else
12815 net->netdev_ops = &dhd_ops_pri;
12816#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12817 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
12818 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12819 } else {
12820 /*
12821 * We have to use the primary MAC for virtual interfaces
12822 */
12823 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
12824 /*
12825 * Android sets the locally administered bit to indicate that this is a
12826 * portable hotspot. This will not work in simultaneous AP/STA mode,
12827 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
12828 */
12829 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
12830 ETHER_ADDR_LEN)) {
12831 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12832 __func__, net->name));
12833 temp_addr[0] |= 0x02;
12834 }
12835 }
12836
12837 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
12838#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
12839 net->ethtool_ops = &dhd_ethtool_ops;
12840#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
12841
12842#if defined(WL_WIRELESS_EXT)
12843#if WIRELESS_EXT < 19
12844 net->get_wireless_stats = dhd_get_wireless_stats;
12845#endif /* WIRELESS_EXT < 19 */
12846#if WIRELESS_EXT > 12
12847 net->wireless_handlers = &wl_iw_handler_def;
12848#endif /* WIRELESS_EXT > 12 */
12849#endif /* defined(WL_WIRELESS_EXT) */
12850
12851 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
12852
12853 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
12854
12855 if (ifidx == 0)
12856 printf("%s\n", dhd_version);
12857
12858 if (need_rtnl_lock)
12859 err = register_netdev(net);
12860 else
12861 err = register_netdevice(net);
12862
12863 if (err != 0) {
12864 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
12865 goto fail;
12866 }
12867
12868 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
12869#if defined(CUSTOMER_HW4_DEBUG)
12870 MAC2STRDBG(dhd->pub.mac.octet));
12871#else
12872 MAC2STRDBG(net->dev_addr));
12873#endif /* CUSTOMER_HW4_DEBUG */
12874
12875#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
12876 wl_iw_iscan_set_scan_broadcast_prep(net, 1);
12877#endif // endif
12878
12879#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
12880 KERNEL_VERSION(2, 6, 27))))
12881 if (ifidx == 0) {
12882#ifdef BCMLXSDMMC
12883 up(&dhd_registration_sem);
12884#endif /* BCMLXSDMMC */
12885 if (!dhd_download_fw_on_driverload) {
12886#ifdef WL_CFG80211
12887 wl_terminate_event_handler(net);
12888#endif /* WL_CFG80211 */
12889#if defined(DHD_LB_RXP)
12890 __skb_queue_purge(&dhd->rx_pend_queue);
12891#endif /* DHD_LB_RXP */
12892
12893#if defined(DHD_LB_TXP)
12894 skb_queue_purge(&dhd->tx_pend_queue);
12895#endif /* DHD_LB_TXP */
12896
12897#ifdef SHOW_LOGTRACE
12898 /* Release the skbs from queue for WLC_E_TRACE event */
12899 dhd_event_logtrace_flush_queue(dhdp);
12900#endif /* SHOW_LOGTRACE */
12901
12902#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
12903 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
12904#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
12905 dhd_net_bus_devreset(net, TRUE);
12906#ifdef BCMLXSDMMC
12907 dhd_net_bus_suspend(net);
12908#endif /* BCMLXSDMMC */
12909 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
12910#if defined(BT_OVER_SDIO)
12911 dhd->bus_user_count--;
12912#endif /* BT_OVER_SDIO */
12913 }
12914 }
12915#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
12916 return 0;
12917
12918fail:
12919#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
12920 net->open = NULL;
12921#else
12922 net->netdev_ops = NULL;
12923#endif // endif
12924 return err;
12925}
12926
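/*
 * For kernels >= 2.6.31 the per-field assignments above are replaced by one
 * net_device_ops table. Sketch of the shape of dhd_ops_pri (the real table
 * is defined elsewhere in this file; the fields shown are the ones hooked
 * up individually for older kernels):
 */
#if 0
static const struct net_device_ops example_pri_ops = {
	.ndo_open            = dhd_pri_open,
	.ndo_stop            = dhd_pri_stop,
	.ndo_start_xmit      = dhd_start_xmit,
	.ndo_do_ioctl        = dhd_ioctl_entry,
	.ndo_set_mac_address = dhd_set_mac_address,
};
#endif
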
12927void
12928dhd_bus_detach(dhd_pub_t *dhdp)
12929{
12930 dhd_info_t *dhd;
12931
12932 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12933
12934 if (dhdp) {
12935 dhd = (dhd_info_t *)dhdp->info;
12936 if (dhd) {
12937
12938 /*
12939 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
12940 * calling stop again will cause SD read/write errors.
12941 */
12942 if (dhd->pub.busstate != DHD_BUS_DOWN) {
12943 /* Stop the protocol module */
12944 dhd_prot_stop(&dhd->pub);
12945
12946 /* Stop the bus module */
12947 dhd_bus_stop(dhd->pub.bus, TRUE);
12948 }
12949
12950#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12951 dhd_bus_oob_intr_unregister(dhdp);
12952#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12953 }
12954 }
12955}
12956
12957void dhd_detach(dhd_pub_t *dhdp)
12958{
12959 dhd_info_t *dhd;
12960 unsigned long flags;
12961 int timer_valid = FALSE;
12962 struct net_device *dev;
12963#ifdef WL_CFG80211
12964 struct bcm_cfg80211 *cfg = NULL;
12965#endif // endif
12966 if (!dhdp)
12967 return;
12968
12969 dhd = (dhd_info_t *)dhdp->info;
12970 if (!dhd)
12971 return;
12972
12973 dev = dhd->iflist[0]->net;
12974
12975 if (dev) {
12976 rtnl_lock();
12977 if (dev->flags & IFF_UP) {
12978 /* If IFF_UP is still up, it indicates that
12979 * "ifconfig wlan0 down" hasn't been called.
12980 * So invoke dev_close explicitly here to
12981 * bring down the interface.
12982 */
12983 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12984 dev_close(dev);
12985 }
12986 rtnl_unlock();
12987 }
12988
12989 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
12990
12991 dhd->pub.up = 0;
12992 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
12993 /* Give sufficient time for threads to start running in case
12994 * dhd_attach() has failed
12995 */
12996 OSL_SLEEP(100);
12997 }
12998#ifdef DHD_WET
12999 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
13000#endif /* DHD_WET */
13001#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
13002#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
13003
13004#ifdef PROP_TXSTATUS
13005#ifdef DHD_WLFC_THREAD
13006 if (dhd->pub.wlfc_thread) {
13007 kthread_stop(dhd->pub.wlfc_thread);
13008 dhdp->wlfc_thread_go = TRUE;
13009 wake_up_interruptible(&dhdp->wlfc_wqhead);
13010 }
13011 dhd->pub.wlfc_thread = NULL;
13012#endif /* DHD_WLFC_THREAD */
13013#endif /* PROP_TXSTATUS */
13014
13015#ifdef WL_CFG80211
13016 if (dev)
13017 wl_cfg80211_down(dev);
13018#endif /* WL_CFG80211 */
13019
13020 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
13021
13022 dhd_bus_detach(dhdp);
13023#ifdef BCMPCIE
13024 if (is_reboot == SYS_RESTART) {
13025 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
13026 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
13027 dhdpcie_bus_clock_stop(dhdp->bus);
13028 wifi_platform_set_power(dhd_wifi_platdata->adapters,
13029 FALSE, WIFI_TURNOFF_DELAY);
13030 }
13031 }
13032#endif /* BCMPCIE */
13033#ifndef PCIE_FULL_DONGLE
13034 if (dhdp->prot)
13035 dhd_prot_detach(dhdp);
13036#endif /* !PCIE_FULL_DONGLE */
13037 }
13038
13039#ifdef ARP_OFFLOAD_SUPPORT
13040 if (dhd_inetaddr_notifier_registered) {
13041 dhd_inetaddr_notifier_registered = FALSE;
13042 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
13043 }
13044#endif /* ARP_OFFLOAD_SUPPORT */
13045#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
13046 if (dhd_inet6addr_notifier_registered) {
13047 dhd_inet6addr_notifier_registered = FALSE;
13048 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
13049 }
13050#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
13051#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
13052 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
13053 if (dhd->early_suspend.suspend)
13054 unregister_early_suspend(&dhd->early_suspend);
13055 }
13056#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
13057
13058#if defined(WL_WIRELESS_EXT)
13059 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
13060 /* Detach and unlink in the iw */
13061 wl_iw_detach();
13062 }
13063#endif /* defined(WL_WIRELESS_EXT) */
13064
13065#ifdef DHD_ULP
13066 dhd_ulp_deinit(dhd->pub.osh, dhdp);
13067#endif /* DHD_ULP */
13068
13069 /* delete all interfaces, start with virtual */
13070 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
13071 int i = 1;
13072 dhd_if_t *ifp;
13073
13074 /* Cleanup virtual interfaces */
13075 dhd_net_if_lock_local(dhd);
13076 for (i = 1; i < DHD_MAX_IFS; i++) {
13077 if (dhd->iflist[i]) {
13078 dhd_remove_if(&dhd->pub, i, TRUE);
13079 }
13080 }
13081 dhd_net_if_unlock_local(dhd);
13082
13083 /* delete primary interface 0 */
13084 ifp = dhd->iflist[0];
13085 if (ifp && ifp->net) {
13086
13087#ifdef WL_CFG80211
13088 cfg = wl_get_cfg(ifp->net);
13089#endif // endif
13090 /* in unregister_netdev case, the interface gets freed by net->destructor
13091 * (which is set to free_netdev)
13092 */
13093 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
13094 free_netdev(ifp->net);
13095 } else {
13096#if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
13097 defined(ARGOS_NOTIFY_CB)
13098 argos_register_notifier_deinit();
13099#endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
13100#ifdef SET_RPS_CPUS
13101 custom_rps_map_clear(ifp->net->_rx);
13102#endif /* SET_RPS_CPUS */
13103 netif_tx_disable(ifp->net);
13104 unregister_netdev(ifp->net);
13105 }
13106#ifdef PCIE_FULL_DONGLE
13107 ifp->net = DHD_NET_DEV_NULL;
13108#else
13109 ifp->net = NULL;
13110#endif /* PCIE_FULL_DONGLE */
13111
13112#ifdef DHD_L2_FILTER
13113 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
13114 NULL, FALSE, dhdp->tickcnt);
13115 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
13116 ifp->phnd_arp_table = NULL;
13117#endif /* DHD_L2_FILTER */
13118
13119 dhd_if_del_sta_list(ifp);
13120
13121 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
13122 dhd->iflist[0] = NULL;
13123 }
13124 }
13125
13126 /* Clear the watchdog timer */
13127 DHD_GENERAL_LOCK(&dhd->pub, flags);
13128 timer_valid = dhd->wd_timer_valid;
13129 dhd->wd_timer_valid = FALSE;
13130 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
13131 if (timer_valid)
13132 del_timer_sync(&dhd->timer);
13133 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
13134
13135 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
13136#ifdef DHD_PCIE_RUNTIMEPM
13137 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
13138 PROC_STOP(&dhd->thr_rpm_ctl);
13139 }
13140#endif /* DHD_PCIE_RUNTIMEPM */
13141 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
13142 PROC_STOP(&dhd->thr_wdt_ctl);
13143 }
13144
13145 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
13146 PROC_STOP(&dhd->thr_rxf_ctl);
13147 }
13148
13149 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
13150 PROC_STOP(&dhd->thr_dpc_ctl);
13151 } else
13152 {
13153 tasklet_kill(&dhd->tasklet);
13154 }
13155 }
13156
13157#ifdef WL_NATOE
13158 if (dhd->pub.nfct) {
13159 dhd_ct_close(dhd->pub.nfct);
13160 }
13161#endif /* WL_NATOE */
13162
13163#ifdef DHD_LB
13164 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
13165 /* Clear the flag first to avoid calling the cpu notifier */
13166 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
13167
13168 /* Kill the Load Balancing Tasklets */
13169#ifdef DHD_LB_RXP
13170 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
13171 __skb_queue_purge(&dhd->rx_pend_queue);
13172#endif /* DHD_LB_RXP */
13173#ifdef DHD_LB_TXP
13174 cancel_work_sync(&dhd->tx_dispatcher_work);
13175 tasklet_kill(&dhd->tx_tasklet);
13176 __skb_queue_purge(&dhd->tx_pend_queue);
13177#endif /* DHD_LB_TXP */
13178#ifdef DHD_LB_TXC
13179 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
13180 tasklet_kill(&dhd->tx_compl_tasklet);
13181#endif /* DHD_LB_TXC */
13182#ifdef DHD_LB_RXC
13183 tasklet_kill(&dhd->rx_compl_tasklet);
13184#endif /* DHD_LB_RXC */
13185
13186 if (dhd->cpu_notifier.notifier_call != NULL) {
13187 unregister_cpu_notifier(&dhd->cpu_notifier);
13188 }
13189 dhd_cpumasks_deinit(dhd);
13190 DHD_LB_STATS_DEINIT(&dhd->pub);
13191 }
13192#endif /* DHD_LB */
13193
13194 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
13195
13196#ifdef WL_CFG80211
13197 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
13198 if (!cfg) {
13199 DHD_ERROR(("cfg NULL!\n"));
13200 ASSERT(0);
13201 } else {
13202 wl_cfg80211_detach(cfg);
13203 dhd_monitor_uninit();
13204 }
13205 }
13206#endif // endif
13207
13208#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13209 destroy_workqueue(dhd->tx_wq);
13210 dhd->tx_wq = NULL;
13211 destroy_workqueue(dhd->rx_wq);
13212 dhd->rx_wq = NULL;
13213#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13214#ifdef DEBUGABILITY
13215 if (dhdp->dbg) {
13216#ifdef DBG_PKT_MON
13217 dhd_os_dbg_detach_pkt_monitor(dhdp);
13218 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
13219#endif /* DBG_PKT_MON */
13220 dhd_os_dbg_detach(dhdp);
13221 }
13222#endif /* DEBUGABILITY */
13223#ifdef DHD_PKT_LOGGING
13224 dhd_os_detach_pktlog(dhdp);
13225#endif /* DHD_PKT_LOGGING */
13226#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13227 if (dhd->pub.hang_info) {
13228 MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
13229 }
13230#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13231#ifdef SHOW_LOGTRACE
13232 /* Release the skbs from queue for WLC_E_TRACE event */
13233 dhd_event_logtrace_flush_queue(dhdp);
13234
13235 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
13236 if (dhd->event_data.fmts) {
13237 MFREE(dhd->pub.osh, dhd->event_data.fmts,
13238 dhd->event_data.fmts_size);
13239 dhd->event_data.fmts = NULL;
13240 }
13241 if (dhd->event_data.raw_fmts) {
13242 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
13243 dhd->event_data.raw_fmts_size);
13244 dhd->event_data.raw_fmts = NULL;
13245 }
13246 if (dhd->event_data.raw_sstr) {
13247 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
13248 dhd->event_data.raw_sstr_size);
13249 dhd->event_data.raw_sstr = NULL;
13250 }
13251 if (dhd->event_data.rom_raw_sstr) {
13252 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
13253 dhd->event_data.rom_raw_sstr_size);
13254 dhd->event_data.rom_raw_sstr = NULL;
13255 }
13256 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
13257 }
13258#endif /* SHOW_LOGTRACE */
13259#ifdef PNO_SUPPORT
13260 if (dhdp->pno_state)
13261 dhd_pno_deinit(dhdp);
13262#endif // endif
13263#ifdef RTT_SUPPORT
13264 if (dhdp->rtt_state) {
13265 dhd_rtt_deinit(dhdp);
13266 }
13267#endif // endif
13268#if defined(CONFIG_PM_SLEEP)
13269 if (dhd_pm_notifier_registered) {
13270 unregister_pm_notifier(&dhd->pm_notifier);
13271 dhd_pm_notifier_registered = FALSE;
13272 }
13273#endif /* CONFIG_PM_SLEEP */
13274
13275#ifdef DEBUG_CPU_FREQ
13276 if (dhd->new_freq)
13277 free_percpu(dhd->new_freq);
13278 dhd->new_freq = NULL;
13279 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
13280#endif // endif
13281#ifdef CONFIG_HAS_WAKELOCK
13282 dhd->wakelock_wd_counter = 0;
13283 wake_lock_destroy(&dhd->wl_wdwake);
13284#endif /* CONFIG_HAS_WAKELOCK */
13285 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
13286 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
13287 DHD_OS_WAKE_LOCK_DESTROY(dhd);
13288 }
13289
13290#ifdef ARGOS_CPU_SCHEDULER
13291 if (dhd->pub.affinity_isdpc == TRUE) {
13292 free_cpumask_var(dhd->pub.default_cpu_mask);
13293 free_cpumask_var(dhd->pub.dpc_affinity_cpu_mask);
13294 dhd->pub.affinity_isdpc = FALSE;
13295 }
13296#endif /* ARGOS_CPU_SCHEDULER */
13297
13298#ifdef DHDTCPACK_SUPPRESS
13299 /* This will free all MEM allocated for TCPACK SUPPRESS */
13300 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
13301#endif /* DHDTCPACK_SUPPRESS */
13302
13303#ifdef PCIE_FULL_DONGLE
13304 dhd_flow_rings_deinit(dhdp);
13305 if (dhdp->prot)
13306 dhd_prot_detach(dhdp);
13307#endif // endif
13308
13309#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13310 dhd_free_tdls_peer_list(dhdp);
13311#endif // endif
13312
13313#ifdef DUMP_IOCTL_IOV_LIST
13314 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
13315#endif /* DUMP_IOCTL_IOV_LIST */
13316#ifdef DHD_DEBUG
13317 /* memory waste feature list deletion */
13318 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
13319#endif /* DHD_DEBUG */
13320#ifdef WL_MONITOR
13321 dhd_del_monitor_if(dhd);
13322#endif /* WL_MONITOR */
13323
13324#ifdef DHD_ERPOM
13325 if (dhdp->enable_erpom) {
13326 dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
13327 }
13328#endif /* DHD_ERPOM */
13329
13330 cancel_work_sync(&dhd->dhd_hang_process_work);
13331
13332 /* Prefer adding de-init code above this comment unless necessary.
13333 * The idea is to tear down the work queue, sysfs entries and flags at the end.
13334 */
13335 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
13336 dhd->dhd_deferred_wq = NULL;
13337
13338 /* log dump related buffers should be freed after wq is purged */
13339#ifdef DHD_LOG_DUMP
13340 dhd_log_dump_deinit(&dhd->pub);
13341#endif /* DHD_LOG_DUMP */
13342#if defined(BCMPCIE)
13343 if (dhdp->extended_trap_data)
13344 {
13345 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
13346 dhdp->extended_trap_data = NULL;
13347 }
13348#endif /* BCMPCIE */
13349
13350#ifdef SHOW_LOGTRACE
13351 /* Wait till event_log_dispatcher_work finishes */
13352 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
13353 mutex_lock(&dhd->pub.dhd_trace_lock);
13354 remove_proc_entry("dhd_trace", NULL);
13355 mutex_unlock(&dhd->pub.dhd_trace_lock);
13356#endif /* SHOW_LOGTRACE */
13357
13358#ifdef DHD_DUMP_MNGR
13359 if (dhd->pub.dump_file_manage) {
13360 MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
13361 sizeof(dhd_dump_file_manage_t));
13362 }
13363#endif /* DHD_DUMP_MNGR */
13364 dhd_sysfs_exit(dhd);
13365 dhd->pub.fw_download_done = FALSE;
13366
13367#if defined(BT_OVER_SDIO)
13368 mutex_destroy(&dhd->bus_user_lock);
13369#endif /* BT_OVER_SDIO */
13370
13371} /* dhd_detach */
13372
13373void
13374dhd_free(dhd_pub_t *dhdp)
13375{
13376 dhd_info_t *dhd;
13377 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13378
13379 if (dhdp) {
13380 int i;
13381 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
13382 if (dhdp->reorder_bufs[i]) {
13383 reorder_info_t *ptr;
13384 uint32 buf_size = sizeof(struct reorder_info);
13385
13386 ptr = dhdp->reorder_bufs[i];
13387
13388 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
13389 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13390 i, ptr->max_idx, buf_size));
13391
13392 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
13393 dhdp->reorder_bufs[i] = NULL;
13394 }
13395 }
13396
13397 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
13398
13399 dhd = (dhd_info_t *)dhdp->info;
13400 if (dhdp->soc_ram) {
13401#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13402 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
13403#else
13404 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
13405#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13406 dhdp->soc_ram = NULL;
13407 }
13408 if (dhd != NULL) {
13409
13410 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
13411 if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
13412 DHD_PREALLOC_DHD_INFO, 0, FALSE))
13413 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
13414 dhd = NULL;
13415 }
13416 }
13417}
13418
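/*
 * Sizing note for the reorder buffers freed above (worked example): each
 * flow is a single allocation holding the reorder_info header plus
 * (max_idx + 1) packet-pointer slots, so for max_idx = 63:
 *
 *   buf_size = sizeof(struct reorder_info) + 64 * sizeof(void *)
 *
 * and the identical total must be handed back to MFREE().
 */
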
13419void
13420dhd_clear(dhd_pub_t *dhdp)
13421{
13422 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13423
13424 if (dhdp) {
13425 int i;
13426#ifdef DHDTCPACK_SUPPRESS
13427 /* Clean up timer/data structure for any remaining/pending packet or timer. */
13428 dhd_tcpack_info_tbl_clean(dhdp);
13429#endif /* DHDTCPACK_SUPPRESS */
13430 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
13431 if (dhdp->reorder_bufs[i]) {
13432 reorder_info_t *ptr;
13433 uint32 buf_size = sizeof(struct reorder_info);
13434
13435 ptr = dhdp->reorder_bufs[i];
13436
13437 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
13438 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13439 i, ptr->max_idx, buf_size));
13440
13441 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
13442 dhdp->reorder_bufs[i] = NULL;
13443 }
13444 }
13445
13446 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
13447
13448 if (dhdp->soc_ram) {
13449#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13450 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
13451#else
13452 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
13453#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13454 dhdp->soc_ram = NULL;
13455 }
13456 }
13457}
13458
13459static void
13460dhd_module_cleanup(void)
13461{
13462 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13463
13464 dhd_bus_unregister();
13465
13466 wl_android_exit();
13467
13468 dhd_wifi_platform_unregister_drv();
13469}
13470
13471static void __exit
13472dhd_module_exit(void)
13473{
13474 atomic_set(&exit_in_progress, 1);
13475 dhd_module_cleanup();
13476 unregister_reboot_notifier(&dhd_reboot_notifier);
13477 dhd_destroy_to_notifier_skt();
13478}
13479
13480static int __init
13481dhd_module_init(void)
13482{
13483 int err;
13484 int retry = POWERUP_MAX_RETRY;
13485
13486 DHD_ERROR(("%s in\n", __FUNCTION__));
13487
13488 DHD_PERIM_RADIO_INIT();
13489
13490 if (firmware_path[0] != '\0') {
13491 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
13492 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13493 }
13494
13495 if (nvram_path[0] != '\0') {
13496 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
13497 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13498 }
13499
13500 do {
13501 err = dhd_wifi_platform_register_drv();
13502 if (!err) {
13503 register_reboot_notifier(&dhd_reboot_notifier);
13504 break;
13505 } else {
13506 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
13507 __FUNCTION__, retry));
13508 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
13509 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
13510 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
13511 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
13512 }
13513 } while (retry--);
13514
13515 dhd_create_to_notifier_skt();
13516
13517 if (err) {
13518 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
13519 } else {
13520 if (!dhd_download_fw_on_driverload) {
13521 dhd_driver_init_done = TRUE;
13522 }
13523 }
13524
13525 DHD_ERROR(("%s out\n", __FUNCTION__));
13526
13527 return err;
13528}
13529
13530static int
13531dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
13532{
13533 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
13534 if (code == SYS_RESTART) {
13535#ifdef BCMPCIE
13536 is_reboot = code;
13537#endif /* BCMPCIE */
13538 }
13539 return NOTIFY_DONE;
13540}
13541
13542#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
13543#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
13544#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
13545 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
13546 defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
13547 defined(CONFIG_ARCH_SDM845)
13548deferred_module_init_sync(dhd_module_init);
13549#else
13550deferred_module_init(dhd_module_init);
13551#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
13552 * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
13553 * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845
13554 */
13555#elif defined(USE_LATE_INITCALL_SYNC)
13556late_initcall_sync(dhd_module_init);
13557#else
13558late_initcall(dhd_module_init);
13559#endif /* USE_LATE_INITCALL_SYNC */
13560#else
13561module_init(dhd_module_init);
13562#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
13563
13564module_exit(dhd_module_exit);
13565
13566/*
13567 * OS specific functions required to implement DHD driver in OS independent way
13568 */
13569int
13570dhd_os_proto_block(dhd_pub_t *pub)
13571{
13572 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13573
13574 if (dhd) {
13575 DHD_PERIM_UNLOCK(pub);
13576
13577 down(&dhd->proto_sem);
13578
13579 DHD_PERIM_LOCK(pub);
13580 return 1;
13581 }
13582
13583 return 0;
13584}
13585
13586int
13587dhd_os_proto_unblock(dhd_pub_t *pub)
13588{
13589 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13590
13591 if (dhd) {
13592 up(&dhd->proto_sem);
13593 return 1;
13594 }
13595
13596 return 0;
13597}
13598
13599void
13600dhd_os_dhdiovar_lock(dhd_pub_t *pub)
13601{
13602 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13603
13604 if (dhd) {
13605 mutex_lock(&dhd->dhd_iovar_mutex);
13606 }
13607}
13608
13609void
13610dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
13611{
13612 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13613
13614 if (dhd) {
13615 mutex_unlock(&dhd->dhd_iovar_mutex);
13616 }
13617}
13618
13619void
13620dhd_os_logdump_lock(dhd_pub_t *pub)
13621{
13622 dhd_info_t *dhd = NULL;
13623
13624 if (!pub)
13625 return;
13626
13627 dhd = (dhd_info_t *)(pub->info);
13628
13629 if (dhd) {
13630 mutex_lock(&dhd->logdump_lock);
13631 }
13632}
13633
13634void
13635dhd_os_logdump_unlock(dhd_pub_t *pub)
13636{
13637 dhd_info_t *dhd = NULL;
13638
13639 if (!pub)
13640 return;
13641
13642 dhd = (dhd_info_t *)(pub->info);
13643
13644 if (dhd) {
13645 mutex_unlock(&dhd->logdump_lock);
13646 }
13647}
13648
13649unsigned long
13650dhd_os_dbgring_lock(void *lock)
13651{
13652 if (!lock)
13653 return 0;
13654
13655 mutex_lock((struct mutex *)lock);
13656
13657 return 0;
13658}
13659
13660void
13661dhd_os_dbgring_unlock(void *lock, unsigned long flags)
13662{
13663 BCM_REFERENCE(flags);
13664
13665 if (!lock)
13666 return;
13667
13668 mutex_unlock((struct mutex *)lock);
13669}
13670
13671unsigned int
13672dhd_os_get_ioctl_resp_timeout(void)
13673{
13674 return ((unsigned int)dhd_ioctl_timeout_msec);
13675}
13676
13677void
13678dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
13679{
13680 dhd_ioctl_timeout_msec = (int)timeout_msec;
13681}
13682
13683int
13684dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
13685{
13686 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13687 int timeout;
13688
13689 /* Convert timeout from milliseconds to jiffies */
13690#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13691 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
13692#else
13693 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
13694#endif // endif
13695
13696 DHD_PERIM_UNLOCK(pub);
13697
13698 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
13699
13700 DHD_PERIM_LOCK(pub);
13701
13702 return timeout;
13703}
13704
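/*
 * Usage sketch (hypothetical condition variable): wait_event_timeout()
 * returns the remaining jiffies (>= 1) when the condition became true and 0
 * on timeout, so callers treat the return value as a boolean:
 */
#if 0
	uint resp_received = 0;
	if (dhd_os_ioctl_resp_wait(pub, &resp_received) == 0)
		DHD_ERROR(("IOCTL response timed out\n"));
#endif
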
13705int
13706dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
13707{
13708 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13709
13710 wake_up(&dhd->ioctl_resp_wait);
13711 return 0;
13712}
13713
13714int
13715dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
13716{
13717 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13718 int timeout;
13719
13720 /* Convert timeout from milliseconds to jiffies */
13721#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13722 timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
13723#else
13724 timeout = D3_ACK_RESP_TIMEOUT * HZ / 1000;
13725#endif // endif
13726
13727 DHD_PERIM_UNLOCK(pub);
13728
13729 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
13730
13731 DHD_PERIM_LOCK(pub);
13732
13733 return timeout;
13734}
13735
13736int
13737dhd_os_d3ack_wake(dhd_pub_t *pub)
13738{
13739 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13740
13741 wake_up(&dhd->d3ack_wait);
13742 return 0;
13743}
13744
13745int
13746dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
13747{
13748 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13749 int timeout;
13750
13751 /* Wait for bus usage contexts to gracefully exit within some timeout value.
13752 * Set the timeout slightly higher than dhd_ioctl_timeout_msec,
13753 * so that the IOCTL timeout is not affected.
13754 */
13755 /* Convert timeout from milliseconds to jiffies */
13756#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13757 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13758#else
13759 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13760#endif // endif
13761
13762 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
13763
13764 return timeout;
13765}
13766
13767/*
13768 * Wait until the condition *var == condition is met.
13769 * Returns 0 if the @condition was still false when the timeout elapsed,
13770 * non-zero (the remaining jiffies) if the @condition became true.
13771 */
13772int
13773dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
13774{
13775 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13776 int timeout;
13777
13778 /* Convert timeout from milliseconds to jiffies */
13779#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13780 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13781#else
13782 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13783#endif // endif
13784
13785 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
13786
13787 return timeout;
13788}
13789
13790/*
13791 * Wait until the '(*var & bitmask) == condition' is met.
13792 * Returns 0 if the @condition was still false when the timeout elapsed,
13793 * non-zero (the remaining jiffies) if the @condition became true.
13794 */
13795int
13796dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
13797 uint bitmask, uint condition)
13798{
13799 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13800 int timeout;
13801
13802 /* Convert timeout from milliseconds to jiffies */
13803#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13804 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13805#else
13806 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13807#endif // endif
13808
13809 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
13810 ((*var & bitmask) == condition), timeout);
13811
13812 return timeout;
13813}
13814
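/*
 * Usage sketch (bit name assumed for illustration): wait until an "in
 * IOVAR" bit clears in the shared bus-busy state word:
 */
#if 0
	if (dhd_os_busbusy_wait_bitmask(dhdp, &dhdp->dhd_bus_busy_state,
			DHD_BUS_BUSY_IN_IOVAR, 0) == 0) {
		DHD_ERROR(("timed out waiting for pending IOVAR\n"));
	}
#endif
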
13815int
13816dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
13817{
13818 int ret = 0;
13819 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13820
13821 DHD_PERIM_UNLOCK(pub);
13822 ret = wait_event_interruptible(dhd->dmaxfer_wait, (*condition));
13823 DHD_PERIM_LOCK(pub);
13824
13825 return ret;
13826
13827}
13828
13829int
13830dhd_os_dmaxfer_wake(dhd_pub_t *pub)
13831{
13832 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13833
13834 wake_up(&dhd->dmaxfer_wait);
13835 return 0;
13836}
13837
13838void
13839dhd_os_tx_completion_wake(dhd_pub_t *dhd)
13840{
13841 /* Call wmb() to make sure the event value is updated before waking up the waiter */
13842 OSL_SMP_WMB();
13843 wake_up(&dhd->tx_completion_wait);
13844}
13845
13846#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
13847/* Fix compilation error for FC11 */
13848INLINE
13849#endif // endif
13850int
13851dhd_os_busbusy_wake(dhd_pub_t *pub)
13852{
13853 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13854 /* Call wmb() to make sure the event value is updated before waking up the waiter */
13855 OSL_SMP_WMB();
13856 wake_up(&dhd->dhd_bus_busy_state_wait);
13857 return 0;
13858}
13859
13860void
13861dhd_os_wd_timer_extend(void *bus, bool extend)
13862{
13863 dhd_pub_t *pub = bus;
13864 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13865
13866 if (extend)
13867 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
13868 else
13869 dhd_os_wd_timer(bus, dhd->default_wd_interval);
13870}
13871
13872void
13873dhd_os_wd_timer(void *bus, uint wdtick)
13874{
13875 dhd_pub_t *pub = bus;
13876 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13877 unsigned long flags;
13878
13879 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13880
13881 if (!dhd) {
13882 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13883 return;
13884 }
13885
13886 DHD_GENERAL_LOCK(pub, flags);
13887
13888 /* don't start the wd until fw is loaded */
13889 if (pub->busstate == DHD_BUS_DOWN) {
13890 DHD_GENERAL_UNLOCK(pub, flags);
13891#ifdef BCMSDIO
13892 if (!wdtick) {
13893 DHD_OS_WD_WAKE_UNLOCK(pub);
13894 }
13895#endif /* BCMSDIO */
13896 return;
13897 }
13898
13899 /* Totally stop the timer */
13900 if (!wdtick && dhd->wd_timer_valid == TRUE) {
13901 dhd->wd_timer_valid = FALSE;
13902 DHD_GENERAL_UNLOCK(pub, flags);
13903 del_timer_sync(&dhd->timer);
13904#ifdef BCMSDIO
13905 DHD_OS_WD_WAKE_UNLOCK(pub);
13906#endif /* BCMSDIO */
13907 return;
13908 }
13909
13910 if (wdtick) {
13911#ifdef BCMSDIO
13912 DHD_OS_WD_WAKE_LOCK(pub);
13913 dhd_watchdog_ms = (uint)wdtick;
13914#endif /* BCMSDIO */
13915 /* Re-arm the timer with the last watchdog period */
13916 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
13917 dhd->wd_timer_valid = TRUE;
13918 }
13919 DHD_GENERAL_UNLOCK(pub, flags);
13920}
13921
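/*
 * The arm/stop idiom used above, in isolation (sketch): mod_timer() both
 * starts and re-arms, while del_timer_sync() also waits for a running
 * handler to finish, which is why it is called with the lock dropped.
 */
#if 0
	mod_timer(&t, jiffies + msecs_to_jiffies(interval_ms)); /* (re)arm */
	del_timer_sync(&t);                                     /* stop */
#endif
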
13922#ifdef DHD_PCIE_RUNTIMEPM
13923void
13924dhd_os_runtimepm_timer(void *bus, uint tick)
13925{
13926 dhd_pub_t *pub = bus;
13927 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13928 unsigned long flags;
13929
13930 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13931
13932 if (!dhd) {
13933 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13934 return;
13935 }
13936
13937 DHD_GENERAL_LOCK(pub, flags);
13938
13939 /* don't start the RPM until fw is loaded */
13940 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
13941 DHD_GENERAL_UNLOCK(pub, flags);
13942 return;
13943 }
13944
13945 /* If tick is non-zero, the request is to start the timer */
13946 if (tick) {
13947 /* Start the timer only if it's not already running */
13948 if (dhd->rpm_timer_valid == FALSE) {
13949 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
13950 dhd->rpm_timer_valid = TRUE;
13951 }
13952 } else {
13953 /* tick is zero, we have to stop the timer */
13954 /* Stop the timer only if it's running; otherwise there is nothing to do */
13955 if (dhd->rpm_timer_valid == TRUE) {
13956 dhd->rpm_timer_valid = FALSE;
13957 DHD_GENERAL_UNLOCK(pub, flags);
13958 del_timer_sync(&dhd->rpm_timer);
13959 /* we have already released the lock, so just go to exit */
13960 goto exit;
13961 }
13962 }
13963
13964 DHD_GENERAL_UNLOCK(pub, flags);
13965exit:
13966 return;
13967
13968}
13969
13970#endif /* DHD_PCIE_RUNTIMEPM */
13971
13972void *
13973dhd_os_open_image1(dhd_pub_t *pub, char *filename)
13974{
13975 struct file *fp;
13976 int size;
13977
13978 fp = filp_open(filename, O_RDONLY, 0);
13979 /*
13980 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13981 * Alternative:
13982 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13983 * ???
13984 */
13985 if (IS_ERR(fp)) {
13986 fp = NULL;
13987 goto err;
13988 }
13989
13990 if (!S_ISREG(file_inode(fp)->i_mode)) {
13991 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
13992 fp = NULL;
13993 goto err;
13994 }
13995
13996 size = i_size_read(file_inode(fp));
13997 if (size <= 0) {
13998 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
13999 fp = NULL;
14000 goto err;
14001 }
14002
14003 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
14004
14005err:
14006 return fp;
14007}
14008
14009int
14010dhd_os_get_image_block(char *buf, int len, void *image)
14011{
14012 struct file *fp = (struct file *)image;
14013 int rdlen;
14014 int size;
14015
14016 if (!image) {
14017 return 0;
14018 }
14019
14020 size = i_size_read(file_inode(fp));
14021 rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
14022
14023 if (len >= size && size != rdlen) {
14024 return -EIO;
14025 }
14026
14027 if (rdlen > 0) {
14028 fp->f_pos += rdlen;
14029 }
14030
14031 return rdlen;
14032}
14033
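/*
 * Note on the read above: kernel_read(fp, pos, buf, count) is the pre-4.14
 * signature; newer kernels use kernel_read(fp, buf, count, &pos). Sketch of
 * the same chunked read against the older form (chunk buffer assumed):
 */
#if 0
	while ((rdlen = kernel_read(fp, fp->f_pos, chunk, sizeof(chunk))) > 0)
		fp->f_pos += rdlen;
#endif
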
14034#if defined(BT_OVER_SDIO)
14035int
14036dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
14037{
14038 struct file *fp = (struct file *)image;
14039 int rd_len;
14040 uint str_len = 0;
14041 char *str_end = NULL;
14042
14043 if (!image)
14044 return 0;
14045
14046 rd_len = kernel_read(fp, fp->f_pos, str, len);
14047 str_end = strnchr(str, len, '\n');
14048 if (str_end == NULL) {
14049 goto err;
14050 }
14051 str_len = (uint)(str_end - str);
14052
14053 /* Advance file pointer past the string length */
14054 fp->f_pos += str_len + 1;
14055 bzero(str_end, rd_len - str_len);
14056
14057err:
14058 return str_len;
14059}
14060#endif /* defined (BT_OVER_SDIO) */
14061
14062int
14063dhd_os_get_image_size(void *image)
14064{
14065 struct file *fp = (struct file *)image;
14066 int size;
14067 if (!image) {
14068 return 0;
14069 }
14070
14071 size = i_size_read(file_inode(fp));
14072
14073 return size;
14074}
14075
14076void
14077dhd_os_close_image1(dhd_pub_t *pub, void *image)
14078{
14079 if (image) {
14080 filp_close((struct file *)image, NULL);
14081 }
14082}
14083
14084void
14085dhd_os_sdlock(dhd_pub_t *pub)
14086{
14087 dhd_info_t *dhd;
14088
14089 dhd = (dhd_info_t *)(pub->info);
14090
14091 if (dhd_dpc_prio >= 0)
14092 down(&dhd->sdsem);
14093 else
14094 spin_lock_bh(&dhd->sdlock);
14095}
14096
14097void
14098dhd_os_sdunlock(dhd_pub_t *pub)
14099{
14100 dhd_info_t *dhd;
14101
14102 dhd = (dhd_info_t *)(pub->info);
14103
14104 if (dhd_dpc_prio >= 0)
14105 up(&dhd->sdsem);
14106 else
14107 spin_unlock_bh(&dhd->sdlock);
14108}
14109
14110void
14111dhd_os_sdlock_txq(dhd_pub_t *pub)
14112{
14113 dhd_info_t *dhd;
14114
14115 dhd = (dhd_info_t *)(pub->info);
14116 spin_lock_bh(&dhd->txqlock);
14117}
14118
14119void
14120dhd_os_sdunlock_txq(dhd_pub_t *pub)
14121{
14122 dhd_info_t *dhd;
14123
14124 dhd = (dhd_info_t *)(pub->info);
14125 spin_unlock_bh(&dhd->txqlock);
14126}
14127
14128void
14129dhd_os_sdlock_rxq(dhd_pub_t *pub)
14130{
14131}
14132
14133void
14134dhd_os_sdunlock_rxq(dhd_pub_t *pub)
14135{
14136}
14137
14138static void
14139dhd_os_rxflock(dhd_pub_t *pub)
14140{
14141 dhd_info_t *dhd;
14142
14143 dhd = (dhd_info_t *)(pub->info);
14144 spin_lock_bh(&dhd->rxf_lock);
14145
14146}
14147
14148static void
14149dhd_os_rxfunlock(dhd_pub_t *pub)
14150{
14151 dhd_info_t *dhd;
14152
14153 dhd = (dhd_info_t *)(pub->info);
14154 spin_unlock_bh(&dhd->rxf_lock);
14155}
14156
14157#ifdef DHDTCPACK_SUPPRESS
14158unsigned long
14159dhd_os_tcpacklock(dhd_pub_t *pub)
14160{
14161 dhd_info_t *dhd;
14162 unsigned long flags = 0;
14163
14164 dhd = (dhd_info_t *)(pub->info);
14165
14166 if (dhd) {
14167#ifdef BCMSDIO
14168 spin_lock_bh(&dhd->tcpack_lock);
14169#else
14170 spin_lock_irqsave(&dhd->tcpack_lock, flags);
14171#endif /* BCMSDIO */
14172 }
14173
14174 return flags;
14175}
14176
14177void
14178dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
14179{
14180 dhd_info_t *dhd;
14181
14182#ifdef BCMSDIO
14183 BCM_REFERENCE(flags);
14184#endif /* BCMSDIO */
14185
14186 dhd = (dhd_info_t *)(pub->info);
14187
14188 if (dhd) {
14189#ifdef BCMSDIO
14190 spin_unlock_bh(&dhd->tcpack_lock);
14191#else
14192 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
14193#endif /* BCMSDIO */
14194 }
14195}
14196#endif /* DHDTCPACK_SUPPRESS */
14197
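/*
 * Why two lock flavours above: spin_lock_bh() is sufficient when the lock
 * only has to exclude softirq context (the SDIO path), while
 * spin_lock_irqsave() is required when it can also be taken from hard-IRQ
 * context (the PCIe path). Illustrative pairing (sketch):
 */
#if 0
	spin_lock_bh(&lock);			/* softirq-safe */
	spin_unlock_bh(&lock);

	spin_lock_irqsave(&lock, flags);	/* hard-IRQ safe */
	spin_unlock_irqrestore(&lock, flags);
#endif
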
14198uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
14199{
14200 uint8* buf;
14201 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
14202
14203 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
14204 if (buf == NULL && kmalloc_if_fail)
14205 buf = kmalloc(size, flags);
14206
14207 return buf;
14208}
14209
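/*
 * The CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC split above follows the usual
 * allocation rule: GFP_KERNEL may block and is only legal in a context that
 * can sleep. Rough open-coded analogue (sketch; CAN_SLEEP() is a
 * driver-local predicate, and in_atomic() is only an approximation):
 */
#if 0
	gfp_t gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	buf = kmalloc(size, gfp);
#endif
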
14210void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
14211{
14212}
14213
14214#if defined(WL_WIRELESS_EXT)
14215struct iw_statistics *
14216dhd_get_wireless_stats(struct net_device *dev)
14217{
14218 int res = 0;
14219 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14220
14221 if (!dhd->pub.up) {
14222 return NULL;
14223 }
14224
14225 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
14226
14227 if (res == 0)
14228 return &dhd->iw.wstats;
14229 else
14230 return NULL;
14231}
14232#endif /* defined(WL_WIRELESS_EXT) */
14233
14234static int
14235dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
14236 wl_event_msg_t *event, void **data)
14237{
14238 int bcmerror = 0;
14239#ifdef WL_CFG80211
14240 unsigned long flags = 0;
14241#ifdef DYNAMIC_MUMIMO_CONTROL
14242 static uint32 reassoc_err = 0;
14243#endif /* DYNAMIC_MUMIMO_CONTROL */
14244#endif /* WL_CFG80211 */
14245 ASSERT(dhd != NULL);
14246
14247#ifdef SHOW_LOGTRACE
14248 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
14249 &dhd->event_data);
14250#else
14251 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
14252 NULL);
14253#endif /* SHOW_LOGTRACE */
14254 if (unlikely(bcmerror != BCME_OK)) {
14255 return bcmerror;
14256 }
14257
14258 if (ntoh32(event->event_type) == WLC_E_IF) {
14259 /* WLC_E_IF event types are consumed by wl_process_host_event.
14260 * For ifadd/del ops, the netdev ptr may not be valid at this
14261 * point. so return before invoking cfg80211/wext handlers.
14262 */
14263 return BCME_OK;
14264 }
14265
14266#if defined(WL_WIRELESS_EXT)
14267 if (event->bsscfgidx == 0) {
14268 /*
14269 * Wireless ext is on primary interface only
14270 */
14271 ASSERT(dhd->iflist[ifidx] != NULL);
14272 ASSERT(dhd->iflist[ifidx]->net != NULL);
14273
14274 if (dhd->iflist[ifidx]->net) {
14275 wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
14276 }
14277 }
14278#endif /* defined(WL_WIRELESS_EXT) */
14279
14280#ifdef WL_CFG80211
14281 if (dhd->iflist[ifidx]->net) {
14282 spin_lock_irqsave(&dhd->pub.up_lock, flags);
14283 if (dhd->pub.up) {
14284 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
14285 }
14286 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
14287 }
14288#ifdef DYNAMIC_MUMIMO_CONTROL
14289#define REASSOC_ERROR_RETRY_LIMIT 1
14290 if (dhd->pub.reassoc_mumimo_sw) {
14291 uint event_type = ntoh32(event->event_type);
14292 uint status = ntoh32(event->status);
14293
14294 if (event_type == WLC_E_REASSOC) {
14295 if (status == WLC_E_STATUS_SUCCESS) {
14296 reassoc_err = 0;
14297 } else {
14298 reassoc_err++;
14299 }
14300
14301 if (reassoc_err > REASSOC_ERROR_RETRY_LIMIT) {
14302 dhd->pub.reassoc_mumimo_sw = FALSE;
14303 dhd->pub.murx_block_eapol = FALSE;
14304 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
14305 dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, OFF);
14306 }
14307 }
14308 }
14309#undef REASSOC_ERROR_RETRY_LIMIT
14310#endif /* DYNAMIC_MUMIMO_CONTROL */
14311#endif /* defined(WL_CFG80211) */
14312
14313 return (bcmerror);
14314}
14315
14316/* send up locally generated event */
14317void
14318dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
14319{
14320 switch (ntoh32(event->event_type)) {
14321 /* Handle error case or further events here */
14322 default:
14323 break;
14324 }
14325}
14326
14327#ifdef LOG_INTO_TCPDUMP
14328void
14329dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
14330{
14331 struct sk_buff *p, *skb;
14332 uint32 pktlen;
14333 int len;
14334 dhd_if_t *ifp;
14335 dhd_info_t *dhd;
14336 uchar *skb_data;
14337 int ifidx = 0;
14338 struct ether_header eth;
14339
14340 pktlen = sizeof(eth) + data_len;
14341 dhd = dhdp->info;
14342
14343 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
14344 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
14345
14346 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
14347 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
14348 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
14349 eth.ether_type = hton16(ETHER_TYPE_BRCM);
14350
14351 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
14352 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
14353 skb = PKTTONATIVE(dhdp->osh, p);
14354 skb_data = skb->data;
14355 len = skb->len;
14356
14357 ifidx = dhd_ifname2idx(dhd, "wlan0");
14358 ifp = dhd->iflist[ifidx];
14359 if (ifp == NULL)
14360 ifp = dhd->iflist[0];
14361
14362 ASSERT(ifp);
14363 skb->dev = ifp->net;
14364 skb->protocol = eth_type_trans(skb, skb->dev);
14365 skb->data = skb_data;
14366 skb->len = len;
14367
14368 /* Strip header, count, deliver upward */
14369 skb_pull(skb, ETH_HLEN);
14370
14371 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
14372 __FUNCTION__, __LINE__);
14373 /* Send the packet */
14374 if (in_interrupt()) {
14375 netif_rx(skb);
14376 } else {
14377 netif_rx_ni(skb);
14378 }
14379 } else {
14380 /* Could not allocate an sk_buff */
14381 DHD_ERROR(("%s: unable to alloc sk_buff\n", __FUNCTION__));
14382 }
14383}
14384#endif /* LOG_INTO_TCPDUMP */
14385
14386void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
14387{
14388#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14389 struct dhd_info *dhdinfo = dhd->info;
14390
14391#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
14392 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
14393#else
14394 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
14395#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
14396
14397 dhd_os_sdunlock(dhd);
14398 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
14399 dhd_os_sdlock(dhd);
14400#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
14401 return;
14402} /* dhd_wait_for_event */
14403
14404void dhd_wait_event_wakeup(dhd_pub_t *dhd)
14405{
14406#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14407 struct dhd_info *dhdinfo = dhd->info;
14408 if (waitqueue_active(&dhdinfo->ctrl_wait))
14409 wake_up(&dhdinfo->ctrl_wait);
14410#endif // endif
14411 return;
14412}
14413
14414#if defined(BCMSDIO) || defined(BCMPCIE)
14415int
14416dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
14417{
14418 int ret;
14419
14420 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14421
14422#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14423 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
14424 return BCME_ERROR;
14425#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14426
14427 if (flag == TRUE) {
14428 /* Issue wl down command before resetting the chip */
14429 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
14430 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
14431 }
14432#ifdef PROP_TXSTATUS
14433 if (dhd->pub.wlfc_enabled) {
14434 dhd_wlfc_deinit(&dhd->pub);
14435 }
14436#endif /* PROP_TXSTATUS */
14437#ifdef PNO_SUPPORT
14438 if (dhd->pub.pno_state) {
14439 dhd_pno_deinit(&dhd->pub);
14440 }
14441#endif // endif
14442#ifdef RTT_SUPPORT
14443 if (dhd->pub.rtt_state) {
14444 dhd_rtt_deinit(&dhd->pub);
14445 }
14446#endif /* RTT_SUPPORT */
14447
14448#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
14449 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
14450#endif /* DBG_PKT_MON */
14451 }
14452
14453#ifdef BCMSDIO
14454 if (!flag) {
14455 dhd_update_fw_nv_path(dhd);
14456 /* update firmware and nvram path to sdio bus */
14457 dhd_bus_update_fw_nv_path(dhd->pub.bus,
14458 dhd->fw_path, dhd->nv_path);
14459 }
14460#endif /* BCMSDIO */
14461
14462 ret = dhd_bus_devreset(&dhd->pub, flag);
14463
14464#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14465 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
14466 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
14467#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14468
14469 if (flag) {
14470 /* Clear some flags for recovery logic */
14471 dhd->pub.dongle_trap_occured = 0;
14472 dhd->pub.iovar_timeout_occured = 0;
14473#ifdef PCIE_FULL_DONGLE
14474 dhd->pub.d3ack_timeout_occured = 0;
14475#endif /* PCIE_FULL_DONGLE */
14476#ifdef DHD_MAP_LOGGING
14477 dhd->pub.smmu_fault_occurred = 0;
14478#endif /* DHD_MAP_LOGGING */
14479 }
14480
14481 if (ret) {
14482 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
14483 }
14484
14485 return ret;
14486}
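
/*
 * Usage sketch (assumed recovery flow, not quoted from a caller): a full
 * dongle reset is two calls, first flag=TRUE to bring the dongle down
 * (wl down plus wlfc/pno/rtt deinit), then flag=FALSE to refresh the
 * firmware/nvram paths and bring the bus back up:
 *
 *	dhd_net_bus_devreset(dev, TRUE);	// dongle/bus down
 *	dhd_net_bus_devreset(dev, FALSE);	// reload fw/nv, bus up
 */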
14487
14488#ifdef BCMSDIO
14489int
14490dhd_net_bus_suspend(struct net_device *dev)
14491{
14492 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14493 return dhd_bus_suspend(&dhd->pub);
14494}
14495
14496int
14497dhd_net_bus_resume(struct net_device *dev, uint8 stage)
14498{
14499 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14500 return dhd_bus_resume(&dhd->pub, stage);
14501}
14502
14503#endif /* BCMSDIO */
14504#endif /* BCMSDIO || BCMPCIE */
14505
14506int net_os_set_suspend_disable(struct net_device *dev, int val)
14507{
14508 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14509 int ret = 0;
14510
14511 if (dhd) {
14512 ret = dhd->pub.suspend_disable_flag;
14513 dhd->pub.suspend_disable_flag = val;
14514 }
14515 return ret;
14516}
14517
14518int net_os_set_suspend(struct net_device *dev, int val, int force)
14519{
14520 int ret = 0;
14521 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14522
14523 if (dhd) {
14524#ifdef CONFIG_MACH_UNIVERSAL7420
14525#if defined(ARGOS_RPS_CPU_CTL) && defined(DHD_LB_RXP)
14526 if (!val) {
14527 /* Force to set rps_cpus to specific CPU core */
14528 dhd_rps_cpus_enable(dev, TRUE);
14529 }
14530#endif /* ARGOS_RPS_CPU_CTL && DHD_LB_RXP */
14531#endif /* CONFIG_MACH_UNIVERSAL7420 */
14532#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14533 ret = dhd_set_suspend(val, &dhd->pub);
14534#else
14535 ret = dhd_suspend_resume_helper(dhd, val, force);
14536#endif // endif
14537#ifdef WL_CFG80211
14538 wl_cfg80211_update_power_mode(dev);
14539#endif // endif
14540 }
14541 return ret;
14542}
14543
14544int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
14545{
14546 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14547
14548 if (dhd) {
14549 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
14550 __FUNCTION__, val));
14551 dhd->pub.suspend_bcn_li_dtim = val;
14552 }
14553
14554 return 0;
14555}
14556
14557int net_os_set_max_dtim_enable(struct net_device *dev, int val)
14558{
14559 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14560
14561 if (dhd) {
14562 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14563 __FUNCTION__, (val ? "Enable" : "Disable")));
14564 if (val) {
14565 dhd->pub.max_dtim_enable = TRUE;
14566 } else {
14567 dhd->pub.max_dtim_enable = FALSE;
14568 }
14569 } else {
14570 return -1;
14571 }
14572
14573 return 0;
14574}
14575
14576#ifdef PKT_FILTER_SUPPORT
14577int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
14578{
14579 int ret = 0;
14580
14581#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14582 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14583
14584 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
14585 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
14586 return 0;
14587 }
14588
14589#ifdef BLOCK_IPV6_PACKET
14590 /* customer wants to drop all IPv6 packets */
14591 if (num == DHD_MULTICAST6_FILTER_NUM) {
14592 return 0;
14593 }
14594#endif /* BLOCK_IPV6_PACKET */
14595
14596 if (num >= dhd->pub.pktfilter_count) {
14597 return -EINVAL;
14598 }
14599
14600 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
14601#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14602
14603 return ret;
14604}
14605
14606int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
14607
14608{
14609 int ret = 0;
14610
14611 /* Packet filtering is set only if we are still in early-suspend and
14612 * we need either to turn it ON or turn it OFF.
14613 * We can always turn it OFF in case of early-suspend, but we turn it
14614 * back ON only if suspend_disable_flag was not set.
14615 */
14616 if (dhdp && dhdp->up) {
14617 if (dhdp->in_suspend) {
14618 if (!val || !dhdp->suspend_disable_flag)
14619 dhd_enable_packet_filter(val, dhdp);
14620 }
14621 }
14622 return ret;
14623}
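
/*
 * Usage sketch (hypothetical suspend/resume hooks): the helper only acts
 * while dhdp->in_suspend is set, and with val=1 it enables filters only if
 * suspend_disable_flag is clear:
 *
 *	dhd_os_enable_packet_filter(dhdp, 1);	// entering early suspend
 *	dhd_os_enable_packet_filter(dhdp, 0);	// leaving early suspend
 */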
14624
14625 /* function to enable/disable packet filtering for the network device */
14626int net_os_enable_packet_filter(struct net_device *dev, int val)
14627{
14628 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14629
14630 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
14631 return dhd_os_enable_packet_filter(&dhd->pub, val);
14632}
14633#endif /* PKT_FILTER_SUPPORT */
14634
14635int
14636dhd_dev_init_ioctl(struct net_device *dev)
14637{
14638 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14639 int ret;
14640
14641 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
14642 goto done;
14643
14644done:
14645 return ret;
14646}
14647
14648int
14649dhd_dev_get_feature_set(struct net_device *dev)
14650{
14651 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
14652 dhd_pub_t *dhd = (&ptr->pub);
14653 int feature_set = 0;
14654
14655 if (FW_SUPPORTED(dhd, sta))
14656 feature_set |= WIFI_FEATURE_INFRA;
14657 if (FW_SUPPORTED(dhd, dualband))
14658 feature_set |= WIFI_FEATURE_INFRA_5G;
14659 if (FW_SUPPORTED(dhd, p2p))
14660 feature_set |= WIFI_FEATURE_P2P;
14661 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
14662 feature_set |= WIFI_FEATURE_SOFT_AP;
14663 if (FW_SUPPORTED(dhd, tdls))
14664 feature_set |= WIFI_FEATURE_TDLS;
14665 if (FW_SUPPORTED(dhd, vsdb))
14666 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
14667 if (FW_SUPPORTED(dhd, nan)) {
14668 feature_set |= WIFI_FEATURE_NAN;
14669 /* NAN is essential for D2D RTT */
14670 if (FW_SUPPORTED(dhd, rttd2d))
14671 feature_set |= WIFI_FEATURE_D2D_RTT;
14672 }
14673#ifdef RTT_SUPPORT
14674 feature_set |= WIFI_FEATURE_D2D_RTT;
14675 feature_set |= WIFI_FEATURE_D2AP_RTT;
14676#endif /* RTT_SUPPORT */
14677#ifdef LINKSTAT_SUPPORT
14678 feature_set |= WIFI_FEATURE_LINKSTAT;
14679#endif /* LINKSTAT_SUPPORT */
14680
14681#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
14682 if (dhd_is_pno_supported(dhd)) {
14683 feature_set |= WIFI_FEATURE_PNO;
14684#ifdef GSCAN_SUPPORT
14685 feature_set |= WIFI_FEATURE_GSCAN;
14686 feature_set |= WIFI_FEATURE_HAL_EPNO;
14687#endif /* GSCAN_SUPPORT */
14688 }
14689#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
14690#ifdef RSSI_MONITOR_SUPPORT
14691 if (FW_SUPPORTED(dhd, rssi_mon)) {
14692 feature_set |= WIFI_FEATURE_RSSI_MONITOR;
14693 }
14694#endif /* RSSI_MONITOR_SUPPORT */
14695#ifdef WL11U
14696 feature_set |= WIFI_FEATURE_HOTSPOT;
14697#endif /* WL11U */
14698#ifdef NDO_CONFIG_SUPPORT
14699 feature_set |= WIFI_FEATURE_CONFIG_NDO;
14700#endif /* NDO_CONFIG_SUPPORT */
14701#ifdef KEEP_ALIVE
14702 feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
14703#endif /* KEEP_ALIVE */
14704#ifdef SUPPORT_RANDOM_MAC_SCAN
14705 feature_set |= WIFI_FEATURE_SCAN_RAND;
14706#endif /* SUPPORT_RANDOM_MAC_SCAN */
14707#ifdef FILTER_IE
14708 if (FW_SUPPORTED(dhd, fie)) {
14709 feature_set |= WIFI_FEATURE_FILTER_IE;
14710 }
14711#endif /* FILTER_IE */
14712#ifdef ROAMEXP_SUPPORT
14713 /* Check if the Android O roam feature is supported by FW */
14714 if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
14715 feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
14716 }
14717#endif /* ROAMEXP_SUPPORT */
14718 return feature_set;
14719}
14720
14721int
14722dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
14723{
14724 int feature_set_full;
14725 int ret = 0;
14726
14727 feature_set_full = dhd_dev_get_feature_set(dev);
14728
14729 /* Common feature set for all interfaces */
14730 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
14731 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
14732 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
14733 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
14734 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
14735 (feature_set_full & WIFI_FEATURE_EPR);
14736
14737 /* Specific feature group for each interface */
14738 switch (num) {
14739 case 0:
14740 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
14741 /* Not supported yet */
14742 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14743 (feature_set_full & WIFI_FEATURE_TDLS) |
14744 (feature_set_full & WIFI_FEATURE_PNO) |
14745 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
14746 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
14747 (feature_set_full & WIFI_FEATURE_GSCAN) |
14748 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
14749 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
14750 break;
14751
14752 case 1:
14753 ret |= (feature_set_full & WIFI_FEATURE_P2P);
14754 /* Not yet verified NAN with P2P */
14755 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14756 break;
14757
14758 case 2:
14759 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
14760 (feature_set_full & WIFI_FEATURE_TDLS) |
14761 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
14762 break;
14763
14764 default:
14765 ret = WIFI_FEATURE_INVALID;
14766 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
14767 break;
14768 }
14769
14770 return ret;
14771}
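
/*
 * Usage sketch (hypothetical HAL-side loop): the matrix is indexed per
 * concurrent interface, and out-of-range indexes return
 * WIFI_FEATURE_INVALID, which terminates the walk:
 *
 *	int num, set;
 *	for (num = 0; ; num++) {
 *		set = dhd_dev_get_feature_set_matrix(dev, num);
 *		if (set == WIFI_FEATURE_INVALID)
 *			break;
 *		// report 'set' for interface index 'num'
 *	}
 */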
14772#ifdef CUSTOM_FORCE_NODFS_FLAG
14773int
14774dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
14775{
14776 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14777
14778 if (nodfs)
14779 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
14780 else
14781 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
14782 dhd->pub.force_country_change = TRUE;
14783 return 0;
14784}
14785#endif /* CUSTOM_FORCE_NODFS_FLAG */
14786#ifdef NDO_CONFIG_SUPPORT
14787int
14788dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
14789{
14790 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14791 dhd_pub_t *dhdp = &dhd->pub;
14792 int ret = 0;
14793
14794 if (enable) {
14795 /* enable ND offload feature (will be enabled in FW on suspend) */
14796 dhdp->ndo_enable = TRUE;
14797
14798 /* Update changes of anycast address & DAD failed address */
14799 ret = dhd_dev_ndo_update_inet6addr(dev);
14800 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
14801 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
14802 return ret;
14803 }
14804 } else {
14805 /* disable ND offload feature */
14806 dhdp->ndo_enable = FALSE;
14807
14808 /* disable ND offload in FW */
14809 ret = dhd_ndo_enable(dhdp, FALSE);
14810 if (ret < 0) {
14811 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
14812 }
14813 }
14814 return ret;
14815}
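
/*
 * Usage sketch (assumed framework call path): enabling NDO records the
 * intent in dhdp->ndo_enable and immediately syncs the host IPv6 address
 * table; the offload itself is armed in firmware on suspend:
 *
 *	dhd_dev_ndo_cfg(dev, 1);	// enable + push unicast/anycast addrs
 *	dhd_dev_ndo_cfg(dev, 0);	// disable in host and firmware
 */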
14816
14817 /* #pragma used as a WAR to fix a build failure:
14818 * ignore dropping of the 'const' qualifier in the 'list_entry' macro.
14819 * This pragma disables the warning only for the following function.
14820 */
14821#pragma GCC diagnostic push
14822#pragma GCC diagnostic ignored "-Wcast-qual"
14823
14824static int
14825dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
14826{
14827 struct inet6_ifaddr *ifa;
14828 struct ifacaddr6 *acaddr = NULL;
14829 int addr_count = 0;
14830
14831 /* lock */
14832 read_lock_bh(&inet6->lock);
14833
14834 /* Count valid unicast address */
14835 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14836 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
14837 addr_count++;
14838 }
14839 }
14840
14841 /* Count anycast address */
14842 acaddr = inet6->ac_list;
14843 while (acaddr) {
14844 addr_count++;
14845 acaddr = acaddr->aca_next;
14846 }
14847
14848 /* unlock */
14849 read_unlock_bh(&inet6->lock);
14850
14851 return addr_count;
14852}
14853
14854int
14855dhd_dev_ndo_update_inet6addr(struct net_device *dev)
14856{
14857 dhd_info_t *dhd;
14858 dhd_pub_t *dhdp;
14859 struct inet6_dev *inet6;
14860 struct inet6_ifaddr *ifa;
14861 struct ifacaddr6 *acaddr = NULL;
14862 struct in6_addr *ipv6_addr = NULL;
14863 int cnt, i;
14864 int ret = BCME_OK;
14865
14866 /*
14867 * this function evaluates the host IP addresses in struct inet6_dev:
14868 * unicast addrs in inet6_dev->addr_list,
14869 * anycast addrs in inet6_dev->ac_list.
14870 * While evaluating inet6_dev, read_lock_bh() is required to prevent
14871 * access to a NULL (freed) pointer.
14872 */
14873
14874 if (dev) {
14875 inet6 = dev->ip6_ptr;
14876 if (!inet6) {
14877 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
14878 return BCME_ERROR;
14879 }
14880
14881 dhd = DHD_DEV_INFO(dev);
14882 if (!dhd) {
14883 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
14884 return BCME_ERROR;
14885 }
14886 dhdp = &dhd->pub;
14887
14888 if (dhd_net2idx(dhd, dev) != 0) {
14889 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
14890 return BCME_ERROR;
14891 }
14892 } else {
14893 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
14894 return BCME_ERROR;
14895 }
14896
14897 /* Check host IP overflow */
14898 cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
14899 if (cnt > dhdp->ndo_max_host_ip) {
14900 if (!dhdp->ndo_host_ip_overflow) {
14901 dhdp->ndo_host_ip_overflow = TRUE;
14902 /* Disable ND offload in FW */
14903 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
14904 ret = dhd_ndo_enable(dhdp, FALSE);
14905 }
14906
14907 return ret;
14908 }
14909
14910 /*
14911 * Allocate an ipv6 addr buffer to store addresses to be added/removed.
14912 * The driver needs to lock inet6_dev while accessing the structure, but
14913 * cannot use ioctl while inet6_dev is locked since ioctl requires
14914 * scheduling; hence, copy addresses to the buffer and do ioctl after unlock.
14915 */
14916 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
14917 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14918 if (!ipv6_addr) {
14919 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
14920 return BCME_NOMEM;
14921 }
14922
14923 /* Find DAD failed unicast address to be removed */
14924 cnt = 0;
14925 read_lock_bh(&inet6->lock);
14926 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14927 /* DAD failed unicast address */
14928 if ((ifa->flags & IFA_F_DADFAILED) &&
14929 (cnt < dhdp->ndo_max_host_ip)) {
14930 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
14931 cnt++;
14932 }
14933 }
14934 read_unlock_bh(&inet6->lock);
14935
14936 /* Remove DAD failed unicast address */
14937 for (i = 0; i < cnt; i++) {
14938 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
14939 ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
14940 if (ret < 0) {
14941 goto done;
14942 }
14943 }
14944
14945 /* Remove all anycast address */
14946 ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14947 if (ret < 0) {
14948 goto done;
14949 }
14950
14951 /*
14952 * if ND offload was disabled due to host ip overflow,
14953 * attempt to add valid unicast address.
14954 */
14955 if (dhdp->ndo_host_ip_overflow) {
14956 /* Find valid unicast address */
14957 cnt = 0;
14958 read_lock_bh(&inet6->lock);
14959 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14960 /* valid unicast address */
14961 if (!(ifa->flags & IFA_F_DADFAILED) &&
14962 (cnt < dhdp->ndo_max_host_ip)) {
14963 memcpy(&ipv6_addr[cnt], &ifa->addr,
14964 sizeof(struct in6_addr));
14965 cnt++;
14966 }
14967 }
14968 read_unlock_bh(&inet6->lock);
14969
14970 /* Add valid unicast address */
14971 for (i = 0; i < cnt; i++) {
14972 ret = dhd_ndo_add_ip_with_type(dhdp,
14973 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
14974 if (ret < 0) {
14975 goto done;
14976 }
14977 }
14978 }
14979
14980 /* Find anycast address */
14981 cnt = 0;
14982 read_lock_bh(&inet6->lock);
14983 acaddr = inet6->ac_list;
14984 while (acaddr) {
14985 if (cnt < dhdp->ndo_max_host_ip) {
14986 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
14987 cnt++;
14988 }
14989 acaddr = acaddr->aca_next;
14990 }
14991 read_unlock_bh(&inet6->lock);
14992
14993 /* Add anycast address */
14994 for (i = 0; i < cnt; i++) {
14995 ret = dhd_ndo_add_ip_with_type(dhdp,
14996 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14997 if (ret < 0) {
14998 goto done;
14999 }
15000 }
15001
15002 /* Now all host IP addrs were added successfully */
15003 if (dhdp->ndo_host_ip_overflow) {
15004 dhdp->ndo_host_ip_overflow = FALSE;
15005 if (dhdp->in_suspend) {
15006 /* driver is in (early) suspend state; need to enable ND offload in FW */
15007 DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
15008 ret = dhd_ndo_enable(dhdp, TRUE);
15009 }
15010 }
15011
15012done:
15013 if (ipv6_addr) {
15014 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
15015 }
15016
15017 return ret;
15018}
15019#pragma GCC diagnostic pop
15020
15021#endif /* NDO_CONFIG_SUPPORT */
15022
15023#ifdef PNO_SUPPORT
15024/* Linux wrapper to call common dhd_pno_stop_for_ssid */
15025int
15026dhd_dev_pno_stop_for_ssid(struct net_device *dev)
15027{
15028 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15029
15030 return (dhd_pno_stop_for_ssid(&dhd->pub));
15031}
15032/* Linux wrapper to call common dhd_pno_set_for_ssid */
15033int
15034dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
15035 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
15036{
15037 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15038
15039 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
15040 pno_repeat, pno_freq_expo_max, channel_list, nchan));
15041}
15042
15043/* Linux wrapper to call common dhd_pno_enable */
15044int
15045dhd_dev_pno_enable(struct net_device *dev, int enable)
15046{
15047 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15048
15049 return (dhd_pno_enable(&dhd->pub, enable));
15050}
15051
15052/* Linux wrapper to call common dhd_pno_set_for_hotlist */
15053int
15054dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
15055 struct dhd_pno_hotlist_params *hotlist_params)
15056{
15057 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15058 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
15059}
15060 /* Linux wrapper to call common dhd_pno_stop_for_batch */
15061int
15062dhd_dev_pno_stop_for_batch(struct net_device *dev)
15063{
15064 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15065 return (dhd_pno_stop_for_batch(&dhd->pub));
15066}
15067 /* Linux wrapper to call common dhd_pno_set_for_batch */
15068int
15069dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
15070{
15071 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15072 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
15073}
15074 /* Linux wrapper to call common dhd_pno_get_for_batch */
15075int
15076dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
15077{
15078 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15079 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
15080}
15081#endif /* PNO_SUPPORT */
15082
15083#if defined(PNO_SUPPORT)
15084#ifdef GSCAN_SUPPORT
15085bool
15086dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
15087{
15088 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15089
15090 return (dhd_is_legacy_pno_enabled(&dhd->pub));
15091}
15092
15093int
15094dhd_dev_set_epno(struct net_device *dev)
15095{
15096 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15097 if (!dhd) {
15098 return BCME_ERROR;
15099 }
15100 return dhd_pno_set_epno(&dhd->pub);
15101}
15102int
15103dhd_dev_flush_fw_epno(struct net_device *dev)
15104{
15105 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15106 if (!dhd) {
15107 return BCME_ERROR;
15108 }
15109 return dhd_pno_flush_fw_epno(&dhd->pub);
15110}
15111
15112/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15113int
15114dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
15115 void *buf, bool flush)
15116{
15117 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15118
15119 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
15120}
15121
15122/* Linux wrapper to call common dhd_wait_batch_results_complete */
15123int
15124dhd_dev_wait_batch_results_complete(struct net_device *dev)
15125{
15126 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15127
15128 return (dhd_wait_batch_results_complete(&dhd->pub));
15129}
15130
15131/* Linux wrapper to call common dhd_pno_lock_batch_results */
15132int
15133dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
15134{
15135 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15136
15137 return (dhd_pno_lock_batch_results(&dhd->pub));
15138}
15139/* Linux wrapper to call common dhd_pno_unlock_batch_results */
15140void
15141dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
15142{
15143 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15144
15145 return (dhd_pno_unlock_batch_results(&dhd->pub));
15146}
15147
15148/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
15149int
15150dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
15151{
15152 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15153
15154 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
15155}
15156
15157/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
15158int
15159dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
15160{
15161 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15162
15163 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
15164}
15165
15166/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
15167void *
15168dhd_dev_hotlist_scan_event(struct net_device *dev,
15169 const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
15170{
15171 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15172
15173 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
15174}
15175
15176/* Linux wrapper to call common dhd_process_full_gscan_result */
15177void *
15178dhd_dev_process_full_gscan_result(struct net_device *dev,
15179const void *data, uint32 len, int *send_evt_bytes)
15180{
15181 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15182
15183 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
15184}
15185
15186void
15187dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
15188{
15189 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15190
15191 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
15192
15193 return;
15194}
15195
15196int
15197dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
15198{
15199 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15200
15201 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
15202}
15203
15204/* Linux wrapper to call common dhd_retreive_batch_scan_results */
15205int
15206dhd_dev_retrieve_batch_scan(struct net_device *dev)
15207{
15208 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15209
15210 return (dhd_retreive_batch_scan_results(&dhd->pub));
15211}
15212/* Linux wrapper to call common dhd_pno_process_epno_result */
15213void * dhd_dev_process_epno_result(struct net_device *dev,
15214 const void *data, uint32 event, int *send_evt_bytes)
15215{
15216 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15217
15218 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
15219}
15220
15221int
15222dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
15223 wlc_roam_exp_params_t *roam_param)
15224{
15225 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15226 wl_roam_exp_cfg_t roam_exp_cfg;
15227 int err;
15228
15229 if (!roam_param) {
15230 return BCME_BADARG;
15231 }
15232
15233 DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
15234 roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
15235 DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
15236 roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
15237 roam_param->cur_bssid_boost));
15238 DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
15239 roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
15240
15241 memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
15242 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
15243 roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
15244 if (dhd->pub.lazy_roam_enable) {
15245 roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
15246 }
15247 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
15248 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
15249 TRUE);
15250 if (err < 0) {
15251 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
15252 }
15253 return err;
15254}
15255
15256int
15257dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
15258{
15259 int err;
15260 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15261 wl_roam_exp_cfg_t roam_exp_cfg;
15262
15263 memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
15264 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
15265 if (enable) {
15266 roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
15267 }
15268
15269 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
15270 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
15271 TRUE);
15272 if (err < 0) {
15273 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
15274 } else {
15275 dhd->pub.lazy_roam_enable = (enable != 0);
15276 }
15277 return err;
15278}
15279int
15280dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
15281 wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
15282{
15283 int err;
15284 uint len;
15285 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15286
15287 bssid_pref->version = BSSID_PREF_LIST_VERSION;
15288 /* By default programming bssid pref flushes out old values */
15289 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF : 0;
15290 len = sizeof(wl_bssid_pref_cfg_t);
15291 if (bssid_pref->count) {
15292 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
15293 }
15294 err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
15295 (char *)bssid_pref, len, NULL, 0, TRUE);
15296 if (err != BCME_OK) {
15297 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
15298 }
15299 return err;
15300}
15301#endif /* GSCAN_SUPPORT */
15302#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
15303int
15304dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
15305 uint32 len, uint32 flush)
15306{
15307 int err;
15308 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15309 int macmode;
15310
15311 if (blacklist) {
15312 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
15313 len, TRUE, 0);
15314 if (err != BCME_OK) {
15315 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
15316 return err;
15317 }
15318 }
15319 /* By default programming blacklist flushes out old values */
15320 macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
15321 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
15322 sizeof(macmode), TRUE, 0);
15323 if (err != BCME_OK) {
15324 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
15325 }
15326 return err;
15327}
15328int
15329dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
15330 uint32 len, uint32 flush)
15331{
15332 int err;
15333 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15334 wl_ssid_whitelist_t whitelist_ssid_flush;
15335
15336 if (!ssid_whitelist) {
15337 if (flush) {
15338 ssid_whitelist = &whitelist_ssid_flush;
15339 ssid_whitelist->ssid_count = 0;
15340 } else {
15341 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
15342 return BCME_BADARG;
15343 }
15344 }
15345 ssid_whitelist->version = SSID_WHITELIST_VERSION;
15346 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
15347 err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
15348 0, TRUE);
15349 if (err != BCME_OK) {
15350 DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n", __FUNCTION__, err));
15351 }
15352 return err;
15353}
15354#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
15355#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15356/* Linux wrapper to call common dhd_pno_get_gscan */
15357void *
15358dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
15359 void *info, uint32 *len)
15360{
15361 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15362
15363 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
15364}
15365#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15366#endif // endif
15367
15368#ifdef RSSI_MONITOR_SUPPORT
15369int
15370dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
15371 int8 max_rssi, int8 min_rssi)
15372{
15373 int err;
15374 wl_rssi_monitor_cfg_t rssi_monitor;
15375 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15376
15377 rssi_monitor.version = RSSI_MONITOR_VERSION;
15378 rssi_monitor.max_rssi = max_rssi;
15379 rssi_monitor.min_rssi = min_rssi;
15380 rssi_monitor.flags = start ? 0 : RSSI_MONITOR_STOP;
15381 err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
15382 NULL, 0, TRUE);
15383 if (err < 0 && err != BCME_UNSUPPORTED) {
15384 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
15385 }
15386 return err;
15387}
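
/*
 * Usage sketch (illustrative thresholds): start reporting when the RSSI
 * leaves the [min_rssi, max_rssi] window; passing start=0 sets
 * RSSI_MONITOR_STOP:
 *
 *	dhd_dev_set_rssi_monitor_cfg(dev, 1, -55, -85);	// start monitoring
 *	dhd_dev_set_rssi_monitor_cfg(dev, 0, 0, 0);	// stop monitoring
 */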
15388#endif /* RSSI_MONITOR_SUPPORT */
15389
15390#ifdef DHDTCPACK_SUPPRESS
15391int
15392dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
15393{
15394 int err;
15395 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15396
15397 err = dhd_tcpack_suppress_set(&dhd->pub, enable);
15398 if (err != BCME_OK) {
15399 DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
15400 }
15401 return err;
15402}
15403#endif /* DHDTCPACK_SUPPRESS */
15404
15405int
15406dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
15407{
15408 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15409 dhd_pub_t *dhdp = &dhd->pub;
15410
15411 if (!dhdp || !oui) {
15412 DHD_ERROR(("NULL POINTER : %s\n",
15413 __FUNCTION__));
15414 return BCME_ERROR;
15415 }
15416 if (ETHER_ISMULTI(oui)) {
15417 DHD_ERROR(("Expected unicast OUI\n"));
15418 return BCME_ERROR;
15419 } else {
15420 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
15421 memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
15422 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
15423 MACOUI2STRDBG(rand_mac_oui)));
15424 }
15425 return BCME_OK;
15426}
15427
15428int
15429dhd_set_rand_mac_oui(dhd_pub_t *dhd)
15430{
15431 int err;
15432 wl_pfn_macaddr_cfg_t wl_cfg;
15433 uint8 *rand_mac_oui = dhd->rand_mac_oui;
15434
15435 memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
15436 memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
15437 wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
15438 if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
15439 wl_cfg.flags = 0;
15440 } else {
15441 wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
15442 }
15443
15444 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
15445 MACOUI2STRDBG(rand_mac_oui)));
15446
15447 err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
15448 if (err < 0) {
15449 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
15450 }
15451 return err;
15452}
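
/*
 * Usage sketch (two-step flow implied by the pair above; the OUI value is
 * illustrative): the framework first caches a unicast OUI in the host,
 * then the driver pushes it to firmware via the "pfn_macaddr" iovar:
 *
 *	uint8 oui[DOT11_OUI_LEN] = { 0x00, 0x90, 0x4c };
 *	dhd_dev_cfg_rand_mac_oui(dev, oui);	// validate + cache in dhdp
 *	dhd_set_rand_mac_oui(dhdp);		// program firmware
 */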
15453
15454#ifdef RTT_SUPPORT
15455 /* Linux wrapper to call common dhd_rtt_set_cfg */
15456int
15457dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
15458{
15459 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15460
15461 return (dhd_rtt_set_cfg(&dhd->pub, buf));
15462}
15463
15464int
15465dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
15466{
15467 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15468
15469 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
15470}
15471
15472int
15473dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
15474{
15475 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15476
15477 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
15478}
15479
15480int
15481dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
15482{
15483 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15484
15485 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
15486}
15487
15488int
15489dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
15490{
15491 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15492
15493 return (dhd_rtt_capability(&dhd->pub, capa));
15494}
15495
15496int
15497dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
15498{
15499 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15500 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
15501}
15502
15503int
15504dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
15505{
15506 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15507 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
15508}
15509
15510int dhd_dev_rtt_cancel_responder(struct net_device *dev)
15511{
15512 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15513 return (dhd_rtt_cancel_responder(&dhd->pub));
15514}
15515
15516#endif /* RTT_SUPPORT */
15517
15518#ifdef KEEP_ALIVE
15519#define KA_TEMP_BUF_SIZE 512
15520#define KA_FRAME_SIZE 300
15521
15522int
15523dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
15524 uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
15525{
15526 const int ETHERTYPE_LEN = 2;
15527 char *pbuf = NULL;
15528 const char *str;
15529 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15530 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15531 int buf_len = 0;
15532 int str_len = 0;
15533 int res = BCME_ERROR;
15534 int len_bytes = 0;
15535 int i = 0;
15536
15537 /* ether frame large enough for both the max IP pkt (256 bytes) and the ether header */
15538 char *pmac_frame = NULL;
15539 char *pmac_frame_begin = NULL;
15540
15541 /*
15542 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15543 * dongle shall reject a mkeep_alive request.
15544 */
15545 if (!dhd_support_sta_mode(dhd_pub))
15546 return res;
15547
15548 DHD_TRACE(("%s execution\n", __FUNCTION__));
15549
15550 if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15551 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15552 res = BCME_NOMEM;
15553 return res;
15554 }
15555
15556 if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
15557 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
15558 res = BCME_NOMEM;
15559 goto exit;
15560 }
15561 pmac_frame_begin = pmac_frame;
15562
15563 /*
15564 * Get current mkeep-alive status.
15565 */
15566 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
15567 KA_TEMP_BUF_SIZE, FALSE);
15568 if (res < 0) {
15569 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15570 goto exit;
15571 } else {
15572 /* Check whether the requested ID is already occupied */
15573 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15574 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15575 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15576 __FUNCTION__, mkeep_alive_id));
15577
15578 /* Current occupied ID info */
15579 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
15580 DHD_ERROR((" Id : %d\n"
15581 " Period: %d msec\n"
15582 " Length: %d\n"
15583 " Packet: 0x",
15584 mkeep_alive_pktp->keep_alive_id,
15585 dtoh32(mkeep_alive_pktp->period_msec),
15586 dtoh16(mkeep_alive_pktp->len_bytes)));
15587
15588 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15589 DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
15590 }
15591 DHD_ERROR(("\n"));
15592
15593 res = BCME_NOTFOUND;
15594 goto exit;
15595 }
15596 }
15597
15598 /* Request the specified ID */
15599 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15600 memset(pbuf, 0, KA_TEMP_BUF_SIZE);
15601 str = "mkeep_alive";
15602 str_len = strlen(str);
15603 strncpy(pbuf, str, str_len);
15604 pbuf[str_len] = '\0';
15605
15606 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
15607 mkeep_alive_pkt.period_msec = htod32(period_msec);
15608 buf_len = str_len + 1;
15609 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15610 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15611
15612 /* ID assigned */
15613 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15614
15615 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
15616
15617 /*
15618 * Build up Ethernet Frame
15619 */
15620
15621 /* Mapping dest mac addr */
15622 memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
15623 pmac_frame += ETHER_ADDR_LEN;
15624
15625 /* Mapping src mac addr */
15626 memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
15627 pmac_frame += ETHER_ADDR_LEN;
15628
15629 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15630 *(pmac_frame++) = 0x08;
15631 *(pmac_frame++) = 0x00;
15632
15633 /* Mapping IP pkt */
15634 memcpy(pmac_frame, ip_pkt, ip_pkt_len);
15635 pmac_frame += ip_pkt_len;
15636
15637 /*
15638 * Length of ether frame (assumed to be raw bytes)
15639 * = src mac + dst mac + ether type + ip pkt len
15640 */
15641 len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
15642 memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
15643 buf_len += len_bytes;
15644 mkeep_alive_pkt.len_bytes = htod16(len_bytes);
15645
15646 /*
15647 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15648 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15649 * guarantee that the buffer is properly aligned.
15650 */
15651 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
15652
15653 res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
15654exit:
15655 if (pmac_frame_begin) {
15656 MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
15657 pmac_frame_begin = NULL;
15658 }
15659 if (pbuf) {
15660 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15661 pbuf = NULL;
15662 }
15663 return res;
15664}
15665
15666int
15667dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
15668{
15669 char *pbuf = NULL;
15670 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15671 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15672 int res = BCME_ERROR;
15673 int i = 0;
15674
15675 /*
15676 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15677 * dongle shall reject a mkeep_alive request.
15678 */
15679 if (!dhd_support_sta_mode(dhd_pub))
15680 return res;
15681
15682 DHD_TRACE(("%s execution\n", __FUNCTION__));
15683
15684 /*
15685 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15686 */
15687 if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15688 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15689 return res;
15690 }
15691
15692 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
15693 sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
15694 if (res < 0) {
15695 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15696 goto exit;
15697 } else {
15698 /* Check occupied ID */
15699 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15700 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
15701 DHD_INFO((" Id : %d\n"
15702 " Period: %d msec\n"
15703 " Length: %d\n"
15704 " Packet: 0x",
15705 mkeep_alive_pktp->keep_alive_id,
15706 dtoh32(mkeep_alive_pktp->period_msec),
15707 dtoh16(mkeep_alive_pktp->len_bytes)));
15708
15709 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15710 DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
15711 }
15712 DHD_INFO(("\n"));
15713 }
15714
15715 /* Stop it if the ID is in use */
15716 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15717 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
15718 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15719
15720 mkeep_alive_pkt.period_msec = 0;
15721 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15722 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15723 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15724
15725 res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
15726 (char *)&mkeep_alive_pkt,
15727 WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
15728 } else {
15729 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
15730 res = BCME_NOTFOUND;
15731 }
15732exit:
15733 if (pbuf) {
15734 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15735 pbuf = NULL;
15736 }
15737 return res;
15738}
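
/*
 * Usage sketch (illustrative values; ip_pkt must be a prebuilt IP packet
 * of at most 256 bytes, and ID 0 is reserved for the NULL pkt): start a
 * periodic keep-alive on ID 1 and stop it by the same ID later:
 *
 *	dhd_dev_start_mkeep_alive(dhdp, 1, ip_pkt, ip_pkt_len,
 *		src_mac, dst_mac, 30000);	// send every 30 seconds
 *	...
 *	dhd_dev_stop_mkeep_alive(dhdp, 1);
 */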
15739#endif /* KEEP_ALIVE */
15740
15741#if defined(PKT_FILTER_SUPPORT) && defined(APF)
15742static void _dhd_apf_lock_local(dhd_info_t *dhd)
15743{
15744#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15745 if (dhd) {
15746 mutex_lock(&dhd->dhd_apf_mutex);
15747 }
15748#endif // endif
15749}
15750
15751static void _dhd_apf_unlock_local(dhd_info_t *dhd)
15752{
15753#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15754 if (dhd) {
15755 mutex_unlock(&dhd->dhd_apf_mutex);
15756 }
15757#endif // endif
15758}
15759
15760static int
15761__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
15762 u8* program, uint32 program_len)
15763{
15764 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15765 dhd_pub_t *dhdp = &dhd->pub;
15766 wl_pkt_filter_t * pkt_filterp;
15767 wl_apf_program_t *apf_program;
15768 char *buf;
15769 u32 cmd_len, buf_len;
15770 int ifidx, ret;
15771 char cmd[] = "pkt_filter_add";
15772
15773 ifidx = dhd_net2idx(dhd, ndev);
15774 if (ifidx == DHD_BAD_IF) {
15775 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15776 return -ENODEV;
15777 }
15778
15779 cmd_len = sizeof(cmd);
15780
15781 /* If program_len exceeds the maximum supported size,
15782 * or the program is NULL, return from here.
15783 */
15784 if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
15785 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15786 __FUNCTION__, program_len, program));
15787 return -EINVAL;
15788 }
15789 buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
15790 WL_APF_PROGRAM_FIXED_LEN + program_len;
15791
15792 buf = MALLOCZ(dhdp->osh, buf_len);
15793 if (unlikely(!buf)) {
15794 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15795 return -ENOMEM;
15796 }
15797
15798 memcpy(buf, cmd, cmd_len);
15799
15800 pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
15801 pkt_filterp->id = htod32(filter_id);
15802 pkt_filterp->negate_match = htod32(FALSE);
15803 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
15804
15805 apf_program = &pkt_filterp->u.apf_program;
15806 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
15807 apf_program->instr_len = htod16(program_len);
15808 memcpy(apf_program->instrs, program, program_len);
15809
15810 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15811 if (unlikely(ret)) {
15812 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15813 __FUNCTION__, filter_id, ret));
15814 }
15815
15816 if (buf) {
15817 MFREE(dhdp->osh, buf, buf_len);
15818 }
15819 return ret;
15820}
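
/*
 * Layout of the iovar buffer built above (offsets implied by the code, not
 * an external spec):
 *
 *	+--------------------+----------------------------+------------------+
 *	| "pkt_filter_add\0" | wl_pkt_filter_t fixed part | APF instructions |
 *	| cmd_len bytes      | id/negate/type + apf hdr   | program_len bytes|
 *	+--------------------+----------------------------+------------------+
 */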
15821
15822static int
15823__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
15824 uint32 mode, uint32 enable)
15825{
15826 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15827 dhd_pub_t *dhdp = &dhd->pub;
15828 wl_pkt_filter_enable_t * pkt_filterp;
15829 char *buf;
15830 u32 cmd_len, buf_len;
15831 int ifidx, ret;
15832 char cmd[] = "pkt_filter_enable";
15833
15834 ifidx = dhd_net2idx(dhd, ndev);
15835 if (ifidx == DHD_BAD_IF) {
15836 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15837 return -ENODEV;
15838 }
15839
15840 cmd_len = sizeof(cmd);
15841 buf_len = cmd_len + sizeof(*pkt_filterp);
15842
15843 buf = MALLOCZ(dhdp->osh, buf_len);
15844 if (unlikely(!buf)) {
15845 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15846 return -ENOMEM;
15847 }
15848
15849 memcpy(buf, cmd, cmd_len);
15850
15851 pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
15852 pkt_filterp->id = htod32(filter_id);
15853 pkt_filterp->enable = htod32(enable);
15854
15855 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15856 if (unlikely(ret)) {
15857 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15858 __FUNCTION__, filter_id, ret));
15859 goto exit;
15860 }
15861
15862 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
15863 WLC_SET_VAR, TRUE, ifidx);
15864 if (unlikely(ret)) {
15865 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15866 __FUNCTION__, filter_id, ret));
15867 }
15868
15869exit:
15870 if (buf) {
15871 MFREE(dhdp->osh, buf, buf_len);
15872 }
15873 return ret;
15874}
15875
15876static int
15877__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
15878{
15879 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15880 dhd_pub_t *dhdp = &dhd->pub;
15881 int ifidx, ret;
15882
15883 ifidx = dhd_net2idx(dhd, ndev);
15884 if (ifidx == DHD_BAD_IF) {
15885 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15886 return -ENODEV;
15887 }
15888
15889 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
15890 htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
15891 if (unlikely(ret)) {
15892 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15893 __FUNCTION__, filter_id, ret));
15894 }
15895
15896 return ret;
15897}
15898
15899void dhd_apf_lock(struct net_device *dev)
15900{
15901 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15902 _dhd_apf_lock_local(dhd);
15903}
15904
15905void dhd_apf_unlock(struct net_device *dev)
15906{
15907 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15908 _dhd_apf_unlock_local(dhd);
15909}
15910
15911int
15912dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
15913{
15914 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15915 dhd_pub_t *dhdp = &dhd->pub;
15916 int ifidx, ret;
15917
15918 if (!FW_SUPPORTED(dhdp, apf)) {
15919 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15920
15921 /*
15922 * Notify Android framework that APF is not supported by setting
15923 * version as zero.
15924 */
15925 *version = 0;
15926 return BCME_OK;
15927 }
15928
15929 ifidx = dhd_net2idx(dhd, ndev);
15930 if (ifidx == DHD_BAD_IF) {
15931 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15932 return -ENODEV;
15933 }
15934
15935 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
15936 WLC_GET_VAR, FALSE, ifidx);
15937 if (unlikely(ret)) {
15938 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15939 __FUNCTION__, ret));
15940 }
15941
15942 return ret;
15943}
15944
15945int
15946dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
15947{
15948 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15949 dhd_pub_t *dhdp = &dhd->pub;
15950 int ifidx, ret;
15951
15952 if (!FW_SUPPORTED(dhdp, apf)) {
15953 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15954 *max_len = 0;
15955 return BCME_OK;
15956 }
15957
15958 ifidx = dhd_net2idx(dhd, ndev);
15959 if (ifidx == DHD_BAD_IF) {
15960 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15961 return -ENODEV;
15962 }
15963
15964 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
15965 WLC_GET_VAR, FALSE, ifidx);
15966 if (unlikely(ret)) {
15967 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
15968 __FUNCTION__, ret));
15969 }
15970
15971 return ret;
15972}
15973
15974int
15975dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
15976 uint32 program_len)
15977{
15978 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15979 dhd_pub_t *dhdp = &dhd->pub;
15980 int ret;
15981
15982 DHD_APF_LOCK(ndev);
15983
15984 /* delete, if filter already exists */
15985 if (dhdp->apf_set) {
15986 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
15987 if (unlikely(ret)) {
15988 goto exit;
15989 }
15990 dhdp->apf_set = FALSE;
15991 }
15992
15993 ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
15994 if (ret) {
15995 goto exit;
15996 }
15997 dhdp->apf_set = TRUE;
15998
15999 if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
16000 /* Driver is still in (early) suspend state, enable APF filter back */
16001 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
16002 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
16003 }
16004exit:
16005 DHD_APF_UNLOCK(ndev);
16006
16007 return ret;
16008}
16009
16010int
16011dhd_dev_apf_enable_filter(struct net_device *ndev)
16012{
16013 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16014 dhd_pub_t *dhdp = &dhd->pub;
16015 int ret = 0;
16016 bool nan_dp_active = false;
16017
16018 DHD_APF_LOCK(ndev);
16019#ifdef WL_NAN
16020 nan_dp_active = wl_cfgnan_is_dp_active(ndev);
16021#endif /* WL_NAN */
16022 if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
16023 !nan_dp_active)) {
16024 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
16025 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
16026 }
16027
16028 DHD_APF_UNLOCK(ndev);
16029
16030 return ret;
16031}
16032
16033int
16034dhd_dev_apf_disable_filter(struct net_device *ndev)
16035{
16036 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16037 dhd_pub_t *dhdp = &dhd->pub;
16038 int ret = 0;
16039
16040 DHD_APF_LOCK(ndev);
16041
16042 if (dhdp->apf_set) {
16043 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
16044 PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
16045 }
16046
16047 DHD_APF_UNLOCK(ndev);
16048
16049 return ret;
16050}
16051
16052int
16053dhd_dev_apf_delete_filter(struct net_device *ndev)
16054{
16055 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16056 dhd_pub_t *dhdp = &dhd->pub;
16057 int ret = 0;
16058
16059 DHD_APF_LOCK(ndev);
16060
16061 if (dhdp->apf_set) {
16062 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
16063 if (!ret) {
16064 dhdp->apf_set = FALSE;
16065 }
16066 }
16067
16068 DHD_APF_UNLOCK(ndev);
16069
16070 return ret;
16071}
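
/*
 * Usage sketch (assumed Android APF lifecycle): query capabilities once,
 * install the program, then let suspend/resume toggle it:
 *
 *	uint32 ver, max_len;
 *	dhd_dev_apf_get_version(ndev, &ver);	// 0 means APF unsupported
 *	dhd_dev_apf_get_max_len(ndev, &max_len);
 *	dhd_dev_apf_add_filter(ndev, program, program_len);
 *	dhd_dev_apf_enable_filter(ndev);	// on suspend
 *	dhd_dev_apf_disable_filter(ndev);	// on resume
 *	dhd_dev_apf_delete_filter(ndev);	// on teardown
 */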
16072#endif /* PKT_FILTER_SUPPORT && APF */
16073
16074#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16075static void dhd_hang_process(struct work_struct *work_data)
16076{
16077 struct net_device *dev;
16078#ifdef IFACE_HANG_FORCE_DEV_CLOSE
16079 struct net_device *ndev;
16080 uint8 i = 0;
16081#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16082/* Ignore compiler warnings due to -Werror=cast-qual */
16083#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16084#pragma GCC diagnostic push
16085#pragma GCC diagnostic ignored "-Wcast-qual"
16086#endif // endif
16087 struct dhd_info *dhd =
16088 container_of(work_data, dhd_info_t, dhd_hang_process_work);
16089#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16090#pragma GCC diagnostic pop
16091#endif // endif
16092
16093 dev = dhd->iflist[0]->net;
16094
16095 if (dev) {
16096#if defined(WL_WIRELESS_EXT)
16097 wl_iw_send_priv_event(dev, "HANG");
16098#endif // endif
16099#if defined(WL_CFG80211)
16100 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
16101#endif // endif
16102 }
16103#ifdef IFACE_HANG_FORCE_DEV_CLOSE
16104 /*
16105 * For HW2, dev_close needs to be done to recover
16106 * from the upper layer after a hang. For Interposer, skip
16107 * dev_close so that dhd iovars can be used to take a
16108 * socramdump after a crash; also skip for HW4, as
16109 * handling of the hang event is different.
16110 */
16111
16112 rtnl_lock();
16113 for (i = 0; i < DHD_MAX_IFS; i++) {
16114 ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
16115 if (ndev && (ndev->flags & IFF_UP)) {
16116 DHD_ERROR(("ndev->name : %s dev close\n",
16117 ndev->name));
16118 dev_close(ndev);
16119 }
16120 }
16121 rtnl_unlock();
16122#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16123}
16124
16125#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
16126extern dhd_pub_t *link_recovery;
16127void dhd_host_recover_link(void)
16128{
16129 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
16130 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
16131 dhd_bus_set_linkdown(link_recovery, TRUE);
16132 dhd_os_send_hang_message(link_recovery);
16133}
16134EXPORT_SYMBOL(dhd_host_recover_link);
16135#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
16136
16137int dhd_os_send_hang_message(dhd_pub_t *dhdp)
16138{
16139 int ret = 0;
16140
16141 if (dhdp) {
16142#ifdef WL_CFG80211
16143 struct net_device *primary_ndev;
16144 struct bcm_cfg80211 *cfg;
16145
16146 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
16147 if (!primary_ndev) {
16148 DHD_ERROR(("%s: Cannot find primary netdev\n",
16149 __FUNCTION__));
16150 return -ENODEV;
16151 }
16152
16153 cfg = wl_get_cfg(primary_ndev);
16154 if (!cfg) {
16155 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
16156 return -EINVAL;
16157 }
16158
16159 /* Skip sending HANG event to framework if driver is not ready */
16160 if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
16161 DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
16162 return -ENODEV;
16163 }
16164#endif /* WL_CFG80211 */
16165
16166#if defined(DHD_HANG_SEND_UP_TEST)
16167 if (dhdp->req_hang_type) {
16168 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
16169 __FUNCTION__, dhdp->req_hang_type));
16170 dhdp->req_hang_type = 0;
16171 }
16172#endif /* DHD_HANG_SEND_UP_TEST */
16173
16174 if (!dhdp->hang_was_sent) {
16175#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
16176 dhdp->hang_counts++;
16177 if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
16178 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
16179 __func__, dhdp->hang_counts));
16180 BUG_ON(1);
16181 }
16182#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
16183#ifdef DHD_DEBUG_UART
16184 /* If the PCIe link is broken, execute the debug uart application
16185 * to gather ramdump data from the dongle via uart
16186 */
16187 if (!dhdp->info->duart_execute) {
16188 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
16189 (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
16190 dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
16191 }
16192#endif /* DHD_DEBUG_UART */
16193 dhdp->hang_was_sent = 1;
16194#ifdef BT_OVER_SDIO
16195 dhdp->is_bt_recovery_required = TRUE;
16196#endif // endif
16197 schedule_work(&dhdp->info->dhd_hang_process_work);
16198
16199 }
16200 }
16201 return ret;
16202}
16203
16204int net_os_send_hang_message(struct net_device *dev)
16205{
16206 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16207 int ret = 0;
16208
16209 if (dhd) {
16210 /* Report FW problem when enabled */
16211 if (dhd->pub.hang_report) {
16212#ifdef BT_OVER_SDIO
16213 if (netif_running(dev)) {
16214#endif /* BT_OVER_SDIO */
16215#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16216 ret = dhd_os_send_hang_message(&dhd->pub);
16217#else
16218 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
16219#endif // endif
16220#ifdef BT_OVER_SDIO
16221 }
16222 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
16223 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
16224#endif /* BT_OVER_SDIO */
16225 } else {
16226 DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
16227 __FUNCTION__));
16228 }
16229 }
16230 return ret;
16231}
16232
16233int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
16234{
16235 dhd_info_t *dhd = NULL;
16236 dhd_pub_t *dhdp = NULL;
16237 int reason;
16238
16239 dhd = DHD_DEV_INFO(dev);
16240 if (dhd) {
16241 dhdp = &dhd->pub;
16242 }
16243
16244 if (!dhd || !dhdp) {
16245 return 0;
16246 }
16247
16248 reason = bcm_strtoul(string_num, NULL, 0);
16249 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
16250
16251 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
16252 reason = 0;
16253 }
16254
16255 dhdp->hang_reason = reason;
16256
16257 return net_os_send_hang_message(dev);
16258}
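
/*
 * Usage sketch (hypothetical private command): the reason arrives as a
 * numeric string and is parsed with base 0, so hex is accepted; values at
 * or below HANG_REASON_MASK, or at or above HANG_REASON_MAX, are coerced
 * to 0 before the hang message is sent:
 *
 *	net_os_send_hang_message_reason(dev, "0x8001");
 */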
16259#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
16260
16261int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
16262{
16263 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16264 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
16265}
16266
16267bool dhd_force_country_change(struct net_device *dev)
16268{
16269 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16270
16271 if (dhd && dhd->pub.up)
16272 return dhd->pub.force_country_change;
16273 return FALSE;
16274}
16275
16276void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
16277 wl_country_t *cspec)
16278{
16279 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16280 dhd_pub_t *dhdp = &dhd->pub;
16281
16282 BCM_REFERENCE(dhdp);
16283 if (!CHECK_IS_BLOB(dhdp) || CHECK_IS_MULT_REGREV(dhdp)) {
16284#if defined(CUSTOM_COUNTRY_CODE)
16285 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
16286 dhd->pub.dhd_cflags);
16287#else
16288 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
16289#endif /* CUSTOM_COUNTRY_CODE */
16290 }
16291#if !defined(CUSTOM_COUNTRY_CODE)
16292 else {
16293 /* Replace the ccode with XZ if the ccode is an undefined country */
16294 if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
16295 strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
16296 strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
16297 strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
16298 DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
16299 }
16300 }
16301#endif /* !CUSTOM_COUNTRY_CODE */
16302
16303#if defined(KEEP_KR_REGREV)
16304 if (strncmp(country_iso_code, "KR", 3) == 0) {
16305 if (!CHECK_IS_BLOB(dhdp) || CHECK_IS_MULT_REGREV(dhdp)) {
16306 if (strncmp(dhd->pub.vars_ccode, "KR", 3) == 0) {
16307 cspec->rev = dhd->pub.vars_regrev;
16308 }
16309 }
16310 }
16311#endif /* KEEP_KR_REGREV */
16312
16313#ifdef KEEP_JP_REGREV
16314 if (strncmp(country_iso_code, "JP", 3) == 0) {
16315 if (CHECK_IS_BLOB(dhdp) && !CHECK_IS_MULT_REGREV(dhdp)) {
16316 if (strncmp(dhd->pub.vars_ccode, "J1", 3) == 0) {
16317 memcpy(cspec->ccode, dhd->pub.vars_ccode,
16318 sizeof(dhd->pub.vars_ccode));
16319 }
16320 } else {
16321 if (strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) {
16322 cspec->rev = dhd->pub.vars_regrev;
16323 }
16324 }
16325 }
16326#endif /* KEEP_JP_REGREV */
16327 BCM_REFERENCE(dhd);
16328}

16329void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
16330{
16331 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16332#ifdef WL_CFG80211
16333 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
16334#endif // endif
16335
16336 if (dhd && dhd->pub.up) {
16337 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
16338#ifdef DHD_DISABLE_VHTMODE
16339 dhd_disable_vhtmode(&dhd->pub);
16340#endif /* DHD_DISABLE_VHTMODE */
16341
16342#ifdef WL_CFG80211
16343 wl_update_wiphybands(cfg, notify);
16344#endif // endif
16345 }
16346}
16347
16348#ifdef DHD_DISABLE_VHTMODE
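/*
 * Read the firmware "vhtmode" iovar and force it to 0 if set. A BCME_NOTDOWN
 * error on the set means the interface must be downed first, so the retry
 * path below brackets the set with WLC_DOWN/WLC_UP ioctls.
 */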
16349void
16350dhd_disable_vhtmode(dhd_pub_t *dhd)
16351{
16352 int ret = 0;
16353 uint32 vhtmode = FALSE;
16354 char buf[32];
16355
16356 /* Get vhtmode */
16357 ret = dhd_iovar(dhd, 0, "vhtmode", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
16358 if (ret < 0) {
16359 DHD_ERROR(("%s Get vhtmode Fail ret %d\n", __FUNCTION__, ret));
16360 return;
16361 }
16362 memcpy(&vhtmode, buf, sizeof(uint32));
16363 if (vhtmode == 0) {
16364		DHD_ERROR(("%s vhtmode is already 0\n", __FUNCTION__));
16365 return;
16366 }
16367 vhtmode = FALSE;
16368
16369 /* Set vhtmode */
16370 ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode, sizeof(vhtmode), NULL, 0, TRUE);
16371 if (ret == 0) {
16372 DHD_ERROR(("%s Set vhtmode Success %d\n", __FUNCTION__, vhtmode));
16373 } else {
16374 if (ret == BCME_NOTDOWN) {
16375 uint wl_down = 1;
16376 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
16377 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
16378 if (ret) {
16379 DHD_ERROR(("%s WL_DOWN Fail ret %d\n", __FUNCTION__, ret));
16380 return;
16381 }
16382
16383 ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode,
16384 sizeof(vhtmode), NULL, 0, TRUE);
16385 DHD_ERROR(("%s Set vhtmode %d, ret %d\n", __FUNCTION__, vhtmode, ret));
16386
16387 ret = dhd_wl_ioctl_cmd(dhd, WLC_UP,
16388 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
16389 if (ret) {
16390 DHD_ERROR(("%s WL_UP Fail ret %d\n", __FUNCTION__, ret));
16391 }
16392 } else {
16393 DHD_ERROR(("%s Set vhtmode 0 failed %d\n", __FUNCTION__, ret));
16394 }
16395 }
16396}
16397#endif /* DHD_DISABLE_VHTMODE */
16398
16399void dhd_bus_band_set(struct net_device *dev, uint band)
16400{
16401 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16402#ifdef WL_CFG80211
16403 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
16404#endif // endif
16405 if (dhd && dhd->pub.up) {
16406#ifdef WL_CFG80211
16407 wl_update_wiphybands(cfg, true);
16408#endif // endif
16409 }
16410}
16411
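/*
 * Record a new firmware image path. In SOFTAP builds a path containing
 * "apsta" (e.g. "fw_bcmdhd_apsta.bin" - name illustrative) marks the image
 * as AP-capable via ap_fw_loaded.
 */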
16412int dhd_net_set_fw_path(struct net_device *dev, char *fw)
16413{
16414 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16415
16416 if (!fw || fw[0] == '\0')
16417 return -EINVAL;
16418
16419 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
16420 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
16421
16422#if defined(SOFTAP)
16423 if (strstr(fw, "apsta") != NULL) {
16424 DHD_INFO(("GOT APSTA FIRMWARE\n"));
16425 ap_fw_loaded = TRUE;
16426 } else {
16427 DHD_INFO(("GOT STA FIRMWARE\n"));
16428 ap_fw_loaded = FALSE;
16429 }
16430#endif // endif
16431 return 0;
16432}
16433
16434void dhd_net_if_lock(struct net_device *dev)
16435{
16436 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16437 dhd_net_if_lock_local(dhd);
16438}
16439
16440void dhd_net_if_unlock(struct net_device *dev)
16441{
16442 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16443 dhd_net_if_unlock_local(dhd);
16444}
16445
16446static void dhd_net_if_lock_local(dhd_info_t *dhd)
16447{
16448#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16449 if (dhd)
16450 mutex_lock(&dhd->dhd_net_if_mutex);
16451#endif // endif
16452}
16453
16454static void dhd_net_if_unlock_local(dhd_info_t *dhd)
16455{
16456#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16457 if (dhd)
16458 mutex_unlock(&dhd->dhd_net_if_mutex);
16459#endif // endif
16460}
16461
16462static void dhd_suspend_lock(dhd_pub_t *pub)
16463{
16464#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16465 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16466 if (dhd)
16467 mutex_lock(&dhd->dhd_suspend_mutex);
16468#endif // endif
16469}
16470
16471static void dhd_suspend_unlock(dhd_pub_t *pub)
16472{
16473#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16474 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16475 if (dhd)
16476 mutex_unlock(&dhd->dhd_suspend_mutex);
16477#endif // endif
16478}
16479
16480unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
16481{
16482 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16483 unsigned long flags = 0;
16484
16485 if (dhd)
16486 spin_lock_irqsave(&dhd->dhd_lock, flags);
16487
16488 return flags;
16489}
16490
16491void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
16492{
16493 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16494
16495 if (dhd)
16496 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
16497}
16498
16499/* Linux specific multipurpose spinlock API */
16500void *
16501dhd_os_spin_lock_init(osl_t *osh)
16502{
16503 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
16504 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
16505 /* and this results in kernel asserts in internal builds */
16506 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
16507 if (lock)
16508 spin_lock_init(lock);
16509 return ((void *)lock);
16510}

16511void
16512dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
16513{
16514 if (lock)
16515 MFREE(osh, lock, sizeof(spinlock_t) + 4);
16516}

16517unsigned long
16518dhd_os_spin_lock(void *lock)
16519{
16520 unsigned long flags = 0;
16521
16522 if (lock)
16523 spin_lock_irqsave((spinlock_t *)lock, flags);
16524
16525 return flags;
16526}

16527void
16528dhd_os_spin_unlock(void *lock, unsigned long flags)
16529{
16530 if (lock)
16531 spin_unlock_irqrestore((spinlock_t *)lock, flags);
16532}
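
/*
 * Usage sketch for the multipurpose spinlock API above (illustrative):
 *
 *	void *lk = dhd_os_spin_lock_init(osh);
 *	unsigned long flags = dhd_os_spin_lock(lk);
 *	... critical section ...
 *	dhd_os_spin_unlock(lk, flags);
 *	dhd_os_spin_lock_deinit(osh, lk);
 *
 * The flags value returned by the lock call must be passed back on unlock,
 * mirroring spin_lock_irqsave()/spin_unlock_irqrestore().
 */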
16533
16534void *
16535dhd_os_dbgring_lock_init(osl_t *osh)
16536{
16537 struct mutex *mtx = NULL;
16538
16539 mtx = MALLOCZ(osh, sizeof(*mtx));
16540 if (mtx)
16541 mutex_init(mtx);
16542
16543 return mtx;
16544}
16545
16546void
16547dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
16548{
16549 if (mtx) {
16550 mutex_destroy(mtx);
16551 MFREE(osh, mtx, sizeof(struct mutex));
16552 }
16553}
16554
16555static int
16556dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
16557{
16558 return (atomic_read(&dhd->pend_8021x_cnt));
16559}
16560
16561#define MAX_WAIT_FOR_8021X_TX 100
16562
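/*
 * Wait for pending 802.1X frames to drain. Each loop iteration below sleeps
 * ~10 ms, so the worst case is MAX_WAIT_FOR_8021X_TX * 10 ms ~= 1 second
 * before the pending count is forcibly reset.
 */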
16563int
16564dhd_wait_pend8021x(struct net_device *dev)
16565{
16566 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16567 int timeout = msecs_to_jiffies(10);
16568 int ntimes = MAX_WAIT_FOR_8021X_TX;
16569 int pend = dhd_get_pend_8021x_cnt(dhd);
16570
16571	while (ntimes && pend) {
16572		set_current_state(TASK_INTERRUPTIBLE);
16573		DHD_PERIM_UNLOCK(&dhd->pub);
16574		schedule_timeout(timeout);
16575		DHD_PERIM_LOCK(&dhd->pub);
16576		set_current_state(TASK_RUNNING);
16577		ntimes--;
16578		pend = dhd_get_pend_8021x_cnt(dhd);
16579	}
16582	if (ntimes == 0) {
16584 atomic_set(&dhd->pend_8021x_cnt, 0);
16585 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
16586 }
16587 return pend;
16588}
16589
16590#if defined(DHD_DEBUG)
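/*
 * Write a kernel-space buffer to a file and fsync it. The get_fs()/set_fs()
 * switch to KERNEL_DS is what lets vfs_write() accept a kernel pointer here;
 * kernels that have removed set_fs() would need kernel_write() instead
 * (background note only - this driver targets kernels where set_fs() exists).
 */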
16591int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
16592{
16593 int ret = 0;
16594 struct file *fp = NULL;
16595 mm_segment_t old_fs;
16596 loff_t pos = 0;
16597 /* change to KERNEL_DS address limit */
16598 old_fs = get_fs();
16599 set_fs(KERNEL_DS);
16600
16601 /* open file to write */
16602 fp = filp_open(file_name, flags, 0664);
16603	if (IS_ERR(fp)) {
16604		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
		/* don't fall through to "exit" with ret == 0 (success) */
		ret = BCME_ERROR;
16605		goto exit;
16606	}
16607
16608 /* Write buf to file */
16609 ret = vfs_write(fp, buf, size, &pos);
16610 if (ret < 0) {
16611 DHD_ERROR(("write file error, err = %d\n", ret));
16612 goto exit;
16613 }
16614
16615 /* Sync file from filesystem to physical media */
16616 ret = vfs_fsync(fp, 0);
16617 if (ret < 0) {
16618 DHD_ERROR(("sync file error, error = %d\n", ret));
16619 goto exit;
16620 }
16621 ret = BCME_OK;
16622
16623exit:
16624 /* close file before return */
16625 if (!IS_ERR(fp))
16626 filp_close(fp, current->files);
16627
16628 /* restore previous address limit */
16629 set_fs(old_fs);
16630
16631 return ret;
16632}
16633#endif // endif
16634
16635#ifdef DHD_DEBUG
16636static void
16637dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
16638{
16639 char *type_str = NULL;
16640
16641 switch (type) {
16642 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
16643 type_str = "resumed_on_timeout";
16644 break;
16645 case DUMP_TYPE_D3_ACK_TIMEOUT:
16646 type_str = "D3_ACK_timeout";
16647 break;
16648 case DUMP_TYPE_DONGLE_TRAP:
16649 type_str = "Dongle_Trap";
16650 break;
16651 case DUMP_TYPE_MEMORY_CORRUPTION:
16652 type_str = "Memory_Corruption";
16653 break;
16654 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
16655 type_str = "PKTID_AUDIT_Fail";
16656 break;
16657 case DUMP_TYPE_PKTID_INVALID:
16658 type_str = "PKTID_INVALID";
16659 break;
16660 case DUMP_TYPE_SCAN_TIMEOUT:
16661 type_str = "SCAN_timeout";
16662 break;
16663 case DUMP_TYPE_SCAN_BUSY:
16664 type_str = "SCAN_Busy";
16665 break;
16666 case DUMP_TYPE_BY_SYSDUMP:
16667 if (substr_type == CMD_UNWANTED) {
16668 type_str = "BY_SYSDUMP_FORUSER_unwanted";
16669 } else if (substr_type == CMD_DISCONNECTED) {
16670 type_str = "BY_SYSDUMP_FORUSER_disconnected";
16671 } else {
16672 type_str = "BY_SYSDUMP_FORUSER";
16673 }
16674 break;
16675 case DUMP_TYPE_BY_LIVELOCK:
16676 type_str = "BY_LIVELOCK";
16677 break;
16678 case DUMP_TYPE_AP_LINKUP_FAILURE:
16679 type_str = "BY_AP_LINK_FAILURE";
16680 break;
16681 case DUMP_TYPE_AP_ABNORMAL_ACCESS:
16682 type_str = "INVALID_ACCESS";
16683 break;
16684 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
16685 type_str = "ERROR_RX_TIMED_OUT";
16686 break;
16687 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
16688 type_str = "ERROR_TX_TIMED_OUT";
16689 break;
16690 case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
16691 type_str = "CFG_VENDOR_TRIGGERED";
16692 break;
16693 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
16694 type_str = "BY_INVALID_RING_RDWR";
16695 break;
16696 case DUMP_TYPE_IFACE_OP_FAILURE:
16697 type_str = "BY_IFACE_OP_FAILURE";
16698 break;
16699 case DUMP_TYPE_TRANS_ID_MISMATCH:
16700 type_str = "BY_TRANS_ID_MISMATCH";
16701 break;
16702#ifdef DEBUG_DNGL_INIT_FAIL
16703 case DUMP_TYPE_DONGLE_INIT_FAILURE:
16704 type_str = "DONGLE_INIT_FAIL";
16705 break;
16706#endif /* DEBUG_DNGL_INIT_FAIL */
16707#ifdef SUPPORT_LINKDOWN_RECOVERY
16708 case DUMP_TYPE_READ_SHM_FAIL:
16709 type_str = "READ_SHM_FAIL";
16710 break;
16711#endif /* SUPPORT_LINKDOWN_RECOVERY */
16712 case DUMP_TYPE_DONGLE_HOST_EVENT:
16713 type_str = "BY_DONGLE_HOST_EVENT";
16714 break;
16715 case DUMP_TYPE_SMMU_FAULT:
16716 type_str = "SMMU_FAULT";
16717 break;
16718 case DUMP_TYPE_BY_USER:
16719 type_str = "BY_USER";
16720 break;
16721#ifdef DHD_ERPOM
16722 case DUMP_TYPE_DUE_TO_BT:
16723 type_str = "DUE_TO_BT";
16724 break;
16725#endif /* DHD_ERPOM */
16726 default:
16727 type_str = "Unknown_type";
16728 break;
16729 }
16730
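	/* Callers pass a 32-byte buffer: the longest string above,
	 * "BY_SYSDUMP_FORUSER_disconnected", is 31 characters, so the
	 * unbounded copy below stays in range - keep new entries short.
	 */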
16731 strncpy(buf, type_str, strlen(type_str));
16732 buf[strlen(type_str)] = 0;
16733}
16734
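/*
 * Write a memory dump to a file whose name encodes the dump type and a
 * timestamp, e.g. "mem_dump_Dongle_Trap_1546300800.500000" (illustrative;
 * the directory and exact pattern depend on the build flags below).
 */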
16735int
16736write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
16737{
16738 int ret = 0;
16739 char memdump_path[128];
16740 char memdump_type[32];
16741 struct timeval curtime;
16742 uint32 file_mode;
16743
16744 /* Init file name */
16745 memset(memdump_path, 0, sizeof(memdump_path));
16746 memset(memdump_type, 0, sizeof(memdump_type));
16747 do_gettimeofday(&curtime);
16748 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
16749#ifdef CUSTOMER_HW4_DEBUG
16750 get_debug_dump_time(dhd->debug_dump_time_str);
16751 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16752 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16753 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16754#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
16755 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
16756 "/data/misc/wifi/", fname, memdump_type,
16757 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
16758 file_mode = O_CREAT | O_WRONLY;
16759#else
16760 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
16761 "/installmedia/", fname, memdump_type,
16762 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
16763	/* O_SYNC is required for Brix Android, as we are calling BUG_ON
16764	 * immediately after collecting the socram dump: the write must reach
16765	 * the physical media directly rather than sit in the page cache.
16766	 * (O_TRUNC is not needed; the timestamped name makes each file new.)
16767	 */
16769 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16770 {
16771 struct file *fp = filp_open(memdump_path, file_mode, 0664);
16772 /* Check if it is live Brix image having /installmedia, else use /data */
16773 if (IS_ERR(fp)) {
16774			DHD_ERROR(("failed to open %s, trying /data/\n", memdump_path));
16775 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
16776 "/data/", fname, memdump_type,
16777 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
16778 } else {
16779 filp_close(fp, NULL);
16780 }
16781 }
16782#endif /* CUSTOMER_HW4_DEBUG */
16783
16784 /* print SOCRAM dump file path */
16785 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
16786
16787#ifdef DHD_LOG_DUMP
16788 dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
16789#endif /* DHD_LOG_DUMP */
16790
16791 /* Write file */
16792 ret = write_file(memdump_path, file_mode, buf, size);
16793
16794#ifdef DHD_DUMP_MNGR
16795 if (ret == BCME_OK) {
16796 dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
16797 }
16798#endif /* DHD_DUMP_MNGR */
16799
16800 return ret;
16801}
16802#endif /* DHD_DEBUG */
16803
16804int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
16805{
16806 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16807 unsigned long flags;
16808 int ret = 0;
16809
16810 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16811 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16812 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
16813 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
16814#ifdef CONFIG_HAS_WAKELOCK
16815 if (dhd->wakelock_rx_timeout_enable)
16816 wake_lock_timeout(&dhd->wl_rxwake,
16817 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
16818 if (dhd->wakelock_ctrl_timeout_enable)
16819 wake_lock_timeout(&dhd->wl_ctrlwake,
16820 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
16821#endif // endif
16822 dhd->wakelock_rx_timeout_enable = 0;
16823 dhd->wakelock_ctrl_timeout_enable = 0;
16824 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16825 }
16826 return ret;
16827}
16828
16829int net_os_wake_lock_timeout(struct net_device *dev)
16830{
16831 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16832 int ret = 0;
16833
16834 if (dhd)
16835 ret = dhd_os_wake_lock_timeout(&dhd->pub);
16836 return ret;
16837}
16838
16839int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
16840{
16841 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16842 unsigned long flags;
16843
16844 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16845 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16846 if (val > dhd->wakelock_rx_timeout_enable)
16847 dhd->wakelock_rx_timeout_enable = val;
16848 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16849 }
16850 return 0;
16851}
16852
16853int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
16854{
16855 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16856 unsigned long flags;
16857
16858 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16859 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16860 if (val > dhd->wakelock_ctrl_timeout_enable)
16861 dhd->wakelock_ctrl_timeout_enable = val;
16862 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16863 }
16864 return 0;
16865}
16866
16867int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
16868{
16869 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16870 unsigned long flags;
16871
16872 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16873 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16874 dhd->wakelock_ctrl_timeout_enable = 0;
16875#ifdef CONFIG_HAS_WAKELOCK
16876 if (wake_lock_active(&dhd->wl_ctrlwake))
16877 wake_unlock(&dhd->wl_ctrlwake);
16878#endif // endif
16879 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16880 }
16881 return 0;
16882}
16883
16884int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
16885{
16886 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16887 int ret = 0;
16888
16889 if (dhd)
16890 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
16891 return ret;
16892}
16893
16894int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
16895{
16896 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16897 int ret = 0;
16898
16899 if (dhd)
16900 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
16901 return ret;
16902}
16903
16904#if defined(DHD_TRACE_WAKE_LOCK)
16905#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16906#include <linux/hashtable.h>
16907#else
16908#include <linux/hash.h>
16909#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16910
16911#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16912/* Define 2^5 = 32 bucket size hash table */
16913DEFINE_HASHTABLE(wklock_history, 5);
16914#else
16915/* Define 2^5 = 32 bucket size hash table */
16916struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
16917#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16918
16919atomic_t trace_wklock_onoff;
16920typedef enum dhd_wklock_type {
16921 DHD_WAKE_LOCK,
16922 DHD_WAKE_UNLOCK,
16923 DHD_WAIVE_LOCK,
16924 DHD_RESTORE_LOCK
16925} dhd_wklock_t;
16926
16927struct wk_trace_record {
16928 unsigned long addr; /* Address of the instruction */
16929 dhd_wklock_t lock_type; /* lock_type */
16930 unsigned long long counter; /* counter information */
16931 struct hlist_node wklock_node; /* hash node */
16932};
16933
16934static struct wk_trace_record *find_wklock_entry(unsigned long addr)
16935{
16936 struct wk_trace_record *wklock_info;
16937#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16938 hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
16939#else
16940 struct hlist_node *entry;
16941 int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
16942 hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
16943#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16944 {
16945 if (wklock_info->addr == addr) {
16946 return wklock_info;
16947 }
16948 }
16949 return NULL;
16950}
16951
16952#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16953#define HASH_ADD(hashtable, node, key) \
16954 do { \
16955 hash_add(hashtable, node, key); \
16956	} while (0)
16957#else
16958#define HASH_ADD(hashtable, node, key) \
16959 do { \
16960 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
16961 hlist_add_head(node, &hashtable[index]); \
16962	} while (0)
16963#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
16964
16965#define STORE_WKLOCK_RECORD(wklock_type) \
16966 do { \
16967 struct wk_trace_record *wklock_info = NULL; \
16968 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
16969 wklock_info = find_wklock_entry(func_addr); \
16970 if (wklock_info) { \
16971 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
16972 wklock_info->counter = dhd->wakelock_counter; \
16973 } else { \
16974 wklock_info->counter++; \
16975 } \
16976 } else { \
16977 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
16978 if (!wklock_info) {\
16979 printk("Can't allocate wk_trace_record \n"); \
16980 } else { \
16981 wklock_info->addr = func_addr; \
16982 wklock_info->lock_type = wklock_type; \
16983 if (wklock_type == DHD_WAIVE_LOCK || \
16984 wklock_type == DHD_RESTORE_LOCK) { \
16985 wklock_info->counter = dhd->wakelock_counter; \
16986 } else { \
16987 wklock_info->counter++; \
16988 } \
16989 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
16990 } \
16991 } \
16992	} while (0)
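
/*
 * STORE_WKLOCK_RECORD keys each record by __builtin_return_address(0), i.e.
 * by whoever called the wake-lock wrapper the macro is expanded in, so the
 * dump below reports per-call-site lock/unlock counters.
 */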
16993
16994static inline void dhd_wk_lock_rec_dump(void)
16995{
16996 int bkt;
16997 struct wk_trace_record *wklock_info;
16998
16999#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17000 hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
17001#else
17002 struct hlist_node *entry = NULL;
17003 int max_index = ARRAY_SIZE(wklock_history);
17004 for (bkt = 0; bkt < max_index; bkt++)
17005 hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
17006#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17007 {
17008 switch (wklock_info->lock_type) {
17009 case DHD_WAKE_LOCK:
17010 printk("wakelock lock : %pS lock_counter : %llu \n",
17011 (void *)wklock_info->addr, wklock_info->counter);
17012 break;
17013 case DHD_WAKE_UNLOCK:
17014 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
17015 (void *)wklock_info->addr, wklock_info->counter);
17016 break;
17017 case DHD_WAIVE_LOCK:
17018 printk("wakelock waive : %pS before_waive : %llu \n",
17019 (void *)wklock_info->addr, wklock_info->counter);
17020 break;
17021 case DHD_RESTORE_LOCK:
17022 printk("wakelock restore : %pS, after_waive : %llu \n",
17023 (void *)wklock_info->addr, wklock_info->counter);
17024 break;
17025 }
17026 }
17027}
17028
17029static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
17030{
17031 unsigned long flags;
17032#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17033 int i;
17034#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
17035
17036 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17037#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17038 hash_init(wklock_history);
17039#else
17040 for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
17041 INIT_HLIST_HEAD(&wklock_history[i]);
17042#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17043 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17044 atomic_set(&trace_wklock_onoff, 1);
17045}
17046
17047static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
17048{
17049 int bkt;
17050 struct wk_trace_record *wklock_info;
17051 struct hlist_node *tmp;
17052 unsigned long flags;
17053#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17054 struct hlist_node *entry = NULL;
17055 int max_index = ARRAY_SIZE(wklock_history);
17056#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
17057
17058 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17059#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17060 hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
17061#else
17062 for (bkt = 0; bkt < max_index; bkt++)
17063 hlist_for_each_entry_safe(wklock_info, entry, tmp,
17064 &wklock_history[bkt], wklock_node)
17065#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17066 {
17067#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17068 hash_del(&wklock_info->wklock_node);
17069#else
17070 hlist_del_init(&wklock_info->wklock_node);
17071#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17072 kfree(wklock_info);
17073 }
17074 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17075}
17076
17077void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
17078{
17079 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
17080 unsigned long flags;
17081
17082	printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record\n");
17083 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17084 dhd_wk_lock_rec_dump();
17085 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17086
17087}
17088#else
17089#define STORE_WKLOCK_RECORD(wklock_type)
17090#endif /* DHD_TRACE_WAKE_LOCK */
17091
17092int dhd_os_wake_lock(dhd_pub_t *pub)
17093{
17094 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17095 unsigned long flags;
17096 int ret = 0;
17097
17098 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17099 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17100 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
17101#ifdef CONFIG_HAS_WAKELOCK
17102 wake_lock(&dhd->wl_wifi);
17103#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17104 dhd_bus_dev_pm_stay_awake(pub);
17105#endif // endif
17106 }
17107#ifdef DHD_TRACE_WAKE_LOCK
17108 if (atomic_read(&trace_wklock_onoff)) {
17109 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
17110 }
17111#endif /* DHD_TRACE_WAKE_LOCK */
17112 dhd->wakelock_counter++;
17113 ret = dhd->wakelock_counter;
17114 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17115 }
17116
17117 return ret;
17118}
17119
17120void dhd_event_wake_lock(dhd_pub_t *pub)
17121{
17122 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17123
17124 if (dhd) {
17125#ifdef CONFIG_HAS_WAKELOCK
17126 wake_lock(&dhd->wl_evtwake);
17127#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17128 dhd_bus_dev_pm_stay_awake(pub);
17129#endif // endif
17130 }
17131}
17132
17133void
17134dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
17135{
17136#ifdef CONFIG_HAS_WAKELOCK
17137 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17138
17139 if (dhd) {
17140 wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
17141 }
17142#endif /* CONFIG_HAS_WAKELOCK */
17143}
17144
17145void
17146dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
17147{
17148#ifdef CONFIG_HAS_WAKELOCK
17149 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17150
17151 if (dhd) {
17152 wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
17153 }
17154#endif /* CONFIG_HAS_WAKELOCK */
17155}
17156
17157int net_os_wake_lock(struct net_device *dev)
17158{
17159 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17160 int ret = 0;
17161
17162 if (dhd)
17163 ret = dhd_os_wake_lock(&dhd->pub);
17164 return ret;
17165}
17166
17167int dhd_os_wake_unlock(dhd_pub_t *pub)
17168{
17169 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17170 unsigned long flags;
17171 int ret = 0;
17172
17173 dhd_os_wake_lock_timeout(pub);
17174 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17175 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17176
17177 if (dhd->wakelock_counter > 0) {
17178 dhd->wakelock_counter--;
17179#ifdef DHD_TRACE_WAKE_LOCK
17180 if (atomic_read(&trace_wklock_onoff)) {
17181 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
17182 }
17183#endif /* DHD_TRACE_WAKE_LOCK */
17184 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
17185#ifdef CONFIG_HAS_WAKELOCK
17186 wake_unlock(&dhd->wl_wifi);
17187#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17188 dhd_bus_dev_pm_relax(pub);
17189#endif // endif
17190 }
17191 ret = dhd->wakelock_counter;
17192 }
17193 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17194 }
17195 return ret;
17196}
17197
17198void dhd_event_wake_unlock(dhd_pub_t *pub)
17199{
17200 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17201
17202 if (dhd) {
17203#ifdef CONFIG_HAS_WAKELOCK
17204 wake_unlock(&dhd->wl_evtwake);
17205#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17206 dhd_bus_dev_pm_relax(pub);
17207#endif // endif
17208 }
17209}
17210
17211void dhd_pm_wake_unlock(dhd_pub_t *pub)
17212{
17213#ifdef CONFIG_HAS_WAKELOCK
17214 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17215
17216 if (dhd) {
17217 /* if wl_pmwake is active, unlock it */
17218 if (wake_lock_active(&dhd->wl_pmwake)) {
17219 wake_unlock(&dhd->wl_pmwake);
17220 }
17221 }
17222#endif /* CONFIG_HAS_WAKELOCK */
17223}
17224
17225void dhd_txfl_wake_unlock(dhd_pub_t *pub)
17226{
17227#ifdef CONFIG_HAS_WAKELOCK
17228 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17229
17230 if (dhd) {
17231 /* if wl_txflwake is active, unlock it */
17232 if (wake_lock_active(&dhd->wl_txflwake)) {
17233 wake_unlock(&dhd->wl_txflwake);
17234 }
17235 }
17236#endif /* CONFIG_HAS_WAKELOCK */
17237}
17238
17239int dhd_os_check_wakelock(dhd_pub_t *pub)
17240{
17241#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17242 KERNEL_VERSION(2, 6, 36)))
17243 dhd_info_t *dhd;
17244
17245 if (!pub)
17246 return 0;
17247 dhd = (dhd_info_t *)(pub->info);
17248#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17249
17250#ifdef CONFIG_HAS_WAKELOCK
17251 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
17252 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
17253 (wake_lock_active(&dhd->wl_wdwake))))
17254 return 1;
17255#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17256 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
17257 return 1;
17258#endif // endif
17259 return 0;
17260}
17261
17262int
17263dhd_os_check_wakelock_all(dhd_pub_t *pub)
17264{
17265#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17266 KERNEL_VERSION(2, 6, 36)))
17267#if defined(CONFIG_HAS_WAKELOCK)
17268 int l1, l2, l3, l4, l7, l8, l9;
17269 int l5 = 0, l6 = 0;
17270 int c, lock_active;
17271#endif /* CONFIG_HAS_WAKELOCK */
17272 dhd_info_t *dhd;
17273
17274 if (!pub) {
17275 return 0;
17276 }
17277 dhd = (dhd_info_t *)(pub->info);
17278 if (!dhd) {
17279 return 0;
17280 }
17281#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17282
17283#ifdef CONFIG_HAS_WAKELOCK
17284 c = dhd->wakelock_counter;
17285 l1 = wake_lock_active(&dhd->wl_wifi);
17286 l2 = wake_lock_active(&dhd->wl_wdwake);
17287 l3 = wake_lock_active(&dhd->wl_rxwake);
17288 l4 = wake_lock_active(&dhd->wl_ctrlwake);
17289 l7 = wake_lock_active(&dhd->wl_evtwake);
17290#ifdef BCMPCIE_OOB_HOST_WAKE
17291 l5 = wake_lock_active(&dhd->wl_intrwake);
17292#endif /* BCMPCIE_OOB_HOST_WAKE */
17293#ifdef DHD_USE_SCAN_WAKELOCK
17294 l6 = wake_lock_active(&dhd->wl_scanwake);
17295#endif /* DHD_USE_SCAN_WAKELOCK */
17296 l8 = wake_lock_active(&dhd->wl_pmwake);
17297 l9 = wake_lock_active(&dhd->wl_txflwake);
17298 lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
17299
17300 /* Indicate to the Host to avoid going to suspend if internal locks are up */
17301 if (lock_active) {
17302 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
17303 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
17304 __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
17305 return 1;
17306 }
17307#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17308 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
17309 return 1;
17310 }
17311#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
17312 return 0;
17313}
17314
17315int net_os_wake_unlock(struct net_device *dev)
17316{
17317 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17318 int ret = 0;
17319
17320 if (dhd)
17321 ret = dhd_os_wake_unlock(&dhd->pub);
17322 return ret;
17323}
17324
17325int dhd_os_wd_wake_lock(dhd_pub_t *pub)
17326{
17327 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17328 unsigned long flags;
17329 int ret = 0;
17330
17331 if (dhd) {
17332 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17333 if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
17334#ifdef CONFIG_HAS_WAKELOCK
17335			/* acquire the watchdog wakelock on first use (counter still 0) */
17336 wake_lock(&dhd->wl_wdwake);
17337#endif // endif
17338 }
17339 dhd->wakelock_wd_counter++;
17340 ret = dhd->wakelock_wd_counter;
17341 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17342 }
17343 return ret;
17344}
17345
17346int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
17347{
17348 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17349 unsigned long flags;
17350 int ret = 0;
17351
17352 if (dhd) {
17353 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17354 if (dhd->wakelock_wd_counter > 0) {
17355 dhd->wakelock_wd_counter = 0;
17356 if (!dhd->waive_wakelock) {
17357#ifdef CONFIG_HAS_WAKELOCK
17358 wake_unlock(&dhd->wl_wdwake);
17359#endif // endif
17360 }
17361 }
17362 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17363 }
17364 return ret;
17365}
17366
17367#ifdef BCMPCIE_OOB_HOST_WAKE
17368void
17369dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
17370{
17371#ifdef CONFIG_HAS_WAKELOCK
17372 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17373
17374 if (dhd) {
17375 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
17376 }
17377#endif /* CONFIG_HAS_WAKELOCK */
17378}
17379
17380void
17381dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
17382{
17383#ifdef CONFIG_HAS_WAKELOCK
17384 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17385
17386 if (dhd) {
17387 /* if wl_intrwake is active, unlock it */
17388 if (wake_lock_active(&dhd->wl_intrwake)) {
17389 wake_unlock(&dhd->wl_intrwake);
17390 }
17391 }
17392#endif /* CONFIG_HAS_WAKELOCK */
17393}
17394#endif /* BCMPCIE_OOB_HOST_WAKE */
17395
17396#ifdef DHD_USE_SCAN_WAKELOCK
17397void
17398dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
17399{
17400#ifdef CONFIG_HAS_WAKELOCK
17401 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17402
17403 if (dhd) {
17404 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
17405 }
17406#endif /* CONFIG_HAS_WAKELOCK */
17407}
17408
17409void
17410dhd_os_scan_wake_unlock(dhd_pub_t *pub)
17411{
17412#ifdef CONFIG_HAS_WAKELOCK
17413 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17414
17415 if (dhd) {
17416 /* if wl_scanwake is active, unlock it */
17417 if (wake_lock_active(&dhd->wl_scanwake)) {
17418 wake_unlock(&dhd->wl_scanwake);
17419 }
17420 }
17421#endif /* CONFIG_HAS_WAKELOCK */
17422}
17423#endif /* DHD_USE_SCAN_WAKELOCK */
17424
17425/* Waive wakelocks for operations such as IOVARs in the suspend path; must be
17426 * closed by a paired call to dhd_os_wake_lock_restore(). Returns the current
17427 * watchdog (wd) wakelock counter.
 */
17428int dhd_os_wake_lock_waive(dhd_pub_t *pub)
17429{
17430 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17431 unsigned long flags;
17432 int ret = 0;
17433
17434 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17435 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17436
17437 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
17438 if (dhd->waive_wakelock == FALSE) {
17439#ifdef DHD_TRACE_WAKE_LOCK
17440 if (atomic_read(&trace_wklock_onoff)) {
17441 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
17442 }
17443#endif /* DHD_TRACE_WAKE_LOCK */
17444 /* record current lock status */
17445 dhd->wakelock_before_waive = dhd->wakelock_counter;
17446 dhd->waive_wakelock = TRUE;
17447 }
17448 ret = dhd->wakelock_wd_counter;
17449 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17450 }
17451 return ret;
17452}
17453
17454int dhd_os_wake_lock_restore(dhd_pub_t *pub)
17455{
17456 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17457 unsigned long flags;
17458 int ret = 0;
17459
17460 if (!dhd)
17461 return 0;
17462 if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
17463 return 0;
17464
17465 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17466
17467 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
17468 if (!dhd->waive_wakelock)
17469 goto exit;
17470
17471 dhd->waive_wakelock = FALSE;
17472 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
17473 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
17474 * the lock in between, do the same by calling wake_unlock or pm_relax
17475 */
17476#ifdef DHD_TRACE_WAKE_LOCK
17477 if (atomic_read(&trace_wklock_onoff)) {
17478 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
17479 }
17480#endif /* DHD_TRACE_WAKE_LOCK */
17481
17482 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
17483#ifdef CONFIG_HAS_WAKELOCK
17484 wake_lock(&dhd->wl_wifi);
17485#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17486 dhd_bus_dev_pm_stay_awake(&dhd->pub);
17487#endif // endif
17488 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
17489#ifdef CONFIG_HAS_WAKELOCK
17490 wake_unlock(&dhd->wl_wifi);
17491#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17492 dhd_bus_dev_pm_relax(&dhd->pub);
17493#endif // endif
17494 }
17495 dhd->wakelock_before_waive = 0;
17496exit:
17497 ret = dhd->wakelock_wd_counter;
17498 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17499 return ret;
17500}
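
/*
 * Typical pairing in the suspend path (illustrative sketch):
 *
 *	dhd_os_wake_lock_waive(&dhd->pub);
 *	... issue IOVARs that would otherwise bump the wakelock ...
 *	dhd_os_wake_lock_restore(&dhd->pub);
 *
 * Restore reconciles any lock/unlock activity that happened while waived by
 * issuing the net wake_lock()/wake_unlock() (or pm_stay_awake()/pm_relax())
 * call, as described in the comment inside dhd_os_wake_lock_restore().
 */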
17501
17502void dhd_os_wake_lock_init(struct dhd_info *dhd)
17503{
17504 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
17505 dhd->wakelock_counter = 0;
17506 dhd->wakelock_rx_timeout_enable = 0;
17507 dhd->wakelock_ctrl_timeout_enable = 0;
17508 /* wakelocks prevent a system from going into a low power state */
17509#ifdef CONFIG_HAS_WAKELOCK
17510 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
17511 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
17512 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
17513 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
17514 wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
17515 wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
17516#ifdef BCMPCIE_OOB_HOST_WAKE
17517 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
17518#endif /* BCMPCIE_OOB_HOST_WAKE */
17519#ifdef DHD_USE_SCAN_WAKELOCK
17520 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
17521#endif /* DHD_USE_SCAN_WAKELOCK */
17522#endif /* CONFIG_HAS_WAKELOCK */
17523#ifdef DHD_TRACE_WAKE_LOCK
17524 dhd_wk_lock_trace_init(dhd);
17525#endif /* DHD_TRACE_WAKE_LOCK */
17526}
17527
17528void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
17529{
17530 DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
17531#ifdef CONFIG_HAS_WAKELOCK
17532 dhd->wakelock_counter = 0;
17533 dhd->wakelock_rx_timeout_enable = 0;
17534 dhd->wakelock_ctrl_timeout_enable = 0;
17535 wake_lock_destroy(&dhd->wl_wifi);
17536 wake_lock_destroy(&dhd->wl_rxwake);
17537 wake_lock_destroy(&dhd->wl_ctrlwake);
17538 wake_lock_destroy(&dhd->wl_evtwake);
17539 wake_lock_destroy(&dhd->wl_pmwake);
17540 wake_lock_destroy(&dhd->wl_txflwake);
17541#ifdef BCMPCIE_OOB_HOST_WAKE
17542 wake_lock_destroy(&dhd->wl_intrwake);
17543#endif /* BCMPCIE_OOB_HOST_WAKE */
17544#ifdef DHD_USE_SCAN_WAKELOCK
17545 wake_lock_destroy(&dhd->wl_scanwake);
17546#endif /* DHD_USE_SCAN_WAKELOCK */
17547#ifdef DHD_TRACE_WAKE_LOCK
17548 dhd_wk_lock_trace_deinit(dhd);
17549#endif /* DHD_TRACE_WAKE_LOCK */
17550#endif /* CONFIG_HAS_WAKELOCK */
17551}
17552
17553bool dhd_os_check_if_up(dhd_pub_t *pub)
17554{
17555 if (!pub)
17556 return FALSE;
17557 return pub->up;
17558}
17559
17560#if defined(BCMSDIO) || defined(BCMPCIE)
17561/* function to collect firmware, chip id and chip version info */
17562void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
17563{
17564 int i;
17565
17566 i = snprintf(info_string, sizeof(info_string),
17567 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
17568
17569	if (!dhdp)
17570		return;
17571
	/* snprintf() returns the would-be length on truncation; bail out rather
	 * than let (sizeof(info_string) - i) wrap around as an unsigned value.
	 */
	if (i < 0 || i >= (int)sizeof(info_string))
		return;

17572	(void)snprintf(&info_string[i], sizeof(info_string) - i,
17573		"\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
17574		dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
17575}
17576#endif /* BCMSDIO || BCMPCIE */
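
/*
 * Example info_string contents after dhd_set_version_info() (values are
 * illustrative only):
 *
 *	" Driver: 100.10.545\n Firmware: wl0: Dec 27 2018 ...\n Chip: 4361 Rev 2 Pkg 0"
 */
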
17577int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
17578{
17579 int ifidx;
17580 int ret = 0;
17581 dhd_info_t *dhd = NULL;
17582
17583 if (!net || !DEV_PRIV(net)) {
17584 DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
17585 __FUNCTION__, net, DEV_PRIV(net)));
17586 return -EINVAL;
17587 }
17588
17589 dhd = DHD_DEV_INFO(net);
17590 if (!dhd)
17591 return -EINVAL;
17592
17593 ifidx = dhd_net2idx(dhd, net);
17594 if (ifidx == DHD_BAD_IF) {
17595 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
17596 return -ENODEV;
17597 }
17598
17599 DHD_OS_WAKE_LOCK(&dhd->pub);
17600 DHD_PERIM_LOCK(&dhd->pub);
17601
17602 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
17603 dhd_check_hang(net, &dhd->pub, ret);
17604
17605 DHD_PERIM_UNLOCK(&dhd->pub);
17606 DHD_OS_WAKE_UNLOCK(&dhd->pub);
17607
17608 return ret;
17609}
17610
17611bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
17612{
17613 struct net_device *net;
17614
17615 net = dhd_idx2net(dhdp, ifidx);
17616	if (!net) {
17617		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		/* this function returns bool: -EINVAL would silently become TRUE */
17618		return FALSE;
17619	}
17620
17621 return dhd_check_hang(net, dhdp, ret);
17622}
17623
17624/* Return the driver instance (unit) number */
17625int dhd_get_instance(dhd_pub_t *dhdp)
17626{
17627 return dhdp->info->unit;
17628}
17629
17630#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
17631#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
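/*
 * flag == 1: enter deepsleep - packet filters are disabled, MPC is turned
 * off, then the "deepsleep" iovar is set. flag == 0: leave deepsleep - the
 * iovar is cleared and polled up to MAX_TRY_CNT times until it reads back 0,
 * after which MPC is re-enabled.
 */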
17632int dhd_deepsleep(struct net_device *dev, int flag)
17633{
17634 char iovbuf[20];
17635 uint powervar = 0;
17636 dhd_info_t *dhd;
17637 dhd_pub_t *dhdp;
17638 int cnt = 0;
17639 int ret = 0;
17640
17641 dhd = DHD_DEV_INFO(dev);
17642 dhdp = &dhd->pub;
17643
17644 switch (flag) {
17645 case 1 : /* Deepsleep on */
17646 DHD_ERROR(("[WiFi] Deepsleep On\n"));
17647 /* give some time to sysioc_work before deepsleep */
17648 OSL_SLEEP(200);
17649#ifdef PKT_FILTER_SUPPORT
17650 /* disable pkt filter */
17651 dhd_enable_packet_filter(0, dhdp);
17652#endif /* PKT_FILTER_SUPPORT */
17653 /* Disable MPC */
17654 powervar = 0;
17655 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
17656 0, TRUE);
17657
17658 /* Enable Deepsleep */
17659 powervar = 1;
17660 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
17661 NULL, 0, TRUE);
17662 break;
17663
17664 case 0: /* Deepsleep Off */
17665 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
17666
17667 /* Disable Deepsleep */
17668 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
17669 powervar = 0;
17670 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
17671 sizeof(powervar), NULL, 0, TRUE);
17672
17673 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
17674 sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
17675 if (ret < 0) {
17676					DHD_ERROR(("failed to read back deepsleep status,"
17677						" ret: %d\n", ret));
17678 } else {
17679 if (!(*(int *)iovbuf)) {
17680 DHD_ERROR(("deepsleep mode is 0,"
17681 " count: %d\n", cnt));
17682 break;
17683 }
17684 }
17685 }
17686
17687 /* Enable MPC */
17688 powervar = 1;
17689 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
17690 0, TRUE);
17691 break;
17692 }
17693
17694 return 0;
17695}
17696#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
17697
17698#ifdef PROP_TXSTATUS
17699
17700void dhd_wlfc_plat_init(void *dhd)
17701{
17702#ifdef USE_DYNAMIC_F2_BLKSIZE
17703 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
17704#endif /* USE_DYNAMIC_F2_BLKSIZE */
17705 return;
17706}
17707
17708void dhd_wlfc_plat_deinit(void *dhd)
17709{
17710#ifdef USE_DYNAMIC_F2_BLKSIZE
17711 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
17712#endif /* USE_DYNAMIC_F2_BLKSIZE */
17713 return;
17714}
17715
17716bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
17717{
17718#ifdef SKIP_WLFC_ON_CONCURRENT
17719
17720#ifdef WL_CFG80211
17721	struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
17722	if (net) {
17723		/* enable flow control in vsdb mode */
17724		return !(wl_cfg80211_is_concurrent_mode(net));
17725	}
17726#else
17727	return TRUE; /* skip flow control */
17728#endif /* WL_CFG80211 */
17729
17730#endif /* SKIP_WLFC_ON_CONCURRENT */
17731	/* single exit instead of the duplicated, partly unreachable returns */
17732	return FALSE;
17733}
17734#endif /* PROP_TXSTATUS */
17735
17736#ifdef BCMDBGFS
17737#include <linux/debugfs.h>
17738
17739typedef struct dhd_dbgfs {
17740 struct dentry *debugfs_dir;
17741 struct dentry *debugfs_mem;
17742 dhd_pub_t *dhdp;
17743 uint32 size;
17744} dhd_dbgfs_t;
17745
17746dhd_dbgfs_t g_dbgfs;
17747
17748extern uint32 dhd_readregl(void *bp, uint32 addr);
17749extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
17750
17751static int
17752dhd_dbg_state_open(struct inode *inode, struct file *file)
17753{
17754 file->private_data = inode->i_private;
17755 return 0;
17756}
17757
17758static ssize_t
17759dhd_dbg_state_read(struct file *file, char __user *ubuf,
17760 size_t count, loff_t *ppos)
17761{
17762 ssize_t rval;
17763 uint32 tmp;
17764 loff_t pos = *ppos;
17765 size_t ret;
17766
17767 if (pos < 0)
17768 return -EINVAL;
17769 if (pos >= g_dbgfs.size || !count)
17770 return 0;
17771	if (count > g_dbgfs.size - pos)
17772		count = g_dbgfs.size - pos;
	/* serve at most one 32-bit register per call; use 'pos' so pread()
	 * offsets are honoured instead of the cached file->f_pos
	 */
	if (count > 4)
		count = 4;
17773
17774	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
17775	tmp = dhd_readregl(g_dbgfs.dhdp->bus, pos & (~3));
17776
17777	ret = copy_to_user(ubuf, &tmp, count);
17778	if (ret == count)
17779		return -EFAULT;
17780
17781 count -= ret;
17782 *ppos = pos + count;
17783 rval = count;
17784
17785 return rval;
17786}
17787
17788static ssize_t
17789dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
17790{
17791 loff_t pos = *ppos;
17792 size_t ret;
17793 uint32 buf;
17794
17795 if (pos < 0)
17796 return -EINVAL;
17797 if (pos >= g_dbgfs.size || !count)
17798 return 0;
17799 if (count > g_dbgfs.size - pos)
17800 count = g_dbgfs.size - pos;
17801
17802 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
17803 if (ret == count)
17804 return -EFAULT;
17805
17806	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
17807	dhd_writeregl(g_dbgfs.dhdp->bus, pos & (~3), buf);
17808
17809 return count;
17810}
17811
17812loff_t
17813dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
17814{
17815 loff_t pos = -1;
17816
17817	switch (whence) {
17818		case 0: /* SEEK_SET */
17819			pos = off;
17820			break;
17821		case 1: /* SEEK_CUR */
17822			pos = file->f_pos + off;
17823			break;
17824		case 2: /* SEEK_END */
17825			pos = g_dbgfs.size - off;
17826	}
17827 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
17828}
17829
17830static const struct file_operations dhd_dbg_state_ops = {
17831 .read = dhd_dbg_state_read,
17832 .write = dhd_debugfs_write,
17833 .open = dhd_dbg_state_open,
17834 .llseek = dhd_debugfs_lseek
17835};
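
/*
 * Userspace access sketch (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug, giving /sys/kernel/debug/dhd/mem from dhd_dbgfs_init()):
 *
 *	# read the 32-bit register at byte offset 0x1000
 *	dd if=/sys/kernel/debug/dhd/mem bs=4 count=1 skip=$((0x1000 / 4))
 *
 * The handlers above service reads and writes one aligned 32-bit word at a
 * time.
 */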
17836
17837static void dhd_dbgfs_create(void)
17838{
17839 if (g_dbgfs.debugfs_dir) {
17840 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
17841 NULL, &dhd_dbg_state_ops);
17842 }
17843}
17844
17845void dhd_dbgfs_init(dhd_pub_t *dhdp)
17846{
17847 g_dbgfs.dhdp = dhdp;
17848	g_dbgfs.size = 0x20000000; /* Allow access to various core registers */
17849
17850 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
17851 if (IS_ERR(g_dbgfs.debugfs_dir)) {
17852 g_dbgfs.debugfs_dir = NULL;
17853 return;
17854 }
17855
17856 dhd_dbgfs_create();
17857
17858 return;
17859}
17860
17861void dhd_dbgfs_remove(void)
17862{
17863 debugfs_remove(g_dbgfs.debugfs_mem);
17864 debugfs_remove(g_dbgfs.debugfs_dir);
17865
17866 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
17867}
17868#endif /* BCMDBGFS */
17869
17870#ifdef CUSTOM_SET_CPUCORE
17871void dhd_set_cpucore(dhd_pub_t *dhd, int set)
17872{
17873 int e_dpc = 0, e_rxf = 0, retry_set = 0;
17874
17875 if (!(dhd->chan_isvht80)) {
17876 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
17877 return;
17878 }
17879
17880 if (DPC_CPUCORE) {
17881 do {
17882 if (set == TRUE) {
17883 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17884 cpumask_of(DPC_CPUCORE));
17885 } else {
17886 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17887 cpumask_of(PRIMARY_CPUCORE));
17888 }
17889 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17890 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
17891 return;
17892 }
17893 if (e_dpc < 0)
17894 OSL_SLEEP(1);
17895 } while (e_dpc < 0);
17896 }
17897 if (RXF_CPUCORE) {
17898 do {
17899 if (set == TRUE) {
17900 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17901 cpumask_of(RXF_CPUCORE));
17902 } else {
17903 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17904 cpumask_of(PRIMARY_CPUCORE));
17905 }
17906 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17907 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
17908 return;
17909 }
17910 if (e_rxf < 0)
17911 OSL_SLEEP(1);
17912 } while (e_rxf < 0);
17913 }
17914#ifdef DHD_OF_SUPPORT
17915 interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
17916#endif /* DHD_OF_SUPPORT */
17917 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
17918
17919 return;
17920}
17921#endif /* CUSTOM_SET_CPUCORE */
17922
17923#ifdef DHD_MCAST_REGEN
17924/* Get interface specific ap_isolate configuration */
17925int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
17926{
17927 dhd_info_t *dhd = dhdp->info;
17928 dhd_if_t *ifp;
17929
17930 ASSERT(idx < DHD_MAX_IFS);
17931
17932 ifp = dhd->iflist[idx];
17933
17934 return ifp->mcast_regen_bss_enable;
17935}
17936
17937/* Set interface specific mcast_regen configuration */
17938int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
17939{
17940 dhd_info_t *dhd = dhdp->info;
17941 dhd_if_t *ifp;
17942
17943 ASSERT(idx < DHD_MAX_IFS);
17944
17945 ifp = dhd->iflist[idx];
17946
17947 ifp->mcast_regen_bss_enable = val;
17948
17949 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
17950 * is enabled
17951 */
17952 dhd_update_rx_pkt_chainable_state(dhdp, idx);
17953 return BCME_OK;
17954}
17955#endif /* DHD_MCAST_REGEN */
17956
17957/* Get interface specific ap_isolate configuration */
17958int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
17959{
17960 dhd_info_t *dhd = dhdp->info;
17961 dhd_if_t *ifp;
17962
17963 ASSERT(idx < DHD_MAX_IFS);
17964
17965 ifp = dhd->iflist[idx];
17966
17967 return ifp->ap_isolate;
17968}
17969
17970/* Set interface specific ap_isolate configuration */
17971int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
17972{
17973 dhd_info_t *dhd = dhdp->info;
17974 dhd_if_t *ifp;
17975
17976 ASSERT(idx < DHD_MAX_IFS);
17977
17978 ifp = dhd->iflist[idx];
17979
17980 if (ifp)
17981 ifp->ap_isolate = val;
17982
17983 return 0;
17984}
17985
17986#ifdef DHD_FW_COREDUMP
17987void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
17988{
17989 unsigned long flags = 0;
17990 dhd_dump_t *dump = NULL;
17991 dhd_info_t *dhd_info = NULL;
17992 dhd_info = (dhd_info_t *)dhdp->info;
17993 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
17994 if (dump == NULL) {
17995 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
17996 return;
17997 }
17998 dump->buf = buf;
17999 dump->bufsize = size;
18000#ifdef DHD_LOG_DUMP
18001 dhd_print_buf_addr(dhdp, "memdump", buf, size);
18002#endif /* DHD_LOG_DUMP */
18003
18004 if (dhdp->memdump_enabled == DUMP_MEMONLY) {
18005 BUG_ON(1);
18006 }
18007
18008#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM)
18009 if (
18010#if defined(DEBUG_DNGL_INIT_FAIL)
18011 (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
18012#endif /* DEBUG_DNGL_INIT_FAIL */
18013#ifdef DHD_ERPOM
18014 (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
18015#endif /* DHD_ERPOM */
18016 FALSE)
18017 {
18018#ifdef DHD_LOG_DUMP
18019 log_dump_type_t *flush_type = NULL;
18020#endif // endif
18021 dhd_info->scheduled_memdump = FALSE;
18022 dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
18023 /* for dongle init fail cases, 'dhd_mem_dump' does
18024 * not call 'dhd_log_dump', so call it here.
18025 */
18026#ifdef DHD_LOG_DUMP
18027 flush_type = MALLOCZ(dhdp->osh,
18028 sizeof(log_dump_type_t));
18029 if (flush_type) {
18030 *flush_type = DLD_BUF_TYPE_ALL;
18031 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
18032 dhd_log_dump(dhdp->info, flush_type, 0);
18033 }
18034#endif /* DHD_LOG_DUMP */
18035 return;
18036 }
18037#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */
18038
18039 dhd_info->scheduled_memdump = TRUE;
18040 /* bus busy bit for mem dump will be cleared in mem dump
18041 * work item context, after mem dump file is written
18042 */
18043 DHD_GENERAL_LOCK(dhdp, flags);
18044 DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
18045 DHD_GENERAL_UNLOCK(dhdp, flags);
18046 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
18047 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
18048 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
18049}
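
/*
 * Deferred-work handler: writes the SoC RAM dump (plus the D2H minidump and
 * log dump where enabled) to files, clears the mem-dump bus-busy bit, and
 * honours the DUMP_MEMFILE_BUGON policy by calling BUG_ON() afterwards.
 */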
18050static void
18051dhd_mem_dump(void *handle, void *event_info, u8 event)
18052{
18053 dhd_info_t *dhd = handle;
18054 dhd_pub_t *dhdp = NULL;
18055 dhd_dump_t *dump = event_info;
18056 unsigned long flags = 0;
18057
18058 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18059
18060 if (!dhd) {
18061 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
18062 return;
18063 }
18064
18065 dhdp = &dhd->pub;
18066
18067 DHD_GENERAL_LOCK(dhdp, flags);
18068 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
18069 DHD_GENERAL_UNLOCK(dhdp, flags);
18070 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
18071 goto exit;
18072 }
18073 DHD_GENERAL_UNLOCK(dhdp, flags);
18074
18075#ifdef D2H_MINIDUMP
18076 /* dump minidump */
18077 if (dhd_bus_is_minidump_enabled(dhdp)) {
18078 dhd_d2h_minidump(&dhd->pub);
18079 } else {
18080 DHD_ERROR(("minidump is not enabled\n"));
18081 }
18082#endif /* D2H_MINIDUMP */
18083
18084 if (!dump) {
18085 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
18086 goto exit;
18087 }
18088
18089 if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
18090 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
18091#ifdef DHD_DEBUG_UART
18092 dhd->pub.memdump_success = FALSE;
18093#endif /* DHD_DEBUG_UART */
18094 }
18095
18096 /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
18097 * context, no need to schedule another work queue for log dump. In case of
18098 * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
18099 * cfg layer is itself scheduling the log_dump work queue.
18100 * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
18101 * collect debug_dump as it may be called from non-sleepable context.
18102 */
18103#ifdef DHD_LOG_DUMP
18104 if (dhd->scheduled_memdump &&
18105 dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
18106 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
18107 sizeof(log_dump_type_t));
18108 if (flush_type) {
18109 *flush_type = DLD_BUF_TYPE_ALL;
18110 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
18111 dhd_log_dump(dhd, flush_type, 0);
18112 }
18113 }
18114#endif /* DHD_LOG_DUMP */
18115
18116#ifdef DHD_PKT_LOGGING
18117 copy_debug_dump_time(dhdp->debug_dump_time_pktlog_str, dhdp->debug_dump_time_str);
18118#endif /* DHD_PKT_LOGGING */
18119 clear_debug_dump_time(dhdp->debug_dump_time_str);
18120
18121 /* before calling bug on, wait for other logs to be dumped.
18122 * we cannot wait in case dhd_mem_dump is called directly
18123 * as it may not be in a sleepable context
18124 */
18125 if (dhd->scheduled_memdump) {
18126 uint bitmask = 0;
18127 int timeleft = 0;
18128#ifdef DHD_SSSR_DUMP
18129 bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
18130#endif // endif
18131 if (bitmask != 0) {
18132 timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
18133 &dhdp->dhd_bus_busy_state, bitmask, 0);
18134 if ((timeleft == 0) || (timeleft == 1)) {
18135				DHD_ERROR(("%s: timed out on sssr dump, dhd_bus_busy_state=0x%x\n",
18136					__FUNCTION__, dhdp->dhd_bus_busy_state));
18137 }
18138 }
18139 }
18140
18141 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
18142#ifdef DHD_LOG_DUMP
18143 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
18144#endif /* DHD_LOG_DUMP */
18145 dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
18146#ifdef DHD_DEBUG_UART
18147 dhd->pub.memdump_success == TRUE &&
18148#endif /* DHD_DEBUG_UART */
18149#ifdef DNGL_EVENT_SUPPORT
18150 dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
18151#endif /* DNGL_EVENT_SUPPORT */
18152 dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
18153
18154#ifdef SHOW_LOGTRACE
18155 /* Wait till event_log_dispatcher_work finishes */
18156 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
18157#endif /* SHOW_LOGTRACE */
18158
18159 BUG_ON(1);
18160 }
18161
18162exit:
18163 if (dump)
18164 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
18165 DHD_GENERAL_LOCK(dhdp, flags);
18166 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
18167 dhd_os_busbusy_wake(dhdp);
18168 DHD_GENERAL_UNLOCK(dhdp, flags);
18169 dhd->scheduled_memdump = FALSE;
18170}
18171#endif /* DHD_FW_COREDUMP */
18172
18173#ifdef D2H_MINIDUMP
18174void
18175dhd_d2h_minidump(dhd_pub_t *dhdp)
18176{
18177 char d2h_minidump[128];
18178 dhd_dma_buf_t *minidump_buf;
18179
18180 minidump_buf = dhd_prot_get_minidump_buf(dhdp);
18181 if (minidump_buf->va == NULL) {
18182 DHD_ERROR(("%s: minidump_buf is NULL\n", __FUNCTION__));
18183 return;
18184 }
18185
18186 /* Init file name */
18187 memset(d2h_minidump, 0, sizeof(d2h_minidump));
18188 snprintf(d2h_minidump, sizeof(d2h_minidump), "%s", "d2h_minidump");
18189
18190 if (write_dump_to_file(dhdp, (uint8 *)minidump_buf->va,
18191 BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN, d2h_minidump)) {
18192 DHD_ERROR(("%s: failed to dump d2h_minidump to file\n",
18193 __FUNCTION__));
18194 }
18195}
18196#endif /* D2H_MINIDUMP */
18197
18198#ifdef DHD_SSSR_DUMP
18199
18200static void
18201dhd_sssr_dump(void *handle, void *event_info, u8 event)
18202{
18203 dhd_info_t *dhd = handle;
18204 dhd_pub_t *dhdp;
18205 int i;
18206 char before_sr_dump[128];
18207 char after_sr_dump[128];
18208 unsigned long flags = 0;
18209 uint dig_buf_size = 0;
18210
18211 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18212
18213 if (!dhd) {
18214 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
18215 return;
18216 }
18217
18218 dhdp = &dhd->pub;
18219
18220 DHD_GENERAL_LOCK(dhdp, flags);
18221 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
18222 DHD_GENERAL_UNLOCK(dhdp, flags);
18223 DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
18224 goto exit;
18225 }
18226 DHD_GENERAL_UNLOCK(dhdp, flags);
18227
18228 for (i = 0; i < MAX_NUM_D11CORES; i++) {
18229 /* Init file name */
18230 memset(before_sr_dump, 0, sizeof(before_sr_dump));
18231 memset(after_sr_dump, 0, sizeof(after_sr_dump));
18232
18233 snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
18234 "sssr_core", i, "before_SR");
18235 snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
18236 "sssr_core", i, "after_SR");
18237
18238 if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
18239 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
18240 dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
18241				DHD_ERROR(("%s: writing SSSR core %d dump before SR to the file failed\n",
18242					__FUNCTION__, i));
18243 }
18244 }
18245 if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
18246 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
18247 dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
18248				DHD_ERROR(("%s: writing SSSR core %d dump after SR to the file failed\n",
18249					__FUNCTION__, i));
18250 }
18251 }
18252 }
18253
18254 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
18255 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
18256 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
18257 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
18258 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
18259 }
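	/* Sizing note: older chips report the digital core save/restore size
	 * via vasip_regs, while newer firmware (sssr_reg_info carrying a
	 * dig_mem_info member) reports it via dig_mem_info; the OFFSETOF()
	 * length check above keeps the read within the struct version the
	 * firmware actually sent.
	 */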
18260
18261 if (dhdp->sssr_dig_buf_before) {
18262 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
18263 dig_buf_size, "sssr_dig_before_SR")) {
18264 DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
18265 __FUNCTION__));
18266 }
18267 }
18268
18269 if (dhdp->sssr_dig_buf_after) {
18270 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
18271 dig_buf_size, "sssr_dig_after_SR")) {
18272			DHD_ERROR(("%s: writing SSSR Dig dump after to the file failed\n",
18273 __FUNCTION__));
18274 }
18275 }
18276
18277exit:
18278 DHD_GENERAL_LOCK(dhdp, flags);
18279 DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
18280 dhd_os_busbusy_wake(dhdp);
18281 DHD_GENERAL_UNLOCK(dhdp, flags);
18282}
18283
18284void
18285dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
18286{
18287 unsigned long flags = 0;
18288
18289 /* bus busy bit for sssr dump will be cleared in sssr dump
18290 * work item context, after sssr dump files are created
18291 */
18292 DHD_GENERAL_LOCK(dhdp, flags);
18293 DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
18294 DHD_GENERAL_UNLOCK(dhdp, flags);
18295
18296 if (dhdp->info->no_wq_sssrdump) {
18297 dhd_sssr_dump(dhdp->info, 0, 0);
18298 return;
18299 }
18300
18301 DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__));
18302 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
18303 DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
18304}
18305#endif /* DHD_SSSR_DUMP */
18306
18307#ifdef DHD_LOG_DUMP
18308static void
18309dhd_log_dump(void *handle, void *event_info, u8 event)
18310{
18311 dhd_info_t *dhd = handle;
18312 log_dump_type_t *type = (log_dump_type_t *)event_info;
18313
18314 if (!dhd || !type) {
18315 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
18316 return;
18317 }
18318
18319#ifdef WL_CFG80211
18320 /* flush the fw side logs */
18321 wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
18322 FW_LOGSET_MASK_ALL);
18323#endif // endif
18324	/* There are currently three possible contexts from which
18325	 * a log dump can be scheduled:
18326	 * 1. TRAP, 2. supplicant DEBUG_DUMP private driver command,
18327	 * 3. HEALTH CHECK event.
18328	 * The concise debug info buffer is a shared resource,
18329	 * and if a trap is one of the contexts then both of the
18330	 * scheduled work queues need to run, because trap data is
18331	 * essential for debugging. Hence a mutex lock is acquired
18332	 * before calling do_dhd_log_dump().
18333	 */
18334 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
18335 dhd_os_logdump_lock(&dhd->pub);
18336 DHD_OS_WAKE_LOCK(&dhd->pub);
18337 if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
18338 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
18339 }
18340 DHD_OS_WAKE_UNLOCK(&dhd->pub);
18341 dhd_os_logdump_unlock(&dhd->pub);
18342}
18343
18344void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
18345{
18346 DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
18347 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
18348 type, DHD_WQ_WORK_DHD_LOG_DUMP,
18349 dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
18350}
18351
18352static void
18353dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
18354{
18355 if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
18356 (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)) {
18357#if defined(CONFIG_ARM64)
18358 DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
18359 name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
18360#elif defined(__ARM_ARCH_7A__)
18361 DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
18362 name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
18363#endif /* CONFIG_ARM64 || __ARM_ARCH_7A__ */
18364 }
18365}
18366
18367static void
18368dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
18369{
18370 int i;
18371 unsigned long wr_size = 0;
18372 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
18373 size_t log_size = 0;
18374 char buf_name[DHD_PRINT_BUF_NAME_LEN];
18375 dhd_dbg_ring_t *ring = NULL;
18376
18377 BCM_REFERENCE(ring);
18378
18379 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18380 dld_buf = &g_dld_buf[i];
18381 log_size = (unsigned long)dld_buf->max -
18382 (unsigned long)dld_buf->buffer;
18383 if (dld_buf->wraparound) {
18384 wr_size = log_size;
18385 } else {
18386 wr_size = (unsigned long)dld_buf->present -
18387 (unsigned long)dld_buf->front;
18388 }
18389		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
18390		dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
18391		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
18392		dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
18393		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
18394		dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
18395		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
18396		dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
18397 }
18398
18399#ifdef DEBUGABILITY_ECNTRS_LOGGING
18400 /* periodic flushing of ecounters is NOT supported */
18401 if (*type == DLD_BUF_TYPE_ALL &&
18402 logdump_ecntr_enable &&
18403 dhdp->ecntr_dbg_ring) {
18404
18405 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
18406 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
18407 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
18408 LOG_DUMP_ECNTRS_MAX_BUFSIZE);
18409 }
18410#endif /* DEBUGABILITY_ECNTRS_LOGGING */
18411
18412#ifdef BCMPCIE
18413 if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
18414 dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
18415 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
18416 }
18417#endif /* BCMPCIE */
18418
18419#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18420 /* if health check event was received */
18421 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18422 dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
18423 HEALTH_CHK_BUF_SIZE);
18424 }
18425#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18426
18427 /* append the concise debug information */
18428 if (dhdp->concise_dbg_buf) {
18429 dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
18430 CONCISE_DUMP_BUFLEN);
18431 }
18432}
18433
18434#ifdef CUSTOMER_HW4_DEBUG
18435static void
18436dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
18437{
18438 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
18439 char *end = NULL;
18440 unsigned long plen = 0;
18441
18442 if (!bufptr || !len)
18443 return;
18444
18445 memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
18446 end = bufptr + len;
18447 while (bufptr < end) {
18448 if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
18449 memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
18450 tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
18451 printf("%s", tmp_buf);
18452 bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
18453 } else {
18454 plen = (unsigned long)end - (unsigned long)bufptr;
18455 memcpy(tmp_buf, bufptr, plen);
18456 tmp_buf[plen] = '\0';
18457 printf("%s", tmp_buf);
18458 bufptr += plen;
18459 }
18460 }
18461}
18462
18463static void
18464dhd_log_dump_print_tail(dhd_pub_t *dhdp,
18465 struct dhd_log_dump_buf *dld_buf,
18466 uint tail_len)
18467{
18468 char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
18469 unsigned long len_flush1 = 0, len_flush2 = 0;
18470 unsigned long flags = 0;
18471
18472 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18473 spin_lock_irqsave(&dld_buf->lock, flags);
18474 flush_ptr1 = dld_buf->present - tail_len;
18475 if (flush_ptr1 >= dld_buf->front) {
18476 /* tail content is within the buffer */
18477 flush_ptr2 = NULL;
18478 len_flush1 = tail_len;
18479 } else if (dld_buf->wraparound) {
18480 /* tail content spans the buffer length i.e, wrap around */
18481 flush_ptr1 = dld_buf->front;
18482 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
18483 len_flush2 = (unsigned long)tail_len - len_flush1;
18484 flush_ptr2 = (char *)((unsigned long)dld_buf->max -
18485 (unsigned long)len_flush2);
18486 } else {
18487			/* amount of log data in the buffer is less than the tail size */
18488 flush_ptr1 = dld_buf->front;
18489 flush_ptr2 = NULL;
18490 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
18491 }
18492 spin_unlock_irqrestore(&dld_buf->lock, flags);
18493
18494 printf("\n================= LOG_DUMP tail =================\n");
18495 if (flush_ptr2) {
18496 dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
18497 }
18498 dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
18499 printf("\n===================================================\n");
18500}
18501#endif /* CUSTOMER_HW4_DEBUG */
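/*
 * Illustrative sketch (compiled out): the same tail-extraction arithmetic as
 * dhd_log_dump_print_tail() above, reduced to a standalone ring buffer so the
 * three cases (tail within the buffer, wraparound, underfilled buffer) are
 * easy to follow. The names here are hypothetical and not part of the driver.
 */
#if 0
#include <stdio.h>

struct ring {
	char *front, *max;	/* [front, max) is the storage */
	char *present;		/* next write position */
	int wraparound;		/* set once 'present' has wrapped past 'max' */
};

/* print the last 'tail_len' bytes that were logically written */
static void ring_print_tail(struct ring *r, size_t tail_len)
{
	char *p1 = r->present - tail_len, *p2 = NULL;
	size_t l1 = tail_len, l2 = 0;

	if (p1 < r->front) {
		if (r->wraparound) {
			/* tail spans the wrap point */
			p1 = r->front;
			l1 = (size_t)(r->present - r->front);
			l2 = tail_len - l1;
			p2 = r->max - l2;	/* older part sits at the end */
		} else {
			/* fewer than tail_len bytes logged so far */
			p1 = r->front;
			l1 = (size_t)(r->present - r->front);
		}
	}
	if (p2)
		fwrite(p2, 1, l2, stdout);	/* older chunk first */
	fwrite(p1, 1, l1, stdout);
}
#endif /* illustrative sketch */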
18502
18503/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
18504static int
18505do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
18506{
18507 int ret = 0, i = 0;
18508 struct file *fp = NULL;
18509 mm_segment_t old_fs;
18510 loff_t pos = 0;
18511 unsigned int wr_size = 0;
18512 char dump_path[128];
18513 uint32 file_mode;
18514 unsigned long flags = 0;
18515 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
18516 size_t log_size = 0;
18517 size_t fspace_remain = 0;
18518 struct kstat stat;
18519 char time_str[128];
18520 char *ts = NULL;
18521 uint32 remain_len = 0;
18522 log_dump_section_hdr_t sec_hdr;
18523 dhd_info_t *dhd_info = NULL;
18524
18525 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18526
18527	/* If dhdp is NULL, it is extremely unlikely that a log dump was ever
18528	 * scheduled, so not freeing 'type' here is OK; even if we wanted to
18529	 * free 'type' we could not, since 'dhdp->osh' is unavailable
18530	 * when dhdp is NULL
18531	 */
18532 if (!dhdp || !type) {
18533 if (dhdp) {
18534 DHD_GENERAL_LOCK(dhdp, flags);
18535 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18536 dhd_os_busbusy_wake(dhdp);
18537 DHD_GENERAL_UNLOCK(dhdp, flags);
18538 }
18539 return BCME_ERROR;
18540 }
18541
18542 DHD_GENERAL_LOCK(dhdp, flags);
18543 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
18544 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18545 dhd_os_busbusy_wake(dhdp);
18546 DHD_GENERAL_UNLOCK(dhdp, flags);
18547 MFREE(dhdp->osh, type, sizeof(*type));
18548 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
18549 return BCME_ERROR;
18550 }
18551 DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
18552 DHD_GENERAL_UNLOCK(dhdp, flags);
18553
18554 dhd_info = (dhd_info_t *)dhdp->info;
18555 BCM_REFERENCE(dhd_info);
18556
18557	/* in case of a trap, get the preserved logs from ETD */
18558#if defined(BCMPCIE) && defined(DEBUGABILITY_ETD_PRSRV_LOGS)
18559 if (dhdp->dongle_trap_occured &&
18560 dhdp->extended_trap_data) {
18561 dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
18562 &dhd_info->event_data);
18563 }
18564#endif /* BCMPCIE && DEBUGABILITY_ETD_PRSRV_LOGS */
18565
18566#ifdef SHOW_LOGTRACE
18567 /* flush the event work items to get any fw events/logs
18568 * flush_work is a blocking call
18569 */
18570 flush_delayed_work(&dhd_info->event_log_dispatcher_work);
18571#endif /* SHOW_LOGTRACE */
18572
18573#ifdef CUSTOMER_HW4_DEBUG
18574	/* print the last 'x' KB of the preserve buffer data to the kmsg console;
18575	 * this addresses cases where debug_dump is not
18576	 * available for debugging
18577	 */
18578 dhd_log_dump_print_tail(dhdp,
18579 &g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
18580#endif /* CUSTOMER_HW4_DEBUG */
18581
18582 /* change to KERNEL_DS address limit */
18583 old_fs = get_fs();
18584 set_fs(KERNEL_DS);
18585
18586 /* Init file name */
18587 memset(dump_path, 0, sizeof(dump_path));
18588 switch (dhdp->debug_dump_subcmd) {
18589 case CMD_UNWANTED:
18590 snprintf(dump_path, sizeof(dump_path), "%s",
18591 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18592 DHD_DUMP_SUBSTR_UNWANTED);
18593 break;
18594 case CMD_DISCONNECTED:
18595 snprintf(dump_path, sizeof(dump_path), "%s",
18596 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18597 DHD_DUMP_SUBSTR_DISCONNECTED);
18598 break;
18599 default:
18600 snprintf(dump_path, sizeof(dump_path), "%s",
18601 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
18602 }
18603
18604 if (!dhdp->logdump_periodic_flush) {
18605 get_debug_dump_time(dhdp->debug_dump_time_str);
18606 snprintf(dump_path + strlen(dump_path),
18607 sizeof(dump_path) - strlen(dump_path),
18608 "_%s", dhdp->debug_dump_time_str);
18609 }
18610
18611 memset(time_str, 0, sizeof(time_str));
18612 ts = dhd_log_dump_get_timestamp();
18613 snprintf(time_str, sizeof(time_str),
18614 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
18615
18616 DHD_ERROR(("DHD version: %s\n", dhd_version));
18617 DHD_ERROR(("F/W version: %s\n", fw_version));
18618 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
18619
18620 dhd_log_dump_buf_addr(dhdp, type);
18621
18622	/* if this is the first time after dhd is loaded,
18623	 * or if periodic flush is disabled, clear the log file
18624	 */
18625 if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
18626 file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
18627 else
18628 file_mode = O_CREAT | O_RDWR | O_SYNC;
18629
18630 fp = filp_open(dump_path, file_mode, 0664);
18631 if (IS_ERR(fp)) {
18632		/* on an Android-installed image, try the '/data' directory */
18633#if defined(CONFIG_X86)
18634		DHD_ERROR(("%s: file open error on installed Android image, trying /data...\n",
18635			__FUNCTION__));
18636 snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
18637 if (!dhdp->logdump_periodic_flush) {
18638 snprintf(dump_path + strlen(dump_path),
18639 sizeof(dump_path) - strlen(dump_path),
18640 "_%s", dhdp->debug_dump_time_str);
18641 }
18642 fp = filp_open(dump_path, file_mode, 0664);
18643 if (IS_ERR(fp)) {
18644 ret = PTR_ERR(fp);
18645 DHD_ERROR(("open file error, err = %d\n", ret));
18646 goto exit;
18647 }
18648 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
18649#else
18650 ret = PTR_ERR(fp);
18651 DHD_ERROR(("open file error, err = %d\n", ret));
18652 goto exit;
18653#endif /* CONFIG_X86 */
18654 }
18655
18656 ret = vfs_stat(dump_path, &stat);
18657 if (ret < 0) {
18658 DHD_ERROR(("file stat error, err = %d\n", ret));
18659 goto exit;
18660 }
18661
18662	/* if someone else has changed the file */
18663 if (dhdp->last_file_posn != 0 &&
18664 stat.size < dhdp->last_file_posn) {
18665 dhdp->last_file_posn = 0;
18666 }
18667
18668 if (dhdp->logdump_periodic_flush) {
18669 log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
18670 /* calculate the amount of space required to dump all logs */
18671 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
18672 if (*type != DLD_BUF_TYPE_ALL && i != *type)
18673 continue;
18674
18675 if (g_dld_buf[i].wraparound) {
18676 log_size += (unsigned long)g_dld_buf[i].max
18677 - (unsigned long)g_dld_buf[i].buffer;
18678 } else {
18679 spin_lock_irqsave(&g_dld_buf[i].lock, flags);
18680 log_size += (unsigned long)g_dld_buf[i].present -
18681 (unsigned long)g_dld_buf[i].front;
18682 spin_unlock_irqrestore(&g_dld_buf[i].lock, flags);
18683 }
18684 log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
18685
18686 if (*type != DLD_BUF_TYPE_ALL && i == *type)
18687 break;
18688 }
18689
18690 ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
18691 if (ret < 0) {
18692			DHD_ERROR(("file seek to last posn error, err = %d\n", ret));
18693 goto exit;
18694 }
18695 pos = fp->f_pos;
18696
18697 /* if the max file size is reached, wrap around to beginning of the file
18698 * we're treating the file as a large ring buffer
18699 */
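		/* Worked example with hypothetical numbers: with
		 * logdump_max_filesize at 1MB, pos at 1020KB and log_size at
		 * 8KB, fspace_remain is 4KB < log_size, so f_pos is rewound
		 * to the start and the new logs overwrite the oldest ones -
		 * the file acts as one big ring buffer indexed by
		 * last_file_posn.
		 */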
18700 fspace_remain = logdump_max_filesize - pos;
18701 if (log_size > fspace_remain) {
18702 fp->f_pos -= pos;
18703 pos = fp->f_pos;
18704 }
18705 }
18706 /* write the timestamp hdr to the file first */
18707 ret = vfs_write(fp, time_str, strlen(time_str), &pos);
18708 if (ret < 0) {
18709 DHD_ERROR(("write file error, err = %d\n", ret));
18710 goto exit;
18711 }
18712
18713 /* prep the section header */
18714 memset(&sec_hdr, 0, sizeof(sec_hdr));
18715 sec_hdr.magic = LOG_DUMP_MAGIC;
18716 sec_hdr.timestamp = local_clock();
18717
18718 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
18719 unsigned int buf_size = 0;
18720
18721 if (*type != DLD_BUF_TYPE_ALL && i != *type)
18722 continue;
18723
18724 /* calculate the length of the log */
18725 dld_buf = &g_dld_buf[i];
18726 buf_size = (unsigned long)dld_buf->max -
18727 (unsigned long)dld_buf->buffer;
18728 if (dld_buf->wraparound) {
18729 wr_size = buf_size;
18730 } else {
18731 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18732 spin_lock_irqsave(&dld_buf->lock, flags);
18733 wr_size = (unsigned long)dld_buf->present -
18734 (unsigned long)dld_buf->front;
18735 spin_unlock_irqrestore(&dld_buf->lock, flags);
18736 }
18737
18738 /* write the section header first */
18739 sec_hdr.type = dld_hdrs[i].sec_type;
18740 sec_hdr.length = wr_size;
18741 vfs_write(fp, dld_hdrs[i].hdr_str, strlen(dld_hdrs[i].hdr_str), &pos);
18742 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18743 /* write the log */
18744 ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
18745 if (ret < 0) {
18746 DHD_ERROR(("write file error, err = %d\n", ret));
18747 goto exit;
18748 }
18749
18750 /* re-init dhd_log_dump_buf structure */
18751 spin_lock_irqsave(&dld_buf->lock, flags);
18752 dld_buf->wraparound = 0;
18753 dld_buf->present = dld_buf->front;
18754 dld_buf->remain = buf_size;
18755 bzero(dld_buf->buffer, buf_size);
18756 spin_unlock_irqrestore(&dld_buf->lock, flags);
18757
18758 if (*type != DLD_BUF_TYPE_ALL)
18759 break;
18760 }
18761
18762#ifdef DEBUGABILITY_ECNTRS_LOGGING
18763 /* periodic flushing of ecounters is NOT supported */
18764 if (*type == DLD_BUF_TYPE_ALL &&
18765 logdump_ecntr_enable &&
18766 dhdp->ecntr_dbg_ring) {
18767 dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
18768 fp, (unsigned long *)&pos, &sec_hdr);
18769 }
18770#endif /* DEBUGABILITY_ECNTRS_LOGGING */
18771
18772#ifdef BCMPCIE
18773 /* append extended trap data to the file in case of traps */
18774 if (dhdp->dongle_trap_occured &&
18775 dhdp->extended_trap_data) {
18776 /* write the section header first */
18777 vfs_write(fp, EXT_TRAP_LOG_HDR, strlen(EXT_TRAP_LOG_HDR), &pos);
18778 sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
18779 sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
18780 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18781 /* write the log */
18782 ret = vfs_write(fp, (char *)dhdp->extended_trap_data,
18783 BCMPCIE_EXT_TRAP_DATA_MAXLEN, &pos);
18784 if (ret < 0) {
18785 DHD_ERROR(("write file error of ext trap info,"
18786 " err = %d\n", ret));
18787 goto exit;
18788 }
18789 }
18790#endif /* BCMPCIE */
18791
18792#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18793 /* if health check event was received, dump to file */
18794 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18795 /* write the section header first */
18796 vfs_write(fp, HEALTH_CHK_LOG_HDR, strlen(HEALTH_CHK_LOG_HDR), &pos);
18797 sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
18798 sec_hdr.length = HEALTH_CHK_BUF_SIZE;
18799 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18800 /* write the log */
18801 ret = vfs_write(fp, (char *)dhdp->health_chk_event_data,
18802 HEALTH_CHK_BUF_SIZE, &pos);
18803 if (ret < 0) {
18804 DHD_ERROR(("write file error of health chk info,"
18805 " err = %d\n", ret));
18806 goto exit;
18807 }
18808 }
18809#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18810
18811#ifdef DHD_DUMP_PCIE_RINGS
18812 /* write the section header first */
18813 vfs_write(fp, FLOWRING_DUMP_HDR, strlen(FLOWRING_DUMP_HDR), &pos);
18814 /* Write the ring summary */
18815 ret = vfs_write(fp, dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN - remain_len, &pos);
18816 if (ret < 0) {
18817 DHD_ERROR(("write file error of concise debug info,"
18818 " err = %d\n", ret));
18819 goto exit;
18820 }
18821 sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
18822 sec_hdr.length = ((H2DRING_TXPOST_ITEMSIZE
18823 * H2DRING_TXPOST_MAX_ITEM)
18824 + (D2HRING_TXCMPLT_ITEMSIZE
18825 * D2HRING_TXCMPLT_MAX_ITEM)
18826 + (H2DRING_RXPOST_ITEMSIZE
18827 * H2DRING_RXPOST_MAX_ITEM)
18828 + (D2HRING_RXCMPLT_ITEMSIZE
18829 * D2HRING_RXCMPLT_MAX_ITEM)
18830 + (H2DRING_CTRL_SUB_ITEMSIZE
18831 * H2DRING_CTRL_SUB_MAX_ITEM)
18832 + (D2HRING_CTRL_CMPLT_ITEMSIZE
18833 * D2HRING_CTRL_CMPLT_MAX_ITEM)
18834 + (H2DRING_INFO_BUFPOST_ITEMSIZE
18835 * H2DRING_DYNAMIC_INFO_MAX_ITEM)
18836 + (D2HRING_INFO_BUFCMPLT_ITEMSIZE
18837 * D2HRING_DYNAMIC_INFO_MAX_ITEM));
18838 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18839 /* write the log */
18840 ret = dhd_d2h_h2d_ring_dump(dhdp, fp, (unsigned long *)&pos);
18841 if (ret < 0) {
18842 DHD_ERROR(("%s: error dumping ring data!\n",
18843 __FUNCTION__));
18844 goto exit;
18845 }
18846#endif /* DHD_DUMP_PCIE_RINGS */
18847
18848 /* append the concise debug information to the file.
18849 * This is the information which is seen
18850 * when a 'dhd dump' iovar is fired
18851 */
18852 if (dhdp->concise_dbg_buf) {
18853 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18854 if (remain_len <= 0) {
18855 DHD_ERROR(("%s: error getting concise debug info !\n",
18856 __FUNCTION__));
18857 goto exit;
18858 } else {
18859 /* write the section header first */
18860 vfs_write(fp, DHD_DUMP_LOG_HDR, strlen(DHD_DUMP_LOG_HDR), &pos);
18861 sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
18862 sec_hdr.length = CONCISE_DUMP_BUFLEN - remain_len;
18863 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18864 /* write the log */
18865 ret = vfs_write(fp, dhdp->concise_dbg_buf,
18866 CONCISE_DUMP_BUFLEN - remain_len, &pos);
18867 if (ret < 0) {
18868 DHD_ERROR(("write file error of concise debug info,"
18869 " err = %d\n", ret));
18870 goto exit;
18871 }
18872 }
18873 }
18874
18875 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
18876 ret = dhd_log_dump_cookie_to_file(dhdp, fp, (unsigned long *)&pos);
18877 if (ret < 0) {
18878			DHD_ERROR(("write file error of cookie info, err = %d\n", ret));
18879 goto exit;
18880 }
18881 }
18882
18883 if (dhdp->logdump_periodic_flush) {
18884 /* store the last position written to in the file for future use */
18885 dhdp->last_file_posn = pos;
18886 }
18887
18888exit:
18889 MFREE(dhdp->osh, type, sizeof(*type));
18890 if (!IS_ERR(fp) && fp != NULL) {
18891 filp_close(fp, NULL);
18892 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
18893 __FUNCTION__, dump_path));
18894 }
18895 set_fs(old_fs);
18896 DHD_GENERAL_LOCK(dhdp, flags);
18897 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18898 dhd_os_busbusy_wake(dhdp);
18899 DHD_GENERAL_UNLOCK(dhdp, flags);
18900
18901#ifdef DHD_DUMP_MNGR
18902 if (ret >= 0) {
18903 dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
18904 }
18905#endif /* DHD_DUMP_MNGR */
18906
18907 return (ret < 0) ? BCME_ERROR : BCME_OK;
18908}
18909#endif /* DHD_LOG_DUMP */
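/*
 * Illustrative sketch (compiled out): a minimal userspace scanner for the
 * debug_dump file written by do_dhd_log_dump() above. Each section is a
 * header string followed by a log_dump_section_hdr_t and its payload, so
 * locating sections reduces to finding the magic. The magic value below is
 * an assumption for illustration; the authoritative definition lives in the
 * DHD headers.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_LOG_DUMP_MAGIC 0xDEB3u	/* assumed to match LOG_DUMP_MAGIC */

/* Byte-wise rolling window, so the variable-length header strings between
 * sections never have to be parsed; assumes the capture was written by a
 * little-endian host.
 */
static long find_next_magic(FILE *fp)
{
	uint32_t win = 0;
	int c;

	while ((c = fgetc(fp)) != EOF) {
		win = (win >> 8) | ((uint32_t)c << 24);
		if (win == EXAMPLE_LOG_DUMP_MAGIC)
			return ftell(fp) - 4;	/* offset of the magic */
	}
	return -1;
}
#endif /* illustrative sketch */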
18910
18911/*
18912 * This call gets the memdump size so that
18913 * halutil can allocate that much buffer in user space.
18914 */
18915int
18916dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
18917{
18918 int ret = BCME_OK;
18919 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18920 dhd_pub_t *dhdp = &dhd->pub;
18921
18922 if (dhdp->busstate == DHD_BUS_DOWN) {
18923 DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
18924 return BCME_ERROR;
18925 }
18926
18927 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
18928 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18929 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
18930 return BCME_ERROR;
18931 }
18932#ifdef DHD_PCIE_RUNTIMEPM
18933 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
18934#endif /* DHD_PCIE_RUNTIMEPM */
18935 ret = dhd_common_socram_dump(dhdp);
18936 if (ret == BCME_OK) {
18937 *dump_size = dhdp->soc_ram_length;
18938 }
18939 return ret;
18940}
18941
18942/*
18943 * This gets the actual memdump after the memdump size has been queried
18944 */
18945int
18946dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
18947{
18948 int ret = BCME_OK;
18949 int orig_len = 0;
18950 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18951 dhd_pub_t *dhdp = &dhd->pub;
18952 if (buf == NULL)
18953 return BCME_ERROR;
18954 orig_len = *size;
18955 if (dhdp->soc_ram) {
18956 if (orig_len >= dhdp->soc_ram_length) {
18957 memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
18958 /* reset the storage of dump */
18959 memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
18960 *size = dhdp->soc_ram_length;
18961 } else {
18962 ret = BCME_BUFTOOSHORT;
18963			DHD_ERROR(("The buffer is too short to save the memory dump;"
18964				" %d bytes are needed\n", dhdp->soc_ram_length));
18965 }
18966 } else {
18967		DHD_ERROR(("socram dump is not ready\n"));
18968 ret = BCME_NOTREADY;
18969 }
18970 return ret;
18971}
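/*
 * Illustrative sketch (compiled out) of the two-step flow the comments above
 * describe: query the size, then fetch the dump. 'collect_socram' is a
 * hypothetical caller; halutil does the real equivalent from user space.
 */
#if 0
static int collect_socram(struct net_device *ndev)
{
	uint32 len = 0;
	char *buf;
	int ret;

	if (dhd_os_socram_dump(ndev, &len) != BCME_OK)
		return BCME_ERROR;
	buf = kzalloc(len, GFP_KERNEL);	/* halutil would malloc() instead */
	if (!buf)
		return BCME_NOMEM;
	/* 'len' goes in as the buffer size and comes back as the dump size */
	ret = dhd_os_get_socram_dump(ndev, &buf, &len);
	kfree(buf);
	return ret;
}
#endif /* illustrative sketch */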
18972
18973int
18974dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
18975{
18976 char *fw_str;
18977
18978 if (size == 0)
18979 return BCME_BADARG;
18980
18981 fw_str = strstr(info_string, "Firmware: ");
18982 if (fw_str == NULL) {
18983 return BCME_ERROR;
18984 }
18985
18986 memset(*buf, 0, size);
18987 if (dhd_ver) {
18988 strncpy(*buf, dhd_version, size - 1);
18989 } else {
18990 strncpy(*buf, fw_str, size - 1);
18991 }
18992 return BCME_OK;
18993}
18994
18995bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
18996{
18997 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
18998}
18999
19000#ifdef DHD_L2_FILTER
19001arp_table_t*
19002dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
19003{
19004 dhd_info_t *dhd = dhdp->info;
19005 dhd_if_t *ifp;
19006
19007 ASSERT(bssidx < DHD_MAX_IFS);
19008
19009 ifp = dhd->iflist[bssidx];
19010 return ifp->phnd_arp_table;
19011}
19012
19013int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
19014{
19015 dhd_info_t *dhd = dhdp->info;
19016 dhd_if_t *ifp;
19017
19018 ASSERT(idx < DHD_MAX_IFS);
19019
19020 ifp = dhd->iflist[idx];
19021
19022 if (ifp)
19023 return ifp->parp_enable;
19024 else
19025 return FALSE;
19026}
19027
19028/* Set interface specific proxy arp configuration */
19029int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
19030{
19031 dhd_info_t *dhd = dhdp->info;
19032 dhd_if_t *ifp;
19033 ASSERT(idx < DHD_MAX_IFS);
19034 ifp = dhd->iflist[idx];
19035
19036 if (!ifp)
19037 return BCME_ERROR;
19038
19039 /* At present all 3 variables are being
19040 * handled at once
19041 */
19042 ifp->parp_enable = val;
19043 ifp->parp_discard = val;
19044 ifp->parp_allnode = val;
19045
19046 /* Flush ARP entries when disabled */
19047 if (val == FALSE) {
19048 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
19049 FALSE, dhdp->tickcnt);
19050 }
19051 return BCME_OK;
19052}
19053
19054bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
19055{
19056 dhd_info_t *dhd = dhdp->info;
19057 dhd_if_t *ifp;
19058
19059 ASSERT(idx < DHD_MAX_IFS);
19060
19061 ifp = dhd->iflist[idx];
19062
19063 ASSERT(ifp);
19064 return ifp->parp_discard;
19065}
19066
19067bool
19068dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
19069{
19070 dhd_info_t *dhd = dhdp->info;
19071 dhd_if_t *ifp;
19072
19073 ASSERT(idx < DHD_MAX_IFS);
19074
19075 ifp = dhd->iflist[idx];
19076
19077 ASSERT(ifp);
19078
19079 return ifp->parp_allnode;
19080}
19081
19082int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
19083{
19084 dhd_info_t *dhd = dhdp->info;
19085 dhd_if_t *ifp;
19086
19087 ASSERT(idx < DHD_MAX_IFS);
19088
19089 ifp = dhd->iflist[idx];
19090
19091 ASSERT(ifp);
19092
19093 return ifp->dhcp_unicast;
19094}
19095
19096int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
19097{
19098 dhd_info_t *dhd = dhdp->info;
19099 dhd_if_t *ifp;
19100 ASSERT(idx < DHD_MAX_IFS);
19101 ifp = dhd->iflist[idx];
19102
19103 ASSERT(ifp);
19104
19105 ifp->dhcp_unicast = val;
19106 return BCME_OK;
19107}
19108
19109int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
19110{
19111 dhd_info_t *dhd = dhdp->info;
19112 dhd_if_t *ifp;
19113
19114 ASSERT(idx < DHD_MAX_IFS);
19115
19116 ifp = dhd->iflist[idx];
19117
19118 ASSERT(ifp);
19119
19120 return ifp->block_ping;
19121}
19122
19123int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
19124{
19125 dhd_info_t *dhd = dhdp->info;
19126 dhd_if_t *ifp;
19127 ASSERT(idx < DHD_MAX_IFS);
19128 ifp = dhd->iflist[idx];
19129
19130 ASSERT(ifp);
19131
19132 ifp->block_ping = val;
19133 /* Disable rx_pkt_chain feature for interface if block_ping option is
19134 * enabled
19135 */
19136 dhd_update_rx_pkt_chainable_state(dhdp, idx);
19137 return BCME_OK;
19138}
19139
19140int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
19141{
19142 dhd_info_t *dhd = dhdp->info;
19143 dhd_if_t *ifp;
19144
19145 ASSERT(idx < DHD_MAX_IFS);
19146
19147 ifp = dhd->iflist[idx];
19148
19149 ASSERT(ifp);
19150
19151 return ifp->grat_arp;
19152}
19153
19154int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
19155{
19156 dhd_info_t *dhd = dhdp->info;
19157 dhd_if_t *ifp;
19158 ASSERT(idx < DHD_MAX_IFS);
19159 ifp = dhd->iflist[idx];
19160
19161 ASSERT(ifp);
19162
19163 ifp->grat_arp = val;
19164
19165 return BCME_OK;
19166}
19167
19168int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
19169{
19170 dhd_info_t *dhd = dhdp->info;
19171 dhd_if_t *ifp;
19172
19173 ASSERT(idx < DHD_MAX_IFS);
19174
19175 ifp = dhd->iflist[idx];
19176
19177 ASSERT(ifp);
19178
19179 return ifp->block_tdls;
19180}
19181
19182int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
19183{
19184 dhd_info_t *dhd = dhdp->info;
19185 dhd_if_t *ifp;
19186 ASSERT(idx < DHD_MAX_IFS);
19187 ifp = dhd->iflist[idx];
19188
19189 ASSERT(ifp);
19190
19191 ifp->block_tdls = val;
19192
19193 return BCME_OK;
19194}
19195#endif /* DHD_L2_FILTER */
19196
19197#if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
19198int dhd_rps_cpus_enable(struct net_device *net, int enable)
19199{
19200 dhd_info_t *dhd = DHD_DEV_INFO(net);
19201 dhd_if_t *ifp;
19202 int ifidx;
19203 char * RPS_CPU_SETBUF;
19204
19205 ifidx = dhd_net2idx(dhd, net);
19206 if (ifidx == DHD_BAD_IF) {
19207 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
19208 return -ENODEV;
19209 }
19210
19211 if (ifidx == PRIMARY_INF) {
19212 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
19213 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
19214 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
19215 } else {
19216 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
19217 RPS_CPU_SETBUF = RPS_CPUS_MASK;
19218 }
19219 } else if (ifidx == VIRTUAL_INF) {
19220 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
19221 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
19222 } else {
19223 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
19224 return -EINVAL;
19225 }
19226
19227 ifp = dhd->iflist[ifidx];
19228 if (ifp) {
19229 if (enable) {
19230 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
19231 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
19232 } else {
19233 custom_rps_map_clear(ifp->net->_rx);
19234 }
19235 } else {
19236 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
19237 return -ENODEV;
19238 }
19239 return BCME_OK;
19240}
19241
19242int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
19243{
19244 struct rps_map *old_map, *map;
19245 cpumask_var_t mask;
19246 int err, cpu, i;
19247 static DEFINE_SPINLOCK(rps_map_lock);
19248
19249 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19250
19251 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
19252 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
19253 return -ENOMEM;
19254 }
19255
19256 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
19257 if (err) {
19258 free_cpumask_var(mask);
19259 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
19260 return err;
19261 }
19262
19263 map = kzalloc(max_t(unsigned int,
19264 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
19265 GFP_KERNEL);
19266 if (!map) {
19267 free_cpumask_var(mask);
19268 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
19269 return -ENOMEM;
19270 }
19271
19272 i = 0;
19273 for_each_cpu(cpu, mask) {
19274 map->cpus[i++] = cpu;
19275 }
19276
19277 if (i) {
19278 map->len = i;
19279 } else {
19280 kfree(map);
19281 map = NULL;
19282 free_cpumask_var(mask);
19283 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
19284 return -1;
19285 }
19286
19287 spin_lock(&rps_map_lock);
19288 old_map = rcu_dereference_protected(queue->rps_map,
19289 lockdep_is_held(&rps_map_lock));
19290 rcu_assign_pointer(queue->rps_map, map);
19291 spin_unlock(&rps_map_lock);
19292
19293 if (map) {
19294 static_key_slow_inc(&rps_needed);
19295 }
19296 if (old_map) {
19297 kfree_rcu(old_map, rcu);
19298 static_key_slow_dec(&rps_needed);
19299 }
19300 free_cpumask_var(mask);
19301
19302	DHD_INFO(("%s : Done. mapped cpu number : %d\n", __FUNCTION__, map->len));
19303 return map->len;
19304}
19305
19306void custom_rps_map_clear(struct netdev_rx_queue *queue)
19307{
19308 struct rps_map *map;
19309
19310 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19311
19312 map = rcu_dereference_protected(queue->rps_map, 1);
19313 if (map) {
19314 RCU_INIT_POINTER(queue->rps_map, NULL);
19315 kfree_rcu(map, rcu);
19316 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
19317 }
19318}
19319#endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
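/* For reference: custom_rps_map_set()/custom_rps_map_clear() above mirror
 * what a user-space write of a CPU bitmask to
 * /sys/class/net/<iface>/queues/rx-0/rps_cpus would do; doing it in-driver
 * avoids depending on a user-space helper at the moment the throughput
 * notifier fires.
 */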
19320
19321#if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
19322 defined(ARGOS_NOTIFY_CB)
19323
19324static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19325 unsigned long speed, void *v);
19326static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19327 unsigned long speed, void *v);
19328#if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19329static int argos_status_notifier_config_mumimo_cb(struct notifier_block *notifier,
19330 unsigned long speed, void *v);
19331#endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19332
19333#ifdef DYNAMIC_MUMIMO_CONTROL
19334#define MUMIMO_CONTROL_TIMER_INTERVAL_MS 5000
19335
19336void
19337argos_config_mumimo_timer(unsigned long data)
19338{
19339 argos_mumimo_ctrl *ctrl_data = (argos_mumimo_ctrl *)data;
19340
19341 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
19342 schedule_work(&ctrl_data->mumimo_ctrl_work);
19343}
19344
19345void
19346argos_config_mumimo_handler(struct work_struct *work)
19347{
19348 argos_mumimo_ctrl *ctrl_data;
19349 struct net_device *dev;
19350 int err;
19351 int new_cap;
19352
19353 ctrl_data = container_of(work, argos_mumimo_ctrl, mumimo_ctrl_work);
19354
19355 dev = ctrl_data->dev;
19356
19357 if (!dev) {
19358 return;
19359 }
19360
19361 new_cap = ctrl_data->cur_murx_bfe_cap;
19362 err = wl_set_murx_bfe_cap(dev, new_cap, TRUE);
19363 if (err) {
19364 DHD_ERROR(("%s: Failed to set murx_bfe_cap to %d, err=%d\n",
19365 __FUNCTION__, new_cap, err));
19366 } else {
19367 DHD_ERROR(("%s: Newly configured murx_bfe_cap = %d\n",
19368 __FUNCTION__, new_cap));
19369 }
19370}
19371
19372void
19373argos_status_notifier_config_mumimo(struct notifier_block *notifier,
19374 unsigned long speed, void *v)
19375{
19376 struct net_device *dev;
19377 int prev_murx_bfe_cap;
19378 int cap;
19379 dhd_info_t *dhd;
19380
19381 dev = argos_mumimo_ctrl_data.dev;
19382 if (!dev) {
19383 return;
19384 }
19385
19386 dhd = DHD_DEV_INFO(dev);
19387 if (!dhd) {
19388 return;
19389 }
19390
19391	/* Check if the STA is reassociating with the AP after murx configuration */
19392 if (dhd->pub.reassoc_mumimo_sw) {
19393 /* Cancel the MU-MIMO control timer */
19394 if (timer_pending(&argos_mumimo_ctrl_data.config_timer)) {
19395 del_timer_sync(&argos_mumimo_ctrl_data.config_timer);
19396 }
19397
19398 DHD_ERROR(("%s: Reassociation is in progress...\n", __FUNCTION__));
19399 return;
19400 }
19401
19402	/* Skip if the associated AP does not support MU-MIMO, or if the
19403	 * current TPut does not meet the condition for a MU-MIMO reconfiguration
19404	 */
19405 if ((wl_check_bss_support_mumimo(dev) <= 0) ||
19406 ((speed < MUMIMO_TO_SUMIMO_TPUT_THRESHOLD) &&
19407 (speed >= SUMIMO_TO_MUMIMO_TPUT_THRESHOLD))) {
19408 return;
19409 }
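	/* The two thresholds form a hysteresis band: with hypothetical values
	 * of 300Mbps (MUMIMO_TO_SUMIMO) and 100Mbps (SUMIMO_TO_MUMIMO),
	 * speeds in [100, 300) leave the current murx_bfe_cap untouched, so
	 * the capability does not flap on small throughput changes.
	 */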
19410
19411 prev_murx_bfe_cap = argos_mumimo_ctrl_data.cur_murx_bfe_cap;
19412
19413 /* Check the TPut condition */
19414 if (speed >= MUMIMO_TO_SUMIMO_TPUT_THRESHOLD) {
19415 cap = 0;
19416 } else {
19417 cap = 1;
19418 }
19419
19420 if (prev_murx_bfe_cap != cap) {
19421 /* Cancel the MU-MIMO control timer */
19422 if (timer_pending(&argos_mumimo_ctrl_data.config_timer)) {
19423 del_timer_sync(&argos_mumimo_ctrl_data.config_timer);
19424 }
19425
19426 /* Update the new value */
19427 argos_mumimo_ctrl_data.cur_murx_bfe_cap = cap;
19428
19429 /* Arm the MU-MIMO control timer */
19430 mod_timer(&argos_mumimo_ctrl_data.config_timer,
19431 jiffies + msecs_to_jiffies(MUMIMO_CONTROL_TIMER_INTERVAL_MS));
19432
19433 DHD_ERROR(("%s: Arm the MU-MIMO control timer, cur_murx_bfe_cap=%d\n",
19434 __FUNCTION__, cap));
19435 }
19436}
19437
19438void
19439argos_config_mumimo_init(struct net_device *dev)
19440{
19441 init_timer(&argos_mumimo_ctrl_data.config_timer);
19442 argos_mumimo_ctrl_data.config_timer.data = (unsigned long)&argos_mumimo_ctrl_data;
19443 argos_mumimo_ctrl_data.config_timer.function = argos_config_mumimo_timer;
19444 argos_mumimo_ctrl_data.dev = dev;
19445 INIT_WORK(&argos_mumimo_ctrl_data.mumimo_ctrl_work, argos_config_mumimo_handler);
19446 argos_mumimo_ctrl_data.cur_murx_bfe_cap = -1;
19447}
19448
19449void
19450argos_config_mumimo_deinit(void)
19451{
19452 argos_mumimo_ctrl_data.dev = NULL;
19453 if (timer_pending(&argos_mumimo_ctrl_data.config_timer)) {
19454 del_timer_sync(&argos_mumimo_ctrl_data.config_timer);
19455 }
19456
19457 cancel_work_sync(&argos_mumimo_ctrl_data.mumimo_ctrl_work);
19458}
19459
19460void
19461argos_config_mumimo_reset(void)
19462{
19463 argos_mumimo_ctrl_data.cur_murx_bfe_cap = -1;
19464}
19465#endif /* DYNAMIC_MUMIMO_CONTROL */
19466
19467int
19468argos_register_notifier_init(struct net_device *net)
19469{
19470 int ret = 0;
19471
19472 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19473 argos_rps_ctrl_data.wlan_primary_netdev = net;
19474 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19475#ifdef DYNAMIC_MUMIMO_CONTROL
19476 argos_config_mumimo_init(net);
19477#endif /* DYNAMIC_MUMIMO_CONTROL */
19478
19479 if (argos_wifi.notifier_call == NULL) {
19480 argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
19481 ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19482 if (ret < 0) {
19483 DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
19484 goto exit;
19485 }
19486 }
19487
19488#if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19489 if (argos_mimo.notifier_call == NULL) {
19490 argos_mimo.notifier_call = argos_status_notifier_config_mumimo_cb;
19491 ret = sec_argos_register_notifier(&argos_mimo, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL);
19492 if (ret < 0) {
19493 DHD_ERROR(("DHD:Failed to register WIFI for MIMO notifier, ret=%d\n", ret));
19494 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19495 goto exit;
19496 }
19497 }
19498#endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19499
19500 if (argos_p2p.notifier_call == NULL) {
19501 argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
19502 ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19503 if (ret < 0) {
19504 DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
19505 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19506#if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19507 sec_argos_unregister_notifier(&argos_mimo, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL);
19508#endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19509 goto exit;
19510 }
19511 }
19512
19513 return 0;
19514
19515exit:
19516 if (argos_wifi.notifier_call) {
19517 argos_wifi.notifier_call = NULL;
19518 }
19519
19520#if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19521 if (argos_mimo.notifier_call) {
19522 argos_mimo.notifier_call = NULL;
19523 }
19524#endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19525
19526 if (argos_p2p.notifier_call) {
19527 argos_p2p.notifier_call = NULL;
19528 }
19529
19530 return ret;
19531}
19532
19533int
19534argos_register_notifier_deinit(void)
19535{
19536 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19537
19538 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19539		DHD_ERROR(("DHD: %s: primary_net_dev is null\n", __FUNCTION__));
19540 return -1;
19541 }
19542
19543#ifdef DYNAMIC_MUMIMO_CONTROL
19544 argos_config_mumimo_deinit();
19545#endif /* DYNAMIC_MUMIMO_CONTROL */
19546
19547#if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19548 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19549#endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19550
19551 if (argos_p2p.notifier_call) {
19552 sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19553 argos_p2p.notifier_call = NULL;
19554 }
19555
19556#if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19557 if (argos_mimo.notifier_call) {
19558 sec_argos_unregister_notifier(&argos_mimo, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL);
19559 argos_mimo.notifier_call = NULL;
19560 }
19561#endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19562
19563 if (argos_wifi.notifier_call) {
19564 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19565 argos_wifi.notifier_call = NULL;
19566 }
19567
19568 argos_rps_ctrl_data.wlan_primary_netdev = NULL;
19569 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19570
19571 return 0;
19572}
19573
19574int
19575argos_status_notifier_cb(struct notifier_block *notifier,
19576 unsigned long speed, void *v)
19577{
19578 dhd_info_t *dhd;
19579 dhd_pub_t *dhdp;
19580
19581 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19582
19583 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19584 goto exit;
19585 }
19586
19587 dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
19588 if (dhd == NULL) {
19589 goto exit;
19590 }
19591
19592 dhdp = &dhd->pub;
19593 if (dhdp == NULL || !dhdp->up) {
19594 goto exit;
19595 }
19596 /* Check if reported TPut value is more than threshold value */
19597 if (speed > RPS_TPUT_THRESHOLD) {
19598 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
19599			/* there is no need to configure rps_cpus
19600			 * if Load Balance is enabled
19601			 */
19602#if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19603 int err = 0;
19604
19605 if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
19606 err = custom_rps_map_set(
19607 argos_rps_ctrl_data.wlan_primary_netdev->_rx,
19608 RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
19609 } else {
19610 DHD_ERROR(("DHD: %s: RPS_Set fail,"
19611 " Core=%d Offline\n", __FUNCTION__,
19612 RPS_CPUS_WLAN_CORE_ID));
19613 err = -1;
19614 }
19615
19616 if (err < 0) {
19617 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
19618 "speed=%ld, error=%d\n",
19619 __FUNCTION__, speed, err));
19620 } else {
19621#endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19622#if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19623 if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
19624 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
19625 __FUNCTION__, TCPACK_SUP_HOLD));
19626 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
19627 }
19628#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19629 argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
19630#if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19631 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
19632 __FUNCTION__, speed));
19633 }
19634#endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19635 }
19636 } else {
19637 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
19638#if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19639 if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
19640 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
19641 __FUNCTION__));
19642 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
19643 }
19644#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19645#if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19646			/* there is no need to configure rps_cpus
19647			 * if Load Balance is enabled
19648			 */
19649 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19650 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
19651 OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
19652#endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19653 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19654 }
19655 }
19656
19657exit:
19658 return NOTIFY_OK;
19659}
19660
19661int
19662argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19663 unsigned long speed, void *v)
19664{
19665 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19666 argos_status_notifier_cb(notifier, speed, v);
19667#if !defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19668 argos_status_notifier_config_mumimo(notifier, speed, v);
19669#endif /* !CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19670
19671 return NOTIFY_OK;
19672}
19673
19674#if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19675int
19676argos_status_notifier_config_mumimo_cb(struct notifier_block *notifier,
19677 unsigned long speed, void *v)
19678{
19679 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19680 argos_status_notifier_config_mumimo(notifier, speed, v);
19681
19682 return NOTIFY_OK;
19683}
19684#endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19685
19686int
19687argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19688 unsigned long speed, void *v)
19689{
19690 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19691 argos_status_notifier_cb(notifier, speed, v);
19692
19693 return NOTIFY_OK;
19694}
19695#endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
19696
19697#ifdef DHD_DEBUG_PAGEALLOC
19698
19699void
19700dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
19701{
19702 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
19703
19704 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
19705 __FUNCTION__, addr_corrupt, (uint32)len));
19706
19707 DHD_OS_WAKE_LOCK(dhdp);
19708 prhex("Page Corruption:", addr_corrupt, len);
19709 dhd_dump_to_kernelog(dhdp);
19710#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
19711 /* Load the dongle side dump to host memory and then BUG_ON() */
19712 dhdp->memdump_enabled = DUMP_MEMONLY;
19713 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
19714 dhd_bus_mem_dump(dhdp);
19715#endif /* BCMPCIE && DHD_FW_COREDUMP */
19716 DHD_OS_WAKE_UNLOCK(dhdp);
19717}
19718EXPORT_SYMBOL(dhd_page_corrupt_cb);
19719#endif /* DHD_DEBUG_PAGEALLOC */
19720
19721#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
19722void
19723dhd_pktid_error_handler(dhd_pub_t *dhdp)
19724{
19725 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
19726 DHD_OS_WAKE_LOCK(dhdp);
19727 dhd_dump_to_kernelog(dhdp);
19728#ifdef DHD_FW_COREDUMP
19729 /* Load the dongle side dump to host memory */
19730 if (dhdp->memdump_enabled == DUMP_DISABLED) {
19731 dhdp->memdump_enabled = DUMP_MEMFILE;
19732 }
19733 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
19734 dhd_bus_mem_dump(dhdp);
19735#endif /* DHD_FW_COREDUMP */
19736 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
19737 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
19738 DHD_OS_WAKE_UNLOCK(dhdp);
19739}
19740#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
19741
19742struct net_device *
19743dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
19744{
19745 dhd_info_t *dhd = dhdp->info;
19746
19747 if (dhd->iflist[0] && dhd->iflist[0]->net)
19748 return dhd->iflist[0]->net;
19749 else
19750 return NULL;
19751}
19752
19753#ifdef DHD_DHCP_DUMP
19754static void
19755dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
19756{
19757 struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
19758 struct iphdr *h = &b->ip_header;
19759 uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
19760 int dhcp_type = 0, len, opt_len;
19761
19762 /* check IP header */
19763 if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
19764 return;
19765 }
19766
19767 /* check UDP port for bootp (67, 68) */
19768 if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
19769 b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
19770 return;
19771 }
19772
19773 /* check header length */
19774 if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
19775 return;
19776 }
19777
19778 len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
19779 opt_len = len
19780 - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
19781
19782 /* parse bootp options */
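	/* Options are TLVs following the 4-byte magic cookie: one code byte,
	 * one length byte, then the data. A DHCPACK, for example, carries
	 * option 53 as the bytes 0x35 0x01 0x05 (code 53, length 1,
	 * message type 5).
	 */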
19783 if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
19784 ptr = &b->options[4];
19785 while (ptr < end && *ptr != 0xff) {
19786 opt = ptr++;
19787 if (*opt == 0) {
19788 continue;
19789 }
19790 ptr += *ptr + 1;
19791 if (ptr >= end) {
19792 break;
19793 }
19794 /* 53 is dhcp type */
19795 if (*opt == 53) {
19796 if (opt[1]) {
19797 dhcp_type = opt[2];
19798 DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
19799 ifname, dhcp_types[dhcp_type],
19800 tx ? "TX" : "RX", dhcp_ops[b->op]));
19801 break;
19802 }
19803 }
19804 }
19805 }
19806}
19807#endif /* DHD_DHCP_DUMP */
19808
19809#ifdef DHD_ICMP_DUMP
19810static void
19811dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
19812{
19813 uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
19814 struct iphdr *iph = (struct iphdr *)pkt;
19815 struct icmphdr *icmph;
19816
19817 /* check IP header */
19818 if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
19819 return;
19820 }
19821
19822 icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
19823 if (icmph->type == ICMP_ECHO) {
19824 DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
19825 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
19826 } else if (icmph->type == ICMP_ECHOREPLY) {
19827 DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
19828 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
19829 } else {
19830 DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
19831 ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
19832 }
19833}
19834#endif /* DHD_ICMP_DUMP */
19835
19836#ifdef SHOW_LOGTRACE
19837void
19838dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
19839{
19840 dhd_dbg_ring_status_t ring_status;
19841 uint32 rlen = 0;
19842#if defined(DEBUGABILITY)
19843 rlen = dhd_dbg_pull_single_from_ring(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
19844 TRACE_LOG_BUF_MAX_SIZE, TRUE);
19845#elif defined(DEBUGABILITY_ECNTRS_LOGGING)
19846 rlen = dhd_dbg_ring_pull_single(dhd_pub->ecntr_dbg_ring, trace_buf_info->buf,
19847 TRACE_LOG_BUF_MAX_SIZE, TRUE);
19848#else
19849 ASSERT(0);
19850#endif /* DEBUGABILITY */
19851
19852 trace_buf_info->size = rlen;
19853 trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
19854 if (rlen == 0) {
19855 trace_buf_info->availability = BUF_NOT_AVAILABLE;
19856 return;
19857 }
19858 dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
19859 if (ring_status.written_bytes != ring_status.read_bytes) {
19860 trace_buf_info->availability = NEXT_BUF_AVAIL;
19861 }
19862}
19863#endif /* SHOW_LOGTRACE */
19864
19865bool
19866dhd_fw_download_status(dhd_pub_t * dhd_pub)
19867{
19868 return dhd_pub->fw_download_done;
19869}
19870
19871int
19872dhd_create_to_notifier_skt(void)
19873{
19874#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
19875 /* Kernel 3.7 onwards this API accepts only 3 arguments. */
19876 /* Kernel version 3.6 is a special case which accepts 4 arguments */
19877 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
19878#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
19879 /* Kernel version 3.5 and below use this old API format */
19880 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
19881 dhd_process_daemon_msg, NULL, THIS_MODULE);
19882#else
19883 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
19884 &dhd_netlink_cfg);
19885#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
19886 if (!nl_to_event_sk)
19887 {
19888 printf("Error creating socket.\n");
19889 return -1;
19890 }
19891 DHD_INFO(("nl_to socket created successfully...\n"));
19892 return 0;
19893}
19894
19895void
19896dhd_destroy_to_notifier_skt(void)
19897{
19898 DHD_INFO(("Destroying nl_to socket\n"));
19899 netlink_kernel_release(nl_to_event_sk);
19900}
19901
19902static void
19903dhd_recv_msg_from_daemon(struct sk_buff *skb)
19904{
19905 struct nlmsghdr *nlh;
19906 bcm_to_info_t *cmd;
19907
19908 nlh = (struct nlmsghdr *)skb->data;
19909 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
19910 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
19911 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
19912 DHD_INFO(("DHD Daemon Started\n"));
19913 }
19914}
19915
19916int
19917dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
19918{
19919 struct nlmsghdr *nlh;
19920 struct sk_buff *skb_out;
19921
19922 BCM_REFERENCE(skb);
19923 if (sender_pid == 0) {
19924 DHD_INFO(("Invalid PID 0\n"));
19925 return -1;
19926 }
19927
19928 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
19929 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
19930 return -1;
19931 }
19932 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
19933 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
19934 memcpy(nlmsg_data(nlh), (char *)data, size);
19935
19936 if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
19937 DHD_INFO(("Error sending message\n"));
19938 }
19939 return 0;
19940}
19941
19942static void
19943dhd_process_daemon_msg(struct sk_buff *skb)
19944{
19945 bcm_to_info_t to_info;
19946
19947 to_info.magic = BCM_TO_MAGIC;
19948 to_info.reason = REASON_DAEMON_STARTED;
19949 to_info.trap = NO_TRAP;
19950
19951 dhd_recv_msg_from_daemon(skb);
19952 dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
19953}
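/*
 * Illustrative sketch (compiled out): the user-space side of the netlink
 * channel above. The daemon binds a netlink socket on the BCM_NL_USER
 * protocol and sends one "daemon started" message, whose nlmsg_pid lets
 * dhd_recv_msg_from_daemon() record sender_pid. The constants and struct
 * layout below are placeholders; the real values come from the DHD headers.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#define EXAMPLE_BCM_NL_USER		31	/* placeholder: BCM_NL_USER */
#define EXAMPLE_BCM_TO_MAGIC		0	/* placeholder: BCM_TO_MAGIC */
#define EXAMPLE_REASON_DAEMON_STARTED	0	/* placeholder */

struct example_bcm_to_info {	/* assumed mirror of bcm_to_info_t */
	int magic;
	int reason;
	int trap;
};

static int announce_daemon(void)
{
	struct sockaddr_nl src = { .nl_family = AF_NETLINK };
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };	/* pid 0 == kernel */
	struct {
		struct nlmsghdr nlh;
		struct example_bcm_to_info info;
	} msg;
	int fd = socket(AF_NETLINK, SOCK_RAW, EXAMPLE_BCM_NL_USER);

	if (fd < 0)
		return -1;
	src.nl_pid = getpid();	/* this pid becomes the driver's sender_pid */
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
		close(fd);
		return -1;
	}
	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(msg.info));
	msg.nlh.nlmsg_pid = getpid();
	msg.info.magic = EXAMPLE_BCM_TO_MAGIC;
	msg.info.reason = EXAMPLE_REASON_DAEMON_STARTED;
	return (int)sendto(fd, &msg, msg.nlh.nlmsg_len, 0,
		(struct sockaddr *)&dst, sizeof(dst));
}
#endif /* illustrative sketch */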
19954
19955#ifdef DHD_LOG_DUMP
19956bool
19957dhd_log_dump_ecntr_enabled(void)
19958{
19959 return (bool)logdump_ecntr_enable;
19960}
19961
19962void
19963dhd_log_dump_init(dhd_pub_t *dhd)
19964{
19965 struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
19966 int i = 0;
19967 uint8 *prealloc_buf = NULL, *bufptr = NULL;
19968#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
19969 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
19970#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
19971 int ret;
19972 dhd_dbg_ring_t *ring = NULL;
19973 unsigned long flags = 0;
19974 dhd_info_t *dhd_info = dhd->info;
19975 void *cookie_buf = NULL;
19976
19977 BCM_REFERENCE(ret);
19978 BCM_REFERENCE(ring);
19979 BCM_REFERENCE(flags);
19980
19981 /* sanity check */
19982 if (logdump_prsrv_tailsize <= 0 ||
19983 logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
19984 logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
19985 }
19986 /* now adjust the preserve log flush size based on the
19987 * kernel printk log buffer size
19988 */
19989#ifdef CONFIG_LOG_BUF_SHIFT
19990 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
19991 " limit prsrv tail size to = %uKB\n",
19992 __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
19993 logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
19994
19995 if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
19996 logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
19997 }
19998#else
19999	DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
20000		__FUNCTION__, logdump_prsrv_tailsize/1024));
20001#endif /* CONFIG_LOG_BUF_SHIFT */
20002
20003 mutex_init(&dhd_info->logdump_lock);
20004
20005 /* initialize log dump buf structures */
20006 memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
20007
20008 /* set the log dump buffer size based on the module_param */
20009 if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
20010 logdump_max_bufsize <= 0)
20011 dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
20012 else
20013 dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
20014
20015 /* pre-alloc the memory for the log buffers & 'special' buffer */
20016 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
20017#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20018 DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
20019 __FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE));
20020 prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
20021 dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
20022 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20023#else
20024 prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
20025 dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20026#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20027 if (!prealloc_buf) {
20028 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
20029 goto fail;
20030 }
20031 if (!dld_buf_special->buffer) {
20032 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
20033 goto fail;
20034 }
20035
20036 bufptr = prealloc_buf;
20037 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20038 dld_buf = &g_dld_buf[i];
20039 dld_buf->dhd_pub = dhd;
20040 spin_lock_init(&dld_buf->lock);
20041 dld_buf->wraparound = 0;
20042 if (i != DLD_BUF_TYPE_SPECIAL) {
20043 dld_buf->buffer = bufptr;
20044 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20045 bufptr = (uint8 *)dld_buf->max;
20046 } else {
20047 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20048 }
20049 dld_buf->present = dld_buf->front = dld_buf->buffer;
20050 dld_buf->remain = dld_buf_size[i];
20051 dld_buf->enable = 1;
20052 }
20053
20054#ifdef DEBUGABILITY_ECNTRS_LOGGING
20055 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
20056 dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
20057 if (!dhd->ecntr_dbg_ring)
20058 goto fail;
20059
20060 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20061 ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
20062 ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
20063 bufptr);
20064 if (ret != BCME_OK) {
20065 DHD_ERROR(("%s: unable to init ecntr ring !\n",
20066 __FUNCTION__));
20067 goto fail;
20068 }
20069 DHD_DBG_RING_LOCK(ring->lock, flags);
20070 ring->state = RING_ACTIVE;
20071 ring->threshold = 0;
20072 DHD_DBG_RING_UNLOCK(ring->lock, flags);
20073
20074 bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
20075#endif /* DEBUGABILITY_ECNTRS_LOGGING */
20076
20077	/* The concise buffer is used as an intermediate buffer for two purposes:
20078	 * a) to temporarily hold ecounters records pulled from the ring
20079	 * before writing them to file
20080	 * b) to stage dhd dump data before putting it to file
20081	 * It should therefore have a size equal to
20082	 * MAX(largest possible ecntr record, 'dhd dump' data size)
20083	 */
20084 dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
20085 if (!dhd->concise_dbg_buf) {
20086 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
20087 __FUNCTION__));
20088 goto fail;
20089 }
20090
20091#if defined(DHD_EVENT_LOG_FILTER)
20092 ret = dhd_event_log_filter_init(dhd,
20093 bufptr,
20094 LOG_DUMP_FILTER_MAX_BUFSIZE);
20095 if (ret != BCME_OK) {
20096 goto fail;
20097 }
20098#endif /* DHD_EVENT_LOG_FILTER */
20099
20100 cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
20101 if (!cookie_buf) {
20102 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
20103 __FUNCTION__));
20104 goto fail;
20105 }
20106 ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20107 if (ret != BCME_OK) {
20108 MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20109 goto fail;
20110 }
20111 return;
20112
20113fail:
20114
20115 if (dhd->logdump_cookie) {
20116 dhd_logdump_cookie_deinit(dhd);
20117 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20118 dhd->logdump_cookie = NULL;
20119 }
20120#if defined(DHD_EVENT_LOG_FILTER)
20121 if (dhd->event_log_filter) {
20122 dhd_event_log_filter_deinit(dhd);
20123 }
20124#endif /* DHD_EVENT_LOG_FILTER */
20125
20126 if (dhd->concise_dbg_buf) {
20127 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20128 }
20129
20130#ifdef DEBUGABILITY_ECNTRS_LOGGING
20131 if (dhd->ecntr_dbg_ring) {
20132 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20133 dhd_dbg_ring_deinit(dhd, ring);
20134 ring->ring_buf = NULL;
20135 ring->ring_size = 0;
20136 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20137 dhd->ecntr_dbg_ring = NULL;
20138 }
20139#endif /* DEBUGABILITY_ECNTRS_LOGGING */
20140
20141#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20142 if (prealloc_buf) {
20143 DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20144 }
20145 if (dld_buf_special->buffer) {
20146 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20147 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20148 }
20149#else
20150 if (prealloc_buf) {
20151 MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20152 }
20153 if (dld_buf_special->buffer) {
20154 MFREE(dhd->osh, dld_buf_special->buffer,
20155 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20156 }
20157#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20158 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20159 dld_buf = &g_dld_buf[i];
20160 dld_buf->enable = 0;
20161 dld_buf->buffer = NULL;
20162 }
20163
20164 mutex_destroy(&dhd_info->logdump_lock);
20165}
20166
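/*
 * For reference, a successful dhd_log_dump_init() carves prealloc_buf
 * (LOG_DUMP_TOTAL_BUFSIZE bytes) up as follows; sizes are the build-time
 * defaults, and the 'special' buffer lives in its own allocation:
 *
 *	offset 0:	non-special dld buffers, packed in order,
 *			dld_buf_size[i] bytes each
 *	then:		ecntr ring buffer of LOG_DUMP_ECNTRS_MAX_BUFSIZE bytes
 *			(DEBUGABILITY_ECNTRS_LOGGING builds only)
 *	then:		event log filter buffer of LOG_DUMP_FILTER_MAX_BUFSIZE
 *			bytes (DHD_EVENT_LOG_FILTER builds only)
 */
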
20167void
20168dhd_log_dump_deinit(dhd_pub_t *dhd)
20169{
20170 struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
20171 int i = 0;
20172 dhd_info_t *dhd_info = dhd->info;
20173 dhd_dbg_ring_t *ring = NULL;
20174
20175 BCM_REFERENCE(ring);
20176
20177 if (dhd->concise_dbg_buf) {
20178 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20179 dhd->concise_dbg_buf = NULL;
20180 }
20181
20182 if (dhd->logdump_cookie) {
20183 dhd_logdump_cookie_deinit(dhd);
20184 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20185 dhd->logdump_cookie = NULL;
20186 }
20187
20188#if defined(DHD_EVENT_LOG_FILTER)
20189 if (dhd->event_log_filter) {
20190 dhd_event_log_filter_deinit(dhd);
20191 }
20192#endif /* DHD_EVENT_LOG_FILTER */
20193
20194#ifdef DEBUGABILITY_ECNTRS_LOGGING
20195 if (dhd->ecntr_dbg_ring) {
20196 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20197 dhd_dbg_ring_deinit(dhd, ring);
20198 ring->ring_buf = NULL;
20199 ring->ring_size = 0;
20200 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20201 dhd->ecntr_dbg_ring = NULL;
20202 }
20203#endif /* DEBUGABILITY_ECNTRS_LOGGING */
20204
20205 /* 'general' buffer points to start of the pre-alloc'd memory */
20206 dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
20207 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
20208#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20209 if (dld_buf->buffer) {
20210 DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20211 }
20212 if (dld_buf_special->buffer) {
20213 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20214 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20215 }
20216#else
20217 if (dld_buf->buffer) {
20218 MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20219 }
20220 if (dld_buf_special->buffer) {
20221 MFREE(dhd->osh, dld_buf_special->buffer,
20222 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20223 }
20224#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20225 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20226 dld_buf = &g_dld_buf[i];
20227 dld_buf->enable = 0;
20228 dld_buf->buffer = NULL;
20229 }
20230
20231 mutex_destroy(&dhd_info->logdump_lock);
20232}
20233
20234void
20235dhd_log_dump_write(int type, char *binary_data,
20236 int binary_len, const char *fmt, ...)
20237{
20238 int len = 0;
20239 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
20240 va_list args;
20241 unsigned long flags = 0;
20242 struct dhd_log_dump_buf *dld_buf = NULL;
20243 bool flush_log = FALSE;
20244
20245 if (type < 0 || type >= DLD_BUFFER_NUM) {
20246 DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
20247 __FUNCTION__, type));
20248 return;
20249 }
20250
20251 dld_buf = &g_dld_buf[type];
20252
20253 if (dld_buf->enable != 1) {
20254 return;
20255 }
20256
20257 va_start(args, fmt);
20258 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
20259	/* A non-C99-compliant vsnprintf returns -1 on truncation;
20260	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
20261	 */
20262 va_end(args);
20263 if (len < 0) {
20264 return;
20265 }
20266
20267 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
20268 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
20269 tmp_buf[len] = '\0';
20270 }
20271
20272 /* make a critical section to eliminate race conditions */
20273 spin_lock_irqsave(&dld_buf->lock, flags);
20274 if (dld_buf->remain < len) {
20275 dld_buf->wraparound = 1;
20276 dld_buf->present = dld_buf->front;
20277 dld_buf->remain = dld_buf_size[type];
20278 /* if wrap around happens, flush the ring buffer to the file */
20279 flush_log = TRUE;
20280 }
20281
20282 memcpy(dld_buf->present, tmp_buf, len);
20283 dld_buf->remain -= len;
20284 dld_buf->present += len;
20285 spin_unlock_irqrestore(&dld_buf->lock, flags);
20286
20287	/* sanity check: 'present' must never run past the end of the buffer */
20288 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
20289
20290 if (dld_buf->dhd_pub) {
20291 dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
20292 dhdp->logdump_periodic_flush =
20293 logdump_periodic_flush;
20294 if (logdump_periodic_flush && flush_log) {
20295 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
20296 sizeof(log_dump_type_t));
20297 if (flush_type) {
20298 *flush_type = type;
20299 dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
20300 }
20301 }
20302 }
20303}
20304
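/*
 * Example call (illustrative): appending a timestamped line to the
 * 'general' buffer; callers normally reach this through the DHD_LOG_DUMP
 * wrapper macros rather than calling it directly. binary_data/binary_len
 * are unused by the current implementation, so NULL/0 is fine:
 *
 *	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0,
 *		"[%s] %s: link down\n",
 *		dhd_log_dump_get_timestamp(), __FUNCTION__);
 */
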
20305char*
20306dhd_log_dump_get_timestamp(void)
20307{
20308 static char buf[16];
20309 u64 ts_nsec;
20310 unsigned long rem_nsec;
20311
20312 ts_nsec = local_clock();
20313 rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
20314 snprintf(buf, sizeof(buf), "%5lu.%06lu",
20315 (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
20316
20317 return buf;
20318}
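
/*
 * Worked example, assuming DIV_AND_MOD_U64_BY_U32() leaves the quotient in
 * ts_nsec and the remainder in rem_nsec: local_clock() = 123456789012 ns
 * gives ts_nsec = 123 and rem_nsec = 456789012, so the returned string is
 * "  123.456789" (seconds.microseconds, like printk timestamps).
 */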
20319#endif /* DHD_LOG_DUMP */
20320
20321#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
20322void
20323dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
20324{
20325 dhd_info_t * dhd;
20326
20327 if (dhdp) {
20328 dhd = dhdp->info;
20329 if (dhd) {
20330 flush_workqueue(dhd->tx_wq);
20331 flush_workqueue(dhd->rx_wq);
20332 }
20333 }
20334
20335 return;
20336}
20337#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
20338
20339#ifdef DHD_LB_TXP
20340#define DHD_LB_TXBOUND 64
20341/*
20342 * Function that performs the TX processing on a given CPU
20343 */
20344bool
20345dhd_lb_tx_process(dhd_info_t *dhd)
20346{
20347 struct sk_buff *skb;
20348 int cnt = 0;
20349 struct net_device *net;
20350 int ifidx;
20351 bool resched = FALSE;
20352
20353 DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
20354 if (dhd == NULL) {
20355 DHD_ERROR((" Null pointer DHD \r\n"));
20356 return resched;
20357 }
20358
20359 BCM_REFERENCE(net);
20360
20361 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
20362
20363 /* Base Loop to perform the actual Tx */
20364 do {
20365 skb = skb_dequeue(&dhd->tx_pend_queue);
20366 if (skb == NULL) {
20367 DHD_TRACE(("Dequeued a Null Packet \r\n"));
20368 break;
20369 }
20370 cnt++;
20371
20372 net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
20373 ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
20374
20375 DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
20376 net, ifidx));
20377
20378 __dhd_sendpkt(&dhd->pub, ifidx, skb);
20379
20380 if (cnt >= DHD_LB_TXBOUND) {
20381 resched = TRUE;
20382 break;
20383 }
20384
20385 } while (1);
20386
20387 DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
20388
20389 return resched;
20390}
20391
20392void
20393dhd_lb_tx_handler(unsigned long data)
20394{
20395 dhd_info_t *dhd = (dhd_info_t *)data;
20396
20397 if (dhd_lb_tx_process(dhd)) {
20398 dhd_tasklet_schedule(&dhd->tx_tasklet);
20399 }
20400}
20401
20402#endif /* DHD_LB_TXP */
20403
20404#ifdef DHD_DEBUG_UART
20405bool
20406dhd_debug_uart_is_running(struct net_device *dev)
20407{
20408 dhd_info_t *dhd = DHD_DEV_INFO(dev);
20409
20410 if (dhd->duart_execute) {
20411 return TRUE;
20412 }
20413
20414 return FALSE;
20415}
20416
20417static void
20418dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
20419{
20420 dhd_pub_t *dhdp = handle;
20421 dhd_debug_uart_exec(dhdp, "rd");
20422}
20423
20424static void
20425dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
20426{
20427 int ret;
20428
20429 char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
20430 char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
20431
20432#ifdef DHD_FW_COREDUMP
20433 if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
20434#endif // endif
20435 {
20436 if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
20437#ifdef DHD_FW_COREDUMP
20438 dhdp->memdump_success == FALSE ||
20439#endif // endif
20440 FALSE) {
20441 dhdp->info->duart_execute = TRUE;
20442 DHD_ERROR(("DHD: %s - execute %s %s\n",
20443 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
20444 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
20445 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
20446 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
20447 dhdp->info->duart_execute = FALSE;
20448
20449#ifdef DHD_LOG_DUMP
20450 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
20451#endif // endif
20452 {
20453 BUG_ON(1);
20454 }
20455 }
20456 }
20457}
20458#endif /* DHD_DEBUG_UART */
20459
20460#if defined(DHD_BLOB_EXISTENCE_CHECK)
20461void
20462dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
20463{
20464 struct file *fp;
20465 char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
20466 fp = filp_open(filepath, O_RDONLY, 0);
20467 if (IS_ERR(fp)) {
20468 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
20469 filepath));
20470 dhdp->is_blob = FALSE;
20471 } else {
20472 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath));
20473 dhdp->is_blob = TRUE;
20474#if defined(CONCATE_BLOB)
20475 strncat(fw_path, "_blob", strlen("_blob"));
20476#else
20477 BCM_REFERENCE(fw_path);
20478#endif /* CONCATE_BLOB */
20479 filp_close(fp, NULL);
20480 }
20481}
20482#endif /* DHD_BLOB_EXISTENCE_CHECK */
20483
20484#if defined(PCIE_FULL_DONGLE)
20485/** test / loopback */
20486void
20487dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
20488{
20489 dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
20490 dhd_info_t *dhd_info = (dhd_info_t *)handle;
20491
20492 if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
20493 DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__));
20494 return;
20495 }
20496 if (dhd_info == NULL) {
20497 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
20498 return;
20499 }
20500 if (dmmap == NULL) {
20501 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
20502 return;
20503 }
20504 dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
20505}
20506
20507void
20508dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
20509{
20510 dhd_info_t *dhd_info = dhdp->info;
20511
20512 dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
20513 DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
20514}
20515#endif /* PCIE_FULL_DONGLE */
20516/* ---------------------------- End of sysfs implementation ------------------------------------- */
20517
20518#ifdef SET_PCIE_IRQ_CPU_CORE
20519void
20520dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
20521{
20522 unsigned int pcie_irq = 0;
20523
20524 if (!dhdp) {
20525 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
20526 return;
20527 }
20528
20529 if (!dhdp->bus) {
20530 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
20531 return;
20532 }
20533
20534 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
20535
20536 if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
20537 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
20538 return;
20539 }
20540
20541	/*
20542	 * irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
20543	 * If the dedicated CPU core is not online, the PCIe interrupt is
20544	 * scheduled on CPU core 0 instead.
20545	 */
20546 switch (affinity_cmd) {
20547 case PCIE_IRQ_AFFINITY_OFF:
20548 break;
20549 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
20550 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20551 break;
20552#ifdef CONFIG_SOC_EXYNOS9810
20553 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
20554 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
20555 __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
20556 irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
20557 break;
20558#endif /* CONFIG_SOC_EXYNOS9810 */
20559 default:
20560 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
20561 __FUNCTION__, affinity_cmd));
20562 }
20563}
20564#endif /* SET_PCIE_IRQ_CPU_CORE */
20565
20566int
20567dhd_write_file(const char *filepath, char *buf, int buf_len)
20568{
20569 struct file *fp = NULL;
20570 mm_segment_t old_fs;
20571 int ret = 0;
20572
20573 /* change to KERNEL_DS address limit */
20574 old_fs = get_fs();
20575 set_fs(KERNEL_DS);
20576
20577 /* File is always created. */
20578 fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
20579 if (IS_ERR(fp)) {
20580 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
20581 __FUNCTION__, filepath, PTR_ERR(fp)));
20582 ret = BCME_ERROR;
20583 } else {
20584 if (fp->f_mode & FMODE_WRITE) {
20585 ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
20586 if (ret < 0) {
20587 DHD_ERROR(("%s: Couldn't write file '%s'\n",
20588 __FUNCTION__, filepath));
20589 ret = BCME_ERROR;
20590 } else {
20591 ret = BCME_OK;
20592 }
20593 }
20594 filp_close(fp, NULL);
20595 }
20596
20597 /* restore previous address limit */
20598 set_fs(old_fs);
20599
20600 return ret;
20601}
20602
20603int
20604dhd_read_file(const char *filepath, char *buf, int buf_len)
20605{
20606 struct file *fp = NULL;
20607 mm_segment_t old_fs;
20608 int ret;
20609
20610 /* change to KERNEL_DS address limit */
20611 old_fs = get_fs();
20612 set_fs(KERNEL_DS);
20613
20614 fp = filp_open(filepath, O_RDONLY, 0);
20615 if (IS_ERR(fp)) {
20616 set_fs(old_fs);
20617 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
20618 return BCME_ERROR;
20619 }
20620
20621 ret = kernel_read(fp, 0, buf, buf_len);
20622 filp_close(fp, NULL);
20623
20624 /* restore previous address limit */
20625 set_fs(old_fs);
20626
20627 /* Return the number of bytes read */
20628 if (ret > 0) {
20629		/* Read succeeded */
20630 ret = 0;
20631 } else {
20632 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
20633 __FUNCTION__, filepath, ret));
20634 ret = BCME_ERROR;
20635 }
20636
20637 return ret;
20638}
20639
20640int
20641dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
20642{
20643 int ret;
20644
20645 ret = dhd_write_file(filepath, buf, buf_len);
20646 if (ret < 0) {
20647 return ret;
20648 }
20649
20650 /* Read the file again and check if the file size is not zero */
20651 memset(buf, 0, buf_len);
20652 ret = dhd_read_file(filepath, buf, buf_len);
20653
20654 return ret;
20655}
20656
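/*
 * Example (illustrative; the path is a placeholder): persist a small
 * status blob and verify it reads back non-empty:
 *
 *	char buf[32] = "dongle trap 0x19\n";
 *	if (dhd_write_file_and_check("/data/misc/wifi/dhd_status",
 *			buf, sizeof(buf)) != BCME_OK) {
 *		DHD_ERROR(("status file write/readback failed\n"));
 *	}
 */
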
20657#ifdef FILTER_IE
20658int dhd_read_from_file(dhd_pub_t *dhd)
20659{
20660 int ret = 0, nread = 0;
20661 void *fd;
20662 uint8 *buf;
20663 NULL_CHECK(dhd, "dhd is NULL", ret);
20664
20665 buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
20666 if (!buf) {
20667		DHD_ERROR(("error: failed to allocate buf.\n"));
20668 return BCME_NOMEM;
20669 }
20670
20671 /* open file to read */
20672 fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
20673 if (!fd) {
20674 DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH));
20675 ret = BCME_EPERM;
20676 goto exit;
20677 }
20678 nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
20679 if (nread > 0) {
20680 buf[nread] = '\0';
20681 if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
20682 DHD_ERROR(("error: failed to parse filter ie\n"));
20683 }
20684 } else {
20685		DHD_ERROR(("error: zero length file, failed to read\n"));
20686 ret = BCME_ERROR;
20687 }
20688 dhd_os_close_image1(dhd, fd);
20689exit:
20690 if (buf) {
20691 MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
20692 buf = NULL;
20693 }
20694 return ret;
20695}
20696
20697int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
20698{
20699 uint8* pstr = buf;
20700 int element_count = 0;
20701
20702 if (buf == NULL) {
20703 return BCME_ERROR;
20704 }
20705
20706 while (*pstr != '\0') {
20707 if (*pstr == '\n') {
20708 element_count++;
20709 }
20710 pstr++;
20711 }
20712	/*
20713	 * No newline character follows the last line,
20714	 * so count the last line explicitly.
20715	 */
20716 element_count++;
20717
20718 return element_count;
20719}
20720
20721int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
20722{
20723 uint8 i, j, msb, lsb, oui_len = 0;
20724	/*
20725	 * An OUI can vary from 3 bytes to 5 bytes.
20726	 * When read from file as ASCII input it can
20727	 * take a maximum of 14 bytes and a minimum of
20728	 * 8 bytes, including the ":" separators.
20729	 * Example 5-byte OUI <AB:DE:BE:CD:FA>
20730	 * Example 3-byte OUI <AB:DC:EF>
20731	 */
20732
20733 if ((inbuf == NULL) || (len < 8) || (len > 14)) {
20734 DHD_ERROR(("error: failed to parse OUI \n"));
20735 return BCME_ERROR;
20736 }
20737
20738 for (j = 0, i = 0; i < len; i += 3, ++j) {
20739 if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
20740 DHD_ERROR(("error: invalid OUI format \n"));
20741 return BCME_ERROR;
20742 }
20743 msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
20744 lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
20745 'A' + 10 : inbuf[i + 1] - '0';
20746 oui[j] = (msb << 4) | lsb;
20747 }
20748	/* Size of the OUI; it can be 3, 4 or 5 bytes */
20749 oui_len = j;
20750
20751 return oui_len;
20752}
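
/*
 * Worked example: input "00:50:F2" (len = 8) iterates i = 0, 3, 6 and
 * produces oui[0] = 0x00, oui[1] = 0x50, oui[2] = 0xF2, returning
 * oui_len = 3.
 */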
20753
20754int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
20755{
20756 int i = 0;
20757
20758 while (i < len) {
20759 if (!bcm_isdigit(buf[i])) {
20760 DHD_ERROR(("error: non digit value found in filter_ie \n"));
20761 return BCME_ERROR;
20762 }
20763 i++;
20764 }
20765 if (bcm_atoi((char*)buf) > 255) {
20766 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
20767 return BCME_ERROR;
20768 }
20769
20770 return BCME_OK;
20771}
20772
20773int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
20774{
20775 int element_count = 0, i = 0, oui_size = 0, ret = 0;
20776 uint16 bufsize, buf_space_left, id = 0, len = 0;
20777 uint16 filter_iovsize, all_tlvsize;
20778 wl_filter_ie_tlv_t *p_ie_tlv = NULL;
20779 wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
20780 char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
20781 uint8 data[20];
20782
20783 element_count = dhd_get_filter_ie_count(dhd, buf);
20784 DHD_INFO(("total element count %d \n", element_count));
20785 /* Calculate the whole buffer size */
20786 filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
20787 p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
20788
20789 if (p_filter_iov == NULL) {
20790 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
20791 return BCME_ERROR;
20792 }
20793
20794 /* setup filter iovar header */
20795 p_filter_iov->version = WL_FILTER_IE_VERSION;
20796 p_filter_iov->len = filter_iovsize;
20797 p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
20798 p_filter_iov->pktflag = FC_PROBE_REQ;
20799 p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
20800 /* setup TLVs */
20801 bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
20802 p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
20803 buf_space_left = bufsize;
20804
20805 while ((i < element_count) && (buf != NULL)) {
20806 len = 0;
20807 /* token contains one line of input data */
20808 token = bcmstrtok((char**)&buf, "\n", NULL);
20809 if (token == NULL) {
20810 break;
20811 }
20812 if ((ele_token = bcmstrstr(token, ",")) == NULL) {
20813 /* only element id is present */
20814 if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
20815 DHD_ERROR(("error: Invalid element id \n"));
20816 ret = BCME_ERROR;
20817 goto exit;
20818 }
20819 id = bcm_atoi((char*)token);
20820 data[len++] = WL_FILTER_IE_SET;
20821 } else {
20822 /* oui is present */
20823 ele_token = bcmstrtok(&token, ",", NULL);
20824 if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
20825 strlen(ele_token)) == BCME_ERROR)) {
20826 DHD_ERROR(("error: Invalid element id \n"));
20827 ret = BCME_ERROR;
20828 goto exit;
20829 }
20830 id = bcm_atoi((char*)ele_token);
20831 data[len++] = WL_FILTER_IE_SET;
20832 if ((oui_token = bcmstrstr(token, ",")) == NULL) {
20833 oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
20834 if (oui_size == BCME_ERROR) {
20835 DHD_ERROR(("error: Invalid OUI \n"));
20836 ret = BCME_ERROR;
20837 goto exit;
20838 }
20839 len += oui_size;
20840 } else {
20841 /* type is present */
20842 oui_token = bcmstrtok(&token, ",", NULL);
20843 if ((oui_token == NULL) || ((oui_size =
20844 dhd_parse_oui(dhd, oui_token,
20845 &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
20846 DHD_ERROR(("error: Invalid OUI \n"));
20847 ret = BCME_ERROR;
20848 goto exit;
20849 }
20850 len += oui_size;
20851 if ((type = bcmstrstr(token, ",")) == NULL) {
20852 if (dhd_check_valid_ie(dhd, token,
20853 strlen(token)) == BCME_ERROR) {
20854 DHD_ERROR(("error: Invalid type \n"));
20855 ret = BCME_ERROR;
20856 goto exit;
20857 }
20858 data[len++] = bcm_atoi((char*)token);
20859 } else {
20860 /* subtype is present */
20861 type = bcmstrtok(&token, ",", NULL);
20862 if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
20863 strlen(type)) == BCME_ERROR)) {
20864 DHD_ERROR(("error: Invalid type \n"));
20865 ret = BCME_ERROR;
20866 goto exit;
20867 }
20868 data[len++] = bcm_atoi((char*)type);
20869 /* subtype is last element */
20870 if ((token == NULL) || (*token == '\0') ||
20871 (dhd_check_valid_ie(dhd, token,
20872 strlen(token)) == BCME_ERROR)) {
20873 DHD_ERROR(("error: Invalid subtype \n"));
20874 ret = BCME_ERROR;
20875 goto exit;
20876 }
20877 data[len++] = bcm_atoi((char*)token);
20878 }
20879 }
20880 }
20881 ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
20882 &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
20883 if (ret != BCME_OK) {
20884			DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed, "
20885				"status=%d\n", __FUNCTION__, ret));
20886 goto exit;
20887 }
20888 i++;
20889 }
20890 if (i == 0) {
20891 /* file is empty or first line is blank */
20892 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
20893 ret = BCME_ERROR;
20894 goto exit;
20895 }
20896 /* update the iov header, set len to include all TLVs + header */
20897 all_tlvsize = (bufsize - buf_space_left);
20898 p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
20899 ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
20900 p_filter_iov->len, NULL, 0, TRUE);
20901 if (ret != BCME_OK) {
20902 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
20903 }
20904exit:
20905 /* clean up */
20906 if (p_filter_iov) {
20907 MFREE(dhd->osh, p_filter_iov, filter_iovsize);
20908 p_filter_iov = NULL;
20909 }
20910 return ret;
20911}
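
/*
 * Example FILTER_IE_PATH contents (values are illustrative): one IE per
 * line, as <element id>[,<OUI>[,<type>[,<subtype>]]], which the parser
 * above packs into one XTLV entry each for the "filter_ie" iovar:
 *
 *	127
 *	221,00:50:F2
 *	221,00:50:F2,4
 *	221,00:50:F2,4,0
 */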
20912#endif /* FILTER_IE */
20913#ifdef DHD_WAKE_STATUS
20914wake_counts_t*
20915dhd_get_wakecount(dhd_pub_t *dhdp)
20916{
20917 return dhd_bus_get_wakecount(dhdp);
20918}
20919#endif /* DHD_WAKE_STATUS */
20920
20921int
20922dhd_get_random_bytes(uint8 *buf, uint len)
20923{
20924#ifdef BCMPCIE
20925 get_random_bytes_arch(buf, len);
20926#endif /* BCMPCIE */
20927 return BCME_OK;
20928}
20929
20930#if defined(DHD_HANG_SEND_UP_TEST)
20931void
20932dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
20933{
20934 dhd_info_t *dhd = NULL;
20935 dhd_pub_t *dhdp = NULL;
20936 uint reason = HANG_REASON_MAX;
20937 uint32 fw_test_code = 0;
20938 dhd = DHD_DEV_INFO(dev);
20939
20940 if (dhd) {
20941 dhdp = &dhd->pub;
20942 }
20943
20944 if (!dhd || !dhdp) {
20945 return;
20946 }
20947
20948 reason = (uint) bcm_strtoul(string_num, NULL, 0);
20949 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
20950
20951 if (reason == 0) {
20952 if (dhdp->req_hang_type) {
20953 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
20954 __FUNCTION__, dhdp->req_hang_type));
20955 dhdp->req_hang_type = 0;
20956 return;
20957 } else {
20958 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
20959 return;
20960 }
20961 } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
20962 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
20963 return;
20964 }
20965
20966 if (dhdp->req_hang_type != 0) {
20967 DHD_ERROR(("Already HANG requested for test\n"));
20968 return;
20969 }
20970
20971 switch (reason) {
20972 case HANG_REASON_IOCTL_RESP_TIMEOUT:
20973 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
20974 dhdp->req_hang_type = reason;
20975			fw_test_code = 102; /* resumed on timeout */
20976 dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
20977 WLC_SET_VAR, TRUE, 0);
20978 break;
20979 case HANG_REASON_DONGLE_TRAP:
20980 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
20981 dhdp->req_hang_type = reason;
20982 fw_test_code = 99; /* dongle trap */
20983 dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
20984 WLC_SET_VAR, TRUE, 0);
20985 break;
20986 case HANG_REASON_D3_ACK_TIMEOUT:
20987 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
20988 dhdp->req_hang_type = reason;
20989 break;
20990 case HANG_REASON_BUS_DOWN:
20991 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
20992 dhdp->req_hang_type = reason;
20993 break;
20994 case HANG_REASON_PCIE_LINK_DOWN:
20995 case HANG_REASON_MSGBUF_LIVELOCK:
20996 dhdp->req_hang_type = 0;
20997 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
20998 break;
20999 case HANG_REASON_IFACE_DEL_FAILURE:
21000 dhdp->req_hang_type = 0;
21001 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
21002 break;
21003 case HANG_REASON_HT_AVAIL_ERROR:
21004 dhdp->req_hang_type = 0;
21005 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
21006 break;
21007 case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
21008 DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
21009 dhdp->req_hang_type = reason;
21010 break;
21011 default:
21012 dhdp->req_hang_type = 0;
21013 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
21014 break;
21015 }
21016}
21017#endif /* DHD_HANG_SEND_UP_TEST */
21018
21019#ifdef DHD_ERPOM
21020static void
21021dhd_error_recovery(void *handle, void *event_info, u8 event)
21022{
21023 dhd_info_t *dhd = handle;
21024 dhd_pub_t *dhdp;
21025 int ret = 0;
21026
21027 if (!dhd) {
21028 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21029 return;
21030 }
21031
21032 dhdp = &dhd->pub;
21033
21034 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
21035 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
21036 __FUNCTION__));
21037 return;
21038 }
21039
21040 ret = dhd_bus_perform_flr_with_quiesce(dhdp);
21041 if (ret != BCME_DNGL_DEVRESET) {
21042		DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d, "
21043			"toggle REG_ON\n", __FUNCTION__, ret));
21044 /* toggle REG_ON */
21045 dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
21046 return;
21047 }
21048}
21049
21050void
21051dhd_schedule_reset(dhd_pub_t *dhdp)
21052{
21053 if (dhdp->enable_erpom) {
21054 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
21055 DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
21056 }
21057}
21058#endif /* DHD_ERPOM */
21059
21060#ifdef DHD_PKT_LOGGING
21061void
21062dhd_pktlog_dump(void *handle, void *event_info, u8 event)
21063{
21064 dhd_info_t *dhd = handle;
21065
21066 if (!dhd) {
21067 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21068 return;
21069 }
21070
21071 if (dhd_pktlog_write_file(&dhd->pub)) {
21072 DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
21073 return;
21074 }
21075}
21076
21077void
21078dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
21079{
21080 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21081 (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
21082 dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
21083}
21084#endif /* DHD_PKT_LOGGING */
21085
21086#ifdef BIGDATA_SOFTAP
21087void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
21088{
21089 struct bcm_cfg80211 *cfg;
21090 dhd_pub_t *dhdp;
21091 ap_sta_wq_data_t *p_wq_data;
21092
21093 if (!bcm_cfg || !ndev || !e) {
21094 WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
21095 return;
21096 }
21097
21098 cfg = (struct bcm_cfg80211 *)bcm_cfg;
21099 dhdp = (dhd_pub_t *)cfg->pub;
21100
21101 if (!dhdp || !cfg->ap_sta_info) {
21102 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
21103 return;
21104 }
21105
21106 p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
21107 if (unlikely(!p_wq_data)) {
21108 DHD_ERROR(("%s(): could not allocate memory for - "
21109 "ap_sta_wq_data_t\n", __FUNCTION__));
21110 return;
21111 }
21112
21113 mutex_lock(&cfg->ap_sta_info->wq_data_sync);
21114
21115 memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
21116 p_wq_data->dhdp = dhdp;
21117 p_wq_data->bcm_cfg = cfg;
21118 p_wq_data->ndev = (struct net_device *)ndev;
21119
21120 mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
21121
21122 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21123 p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
21124 wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
21125
21126}
21127#endif /* BIGDATA_SOFTAP */
21128
21129void
21130get_debug_dump_time(char *str)
21131{
21132 struct timeval curtime;
21133 unsigned long local_time;
21134 struct rtc_time tm;
21135
21136 if (!strlen(str)) {
21137 do_gettimeofday(&curtime);
21138 local_time = (u32)(curtime.tv_sec -
21139 (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
21140 rtc_time_to_tm(local_time, &tm);
21141
21142 snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
21143 tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
21144 tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
21145 }
21146}
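
/*
 * Example (illustrative): for 2019-02-27 13:05:09.123 local time the
 * arguments become 19, 02, 27, 13, 05, 09 plus 123 ms, formatted per
 * DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS (exact separators come from that
 * macro, which is defined elsewhere).
 */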
21147
21148void
21149clear_debug_dump_time(char *str)
21150{
21151 memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
21152}
21153#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
21154void
21155copy_debug_dump_time(char *dest, char *src)
21156{
21157 memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
21158}
21159#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
21160
21161#define KIRQ_PRINT_BUF_LEN 256
21162
21163void
21164dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
21165{
21166 unsigned long flags = 0;
21167 struct irq_desc *desc;
21168 int i; /* cpu iterator */
21169 struct bcmstrbuf strbuf;
21170 char tmp_buf[KIRQ_PRINT_BUF_LEN];
21171
21172#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
21173 desc = irq_to_desc(irq_num);
21174 if (!desc) {
21175 DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
21176 return;
21177 }
21178 bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
21179 raw_spin_lock_irqsave(&desc->lock, flags);
21180 bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
21181 for_each_online_cpu(i)
21182 bcm_bprintf(&strbuf, "%10u ",
21183 desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
21184 if (desc->irq_data.chip) {
21185 if (desc->irq_data.chip->name)
21186 bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
21187 else
21188 bcm_bprintf(&strbuf, " %8s", "-");
21189 } else {
21190 bcm_bprintf(&strbuf, " %8s", "None");
21191 }
21192#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
21193 if (desc->irq_data.domain)
21194 bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
21195#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
21196 bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
21197#endif // endif
21198#endif /* LINUX VERSION > 3.1.0 */
21199
21200 if (desc->name)
21201 bcm_bprintf(&strbuf, "-%-8s", desc->name);
21202
21203 DHD_ERROR(("%s\n", strbuf.origbuf));
21204 raw_spin_unlock_irqrestore(&desc->lock, flags);
21205#endif /* LINUX VERSION > 2.6.28 */
21206}
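
/*
 * The line printed above might look like this on a 4-CPU system (values
 * are illustrative): per-CPU counts, chip name, hwirq, trigger type and
 * the irq descriptor name:
 *
 *	dhd irq 277:     12034         0         0         0    GICv2 83 Level   -pcie0
 */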
21207
21208void
21209dhd_show_kirqstats(dhd_pub_t *dhd)
21210{
21211	unsigned int irq = (unsigned int)-1;
21212#ifdef BCMPCIE
21213 dhdpcie_get_pcieirq(dhd->bus, &irq);
21214#endif /* BCMPCIE */
21215#ifdef BCMSDIO
21216 irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
21217#endif /* BCMSDIO */
21218	if (irq != (unsigned int)-1) {
21219#ifdef BCMPCIE
21220 DHD_ERROR(("DUMP data kernel irq stats : \n"));
21221#endif /* BCMPCIE */
21222#ifdef BCMSDIO
21223 DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
21224#endif /* BCMSDIO */
21225 dhd_print_kirqstats(dhd, irq);
21226 }
21227#ifdef BCMPCIE_OOB_HOST_WAKE
21228 irq = dhdpcie_get_oob_irq_num(dhd->bus);
21229 if (irq) {
21230 DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
21231 dhd_print_kirqstats(dhd, irq);
21232 }
21233#endif /* BCMPCIE_OOB_HOST_WAKE */
21234}
21235
21236void
21237dhd_print_tasklet_status(dhd_pub_t *dhd)
21238{
21239 dhd_info_t *dhdinfo;
21240
21241 if (!dhd) {
21242 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
21243 return;
21244 }
21245
21246 dhdinfo = dhd->info;
21247
21248 if (!dhdinfo) {
21249 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
21250 return;
21251 }
21252
21253 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
21254}
21255
21256/*
21257 * DHD RING
21258 */
21259#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
21260#define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
21261
21262#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
21263#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
21264
21265#define DHD_RING_MAGIC 0x20170910
21266#define DHD_RING_IDX_INVALID 0xffffffff
21267
21268typedef struct {
21269 uint32 elem_size;
21270 uint32 elem_cnt;
21271 uint32 write_idx; /* next write index, -1 : not started */
21272	uint32 read_idx;	/* next read index, -1 : not started */
21273
21274 /* protected elements during serialization */
21275	int lock_idx;	/* start index of locked range; elements will not be overwritten */
21276 int lock_count; /* number of locked, from lock idx */
21277
21278 /* saved data elements */
21279 void *elem;
21280} dhd_fixed_ring_info_t;
21281
21282typedef struct {
21283 uint32 magic;
21284 uint32 type;
21285	struct mutex ring_sync;	/* mutex to serialize ring access */
21286 union {
21287 dhd_fixed_ring_info_t fixed;
21288 };
21289} dhd_ring_info_t;
21290
21291uint32
21292dhd_ring_get_hdr_size(void)
21293{
21294 return sizeof(dhd_ring_info_t);
21295}
21296
21297void *
21298dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt)
21299{
21300 dhd_ring_info_t *ret_ring;
21301
21302 if (!buf) {
21303 DHD_RING_ERR(("NO RING BUFFER\n"));
21304 return NULL;
21305 }
21306 if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
21307 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
21308 return NULL;
21309 }
21310
21311 ret_ring = (dhd_ring_info_t *)buf;
21312 ret_ring->type = DHD_RING_TYPE_FIXED;
21313 mutex_init(&ret_ring->ring_sync);
21314 ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
21315 ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
21316 ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
21317 ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
21318 ret_ring->fixed.elem_size = elem_size;
21319 ret_ring->fixed.elem_cnt = elem_cnt;
21320 ret_ring->magic = DHD_RING_MAGIC;
21321 return ret_ring;
21322}
21323
21324void
21325dhd_ring_deinit(void *_ring)
21326{
21327 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21328 dhd_fixed_ring_info_t *fixed;
21329 if (!ring) {
21330 return;
21331 }
21332
21333 if (ring->magic != DHD_RING_MAGIC) {
21334 return;
21335 }
21336
21337 mutex_destroy(&ring->ring_sync);
21338 fixed = &ring->fixed;
21339 memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
21340 fixed->elem_size = fixed->elem_cnt = 0;
21341 ring->type = 0;
21342 ring->magic = 0;
21343 return;
21344}
21345
21346/* get counts between two indexes of ring buffer (internal only) */
21347static inline int
21348__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
21349{
21350 if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
21351 return 0;
21352 }
21353
21354 return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
21355}
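
/*
 * Worked example: with elem_cnt = 8, start = 6 and end = 1 the occupied
 * slots are 6, 7, 0, 1, and (8 + 1 - 6) % 8 + 1 = 4. Note the count is
 * inclusive of both endpoints.
 */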
21356
21357static inline int
21358__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
21359{
21360 return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21361}
21362
21363static inline void *
21364__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
21365{
21366 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21367 return NULL;
21368 }
21369 return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
21370}
21371
21372static inline void
21373__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
21374{
21375 uint32 next_idx;
21376
21377 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21378 DHD_RING_ERR(("EMPTY RING\n"));
21379 return;
21380 }
21381
21382 next_idx = (ring->read_idx + 1) % ring->elem_cnt;
21383 if (ring->read_idx == ring->write_idx) {
21384 /* Become empty */
21385 ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
21386 return;
21387 }
21388
21389 ring->read_idx = next_idx;
21390 return;
21391}
21392
21393static inline void *
21394__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
21395{
21396 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21397 return NULL;
21398 }
21399 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21400}
21401
21402static inline void *
21403__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
21404{
21405 uint32 tmp_idx;
21406
21407 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21408 ring->read_idx = ring->write_idx = 0;
21409 return (uint8 *)ring->elem;
21410 }
21411
21412 /* check next index is not locked */
21413 tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
21414 if (ring->lock_idx == tmp_idx) {
21415 return NULL;
21416 }
21417
21418 ring->write_idx = tmp_idx;
21419 if (ring->write_idx == ring->read_idx) {
21420 /* record is full, drop oldest one */
21421 ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
21422
21423 }
21424 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21425}
21426
21427static inline uint32
21428__dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t *ring, void *ptr, char *sig)
21429{
21430 uint32 diff;
21431 uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
21432
21433 if (ptr < ring->elem) {
21434 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
21435 return ret_idx;
21436 }
21437 diff = (uint32)((uint8 *)ptr - (uint8 *)ring->elem);
21438 if (diff % ring->elem_size != 0) {
21439 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
21440 return ret_idx;
21441 }
21442 ret_idx = diff / ring->elem_size;
21443 if (ret_idx >= ring->elem_cnt) {
21444 DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring->elem_cnt, ret_idx));
21445 }
21446 return ret_idx;
21447}
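
/*
 * Worked example: with elem_size = 64 and ptr = (uint8 *)ring->elem + 192,
 * diff = 192 and 192 % 64 == 0, so the returned index is 192 / 64 = 3.
 * A misaligned pointer returns DHD_RING_IDX_INVALID; an in-bounds pointer
 * past the last element returns an index >= elem_cnt, which callers reject.
 */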
21448
21449static inline void *
21450__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev)
21451{
21452 uint32 cur_idx;
21453
21454 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21455 DHD_RING_ERR(("EMPTY RING\n"));
21456 return NULL;
21457 }
21458
21459 cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "NEXT");
21460 if (cur_idx >= ring->elem_cnt) {
21461 return NULL;
21462 }
21463
21464 if (cur_idx == ring->write_idx) {
21465 /* no more new record */
21466 return NULL;
21467 }
21468
21469 cur_idx = (cur_idx + 1) % ring->elem_cnt;
21470 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21471}
21472
21473static inline void *
21474__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev)
21475{
21476 uint32 cur_idx;
21477
21478 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21479 DHD_RING_ERR(("EMPTY RING\n"));
21480 return NULL;
21481 }
21482 cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "PREV");
21483 if (cur_idx >= ring->elem_cnt) {
21484 return NULL;
21485 }
21486 if (cur_idx == ring->read_idx) {
21487		/* no older record */
21488 return NULL;
21489 }
21490
21491 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
21492 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21493}
21494
21495static inline void
21496__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr)
21497{
21498 uint32 first_idx;
21499 uint32 last_idx;
21500 uint32 ring_filled_cnt;
21501 uint32 tmp_cnt;
21502
21503 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21504 DHD_RING_ERR(("EMPTY RING\n"));
21505 return;
21506 }
21507
21508 if (first_ptr) {
21509 first_idx = __dhd_fixed_ring_ptr2idx(ring, first_ptr, "LCK FIRST");
21510 if (first_idx >= ring->elem_cnt) {
21511 return;
21512 }
21513 } else {
21514 first_idx = ring->read_idx;
21515 }
21516
21517 if (last_ptr) {
21518 last_idx = __dhd_fixed_ring_ptr2idx(ring, last_ptr, "LCK LAST");
21519 if (last_idx >= ring->elem_cnt) {
21520 return;
21521 }
21522 } else {
21523 last_idx = ring->write_idx;
21524 }
21525
21526 ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21527 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
21528 if (tmp_cnt > ring_filled_cnt) {
21529		DHD_RING_ERR(("LOCK FIRST POINTS TO AN EMPTY ELEM: write: %d read: %d cur:%d\n",
21530 ring->write_idx, ring->read_idx, first_idx));
21531 return;
21532 }
21533
21534 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
21535 if (tmp_cnt > ring_filled_cnt) {
21536		DHD_RING_ERR(("LOCK LAST POINTS TO AN EMPTY ELEM: write: %d read: %d cur:%d\n",
21537 ring->write_idx, ring->read_idx, last_idx));
21538 return;
21539 }
21540
21541 ring->lock_idx = first_idx;
21542 ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
21543 return;
21544}
21545
21546static inline void
21547__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
21548{
21549 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21550 DHD_RING_ERR(("EMPTY RING\n"));
21551 return;
21552 }
21553
21554 ring->lock_idx = DHD_RING_IDX_INVALID;
21555 ring->lock_count = 0;
21556 return;
21557}
21558static inline void *
21559__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
21560{
21561 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21562 DHD_RING_ERR(("EMPTY RING\n"));
21563 return NULL;
21564 }
21565 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21566 DHD_RING_ERR(("NO LOCK POINT\n"));
21567 return NULL;
21568 }
21569 return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
21570}
21571
21572static inline void *
21573__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
21574{
21575 int lock_last_idx;
21576 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21577 DHD_RING_ERR(("EMPTY RING\n"));
21578 return NULL;
21579 }
21580 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21581 DHD_RING_ERR(("NO LOCK POINT\n"));
21582 return NULL;
21583 }
21584
21585 lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
21586 return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
21587}
21588
21589static inline int
21590__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
21591{
21592 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21593 DHD_RING_ERR(("EMPTY RING\n"));
21594 return BCME_ERROR;
21595 }
21596 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21597 DHD_RING_ERR(("NO LOCK POINT\n"));
21598 return BCME_ERROR;
21599 }
21600 return ring->lock_count;
21601}
21602
21603static inline void
21604__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
21605{
21606 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21607 DHD_RING_ERR(("EMPTY RING\n"));
21608 return;
21609 }
21610 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21611 DHD_RING_ERR(("NO LOCK POINT\n"));
21612 return;
21613 }
21614
21615 ring->lock_count--;
21616 if (ring->lock_count <= 0) {
21617 ring->lock_idx = DHD_RING_IDX_INVALID;
21618 } else {
21619 ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
21620 }
21621 return;
21622}
21623
21624/* Get first element : oldest element */
21625void *
21626dhd_ring_get_first(void *_ring)
21627{
21628 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21629 void *ret = NULL;
21630
21631 if (!ring || ring->magic != DHD_RING_MAGIC) {
21632 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21633 return NULL;
21634 }
21635
21636 mutex_lock(&ring->ring_sync);
21637 if (ring->type == DHD_RING_TYPE_FIXED) {
21638 ret = __dhd_fixed_ring_get_first(&ring->fixed);
21639 }
21640 mutex_unlock(&ring->ring_sync);
21641 return ret;
21642}
21643
21644/* Free first element : oldest element */
21645void
21646dhd_ring_free_first(void *_ring)
21647{
21648 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21649
21650 if (!ring || ring->magic != DHD_RING_MAGIC) {
21651 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21652 return;
21653 }
21654
21655 mutex_lock(&ring->ring_sync);
21656 if (ring->type == DHD_RING_TYPE_FIXED) {
21657 __dhd_fixed_ring_free_first(&ring->fixed);
21658 }
21659 mutex_unlock(&ring->ring_sync);
21660 return;
21661}
21662
21663/* Get latest element */
21664void *
21665dhd_ring_get_last(void *_ring)
21666{
21667 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21668 void *ret = NULL;
21669
21670 if (!ring || ring->magic != DHD_RING_MAGIC) {
21671 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21672 return NULL;
21673 }
21674
21675 mutex_lock(&ring->ring_sync);
21676 if (ring->type == DHD_RING_TYPE_FIXED) {
21677 ret = __dhd_fixed_ring_get_last(&ring->fixed);
21678 }
21679 mutex_unlock(&ring->ring_sync);
21680 return ret;
21681}
21682
21683/* Get the next slot that can be written to.
21684 * Unread elements may be overwritten;
21685 * returns NULL if the next slot is locked.
21686 */
21687void *
21688dhd_ring_get_empty(void *_ring)
21689{
21690 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21691 void *ret = NULL;
21692
21693 if (!ring || ring->magic != DHD_RING_MAGIC) {
21694 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21695 return NULL;
21696 }
21697
21698 mutex_lock(&ring->ring_sync);
21699 if (ring->type == DHD_RING_TYPE_FIXED) {
21700 ret = __dhd_fixed_ring_get_empty(&ring->fixed);
21701 }
21702 mutex_unlock(&ring->ring_sync);
21703 return ret;
21704}
21705
21706void *
21707dhd_ring_get_next(void *_ring, void *cur)
21708{
21709 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21710 void *ret = NULL;
21711
21712 if (!ring || ring->magic != DHD_RING_MAGIC) {
21713 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21714 return NULL;
21715 }
21716
21717 mutex_lock(&ring->ring_sync);
21718 if (ring->type == DHD_RING_TYPE_FIXED) {
21719 ret = __dhd_fixed_ring_get_next(&ring->fixed, cur);
21720 }
21721 mutex_unlock(&ring->ring_sync);
21722 return ret;
21723}
21724
21725void *
21726dhd_ring_get_prev(void *_ring, void *cur)
21727{
21728 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21729 void *ret = NULL;
21730
21731 if (!ring || ring->magic != DHD_RING_MAGIC) {
21732 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21733 return NULL;
21734 }
21735
21736 mutex_lock(&ring->ring_sync);
21737 if (ring->type == DHD_RING_TYPE_FIXED) {
21738 ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur);
21739 }
21740 mutex_unlock(&ring->ring_sync);
21741 return ret;
21742}
21743
21744int
21745dhd_ring_get_cur_size(void *_ring)
21746{
21747 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21748 int cnt = 0;
21749
21750 if (!ring || ring->magic != DHD_RING_MAGIC) {
21751 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21752 return cnt;
21753 }
21754
21755 mutex_lock(&ring->ring_sync);
21756 if (ring->type == DHD_RING_TYPE_FIXED) {
21757 cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
21758 }
21759 mutex_unlock(&ring->ring_sync);
21760 return cnt;
21761}
21762
21763/* protect elements between first_ptr and last_ptr (inclusive) */
21764void
21765dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
21766{
21767 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21768
21769 if (!ring || ring->magic != DHD_RING_MAGIC) {
21770 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21771 return;
21772 }
21773
21774 mutex_lock(&ring->ring_sync);
21775 if (ring->type == DHD_RING_TYPE_FIXED) {
21776 __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr);
21777 }
21778 mutex_unlock(&ring->ring_sync);
21779 return;
21780}
21781
21782/* release all locked elements */
21783void
21784dhd_ring_lock_free(void *_ring)
21785{
21786 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21787
21788 if (!ring || ring->magic != DHD_RING_MAGIC) {
21789 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21790 return;
21791 }
21792
21793 mutex_lock(&ring->ring_sync);
21794 if (ring->type == DHD_RING_TYPE_FIXED) {
21795 __dhd_fixed_ring_lock_free(&ring->fixed);
21796 }
21797 mutex_unlock(&ring->ring_sync);
21798 return;
21799}
21800
21801void *
21802dhd_ring_lock_get_first(void *_ring)
21803{
21804 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21805 void *ret = NULL;
21806
21807 if (!ring || ring->magic != DHD_RING_MAGIC) {
21808 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21809 return NULL;
21810 }
21811
21812 mutex_lock(&ring->ring_sync);
21813 if (ring->type == DHD_RING_TYPE_FIXED) {
21814 ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
21815 }
21816 mutex_unlock(&ring->ring_sync);
21817 return ret;
21818}
21819
21820void *
21821dhd_ring_lock_get_last(void *_ring)
21822{
21823 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21824 void *ret = NULL;
21825
21826 if (!ring || ring->magic != DHD_RING_MAGIC) {
21827 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21828 return NULL;
21829 }
21830
21831 mutex_lock(&ring->ring_sync);
21832 if (ring->type == DHD_RING_TYPE_FIXED) {
21833 ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
21834 }
21835 mutex_unlock(&ring->ring_sync);
21836 return ret;
21837}
21838
21839int
21840dhd_ring_lock_get_count(void *_ring)
21841{
21842 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21843 int ret = BCME_ERROR;
21844
21845 if (!ring || ring->magic != DHD_RING_MAGIC) {
21846 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21847 return ret;
21848 }
21849
21850 mutex_lock(&ring->ring_sync);
21851 if (ring->type == DHD_RING_TYPE_FIXED) {
21852 ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
21853 }
21854 mutex_unlock(&ring->ring_sync);
21855 return ret;
21856}
21857
/* free the first locked element */
void
dhd_ring_lock_free_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	mutex_lock(&ring->ring_sync);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock_free_first(&ring->fixed);
	}
	mutex_unlock(&ring->ring_sync);
}

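/*
 * Illustrative only (compiled out): a typical consumer of the ring lock
 * API above. The element-processing step is hypothetical; the point is
 * the lock -> drain -> unlock sequence that keeps the producer from
 * recycling entries while they are being read.
 */
#if 0
static void
dhd_ring_lock_usage_example(void *ring, void *first, void *last)
{
	void *elem;
	int cnt;

	/* Pin the [first, last] range so the writer cannot reuse it */
	dhd_ring_lock(ring, first, last);

	cnt = dhd_ring_lock_get_count(ring);
	while (cnt-- > 0) {
		elem = dhd_ring_lock_get_first(ring);
		if (!elem) {
			break;
		}
		/* ... consume the element pointed to by elem ... */
		dhd_ring_lock_free_first(ring);
	}

	/* Release anything still pinned */
	dhd_ring_lock_free(ring);
}
#endif
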
#ifdef DHD_DUMP_MNGR
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
#define DHD_VFS_INODE(dir) (dir->d_inode)
#else
#define DHD_VFS_INODE(dir) d_inode(dir)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
#else
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */

static int
dhd_file_delete(char *path)
{
	struct path file_path;
	int err;
	struct dentry *dir;

	err = kern_path(path, 0, &file_path);

	if (err < 0) {
		return err;
	}

	/*
	 * The leading FALSE keeps the condition well-formed when the
	 * version-gated checks below are compiled out; each check carries
	 * its own leading ||.
	 */
	if (FALSE
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
		|| !d_is_file(file_path.dentry)
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
		|| d_really_is_negative(file_path.dentry)
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
	) {
		err = -EINVAL;
	} else {
		dir = dget_parent(file_path.dentry);

		if (!IS_ERR(dir)) {
			err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
			dput(dir);
		} else {
			err = PTR_ERR(dir);
		}
	}

	path_put(&file_path);

	if (err < 0) {
		DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
	}

	return err;
}

/*
 * Return the file-manager slot for dump type fname: reuse the slot whose
 * type_name matches, or claim (and initialize) the first empty slot.
 * Returns -1 when all slots are taken by other dump types.
 */
static int
dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
{
	int i;
	int fm_idx = -1;

	for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
		if (strlen(fm_ptr->elems[i].type_name) == 0) {
			fm_idx = i;
			break;
		}
		if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
			fm_idx = i;
			break;
		}
	}

	if (fm_idx == -1) {
		return fm_idx;
	}

	if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
		strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
		fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
		fm_ptr->elems[fm_idx].file_idx = 0;
	}

	return fm_idx;
}

/*
 * dhd_dump_file_manage_enqueue - enqueue a dump file path and delete the
 * oldest file for that dump type if the file count is at its maximum.
 */
void
dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
{
	int fm_idx;
	int fp_idx;
	dhd_dump_file_manage_t *fm_ptr;
	DFM_elem_t *elem;

	if (!dhd || !dhd->dump_file_manage) {
		DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
		return;
	}

	fm_ptr = dhd->dump_file_manage;

	/* find file_manage idx */
	DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
	if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
		DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
			__FUNCTION__, fname));
		return;
	}

	elem = &fm_ptr->elems[fm_idx];
	fp_idx = elem->file_idx;
	DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
		__FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));

	/* delete the oldest file */
	if (strlen(elem->file_path[fp_idx]) != 0) {
		if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
			DHD_ERROR(("%s(): Failed to delete file: %s\n",
				__FUNCTION__, elem->file_path[fp_idx]));
		} else {
			DHD_ERROR(("%s(): Succeeded to delete file: %s\n",
				__FUNCTION__, elem->file_path[fp_idx]));
		}
	}

	/* save dump file path */
	strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
	elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';

	/* advance to the next file index */
	elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
}
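
/*
 * Illustrative only: with DHD_DUMP_FILE_COUNT_MAX assumed to be 2 for
 * this example, enqueueing three "mem_dump" paths behaves as:
 *
 *	enqueue(".../mem_dump_1") -> stored in slot 0, nothing deleted
 *	enqueue(".../mem_dump_2") -> stored in slot 1, nothing deleted
 *	enqueue(".../mem_dump_3") -> slot 0 reused, mem_dump_1 unlinked
 *
 * so at most DHD_DUMP_FILE_COUNT_MAX dumps per type remain on disk.
 */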
#endif /* DHD_DUMP_MNGR */

#ifdef DHD_MAP_LOGGING
/* Will be called from the SMMU fault handler */
void
dhd_debug_info_dump(void)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
	uint32 irq = (uint32)-1;

	if (!dhdp) {
		/* Driver not attached yet; nothing to dump */
		return;
	}

	DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
	dhdp->smmu_fault_occurred = TRUE;

	/* Disable PCIe IRQ */
	dhdpcie_get_pcieirq(dhdp->bus, &irq);
	if (irq != (uint32)-1) {
		disable_irq_nosync(irq);
	}

	DHD_OS_WAKE_LOCK(dhdp);
	dhd_prot_debug_info_print(dhdp);
	osl_dma_map_dump(dhdp->osh);
#ifdef DHD_MAP_PKTID_LOGGING
	dhd_pktid_logging_dump(dhdp);
#endif /* DHD_MAP_PKTID_LOGGING */
#ifdef DHD_FW_COREDUMP
	/* Load the dongle-side dump into host memory */
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
	dhd_bus_mem_dump(dhdp);
#endif /* DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_debug_info_dump);
#endif /* DHD_MAP_LOGGING */
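
/*
 * Illustrative only (compiled out): dhd_debug_info_dump() is exported so
 * that platform SMMU fault handling code outside this driver can capture
 * the DHD DMA-mapping state at fault time. The notifier below is a
 * hypothetical hookup; the real registration is platform-specific.
 */
#if 0
static int
wlan_smmu_fault_notifier(struct notifier_block *nb,
	unsigned long action, void *data)
{
	dhd_debug_info_dump();
	return NOTIFY_OK;
}
#endif
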
int
dhd_get_host_whitelist_region(void *buf, uint len)
{
	dma_wl_addr_region_host_t *host_reg;
	uint64 wl_end;

	if ((wlreg_len_h == 0) && (wlreg_len_l == 0)) {
		return BCME_RANGE;
	}

	host_reg = (dma_wl_addr_region_host_t *)buf;
	wl_end = wlreg_len_h + wlreg_h;
	wl_end = (wl_end & MASK_32_BITS) << 32;
	wl_end += wlreg_l;
	wl_end += wlreg_len_l;
	/* Now write the whitelist region(s) */
	host_reg->hreg_start.addr_low = wlreg_l;
	host_reg->hreg_start.addr_high = wlreg_h;
	host_reg->hreg_end.addr_low = EXTRACT_LOW32(wl_end);
	host_reg->hreg_end.addr_high = EXTRACT_HIGH32(wl_end);
	return BCME_OK;
}

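/*
 * Worked example (values are illustrative): with wlreg_h = 0x00000001,
 * wlreg_l = 0x80000000, wlreg_len_h = 0 and wlreg_len_l = 0x00100000,
 * the end address is computed as
 *
 *	((0x0 + 0x1) << 32) + 0x80000000 + 0x00100000 = 0x180100000
 *
 * so hreg_end.addr_high = 0x00000001 and hreg_end.addr_low = 0x80100000.
 */
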
#ifdef SUPPORT_SET_TID
/*
 * Set a custom TID value for UDP frames based on the UID value.
 * This is triggered by the Android private command below.
 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
 * Mode 0 (SET_TID_OFF)          : Disable changing TID
 * Mode 1 (SET_TID_ALL_UDP)      : Change TID for all UDP frames
 * Mode 2 (SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
 */
void
dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
{
	struct ether_header *eh = NULL;
	struct sock *sk = NULL;
	uint8 *pktdata = NULL;
	uint8 *ip_hdr = NULL;
	uint8 cur_prio;
	uint8 prio;
	uint32 uid;

	if (dhdp->tid_mode == SET_TID_OFF) {
		return;
	}

	pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
	eh = (struct ether_header *)pktdata;
	ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;

	if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
		return;
	}

	cur_prio = PKTPRIO(pkt);
	prio = dhdp->target_tid;
	uid = dhdp->target_uid;

	/* Only retag best-effort frames that do not already carry the target TID */
	if ((cur_prio == prio) ||
		(cur_prio != PRIO_8021D_BE)) {
		return;
	}

	sk = ((struct sk_buff *)(pkt))->sk;

	if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
		(sk && (uid == __kuid_val(sock_i_uid(sk))))) {
		PKTSETPRIO(pkt, prio);
	}
}
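
/*
 * Example (using the command format documented above):
 * "DRIVER SET_TID 2 10010 5" selects SET_TID_BASED_ON_UID with a target
 * UID of 10010 and a custom TID of 5, so best-effort UDP frames whose
 * socket owner has UID 10010 are retagged with priority 5 here. The UID
 * and TID values are illustrative.
 */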
#endif /* SUPPORT_SET_TID */

#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct *work)
{
	dhd_if_t *ifp = NULL;
	dhd_pub_t *dhdp = NULL;
	/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
	ifp = container_of(work, dhd_if_t, blk_tsfl_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
	if (ifp) {
		dhdp = &ifp->info->pub;
		if (dhdp) {
			if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE) ||
				(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
				DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
				wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
			} else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE) ||
				(dhdp->op_mode & DHD_FLAG_STA_MODE)) {
				DHD_ERROR(("Disconnect due to TCP SYNC FLOOD ATTACK\n"));
				wl_cfg80211_disassoc(ifp->net);
			}
		}
	}
}

void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
{
	ifp->tsync_rcvd = 0;
	ifp->tsyncack_txed = 0;
	ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
}

void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
{
	dhd_if_t *ifp = NULL;

	if (dev) {
		ifp = DHD_DEV_IFP(dev);
	}
	if (ifp) {
		dhd_reset_tcpsync_info_by_ifp(ifp);
	}
}
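
/*
 * Illustrative only: the reset helpers above are meant to be called when
 * an interface (re)associates, so that SYN/SYN-ACK counts from a previous
 * session cannot trip the flood handler. A hypothetical call site:
 *
 *	dhd_reset_tcpsync_info_by_dev(ndev);
 */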
#endif /* DHDTCPSYNC_FLOOD_BLK */