update dhd to 100.10.545.11 [1/1]
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.100.10.315.x / dhd_linux.c
/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 822756 2019-05-30 13:20:26Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmstdlib_s.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE)
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE || SHOW_LOGTRACE */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <dhd_linux_priv.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <bcmiov.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>

#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif // endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif // endif
#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
#endif /* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif // endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif

#ifdef CSI_SUPPORT
#include <dhd_csi.h>
#endif /* CSI_SUPPORT */

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif // endif

#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
	defined(CONFIG_SOC_EXYNOS9820)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */

#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */

#ifdef AMPDU_VO_ENABLE
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */

#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
#include <eapol.h>
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */

#define IP_PROT_RESERVED	0xFF

#ifdef DHD_4WAYM4_FAIL_DISCONNECT
static void dhd_m4_state_handler(struct work_struct * work);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */

#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct * work);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */

#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif // endif

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL	0x001A11
#endif // endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */

#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH	1000000	/* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif // endif

#include <wl_android.h>

/* Maximum STA per radio */
#define DHD_MAX_STA	32

#ifdef CUSTOMER_HW_AMLOGIC
#include <linux/amlogic/wifi_dt.h>
#endif

const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
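/*
 * Worked example: WME_PRIO2AC(7) expands to wme_fifo2ac[prio2fifo[7]] =
 * wme_fifo2ac[3] = 3 (AC_VO), while priority 0 (best effort) maps through
 * fifo 1 to AC 1. prio2fifo[] folds the eight 802.1D priorities onto four
 * hardware fifos, and wme_fifo2ac[] names the WME access category per fifo.
 */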

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || FORCE_WOWLAN */
static void dhd_hang_process(struct work_struct *work_data);
MODULE_LICENSE("GPL and additional rights");

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force);

#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS	5
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */

#include <dhd_bus.h>

#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */

#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif // endif
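/*
 * DBUS_RX_BUFFER_SIZE_DHD() sizes one host rx buffer: interface MTU plus the
 * link-layer header plus the dongle bus/protocol header (pub.hdrlen). Under
 * PROP_TXSTATUS an extra 128 bytes are reserved; the assumption here is that
 * this is headroom for the wlfc signaling data carried with rx frames.
 */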

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend		register_pre_suspend
#define unregister_early_suspend	unregister_pre_suspend
#define early_suspend			pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN	50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
#include <linux/nl80211.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */

#if defined(ARGOS_NOTIFY_CB)
/* ARGOS notifier data */
static struct notifier_block argos_wifi; /* STA */
static struct notifier_block argos_p2p; /* P2P */
argos_rps_ctrl argos_rps_ctrl_data;
#endif // endif

#ifdef DHD_FW_COREDUMP
static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */

#ifdef DHD_LOG_DUMP

struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];

/* Only the headers for the log dump ring buffers are stored in this array;
 * headers for sections like 'dhd dump' and 'ext trap' are not, because those
 * sections are not log ring buffers.
 */
dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
	{GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
	{PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
	{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};

static int dld_buf_size[DLD_BUFFER_NUM] = {
	LOG_DUMP_GENERAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_GENERAL */
	LOG_DUMP_PRESERVE_MAX_BUFSIZE,	/* DLD_BUF_TYPE_PRESERVE */
	LOG_DUMP_SPECIAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_SPECIAL */
};

static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type);
static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size);
void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH	"/system/bin/wldu"
static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
#endif /* DHD_DEBUG_UART */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,
};

#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */

dhd_pub_t *g_dhd_pub = NULL;

#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
#endif /* defined (BT_OVER_SDIO) */

#ifdef WL_STATIC_IF
bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#endif /* WL_STATIC_IF */

atomic_t exit_in_progress = ATOMIC_INIT(0);

static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
static struct sock *nl_to_event_sk = NULL;
int sender_pid = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
struct netlink_kernel_cfg dhd_netlink_cfg = {
	.groups = 1,
	.input = dhd_process_daemon_msg,
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif // endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware, chip and chip revision info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if defined(BCMLXSDMMC) || defined(BCMDBUS)
struct semaphore dhd_registration_sem;
#endif /* BCMLXSDMMC || BCMDBUS */
d2839953
RC
446
447#ifdef DHD_LOG_DUMP
448int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
449module_param(logdump_max_filesize, int, 0644);
450int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
451module_param(logdump_max_bufsize, int, 0644);
452int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
453int logdump_periodic_flush = FALSE;
454module_param(logdump_periodic_flush, int, 0644);
455#ifdef EWP_ECNTRS_LOGGING
456int logdump_ecntr_enable = TRUE;
457#else
458int logdump_ecntr_enable = FALSE;
459#endif /* EWP_ECNTRS_LOGGING */
460module_param(logdump_ecntr_enable, int, 0644);
965f77c4
RC
461#ifdef EWP_RTT_LOGGING
462int logdump_rtt_enable = TRUE;
463#else
464int logdump_rtt_enable = FALSE;
465#endif /* EWP_RTT_LOGGING */
466module_param(logdump_rtt_enable, int, 0644);
d2839953
RC
467#endif /* DHD_LOG_DUMP */
468#ifdef EWP_EDL
469int host_edl_support = TRUE;
470module_param(host_edl_support, int, 0644);
471#endif // endif
472
473/* deferred handlers */
474static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
475static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
476static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
477static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
478#ifdef WL_NATOE
479static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
480static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
481#endif /* WL_NATOE */
482
483#ifdef DHD_UPDATE_INTF_MAC
484static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
485#endif /* DHD_UPDATE_INTF_MAC */
486#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
487static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
488#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
489#ifdef WL_CFG80211
490extern void dhd_netdev_free(struct net_device *ndev);
491#endif /* WL_CFG80211 */
492static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
493
965f77c4
RC
494#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
495static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
496#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
497
d2839953
RC
498#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
499/* update rx_pkt_chainable state of dhd interface */
500static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
501#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
		ARP_OL_UPDATE_HOST_CACHE);
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */

/* wl event forwarding */
#ifdef WL_EVENT_ENAB
uint wl_event_enable = true;
#else
uint wl_event_enable = false;
#endif /* WL_EVENT_ENAB */
module_param(wl_event_enable, uint, 0660);

/* logtrace packet forwarding */
#ifdef LOGTRACE_PKT_SENDUP
uint logtrace_pkt_sendup = true;
#else
uint logtrace_pkt_sendup = false;
#endif /* LOGTRACE_PKT_SENDUP */
module_param(logtrace_pkt_sendup, uint, 0660);

/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif // endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDBUS)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* !BCMDBUS */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */

#ifdef DHD_MSI_SUPPORT
uint enable_msi = TRUE;
module_param(enable_msi, uint, 0);
#endif /* DHD_MSI_SUPPORT */

#ifdef DHD_SSSR_DUMP
int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
extern uint support_sssr_dump;
module_param(support_sssr_dump, uint, 0);
#endif /* DHD_SSSR_DUMP */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP */

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);

extern uint dma_ring_indices;
module_param(dma_ring_indices, uint, 0644);

extern bool h2d_phase;
module_param(h2d_phase, bool, 0644);
extern bool force_trap_bad_h2d_phase;
module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */

#ifdef FORCE_TPOWERON
/*
 * On Fire's reference platform, coming out of L1.2,
 * there is a constant delay of 45us between CLKREQ# and stable REFCLK
 * Due to this delay, with tPowerOn < 50
 * there is a chance of the refclk sense to trigger on noise.
 *
 * 0x29 when written to L1SSControl2 translates to 50us.
 */
#define FORCE_TPOWERON_50US	0x29
uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
module_param(tpoweron_scale, uint, 0644);
#endif /* FORCE_TPOWERON */
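/*
 * Decoding the magic value against the PCIe L1 PM Substates Control 2 register
 * layout: 0x29 = 0b00101001, T_PowerOn scale (bits 1:0) = 01 (10 us units) and
 * T_PowerOn value (bits 7:3) = 0b00101 = 5, giving 5 * 10 us = 50 us.
 */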

#ifdef SHOW_LOGTRACE
static char *logstrs_path = "/data/vendor/misc/wifi/logstrs.bin";
char *st_str_file_path = "/data/vendor/misc/wifi/rtecdc.bin";
static char *map_file_path = "/data/vendor/misc/wifi/rtecdc.map";
static char *rom_st_str_file_path = "/data/vendor/misc/wifi/roml.bin";
static char *rom_map_file_path = "/data/vendor/misc/wifi/roml.map";
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */

#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT()			do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)		do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)	do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
#else
uint dhd_roam_disable = 0;
#endif /* BCMCCX */

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif // endif

static uint pcie_txs_metadata_enable = 0;	/* Enable TX status metadata report */
module_param(pcie_txs_metadata_enable, int, 0);

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */

#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */

#if defined(BCMSUP_4WAY_HANDSHAKE)
/* Use in dongle supplicant for 4-way handshake */
#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
/* Enable idsup by default (if supported in fw) */
uint dhd_use_idsup = 1;
#else
uint dhd_use_idsup = 0;
#endif /* WLFBT || WL_ENABLE_IDSUP */
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */

#ifndef BCMDBUS
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);
#endif /* !BCMDBUS */

#ifdef ECOUNTER_PERIODIC_DISABLE
uint enable_ecounter = FALSE;
#else
uint enable_ecounter = TRUE;
#endif // endif
module_param(enable_ecounter, uint, 0);

/* TCM verification flag */
uint dhd_tcm_test_enable = FALSE;
module_param(dhd_tcm_test_enable, uint, 0644);

extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);

#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
#ifdef DHD_EXPORT_CNTL_FILE
int pmmode_val;
#endif /* DHD_EXPORT_CNTL_FILE */
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* DHD_PM_CONTROL_FROM_FILE */

#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

#ifndef BCMDBUS
static void dhd_dpc(ulong data);
#endif /* !BCMDBUS */
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

	printf("%s: action=%ld, suspend=%d, suspend_mode=%d\n",
		__FUNCTION__, action, suspend, dhdinfo->pub.conf->suspend_mode);
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_suspend_resume_helper(dhdinfo, suspend, 0);
#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
		dhd_wlfc_suspend(&dhdinfo->pub);
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
		dhd_wlfc_resume(&dhdinfo->pub);
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
		if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
			dhd_suspend_resume_helper(dhdinfo, suspend, 0);
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
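	/* Publish the flag before the PM transition continues: dhd_mmc_suspend is
	 * polled by the SDIO bus code to hold off host controller access while the
	 * system sleeps, and smp_mb() orders this store ahead of later PM work.
	 */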
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif

	return ret;
}

/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

#if defined(DHD_H2D_LOG_TIME_SYNC)
static void
dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
#endif /* DHD_H2D_LOG_TIME_SYNC */

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd;	/* cached pointer to dhd_info in netdevice priv */
	dhd_if_t   * ifp;	/* cached pointer to dhd_if in netdevice priv */
	int          ifidx;	/* interface index */
	void       * lkup;
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE	(sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)	((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
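/* Accessors for the fields cached in the net_device private area at interface
 * registration time, avoiding a dhd_info/dhd_if lookup on every hot-path call.
 * (Assumption: DEV_PRIV() is supplied by the DHD OS abstraction headers and
 * resolves to the netdev private data, i.e. netdev_priv() on current kernels.)
 */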

/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
                  int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}

/* Return interface pointer */
struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);

	if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
		return NULL;

	return dhdp->info->iflist[ifidx];
}

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains from reducing branch conditionals, instruction parallelism,
 * dual issue, reducing load shadows, avail of larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL  (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif // endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);

/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			if (flow_ring_node) {
				flow_queue_t *queue = &flow_ring_node->queue;

				DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
				flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

				if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
					void * pkt;
					while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
						NULL) {
						PKTFREE(dhdp->osh, pkt, TRUE);
					}
				}

				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
				ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
			}
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}

/** Allocate a dhd_sta object from the dhd pool. */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
	uint16 idx;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	idx = id16_map_alloc(dhdp->staid_allocator);
	if (idx == ID16_INVALID) {
		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
	sta = &sta_pool[idx];

	ASSERT((sta->idx == ID16_INVALID) &&
		(sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));

	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);

	sta->idx = idx; /* implying allocated */

	return sta;
}

/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}

/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
}

/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
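	/* Each sta was handed a valid id above so that dhd_sta_free() below can
	 * return both the object and its id to their respective free pools,
	 * keeping the id allocator and the sta pool consistent.
	 */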
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}

/** Destruct the pool of dhd_sta_t objects.
 * Caller must ensure that no STA objects are currently associated with an if.
 */
static void
dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
{
	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;

	if (sta_pool) {
		int idx;
		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
		for (idx = 1; idx <= max_sta; idx++) {
			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
			ASSERT(sta_pool[idx].idx == ID16_INVALID);
		}
		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
		dhdp->sta_pool = NULL;
	}

	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
	dhdp->staid_allocator = NULL;
}

/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}
}

/** Find STA with MAC address ea in an interface's STA list. */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry(sta, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_INFO(("%s: Found STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG((char *)ea)));
			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
			return sta;
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return DHD_STA_NULL;
}

/** Add STA into the interface's STA list. */
dhd_sta_t *
dhd_add_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
		DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
		return DHD_STA_NULL;
	}

	sta = dhd_sta_alloc((dhd_pub_t *)pub);
	if (sta == DHD_STA_NULL) {
		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);

	/* link the sta and the dhd interface */
	sta->ifp = ifp;
	sta->ifidx = ifidx;
	INIT_LIST_HEAD(&sta->list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_add_tail(&sta->list, &ifp->sta_list);

	DHD_ERROR(("%s: Adding STA " MACDBG "\n",
		__FUNCTION__, MAC2STRDBG((char *)ea)));

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return sta;
}

/** Delete all STAs from the interface's STA list. */
void
dhd_del_all_sta(void *pub, int ifidx)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#ifdef DHD_L2_FILTER
		if (ifp->parp_enable) {
			/* clear Proxy ARP cache of specific Ethernet Address
			 * before dhd_sta_free() resets (zeroes) the sta entry
			 */
			bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
				ifp->phnd_arp_table, FALSE,
				sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
		}
#endif /* DHD_L2_FILTER */
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}

/** Delete STA from the interface's STA list. */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG(sta->ea.octet)));
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear Proxy ARP cache of specific Ethernet Address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}

/** Add STA if it doesn't exist. Not reentrant. */
dhd_sta_t*
dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;

	sta = dhd_find_sta(pub, ifidx, ea);

	if (!sta) {
		/* Add entry */
		sta = dhd_add_sta(pub, ifidx, ea);
	}

	return sta;
}

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head *
dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
{
	unsigned long flags;
	dhd_sta_t *sta, *snapshot;

	INIT_LIST_HEAD(snapshot_list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry(sta, &ifp->sta_list, list) {
		/* allocate one and add to snapshot */
		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
		if (snapshot == NULL) {
			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
			continue;
		}

		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);

		INIT_LIST_HEAD(&snapshot->list);
		list_add_tail(&snapshot->list, snapshot_list);
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return snapshot_list;
}

static void
dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
{
	dhd_sta_t *sta, *next;

	list_for_each_entry_safe(sta, next, snapshot_list, list) {
		list_del(&sta->list);
		MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
	}
}
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */

#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
void
dhd_axi_error_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	schedule_work(&dhd->axi_error_dispatcher_work);
}

static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, axi_error_dispatcher_work);
	dhd_axi_error(&dhd->pub);
}
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */

/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_if_t *ifp;
	dhd_info_t *dhd = dhdp->info;
	int i;

	ASSERT(bssidx < DHD_MAX_IFS);
	ASSERT(dhdp);

	for (i = 0; i < DHD_MAX_IFS; i++) {
		ifp = dhd->iflist[i];
		if (ifp && (ifp->bssidx == bssidx)) {
			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
				ifp->name, bssidx, i));
			break;
		}
	}
	return i;
}
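/*
 * dhd_rxf_enqueue()/dhd_rxf_dequeue() below implement the single-producer,
 * single-consumer ring feeding the rx-frame thread: skbbuf[] holds MAXSKBPEND
 * slots, the producer writes at store_idx, the consumer reads at sent_idx, and
 * both wrap via the power-of-two mask. A slot must have drained back to NULL
 * before it can be reused, and both sides run under the rxf lock.
 */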

static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif // endif
		return BCME_ERROR;
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}

static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;

	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	if (skb == NULL) {
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}

	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));

	dhd_os_rxfunlock(dhdp);

	return skb;
}

965f77c4
RC
1611int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1612{
1613 if (prepost) { /* pre process */
1614 dhd_read_cis(dhdp);
1615 dhd_check_module_cid(dhdp);
1616 dhd_check_module_mac(dhdp);
1617 dhd_set_macaddr_from_file(dhdp);
1618 } else { /* post process */
1619 dhd_write_macaddr(&dhdp->mac);
1620 dhd_clear_cis(dhdp);
d2839953
RC
1621 }
1622
965f77c4 1623 return 0;
d2839953
RC
1624}
1625
#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
{
	struct net_device *primary_ndev;
	struct bcm_cfg80211 *cfg;
	unsigned long flags = 0;
	primary_ndev = dhd_linux_get_primary_netdev(dhdp);

	if (!primary_ndev) {
		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
		return BCME_ERROR;
	}
	cfg = wl_get_cfg(primary_ndev);

	if (!cfg) {
		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
		return BCME_ERROR;
	}

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
		dhd_os_busbusy_wake(dhdp);
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
		return BCME_ERROR;
	}
	DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

	DHD_OS_WAKE_LOCK(dhdp);
	/* Send the event only if the HAL has started; otherwise skip it and
	 * just clear the dump state below.
	 */
	if (wl_cfg80211_is_hal_started(cfg)) {
		int timeleft = 0;

		DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
		dhd_dbg_send_urgent_evt(dhdp, NULL, 0);

		DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
			__FUNCTION__, dhdp->dhd_bus_busy_state));
		timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
			&dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
		if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
			DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
				__FUNCTION__, dhdp->dhd_bus_busy_state));
		}
	} else {
		DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
	}
	DHD_OS_WAKE_UNLOCK(dhdp);
	/* In case of dhd_os_busbusy_wait_bitmask() timeout,
	 * hal dump bit will not be cleared. Hence clearing it here.
	 */
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

	return BCME_OK;
}
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */

// terence 20160615: fix build error if ARP_OFFLOAD_SUPPORT is removed
#if defined(PKT_FILTER_SUPPORT)
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
static bool
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
{
	bool _apply = FALSE;
	/* In case of IBSS mode, apply arp pkt filter */
	if (op_mode_param & DHD_FLAG_IBSS_MODE) {
		_apply = TRUE;
		goto exit;
	}
	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
		_apply = TRUE;
		goto exit;
	}

exit:
	return _apply;
}
#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */

void
dhd_set_packet_filter(dhd_pub_t *dhd)
{
	int i;

	DHD_TRACE(("%s: enter\n", __FUNCTION__));
	if (dhd_pkt_filter_enable) {
		for (i = 0; i < dhd->pktfilter_count; i++) {
			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
		}
	}
}

void
dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
		!dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
		return;
	}
	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
		(dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
		dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
// terence 20160615: fix build error if ARP_OFFLOAD_SUPPORT is removed
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter: "
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
}

int
dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
{
	char *filterp = NULL;
	int filter_id = 0;

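	/*
	 * The filter strings below use the firmware's pkt_filter_add layout:
	 * "<id> <polarity> <type> <offset> <bitmask> <pattern>". For example,
	 * "105 0 0 12 0xFFFF 0x0806" matches the 16-bit ethertype field at
	 * byte offset 12 against 0x0806, i.e. ARP frames.
	 */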
	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filter_id = 102;
			if (FW_SUPPORTED((dhdp), pf6)) {
				if (dhdp->pktfilter[num] != NULL) {
					dhd_pktfilter_offload_delete(dhdp, filter_id);
					dhdp->pktfilter[num] = NULL;
				}
				if (!add_remove) {
					filterp = DISCARD_IPV4_MCAST;
					add_remove = 1;
					break;
				}
			}
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filter_id = 103;
			if (FW_SUPPORTED((dhdp), pf6)) {
				if (dhdp->pktfilter[num] != NULL) {
					dhd_pktfilter_offload_delete(dhdp, filter_id);
					dhdp->pktfilter[num] = NULL;
				}
				if (!add_remove) {
					filterp = DISCARD_IPV6_MCAST;
					add_remove = 1;
					break;
				}
			}
			filterp = "103 0 0 0 0xFFFF 0x3333";
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		case DHD_ARP_FILTER_NUM:
			filterp = "105 0 0 12 0xFFFF 0x0806";
			filter_id = 105;
			break;
		case DHD_BROADCAST_ARP_FILTER_NUM:
			filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
				" 0xFFFFFFFFFFFF0000000000000806";
			filter_id = 106;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhdp->pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
	} else { /* Delete filter */
		if (dhdp->pktfilter[num]) {
			dhd_pktfilter_offload_delete(dhdp, filter_id);
			dhdp->pktfilter[num] = NULL;
		}
	}

	return 0;
}
#endif /* PKT_FILTER_SUPPORT */

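/*
 * Apply (value = 1) or undo (value = 0) the extra power-save settings used
 * while the kernel is suspended: PM mode, packet filters, ARP/ND offload,
 * multicast handling, DTIM skipping and roaming parameters. Exactly which
 * knobs are touched depends on the feature defines compiled in below.
 */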
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
	int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
	/* wl_pkt_filter_enable_t enable_parm; */
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
	int ret = 0;
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
	int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int roam_time_thresh = 0;	/* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
	uint roamvar = 1;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
	int bcn_li_bcn = 1;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint nd_ra_filter = 0;
#ifdef ENABLE_IPMCAST_FILTER
	int ipmcast_l2filter;
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef CUSTOM_EVENT_PM_WAKE
	uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
#endif /* CUSTOM_EVENT_PM_WAKE */
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
	struct dhd_info *dhdinfo;
	uint32 allmulti;
	uint i;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */

#if defined(BCMPCIE)
	int lpas = 0;
	int dtim_period = 0;
	int bcn_interval = 0;
	int bcn_to_dly = 0;
#if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
#else
	int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
#endif /* BCMPCIE */

	if (!dhd)
		return -ENODEV;

#ifdef PASS_ALL_MCAST_PKTS
	dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
	if (dhd->up) {
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif // endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

#ifdef PKT_FILTER_SUPPORT
			/* Enable packet filter,
			 * only allow unicast packet to send up
			 */
			dhd_enable_packet_filter(1, dhd);
#ifdef APF
			dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
			dhd_arp_offload_enable(dhd, TRUE);
#endif /* ARP_OFFLOAD_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 0;
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
					ret = dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
						sizeof(allmulti), NULL, 0, TRUE);
					if (ret < 0) {
						DHD_ERROR(("%s allmulti failed %d\n",
							__FUNCTION__, ret));
					}
				}
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_dtim in WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
#if defined(BCMPCIE)
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
				&bcn_interval);
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s bcn_li_dtim failed %d\n", __FUNCTION__, ret));
			}
			if ((bcn_li_dtim * dtim_period * bcn_interval) >=
				MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
				/*
				 * Increase max roaming threshold from 2 secs to 8 secs
				 * the real roam threshold is MIN(max_roam_threshold,
				 * bcn_timeout/2)
				 */
				lpas = 1;
				ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
					NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__, ret));
				}
				bcn_to_dly = 1;
				/*
				 * if bcn_to_dly is 1, the real roam threshold is
				 * MIN(max_roam_threshold, bcn_timeout - 1);
				 * notify link down event after roaming procedure complete
				 * if we hit bcn_timeout while we are in roaming progress.
				 */
				ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
					sizeof(bcn_to_dly), NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s bcn_to_dly failed %d\n", __FUNCTION__, ret));
				}
				/* Increase beacon timeout to 6 secs or use bigger one */
				bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
				ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
					sizeof(bcn_timeout), NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, ret));
				}
			}
#else
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
#endif /* BCMPCIE */
#ifdef WL_CFG80211
			/* Disable cfg80211 feature events during suspend */
			ret = wl_cfg80211_config_suspend_events(
				dhd_linux_get_primary_netdev(dhd), FALSE);
			if (ret < 0) {
				DHD_ERROR(("failed to disable events (%d)\n", ret));
			}
#endif /* WL_CFG80211 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, ret));
			}
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
			ret = dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
				sizeof(roam_time_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s roam_time_thresh failed %d\n", __FUNCTION__, ret));
			}
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			/* Disable firmware roaming during suspend */
			ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
				sizeof(roamvar), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
			}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			if (bcn_li_dtim) {
				bcn_li_bcn = 0;
			}
			ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
				sizeof(bcn_li_bcn), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
			}
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#if defined(WL_CFG80211) && defined(WL_BCNRECV)
			ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
			if (ret != BCME_OK) {
				DHD_ERROR(("failed to stop beacon recv event on"
					" suspend state (%d)\n", ret));
			}
#endif /* WL_CFG80211 && WL_BCNRECV */
#ifdef NDO_CONFIG_SUPPORT
			if (dhd->ndo_enable) {
				if (!dhd->ndo_host_ip_overflow) {
					/* enable ND offload on suspend */
					ret = dhd_ndo_enable(dhd, TRUE);
					if (ret < 0) {
						DHD_ERROR(("%s: failed to enable NDO\n",
							__FUNCTION__));
					}
				} else {
					DHD_INFO(("%s: NDO disabled on suspend due to"
						" HW capacity\n", __FUNCTION__));
				}
			}
#endif /* NDO_CONFIG_SUPPORT */
#ifndef APF
			if (FW_SUPPORTED(dhd, ndoe))
#else
			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
#endif /* APF */
			{
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
					(char *)&nd_ra_filter, sizeof(nd_ra_filter),
					NULL, 0, TRUE);
				if (ret < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
			dhd_os_suppress_logging(dhd, TRUE);
#ifdef ENABLE_IPMCAST_FILTER
			ipmcast_l2filter = 1;
			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
				(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
				NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
			}
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = CUSTOM_INTR_WIDTH;
			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
				sizeof(intr_width), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#ifdef CUSTOM_EVENT_PM_WAKE
			pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
			ret = dhd_iovar(dhd, 0, "const_awake_thresh",
				(char *)&pm_awake_thresh,
				sizeof(pm_awake_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s set const_awake_thresh failed %d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef CONFIG_SILENT_ROAM
			if (!dhd->sroamed) {
				ret = dhd_sroam_set_mon(dhd, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s set sroam failed %d\n",
						__FUNCTION__, ret));
				}
			}
			dhd->sroamed = FALSE;
#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
		} else {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif // endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = 0;
			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
				sizeof(intr_width), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
			power_mode = PM_FAST;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#if defined(WL_CFG80211) && defined(WL_BCNRECV)
			ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
			if (ret != BCME_OK) {
				DHD_ERROR(("failed to resume beacon recv state (%d)\n",
					ret));
			}
#endif /* WL_CFG80211 && WL_BCNRECV */
#ifdef ARP_OFFLOAD_SUPPORT
			dhd_arp_offload_enable(dhd, FALSE);
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#ifdef APF
			dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 1;
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
					ret = dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
						sizeof(allmulti), NULL, 0, TRUE);
					if (ret < 0) {
						DHD_ERROR(("%s: allmulti failed:%d\n",
							__FUNCTION__, ret));
					}
				}
			}
#endif /* PASS_ALL_MCAST_PKTS */
#if defined(BCMPCIE)
			/* restore pre-suspend setting */
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_li_dtim failed:%d\n", __FUNCTION__, ret));
			}
			ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
				0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
			}
			ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
				sizeof(bcn_to_dly), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
			}
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_timeout failed:%d\n", __FUNCTION__, ret));
			}
#else
			/* restore pre-suspend setting for dtim_skip */
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
			}
#endif /* BCMPCIE */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT;
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_timeout failed:%d\n", __FUNCTION__, ret));
			}
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = 2000;
			ret = dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
				sizeof(roam_time_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:roam_time_thresh failed:%d\n", __FUNCTION__, ret));
			}
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			roamvar = dhd_roam_disable;
			ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
				sizeof(roamvar), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
			}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
				sizeof(bcn_li_bcn), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s: bcn_li_bcn failed:%d\n", __FUNCTION__, ret));
			}
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef NDO_CONFIG_SUPPORT
			if (dhd->ndo_enable) {
				/* Disable ND offload on resume */
				ret = dhd_ndo_enable(dhd, FALSE);
				if (ret < 0) {
					DHD_ERROR(("%s: failed to disable NDO\n",
						__FUNCTION__));
				}
			}
#endif /* NDO_CONFIG_SUPPORT */
#ifndef APF
			if (FW_SUPPORTED(dhd, ndoe))
#else
			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
#endif /* APF */
			{
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
					(char *)&nd_ra_filter, sizeof(nd_ra_filter),
					NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
				}
			}
			dhd_os_suppress_logging(dhd, FALSE);
#ifdef ENABLE_IPMCAST_FILTER
			ipmcast_l2filter = 0;
			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
				(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
				NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
			}
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef CUSTOM_EVENT_PM_WAKE
			ret = dhd_iovar(dhd, 0, "const_awake_thresh",
				(char *)&pm_awake_thresh,
				sizeof(pm_awake_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s set const_awake_thresh failed %d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef CONFIG_SILENT_ROAM
			ret = dhd_sroam_set_mon(dhd, FALSE);
			if (ret < 0) {
				DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
			}
#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef WL_CFG80211
			/* Enable cfg80211 feature events during resume */
			ret = wl_cfg80211_config_suspend_events(
				dhd_linux_get_primary_netdev(dhd), TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to enable events (%d)\n", ret));
			}
#endif /* WL_CFG80211 */
#ifdef DHD_LB_IRQSET
			dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
#endif /* DHD_LB_IRQSET */
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}

static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
{
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_OS_WAKE_LOCK(dhdp);
	DHD_PERIM_LOCK(dhdp);

	/* Set flag when early suspend was called */
	dhdp->in_suspend = val;
	if ((force || !dhdp->suspend_disable_flag) &&
		(dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND)))
	{
		ret = dhd_set_suspend(val, dhdp);
	}

	DHD_PERIM_UNLOCK(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);
	return ret;
}

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
static void dhd_early_suspend(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
		dhd_suspend_resume_helper(dhd, 1, 0);
		dhd_conf_set_suspend_resume(&dhd->pub, 1);
	}
}

static void dhd_late_resume(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
		dhd_conf_set_suspend_resume(&dhd->pub, 0);
		dhd_suspend_resume_helper(dhd, 0, 0);
	}
}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

/*
 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
 *
 *	dhd_timeout_start(&tmo, usec);
 *	while (!dhd_timeout_expired(&tmo))
 *		if (poll_something())
 *			break;
 *	if (dhd_timeout_expired(&tmo))
 *		fatal();
 */

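/*
 * Implementation note (summarizing the logic below): dhd_timeout_expired()
 * adds each delay to tmo->elapsed before performing it and spins with a
 * doubling delay (1, 2, 4, ... usec) capped at one jiffy's worth of
 * microseconds; once the cap is reached and sleeping is allowed, each call
 * sleeps for about a millisecond instead of busy-waiting.
 */
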
void
dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
{
	tmo->limit = usec;
	tmo->increment = 0;
	tmo->elapsed = 0;
	tmo->tick = jiffies_to_usecs(1);
}

int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing on the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/*
		 * OSL_SLEEP() corresponds to usleep_range(). In a non-atomic
		 * context where the exact wakeup time is flexible,
		 * usleep_range() is preferable to udelay(): it improves
		 * responsiveness and reduces power.
		 */
		OSL_SLEEP(jiffies_to_msecs(1));
	}

	return 0;
}

int
dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
{
	int i = 0;

	if (!dhd) {
		DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
		return DHD_BAD_IF;
	}

	while (i < DHD_MAX_IFS) {
		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
			return i;
		i++;
	}

	return DHD_BAD_IF;
}

struct net_device * dhd_idx2net(void *pub, int ifidx)
{
	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
	struct dhd_info *dhd_info;

	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
		return NULL;
	dhd_info = dhd_pub->info;
	if (dhd_info && dhd_info->iflist[ifidx])
		return dhd_info->iflist[ifidx]->net;
	return NULL;
}

int
dhd_ifname2idx(dhd_info_t *dhd, char *name)
{
	int i = DHD_MAX_IFS;

	ASSERT(dhd);

	if (name == NULL || *name == '\0')
		return 0;

	while (--i > 0)
		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
			break;

	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));

	return i;	/* default - the primary interface */
}

char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	ASSERT(dhd);

	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
		return "<if_bad>";
	}

	if (dhd->iflist[ifidx] == NULL) {
		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
		return "<if_null>";
	}

	if (dhd->iflist[ifidx]->net)
		return dhd->iflist[ifidx]->net->name;

	return "<if_none>";
}

uint8 *
dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
{
	int i;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	ASSERT(dhd);
	for (i = 0; i < DHD_MAX_IFS; i++)
		if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
			return dhd->iflist[i]->mac_addr;

	return NULL;
}

static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
	struct netdev_hw_addr *ha;
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

#ifdef MCAST_LIST_ACCUMULATION
	int i;
	uint32 cnt_iface[DHD_MAX_IFS];
	cnt = 0;
	allmulti = 0;

	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			dev = dhd->iflist[i]->net;
			if (!dev)
				continue;
			netif_addr_lock_bh(dev);
			cnt_iface[i] = netdev_mc_count(dev);
			cnt += cnt_iface[i];
			netif_addr_unlock_bh(dev);

			/* Determine initial value of allmulti flag */
			allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
		}
	}
#else /* !MCAST_LIST_ACCUMULATION */
	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
		return;
	}
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	netif_addr_lock_bh(dev);
	cnt = netdev_mc_count(dev);
	netif_addr_unlock_bh(dev);

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
#endif /* MCAST_LIST_ACCUMULATION */

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
	if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
		allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

	/* Send down the multicast list first. */

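	/*
	 * Buffer layout for the iovar, matching the construction below: the
	 * NUL-terminated name "mcast_list", a little-endian uint32 count,
	 * then cnt 6-byte Ethernet addresses.
	 */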
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#ifdef MCAST_LIST_ACCUMULATION
	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
			dev = dhd->iflist[i]->net;

			netif_addr_lock_bh(dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
			netdev_for_each_mc_addr(ha, dev) {
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
				if (!cnt_iface[i])
					break;
				memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
				bufp += ETHER_ADDR_LEN;
				DHD_TRACE(("_dhd_set_multicast_list: cnt "
					"%d " MACDBG "\n",
					cnt_iface[i], MAC2STRDBG(ha->addr)));
				cnt_iface[i]--;
			}
			netif_addr_unlock_bh(dev);
		}
	}
#else /* !MCAST_LIST_ACCUMULATION */
	netif_addr_lock_bh(dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	netif_addr_unlock_bh(dev);
#endif /* MCAST_LIST_ACCUMULATION */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	allmulti = htol32(allmulti);
	ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
		sizeof(allmulti), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

#ifdef MCAST_LIST_ACCUMULATION
	allmulti = 0;
	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			dev = dhd->iflist[i]->net;
			allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
		}
	}
#else
	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
#endif /* MCAST_LIST_ACCUMULATION */

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}

int
_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
{
	int ret;

	ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
		ETHER_ADDR_LEN, NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
	} else {
		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
		if (ifidx == 0)
			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
	}

	return ret;
}

#ifdef DHD_PSTA
/* Get psta/psr configuration */
int dhd_get_psta_mode(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return (int)dhd->psta_mode;
}
/* Set psta/psr configuration */
int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd->psta_mode = val;
	return 0;
}
#endif /* DHD_PSTA */

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
static void
dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	if (
#ifdef DHD_L2_FILTER
		(ifp->block_ping) ||
#endif // endif
#ifdef DHD_WET
		(dhd->wet_mode) ||
#endif // endif
#ifdef DHD_MCAST_REGEN
		(ifp->mcast_regen_bss_enable) ||
#endif // endif
		FALSE) {
		ifp->rx_pkt_chainable = FALSE;
	}
}
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

#ifdef DHD_WET
/* Get wet configuration */
int dhd_get_wet_mode(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return (int)dhd->wet_mode;
}

/* Set wet configuration */
int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd->wet_mode = val;
	dhd_update_rx_pkt_chainable_state(dhdp, 0);
	return 0;
}
#endif /* DHD_WET */

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
int32 dhd_role_to_nl80211_iftype(int32 role)
{
	switch (role) {
		case WLC_E_IF_ROLE_STA:
			return NL80211_IFTYPE_STATION;
		case WLC_E_IF_ROLE_AP:
			return NL80211_IFTYPE_AP;
		case WLC_E_IF_ROLE_WDS:
			return NL80211_IFTYPE_WDS;
		case WLC_E_IF_ROLE_P2P_GO:
			return NL80211_IFTYPE_P2P_GO;
		case WLC_E_IF_ROLE_P2P_CLIENT:
			return NL80211_IFTYPE_P2P_CLIENT;
		case WLC_E_IF_ROLE_IBSS:
		case WLC_E_IF_ROLE_NAN:
			return NL80211_IFTYPE_ADHOC;
		default:
			return NL80211_IFTYPE_UNSPECIFIED;
	}
}
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	int ifidx, bssidx;
	int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	struct wl_if_event_info info;
#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
	struct net_device *ndev = NULL;
#endif
#else
	struct net_device *ndev;
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	BCM_REFERENCE(ret);
	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (if_event->event.ifidx > 0) {
		u8 *mac_addr;
		bzero(&info, sizeof(info));
		info.ifidx = ifidx;
		info.bssidx = bssidx;
		info.role = if_event->event.role;
		strncpy(info.name, if_event->name, IFNAMSIZ);
		if (is_valid_ether_addr(if_event->mac)) {
			mac_addr = if_event->mac;
		} else {
			mac_addr = NULL;
		}

#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
		if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
			&info, mac_addr, NULL, true)) == NULL)
#else
		if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
			&info, mac_addr, NULL, true) == NULL)
#endif
		{
			/* Do the post interface create ops */
			DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
			goto done;
		}
	}
#else
	/* This path is for the non-android case: the interface name in the
	 * host and in the event msg are the same, and the name in the event
	 * msg is used to create the dongle if list on the host.
	 */
	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE, if_event->name);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

#ifndef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
		uint32 var_int = 1;
		ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
			NULL, 0, TRUE);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
	dhd_bridge_dev_set(dhd, ifidx, ndev);
#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));
#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
	dhd_bridge_dev_set(dhd, ifidx, NULL);
#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */

	DHD_PERIM_UNLOCK(&dhd->pub);
	if (!dhd->pub.info->iflist[ifidx]) {
		/* No matching netdev found */
		DHD_ERROR(("Netdev not found! Do nothing.\n"));
		goto done;
	}
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (if_event->event.ifidx > 0) {
		/* Do the post interface del ops */
		if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
			true, if_event->event.ifidx) != 0) {
			DHD_TRACE(("Post ifdel ops failed. Returning \n"));
			goto done;
		}
	}
#else
	/* For non-cfg80211 drivers */
	dhd_remove_if(&dhd->pub, ifidx, TRUE);
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

done:
	DHD_PERIM_LOCK(&dhd->pub);
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

#ifdef DHD_UPDATE_INTF_MAC
static void
dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_UPDATE) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));

	dhd_op_if_update(&dhd->pub, ifidx);

	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
{
	dhd_info_t * dhdinfo = NULL;
	dhd_if_t * ifp = NULL;
	int ret = 0;
	char buf[128];

	if ((NULL == dhdpub) || (NULL == dhdpub->info)) {
		DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
		return -1;
	} else {
		dhdinfo = (dhd_info_t *)dhdpub->info;
		ifp = dhdinfo->iflist[ifidx];
		if (NULL == ifp) {
			DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
			return -2;
		}
	}

	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
	// Get MAC address
	strcpy(buf, "cur_etheraddr");
	ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
	if (0 > ret) {
		DHD_ERROR(("Failed to update the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
		// avoid collision
		dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
		// force locally administered address
		ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
	} else {
		DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
			ifp->name, ifp->idx,
			(unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
			(unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
		memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
		if (dhdinfo->iflist[ifp->idx]->net) {
			memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
		}
	}

	return ret;
}
#endif /* DHD_UPDATE_INTF_MAC */

static void
dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;

	if (event != DHD_WQ_WORK_SET_MAC) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	// terence 20160907: fix for not able to set mac when wlan0 is down
	if (ifp == NULL || !ifp->set_macaddress) {
		goto done;
	}
	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
	ifp->set_macaddress = FALSE;
	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
		DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
	else
		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx = (int)((long int)event_info);
	dhd_if_t *ifp = NULL;

	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifp = dhd->iflist[ifidx];

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	ifidx = ifp->idx;

#ifdef MCAST_LIST_ACCUMULATION
	ifidx = 0;
#endif /* MCAST_LIST_ACCUMULATION */

	_dhd_set_multicast_list(dhd, ifidx);
	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

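/*
 * The net_device callbacks below do not touch the firmware directly: they
 * record the request on the interface and defer the actual iovars to the
 * driver work queue (dhd_set_mac_addr_handler / dhd_set_mcast_list_handler),
 * which re-checks that the interface is still up before applying them.
 */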
static int
dhd_set_mac_address(struct net_device *dev, void *addr)
{
	int ret = 0;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	int ifidx;
	dhd_if_t *dhdif;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return -1;

	dhdif = dhd->iflist[ifidx];

	dhd_net_if_lock_local(dhd);
	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
	dhdif->set_macaddress = TRUE;
	dhd_net_if_unlock_local(dhd);
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
		dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
	return ret;
}

static void
dhd_set_multicast_list(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ifidx;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return;

	dhd->iflist[ifidx]->set_multicast = TRUE;
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);

	// terence 20160907: fix for not able to set mac when wlan0 is down
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
		DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}

#ifdef DHD_UCODE_DOWNLOAD
/* Get ucode path */
char *
dhd_get_ucode_path(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return dhd->uc_path;
}
#endif /* DHD_UCODE_DOWNLOAD */

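/*
 * wlfc lock note: the BCMDBUS build takes the spinlock with irqsave, which
 * suggests the queue may be touched from interrupt context on that bus;
 * other builds only need the bottom-half variant. Both are skipped entirely
 * when proptxstatus is disabled.
 */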
#ifdef PROP_TXSTATUS
int
dhd_os_wlfc_block(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);
	ASSERT(di != NULL);
	/* terence 20161229: don't do spin lock if proptx not enabled */
	if (disable_proptx)
		return 1;
#ifdef BCMDBUS
	spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
#else
	spin_lock_bh(&di->wlfc_spinlock);
#endif /* BCMDBUS */
	return 1;
}

int
dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);

	ASSERT(di != NULL);
	/* terence 20161229: don't do spin lock if proptx not enabled */
	if (disable_proptx)
		return 1;
#ifdef BCMDBUS
	spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
#else
	spin_unlock_bh(&di->wlfc_spinlock);
#endif /* BCMDBUS */
	return 1;
}

#endif /* PROP_TXSTATUS */

/* This routine does not support the packet chain feature; it is currently
 * tested only for the proxy ARP feature.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
	struct sk_buff *skb;
	void *skbhead = NULL;
	void *skbprev = NULL;
	dhd_if_t *ifp;
	ASSERT(!PKTISCHAINED(p));
	skb = PKTTONATIVE(dhdp->osh, p);

	ifp = dhdp->info->iflist[ifidx];
	skb->dev = ifp->net;

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (in_interrupt()) {
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		netif_rx(skb);
	} else {
		if (dhdp->info->rxthread_enabled) {
			if (!skbhead) {
				skbhead = skb;
			} else {
				PKTSETNEXT(dhdp->osh, skbprev, skb);
			}
			skbprev = skb;
		} else {
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
			netif_rx_ni(skb);
		}
	}

	if (dhdp->info->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	return BCME_OK;
}

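/*
 * Transmit path: __dhd_sendpkt() validates driver/bus state, runs the
 * optional L2 filters, updates tx counters, assigns an 802.1d priority,
 * maps the packet onto a flowring on PCIE_FULL_DONGLE builds, and hands it
 * to the bus layer, optionally through proptxstatus.
 */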
3239int BCMFASTPATH
3240__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3241{
3242 int ret = BCME_OK;
3243 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3244 struct ether_header *eh = NULL;
965f77c4
RC
3245 bool pkt_ether_type_802_1x = FALSE;
3246 uint8 pkt_flow_prio;
3247
d2839953
RC
3248#if defined(DHD_L2_FILTER)
3249 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
3250#endif // endif
3251
3252 /* Reject if down */
3253 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
3254 /* free the packet here since the caller won't */
3255 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3256 return -ENODEV;
3257 }
3258
3259#ifdef PCIE_FULL_DONGLE
3260 if (dhdp->busstate == DHD_BUS_SUSPEND) {
3261 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3262 PKTCFREE(dhdp->osh, pktbuf, TRUE);
d2839953 3263 return NETDEV_TX_BUSY;
d2839953
RC
3264 }
3265#endif /* PCIE_FULL_DONGLE */
3266
3267 /* Reject if pktlen > MAX_MTU_SZ */
3268 if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
3269 /* free the packet here since the caller won't */
3270 dhdp->tx_big_packets++;
3271 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3272 return BCME_ERROR;
3273 }
3274
3275#ifdef DHD_L2_FILTER
3276 /* if dhcp_unicast is enabled, we need to convert the */
3277 /* broadcast DHCP ACK/REPLY packets to Unicast. */
3278 if (ifp->dhcp_unicast) {
3279 uint8* mac_addr;
3280 uint8* ehptr = NULL;
3281 int ret;
3282 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
3283 if (ret == BCME_OK) {
3284 /* if given mac address having valid entry in sta list
3285 * copy the given mac address, and return with BCME_OK
3286 */
3287 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
3288 ehptr = PKTDATA(dhdp->osh, pktbuf);
3289 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
3290 }
3291 }
3292 }
3293
3294 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3295 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
3296 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3297 return BCME_ERROR;
3298 }
3299 }
3300
3301 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3302 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
3303
3304 /* Drop the packets if l2 filter has processed it already
3305 * otherwise continue with the normal path
3306 */
3307 if (ret == BCME_OK) {
3308 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3309 return BCME_ERROR;
3310 }
3311 }
3312#endif /* DHD_L2_FILTER */
3313 /* Update multicast statistic */
3314 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
3315 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
3316 eh = (struct ether_header *)pktdata;
3317
3318 if (ETHER_ISMULTI(eh->ether_dhost))
3319 dhdp->tx_multicast++;
3320 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
3321#ifdef DHD_LOSSLESS_ROAMING
3322 uint8 prio = (uint8)PKTPRIO(pktbuf);
3323
3324 /* back up 802.1x's priority */
3325 dhdp->prio_8021x = prio;
3326#endif /* DHD_LOSSLESS_ROAMING */
965f77c4 3327 pkt_ether_type_802_1x = TRUE;
d2839953
RC
3328 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
3329 atomic_inc(&dhd->pend_8021x_cnt);
3330#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
3331 wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
3332 pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
3333#endif /* WL_CFG80211 && WL_WPS_SYNC */
d2839953 3334 }
965f77c4
RC
3335 dhd_dump_pkt(dhdp, ifidx, pktdata,
3336 (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
d2839953 3337 } else {
965f77c4
RC
3338 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3339 return BCME_ERROR;
d2839953
RC
3340 }
3341
3342 {
3343 /* Look into the packet and update the packet priority */
3344#ifndef PKTPRIO_OVERRIDE
3345 if (PKTPRIO(pktbuf) == 0)
3346#endif /* !PKTPRIO_OVERRIDE */
3347 {
3348#if defined(QOS_MAP_SET)
3349 pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
3350#else
3351 pktsetprio(pktbuf, FALSE);
3352#endif /* QOS_MAP_SET */
3353 }
3354#ifndef PKTPRIO_OVERRIDE
3355 else {
3356 /* Some protocols like OZMO use priority values from 256..263.
3357 * these are magic values to indicate a specific 802.1d priority.
3358 * make sure that priority field is in range of 0..7
3359 */
3360 PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
3361 }
3362#endif /* !PKTPRIO_OVERRIDE */
3363 }
3364
965f77c4
RC
3365 BCM_REFERENCE(pkt_ether_type_802_1x);
3366 BCM_REFERENCE(pkt_flow_prio);
3367
3368#ifdef SUPPORT_SET_TID
3369 dhd_set_tid_based_on_uid(dhdp, pktbuf);
3370#endif /* SUPPORT_SET_TID */
3371
3372#ifdef PCIE_FULL_DONGLE
3373 /*
3374 * Lkup the per interface hash table, for a matching flowring. If one is not
3375 * available, allocate a unique flowid and add a flowring entry.
3376 * The found or newly created flowid is placed into the pktbuf's tag.
3377 */
3378
3379#ifdef DHD_LOSSLESS_ROAMING
3380 /* For LLR, override the priority and use a flowring with prio 7 for 802.1x packets */
3381 if (pkt_ether_type_802_1x) {
3382 pkt_flow_prio = PRIO_8021D_NC;
3383 } else
3384#endif /* DHD_LOSSLESS_ROAMING */
3385 {
3386 pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
3387 }
3388
3389 ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
3390 if (ret != BCME_OK) {
3391 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
3392 return ret;
3393 }
3394#endif /* PCIE_FULL_DONGLE */
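 /* Sketch of the lookup above (PCIE_FULL_DONGLE builds): the 802.1d
 * priority indexes flow_prio_map[] to pick a flow ring priority,
 *
 *	pkt_flow_prio = dhdp->flow_prio_map[PKTPRIO(pktbuf)];
 *
 * and dhd_flowid_update() then finds or allocates the flow ring for this
 * (interface, priority, destination) and places the flowid in the
 * packet's tag for the bus layer to pick up at transfer time.
 */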
3395 /* terence 20150901: Micky added this to adjust the 802.1X priority */
3396 /* Set the 802.1X packet with the highest priority 7 */
3397 if (dhdp->conf->pktprio8021x >= 0)
3398 pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
3399
3400#ifdef PROP_TXSTATUS
3401 if (dhd_wlfc_is_supported(dhdp)) {
3402 /* store the interface ID */
3403 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
3404
3405 /* store destination MAC in the tag as well */
3406 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
3407
3408 /* decide which FIFO this packet belongs to */
3409 if (ETHER_ISMULTI(eh->ether_dhost))
3410 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
3411 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
3412 else
3413 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
3414 } else
3415#endif /* PROP_TXSTATUS */
3416 {
3417 /* If the protocol uses a data header, apply it */
3418 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
3419 }
3420
3421 /* Use bus module to send data frame */
3422#ifdef PROP_TXSTATUS
3423 {
3424 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
3425 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
3426 /* non-proptxstatus way */
3427#ifdef BCMPCIE
3428 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3429#else
3430 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3431#endif /* BCMPCIE */
3432 }
3433 }
3434#else
3435#ifdef BCMPCIE
3436 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3437#else
3438 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3439#endif /* BCMPCIE */
3440#endif /* PROP_TXSTATUS */
3441#ifdef BCMDBUS
3442 if (ret)
3443 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3444#endif /* BCMDBUS */
3445
3446 return ret;
3447}
3448
3449int BCMFASTPATH
3450dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3451{
3452 int ret = 0;
3453 unsigned long flags;
3454 dhd_if_t *ifp;
3455
3456 DHD_GENERAL_LOCK(dhdp, flags);
3457 ifp = dhd_get_ifp(dhdp, ifidx);
3458 if (!ifp || ifp->del_in_progress) {
3459 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
3460 __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
3461 DHD_GENERAL_UNLOCK(dhdp, flags);
3462 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3463 return -ENODEV;
3464 }
3465 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3466 DHD_ERROR(("%s: returning as busstate=%d\n",
3467 __FUNCTION__, dhdp->busstate));
3468 DHD_GENERAL_UNLOCK(dhdp, flags);
3469 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3470 return -ENODEV;
3471 }
3472 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3473 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
3474 DHD_GENERAL_UNLOCK(dhdp, flags);
3475
3476 DHD_GENERAL_LOCK(dhdp, flags);
3477 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
3478 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3479 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3480 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
3481 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3482 dhd_os_tx_completion_wake(dhdp);
3483 dhd_os_busbusy_wake(dhdp);
3484 DHD_GENERAL_UNLOCK(dhdp, flags);
3485 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3486 return -ENODEV;
3487 }
3488 DHD_GENERAL_UNLOCK(dhdp, flags);
3489
3490 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
3491
3492 DHD_GENERAL_LOCK(dhdp, flags);
3493 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
3494 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3495 dhd_os_tx_completion_wake(dhdp);
3496 dhd_os_busbusy_wake(dhdp);
3497 DHD_GENERAL_UNLOCK(dhdp, flags);
3498 return ret;
3499}
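/* Caller sketch (illustrative, not a new API): dhd_sendpkt() consumes
 * pktbuf on every path shown here -- freed internally on error, handed to
 * the bus on success -- so a forwarding caller must not free it again:
 *
 *	ret = dhd_sendpkt(dhdp, ifidx, pktbuf);
 *	if (ret != BCME_OK)
 *		dhdp->tx_dropped++;	// pktbuf already gone
 *
 * (the intra-BSS forwarding in dhd_rx_frame() below simply ignores the
 * return value for this reason.)
 */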
3500
3501int BCMFASTPATH
3502dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3503{
3504 int ret;
3505 uint datalen;
3506 void *pktbuf;
3507 dhd_info_t *dhd = DHD_DEV_INFO(net);
3508 dhd_if_t *ifp = NULL;
3509 int ifidx;
3510 unsigned long flags;
3511 uint8 htsfdlystat_sz = 0;
3512
3513 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3514
3515 if (dhd_query_bus_erros(&dhd->pub)) {
3516 return -ENODEV;
3517 }
3518
3519 DHD_GENERAL_LOCK(&dhd->pub, flags);
3520 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
3521 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3522
3523 DHD_GENERAL_LOCK(&dhd->pub, flags);
3524#ifdef BCMPCIE
3525 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3526 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3527 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
3528 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3529#ifdef PCIE_FULL_DONGLE
3530 /* Stop tx queues if suspend is in progress */
3531 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3532 dhd_bus_stop_queue(dhd->pub.bus);
3533 }
3534#endif /* PCIE_FULL_DONGLE */
3535 dhd_os_busbusy_wake(&dhd->pub);
3536 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3537 return NETDEV_TX_BUSY;
3538 }
3539#else
3540 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3541 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3542 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
3543 }
3544#endif
3545
3546 DHD_OS_WAKE_LOCK(&dhd->pub);
3547 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3548
3549 /* Reject if down */
3550 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
3551 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3552 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3553 netif_stop_queue(net);
3554 /* Send Event when bus down detected during data session */
3555 if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
3556 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3557 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3558 net_os_send_hang_message(net);
3559 }
3560 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3561 dhd_os_busbusy_wake(&dhd->pub);
3562 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3563 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3564 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3565 return NETDEV_TX_BUSY;
3566 }
3567
3568 ifp = DHD_DEV_IFP(net);
3569 ifidx = DHD_DEV_IFIDX(net);
3570 if (!ifp || (ifidx == DHD_BAD_IF) ||
3571 ifp->del_in_progress) {
3572 DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
3573 __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
3574 netif_stop_queue(net);
3575 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3576 dhd_os_busbusy_wake(&dhd->pub);
3577 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3578 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3579 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3580 return NETDEV_TX_BUSY;
3581 }
3582
3583 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
3584 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3585
3586 ASSERT(ifidx == dhd_net2idx(dhd, net));
3587 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
3588
3589 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3590
3591 skb_orphan(skb);
3592
3593 /* re-align socket buffer if "skb->data" is odd address */
3594 if (((unsigned long)(skb->data)) & 0x1) {
3595 unsigned char *data = skb->data;
3596 uint32 length = skb->len;
3597 PKTPUSH(dhd->pub.osh, skb, 1);
3598 memmove(skb->data, data, length);
3599 PKTSETLEN(dhd->pub.osh, skb, length);
3600 }
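 /* Sketch of the realignment above (hypothetical addresses): with
 * skb->data at 0x...1001, PKTPUSH(osh, skb, 1) moves it to 0x...1000,
 * memmove() slides the payload down one byte, and PKTSETLEN() trims the
 * length back to the original. This relies on the headroom reserved via
 * hard_header_len at interface registration.
 */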
3601
3602 datalen = PKTLEN(dhd->pub.osh, skb);
3603
3604 /* Make sure there's enough room for any header */
3605 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
3606 struct sk_buff *skb2;
3607
3608 DHD_INFO(("%s: insufficient headroom\n",
3609 dhd_ifname(&dhd->pub, ifidx)));
3610 dhd->pub.tx_realloc++;
3611
3612 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3613 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
3614
3615 dev_kfree_skb(skb);
3616 if ((skb = skb2) == NULL) {
3617 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
3618 dhd_ifname(&dhd->pub, ifidx)));
3619 ret = -ENOMEM;
3620 goto done;
3621 }
3622 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3623 }
3624
3625 /* move from dhdsdio_sendfromq(), try to orphan skb early */
3626 if (dhd->pub.conf->orphan_move == 2)
3627 PKTORPHAN(skb, dhd->pub.conf->tsq);
3628 else if (dhd->pub.conf->orphan_move == 3)
3629 skb_orphan(skb);
3630
3631 /* Convert to packet */
3632 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
3633 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
3634 dhd_ifname(&dhd->pub, ifidx)));
3635 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3636 dev_kfree_skb_any(skb);
3637 ret = -ENOMEM;
3638 goto done;
3639 }
3640
3641#ifdef DHD_WET
3642 /* WET-related packet protocol manipulation should be done in DHD
3643 * since the dongle doesn't have the complete payload
3644 */
3645 if (WET_ENABLED(&dhd->pub) &&
3646 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
3647 DHD_INFO(("%s:%s: wet send proc failed\n",
3648 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
3649 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
3650 ret = -EFAULT;
3651 goto done;
3652 }
3653#endif /* DHD_WET */
3654
3655#ifdef DHD_PSTA
3656 /* PSR related packet proto manipulation should be done in DHD
3657 * since dongle doesn't have complete payload
3658 */
3659 if (PSR_ENABLED(&dhd->pub) &&
3660 (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
3661
3662 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
3663 dhd_ifname(&dhd->pub, ifidx)));
3664 }
3665#endif /* DHD_PSTA */
3666
3667#ifdef DHDTCPSYNC_FLOOD_BLK
3668 if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
3669 ifp->tsyncack_txed ++;
3670 }
3671#endif /* DHDTCPSYNC_FLOOD_BLK */
3672
3673#ifdef DHDTCPACK_SUPPRESS
3674 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
3675 /* If this packet has been hold or got freed, just return */
3676 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
3677 ret = 0;
3678 goto done;
3679 }
3680 } else {
3681 /* If this packet has replaced another packet and got freed, just return */
3682 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
3683 ret = 0;
3684 goto done;
3685 }
3686 }
3687#endif /* DHDTCPACK_SUPPRESS */
3688
3689 /*
3690 * If Load Balance is enabled queue the packet
3691 * else send directly from here.
3692 */
3693#if defined(DHD_LB_TXP)
3694 ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
3695#else
3696 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
3697#endif // endif
3698
3699done:
3700 if (ret) {
3701 ifp->stats.tx_dropped++;
3702 dhd->pub.tx_dropped++;
3703 } else {
3704#ifdef PROP_TXSTATUS
3705 /* the tx_packets counters are updated here only when wlfc is disabled */
3706 if (!dhd_wlfc_is_supported(&dhd->pub))
3707#endif // endif
3708 {
3709 dhd->pub.tx_packets++;
3710 ifp->stats.tx_packets++;
3711 ifp->stats.tx_bytes += datalen;
3712 }
3713 }
3714
3715 DHD_GENERAL_LOCK(&dhd->pub, flags);
3716 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3717 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
3718 dhd_os_tx_completion_wake(&dhd->pub);
3719 dhd_os_busbusy_wake(&dhd->pub);
3720 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3721 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3722 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3723 /* Return ok: we always eat the packet */
3724 return NETDEV_TX_OK;
3725}
3726
3727#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3728void dhd_rx_wq_wakeup(struct work_struct *ptr)
3729{
3730 struct dhd_rx_tx_work *work;
3731 struct dhd_pub * pub;
3732
3733 work = container_of(ptr, struct dhd_rx_tx_work, work);
3734
3735 pub = work->pub;
3736
3737 DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
3738
3739 if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
3740 return;
3741 }
3742
3743 DHD_OS_WAKE_LOCK(pub);
3744 if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
3745
3746 // do nothing but wakeup the bus.
3747 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
3748 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
3749 }
3750 DHD_OS_WAKE_UNLOCK(pub);
3751 kfree(work);
3752}
3753
3754void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
3755{
3756 struct dhd_rx_tx_work *work;
3757 int ret = 0; /* stays 0 if the runtime-PM resume below fails */
3758 dhd_info_t *dhd;
3759 struct dhd_bus * bus;
3760
3761 work = container_of(ptr, struct dhd_rx_tx_work, work);
3762
3763 dhd = DHD_DEV_INFO(work->net);
3764
3765 bus = dhd->pub.bus;
3766
3767 if (atomic_read(&dhd->pub.block_bus)) {
3768 kfree_skb(work->skb);
3769 kfree(work);
3770 dhd_netif_start_queue(bus);
3771 return;
3772 }
3773
3774 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
3775 ret = dhd_start_xmit(work->skb, work->net);
3776 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3777 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3778 }
3779 if (ret)
3780 netdev_err(work->net,
3781 "error: dhd_start_xmit():%d\n", ret);
3782
3783 kfree(work);
3784 dhd_netif_start_queue(bus);
3785}
3786
3787int BCMFASTPATH
3788dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
3789{
3790 struct dhd_rx_tx_work *start_xmit_work;
3791 int ret;
3792 dhd_info_t *dhd = DHD_DEV_INFO(net);
3793
3794 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3795 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
3796
3797 dhd_netif_stop_queue(dhd->pub.bus);
3798
3799 start_xmit_work = (struct dhd_rx_tx_work*)
3800 kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
3801
3802 if (!start_xmit_work) {
3803 netdev_err(net,
3804 "error: failed to alloc start_xmit_work\n");
3805 ret = -ENOMEM;
3806 goto exit;
3807 }
3808
3809 INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
3810 start_xmit_work->skb = skb;
3811 start_xmit_work->net = net;
3812 queue_work(dhd->tx_wq, &start_xmit_work->work);
3813 ret = NET_XMIT_SUCCESS;
3814
3815 } else if (dhd->pub.busstate == DHD_BUS_DATA) {
3816 ret = dhd_start_xmit(skb, net);
3817 } else {
3818 /* when bus is down */
3819 ret = -ENODEV;
3820 }
3821
3822exit:
3823 return ret;
3824}
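/* Flow sketch for the wrapper above (assuming it is wired up as the
 * netdev ndo_start_xmit when DHD_PCIE_NATIVE_RUNTIMEPM is enabled):
 *
 *	DHD_BUS_SUSPEND -> stop the netif queue, defer the skb to tx_wq;
 *	                   the worker resumes the bus via
 *	                   pm_runtime_get_sync() and replays dhd_start_xmit()
 *	DHD_BUS_DATA    -> transmit inline via dhd_start_xmit()
 *	otherwise       -> -ENODEV (bus down)
 */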
3825void
3826dhd_bus_wakeup_work(dhd_pub_t *dhdp)
3827{
3828 struct dhd_rx_tx_work *rx_work;
3829 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3830
3831 rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
3832 if (!rx_work) {
3833 DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
3834 return;
3835 }
3836
3837 INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
3838 rx_work->pub = dhdp;
3839 queue_work(dhd->rx_wq, &rx_work->work);
3840
3841}
3842#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3843
3844static void
3845__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
3846{
3847
3848 if ((state == ON) && (dhdp->txoff == FALSE)) {
3849 netif_stop_queue(net);
3850 dhd_prot_update_pktid_txq_stop_cnt(dhdp);
3851 } else if (state == ON) {
3852 DHD_INFO(("%s: Netif Queue has already stopped\n", __FUNCTION__));
3853 }
3854 if ((state == OFF) && (dhdp->txoff == TRUE)) {
3855 netif_wake_queue(net);
3856 dhd_prot_update_pktid_txq_start_cnt(dhdp);
3857 } else if (state == OFF) {
3858 DHD_INFO(("%s: Netif Queue has already started\n", __FUNCTION__));
3859 }
3860}
3861
3862void
3863dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
3864{
3865 struct net_device *net;
3866 dhd_info_t *dhd = dhdp->info;
3867 int i;
3868
3869 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3870
3871 ASSERT(dhd);
3872
3873#ifdef DHD_LOSSLESS_ROAMING
3874 /* block flowcontrol during roaming */
3875 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
3876 return;
3877 }
3878#endif // endif
3879
3880 if (ifidx == ALL_INTERFACES) {
3881 for (i = 0; i < DHD_MAX_IFS; i++) {
3882 if (dhd->iflist[i]) {
3883 net = dhd->iflist[i]->net;
3884 __dhd_txflowcontrol(dhdp, net, state);
3885 }
3886 }
3887 } else {
3888 if (dhd->iflist[ifidx]) {
3889 net = dhd->iflist[ifidx]->net;
3890 __dhd_txflowcontrol(dhdp, net, state);
3891 }
3892 }
3893 dhdp->txoff = state;
3894}
3895
3896#ifdef DHD_MCAST_REGEN
3897/*
3898 * Description: This function is called to do the reverse translation
3899 *
3900 * Input eh - pointer to the ethernet header
3901 */
3902int32
3903dhd_mcast_reverse_translation(struct ether_header *eh)
3904{
3905 uint8 *iph;
3906 uint32 dest_ip;
3907
3908 iph = (uint8 *)eh + ETHER_HDR_LEN;
3909 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
3910
3911 /* Only IP packets are handled */
3912 if (eh->ether_type != hton16(ETHER_TYPE_IP))
3913 return BCME_ERROR;
3914
3915 /* Non-IPv4 multicast packets are not handled */
3916 if (IP_VER(iph) != IP_VER_4)
3917 return BCME_ERROR;
3918
3919 /*
3920 * The packet has a multicast IP and unicast MAC. That means
3921 * we have to do the reverse translation
3922 */
3923 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
3924 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
3925 return BCME_OK;
3926 }
3927
3928 return BCME_ERROR;
3929}
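/* Worked example for ETHER_FILL_MCAST_ADDR_FROM_IP() above, assuming the
 * standard RFC 1112 mapping (low 23 bits of the group address under the
 * fixed 01:00:5e multicast prefix):
 *
 *	dest_ip     = 239.1.2.3 (0xef010203)
 *	ether_dhost = 01:00:5e:01:02:03
 *
 * so a frame received with a unicast MAC but a multicast IP goes back up
 * the stack with a consistent multicast MAC.
 */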
3930#endif /* MCAST_REGEN */
3931
3932#ifdef SHOW_LOGTRACE
3933static void
3934dhd_netif_rx_ni(struct sk_buff * skb)
3935{
3936 /* Do not call netif_receive_skb, as this workqueue scheduler is
3937 * not from NAPI. Also, as we are not in interrupt context, do not
3938 * call netif_rx; instead call netif_rx_ni (kernel >= 2.6), which
3939 * does netif_rx, disables irqs, raises the NET_RX_SOFTIRQ and
3940 * re-enables interrupts
3941 */
3942 netif_rx_ni(skb);
3943}
3944
3945static int
3946dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
3947{
3948 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3949 int ret = BCME_OK;
3950 uint datalen;
3951 bcm_event_msg_u_t evu;
3952 void *data = NULL;
3953 void *pktdata = NULL;
3954 bcm_event_t *pvt_data;
3955 uint pktlen;
3956
3957 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
3958
3959 /* In dhd_rx_frame, header is stripped using skb_pull
3960 * of size ETH_HLEN, so adjust pktlen accordingly
3961 */
3962 pktlen = skb->len + ETH_HLEN;
3963
3964 pktdata = (void *)skb_mac_header(skb);
3965 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3966
3967 if (ret != BCME_OK) {
3968 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
3969 __FUNCTION__, ret));
3970 goto exit;
3971 }
3972
3973 datalen = ntoh32(evu.event.datalen);
3974
3975 pvt_data = (bcm_event_t *)pktdata;
3976 data = &pvt_data[1];
3977
3978 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
3979
3980exit:
3981 return ret;
3982}
3983
3984/*
3985 * dhd_event_logtrace_process_items processes
3986 * each skb from evt_trace_queue.
3987 * Returns TRUE if more packets to be processed
3988 * else returns FALSE
3989 */
3990
3991static int
3992dhd_event_logtrace_process_items(dhd_info_t *dhd)
3993{
3994 dhd_pub_t *dhdp;
3995 struct sk_buff *skb;
3996 uint32 qlen;
3997 uint32 process_len;
3998
3999 if (!dhd) {
4000 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4001 return 0;
4002 }
4003
4004 dhdp = &dhd->pub;
4005
4006 if (!dhdp) {
4007 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
4008 return 0;
4009 }
4010
4011 qlen = skb_queue_len(&dhd->evt_trace_queue);
4012 process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
4013
4014 /* Run while loop till bound is reached or skb queue is empty */
4015 while (process_len--) {
4016 int ifid = 0;
4017 skb = skb_dequeue(&dhd->evt_trace_queue);
4018 if (skb == NULL) {
4019 DHD_ERROR(("%s: skb is NULL, which is not a valid case\n",
4020 __FUNCTION__));
4021 break;
4022 }
4023 BCM_REFERENCE(ifid);
4024#ifdef PCIE_FULL_DONGLE
4025 /* Check if pkt is from INFO ring or WLC_E_TRACE */
4026 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
4027 if (ifid == DHD_DUMMY_INFO_IF) {
4028 /* Process logtrace from info rings */
4029 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
4030 } else
4031#endif /* PCIE_FULL_DONGLE */
4032 {
4033 /* Processing WLC_E_TRACE case OR non PCIE PCIE_FULL_DONGLE case */
4034 dhd_event_logtrace_pkt_process(dhdp, skb);
4035 }
4036
4037 /* Dummy sleep so that scheduler kicks in after processing any logprints */
4038 OSL_SLEEP(0);
4039
4040 /* Send packet up if logtrace_pkt_sendup is TRUE */
4041 if (dhdp->logtrace_pkt_sendup) {
4042#ifdef DHD_USE_STATIC_CTRLBUF
4043 /* If bufs are allocated via static buf pool
4044 * and logtrace_pkt_sendup enabled, make a copy,
4045 * free the local one and send the copy up.
4046 */
4047 void *npkt = PKTDUP(dhdp->osh, skb);
4048 /* Clone event and send it up */
4049 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4050 if (npkt) {
4051 skb = npkt;
4052 } else {
4053 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
4054 /* Packet is already freed, go to next packet */
4055 continue;
4056 }
4057#endif /* DHD_USE_STATIC_CTRLBUF */
4058#ifdef PCIE_FULL_DONGLE
4059 /* For infobuf packets the ifidx is DHD_DUMMY_INFO_IF, so
4060 * to send the skb to the network layer, assign skb->dev
4061 * to the primary interface's net device
4062 */
4063 if (ifid == DHD_DUMMY_INFO_IF) {
4064 skb = PKTTONATIVE(dhdp->osh, skb);
4065 skb->dev = dhd->iflist[0]->net;
4066 }
4067#endif /* PCIE_FULL_DONGLE */
4068 /* Send pkt UP */
4069 dhd_netif_rx_ni(skb);
4070 } else {
4071 /* Don't send up. Free up the packet. */
4072#ifdef DHD_USE_STATIC_CTRLBUF
4073 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4074#else
4075 PKTFREE(dhdp->osh, skb, FALSE);
4076#endif /* DHD_USE_STATIC_CTRLBUF */
4077 }
4078 }
4079
4080 /* Reschedule if more packets to be processed */
4081 return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
4082}
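/* Caller contract sketch: the return value is a reschedule hint, and both
 * consumers below drain the queue in the same shape, e.g.
 *
 *	while (dhd_event_logtrace_process_items(dhd) > 0)
 *		cond_resched();
 *
 * (the kthread variant adds a bounded sleep between rounds; the workqueue
 * variant re-queues itself with a delay instead.)
 */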
4083
4084#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4085static int
4086dhd_logtrace_thread(void *data)
4087{
4088 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4089 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4090 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
4091 int ret;
4092
4093 while (1) {
4094 dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
4095 if (!binary_sema_down(tsk)) {
4096 dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
4097 SMP_RD_BARRIER_DEPENDS();
4098 if (dhd->pub.dongle_reset == FALSE) {
4099 do {
4100 /* Check terminated before processing the items */
4101 if (tsk->terminated) {
4102 DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
4103 goto exit;
4104 }
4105#ifdef EWP_EDL
4106 /* check if EDL is being used */
4107 if (dhd->pub.dongle_edl_support) {
4108 ret = dhd_prot_process_edl_complete(&dhd->pub,
4109 &dhd->event_data);
4110 } else {
4111 ret = dhd_event_logtrace_process_items(dhd);
4112 }
4113#else
4114 ret = dhd_event_logtrace_process_items(dhd);
4115#endif /* EWP_EDL */
4116 /* if ret > 0, bound has reached so to be fair to other
4117 * processes need to yield the scheduler.
4118 * The comment above yield()'s definition says:
4119 * If you want to use yield() to wait for something,
4120 * use wait_event().
4121 * If you want to use yield() to be 'nice' for others,
4122 * use cond_resched().
4123 * If you still want to use yield(), do not!
4124 */
4125 if (ret > 0) {
4126 cond_resched();
4127 OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
4128 } else if (ret < 0) {
4129 DHD_ERROR(("%s: ERROR should not reach here\n",
4130 __FUNCTION__));
4131 }
4132 } while (ret > 0);
4133 }
4134 if (tsk->flush_ind) {
4135 DHD_ERROR(("%s: flushed\n", __FUNCTION__));
4136 dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
4137 tsk->flush_ind = 0;
4138 complete(&tsk->flushed);
4139 }
4140 } else {
4141 DHD_ERROR(("%s: unexpected break\n", __FUNCTION__));
4142 dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
4143 break;
4144 }
4145 }
4146exit:
4147 dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
4148 complete_and_exit(&tsk->completed, 0);
4149}
4150#else
4151static void
4152dhd_event_logtrace_process(struct work_struct * work)
4153{
4154 int ret = 0;
4155/* Ignore compiler warnings due to -Werror=cast-qual */
4156#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4157#pragma GCC diagnostic push
4158#pragma GCC diagnostic ignored "-Wcast-qual"
4159#endif // endif
4160 struct delayed_work *dw = to_delayed_work(work);
4161 struct dhd_info *dhd =
4162 container_of(dw, struct dhd_info, event_log_dispatcher_work);
4163#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4164#pragma GCC diagnostic pop
4165#endif // endif
4166#ifdef EWP_EDL
4167 if (dhd->pub.dongle_edl_support) {
4168 ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
4169 } else {
4170 ret = dhd_event_logtrace_process_items(dhd);
4171 }
4172#else
4173 ret = dhd_event_logtrace_process_items(dhd);
4174#endif /* EWP_EDL */
4175
4176 if (ret > 0) {
4177 schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
4178 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
4179 }
4180
4181 return;
4182}
4183#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4184
4185void
4186dhd_schedule_logtrace(void *dhd_info)
4187{
4188 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
4189
4190#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4191 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4192 binary_sema_up(&dhd->thr_logtrace_ctl);
4193 } else {
4194 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4195 dhd->thr_logtrace_ctl.thr_pid));
4196 }
4197#else
4198 schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
4199#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4200 return;
4201}
4202
4203void
4204dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
4205{
4206#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4207 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4208 PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4209 } else {
4210 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4211 dhd->thr_logtrace_ctl.thr_pid));
4212 }
4213#else
4214 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
4215#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4216}
4217
4218void
4219dhd_flush_logtrace_process(dhd_info_t *dhd)
4220{
4221#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4222 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4223 PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4224 } else {
4225 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4226 dhd->thr_logtrace_ctl.thr_pid));
4227 }
4228#else
4229 flush_delayed_work(&dhd->event_log_dispatcher_work);
4230#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4231}
4232
4233int
4234dhd_init_logtrace_process(dhd_info_t *dhd)
4235{
4236#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4237 dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
4238 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
4239 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4240 DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
4241 return BCME_ERROR;
4242 } else {
4243 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succeeded\n", __FUNCTION__,
4244 dhd->thr_logtrace_ctl.thr_pid));
4245 }
4246#else
4247 INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
4248#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4249 return BCME_OK;
4250}
4251
4252int
4253dhd_reinit_logtrace_process(dhd_info_t *dhd)
4254{
4255#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4256 /* Re-init only if PROC_STOP from dhd_stop was called
4257 * which can be checked via thr_pid
4258 */
4259 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4260 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
4261 0, "dhd_logtrace_thread");
4262 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4263 DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
4264 return BCME_ERROR;
4265 } else {
4266 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succeeded\n", __FUNCTION__,
4267 dhd->thr_logtrace_ctl.thr_pid));
4268 }
4269 }
4270#else
4271 /* No need to re-init for WQ, as cancel_delayed_work_sync
4272 * will not delete the WQ
4273 */
4274#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4275 return BCME_OK;
4276}
4277
4278void
4279dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4280{
4281 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4282
4283#ifdef PCIE_FULL_DONGLE
4284 /* Add ifidx in the PKTTAG */
4285 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
4286#endif /* PCIE_FULL_DONGLE */
4287 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
4288
4289 dhd_schedule_logtrace(dhd);
4290}
4291
4292void
4293dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
4294{
4295 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4296 struct sk_buff *skb;
4297
4298 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
4299#ifdef DHD_USE_STATIC_CTRLBUF
4300 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4301#else
4302 PKTFREE(dhdp->osh, skb, FALSE);
4303#endif /* DHD_USE_STATIC_CTRLBUF */
4304 }
4305}
4306
4307void
4308dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
4309{
4310 struct sk_buff *skb = NULL;
4311 uint32 pktsize = 0;
4312 void *pkt = NULL;
4313 info_buf_payload_hdr_t *infobuf = NULL;
4314 dhd_info_t *dhd = dhdp->info;
4315 uint8 *pktdata = NULL;
4316
4317 if (!msg)
4318 return;
4319
4320 /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
4321 infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
4322 pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
4323 sizeof(uint32));
4324 pkt = PKTGET(dhdp->osh, pktsize, FALSE);
4325 if (!pkt) {
4326 DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
4327 } else {
4328 PKTSETLEN(dhdp->osh, pkt, pktsize);
4329 pktdata = PKTDATA(dhdp->osh, pkt);
4330 memcpy(pktdata, msg, pktsize);
4331 /* For infobuf packets assign skb->dev with
4332 * Primary interface n/w device
4333 */
4334 skb = PKTTONATIVE(dhdp->osh, pkt);
4335 skb->dev = dhd->iflist[0]->net;
4336 /* Send pkt UP */
4337 dhd_netif_rx_ni(skb);
4338 }
4339}
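/* Layout sketch for the pktsize computation above (illustrative length):
 * with ltoh16(infobuf->length) == 100, the packet sent up is
 *
 *	sizeof(uint32)                    infobuf_ver
 *	+ sizeof(info_buf_payload_hdr_t)  payload header
 *	+ 100                             msgtrace_hdr_t plus data
 *
 * i.e. infobuf->length already covers everything after the payload
 * header, so only the version word and the header itself are added.
 */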
4340#endif /* SHOW_LOGTRACE */
4341
4342/** Called when a frame is received by the dongle on interface 'ifidx' */
4343void
4344dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
4345{
4346 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4347 struct sk_buff *skb;
4348 uchar *eth;
4349 uint len;
4350 void *data, *pnext = NULL;
4351 int i;
4352 dhd_if_t *ifp;
4353 wl_event_msg_t event;
4354 int tout_rx = 0;
4355 int tout_ctrl = 0;
4356 void *skbhead = NULL;
4357 void *skbprev = NULL;
4358 uint16 protocol;
4359 unsigned char *dump_data;
4360#ifdef DHD_MCAST_REGEN
4361 uint8 interface_role;
4362 if_flow_lkup_t *if_flow_lkup;
4363 unsigned long flags;
4364#endif // endif
4365#ifdef DHD_WAKE_STATUS
4366 int pkt_wake = 0;
4367 wake_counts_t *wcp = NULL;
4368#endif /* DHD_WAKE_STATUS */
4369
4370 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4371 BCM_REFERENCE(dump_data);
4372
4373 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
4374 struct ether_header *eh;
4375
4376 pnext = PKTNEXT(dhdp->osh, pktbuf);
4377 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
4378
4379 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
4380 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
4381 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
4382 */
4383 if (ifidx == DHD_DUMMY_INFO_IF) {
4384 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
4385 * context in case of PCIe FD, in case of other bus this will be from
4386 * DPC context. If we get bunch of events from Dongle then printing all
4387 * of them from Tasklet/DPC context that too in data path is costly.
4388 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
4389 * events with type WLC_E_TRACE.
4390 * We'll print this console logs from the WorkQueue context by enqueing SKB
4391 * here and Dequeuing will be done in WorkQueue and will be freed only if
4392 * logtrace_pkt_sendup is TRUE
4393 */
4394#ifdef SHOW_LOGTRACE
4395 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
4396#else /* !SHOW_LOGTRACE */
4397 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
4398 * free the PKT here itself
4399 */
4400#ifdef DHD_USE_STATIC_CTRLBUF
4401 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4402#else
4403 PKTFREE(dhdp->osh, pktbuf, FALSE);
4404#endif /* DHD_USE_STATIC_CTRLBUF */
4405#endif /* SHOW_LOGTRACE */
4406 continue;
4407 }
4408#ifdef DHD_WAKE_STATUS
4409#ifdef BCMDBUS
4410 wcp = NULL;
4411#else
4412 pkt_wake = dhd_bus_get_bus_wake(dhdp);
4413 wcp = dhd_bus_get_wakecount(dhdp);
4414#endif /* BCMDBUS */
4415 if (wcp == NULL) {
4416 /* If wakeinfo count buffer is null do not update wake count values */
4417 pkt_wake = 0;
4418 }
4419#endif /* DHD_WAKE_STATUS */
4420
4421 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4422
4423 if (ifidx >= DHD_MAX_IFS) {
4424 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
4425 __FUNCTION__, ifidx));
4426 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4427#ifdef DHD_USE_STATIC_CTRLBUF
4428 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4429#else
4430 PKTFREE(dhdp->osh, pktbuf, FALSE);
4431#endif /* DHD_USE_STATIC_CTRLBUF */
4432 } else {
4433 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4434 }
4435 continue;
4436 }
4437
4438 ifp = dhd->iflist[ifidx];
4439 if (ifp == NULL) {
4440 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
4441 __FUNCTION__));
4442 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4443#ifdef DHD_USE_STATIC_CTRLBUF
4444 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4445#else
4446 PKTFREE(dhdp->osh, pktbuf, FALSE);
4447#endif /* DHD_USE_STATIC_CTRLBUF */
4448 } else {
4449 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4450 }
4451 continue;
4452 }
4453
4454 /* Dropping only data packets before registering net device to avoid kernel panic */
4455#ifndef PROP_TXSTATUS_VSDB
4456 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
4457 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4458#else
4459 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
4460 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4461#endif /* PROP_TXSTATUS_VSDB */
4462 {
4463 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
4464 __FUNCTION__));
4465 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4466 continue;
4467 }
4468
4469#ifdef PROP_TXSTATUS
4470 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
4471 /* WLFC may send header only packet when
4472 there is an urgent message but no packet to
4473 piggy-back on
4474 */
4475 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4476 continue;
4477 }
4478#endif // endif
4479#ifdef DHD_L2_FILTER
4480 /* If block_ping is enabled drop the ping packet */
4481 if (ifp->block_ping) {
4482 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
4483 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4484 continue;
4485 }
4486 }
4487 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
4488 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4489 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4490 continue;
4491 }
4492 }
4493 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4494 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
4495
4496 /* Drop the packets if l2 filter has processed it already
4497 * otherwise continue with the normal path
4498 */
4499 if (ret == BCME_OK) {
4500 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4501 continue;
4502 }
4503 }
4504 if (ifp->block_tdls) {
4505 if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
4506 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4507 continue;
4508 }
4509 }
4510#endif /* DHD_L2_FILTER */
4511
4512#ifdef DHD_MCAST_REGEN
4513 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
4514 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
4515 ASSERT(if_flow_lkup);
4516
4517 interface_role = if_flow_lkup[ifidx].role;
4518 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
4519
4520 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
4521 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
4522 ETHER_ISUCAST(eh->ether_dhost)) {
4523 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
4524#ifdef DHD_PSTA
4525 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
4526 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
4527 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
4528 if (ifidx != 0) {
4529 /* Let the primary in PSTA interface handle this
4530 * frame after unicast to Multicast conversion
4531 */
4532 ifp = dhd_get_ifp(dhdp, 0);
4533 ASSERT(ifp);
4534 }
4535 }
4536 }
4537#endif /* PSTA */
4538 }
4539#endif /* MCAST_REGEN */
4540
4541#ifdef DHDTCPSYNC_FLOOD_BLK
4542 if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
4543 int delta_sec;
4544 int delta_sync;
4545 int sync_per_sec;
4546 u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
4547 ifp->tsync_rcvd ++;
4548 delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
4549 delta_sec = curr_time - ifp->last_sync;
4550 if (delta_sec > 1) {
4551 sync_per_sec = delta_sync/delta_sec;
4552 if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
4553 schedule_work(&ifp->blk_tsfl_work);
4554 DHD_ERROR(("ifidx %d: TCP SYN flood attack suspected! "
4555 "SYNs received: %d pkt/sec\n",
4556 ifidx, sync_per_sec));
4557 }
4558 dhd_reset_tcpsync_info_by_ifp(ifp);
4559 }
4560
4561 }
4562#endif /* DHDTCPSYNC_FLOOD_BLK */
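 /* Worked example for the detection above (hypothetical rates): 300 more
 * SYNs received than SYN-ACKs sent over a 2 second window gives
 * sync_per_sec = 300 / 2 = 150; if that exceeds TCP_SYNC_FLOOD_LIMIT,
 * blk_tsfl_work is scheduled and the per-interface counters are reset.
 */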
4563
4564#ifdef DHDTCPACK_SUPPRESS
4565 dhd_tcpdata_info_get(dhdp, pktbuf);
4566#endif // endif
4567 skb = PKTTONATIVE(dhdp->osh, pktbuf);
4568
4569 ASSERT(ifp);
4570 skb->dev = ifp->net;
4571#ifdef DHD_WET
4572 /* wet related packet proto manipulation should be done in DHD
4573 * since dongle doesn't have complete payload
4574 */
4575 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
4576 pktbuf) < 0)) {
4577 DHD_INFO(("%s:%s: wet recv proc failed\n",
4578 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
4579 }
4580#endif /* DHD_WET */
4581
4582#ifdef DHD_PSTA
4583 if (PSR_ENABLED(dhdp) &&
4584 (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
4585 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
4586 dhd_ifname(dhdp, ifidx)));
4587 }
4588#endif /* DHD_PSTA */
4589
4590#ifdef PCIE_FULL_DONGLE
4591 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
4592 (!ifp->ap_isolate)) {
4593 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4594 if (ETHER_ISUCAST(eh->ether_dhost)) {
4595 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
4596 dhd_sendpkt(dhdp, ifidx, pktbuf);
4597 continue;
4598 }
4599 } else {
4600 void *npktbuf = NULL;
4601 if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
4602 (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) {
4603 dhd_sendpkt(dhdp, ifidx, npktbuf);
4604 }
4605 }
4606 }
4607#endif /* PCIE_FULL_DONGLE */
4608#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
4609 if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
4610 (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
4611 (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
4612 DHD_ERROR(("%s: Reassoc is in progress. "
4613 "Drop EAPOL M1 frame\n", __FUNCTION__));
4614 PKTFREE(dhdp->osh, pktbuf, FALSE);
4615 continue;
4616 }
4617#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4618 /* Get the protocol, maintain skb around eth_type_trans()
4619 * The main reason for this hack is a limitation of Linux 2.4,
4620 * where 'eth_type_trans' does its internal skb_pull using
4621 * 'net->hard_header_len' instead of ETH_HLEN. To avoid copying
4622 * the packet coming from the network stack when adding the BDC,
4623 * hardware header etc., at network interface registration we set
4624 * 'net->hard_header_len' to ETH_HLEN plus the extra space required
4625 * for the BDC, hardware header etc., and not just ETH_HLEN
4626 */
4627 eth = skb->data;
4628 len = skb->len;
4629 dump_data = skb->data;
4630 protocol = (skb->data[12] << 8) | skb->data[13];
4631
4632 if (protocol == ETHER_TYPE_802_1X) {
4633 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
4634#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
4635 wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
4636#endif /* WL_CFG80211 && WL_WPS_SYNC */
4637#ifdef DHD_4WAYM4_FAIL_DISCONNECT
4638 if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
4639 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
4640 }
4641#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
4642 }
4643 dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
4644
4645 skb->protocol = eth_type_trans(skb, skb->dev);
4646
4647 if (skb->pkt_type == PACKET_MULTICAST) {
4648 dhd->pub.rx_multicast++;
4649 ifp->stats.multicast++;
4650 }
4651
4652 skb->data = eth;
4653 skb->len = len;
4654
4655 DHD_DBG_PKT_MON_RX(dhdp, skb);
4656 /* Strip header, count, deliver upward */
4657 skb_pull(skb, ETH_HLEN);
4658
4659 /* Process special event packets and then discard them */
4660 memset(&event, 0, sizeof(event));
4661
4662 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
4663 bcm_event_msg_u_t evu;
4664 int ret_event, event_type;
4665 void *pkt_data = skb_mac_header(skb);
4666
4667 ret_event = wl_host_event_get_data(pkt_data, len, &evu);
4668
4669 if (ret_event != BCME_OK) {
4670 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
4671 __FUNCTION__, ret_event));
4672#ifdef DHD_USE_STATIC_CTRLBUF
4673 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4674#else
4675 PKTFREE(dhdp->osh, pktbuf, FALSE);
4676#endif // endif
4677 continue;
4678 }
4679
4680 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
4681 event_type = ntoh32_ua((void *)&event.event_type);
4682#ifdef SHOW_LOGTRACE
4683 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
4684 * context in case of PCIe FD, in case of other bus this will be from
4685 * DPC context. If we get bunch of events from Dongle then printing all
4686 * of them from Tasklet/DPC context that too in data path is costly.
4687 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
4688 * events with type WLC_E_TRACE.
4689 * We'll print this console logs from the WorkQueue context by enqueing SKB
4690 * here and Dequeuing will be done in WorkQueue and will be freed only if
4691 * logtrace_pkt_sendup is true
4692 */
4693 if (event_type == WLC_E_TRACE) {
4694 DHD_EVENT(("%s: WLC_E_TRACE\n", __FUNCTION__));
4695 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
4696 continue;
4697 }
4698#endif /* SHOW_LOGTRACE */
4699
4700 ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
4701
4702 wl_event_to_host_order(&event);
4703 if (!tout_ctrl)
4704 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
4705
4706#if defined(PNO_SUPPORT)
4707 if (event_type == WLC_E_PFN_NET_FOUND) {
4708 /* enforce custom wake lock to guarantee that the kernel is not suspended */
4709 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
4710 }
4711#endif /* PNO_SUPPORT */
4712 if (numpkt != 1) {
4713 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
4714 __FUNCTION__));
4715 }
4716
4717#ifdef DHD_WAKE_STATUS
4718 if (unlikely(pkt_wake)) {
4719#ifdef DHD_WAKE_EVENT_STATUS
4720 if (event.event_type < WLC_E_LAST) {
4721 wcp->rc_event[event.event_type]++;
4722 wcp->rcwake++;
4723 pkt_wake = 0;
4724 }
4725#endif /* DHD_WAKE_EVENT_STATUS */
4726 }
4727#endif /* DHD_WAKE_STATUS */
4728
4729 /* For the delete-virtual-interface event, wl_host_event returns a positive
4730 * i/f index; do not proceed, just free the pkt.
4731 */
4732 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
4733 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
4734 __FUNCTION__));
4735#ifdef DHD_USE_STATIC_CTRLBUF
4736 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4737#else
4738 PKTFREE(dhdp->osh, pktbuf, FALSE);
4739#endif // endif
4740 continue;
4741 }
4742
4743 /*
4744 * For the event packets, there is a possibility
4745 * of ifidx getting modified. Thus update the ifp
4746 * once again.
4747 */
4748 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
4749 ifp = dhd->iflist[ifidx];
4750#ifndef PROP_TXSTATUS_VSDB
4751 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
4752#else
4753 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
4754 dhd->pub.up))
4755#endif /* PROP_TXSTATUS_VSDB */
4756 {
4757 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
4758 __FUNCTION__));
4759#ifdef DHD_USE_STATIC_CTRLBUF
4760 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4761#else
4762 PKTFREE(dhdp->osh, pktbuf, FALSE);
4763#endif // endif
4764 continue;
4765 }
4766
4767#ifdef SENDPROB
4768 if (dhdp->wl_event_enabled ||
4769 (dhdp->recv_probereq && (event.event_type == WLC_E_PROBREQ_MSG)))
4770#else
4771 if (dhdp->wl_event_enabled)
4772#endif
4773 {
4774#ifdef DHD_USE_STATIC_CTRLBUF
4775 /* If event bufs are allocated via static buf pool
4776 * and wl events are enabled, make a copy, free the
4777 * local one and send the copy up.
4778 */
4779 void *npkt = PKTDUP(dhdp->osh, skb);
4780 /* Clone event and send it up */
4781 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4782 if (npkt) {
4783 skb = npkt;
4784 } else {
4785 DHD_ERROR(("skb clone failed. dropping event.\n"));
4786 continue;
4787 }
4788#endif /* DHD_USE_STATIC_CTRLBUF */
4789 } else {
4790 /* If event forwarding was not explicitly enabled, drop events */
4791#ifdef DHD_USE_STATIC_CTRLBUF
4792 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4793#else
4794 PKTFREE(dhdp->osh, pktbuf, FALSE);
4795#endif /* DHD_USE_STATIC_CTRLBUF */
4796 continue;
4797 }
4798 } else {
4799 tout_rx = DHD_PACKET_TIMEOUT_MS;
4800
4801#ifdef PROP_TXSTATUS
4802 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
4803#endif /* PROP_TXSTATUS */
4804
4805#ifdef DHD_WAKE_STATUS
4806 if (unlikely(pkt_wake)) {
4807 wcp->rxwake++;
4808#ifdef DHD_WAKE_RX_STATUS
4809#define ETHER_ICMP6_HEADER 20
4810#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
4811#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
4812#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
4813
4814 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
4815 wcp->rx_arp++;
4816 if (dump_data[0] == 0xFF) { /* Broadcast */
4817 wcp->rx_bcast++;
4818 } else if (dump_data[0] & 0x01) { /* Multicast */
4819 wcp->rx_mcast++;
4820 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
4821 wcp->rx_multi_ipv6++;
4822 if ((skb->len > ETHER_ICMP6_HEADER) &&
4823 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
4824 wcp->rx_icmpv6++;
4825 if (skb->len > ETHER_ICMPV6_TYPE) {
4826 switch (dump_data[ETHER_ICMPV6_TYPE]) {
4827 case NDISC_ROUTER_ADVERTISEMENT:
4828 wcp->rx_icmpv6_ra++;
4829 break;
4830 case NDISC_NEIGHBOUR_ADVERTISEMENT:
4831 wcp->rx_icmpv6_na++;
4832 break;
4833 case NDISC_NEIGHBOUR_SOLICITATION:
4834 wcp->rx_icmpv6_ns++;
4835 break;
4836 }
4837 }
4838 }
4839 } else if (dump_data[2] == 0x5E) {
4840 wcp->rx_multi_ipv4++;
4841 } else {
4842 wcp->rx_multi_other++;
4843 }
4844 } else { /* Unicast */
4845 wcp->rx_ucast++;
4846 }
4847#undef ETHER_ICMP6_HEADER
4848#undef ETHER_IPV6_SADDR
4849#undef ETHER_IPV6_DAADR
4850#undef ETHER_ICMPV6_TYPE
4851#endif /* DHD_WAKE_RX_STATUS */
4852 pkt_wake = 0;
4853 }
4854#endif /* DHD_WAKE_STATUS */
4855 }
4856
4857#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
4858 ifp->net->last_rx = jiffies;
4859#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
4860
4861 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
4862 dhdp->dstats.rx_bytes += skb->len;
4863 dhdp->rx_packets++; /* Local count */
4864 ifp->stats.rx_bytes += skb->len;
4865 ifp->stats.rx_packets++;
4866 }
4867
4868 if (in_interrupt()) {
4869 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4870 __FUNCTION__, __LINE__);
4871 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4872#if defined(DHD_LB_RXP)
4873 netif_receive_skb(skb);
4874#else /* !defined(DHD_LB_RXP) */
4875 netif_rx(skb);
4876#endif /* !defined(DHD_LB_RXP) */
4877 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4878 } else {
4879 if (dhd->rxthread_enabled) {
4880 if (!skbhead)
4881 skbhead = skb;
4882 else
4883 PKTSETNEXT(dhdp->osh, skbprev, skb);
4884 skbprev = skb;
4885 } else {
4886
4887 /* If the receive is not processed inside an ISR,
4888 * the softirqd must be woken explicitly to service
4889 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4890 * by netif_rx_ni(), but in earlier kernels, we need
4891 * to do it manually.
4892 */
4893 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4894 __FUNCTION__, __LINE__);
4895
4896#if defined(ARGOS_NOTIFY_CB)
4897 argos_register_notifier_deinit();
4898#endif // endif
4899#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
4900 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
4901#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
4902 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4903#if defined(DHD_LB_RXP)
4904 netif_receive_skb(skb);
4905#else /* !defined(DHD_LB_RXP) */
4906 netif_rx_ni(skb);
4907#endif /* defined(DHD_LB_RXP) */
4908 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4909 }
4910 }
4911 }
4912
4913 if (dhd->rxthread_enabled && skbhead)
4914 dhd_sched_rxf(dhdp, skbhead);
4915
4916 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
4917 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
4918}
4919
4920void
4921dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
4922{
4923 /* Linux version has nothing to do */
4924 return;
4925}
4926
4927void
4928dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4929{
4930 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4931 struct ether_header *eh;
4932 uint16 type;
4933
4934 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4935
4936 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4937 type = ntoh16(eh->ether_type);
4938
4939 if (type == ETHER_TYPE_802_1X) {
4940 atomic_dec(&dhd->pend_8021x_cnt);
4941 }
4942
4943#ifdef PROP_TXSTATUS
4944 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4945 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
4946 uint datalen = PKTLEN(dhd->pub.osh, txp);
4947 if (ifp != NULL) {
4948 if (success) {
4949 dhd->pub.tx_packets++;
4950 ifp->stats.tx_packets++;
4951 ifp->stats.tx_bytes += datalen;
4952 } else {
4953 ifp->stats.tx_dropped++;
4954 }
4955 }
4956 }
4957#endif // endif
4958}
4959
4960static struct net_device_stats *
4961dhd_get_stats(struct net_device *net)
4962{
4963 dhd_info_t *dhd = DHD_DEV_INFO(net);
4964 dhd_if_t *ifp;
4965
4966 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4967
4968 if (!dhd) {
4969 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
4970 goto error;
4971 }
4972
4973 ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
4974 if (!ifp) {
4975 /* return empty stats */
4976 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4977 goto error;
4978 }
4979
4980 if (dhd->pub.up) {
4981 /* Use the protocol to get dongle stats */
4982 dhd_prot_dstats(&dhd->pub);
4983 }
4984 return &ifp->stats;
4985
4986error:
4987 memset(&net->stats, 0, sizeof(net->stats));
4988 return &net->stats;
4989}
4990
4991#ifndef BCMDBUS
4992static int
4993dhd_watchdog_thread(void *data)
4994{
4995 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4996 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4997 /* This thread doesn't need any user-level access,
4998 * so get rid of all our resources
4999 */
5000 if (dhd_watchdog_prio > 0) {
5001 struct sched_param param;
5002 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
5003 dhd_watchdog_prio:(MAX_RT_PRIO-1);
5004 setScheduler(current, SCHED_FIFO, &param);
5005 }
5006
5007 while (1) {
5008 if (down_interruptible (&tsk->sema) == 0) {
5009 unsigned long flags;
5010 unsigned long jiffies_at_start = jiffies;
5011 unsigned long time_lapse;
5012#ifdef BCMPCIE
5013 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5014#endif /* BCMPCIE */
5015
5016 SMP_RD_BARRIER_DEPENDS();
5017 if (tsk->terminated) {
5018#ifdef BCMPCIE
5019 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5020#endif /* BCMPCIE */
5021 break;
5022 }
5023
5024 if (dhd->pub.dongle_reset == FALSE) {
5025 DHD_TIMER(("%s:\n", __FUNCTION__));
5026 dhd_bus_watchdog(&dhd->pub);
5027
5028 DHD_GENERAL_LOCK(&dhd->pub, flags);
5029 /* Count the tick for reference */
5030 dhd->pub.tickcnt++;
5031#ifdef DHD_L2_FILTER
5032 dhd_l2_filter_watchdog(&dhd->pub);
5033#endif /* DHD_L2_FILTER */
5034 time_lapse = jiffies - jiffies_at_start;
5035
5036 /* Reschedule the watchdog */
5037 if (dhd->wd_timer_valid) {
5038 mod_timer(&dhd->timer,
5039 jiffies +
5040 msecs_to_jiffies(dhd_watchdog_ms) -
5041 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
5042 }
5043 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5044 }
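 /* Worked example for the reschedule above: with
 * dhd_watchdog_ms == 10 and a tick that took 3 ms (time_lapse),
 * the timer is re-armed at
 * jiffies + 10ms - min(10ms, 3ms) = jiffies + 7ms,
 * keeping the watchdog period fixed; a tick that overruns the
 * period re-arms immediately.
 */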
5045#ifdef BCMPCIE
5046 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5047#endif /* BCMPCIE */
5048 } else {
5049 break;
5050 }
5051 }
5052
5053 complete_and_exit(&tsk->completed, 0);
5054}
5055
5056static void dhd_watchdog(ulong data)
5057{
5058 dhd_info_t *dhd = (dhd_info_t *)data;
5059 unsigned long flags;
5060
5061 if (dhd->pub.dongle_reset) {
5062 return;
5063 }
5064
5065 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
5066 up(&dhd->thr_wdt_ctl.sema);
5067 return;
5068 }
5069
5070#ifdef BCMPCIE
5071 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5072#endif /* BCMPCIE */
5073 /* Call the bus module watchdog */
5074 dhd_bus_watchdog(&dhd->pub);
5075
5076 DHD_GENERAL_LOCK(&dhd->pub, flags);
5077 /* Count the tick for reference */
5078 dhd->pub.tickcnt++;
5079
5080#ifdef DHD_L2_FILTER
5081 dhd_l2_filter_watchdog(&dhd->pub);
5082#endif /* DHD_L2_FILTER */
5083 /* Reschedule the watchdog */
5084 if (dhd->wd_timer_valid)
5085 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
5086 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5087#ifdef BCMPCIE
5088 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5089#endif /* BCMPCIE */
5090}
5091
5092#ifdef ENABLE_ADAPTIVE_SCHED
5093static void
5094dhd_sched_policy(int prio)
5095{
5096 struct sched_param param;
5097 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
5098 param.sched_priority = 0;
5099 setScheduler(current, SCHED_NORMAL, &param);
5100 } else {
5101 if (get_scheduler_policy(current) != SCHED_FIFO) {
5102 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
5103 setScheduler(current, SCHED_FIFO, &param);
5104 }
5105 }
5106}
5107#endif /* ENABLE_ADAPTIVE_SCHED */
5108#ifdef DEBUG_CPU_FREQ
5109static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
5110{
5111 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
5112 struct cpufreq_freqs *freq = data;
5113 if (dhd) {
5114 if (!dhd->new_freq)
5115 goto exit;
5116 if (val == CPUFREQ_POSTCHANGE) {
5117 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
5118 freq->new, freq->cpu));
5119 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
5120 }
5121 }
5122exit:
5123 return 0;
5124}
5125#endif /* DEBUG_CPU_FREQ */
5126
5127static int
5128dhd_dpc_thread(void *data)
5129{
5130 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5131 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5132
5133 /* This thread doesn't need any user-level access,
5134 * so get rid of all our resources
5135 */
5136 if (dhd_dpc_prio > 0)
5137 {
5138 struct sched_param param;
5139 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
5140 setScheduler(current, SCHED_FIFO, &param);
5141 }
5142
5143#ifdef CUSTOM_DPC_CPUCORE
5144 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
5145#endif // endif
5146#ifdef CUSTOM_SET_CPUCORE
5147 dhd->pub.current_dpc = current;
5148#endif /* CUSTOM_SET_CPUCORE */
5149 /* Run until signal received */
5150 while (1) {
5151 if (dhd->pub.conf->dpc_cpucore >= 0) {
5152 printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
5153 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
5154 dhd->pub.conf->dpc_cpucore = -1;
5155 }
5156 if (!binary_sema_down(tsk)) {
5157#ifdef ENABLE_ADAPTIVE_SCHED
5158 dhd_sched_policy(dhd_dpc_prio);
5159#endif /* ENABLE_ADAPTIVE_SCHED */
5160 SMP_RD_BARRIER_DEPENDS();
5161 if (tsk->terminated) {
5162 break;
5163 }
5164
5165 /* Call bus dpc unless it indicated down (then clean stop) */
5166 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5167#ifdef DEBUG_DPC_THREAD_WATCHDOG
5168 int resched_cnt = 0;
5169#endif /* DEBUG_DPC_THREAD_WATCHDOG */
5170 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
5171 while (dhd_bus_dpc(dhd->pub.bus)) {
5172 /* process all data */
5173#ifdef DEBUG_DPC_THREAD_WATCHDOG
5174 resched_cnt++;
5175 if (resched_cnt > MAX_RESCHED_CNT) {
5176 DHD_INFO(("%s Calling msleep to "
5177 "let other processes run.\n",
5178 __FUNCTION__));
5179 dhd->pub.dhd_bug_on = true;
5180 resched_cnt = 0;
5181 OSL_SLEEP(1);
5182 }
5183#endif /* DEBUG_DPC_THREAD_WATCHDOG */
5184 }
5185 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
5186 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5187 } else {
5188 if (dhd->pub.up)
5189 dhd_bus_stop(dhd->pub.bus, TRUE);
5190 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5191 }
5192 } else {
5193 break;
5194 }
5195 }
5196 complete_and_exit(&tsk->completed, 0);
5197}
5198
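/*
 * dhd_dpc_thread() above is the recurring DHD kthread shape: block on a
 * binary semaphore, re-check the "terminated" flag after every wakeup (with a
 * read barrier), service the bus until it reports no more work, release the
 * wake lock, repeat. Reduced to its skeleton, reusing the tsk_ctl_t control
 * block from above (illustrative only):
 */
#if 0
static int example_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;

	while (down_interruptible(&tsk->sema) == 0) {
		smp_rmb();		/* pairs with the terminate-side write */
		if (tsk->terminated)
			break;
		/* ... drain all pending bus work here ... */
	}
	complete_and_exit(&tsk->completed, 0);
	return 0;			/* not reached */
}
#endif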
5199static int
5200dhd_rxf_thread(void *data)
5201{
5202 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5203 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5204#if defined(WAIT_DEQUEUE)
5205#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
5206 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
5207#endif // endif
5208 dhd_pub_t *pub = &dhd->pub;
5209
5210 /* This thread doesn't need any user-level access,
5211 * so get rid of all our resources
5212 */
5213 if (dhd_rxf_prio > 0)
5214 {
5215 struct sched_param param;
5216 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
5217 setScheduler(current, SCHED_FIFO, &param);
5218 }
5219
5220#ifdef CUSTOM_SET_CPUCORE
5221 dhd->pub.current_rxf = current;
5222#endif /* CUSTOM_SET_CPUCORE */
5223 /* Run until signal received */
5224 while (1) {
5225 if (dhd->pub.conf->rxf_cpucore >= 0) {
5226 printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
5227 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
5228 dhd->pub.conf->rxf_cpucore = -1;
5229 }
5230 if (down_interruptible(&tsk->sema) == 0) {
5231 void *skb;
5232#ifdef ENABLE_ADAPTIVE_SCHED
5233 dhd_sched_policy(dhd_rxf_prio);
5234#endif /* ENABLE_ADAPTIVE_SCHED */
5235
5236 SMP_RD_BARRIER_DEPENDS();
5237
5238 if (tsk->terminated) {
5239 break;
5240 }
5241 skb = dhd_rxf_dequeue(pub);
5242
5243 if (skb == NULL) {
5244 continue;
5245 }
5246 while (skb) {
5247 void *skbnext = PKTNEXT(pub->osh, skb);
5248 PKTSETNEXT(pub->osh, skb, NULL);
5249 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5250 __FUNCTION__, __LINE__);
5251 netif_rx_ni(skb);
5252 skb = skbnext;
5253 }
5254#if defined(WAIT_DEQUEUE)
5255 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
5256 OSL_SLEEP(1);
5257 watchdogTime = OSL_SYSUPTIME();
5258 }
5259#endif // endif
5260
5261 DHD_OS_WAKE_UNLOCK(pub);
5262 } else {
5263 break;
5264 }
5265 }
5266 complete_and_exit(&tsk->completed, 0);
5267}
5268
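/*
 * The rx thread above hands a chained packet list to the stack one buffer at
 * a time: remember the next pointer, NULL-terminate the current packet, pass
 * it up with netif_rx_ni() (process context), advance. The same walk in plain
 * sk_buff terms (sketch; netif_rx_ni() was folded into netif_rx() in 5.18):
 */
#if 0
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_deliver_chain(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;	/* detach before handing to the stack */
		netif_rx_ni(skb);	/* may wake softirqd, so not for ISRs */
		skb = next;
	}
}
#endif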
5269#ifdef BCMPCIE
5270void dhd_dpc_enable(dhd_pub_t *dhdp)
5271{
5272#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
5273 dhd_info_t *dhd;
5274
5275 if (!dhdp || !dhdp->info)
5276 return;
5277 dhd = dhdp->info;
5278#endif /* DHD_LB_RXP || DHD_LB_TXP */
5279
5280#ifdef DHD_LB_RXP
5281 __skb_queue_head_init(&dhd->rx_pend_queue);
5282#endif /* DHD_LB_RXP */
5283
5284#ifdef DHD_LB_TXP
5285 skb_queue_head_init(&dhd->tx_pend_queue);
5286#endif /* DHD_LB_TXP */
5287}
5288#endif /* BCMPCIE */
5289
5290#ifdef BCMPCIE
5291void
5292dhd_dpc_kill(dhd_pub_t *dhdp)
5293{
5294 dhd_info_t *dhd;
5295
5296 if (!dhdp) {
5297 return;
5298 }
5299
5300 dhd = dhdp->info;
5301
5302 if (!dhd) {
5303 return;
5304 }
5305
5306 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5307 tasklet_kill(&dhd->tasklet);
5308 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
5309 }
5310
5311#ifdef DHD_LB
5312#ifdef DHD_LB_RXP
5313 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
5314 __skb_queue_purge(&dhd->rx_pend_queue);
5315#endif /* DHD_LB_RXP */
5316#ifdef DHD_LB_TXP
5317 cancel_work_sync(&dhd->tx_dispatcher_work);
5318 skb_queue_purge(&dhd->tx_pend_queue);
5319#endif /* DHD_LB_TXP */
5320
5321 /* Kill the Load Balancing Tasklets */
5322#if defined(DHD_LB_TXC)
5323 tasklet_kill(&dhd->tx_compl_tasklet);
5324#endif /* DHD_LB_TXC */
5325#if defined(DHD_LB_RXC)
5326 tasklet_kill(&dhd->rx_compl_tasklet);
5327#endif /* DHD_LB_RXC */
5328#if defined(DHD_LB_TXP)
5329 tasklet_kill(&dhd->tx_tasklet);
5330#endif /* DHD_LB_TXP */
5331#endif /* DHD_LB */
5332}
5333
5334void
5335dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
5336{
5337 dhd_info_t *dhd;
5338
5339 if (!dhdp) {
5340 return;
5341 }
5342
5343 dhd = dhdp->info;
5344
5345 if (!dhd) {
5346 return;
5347 }
5348
5349 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5350 tasklet_kill(&dhd->tasklet);
5351 }
5352}
5353#endif /* BCMPCIE */
5354
5355static void
5356dhd_dpc(ulong data)
5357{
5358 dhd_info_t *dhd;
5359
5360 dhd = (dhd_info_t *)data;
5361
5362 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
5363 * down below , wake lock is set,
5364 * the tasklet is initialized in dhd_attach()
5365 */
5366 /* Call bus dpc unless it indicated down (then clean stop) */
5367 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5368#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
5369 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5370#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
5371 if (dhd_bus_dpc(dhd->pub.bus)) {
5372 tasklet_schedule(&dhd->tasklet);
5373 }
5374 } else {
5375 dhd_bus_stop(dhd->pub.bus, TRUE);
5376 }
5377}
5378
5379void
5380dhd_sched_dpc(dhd_pub_t *dhdp)
5381{
5382 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5383
5384 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5385 DHD_OS_WAKE_LOCK(dhdp);
5386 /* If the semaphore does not get up,
5387 * wake unlock should be done here
5388 */
5389 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5390 DHD_OS_WAKE_UNLOCK(dhdp);
5391 }
5392 return;
5393 } else {
5394 dhd_bus_set_dpc_sched_time(dhdp);
5395 tasklet_schedule(&dhd->tasklet);
5396 }
5397}
5398#endif /* BCMDBUS */
5399
5400static void
5401dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5402{
5403 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5404
5405 DHD_OS_WAKE_LOCK(dhdp);
5406
5407 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5408 do {
5409 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5410 break;
5411 } while (1);
5412 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5413 up(&dhd->thr_rxf_ctl.sema);
5414 }
5415 return;
5416}
5417
5418#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5419#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5420
5421#ifdef TOE
5422/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
5423static int
5424dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5425{
5426 char buf[32];
5427 int ret;
5428
5429 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
5430
5431 if (ret < 0) {
5432 if (ret == -EIO) {
5433 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
5434 ifidx)));
5435 return -EOPNOTSUPP;
5436 }
5437
5438 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5439 return ret;
5440 }
5441
5442 memcpy(toe_ol, buf, sizeof(uint32));
5443 return 0;
5444}
5445
5446/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
5447static int
5448dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5449{
5450 int toe, ret;
5451
5452 /* Set toe_ol as requested */
5453 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
5454 if (ret < 0) {
5455 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5456 dhd_ifname(&dhd->pub, ifidx), ret));
5457 return ret;
5458 }
5459
5460 /* Enable toe globally only if any components are enabled. */
5461 toe = (toe_ol != 0);
5462 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
5463 if (ret < 0) {
5464 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5465 return ret;
5466 }
5467
5468 return 0;
5469}
5470#endif /* TOE */
5471
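/*
 * Usage note: dhd_toe_get()/dhd_toe_set() above treat "toe_ol" as a bitmap of
 * offload components and "toe" as the global enable, so changing one
 * component is a read-modify-write on the bitmap. Sketch (TOE builds only):
 */
#if 0
static int example_enable_tx_csum(dhd_info_t *dhd, int ifidx)
{
	uint32 toe_ol;
	int ret;

	ret = dhd_toe_get(dhd, ifidx, &toe_ol);
	if (ret < 0)
		return ret;
	toe_ol |= TOE_TX_CSUM_OL;		/* turn on TX checksum offload */
	return dhd_toe_set(dhd, ifidx, toe_ol);	/* also refreshes global "toe" */
}
#endif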
5472#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
5473void dhd_set_scb_probe(dhd_pub_t *dhd)
5474{
5475 wl_scb_probe_t scb_probe;
5476 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
5477 int ret;
5478
5479 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5480 return;
5481 }
5482
5483 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
5484 if (ret < 0) {
5485 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
5486 }
5487
5488 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
5489
5490 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
5491
5492 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
5493 TRUE);
5494 if (ret < 0) {
5495 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
5496 return;
5497 }
5498}
5499#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5500
5501static void
5502dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
5503{
5504 dhd_info_t *dhd = DHD_DEV_INFO(net);
5505
5506 snprintf(info->driver, sizeof(info->driver), "wl");
5507 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
5508}
5509
5510struct ethtool_ops dhd_ethtool_ops = {
5511 .get_drvinfo = dhd_ethtool_get_drvinfo
5512};
5513
5514static int
5515dhd_ethtool(dhd_info_t *dhd, void *uaddr)
5516{
5517 struct ethtool_drvinfo info;
5518 char drvname[sizeof(info.driver)];
5519 uint32 cmd;
5520#ifdef TOE
5521 struct ethtool_value edata;
5522 uint32 toe_cmpnt, csum_dir;
5523 int ret;
5524#endif // endif
5525
5526 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5527
5528 /* all ethtool calls start with a cmd word */
5529 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
5530 return -EFAULT;
5531
5532 switch (cmd) {
5533 case ETHTOOL_GDRVINFO:
5534 /* Copy out any request driver name */
5535 if (copy_from_user(&info, uaddr, sizeof(info)))
5536 return -EFAULT;
5537 strncpy(drvname, info.driver, sizeof(drvname) - 1);
5538 drvname[sizeof(drvname) - 1] = '\0';
5539
5540 /* clear struct for return */
5541 memset(&info, 0, sizeof(info));
5542 info.cmd = cmd;
5543
5544 /* if dhd requested, identify ourselves */
5545 if (strcmp(drvname, "?dhd") == 0) {
5546 snprintf(info.driver, sizeof(info.driver), "dhd");
5547 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
5548 info.version[sizeof(info.version) - 1] = '\0';
5549 }
5550
5551 /* otherwise, require dongle to be up */
5552 else if (!dhd->pub.up) {
5553 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
5554 return -ENODEV;
5555 }
5556
5557 /* finally, report dongle driver type */
5558 else if (dhd->pub.iswl)
5559 snprintf(info.driver, sizeof(info.driver), "wl");
5560 else
5561 snprintf(info.driver, sizeof(info.driver), "xx");
5562
5563 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
5564 if (copy_to_user(uaddr, &info, sizeof(info)))
5565 return -EFAULT;
5566 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
5567 (int)sizeof(drvname), drvname, info.driver));
5568 break;
5569
5570#ifdef TOE
5571 /* Get toe offload components from dongle */
5572 case ETHTOOL_GRXCSUM:
5573 case ETHTOOL_GTXCSUM:
5574 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5575 return ret;
5576
5577 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5578
5579 edata.cmd = cmd;
5580 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
5581
5582 if (copy_to_user(uaddr, &edata, sizeof(edata)))
5583 return -EFAULT;
5584 break;
5585
5586 /* Set toe offload components in dongle */
5587 case ETHTOOL_SRXCSUM:
5588 case ETHTOOL_STXCSUM:
5589 if (copy_from_user(&edata, uaddr, sizeof(edata)))
5590 return -EFAULT;
5591
5592 /* Read the current settings, update and write back */
5593 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5594 return ret;
5595
5596 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5597
5598 if (edata.data != 0)
5599 toe_cmpnt |= csum_dir;
5600 else
5601 toe_cmpnt &= ~csum_dir;
5602
5603 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
5604 return ret;
5605
5606 /* If setting TX checksum mode, tell Linux the new mode */
5607 if (cmd == ETHTOOL_STXCSUM) {
5608 if (edata.data)
5609 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
5610 else
5611 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
5612 }
5613
5614 break;
5615#endif /* TOE */
5616
5617 default:
5618 return -EOPNOTSUPP;
5619 }
5620
5621 return 0;
5622}
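/*
 * From user space the ETHTOOL_GDRVINFO case above is reached through the
 * regular SIOCETHTOOL ioctl, and passing "?dhd" as the incoming driver name
 * asks this driver to identify itself. Minimal user-space sketch (assumes an
 * AF_INET socket and interface name "wlan0"):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_query_dhd(int sock)
{
	struct ethtool_drvinfo info;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GDRVINFO;
	strcpy(info.driver, "?dhd");		/* ask DHD to identify itself */
	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;
	return ioctl(sock, SIOCETHTOOL, &ifr);	/* on success info.driver is "dhd" */
}
#endif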
5623
5624static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
5625{
5626 if (!dhdp) {
5627 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
5628 return FALSE;
5629 }
5630
5631 if (!dhdp->up)
5632 return FALSE;
5633
5634#if !defined(BCMPCIE) && !defined(BCMDBUS)
5635 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
5636 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
5637 return FALSE;
5638 }
5639#endif /* !BCMPCIE && !BCMDBUS */
5640
5641 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
5642 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
5643#ifdef BCMPCIE
5644 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
5645 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
5646 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
5647#else
5648 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
5649 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
5650#endif /* BCMPCIE */
5651 if (dhdp->hang_reason == 0) {
5652 if (dhdp->dongle_trap_occured) {
5653 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
5654#ifdef BCMPCIE
5655 } else if (dhdp->d3ackcnt_timeout) {
5656 dhdp->hang_reason = dhdp->is_sched_error ?
5657 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
5658 HANG_REASON_D3_ACK_TIMEOUT;
5659#endif /* BCMPCIE */
5660 } else {
5661 dhdp->hang_reason = dhdp->is_sched_error ?
5662 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
5663 HANG_REASON_IOCTL_RESP_TIMEOUT;
5664 }
5665 }
5666 printf("%s\n", info_string);
5667 net_os_send_hang_message(net);
5668 return TRUE;
5669 }
5670 return FALSE;
5671}
5672
5673#ifdef WL_MONITOR
5674bool
5675dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
5676{
5677 return (dhd->info->monitor_type != 0);
5678}
5679
5680void
5681dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
5682{
5683 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5684 {
5685 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
5686 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
5687 switch (amsdu_flag) {
5688 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
5689 default:
5690 if (!dhd->monitor_skb) {
5691 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
5692 == NULL)
5693 return;
5694 }
5695 if (dhd->monitor_type && dhd->monitor_dev)
5696 dhd->monitor_skb->dev = dhd->monitor_dev;
5697 else {
5698 PKTFREE(dhdp->osh, pkt, FALSE);
5699 dhd->monitor_skb = NULL;
5700 return;
5701 }
5702 dhd->monitor_skb->protocol =
5703 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
5704 dhd->monitor_len = 0;
5705 break;
5706
5707 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
5708 if (!dhd->monitor_skb) {
5709 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
5710 == NULL)
5711 return;
5712 dhd->monitor_len = 0;
5713 }
5714 if (dhd->monitor_type && dhd->monitor_dev)
5715 dhd->monitor_skb->dev = dhd->monitor_dev;
5716 else {
5717 PKTFREE(dhdp->osh, pkt, FALSE);
5718 dev_kfree_skb(dhd->monitor_skb);
5719 return;
5720 }
5721 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
5722 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5723 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
5724 PKTFREE(dhdp->osh, pkt, FALSE);
5725 return;
5726
5727 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
5728 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
5729 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5730 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
5731 PKTFREE(dhdp->osh, pkt, FALSE);
5732 return;
5733
5734 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
5735 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
5736 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5737 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
5738 PKTFREE(dhdp->osh, pkt, FALSE);
5739 skb_put(dhd->monitor_skb, dhd->monitor_len);
5740 dhd->monitor_skb->protocol =
5741 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
5742 dhd->monitor_len = 0;
5743 break;
5744 }
5745 }
5746
5747 if (in_interrupt()) {
5748 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
5749 __FUNCTION__, __LINE__);
5750 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5751 netif_rx(dhd->monitor_skb);
5752 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5753 } else {
5754 /* If the receive is not processed inside an ISR,
5755 * the softirqd must be woken explicitly to service
5756 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5757 * by netif_rx_ni(), but in earlier kernels, we need
5758 * to do it manually.
5759 */
5760 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
5761 __FUNCTION__, __LINE__);
5762
5763 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5764 netif_rx_ni(dhd->monitor_skb);
5765 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5766 }
5767
5768 dhd->monitor_skb = NULL;
5769}
5770
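/*
 * The switch above is a three-state reassembly machine for monitor frames
 * that arrive split across several DMA completions: FIRST_PKT allocates the
 * aggregate skb and copies the first fragment, INTER_PKT appends, and
 * LAST_PKT appends, commits the final length with skb_put(), and falls out
 * to delivery. The same shape with hypothetical names, reusing the driver's
 * MAX_MON_PKT_SIZE bound (sketch only):
 */
#if 0
#include <linux/skbuff.h>
#include <linux/string.h>

enum frag_state { FRAG_FIRST, FRAG_INTER, FRAG_LAST };

static struct sk_buff *agg;	/* aggregate under construction */
static unsigned int agg_len;	/* bytes accumulated so far */

static struct sk_buff *
example_reassemble(enum frag_state st, const void *frag, unsigned int len)
{
	switch (st) {
	case FRAG_FIRST:
		agg = dev_alloc_skb(MAX_MON_PKT_SIZE);
		agg_len = 0;
		/* fall through to append the first fragment */
	case FRAG_INTER:
		memcpy(agg->data + agg_len, frag, len);
		agg_len += len;
		return NULL;			/* more fragments expected */
	case FRAG_LAST:
		memcpy(agg->data + agg_len, frag, len);
		agg_len += len;
		skb_put(agg, agg_len);		/* commit length; caller delivers */
		return agg;
	}
	return NULL;
}
#endif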
5771typedef struct dhd_mon_dev_priv {
5772 struct net_device_stats stats;
5773} dhd_mon_dev_priv_t;
5774
5775#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
5776#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
5777#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
5778
5779static int
5780dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
5781{
5782 PKTFREE(NULL, skb, FALSE);
5783 return 0;
5784}
5785
5786#if defined(BT_OVER_SDIO)
5787
5788void
5789dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
5790{
5791 dhdp->info->bus_user_count++;
5792}
5793
5794void
5795dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
5796{
5797 dhdp->info->bus_user_count--;
5798}
5799
5800/* Return values:
5801 * Success: Returns 0
5802 * Failure: Returns -1 or errno code
5803 */
5804int
5805dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
5806{
5807 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
5808 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5809 int ret = 0;
5810
5811 mutex_lock(&dhd->bus_user_lock);
5812 ++dhd->bus_user_count;
5813 if (dhd->bus_user_count < 0) {
5814 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
5815 ret = -1;
5816 goto exit;
5817 }
5818
5819 if (dhd->bus_user_count == 1) {
5820
5821 dhd->pub.hang_was_sent = 0;
5822
5823 /* First user, turn on WL_REG, start the bus */
5824 DHD_ERROR(("%s(): First user, Turn On WL_REG & start the bus\n", __FUNCTION__));
5825
5826 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
5827 /* Enable F1 */
5828 ret = dhd_bus_resume(dhdp, 0);
5829 if (ret) {
5830 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
5831 __FUNCTION__, ret));
5832 goto exit;
5833 }
5834 }
5835
5836 dhd_update_fw_nv_path(dhd);
5837 /* update firmware and nvram path to sdio bus */
5838 dhd_bus_update_fw_nv_path(dhd->pub.bus,
5839 dhd->fw_path, dhd->nv_path);
5840 /* download the firmware, Enable F2 */
5841 /* TODO: Should be done only in case of FW switch */
5842 ret = dhd_bus_devreset(dhdp, FALSE);
5843 dhd_bus_resume(dhdp, 1);
5844 if (!ret) {
5845 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
5846 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
5847 ret = -EFAULT;
5848 }
5849 } else {
5850 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
5851 }
5852 } else {
5853 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
5854 __FUNCTION__, dhd->bus_user_count));
5855 }
5856exit:
5857 mutex_unlock(&dhd->bus_user_lock);
5858 return ret;
5859}
5860EXPORT_SYMBOL(dhd_bus_get);
5861
5862/* Return values:
5863 * Success: Returns 0
5864 * Failure: Returns -1 or errno code
5865 */
5866int
5867dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
5868{
5869 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
5870 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5871 int ret = 0;
5872 BCM_REFERENCE(owner);
5873
5874 mutex_lock(&dhd->bus_user_lock);
5875 --dhd->bus_user_count;
5876 if (dhd->bus_user_count < 0) {
5877 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
5878 dhd->bus_user_count = 0;
5879 ret = -1;
5880 goto exit;
5881 }
5882
5883 if (dhd->bus_user_count == 0) {
5884 /* Last user, stop the bus and turn Off WL_REG */
5885 DHD_ERROR(("%s(): There are no owners left, Turn Off WL_REG & stop the bus \r\n",
5886 __FUNCTION__));
5887#ifdef PROP_TXSTATUS
5888 if (dhd->pub.wlfc_enabled) {
5889 dhd_wlfc_deinit(&dhd->pub);
5890 }
5891#endif /* PROP_TXSTATUS */
5892#ifdef PNO_SUPPORT
5893 if (dhd->pub.pno_state) {
5894 dhd_pno_deinit(&dhd->pub);
5895 }
5896#endif /* PNO_SUPPORT */
5897#ifdef RTT_SUPPORT
5898 if (dhd->pub.rtt_state) {
5899 dhd_rtt_deinit(&dhd->pub);
5900 }
5901#endif /* RTT_SUPPORT */
5902 ret = dhd_bus_devreset(dhdp, TRUE);
5903 if (!ret) {
5904 dhd_bus_suspend(dhdp);
5905 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
5906 }
5907 } else {
5908 DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
5909 __FUNCTION__, dhd->bus_user_count));
5910 }
5911exit:
5912 mutex_unlock(&dhd->bus_user_lock);
5913 return ret;
5914}
5915EXPORT_SYMBOL(dhd_bus_put);
5916
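/*
 * dhd_bus_get()/dhd_bus_put() above give the BT stack refcounted access to
 * the shared SDIO bus: the first get powers the chip and downloads firmware,
 * the last put resets the dongle and powers it off, with the mutex keeping
 * the count consistent. Typical balanced caller pattern (sketch; assumes a
 * BT_MODULE bus_owner_t id for the BT side):
 */
#if 0
int example_bt_use_bus(wlan_bt_handle_t handle)
{
	int err = dhd_bus_get(handle, BT_MODULE);	/* may power up + download FW */

	if (err)
		return err;
	/* ... BT traffic over the shared SDIO bus ... */
	return dhd_bus_put(handle, BT_MODULE);		/* last put powers down */
}
#endif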
5917int
5918dhd_net_bus_get(struct net_device *dev)
5919{
5920 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5921 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
5922}
5923
5924int
5925dhd_net_bus_put(struct net_device *dev)
5926{
5927 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5928 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
5929}
5930
5931/*
5932 * Function to enable the Bus Clock
5933 * Returns BCME_OK on success and BCME_xxx on failure
5934 *
5935 * This function is not callable from non-sleepable context
5936 */
5937int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
5938{
5939 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
5940
5941 int ret;
5942
5943 dhd_os_sdlock(dhdp);
5944 /*
5945 * The second argument is TRUE, that means, we expect
5946 * the function to "wait" until the clocks are really
5947 * available
5948 */
5949 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
5950 dhd_os_sdunlock(dhdp);
5951
5952 return ret;
5953}
5954EXPORT_SYMBOL(dhd_bus_clk_enable);
5955
5956/*
5957 * Function to disable the Bus Clock
5958 * Returns BCME_OK on success and BCME_xxx on failure
5959 *
5960 * This function is not callable from non-sleepable context
5961 */
5962int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
5963{
5964 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
5965
5966 int ret;
5967
5968 dhd_os_sdlock(dhdp);
5969 /*
5970 * The second argument is TRUE, that means, we expect
5971 * the function to "wait" until the clocks are really
5972 * disabled
5973 */
5974 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
5975 dhd_os_sdunlock(dhdp);
5976
5977 return ret;
5978}
5979EXPORT_SYMBOL(dhd_bus_clk_disable);
5980
5981/*
5982 * Function to reset bt_use_count counter to zero.
5983 *
5984 * This function is not callable from non-sleepable context
5985 */
5986void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
5987{
5988 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
5989
5990 /* take the lock and reset bt use count */
5991 dhd_os_sdlock(dhdp);
5992 dhdsdio_reset_bt_use_count(dhdp->bus);
5993 dhd_os_sdunlock(dhdp);
5994}
5995EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
5996
5997void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
5998{
5999 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6000 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
6001
6002 dhdp->hang_was_sent = 0;
6003
6004 dhd_os_send_hang_message(&dhd->pub);
6005}
6006EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
6007
6008#endif /* BT_OVER_SDIO */
6009
6010static int
6011dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6012{
6013 return 0;
6014}
6015
6016static struct net_device_stats*
6017dhd_monitor_get_stats(struct net_device *dev)
6018{
6019 return &DHD_MON_DEV_STATS(dev);
6020}
6021
6022static const struct net_device_ops netdev_monitor_ops =
6023{
6024 .ndo_start_xmit = dhd_monitor_start,
6025 .ndo_get_stats = dhd_monitor_get_stats,
6026 .ndo_do_ioctl = dhd_monitor_ioctl
6027};
6028
6029static void
6030dhd_add_monitor_if(dhd_info_t *dhd)
6031{
6032 struct net_device *dev;
6033 char *devname;
6034 uint32 scan_suppress = FALSE;
6035 int ret = BCME_OK;
6036
6037 if (!dhd) {
6038 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6039 return;
6040 }
6041
6042 if (dhd->monitor_dev) {
6043 DHD_ERROR(("%s: monitor i/f already exists\n", __FUNCTION__));
6044 return;
6045 }
6046
6047 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
6048 if (!dev) {
6049 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
6050 return;
6051 }
6052
6053 devname = "radiotap";
6054
6055 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
6056
6057#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
6058#define ARPHRD_IEEE80211_PRISM 802
6059#endif // endif
6060
6061#ifndef ARPHRD_IEEE80211_RADIOTAP
6062#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
6063#endif /* ARPHRD_IEEE80211_RADIOTAP */
6064
6065 dev->type = ARPHRD_IEEE80211_RADIOTAP;
6066
6067 dev->netdev_ops = &netdev_monitor_ops;
6068
6069 if (register_netdevice(dev)) {
6070 DHD_ERROR(("%s, register_netdevice failed for %s\n",
6071 __FUNCTION__, dev->name));
6072 free_netdev(dev);
6073 return;
6074 }
6075
6076 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6077 scan_suppress = TRUE;
6078 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
6079 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6080 sizeof(scan_suppress), NULL, 0, TRUE);
6081 if (ret < 0) {
6082 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
6083 }
6084 }
6085
6086 dhd->monitor_dev = dev;
6087}
6088
6089static void
6090dhd_del_monitor_if(dhd_info_t *dhd)
6091{
6092 int ret = BCME_OK;
6093 uint32 scan_suppress = FALSE;
6094
6095 if (!dhd) {
6096 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6097 return;
6098 }
6099
6100 if (!dhd->monitor_dev) {
6101 DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__));
6102 return;
6103 }
6104
6105 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6106 scan_suppress = FALSE;
6107 /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
6108 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6109 sizeof(scan_suppress), NULL, 0, TRUE);
6110 if (ret < 0) {
6111 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
6112 }
6113 }
6114
6115 if (dhd->monitor_dev) {
6116 if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
6117 free_netdev(dhd->monitor_dev);
6118 } else {
6119 unregister_netdevice(dhd->monitor_dev);
6120 }
6121 dhd->monitor_dev = NULL;
6122 }
6123}
6124
6125static void
6126dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
6127{
6128 dhd_info_t *dhd = pub->info;
6129
6130 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
6131
6132 dhd_net_if_lock_local(dhd);
6133 if (!val) {
6134 /* Delete monitor */
6135 dhd_del_monitor_if(dhd);
6136 } else {
6137 /* Add monitor */
6138 dhd_add_monitor_if(dhd);
6139 }
6140 dhd->monitor_type = val;
6141 dhd_net_if_unlock_local(dhd);
6142}
6143#endif /* WL_MONITOR */
6144
6145#if defined(DHD_H2D_LOG_TIME_SYNC)
6146/*
6147 * Helper function:
6148 * Used for RTE console message time syncing with Host printk
6149 */
6150void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
6151{
6152 dhd_info_t *info = dhdp->info;
6153
6154 /* Ideally the "state" should be always TRUE */
6155 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
6156 DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
6157 dhd_deferred_work_rte_log_time_sync,
6158 DHD_WQ_WORK_PRIORITY_LOW);
6159}
6160
6161void
6162dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
6163{
6164 dhd_info_t *dhd_info = handle;
6165 dhd_pub_t *dhd;
6166
6167 if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
6168 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
6169 return;
6170 }
6171
6172 if (!dhd_info) {
6173 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6174 return;
6175 }
6176
6177 dhd = &dhd_info->pub;
6178
6179 /*
6180 * Function to send IOVAR for console timesyncing
6181 * between Host and Dongle.
6182 * If the IOVAR fails,
6183 * 1. dhd_rte_time_sync_ms is set to 0 and
6184 * 2. HOST Dongle console time sync will *not* happen.
6185 */
6186 dhd_h2d_log_time_sync(dhd);
6187}
6188#endif /* DHD_H2D_LOG_TIME_SYNC */
6189
6190int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
6191{
6192 int bcmerror = BCME_OK;
6193 int buflen = 0;
6194 struct net_device *net;
6195
6196 net = dhd_idx2net(pub, ifidx);
6197 if (!net) {
6198 bcmerror = BCME_BADARG;
6199 /*
6200 * A bad netdev pointer means the DHD can't communicate
6201 * with higher layers, so just return from here
6202 */
6203 return bcmerror;
6204 }
6205
6206 /* check for local dhd ioctl and handle it */
6207 if (ioc->driver == DHD_IOCTL_MAGIC) {
6208 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
6209 if (data_buf)
6210 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
6211 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
6212 if (bcmerror)
6213 pub->bcmerror = bcmerror;
6214 goto done;
6215 }
6216
6217 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
6218 if (data_buf)
6219 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
6220
6221#ifndef BCMDBUS
6222 /* send to dongle (must be up, and wl). */
6223 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
6224 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
6225 int ret;
6226 if (atomic_read(&exit_in_progress)) {
6227 DHD_ERROR(("%s module exit in progress\n", __func__));
6228 bcmerror = BCME_DONGLE_DOWN;
6229 goto done;
6230 }
6231 ret = dhd_bus_start(pub);
6232 if (ret != 0) {
6233 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6234 bcmerror = BCME_DONGLE_DOWN;
6235 goto done;
6236 }
6237 } else {
6238 bcmerror = BCME_DONGLE_DOWN;
6239 goto done;
6240 }
6241 }
6242
6243 if (!pub->iswl) {
6244 bcmerror = BCME_DONGLE_DOWN;
6245 goto done;
6246 }
6247#endif /* !BCMDBUS */
6248
6249 /*
6250 * Flush the TX queue if required for proper message serialization:
6251 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
6252 * prevent M4 encryption and
6253 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
6254 * prevent disassoc frame being sent before WPS-DONE frame.
6255 */
6256 if (ioc->cmd == WLC_SET_KEY ||
6257 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6258 strncmp("wsec_key", data_buf, 9) == 0) ||
6259 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6260 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
6261 ioc->cmd == WLC_DISASSOC)
6262 dhd_wait_pend8021x(net);
6263
6264 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
6265 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
6266 bcmerror = BCME_UNSUPPORTED;
6267 goto done;
6268 }
6269
6270 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
6271
6272#ifdef WL_MONITOR
6273 /* Intercept monitor ioctl here, add/del monitor if */
6274 if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
6275 int val = 0;
6276 if (data_buf != NULL && buflen != 0) {
6277 if (buflen >= 4) {
6278 val = *(int*)data_buf;
6279 } else if (buflen >= 2) {
6280 val = *(short*)data_buf;
6281 } else {
6282 val = *(char*)data_buf;
6283 }
6284 }
6285 dhd_set_monitor(pub, ifidx, val);
6286 }
6287#endif /* WL_MONITOR */
6288
6289done:
6290 dhd_check_hang(net, pub, bcmerror);
6291
6292 return bcmerror;
6293}
6294
6295/**
6296 * Called by the OS (optionally via a wrapper function).
6297 * @param net Linux per dongle instance
6298 * @param ifr Linux request structure
6299 * @param cmd e.g. SIOCETHTOOL
6300 */
6301static int
6302dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
6303{
6304 dhd_info_t *dhd = DHD_DEV_INFO(net);
6305 dhd_ioctl_t ioc;
6306 int bcmerror = 0;
6307 int ifidx;
6308 int ret;
6309 void *local_buf = NULL; /**< buffer in kernel space */
6310 void __user *ioc_buf_user = NULL; /**< buffer in user space */
6311 u16 buflen = 0;
6312
6313 if (atomic_read(&exit_in_progress)) {
6314 DHD_ERROR(("%s module exit in progress\n", __func__));
6315 bcmerror = BCME_DONGLE_DOWN;
6316 return OSL_ERROR(bcmerror);
6317 }
6318
6319 DHD_OS_WAKE_LOCK(&dhd->pub);
6320 DHD_PERIM_LOCK(&dhd->pub);
6321
6322 /* Interface up check for built-in type */
6323 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
6324 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
6325 DHD_PERIM_UNLOCK(&dhd->pub);
6326 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6327 return OSL_ERROR(BCME_NOTUP);
6328 }
6329
6330 ifidx = dhd_net2idx(dhd, net);
6331 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
6332
6333#if defined(WL_STATIC_IF)
6334 /* skip for static ndev when it is down */
6335 if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
6336 DHD_PERIM_UNLOCK(&dhd->pub);
6337 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6338 return -1;
6339 }
6340#endif /* WL_STATIC_IF */
6341
6342 if (ifidx == DHD_BAD_IF) {
6343 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
6344 DHD_PERIM_UNLOCK(&dhd->pub);
6345 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6346 return -1;
6347 }
6348
6349#if defined(WL_WIRELESS_EXT)
6350 /* linux wireless extensions */
6351 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
6352 /* may recurse, do NOT lock */
6353 ret = wl_iw_ioctl(net, ifr, cmd);
6354 DHD_PERIM_UNLOCK(&dhd->pub);
6355 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6356 return ret;
6357 }
6358#endif /* defined(WL_WIRELESS_EXT) */
6359
6360 if (cmd == SIOCETHTOOL) {
6361 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
6362 DHD_PERIM_UNLOCK(&dhd->pub);
6363 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6364 return ret;
6365 }
6366
6367 if (cmd == SIOCDEVPRIVATE+1) {
6368 ret = wl_android_priv_cmd(net, ifr);
6369 dhd_check_hang(net, &dhd->pub, ret);
6370 DHD_PERIM_UNLOCK(&dhd->pub);
6371 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6372 return ret;
6373 }
6374
6375 if (cmd != SIOCDEVPRIVATE) {
6376 DHD_PERIM_UNLOCK(&dhd->pub);
6377 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6378 return -EOPNOTSUPP;
6379 }
6380
6381 memset(&ioc, 0, sizeof(ioc));
6382
6383#ifdef CONFIG_COMPAT
6384#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
6385 if (in_compat_syscall())
6386#else
6387 if (is_compat_task())
6388#endif /* LINUX_VER >= 4.6 */
6389 {
6390 compat_wl_ioctl_t compat_ioc;
6391 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
6392 bcmerror = BCME_BADADDR;
6393 goto done;
6394 }
6395 ioc.cmd = compat_ioc.cmd;
6396 if (ioc.cmd & WLC_SPEC_FLAG) {
6397 memset(&ioc, 0, sizeof(ioc));
6398 /* Copy the ioc control structure part of ioctl request */
6399 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
6400 bcmerror = BCME_BADADDR;
6401 goto done;
6402 }
6403 ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
6404
6405 /* To differentiate between wl and dhd read 4 more bytes */
6406 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
6407 sizeof(uint)) != 0)) {
6408 bcmerror = BCME_BADADDR;
6409 goto done;
6410 }
6411
6412 } else { /* ioc.cmd & WLC_SPEC_FLAG */
6413 ioc.buf = compat_ptr(compat_ioc.buf);
6414 ioc.len = compat_ioc.len;
6415 ioc.set = compat_ioc.set;
6416 ioc.used = compat_ioc.used;
6417 ioc.needed = compat_ioc.needed;
6418 /* To differentiate between wl and dhd read 4 more bytes */
6419 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
6420 sizeof(uint)) != 0)) {
6421 bcmerror = BCME_BADADDR;
6422 goto done;
6423 }
6424 } /* ioc.cmd & WLC_SPEC_FLAG */
6425 } else
6426#endif /* CONFIG_COMPAT */
6427 {
6428 /* Copy the ioc control structure part of ioctl request */
6429 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
6430 bcmerror = BCME_BADADDR;
6431 goto done;
6432 }
6433#ifdef CONFIG_COMPAT
6434 ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/
6435#endif
6436
6437 /* To differentiate between wl and dhd read 4 more bytes */
6438 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
6439 sizeof(uint)) != 0)) {
6440 bcmerror = BCME_BADADDR;
6441 goto done;
6442 }
6443 }
6444
6445#ifndef CONFIG_VTS_SUPPORT
6446 if (!capable(CAP_NET_ADMIN)) {
6447 bcmerror = BCME_EPERM;
6448 goto done;
6449 }
6450#endif
6451
6452 /* Take backup of ioc.buf and restore later */
6453 ioc_buf_user = ioc.buf;
6454
6455 if (ioc.len > 0) {
6456 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
6457 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
6458 bcmerror = BCME_NOMEM;
6459 goto done;
6460 }
6461
6462 DHD_PERIM_UNLOCK(&dhd->pub);
6463 if (copy_from_user(local_buf, ioc.buf, buflen)) {
6464 DHD_PERIM_LOCK(&dhd->pub);
6465 bcmerror = BCME_BADADDR;
6466 goto done;
6467 }
6468 DHD_PERIM_LOCK(&dhd->pub);
6469
6470 *((char *)local_buf + buflen) = '\0';
6471
6472 /* For some platforms accessing userspace memory
6473 * of ioc.buf is causing kernel panic, so to avoid that
6474 * make ioc.buf pointing to kernel space memory local_buf
6475 */
6476 ioc.buf = local_buf;
6477 }
6478
6479 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
6480 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
6481 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
6482 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
6483 bcmerror = BCME_DONGLE_DOWN;
6484 goto done;
6485 }
6486
6487 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
6488
6489 /* Restore back userspace pointer to ioc.buf */
6490 ioc.buf = ioc_buf_user;
6491
6492 if (!bcmerror && buflen && local_buf && ioc.buf) {
6493 DHD_PERIM_UNLOCK(&dhd->pub);
6494 if (copy_to_user(ioc.buf, local_buf, buflen))
6495 bcmerror = -EFAULT;
6496 DHD_PERIM_LOCK(&dhd->pub);
6497 }
6498
6499done:
6500 if (local_buf)
6501 MFREE(dhd->pub.osh, local_buf, buflen+1);
6502
6503 DHD_PERIM_UNLOCK(&dhd->pub);
6504 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6505
6506 return OSL_ERROR(bcmerror);
6507}
6508
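/*
 * User-space tools (wl, dhdutil) reach dhd_ioctl_entry() above via
 * SIOCDEVPRIVATE on the wlan interface; the payload is a wl_ioctl_t followed
 * by a "driver" magic word that routes the request to the WL or the DHD
 * handler. Calling-convention sketch (user space; assumes an AF_INET socket,
 * "wlan0", the wl_ioctl_t layout from wlioctl.h, and a valid WL ioctl id):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int example_wl_ioctl(int sock, int cmd, void *buf, int len, int set)
{
	struct ifreq ifr;
	wl_ioctl_t ioc;				/* same layout the driver copies in */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = cmd;
	ioc.buf = buf;
	ioc.len = len;
	ioc.set = set;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ioc;
	return ioctl(sock, SIOCDEVPRIVATE, &ifr);	/* lands in dhd_ioctl_entry() */
}
#endif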
6509#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
6510/* Flags to indicate if we distinguish power off policy when
6511 * user set the menu "Keep Wi-Fi on during sleep" to "Never"
6512 */
6513int trigger_deep_sleep = 0;
6514#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
6515
6516#ifdef FIX_CPU_MIN_CLOCK
6517static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
6518{
6519 if (dhd) {
6520 mutex_init(&dhd->cpufreq_fix);
6521 dhd->cpufreq_fix_status = FALSE;
6522 }
6523 return 0;
6524}
6525
6526static void dhd_fix_cpu_freq(dhd_info_t *dhd)
6527{
6528 mutex_lock(&dhd->cpufreq_fix);
6529 if (dhd && !dhd->cpufreq_fix_status) {
6530 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
6531#ifdef FIX_BUS_MIN_CLOCK
6532 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
6533#endif /* FIX_BUS_MIN_CLOCK */
6534 DHD_ERROR(("pm_qos_add_request called\n"));
6535
6536 dhd->cpufreq_fix_status = TRUE;
6537 }
6538 mutex_unlock(&dhd->cpufreq_fix);
6539}
6540
6541static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
6542{
6543 mutex_lock(&dhd->cpufreq_fix);
6544 if (dhd && dhd->cpufreq_fix_status != TRUE) {
6545 mutex_unlock(&dhd->cpufreq_fix);
6546 return;
6547 }
6548
6549 pm_qos_remove_request(&dhd->dhd_cpu_qos);
6550#ifdef FIX_BUS_MIN_CLOCK
6551 pm_qos_remove_request(&dhd->dhd_bus_qos);
6552#endif /* FIX_BUS_MIN_CLOCK */
6553 DHD_ERROR(("pm_qos_remove_request called\n"));
6554
6555 dhd->cpufreq_fix_status = FALSE;
6556 mutex_unlock(&dhd->cpufreq_fix);
6557}
6558#endif /* FIX_CPU_MIN_CLOCK */
6559
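/*
 * The FIX_CPU_MIN_CLOCK helpers above pin a minimum CPU (and, with
 * FIX_BUS_MIN_CLOCK, bus) frequency via PM QoS for the lifetime of hostap
 * mode, with the mutex keeping add/remove balanced. The underlying kernel
 * API pairs up as below (sketch; PM_QOS_CPU_FREQ_MIN is the vendor QoS class
 * used above, not a mainline one):
 */
#if 0
#include <linux/pm_qos.h>

static struct pm_qos_request example_req;

static void example_pin_cpu_freq(void)
{
	pm_qos_add_request(&example_req, PM_QOS_CPU_FREQ_MIN, 300000); /* 300 MHz floor */
}

static void example_unpin_cpu_freq(void)
{
	pm_qos_remove_request(&example_req);	/* drop the floor */
}
#endif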
6560#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6561static int
6562dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
6563{
6564 int error;
6565 dhd_info_t *dhd = DHD_DEV_INFO(net);
6566
6567 if (atomic_read(&dhd->pub.block_bus))
6568 return -EHOSTDOWN;
6569
6570 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
6571 return BCME_ERROR;
6572
6573 error = dhd_ioctl_entry(net, ifr, cmd);
6574
6575 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
6576 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
6577
6578 return error;
6579}
6580#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6581
6582static int
6583dhd_stop(struct net_device *net)
6584{
6585 int ifidx = 0;
6586 bool skip_reset = false;
6587#if defined(WL_CFG80211)
6588 unsigned long flags = 0;
6589#ifdef WL_STATIC_IF
6590 struct bcm_cfg80211 *cfg = wl_get_cfg(net);
6591#endif /* WL_STATIC_IF */
6592#endif /* WL_CFG80211 */
6593 dhd_info_t *dhd = DHD_DEV_INFO(net);
6594 DHD_OS_WAKE_LOCK(&dhd->pub);
6595 DHD_PERIM_LOCK(&dhd->pub);
6596 printf("%s: Enter %s\n", __FUNCTION__, net->name);
6597 dhd->pub.rxcnt_timeout = 0;
6598 dhd->pub.txcnt_timeout = 0;
6599
6600#ifdef BCMPCIE
6601 dhd->pub.d3ackcnt_timeout = 0;
6602#endif /* BCMPCIE */
6603
6604 mutex_lock(&dhd->pub.ndev_op_sync);
6605
6606 if (dhd->pub.up == 0) {
6607 goto exit;
6608 }
6609
6610 dhd_if_flush_sta(DHD_DEV_IFP(net));
6611
6612#ifdef FIX_CPU_MIN_CLOCK
6613 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
6614 dhd_rollback_cpu_freq(dhd);
6615#endif /* FIX_CPU_MIN_CLOCK */
6616
6617 ifidx = dhd_net2idx(dhd, net);
6618 BCM_REFERENCE(ifidx);
6619
6620 DHD_ERROR(("%s: ######### dhd_stop called for ifidx=%d #########\n", __FUNCTION__, ifidx));
6621
6622#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
6623 /* If static if is operational, don't reset the chip */
6624 if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
6625 DHD_ERROR(("static if operational. skip chip reset.\n"));
6626 skip_reset = true;
6627 wl_cfg80211_sta_ifdown(net);
6628 goto exit;
6629 }
6630#endif /* WL_STATIC_IF && WL_CFG80211 */
6631
6632 DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
6633#ifdef WL_CFG80211
6634
6635 /* Disable Runtime PM before interface down */
6636 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
6637
6638 spin_lock_irqsave(&dhd->pub.up_lock, flags);
6639 dhd->pub.up = 0;
6640 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
6641#else
6642 dhd->pub.up = 0;
6643#endif /* WL_CFG80211 */
6644
6645#ifdef WL_CFG80211
6646 if (ifidx == 0) {
6647 dhd_if_t *ifp;
6648 wl_cfg80211_down(net);
6649
6650 ifp = dhd->iflist[0];
6651 /*
6652 * For CFG80211: Clean up all the left over virtual interfaces
6653 * when the primary Interface is brought down. [ifconfig wlan0 down]
6654 */
6655 if (!dhd_download_fw_on_driverload) {
6656 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
6657 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
6658 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
6659 int i;
6660#ifdef WL_CFG80211_P2P_DEV_IF
6661 wl_cfg80211_del_p2p_wdev(net);
6662#endif /* WL_CFG80211_P2P_DEV_IF */
6663#ifdef DHD_4WAYM4_FAIL_DISCONNECT
6664 dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
6665#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6666#ifdef DHD_PKTDUMP_ROAM
6667 dhd_dump_pkt_clear(&dhd->pub);
6668#endif /* DHD_PKTDUMP_ROAM */
6669
6670 dhd_net_if_lock_local(dhd);
6671 for (i = 1; i < DHD_MAX_IFS; i++)
6672 dhd_remove_if(&dhd->pub, i, FALSE);
6673
6674 if (ifp && ifp->net) {
6675 dhd_if_del_sta_list(ifp);
6676 }
6677#ifdef ARP_OFFLOAD_SUPPORT
6678 if (dhd_inetaddr_notifier_registered) {
6679 dhd_inetaddr_notifier_registered = FALSE;
6680 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
6681 }
6682#endif /* ARP_OFFLOAD_SUPPORT */
6683#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6684 if (dhd_inet6addr_notifier_registered) {
6685 dhd_inet6addr_notifier_registered = FALSE;
6686 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
6687 }
6688#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6689 dhd_net_if_unlock_local(dhd);
6690 }
6691#if 0
6692 // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
6693 cancel_work_sync(dhd->dhd_deferred_wq);
6694#endif
6695
6696#ifdef SHOW_LOGTRACE
6697 /* Wait till event logs work/kthread finishes */
6698 dhd_cancel_logtrace_process_sync(dhd);
6699#endif /* SHOW_LOGTRACE */
6700
6701#if defined(DHD_LB_RXP)
6702 __skb_queue_purge(&dhd->rx_pend_queue);
6703#endif /* DHD_LB_RXP */
6704
6705#if defined(DHD_LB_TXP)
6706 skb_queue_purge(&dhd->tx_pend_queue);
6707#endif /* DHD_LB_TXP */
6708 }
6709
6710#if defined(ARGOS_NOTIFY_CB)
6711 argos_register_notifier_deinit();
6712#endif // endif
6713#ifdef DHDTCPACK_SUPPRESS
6714 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6715#endif /* DHDTCPACK_SUPPRESS */
6716#if defined(DHD_LB_RXP)
6717 if (ifp && ifp->net == dhd->rx_napi_netdev) {
6718 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
6719 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6720 skb_queue_purge(&dhd->rx_napi_queue);
6721 napi_disable(&dhd->rx_napi_struct);
6722 netif_napi_del(&dhd->rx_napi_struct);
6723 dhd->rx_napi_netdev = NULL;
6724 }
6725#endif /* DHD_LB_RXP */
6726 }
6727#endif /* WL_CFG80211 */
6728
6729 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
6730
6731#ifdef PROP_TXSTATUS
6732 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
6733#endif // endif
6734#ifdef SHOW_LOGTRACE
6735 if (!dhd_download_fw_on_driverload) {
6736 /* Release the skbs from queue for WLC_E_TRACE event */
6737 dhd_event_logtrace_flush_queue(&dhd->pub);
6738 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
6739 if (dhd->event_data.fmts) {
6740 MFREE(dhd->pub.osh, dhd->event_data.fmts,
6741 dhd->event_data.fmts_size);
6742 dhd->event_data.fmts = NULL;
6743 }
6744 if (dhd->event_data.raw_fmts) {
6745 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
6746 dhd->event_data.raw_fmts_size);
6747 dhd->event_data.raw_fmts = NULL;
6748 }
6749 if (dhd->event_data.raw_sstr) {
6750 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
6751 dhd->event_data.raw_sstr_size);
6752 dhd->event_data.raw_sstr = NULL;
6753 }
6754 if (dhd->event_data.rom_raw_sstr) {
6755 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
6756 dhd->event_data.rom_raw_sstr_size);
6757 dhd->event_data.rom_raw_sstr = NULL;
6758 }
6759 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
6760 }
6761 }
6762#endif /* SHOW_LOGTRACE */
6763#ifdef APF
6764 dhd_dev_apf_delete_filter(net);
6765#endif /* APF */
6766
6767 /* Stop the protocol module */
6768 dhd_prot_stop(&dhd->pub);
6769
6770 OLD_MOD_DEC_USE_COUNT;
6771exit:
6772 if (skip_reset == false) {
6773#if defined(WL_WIRELESS_EXT)
6774 if (ifidx == 0) {
6775 wl_iw_down(net, &dhd->pub);
6776 }
6777#endif /* defined(WL_WIRELESS_EXT) */
6778#ifdef WL_ESCAN
6779 if (ifidx == 0) {
6780 wl_escan_down(net, &dhd->pub);
6781 }
6782#endif /* WL_ESCAN */
6783 if (ifidx == 0 && !dhd_download_fw_on_driverload) {
6784#if defined(BT_OVER_SDIO)
6785 dhd_bus_put(&dhd->pub, WLAN_MODULE);
6786 wl_android_set_wifi_on_flag(FALSE);
6787#else
6788 wl_android_wifi_off(net, TRUE);
6789#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
6790#ifdef WL_EXT_IAPSTA
6791 wl_ext_iapsta_dettach_netdev(net, ifidx);
6792#endif /* WL_EXT_IAPSTA */
6793#ifdef WL_ESCAN
6794 wl_escan_event_dettach(net, &dhd->pub);
6795#endif /* WL_ESCAN */
6796 wl_ext_event_dettach_netdev(net, ifidx);
6797#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
6798#endif /* BT_OVER_SDIO */
6799 }
6800#ifdef SUPPORT_DEEP_SLEEP
6801 else {
6802 /* CSP#505233: Flags to indicate if we distinguish
6803 * power off policy when user set the menu
6804 * "Keep Wi-Fi on during sleep" to "Never"
6805 */
6806 if (trigger_deep_sleep) {
6807 dhd_deepsleep(net, 1);
6808 trigger_deep_sleep = 0;
6809 }
6810 }
6811#endif /* SUPPORT_DEEP_SLEEP */
6812 dhd->pub.hang_was_sent = 0;
6813 dhd->pub.hang_was_pending = 0;
6814
6815 /* Clear country spec for built-in type driver */
6816 if (!dhd_download_fw_on_driverload) {
6817 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
6818 dhd->pub.dhd_cspec.rev = 0;
6819 dhd->pub.dhd_cspec.ccode[0] = 0x00;
6820 }
6821
6822#ifdef BCMDBGFS
6823 dhd_dbgfs_remove();
6824#endif // endif
6825 }
6826
6827 DHD_PERIM_UNLOCK(&dhd->pub);
6828 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6829
6830 /* Destroy wakelock */
6831 if (!dhd_download_fw_on_driverload &&
6832 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
6833 (skip_reset == false)) {
6834 DHD_OS_WAKE_LOCK_DESTROY(dhd);
6835 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
6836 }
6837 printf("%s: Exit %s\n", __FUNCTION__, net->name);
6838
6839 mutex_unlock(&dhd->pub.ndev_op_sync);
6840 return 0;
6841}
6842
6843#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
6844 defined(USE_INITIAL_SHORT_DWELL_TIME))
6845extern bool g_first_broadcast_scan;
6846#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
6847
6848#ifdef WL11U
6849static int dhd_interworking_enable(dhd_pub_t *dhd)
6850{
6851 uint32 enable = true;
6852 int ret = BCME_OK;
6853
6854 ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
6855 if (ret < 0) {
6856 DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
6857 }
6858
6859 return ret;
6860}
6861#endif /* WL11U */
6862
6863static int
6864dhd_open(struct net_device *net)
6865{
6866 dhd_info_t *dhd = DHD_DEV_INFO(net);
6867#ifdef TOE
6868 uint32 toe_ol;
6869#endif // endif
6870 int ifidx;
6871 int32 ret = 0;
6872#if defined(OOB_INTR_ONLY)
6873 uint32 bus_type = -1;
6874 uint32 bus_num = -1;
6875 uint32 slot_num = -1;
6876 wifi_adapter_info_t *adapter = NULL;
6877#endif
6878#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
6879 int bytes_written = 0;
6880#endif
6881
6882 mutex_lock(&dhd->pub.ndev_op_sync);
6883
6884 if (dhd->pub.up == 1) {
6885 /* already up */
6886 DHD_ERROR(("Primary net_device is already up \n"));
6887 mutex_unlock(&dhd->pub.ndev_op_sync);
6888 return BCME_OK;
6889 }
6890
6891 if (!dhd_download_fw_on_driverload) {
6892 if (!dhd_driver_init_done) {
6893 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
6894 mutex_unlock(&dhd->pub.ndev_op_sync);
6895 return -1;
6896 }
6897 }
6898
6899 printf("%s: Enter %s\n", __FUNCTION__, net->name);
6900 DHD_MUTEX_LOCK();
6901 /* Init wakelock */
6902 if (!dhd_download_fw_on_driverload) {
6903 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6904 DHD_OS_WAKE_LOCK_INIT(dhd);
6905 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
6906 }
6907
6908#ifdef SHOW_LOGTRACE
6909 skb_queue_head_init(&dhd->evt_trace_queue);
6910
6911 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
6912 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
6913 if (ret == BCME_OK) {
6914 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
6915 st_str_file_path, map_file_path);
6916 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
6917 rom_st_str_file_path, rom_map_file_path);
6918 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
6919 }
6920 }
6921#endif /* SHOW_LOGTRACE */
6922 }
6923
6924 DHD_OS_WAKE_LOCK(&dhd->pub);
6925 DHD_PERIM_LOCK(&dhd->pub);
6926 dhd->pub.dongle_trap_occured = 0;
6927 dhd->pub.hang_was_sent = 0;
6928 dhd->pub.hang_was_pending = 0;
6929 dhd->pub.hang_reason = 0;
6930 dhd->pub.iovar_timeout_occured = 0;
6931#ifdef PCIE_FULL_DONGLE
6932 dhd->pub.d3ack_timeout_occured = 0;
6933 dhd->pub.livelock_occured = 0;
6934 dhd->pub.pktid_audit_failed = 0;
6935#endif /* PCIE_FULL_DONGLE */
6936 dhd->pub.iface_op_failed = 0;
6937 dhd->pub.scan_timeout_occurred = 0;
6938 dhd->pub.scan_busy_occurred = 0;
6939 dhd->pub.smmu_fault_occurred = 0;
6940
6941#ifdef DHD_LOSSLESS_ROAMING
6942 dhd->pub.dequeue_prec_map = ALLPRIO;
6943#endif // endif
6944
6945#if 0
6946 /*
6947 * Force start if ifconfig_up gets called before START command
6948 * We keep WEXT's wl_control_wl_start to provide backward compatibility
6949 * This should be removed in the future
6950 */
6951 ret = wl_control_wl_start(net);
6952 if (ret != 0) {
6953 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6954 ret = -1;
6955 goto exit;
6956 }
6957
6958#endif // endif
6959
6960 ifidx = dhd_net2idx(dhd, net);
6961 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6962
6963 if (ifidx < 0) {
6964 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
6965 ret = -1;
6966 goto exit;
6967 }
6968
6969 if (!dhd->iflist[ifidx]) {
6970 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
6971 ret = -1;
6972 goto exit;
6973 }
6974
6975 if (ifidx == 0) {
6976 atomic_set(&dhd->pend_8021x_cnt, 0);
6977 if (!dhd_download_fw_on_driverload) {
6978 DHD_ERROR(("\n%s\n", dhd_version));
6979 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
6980#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
6981 wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
6982#ifdef WL_ESCAN
6983 wl_escan_event_attach(net, &dhd->pub);
6984#endif /* WL_ESCAN */
6985#ifdef WL_EXT_IAPSTA
6986 wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
6987#endif /* WL_EXT_IAPSTA */
6988#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
6989#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
6990 g_first_broadcast_scan = TRUE;
6991#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
6992#ifdef SHOW_LOGTRACE
6993 /* dhd_cancel_logtrace_process_sync is called in dhd_stop
6994 * for built-in models. Need to start logtrace kthread before
6995 * calling wifi on, because once wifi is on, EDL will be in action
6996 * any moment, and if kthread is not active, FW event logs will
6997 * not be available
6998 */
6999 if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
7000 goto exit;
7001 }
7002#endif /* SHOW_LOGTRACE */
7003#if defined(BT_OVER_SDIO)
7004 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
7005 wl_android_set_wifi_on_flag(TRUE);
7006#else
7007 ret = wl_android_wifi_on(net);
7008#endif /* BT_OVER_SDIO */
7009 if (ret != 0) {
7010 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
7011 __FUNCTION__, ret));
7012 ret = -1;
7013 goto exit;
7014 }
7015 }
7016#ifdef SUPPORT_DEEP_SLEEP
7017 else {
7018 /* Flags to indicate if we distinguish
7019 * power off policy when user set the menu
7020 * "Keep Wi-Fi on during sleep" to "Never"
7021 */
7022 if (trigger_deep_sleep) {
7023#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
7024 g_first_broadcast_scan = TRUE;
7025#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
7026 dhd_deepsleep(net, 0);
7027 trigger_deep_sleep = 0;
7028 }
7029 }
7030#endif /* SUPPORT_DEEP_SLEEP */
7031#ifdef FIX_CPU_MIN_CLOCK
7032 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
7033 dhd_init_cpufreq_fix(dhd);
7034 dhd_fix_cpu_freq(dhd);
7035 }
7036#endif /* FIX_CPU_MIN_CLOCK */
7037#if defined(OOB_INTR_ONLY)
7038 if (dhd->pub.conf->dpc_cpucore >= 0) {
7039 dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
7040 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7041 if (adapter) {
7042 printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
7043 irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
7044 }
7045 }
7046#endif
7047
7048 if (dhd->pub.busstate != DHD_BUS_DATA) {
7049#ifdef BCMDBUS
7050 dhd_set_path(&dhd->pub);
7051 DHD_MUTEX_UNLOCK();
7052 wait_event_interruptible_timeout(dhd->adapter->status_event,
7053 wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
7054 msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
7055 DHD_MUTEX_LOCK();
7056 if ((ret = dbus_up(dhd->pub.bus)) != 0) {
7057 DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
7058 goto exit;
7059 } else {
7060 dhd->pub.busstate = DHD_BUS_DATA;
7061 }
7062 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7063 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7064 goto exit;
7065 }
7066#else
7067 /* try to bring up bus */
7068 DHD_PERIM_UNLOCK(&dhd->pub);
7069
7070#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7071 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
7072 ret = dhd_bus_start(&dhd->pub);
7073 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7074 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7075 }
7076#else
7077 ret = dhd_bus_start(&dhd->pub);
7078#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7079
7080 DHD_PERIM_LOCK(&dhd->pub);
7081 if (ret) {
7082 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7083 ret = -1;
7084 goto exit;
7085 }
7086#endif /* !BCMDBUS */
7087
7088 }
7089#ifdef WL_EXT_IAPSTA
7090 wl_ext_iapsta_attach_name(net, ifidx);
7091#endif
7092
7093#ifdef BT_OVER_SDIO
7094 if (dhd->pub.is_bt_recovery_required) {
7095 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
7096 bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
7097 }
7098 dhd->pub.is_bt_recovery_required = FALSE;
7099#endif // endif
7100
7101 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
7102 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7103
7104#ifdef TOE
7105 /* Get current TOE mode from dongle */
7106 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
7107 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
7108 } else {
7109 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
7110 }
7111#endif /* TOE */
7112
7113#if defined(DHD_LB_RXP)
7114 __skb_queue_head_init(&dhd->rx_pend_queue);
7115 if (dhd->rx_napi_netdev == NULL) {
7116 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
7117 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
7118 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
7119 dhd_napi_poll, dhd_napi_weight);
7120 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
7121 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7122 napi_enable(&dhd->rx_napi_struct);
7123 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
7124 skb_queue_head_init(&dhd->rx_napi_queue);
7125 } /* rx_napi_netdev == NULL */
7126#endif /* DHD_LB_RXP */
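
/* Illustrative sketch, not driver code: the NAPI instance initialized above is
 * torn down with the mirror-image sequence; the same pattern can be seen in
 * the dhd_allocate_if() failure path further below.
 */
#if 0
	if (dhd->rx_napi_netdev != NULL) {
		napi_disable(&dhd->rx_napi_struct);	/* stop poll callbacks */
		netif_napi_del(&dhd->rx_napi_struct);	/* unlink from the netdev */
		skb_queue_purge(&dhd->rx_napi_queue);	/* drop queued rx skbs */
		dhd->rx_napi_netdev = NULL;
	}
#endif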
7127
7128#if defined(DHD_LB_TXP)
7129 /* Use the variant that uses locks */
7130 skb_queue_head_init(&dhd->tx_pend_queue);
7131#endif /* DHD_LB_TXP */
7132
7133#if defined(WL_CFG80211)
7134 if (unlikely(wl_cfg80211_up(net))) {
7135 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
7136 ret = -1;
7137 goto exit;
7138 }
7139 if (!dhd_download_fw_on_driverload) {
7140#ifdef ARP_OFFLOAD_SUPPORT
7141 dhd->pend_ipaddr = 0;
7142 if (!dhd_inetaddr_notifier_registered) {
7143 dhd_inetaddr_notifier_registered = TRUE;
7144 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7145 }
7146#endif /* ARP_OFFLOAD_SUPPORT */
7147#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7148 if (!dhd_inet6addr_notifier_registered) {
7149 dhd_inet6addr_notifier_registered = TRUE;
7150 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7151 }
7152#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7153 }
7154
7155#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
7156 dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
7157#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
7158#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
7159 dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
7160#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
7161#ifdef DHD_LB_IRQSET
7162 dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
7163#endif /* DHD_LB_IRQSET */
7164#if defined(ARGOS_NOTIFY_CB)
7165 argos_register_notifier_init(net);
7166#endif // endif
7167#if defined(NUM_SCB_MAX_PROBE)
7168 dhd_set_scb_probe(&dhd->pub);
7169#endif /* NUM_SCB_MAX_PROBE */
7170#endif /* WL_CFG80211 */
 7171#if defined(WL_WIRELESS_EXT)
7172 if (unlikely(wl_iw_up(net, &dhd->pub))) {
7173 DHD_ERROR(("%s: failed to bring up wext\n", __FUNCTION__));
7174 ret = -1;
7175 goto exit;
7176 }
7177#endif
7178#ifdef WL_ESCAN
7179 if (unlikely(wl_escan_up(net, &dhd->pub))) {
7180 DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
7181 ret = -1;
7182 goto exit;
7183 }
7184#endif /* WL_ESCAN */
7185#if defined(ISAM_PREINIT)
 7186 if (!dhd_download_fw_on_driverload) {
7187 if (dhd->pub.conf) {
7188 wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written);
7189 wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written);
7190 wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written);
7191 }
7192 }
7193#endif
7194 }
7195
7196 dhd->pub.up = 1;
7197
7198 if (wl_event_enable) {
7199 /* For wl utility to receive events */
7200 dhd->pub.wl_event_enabled = true;
7201 } else {
7202 dhd->pub.wl_event_enabled = false;
7203 }
7204
7205 if (logtrace_pkt_sendup) {
 7206 /* For any daemon to receive logtrace */
7207 dhd->pub.logtrace_pkt_sendup = true;
7208 } else {
7209 dhd->pub.logtrace_pkt_sendup = false;
7210 }
7211
7212 OLD_MOD_INC_USE_COUNT;
7213
7214#ifdef BCMDBGFS
7215 dhd_dbgfs_init(&dhd->pub);
7216#endif // endif
7217
7218exit:
7219 mutex_unlock(&dhd->pub.ndev_op_sync);
7220 if (ret) {
7221 dhd_stop(net);
7222 }
7223
7224 DHD_PERIM_UNLOCK(&dhd->pub);
7225 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7226 DHD_MUTEX_UNLOCK();
7227
7228 printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
7229 return ret;
7230}
7231
7232/*
7233 * ndo_start handler for primary ndev
7234 */
7235static int
7236dhd_pri_open(struct net_device *net)
7237{
7238 s32 ret;
7239
7240 ret = dhd_open(net);
7241 if (unlikely(ret)) {
7242 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
7243 return ret;
7244 }
7245
7246 /* Allow transmit calls */
7247 netif_start_queue(net);
7248 DHD_ERROR(("[%s] tx queue started\n", net->name));
7249
7250#if defined(SET_RPS_CPUS)
7251 dhd_rps_cpus_enable(net, TRUE);
7252 DHD_ERROR(("[%s] RPS started\n", net->name));
7253#endif
7254
7255#if defined(SET_XPS_CPUS)
7256 dhd_xps_cpus_enable(net, TRUE);
7257 DHD_ERROR(("[%s] XPS started\n", net->name));
7258#endif
7259
7260 return ret;
7261}
 7262
7263/*
7264 * ndo_stop handler for primary ndev
7265 */
7266static int
7267dhd_pri_stop(struct net_device *net)
7268{
7269 s32 ret;
 7270
7271 /* stop tx queue */
7272 netif_stop_queue(net);
7273 DHD_ERROR(("[%s] tx queue stopped\n", net->name));
 7274
7275 ret = dhd_stop(net);
7276 if (unlikely(ret)) {
7277 DHD_ERROR(("dhd_stop failed: %d\n", ret));
7278 return ret;
7279 }
 7280
7281 return ret;
7282}
7283
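/* Illustrative sketch, not driver code: dhd_pri_open/dhd_pri_stop above follow
 * the standard Linux ndo contract - the tx queue is started only once the
 * device is fully up, and stopped before any teardown, so ndo_start_xmit is
 * never entered on a half-initialized interface. The generic shape is:
 */
#if 0
static int example_open(struct net_device *net)
{
	/* ... bring the hardware up first ... */
	netif_start_queue(net);	/* then allow ndo_start_xmit calls */
	return 0;
}

static int example_stop(struct net_device *net)
{
	netif_stop_queue(net);	/* block ndo_start_xmit before teardown */
	/* ... then bring the hardware down ... */
	return 0;
}
#endif
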
7284#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7285/*
7286 * For static I/Fs, the firmware interface init
7287 * is done from the IFF_UP context.
7288 */
7289static int
7290dhd_static_if_open(struct net_device *net)
7291{
7292 s32 ret = 0;
7293 struct bcm_cfg80211 *cfg;
7294 struct net_device *primary_netdev = NULL;
7295
7296 cfg = wl_get_cfg(net);
7297 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
7298
7299 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
7300 DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
7301 ret = BCME_OK;
7302 goto done;
7303 }
7304
 7305 printf("%s: Enter %s\n", __FUNCTION__, net->name);
7306 /* Ensure fw is initialized. If it is already initialized,
7307 * dhd_open will return success.
7308 */
7309 ret = dhd_open(primary_netdev);
7310 if (unlikely(ret)) {
7311 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
7312 goto done;
7313 }
7314
7315 ret = wl_cfg80211_static_if_open(net);
7316 if (!ret) {
7317 /* Allow transmit calls */
7318 netif_start_queue(net);
7319 }
7320done:
 7321 printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
7322 return ret;
7323}
7324
7325static int
7326dhd_static_if_stop(struct net_device *net)
7327{
7328 struct bcm_cfg80211 *cfg;
7329 struct net_device *primary_netdev = NULL;
7330 int ret = BCME_OK;
 7331 dhd_info_t *dhd = DHD_DEV_INFO(net);
 7332
 7333 printf("%s: Enter %s\n", __FUNCTION__, net->name);
7334
7335 /* Ensure queue is disabled */
7336 netif_tx_disable(net);
7337
7338 cfg = wl_get_cfg(net);
7339 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
7340 DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
7341 return BCME_OK;
7342 }
7343
7344 ret = wl_cfg80211_static_if_close(net);
7345
7346 if (dhd->pub.up == 0) {
7347 /* If fw is down, return */
7348 DHD_ERROR(("fw down\n"));
7349 return BCME_OK;
7350 }
 7351 /* If the STA iface is not operational, invoke dhd_stop from
 7352 * this context.
7353 */
7354 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
7355 if (!(primary_netdev->flags & IFF_UP)) {
7356 ret = dhd_stop(primary_netdev);
7357 } else {
7358 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
 7359 }
 7360 printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
7361
7362 return ret;
7363}
 7364#endif /* WL_STATIC_IF && WL_CFG80211 */
7365
7366int dhd_do_driver_init(struct net_device *net)
7367{
7368 dhd_info_t *dhd = NULL;
7369
7370 if (!net) {
7371 DHD_ERROR(("Primary Interface not initialized \n"));
7372 return -EINVAL;
7373 }
7374
7375 DHD_MUTEX_IS_LOCK_RETURN();
7376
7377 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
7378 dhd = DHD_DEV_INFO(net);
7379
7380 /* If driver is already initialized, do nothing
7381 */
7382 if (dhd->pub.busstate == DHD_BUS_DATA) {
 7383 DHD_TRACE(("Driver already initialized. Nothing to do"));
7384 return 0;
7385 }
7386
7387 if (dhd_open(net) < 0) {
7388 DHD_ERROR(("Driver Init Failed \n"));
7389 return -1;
7390 }
7391
7392 return 0;
7393}
7394
7395int
7396dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
7397{
7398
7399#ifdef WL_CFG80211
7400 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7401 ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
7402 return BCME_OK;
7403#endif // endif
7404
7405 /* handle IF event caused by wl commands, SoftAP, WEXT and
7406 * anything else. This has to be done asynchronously otherwise
 7407 * DPC will be blocked (and iovars will time out as DPC has no chance
7408 * to read the response back)
7409 */
7410 if (ifevent->ifidx > 0) {
7411 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7412 if (if_event == NULL) {
7413 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
7414 MALLOCED(dhdinfo->pub.osh)));
7415 return BCME_NOMEM;
7416 }
7417
7418 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7419 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7420 strncpy(if_event->name, name, IFNAMSIZ);
7421 if_event->name[IFNAMSIZ - 1] = '\0';
7422 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
7423 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7424 }
7425
7426 return BCME_OK;
7427}
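
/* Illustrative sketch, not driver code: work queued via
 * dhd_deferred_schedule_work() is delivered to a handler with the
 * (handle, event_info, event) signature used by the NATOE handlers below.
 * The handler runs in process context, so unlike DPC it may sleep;
 * example_if_event_handler is a hypothetical name for illustration only.
 */
#if 0
static void example_if_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_if_event_t *if_event = event_info;	/* payload allocated above */

	/* ... create or delete the OS interface, then free if_event ... */
}
#endif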
7428
7429int
7430dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
7431{
7432 dhd_if_event_t *if_event;
7433
7434#ifdef WL_CFG80211
7435 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7436 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
7437 return BCME_OK;
7438#endif /* WL_CFG80211 */
7439
7440 /* handle IF event caused by wl commands, SoftAP, WEXT and
7441 * anything else
7442 */
7443 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7444 if (if_event == NULL) {
7445 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
7446 MALLOCED(dhdinfo->pub.osh)));
7447 return BCME_NOMEM;
7448 }
7449 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7450 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7451 strncpy(if_event->name, name, IFNAMSIZ);
7452 if_event->name[IFNAMSIZ - 1] = '\0';
7453 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
7454 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7455
7456 return BCME_OK;
7457}
7458
7459int
7460dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
7461{
7462#ifdef DHD_UPDATE_INTF_MAC
7463 dhd_if_event_t *if_event;
7464#endif /* DHD_UPDATE_INTF_MAC */
7465
7466#ifdef WL_CFG80211
7467 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7468 ifevent->ifidx, name, mac, ifevent->bssidx);
7469#endif /* WL_CFG80211 */
7470
7471#ifdef DHD_UPDATE_INTF_MAC
7472 /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
7473 * anything else
7474 */
7475 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7476 if (if_event == NULL) {
7477 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
7478 MALLOCED(dhdinfo->pub.osh)));
7479 return BCME_NOMEM;
7480 }
7481 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7482 // construct a change event
7483 if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
7484 if_event->event.opcode = WLC_E_IF_CHANGE;
7485 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7486 strncpy(if_event->name, name, IFNAMSIZ);
7487 if_event->name[IFNAMSIZ - 1] = '\0';
7488 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
7489 dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7490#endif /* DHD_UPDATE_INTF_MAC */
7491
7492 return BCME_OK;
7493}
7494
7495#ifdef WL_NATOE
 7496/* Handler to update natoe info and bind with new subscriptions if there is a change in config */
 7497static void
 7498dhd_natoe_ct_event_handler(void *handle, void *event_info, u8 event)
7499{
7500 dhd_info_t *dhd = handle;
7501 wl_event_data_natoe_t *natoe = event_info;
 7502 dhd_nfct_info_t *nfct = dhd ? dhd->pub.nfct : NULL; /* avoid deref before the !dhd check below */
7503
7504 if (event != DHD_WQ_WORK_NATOE_EVENT) {
7505 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7506 return;
7507 }
7508
7509 if (!dhd) {
7510 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7511 return;
7512 }
7513 if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
7514 (natoe->start_port < natoe->end_port)) {
7515 /* Rebind subscriptions to start receiving notifications from groups */
7516 if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
7517 dhd_ct_close(nfct);
7518 }
7519 dhd_ct_send_dump_req(nfct);
7520 } else if (!natoe->natoe_active) {
7521 /* Rebind subscriptions to stop receiving notifications from groups */
7522 if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
7523 dhd_ct_close(nfct);
7524 }
7525 }
7526}
7527
 7528/* When a NATOE enable/disable event is received, we have to bind with new NL subscriptions.
7529 * Scheduling workq to switch from tasklet context as bind call may sleep in handler
7530 */
7531int
7532dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
7533{
7534 wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
7535
7536 if (dhd->nfct) {
7537 wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
7538 uint8 prev_enable = natoe->natoe_active;
7539
7540 spin_lock_bh(&dhd->nfct_lock);
7541 memcpy(natoe, event_data, sizeof(*event_data));
7542 spin_unlock_bh(&dhd->nfct_lock);
7543
7544 if (prev_enable != event_data->natoe_active) {
7545 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
7546 (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
 7547 dhd_natoe_ct_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7548 }
7549 return BCME_OK;
7550 }
7551 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
7552 return BCME_ERROR;
7553}
7554
7555/* Handler to send natoe ioctl to dongle */
7556static void
7557dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
7558{
7559 dhd_info_t *dhd = handle;
7560 dhd_ct_ioc_t *ct_ioc = event_info;
7561
7562 if (event != DHD_WQ_WORK_NATOE_IOCTL) {
7563 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7564 return;
7565 }
7566
7567 if (!dhd) {
7568 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7569 return;
7570 }
7571
7572 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
7573 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
7574 }
7575}
7576
 7577/* When a Netlink message contains port-collision info, the info must be sent to the dongle FW.
 7578 * For that we have to switch context from softirq/tasklet by scheduling a workq for the natoe_ct ioctl
7579 */
7580void
7581dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
7582{
7583
7584 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
7585 DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
7586 DHD_WQ_WORK_PRIORITY_HIGH);
7587}
7588#endif /* WL_NATOE */
7589
7590/* This API maps ndev to ifp inclusive of static IFs */
7591static dhd_if_t *
7592dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
7593{
7594 dhd_if_t *ifp = NULL;
7595#ifdef WL_STATIC_IF
7596 u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
7597#else
7598 u32 ifidx = (DHD_MAX_IFS - 1);
7599#endif /* WL_STATIC_IF */
7600
7601 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
7602 do {
7603 ifp = dhdinfo->iflist[ifidx];
7604 if (ifp && (ifp->net == ndev)) {
7605 DHD_TRACE(("match found for %s. ifidx:%d\n",
7606 ndev->name, ifidx));
7607 return ifp;
7608 }
7609 } while (ifidx--);
7610
7611 DHD_ERROR(("no entry found for %s\n", ndev->name));
7612 return NULL;
7613}
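
/* Note: the lookup above walks iflist from the highest slot down to 0, so the
 * static interfaces parked in the slots beyond DHD_MAX_IFS (when WL_STATIC_IF
 * is defined) are matched as well as the dynamic ones.
 */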
7614
7615bool
7616dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
7617{
7618 dhd_if_t *ifp = NULL;
7619
7620 if (!dhdp || !ndev) {
7621 DHD_ERROR(("wrong input\n"));
7622 ASSERT(0);
7623 return false;
7624 }
7625
7626 ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
7627 return (ifp && (ifp->static_if == true));
7628}
7629
7630#ifdef WL_STATIC_IF
 7631/* In some cases, while registering an I/F, the actual ifidx, bssidx and dngl_name
 7632 * are not known, e.g. in the static i/f case. This function lets us update them
 7633 * once they are known.
7634 */
7635s32
7636dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
7637 uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
7638{
7639 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
7640 dhd_if_t *ifp, *ifp_new;
7641 s32 cur_idx;
7642 dhd_dev_priv_t * dev_priv;
7643
7644 DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
7645 if_state, ifidx));
7646
7647 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
7648
7649 if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
7650 return -ENODEV;
7651 }
7652 cur_idx = ifp->idx;
7653
7654 if (if_state == NDEV_STATE_OS_IF_CREATED) {
7655 /* mark static if */
7656 ifp->static_if = TRUE;
7657 return BCME_OK;
7658 }
7659
7660 ifp_new = dhdinfo->iflist[ifidx];
7661 if (ifp_new && (ifp_new != ifp)) {
7662 /* There should be only one entry for a given ifidx. */
7663 DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
7664 ASSERT(0);
7665 dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
7666 net_os_send_hang_message(ifp->net);
7667 return -EINVAL;
7668 }
7669
7670 /* For static if delete case, cleanup the if before ifidx update */
7671 if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
7672 (if_state == NDEV_STATE_FW_IF_FAILED)) {
7673 dhd_cleanup_if(ifp->net);
7674 dev_priv = DHD_DEV_PRIV(ndev);
7675 dev_priv->ifidx = ifidx;
7676 }
7677
7678 /* update the iflist ifidx slot with cached info */
7679 dhdinfo->iflist[ifidx] = ifp;
7680 dhdinfo->iflist[cur_idx] = NULL;
7681
7682 /* update the values */
7683 ifp->idx = ifidx;
7684 ifp->bssidx = bssidx;
7685
7686 if (if_state == NDEV_STATE_FW_IF_CREATED) {
7687 dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
7688 /* initialize the dongle provided if name */
7689 if (dngl_name) {
7690 strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
7691 } else if (ndev->name[0] != '\0') {
7692 strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
7693 }
7694 if (mac != NULL) {
7695 (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
7696 }
7697#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
7698 wl_ext_event_attach_netdev(ndev, ifidx, bssidx);
7699#ifdef WL_ESCAN
7700 wl_escan_event_attach(ndev, dhdp);
7701#endif /* WL_ESCAN */
7702#ifdef WL_EXT_IAPSTA
7703 wl_ext_iapsta_ifadding(ndev, ifidx);
7704 wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx);
7705 wl_ext_iapsta_attach_name(ndev, ifidx);
7706#endif /* WL_EXT_IAPSTA */
7707 } else if (if_state == NDEV_STATE_FW_IF_DELETED) {
7708#ifdef WL_EXT_IAPSTA
7709 wl_ext_iapsta_dettach_netdev(ndev, cur_idx);
7710#endif /* WL_EXT_IAPSTA */
7711#ifdef WL_ESCAN
7712 wl_escan_event_dettach(ndev, dhdp);
7713#endif /* WL_ESCAN */
7714 wl_ext_event_dettach_netdev(ndev, cur_idx);
7715#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
7716 }
7717 DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
7718 ifidx, cur_idx, if_state));
7719 return BCME_OK;
7720}
7721#endif /* WL_STATIC_IF */
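
/* Summary of the state flow handled above: NDEV_STATE_OS_IF_CREATED only
 * marks the ifp as static; NDEV_STATE_FW_IF_CREATED moves the cached ifp into
 * the slot of the firmware-assigned ifidx and attaches the event/escan/iapsta
 * hooks; NDEV_STATE_FW_IF_DELETED/NDEV_STATE_FW_IF_FAILED clean the interface
 * up and detach them again.
 */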
7722
 7723/* Unregister and free the existing net_device interface (if any) in iflist and
 7724 * allocate a new one. The slot is reused. This function does NOT register the
 7725 * new interface with the linux kernel; dhd_register_if does that job
7726 */
7727struct net_device*
7728dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
7729 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
7730{
7731 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
7732 dhd_if_t *ifp;
7733
7734 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
7735
7736 ifp = dhdinfo->iflist[ifidx];
7737
7738 if (ifp != NULL) {
7739 if (ifp->net != NULL) {
7740 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
7741 __FUNCTION__, ifp->net->name, ifidx));
7742
7743 if (ifidx == 0) {
7744 /* For primary ifidx (0), there shouldn't be
7745 * any netdev present already.
7746 */
7747 DHD_ERROR(("Primary ifidx populated already\n"));
7748 ASSERT(0);
7749 return NULL;
7750 }
7751
7752 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
7753
7754 /* in unregister_netdev case, the interface gets freed by net->destructor
7755 * (which is set to free_netdev)
7756 */
7757 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
7758 free_netdev(ifp->net);
7759 } else {
7760 netif_stop_queue(ifp->net);
7761 if (need_rtnl_lock)
7762 unregister_netdev(ifp->net);
7763 else
7764 unregister_netdevice(ifp->net);
7765 }
7766 ifp->net = NULL;
7767 }
7768 } else {
7769 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
7770 if (ifp == NULL) {
7771 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
7772 return NULL;
7773 }
7774 }
7775
7776 memset(ifp, 0, sizeof(dhd_if_t));
7777 ifp->info = dhdinfo;
7778 ifp->idx = ifidx;
7779 ifp->bssidx = bssidx;
7780#ifdef DHD_MCAST_REGEN
7781 ifp->mcast_regen_bss_enable = FALSE;
7782#endif // endif
7783 /* set to TRUE rx_pkt_chainable at alloc time */
7784 ifp->rx_pkt_chainable = TRUE;
7785
7786 if (mac != NULL)
7787 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
7788
7789 /* Allocate etherdev, including space for private structure */
7790 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
7791 if (ifp->net == NULL) {
7792 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
7793 goto fail;
7794 }
7795
7796 /* Setup the dhd interface's netdevice private structure. */
7797 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
7798
7799 if (name && name[0]) {
7800 strncpy(ifp->net->name, name, IFNAMSIZ);
7801 ifp->net->name[IFNAMSIZ - 1] = '\0';
7802 }
7803
7804#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
7805#define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
7806#else
7807#define IFP_NET_DESTRUCTOR ifp->net->destructor
7808#endif // endif
7809
7810#ifdef WL_CFG80211
7811 if (ifidx == 0) {
7812 IFP_NET_DESTRUCTOR = free_netdev;
7813 } else {
7814 IFP_NET_DESTRUCTOR = dhd_netdev_free;
7815 }
7816#else
7817 IFP_NET_DESTRUCTOR = free_netdev;
7818#endif /* WL_CFG80211 */
7819 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
7820 ifp->name[IFNAMSIZ - 1] = '\0';
7821 dhdinfo->iflist[ifidx] = ifp;
7822
7823 /* initialize the dongle provided if name */
7824 if (dngl_name) {
7825 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
7826 } else if (name) {
7827 strncpy(ifp->dngl_name, name, IFNAMSIZ);
7828 }
7829
7830#ifdef PCIE_FULL_DONGLE
7831 /* Initialize STA info list */
7832 INIT_LIST_HEAD(&ifp->sta_list);
7833 DHD_IF_STA_LIST_LOCK_INIT(ifp);
7834#endif /* PCIE_FULL_DONGLE */
7835
7836#ifdef DHD_L2_FILTER
7837 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
7838 ifp->parp_allnode = TRUE;
7839#endif /* DHD_L2_FILTER */
7840
7841 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
7842
7843#ifdef DHD_4WAYM4_FAIL_DISCONNECT
7844 INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
7845#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
7846
7847#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
7848 ifp->recv_reassoc_evt = FALSE;
7849 ifp->post_roam_evt = FALSE;
7850#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
7851
7852#ifdef DHDTCPSYNC_FLOOD_BLK
7853 INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
7854 dhd_reset_tcpsync_info_by_ifp(ifp);
7855#endif /* DHDTCPSYNC_FLOOD_BLK */
7856
7857 return ifp->net;
7858
7859fail:
7860 if (ifp != NULL) {
7861 if (ifp->net != NULL) {
7862#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
7863 if (ifp->net == dhdinfo->rx_napi_netdev) {
7864 napi_disable(&dhdinfo->rx_napi_struct);
7865 netif_napi_del(&dhdinfo->rx_napi_struct);
7866 skb_queue_purge(&dhdinfo->rx_napi_queue);
7867 dhdinfo->rx_napi_netdev = NULL;
7868 }
7869#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
7870 dhd_dev_priv_clear(ifp->net);
7871 free_netdev(ifp->net);
7872 ifp->net = NULL;
7873 }
7874 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
7875 ifp = NULL;
7876 }
7877
7878 dhdinfo->iflist[ifidx] = NULL;
7879 return NULL;
7880}
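
/* Usage sketch: dhd_attach() below allocates the primary interface this way
 * (slot 0, no MAC yet, bssidx 0, need_rtnl_lock TRUE, NULL dngl_name so the
 * host-provided name is reused):
 */
#if 0
	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
	if (net == NULL)
		goto fail;	/* slot 0 could not be (re)allocated */
#endif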
7881
7882static void
 7883dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
 7884{
 7885#ifdef PCIE_FULL_DONGLE
 7886 s32 ifidx = 0;
7887 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
7888#endif /* PCIE_FULL_DONGLE */
7889
 7890 if (ifp != NULL) {
7891 if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
7892 DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
7893 ASSERT(0);
7894 return;
7895 }
7896#ifdef DHD_L2_FILTER
 7897 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
 7898 NULL, FALSE, dhdp->tickcnt);
 7899 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
7900 ifp->phnd_arp_table = NULL;
7901#endif /* DHD_L2_FILTER */
7902
7903 dhd_if_del_sta_list(ifp);
7904#ifdef PCIE_FULL_DONGLE
7905 /* Delete flowrings of virtual interface */
 7906 ifidx = ifp->idx;
7907 if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
7908 dhd_flow_rings_delete(dhdp, ifidx);
7909 }
7910#endif /* PCIE_FULL_DONGLE */
7911 }
7912}
7913
7914void
7915dhd_cleanup_if(struct net_device *net)
7916{
7917 dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
7918 dhd_pub_t *dhdp = &dhdinfo->pub;
7919 dhd_if_t *ifp;
7920
7921 if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
7922 (ifp->idx >= DHD_MAX_IFS)) {
7923 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
7924 ASSERT(0);
7925 return;
7926 }
7927
 7928 dhd_cleanup_ifp(dhdp, ifp);
7929}
7930
 7931/* unregister and free the net_device interface associated with the indexed
7932 * slot, also free the slot memory and set the slot pointer to NULL
7933 */
7934#define DHD_TX_COMPLETION_TIMEOUT 5000
7935int
7936dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
7937{
7938 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
7939 dhd_if_t *ifp;
7940 unsigned long flags;
 7941 long timeout;
7942
7943 ifp = dhdinfo->iflist[ifidx];
7944
7945 if (ifp != NULL) {
7946#ifdef DHD_4WAYM4_FAIL_DISCONNECT
7947 cancel_delayed_work_sync(&ifp->m4state_work);
7948#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
7949
7950#ifdef DHDTCPSYNC_FLOOD_BLK
7951 cancel_work_sync(&ifp->blk_tsfl_work);
7952#endif /* DHDTCPSYNC_FLOOD_BLK */
7953
7954#ifdef WL_STATIC_IF
7955 /* static IF will be handled in detach */
7956 if (ifp->static_if) {
7957 DHD_TRACE(("Skip del iface for static interface\n"));
7958 return BCME_OK;
7959 }
7960#endif /* WL_STATIC_IF */
7961 if (ifp->net != NULL) {
7962 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
7963
7964 DHD_GENERAL_LOCK(dhdpub, flags);
7965 ifp->del_in_progress = true;
7966 DHD_GENERAL_UNLOCK(dhdpub, flags);
7967
7968 /* If TX is in progress, hold the if del */
7969 if (DHD_IF_IS_TX_ACTIVE(ifp)) {
 7970 DHD_INFO(("TX in progress. Waiting for it to complete."));
7971 timeout = wait_event_timeout(dhdpub->tx_completion_wait,
7972 ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
7973 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
7974 if (!timeout) {
7975 /* Tx completion timeout. Attempt proceeding ahead */
7976 DHD_ERROR(("Tx completion timed out!\n"));
7977 ASSERT(0);
7978 }
7979 } else {
7980 DHD_TRACE(("No outstanding TX!\n"));
7981 }
7982 dhdinfo->iflist[ifidx] = NULL;
7983 /* in unregister_netdev case, the interface gets freed by net->destructor
7984 * (which is set to free_netdev)
7985 */
7986 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
7987 free_netdev(ifp->net);
7988 } else {
7989 netif_tx_disable(ifp->net);
7990
7991#if defined(SET_RPS_CPUS)
7992 custom_rps_map_clear(ifp->net->_rx);
7993#endif /* SET_RPS_CPUS */
7994#if defined(SET_RPS_CPUS)
7995#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
7996 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
7997#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
7998#endif // endif
7999 if (need_rtnl_lock)
8000 unregister_netdev(ifp->net);
8001 else
8002 unregister_netdevice(ifp->net);
 8003#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
 8004#ifdef WL_EXT_IAPSTA
8005 wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
8006#endif /* WL_EXT_IAPSTA */
8007#ifdef WL_ESCAN
8008 wl_escan_event_dettach(ifp->net, dhdpub);
8009#endif /* WL_ESCAN */
8010 wl_ext_event_dettach_netdev(ifp->net, ifidx);
8011#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
8012 }
8013 ifp->net = NULL;
8014 DHD_GENERAL_LOCK(dhdpub, flags);
8015 ifp->del_in_progress = false;
8016 DHD_GENERAL_UNLOCK(dhdpub, flags);
8017 }
 8018 dhd_cleanup_ifp(dhdpub, ifp);
8019 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8020
8021 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8022 ifp = NULL;
8023 }
8024
8025 return BCME_OK;
8026}
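
/* Note: the DHD_IF_IS_TX_ACTIVE()/wait_event_timeout() step above exists
 * because ndo_start_xmit may still be running on another CPU when the
 * interface is deleted; deletion is held off for up to
 * DHD_TX_COMPLETION_TIMEOUT ms until all tx paths have drained.
 */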
8027
8028static struct net_device_ops dhd_ops_pri = {
8029 .ndo_open = dhd_pri_open,
8030 .ndo_stop = dhd_pri_stop,
8031 .ndo_get_stats = dhd_get_stats,
8032#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8033 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8034 .ndo_start_xmit = dhd_start_xmit_wrapper,
8035#else
8036 .ndo_do_ioctl = dhd_ioctl_entry,
8037 .ndo_start_xmit = dhd_start_xmit,
8038#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8039 .ndo_set_mac_address = dhd_set_mac_address,
8040#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8041 .ndo_set_rx_mode = dhd_set_multicast_list,
8042#else
8043 .ndo_set_multicast_list = dhd_set_multicast_list,
8044#endif // endif
8045};
8046
8047static struct net_device_ops dhd_ops_virt = {
8048#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8049 .ndo_open = dhd_static_if_open,
8050 .ndo_stop = dhd_static_if_stop,
8051#endif // endif
8052 .ndo_get_stats = dhd_get_stats,
8053#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8054 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8055 .ndo_start_xmit = dhd_start_xmit_wrapper,
8056#else
8057 .ndo_do_ioctl = dhd_ioctl_entry,
8058 .ndo_start_xmit = dhd_start_xmit,
8059#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8060 .ndo_set_mac_address = dhd_set_mac_address,
8061#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8062 .ndo_set_rx_mode = dhd_set_multicast_list,
8063#else
8064 .ndo_set_multicast_list = dhd_set_multicast_list,
8065#endif // endif
8066};
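
/* Illustrative sketch, an assumption rather than code from this hunk: the ops
 * tables above are attached to a netdev before it is registered (per the
 * comment on dhd_allocate_if, the registration itself is done by
 * dhd_register_if), along the lines of:
 */
#if 0
	if (ifidx == 0)
		net->netdev_ops = &dhd_ops_pri;		/* primary interface */
	else
		net->netdev_ops = &dhd_ops_virt;	/* virtual/static interfaces */
#endif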
8067
8068int
8069dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
8070 unsigned long buflen)
8071{
8072 loff_t wr_posn = *posn;
8073
8074 if (!fp || !buf || buflen == 0)
8075 return -1;
8076
 8077 if (compat_vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
8078 return -1;
8079
8080 *posn = wr_posn;
8081 return 0;
8082}
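
/* Usage sketch with hypothetical buffers: the position cursor lets sequential
 * writes append to the same file without the caller touching struct file.
 */
#if 0
	unsigned long pos = 0;
	dhd_os_write_file_posn(fp, &pos, hdr, hdr_len);		/* writes at offset 0 */
	dhd_os_write_file_posn(fp, &pos, body, body_len);	/* continues at hdr_len */
#endif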
8083
8084#ifdef SHOW_LOGTRACE
8085int
8086dhd_os_read_file(void *file, char *buf, uint32 size)
8087{
8088 struct file *filep = (struct file *)file;
8089
8090 if (!file || !buf)
8091 return -1;
8092
8093 return vfs_read(filep, buf, size, &filep->f_pos);
8094}
8095
8096int
8097dhd_os_seek_file(void *file, int64 offset)
8098{
8099 struct file *filep = (struct file *)file;
8100 if (!file)
8101 return -1;
8102
8103 /* offset can be -ve */
8104 filep->f_pos = filep->f_pos + offset;
8105
8106 return 0;
8107}
8108
8109static int
8110dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
8111{
8112 struct file *filep = NULL;
8113 struct kstat stat;
8114 mm_segment_t fs;
8115 char *raw_fmts = NULL;
8116 int logstrs_size = 0;
8117 int error = 0;
8118
8119 fs = get_fs();
8120 set_fs(KERNEL_DS);
8121
8122 filep = filp_open(logstrs_path, O_RDONLY, 0);
8123
8124 if (IS_ERR(filep)) {
8125 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
8126 goto fail;
8127 }
8128 error = vfs_stat(logstrs_path, &stat);
8129 if (error) {
8130 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
8131 goto fail;
8132 }
8133 logstrs_size = (int) stat.size;
8134
8135 if (logstrs_size == 0) {
8136 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
8137 goto fail1;
8138 }
8139
8140 raw_fmts = MALLOC(osh, logstrs_size);
8141 if (raw_fmts == NULL) {
8142 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
8143 goto fail;
8144 }
8145
8146 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
8147 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
8148 goto fail;
8149 }
8150
8151 if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
8152 == BCME_OK) {
8153 filp_close(filep, NULL);
8154 set_fs(fs);
8155 return BCME_OK;
8156 }
8157
8158fail:
8159 if (raw_fmts) {
8160 MFREE(osh, raw_fmts, logstrs_size);
8161 raw_fmts = NULL;
8162 }
8163
8164fail1:
8165 if (!IS_ERR(filep))
8166 filp_close(filep, NULL);
8167
8168 set_fs(fs);
8169 temp->fmts = NULL;
8170 return BCME_ERROR;
8171}
8172
8173static int
8174dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
8175 uint32 *rodata_end)
8176{
8177 struct file *filep = NULL;
8178 mm_segment_t fs;
8179 int err = BCME_ERROR;
8180
8181 if (fname == NULL) {
8182 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
8183 return BCME_ERROR;
8184 }
8185
8186 fs = get_fs();
8187 set_fs(KERNEL_DS);
8188
8189 filep = filp_open(fname, O_RDONLY, 0);
8190 if (IS_ERR(filep)) {
8191 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
8192 goto fail;
8193 }
8194
8195 if ((err = dhd_parse_map_file(osh, filep, ramstart,
8196 rodata_start, rodata_end)) < 0)
8197 goto fail;
8198
8199fail:
8200 if (!IS_ERR(filep))
8201 filp_close(filep, NULL);
8202
8203 set_fs(fs);
8204
8205 return err;
8206}
8207
8208static int
8209dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
8210{
8211 struct file *filep = NULL;
8212 mm_segment_t fs;
8213 char *raw_fmts = NULL;
8214 uint32 logstrs_size = 0;
8215 int error = 0;
8216 uint32 ramstart = 0;
8217 uint32 rodata_start = 0;
8218 uint32 rodata_end = 0;
8219 uint32 logfilebase = 0;
8220
8221 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
8222 if (error != BCME_OK) {
8223 DHD_ERROR(("readmap Error!! \n"));
8224 /* don't do event log parsing in actual case */
8225 if (strstr(str_file, ram_file_str) != NULL) {
8226 temp->raw_sstr = NULL;
8227 } else if (strstr(str_file, rom_file_str) != NULL) {
8228 temp->rom_raw_sstr = NULL;
8229 }
8230 return error;
8231 }
8232 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
8233 ramstart, rodata_start, rodata_end));
8234
8235 fs = get_fs();
8236 set_fs(KERNEL_DS);
8237
8238 filep = filp_open(str_file, O_RDONLY, 0);
8239 if (IS_ERR(filep)) {
8240 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
8241 goto fail;
8242 }
8243
8244 if (TRUE) {
8245 /* Full file size is huge. Just read required part */
8246 logstrs_size = rodata_end - rodata_start;
8247 logfilebase = rodata_start - ramstart;
8248 }
8249
8250 if (logstrs_size == 0) {
8251 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
8252 goto fail1;
8253 }
8254
8255 raw_fmts = MALLOC(osh, logstrs_size);
8256 if (raw_fmts == NULL) {
8257 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
8258 goto fail;
8259 }
8260
8261 if (TRUE) {
8262 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
8263 if (error < 0) {
8264 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
8265 goto fail;
8266 }
8267 }
8268
8269 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
8270 if (error != logstrs_size) {
8271 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
8272 goto fail;
8273 }
8274
8275 if (strstr(str_file, ram_file_str) != NULL) {
8276 temp->raw_sstr = raw_fmts;
8277 temp->raw_sstr_size = logstrs_size;
8278 temp->rodata_start = rodata_start;
8279 temp->rodata_end = rodata_end;
8280 } else if (strstr(str_file, rom_file_str) != NULL) {
8281 temp->rom_raw_sstr = raw_fmts;
8282 temp->rom_raw_sstr_size = logstrs_size;
8283 temp->rom_rodata_start = rodata_start;
8284 temp->rom_rodata_end = rodata_end;
8285 }
8286
8287 filp_close(filep, NULL);
8288 set_fs(fs);
8289
8290 return BCME_OK;
8291
8292fail:
8293 if (raw_fmts) {
8294 MFREE(osh, raw_fmts, logstrs_size);
8295 raw_fmts = NULL;
8296 }
8297
8298fail1:
8299 if (!IS_ERR(filep))
8300 filp_close(filep, NULL);
8301
8302 set_fs(fs);
8303
8304 if (strstr(str_file, ram_file_str) != NULL) {
8305 temp->raw_sstr = NULL;
8306 } else if (strstr(str_file, rom_file_str) != NULL) {
8307 temp->rom_raw_sstr = NULL;
8308 }
8309
8310 return error;
8311} /* dhd_init_static_strs_array */
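
/* Worked example with illustrative numbers: for ramstart=0x180000,
 * rodata_start=0x1a0000 and rodata_end=0x1a4000 taken from the map file, the
 * function reads logstrs_size = 0x1a4000 - 0x1a0000 = 0x4000 bytes starting at
 * file offset logfilebase = 0x1a0000 - 0x180000 = 0x20000, i.e. only the
 * rodata window of the much larger image file is pulled into memory.
 */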
8312
8313#endif /* SHOW_LOGTRACE */
8314
8315#ifdef DHD_ERPOM
8316uint enable_erpom = 0;
8317module_param(enable_erpom, int, 0);
8318
8319int
8320dhd_wlan_power_off_handler(void *handler, unsigned char reason)
8321{
8322 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
8323 bool dongle_isolation = dhdp->dongle_isolation;
8324
8325 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
8326
8327 if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
8328#if defined(DHD_FW_COREDUMP)
8329 /* save core dump to a file */
8330 if (dhdp->memdump_enabled) {
8331#ifdef DHD_SSSR_DUMP
 8332 dhdp->collect_sssr = TRUE;
8333#endif /* DHD_SSSR_DUMP */
8334 dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
8335 dhd_bus_mem_dump(dhdp);
8336 }
8337#endif /* DHD_FW_COREDUMP */
8338 }
8339
8340 /* pause data on all the interfaces */
8341 dhd_bus_stop_queue(dhdp->bus);
8342
8343 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
8344 dhdp->dongle_isolation = TRUE;
8345 dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
8346 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
8347 return 0;
8348}
8349
8350int
8351dhd_wlan_power_on_handler(void *handler, unsigned char reason)
8352{
8353 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
8354 bool dongle_isolation = dhdp->dongle_isolation;
8355
8356 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
8357 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
8358 dhdp->dongle_isolation = TRUE;
8359 dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
8360 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
8361 /* resume data on all the interfaces */
8362 dhd_bus_start_queue(dhdp->bus);
8363 return 0;
8364
8365}
8366
8367#endif /* DHD_ERPOM */
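
/* Note: the two ERPOM handlers above are symmetric - power-off pauses the tx
 * queues and runs dhd_bus_devreset(dhdp, 1) with dongle_isolation forced TRUE
 * so that devreset skips a second FLR, while power-on reverses this with
 * dhd_bus_devreset(dhdp, 0) followed by dhd_bus_start_queue().
 */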
8368
8369#ifdef BCMDBUS
8370uint
8371dhd_get_rxsz(dhd_pub_t *pub)
8372{
8373 struct net_device *net = NULL;
8374 dhd_info_t *dhd = NULL;
8375 uint rxsz;
8376
8377 /* Assign rxsz for dbus_attach */
8378 dhd = pub->info;
8379 net = dhd->iflist[0]->net;
8380 net->hard_header_len = ETH_HLEN + pub->hdrlen;
8381 rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
8382
8383 return rxsz;
8384}
8385
8386void
8387dhd_set_path(dhd_pub_t *pub)
8388{
8389 dhd_info_t *dhd = NULL;
8390
8391 dhd = pub->info;
8392
8393 /* try to download image and nvram to the dongle */
8394 if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
8395 DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
8396 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
8397 dhd_bus_update_fw_nv_path(dhd->pub.bus,
8398 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
8399 }
8400}
8401#endif
8402
8403/** Called once for each hardware (dongle) instance that this DHD manages */
8404dhd_pub_t *
8405dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
8406#ifdef BCMDBUS
8407 , void *data
8408#endif
8409)
8410{
8411 dhd_info_t *dhd = NULL;
8412 struct net_device *net = NULL;
8413 char if_name[IFNAMSIZ] = {'\0'};
8414#ifdef SHOW_LOGTRACE
8415 int ret;
8416#endif /* SHOW_LOGTRACE */
8417#ifdef DHD_ERPOM
8418 pom_func_handler_t *pom_handler;
8419#endif /* DHD_ERPOM */
8420#if defined(BCMSDIO) || defined(BCMPCIE)
8421 uint32 bus_type = -1;
8422 uint32 bus_num = -1;
8423 uint32 slot_num = -1;
8424 wifi_adapter_info_t *adapter = NULL;
8425#elif defined(BCMDBUS)
8426 wifi_adapter_info_t *adapter = data;
8427#endif
8428#ifdef GET_CUSTOM_MAC_ENABLE
8429 char hw_ether[62];
8430#endif /* GET_CUSTOM_MAC_ENABLE */
8431
8432 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
8433 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8434
8435#ifdef PCIE_FULL_DONGLE
8436 ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
8437 ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
8438#endif /* PCIE_FULL_DONGLE */
8439
8440 /* will implement get_ids for DBUS later */
8441#if defined(BCMSDIO)
8442 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
8443#endif // endif
8444#if defined(BCMSDIO) || defined(BCMPCIE)
8445 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
8446#endif
8447
8448 /* Allocate primary dhd_info */
8449 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
8450 if (dhd == NULL) {
8451 dhd = MALLOC(osh, sizeof(dhd_info_t));
8452 if (dhd == NULL) {
8453 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
8454 goto dhd_null_flag;
8455 }
8456 }
8457 memset(dhd, 0, sizeof(dhd_info_t));
8458 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
8459
8460 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
8461
8462 dhd->pub.osh = osh;
8463#ifdef DUMP_IOCTL_IOV_LIST
8464 dll_init(&(dhd->pub.dump_iovlist_head));
8465#endif /* DUMP_IOCTL_IOV_LIST */
8466 dhd->adapter = adapter;
8467 dhd->pub.adapter = (void *)adapter;
8468#ifdef BT_OVER_SDIO
8469 dhd->pub.is_bt_recovery_required = FALSE;
8470 mutex_init(&dhd->bus_user_lock);
8471#endif /* BT_OVER_SDIO */
8472
8473 g_dhd_pub = &dhd->pub;
8474
8475#ifdef DHD_DEBUG
8476 dll_init(&(dhd->pub.mw_list_head));
8477#endif /* DHD_DEBUG */
8478
8479#ifdef GET_CUSTOM_MAC_ENABLE
 8480 wifi_platform_get_mac_addr(dhd->adapter, hw_ether, iface_name);
8481 bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
8482#endif /* GET_CUSTOM_MAC_ENABLE */
8483#ifdef CUSTOM_FORCE_NODFS_FLAG
8484 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
8485 dhd->pub.force_country_change = TRUE;
8486#endif /* CUSTOM_FORCE_NODFS_FLAG */
8487#ifdef CUSTOM_COUNTRY_CODE
8488 get_customized_country_code(dhd->adapter,
8489 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
8490 dhd->pub.dhd_cflags);
8491#endif /* CUSTOM_COUNTRY_CODE */
8492#ifndef BCMDBUS
8493 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
8494 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
8495#ifdef DHD_WET
8496 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
8497#endif /* DHD_WET */
8498 /* Initialize thread based operation and lock */
8499 sema_init(&dhd->sdsem, 1);
8500#endif /* !BCMDBUS */
8501 dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
8502
8503 /* Link to info module */
8504 dhd->pub.info = dhd;
8505
8506 /* Link to bus module */
8507 dhd->pub.bus = bus;
8508 dhd->pub.hdrlen = bus_hdrlen;
 8509 dhd->pub.txoff = FALSE;
8510
 8511 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
 8512 * because dhd_detach will check whether .info is NULL.
 8513 */
8514 if (dhd_conf_attach(&dhd->pub) != 0) {
8515 DHD_ERROR(("dhd_conf_attach failed\n"));
8516 goto fail;
8517 }
8518#ifndef BCMDBUS
8519 dhd_conf_reset(&dhd->pub);
8520 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
8521 dhd_conf_preinit(&dhd->pub);
8522#endif /* !BCMDBUS */
8523
 8524 /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware name.
8525 * This is indeed a hack but we have to make it work properly before we have a better
8526 * solution
8527 */
8528 dhd_update_fw_nv_path(dhd);
8529
8530 /* Set network interface name if it was provided as module parameter */
8531 if (iface_name[0]) {
8532 int len;
8533 char ch;
8534 strncpy(if_name, iface_name, IFNAMSIZ);
8535 if_name[IFNAMSIZ - 1] = 0;
8536 len = strlen(if_name);
8537 ch = if_name[len - 1];
8538 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
 8539 strncat(if_name, "%d", IFNAMSIZ - len - 1);
8540 }
8541
8542 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
8543 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
8544 if (net == NULL) {
8545 goto fail;
8546 }
8547 mutex_init(&dhd->pub.ndev_op_sync);
8548
8549 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
8550#ifdef DHD_L2_FILTER
8551 /* initialize the l2_filter_cnt */
8552 dhd->pub.l2_filter_cnt = 0;
8553#endif // endif
 8554 net->netdev_ops = NULL;
8555
8556 mutex_init(&dhd->dhd_iovar_mutex);
8557 sema_init(&dhd->proto_sem, 1);
8558#ifdef DHD_ULP
8559 if (!(dhd_ulp_init(osh, &dhd->pub)))
8560 goto fail;
8561#endif /* DHD_ULP */
8562
8563#ifdef PROP_TXSTATUS
8564 spin_lock_init(&dhd->wlfc_spinlock);
8565
8566 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
8567 dhd->pub.plat_init = dhd_wlfc_plat_init;
8568 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
8569
8570#ifdef DHD_WLFC_THREAD
8571 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
8572 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
8573 if (IS_ERR(dhd->pub.wlfc_thread)) {
8574 DHD_ERROR(("create wlfc thread failed\n"));
8575 goto fail;
8576 } else {
8577 wake_up_process(dhd->pub.wlfc_thread);
8578 }
8579#endif /* DHD_WLFC_THREAD */
8580#endif /* PROP_TXSTATUS */
8581
8582 /* Initialize other structure content */
8583 init_waitqueue_head(&dhd->ioctl_resp_wait);
8584 init_waitqueue_head(&dhd->d3ack_wait);
8585 init_waitqueue_head(&dhd->ctrl_wait);
8586 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
8587 init_waitqueue_head(&dhd->dmaxfer_wait);
8588 init_waitqueue_head(&dhd->pub.tx_completion_wait);
8589 dhd->pub.dhd_bus_busy_state = 0;
8590 /* Initialize the spinlocks */
8591 spin_lock_init(&dhd->sdlock);
8592 spin_lock_init(&dhd->txqlock);
8593 spin_lock_init(&dhd->dhd_lock);
8594 spin_lock_init(&dhd->rxf_lock);
8595#ifdef WLTDLS
8596 spin_lock_init(&dhd->pub.tdls_lock);
8597#endif /* WLTDLS */
8598#if defined(RXFRAME_THREAD)
8599 dhd->rxthread_enabled = TRUE;
8600#endif /* defined(RXFRAME_THREAD) */
8601
8602#ifdef DHDTCPACK_SUPPRESS
8603 spin_lock_init(&dhd->tcpack_lock);
8604#endif /* DHDTCPACK_SUPPRESS */
8605
8606 /* Initialize Wakelock stuff */
8607 spin_lock_init(&dhd->wakelock_spinlock);
8608 spin_lock_init(&dhd->wakelock_evt_spinlock);
8609 DHD_OS_WAKE_LOCK_INIT(dhd);
8610 dhd->wakelock_counter = 0;
8611 /* wakelocks prevent a system from going into a low power state */
8612#ifdef CONFIG_HAS_WAKELOCK
 8613 // terence 20161023: cannot destroy wl_wifi when wlan is down, or a NULL pointer access will happen in dhd_ioctl_entry
8614 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
8615 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
8616#endif /* CONFIG_HAS_WAKELOCK */
8617
8618 mutex_init(&dhd->dhd_net_if_mutex);
8619 mutex_init(&dhd->dhd_suspend_mutex);
8620#if defined(PKT_FILTER_SUPPORT) && defined(APF)
8621 mutex_init(&dhd->dhd_apf_mutex);
8622#endif /* PKT_FILTER_SUPPORT && APF */
8623 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
8624
8625 /* Attach and link in the protocol */
8626 if (dhd_prot_attach(&dhd->pub) != 0) {
8627 DHD_ERROR(("dhd_prot_attach failed\n"));
8628 goto fail;
8629 }
8630 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
8631
8632#ifdef WL_CFG80211
8633 spin_lock_init(&dhd->pub.up_lock);
8634 /* Attach and link in the cfg80211 */
8635 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
8636 DHD_ERROR(("wl_cfg80211_attach failed\n"));
8637 goto fail;
8638 }
8639
8640 dhd_monitor_init(&dhd->pub);
8641 dhd_state |= DHD_ATTACH_STATE_CFG80211;
8642#endif // endif
8643
8644#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
8645 if (wl_ext_event_attach(net, &dhd->pub) != 0) {
8646 DHD_ERROR(("wl_ext_event_attach failed\n"));
8647 goto fail;
8648 }
 8649#ifdef WL_ESCAN
 8650 /* Attach and link in the escan */
8651 if (wl_escan_attach(net, &dhd->pub) != 0) {
8652 DHD_ERROR(("wl_escan_attach failed\n"));
8653 goto fail;
8654 }
8655#endif /* WL_ESCAN */
8656#ifdef WL_EXT_IAPSTA
8657 if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
8658 DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
8659 goto fail;
 8660 }
8661#endif /* WL_EXT_IAPSTA */
8662#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
8663#if defined(WL_WIRELESS_EXT)
8664 /* Attach and link in the iw */
8665 if (wl_iw_attach(net, &dhd->pub) != 0) {
8666 DHD_ERROR(("wl_iw_attach failed\n"));
8667 goto fail;
8668 }
8669 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
 8670#endif /* defined(WL_WIRELESS_EXT) */
8671
8672#ifdef SHOW_LOGTRACE
8673 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
8674 if (ret == BCME_OK) {
8675 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
8676 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
8677 rom_map_file_path);
8678 dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
8679 }
8680#endif /* SHOW_LOGTRACE */
8681
8682 /* attach debug if support */
8683 if (dhd_os_dbg_attach(&dhd->pub)) {
8684 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
8685 goto fail;
8686 }
 8687#ifdef DEBUGABILITY
8688#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
8689 /* enable verbose ring to support dump_trace_buf */
8690 dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
8691#endif /* SHOW_LOGTRACE */
8692
8693#ifdef DBG_PKT_MON
8694 dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
8695#ifdef DBG_PKT_MON_INIT_DEFAULT
8696 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
8697#endif /* DBG_PKT_MON_INIT_DEFAULT */
8698#endif /* DBG_PKT_MON */
8699#endif /* DEBUGABILITY */
8700
8701#ifdef DHD_STATUS_LOGGING
8702 dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
8703 MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
8704 if (dhd->pub.statlog == NULL) {
8705 DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
8706 }
8707#endif /* DHD_STATUS_LOGGING */
8708
8709#ifdef DHD_LOG_DUMP
8710 dhd_log_dump_init(&dhd->pub);
8711#endif /* DHD_LOG_DUMP */
8712#ifdef DHD_PKTDUMP_ROAM
8713 dhd_dump_pkt_init(&dhd->pub);
8714#endif /* DHD_PKTDUMP_ROAM */
8715
8716 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
 8717 DHD_ERROR(("%s: Failed to initialize pool of %u STAs\n", __FUNCTION__, DHD_MAX_STA));
8718 goto fail;
8719 }
8720
8721#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8722 dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
8723 if (!dhd->tx_wq) {
8724 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
8725 goto fail;
8726 }
8727 dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
8728 if (!dhd->rx_wq) {
8729 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
8730 destroy_workqueue(dhd->tx_wq);
8731 dhd->tx_wq = NULL;
8732 goto fail;
8733 }
8734#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8735
8736#ifndef BCMDBUS
8737 /* Set up the watchdog timer */
 8738 init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
8739 dhd->default_wd_interval = dhd_watchdog_ms;
8740
8741 if (dhd_watchdog_prio >= 0) {
8742 /* Initialize watchdog thread */
8743 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
8744 if (dhd->thr_wdt_ctl.thr_pid < 0) {
8745 goto fail;
8746 }
8747
8748 } else {
8749 dhd->thr_wdt_ctl.thr_pid = -1;
8750 }
8751
8752#ifdef SHOW_LOGTRACE
8753 skb_queue_head_init(&dhd->evt_trace_queue);
8754
8755 /* Create ring proc entries */
8756 dhd_dbg_ring_proc_create(&dhd->pub);
8757#endif /* SHOW_LOGTRACE */
8758
8759 /* Set up the bottom half handler */
8760 if (dhd_dpc_prio >= 0) {
8761 /* Initialize DPC thread */
8762 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
8763 if (dhd->thr_dpc_ctl.thr_pid < 0) {
8764 goto fail;
8765 }
8766 } else {
8767 /* use tasklet for dpc */
8768 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
8769 dhd->thr_dpc_ctl.thr_pid = -1;
8770 }
8771
8772 if (dhd->rxthread_enabled) {
8773 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
8774 /* Initialize RXF thread */
8775 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
8776 if (dhd->thr_rxf_ctl.thr_pid < 0) {
8777 goto fail;
8778 }
8779 }
8780#endif /* !BCMDBUS */
8781
8782 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
8783
8784#if defined(CONFIG_PM_SLEEP)
8785 if (!dhd_pm_notifier_registered) {
8786 dhd_pm_notifier_registered = TRUE;
8787 dhd->pm_notifier.notifier_call = dhd_pm_callback;
8788 dhd->pm_notifier.priority = 10;
8789 register_pm_notifier(&dhd->pm_notifier);
8790 }
8791
8792#endif /* CONFIG_PM_SLEEP */
8793
8794#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8795 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
8796 dhd->early_suspend.suspend = dhd_early_suspend;
8797 dhd->early_suspend.resume = dhd_late_resume;
8798 register_early_suspend(&dhd->early_suspend);
8799 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
8800#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
8801
8802#ifdef ARP_OFFLOAD_SUPPORT
8803 dhd->pend_ipaddr = 0;
8804 if (!dhd_inetaddr_notifier_registered) {
8805 dhd_inetaddr_notifier_registered = TRUE;
8806 register_inetaddr_notifier(&dhd_inetaddr_notifier);
8807 }
8808#endif /* ARP_OFFLOAD_SUPPORT */
8809
8810#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8811 if (!dhd_inet6addr_notifier_registered) {
8812 dhd_inet6addr_notifier_registered = TRUE;
8813 register_inet6addr_notifier(&dhd_inet6addr_notifier);
8814 }
8815#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8816 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
 8817 INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
8818#ifdef DEBUG_CPU_FREQ
8819 dhd->new_freq = alloc_percpu(int);
8820 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
8821 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
8822#endif // endif
8823#ifdef DHDTCPACK_SUPPRESS
8824#ifdef BCMSDIO
8825 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
8826#elif defined(BCMPCIE)
8827 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
8828#else
8829 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8830#endif /* BCMSDIO */
8831#endif /* DHDTCPACK_SUPPRESS */
8832
8833#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
8834#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
8835
8836#ifdef DHD_DEBUG_PAGEALLOC
8837 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
8838#endif /* DHD_DEBUG_PAGEALLOC */
8839
8840#if defined(DHD_LB)
8841
8842 dhd_lb_set_default_cpus(dhd);
8843 DHD_LB_STATS_INIT(&dhd->pub);
8844
8845 /* Initialize the CPU Masks */
8846 if (dhd_cpumasks_init(dhd) == 0) {
8847 /* Now we have the current CPU maps, run through candidacy */
8848 dhd_select_cpu_candidacy(dhd);
8849
8850 /* Register the call backs to CPU Hotplug sub-system */
8851 dhd_register_cpuhp_callback(dhd);
8852
8853 } else {
8854 /*
8855 * We are unable to initialize CPU masks, so candidacy algorithm
8856 * won't run, but still Load Balancing will be honoured based
8857 * on the CPUs allocated for a given job statically during init
8858 */
8859 dhd->cpu_notifier.notifier_call = NULL;
8860 DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
8861 __FUNCTION__));
8862 }
8863
8864#ifdef DHD_LB_TXP
8865#ifdef DHD_LB_TXP_DEFAULT_ENAB
 8866 /* Turn ON the feature by default */
8867 atomic_set(&dhd->lb_txp_active, 1);
8868#else
 8869 /* Turn OFF the feature by default */
8870 atomic_set(&dhd->lb_txp_active, 0);
8871#endif /* DHD_LB_TXP_DEFAULT_ENAB */
8872#endif /* DHD_LB_TXP */
8873
8874#ifdef DHD_LB_RXP
 8875 /* Turn ON the feature by default */
8876 atomic_set(&dhd->lb_rxp_active, 1);
8877#endif /* DHD_LB_RXP */
8878
8879 /* Initialize the Load Balancing Tasklets and Napi object */
8880#if defined(DHD_LB_TXC)
8881 tasklet_init(&dhd->tx_compl_tasklet,
8882 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
8883 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
8884 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
8885#endif /* DHD_LB_TXC */
8886#if defined(DHD_LB_RXC)
8887 tasklet_init(&dhd->rx_compl_tasklet,
8888 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
8889 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
8890 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
8891#endif /* DHD_LB_RXC */
8892
8893#if defined(DHD_LB_RXP)
8894 __skb_queue_head_init(&dhd->rx_pend_queue);
8895 skb_queue_head_init(&dhd->rx_napi_queue);
8896 /* Initialize the work that dispatches NAPI job to a given core */
8897 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
8898 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
8899#endif /* DHD_LB_RXP */
8900
8901#if defined(DHD_LB_TXP)
8902 INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
8903 skb_queue_head_init(&dhd->tx_pend_queue);
8904 /* Initialize the work that dispatches TX job to a given core */
8905 tasklet_init(&dhd->tx_tasklet,
8906 dhd_lb_tx_handler, (ulong)(dhd));
8907 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
8908#endif /* DHD_LB_TXP */
8909
8910 dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
8911#endif /* DHD_LB */
8912
8913#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
8914 INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
8915#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
8916
8917#if defined(BCMPCIE)
8918 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
8919 if (dhd->pub.extended_trap_data == NULL) {
8920 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
8921 }
8922#ifdef DNGL_AXI_ERROR_LOGGING
8923 dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
8924 if (dhd->pub.axi_err_dump == NULL) {
8925 DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
8926 }
8927#endif /* DNGL_AXI_ERROR_LOGGING */
8928#endif /* BCMPCIE */
8929
8930#ifdef SHOW_LOGTRACE
8931 if (dhd_init_logtrace_process(dhd) != BCME_OK) {
8932 goto fail;
8933 }
8934#endif /* SHOW_LOGTRACE */
8935
8936 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
8937
8938#ifdef EWP_EDL
8939 if (host_edl_support) {
8940 if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
8941 host_edl_support = FALSE;
8942 }
8943 }
8944#endif /* EWP_EDL */
8945
8946 (void)dhd_sysfs_init(dhd);
8947
8948#ifdef WL_NATOE
8949 /* Open Netlink socket for NF_CONNTRACK notifications */
8950 dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
8951 CT_ALL);
8952#endif /* WL_NATOE */
8953
8954 dhd_state |= DHD_ATTACH_STATE_DONE;
8955 dhd->dhd_state = dhd_state;
8956
8957 dhd_found++;
8958
8959#ifdef CSI_SUPPORT
8960 dhd_csi_init(&dhd->pub);
8961#endif /* CSI_SUPPORT */
8962
8963#ifdef DHD_DUMP_MNGR
8964 dhd->pub.dump_file_manage =
8965 (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
8966 if (unlikely(!dhd->pub.dump_file_manage)) {
8967 DHD_ERROR(("%s(): could not allocate memory for "
8968 "dhd_dump_file_manage_t\n", __FUNCTION__));
8969 }
8970#endif /* DHD_DUMP_MNGR */
8971#ifdef DHD_FW_COREDUMP
8972 /* Set memdump default values */
8973 dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
8974 /* Check the memdump capability */
8975 dhd_get_memdump_info(&dhd->pub);
8976#endif /* DHD_FW_COREDUMP */
8977
8978#ifdef DHD_ERPOM
8979 if (enable_erpom) {
8980 pom_handler = &dhd->pub.pom_wlan_handler;
8981 pom_handler->func_id = WLAN_FUNC_ID;
8982 pom_handler->handler = (void *)g_dhd_pub;
8983 pom_handler->power_off = dhd_wlan_power_off_handler;
8984 pom_handler->power_on = dhd_wlan_power_on_handler;
8985
8986 dhd->pub.pom_func_register = NULL;
8987 dhd->pub.pom_func_deregister = NULL;
8988 dhd->pub.pom_toggle_reg_on = NULL;
8989
8990 dhd->pub.pom_func_register = symbol_get(pom_func_register);
8991 dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
8992 dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
8993
8994 symbol_put(pom_func_register);
8995 symbol_put(pom_func_deregister);
8996 symbol_put(pom_toggle_reg_on);
8997
8998 if (!dhd->pub.pom_func_register ||
8999 !dhd->pub.pom_func_deregister ||
9000 !dhd->pub.pom_toggle_reg_on) {
9001 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
9002 "POM is not loaded\n", __FUNCTION__));
9003 ASSERT(0);
9004 goto fail;
9005 }
9006 dhd->pub.pom_func_register(pom_handler);
9007 dhd->pub.enable_erpom = TRUE;
9008
9009 }
9010#endif /* DHD_ERPOM */
9011 return &dhd->pub;
9012
9013fail:
9014 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
9015 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9016 __FUNCTION__, dhd_state, &dhd->pub));
9017 dhd->dhd_state = dhd_state;
9018 dhd_detach(&dhd->pub);
9019 dhd_free(&dhd->pub);
9020 }
9021
9022dhd_null_flag:
9023 return NULL;
9024}
9025
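/* Infer the firmware operating mode from a tag embedded in the firmware file name,
 * e.g. a path containing "_apsta" selects HOSTAP mode; with no recognized tag the
 * driver falls back to STA mode.
 */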
9026int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9027{
9028 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9029 return DHD_FLAG_HOSTAP_MODE;
9030 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9031 return DHD_FLAG_P2P_MODE;
9032 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9033 return DHD_FLAG_IBSS_MODE;
9034 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9035 return DHD_FLAG_MFG_MODE;
9036
9037 return DHD_FLAG_STA_MODE;
9038}
9039
9040int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9041{
9042 return dhd_get_fw_mode(dhdp->info);
9043}
9044
9045extern char * nvram_get(const char *name);
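/* Resolve the firmware/nvram/clm/config paths with precedence (highest first):
 * module parameter, adapter info, then the CONFIG_BCMDHD_* build-time defaults.
 * Returns FALSE when a required path is missing or would overflow its buffer.
 */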
9046bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
9047{
9048 int fw_len;
9049 int nv_len;
9050 int clm_len;
9051 int conf_len;
9052 const char *fw = NULL;
9053 const char *nv = NULL;
9054 const char *clm = NULL;
9055 const char *conf = NULL;
9056#ifdef DHD_UCODE_DOWNLOAD
9057 int uc_len;
9058 const char *uc = NULL;
9059#endif /* DHD_UCODE_DOWNLOAD */
9060 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9061 int fw_path_len = sizeof(dhdinfo->fw_path);
9062 int nv_path_len = sizeof(dhdinfo->nv_path);
9063
9064 /* Update the firmware and nvram paths. A path may come from adapter info or a module
9065 * parameter. The path from adapter info is used for initialization only (as it won't change).
9066 *
9067 * The firmware_path/nvram_path module parameters may be changed by the system at run
9068 * time. When one changes we need to copy it to dhdinfo->fw_path. An Android private
9069 * command may also change dhdinfo->fw_path. As such we need to clear the path info in
9070 * the module parameter after it is copied. We won't update the path until the module
9071 * parameter is changed again (first character is not '\0').
9072 */
9073
9074 /* set default firmware and nvram path for built-in type driver */
9075// if (!dhd_download_fw_on_driverload) {
9076#ifdef CONFIG_BCMDHD_FW_PATH
9077 fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
9078#endif /* CONFIG_BCMDHD_FW_PATH */
9079#ifdef CONFIG_BCMDHD_NVRAM_PATH
9080 nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
9081#endif /* CONFIG_BCMDHD_NVRAM_PATH */
9082// }
9083
9084 /* check if we need to initialize the path */
9085 if (dhdinfo->fw_path[0] == '\0') {
9086 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9087 fw = adapter->fw_path;
9088 }
9089 if (dhdinfo->nv_path[0] == '\0') {
9090 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9091 nv = adapter->nv_path;
9092 }
9093 if (dhdinfo->clm_path[0] == '\0') {
9094 if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
9095 clm = adapter->clm_path;
9096 }
9097 if (dhdinfo->conf_path[0] == '\0') {
9098 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
9099 conf = adapter->conf_path;
9100 }
9101
9102 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9103 *
9104 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9105 */
9106 if (firmware_path[0] != '\0')
9107 fw = firmware_path;
9108
9109 if (nvram_path[0] != '\0')
9110 nv = nvram_path;
9111 if (clm_path[0] != '\0')
9112 clm = clm_path;
9113 if (config_path[0] != '\0')
9114 conf = config_path;
9115
9116#ifdef DHD_UCODE_DOWNLOAD
9117 if (ucode_path[0] != '\0')
9118 uc = ucode_path;
9119#endif /* DHD_UCODE_DOWNLOAD */
9120
9121 if (fw && fw[0] != '\0') {
9122 fw_len = strlen(fw);
9123 if (fw_len >= fw_path_len) {
9124 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
9125 return FALSE;
9126 }
9127 strncpy(dhdinfo->fw_path, fw, fw_path_len);
9128 if (dhdinfo->fw_path[fw_len-1] == '\n')
9129 dhdinfo->fw_path[fw_len-1] = '\0';
9130 }
9131 if (nv && nv[0] != '\0') {
9132 nv_len = strlen(nv);
9133 if (nv_len >= nv_path_len) {
9134 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
9135 return FALSE;
9136 }
9137 memset(dhdinfo->nv_path, 0, nv_path_len);
9138 strncpy(dhdinfo->nv_path, nv, nv_path_len);
9139 dhdinfo->nv_path[nv_len] = '\0';
9140#ifdef DHD_USE_SINGLE_NVRAM_FILE
9141 /* Remove "_net" or "_mfg" tag from current nvram path */
9142 {
9143 char *nvram_tag = "nvram_";
9144 char *ext_tag = ".txt";
9145 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
9146 bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
9147 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
9148 if (valid_buf) {
9149 char *sp = sp_nvram + strlen(nvram_tag) - 1;
9150 uint32 padding_size = (uint32)(dhdinfo->nv_path +
9151 nv_path_len - sp);
9152 memset(sp, 0, padding_size);
9153 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
9154 nv_len = strlen(dhdinfo->nv_path);
9155 DHD_INFO(("%s: new nvram path = %s\n",
9156 __FUNCTION__, dhdinfo->nv_path));
9157 } else if (sp_nvram) {
9158 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
9159 __FUNCTION__));
9160 return FALSE;
9161 } else {
9162 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
9163 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
9164 }
9165 }
9166#endif /* DHD_USE_SINGLE_NVRAM_FILE */
9167 if (dhdinfo->nv_path[nv_len-1] == '\n')
9168 dhdinfo->nv_path[nv_len-1] = '\0';
9169 }
9170 if (clm && clm[0] != '\0') {
9171 clm_len = strlen(clm);
9172 if (clm_len >= sizeof(dhdinfo->clm_path)) {
9173 DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
9174 return FALSE;
9175 }
9176 strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
9177 if (dhdinfo->clm_path[clm_len-1] == '\n')
9178 dhdinfo->clm_path[clm_len-1] = '\0';
9179 }
9180 if (conf && conf[0] != '\0') {
9181 conf_len = strlen(conf);
9182 if (conf_len >= sizeof(dhdinfo->conf_path)) {
9183 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
9184 return FALSE;
9185 }
9186 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
9187 if (dhdinfo->conf_path[conf_len-1] == '\n')
9188 dhdinfo->conf_path[conf_len-1] = '\0';
9189 }
9190#ifdef DHD_UCODE_DOWNLOAD
9191 if (uc && uc[0] != '\0') {
9192 uc_len = strlen(uc);
9193 if (uc_len >= sizeof(dhdinfo->uc_path)) {
9194 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
9195 return FALSE;
9196 }
9197 strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
9198 if (dhdinfo->uc_path[uc_len-1] == '\n')
9199 dhdinfo->uc_path[uc_len-1] = '\0';
9200 }
9201#endif /* DHD_UCODE_DOWNLOAD */
9202
9203#if 0
9204 /* clear the path in module parameter */
9205 if (dhd_download_fw_on_driverload) {
9206 firmware_path[0] = '\0';
9207 nvram_path[0] = '\0';
9208 clm_path[0] = '\0';
9209 config_path[0] = '\0';
9210 }
9211#endif
9212#ifdef DHD_UCODE_DOWNLOAD
9213 ucode_path[0] = '\0';
9214 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
9215#endif /* DHD_UCODE_DOWNLOAD */
9216
9217 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
9218 if (dhdinfo->fw_path[0] == '\0') {
9219 DHD_ERROR(("firmware path not found\n"));
9220 return FALSE;
9221 }
9222 if (dhdinfo->nv_path[0] == '\0') {
9223 DHD_ERROR(("nvram path not found\n"));
9224 return FALSE;
9225 }
9226
9227 return TRUE;
9228}
9229
9230#if defined(BT_OVER_SDIO)
9231extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
9232{
9233 int fw_len;
9234 const char *fw = NULL;
9235 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9236
9237 /* Update bt firmware path. The path may be from adapter info or module parameter
9238 * The path from adapter info is used for initialization only (as it won't change).
9239 *
9240 * The btfw_path module parameter may be changed by the system at run
9241 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
9242 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
9243 * module parameter after it is copied. We won't update the path until the module parameter
9244 * is changed again (first character is not '\0')
9245 */
9246
9247 /* set default firmware and nvram path for built-in type driver */
9248 if (!dhd_download_fw_on_driverload) {
9249#ifdef CONFIG_BCMDHD_BTFW_PATH
9250 fw = CONFIG_BCMDHD_BTFW_PATH;
9251#endif /* CONFIG_BCMDHD_FW_PATH */
9252 }
9253
9254 /* check if we need to initialize the path */
9255 if (dhdinfo->btfw_path[0] == '\0') {
9256 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
9257 fw = adapter->btfw_path;
9258 }
9259
9260 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9261 */
9262 if (btfw_path[0] != '\0')
9263 fw = btfw_path;
9264
9265 if (fw && fw[0] != '\0') {
9266 fw_len = strlen(fw);
9267 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
9268 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
9269 return FALSE;
9270 }
9271 strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
9272 if (dhdinfo->btfw_path[fw_len-1] == '\n')
9273 dhdinfo->btfw_path[fw_len-1] = '\0';
9274 }
9275
9276 /* clear the path in module parameter */
9277 btfw_path[0] = '\0';
9278
9279 if (dhdinfo->btfw_path[0] == '\0') {
9280 DHD_ERROR(("bt firmware path not found\n"));
9281 return FALSE;
9282 }
9283
9284 return TRUE;
9285}
9286#endif /* defined (BT_OVER_SDIO) */
9287
9288#if defined(BT_OVER_SDIO)
9289wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
9290{
9291 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
9292 /* assuming that dhd_pub_t type pointer is available from a global variable */
9293 return (wlan_bt_handle_t) g_dhd_pub;
9294} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
9295
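/* Download the BT firmware image to the dongle over the shared SDIO bus; this only
 * proceeds once the WLAN bus is in the DATA state and a valid btfw path is set.
 */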
9296int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
9297{
9298 int ret = -1;
9299 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
9300 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
9301
9302 /* Download BT firmware image to the dongle */
9303 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
9304 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
9305 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
9306 if (ret < 0) {
9307 DHD_ERROR(("%s: failed to download btfw from: %s\n",
9308 __FUNCTION__, dhd->btfw_path));
9309 return ret;
9310 }
9311 }
9312 return ret;
9313} EXPORT_SYMBOL(dhd_download_btfw);
9314#endif /* defined (BT_OVER_SDIO) */
9315
9316#ifndef BCMDBUS
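/* Bus bring-up sequence: download firmware/nvram when the bus is down, start the
 * watchdog timer, initialize the bus, register the OOB interrupt where configured,
 * set up H2D flow rings (PCIe full dongle), run protocol init, and finally sync
 * host state with the dongle.
 */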
9317int
9318dhd_bus_start(dhd_pub_t *dhdp)
9319{
9320 int ret = -1;
9321 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
9322 unsigned long flags;
9323
9324#if defined(DHD_DEBUG) && defined(BCMSDIO)
9325 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
9326#endif /* DHD_DEBUG && BCMSDIO */
9327 ASSERT(dhd);
9328
9329 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
9330 dhdp->dongle_trap_occured = 0;
9331#ifdef DHD_SSSR_DUMP
9332 /* Flag to indicate sssr dump is collected */
9333 dhdp->sssr_dump_collected = 0;
9334#endif /* DHD_SSSR_DUMP */
9335 dhdp->iovar_timeout_occured = 0;
9336#ifdef PCIE_FULL_DONGLE
9337 dhdp->d3ack_timeout_occured = 0;
9338 dhdp->livelock_occured = 0;
9339 dhdp->pktid_audit_failed = 0;
9340#endif /* PCIE_FULL_DONGLE */
9341 dhd->pub.iface_op_failed = 0;
9342 dhd->pub.scan_timeout_occurred = 0;
9343 dhd->pub.scan_busy_occurred = 0;
9344 /* Clear induced error during initialize */
9345 dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
9346
9347 /* set default value for now. Will be updated again in dhd_preinit_ioctls()
9348 * after querying FW
9349 */
9350 dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
9351 dhdp->event_log_max_sets_queried = FALSE;
9352 dhdp->smmu_fault_occurred = 0;
9353#ifdef DNGL_AXI_ERROR_LOGGING
9354 dhdp->axi_error = FALSE;
9355#endif /* DNGL_AXI_ERROR_LOGGING */
9356
9357 DHD_PERIM_LOCK(dhdp);
9358 /* try to download image and nvram to the dongle */
9359 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
9360 /* Indicate FW Download has not yet done */
9361 dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
9362 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
9363 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
9364#if defined(DHD_DEBUG) && defined(BCMSDIO)
9365 fw_download_start = OSL_SYSUPTIME();
9366#endif /* DHD_DEBUG && BCMSDIO */
9367 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
9368 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
9369#if defined(DHD_DEBUG) && defined(BCMSDIO)
9370 fw_download_end = OSL_SYSUPTIME();
9371#endif /* DHD_DEBUG && BCMSDIO */
9372 if (ret < 0) {
9373 DHD_ERROR(("%s: failed to download firmware %s\n",
9374 __FUNCTION__, dhd->fw_path));
9375 DHD_PERIM_UNLOCK(dhdp);
9376 return ret;
9377 }
9378 /* Indicate FW Download has succeeded */
9379 dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
9380 }
9381 if (dhd->pub.busstate != DHD_BUS_LOAD) {
9382 DHD_PERIM_UNLOCK(dhdp);
9383 return -ENETDOWN;
9384 }
9385
9386#ifdef BCMSDIO
9387 dhd_os_sdlock(dhdp);
9388#endif /* BCMSDIO */
9389
9390 /* Start the watchdog timer */
9391 dhd->pub.tickcnt = 0;
9392 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
9393
9394 /* Bring up the bus */
9395 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
9396
9397 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
9398#ifdef BCMSDIO
9399 dhd_os_sdunlock(dhdp);
9400#endif /* BCMSDIO */
9401 DHD_PERIM_UNLOCK(dhdp);
9402 return ret;
9403 }
9404
9405 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
9406
9407#ifdef DHD_ULP
9408 dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
9409#endif /* DHD_ULP */
9410#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
9411 /* Host registration for OOB interrupt */
9412 if (dhd_bus_oob_intr_register(dhdp)) {
9413 /* deactivate timer and wait for the handler to finish */
9414#if !defined(BCMPCIE_OOB_HOST_WAKE)
9415 DHD_GENERAL_LOCK(&dhd->pub, flags);
9416 dhd->wd_timer_valid = FALSE;
9417 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9418 del_timer_sync(&dhd->timer);
9419
9420#endif /* !BCMPCIE_OOB_HOST_WAKE */
9421 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9422 DHD_PERIM_UNLOCK(dhdp);
9423 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
9424 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9425 return -ENODEV;
9426 }
9427
9428#if defined(BCMPCIE_OOB_HOST_WAKE)
9429 dhd_bus_oob_intr_set(dhdp, TRUE);
9430#else
9431 /* Enable oob at firmware */
9432 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
9433#endif /* BCMPCIE_OOB_HOST_WAKE */
9434#elif defined(FORCE_WOWLAN)
9435 /* Enable oob at firmware */
9436 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
9437#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
9438#ifdef PCIE_FULL_DONGLE
9439 {
9440 /* max_h2d_rings includes H2D common rings */
9441 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
9442
9443 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
9444 max_h2d_rings));
9445 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
9446#ifdef BCMSDIO
9447 dhd_os_sdunlock(dhdp);
9448#endif /* BCMSDIO */
9449 DHD_PERIM_UNLOCK(dhdp);
9450 return ret;
9451 }
9452 }
9453#endif /* PCIE_FULL_DONGLE */
9454
9455 /* Do protocol initialization necessary for IOCTL/IOVAR */
9456 ret = dhd_prot_init(&dhd->pub);
9457 if (unlikely(ret != BCME_OK)) {
9458 DHD_PERIM_UNLOCK(dhdp);
9459 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9460 return ret;
9461 }
9462
9463 /* If bus is not ready, can't come up */
9464 if (dhd->pub.busstate != DHD_BUS_DATA) {
9465 DHD_GENERAL_LOCK(&dhd->pub, flags);
9466 dhd->wd_timer_valid = FALSE;
9467 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9468 del_timer_sync(&dhd->timer);
9469 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
9470 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9471#ifdef BCMSDIO
9472 dhd_os_sdunlock(dhdp);
9473#endif /* BCMSDIO */
9474 DHD_PERIM_UNLOCK(dhdp);
9475 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9476 return -ENODEV;
9477 }
9478
9479#ifdef BCMSDIO
9480 dhd_os_sdunlock(dhdp);
9481#endif /* BCMSDIO */
9482
9483 /* Bus is ready, query any dongle information */
9484#if defined(DHD_DEBUG) && defined(BCMSDIO)
9485 f2_sync_start = OSL_SYSUPTIME();
9486#endif /* DHD_DEBUG && BCMSDIO */
9487 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
9488 DHD_GENERAL_LOCK(&dhd->pub, flags);
9489 dhd->wd_timer_valid = FALSE;
9490 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9491 del_timer_sync(&dhd->timer);
9492 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
9493 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9494 DHD_PERIM_UNLOCK(dhdp);
9495 return ret;
9496 }
9497
9498#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
9499 defined(CONFIG_SOC_EXYNOS9820)
9500 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
9501 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
9502#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
9503
9504#if defined(DHD_DEBUG) && defined(BCMSDIO)
9505 f2_sync_end = OSL_SYSUPTIME();
9506 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
9507 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
9508#endif /* DHD_DEBUG && BCMSDIO */
9509
9510#ifdef ARP_OFFLOAD_SUPPORT
9511 if (dhd->pend_ipaddr) {
9512#ifdef AOE_IP_ALIAS_SUPPORT
9513 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
9514#endif /* AOE_IP_ALIAS_SUPPORT */
9515 dhd->pend_ipaddr = 0;
9516 }
9517#endif /* ARP_OFFLOAD_SUPPORT */
9518
9519 DHD_PERIM_UNLOCK(dhdp);
9520
9521 return 0;
9522}
9523#endif /* !BCMDBUS */
9524
9525#ifdef WLTDLS
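/* Enable or disable TDLS through the "tdls_enable" iovar; when auto mode is
 * requested, also program the idle-time, RSSI and packet-count thresholds that
 * drive automatic TDLS link setup and teardown.
 */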
9526int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
9527{
9528 uint32 tdls = tdls_on;
9529 int ret = 0;
9530 uint32 tdls_auto_op = 0;
9531 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
9532 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
9533 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
9534 uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
9535 uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
9536
9537 BCM_REFERENCE(mac);
9538 if (!FW_SUPPORTED(dhd, tdls))
9539 return BCME_ERROR;
9540
9541 if (dhd->tdls_enable == tdls_on)
9542 goto auto_mode;
9543 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
9544 if (ret < 0) {
9545 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
9546 goto exit;
9547 }
9548 dhd->tdls_enable = tdls_on;
9549auto_mode:
9550
9551 tdls_auto_op = auto_on;
9552 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
9553 0, TRUE);
9554 if (ret < 0) {
9555 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
9556 goto exit;
9557 }
9558
9559 if (tdls_auto_op) {
9560 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
9561 sizeof(tdls_idle_time), NULL, 0, TRUE);
9562 if (ret < 0) {
9563 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
9564 goto exit;
9565 }
9566 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
9567 sizeof(tdls_rssi_high), NULL, 0, TRUE);
9568 if (ret < 0) {
9569 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
9570 goto exit;
9571 }
9572 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
9573 sizeof(tdls_rssi_low), NULL, 0, TRUE);
9574 if (ret < 0) {
9575 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
9576 goto exit;
9577 }
9578 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
9579 sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
9580 if (ret < 0) {
9581 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
9582 goto exit;
9583 }
9584 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
9585 sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
9586 if (ret < 0) {
9587 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
9588 goto exit;
9589 }
9590 }
9591
9592exit:
9593 return ret;
9594}
9595
9596int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
9597{
9598 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9599 int ret = 0;
9600 if (dhd)
9601 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
9602 else
9603 ret = BCME_ERROR;
9604 return ret;
9605}
9606
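/* Switch between WFD and auto TDLS operation: TDLS is first disabled, the
 * "tdls_wfd_mode" iovar is applied while it is down, and TDLS is then re-enabled
 * with the chosen auto-mode setting.
 */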
9607int
9608dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
9609{
9610 int ret = 0;
9611 bool auto_on = false;
9612 uint32 mode = wfd_mode;
9613
9614#ifdef ENABLE_TDLS_AUTO_MODE
9615 if (wfd_mode) {
9616 auto_on = false;
9617 } else {
9618 auto_on = true;
9619 }
9620#else
9621 auto_on = false;
9622#endif /* ENABLE_TDLS_AUTO_MODE */
9623 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
9624 if (ret < 0) {
9625 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
9626 return ret;
9627 }
9628
9629 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
9630 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
9631 DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
9632 return ret;
9633 }
9634
9635 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
9636 if (ret < 0) {
9637 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
9638 return ret;
9639 }
9640
9641 dhd->tdls_mode = mode;
9642 return ret;
9643}
9644#ifdef PCIE_FULL_DONGLE
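/* Maintain the host-side TDLS peer table (a singly linked list guarded by
 * tdls_lock) from firmware connect/disconnect events; on disconnect the peer's
 * flow rings are deleted before the node is unlinked and freed.
 */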
9645int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
9646{
9647 dhd_pub_t *dhd_pub = dhdp;
9648 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
9649 tdls_peer_node_t *new = NULL, *prev = NULL;
9650 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
9651 uint8 *da = (uint8 *)&event->addr.octet[0];
9652 bool connect = FALSE;
9653 uint32 reason = ntoh32(event->reason);
9654 unsigned long flags;
9655
9656 /* No handling needed for peer discovered reason */
9657 if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
9658 return BCME_ERROR;
9659 }
9660 if (reason == WLC_E_TDLS_PEER_CONNECTED)
9661 connect = TRUE;
9662 else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
9663 connect = FALSE;
9664 else
9665 {
9666 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
9667 return BCME_ERROR;
9668 }
9669 if (ifindex == DHD_BAD_IF)
9670 return BCME_ERROR;
9671
9672 if (connect) {
9673 while (cur != NULL) {
9674 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
9675 DHD_ERROR(("%s: TDLS Peer exists already %d\n",
9676 __FUNCTION__, __LINE__));
9677 return BCME_ERROR;
9678 }
9679 cur = cur->next;
9680 }
9681
9682 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
9683 if (new == NULL) {
9684 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
9685 return BCME_ERROR;
9686 }
9687 memcpy(new->addr, da, ETHER_ADDR_LEN);
9688 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
9689 new->next = dhd_pub->peer_tbl.node;
9690 dhd_pub->peer_tbl.node = new;
9691 dhd_pub->peer_tbl.tdls_peer_count++;
9692 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
9693
9694 } else {
9695 while (cur != NULL) {
9696 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
9697 dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
9698 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
9699 if (prev)
9700 prev->next = cur->next;
9701 else
9702 dhd_pub->peer_tbl.node = cur->next;
9703 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
9704 dhd_pub->peer_tbl.tdls_peer_count--;
9705 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
9706 return BCME_OK;
9707 }
9708 prev = cur;
9709 cur = cur->next;
9710 }
9711 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
9712 }
9713 return BCME_OK;
9714}
9715#endif /* PCIE_FULL_DONGLE */
9716#endif // endif
9717
9718bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
9719{
9720 if (!dhd)
9721 return FALSE;
9722
9723 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
9724 return TRUE;
9725 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
9726 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
9727 return TRUE;
9728 else
9729 return FALSE;
9730}
9731#if !defined(AP) && defined(WLP2P)
9732/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
9733 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
9734 * firmware and accordingly enable concurrent mode (apply P2P settings). SoftAP firmware
9735 * would still be named fw_bcmdhd_apsta.
9736 */
9737uint32
9738dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
9739{
9740 int32 ret = 0;
9741 char buf[WLC_IOCTL_SMLEN];
9742 bool mchan_supported = FALSE;
9743 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
9744 * test mode, that means we only will use the mode as it is
9745 */
9746 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
9747 return 0;
9748 if (FW_SUPPORTED(dhd, vsdb)) {
9749 mchan_supported = TRUE;
9750 }
9751 if (!FW_SUPPORTED(dhd, p2p)) {
9752 DHD_TRACE(("Chip does not support p2p\n"));
9753 return 0;
9754 } else {
9755 /* Chip supports p2p, but check whether p2p is really implemented in the firmware */
9756 memset(buf, 0, sizeof(buf));
9757 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
9758 sizeof(buf), FALSE);
9759 if (ret < 0) {
9760 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
9761 return 0;
9762 } else {
9763 if (buf[0] == 1) {
9764 /* By default, chip supports single chan concurrency,
9765 * now lets check for mchan
9766 */
9767 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
9768 if (mchan_supported)
9769 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
9770 if (FW_SUPPORTED(dhd, rsdb)) {
9771 ret |= DHD_FLAG_RSDB_MODE;
9772 }
9773#ifdef WL_SUPPORT_MULTIP2P
9774 if (FW_SUPPORTED(dhd, mp2p)) {
9775 ret |= DHD_FLAG_MP2P_MODE;
9776 }
9777#endif /* WL_SUPPORT_MULTIP2P */
9778#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
9779 return ret;
9780#else
9781 return 0;
9782#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
9783 }
9784 }
9785 }
9786 return 0;
9787}
9788#endif // endif
9789
9790#if defined(WLADPS)
9791
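/* Program ADPS (adaptive power save) by sending the WL_ADPS_IOV_MODE sub-iovar
 * once per band; BCME_UNSUPPORTED is treated as success, presumably so firmware
 * without ADPS keeps working.
 */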
9792int
9793dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
9794{
9795 int i;
9796 int len;
9797 int ret = BCME_OK;
9798
9799 bcm_iov_buf_t *iov_buf = NULL;
9800 wl_adps_params_v1_t *data = NULL;
9801
9802 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
9803 iov_buf = MALLOC(dhd->osh, len);
9804 if (iov_buf == NULL) {
9805 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
9806 ret = BCME_NOMEM;
9807 goto exit;
9808 }
9809
9810 iov_buf->version = WL_ADPS_IOV_VER;
9811 iov_buf->len = sizeof(*data);
9812 iov_buf->id = WL_ADPS_IOV_MODE;
9813
9814 data = (wl_adps_params_v1_t *)iov_buf->data;
9815 data->version = ADPS_SUB_IOV_VERSION_1;
9816 data->length = sizeof(*data);
9817 data->mode = on;
9818
9819 for (i = 1; i <= MAX_BANDS; i++) {
9820 data->band = i;
9821 ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
9822 if (ret < 0) {
9823 if (ret == BCME_UNSUPPORTED) {
9824 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
9825 ret = BCME_OK;
9826 goto exit;
9827 }
9828 else {
9829 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
9830 __FUNCTION__, on ? "On" : "Off", i, ret));
9831 goto exit;
9832 }
9833 }
9834 }
9835
9836exit:
9837 if (iov_buf) {
9838 MFREE(dhd->osh, iov_buf, len);
9839 iov_buf = NULL;
9840 }
9841 return ret;
9842}
9843#endif // endif
9844
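/* Post-download firmware provisioning: query the firmware version and capability
 * string, resolve op_mode, program the MAC address, country code, power save,
 * roaming parameters and the remaining per-feature iovars.
 */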
9845int
9846dhd_preinit_ioctls(dhd_pub_t *dhd)
9847{
9848 int ret = 0;
9849 char eventmask[WL_EVENTING_MASK_LEN];
9850 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
9851 uint32 buf_key_b4_m4 = 1;
9852 uint8 msglen;
9853 eventmsgs_ext_t *eventmask_msg = NULL;
9854 uint32 event_log_max_sets = 0;
9855 char* iov_buf = NULL;
9856 int ret2 = 0;
9857 uint32 wnm_cap = 0;
9858#if defined(BCMSUP_4WAY_HANDSHAKE)
9859 uint32 sup_wpa = 1;
9860#endif /* BCMSUP_4WAY_HANDSHAKE */
9861#if defined(CUSTOM_AMPDU_BA_WSIZE)
9862 uint32 ampdu_ba_wsize = 0;
9863#endif // endif
9864#if defined(CUSTOM_AMPDU_MPDU)
9865 int32 ampdu_mpdu = 0;
9866#endif // endif
9867#if defined(CUSTOM_AMPDU_RELEASE)
9868 int32 ampdu_release = 0;
9869#endif // endif
9870#if defined(CUSTOM_AMSDU_AGGSF)
9871 int32 amsdu_aggsf = 0;
9872#endif // endif
9873
9874#if defined(BCMSDIO) || defined(BCMDBUS)
9875#ifdef PROP_TXSTATUS
9876 int wlfc_enable = TRUE;
9877#ifndef DISABLE_11N
9878 uint32 hostreorder = 1;
9879 uint wl_down = 1;
9880#endif /* DISABLE_11N */
9881#endif /* PROP_TXSTATUS */
9882#endif /* BCMSDIO || BCMDBUS */
9883#ifndef PCIE_FULL_DONGLE
9884 uint32 wl_ap_isolate;
9885#endif /* PCIE_FULL_DONGLE */
9886 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
9887 uint wnm_bsstrans_resp = 0;
9888#ifdef SUPPORT_SET_CAC
9889 uint32 cac = 1;
9890#endif /* SUPPORT_SET_CAC */
9891
9892#ifdef DHD_ENABLE_LPC
9893 uint32 lpc = 1;
9894#endif /* DHD_ENABLE_LPC */
9895 uint power_mode = PM_FAST;
9896#if defined(BCMSDIO)
9897 uint32 dongle_align = DHD_SDALIGN;
9898 uint32 glom = CUSTOM_GLOM_SETTING;
9899#endif /* defined(BCMSDIO) */
9900#if defined(USE_WL_CREDALL)
9901 uint32 credall = 1;
9902#endif // endif
9903 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
9904 uint scancache_enab = TRUE;
9905#ifdef ENABLE_BCN_LI_BCN_WAKEUP
9906 uint32 bcn_li_bcn = 1;
9907#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
9908 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
9909#if defined(ARP_OFFLOAD_SUPPORT)
9910 int arpoe = 0;
9911#endif // endif
9912 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
9913 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
9914 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
9915 char buf[WLC_IOCTL_SMLEN];
9916 char *ptr;
9917 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
9918#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
9919 wl_el_tag_params_t *el_tag = NULL;
9920#endif /* DHD_8021X_DUMP */
9921#ifdef ROAM_ENABLE
9922 uint roamvar = 0;
9923 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
9924 int roam_scan_period[2] = {10, WLC_BAND_ALL};
9925 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
9926#ifdef ROAM_AP_ENV_DETECTION
9927 int roam_env_mode = AP_ENV_INDETERMINATE;
9928#endif /* ROAM_AP_ENV_DETECTION */
9929#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
9930 int roam_fullscan_period = 60;
9931#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
9932 int roam_fullscan_period = 120;
9933#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
9934#ifdef DISABLE_BCNLOSS_ROAM
9935 uint roam_bcnloss_off = 1;
9936#endif /* DISABLE_BCNLOSS_ROAM */
9937#else
9938#ifdef DISABLE_BUILTIN_ROAM
9939 uint roamvar = 1;
9940#endif /* DISABLE_BUILTIN_ROAM */
9941#endif /* ROAM_ENABLE */
9942
9943#if defined(SOFTAP)
9944 uint dtim = 1;
9945#endif // endif
9946#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
9947 struct ether_addr p2p_ea;
9948#endif // endif
9949#ifdef BCMCCX
9950 uint32 ccx = 1;
9951#endif // endif
9952#ifdef SOFTAP_UAPSD_OFF
9953 uint32 wme_apsd = 0;
9954#endif /* SOFTAP_UAPSD_OFF */
9955#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
9956 uint32 apsta = 1; /* Enable APSTA mode */
9957#elif defined(SOFTAP_AND_GC)
9958 uint32 apsta = 0;
9959 int ap_mode = 1;
9960#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
9961#ifdef GET_CUSTOM_MAC_ENABLE
9962 struct ether_addr ea_addr;
9963 char hw_ether[62];
9964#endif /* GET_CUSTOM_MAC_ENABLE */
9965#ifdef OKC_SUPPORT
9966 uint32 okc = 1;
9967#endif // endif
9968
9969#ifdef DISABLE_11N
9970 uint32 nmode = 0;
9971#endif /* DISABLE_11N */
9972
9973#ifdef USE_WL_TXBF
9974 uint32 txbf = 1;
9975#endif /* USE_WL_TXBF */
9976#ifdef DISABLE_TXBFR
9977 uint32 txbf_bfr_cap = 0;
9978#endif /* DISABLE_TXBFR */
9979#ifdef AMPDU_VO_ENABLE
9980 struct ampdu_tid_control tid;
9981#endif // endif
9982#if defined(PROP_TXSTATUS)
9983#ifdef USE_WFA_CERT_CONF
9984 uint32 proptx = 0;
9985#endif /* USE_WFA_CERT_CONF */
9986#endif /* PROP_TXSTATUS */
9987#ifdef DHD_SET_FW_HIGHSPEED
9988 uint32 ack_ratio = 250;
9989 uint32 ack_ratio_depth = 64;
9990#endif /* DHD_SET_FW_HIGHSPEED */
9991#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
9992 uint32 vht_features = 0; /* init to 0, will be set based on each support */
9993#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
9994#ifdef DISABLE_11N_PROPRIETARY_RATES
9995 uint32 ht_features = 0;
9996#endif /* DISABLE_11N_PROPRIETARY_RATES */
9997#ifdef CUSTOM_PSPRETEND_THR
9998 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
9999#endif // endif
10000#ifdef CUSTOM_EVENT_PM_WAKE
10001 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
10002#endif /* CUSTOM_EVENT_PM_WAKE */
10003#ifdef DISABLE_PRUNED_SCAN
10004 uint32 scan_features = 0;
10005#endif /* DISABLE_PRUNED_SCAN */
10006#ifdef BCMPCIE_OOB_HOST_WAKE
10007 uint32 hostwake_oob = 0;
10008#endif /* BCMPCIE_OOB_HOST_WAKE */
10009#ifdef EVENT_LOG_RATE_HC
10010 /* threshold number of lines per second */
10011#define EVENT_LOG_RATE_HC_THRESHOLD 1000
10012 uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
10013#endif /* EVENT_LOG_RATE_HC */
10014 wl_wlc_version_t wlc_ver;
10015
10016#ifdef PKT_FILTER_SUPPORT
10017 dhd_pkt_filter_enable = TRUE;
10018#ifdef APF
10019 dhd->apf_set = FALSE;
10020#endif /* APF */
10021#endif /* PKT_FILTER_SUPPORT */
10022 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
10023#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
10024 dhd->max_dtim_enable = TRUE;
10025#else
10026 dhd->max_dtim_enable = FALSE;
10027#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
10028 dhd->disable_dtim_in_suspend = FALSE;
10029#ifdef SUPPORT_SET_TID
10030 dhd->tid_mode = SET_TID_OFF;
10031 dhd->target_uid = 0;
10032 dhd->target_tid = 0;
10033#endif /* SUPPORT_SET_TID */
10034 DHD_TRACE(("Enter %s\n", __FUNCTION__));
10035
10036#ifdef DHDTCPACK_SUPPRESS
10037 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
10038#endif
10039 dhd->op_mode = 0;
10040
10041#if defined(CUSTOM_COUNTRY_CODE)
10042 /* clear AP flags */
10043 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
10044#endif /* CUSTOM_COUNTRY_CODE */
10045
10046 /* query for 'ver' to get version info from firmware */
10047 memset(buf, 0, sizeof(buf));
10048 ptr = buf;
10049 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
10050 if (ret < 0)
10051 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
10052 else {
10053 bcmstrtok(&ptr, "\n", 0);
10054 /* Print fw version info */
10055 strncpy(fw_version, buf, FW_VER_STR_LEN);
10056 fw_version[FW_VER_STR_LEN-1] = '\0';
10057 }
10058
10059 /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
10060 if (strstr(fw_version, "WLTEST") != NULL) {
10061 DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
10062 __FUNCTION__));
10063 op_mode = DHD_FLAG_MFG_MODE;
10064 }
10065
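/* Resolve the operating mode: an explicit op_mode module parameter wins,
 * otherwise the mode inferred from the firmware file name is used.
 */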
10066 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10067 (op_mode == DHD_FLAG_MFG_MODE)) {
10068 dhd->op_mode = DHD_FLAG_MFG_MODE;
10069#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
10070 /* disable runtimePM by default in MFG mode. */
10071 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
10072#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
10073 /* Check and adjust the IOCTL response timeout for manufacturing firmware */
10074 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
10075 DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
10076 __FUNCTION__));
10077 } else {
10078 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
10079 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
10080 }
10081#ifdef BCMPCIE_OOB_HOST_WAKE
10082 ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
10083 sizeof(hostwake_oob), FALSE);
10084 if (ret < 0) {
10085 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
10086 } else {
10087 if (hostwake_oob == 0) {
10088 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
10089 __FUNCTION__));
10090 ret = BCME_UNSUPPORTED;
10091 goto done;
10092 } else {
10093 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
10094 }
10095 }
10096#endif /* BCMPCIE_OOB_HOST_WAKE */
10097
10098#ifdef DNGL_AXI_ERROR_LOGGING
10099 ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
10100 sizeof(dhd->axierror_logbuf_addr), FALSE);
10101 if (ret < 0) {
10102 DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
10103 dhd->axierror_logbuf_addr = 0;
10104 } else {
10105 DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__,
10106 dhd->axierror_logbuf_addr));
10107 }
10108#endif /* DNGL_AXI_ERROR_LOGGING */
10109
10110#ifdef EVENT_LOG_RATE_HC
10111 ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
10112 sizeof(event_log_rate_hc), NULL, 0, TRUE);
10113 if (ret < 0) {
10114 DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
10115 } else {
10116 DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
10117 event_log_rate_hc));
10118 }
10119#endif /* EVENT_LOG_RATE_HC */
10120
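/* Program a platform-provided MAC address when one is available: try
 * "cur_etheraddr" first and fall back to the vendor "hw_ether" iovar; otherwise
 * the current MAC is simply read back from the firmware below.
 */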
10121#ifdef GET_CUSTOM_MAC_ENABLE
10122 memset(hw_ether, 0, sizeof(hw_ether));
10123 ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, iface_name);
10124#ifdef GET_CUSTOM_MAC_FROM_CONFIG
10125 if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
10126 ret = 0;
10127 } else
10128#endif
10129 if (!ret) {
10130 memset(buf, 0, sizeof(buf));
10131#ifdef GET_CUSTOM_MAC_FROM_CONFIG
10132 memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
10133#endif
10134 bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
10135 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
10136 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
10137 if (ret < 0) {
10138 memset(buf, 0, sizeof(buf));
10139 bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
10140 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
10141 if (ret) {
10142 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
10143 __FUNCTION__, MAC2STRDBG(hw_ether), ret));
10144 prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN], sizeof(hw_ether)-ETHER_ADDR_LEN);
10145 ret = BCME_NOTUP;
10146 goto done;
10147 }
10148 }
10149 } else {
10150 DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
10151 ret = BCME_NOTUP;
10152 goto done;
10153 }
10154#endif /* GET_CUSTOM_MAC_ENABLE */
10155 /* Get the default device MAC address directly from firmware */
10156 memset(buf, 0, sizeof(buf));
10157 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
10158 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
10159 FALSE, 0)) < 0) {
10160 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
10161 ret = BCME_NOTUP;
10162 goto done;
10163 }
10164 /* Update public MAC address after reading from Firmware */
10165 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
10166
10167 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
10168 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
10169 goto done;
10170 }
10171
10172 /* get capabilities from firmware */
10173 {
10174 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
10175 memset(dhd->fw_capabilities, 0, cap_buf_size);
10176 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
10177 FALSE);
10178 if (ret < 0) {
10179 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
10180 __FUNCTION__, ret));
10181 return 0;
10182 }
10183
10184 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
10185 dhd->fw_capabilities[0] = ' ';
10186 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
10187 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
10188 }
10189
10190 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
10191 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
10192#ifdef SET_RANDOM_MAC_SOFTAP
10193 uint rand_mac;
10194#endif /* SET_RANDOM_MAC_SOFTAP */
10195 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
10196#if defined(ARP_OFFLOAD_SUPPORT)
10197 arpoe = 0;
10198#endif // endif
10199#ifdef PKT_FILTER_SUPPORT
10200 if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
10201 dhd_pkt_filter_enable = TRUE;
10202 else
10203 dhd_pkt_filter_enable = FALSE;
10204#endif // endif
10205#ifdef SET_RANDOM_MAC_SOFTAP
10206 SRANDOM32((uint)jiffies);
10207 rand_mac = RANDOM32();
10208 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
10209 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
10210 iovbuf[2] = (unsigned char)vendor_oui;
10211 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
10212 iovbuf[4] = (unsigned char)(rand_mac >> 8);
10213 iovbuf[5] = (unsigned char)(rand_mac >> 16);
10214
10215 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
10216 TRUE);
10217 if (ret < 0) {
10218 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
10219 } else
10220 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
10221#endif /* SET_RANDOM_MAC_SOFTAP */
10222#ifdef USE_DYNAMIC_F2_BLKSIZE
10223 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
10224#endif /* USE_DYNAMIC_F2_BLKSIZE */
10225#ifdef SOFTAP_UAPSD_OFF
10226 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
10227 TRUE);
10228 if (ret < 0) {
10229 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
10230 __FUNCTION__, ret));
10231 }
10232#endif /* SOFTAP_UAPSD_OFF */
10233#if defined(CUSTOM_COUNTRY_CODE)
10234 /* set AP flag for specific country code of SOFTAP */
10235 dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
10236#endif /* CUSTOM_COUNTRY_CODE */
10237 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10238 (op_mode == DHD_FLAG_MFG_MODE)) {
10239#if defined(ARP_OFFLOAD_SUPPORT)
10240 arpoe = 0;
10241#endif /* ARP_OFFLOAD_SUPPORT */
10242#ifdef PKT_FILTER_SUPPORT
10243 dhd_pkt_filter_enable = FALSE;
10244#endif /* PKT_FILTER_SUPPORT */
10245 dhd->op_mode = DHD_FLAG_MFG_MODE;
10246#ifdef USE_DYNAMIC_F2_BLKSIZE
10247 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
10248#endif /* USE_DYNAMIC_F2_BLKSIZE */
10249#ifndef CUSTOM_SET_ANTNPM
10250 if (FW_SUPPORTED(dhd, rsdb)) {
10251 wl_config_t rsdb_mode;
10252 memset(&rsdb_mode, 0, sizeof(rsdb_mode));
10253 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
10254 NULL, 0, TRUE);
10255 if (ret < 0) {
10256 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
10257 __FUNCTION__, ret));
10258 }
10259 }
10260#endif /* !CUSTOM_SET_ANTNPM */
10261 } else {
10262 uint32 concurrent_mode = 0;
10263 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
10264 (op_mode == DHD_FLAG_P2P_MODE)) {
10265#if defined(ARP_OFFLOAD_SUPPORT)
10266 arpoe = 0;
10267#endif // endif
10268#ifdef PKT_FILTER_SUPPORT
10269 dhd_pkt_filter_enable = FALSE;
10270#endif // endif
10271 dhd->op_mode = DHD_FLAG_P2P_MODE;
10272 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
10273 (op_mode == DHD_FLAG_IBSS_MODE)) {
10274 dhd->op_mode = DHD_FLAG_IBSS_MODE;
10275 } else
10276 dhd->op_mode = DHD_FLAG_STA_MODE;
10277#if !defined(AP) && defined(WLP2P)
10278 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
10279 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
10280#if defined(ARP_OFFLOAD_SUPPORT)
10281 arpoe = 1;
10282#endif // endif
10283 dhd->op_mode |= concurrent_mode;
10284 }
10285
10286 /* Check if we are enabling p2p */
10287 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
10288 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
10289 TRUE);
10290 if (ret < 0)
10291 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
10292
10293#if defined(SOFTAP_AND_GC)
10294 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
10295 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
10296 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
10297 }
10298#endif // endif
10299 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
10300 ETHER_SET_LOCALADDR(&p2p_ea);
10301 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
10302 NULL, 0, TRUE);
10303 if (ret < 0)
10304 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
10305 else
10306 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
10307 }
10308#else
10309 (void)concurrent_mode;
10310#endif // endif
10311 }
10312
10313#ifdef DISABLE_PRUNED_SCAN
10314 if (FW_SUPPORTED(dhd, rsdb)) {
10315 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
10316 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
10317 if (ret < 0) {
10318 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
10319 __FUNCTION__, ret));
10320 } else {
10321 memcpy(&scan_features, iovbuf, 4);
10322 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
10323 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
10324 sizeof(scan_features), NULL, 0, TRUE);
10325 if (ret < 0) {
10326 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
10327 __FUNCTION__, ret));
10328 }
10329 }
10330 }
10331#endif /* DISABLE_PRUNED_SCAN */
10332
10333 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
10334 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
10335#if defined(DHD_BLOB_EXISTENCE_CHECK)
10336 if (!dhd->is_blob)
10337#endif /* DHD_BLOB_EXISTENCE_CHECK */
10338 {
10339 /* get a ccode and revision for the country code */
10340#if defined(CUSTOM_COUNTRY_CODE)
10341 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
10342 &dhd->dhd_cspec, dhd->dhd_cflags);
10343#else
10344 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
10345 &dhd->dhd_cspec);
10346#endif /* CUSTOM_COUNTRY_CODE */
10347 }
10348
10349#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
10350 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
10351 dhd->info->rxthread_enabled = FALSE;
10352 else
10353 dhd->info->rxthread_enabled = TRUE;
10354#endif // endif
10355 /* Set Country code */
10356 if (dhd->dhd_cspec.ccode[0] != 0) {
10357 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
10358 NULL, 0, TRUE);
10359 if (ret < 0)
10360 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
10361 }
10362
10363 /* Set Listen Interval */
10364 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
10365 NULL, 0, TRUE);
10366 if (ret < 0)
10367 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
10368
10369#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
10370#ifdef USE_WFA_CERT_CONF
10371 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
10372 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
10373 }
10374#endif /* USE_WFA_CERT_CONF */
10375 /* Disable built-in roaming to allow the ext supplicant to take care of roaming */
10376 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
10377#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
10378#if defined(ROAM_ENABLE)
10379#ifdef DISABLE_BCNLOSS_ROAM
10380 ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
10381 sizeof(roam_bcnloss_off), NULL, 0, TRUE);
10382#endif /* DISABLE_BCNLOSS_ROAM */
10383 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
10384 sizeof(roam_trigger), TRUE, 0)) < 0)
10385 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
10386 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
10387 sizeof(roam_scan_period), TRUE, 0)) < 0)
10388 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
10389 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
10390 sizeof(roam_delta), TRUE, 0)) < 0)
10391 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
10392 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
10393 sizeof(roam_fullscan_period), NULL, 0, TRUE);
10394 if (ret < 0)
10395 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
10396#ifdef ROAM_AP_ENV_DETECTION
10397 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
10398 if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
10399 sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
10400 dhd->roam_env_detection = TRUE;
10401 else
10402 dhd->roam_env_detection = FALSE;
10403 }
10404#endif /* ROAM_AP_ENV_DETECTION */
10405#endif /* ROAM_ENABLE */
10406
10407#ifdef CUSTOM_EVENT_PM_WAKE
10408 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
10409 sizeof(pm_awake_thresh), NULL, 0, TRUE);
10410 if (ret < 0) {
10411 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
10412 }
10413#endif /* CUSTOM_EVENT_PM_WAKE */
10414#ifdef OKC_SUPPORT
10415 ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
10416#endif // endif
10417#ifdef BCMCCX
10418 ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
10419#endif /* BCMCCX */
10420
10421#ifdef WLTDLS
10422 dhd->tdls_enable = FALSE;
10423 dhd_tdls_set_mode(dhd, false);
10424#endif /* WLTDLS */
10425
10426#ifdef DHD_ENABLE_LPC
10427 /* Set lpc 1 */
10428 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
10429 if (ret < 0) {
10430 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
10431
10432 if (ret == BCME_NOTDOWN) {
10433 uint wl_down = 1;
10434 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
10435 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
10436 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
10437
10438 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
10439 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
10440 }
10441 }
10442#endif /* DHD_ENABLE_LPC */
10443
10444#ifdef WLADPS
10445 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
10446 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
10447 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
10448 __FUNCTION__, ret));
10449 }
10450 }
10451#endif /* WLADPS */
10452
10453#ifdef DHD_PM_CONTROL_FROM_FILE
10454 sec_control_pm(dhd, &power_mode);
10455#else
10456 /* Set PowerSave mode */
10457 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
10458#endif /* DHD_PM_CONTROL_FROM_FILE */
10459
10460#if defined(BCMSDIO)
10461 /* Match Host and Dongle rx alignment */
10462 ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
10463 NULL, 0, TRUE);
10464
10465#if defined(USE_WL_CREDALL)
10466 /* enable credall to reduce the chance of running out of bus credits. */
10467 ret = dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
10468#endif // endif
10469
10470#ifdef USE_WFA_CERT_CONF
10471 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
10472 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
10473 }
10474#endif /* USE_WFA_CERT_CONF */
10475 if (glom != DEFAULT_GLOM_VALUE) {
10476 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
10477 ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
10478 }
10479#endif /* defined(BCMSDIO) */
10480
10481 /* Set up a timeout to report link down when beacons are lost and roam is off */
10482 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0,
10483 TRUE);
10484
10485 /* Setup assoc_retry_max count to reconnect target AP in dongle */
10486 ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0,
10487 TRUE);
10488
10489#if defined(AP) && !defined(WLP2P)
10490 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
10491
10492#endif /* defined(AP) && !defined(WLP2P) */
10493
10494#ifdef MIMO_ANT_SETTING
10495 dhd_sel_ant_from_file(dhd);
10496#endif /* MIMO_ANT_SETTING */
10497
10498#if defined(SOFTAP)
10499 if (ap_fw_loaded == TRUE) {
10500 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
10501 }
10502#endif // endif
10503
10504#if defined(KEEP_ALIVE)
10505 {
10506 /* Set Keep Alive : be sure to use FW with -keepalive */
10507 int res;
10508
10509#if defined(SOFTAP)
10510 if (ap_fw_loaded == FALSE)
10511#endif // endif
10512 if (!(dhd->op_mode &
10513 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
10514 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
10515 DHD_ERROR(("%s set keeplive failed %d\n",
10516 __FUNCTION__, res));
10517 }
10518 }
10519#endif /* defined(KEEP_ALIVE) */
10520
10521#ifdef USE_WL_TXBF
10522 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
10523 if (ret < 0)
10524 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
10525
10526#endif /* USE_WL_TXBF */
10527
10528 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
10529 0, TRUE);
10530 if (ret < 0) {
10531 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
10532 }
10533
10534 ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
10535 sizeof(event_log_max_sets), FALSE);
10536 if (ret == BCME_OK) {
10537 dhd->event_log_max_sets = event_log_max_sets;
10538 } else {
10539 dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
10540 }
10541	/* Make sure max_sets is written first (with a write memory barrier) and only
10542	 * then sets_queried; parsing the log sets in reverse order relies on this.
10543 */
10544 OSL_SMP_WMB();
10545 dhd->event_log_max_sets_queried = TRUE;
10546 DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
10547 __FUNCTION__, dhd->event_log_max_sets, ret));
10548
10549#ifdef DISABLE_TXBFR
10550 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
10551 0, TRUE);
10552 if (ret < 0) {
10553 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
10554 }
10555#endif /* DISABLE_TXBFR */
10556
10557#ifdef USE_WFA_CERT_CONF
10558#ifdef USE_WL_FRAMEBURST
10559 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
10560 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
10561 }
10562#endif /* USE_WL_FRAMEBURST */
10563 g_frameburst = frameburst;
10564#endif /* USE_WFA_CERT_CONF */
10565#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
10566	/* Disable framebursting for SoftAP */
10567 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
10568 frameburst = 0;
10569 }
10570#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
10571 /* Set frameburst to value */
10572 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
10573 sizeof(frameburst), TRUE, 0)) < 0) {
10574 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
10575 }
10576#ifdef DHD_SET_FW_HIGHSPEED
10577 /* Set ack_ratio */
10578 ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
10579 if (ret < 0) {
10580 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
10581 }
10582
10583 /* Set ack_ratio_depth */
10584 ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
10585 sizeof(ack_ratio_depth), NULL, 0, TRUE);
10586 if (ret < 0) {
10587 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
10588 }
10589#endif /* DHD_SET_FW_HIGHSPEED */
10590
10591 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
10592 if (iov_buf == NULL) {
10593 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
10594 ret = BCME_NOMEM;
10595 goto done;
10596 }
10597
10598#if defined(CUSTOM_AMPDU_BA_WSIZE)
10599 /* Set ampdu ba wsize to 64 or 16 */
10600#ifdef CUSTOM_AMPDU_BA_WSIZE
10601 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
10602#endif // endif
10603 if (ampdu_ba_wsize != 0) {
10604 ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
10605 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
10606 if (ret < 0) {
10607 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
10608 __FUNCTION__, ampdu_ba_wsize, ret));
10609 }
10610 }
10611#endif // endif
10612
10613#if defined(CUSTOM_AMPDU_MPDU)
10614 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
10615 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
10616 ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
10617 NULL, 0, TRUE);
10618 if (ret < 0) {
10619 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
10620 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
10621 }
10622 }
10623#endif /* CUSTOM_AMPDU_MPDU */
10624
10625#if defined(CUSTOM_AMPDU_RELEASE)
10626 ampdu_release = CUSTOM_AMPDU_RELEASE;
10627 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
10628 ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
10629 sizeof(ampdu_release), NULL, 0, TRUE);
10630 if (ret < 0) {
10631 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
10632 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
10633 }
10634 }
10635#endif /* CUSTOM_AMPDU_RELEASE */
10636
10637#if defined(CUSTOM_AMSDU_AGGSF)
10638 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
10639 if (amsdu_aggsf != 0) {
10640 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
10641 NULL, 0, TRUE);
10642 if (ret < 0) {
10643 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
10644 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
10645 }
10646 }
10647#endif /* CUSTOM_AMSDU_AGGSF */
10648
10649#if defined(BCMSUP_4WAY_HANDSHAKE)
10650 /* Read 4-way handshake requirements */
10651 if (dhd_use_idsup == 1) {
10652 ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
10653 (char *)&iovbuf, sizeof(iovbuf), FALSE);
10654 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
10655 * in-dongle supplicant.
10656 */
10657 if (ret >= 0 || ret == BCME_NOTREADY)
10658 dhd->fw_4way_handshake = TRUE;
10659 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
10660 }
10661#endif /* BCMSUP_4WAY_HANDSHAKE */
10662#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
10663 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
10664 NULL, 0, FALSE);
10665 if (ret < 0) {
10666 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
10667 vht_features = 0;
10668 } else {
10669#ifdef SUPPORT_2G_VHT
10670 vht_features |= 0x3; /* 2G support */
10671#endif /* SUPPORT_2G_VHT */
10672#ifdef SUPPORT_5G_1024QAM_VHT
10673 vht_features |= 0x6; /* 5G 1024 QAM support */
10674#endif /* SUPPORT_5G_1024QAM_VHT */
10675 }
10676 if (vht_features) {
10677 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
10678 NULL, 0, TRUE);
10679 if (ret < 0) {
10680 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
10681
10682 if (ret == BCME_NOTDOWN) {
10683 uint wl_down = 1;
10684 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
10685 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
10686 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
10687 " vht_features = 0x%x\n",
10688 __FUNCTION__, ret, vht_features));
10689
10690 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
10691 sizeof(vht_features), NULL, 0, TRUE);
10692
10693 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
10694 }
10695 }
10696 }
10697#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
10698#ifdef DISABLE_11N_PROPRIETARY_RATES
10699 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
10700 TRUE);
10701 if (ret < 0) {
10702 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
10703 }
10704#endif /* DISABLE_11N_PROPRIETARY_RATES */
10705#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
10706#if defined(DISABLE_HE_ENAB)
10707 control_he_enab = 0;
10708#endif /* DISABLE_HE_ENAB */
10709 dhd_control_he_enab(dhd, control_he_enab);
10710#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
10711
10712#ifdef CUSTOM_PSPRETEND_THR
10713	/* Set pspretend_threshold for HostAPD */
10714 ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
10715 sizeof(pspretend_thr), NULL, 0, TRUE);
10716 if (ret < 0) {
10717 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
10718 __FUNCTION__, ret));
10719 }
10720#endif // endif
10721
10722 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
10723 NULL, 0, TRUE);
10724 if (ret < 0) {
10725 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
10726 }
10727#ifdef SUPPORT_SET_CAC
10728 ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
10729 if (ret < 0) {
10730 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
10731 }
10732#endif /* SUPPORT_SET_CAC */
10733#ifdef DHD_ULP
10734 /* Get the required details from dongle during preinit ioctl */
10735 dhd_ulp_preinit(dhd);
10736#endif /* DHD_ULP */
10737
10738 /* Read event_msgs mask */
10739 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
10740 sizeof(iovbuf), FALSE);
10741 if (ret < 0) {
10742 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
10743 goto done;
10744 }
10745 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
10746
10747 /* Setup event_msgs */
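	/* eventmask is a WL_EVENTING_MASK_LEN-byte bitmap indexed by WLC_E_*
	 * event ids: setbit()/clrbit() below select which firmware events are
	 * delivered to the host. The mask read above is edited in place and
	 * written back once all per-feature bits have been applied.
	 */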
10748 setbit(eventmask, WLC_E_SET_SSID);
10749 setbit(eventmask, WLC_E_PRUNE);
10750 setbit(eventmask, WLC_E_AUTH);
10751 setbit(eventmask, WLC_E_AUTH_IND);
10752 setbit(eventmask, WLC_E_ASSOC);
10753 setbit(eventmask, WLC_E_REASSOC);
10754 setbit(eventmask, WLC_E_REASSOC_IND);
10755 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
10756 setbit(eventmask, WLC_E_DEAUTH);
10757 setbit(eventmask, WLC_E_DEAUTH_IND);
10758 setbit(eventmask, WLC_E_DISASSOC_IND);
10759 setbit(eventmask, WLC_E_DISASSOC);
10760 setbit(eventmask, WLC_E_JOIN);
10761 setbit(eventmask, WLC_E_START);
10762 setbit(eventmask, WLC_E_ASSOC_IND);
10763 setbit(eventmask, WLC_E_PSK_SUP);
10764 setbit(eventmask, WLC_E_LINK);
10765 setbit(eventmask, WLC_E_MIC_ERROR);
10766 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
10767 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
10768#ifdef LIMIT_BORROW
10769 setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
10770#endif // endif
10771#ifndef WL_CFG80211
10772 setbit(eventmask, WLC_E_PMKID_CACHE);
10773// setbit(eventmask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
10774#endif // endif
10775 setbit(eventmask, WLC_E_JOIN_START);
10776// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
10777#ifdef DHD_DEBUG
10778 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
10779#endif // endif
10780#ifdef PNO_SUPPORT
10781 setbit(eventmask, WLC_E_PFN_NET_FOUND);
10782 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
10783 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
10784 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
10785#endif /* PNO_SUPPORT */
10786 /* enable dongle roaming event */
10787#ifdef WL_CFG80211
10788#if !defined(ROAM_EVT_DISABLE)
10789	setbit(eventmask, WLC_E_ROAM);
10790#endif /* !ROAM_EVT_DISABLE */
10791 setbit(eventmask, WLC_E_BSSID);
10792#endif /* WL_CFG80211 */
10793#ifdef BCMCCX
10794 setbit(eventmask, WLC_E_ADDTS_IND);
10795 setbit(eventmask, WLC_E_DELTS_IND);
10796#endif /* BCMCCX */
10797#ifdef WLTDLS
10798 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
10799#endif /* WLTDLS */
10800#ifdef WL_ESCAN
10801 setbit(eventmask, WLC_E_ESCAN_RESULT);
10802#endif /* WL_ESCAN */
10803#ifdef CSI_SUPPORT
10804 setbit(eventmask, WLC_E_CSI);
10805#endif /* CSI_SUPPORT */
10806#ifdef RTT_SUPPORT
10807 setbit(eventmask, WLC_E_PROXD);
10808#endif /* RTT_SUPPORT */
10809#ifdef WL_CFG80211
10810 setbit(eventmask, WLC_E_ESCAN_RESULT);
10811 setbit(eventmask, WLC_E_AP_STARTED);
10812 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
10813 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
10814 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
10815 }
10816#endif /* WL_CFG80211 */
10817
10818#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
10819 if (dhd_logtrace_from_file(dhd)) {
10820 setbit(eventmask, WLC_E_TRACE);
10821 } else {
10822 clrbit(eventmask, WLC_E_TRACE);
10823 }
10824#elif defined(SHOW_LOGTRACE)
10825 setbit(eventmask, WLC_E_TRACE);
10826#else
10827 clrbit(eventmask, WLC_E_TRACE);
10828 if (dhd->conf->chip == BCM43752_CHIP_ID)
10829 setbit(eventmask, WLC_E_TRACE);
10830#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
10831
10832 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
10833#ifdef CUSTOM_EVENT_PM_WAKE
10834 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
10835#endif /* CUSTOM_EVENT_PM_WAKE */
10836#ifdef DHD_LOSSLESS_ROAMING
10837 setbit(eventmask, WLC_E_ROAM_PREP);
10838#endif // endif
10839 /* nan events */
10840 setbit(eventmask, WLC_E_NAN);
10841#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
10842 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
10843#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
10844
10845#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
10846 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
10847#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
10848
10849 /* Write updated Event mask */
10850 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
10851 if (ret < 0) {
10852 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
10853 goto done;
10854 }
10855
10856	/* Build the event_msgs_ext iovar message for events numbered above 128 */
10857 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
10858 eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
10859 if (eventmask_msg == NULL) {
10860 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
10861 ret = BCME_NOMEM;
10862 goto done;
10863 }
10864 bzero(eventmask_msg, msglen);
10865 eventmask_msg->ver = EVENTMSGS_VER;
10866 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
10867
10868 /* Read event_msgs_ext mask */
10869 ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
10870 WLC_IOCTL_SMLEN, FALSE);
10871
10872	if (ret2 == 0) { /* event_msgs_ext is supported */
10873 bcopy(iov_buf, eventmask_msg, msglen);
10874#ifdef RSSI_MONITOR_SUPPORT
10875 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
10876#endif /* RSSI_MONITOR_SUPPORT */
10877#ifdef GSCAN_SUPPORT
10878 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
10879 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
10880 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
10881 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
10882#endif /* GSCAN_SUPPORT */
10883 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
10884#ifdef BT_WIFI_HANDOVER
10885 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
10886#endif /* BT_WIFI_HANDOVER */
10887#ifdef DBG_PKT_MON
10888 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
10889#endif /* DBG_PKT_MON */
10890#ifdef DHD_ULP
10891 setbit(eventmask_msg->mask, WLC_E_ULP);
10892#endif // endif
10893#ifdef WL_NATOE
10894 setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
10895#endif /* WL_NATOE */
10896#ifdef WL_NAN
10897 setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
10898#endif /* WL_NAN */
10899#ifdef WL_MBO
10900 setbit(eventmask_msg->mask, WLC_E_MBO);
10901#endif /* WL_MBO */
10902#ifdef WL_CLIENT_SAE
10903 setbit(eventmask_msg->mask, WLC_E_JOIN_START);
10904#endif /* WL_CLIENT_SAE */
10905#ifdef WL_BCNRECV
10906 setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
10907#endif /* WL_BCNRECV */
10908#ifdef WL_CAC_TS
10909 setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
10910 setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
10911#endif /* WL_CAC_TS */
10912#ifdef WL_CHAN_UTIL
10913 setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
10914#endif /* WL_CHAN_UTIL */
10915
10916 /* Write updated Event mask */
10917 eventmask_msg->ver = EVENTMSGS_VER;
10918 eventmask_msg->command = EVENTMSGS_SET_MASK;
10919 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
10920 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
10921 TRUE);
10922 if (ret < 0) {
10923 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
10924 goto done;
10925 }
10926 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
10927 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
10928 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
10929 __FUNCTION__, ret2));
10930 } else {
10931 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
10932 ret = ret2;
10933 goto done;
10934 }
10935
10936#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10937 /* Enabling event log trace for EAP events */
10938 el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
10939 if (el_tag == NULL) {
10940 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
10941 (int)sizeof(wl_el_tag_params_t)));
10942 ret = BCME_NOMEM;
10943 goto done;
10944 }
10945 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
10946 el_tag->set = 1;
10947 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
10948 ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL, 0,
10949 TRUE);
10950#endif /* DHD_8021X_DUMP */
10951
10952 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
10953 sizeof(scan_assoc_time), TRUE, 0);
10954 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
10955 sizeof(scan_unassoc_time), TRUE, 0);
10956 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
10957 sizeof(scan_passive_time), TRUE, 0);
10958
10959#ifdef ARP_OFFLOAD_SUPPORT
10960 /* Set and enable ARP offload feature for STA only */
10961#if defined(SOFTAP)
10962 if (arpoe && !ap_fw_loaded)
10963#else
10964 if (arpoe)
10965#endif // endif
10966 {
10967 dhd_arp_offload_enable(dhd, TRUE);
10968 dhd_arp_offload_set(dhd, dhd_arp_mode);
10969 } else {
10970 dhd_arp_offload_enable(dhd, FALSE);
10971 dhd_arp_offload_set(dhd, 0);
10972 }
10973 dhd_arp_enable = arpoe;
10974#endif /* ARP_OFFLOAD_SUPPORT */
10975
10976#ifdef PKT_FILTER_SUPPORT
10977	/* Set up default definitions for pktfilter; enable in suspend */
10978 if (dhd_master_mode) {
10979 dhd->pktfilter_count = 6;
10980 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
10981 if (!FW_SUPPORTED(dhd, pf6)) {
10982 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
10983 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
10984 } else {
10985 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
10986 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
10987 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
10988 }
10989 /* apply APP pktfilter */
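		/* Filter spec string layout, as these entries are parsed when
		 * programmed into the firmware, is believed to be:
		 * "<id> <polarity> <type> <offset> <bitmask> <pattern>", e.g.
		 * "105 0 0 12 0xFFFF 0x0806" matches ethertype 0x0806 (ARP)
		 * at byte offset 12 of the frame.
		 */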
10990 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
10991
10992#ifdef BLOCK_IPV6_PACKET
10993 /* Setup filter to allow only IPv4 unicast frames */
10994 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
10995 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
10996 " "
10997 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
10998#else
10999 /* Setup filter to allow only unicast */
11000 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11001#endif /* BLOCK_IPV6_PACKET */
11002
11003#ifdef PASS_IPV4_SUSPEND
11004 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
11005#else
11006		/* Add filter to pass multicast DNS packets rather than filtering them out as broadcast */
11007 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11008#endif /* PASS_IPV4_SUSPEND */
11009 if (FW_SUPPORTED(dhd, pf6)) {
11010			/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
11011			dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
11012			/* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
11013			dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
11014			/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
11015			dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
11016 dhd->pktfilter_count = 10;
11017 }
11018
11019#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11020 dhd->pktfilter_count = 4;
11021 /* Setup filter to block broadcast and NAT Keepalive packets */
11022 /* discard all broadcast packets */
11023 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11024 /* discard NAT Keepalive packets */
11025 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11026 /* discard NAT Keepalive packets */
11027 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11028 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11029#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11030 } else
11031 dhd_conf_discard_pkt_filter(dhd);
11032 dhd_conf_add_pkt_filter(dhd);
11033
11034#if defined(SOFTAP)
11035 if (ap_fw_loaded) {
11036 dhd_enable_packet_filter(0, dhd);
11037 }
11038#endif /* defined(SOFTAP) */
11039 dhd_set_packet_filter(dhd);
11040#endif /* PKT_FILTER_SUPPORT */
11041#ifdef DISABLE_11N
11042 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
11043 if (ret < 0)
11044 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
11045#endif /* DISABLE_11N */
11046
11047#ifdef ENABLE_BCN_LI_BCN_WAKEUP
11048 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0,
11049 TRUE);
11050#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11051#ifdef AMPDU_VO_ENABLE
11052 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
11053 tid.enable = TRUE;
11054 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11055
11056	tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
11057 tid.enable = TRUE;
11058 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11059#endif // endif
11060 /* query for 'clmver' to get clm version info from firmware */
11061 memset(buf, 0, sizeof(buf));
11062 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11063 if (ret < 0)
11064		DHD_ERROR(("%s clmver failed %d\n", __FUNCTION__, ret));
11065	else {
11066 char *ver_temp_buf = NULL, *ver_date_buf = NULL;
11067 int len;
11068
11069		if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11070 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11071 } else {
11072 ver_date_buf = bcmstrstr(buf, "Creation:");
11073 ptr = (ver_temp_buf + strlen("Data:"));
11074 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
11075 DHD_ERROR(("Couldn't find New line character\n"));
11076 } else {
11077 memset(clm_version, 0, CLM_VER_STR_LEN);
11078 len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
11079 if (ver_date_buf) {
11080 ptr = (ver_date_buf + strlen("Creation:"));
11081 ver_date_buf = bcmstrtok(&ptr, "\n", 0);
11082 if (ver_date_buf)
11083 snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
11084 " (%s)", ver_date_buf);
11085 }
11086 DHD_INFO(("CLM version = %s\n", clm_version));
11087 }
11088 }
11089
11090 if (strlen(clm_version)) {
11091 DHD_INFO(("CLM version = %s\n", clm_version));
11092 } else {
11093 DHD_ERROR(("Couldn't find CLM version!\n"));
11094 }
11095	}
11096	dhd_set_version_info(dhd, fw_version);
11097
11098#ifdef WRITE_WLANINFO
11099 sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
11100#endif /* WRITE_WLANINFO */
11101
11102 /* query for 'wlc_ver' to get version info from firmware */
11103 memset(&wlc_ver, 0, sizeof(wl_wlc_version_t));
11104	ret2 = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
11105		sizeof(wl_wlc_version_t), FALSE);
11106 if (ret2 < 0) {
11107 DHD_ERROR(("%s wlc_ver failed %d\n", __FUNCTION__, ret2));
11108 if (ret2 != BCME_UNSUPPORTED)
11109 ret = ret2;
11110 } else {
11111 dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
11112 dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
11113 }
11114#ifdef GEN_SOFTAP_INFO_FILE
11115 sec_save_softap_info();
11116#endif /* GEN_SOFTAP_INFO_FILE */
11117
11118#if defined(BCMSDIO)
11119 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
11120#endif /* defined(BCMSDIO) */
11121
11122#if defined(BCMSDIO) || defined(BCMDBUS)
11123#ifdef PROP_TXSTATUS
11124 if (disable_proptx ||
11125#ifdef PROP_TXSTATUS_VSDB
11126 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
11127 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
11128 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
11129#endif /* PROP_TXSTATUS_VSDB */
11130 FALSE) {
11131 wlfc_enable = FALSE;
11132 }
11133 ret = dhd_conf_get_disable_proptx(dhd);
11134 if (ret == 0){
11135 disable_proptx = 0;
11136 wlfc_enable = TRUE;
11137 } else if (ret >= 1) {
11138 disable_proptx = 1;
11139 wlfc_enable = FALSE;
11140 /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
11141 hostreorder = 0;
11142 }
11143
11144#if defined(PROP_TXSTATUS)
11145#ifdef USE_WFA_CERT_CONF
11146 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
11147 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
11148 wlfc_enable = proptx;
11149 }
11150#endif /* USE_WFA_CERT_CONF */
11151#endif /* PROP_TXSTATUS */
11152
11153#ifndef DISABLE_11N
11154 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11155 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
11156 NULL, 0, TRUE);
11157 if (ret2 < 0) {
11158 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
11159 if (ret2 != BCME_UNSUPPORTED)
11160 ret = ret2;
11161
11162 if (ret == BCME_NOTDOWN) {
11163 uint wl_down = 1;
11164 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
11165 sizeof(wl_down), TRUE, 0);
11166 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
11167 __FUNCTION__, ret2, hostreorder));
11168
11169 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
11170 sizeof(hostreorder), NULL, 0, TRUE);
11171 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
11172 if (ret2 != BCME_UNSUPPORTED)
11173 ret = ret2;
11174 }
11175 if (ret2 != BCME_OK)
11176 hostreorder = 0;
11177 }
11178#endif /* DISABLE_11N */
11179
11180 if (wlfc_enable) {
11181 dhd_wlfc_init(dhd);
11182 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
11183 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
11184 }
11185#ifndef DISABLE_11N
11186 else if (hostreorder)
11187 dhd_wlfc_hostreorder_init(dhd);
11188#endif /* DISABLE_11N */
11189#else
11190 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
11191 printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
11192 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
11193#endif /* PROP_TXSTATUS */
11194#endif /* BCMSDIO || BCMDBUS */
11195#ifndef PCIE_FULL_DONGLE
11196 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
11197 if (FW_SUPPORTED(dhd, ap)) {
11198 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
11199 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
11200 NULL, 0, TRUE);
11201 if (ret < 0)
11202 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11203 }
11204#endif /* PCIE_FULL_DONGLE */
11205#ifdef PNO_SUPPORT
11206 if (!dhd->pno_state) {
11207 dhd_pno_init(dhd);
11208 }
11209#endif // endif
11210#ifdef RTT_SUPPORT
11211 if (!dhd->rtt_state) {
11212 ret = dhd_rtt_init(dhd);
11213 if (ret < 0) {
11214 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
11215 }
11216 }
11217#endif // endif
11218#ifdef FILTER_IE
11219 /* Failure to configure filter IE is not a fatal error, ignore it. */
11220 if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
11221 dhd_read_from_file(dhd);
11222#endif /* FILTER_IE */
11223#ifdef WL11U
11224 dhd_interworking_enable(dhd);
11225#endif /* WL11U */
11226
11227#ifdef NDO_CONFIG_SUPPORT
11228 dhd->ndo_enable = FALSE;
11229 dhd->ndo_host_ip_overflow = FALSE;
11230 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
11231#endif /* NDO_CONFIG_SUPPORT */
11232
11233 /* ND offload version supported */
11234 dhd->ndo_version = dhd_ndo_get_version(dhd);
11235 if (dhd->ndo_version > 0) {
11236 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
11237
11238#ifdef NDO_CONFIG_SUPPORT
11239 /* enable Unsolicited NA filter */
11240 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
11241 if (ret < 0) {
11242 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
11243 }
11244#endif /* NDO_CONFIG_SUPPORT */
11245 }
11246
11247 /* check dongle supports wbtext (product policy) or not */
11248 dhd->wbtext_support = FALSE;
11249 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
11250 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
11251 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
11252 }
11253 dhd->wbtext_policy = wnm_bsstrans_resp;
11254 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
11255 dhd->wbtext_support = TRUE;
11256 }
11257 /* driver can turn off wbtext feature through makefile */
11258 if (dhd->wbtext_support) {
11259 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
11260 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
11261 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
11262 DHD_ERROR(("failed to disable WBTEXT\n"));
11263 }
11264 }
11265
11266#ifdef DHD_NON_DMA_M2M_CORRUPTION
11267	/* check pcie non dma loopback */
11268 if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
11269 (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
11270		goto done;
11271 }
11272#endif /* DHD_NON_DMA_M2M_CORRUPTION */
11273
11274 /* WNM capabilities */
11275 wnm_cap = 0
11276#ifdef WL11U
11277 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
11278#endif // endif
11279 ;
11280#if defined(WL_MBO) && defined(WL_OCE)
11281 if (FW_SUPPORTED(dhd, estm)) {
11282 wnm_cap |= WL_WNM_ESTM;
11283 }
11284#endif /* WL_MBO && WL_OCE */
11285 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
11286 DHD_ERROR(("failed to set WNM capabilities\n"));
11287 }
11288
11289 if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
11290		dhd_ecounter_configure(dhd, TRUE);
11291 }
11292
11293 /* store the preserve log set numbers */
11294 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
11295 != BCME_OK) {
11296 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
11297 }
11298
11299#ifdef WL_MONITOR
11300 if (FW_SUPPORTED(dhd, monitor)) {
11301 dhd->monitor_enable = TRUE;
11302 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
11303 } else {
11304 dhd->monitor_enable = FALSE;
11305 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
11306 }
11307#endif /* WL_MONITOR */
11308
11309#ifdef CONFIG_SILENT_ROAM
11310 dhd->sroam_turn_on = TRUE;
11311 dhd->sroamed = FALSE;
11312#endif /* CONFIG_SILENT_ROAM */
11313
11314 dhd_conf_postinit_ioctls(dhd);
11315done:
11316
11317 if (eventmask_msg) {
11318 MFREE(dhd->osh, eventmask_msg, msglen);
11319 eventmask_msg = NULL;
11320 }
11321 if (iov_buf) {
11322 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
11323 iov_buf = NULL;
11324 }
11325#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11326 if (el_tag) {
11327 MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
11328 el_tag = NULL;
11329 }
11330#endif /* DHD_8021X_DUMP */
11331 return ret;
11332}
11333
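/*
 * dhd_iovar() - wrap an iovar request in a WLC_SET_VAR/WLC_GET_VAR ioctl.
 *
 * SET (set != 0): res_buf must be NULL and res_len 0; the name and params
 * are packed into a scratch buffer with bcm_mkiovar() and sent as
 * WLC_SET_VAR. GET: the response lands in res_buf; a scratch buffer is
 * used only when res_buf is too small to hold the packed request itself.
 *
 * Minimal usage sketch (iovar name chosen for illustration only):
 *
 *	uint32 val = 1;
 *	int err = dhd_iovar(dhd, 0, "mpc", (char *)&val, sizeof(val),
 *		NULL, 0, TRUE);				// SET
 *	err = dhd_iovar(dhd, 0, "mpc", NULL, 0, (char *)&val,
 *		sizeof(val), FALSE);			// GET
 */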
11334int
11335dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
11336 uint res_len, int set)
11337{
11338 char *buf = NULL;
11339 int input_len;
11340 wl_ioctl_t ioc;
11341 int ret;
11342
11343 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
11344 return BCME_BADARG;
11345
11346 input_len = strlen(name) + 1 + param_len;
11347 if (input_len > WLC_IOCTL_MAXLEN)
11348 return BCME_BADARG;
11349
11350 buf = NULL;
11351 if (set) {
11352 if (res_buf || res_len != 0) {
11353 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
11354 ret = BCME_BADARG;
11355 goto exit;
11356 }
11357 buf = MALLOCZ(pub->osh, input_len);
11358 if (!buf) {
11359 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
11360 ret = BCME_NOMEM;
11361 goto exit;
11362 }
11363 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
11364 if (!ret) {
11365 ret = BCME_NOMEM;
11366 goto exit;
11367 }
11368
11369 ioc.cmd = WLC_SET_VAR;
11370 ioc.buf = buf;
11371 ioc.len = input_len;
11372 ioc.set = set;
11373
11374 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11375 } else {
11376 if (!res_buf || !res_len) {
11377 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
11378 ret = BCME_BADARG;
11379 goto exit;
11380 }
11381
11382 if (res_len < input_len) {
11383 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
11384 res_len, input_len));
11385 buf = MALLOCZ(pub->osh, input_len);
11386 if (!buf) {
11387 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
11388 ret = BCME_NOMEM;
11389 goto exit;
11390 }
11391 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
11392 if (!ret) {
11393 ret = BCME_NOMEM;
11394 goto exit;
11395 }
11396
11397 ioc.cmd = WLC_GET_VAR;
11398 ioc.buf = buf;
11399 ioc.len = input_len;
11400 ioc.set = set;
11401
11402 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11403
11404 if (ret == BCME_OK) {
11405 memcpy(res_buf, buf, res_len);
11406 }
11407 } else {
11408 memset(res_buf, 0, res_len);
11409 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
11410 if (!ret) {
11411 ret = BCME_NOMEM;
11412 goto exit;
11413 }
11414
11415 ioc.cmd = WLC_GET_VAR;
11416 ioc.buf = res_buf;
11417 ioc.len = res_len;
11418 ioc.set = set;
11419
11420 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11421 }
11422 }
11423exit:
11424 if (buf) {
11425 MFREE(pub->osh, buf, input_len);
11426 buf = NULL;
11427 }
11428 return ret;
11429}
11430
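/*
 * dhd_getiovar() - GET-only convenience wrapper. Packs "name\0<cmd_buf>"
 * into the caller-provided *resptr buffer via bcm_mkiovar() and issues
 * WLC_GET_VAR; the response overwrites that same buffer. Returns
 * BCME_BUFTOOSHORT when the buffer cannot hold the packed request.
 */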
11431int
11432dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
11433 uint cmd_len, char **resptr, uint resp_len)
11434{
11435 int len = resp_len;
11436 int ret;
11437 char *buf = *resptr;
11438 wl_ioctl_t ioc;
11439 if (resp_len > WLC_IOCTL_MAXLEN)
11440 return BCME_BADARG;
11441
11442 memset(buf, 0, resp_len);
11443
11444 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
11445 if (ret == 0) {
11446 return BCME_BUFTOOSHORT;
11447 }
11448
11449 memset(&ioc, 0, sizeof(ioc));
11450
11451 ioc.cmd = WLC_GET_VAR;
11452 ioc.buf = buf;
11453 ioc.len = len;
11454 ioc.set = 0;
11455
11456 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11457
11458 return ret;
11459}
11460
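/*
 * dhd_change_mtu() - apply a new MTU to the net_device backing ifidx.
 * The interface must be down, and the value must fall within
 * [DHD_MIN_MTU, DHD_MAX_MTU] as defined below.
 */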
11461int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
11462{
11463 struct dhd_info *dhd = dhdp->info;
11464 struct net_device *dev = NULL;
11465
11466 ASSERT(dhd && dhd->iflist[ifidx]);
11467 dev = dhd->iflist[ifidx]->net;
11468 ASSERT(dev);
11469
11470 if (netif_running(dev)) {
11471 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
11472 return BCME_NOTDOWN;
11473 }
11474
11475#define DHD_MIN_MTU 1500
11476#define DHD_MAX_MTU 1752
11477
11478 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
11479 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
11480 return BCME_BADARG;
11481 }
11482
11483 dev->mtu = new_mtu;
11484 return 0;
11485}
11486
11487#ifdef ARP_OFFLOAD_SUPPORT
11488/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
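/* The dongle table is not edited in place; this is a read-modify-write:
 * read the current host_ip table into ipv4_buf, clear the dongle copy,
 * add/remove ipa in the local copy, then push every surviving entry back.
 */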
11489void
11490aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
11491{
11492 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
11493 int i;
11494 int ret;
11495
11496 bzero(ipv4_buf, sizeof(ipv4_buf));
11497
11498 /* display what we've got */
11499 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
11500 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
11501#ifdef AOE_DBG
11502 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
11503#endif // endif
11504	/* now that we have saved the host_ip table, clear it in the dongle AOE */
11505 dhd_aoe_hostip_clr(dhd_pub, idx);
11506
11507 if (ret) {
11508 DHD_ERROR(("%s failed\n", __FUNCTION__));
11509 return;
11510 }
11511
11512 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
11513 if (add && (ipv4_buf[i] == 0)) {
11514 ipv4_buf[i] = ipa;
11515 add = FALSE; /* added ipa to local table */
11516 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
11517 __FUNCTION__, i));
11518 } else if (ipv4_buf[i] == ipa) {
11519 ipv4_buf[i] = 0;
11520 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
11521 __FUNCTION__, ipa, i));
11522 }
11523
11524 if (ipv4_buf[i] != 0) {
11525 /* add back host_ip entries from our local cache */
11526 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
11527 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
11528 __FUNCTION__, ipv4_buf[i], i));
11529 }
11530 }
11531#ifdef AOE_DBG
11532 /* see the resulting hostip table */
11533 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
11534 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
11535 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
11536#endif // endif
11537}
11538
11539/*
11540 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
11541 * whenever there is an event related to an IP address.
11542 * ptr : kernel provided pointer to IP address that has changed
11543 */
11544static int dhd_inetaddr_notifier_call(struct notifier_block *this,
11545 unsigned long event,
11546 void *ptr)
11547{
11548 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
11549
11550 dhd_info_t *dhd;
11551 dhd_pub_t *dhd_pub;
11552 int idx;
11553
11554 if (!dhd_arp_enable)
11555 return NOTIFY_DONE;
11556 if (!ifa || !(ifa->ifa_dev->dev))
11557 return NOTIFY_DONE;
11558
11559 /* Filter notifications meant for non Broadcom devices */
11560 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
11561 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
11562#if defined(WL_ENABLE_P2P_IF)
11563 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
11564#endif /* WL_ENABLE_P2P_IF */
11565 return NOTIFY_DONE;
11566 }
11567
11568 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
11569 if (!dhd)
11570 return NOTIFY_DONE;
11571
11572 dhd_pub = &dhd->pub;
11573
11574 if (dhd_pub->arp_version == 1) {
11575 idx = 0;
11576 } else {
11577 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
11578 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
11579 break;
11580 }
11581 if (idx < DHD_MAX_IFS)
11582 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
11583 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
11584 else {
11585 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
11586 idx = 0;
11587 }
11588 }
11589
11590 switch (event) {
11591 case NETDEV_UP:
11592 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
11593 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
11594
11595 /*
11596			 * Skip if the bus is not in a state to transport the IOVAR
11597			 * or the dongle is not ready.
11598 */
11599 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
11600 dhd->pub.busstate == DHD_BUS_LOAD) {
11601 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
11602 __FUNCTION__, dhd->pub.busstate));
11603 if (dhd->pend_ipaddr) {
11604 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
11605 __FUNCTION__, dhd->pend_ipaddr));
11606 }
11607 dhd->pend_ipaddr = ifa->ifa_address;
11608 break;
11609 }
11610
11611#ifdef AOE_IP_ALIAS_SUPPORT
11612 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
11613 __FUNCTION__));
11614 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
11615#endif /* AOE_IP_ALIAS_SUPPORT */
11616			dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
11617 break;
11618
11619 case NETDEV_DOWN:
11620 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
11621 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
11622 dhd->pend_ipaddr = 0;
11623#ifdef AOE_IP_ALIAS_SUPPORT
11624 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
11625 __FUNCTION__));
11626 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
11627 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
11628 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
11629 } else
11630#endif /* AOE_IP_ALIAS_SUPPORT */
11631 {
11632 dhd_aoe_hostip_clr(&dhd->pub, idx);
11633 dhd_aoe_arp_clr(&dhd->pub, idx);
11634 }
11635			dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
11636 break;
11637
11638 default:
11639 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
11640 __func__, ifa->ifa_label, event));
11641 break;
11642 }
11643 return NOTIFY_DONE;
11644}
11645#endif /* ARP_OFFLOAD_SUPPORT */
11646
11647#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
11648/* Neighbor Discovery Offload: deferred handler */
11649static void
11650dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
11651{
11652 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
11653 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
11654 dhd_pub_t *dhdp;
11655 int ret;
11656
11657 if (!dhd) {
11658 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
11659 goto done;
11660 }
11661 dhdp = &dhd->pub;
11662
11663 if (event != DHD_WQ_WORK_IPV6_NDO) {
11664 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
11665 goto done;
11666 }
11667
11668 if (!ndo_work) {
11669 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
11670 return;
11671 }
11672
11673 switch (ndo_work->event) {
11674 case NETDEV_UP:
11675#ifndef NDO_CONFIG_SUPPORT
11676 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
11677 ret = dhd_ndo_enable(dhdp, TRUE);
11678 if (ret < 0) {
11679 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
11680 }
11681#endif /* !NDO_CONFIG_SUPPORT */
11682 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
11683 if (dhdp->ndo_version > 0) {
11684 /* inet6 addr notifier called only for unicast address */
11685 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
11686 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
11687 } else {
11688 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
11689 ndo_work->if_idx);
11690 }
11691 if (ret < 0) {
11692 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
11693 __FUNCTION__, ret));
11694 }
11695 break;
11696 case NETDEV_DOWN:
11697 if (dhdp->ndo_version > 0) {
11698 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
11699 ret = dhd_ndo_remove_ip_by_addr(dhdp,
11700 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
11701 } else {
11702 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
11703 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
11704 }
11705 if (ret < 0) {
11706 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
11707 __FUNCTION__, ret));
11708 goto done;
11709 }
11710#ifdef NDO_CONFIG_SUPPORT
11711 if (dhdp->ndo_host_ip_overflow) {
11712 ret = dhd_dev_ndo_update_inet6addr(
11713 dhd_idx2net(dhdp, ndo_work->if_idx));
11714 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
11715 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
11716 __FUNCTION__, ret));
11717 goto done;
11718 }
11719 }
11720#else /* !NDO_CONFIG_SUPPORT */
11721 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
11722 ret = dhd_ndo_enable(dhdp, FALSE);
11723 if (ret < 0) {
11724 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
11725 goto done;
11726 }
11727#endif /* NDO_CONFIG_SUPPORT */
11728 break;
11729
11730 default:
11731 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
11732 break;
11733 }
11734done:
11735
11736 /* free ndo_work. alloced while scheduling the work */
11737 if (ndo_work) {
11738 kfree(ndo_work);
11739 }
11740
11741 return;
11742} /* dhd_inet6_work_handler */
11743
11744/*
11745 * Neighbor Discovery Offload: Called when an interface
11746 * is assigned with ipv6 address.
11747 * Handles only primary interface
11748 */
11749int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
11750{
11751 dhd_info_t *dhd;
11752 dhd_pub_t *dhdp;
11753 struct inet6_ifaddr *inet6_ifa = ptr;
11754 struct ipv6_work_info_t *ndo_info;
11755 int idx;
11756
11757 /* Filter notifications meant for non Broadcom devices */
11758 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
11759 return NOTIFY_DONE;
11760 }
11761
11762 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
11763 if (!dhd) {
11764 return NOTIFY_DONE;
11765 }
11766 dhdp = &dhd->pub;
11767
11768 /* Supports only primary interface */
11769 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
11770 if (idx != 0) {
11771 return NOTIFY_DONE;
11772 }
11773
11774 /* FW capability */
11775 if (!FW_SUPPORTED(dhdp, ndoe)) {
11776 return NOTIFY_DONE;
11777 }
11778
11779 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
11780 if (!ndo_info) {
11781 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
11782 return NOTIFY_DONE;
11783 }
11784
11785 /* fill up ndo_info */
11786 ndo_info->event = event;
11787 ndo_info->if_idx = idx;
11788 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
11789
11790	/* defer the work to a thread as it may block the kernel */
11791 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
11792 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
11793 return NOTIFY_DONE;
11794}
11795#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
11796
11797/* Network attach to be invoked from the bus probe handlers */
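/* Registers the primary net device (ifidx 0) and, when WL_CFG80211 is
 * enabled, attaches it to cfg80211; if the cfg80211 attach fails, the
 * freshly registered interface is removed again so the probe fails cleanly.
 */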
11798int
11799dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
11800{
11801 struct net_device *primary_ndev;
11802 BCM_REFERENCE(primary_ndev);
11803
11804 /* Register primary net device */
11805 if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
11806 return BCME_ERROR;
11807 }
11808
11809#if defined(WL_CFG80211)
11810 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
11811 if (wl_cfg80211_net_attach(primary_ndev) < 0) {
11812 /* fail the init */
11813 dhd_remove_if(dhdp, 0, TRUE);
11814 return BCME_ERROR;
11815 }
11816#endif /* WL_CFG80211 */
11817 return BCME_OK;
11818}
11819
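/*
 * dhd_register_if() - hook dhd->iflist[ifidx]->net into the network stack:
 * install the netdev/ethtool ops, derive the MAC address (primary MAC for
 * ifidx 0, locally-administered variant for virtual interfaces that would
 * clash with it), then register via register_netdev()/register_netdevice()
 * depending on need_rtnl_lock.
 */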
11820int
11821dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
11822{
11823 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
11824 dhd_if_t *ifp;
11825 struct net_device *net = NULL;
11826 int err = 0;
11827 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
11828
11829 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
11830
11831 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
11832 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
11833 return BCME_ERROR;
11834 }
11835
11836 ASSERT(dhd && dhd->iflist[ifidx]);
11837 ifp = dhd->iflist[ifidx];
11838 net = ifp->net;
11839 ASSERT(net && (ifp->idx == ifidx));
11840
11841 ASSERT(!net->netdev_ops);
11842 net->netdev_ops = &dhd_ops_virt;
11843
11844 /* Ok, link into the network layer... */
11845 if (ifidx == 0) {
11846 /*
11847 * device functions for the primary interface only
11848 */
11849		net->netdev_ops = &dhd_ops_pri;
11850 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
11851 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
11852 } else {
11853 /*
11854 * We have to use the primary MAC for virtual interfaces
11855 */
11856 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
11857 /*
11858 * Android sets the locally administered bit to indicate that this is a
11859 * portable hotspot. This will not work in simultaneous AP/STA mode,
11860		 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
11861 */
11862 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
11863 ETHER_ADDR_LEN)) {
11864 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
11865 __func__, net->name));
11866 temp_addr[0] |= 0x02;
11867 }
11868 }
11869
11870 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
11871	net->ethtool_ops = &dhd_ethtool_ops;
11872
11873#if defined(WL_WIRELESS_EXT)
11874#if WIRELESS_EXT < 19
11875 net->get_wireless_stats = dhd_get_wireless_stats;
11876#endif /* WIRELESS_EXT < 19 */
11877#if WIRELESS_EXT > 12
11878 net->wireless_handlers = &wl_iw_handler_def;
11879#endif /* WIRELESS_EXT > 12 */
11880#endif /* defined(WL_WIRELESS_EXT) */
11881
11882 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
11883
11884#ifdef WLMESH
11885 if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
11886 temp_addr[4] ^= 0x80;
11887 temp_addr[4] += ifidx;
11888 temp_addr[5] += ifidx;
11889 }
11890#endif
11891 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
11892
11893 if (ifidx == 0)
11894 printf("%s\n", dhd_version);
11895	else {
11896#ifdef WL_EXT_IAPSTA
11897		wl_ext_iapsta_update_net_device(net, ifidx);
11898#endif /* WL_EXT_IAPSTA */
11899 if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
11900 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
11901 else
11902 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
11903 }
11904
11905 if (need_rtnl_lock)
11906 err = register_netdev(net);
11907 else
11908 err = register_netdevice(net);
11909
11910 if (err != 0) {
11911 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
11912 goto fail;
11913 }
11914#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
11915	wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
11916#ifdef WL_ESCAN
11917	wl_escan_event_attach(net, dhdp);
11918#endif /* WL_ESCAN */
11919#ifdef WL_EXT_IAPSTA
11920	wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
11921	wl_ext_iapsta_attach_name(net, ifidx);
11922#endif /* WL_EXT_IAPSTA */
11923#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
11924
11925 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
11926 MAC2STRDBG(net->dev_addr));
11927
11928#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
11929// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
11930#endif // endif
11931
11932#if (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
11933 if (ifidx == 0) {
11934#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
11935 up(&dhd_registration_sem);
11936#endif /* BCMLXSDMMC */
11937 if (!dhd_download_fw_on_driverload) {
11938#ifdef WL_CFG80211
11939 wl_terminate_event_handler(net);
11940#endif /* WL_CFG80211 */
11941#if defined(DHD_LB_RXP)
11942 __skb_queue_purge(&dhd->rx_pend_queue);
11943#endif /* DHD_LB_RXP */
11944
11945#if defined(DHD_LB_TXP)
11946 skb_queue_purge(&dhd->tx_pend_queue);
11947#endif /* DHD_LB_TXP */
11948
11949#ifdef SHOW_LOGTRACE
11950 /* Release the skbs from queue for WLC_E_TRACE event */
11951 dhd_event_logtrace_flush_queue(dhdp);
11952#endif /* SHOW_LOGTRACE */
11953
11954#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
11955 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
11956#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
11957 dhd_net_bus_devreset(net, TRUE);
11958#ifdef BCMLXSDMMC
11959 dhd_net_bus_suspend(net);
11960#endif /* BCMLXSDMMC */
11961 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
11962#if defined(BT_OVER_SDIO)
11963 dhd->bus_user_count--;
11964#endif /* BT_OVER_SDIO */
11965 }
11966#if defined(WL_WIRELESS_EXT)
11967		wl_iw_down(net, &dhd->pub);
11968#endif /* defined(WL_WIRELESS_EXT) */
11969	}
11970#endif /* BCMPCIE || BCMLXSDMMC || BCMDBUS */
11971 return 0;
11972
11973fail:
11974	net->netdev_ops = NULL;
11975 return err;
11976}
11977
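/*
 * dhd_bus_detach() - stop the protocol and bus modules, but only when the
 * bus is still up and firmware was downloaded on driver load (otherwise
 * dhd_stop has already brought the bus down); finally unregister the OOB
 * interrupt handler on configurations that use one.
 */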
11978void
11979dhd_bus_detach(dhd_pub_t *dhdp)
11980{
11981 dhd_info_t *dhd;
11982
11983 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
11984
11985 if (dhdp) {
11986 dhd = (dhd_info_t *)dhdp->info;
11987 if (dhd) {
11988
11989 /*
11990 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
11991			 * calling stop again will cause SD read/write errors.
11992 */
11993 if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
11994 /* Stop the protocol module */
11995 dhd_prot_stop(&dhd->pub);
11996
11997 /* Stop the bus module */
11998#ifdef BCMDBUS
11999 /* Force Dongle terminated */
12000 if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
12001 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
12002 __FUNCTION__));
12003 dbus_stop(dhd->pub.bus);
12004 dhd->pub.busstate = DHD_BUS_DOWN;
12005#else
12006 dhd_bus_stop(dhd->pub.bus, TRUE);
12007#endif /* BCMDBUS */
12008 }
12009
12010#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12011 dhd_bus_oob_intr_unregister(dhdp);
12012#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12013 }
12014 }
12015}
12016
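/*
 * dhd_detach() - teardown counterpart of dhd_attach(): force the primary
 * interface down, unwind cfg80211/wext attachments, unregister the
 * inetaddr/inet6addr notifiers, delete virtual interfaces and then the
 * primary one, and stop the watchdog timer and the wdt/rxf/dpc threads.
 */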
12017void dhd_detach(dhd_pub_t *dhdp)
12018{
12019 dhd_info_t *dhd;
12020 unsigned long flags;
12021 int timer_valid = FALSE;
12022 struct net_device *dev;
12023#ifdef WL_CFG80211
12024 struct bcm_cfg80211 *cfg = NULL;
12025#endif // endif
12026 if (!dhdp)
12027 return;
12028
12029 dhd = (dhd_info_t *)dhdp->info;
12030 if (!dhd)
12031 return;
12032
12033 dev = dhd->iflist[0]->net;
12034
12035 if (dev) {
12036 rtnl_lock();
12037#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
12038 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
12039 cfg = wl_get_cfg(dev);
12040 if (cfg && cfg->static_ndev && (cfg->static_ndev->flags & IFF_UP)) {
12041 dev_close(cfg->static_ndev);
12042 }
12043 }
12044#endif /* WL_CFG80211 && WL_STATIC_IF */
12045 if (dev->flags & IFF_UP) {
12046 /* If IFF_UP is still up, it indicates that
12047 * "ifconfig wlan0 down" hasn't been called.
12048 * So invoke dev_close explicitly here to
12049 * bring down the interface.
12050 */
12051 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12052 dev_close(dev);
12053 }
12054 rtnl_unlock();
12055 }
12056
12057 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
12058
12059	DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
12060 dhd->pub.up = 0;
12061 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
12062 /* Give sufficient time for threads to start running in case
12063 * dhd_attach() has failed
12064 */
12065 OSL_SLEEP(100);
12066 }
12067#ifdef DHD_WET
12068 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
12069#endif /* DHD_WET */
12070#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
12071#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
12072
12073#ifdef PROP_TXSTATUS
12074#ifdef DHD_WLFC_THREAD
12075 if (dhd->pub.wlfc_thread) {
12076 kthread_stop(dhd->pub.wlfc_thread);
12077 dhdp->wlfc_thread_go = TRUE;
12078 wake_up_interruptible(&dhdp->wlfc_wqhead);
12079 }
12080 dhd->pub.wlfc_thread = NULL;
12081#endif /* DHD_WLFC_THREAD */
12082#endif /* PROP_TXSTATUS */
12083
12084#ifdef WL_CFG80211
12085 if (dev)
12086 wl_cfg80211_down(dev);
12087#endif /* WL_CFG80211 */
12088
12089 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
12090
12091 dhd_bus_detach(dhdp);
12092#ifdef BCMPCIE
12093 if (is_reboot == SYS_RESTART) {
12094 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
12095 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
12096 dhdpcie_bus_clock_stop(dhdp->bus);
12097 wifi_platform_set_power(dhd_wifi_platdata->adapters,
12098 FALSE, WIFI_TURNOFF_DELAY);
12099 }
12100 }
12101#endif /* BCMPCIE */
12102#ifndef PCIE_FULL_DONGLE
12103 if (dhdp->prot)
12104 dhd_prot_detach(dhdp);
12105#endif /* !PCIE_FULL_DONGLE */
12106 }
12107
12108#ifdef ARP_OFFLOAD_SUPPORT
12109 if (dhd_inetaddr_notifier_registered) {
12110 dhd_inetaddr_notifier_registered = FALSE;
12111 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
12112 }
12113#endif /* ARP_OFFLOAD_SUPPORT */
12114#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12115 if (dhd_inet6addr_notifier_registered) {
12116 dhd_inet6addr_notifier_registered = FALSE;
12117 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
12118 }
12119#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12120#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
12121 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
12122 if (dhd->early_suspend.suspend)
12123 unregister_early_suspend(&dhd->early_suspend);
12124 }
12125#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
12126
12127#if defined(WL_WIRELESS_EXT)
12128 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
12129		/* Detach and unlink in the iw */
12130		wl_iw_detach(dev, dhdp);
12131	}
12132#endif /* defined(WL_WIRELESS_EXT) */
12133#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
12134#ifdef WL_EXT_IAPSTA
12135 wl_ext_iapsta_dettach(dhdp);
12136#endif /* WL_EXT_IAPSTA */
12137#ifdef WL_ESCAN
12138 wl_escan_detach(dev, dhdp);
12139#endif /* WL_ESCAN */
12140 wl_ext_event_dettach(dhdp);
12141#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
12142
12143#ifdef DHD_ULP
12144 dhd_ulp_deinit(dhd->pub.osh, dhdp);
12145#endif /* DHD_ULP */
12146
12147 /* delete all interfaces, start with virtual */
12148 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
12149 int i = 1;
12150 dhd_if_t *ifp;
12151
12152 /* Cleanup virtual interfaces */
12153 dhd_net_if_lock_local(dhd);
12154 for (i = 1; i < DHD_MAX_IFS; i++) {
12155 if (dhd->iflist[i]) {
12156 dhd_remove_if(&dhd->pub, i, TRUE);
12157 }
12158 }
12159 dhd_net_if_unlock_local(dhd);
12160
12161 /* delete primary interface 0 */
12162 ifp = dhd->iflist[0];
12163 if (ifp && ifp->net) {
12164
12165#ifdef WL_CFG80211
12166 cfg = wl_get_cfg(ifp->net);
12167#endif // endif
12168 /* in unregister_netdev case, the interface gets freed by net->destructor
12169 * (which is set to free_netdev)
12170 */
12171 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
12172 free_netdev(ifp->net);
12173 } else {
12174#if defined(ARGOS_NOTIFY_CB)
12175 argos_register_notifier_deinit();
12176#endif // endif
12177#ifdef SET_RPS_CPUS
12178 custom_rps_map_clear(ifp->net->_rx);
12179#endif /* SET_RPS_CPUS */
12180 netif_tx_disable(ifp->net);
12181 unregister_netdev(ifp->net);
12182 }
12183#ifdef PCIE_FULL_DONGLE
12184 ifp->net = DHD_NET_DEV_NULL;
12185#else
12186 ifp->net = NULL;
12187#endif /* PCIE_FULL_DONGLE */
12188
12189#ifdef DHD_L2_FILTER
12190 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
12191 NULL, FALSE, dhdp->tickcnt);
12192 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
12193 ifp->phnd_arp_table = NULL;
12194#endif /* DHD_L2_FILTER */
12195
12196 dhd_if_del_sta_list(ifp);
12197
12198 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
12199 dhd->iflist[0] = NULL;
12200#ifdef WL_CFG80211
12201 if (cfg && cfg->wdev)
12202 cfg->wdev->netdev = NULL;
12203#endif
12204 }
12205 }
12206
12207 /* Clear the watchdog timer */
12208 DHD_GENERAL_LOCK(&dhd->pub, flags);
12209 timer_valid = dhd->wd_timer_valid;
12210 dhd->wd_timer_valid = FALSE;
12211 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12212 if (timer_valid)
12213 del_timer_sync(&dhd->timer);
12214 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
12215
12216#ifdef BCMDBUS
12217 tasklet_kill(&dhd->tasklet);
12218#else
12219 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
12220 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
12221 PROC_STOP(&dhd->thr_wdt_ctl);
12222 }
12223
12224 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
12225 PROC_STOP(&dhd->thr_rxf_ctl);
12226 }
12227
12228 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
12229 PROC_STOP(&dhd->thr_dpc_ctl);
12230 } else
12231 {
12232 tasklet_kill(&dhd->tasklet);
12233 }
12234 }
12235#endif /* BCMDBUS */
12236
12237#ifdef WL_NATOE
12238 if (dhd->pub.nfct) {
12239 dhd_ct_close(dhd->pub.nfct);
12240 }
12241#endif /* WL_NATOE */
12242
12243#ifdef DHD_LB
12244 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
12245 /* Clear the flag first to avoid calling the cpu notifier */
12246 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
12247
12248 /* Kill the Load Balancing Tasklets */
12249#ifdef DHD_LB_RXP
12250 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
12251 __skb_queue_purge(&dhd->rx_pend_queue);
12252#endif /* DHD_LB_RXP */
12253#ifdef DHD_LB_TXP
12254 cancel_work_sync(&dhd->tx_dispatcher_work);
12255 tasklet_kill(&dhd->tx_tasklet);
12256 __skb_queue_purge(&dhd->tx_pend_queue);
12257#endif /* DHD_LB_TXP */
12258#ifdef DHD_LB_TXC
12259 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
12260 tasklet_kill(&dhd->tx_compl_tasklet);
12261#endif /* DHD_LB_TXC */
12262#ifdef DHD_LB_RXC
12263 tasklet_kill(&dhd->rx_compl_tasklet);
12264#endif /* DHD_LB_RXC */
12265
12266 /* Unregister from CPU Hotplug framework */
12267 dhd_unregister_cpuhp_callback(dhd);
12268
12269 dhd_cpumasks_deinit(dhd);
12270 DHD_LB_STATS_DEINIT(&dhd->pub);
12271 }
12272#endif /* DHD_LB */
12273
12274#ifdef CSI_SUPPORT
12275 dhd_csi_deinit(dhdp);
12276#endif /* CSI_SUPPORT */
12277
12278#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
12279 cancel_work_sync(&dhd->axi_error_dispatcher_work);
12280#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
12281
12282 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
12283
12284#ifdef WL_CFG80211
12285 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
12286 if (!cfg) {
12287 DHD_ERROR(("cfg NULL!\n"));
12288 ASSERT(0);
12289 } else {
12290 wl_cfg80211_detach(cfg);
12291 dhd_monitor_uninit();
12292 }
12293 }
12294#endif // endif
12295
12296#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
12297 destroy_workqueue(dhd->tx_wq);
12298 dhd->tx_wq = NULL;
12299 destroy_workqueue(dhd->rx_wq);
12300 dhd->rx_wq = NULL;
12301#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
12302#ifdef DEBUGABILITY
12303 if (dhdp->dbg) {
12304#ifdef DBG_PKT_MON
12305 dhd_os_dbg_detach_pkt_monitor(dhdp);
12306 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
12307#endif /* DBG_PKT_MON */
12308 }
12309#endif /* DEBUGABILITY */
12310 if (dhdp->dbg) {
12311 dhd_os_dbg_detach(dhdp);
12312 }
12313#ifdef DHD_STATUS_LOGGING
12314 dhd_detach_statlog(dhdp);
12315#endif /* DHD_STATUS_LOGGING */
12316#ifdef DHD_PKTDUMP_ROAM
12317 dhd_dump_pkt_deinit(dhdp);
12318#endif /* DHD_PKTDUMP_ROAM */
12319#ifdef SHOW_LOGTRACE
12320 /* Release the skbs from queue for WLC_E_TRACE event */
12321 dhd_event_logtrace_flush_queue(dhdp);
12322
12323 /* Wait till event logtrace context finishes */
12324 dhd_cancel_logtrace_process_sync(dhd);
12325
12326 /* Remove ring proc entries */
12327 dhd_dbg_ring_proc_destroy(&dhd->pub);
12328
12329 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
12330 if (dhd->event_data.fmts) {
12331 MFREE(dhd->pub.osh, dhd->event_data.fmts,
12332 dhd->event_data.fmts_size);
12333 dhd->event_data.fmts = NULL;
12334 }
12335 if (dhd->event_data.raw_fmts) {
12336 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
12337 dhd->event_data.raw_fmts_size);
12338 dhd->event_data.raw_fmts = NULL;
12339 }
12340 if (dhd->event_data.raw_sstr) {
12341 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
12342 dhd->event_data.raw_sstr_size);
12343 dhd->event_data.raw_sstr = NULL;
12344 }
12345 if (dhd->event_data.rom_raw_sstr) {
12346 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
12347 dhd->event_data.rom_raw_sstr_size);
12348 dhd->event_data.rom_raw_sstr = NULL;
12349 }
12350 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
12351 }
12352#endif /* SHOW_LOGTRACE */
12353#ifdef PNO_SUPPORT
12354 if (dhdp->pno_state)
12355 dhd_pno_deinit(dhdp);
12356#endif // endif
12357#ifdef RTT_SUPPORT
12358 if (dhdp->rtt_state) {
12359 dhd_rtt_deinit(dhdp);
12360 }
12361#endif // endif
12362#if defined(CONFIG_PM_SLEEP)
12363 if (dhd_pm_notifier_registered) {
12364 unregister_pm_notifier(&dhd->pm_notifier);
12365 dhd_pm_notifier_registered = FALSE;
12366 }
12367#endif /* CONFIG_PM_SLEEP */
12368
12369#ifdef DEBUG_CPU_FREQ
12370 if (dhd->new_freq)
12371 free_percpu(dhd->new_freq);
12372 dhd->new_freq = NULL;
12373 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
12374#endif // endif
12375 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
12376#ifdef CONFIG_HAS_WAKELOCK
12377 dhd->wakelock_wd_counter = 0;
12378 wake_lock_destroy(&dhd->wl_wdwake);
12379	// terence 20161023: wl_wifi cannot be destroyed at wlan down, or dhd_ioctl_entry would hit a null pointer; destroy it here at detach instead
12380 wake_lock_destroy(&dhd->wl_wifi);
12381#endif /* CONFIG_HAS_WAKELOCK */
12382 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
12383 DHD_OS_WAKE_LOCK_DESTROY(dhd);
12384 }
12385
12386#ifdef DHDTCPACK_SUPPRESS
12387 /* This will free all MEM allocated for TCPACK SUPPRESS */
12388 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
12389#endif /* DHDTCPACK_SUPPRESS */
12390
12391#ifdef PCIE_FULL_DONGLE
12392 dhd_flow_rings_deinit(dhdp);
12393 if (dhdp->prot)
12394 dhd_prot_detach(dhdp);
12395#endif // endif
12396
12397#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
12398 dhd_free_tdls_peer_list(dhdp);
12399#endif // endif
12400
12401#ifdef DUMP_IOCTL_IOV_LIST
12402 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
12403#endif /* DUMP_IOCTL_IOV_LIST */
12404#ifdef DHD_DEBUG
12405	/* delete the memory waste feature list */
12406 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
12407#endif /* DHD_DEBUG */
12408#ifdef WL_MONITOR
12409	dhd_del_monitor_if(dhd);
12410#endif /* WL_MONITOR */
12411
12412#ifdef DHD_ERPOM
12413 if (dhdp->enable_erpom) {
12414 dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
12415 }
12416#endif /* DHD_ERPOM */
12417
12418 cancel_work_sync(&dhd->dhd_hang_process_work);
12419
12420 /* Prefer adding de-init code above this comment unless necessary.
12421 * The idea is to cancel work queue, sysfs and flags at the end.
12422 */
12423 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
12424 dhd->dhd_deferred_wq = NULL;
12425
12426 /* log dump related buffers should be freed after wq is purged */
12427#ifdef DHD_LOG_DUMP
12428 dhd_log_dump_deinit(&dhd->pub);
12429#endif /* DHD_LOG_DUMP */
12430#if defined(BCMPCIE)
12431 if (dhdp->extended_trap_data)
12432 {
12433 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
12434 dhdp->extended_trap_data = NULL;
12435 }
12436#ifdef DNGL_AXI_ERROR_LOGGING
12437 if (dhdp->axi_err_dump)
12438 {
12439 MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
12440 dhdp->axi_err_dump = NULL;
12441 }
12442#endif /* DNGL_AXI_ERROR_LOGGING */
12443#endif /* BCMPCIE */
12444
12445#ifdef DHD_DUMP_MNGR
12446 if (dhd->pub.dump_file_manage) {
12447 MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
12448 sizeof(dhd_dump_file_manage_t));
12449 }
12450#endif /* DHD_DUMP_MNGR */
12451 dhd_sysfs_exit(dhd);
12452	dhd->pub.fw_download_status = FW_UNLOADED;
12453
12454#if defined(BT_OVER_SDIO)
12455 mutex_destroy(&dhd->bus_user_lock);
12456#endif /* BT_OVER_SDIO */
12457 dhd_conf_detach(dhdp);
12458
12459} /* dhd_detach */
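/* Note (added for clarity, not in the original source): dhd_detach() above
 * tears down OS-facing state (netdevs, notifiers, timers, threads, work
 * queues), while dhd_free()/dhd_clear() below release or reset the remaining
 * driver memory. A sketch of the expected shutdown order, assuming the usual
 * bus glue drives it:
 *
 *	dhd_detach(dhdp);	// unregister interfaces, stop threads/timers
 *	dhd_free(dhdp);		// release reorder buffers, sta pool, dhd_info
 */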
12460
12461void
12462dhd_free(dhd_pub_t *dhdp)
12463{
12464 dhd_info_t *dhd;
12465 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12466
12467 if (dhdp) {
12468 int i;
12469 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
12470 if (dhdp->reorder_bufs[i]) {
12471 reorder_info_t *ptr;
12472 uint32 buf_size = sizeof(struct reorder_info);
12473
12474 ptr = dhdp->reorder_bufs[i];
12475
12476 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
12477 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
12478 i, ptr->max_idx, buf_size));
12479
12480 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
12481 dhdp->reorder_bufs[i] = NULL;
12482 }
12483 }
12484
12485 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
12486
12487 dhd = (dhd_info_t *)dhdp->info;
12488 if (dhdp->soc_ram) {
12489#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
12490 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
12491#else
12492 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
12493#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
12494 dhdp->soc_ram = NULL;
12495 }
12496 if (dhd != NULL) {
12497
12498 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
12499 if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
12500 DHD_PREALLOC_DHD_INFO, 0, FALSE))
12501 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
12502 dhd = NULL;
12503 }
12504 }
12505}
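/* Clarifying note (not in the original source): dhd_free() releases the
 * reorder buffers, the STA pool and, unless it came from dhd_os_prealloc(),
 * the dhd_info structure itself. dhd_clear() below walks the same state but
 * only empties it, so the instance can be reused without a full re-attach.
 */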
12506
12507void
12508dhd_clear(dhd_pub_t *dhdp)
12509{
12510 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12511
12512 if (dhdp) {
12513 int i;
12514#ifdef DHDTCPACK_SUPPRESS
12515 /* Clean up timer/data structure for any remaining/pending packet or timer. */
12516 dhd_tcpack_info_tbl_clean(dhdp);
12517#endif /* DHDTCPACK_SUPPRESS */
12518 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
12519 if (dhdp->reorder_bufs[i]) {
12520 reorder_info_t *ptr;
12521 uint32 buf_size = sizeof(struct reorder_info);
12522
12523 ptr = dhdp->reorder_bufs[i];
12524
12525 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
12526 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
12527 i, ptr->max_idx, buf_size));
12528
12529 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
12530 dhdp->reorder_bufs[i] = NULL;
12531 }
12532 }
12533
12534 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
12535
12536 if (dhdp->soc_ram) {
12537#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
12538 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
12539#else
12540 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
12541#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
12542 dhdp->soc_ram = NULL;
12543 }
12544 }
12545}
12546
12547static void
12548dhd_module_cleanup(void)
12549{
12550 printf("%s: Enter\n", __FUNCTION__);
12551
12552 dhd_bus_unregister();
12553
12554 wl_android_exit();
12555
12556 dhd_wifi_platform_unregister_drv();
12557
12558#ifdef CUSTOMER_HW_AMLOGIC
12559#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
12560 wifi_teardown_dt();
12561#endif
12562#endif
12563 printf("%s: Exit\n", __FUNCTION__);
12564}
12565
12566static void __exit
12567dhd_module_exit(void)
12568{
12569 atomic_set(&exit_in_progress, 1);
12570 dhd_module_cleanup();
12571 unregister_reboot_notifier(&dhd_reboot_notifier);
12572 dhd_destroy_to_notifier_skt();
12573}
12574
12575static int __init
12576dhd_module_init(void)
12577{
12578 int err;
12579 int retry = POWERUP_MAX_RETRY;
12580
12581 printf("%s: in %s\n", __FUNCTION__, dhd_version);
12582#ifdef CUSTOMER_HW_AMLOGIC
12583#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
12584 if (wifi_setup_dt()) {
12585 printf("wifi_dt : fail to setup dt\n");
12586 }
12587#endif
12588#endif
12589
12590 DHD_PERIM_RADIO_INIT();
12591
12592 if (firmware_path[0] != '\0') {
12593 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
12594 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
12595 }
12596
12597 if (nvram_path[0] != '\0') {
12598 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
12599 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
12600 }
12601
12602 do {
12603 err = dhd_wifi_platform_register_drv();
12604 if (!err) {
12605 register_reboot_notifier(&dhd_reboot_notifier);
12606 break;
12607 } else {
12608 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
12609 __FUNCTION__, retry));
12610 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
12611 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
12612 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
12613 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
12614 }
12615 } while (retry--);
12616
12617 dhd_create_to_notifier_skt();
12618
12619 if (err) {
12620#ifdef CUSTOMER_HW_AMLOGIC
12621#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
12622 wifi_teardown_dt();
12623#endif
12624#endif
12625 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
12626 } else {
12627 if (!dhd_download_fw_on_driverload) {
12628 dhd_driver_init_done = TRUE;
12629 }
12630 }
12631
12632 printf("%s: Exit err=%d\n", __FUNCTION__, err);
12633 return err;
12634}
12635
12636static int
12637dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
12638{
12639 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
12640 if (code == SYS_RESTART) {
12641#ifdef BCMPCIE
12642 is_reboot = code;
12643#endif /* BCMPCIE */
12644 }
12645 return NOTIFY_DONE;
12646}
12647
12648#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
12649#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
12650 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
12651 defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
12652 defined(CONFIG_ARCH_SDM845) || defined(CONFIG_SOC_EXYNOS9820) || \
12653 defined(CONFIG_ARCH_SM8150)
12654deferred_module_init_sync(dhd_module_init);
12655#else
12656deferred_module_init(dhd_module_init);
12657#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
12658 * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
12659 * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845 || CONFIG_SOC_EXYNOS9820
12660 * CONFIG_ARCH_SM8150
12661 */
12662#elif defined(USE_LATE_INITCALL_SYNC)
12663late_initcall_sync(dhd_module_init);
12664#else
12665late_initcall(dhd_module_init);
12666#endif /* USE_LATE_INITCALL_SYNC */
12667
12668module_exit(dhd_module_exit);
12669
12670/*
12671 * OS specific functions required to implement DHD driver in OS independent way
12672 */
12673int
12674dhd_os_proto_block(dhd_pub_t *pub)
12675{
12676 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12677
12678 if (dhd) {
12679 DHD_PERIM_UNLOCK(pub);
12680
12681 down(&dhd->proto_sem);
12682
12683 DHD_PERIM_LOCK(pub);
12684 return 1;
12685 }
12686
12687 return 0;
12688}
12689
12690int
12691dhd_os_proto_unblock(dhd_pub_t *pub)
12692{
12693 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12694
12695 if (dhd) {
12696 up(&dhd->proto_sem);
12697 return 1;
12698 }
12699
12700 return 0;
12701}
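/* Usage sketch (illustrative, not part of the driver): proto_sem serializes
 * protocol-layer transactions, so callers are expected to pair the two
 * functions above around each transaction:
 *
 *	if (dhd_os_proto_block(pub)) {
 *		// ... issue the protocol transaction ...
 *		dhd_os_proto_unblock(pub);
 *	}
 *
 * Both return 0 when pub->info is NULL, so the block call must be checked.
 */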
12702
12703void
12704dhd_os_dhdiovar_lock(dhd_pub_t *pub)
12705{
12706 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12707
12708 if (dhd) {
12709 mutex_lock(&dhd->dhd_iovar_mutex);
12710 }
12711}
12712
12713void
12714dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
12715{
12716 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12717
12718 if (dhd) {
12719 mutex_unlock(&dhd->dhd_iovar_mutex);
12720 }
12721}
12722
12723void
12724dhd_os_logdump_lock(dhd_pub_t *pub)
12725{
12726 dhd_info_t *dhd = NULL;
12727
12728 if (!pub)
12729 return;
12730
12731 dhd = (dhd_info_t *)(pub->info);
12732
12733 if (dhd) {
12734 mutex_lock(&dhd->logdump_lock);
12735 }
12736}
12737
12738void
12739dhd_os_logdump_unlock(dhd_pub_t *pub)
12740{
12741 dhd_info_t *dhd = NULL;
12742
12743 if (!pub)
12744 return;
12745
12746 dhd = (dhd_info_t *)(pub->info);
12747
12748 if (dhd) {
12749 mutex_unlock(&dhd->logdump_lock);
12750 }
12751}
12752
12753unsigned long
12754dhd_os_dbgring_lock(void *lock)
12755{
12756 if (!lock)
12757 return 0;
12758
12759 mutex_lock((struct mutex *)lock);
12760
12761 return 0;
12762}
12763
12764void
12765dhd_os_dbgring_unlock(void *lock, unsigned long flags)
12766{
12767 BCM_REFERENCE(flags);
12768
12769 if (!lock)
12770 return;
12771
12772 mutex_unlock((struct mutex *)lock);
12773}
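/* Note (added for clarity): the debug-ring lock wrappers above take a mutex
 * behind a void pointer and always return 0 for the "flags" value; the flags
 * argument to dhd_os_dbgring_unlock() is intentionally unused
 * (BCM_REFERENCE(flags)).
 */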
12774
12775unsigned int
12776dhd_os_get_ioctl_resp_timeout(void)
12777{
12778 return ((unsigned int)dhd_ioctl_timeout_msec);
12779}
12780
12781void
12782dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
12783{
12784 dhd_ioctl_timeout_msec = (int)timeout_msec;
12785}
12786
12787int
12788dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
12789{
12790	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12791	int timeout;
12792
12793	/* Convert timeout in milliseconds to jiffies */
12794	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
12795
12796 DHD_PERIM_UNLOCK(pub);
12797
12798 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
12799
12800 DHD_PERIM_LOCK(pub);
12801
12802 return timeout;
12803}
12804
12805int
12806dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
12807{
12808 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12809
12810 wake_up(&dhd->ioctl_resp_wait);
12811 return 0;
12812}
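/* Pairing sketch (illustrative only, names are hypothetical): the waiter
 * sleeps on ioctl_resp_wait until the responder sets the shared condition
 * word and calls the wake function:
 *
 *	// waiter:
 *	uint resp_done = 0;
 *	if (dhd_os_ioctl_resp_wait(pub, &resp_done) == 0)
 *		; // timed out, resp_done still 0
 *
 *	// responder, e.g. from the completion path:
 *	resp_done = 1;
 *	dhd_os_ioctl_resp_wake(pub);
 */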
12813
12814int
12815dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
12816{
12817 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12818 int timeout;
12819
12820	/* Convert timeout in milliseconds to jiffies */
12821	timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
12822
12823 DHD_PERIM_UNLOCK(pub);
12824
12825 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
12826
12827 DHD_PERIM_LOCK(pub);
12828
12829 return timeout;
12830}
12831
12832int
12833dhd_os_d3ack_wake(dhd_pub_t *pub)
12834{
12835 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12836
12837 wake_up(&dhd->d3ack_wait);
12838 return 0;
12839}
12840
12841int
12842dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
12843{
12844 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12845 int timeout;
12846
12847	/* Wait for bus usage contexts to gracefully exit within the timeout.
12848	 * Set the timeout slightly higher than dhd_ioctl_timeout_msec,
12849	 * so that the IOCTL timeout is not affected.
12850	 */
12851	/* Convert timeout in milliseconds to jiffies */
12852	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
12853
12854 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
12855
12856 return timeout;
12857}
12858
12859/*
12860 * Wait until the condition '*var == condition' is met.
12861 * Returns 0 if the condition is still false when the timeout elapses.
12862 * Returns nonzero (the remaining jiffies) if the condition becomes true in time.
12863 */
12864int
12865dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
12866{
12867 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12868 int timeout;
12869
12870	/* Convert timeout in milliseconds to jiffies */
12871	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
12872
12873 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
12874
12875 return timeout;
12876}
12877
12878/*
12879 * Wait until the '(*var & bitmask) == condition' is met.
12880 * Returns 0 if the condition is still false when the timeout elapses.
12881 * Returns nonzero (the remaining jiffies) if the condition becomes true in time.
12882 */
12883int
12884dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
12885 uint bitmask, uint condition)
12886{
12887 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12888 int timeout;
12889
12890	/* Convert timeout in milliseconds to jiffies */
12891	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
12892
12893 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
12894 ((*var & bitmask) == condition), timeout);
12895
12896 return timeout;
12897}
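/* Usage sketch (illustrative only; DHD_BUS_BUSY_RPM_ALL is a hypothetical
 * flag mask): the bitmask variant lets a caller wait for a subset of the
 * bus-busy state bits to reach a value, e.g. for all RPM bits to clear:
 *
 *	if (dhd_os_busbusy_wait_bitmask(pub, &pub->dhd_bus_busy_state,
 *			DHD_BUS_BUSY_RPM_ALL, 0) == 0)
 *		; // timed out with some busy bit still set
 */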
12898
12899int
12900dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
12901{
12902 int ret = 0;
12903 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12904 int timeout;
12905
12906 timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
12907
12908 DHD_PERIM_UNLOCK(pub);
12909	ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
12910 DHD_PERIM_LOCK(pub);
12911
12912 return ret;
12913
12914}
12915
12916int
12917dhd_os_dmaxfer_wake(dhd_pub_t *pub)
12918{
12919 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12920
12921 wake_up(&dhd->dmaxfer_wait);
12922 return 0;
12923}
12924
12925void
12926dhd_os_tx_completion_wake(dhd_pub_t *dhd)
12927{
12928 /* Call wmb() to make sure before waking up the other event value gets updated */
12929 OSL_SMP_WMB();
12930 wake_up(&dhd->tx_completion_wait);
12931}
12932
12933/* Fix compilation error for FC11 */
12934INLINE int
12935dhd_os_busbusy_wake(dhd_pub_t *pub)
12936{
12937 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12938 /* Call wmb() to make sure before waking up the other event value gets updated */
12939 OSL_SMP_WMB();
12940 wake_up(&dhd->dhd_bus_busy_state_wait);
12941 return 0;
12942}
12943
12944void
12945dhd_os_wd_timer_extend(void *bus, bool extend)
12946{
12947#ifndef BCMDBUS
12948 dhd_pub_t *pub = bus;
12949 dhd_info_t *dhd = (dhd_info_t *)pub->info;
12950
12951 if (extend)
12952 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
12953 else
12954 dhd_os_wd_timer(bus, dhd->default_wd_interval);
12955#endif /* !BCMDBUS */
12956}
12957
12958void
12959dhd_os_wd_timer(void *bus, uint wdtick)
12960{
12961#ifndef BCMDBUS
12962 dhd_pub_t *pub = bus;
12963 dhd_info_t *dhd = (dhd_info_t *)pub->info;
12964 unsigned long flags;
12965
12966 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12967
12968 if (!dhd) {
12969 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
12970 return;
12971 }
12972
12973 DHD_GENERAL_LOCK(pub, flags);
12974
12975 /* don't start the wd until fw is loaded */
12976 if (pub->busstate == DHD_BUS_DOWN) {
12977 DHD_GENERAL_UNLOCK(pub, flags);
12978#ifdef BCMSDIO
12979 if (!wdtick) {
12980 DHD_OS_WD_WAKE_UNLOCK(pub);
12981 }
12982#endif /* BCMSDIO */
12983 return;
12984 }
12985
12986 /* Totally stop the timer */
12987 if (!wdtick && dhd->wd_timer_valid == TRUE) {
12988 dhd->wd_timer_valid = FALSE;
12989 DHD_GENERAL_UNLOCK(pub, flags);
12990 del_timer_sync(&dhd->timer);
12991#ifdef BCMSDIO
12992 DHD_OS_WD_WAKE_UNLOCK(pub);
12993#endif /* BCMSDIO */
12994 return;
12995 }
12996
12997 if (wdtick) {
12998#ifdef BCMSDIO
12999 DHD_OS_WD_WAKE_LOCK(pub);
13000 dhd_watchdog_ms = (uint)wdtick;
13001#endif /* BCMSDIO */
13002		/* Re-arm the timer with the last watchdog period */
13003 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
13004 dhd->wd_timer_valid = TRUE;
13005 }
13006 DHD_GENERAL_UNLOCK(pub, flags);
13007#endif /* !BCMDBUS */
13008}
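/* Note (added for clarity): dhd_os_wd_timer(bus, 0) fully stops the watchdog
 * (del_timer_sync plus, on SDIO, dropping the watchdog wake lock), while a
 * nonzero wdtick re-arms it one period ahead. dhd_os_wd_timer_extend() above
 * simply switches between WATCHDOG_EXTEND_INTERVAL and the default interval.
 */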
13009
13010void *
13011dhd_os_open_image1(dhd_pub_t *pub, char *filename)
13012{
13013 struct file *fp;
13014 int size;
13015
13016 fp = filp_open(filename, O_RDONLY, 0);
13017 /*
13018 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13019 * Alternative:
13020 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13021 * ???
13022 */
13023 if (IS_ERR(fp)) {
13024 fp = NULL;
13025 goto err;
13026 }
13027
13028 if (!S_ISREG(file_inode(fp)->i_mode)) {
13029 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
13030 fp = NULL;
13031 goto err;
13032 }
13033
13034 size = i_size_read(file_inode(fp));
13035 if (size <= 0) {
13036 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
13037 fp = NULL;
13038 goto err;
13039 }
13040
13041 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
13042
13043err:
13044 return fp;
13045}
13046
13047int
13048dhd_os_get_image_block(char *buf, int len, void *image)
13049{
13050 struct file *fp = (struct file *)image;
13051 int rdlen;
13052 int size;
13053
13054 if (!image) {
13055 return 0;
13056 }
13057
13058 size = i_size_read(file_inode(fp));
13059 rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));
13060
13061 if (len >= size && size != rdlen) {
13062 return -EIO;
13063 }
13064
13065 if (rdlen > 0) {
13066 fp->f_pos += rdlen;
13067 }
13068
13069 return rdlen;
13070}
13071
13072#if defined(BT_OVER_SDIO)
13073int
13074dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
13075{
13076 struct file *fp = (struct file *)image;
13077 int rd_len;
13078 uint str_len = 0;
13079 char *str_end = NULL;
13080
13081 if (!image)
13082 return 0;
13083
13084 rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
13085 str_end = strnchr(str, len, '\n');
13086 if (str_end == NULL) {
13087 goto err;
13088 }
13089 str_len = (uint)(str_end - str);
13090
13091 /* Advance file pointer past the string length */
13092 fp->f_pos += str_len + 1;
13093 bzero(str_end, rd_len - str_len);
13094
13095err:
13096 return str_len;
13097}
13098#endif /* defined (BT_OVER_SDIO) */
13099
13100int
13101dhd_os_get_image_size(void *image)
13102{
13103 struct file *fp = (struct file *)image;
13104 int size;
13105 if (!image) {
13106 return 0;
13107 }
13108
13109 size = i_size_read(file_inode(fp));
13110
13111 return size;
13112}
13113
13114void
13115dhd_os_close_image1(dhd_pub_t *pub, void *image)
13116{
13117 if (image) {
13118 filp_close((struct file *)image, NULL);
13119 }
13120}
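/* Usage sketch (illustrative, not part of the driver; the path is
 * hypothetical): the image helpers are meant to be used as
 * open -> read blocks -> close, e.g. for a firmware file:
 *
 *	void *img = dhd_os_open_image1(pub, "/vendor/firmware/fw.bin");
 *	if (img) {
 *		char buf[2048];
 *		int len;
 *		while ((len = dhd_os_get_image_block(buf, sizeof(buf), img)) > 0)
 *			; // ... consume len bytes ...
 *		dhd_os_close_image1(pub, img);
 *	}
 */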
13121
13122void
13123dhd_os_sdlock(dhd_pub_t *pub)
13124{
13125 dhd_info_t *dhd;
13126
13127 dhd = (dhd_info_t *)(pub->info);
13128
13129#ifdef BCMDBUS
13130 spin_lock_bh(&dhd->sdlock);
13131#else
13132 if (dhd_dpc_prio >= 0)
13133 down(&dhd->sdsem);
13134 else
13135 spin_lock_bh(&dhd->sdlock);
13136#endif /* !BCMDBUS */
13137}
13138
13139void
13140dhd_os_sdunlock(dhd_pub_t *pub)
13141{
13142 dhd_info_t *dhd;
13143
13144 dhd = (dhd_info_t *)(pub->info);
13145
13146#ifdef BCMDBUS
13147 spin_unlock_bh(&dhd->sdlock);
13148#else
13149 if (dhd_dpc_prio >= 0)
13150 up(&dhd->sdsem);
13151 else
13152 spin_unlock_bh(&dhd->sdlock);
13153#endif /* !BCMDBUS */
13154}
13155
13156void
13157dhd_os_sdlock_txq(dhd_pub_t *pub)
13158{
13159 dhd_info_t *dhd;
13160
13161 dhd = (dhd_info_t *)(pub->info);
13162#ifdef BCMDBUS
13163 spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
13164#else
13165 spin_lock_bh(&dhd->txqlock);
13166#endif /* BCMDBUS */
13167}
13168
13169void
13170dhd_os_sdunlock_txq(dhd_pub_t *pub)
13171{
13172 dhd_info_t *dhd;
13173
13174 dhd = (dhd_info_t *)(pub->info);
13175#ifdef BCMDBUS
13176 spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
13177#else
13178 spin_unlock_bh(&dhd->txqlock);
13179#endif /* BCMDBUS */
13180}
13181
13182void
13183dhd_os_sdlock_rxq(dhd_pub_t *pub)
13184{
13185}
13186
13187void
13188dhd_os_sdunlock_rxq(dhd_pub_t *pub)
13189{
13190}
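/* Note (added for clarity): the rxq lock pair above is intentionally left
 * empty in this version; the hooks are kept so bus code can call them
 * unconditionally.
 */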
13191
13192static void
13193dhd_os_rxflock(dhd_pub_t *pub)
13194{
13195 dhd_info_t *dhd;
13196
13197 dhd = (dhd_info_t *)(pub->info);
13198 spin_lock_bh(&dhd->rxf_lock);
13199
13200}
13201
13202static void
13203dhd_os_rxfunlock(dhd_pub_t *pub)
13204{
13205	dhd_info_t *dhd;
13206
13207	dhd = (dhd_info_t *)(pub->info);
13208	spin_unlock_bh(&dhd->rxf_lock);
13209}
13210
13211#ifdef DHDTCPACK_SUPPRESS
13212unsigned long
13213dhd_os_tcpacklock(dhd_pub_t *pub)
13214{
13215 dhd_info_t *dhd;
13216 unsigned long flags = 0;
13217
13218	dhd = (dhd_info_t *)(pub->info);
13219
13220	if (dhd) {
13221#ifdef BCMSDIO
13222		spin_lock_bh(&dhd->tcpack_lock);
13223#else
13224		spin_lock_irqsave(&dhd->tcpack_lock, flags);
13225#endif /* BCMSDIO */
13226	}
13227
13228 return flags;
13229}
13230
13231void
13232dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
13233{
13234 dhd_info_t *dhd;
13235
13236#ifdef BCMSDIO
13237	BCM_REFERENCE(flags);
13238#endif /* BCMSDIO */
13239
13240	dhd = (dhd_info_t *)(pub->info);
13241
13242	if (dhd) {
13243#ifdef BCMSDIO
13244		spin_unlock_bh(&dhd->tcpack_lock);
13245#else
13246		spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
13247#endif /* BCMSDIO */
13248	}
13249}
13250#endif /* DHDTCPACK_SUPPRESS */
13251
13252uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
13253{
13254	uint8* buf;
13255	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
13256
13257	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
13258	if (buf == NULL && kmalloc_if_fail)
13259		buf = kmalloc(size, flags);
13260
13261 return buf;
13262}
13263
13264void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
13265{
13266}
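/* Note (added for clarity): dhd_os_prefree() is intentionally a no-op here;
 * buffers returned by wifi_platform_prealloc() are owned by the platform and
 * must not be freed by the driver. The kmalloc fallback taken in
 * dhd_os_prealloc() is not released by this hook.
 */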
13267
13268#if defined(WL_WIRELESS_EXT)
13269struct iw_statistics *
13270dhd_get_wireless_stats(struct net_device *dev)
13271{
13272 int res = 0;
13273 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13274
13275 if (!dhd->pub.up) {
13276 return NULL;
13277 }
13278
13279 if (!(dev->flags & IFF_UP)) {
13280 return NULL;
13281 }
13282
13283 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
13284
13285 if (res == 0)
13286 return &dhd->iw.wstats;
13287 else
13288 return NULL;
13289}
13290#endif /* defined(WL_WIRELESS_EXT) */
13291
13292static int
13293dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
13294 wl_event_msg_t *event, void **data)
13295{
13296 int bcmerror = 0;
13297#ifdef WL_CFG80211
13298 unsigned long flags = 0;
13299#endif /* WL_CFG80211 */
13300 ASSERT(dhd != NULL);
13301
13302#ifdef SHOW_LOGTRACE
13303 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13304 &dhd->event_data);
13305#else
13306 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13307 NULL);
13308#endif /* SHOW_LOGTRACE */
13309 if (unlikely(bcmerror != BCME_OK)) {
13310 return bcmerror;
13311 }
13312
13313 if (ntoh32(event->event_type) == WLC_E_IF) {
13314 /* WLC_E_IF event types are consumed by wl_process_host_event.
13315 * For ifadd/del ops, the netdev ptr may not be valid at this
13316 * point. so return before invoking cfg80211/wext handlers.
13317 */
13318 return BCME_OK;
13319 }
13320
13321#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
13322 wl_ext_event_send(dhd->pub.event_params, event, *data);
13323#endif
13324
13325#ifdef WL_CFG80211
13326 if (dhd->iflist[ifidx]->net) {
13327 spin_lock_irqsave(&dhd->pub.up_lock, flags);
13328 if (dhd->pub.up) {
13329 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
13330 }
13331 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
13332	}
13333#endif /* defined(WL_CFG80211) */
13334
13335	return (bcmerror);
13336}
13337
13338/* send up locally generated event */
13339void
13340dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
13341{
13342 switch (ntoh32(event->event_type)) {
13343 /* Handle error case or further events here */
13344 default:
13345 break;
13346 }
13347}
13348
13349#ifdef LOG_INTO_TCPDUMP
13350void
13351dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
13352{
13353 struct sk_buff *p, *skb;
13354 uint32 pktlen;
13355 int len;
13356 dhd_if_t *ifp;
13357 dhd_info_t *dhd;
13358 uchar *skb_data;
13359 int ifidx = 0;
13360 struct ether_header eth;
13361
13362 pktlen = sizeof(eth) + data_len;
13363 dhd = dhdp->info;
13364
13365 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
13366 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
13367
13368 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
13369 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
13370 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
13371 eth.ether_type = hton16(ETHER_TYPE_BRCM);
13372
13373 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
13374 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
13375 skb = PKTTONATIVE(dhdp->osh, p);
13376 skb_data = skb->data;
13377 len = skb->len;
13378
13379 ifidx = dhd_ifname2idx(dhd, "wlan0");
13380 ifp = dhd->iflist[ifidx];
13381 if (ifp == NULL)
13382 ifp = dhd->iflist[0];
13383
13384 ASSERT(ifp);
13385 skb->dev = ifp->net;
13386 skb->protocol = eth_type_trans(skb, skb->dev);
13387 skb->data = skb_data;
13388 skb->len = len;
13389
13390 /* Strip header, count, deliver upward */
13391 skb_pull(skb, ETH_HLEN);
13392
13393 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
13394 __FUNCTION__, __LINE__);
13395 /* Send the packet */
13396 if (in_interrupt()) {
13397 netif_rx(skb);
13398 } else {
13399 netif_rx_ni(skb);
13400 }
13401 } else {
13402 /* Could not allocate a sk_buf */
13403 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
13404 }
13405}
13406#endif /* LOG_INTO_TCPDUMP */
13407
13408void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
13409{
13410#if defined(BCMSDIO)
13411 struct dhd_info *dhdinfo = dhd->info;
13412
13413	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
13414
13415 dhd_os_sdunlock(dhd);
13416 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
13417 dhd_os_sdlock(dhd);
13418#endif /* defined(BCMSDIO) */
13419 return;
13420} /* dhd_wait_for_event */
13421
13422void dhd_wait_event_wakeup(dhd_pub_t *dhd)
13423{
13424#if defined(BCMSDIO)
13425 struct dhd_info *dhdinfo = dhd->info;
13426 if (waitqueue_active(&dhdinfo->ctrl_wait))
13427 wake_up(&dhdinfo->ctrl_wait);
13428#endif // endif
13429 return;
13430}
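/* Pairing note (added for clarity): dhd_wait_for_event() sleeps on ctrl_wait
 * until *lockvar goes FALSE or IOCTL_RESP_TIMEOUT expires (SDIO builds drop
 * and retake the SD lock around the sleep); dhd_wait_event_wakeup() is the
 * matching wakeup, called after the owner clears the lock variable.
 */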
13431
13432#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
13433int
13434dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
13435{
13436	int ret;
13437
13438	dhd_info_t *dhd = DHD_DEV_INFO(dev);
13439
13440#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13441 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
13442 return BCME_ERROR;
13443#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13444
13445 if (flag == TRUE) {
13446 /* Issue wl down command before resetting the chip */
13447 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
13448 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
13449 }
13450#ifdef PROP_TXSTATUS
13451 if (dhd->pub.wlfc_enabled) {
13452 dhd_wlfc_deinit(&dhd->pub);
13453 }
13454#endif /* PROP_TXSTATUS */
13455#ifdef PNO_SUPPORT
13456 if (dhd->pub.pno_state) {
13457 dhd_pno_deinit(&dhd->pub);
13458 }
13459#endif // endif
13460#ifdef RTT_SUPPORT
13461 if (dhd->pub.rtt_state) {
13462 dhd_rtt_deinit(&dhd->pub);
13463 }
13464#endif /* RTT_SUPPORT */
13465
13466#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
13467 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
13468#endif /* DBG_PKT_MON */
13469 }
13470
13471#ifdef BCMSDIO
13472 if (!flag) {
13473 dhd_update_fw_nv_path(dhd);
13474 /* update firmware and nvram path to sdio bus */
13475 dhd_bus_update_fw_nv_path(dhd->pub.bus,
13476 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
13477 }
13478#endif /* BCMSDIO */
13479
13480	ret = dhd_bus_devreset(&dhd->pub, flag);
13481
13482#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13483 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
13484 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
13485#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13486
13487 if (flag) {
13488 /* Clear some flags for recovery logic */
13489 dhd->pub.dongle_trap_occured = 0;
13490 dhd->pub.iovar_timeout_occured = 0;
13491#ifdef PCIE_FULL_DONGLE
13492 dhd->pub.d3ack_timeout_occured = 0;
13493 dhd->pub.livelock_occured = 0;
13494 dhd->pub.pktid_audit_failed = 0;
13495#endif /* PCIE_FULL_DONGLE */
13496 dhd->pub.iface_op_failed = 0;
13497 dhd->pub.scan_timeout_occurred = 0;
13498 dhd->pub.scan_busy_occurred = 0;
13499 dhd->pub.smmu_fault_occurred = 0;
13500 }
13501
13502 if (ret) {
13503 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
13504 }
13505
13506	return ret;
13507}
13508
13509#ifdef BCMSDIO
13510int
13511dhd_net_bus_suspend(struct net_device *dev)
13512{
13513 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13514 return dhd_bus_suspend(&dhd->pub);
13515}
13516
13517int
13518dhd_net_bus_resume(struct net_device *dev, uint8 stage)
13519{
13520 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13521 return dhd_bus_resume(&dhd->pub, stage);
13522}
13523
13524#endif /* BCMSDIO */
13525#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
13526
13527int net_os_set_suspend_disable(struct net_device *dev, int val)
13528{
13529 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13530 int ret = 0;
13531
13532 if (dhd) {
13533 ret = dhd->pub.suspend_disable_flag;
13534 dhd->pub.suspend_disable_flag = val;
13535	}
13536	return ret;
13537}
13538
13539int net_os_set_suspend(struct net_device *dev, int val, int force)
13540{
13541 int ret = 0;
13542 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13543
13544 if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
13545 if (!val)
13546 dhd_conf_set_suspend_resume(&dhd->pub, val);
13547#ifdef CONFIG_MACH_UNIVERSAL7420
13548#endif /* CONFIG_MACH_UNIVERSAL7420 */
13549#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
13550 ret = dhd_set_suspend(val, &dhd->pub);
13551#else
13552 ret = dhd_suspend_resume_helper(dhd, val, force);
13553#endif // endif
13554#ifdef WL_CFG80211
13555 wl_cfg80211_update_power_mode(dev);
13556#endif // endif
13557 if (val)
13558 dhd_conf_set_suspend_resume(&dhd->pub, val);
13559	}
13560	return ret;
13561}
13562
13563int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
13564{
13565	dhd_info_t *dhd = DHD_DEV_INFO(dev);
13566
13567 if (dhd) {
13568 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
13569 __FUNCTION__, val));
13570 dhd->pub.suspend_bcn_li_dtim = val;
13571 }
13572
13573	return 0;
13574}
13575
13576int net_os_set_max_dtim_enable(struct net_device *dev, int val)
13577{
13578 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13579
13580 if (dhd) {
13581 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
13582 __FUNCTION__, (val ? "Enable" : "Disable")));
13583 if (val) {
13584 dhd->pub.max_dtim_enable = TRUE;
13585 } else {
13586 dhd->pub.max_dtim_enable = FALSE;
13587 }
13588 } else {
13589 return -1;
13590 }
13591
13592 return 0;
13593}
13594
13595#ifdef DISABLE_DTIM_IN_SUSPEND
13596int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
13597{
13598	dhd_info_t *dhd = DHD_DEV_INFO(dev);
13599
13600 if (dhd) {
13601 DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
13602 __FUNCTION__, (val ? "Enable" : "Disable")));
13603 if (val) {
13604 dhd->pub.disable_dtim_in_suspend = TRUE;
13605 } else {
13606 dhd->pub.disable_dtim_in_suspend = FALSE;
13607 }
13608 } else {
13609 return -1;
13610 }
13611
13612	return 0;
13613}
13614#endif /* DISABLE_DTIM_IN_SUSPEND */
13615
13616#ifdef PKT_FILTER_SUPPORT
13617int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
13618{
13619	int ret = 0;
13620
13621#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
13622 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13623
13624 if (!dhd_master_mode)
13625 add_remove = !add_remove;
13626 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
13627 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
13628 return 0;
13629 }
13630
13631#ifdef BLOCK_IPV6_PACKET
13632 /* customer want to use NO IPV6 packets only */
13633 if (num == DHD_MULTICAST6_FILTER_NUM) {
13634 return 0;
13635	}
13636#endif /* BLOCK_IPV6_PACKET */
13637
13638 if (num >= dhd->pub.pktfilter_count) {
13639 return -EINVAL;
13640	}
13641
13642 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
13643#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
13644
13645 return ret;
13646}
13647
13648int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
13649
13650{
13651 int ret = 0;
13652
13653	/* Packet filtering is set only if we are still in early-suspend and
13654	 * we need either to turn it ON or turn it OFF.
13655	 * We can always turn it OFF in case of early-suspend, but we turn it
13656	 * back ON only if suspend_disable_flag was not set.
13657	 */
13658 if (dhdp && dhdp->up) {
13659 if (dhdp->in_suspend) {
13660 if (!val || (val && !dhdp->suspend_disable_flag))
13661 dhd_enable_packet_filter(val, dhdp);
13662 }
13663	}
13664	return ret;
13665}
13666
13667/* function to enable/disable packet filtering for a network device */
13668int net_os_enable_packet_filter(struct net_device *dev, int val)
13669{
13670	dhd_info_t *dhd = DHD_DEV_INFO(dev);
13671
13672 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
13673 return dhd_os_enable_packet_filter(&dhd->pub, val);
13674}
13675#endif /* PKT_FILTER_SUPPORT */
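/* Usage sketch (illustrative only; ndev is a hypothetical net_device): packet
 * filters are toggled per device via the wrapper above; enabling is gated on
 * being in (early) suspend with suspend_disable_flag unset, as implemented in
 * dhd_os_enable_packet_filter():
 *
 *	net_os_enable_packet_filter(ndev, 1);	// request filters ON
 *	net_os_enable_packet_filter(ndev, 0);	// force filters OFF
 */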
13676
13677int
13678dhd_dev_init_ioctl(struct net_device *dev)
13679{
13680 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13681 int ret;
13682
13683 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
13684 goto done;
13685
13686done:
13687 return ret;
13688}
13689
13690int
13691dhd_dev_get_feature_set(struct net_device *dev)
13692{
13693 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
13694 dhd_pub_t *dhd = (&ptr->pub);
13695 int feature_set = 0;
13696
13697 if (FW_SUPPORTED(dhd, sta))
13698 feature_set |= WIFI_FEATURE_INFRA;
13699 if (FW_SUPPORTED(dhd, dualband))
13700 feature_set |= WIFI_FEATURE_INFRA_5G;
13701 if (FW_SUPPORTED(dhd, p2p))
13702 feature_set |= WIFI_FEATURE_P2P;
13703 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
13704 feature_set |= WIFI_FEATURE_SOFT_AP;
13705 if (FW_SUPPORTED(dhd, tdls))
13706 feature_set |= WIFI_FEATURE_TDLS;
13707 if (FW_SUPPORTED(dhd, vsdb))
13708 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
13709 if (FW_SUPPORTED(dhd, nan)) {
13710 feature_set |= WIFI_FEATURE_NAN;
13711		/* NAN is essential for d2d rtt */
13712 if (FW_SUPPORTED(dhd, rttd2d))
13713 feature_set |= WIFI_FEATURE_D2D_RTT;
13714 }
13715#ifdef RTT_SUPPORT
13716 if (dhd->rtt_supported) {
13717 feature_set |= WIFI_FEATURE_D2D_RTT;
13718 feature_set |= WIFI_FEATURE_D2AP_RTT;
13719 }
13720#endif /* RTT_SUPPORT */
13721#ifdef LINKSTAT_SUPPORT
13722 feature_set |= WIFI_FEATURE_LINKSTAT;
13723#endif /* LINKSTAT_SUPPORT */
13724
13725#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
13726 if (dhd_is_pno_supported(dhd)) {
13727 feature_set |= WIFI_FEATURE_PNO;
13728#ifdef GSCAN_SUPPORT
13729 /* terence 20171115: remove to get GTS PASS
13730 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
13731 */
13732// feature_set |= WIFI_FEATURE_GSCAN;
13733// feature_set |= WIFI_FEATURE_HAL_EPNO;
13734#endif /* GSCAN_SUPPORT */
13735 }
13736#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
13737#ifdef RSSI_MONITOR_SUPPORT
13738 if (FW_SUPPORTED(dhd, rssi_mon)) {
13739 feature_set |= WIFI_FEATURE_RSSI_MONITOR;
13740 }
13741#endif /* RSSI_MONITOR_SUPPORT */
13742#ifdef WL11U
13743 feature_set |= WIFI_FEATURE_HOTSPOT;
13744#endif /* WL11U */
13745#ifdef NDO_CONFIG_SUPPORT
13746 feature_set |= WIFI_FEATURE_CONFIG_NDO;
13747#endif /* NDO_CONFIG_SUPPORT */
13748#ifdef KEEP_ALIVE
13749 feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
13750#endif /* KEEP_ALIVE */
13751#ifdef FILTER_IE
13752 if (FW_SUPPORTED(dhd, fie)) {
13753 feature_set |= WIFI_FEATURE_FILTER_IE;
13754 }
13755#endif /* FILTER_IE */
13756#ifdef ROAMEXP_SUPPORT
13757 /* Check if the Android O roam feature is supported by FW */
13758 if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
13759 feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
13760 }
13761#endif /* ROAMEXP_SUPPORT */
13762 return feature_set;
13763}
13764
13765int
13766dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
13767{
13768 int feature_set_full;
13769 int ret = 0;
13770
13771	feature_set_full = dhd_dev_get_feature_set(dev);
13772
13773	/* Common feature set for all interfaces */
13774 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
13775 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
13776 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
13777 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
13778 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
13779 (feature_set_full & WIFI_FEATURE_EPR);
13780
13781 /* Specific feature group for each interface */
13782 switch (num) {
13783 case 0:
13784 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
13785 /* Not supported yet */
13786 /* (feature_set_full & WIFI_FEATURE_NAN) | */
13787 (feature_set_full & WIFI_FEATURE_TDLS) |
13788 (feature_set_full & WIFI_FEATURE_PNO) |
13789 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
13790 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
13791 (feature_set_full & WIFI_FEATURE_GSCAN) |
13792 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
13793 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
13794 break;
13795
13796 case 1:
13797 ret |= (feature_set_full & WIFI_FEATURE_P2P);
13798 /* Not yet verified NAN with P2P */
13799 /* (feature_set_full & WIFI_FEATURE_NAN) | */
13800 break;
13801
13802 case 2:
13803 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
13804 (feature_set_full & WIFI_FEATURE_TDLS) |
13805 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
13806 break;
13807
13808 default:
13809 ret = WIFI_FEATURE_INVALID;
13810 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
13811 break;
d2839953
RC
13812 }
13813
13814 return ret;
13815}
13816
13817#ifdef CUSTOM_FORCE_NODFS_FLAG
13818int
13819dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
13820{
13821 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13822
13823 if (nodfs)
13824 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
13825 else
13826 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
13827 dhd->pub.force_country_change = TRUE;
13828 return 0;
13829}
13830#endif /* CUSTOM_FORCE_NODFS_FLAG */
13831
13832#ifdef NDO_CONFIG_SUPPORT
13833int
13834dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
13835{
13836 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
13837 dhd_pub_t *dhdp = &dhd->pub;
13838 int ret = 0;
13839
13840 if (enable) {
13841 /* enable ND offload feature (will be enabled in FW on suspend) */
13842 dhdp->ndo_enable = TRUE;
13843
13844 /* Update changes of anycast address & DAD failed address */
13845 ret = dhd_dev_ndo_update_inet6addr(dev);
13846 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
13847 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
13848 return ret;
13849 }
13850 } else {
13851 /* disable ND offload feature */
13852 dhdp->ndo_enable = FALSE;
13853
13854 /* disable ND offload in FW */
13855 ret = dhd_ndo_enable(dhdp, FALSE);
13856 if (ret < 0) {
13857 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
13858 }
13859 }
13860 return ret;
13861}
13862
13863/* #pragma used as a WAR to fix a build failure:
13864 * ignore dropping of the 'const' qualifier in the 'list_entry' macro.
13865 * This pragma disables the warning only for the following function.
13866 */
13867#pragma GCC diagnostic push
13868#pragma GCC diagnostic ignored "-Wcast-qual"
13869
13870static int
13871dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
13872{
13873 struct inet6_ifaddr *ifa;
13874 struct ifacaddr6 *acaddr = NULL;
13875 int addr_count = 0;
13876
13877 /* lock */
13878 read_lock_bh(&inet6->lock);
13879
13880 /* Count valid unicast address */
13881 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
13882 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
13883 addr_count++;
13884 }
13885	}
13886
13887 /* Count anycast address */
13888 acaddr = inet6->ac_list;
13889 while (acaddr) {
13890 addr_count++;
13891 acaddr = acaddr->aca_next;
13892 }
13893
13894 /* unlock */
13895 read_unlock_bh(&inet6->lock);
13896
13897 return addr_count;
13898}
13899
13900int
13901dhd_dev_ndo_update_inet6addr(struct net_device *dev)
13902{
13903 dhd_info_t *dhd;
13904 dhd_pub_t *dhdp;
13905 struct inet6_dev *inet6;
13906 struct inet6_ifaddr *ifa;
13907 struct ifacaddr6 *acaddr = NULL;
13908 struct in6_addr *ipv6_addr = NULL;
13909 int cnt, i;
13910 int ret = BCME_OK;
13911
13912	/*
13913	 * this function evaluates host IP addresses in struct inet6_dev:
13914	 * unicast addresses in inet6_dev->addr_list,
13915	 * anycast addresses in inet6_dev->ac_list.
13916	 * while evaluating inet6_dev, read_lock_bh() is required to prevent
13917	 * access to a null (freed) pointer.
13918	 */
13919
13920 if (dev) {
13921 inet6 = dev->ip6_ptr;
13922 if (!inet6) {
13923 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
13924 return BCME_ERROR;
13925 }
13926
13927 dhd = DHD_DEV_INFO(dev);
13928 if (!dhd) {
13929 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
13930 return BCME_ERROR;
13931 }
13932 dhdp = &dhd->pub;
13933
13934 if (dhd_net2idx(dhd, dev) != 0) {
13935 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
13936 return BCME_ERROR;
13937 }
13938 } else {
13939 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
13940 return BCME_ERROR;
13941 }
13942
13943 /* Check host IP overflow */
13944 cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
13945 if (cnt > dhdp->ndo_max_host_ip) {
13946 if (!dhdp->ndo_host_ip_overflow) {
13947 dhdp->ndo_host_ip_overflow = TRUE;
13948 /* Disable ND offload in FW */
13949 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
13950 ret = dhd_ndo_enable(dhdp, FALSE);
13951 }
13952
13953 return ret;
13954 }
13955
13956 /*
13957 * Allocate ipv6 addr buffer to store addresses to be added/removed.
13958 * driver need to lock inet6_dev while accessing structure. but, driver
13959 * cannot use ioctl while inet6_dev locked since it requires scheduling
13960 * hence, copy addresses to the buffer and do ioctl after unlock.
13961 */
13962 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
13963 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
13964 if (!ipv6_addr) {
13965 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
13966 return BCME_NOMEM;
13967 }
13968
13969 /* Find DAD failed unicast address to be removed */
13970 cnt = 0;
13971 read_lock_bh(&inet6->lock);
13972 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
13973 /* DAD failed unicast address */
13974 if ((ifa->flags & IFA_F_DADFAILED) &&
13975 (cnt < dhdp->ndo_max_host_ip)) {
13976 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
13977 cnt++;
13978		}
13979	}
13980	read_unlock_bh(&inet6->lock);
13981
13982 /* Remove DAD failed unicast address */
13983 for (i = 0; i < cnt; i++) {
13984 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
13985 ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
13986 if (ret < 0) {
13987 goto done;
13988 }
13989 }
13990
13991 /* Remove all anycast address */
13992 ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
13993 if (ret < 0) {
13994 goto done;
13995 }
13996
13997 /*
13998 * if ND offload was disabled due to host ip overflow,
13999 * attempt to add valid unicast address.
14000 */
14001 if (dhdp->ndo_host_ip_overflow) {
14002 /* Find valid unicast address */
14003 cnt = 0;
14004 read_lock_bh(&inet6->lock);
14005 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14006 /* valid unicast address */
14007 if (!(ifa->flags & IFA_F_DADFAILED) &&
14008 (cnt < dhdp->ndo_max_host_ip)) {
14009 memcpy(&ipv6_addr[cnt], &ifa->addr,
14010 sizeof(struct in6_addr));
14011 cnt++;
14012 }
14013 }
14014 read_unlock_bh(&inet6->lock);
14015
14016 /* Add valid unicast address */
14017 for (i = 0; i < cnt; i++) {
14018 ret = dhd_ndo_add_ip_with_type(dhdp,
14019 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
14020 if (ret < 0) {
14021 goto done;
14022 }
14023 }
14024 }
14025
14026 /* Find anycast address */
14027 cnt = 0;
14028 read_lock_bh(&inet6->lock);
14029 acaddr = inet6->ac_list;
14030 while (acaddr) {
14031 if (cnt < dhdp->ndo_max_host_ip) {
14032 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
14033 cnt++;
14034 }
14035 acaddr = acaddr->aca_next;
14036	}
14037	read_unlock_bh(&inet6->lock);
14038
14039 /* Add anycast address */
14040 for (i = 0; i < cnt; i++) {
14041 ret = dhd_ndo_add_ip_with_type(dhdp,
14042 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14043 if (ret < 0) {
14044 goto done;
14045 }
14046	}
14047
14048	/* Now all host IP addresses have been added successfully */
14049 if (dhdp->ndo_host_ip_overflow) {
14050 dhdp->ndo_host_ip_overflow = FALSE;
14051 if (dhdp->in_suspend) {
14052 /* drvier is in (early) suspend state, need to enable ND offload in FW */
14053 DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
14054 ret = dhd_ndo_enable(dhdp, TRUE);
14055 }
d2839953
RC
14056 }
14057
965f77c4
RC
14058done:
14059 if (ipv6_addr) {
14060 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14061 }
d2839953
RC
14062
14063 return ret;
14064}
965f77c4 14065#pragma GCC diagnostic pop
d2839953 14066
965f77c4 14067#endif /* NDO_CONFIG_SUPPORT */
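
/*
 * Illustrative sketch (not part of the driver): the lock/copy/unlock/ioctl
 * pattern used above generalizes to any case where a sleeping operation must
 * consume data guarded by a non-sleeping lock. All names below are
 * hypothetical:
 *
 *	read_lock_bh(&dev_lock);
 *	n = copy_entries(snapshot, &addr_list, max);	// no sleeping while locked
 *	read_unlock_bh(&dev_lock);
 *	for (i = 0; i < n; i++)
 *		do_blocking_ioctl(&snapshot[i]);	// may schedule; lock dropped
 */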

#ifdef PNO_SUPPORT
/* Linux wrapper to call common dhd_pno_stop_for_ssid */
int
dhd_dev_pno_stop_for_ssid(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_stop_for_ssid(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_set_for_ssid */
int
dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
		pno_repeat, pno_freq_expo_max, channel_list, nchan));
}

/* Linux wrapper to call common dhd_pno_enable */
int
dhd_dev_pno_enable(struct net_device *dev, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_enable(&dhd->pub, enable));
}

/* Linux wrapper to call common dhd_pno_set_for_hotlist */
int
dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
	struct dhd_pno_hotlist_params *hotlist_params)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
}

/* Linux wrapper to call common dhd_pno_stop_for_batch */
int
dhd_dev_pno_stop_for_batch(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_stop_for_batch(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_set_for_batch */
int
dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
}

/* Linux wrapper to call common dhd_pno_get_for_batch */
int
dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
}
#endif /* PNO_SUPPORT */
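
/*
 * Usage sketch (illustrative only): a cfg80211 vendor-command handler would
 * typically drive these wrappers from process context, e.g.
 *
 *	char buf[2048];
 *	int res = dhd_dev_pno_get_for_batch(ndev, buf, sizeof(buf));
 *	if (res < 0)
 *		return res;	// BCME_* error from the common layer
 *
 * The wrappers exist so the common dhd_pno_* code stays OS-agnostic while
 * the net_device-to-dhd_pub translation remains Linux-specific.
 */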

#if defined(PNO_SUPPORT)
#ifdef GSCAN_SUPPORT
bool
dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_is_legacy_pno_enabled(&dhd->pub));
}

int
dhd_dev_set_epno(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (!dhd) {
		return BCME_ERROR;
	}
	return dhd_pno_set_epno(&dhd->pub);
}

int
dhd_dev_flush_fw_epno(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (!dhd) {
		return BCME_ERROR;
	}
	return dhd_pno_flush_fw_epno(&dhd->pub);
}

/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *buf, bool flush)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
}

/* Linux wrapper to call common dhd_wait_batch_results_complete */
int
dhd_dev_wait_batch_results_complete(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_wait_batch_results_complete(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_lock_batch_results */
int
dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_lock_batch_results(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_unlock_batch_results */
void
dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_unlock_batch_results(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
int
dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
}

/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
int
dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
}

/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
void *
dhd_dev_hotlist_scan_event(struct net_device *dev,
	const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
}

/* Linux wrapper to call common dhd_process_full_gscan_result */
void *
dhd_dev_process_full_gscan_result(struct net_device *dev,
	const void *data, uint32 len, int *send_evt_bytes)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
}

void
dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);

	return;
}

int
dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
}

/* Linux wrapper to call common dhd_retreive_batch_scan_results */
int
dhd_dev_retrieve_batch_scan(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_retreive_batch_scan_results(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_process_epno_result */
void *
dhd_dev_process_epno_result(struct net_device *dev,
	const void *data, uint32 event, int *send_evt_bytes)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
}

int
dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
	wlc_roam_exp_params_t *roam_param)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;
	int err;

	if (!roam_param) {
		return BCME_BADARG;
	}

	DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
		roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
	DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
		roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
		roam_param->cur_bssid_boost));
	DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
		roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));

	memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
	if (dhd->pub.lazy_roam_enable) {
		roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
	}
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	}
	return err;
}

int
dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;

	memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	if (enable) {
		roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
	}

	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	} else {
		dhd->pub.lazy_roam_enable = (enable != 0);
	}
	return err;
}

int
dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
	wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
{
	int err;
	uint len;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	bssid_pref->version = BSSID_PREF_LIST_VERSION;
	/* By default, programming the bssid pref list flushes out old values */
	bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF : 0;
	len = sizeof(wl_bssid_pref_cfg_t);
	if (bssid_pref->count) {
		len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
	}
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
		(char *)bssid_pref, len, NULL, 0, TRUE);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* GSCAN_SUPPORT */
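
/*
 * Sizing note (illustrative, not driver code): wl_bssid_pref_cfg_t embeds one
 * wl_bssid_pref_list_t entry, so a list of N entries needs the base struct
 * plus (N - 1) extra entries. The same arithmetic applies to any iovar
 * payload declared as a "struct with one trailing element":
 *
 *	len = sizeof(base_cfg_t);
 *	if (n > 0)
 *		len += (n - 1) * sizeof(entry_t);
 */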

#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
int
dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
	uint32 len, uint32 flush)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	int macmode;

	if (blacklist) {
		err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
			len, TRUE, 0);
		if (err != BCME_OK) {
			DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
			return err;
		}
	}
	/* By default, programming the blacklist flushes out old values */
	macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
	err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
		sizeof(macmode), TRUE, 0);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
	}
	return err;
}

int
dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
	uint32 len, uint32 flush)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_ssid_whitelist_t whitelist_ssid_flush;

	if (!ssid_whitelist) {
		if (flush) {
			ssid_whitelist = &whitelist_ssid_flush;
			ssid_whitelist->ssid_count = 0;
		} else {
			DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
			return BCME_BADARG;
		}
	}
	ssid_whitelist->version = SSID_WHITELIST_VERSION;
	ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
		0, TRUE);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n",
			__FUNCTION__, err));
	}
	return err;
}
#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */

#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
/* Linux wrapper to call common dhd_pno_get_gscan */
void *
dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *info, uint32 *len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
}
#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
#endif /* PNO_SUPPORT */

#ifdef RSSI_MONITOR_SUPPORT
int
dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
	int8 max_rssi, int8 min_rssi)
{
	int err;
	wl_rssi_monitor_cfg_t rssi_monitor;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	rssi_monitor.version = RSSI_MONITOR_VERSION;
	rssi_monitor.max_rssi = max_rssi;
	rssi_monitor.min_rssi = min_rssi;
	rssi_monitor.flags = start ? 0 : RSSI_MONITOR_STOP;
	err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
		NULL, 0, TRUE);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* RSSI_MONITOR_SUPPORT */
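
/*
 * Usage sketch (illustrative only; thresholds are hypothetical): arm the
 * monitor so the firmware raises an event when RSSI leaves the
 * [-80, -60] dBm window, then tear it down:
 *
 *	dhd_dev_set_rssi_monitor_cfg(ndev, 1, -60, -80);	// start
 *	...
 *	dhd_dev_set_rssi_monitor_cfg(ndev, 0, 0, 0);		// stop
 *
 * BCME_UNSUPPORTED is deliberately not treated as an error above, since
 * older firmware may lack the rssi_monitor iovar.
 */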

#ifdef DHDTCPACK_SUPPRESS
int
dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_tcpack_suppress_set(&dhd->pub, enable);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* DHDTCPACK_SUPPRESS */

int
dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (!dhdp || !oui) {
		DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (ETHER_ISMULTI(oui)) {
		DHD_ERROR(("Expected unicast OUI\n"));
		return BCME_ERROR;
	} else {
		uint8 *rand_mac_oui = dhdp->rand_mac_oui;
		memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
		DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
			MACOUI2STRDBG(rand_mac_oui)));
	}
	return BCME_OK;
}

int
dhd_set_rand_mac_oui(dhd_pub_t *dhd)
{
	int err;
	wl_pfn_macaddr_cfg_t wl_cfg;
	uint8 *rand_mac_oui = dhd->rand_mac_oui;

	memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
	memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
	wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
	if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
		wl_cfg.flags = 0;
	} else {
		wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
	}

	DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
		MACOUI2STRDBG(rand_mac_oui)));

	err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
	}
	return err;
}

#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
/* Linux wrapper to call common dhd_rtt_set_cfg */
int
dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_set_cfg(&dhd->pub, buf));
}

int
dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
}

int
dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
}

int
dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
}

int
dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_capability(&dhd->pub, capa));
}

int
dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
}

int
dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
}

int
dhd_dev_rtt_cancel_responder(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	return (dhd_rtt_cancel_responder(&dhd->pub));
}

#endif /* RTT_SUPPORT && WL_CFG80211 */

#ifdef KEEP_ALIVE
#define KA_TEMP_BUF_SIZE 512
#define KA_FRAME_SIZE 300

int
dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
	uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
{
	const int ETHERTYPE_LEN = 2;
	char *pbuf = NULL;
	const char *str;
	wl_mkeep_alive_pkt_t mkeep_alive_pkt;
	wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
	int buf_len = 0;
	int str_len = 0;
	int res = BCME_ERROR;
	int len_bytes = 0;
	int i = 0;

	/* ether frame to have both max IP pkt (256 bytes) and ether header */
	char *pmac_frame = NULL;
	char *pmac_frame_begin = NULL;

	/*
	 * The mkeep_alive packet is for the STA interface only; if the bss is
	 * configured as AP, the dongle shall reject a mkeep_alive request.
	 */
	if (!dhd_support_sta_mode(dhd_pub))
		return res;

	DHD_TRACE(("%s execution\n", __FUNCTION__));

	if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
		DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
		res = BCME_NOMEM;
		return res;
	}

	if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
		DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
		res = BCME_NOMEM;
		goto exit;
	}
	pmac_frame_begin = pmac_frame;

	/*
	 * Get current mkeep-alive status.
	 */
	res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
		KA_TEMP_BUF_SIZE, FALSE);
	if (res < 0) {
		DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
		goto exit;
	} else {
		/* Check whether the requested ID is already occupied */
		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
		if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
			DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
				__FUNCTION__, mkeep_alive_id));

			/* Current occupied ID info */
			DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
			DHD_ERROR((" Id : %d\n"
				" Period: %d msec\n"
				" Length: %d\n"
				" Packet: 0x",
				mkeep_alive_pktp->keep_alive_id,
				dtoh32(mkeep_alive_pktp->period_msec),
				dtoh16(mkeep_alive_pktp->len_bytes)));

			for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
				DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
			}
			DHD_ERROR(("\n"));

			res = BCME_NOTFOUND;
			goto exit;
		}
	}

	/* Request the specified ID */
	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
	memset(pbuf, 0, KA_TEMP_BUF_SIZE);
	str = "mkeep_alive";
	str_len = strlen(str);
	strncpy(pbuf, str, str_len);
	pbuf[str_len] = '\0';

	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
	mkeep_alive_pkt.period_msec = htod32(period_msec);
	buf_len = str_len + 1;
	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);

	/* ID assigned */
	mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;

	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;

	/*
	 * Build up the Ethernet frame
	 */

	/* Map dest mac addr */
	memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
	pmac_frame += ETHER_ADDR_LEN;

	/* Map src mac addr */
	memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
	pmac_frame += ETHER_ADDR_LEN;

	/* Map Ethernet type (ETHERTYPE_IP: 0x0800) */
	*(pmac_frame++) = 0x08;
	*(pmac_frame++) = 0x00;

	/* Map IP pkt */
	memcpy(pmac_frame, ip_pkt, ip_pkt_len);
	pmac_frame += ip_pkt_len;

	/*
	 * Length of the ether frame
	 * = src mac + dst mac + ether type + ip pkt len
	 */
	len_bytes = ETHER_ADDR_LEN * 2 + ETHERTYPE_LEN + ip_pkt_len;
	memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
	buf_len += len_bytes;
	mkeep_alive_pkt.len_bytes = htod16(len_bytes);

	/*
	 * Keep-alive attributes are set in a local variable (mkeep_alive_pkt) and
	 * then memcpy'ed into the buffer (mkeep_alive_pktp) since there is no
	 * guarantee that the buffer is properly aligned.
	 */
	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);

	res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
exit:
	if (pmac_frame_begin) {
		MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
		pmac_frame_begin = NULL;
	}
	if (pbuf) {
		MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
		pbuf = NULL;
	}
	return res;
}
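
/*
 * Illustrative layout note (not driver code): the WLC_SET_VAR buffer built
 * above is the iovar name, a NUL, the fixed mkeep_alive header, then the raw
 * Ethernet frame:
 *
 *	"mkeep_alive\0" | wl_mkeep_alive_pkt_t (fixed part) | dst(6) src(6) 0x0800 | IP pkt
 *
 * so buf_len = strlen("mkeep_alive") + 1 + WL_MKEEP_ALIVE_FIXED_LEN + 14 +
 * ip_pkt_len, which must stay within KA_TEMP_BUF_SIZE (512 bytes).
 */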

int
dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
{
	char *pbuf = NULL;
	wl_mkeep_alive_pkt_t mkeep_alive_pkt;
	wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
	int res = BCME_ERROR;
	int i = 0;

	/*
	 * The mkeep_alive packet is for the STA interface only; if the bss is
	 * configured as AP, the dongle shall reject a mkeep_alive request.
	 */
	if (!dhd_support_sta_mode(dhd_pub))
		return res;

	DHD_TRACE(("%s execution\n", __FUNCTION__));

	/*
	 * Get current mkeep-alive status. Skip ID 0, which is being used for the NULL pkt.
	 */
	if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
		DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
		return res;
	}

	res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
		sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
	if (res < 0) {
		DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
		goto exit;
	} else {
		/* Check occupied ID */
		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
		DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
		DHD_INFO((" Id : %d\n"
			" Period: %d msec\n"
			" Length: %d\n"
			" Packet: 0x",
			mkeep_alive_pktp->keep_alive_id,
			dtoh32(mkeep_alive_pktp->period_msec),
			dtoh16(mkeep_alive_pktp->len_bytes)));

		for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
			DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
		}
		DHD_INFO(("\n"));
	}

	/* Make it stop if available */
	if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
		DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
		memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));

		mkeep_alive_pkt.period_msec = 0;
		mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
		mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
		mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;

		res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
			(char *)&mkeep_alive_pkt,
			WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
	} else {
		DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
		res = BCME_NOTFOUND;
	}
exit:
	if (pbuf) {
		MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
		pbuf = NULL;
	}
	return res;
}
#endif /* KEEP_ALIVE */
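
/*
 * Usage sketch (illustrative only; values are hypothetical): install a 30 s
 * keep-alive carrying a prebuilt IP packet, then remove it later:
 *
 *	dhd_dev_start_mkeep_alive(dhdp, 1, ip_pkt, ip_pkt_len,
 *		src_mac, dst_mac, 30000);
 *	...
 *	dhd_dev_stop_mkeep_alive(dhdp, 1);
 *
 * ID 0 is reserved for the firmware's NULL-frame keep-alive, so callers
 * should hand out IDs starting from 1.
 */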

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static void _dhd_apf_lock_local(dhd_info_t *dhd)
{
	if (dhd) {
		mutex_lock(&dhd->dhd_apf_mutex);
	}
}

static void _dhd_apf_unlock_local(dhd_info_t *dhd)
{
	if (dhd) {
		mutex_unlock(&dhd->dhd_apf_mutex);
	}
}

static int
__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	wl_pkt_filter_t *pkt_filterp;
	wl_apf_program_t *apf_program;
	char *buf;
	u32 cmd_len, buf_len;
	int ifidx, ret;
	char cmd[] = "pkt_filter_add";

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	cmd_len = sizeof(cmd);

	/* Reject the request if the program is NULL or if program_len
	 * exceeds the supported maximum.
	 */
	if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
		DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
			__FUNCTION__, program_len, program));
		return -EINVAL;
	}
	buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
		WL_APF_PROGRAM_FIXED_LEN + program_len;

	buf = MALLOCZ(dhdp->osh, buf_len);
	if (unlikely(!buf)) {
		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
		return -ENOMEM;
	}

	memcpy(buf, cmd, cmd_len);

	pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
	pkt_filterp->id = htod32(filter_id);
	pkt_filterp->negate_match = htod32(FALSE);
	pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);

	apf_program = &pkt_filterp->u.apf_program;
	apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
	apf_program->instr_len = htod16(program_len);
	memcpy(apf_program->instrs, program, program_len);

	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

	if (buf) {
		MFREE(dhdp->osh, buf, buf_len);
	}
	return ret;
}
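
/*
 * Illustrative layout note (not driver code): the "pkt_filter_add" buffer is
 * the iovar name (NUL included via sizeof(cmd)) followed by the filter
 * descriptor and the APF bytecode:
 *
 *	"pkt_filter_add\0" | wl_pkt_filter_t fixed part | APF program bytes
 *
 * The firmware runs its APF interpreter over incoming frames and forwards or
 * drops each one according to the program's verdict.
 */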

static int
__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	wl_pkt_filter_enable_t *pkt_filterp;
	char *buf;
	u32 cmd_len, buf_len;
	int ifidx, ret;
	char cmd[] = "pkt_filter_enable";

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	cmd_len = sizeof(cmd);
	buf_len = cmd_len + sizeof(*pkt_filterp);

	buf = MALLOCZ(dhdp->osh, buf_len);
	if (unlikely(!buf)) {
		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
		return -ENOMEM;
	}

	memcpy(buf, cmd, cmd_len);

	pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
	pkt_filterp->id = htod32(filter_id);
	pkt_filterp->enable = htod32(enable);

	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
		goto exit;
	}

	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
		WLC_SET_VAR, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

exit:
	if (buf) {
		MFREE(dhdp->osh, buf, buf_len);
	}
	return ret;
}

static int
__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
		htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

	return ret;
}

void dhd_apf_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	_dhd_apf_lock_local(dhd);
}

void dhd_apf_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	_dhd_apf_unlock_local(dhd);
}

int
dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	if (!FW_SUPPORTED(dhdp, apf)) {
		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));

		/*
		 * Notify the Android framework that APF is not supported by
		 * setting the version to zero.
		 */
		*version = 0;
		return BCME_OK;
	}

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
		WLC_GET_VAR, FALSE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
			__FUNCTION__, ret));
	}

	return ret;
}

int
dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	if (!FW_SUPPORTED(dhdp, apf)) {
		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
		*max_len = 0;
		return BCME_OK;
	}

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
		WLC_GET_VAR, FALSE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
			__FUNCTION__, ret));
	}

	return ret;
}

int
dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
	uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret;

	DHD_APF_LOCK(ndev);

	/* delete first, if a filter already exists */
	if (dhdp->apf_set) {
		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
		if (unlikely(ret)) {
			goto exit;
		}
		dhdp->apf_set = FALSE;
	}

	ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
	if (ret) {
		goto exit;
	}
	dhdp->apf_set = TRUE;

	if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
		/* Driver is still in (early) suspend state, enable the APF filter back */
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
	}
exit:
	DHD_APF_UNLOCK(ndev);

	return ret;
}

int
dhd_dev_apf_enable_filter(struct net_device *ndev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;
	bool nan_dp_active = false;

	DHD_APF_LOCK(ndev);
#ifdef WL_NAN
	nan_dp_active = wl_cfgnan_is_dp_active(ndev);
#endif /* WL_NAN */
	if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
		!nan_dp_active)) {
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
	}

	DHD_APF_UNLOCK(ndev);

	return ret;
}

int
dhd_dev_apf_disable_filter(struct net_device *ndev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_APF_LOCK(ndev);

	if (dhdp->apf_set) {
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
	}

	DHD_APF_UNLOCK(ndev);

	return ret;
}

int
dhd_dev_apf_delete_filter(struct net_device *ndev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_APF_LOCK(ndev);

	if (dhdp->apf_set) {
		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
		if (!ret) {
			dhdp->apf_set = FALSE;
		}
	}

	DHD_APF_UNLOCK(ndev);

	return ret;
}
#endif /* PKT_FILTER_SUPPORT && APF */
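
/*
 * Usage sketch (illustrative only): the Android Wi-Fi HAL would typically
 * drive this API as follows; error handling elided:
 *
 *	uint32 ver = 0, max = 0;
 *	dhd_dev_apf_get_version(ndev, &ver);	// 0 => APF not supported
 *	dhd_dev_apf_get_max_len(ndev, &max);
 *	if (ver && program_len <= max)
 *		dhd_dev_apf_add_filter(ndev, program, program_len);
 *
 * The filter is only switched on while the host is in (early) suspend, since
 * APF is meant to cut host wakeups while the screen is off.
 */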

static void dhd_hang_process(struct work_struct *work_data)
{
	struct net_device *dev;
#ifdef IFACE_HANG_FORCE_DEV_CLOSE
	struct net_device *ndev;
	uint8 i = 0;
#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	struct dhd_info *dhd =
		container_of(work_data, dhd_info_t, dhd_hang_process_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	if (!dhd || !dhd->iflist[0])
		return;
	dev = dhd->iflist[0]->net;

	if (dev) {
#if defined(WL_WIRELESS_EXT)
		wl_iw_send_priv_event(dev, "HANG");
#endif // endif
#if defined(WL_CFG80211)
		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif // endif
	}
#ifdef IFACE_HANG_FORCE_DEV_CLOSE
	/*
	 * For HW2, dev_close needs to be done so the upper layer can recover
	 * after a hang. For Interposer, skip dev_close so that dhd iovars can
	 * still be used to take a socramdump after the crash; also skip it for
	 * HW4, as handling of the hang event is different there.
	 */

	rtnl_lock();
	for (i = 0; i < DHD_MAX_IFS; i++) {
		ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
		if (ndev && (ndev->flags & IFF_UP)) {
			DHD_ERROR(("ndev->name : %s dev close\n",
				ndev->name));
			dev_close(ndev);
		}
	}
	rtnl_unlock();
#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
}

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
extern dhd_pub_t *link_recovery;
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
	int ret = 0;
#ifdef WL_CFG80211
	struct net_device *primary_ndev;
	struct bcm_cfg80211 *cfg;
#ifdef DHD_FILE_DUMP_EVENT
	dhd_info_t *dhd_info = NULL;
#endif /* DHD_FILE_DUMP_EVENT */
#endif /* WL_CFG80211 */

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
		return -EINVAL;
	}

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
	dhd_info = (dhd_info_t *)dhdp->info;

	if (dhd_info->scheduled_memdump) {
		DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
		dhdp->hang_was_pending = 1;
		return BCME_OK;
	}
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */

#ifdef WL_CFG80211
	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
	if (!primary_ndev) {
		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
		return -ENODEV;
	}
	cfg = wl_get_cfg(primary_ndev);
	if (!cfg) {
		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
		return -EINVAL;
	}

	/* Skip sending the HANG event to the framework if the driver is not ready */
	if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
		DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
		return -ENODEV;
	}
#endif /* WL_CFG80211 */

	if (!dhdp->hang_was_sent) {
#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
		dhdp->hang_counts++;
		if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
			DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
				__func__, dhdp->hang_counts));
			BUG_ON(1);
		}
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
#ifdef DHD_DEBUG_UART
		/* If the PCIe lane has broken, execute the debug uart application
		 * to gather ramdump data from the dongle via uart
		 */
		if (!dhdp->info->duart_execute) {
			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
				(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
				dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
		}
#endif /* DHD_DEBUG_UART */
		dhdp->hang_was_sent = 1;
#ifdef BT_OVER_SDIO
		dhdp->is_bt_recovery_required = TRUE;
#endif // endif
		schedule_work(&dhdp->info->dhd_hang_process_work);
		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
	}
	return ret;
}

int net_os_send_hang_message(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd) {
		/* Report the FW problem when enabled */
		if (dhd->pub.hang_report) {
#ifdef BT_OVER_SDIO
			if (netif_running(dev)) {
#endif /* BT_OVER_SDIO */
				ret = dhd_os_send_hang_message(&dhd->pub);
#ifdef BT_OVER_SDIO
			}
			DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
#endif /* BT_OVER_SDIO */
		} else {
			DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
				__FUNCTION__));
		}
	}
	return ret;
}

int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
{
	dhd_info_t *dhd = NULL;
	dhd_pub_t *dhdp = NULL;
	int reason;

	dhd = DHD_DEV_INFO(dev);
	if (dhd) {
		dhdp = &dhd->pub;
	}

	if (!dhd || !dhdp) {
		return 0;
	}

	reason = bcm_strtoul(string_num, NULL, 0);
	DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));

	if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
		reason = 0;
	}

	dhdp->hang_reason = reason;

	return net_os_send_hang_message(dev);
}
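
/*
 * Flow sketch (illustrative): a detected firmware hang travels
 *
 *	dhd_os_send_hang_message() -> schedule_work(dhd_hang_process_work)
 *		-> dhd_hang_process() -> wl_cfg80211_hang() / "HANG" priv event
 *
 * i.e. the event is raised from workqueue context, so the detecting path
 * (often an ioctl timeout or bus error handler) never blocks on the
 * framework, and hang_was_sent guards against raising it twice.
 */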

int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
}

bool dhd_force_country_change(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd && dhd->pub.up)
		return dhd->pub.force_country_change;
	return FALSE;
}

void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
	if (!dhd->pub.is_blob)
#endif /* DHD_BLOB_EXISTENCE_CHECK */
	{
#if defined(CUSTOM_COUNTRY_CODE)
		get_customized_country_code(dhd->adapter, country_iso_code, cspec,
			dhd->pub.dhd_cflags);
#else
		get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
	}
#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
	else {
		/* Replace the ccode with XZ if the ccode is an undefined country */
		if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
			strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
			strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
			strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
			DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
		}
	}
#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */

	BCM_REFERENCE(dhd);
}

void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif // endif

	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, notify);
#endif // endif
	}
}

void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif // endif
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, true);
#endif // endif
	}
}

int dhd_net_set_fw_path(struct net_device *dev, char *fw)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (!fw || fw[0] == '\0')
		return -EINVAL;

	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
	dhd->fw_path[sizeof(dhd->fw_path) - 1] = '\0';

#if defined(SOFTAP)
	if (strstr(fw, "apsta") != NULL) {
		DHD_INFO(("GOT APSTA FIRMWARE\n"));
		ap_fw_loaded = TRUE;
	} else {
		DHD_INFO(("GOT STA FIRMWARE\n"));
		ap_fw_loaded = FALSE;
	}
#endif // endif
	return 0;
}

void dhd_net_if_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_lock_local(dhd);
}

void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_unlock_local(dhd);
}

static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
	if (dhd)
		mutex_lock(&dhd->dhd_net_if_mutex);
}

static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
	if (dhd)
		mutex_unlock(&dhd->dhd_net_if_mutex);
}

static void dhd_suspend_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
}

static void dhd_suspend_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
}

unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags = 0;

	if (dhd)
		spin_lock_irqsave(&dhd->dhd_lock, flags);

	return flags;
}

void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd)
		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
}

/* Linux specific multipurpose spinlock API */
void *
dhd_os_spin_lock_init(osl_t *osh)
{
	/* Adding 4 bytes since sizeof(spinlock_t) could be 0 when
	 * CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined,
	 * which results in kernel asserts in internal builds.
	 */
	spinlock_t *lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}

void
dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
{
	if (lock)
		MFREE(osh, lock, sizeof(spinlock_t) + 4);
}

unsigned long
dhd_os_spin_lock(void *lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave((spinlock_t *)lock, flags);

	return flags;
}

void
dhd_os_spin_unlock(void *lock, unsigned long flags)
{
	if (lock)
		spin_unlock_irqrestore((spinlock_t *)lock, flags);
}

void *
dhd_os_dbgring_lock_init(osl_t *osh)
{
	struct mutex *mtx = NULL;

	mtx = MALLOCZ(osh, sizeof(*mtx));
	if (mtx)
		mutex_init(mtx);

	return mtx;
}

void
dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
{
	if (mtx) {
		mutex_destroy(mtx);
		MFREE(osh, mtx, sizeof(struct mutex));
	}
}
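
/*
 * Usage sketch (illustrative only): the opaque handle lets bus/proto layers
 * take an IRQ-safe lock without depending on Linux types:
 *
 *	void *lk = dhd_os_spin_lock_init(osh);
 *	unsigned long flags = dhd_os_spin_lock(lk);
 *	...	// critical section, no sleeping
 *	dhd_os_spin_unlock(lk, flags);
 *	dhd_os_spin_lock_deinit(osh, lk);
 */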

static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
	return (atomic_read(&dhd->pend_8021x_cnt));
}

#define MAX_WAIT_FOR_8021X_TX 100

int
dhd_wait_pend8021x(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int timeout = msecs_to_jiffies(10);
	int ntimes = MAX_WAIT_FOR_8021X_TX;
	int pend = dhd_get_pend_8021x_cnt(dhd);

	while (ntimes && pend) {
		if (pend) {
			set_current_state(TASK_INTERRUPTIBLE);
			DHD_PERIM_UNLOCK(&dhd->pub);
			schedule_timeout(timeout);
			DHD_PERIM_LOCK(&dhd->pub);
			set_current_state(TASK_RUNNING);
			ntimes--;
		}
		pend = dhd_get_pend_8021x_cnt(dhd);
	}
	if (ntimes == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		WL_MSG(dev->name, "TIMEOUT\n");
	}
	return pend;
}
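
/*
 * Timing note (illustrative): the loop above polls in 10 ms slices up to
 * MAX_WAIT_FOR_8021X_TX (100) times, i.e. it waits at most roughly one
 * second for outstanding EAPOL (802.1X) frames to drain before giving up
 * and clearing the counter.
 */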

#if defined(DHD_DEBUG)
int write_file(const char *file_name, uint32 flags, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;

	/* change to the KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* open the file to write */
	fp = filp_open(file_name, flags, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
		goto exit;
	}

	/* Write buf to the file */
	ret = compat_vfs_write(fp, buf, size, &pos);
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
		goto exit;
	}

	/* Sync the file from the filesystem to physical media */
	ret = vfs_fsync(fp, 0);
	if (ret < 0) {
		DHD_ERROR(("sync file error, error = %d\n", ret));
		goto exit;
	}
	ret = BCME_OK;

exit:
	/* close the file before returning */
	if (!IS_ERR(fp))
		filp_close(fp, current->files);

	/* restore the previous address limit */
	set_fs(old_fs);

	return ret;
}
#endif /* DHD_DEBUG */

#ifdef DHD_DEBUG
static void
dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
{
	char *type_str = NULL;

	switch (type) {
		case DUMP_TYPE_RESUMED_ON_TIMEOUT:
			type_str = "resumed_on_timeout";
			break;
		case DUMP_TYPE_D3_ACK_TIMEOUT:
			type_str = "D3_ACK_timeout";
			break;
		case DUMP_TYPE_DONGLE_TRAP:
			type_str = "Dongle_Trap";
			break;
		case DUMP_TYPE_MEMORY_CORRUPTION:
			type_str = "Memory_Corruption";
			break;
		case DUMP_TYPE_PKTID_AUDIT_FAILURE:
			type_str = "PKTID_AUDIT_Fail";
			break;
		case DUMP_TYPE_PKTID_INVALID:
			type_str = "PKTID_INVALID";
			break;
		case DUMP_TYPE_SCAN_TIMEOUT:
			type_str = "SCAN_timeout";
			break;
		case DUMP_TYPE_SCAN_BUSY:
			type_str = "SCAN_Busy";
			break;
		case DUMP_TYPE_BY_SYSDUMP:
			if (substr_type == CMD_UNWANTED) {
				type_str = "BY_SYSDUMP_FORUSER_unwanted";
			} else if (substr_type == CMD_DISCONNECTED) {
				type_str = "BY_SYSDUMP_FORUSER_disconnected";
			} else {
				type_str = "BY_SYSDUMP_FORUSER";
			}
			break;
		case DUMP_TYPE_BY_LIVELOCK:
			type_str = "BY_LIVELOCK";
			break;
		case DUMP_TYPE_AP_LINKUP_FAILURE:
			type_str = "BY_AP_LINK_FAILURE";
			break;
		case DUMP_TYPE_AP_ABNORMAL_ACCESS:
			type_str = "INVALID_ACCESS";
			break;
		case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
			type_str = "ERROR_RX_TIMED_OUT";
			break;
		case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
			type_str = "ERROR_TX_TIMED_OUT";
			break;
		case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
			type_str = "CFG_VENDOR_TRIGGERED";
			break;
		case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
			type_str = "BY_INVALID_RING_RDWR";
			break;
		case DUMP_TYPE_IFACE_OP_FAILURE:
			type_str = "BY_IFACE_OP_FAILURE";
			break;
		case DUMP_TYPE_TRANS_ID_MISMATCH:
			type_str = "BY_TRANS_ID_MISMATCH";
			break;
#ifdef DEBUG_DNGL_INIT_FAIL
		case DUMP_TYPE_DONGLE_INIT_FAILURE:
			type_str = "DONGLE_INIT_FAIL";
			break;
#endif /* DEBUG_DNGL_INIT_FAIL */
		case DUMP_TYPE_DONGLE_HOST_EVENT:
			type_str = "BY_DONGLE_HOST_EVENT";
			break;
		case DUMP_TYPE_SMMU_FAULT:
			type_str = "SMMU_FAULT";
			break;
		case DUMP_TYPE_BY_USER:
			type_str = "BY_USER";
			break;
#ifdef DHD_ERPOM
		case DUMP_TYPE_DUE_TO_BT:
			type_str = "DUE_TO_BT";
			break;
#endif /* DHD_ERPOM */
		case DUMP_TYPE_LOGSET_BEYOND_RANGE:
			type_str = "LOGSET_BEYOND_RANGE";
			break;
		case DUMP_TYPE_CTO_RECOVERY:
			type_str = "CTO_RECOVERY";
			break;
		case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
			type_str = "SEQUENTIAL_PRIVCMD_ERROR";
			break;
		case DUMP_TYPE_PROXD_TIMEOUT:
			type_str = "PROXD_TIMEOUT";
			break;
		case DUMP_TYPE_PKTID_POOL_DEPLETED:
			type_str = "PKTID_POOL_DEPLETED";
			break;
		default:
			type_str = "Unknown_type";
			break;
	}

	strncpy(buf, type_str, strlen(type_str));
	buf[strlen(type_str)] = 0;
}

void
dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
{
	char memdump_type[32];
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;

	/* Init file name */
	memset(memdump_path, 0, len);
	memset(memdump_type, 0, sizeof(memdump_type));
	dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
	clear_debug_dump_time(dhdp->debug_dump_time_str);
	get_debug_dump_time(dhdp->debug_dump_time_str);
	snprintf(memdump_path, len, "%s%s_%s_" "%s",
		DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
	if (strstr(fname, "sssr_dump")) {
		DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
	} else {
		DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
			memdump_path, FILE_NAME_HAL_TAG));
	}
}

int
write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
    int ret = 0;
    char memdump_path[128];
    char memdump_type[32];
    uint32 file_mode;

    /* Init file name */
    memset(memdump_path, 0, sizeof(memdump_path));
    memset(memdump_type, 0, sizeof(memdump_type));
    dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
    clear_debug_dump_time(dhd->debug_dump_time_str);
    get_debug_dump_time(dhd->debug_dump_time_str);
    snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
    file_mode = O_CREAT | O_WRONLY | O_SYNC;

    /* print SOCRAM dump file path */
    DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));

#ifdef DHD_LOG_DUMP
    dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
#endif /* DHD_LOG_DUMP */

    /* Write file */
    ret = write_file(memdump_path, file_mode, buf, size);

#ifdef DHD_DUMP_MNGR
    if (ret == BCME_OK) {
        dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
    }
#endif /* DHD_DUMP_MNGR */

    return ret;
}
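
/*
 * Illustrative sketch (not part of the driver): how a caller might persist an
 * arbitrary diagnostics buffer through write_dump_to_file(). The
 * "txstatus_log" tag is hypothetical; the dump-type string and timestamp are
 * appended to the path inside write_dump_to_file() itself.
 */
#if 0
static void example_write_diag_buf(dhd_pub_t *dhdp, uint8 *diag_buf, int diag_len)
{
    /* the file lands under DHD_COMMON_DUMP_PATH and, with DHD_DUMP_MNGR,
     * is enqueued to the dump file manager on success
     */
    if (write_dump_to_file(dhdp, diag_buf, diag_len, "txstatus_log") != BCME_OK) {
        DHD_ERROR(("example: writing diag buffer to file failed\n"));
    }
}
#endif /* 0: example only */
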
#endif /* DHD_DEBUG */

int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
            dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#ifdef CONFIG_HAS_WAKELOCK
        if (dhd->wakelock_rx_timeout_enable)
            wake_lock_timeout(&dhd->wl_rxwake,
                msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
        if (dhd->wakelock_ctrl_timeout_enable)
            wake_lock_timeout(&dhd->wl_ctrlwake,
                msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif /* CONFIG_HAS_WAKELOCK */
        dhd->wakelock_rx_timeout_enable = 0;
        dhd->wakelock_ctrl_timeout_enable = 0;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

int net_os_wake_lock_timeout(struct net_device *dev)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock_timeout(&dhd->pub);
    return ret;
}

int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (val > dhd->wakelock_rx_timeout_enable)
            dhd->wakelock_rx_timeout_enable = val;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return 0;
}

int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (val > dhd->wakelock_ctrl_timeout_enable)
            dhd->wakelock_ctrl_timeout_enable = val;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return 0;
}

int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
        if (wake_lock_active(&dhd->wl_ctrlwake))
            wake_unlock(&dhd->wl_ctrlwake);
#endif /* CONFIG_HAS_WAKELOCK */
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return 0;
}
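
/*
 * Illustrative sketch (not part of the driver): the two timeout-enable calls
 * above only record a requested hold time; the timed wakelock itself is armed
 * later, when dhd_os_wake_lock_timeout() runs (it is called from
 * dhd_os_wake_unlock()). A hypothetical RX completion path might do:
 */
#if 0
static void example_rx_hold_wakelock(struct net_device *dev)
{
    /* ask the host to stay awake ~500 ms after this RX burst */
    net_os_wake_lock_rx_timeout_enable(dev, 500);
    /* ... hand packets up to the stack ... */
    /* the wl_rxwake timeout is armed on the next dhd_os_wake_lock_timeout()
     * call, which also clears the recorded request
     */
}
#endif /* 0: example only */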

int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
    return ret;
}

int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
    return ret;
}

#if defined(DHD_TRACE_WAKE_LOCK)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <linux/hashtable.h>
#else
#include <linux/hash.h>
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

atomic_t trace_wklock_onoff;
typedef enum dhd_wklock_type {
    DHD_WAKE_LOCK,
    DHD_WAKE_UNLOCK,
    DHD_WAIVE_LOCK,
    DHD_RESTORE_LOCK
} dhd_wklock_t;

struct wk_trace_record {
    unsigned long addr;            /* address of the calling instruction */
    dhd_wklock_t lock_type;        /* lock type */
    unsigned long long counter;    /* counter information */
    struct hlist_node wklock_node; /* hash node */
};

static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
    struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
    struct hlist_node *entry;
    int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
    hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    {
        if (wklock_info->addr == addr) {
            return wklock_info;
        }
    }
    return NULL;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#define HASH_ADD(hashtable, node, key) \
    do { \
        hash_add(hashtable, node, key); \
    } while (0)
#else
#define HASH_ADD(hashtable, node, key) \
    do { \
        int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
        hlist_add_head(node, &hashtable[index]); \
    } while (0)
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

#define STORE_WKLOCK_RECORD(wklock_type) \
    do { \
        struct wk_trace_record *wklock_info = NULL; \
        unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
        wklock_info = find_wklock_entry(func_addr); \
        if (wklock_info) { \
            if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
                wklock_info->counter = dhd->wakelock_counter; \
            } else { \
                wklock_info->counter++; \
            } \
        } else { \
            wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
            if (!wklock_info) { \
                printk("Can't allocate wk_trace_record \n"); \
            } else { \
                wklock_info->addr = func_addr; \
                wklock_info->lock_type = wklock_type; \
                if (wklock_type == DHD_WAIVE_LOCK || \
                        wklock_type == DHD_RESTORE_LOCK) { \
                    wklock_info->counter = dhd->wakelock_counter; \
                } else { \
                    wklock_info->counter++; \
                } \
                HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
            } \
        } \
    } while (0)
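
/*
 * Illustrative sketch (not part of the driver): STORE_WKLOCK_RECORD() keys
 * its hash table on the caller's return address, so every distinct call site
 * of the wakelock API gets its own counter. A hypothetical helper that looks
 * up the counter for a known call-site address could be:
 */
#if 0
static unsigned long long example_wklock_count(unsigned long call_site_addr)
{
    struct wk_trace_record *rec = find_wklock_entry(call_site_addr);

    /* NULL means this call site never recorded a lock/unlock event */
    return rec ? rec->counter : 0;
}
#endif /* 0: example only */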

static inline void dhd_wk_lock_rec_dump(void)
{
    int bkt;
    struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
    struct hlist_node *entry = NULL;
    int max_index = ARRAY_SIZE(wklock_history);
    for (bkt = 0; bkt < max_index; bkt++)
        hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    {
        switch (wklock_info->lock_type) {
            case DHD_WAKE_LOCK:
                printk("wakelock lock : %pS lock_counter : %llu \n",
                    (void *)wklock_info->addr, wklock_info->counter);
                break;
            case DHD_WAKE_UNLOCK:
                printk("wakelock unlock : %pS, unlock_counter : %llu \n",
                    (void *)wklock_info->addr, wklock_info->counter);
                break;
            case DHD_WAIVE_LOCK:
                printk("wakelock waive : %pS before_waive : %llu \n",
                    (void *)wklock_info->addr, wklock_info->counter);
                break;
            case DHD_RESTORE_LOCK:
                printk("wakelock restore : %pS, after_waive : %llu \n",
                    (void *)wklock_info->addr, wklock_info->counter);
                break;
        }
    }
}

static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
    unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
    int i;
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_init(wklock_history);
#else
    for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
        INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    atomic_set(&trace_wklock_onoff, 1);
}

static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
    int bkt;
    struct wk_trace_record *wklock_info;
    struct hlist_node *tmp;
    unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
    struct hlist_node *entry = NULL;
    int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
    for (bkt = 0; bkt < max_index; bkt++)
        hlist_for_each_entry_safe(wklock_info, entry, tmp,
            &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
        hash_del(&wklock_info->wklock_node);
#else
        hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
        kfree(wklock_info);
    }
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}

void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
    dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
    unsigned long flags;

    printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record \r\n");
    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
    dhd_wk_lock_rec_dump();
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
#else
#define STORE_WKLOCK_RECORD(wklock_type)
#endif /* DHD_TRACE_WAKE_LOCK */

int dhd_os_wake_lock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
            wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
            dhd_bus_dev_pm_stay_awake(pub);
#endif /* CONFIG_HAS_WAKELOCK */
        }
#ifdef DHD_TRACE_WAKE_LOCK
        if (atomic_read(&trace_wklock_onoff)) {
            STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
        }
#endif /* DHD_TRACE_WAKE_LOCK */
        dhd->wakelock_counter++;
        ret = dhd->wakelock_counter;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }

    return ret;
}

void dhd_event_wake_lock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
        wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_stay_awake(pub);
#endif /* CONFIG_HAS_WAKELOCK */
    }
}

void
dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_HAS_WAKELOCK */
}

void
dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_HAS_WAKELOCK */
}

int net_os_wake_lock(struct net_device *dev)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock(&dhd->pub);
    return ret;
}

int dhd_os_wake_unlock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    dhd_os_wake_lock_timeout(pub);
    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

        if (dhd->wakelock_counter > 0) {
            dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
            if (atomic_read(&trace_wklock_onoff)) {
                STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
            }
#endif /* DHD_TRACE_WAKE_LOCK */
            if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
                wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
                dhd_bus_dev_pm_relax(pub);
#endif /* CONFIG_HAS_WAKELOCK */
            }
            ret = dhd->wakelock_counter;
        }
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}
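
/*
 * Illustrative sketch (not part of the driver): dhd_os_wake_lock() and
 * dhd_os_wake_unlock() are reference counted; only the 0->1 transition takes
 * the underlying OS wakelock and only the 1->0 transition releases it, so a
 * hypothetical critical section always pairs the calls:
 */
#if 0
static void example_wake_locked_work(dhd_pub_t *pub)
{
    dhd_os_wake_lock(pub);   /* counter 0->1 grabs wl_wifi */
    /* ... work that must not race host suspend ... */
    dhd_os_wake_unlock(pub); /* counter 1->0 releases wl_wifi */
}
#endif /* 0: example only */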

void dhd_event_wake_unlock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
        wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_relax(pub);
#endif /* CONFIG_HAS_WAKELOCK */
    }
}

void dhd_pm_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_pmwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_pmwake)) {
            wake_unlock(&dhd->wl_pmwake);
        }
    }
#endif /* CONFIG_HAS_WAKELOCK */
}

void dhd_txfl_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_txflwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_txflwake)) {
            wake_unlock(&dhd->wl_txflwake);
        }
    }
#endif /* CONFIG_HAS_WAKELOCK */
}

int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
    dhd_info_t *dhd;

    if (!pub)
        return 0;
    dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
    /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
    if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
        (wake_lock_active(&dhd->wl_wdwake))))
        return 1;
#elif defined(BCMSDIO)
    if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
        return 1;
#endif /* CONFIG_HAS_WAKELOCK */
    return 0;
}

int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
#if defined(CONFIG_HAS_WAKELOCK)
    int l1, l2, l3, l4, l7, l8, l9;
    int l5 = 0, l6 = 0;
    int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
    dhd_info_t *dhd;

    if (!pub) {
        return 0;
    }
    dhd = (dhd_info_t *)(pub->info);
    if (!dhd) {
        return 0;
    }
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
    c = dhd->wakelock_counter;
    l1 = wake_lock_active(&dhd->wl_wifi);
    l2 = wake_lock_active(&dhd->wl_wdwake);
    l3 = wake_lock_active(&dhd->wl_rxwake);
    l4 = wake_lock_active(&dhd->wl_ctrlwake);
    l7 = wake_lock_active(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
    l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
    l8 = wake_lock_active(&dhd->wl_pmwake);
    l9 = wake_lock_active(&dhd->wl_txflwake);
    lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);

    /* Indicate to the Host to avoid going to suspend if internal locks are up */
    if (lock_active) {
        DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
            "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
            __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
        return 1;
    }
#elif defined(BCMSDIO)
    if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
        return 1;
    }
#endif /* defined(BCMSDIO) */
    return 0;
}

int net_os_wake_unlock(struct net_device *dev)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_unlock(&dhd->pub);
    return ret;
}

int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
            /* if wakelock_wd_counter was never used : lock it at once */
            wake_lock(&dhd->wl_wdwake);
#endif /* CONFIG_HAS_WAKELOCK */
        }
        dhd->wakelock_wd_counter++;
        ret = dhd->wakelock_wd_counter;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_wd_counter > 0) {
            dhd->wakelock_wd_counter = 0;
            if (!dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
                wake_unlock(&dhd->wl_wdwake);
#endif /* CONFIG_HAS_WAKELOCK */
            }
        }
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

#ifdef BCMPCIE_OOB_HOST_WAKE
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_HAS_WAKELOCK */
}

void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_intrwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_intrwake)) {
            wake_unlock(&dhd->wl_intrwake);
        }
    }
#endif /* CONFIG_HAS_WAKELOCK */
}
#endif /* BCMPCIE_OOB_HOST_WAKE */

#ifdef DHD_USE_SCAN_WAKELOCK
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_HAS_WAKELOCK */
}

void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_scanwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_scanwake)) {
            wake_unlock(&dhd->wl_scanwake);
        }
    }
#endif /* CONFIG_HAS_WAKELOCK */
}
#endif /* DHD_USE_SCAN_WAKELOCK */

/* Waive wakelocks for operations such as IOVARs in the suspend path; must be
 * closed by a paired call to dhd_os_wake_lock_restore(). Returns the current
 * watchdog wakelock counter.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

        /* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
        if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
            if (atomic_read(&trace_wklock_onoff)) {
                STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
            }
#endif /* DHD_TRACE_WAKE_LOCK */
            /* record current lock status */
            dhd->wakelock_before_waive = dhd->wakelock_counter;
            dhd->waive_wakelock = TRUE;
        }
        ret = dhd->wakelock_wd_counter;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (!dhd)
        return 0;
    if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
        return 0;

    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

    /* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
    if (!dhd->waive_wakelock)
        goto exit;

    dhd->waive_wakelock = FALSE;
    /* if somebody else acquires a wakelock between waive and restore, we
     * need to make up for it by calling wake_lock or pm_stay_awake; if
     * somebody releases the lock in between, do the same by calling
     * wake_unlock or pm_relax
     */
#ifdef DHD_TRACE_WAKE_LOCK
    if (atomic_read(&trace_wklock_onoff)) {
        STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
    }
#endif /* DHD_TRACE_WAKE_LOCK */

    if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
        wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif /* CONFIG_HAS_WAKELOCK */
    } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
        wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_relax(&dhd->pub);
#endif /* CONFIG_HAS_WAKELOCK */
    }
    dhd->wakelock_before_waive = 0;
exit:
    ret = dhd->wakelock_wd_counter;
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    return ret;
}
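
/*
 * Illustrative sketch (not part of the driver): waive/restore bracket a code
 * path (typically suspend-time IOVARs) during which wakelock requests must
 * not keep the host awake; restore then reconciles whatever lock/unlock
 * traffic happened in between, as described in the comments above.
 */
#if 0
static void example_suspend_path(dhd_pub_t *pub)
{
    dhd_os_wake_lock_waive(pub);
    /* ... issue IOVARs; dhd_os_wake_lock() calls made here do not take the
     * OS wakelock while the waive flag is set ...
     */
    dhd_os_wake_lock_restore(pub);
}
#endif /* 0: example only */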

void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
    DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
    dhd->wakelock_counter = 0;
    dhd->wakelock_rx_timeout_enable = 0;
    dhd->wakelock_ctrl_timeout_enable = 0;
    /* wakelocks prevent a system from going into a low power state */
#ifdef CONFIG_HAS_WAKELOCK
    /* terence 20161023: wl_wifi must not be destroyed on wlan down, since that
     * leads to a NULL pointer dereference in dhd_ioctl_entry; so it is not
     * (re)initialized here
     */
    wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
    wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
    wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
    wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
    wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
    wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
    dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}

void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
    DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
    dhd->wakelock_counter = 0;
    dhd->wakelock_rx_timeout_enable = 0;
    dhd->wakelock_ctrl_timeout_enable = 0;
    /* terence 20161023: wl_wifi must not be destroyed on wlan down, since that
     * leads to a NULL pointer dereference in dhd_ioctl_entry; so it is not
     * destroyed here
     */
    wake_lock_destroy(&dhd->wl_rxwake);
    wake_lock_destroy(&dhd->wl_ctrlwake);
    wake_lock_destroy(&dhd->wl_evtwake);
    wake_lock_destroy(&dhd->wl_pmwake);
    wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
    wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
    dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}

bool dhd_os_check_if_up(dhd_pub_t *pub)
{
    if (!pub)
        return FALSE;
    return pub->up;
}

/* function to collect firmware, chip id and chip version info */
void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
    int i;

    i = snprintf(info_string, sizeof(info_string),
        " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
    printf("%s\n", info_string);

    if (!dhdp)
        return;

    i = snprintf(&info_string[i], sizeof(info_string) - i,
        "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
        dhd_conf_get_chiprev(dhdp));
}

int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
    int ifidx;
    int ret = 0;
    dhd_info_t *dhd = NULL;

    if (!net || !DEV_PRIV(net)) {
        DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
            __FUNCTION__, net, DEV_PRIV(net)));
        return -EINVAL;
    }

    dhd = DHD_DEV_INFO(net);
    if (!dhd)
        return -EINVAL;

    ifidx = dhd_net2idx(dhd, net);
    if (ifidx == DHD_BAD_IF) {
        DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
        return -ENODEV;
    }

    DHD_OS_WAKE_LOCK(&dhd->pub);
    DHD_PERIM_LOCK(&dhd->pub);

    ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
    dhd_check_hang(net, &dhd->pub, ret);

    DHD_PERIM_UNLOCK(&dhd->pub);
    DHD_OS_WAKE_UNLOCK(&dhd->pub);

    return ret;
}
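
/*
 * Illustrative sketch (not part of the driver): dhd_ioctl_entry_local() wraps
 * dhd_wl_ioctl() with wakelock/perimeter protection and hang detection. A
 * hypothetical caller querying the firmware ioctl version through it could
 * look like this (WLC_GET_VERSION is a standard wl ioctl):
 */
#if 0
static int example_get_wlc_version(struct net_device *net)
{
    wl_ioctl_t ioc;
    int ver = 0;

    memset(&ioc, 0, sizeof(ioc));
    ioc.cmd = WLC_GET_VERSION;
    ioc.buf = &ver;
    ioc.len = sizeof(ver);
    ioc.set = FALSE;

    return dhd_ioctl_entry_local(net, &ioc, ioc.cmd);
}
#endif /* 0: example only */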

bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
    struct net_device *net;

    net = dhd_idx2net(dhdp, ifidx);
    if (!net) {
        DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
        return -EINVAL;
    }

    return dhd_check_hang(net, dhdp, ret);
}

/* Return instance */
int dhd_get_instance(dhd_pub_t *dhdp)
{
    return dhdp->info->unit;
}

#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
int dhd_deepsleep(struct net_device *dev, int flag)
{
    char iovbuf[20];
    uint powervar = 0;
    dhd_info_t *dhd;
    dhd_pub_t *dhdp;
    int cnt = 0;
    int ret = 0;

    dhd = DHD_DEV_INFO(dev);
    dhdp = &dhd->pub;

    switch (flag) {
        case 1: /* Deepsleep on */
            DHD_ERROR(("[WiFi] Deepsleep On\n"));
            /* give some time to sysioc_work before deepsleep */
            OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
            /* disable pkt filter */
            dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
            /* Disable MPC */
            powervar = 0;
            ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
                    0, TRUE);

            /* Enable Deepsleep */
            powervar = 1;
            ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
                    NULL, 0, TRUE);
            break;

        case 0: /* Deepsleep Off */
            DHD_ERROR(("[WiFi] Deepsleep Off\n"));

            /* Disable Deepsleep */
            for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
                powervar = 0;
                ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
                        sizeof(powervar), NULL, 0, TRUE);

                ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
                        sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
                if (ret < 0) {
                    DHD_ERROR(("failed to get deepsleep status,"
                        " ret: %d\n", ret));
                } else {
                    if (!(*(int *)iovbuf)) {
                        DHD_ERROR(("deepsleep mode is 0,"
                            " count: %d\n", cnt));
                        break;
                    }
                }
            }

            /* Enable MPC */
            powervar = 1;
            ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
                    0, TRUE);
            break;
    }

    return 0;
}
#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
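
/*
 * Illustrative sketch (not part of the driver): the deepsleep toggles above
 * use the generic dhd_iovar() helper, where the final argument selects set
 * (TRUE) versus get (FALSE). A hypothetical set-then-get round trip for the
 * same "mpc" iovar follows the identical pattern:
 */
#if 0
static int example_iovar_roundtrip(dhd_pub_t *dhdp)
{
    char resbuf[sizeof(uint)];
    uint val = 1;
    int ret;

    /* set "mpc" to 1 */
    ret = dhd_iovar(dhdp, 0, "mpc", (char *)&val, sizeof(val), NULL, 0, TRUE);
    if (ret < 0)
        return ret;

    /* read the value back into resbuf */
    ret = dhd_iovar(dhdp, 0, "mpc", (char *)&val, sizeof(val),
            resbuf, sizeof(resbuf), FALSE);
    return (ret < 0) ? ret : *(int *)resbuf;
}
#endif /* 0: example only */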

#ifdef PROP_TXSTATUS

void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
    dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
    return;
}

void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
    dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
    return;
}

bool dhd_wlfc_skip_fc(void *dhdp, uint8 idx)
{
#ifdef SKIP_WLFC_ON_CONCURRENT

#ifdef WL_CFG80211
    struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
    if (net)
        /* enable flow control in vsdb mode */
        return !(wl_cfg80211_is_concurrent_mode(net));
#else
    return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
    return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
    return FALSE;
}
#endif /* PROP_TXSTATUS */

#ifdef BCMDBGFS
#include <linux/debugfs.h>

typedef struct dhd_dbgfs {
    struct dentry *debugfs_dir;
    struct dentry *debugfs_mem;
    dhd_pub_t *dhdp;
    uint32 size;
} dhd_dbgfs_t;

dhd_dbgfs_t g_dbgfs;

extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);

static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
    file->private_data = inode->i_private;
    return 0;
}

static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
    size_t count, loff_t *ppos)
{
    ssize_t rval;
    uint32 tmp;
    loff_t pos = *ppos;
    size_t ret;

    if (pos < 0)
        return -EINVAL;
    if (pos >= g_dbgfs.size || !count)
        return 0;
    if (count > g_dbgfs.size - pos)
        count = g_dbgfs.size - pos;

    /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
    tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

    ret = copy_to_user(ubuf, &tmp, 4);
    if (ret == count)
        return -EFAULT;

    count -= ret;
    *ppos = pos + count;
    rval = count;

    return rval;
}

static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
    loff_t pos = *ppos;
    size_t ret;
    uint32 buf;

    if (pos < 0)
        return -EINVAL;
    if (pos >= g_dbgfs.size || !count)
        return 0;
    if (count > g_dbgfs.size - pos)
        count = g_dbgfs.size - pos;

    ret = copy_from_user(&buf, ubuf, sizeof(uint32));
    if (ret == count)
        return -EFAULT;

    /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
    dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

    return count;
}

loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
    loff_t pos = -1;

    switch (whence) {
        case 0:
            pos = off;
            break;
        case 1:
            pos = file->f_pos + off;
            break;
        case 2:
            pos = g_dbgfs.size - off;
    }
    return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}

static const struct file_operations dhd_dbg_state_ops = {
    .read = dhd_dbg_state_read,
    .write = dhd_debugfs_write,
    .open = dhd_dbg_state_open,
    .llseek = dhd_debugfs_lseek
};
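
/*
 * Illustrative usage note (not part of the driver): the "mem" debugfs file
 * registered below exposes backplane registers through dhd_readregl()/
 * dhd_writeregl() as aligned 4-byte accesses. Assuming debugfs is mounted at
 * the usual /sys/kernel/debug, a hypothetical userspace reader would seek to
 * a register offset and transfer one 32-bit word at a time, e.g.:
 *
 *	fd = open("/sys/kernel/debug/dhd/mem", O_RDONLY);
 *	lseek(fd, reg_offset, SEEK_SET);       // offset rounded down to 4 bytes
 *	read(fd, &reg_val, sizeof(uint32_t));  // one aligned word per call
 */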

static void dhd_dbgfs_create(void)
{
    if (g_dbgfs.debugfs_dir) {
        g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
            NULL, &dhd_dbg_state_ops);
    }
}

void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
    g_dbgfs.dhdp = dhdp;
    g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */

    g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
    if (IS_ERR(g_dbgfs.debugfs_dir)) {
        g_dbgfs.debugfs_dir = NULL;
        return;
    }

    dhd_dbgfs_create();

    return;
}

void dhd_dbgfs_remove(void)
{
    debugfs_remove(g_dbgfs.debugfs_mem);
    debugfs_remove(g_dbgfs.debugfs_dir);

    bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
#endif /* BCMDBGFS */

#ifdef CUSTOM_SET_CPUCORE
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
    int e_dpc = 0, e_rxf = 0, retry_set = 0;

    if (!(dhd->chan_isvht80)) {
        DHD_ERROR(("%s: skip cpucore setting, chan_isvht80(%d)\n",
            __FUNCTION__, dhd->chan_isvht80));
        return;
    }

    if (DPC_CPUCORE) {
        do {
            if (set == TRUE) {
                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
                    cpumask_of(DPC_CPUCORE));
            } else {
                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
                    cpumask_of(PRIMARY_CPUCORE));
            }
            if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
                DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
                return;
            }
            if (e_dpc < 0)
                OSL_SLEEP(1);
        } while (e_dpc < 0);
    }
    if (RXF_CPUCORE) {
        do {
            if (set == TRUE) {
                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
                    cpumask_of(RXF_CPUCORE));
            } else {
                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
                    cpumask_of(PRIMARY_CPUCORE));
            }
            if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
                DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
                return;
            }
            if (e_rxf < 0)
                OSL_SLEEP(1);
        } while (e_rxf < 0);
    }
    DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

    return;
}
#endif /* CUSTOM_SET_CPUCORE */

#ifdef DHD_MCAST_REGEN
/* Get interface-specific mcast_regen_bss_enable configuration */
int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    return ifp->mcast_regen_bss_enable;
}

/* Set interface-specific mcast_regen configuration */
int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    ifp->mcast_regen_bss_enable = val;

    /* Disable rx_pkt_chain feature for interface if mcast_regen feature
     * is enabled
     */
    dhd_update_rx_pkt_chainable_state(dhdp, idx);
    return BCME_OK;
}
#endif /* DHD_MCAST_REGEN */

/* Get interface-specific ap_isolate configuration */
int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    return ifp->ap_isolate;
}

/* Set interface-specific ap_isolate configuration */
int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    if (ifp)
        ifp->ap_isolate = val;

    return 0;
}

#ifdef DHD_FW_COREDUMP
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
    unsigned long flags = 0;
    dhd_dump_t *dump = NULL;
    dhd_info_t *dhd_info = NULL;
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
    log_dump_type_t type = DLD_BUF_TYPE_ALL;
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */

    dhd_info = (dhd_info_t *)dhdp->info;
    dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
    if (dump == NULL) {
        DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
        return;
    }
    dump->buf = buf;
    dump->bufsize = size;
#ifdef BCMPCIE
    dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
        (uint32 *)(&dump->hscb_bufsize));
#else /* BCMPCIE */
    dump->hscb_bufsize = 0;
#endif /* BCMPCIE */

#ifdef DHD_LOG_DUMP
    dhd_print_buf_addr(dhdp, "memdump", buf, size);
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
    /* Print out buffer information */
    dhd_log_dump_buf_addr(dhdp, &type);
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
#endif /* DHD_LOG_DUMP */

    if (dhdp->memdump_enabled == DUMP_MEMONLY) {
        BUG_ON(1);
    }

#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \
    defined(DNGL_AXI_ERROR_LOGGING)
    if (
#if defined(DEBUG_DNGL_INIT_FAIL)
        (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
#endif /* DEBUG_DNGL_INIT_FAIL */
#ifdef DHD_ERPOM
        (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
#endif /* DHD_ERPOM */
#ifdef DNGL_AXI_ERROR_LOGGING
        (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
#endif /* DNGL_AXI_ERROR_LOGGING */
        FALSE)
    {
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
        log_dump_type_t *flush_type = NULL;
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
        dhd_info->scheduled_memdump = FALSE;
        (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
        /* for dongle init fail cases, 'dhd_mem_dump' does
         * not call 'dhd_log_dump', so call it here.
         */
        flush_type = MALLOCZ(dhdp->osh,
            sizeof(log_dump_type_t));
        if (flush_type) {
            *flush_type = DLD_BUF_TYPE_ALL;
            DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
            dhd_log_dump(dhdp->info, flush_type, 0);
        }
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
        return;
    }
#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */

    dhd_info->scheduled_memdump = TRUE;
    /* bus busy bit for mem dump will be cleared in mem dump
     * work item context, after mem dump file is written
     */
    DHD_GENERAL_LOCK(dhdp, flags);
    DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
    DHD_GENERAL_UNLOCK(dhdp, flags);
    DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
    dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
        DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
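
/*
 * Illustrative sketch (not part of the driver): apart from the early
 * direct-call cases above, the SOC RAM dump is written from the deferred
 * work queue. A hypothetical trigger simply records a dump reason and hands
 * the buffer to dhd_schedule_memdump(); callers normally set memdump_type
 * beforehand (assumption based on the usage visible here):
 */
#if 0
static void example_trigger_memdump(dhd_pub_t *dhdp, uint8 *socram, uint32 len)
{
    dhdp->memdump_type = DUMP_TYPE_BY_USER;  /* pick a dump reason */
    dhd_schedule_memdump(dhdp, socram, len); /* async except init-fail paths */
}
#endif /* 0: example only */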

static int
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
    dhd_info_t *dhd = handle;
    dhd_pub_t *dhdp = NULL;
    unsigned long flags = 0;
    int ret = 0;
    dhd_dump_t *dump = NULL;

    if (!dhd) {
        DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
        return -ENODEV;
    }

    /* log only after the NULL check above */
    DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));

    dhdp = &dhd->pub;
    if (!dhdp) {
        DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
        return -ENODEV;
    }

    /* pick up the dump descriptor before any early exit so it is always
     * freed on the exit path
     */
    dump = (dhd_dump_t *)event_info;
    if (!dump) {
        DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
        ret = -EINVAL;
        goto exit;
    }

    DHD_GENERAL_LOCK(dhdp, flags);
    if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
        DHD_GENERAL_UNLOCK(dhdp, flags);
        DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
        ret = -ENODEV;
        goto exit;
    }
    DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_SSSR_DUMP
    if (dhdp->sssr_inited && dhdp->collect_sssr) {
        dhdpcie_sssr_dump(dhdp);
    }
    dhdp->collect_sssr = FALSE;
#endif /* DHD_SSSR_DUMP */
#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
    dhd_wait_for_file_dump(dhdp);
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */

    /*
     * If kernel does not have file write access enabled
     * then skip writing dumps to files.
     * The dumps will be pushed to HAL layer which will
     * write into files
     */
#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL

    if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
        DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
        dhd->pub.memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
    }

    /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
     * context; no need to schedule another work queue for log dump. In case of a
     * user-initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP), the cfg
     * layer is itself scheduling the log_dump work queue, and that path is not
     * disturbed. If 'dhd_mem_dump' is called directly, we do not collect
     * debug_dump, since it may be called from a non-sleepable context.
     */
#ifdef DHD_LOG_DUMP
    if (dhd->scheduled_memdump &&
        dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
        log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
            sizeof(log_dump_type_t));
        if (flush_type) {
            *flush_type = DLD_BUF_TYPE_ALL;
            DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
            dhd_log_dump(dhd, flush_type, 0);
        }
    }
#endif /* DHD_LOG_DUMP */

    clear_debug_dump_time(dhdp->debug_dump_time_str);

    /* before calling bug on, wait for other logs to be dumped.
     * we cannot wait in case dhd_mem_dump is called directly
     * as it may not be in a sleepable context
     */
    if (dhd->scheduled_memdump) {
        uint bitmask = 0;
        int timeleft = 0;
#ifdef DHD_SSSR_DUMP
        bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
#endif /* DHD_SSSR_DUMP */
        if (bitmask != 0) {
            DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
                __FUNCTION__, dhdp->dhd_bus_busy_state));
            timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
                &dhdp->dhd_bus_busy_state, bitmask, 0);
            if ((timeleft == 0) || (timeleft == 1)) {
                DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
                    __FUNCTION__, dhdp->dhd_bus_busy_state));
            }
        }
    }

    if (dump->hscb_buf && dump->hscb_bufsize) {
        DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
        if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
            dump->hscb_bufsize, "mem_dump_hscb")) {
            DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
            dhd->pub.memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
        }
    }
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

    DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
    if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
        dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif /* DHD_LOG_DUMP */
        dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
#ifdef DHD_DEBUG_UART
        dhd->pub.memdump_success == TRUE &&
#endif /* DHD_DEBUG_UART */
#ifdef DNGL_EVENT_SUPPORT
        dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
#endif /* DNGL_EVENT_SUPPORT */
        dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {

#ifdef SHOW_LOGTRACE
        /* Wait till logtrace context is flushed */
        dhd_flush_logtrace_process(dhd);
#endif /* SHOW_LOGTRACE */

        DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
        BUG_ON(1);
    }
    DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));

exit:
    if (dump) {
        MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
    }
    DHD_GENERAL_LOCK(dhdp, flags);
    DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
    dhd_os_busbusy_wake(dhdp);
    DHD_GENERAL_UNLOCK(dhdp, flags);
    dhd->scheduled_memdump = FALSE;
    if (dhdp->hang_was_pending) {
        DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
        dhd_os_send_hang_message(dhdp);
        dhdp->hang_was_pending = 0;
    }
    DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
    return ret;
}
#endif /* DHD_FW_COREDUMP */

#ifdef DHD_SSSR_DUMP
int
dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
{
    dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
    dhd_pub_t *dhdp = &dhd_info->pub;
    int pos = 0, ret = BCME_ERROR;
    uint dig_buf_size = 0;

    if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
        dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
    } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
        dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
        dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
    }

    if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
        ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
            NULL, user_buf, dig_buf_size, &pos);
    }
    return ret;
}

int
dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
{
    dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
    dhd_pub_t *dhdp = &dhd_info->pub;
    int pos = 0, ret = BCME_ERROR;
    uint dig_buf_size = 0;

    if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
        dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
    } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
        dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
        dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
    }

    if (dhdp->sssr_dig_buf_after) {
        ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
            NULL, user_buf, dig_buf_size, &pos);
    }
    return ret;
}
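
/*
 * Illustrative sketch (not part of the driver): the dig buffer size selection
 * above (prefer the VASIP SR size, else the dig_mem_info size when the reg
 * info is long enough to carry it) recurs in several functions in this file;
 * a hypothetical helper could factor it out:
 */
#if 0
static uint example_sssr_dig_buf_size(dhd_pub_t *dhdp)
{
    if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size)
        return dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
    if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
        dhdp->sssr_reg_info.dig_mem_info.dig_sr_size)
        return dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
    return 0;
}
#endif /* 0: example only */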

int
dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
{
    dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
    dhd_pub_t *dhdp = &dhd_info->pub;
    int pos = 0, ret = BCME_ERROR;

    if (dhdp->sssr_d11_before[core] &&
        dhdp->sssr_d11_outofreset[core] &&
        (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
        ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
            NULL, user_buf, len, &pos);
    }
    return ret;
}

int
dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
{
    dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
    dhd_pub_t *dhdp = &dhd_info->pub;
    int pos = 0, ret = BCME_ERROR;

    if (dhdp->sssr_d11_after[core] &&
        dhdp->sssr_d11_outofreset[core]) {
        ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
            NULL, user_buf, len, &pos);
    }
    return ret;
}

static void
dhd_sssr_dump_to_file(dhd_info_t *dhdinfo)
{
    dhd_info_t *dhd = dhdinfo;
    dhd_pub_t *dhdp;
    int i;
    char before_sr_dump[128];
    char after_sr_dump[128];
    unsigned long flags = 0;
    uint dig_buf_size = 0;

    DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

    if (!dhd) {
        DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
        return;
    }

    dhdp = &dhd->pub;

    DHD_GENERAL_LOCK(dhdp, flags);
    DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
    if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
        DHD_GENERAL_UNLOCK(dhdp, flags);
        DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
        goto exit;
    }
    DHD_GENERAL_UNLOCK(dhdp, flags);

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        /* Init file name */
        memset(before_sr_dump, 0, sizeof(before_sr_dump));
        memset(after_sr_dump, 0, sizeof(after_sr_dump));

        snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
            "sssr_dump_core", i, "before_SR");
        snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
            "sssr_dump_core", i, "after_SR");

        if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
            (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
            if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
                dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
                DHD_ERROR(("%s: writing SSSR core %d dump before to the file failed\n",
                    __FUNCTION__, i));
            }
        }
        if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
            if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
                dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
                DHD_ERROR(("%s: writing SSSR core %d dump after to the file failed\n",
                    __FUNCTION__, i));
            }
        }
    }

    if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
        dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
    } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
        dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
        dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
    }

    if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
        if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
            dig_buf_size, "sssr_dump_dig_before_SR")) {
            DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
                __FUNCTION__));
        }
    }

    if (dhdp->sssr_dig_buf_after) {
        if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
            dig_buf_size, "sssr_dump_dig_after_SR")) {
            DHD_ERROR(("%s: writing SSSR Dig dump after to the file failed\n",
                __FUNCTION__));
        }
    }

exit:
    DHD_GENERAL_LOCK(dhdp, flags);
    DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
    dhd_os_busbusy_wake(dhdp);
    DHD_GENERAL_UNLOCK(dhdp, flags);
}
17358
965f77c4
RC
void
dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
{
	dhdp->sssr_dump_mode = dump_mode;

	/*
	 * If the kernel does not have file write access enabled,
	 * skip writing dumps to files. The dumps will instead be
	 * pushed to the HAL layer, which writes them into files.
	 */
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	return;
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	/*
	 * Call path: dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump.
	 * Without a workqueue -
	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT:
	 * these are called from their own handlers, not in interrupt context.
	 * With a workqueue - all other DUMP_TYPEs: dhd_mem_dump is called from a
	 * workqueue, so there is no need to dump SSSR from a workqueue here.
	 */
	DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
	dhd_sssr_dump_to_file(dhdp->info);
}
#endif /* DHD_SSSR_DUMP */

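/*
 * Editor's sketch (not part of the original driver, "#if 0"-guarded so it is
 * never compiled): a minimal caller of dhd_write_sssr_dump(). The trap-handler
 * context and the name example_on_trap() are hypothetical; SSSR_DUMP_MODE_SSSR
 * is the capture mode used by the code above.
 */
#if 0
static void
example_on_trap(dhd_pub_t *dhdp)
{
	/* capture the D11/dig before+after snapshots to files */
	dhd_write_sssr_dump(dhdp, SSSR_DUMP_MODE_SSSR);
}
#endif /* end of editor's example */
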
#ifdef DHD_LOG_DUMP
static void
dhd_log_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	log_dump_type_t *type = (log_dump_type_t *)event_info;

	if (!dhd || !type) {
		DHD_ERROR(("%s: dhd or type is NULL\n", __FUNCTION__));
		return;
	}

#ifdef WL_CFG80211
	/* flush the fw side logs */
	wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
		FW_LOGSET_MASK_ALL);
#endif /* WL_CFG80211 */
	/* There are currently three possible contexts from which
	 * a log dump can be scheduled:
	 * 1. TRAP 2. supplicant DEBUG_DUMP private driver command
	 * 3. HEALTH CHECK event
	 * The concise debug info buffer is a shared resource
	 * and in case a trap is one of the contexts then both the
	 * scheduled work queues need to run because trap data is
	 * essential for debugging. Hence a mutex lock is acquired
	 * before calling do_dhd_log_dump().
	 */
	DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
	dhd_os_logdump_lock(&dhd->pub);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
		DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
	}
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_os_logdump_unlock(&dhd->pub);
}

void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
{
	DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		type, DHD_WQ_WORK_DHD_LOG_DUMP,
		dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}

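/*
 * Editor's sketch ("#if 0"-guarded, hypothetical): scheduling a full log
 * dump. The 'type' argument must be heap-allocated because the deferred
 * handler path frees it with MFREE() in do_dhd_log_dump() below;
 * DLD_BUF_TYPE_ALL selects all DLD buffers.
 */
#if 0
static void
example_request_log_dump(dhd_pub_t *dhdp)
{
	log_dump_type_t *flush_type = (log_dump_type_t *)MALLOCZ(dhdp->osh,
		sizeof(log_dump_type_t));

	if (flush_type) {
		*flush_type = DLD_BUF_TYPE_ALL;
		dhd_schedule_log_dump(dhdp, flush_type);
	}
}
#endif /* end of editor's example */
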
static void
dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
{
#ifdef DHD_FW_COREDUMP
	if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
		(dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
		(dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
#else
	if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)
#endif /* DHD_FW_COREDUMP */
	{
#if defined(CONFIG_ARM64)
		DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
			name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
		DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
			name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	}
}

static void
dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	int i;
	unsigned long wr_size = 0;
	struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
	size_t log_size = 0;
	char buf_name[DHD_PRINT_BUF_NAME_LEN];
	dhd_dbg_ring_t *ring = NULL;

	BCM_REFERENCE(ring);

	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		log_size = (unsigned long)dld_buf->max -
			(unsigned long)dld_buf->buffer;
		if (dld_buf->wraparound) {
			wr_size = log_size;
		} else {
			wr_size = (unsigned long)dld_buf->present -
				(unsigned long)dld_buf->front;
		}
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
	}

#ifdef EWP_ECNTRS_LOGGING
	/* periodic flushing of ecounters is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_ecntr_enable &&
		dhdp->ecntr_dbg_ring) {

		ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
		dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
		dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
			LOG_DUMP_ECNTRS_MAX_BUFSIZE);
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef DHD_STATUS_LOGGING
	if (dhdp->statlog) {
		dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
			dhd_statlog_get_logbuf_len(dhdp));
	}
#endif /* DHD_STATUS_LOGGING */

#ifdef EWP_RTT_LOGGING
	/* periodic flushing of the rtt log is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_rtt_enable &&
		dhdp->rtt_dbg_ring) {

		ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
		dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE);
		dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
			LOG_DUMP_RTT_MAX_BUFSIZE);
	}
#endif /* EWP_RTT_LOGGING */

#ifdef BCMPCIE
	if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
		dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
			BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	}
#endif /* BCMPCIE */

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
	/* if health check event was received */
	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
			HEALTH_CHK_BUF_SIZE);
	}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */

	/* append the concise debug information */
	if (dhdp->concise_dbg_buf) {
		dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
			CONCISE_DUMP_BUFLEN);
	}
}

#ifdef DHD_SSSR_DUMP
int
dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
{
	int i = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* core 0 */
	i = 0;
	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_C0_D11_BEFORE]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
			dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
#endif /* DHD_LOG_DUMP */
	}
	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
		arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_C0_D11_AFTER]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
			dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
#endif /* DHD_LOG_DUMP */
	}

	/* core 1 */
	i = 1;
	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_C1_D11_BEFORE]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
			dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
#endif /* DHD_LOG_DUMP */
	}
	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
		arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_C1_D11_AFTER]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
			dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
#endif /* DHD_LOG_DUMP */
	}

	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
		arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_BEFORE]));
		DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_AFTER]));
#ifdef DHD_LOG_DUMP
		if (dhd->sssr_dig_buf_before) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
				dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
		}
		if (dhd->sssr_dig_buf_after) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
				dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
		}
#endif /* DHD_LOG_DUMP */
	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
		arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
		arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_BEFORE]));
		DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_AFTER]));
#ifdef DHD_LOG_DUMP
		if (dhd->sssr_dig_buf_before) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
				dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
		}
		if (dhd->sssr_dig_buf_after) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
				dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
		}
#endif /* DHD_LOG_DUMP */
	}
	return BCME_OK;
}

void
dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
	dhd_pub_t *dhdp = &dhd_info->pub;

	if (dhdp->sssr_dump_collected) {
		dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
	}
}
#endif /* DHD_SSSR_DUMP */

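/*
 * Editor's sketch ("#if 0"-guarded): how a HAL-facing caller can size and
 * then fetch one SSSR buffer with the helpers above. The array bound
 * "SSSR_DIG_AFTER + 1" is an assumption (SSSR_DIG_AFTER is the largest
 * index used above); example_fetch_c0_after() is hypothetical.
 */
#if 0
static int
example_fetch_c0_after(struct net_device *ndev, void *user_buf)
{
	uint32 arr_len[SSSR_DIG_AFTER + 1] = {0};	/* assumed bound */

	dhd_nla_put_sssr_dump_len(ndev, arr_len);
	if (arr_len[SSSR_C0_D11_AFTER] == 0)
		return BCME_ERROR;
	/* copy core 0 "after SR" data into the user buffer */
	return dhd_sssr_dump_d11_buf_after(ndev, user_buf,
		arr_len[SSSR_C0_D11_AFTER], 0);
}
#endif /* end of editor's example */
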
uint32
dhd_get_time_str_len(void)
{
	char *ts = NULL, time_str[128];

	ts = dhd_log_dump_get_timestamp();
	snprintf(time_str, sizeof(time_str),
		"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
	return strlen(time_str);
}

#if defined(BCMPCIE)
uint32
dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (dhdp->extended_trap_data) {
		length = (strlen(EXT_TRAP_LOG_HDR)
			+ sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	}
	return length;
}
#endif /* BCMPCIE */

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
uint32
dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		length = (strlen(HEALTH_CHK_LOG_HDR)
			+ sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
	}
	return length;
}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */

uint32
dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;
	int remain_len = 0;	/* int, not uint32, so the <= 0 error check can fire */

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (dhdp->concise_dbg_buf) {
		remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		if (remain_len <= 0) {
			DHD_ERROR(("%s: error getting concise debug info !\n",
				__FUNCTION__));
			return length;
		}
		length = (strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr) +
			(CONCISE_DUMP_BUFLEN - remain_len));
	}
	return length;
}

uint32
dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	dhd_info_t *dhd_info;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
		length = dhd_log_dump_cookie_len(dhdp);
	}
	return length;
}

#ifdef DHD_DUMP_PCIE_RINGS
uint32
dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;
	uint16 h2d_flowrings_total;
	int remain_len = 0;	/* int, not uint32, so the <= 0 error check can fire */

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (dhdp->concise_dbg_buf) {
		remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		if (remain_len <= 0) {
			DHD_ERROR(("%s: error getting concise debug info !\n",
				__FUNCTION__));
			return length;
		}
	}

	length += strlen(FLOWRING_DUMP_HDR);
	length += CONCISE_DUMP_BUFLEN - remain_len;
	length += sizeof(sec_hdr);
	h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
	length += ((H2DRING_TXPOST_ITEMSIZE
		* H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total)
		+ (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
		+ (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
		+ (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
		+ (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
		+ (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
#ifdef EWP_EDL
		+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
#else
		+ (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
		+ (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
#endif /* EWP_EDL */
	return length;
}
#endif /* DHD_DUMP_PCIE_RINGS */

#ifdef EWP_ECNTRS_LOGGING
uint32
dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
{
	dhd_info_t *dhd_info;
	log_dump_section_hdr_t sec_hdr;
	int length = 0;
	dhd_dbg_ring_t *ring;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
		length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
	}
	return length;
}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
uint32
dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
{
	dhd_info_t *dhd_info;
	log_dump_section_hdr_t sec_hdr;
	int length = 0;
	dhd_dbg_ring_t *ring;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return length;

	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
		length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
	}
	return length;
}
#endif /* EWP_RTT_LOGGING */

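/*
 * Editor's sketch ("#if 0"-guarded): the dhd_get_*_len() helpers above let a
 * caller size one flat buffer before exporting the sections, mirroring the
 * order used by do_dhd_log_dump() below. Only the unconditional getters are
 * shown; the ifdef'd ones follow the same pattern.
 */
#if 0
static uint32
example_debug_dump_space(struct net_device *ndev)
{
	uint32 total = dhd_get_time_str_len();

	total += dhd_get_dhd_dump_len(ndev, NULL);
	total += dhd_get_cookie_log_len(ndev, NULL);
	return total;
}
#endif /* end of editor's example */
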
int
dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, int type, void *pos)
{
	int ret = BCME_OK;
	struct dhd_log_dump_buf *dld_buf;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	dld_buf = &g_dld_buf[type];

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	} else if (!dhdp) {
		return BCME_ERROR;
	}

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	dhd_init_sec_hdr(&sec_hdr);

	/* write the section header first */
	ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
		strlen(dld_hdrs[type].hdr_str), pos);
	if (ret < 0)
		goto exit;
	len -= (uint32)strlen(dld_hdrs[type].hdr_str);
	len -= (uint32)sizeof(sec_hdr);
	sec_hdr.type = dld_hdrs[type].sec_type;
	sec_hdr.length = len;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
	if (ret < 0)
		goto exit;
	ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
	if (ret < 0)
		goto exit;

exit:
	return ret;
}

static int
dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	unsigned long flags = 0;
#ifdef EWP_EDL
	int i = 0;
#endif /* EWP_EDL */
	dhd_info_t *dhd_info = NULL;

	/* If dhdp is NULL, it is extremely unlikely that a log dump will be
	 * scheduled, so not freeing 'type' here is OK. Even if we wanted to
	 * free 'type' we could not, since 'dhdp->osh' is unavailable
	 * when dhdp is NULL.
	 */
	if (!dhdp || !type) {
		if (dhdp) {
			DHD_GENERAL_LOCK(dhdp, flags);
			DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
			dhd_os_busbusy_wake(dhdp);
			DHD_GENERAL_UNLOCK(dhdp, flags);
		}
		return BCME_ERROR;
	}

	dhd_info = (dhd_info_t *)dhdp->info;
	/* in case of a trap, get the preserved logs from ETD */
#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
	if (dhdp->dongle_trap_occured &&
		dhdp->extended_trap_data) {
		dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
			&dhd_info->event_data);
	}
#endif /* BCMPCIE && EWP_ETD_PRSRV_LOGS */

	/* flush the event work items to get any fw events/logs;
	 * flush_work is a blocking call
	 */
#ifdef SHOW_LOGTRACE
#ifdef EWP_EDL
	if (dhd_info->pub.dongle_edl_support) {
		/* wait till existing edl items are processed */
		dhd_flush_logtrace_process(dhd_info);
		/* dhd_flush_logtrace_process will ensure the work items in the ring
		 * (EDL ring) from rd to wr are processed. But if wr had
		 * wrapped around, only the work items from rd to ring-end are processed.
		 * So to ensure that the work items at the
		 * beginning of the ring are also processed in the wrap-around case,
		 * call it twice.
		 */
		for (i = 0; i < 2; i++) {
			/* blocks till the edl items are processed */
			dhd_flush_logtrace_process(dhd_info);
		}
	} else {
		dhd_flush_logtrace_process(dhd_info);
	}
#else
	dhd_flush_logtrace_process(dhd_info);
#endif /* EWP_EDL */
#endif /* SHOW_LOGTRACE */

	return BCME_OK;
}

int
dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
{
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	memset(dump_path, 0, size);

	switch (dhdp->debug_dump_subcmd) {
	case CMD_UNWANTED:
		snprintf(dump_path, size, "%s",
			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
			DHD_DUMP_SUBSTR_UNWANTED);
		break;
	case CMD_DISCONNECTED:
		snprintf(dump_path, size, "%s",
			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
			DHD_DUMP_SUBSTR_DISCONNECTED);
		break;
	default:
		snprintf(dump_path, size, "%s",
			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
	}

	if (!dhdp->logdump_periodic_flush) {
		get_debug_dump_time(dhdp->debug_dump_time_str);
		snprintf(dump_path + strlen(dump_path),
			size - strlen(dump_path),
			"_%s", dhdp->debug_dump_time_str);
	}
	return BCME_OK;
}

uint32
dhd_get_dld_len(int log_type)
{
	unsigned long wr_size = 0;
	unsigned long buf_size = 0;
	unsigned long flags = 0;
	struct dhd_log_dump_buf *dld_buf;
	log_dump_section_hdr_t sec_hdr;

	/* calculate the length of the log */
	dld_buf = &g_dld_buf[log_type];
	buf_size = (unsigned long)dld_buf->max -
		(unsigned long)dld_buf->buffer;

	if (dld_buf->wraparound) {
		wr_size = buf_size;
	} else {
		/* need to hold the lock before accessing the 'present' and 'front' ptrs */
		spin_lock_irqsave(&dld_buf->lock, flags);
		wr_size = (unsigned long)dld_buf->present -
			(unsigned long)dld_buf->front;
		spin_unlock_irqrestore(&dld_buf->lock, flags);
	}
	return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
}

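/*
 * Editor's sketch ("#if 0"-guarded): pairing dhd_get_dld_len() with
 * dhd_get_dld_log_dump(), the same sequence do_dhd_log_dump() uses per
 * buffer. Here the data goes to an already-open kernel file ('fp'), so
 * 'user_buf' is NULL; example_export_one_dld() is hypothetical.
 */
#if 0
static int
example_export_one_dld(dhd_pub_t *dhdp, void *fp, int log_type, loff_t *pos)
{
	uint32 len = dhd_get_dld_len(log_type);

	return dhd_get_dld_log_dump(NULL, dhdp, NULL, fp, len, log_type, pos);
}
#endif /* end of editor's example */
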
static void
dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
{
	char *ts = NULL;
	memset(time_str, 0, size);
	ts = dhd_log_dump_get_timestamp();
	snprintf(time_str, size,
		"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
}

int
dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
{
	char *ts = NULL;
	int ret = 0;
	char time_str[128];

	memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
	ts = dhd_log_dump_get_timestamp();
	snprintf(time_str, sizeof(time_str),
		"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);

	/* write the timestamp hdr to the file first */
	ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
	}
	return ret;
}

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
int
dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		/* write the section header first */
		ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
			strlen(HEALTH_CHK_LOG_HDR), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
		sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
		sec_hdr.length = HEALTH_CHK_BUF_SIZE;
		ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)sizeof(sec_hdr);
		/* write the log */
		ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
			user_buf, len, pos);
		if (ret < 0)
			goto exit;
	}
exit:
	return ret;
}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */

#ifdef BCMPCIE
int
dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	/* append extended trap data to the file in case of traps */
	if (dhdp->dongle_trap_occured &&
		dhdp->extended_trap_data) {
		/* write the section header first */
		ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
			strlen(EXT_TRAP_LOG_HDR), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
		sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
		sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
		ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)sizeof(sec_hdr);
		/* write the log */
		ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
			user_buf, len, pos);
		if (ret < 0)
			goto exit;
	}
exit:
	return ret;
}
#endif /* BCMPCIE */

int
dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
	if (ret < 0)
		goto exit;

	len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
	sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
	sec_hdr.length = len;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
	if (ret < 0)
		goto exit;

	len -= (uint32)sizeof(sec_hdr);

	if (dhdp->concise_dbg_buf) {
		dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
		if (ret < 0)
			goto exit;
	}

exit:
	return ret;
}

int
dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
		ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
	}
	return ret;
}

#ifdef DHD_DUMP_PCIE_RINGS
int
dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret = BCME_OK;
	uint32 remain_len = 0;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
	memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);

	/* write the section header first */
	ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
		strlen(FLOWRING_DUMP_HDR), pos);
	if (ret < 0)
		goto exit;

	/* write the ring summary */
	ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
		(CONCISE_DUMP_BUFLEN - remain_len), pos);
	if (ret < 0)
		goto exit;

	sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
	sec_hdr.length = len;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
	if (ret < 0)
		goto exit;

	/* write the log */
	ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
	if (ret < 0)
		goto exit;

exit:
	return ret;
}
#endif /* DHD_DUMP_PCIE_RINGS */

#ifdef EWP_ECNTRS_LOGGING
int
dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret = BCME_OK;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	if (logdump_ecntr_enable &&
		dhdp->ecntr_dbg_ring) {
		sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
		ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
			user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
	}
	return ret;
}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
int
dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret = BCME_OK;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
		ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
			user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
	}
	return ret;
}
#endif /* EWP_RTT_LOGGING */

#ifdef DHD_STATUS_LOGGING
int
dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}

	if (!dhdp) {
		return BCME_ERROR;
	}

	return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
}

uint32
dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
{
	dhd_info_t *dhd_info;
	uint32 length = 0;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}

	if (dhdp) {
		length = dhd_statlog_get_logbuf_len(dhdp);
	}

	return length;
}
#endif /* DHD_STATUS_LOGGING */

void
dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
{
	/* prep the section header */
	memset(sec_hdr, 0, sizeof(*sec_hdr));
	sec_hdr->magic = LOG_DUMP_MAGIC;
	sec_hdr->timestamp = local_clock();
}

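/*
 * Editor's sketch ("#if 0"-guarded): every section written by the print
 * helpers above follows the same layout - an ASCII header string, then a
 * log_dump_section_hdr_t, then the payload. EXAMPLE_HDR and
 * example_write_section() are hypothetical; the section type reuses an
 * existing enum value purely for illustration.
 */
#if 0
#define EXAMPLE_HDR "\n---------- Example section ----------\n"
static int
example_write_section(void *fp, char *payload, uint32 payload_len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret;

	dhd_init_sec_hdr(&sec_hdr);
	sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
	sec_hdr.length = payload_len;

	ret = dhd_export_debug_data(EXAMPLE_HDR, fp, NULL, strlen(EXAMPLE_HDR), pos);
	if (ret < 0)
		return ret;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, NULL, sizeof(sec_hdr), pos);
	if (ret < 0)
		return ret;
	return dhd_export_debug_data(payload, fp, NULL, payload_len, pos);
}
#endif /* end of editor's example */
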
/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
static int
do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	int ret = 0, i = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char dump_path[128];
	uint32 file_mode;
	unsigned long flags = 0;
	size_t log_size = 0;
	size_t fspace_remain = 0;
	struct kstat stat;
	char time_str[128];
	unsigned int len = 0;
	log_dump_section_hdr_t sec_hdr;

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
		goto exit1;
	}
	DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

	if ((ret = dhd_log_flush(dhdp, type)) < 0) {
		goto exit1;
	}
	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));

	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
	DHD_ERROR(("DHD version: %s\n", dhd_version));
	DHD_ERROR(("F/W version: %s\n", fw_version));

	dhd_log_dump_buf_addr(dhdp, type);

	dhd_get_time_str(dhdp, time_str, 128);

	/* if this is the first time after dhd is loaded,
	 * or if periodic flush is disabled, clear the log file
	 */
	if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
		file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
	else
		file_mode = O_CREAT | O_RDWR | O_SYNC;

	fp = filp_open(dump_path, file_mode, 0664);
	if (IS_ERR(fp)) {
		/* If an android image is installed, try the '/data' directory */
#if defined(CONFIG_X86)
		DHD_ERROR(("%s: File open error on installed android image, trying /data...\n",
			__FUNCTION__));
		snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
		if (!dhdp->logdump_periodic_flush) {
			snprintf(dump_path + strlen(dump_path),
				sizeof(dump_path) - strlen(dump_path),
				"_%s", dhdp->debug_dump_time_str);
		}
		fp = filp_open(dump_path, file_mode, 0664);
		if (IS_ERR(fp)) {
			ret = PTR_ERR(fp);
			DHD_ERROR(("open file error, err = %d\n", ret));
			goto exit2;
		}
		DHD_ERROR(("debug_dump_path = %s\n", dump_path));
#else
		ret = PTR_ERR(fp);
		DHD_ERROR(("open file error, err = %d\n", ret));
		goto exit2;
#endif /* CONFIG_X86 */
	}

	ret = vfs_stat(dump_path, &stat);
	if (ret < 0) {
		DHD_ERROR(("file stat error, err = %d\n", ret));
		goto exit2;
	}

	/* if someone else has changed the file */
	if (dhdp->last_file_posn != 0 &&
		stat.size < dhdp->last_file_posn) {
		dhdp->last_file_posn = 0;
	}

	if (dhdp->logdump_periodic_flush) {
		log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
		/* calculate the amount of space required to dump all logs */
		for (i = 0; i < DLD_BUFFER_NUM; ++i) {
			if (*type != DLD_BUF_TYPE_ALL && i != *type)
				continue;

			if (g_dld_buf[i].wraparound) {
				log_size += (unsigned long)g_dld_buf[i].max
					- (unsigned long)g_dld_buf[i].buffer;
			} else {
				spin_lock_irqsave(&g_dld_buf[i].lock, flags);
				log_size += (unsigned long)g_dld_buf[i].present -
					(unsigned long)g_dld_buf[i].front;
				spin_unlock_irqrestore(&g_dld_buf[i].lock, flags);
			}
			log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);

			if (*type != DLD_BUF_TYPE_ALL && i == *type)
				break;
		}

		ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
		if (ret < 0) {
			DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
			goto exit2;
		}
		pos = fp->f_pos;

		/* if the max file size is reached, wrap around to the beginning of
		 * the file; we're treating the file as a large ring buffer
		 */
		fspace_remain = logdump_max_filesize - pos;
		if (log_size > fspace_remain) {
			fp->f_pos -= pos;
			pos = fp->f_pos;
		}
	}

	dhd_print_time_str(0, fp, len, &pos);

	for (i = 0; i < DLD_BUFFER_NUM; ++i) {

		if (*type != DLD_BUF_TYPE_ALL && i != *type)
			continue;

		len = dhd_get_dld_len(i);
		dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
		if (*type != DLD_BUF_TYPE_ALL)
			break;
	}

#ifdef EWP_ECNTRS_LOGGING
	/* periodic flushing of ecounters is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_ecntr_enable &&
		dhdp->ecntr_dbg_ring) {
		dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
			fp, (unsigned long *)&pos,
			&sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef DHD_STATUS_LOGGING
	if (dhdp->statlog) {
		/* write the statlog */
		len = dhd_get_status_log_len(NULL, dhdp);
		if (len) {
			if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
				len, &pos) < 0) {
				goto exit2;
			}
		}
	}
#endif /* DHD_STATUS_LOGGING */

#ifdef EWP_RTT_LOGGING
	/* periodic flushing of the rtt log is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_rtt_enable &&
		dhdp->rtt_dbg_ring) {
		dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
			fp, (unsigned long *)&pos,
			&sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
	}
#endif /* EWP_RTT_LOGGING */

#ifdef BCMPCIE
	len = dhd_get_ext_trap_len(NULL, dhdp);
	if (len) {
		if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}
#endif /* BCMPCIE */

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
	len = dhd_get_health_chk_len(NULL, dhdp);
	if (len) {
		/* was dhd_print_ext_trap_data(); fixed to write the health
		 * check section that 'len' was computed for
		 */
		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */

	len = dhd_get_dhd_dump_len(NULL, dhdp);
	if (len) {
		if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}

	len = dhd_get_cookie_log_len(NULL, dhdp);
	if (len) {
		if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}

#ifdef DHD_DUMP_PCIE_RINGS
	len = dhd_get_flowring_len(NULL, dhdp);
	if (len) {
		if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}
#endif /* DHD_DUMP_PCIE_RINGS */

	if (dhdp->logdump_periodic_flush) {
		/* store the last position written to in the file for future use */
		dhdp->last_file_posn = pos;
	}

exit2:
	if (!IS_ERR(fp) && fp != NULL) {
		filp_close(fp, NULL);
		DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
			__FUNCTION__, dump_path));
	}
	set_fs(old_fs);
exit1:
	if (type) {
		MFREE(dhdp->osh, type, sizeof(*type));
	}
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_DUMP_MNGR
	if (ret >= 0) {
		dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
	}
#endif /* DHD_DUMP_MNGR */

	return (ret < 0) ? BCME_ERROR : BCME_OK;
}
#endif /* DHD_LOG_DUMP */

/* This function writes data to the file pointed to by fp, OR
 * copies data to the user buffer sent by the upper layer (HAL).
 */
int
dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos)
{
	int ret = BCME_OK;

	if (fp) {
		ret = compat_vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
		if (ret < 0) {
			DHD_ERROR(("write file error, err = %d\n", ret));
			goto exit;
		}
	} else {
#ifdef CONFIG_COMPAT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
		if (in_compat_syscall()) {
#else
		if (is_compat_task()) {
#endif /* LINUX_VER >= 4.6 */
			void *usr_ptr = compat_ptr((uintptr_t)user_buf);
			ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
				mem_buf, buf_len);
			if (ret) {
				DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
				goto exit;
			}
		}
		else
#endif /* CONFIG_COMPAT */
		{
			ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
				mem_buf, buf_len);
			if (ret) {
				DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
				goto exit;
			}
		}
		(*(int *)pos) += buf_len;
	}
exit:
	return ret;
}

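/*
 * Editor's sketch ("#if 0"-guarded): the two output modes of
 * dhd_export_debug_data(). With a file pointer the data is written at *pos
 * via the kernel VFS; with fp == NULL it is copied to the HAL's user buffer
 * and *pos tracks the copy offset. example_export_modes() is hypothetical.
 */
#if 0
static void
example_export_modes(void *fp, const void *user_buf, char *data, int len)
{
	loff_t file_pos = 0;
	int usr_pos = 0;

	/* kernel file write: pos is a loff_t file offset */
	(void)dhd_export_debug_data(data, fp, NULL, len, &file_pos);
	/* user-space copy: pos is an int offset into user_buf */
	(void)dhd_export_debug_data(data, NULL, user_buf, len, &usr_pos);
}
#endif /* end of editor's example */
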
/*
 * This call is to get the memdump size so that
 * halutil can allocate that much buffer in user space.
 */
int
dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (dhdp->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
		return BCME_ERROR;
	}
	ret = dhd_common_socram_dump(dhdp);
	if (ret == BCME_OK) {
		*dump_size = dhdp->soc_ram_length;
	}
	return ret;
}

/*
 * This is to get the actual memdump after getting the memdump size.
 */
int
dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
{
	int ret = BCME_OK;
	int orig_len = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (buf == NULL)
		return BCME_ERROR;
	orig_len = *size;
	if (dhdp->soc_ram) {
		if (orig_len >= dhdp->soc_ram_length) {
			*buf = dhdp->soc_ram;
			*size = dhdp->soc_ram_length;
		} else {
			ret = BCME_BUFTOOSHORT;
			DHD_ERROR(("The length of the buffer is too short"
				" to save the memory dump with %d\n", dhdp->soc_ram_length));
		}
	} else {
		DHD_ERROR(("socram_dump is not ready to get\n"));
		ret = BCME_NOTREADY;
	}
	return ret;
}

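/*
 * Editor's sketch ("#if 0"-guarded): the two-step SOCRAM fetch used by
 * halutil per the comments above - query the size first, then hand in a
 * buffer at least that large. example_get_socram() is hypothetical.
 */
#if 0
static int
example_get_socram(struct net_device *dev, char *prealloc, uint32 prealloc_len)
{
	uint32 dump_size = 0;
	char *buf = prealloc;
	uint32 size = prealloc_len;
	int ret = dhd_os_socram_dump(dev, &dump_size);

	if (ret != BCME_OK || dump_size > prealloc_len)
		return BCME_ERROR;
	/* on success 'buf' points at the driver's soc_ram snapshot */
	return dhd_os_get_socram_dump(dev, &buf, &size);
}
#endif /* end of editor's example */
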
int
dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
{
	char *fw_str;

	if (size == 0)
		return BCME_BADARG;

	fw_str = strstr(info_string, "Firmware: ");
	if (fw_str == NULL) {
		return BCME_ERROR;
	}

	memset(*buf, 0, size);
	if (dhd_ver) {
		strncpy(*buf, dhd_version, size - 1);
	} else {
		strncpy(*buf, fw_str, size - 1);
	}
	return BCME_OK;
}

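/*
 * Editor's sketch ("#if 0"-guarded): dhd_os_get_version() fills a
 * caller-supplied buffer passed by reference; dhd_ver selects the DHD
 * version string (TRUE) or the firmware string (FALSE).
 */
#if 0
static void
example_print_versions(struct net_device *dev)
{
	char ver[128];
	char *p = ver;

	if (dhd_os_get_version(dev, TRUE, &p, sizeof(ver)) == BCME_OK)
		DHD_ERROR(("DHD: %s\n", ver));
	if (dhd_os_get_version(dev, FALSE, &p, sizeof(ver)) == BCME_OK)
		DHD_ERROR(("FW: %s\n", ver));
}
#endif /* end of editor's example */
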
#ifdef DNGL_AXI_ERROR_LOGGING
int
dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;
	loff_t pos = 0;

	if (user_buf == NULL) {
		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
		NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);

	if (ret < 0) {
		DHD_ERROR(("%s(): fail to export axi error dump, err = %d\n",
			__FUNCTION__, ret));
		return ret;
	}
	return ret;
}

int
dhd_os_get_axi_error_dump_size(struct net_device *dev)
{
	int size = -1;

	size = sizeof(dhd_axi_error_dump_t);
	if (size < 0) {
		DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
	}
	return size;
}

void
dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
{
	snprintf(dump_path, len, "%s",
		DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
}
#endif /* DNGL_AXI_ERROR_LOGGING */

bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
{
	return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
}

#ifdef DHD_L2_FILTER
arp_table_t*
dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(bssidx < DHD_MAX_IFS);

	ifp = dhd->iflist[bssidx];
	return ifp->phnd_arp_table;
}

int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	if (ifp)
		return ifp->parp_enable;
	else
		return FALSE;
}

/* Set interface specific proxy arp configuration */
int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	if (!ifp)
		return BCME_ERROR;

	/* At present all 3 variables are being
	 * handled at once
	 */
	ifp->parp_enable = val;
	ifp->parp_discard = val;
	ifp->parp_allnode = val;

	/* Flush ARP entries when disabled */
	if (val == FALSE) {
		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
			FALSE, dhdp->tickcnt);
	}
	return BCME_OK;
}

bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);
	return ifp->parp_discard;
}

bool
dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->parp_allnode;
}

int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->dhcp_unicast;
}

int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->dhcp_unicast = val;
	return BCME_OK;
}

int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->block_ping;
}

int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->block_ping = val;
	/* Disable rx_pkt_chain feature for interface if block_ping option is
	 * enabled
	 */
	dhd_update_rx_pkt_chainable_state(dhdp, idx);
	return BCME_OK;
}

int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->grat_arp;
}

int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->grat_arp = val;

	return BCME_OK;
}

int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	return ifp->block_tdls;
}

int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;
	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];

	ASSERT(ifp);

	ifp->block_tdls = val;

	return BCME_OK;
}
#endif /* DHD_L2_FILTER */

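/*
 * Editor's sketch ("#if 0"-guarded): the per-interface L2 filter toggles
 * above are plain setters; enabling proxy ARP also arms parp_discard and
 * parp_allnode, and disabling it flushes the interface ARP table.
 * example_enable_ap_filters() is hypothetical.
 */
#if 0
static void
example_enable_ap_filters(dhd_pub_t *dhdp, uint32 ifidx)
{
	(void)dhd_set_parp_status(dhdp, ifidx, TRUE);	/* proxy ARP on */
	(void)dhd_set_dhcp_unicast_status(dhdp, ifidx, TRUE);
	(void)dhd_set_block_ping_status(dhdp, ifidx, TRUE);
}
#endif /* end of editor's example */
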
#if defined(SET_XPS_CPUS)
int dhd_xps_cpus_enable(struct net_device *net, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_if_t *ifp;
	int ifidx;
	char *XPS_CPU_SETBUF;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	if (ifidx == PRIMARY_INF) {
		if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
			DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
			XPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
		} else {
			DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
			XPS_CPU_SETBUF = RPS_CPUS_MASK;
		}
	} else if (ifidx == VIRTUAL_INF) {
		DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
		XPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
	} else {
		DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	ifp = dhd->iflist[ifidx];
	if (ifp) {
		if (enable) {
			DHD_INFO(("%s : set xps_cpus as [%s]\n", __FUNCTION__, XPS_CPU_SETBUF));
			custom_xps_map_set(ifp->net, XPS_CPU_SETBUF, strlen(XPS_CPU_SETBUF));
		} else {
			custom_xps_map_clear(ifp->net);
		}
	} else {
		DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
		return -ENODEV;
	}
	return BCME_OK;
}

int custom_xps_map_set(struct net_device *net, char *buf, size_t len)
{
	cpumask_var_t mask;
	int err;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	err = netif_set_xps_queue(net, mask, 0);

	free_cpumask_var(mask);

	if (0 == err)
		DHD_ERROR(("%s : Done. mapping cpu\n", __FUNCTION__));

	return err;
}

void custom_xps_map_clear(struct net_device *net)
{
	struct xps_dev_maps *dev_maps;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	rcu_read_lock();
	dev_maps = rcu_dereference(net->xps_maps);
	rcu_read_unlock();

	if (dev_maps) {
		RCU_INIT_POINTER(net->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
		DHD_INFO(("%s : xps_cpus map clear.\n", __FUNCTION__));
	}
}
#endif /* SET_XPS_CPUS */

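/*
 * Editor's sketch ("#if 0"-guarded): steering TX queue processing with the
 * SET_XPS_CPUS helpers above; the CPU mask string comes from the
 * RPS_CPUS_MASK* defines selected inside dhd_xps_cpus_enable().
 */
#if 0
static void
example_pin_xps(struct net_device *net)
{
	if (dhd_xps_cpus_enable(net, TRUE) != BCME_OK)
		DHD_ERROR(("%s: XPS enable failed\n", __FUNCTION__));
}
#endif /* end of editor's example */
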
19071#if defined(SET_RPS_CPUS)
19072int dhd_rps_cpus_enable(struct net_device *net, int enable)
19073{
19074 dhd_info_t *dhd = DHD_DEV_INFO(net);
19075 dhd_if_t *ifp;
19076 int ifidx;
19077 char * RPS_CPU_SETBUF;
19078
19079 ifidx = dhd_net2idx(dhd, net);
19080 if (ifidx == DHD_BAD_IF) {
19081 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
19082 return -ENODEV;
19083 }
19084
19085 if (ifidx == PRIMARY_INF) {
19086 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
19087 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
19088 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
19089 } else {
19090 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
19091 RPS_CPU_SETBUF = RPS_CPUS_MASK;
19092 }
19093 } else if (ifidx == VIRTUAL_INF) {
19094 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
19095 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
19096 } else {
19097 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
19098 return -EINVAL;
19099 }
19100
19101 ifp = dhd->iflist[ifidx];
19102 if (ifp) {
19103 if (enable) {
19104 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
19105 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
19106 } else {
19107 custom_rps_map_clear(ifp->net->_rx);
19108 }
19109 } else {
19110 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
19111 return -ENODEV;
19112 }
19113 return BCME_OK;
19114}
19115
19116int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
19117{
19118 struct rps_map *old_map, *map;
19119 cpumask_var_t mask;
19120 int err, cpu, i;
19121 static DEFINE_SPINLOCK(rps_map_lock);
19122
19123 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19124
19125 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
19126 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
19127 return -ENOMEM;
19128 }
19129
19130 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
19131 if (err) {
19132 free_cpumask_var(mask);
19133 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
19134 return err;
19135 }
19136
19137 map = kzalloc(max_t(unsigned int,
19138 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
19139 GFP_KERNEL);
19140 if (!map) {
19141 free_cpumask_var(mask);
19142 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
19143 return -ENOMEM;
19144 }
19145
19146 i = 0;
19147 for_each_cpu(cpu, mask) {
19148 map->cpus[i++] = cpu;
19149 }
19150
19151 if (i) {
19152 map->len = i;
19153 } else {
19154 kfree(map);
19155 map = NULL;
19156 free_cpumask_var(mask);
19157 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
19158 return -1;
19159 }
19160
19161 spin_lock(&rps_map_lock);
19162 old_map = rcu_dereference_protected(queue->rps_map,
19163 lockdep_is_held(&rps_map_lock));
19164 rcu_assign_pointer(queue->rps_map, map);
19165 spin_unlock(&rps_map_lock);
19166
19167 if (map) {
19168 static_key_slow_inc(&rps_needed);
19169 }
19170 if (old_map) {
19171 kfree_rcu(old_map, rcu);
19172 static_key_slow_dec(&rps_needed);
19173 }
19174 free_cpumask_var(mask);
19175
19176	DHD_ERROR(("%s : Done. mapping cpu number : %d\n", __FUNCTION__, map->len));
19177	return map->len;
19178}
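/*
 * Design note: the new rps_map is published with rcu_assign_pointer() and
 * the old one retired with kfree_rcu(), so readers on the receive path
 * never observe a half-updated map; rps_needed is bumped once per
 * installed map and dropped when a map is replaced or cleared.
 */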
19179
19180void custom_rps_map_clear(struct netdev_rx_queue *queue)
19181{
19182 struct rps_map *map;
19183
19184 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19185
19186 map = rcu_dereference_protected(queue->rps_map, 1);
19187 if (map) {
19188 RCU_INIT_POINTER(queue->rps_map, NULL);
19189 kfree_rcu(map, rcu);
19190 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
19191 }
19192}
19193#endif // endif
19194
19195#if defined(ARGOS_NOTIFY_CB)
19196
19197static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19198 unsigned long speed, void *v);
19199static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19200 unsigned long speed, void *v);
19201
19202int
19203argos_register_notifier_init(struct net_device *net)
19204{
19205 int ret = 0;
19206
19207 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19208 argos_rps_ctrl_data.wlan_primary_netdev = net;
19209 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19210
19211 if (argos_wifi.notifier_call == NULL) {
19212 argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
19213 ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19214 if (ret < 0) {
19215 DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
19216 goto exit;
19217 }
19218 }
19219
19220 if (argos_p2p.notifier_call == NULL) {
19221 argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
19222 ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19223 if (ret < 0) {
19224 DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
19225 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19226 goto exit;
19227 }
19228 }
19229
19230 return 0;
19231
19232exit:
19233 if (argos_wifi.notifier_call) {
19234 argos_wifi.notifier_call = NULL;
19235 }
19236
19237 if (argos_p2p.notifier_call) {
19238 argos_p2p.notifier_call = NULL;
19239 }
19240
19241 return ret;
19242}
19243
19244int
19245argos_register_notifier_deinit(void)
19246{
19247 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19248
19249 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19250 DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__));
19251 return -1;
19252 }
19253#ifndef DHD_LB
19254 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19255#endif /* !DHD_LB */
19256
19257 if (argos_p2p.notifier_call) {
19258 sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19259 argos_p2p.notifier_call = NULL;
19260 }
19261
19262 if (argos_wifi.notifier_call) {
19263 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19264 argos_wifi.notifier_call = NULL;
19265 }
19266
19267 argos_rps_ctrl_data.wlan_primary_netdev = NULL;
19268 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19269
19270 return 0;
19271}
19272
19273int
19274argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19275 unsigned long speed, void *v)
19276{
19277 dhd_info_t *dhd;
19278 dhd_pub_t *dhdp;
19279#if defined(ARGOS_NOTIFY_CB)
19280 unsigned int pcie_irq = 0;
19281#endif /* ARGOS_NOTIFY_CB */
19282 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19283
19284 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19285 goto exit;
19286 }
19287
19288 dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
19289 if (dhd == NULL) {
19290 goto exit;
19291 }
19292
19293 dhdp = &dhd->pub;
19294 if (dhdp == NULL || !dhdp->up) {
19295 goto exit;
19296 }
19297 /* Check if reported TPut value is more than threshold value */
19298 if (speed > RPS_TPUT_THRESHOLD) {
19299 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
19300			/* There is no need to configure rps_cpus
19301			 * if Load Balance is enabled
19302			 */
19303#ifndef DHD_LB
19304 int err = 0;
19305
19306 if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
19307 err = custom_rps_map_set(
19308 argos_rps_ctrl_data.wlan_primary_netdev->_rx,
19309 RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
19310 } else {
19311 DHD_ERROR(("DHD: %s: RPS_Set fail,"
19312 " Core=%d Offline\n", __FUNCTION__,
19313 RPS_CPUS_WLAN_CORE_ID));
19314 err = -1;
19315 }
19316
19317 if (err < 0) {
19318 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
19319 "speed=%ld, error=%d\n",
19320 __FUNCTION__, speed, err));
19321 } else {
19322#endif /* !DHD_LB */
19323#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
19324 if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
19325 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
19326 __FUNCTION__, TCPACK_SUP_HOLD));
19327 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
19328 }
19329#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19330 argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
19331#ifndef DHD_LB
19332 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
19333 __FUNCTION__, speed));
19334 }
19335#endif /* !DHD_LB */
19336 }
19337 } else {
19338 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
19339#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
19340 if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
19341 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
19342 __FUNCTION__));
19343 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
19344 }
19345#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19346#ifndef DHD_LB
19347			/* There is no need to configure rps_cpus
19348			 * if Load Balance is enabled
19349			 */
19350 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19351 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
19352 OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
19353#endif /* !DHD_LB */
19354 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19355 }
19356 }
19357
19358exit:
19359 return NOTIFY_OK;
19360}
19361
19362int
19363argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19364 unsigned long speed, void *v)
19365{
19366 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19367 return argos_status_notifier_wifi_cb(notifier, speed, v);
19368}
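/*
 * Note: the WIFI and P2P ARGOS tables share this handler, so a throughput
 * report above RPS_TPUT_THRESHOLD from either notifier chain enables the
 * RPS mask (and, on PCIe, TCP ACK suppression), while a report at or below
 * the threshold tears both back down.
 */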
19369#endif // endif
19370
19371#ifdef DHD_DEBUG_PAGEALLOC
19372
19373void
19374dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
19375{
19376 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
19377
19378 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
19379 __FUNCTION__, addr_corrupt, (uint32)len));
19380
19381 DHD_OS_WAKE_LOCK(dhdp);
19382 prhex("Page Corruption:", addr_corrupt, len);
19383 dhd_dump_to_kernelog(dhdp);
19384#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
19385 /* Load the dongle side dump to host memory and then BUG_ON() */
19386 dhdp->memdump_enabled = DUMP_MEMONLY;
19387 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
19388 dhd_bus_mem_dump(dhdp);
19389#endif /* BCMPCIE && DHD_FW_COREDUMP */
19390 DHD_OS_WAKE_UNLOCK(dhdp);
19391}
19392EXPORT_SYMBOL(dhd_page_corrupt_cb);
19393#endif /* DHD_DEBUG_PAGEALLOC */
19394
19395#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
19396void
19397dhd_pktid_error_handler(dhd_pub_t *dhdp)
19398{
19399 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
19400 DHD_OS_WAKE_LOCK(dhdp);
19401 dhd_dump_to_kernelog(dhdp);
19402#ifdef DHD_FW_COREDUMP
19403 /* Load the dongle side dump to host memory */
19404 if (dhdp->memdump_enabled == DUMP_DISABLED) {
19405 dhdp->memdump_enabled = DUMP_MEMFILE;
19406 }
19407 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
19408 dhd_bus_mem_dump(dhdp);
19409#endif /* DHD_FW_COREDUMP */
19410 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
19411 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
19412 DHD_OS_WAKE_UNLOCK(dhdp);
19413}
19414#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
19415
19416struct net_device *
19417dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
19418{
19419 dhd_info_t *dhd = dhdp->info;
19420
19421 if (dhd->iflist[0] && dhd->iflist[0]->net)
19422 return dhd->iflist[0]->net;
19423 else
19424 return NULL;
19425}
19426
19427fw_download_status_t
19428dhd_fw_download_status(dhd_pub_t * dhd_pub)
19429{
19430	return dhd_pub->fw_download_status;
19431}
19432
19433static int
19434dhd_create_to_notifier_skt(void)
19435{
19436#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
19437 /* Kernel 3.7 onwards this API accepts only 3 arguments. */
19438 /* Kernel version 3.6 is a special case which accepts 4 arguments */
19439 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
19440#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
19441 /* Kernel version 3.5 and below use this old API format */
19442 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
19443 dhd_process_daemon_msg, NULL, THIS_MODULE);
19444#else
19445 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
19446 &dhd_netlink_cfg);
19447#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
19448 if (!nl_to_event_sk)
19449 {
19450 printf("Error creating socket.\n");
19451 return -1;
19452 }
19453 DHD_INFO(("nl_to socket created successfully...\n"));
19454 return 0;
19455}
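/*
 * Userspace counterpart sketch (illustrative only, not part of this
 * driver): the daemon binds a netlink socket on the same protocol number
 * and announces itself, which is how sender_pid is learned below:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, BCM_NL_USER);
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK,
 *		.nl_pid = getpid() };
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	// then send a bcm_to_info_t with magic BCM_TO_MAGIC and reason
 *	// REASON_DAEMON_STARTED; dhd_recv_msg_from_daemon() records the
 *	// sender's PID from the netlink header.
 */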
19456
19457void
19458dhd_destroy_to_notifier_skt(void)
19459{
19460 DHD_INFO(("Destroying nl_to socket\n"));
19461 netlink_kernel_release(nl_to_event_sk);
19462}
19463
19464static void
19465dhd_recv_msg_from_daemon(struct sk_buff *skb)
19466{
19467 struct nlmsghdr *nlh;
19468 bcm_to_info_t *cmd;
19469
19470 nlh = (struct nlmsghdr *)skb->data;
19471 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
19472 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
19473 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
19474 DHD_INFO(("DHD Daemon Started\n"));
19475 }
19476}
19477
19478int
19479dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
19480{
19481 struct nlmsghdr *nlh;
19482 struct sk_buff *skb_out;
19483	int ret = BCME_ERROR;
19484
19485 BCM_REFERENCE(skb);
19486 if (sender_pid == 0) {
19487 DHD_INFO(("Invalid PID 0\n"));
19488 skb_out = NULL;
19489 goto err;
19490 }
19491
19492 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
19493 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
19494 ret = BCME_NOMEM;
19495 goto err;
19496 }
19497 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
19498 if (nlh == NULL) {
19499 DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
19500 goto err;
19501	}
19502 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
19503 (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
19504
19505 if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
19506 DHD_ERROR(("Error sending message, ret:%d\n", ret));
19507 /* skb is already freed inside nlmsg_unicast() on error case */
19508 /* explicitly making skb_out to NULL to avoid double free */
19509 skb_out = NULL;
19510 goto err;
19511	}
19512 return BCME_OK;
19513err:
19514 if (skb_out) {
19515 nlmsg_free(skb_out);
d2839953 19516 }
19517	return ret;
19518}
19519
19520static void
19521dhd_process_daemon_msg(struct sk_buff *skb)
19522{
19523 bcm_to_info_t to_info;
19524
19525 to_info.magic = BCM_TO_MAGIC;
19526 to_info.reason = REASON_DAEMON_STARTED;
19527 to_info.trap = NO_TRAP;
19528
19529 dhd_recv_msg_from_daemon(skb);
19530 dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
19531}
19532
19533#ifdef DHD_LOG_DUMP
19534bool
19535dhd_log_dump_ecntr_enabled(void)
19536{
19537 return (bool)logdump_ecntr_enable;
19538}
19539
19540bool
19541dhd_log_dump_rtt_enabled(void)
19542{
19543 return (bool)logdump_rtt_enable;
19544}
19545
19546void
19547dhd_log_dump_init(dhd_pub_t *dhd)
19548{
19549 struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
19550 int i = 0;
19551 uint8 *prealloc_buf = NULL, *bufptr = NULL;
19552#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
19553 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
19554#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
19555 int ret;
19556 dhd_dbg_ring_t *ring = NULL;
19557 unsigned long flags = 0;
19558 dhd_info_t *dhd_info = dhd->info;
19559 void *cookie_buf = NULL;
19560
19561 BCM_REFERENCE(ret);
19562 BCM_REFERENCE(ring);
19563 BCM_REFERENCE(flags);
19564
19565 /* sanity check */
19566 if (logdump_prsrv_tailsize <= 0 ||
19567 logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
19568 logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
19569 }
19570 /* now adjust the preserve log flush size based on the
19571 * kernel printk log buffer size
19572 */
19573#ifdef CONFIG_LOG_BUF_SHIFT
19574 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
19575 " limit prsrv tail size to = %uKB\n",
19576 __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
19577 logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
19578
19579 if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
19580 logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
19581 }
19582#else
19583	DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
19584		__FUNCTION__, logdump_prsrv_tailsize/1024));
19585#endif /* CONFIG_LOG_BUF_SHIFT */
19586
19587 mutex_init(&dhd_info->logdump_lock);
19588
19589 /* initialize log dump buf structures */
19590 memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
19591
19592 /* set the log dump buffer size based on the module_param */
19593 if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
19594 logdump_max_bufsize <= 0)
19595 dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
19596 else
19597 dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
19598
19599 /* pre-alloc the memory for the log buffers & 'special' buffer */
19600 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
19601#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
19602 DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
19603 __FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE));
19604 prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
19605 dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
19606 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
19607#else
19608 prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
19609 dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
19610#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
19611 if (!prealloc_buf) {
19612 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
19613 goto fail;
19614 }
19615 if (!dld_buf_special->buffer) {
19616 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
19617 goto fail;
19618 }
19619
19620 bufptr = prealloc_buf;
19621 for (i = 0; i < DLD_BUFFER_NUM; i++) {
19622 dld_buf = &g_dld_buf[i];
19623 dld_buf->dhd_pub = dhd;
19624 spin_lock_init(&dld_buf->lock);
19625 dld_buf->wraparound = 0;
19626 if (i != DLD_BUF_TYPE_SPECIAL) {
19627 dld_buf->buffer = bufptr;
19628 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
19629 bufptr = (uint8 *)dld_buf->max;
19630 } else {
19631 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
19632 }
19633 dld_buf->present = dld_buf->front = dld_buf->buffer;
19634 dld_buf->remain = dld_buf_size[i];
19635 dld_buf->enable = 1;
19636 }
19637
19638#ifdef EWP_ECNTRS_LOGGING
19639 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
19640 dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
19641 if (!dhd->ecntr_dbg_ring)
19642 goto fail;
19643
19644 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
19645 ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
19646 ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
19647			bufptr, TRUE);
19648 if (ret != BCME_OK) {
19649 DHD_ERROR(("%s: unable to init ecntr ring !\n",
19650 __FUNCTION__));
19651 goto fail;
19652 }
19653 DHD_DBG_RING_LOCK(ring->lock, flags);
19654 ring->state = RING_ACTIVE;
19655 ring->threshold = 0;
19656 DHD_DBG_RING_UNLOCK(ring->lock, flags);
19657
19658 bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
19659#endif /* EWP_ECNTRS_LOGGING */
19660
19661#ifdef EWP_RTT_LOGGING
19662 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
19663 dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
19664 if (!dhd->rtt_dbg_ring)
19665 goto fail;
19666
19667 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
19668 ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
19669 RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
19670 bufptr, TRUE);
19671 if (ret != BCME_OK) {
19672 DHD_ERROR(("%s: unable to init ecntr ring !\n",
19673 __FUNCTION__));
19674 goto fail;
19675 }
19676 DHD_DBG_RING_LOCK(ring->lock, flags);
19677 ring->state = RING_ACTIVE;
19678 ring->threshold = 0;
19679 DHD_DBG_RING_UNLOCK(ring->lock, flags);
19680
19681 bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
19682#endif /* EWP_RTT_LOGGING */
19683
19684 /* Concise buffer is used as intermediate buffer for following purposes
19685 * a) pull ecounters records temporarily before
19686 * writing it to file
19687 * b) to store dhd dump data before putting it to file
19688 * It should have a size equal to
19689 * MAX(largest possible ecntr record, 'dhd dump' data size)
19690 */
19691 dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
19692 if (!dhd->concise_dbg_buf) {
19693 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
19694 __FUNCTION__));
19695 goto fail;
19696 }
19697
19698 cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
19699 if (!cookie_buf) {
19700 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
19701 __FUNCTION__));
19702 goto fail;
19703 }
19704 ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
19705 if (ret != BCME_OK) {
19706 MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
19707 goto fail;
19708 }
19709 return;
19710
19711fail:
19712
19713 if (dhd->logdump_cookie) {
19714 dhd_logdump_cookie_deinit(dhd);
19715 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
19716 dhd->logdump_cookie = NULL;
19717 }
19718
19719 if (dhd->concise_dbg_buf) {
19720 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
19721 }
19722
19723#ifdef EWP_ECNTRS_LOGGING
19724 if (dhd->ecntr_dbg_ring) {
19725 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
19726 dhd_dbg_ring_deinit(dhd, ring);
19727 ring->ring_buf = NULL;
19728 ring->ring_size = 0;
19729 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
19730 dhd->ecntr_dbg_ring = NULL;
19731 }
19732#endif /* EWP_ECNTRS_LOGGING */
19733
19734#ifdef EWP_RTT_LOGGING
19735 if (dhd->rtt_dbg_ring) {
19736 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
19737 dhd_dbg_ring_deinit(dhd, ring);
19738 ring->ring_buf = NULL;
19739 ring->ring_size = 0;
19740 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
19741 dhd->rtt_dbg_ring = NULL;
19742 }
19743#endif /* EWP_RTT_LOGGING */
19744
19745#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
19746 if (prealloc_buf) {
19747 DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
19748 }
19749 if (dld_buf_special->buffer) {
19750 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
19751 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
19752 }
19753#else
19754 if (prealloc_buf) {
19755 MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
19756 }
19757 if (dld_buf_special->buffer) {
19758 MFREE(dhd->osh, dld_buf_special->buffer,
19759 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
19760 }
19761#endif /* CONFIG_DHD_USE_STATIC_BUF */
19762 for (i = 0; i < DLD_BUFFER_NUM; i++) {
19763 dld_buf = &g_dld_buf[i];
19764 dld_buf->enable = 0;
19765 dld_buf->buffer = NULL;
19766 }
19767
19768 mutex_destroy(&dhd_info->logdump_lock);
19769}
19770
19771void
19772dhd_log_dump_deinit(dhd_pub_t *dhd)
19773{
19774 struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
19775 int i = 0;
19776 dhd_info_t *dhd_info = dhd->info;
19777 dhd_dbg_ring_t *ring = NULL;
19778
19779 BCM_REFERENCE(ring);
19780
19781 if (dhd->concise_dbg_buf) {
19782 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
19783 dhd->concise_dbg_buf = NULL;
19784 }
19785
19786 if (dhd->logdump_cookie) {
19787 dhd_logdump_cookie_deinit(dhd);
19788 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
19789 dhd->logdump_cookie = NULL;
19790 }
19791
19792#ifdef EWP_ECNTRS_LOGGING
19793 if (dhd->ecntr_dbg_ring) {
19794 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
19795 dhd_dbg_ring_deinit(dhd, ring);
19796 ring->ring_buf = NULL;
19797 ring->ring_size = 0;
19798 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
19799 dhd->ecntr_dbg_ring = NULL;
19800 }
19801#endif /* EWP_ECNTRS_LOGGING */
19802
19803#ifdef EWP_RTT_LOGGING
19804 if (dhd->rtt_dbg_ring) {
19805 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
19806 dhd_dbg_ring_deinit(dhd, ring);
19807 ring->ring_buf = NULL;
19808 ring->ring_size = 0;
19809 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
19810 dhd->rtt_dbg_ring = NULL;
19811 }
19812#endif /* EWP_RTT_LOGGING */
19813
19814 /* 'general' buffer points to start of the pre-alloc'd memory */
19815 dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
19816 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
19817#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
19818 if (dld_buf->buffer) {
19819 DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
19820 }
19821 if (dld_buf_special->buffer) {
19822 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
19823 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
19824 }
19825#else
19826 if (dld_buf->buffer) {
19827 MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
19828 }
19829 if (dld_buf_special->buffer) {
19830 MFREE(dhd->osh, dld_buf_special->buffer,
19831 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
19832 }
19833#endif /* CONFIG_DHD_USE_STATIC_BUF */
19834 for (i = 0; i < DLD_BUFFER_NUM; i++) {
19835 dld_buf = &g_dld_buf[i];
19836 dld_buf->enable = 0;
19837 dld_buf->buffer = NULL;
19838 }
19839
19840 mutex_destroy(&dhd_info->logdump_lock);
19841}
19842
19843void
19844dhd_log_dump_write(int type, char *binary_data,
19845 int binary_len, const char *fmt, ...)
19846{
19847 int len = 0;
19848 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
19849 va_list args;
19850 unsigned long flags = 0;
19851 struct dhd_log_dump_buf *dld_buf = NULL;
19852 bool flush_log = FALSE;
19853
19854 if (type < 0 || type >= DLD_BUFFER_NUM) {
19855 DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
19856 __FUNCTION__, type));
19857 return;
19858 }
19859
19860 dld_buf = &g_dld_buf[type];
19861
19862 if (dld_buf->enable != 1) {
19863 return;
19864 }
19865
19866 va_start(args, fmt);
19867 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
19868	/* A non-C99-compliant vsnprintf returns -1 on truncation;
19869	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
19870	 */
19871 va_end(args);
19872 if (len < 0) {
19873 return;
19874 }
19875
19876 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
19877 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
19878 tmp_buf[len] = '\0';
19879 }
19880
19881 /* make a critical section to eliminate race conditions */
19882 spin_lock_irqsave(&dld_buf->lock, flags);
19883 if (dld_buf->remain < len) {
19884 dld_buf->wraparound = 1;
19885 dld_buf->present = dld_buf->front;
19886 dld_buf->remain = dld_buf_size[type];
19887 /* if wrap around happens, flush the ring buffer to the file */
19888 flush_log = TRUE;
19889 }
19890
19891 memcpy(dld_buf->present, tmp_buf, len);
19892 dld_buf->remain -= len;
19893 dld_buf->present += len;
19894 spin_unlock_irqrestore(&dld_buf->lock, flags);
19895
19896 /* double check invalid memory operation */
19897 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
19898
19899 if (dld_buf->dhd_pub) {
19900 dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
19901 dhdp->logdump_periodic_flush =
19902 logdump_periodic_flush;
19903 if (logdump_periodic_flush && flush_log) {
19904 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
19905 sizeof(log_dump_type_t));
19906 if (flush_type) {
19907 *flush_type = type;
19908 dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
19909 }
19910 }
19911 }
19912}
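/*
 * Usage sketch (illustrative): callers format text into one of the DLD
 * buffers, e.g.
 *
 *	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0,
 *		"[%s] %s: event %d\n", dhd_log_dump_get_timestamp(),
 *		__FUNCTION__, 42);
 *
 * On wraparound the buffer rewinds to the front and, if
 * logdump_periodic_flush is set, a flush work item is scheduled.
 */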
19913
19914char*
19915dhd_log_dump_get_timestamp(void)
19916{
19917 static char buf[16];
19918 u64 ts_nsec;
19919 unsigned long rem_nsec;
19920
19921 ts_nsec = local_clock();
19922 rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
19923 snprintf(buf, sizeof(buf), "%5lu.%06lu",
19924 (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
19925
19926 return buf;
19927}
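/*
 * Example output (illustrative): with local_clock() returning
 * 123456789012 ns the buffer holds "  123.456789", i.e. seconds since
 * boot with microsecond resolution.
 */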
19928#endif /* DHD_LOG_DUMP */
19929
19930#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
19931void
19932dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
19933{
19934 dhd_info_t * dhd;
19935
19936 if (dhdp) {
19937 dhd = dhdp->info;
19938 if (dhd) {
19939 flush_workqueue(dhd->tx_wq);
19940 flush_workqueue(dhd->rx_wq);
19941 }
19942 }
19943
19944 return;
19945}
19946#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
19947
19948#ifdef DHD_DEBUG_UART
19949bool
19950dhd_debug_uart_is_running(struct net_device *dev)
19951{
19952 dhd_info_t *dhd = DHD_DEV_INFO(dev);
19953
19954 if (dhd->duart_execute) {
19955 return TRUE;
19956 }
19957
19958 return FALSE;
19959}
19960
19961static void
19962dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
19963{
19964 dhd_pub_t *dhdp = handle;
19965 dhd_debug_uart_exec(dhdp, "rd");
19966}
19967
19968static void
19969dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
19970{
19971 int ret;
19972
19973 char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
19974 char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
19975
19976#ifdef DHD_FW_COREDUMP
19977 if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
19978#endif // endif
19979 {
19980 if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
19981 dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
19982#ifdef DHD_FW_COREDUMP
19983 dhdp->memdump_success == FALSE ||
19984#endif // endif
19985 FALSE) {
19986 dhdp->info->duart_execute = TRUE;
19987 DHD_ERROR(("DHD: %s - execute %s %s\n",
19988 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
19989 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
19990 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
19991 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
19992 dhdp->info->duart_execute = FALSE;
19993
19994#ifdef DHD_LOG_DUMP
19995 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
19996#endif // endif
19997 {
19998 BUG_ON(1);
19999 }
20000 }
20001 }
20002}
20003#endif /* DHD_DEBUG_UART */
20004
20005#if defined(DHD_BLOB_EXISTENCE_CHECK)
20006void
20007dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
20008{
20009 struct file *fp;
20010 char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
20011 fp = filp_open(filepath, O_RDONLY, 0);
20012 if (IS_ERR(fp)) {
20013 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
20014 filepath));
20015 dhdp->is_blob = FALSE;
20016 } else {
20017 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath));
20018 dhdp->is_blob = TRUE;
20019#if defined(CONCATE_BLOB)
20020 strncat(fw_path, "_blob", strlen("_blob"));
20021#else
20022 BCM_REFERENCE(fw_path);
20023#endif /* CONCATE_BLOB */
20024 filp_close(fp, NULL);
20025 }
20026}
20027#endif /* DHD_BLOB_EXISTENCE_CHECK */
20028
20029#if defined(PCIE_FULL_DONGLE)
20030/** test / loopback */
20031void
20032dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
20033{
20034 dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
20035 dhd_info_t *dhd_info = (dhd_info_t *)handle;
20036
20037 if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
20038 DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__));
20039 return;
20040 }
20041 if (dhd_info == NULL) {
20042 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
20043 return;
20044 }
20045 if (dmmap == NULL) {
20046 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
20047 return;
20048 }
20049 dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
20050}
20051
20052void
20053dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
20054{
20055 dhd_info_t *dhd_info = dhdp->info;
20056
20057 dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
20058 DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
20059}
20060#endif /* PCIE_FULL_DONGLE */
20061/* ---------------------------- End of sysfs implementation ------------------------------------- */
20062
20063#ifdef SET_PCIE_IRQ_CPU_CORE
20064void
20065dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
20066{
20067 unsigned int pcie_irq = 0;
20068
20069 if (!dhdp) {
20070 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
20071 return;
20072 }
20073
20074 if (!dhdp->bus) {
20075 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
20076 return;
20077 }
20078
20079 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
20080
20081 if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
20082 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
20083 return;
20084 }
20085
20086 /*
20087	irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
20088	If the dedicated CPU core is not online,
20089	the PCIe interrupt is scheduled on CPU core 0.
20090 */
20091 switch (affinity_cmd) {
20092 case PCIE_IRQ_AFFINITY_OFF:
20093 break;
20094 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
20095#if defined(CONFIG_ARCH_SM8150)
20096 irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
20097 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20098#else /* Exynos and Others */
20099 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20100#endif /* CONFIG_ARCH_SM8150 */
20101 break;
20102#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
20103 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
20104 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
20105 __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
20106 irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
20107 break;
20108#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
20109 default:
20110 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
20111 __FUNCTION__, affinity_cmd));
20112 }
20113}
20114#endif /* SET_PCIE_IRQ_CPU_CORE */
20115
20116int
20117dhd_write_file(const char *filepath, char *buf, int buf_len)
20118{
20119 struct file *fp = NULL;
20120 mm_segment_t old_fs;
20121 int ret = 0;
20122
20123 /* change to KERNEL_DS address limit */
20124 old_fs = get_fs();
20125 set_fs(KERNEL_DS);
20126
20127 /* File is always created. */
20128 fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
20129 if (IS_ERR(fp)) {
20130 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
20131 __FUNCTION__, filepath, PTR_ERR(fp)));
20132 ret = BCME_ERROR;
20133 } else {
20134 if (fp->f_mode & FMODE_WRITE) {
20135			ret = compat_vfs_write(fp, buf, buf_len, &fp->f_pos);
20136 if (ret < 0) {
20137 DHD_ERROR(("%s: Couldn't write file '%s'\n",
20138 __FUNCTION__, filepath));
20139 ret = BCME_ERROR;
20140 } else {
20141 ret = BCME_OK;
20142 }
20143 }
20144 filp_close(fp, NULL);
20145 }
20146
20147 /* restore previous address limit */
20148 set_fs(old_fs);
20149
20150 return ret;
20151}
20152
20153int
20154dhd_read_file(const char *filepath, char *buf, int buf_len)
20155{
20156 struct file *fp = NULL;
20157 mm_segment_t old_fs;
20158 int ret;
20159
20160 /* change to KERNEL_DS address limit */
20161 old_fs = get_fs();
20162 set_fs(KERNEL_DS);
20163
20164 fp = filp_open(filepath, O_RDONLY, 0);
20165 if (IS_ERR(fp)) {
20166 set_fs(old_fs);
20167 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
20168 return BCME_ERROR;
20169 }
20170
20171 ret = compat_kernel_read(fp, 0, buf, buf_len);
20172 filp_close(fp, NULL);
20173
20174 /* restore previous address limit */
20175 set_fs(old_fs);
20176
20177 /* Return the number of bytes read */
20178 if (ret > 0) {
20179 /* Success to read */
20180 ret = 0;
20181 } else {
20182 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
20183 __FUNCTION__, filepath, ret));
20184 ret = BCME_ERROR;
20185 }
20186
20187 return ret;
20188}
20189
20190int
20191dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
20192{
20193 int ret;
20194
20195 ret = dhd_write_file(filepath, buf, buf_len);
20196 if (ret < 0) {
20197 return ret;
20198 }
20199
20200 /* Read the file again and check if the file size is not zero */
20201 memset(buf, 0, buf_len);
20202 ret = dhd_read_file(filepath, buf, buf_len);
20203
20204 return ret;
20205}
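/*
 * Usage sketch (illustrative; the path and payload are hypothetical):
 *
 *	char ver[8] = "1";
 *	if (dhd_write_file_and_check("/data/.wifiver.info", ver,
 *			strlen(ver)) < 0)
 *		DHD_ERROR(("write-and-verify failed\n"));
 */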
20206
20207#ifdef FILTER_IE
20208int dhd_read_from_file(dhd_pub_t *dhd)
20209{
20210 int ret = 0, nread = 0;
20211 void *fd;
20212 uint8 *buf;
20213 NULL_CHECK(dhd, "dhd is NULL", ret);
20214
20215 buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
20216 if (!buf) {
20217		DHD_ERROR(("error: failed to allocate buf.\n"));
20218 return BCME_NOMEM;
20219 }
20220
20221 /* open file to read */
20222 fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
20223 if (!fd) {
20224 DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH));
20225 ret = BCME_EPERM;
20226 goto exit;
20227 }
20228 nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
20229 if (nread > 0) {
20230 buf[nread] = '\0';
20231 if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
20232 DHD_ERROR(("error: failed to parse filter ie\n"));
20233 }
20234 } else {
20235		DHD_ERROR(("error: zero length file, failed to read\n"));
20236 ret = BCME_ERROR;
20237 }
20238 dhd_os_close_image1(dhd, fd);
20239exit:
20240 if (buf) {
20241 MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
20242 buf = NULL;
20243 }
20244 return ret;
20245}
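/*
 * Expected FILTER_IE_PATH file format, as parsed by dhd_parse_filter_ie()
 * below: one filter per line, "element_id[,oui[,type[,subtype]]]",
 * comma separated. An illustrative example:
 *
 *	221,00:50:F2,4
 *	127
 */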
20246
20247int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
20248{
20249 uint8* pstr = buf;
20250 int element_count = 0;
20251
20252 if (buf == NULL) {
20253 return BCME_ERROR;
20254 }
20255
20256 while (*pstr != '\0') {
20257 if (*pstr == '\n') {
20258 element_count++;
20259 }
20260 pstr++;
20261 }
20262 /*
20263	 * A newline character must not be present after the last line,
20264	 * so count the last line explicitly.
20265 */
20266 element_count++;
20267
20268 return element_count;
20269}
20270
20271int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
20272{
20273 uint8 i, j, msb, lsb, oui_len = 0;
20274 /*
20275	 * An OUI can vary from 3 bytes to 5 bytes.
20276	 * When read from the file as ascii input it can
20277	 * take a maximum size of 14 bytes and a minimum size of
20278	 * 8 bytes, including the ":" separators.
20279	 * Example 5-byte OUI <AB:DE:BE:CD:FA>
20280	 * Example 3-byte OUI <AB:DC:EF>
20281 */
20282
20283 if ((inbuf == NULL) || (len < 8) || (len > 14)) {
20284 DHD_ERROR(("error: failed to parse OUI \n"));
20285 return BCME_ERROR;
20286 }
20287
20288 for (j = 0, i = 0; i < len; i += 3, ++j) {
20289 if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
20290 DHD_ERROR(("error: invalid OUI format \n"));
20291 return BCME_ERROR;
20292 }
20293 msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
20294 lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
20295 'A' + 10 : inbuf[i + 1] - '0';
20296 oui[j] = (msb << 4) | lsb;
20297 }
20298 /* Size of oui.It can vary from 3/4/5 */
20299 oui_len = j;
20300
20301 return oui_len;
20302}
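/*
 * Worked example: the ascii input "00:90:4C" (len 8) parses to
 * oui[] = { 0x00, 0x90, 0x4C } and a return value (oui_len) of 3.
 */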
20303
20304int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
20305{
20306 int i = 0;
20307
20308 while (i < len) {
20309 if (!bcm_isdigit(buf[i])) {
20310 DHD_ERROR(("error: non digit value found in filter_ie \n"));
20311 return BCME_ERROR;
20312 }
20313 i++;
20314 }
20315 if (bcm_atoi((char*)buf) > 255) {
20316 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
20317 return BCME_ERROR;
20318 }
20319
20320 return BCME_OK;
20321}
20322
20323int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
20324{
20325 int element_count = 0, i = 0, oui_size = 0, ret = 0;
20326 uint16 bufsize, buf_space_left, id = 0, len = 0;
20327 uint16 filter_iovsize, all_tlvsize;
20328 wl_filter_ie_tlv_t *p_ie_tlv = NULL;
20329 wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
20330 char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
20331 uint8 data[20];
20332
20333 element_count = dhd_get_filter_ie_count(dhd, buf);
20334 DHD_INFO(("total element count %d \n", element_count));
20335 /* Calculate the whole buffer size */
20336 filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
20337 p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
20338
20339 if (p_filter_iov == NULL) {
20340 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
20341 return BCME_ERROR;
20342 }
20343
20344 /* setup filter iovar header */
20345 p_filter_iov->version = WL_FILTER_IE_VERSION;
20346 p_filter_iov->len = filter_iovsize;
20347 p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
20348 p_filter_iov->pktflag = FC_PROBE_REQ;
20349 p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
20350 /* setup TLVs */
20351 bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
20352 p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
20353 buf_space_left = bufsize;
20354
20355 while ((i < element_count) && (buf != NULL)) {
20356 len = 0;
20357 /* token contains one line of input data */
20358 token = bcmstrtok((char**)&buf, "\n", NULL);
20359 if (token == NULL) {
20360 break;
20361 }
20362 if ((ele_token = bcmstrstr(token, ",")) == NULL) {
20363 /* only element id is present */
20364 if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
20365 DHD_ERROR(("error: Invalid element id \n"));
20366 ret = BCME_ERROR;
20367 goto exit;
20368 }
20369 id = bcm_atoi((char*)token);
20370 data[len++] = WL_FILTER_IE_SET;
20371 } else {
20372 /* oui is present */
20373 ele_token = bcmstrtok(&token, ",", NULL);
20374 if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
20375 strlen(ele_token)) == BCME_ERROR)) {
20376 DHD_ERROR(("error: Invalid element id \n"));
20377 ret = BCME_ERROR;
20378 goto exit;
20379 }
20380 id = bcm_atoi((char*)ele_token);
20381 data[len++] = WL_FILTER_IE_SET;
20382 if ((oui_token = bcmstrstr(token, ",")) == NULL) {
20383 oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
20384 if (oui_size == BCME_ERROR) {
20385 DHD_ERROR(("error: Invalid OUI \n"));
20386 ret = BCME_ERROR;
20387 goto exit;
20388 }
20389 len += oui_size;
20390 } else {
20391 /* type is present */
20392 oui_token = bcmstrtok(&token, ",", NULL);
20393 if ((oui_token == NULL) || ((oui_size =
20394 dhd_parse_oui(dhd, oui_token,
20395 &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
20396 DHD_ERROR(("error: Invalid OUI \n"));
20397 ret = BCME_ERROR;
20398 goto exit;
20399 }
20400 len += oui_size;
20401 if ((type = bcmstrstr(token, ",")) == NULL) {
20402 if (dhd_check_valid_ie(dhd, token,
20403 strlen(token)) == BCME_ERROR) {
20404 DHD_ERROR(("error: Invalid type \n"));
20405 ret = BCME_ERROR;
20406 goto exit;
20407 }
20408 data[len++] = bcm_atoi((char*)token);
20409 } else {
20410 /* subtype is present */
20411 type = bcmstrtok(&token, ",", NULL);
20412 if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
20413 strlen(type)) == BCME_ERROR)) {
20414 DHD_ERROR(("error: Invalid type \n"));
20415 ret = BCME_ERROR;
20416 goto exit;
20417 }
20418 data[len++] = bcm_atoi((char*)type);
20419 /* subtype is last element */
20420 if ((token == NULL) || (*token == '\0') ||
20421 (dhd_check_valid_ie(dhd, token,
20422 strlen(token)) == BCME_ERROR)) {
20423 DHD_ERROR(("error: Invalid subtype \n"));
20424 ret = BCME_ERROR;
20425 goto exit;
20426 }
20427 data[len++] = bcm_atoi((char*)token);
20428 }
20429 }
20430 }
20431 ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
20432 &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
20433 if (ret != BCME_OK) {
20434 DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
20435 "status=%d\n", __FUNCTION__, ret));
20436 goto exit;
20437 }
20438 i++;
20439 }
20440 if (i == 0) {
20441 /* file is empty or first line is blank */
20442 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
20443 ret = BCME_ERROR;
20444 goto exit;
20445 }
20446 /* update the iov header, set len to include all TLVs + header */
20447 all_tlvsize = (bufsize - buf_space_left);
20448 p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
20449 ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
20450 p_filter_iov->len, NULL, 0, TRUE);
20451 if (ret != BCME_OK) {
20452 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
20453 }
20454exit:
20455 /* clean up */
20456 if (p_filter_iov) {
20457 MFREE(dhd->osh, p_filter_iov, filter_iovsize);
20458 p_filter_iov = NULL;
20459 }
20460 return ret;
20461}
20462#endif /* FILTER_IE */
20463#ifdef DHD_WAKE_STATUS
20464wake_counts_t*
20465dhd_get_wakecount(dhd_pub_t *dhdp)
20466{
20467#ifdef BCMDBUS
20468 return NULL;
20469#else
20470 return dhd_bus_get_wakecount(dhdp);
20471#endif /* BCMDBUS */
20472}
20473#endif /* DHD_WAKE_STATUS */
20474
20475int
20476dhd_get_random_bytes(uint8 *buf, uint len)
20477{
20478#ifdef BCMPCIE
20479#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
20480 int rndlen = get_random_bytes_arch(buf, len);
20481 if (rndlen != len) {
20482 bzero(buf, len);
20483 get_random_bytes(buf, len);
20484 }
20485#else
20486	get_random_bytes_arch(buf, len);
20487#endif // endif
20488#endif /* BCMPCIE */
20489 return BCME_OK;
20490}
20491
20492#ifdef DHD_ERPOM
20493static void
20494dhd_error_recovery(void *handle, void *event_info, u8 event)
20495{
20496 dhd_info_t *dhd = handle;
20497 dhd_pub_t *dhdp;
20498 int ret = 0;
20499
20500 if (!dhd) {
20501 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
20502 return;
20503 }
20504
20505 dhdp = &dhd->pub;
20506
20507 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
20508 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
20509 __FUNCTION__));
20510 return;
20511 }
20512
20513	ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
20514 if (ret != BCME_DNGL_DEVRESET) {
20515 DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
20516 "toggle REG_ON\n", __FUNCTION__, ret));
20517 /* toggle REG_ON */
20518 dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
20519 return;
20520 }
20521}
20522
20523void
20524dhd_schedule_reset(dhd_pub_t *dhdp)
20525{
20526 if (dhdp->enable_erpom) {
20527 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
20528 DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
20529 }
20530}
20531#endif /* DHD_ERPOM */
20532
20533void
20534get_debug_dump_time(char *str)
20535{
20536	struct osl_timespec curtime;
20537 unsigned long local_time;
20538 struct rtc_time tm;
20539
20540 if (!strlen(str)) {
20541		osl_do_gettimeofday(&curtime);
20542 local_time = (u32)(curtime.tv_sec -
20543 (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
20544 rtc_time_to_tm(local_time, &tm);
20545
20546 snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
20547 tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
20548 tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
20549 }
20550}
20551
20552void
20553clear_debug_dump_time(char *str)
20554{
20555 memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
20556}
20557
20558void
20559dhd_print_tasklet_status(dhd_pub_t *dhd)
20560{
20561 dhd_info_t *dhdinfo;
20562
20563 if (!dhd) {
20564 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
20565 return;
20566 }
20567
20568 dhdinfo = dhd->info;
20569
20570 if (!dhdinfo) {
20571 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
20572 return;
20573 }
20574
20575 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
20576}
20577
20578/*
20579 * DHD RING
20580 */
20581#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
20582#define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
20583
20584#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
20585#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
20586
20587#define DHD_RING_MAGIC 0x20170910
20588#define DHD_RING_IDX_INVALID 0xffffffff
20589
20590#define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
20591#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
20592#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
20593#define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
20594
d2839953
RC
20595typedef struct {
20596 uint32 elem_size;
20597 uint32 elem_cnt;
20598 uint32 write_idx; /* next write index, -1 : not started */
20599	uint32 read_idx; /* next read index, -1 : not started */
20600
20601 /* protected elements during serialization */
20602	int lock_idx; /* start index of locked range; elements there will not be overwritten */
20603	int lock_count; /* number of locked elements, from lock_idx */
20604
20605 /* saved data elements */
20606 void *elem;
20607} dhd_fixed_ring_info_t;
20608
20609typedef struct {
20610 uint32 elem_size;
20611 uint32 elem_cnt;
20612 uint32 idx; /* -1 : not started */
20613 uint32 rsvd; /* reserved for future use */
20614
20615 /* protected elements during serialization */
20616 atomic_t ring_locked;
20617 /* check the overwriting */
20618 uint32 ring_overwrited;
20619
20620 /* saved data elements */
20621 void *elem;
20622} dhd_singleidx_ring_info_t;
20623
20624typedef struct {
20625 uint32 magic;
20626 uint32 type;
20627	void *ring_sync; /* spinlock for sync */
20628 union {
20629 dhd_fixed_ring_info_t fixed;
20630		dhd_singleidx_ring_info_t single;
20631 };
20632} dhd_ring_info_t;
20633
20634uint32
20635dhd_ring_get_hdr_size(void)
20636{
20637 return sizeof(dhd_ring_info_t);
20638}
20639
20640void *
20641dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
20642 uint32 elem_cnt, uint32 type)
20643{
20644 dhd_ring_info_t *ret_ring;
20645
20646 if (!buf) {
20647 DHD_RING_ERR(("NO RING BUFFER\n"));
20648 return NULL;
20649 }
20650
20651 if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
20652 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
20653 return NULL;
20654 }
20655
20656 if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
20657 DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
20658 return NULL;
20659 }
20660
20661	ret_ring = (dhd_ring_info_t *)buf;
20662 ret_ring->type = type;
20663 ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
20664	ret_ring->magic = DHD_RING_MAGIC;
20665
20666 if (type == DHD_RING_TYPE_FIXED) {
20667 ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
20668 ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
20669 ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
20670 ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
20671 ret_ring->fixed.elem_size = elem_size;
20672 ret_ring->fixed.elem_cnt = elem_cnt;
20673 } else {
20674 ret_ring->single.idx = DHD_RING_IDX_INVALID;
20675 atomic_set(&ret_ring->single.ring_locked, 0);
20676 ret_ring->single.ring_overwrited = 0;
20677 ret_ring->single.rsvd = 0;
20678 ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
20679 ret_ring->single.elem_size = elem_size;
20680 ret_ring->single.elem_cnt = elem_cnt;
20681 }
20682
20683 return ret_ring;
20684}
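/*
 * Usage sketch (illustrative; "my_rec_t" and the element count are
 * hypothetical). The caller supplies one contiguous buffer that holds the
 * ring header plus the elements:
 *
 *	uint32 bufsz = dhd_ring_get_hdr_size() + 16 * sizeof(my_rec_t);
 *	uint8 *buf = MALLOCZ(dhdp->osh, bufsz);
 *	void *ring = dhd_ring_init(dhdp, buf, bufsz,
 *		sizeof(my_rec_t), 16, DHD_RING_TYPE_FIXED);
 */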
20685
20686void
20687dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
20688{
20689 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
20690 if (!ring) {
20691 return;
20692 }
20693
20694 if (ring->magic != DHD_RING_MAGIC) {
20695 return;
20696 }
20697
20698 if (ring->type != DHD_RING_TYPE_FIXED &&
20699 ring->type != DHD_RING_TYPE_SINGLE_IDX) {
20700 return;
20701 }
20702
20703 DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
20704 ring->ring_sync = NULL;
20705 if (ring->type == DHD_RING_TYPE_FIXED) {
20706 dhd_fixed_ring_info_t *fixed = &ring->fixed;
20707 memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
20708 fixed->elem_size = fixed->elem_cnt = 0;
20709 } else {
20710 dhd_singleidx_ring_info_t *single = &ring->single;
20711 memset(single->elem, 0, single->elem_size * single->elem_cnt);
20712 single->elem_size = single->elem_cnt = 0;
20713 }
20714 ring->type = 0;
20715 ring->magic = 0;
20716}
20717
20718static inline uint32
20719__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
20720{
20721 uint32 diff;
20722 uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
20723 uint32 elem_size, elem_cnt;
20724 void *elem;
20725
20726 if (type == DHD_RING_TYPE_FIXED) {
20727 dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
20728 elem_size = fixed->elem_size;
20729 elem_cnt = fixed->elem_cnt;
20730 elem = fixed->elem;
20731 } else if (type == DHD_RING_TYPE_SINGLE_IDX) {
20732 dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
20733 elem_size = single->elem_size;
20734 elem_cnt = single->elem_cnt;
20735 elem = single->elem;
20736 } else {
20737 DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
20738 return ret_idx;
20739 }
20740
20741 if (ptr < elem) {
20742 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
20743 return ret_idx;
20744 }
20745 diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
20746 if (diff % elem_size != 0) {
20747 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
20748 return ret_idx;
20749 }
20750 ret_idx = diff / elem_size;
20751 if (ret_idx >= elem_cnt) {
20752 DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
20753 }
20754 return ret_idx;
20755}
20756
20757/* Sub functions for fixed ring */
20758/* get counts between two indexes of ring buffer (internal only) */
20759static inline int
20760__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
20761{
20762 if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
20763 return 0;
20764 }
20765
20766 return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
20767}
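/*
 * Worked example: with elem_cnt = 8, start (read_idx) = 6 and
 * end (write_idx) = 1 the ring has wrapped, and
 * (8 + 1 - 6) % 8 + 1 = 4 elements are occupied (indexes 6, 7, 0, 1).
 */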
20768
20769static inline int
20770__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
20771{
20772 return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
20773}
20774
20775static inline void *
20776__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
20777{
20778 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20779 return NULL;
20780 }
20781 return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
20782}
20783
20784static inline void
20785__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
20786{
20787 uint32 next_idx;
20788
20789 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20790 DHD_RING_ERR(("EMPTY RING\n"));
20791 return;
20792 }
20793
20794 next_idx = (ring->read_idx + 1) % ring->elem_cnt;
20795 if (ring->read_idx == ring->write_idx) {
20796 /* Become empty */
20797 ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
20798 return;
20799 }
20800
20801 ring->read_idx = next_idx;
20802 return;
20803}
20804
20805static inline void *
20806__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
20807{
20808 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20809 return NULL;
20810 }
20811 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
20812}
20813
20814static inline void *
20815__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
20816{
20817 uint32 tmp_idx;
20818
20819 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20820 ring->read_idx = ring->write_idx = 0;
20821 return (uint8 *)ring->elem;
20822 }
20823
20824 /* check next index is not locked */
20825 tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
20826 if (ring->lock_idx == tmp_idx) {
20827 return NULL;
20828 }
20829
20830 ring->write_idx = tmp_idx;
20831 if (ring->write_idx == ring->read_idx) {
20832 /* record is full, drop oldest one */
20833 ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
20834
20835 }
20836 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
20837}
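/*
 * Note: when the ring is full the oldest element is silently dropped
 * (read_idx advances past it), unless the next write slot is the start of
 * the locked range, in which case NULL is returned and the caller must
 * retry after the lock is released.
 */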
20838
20839static inline void *
20840__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
20841{
20842 uint32 cur_idx;
20843
20844 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20845 DHD_RING_ERR(("EMPTY RING\n"));
20846 return NULL;
20847 }
20848
20849	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
20850 if (cur_idx >= ring->elem_cnt) {
20851 return NULL;
20852 }
20853
20854 if (cur_idx == ring->write_idx) {
20855 /* no more new record */
20856 return NULL;
20857 }
20858
20859 cur_idx = (cur_idx + 1) % ring->elem_cnt;
20860 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
20861}
20862
20863static inline void *
20864__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
20865{
20866 uint32 cur_idx;
20867
20868 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20869 DHD_RING_ERR(("EMPTY RING\n"));
20870 return NULL;
20871 }
20872	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
20873 if (cur_idx >= ring->elem_cnt) {
20874 return NULL;
20875 }
20876 if (cur_idx == ring->read_idx) {
20877 /* no more new record */
20878 return NULL;
20879 }
20880
20881 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
20882 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
20883}
20884
20885static inline void
20886__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
20887{
20888 uint32 first_idx;
20889 uint32 last_idx;
20890 uint32 ring_filled_cnt;
20891 uint32 tmp_cnt;
20892
20893 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20894 DHD_RING_ERR(("EMPTY RING\n"));
20895 return;
20896 }
20897
20898 if (first_ptr) {
20899		first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
20900 if (first_idx >= ring->elem_cnt) {
20901 return;
20902 }
20903 } else {
20904 first_idx = ring->read_idx;
20905 }
20906
20907 if (last_ptr) {
20908		last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
20909 if (last_idx >= ring->elem_cnt) {
20910 return;
20911 }
20912 } else {
20913 last_idx = ring->write_idx;
20914 }
20915
20916 ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
20917 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
20918 if (tmp_cnt > ring_filled_cnt) {
20919 DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
20920 ring->write_idx, ring->read_idx, first_idx));
20921 return;
20922 }
20923
20924 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
20925 if (tmp_cnt > ring_filled_cnt) {
20926 DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
20927 ring->write_idx, ring->read_idx, last_idx));
20928 return;
20929 }
20930
20931 ring->lock_idx = first_idx;
20932 ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
20933 return;
20934}
20935
20936static inline void
20937__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
20938{
20939 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20940 DHD_RING_ERR(("EMPTY RING\n"));
20941 return;
20942 }
20943
20944 ring->lock_idx = DHD_RING_IDX_INVALID;
20945 ring->lock_count = 0;
20946 return;
20947}
20948static inline void *
20949__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
20950{
20951 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20952 DHD_RING_ERR(("EMPTY RING\n"));
20953 return NULL;
20954 }
20955 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
20956 DHD_RING_ERR(("NO LOCK POINT\n"));
20957 return NULL;
20958 }
20959 return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
20960}
20961
20962static inline void *
20963__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
20964{
20965 int lock_last_idx;
20966 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20967 DHD_RING_ERR(("EMPTY RING\n"));
20968 return NULL;
20969 }
20970 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
20971 DHD_RING_ERR(("NO LOCK POINT\n"));
20972 return NULL;
20973 }
20974
20975 lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
20976 return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
20977}
20978
20979static inline int
20980__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
20981{
20982 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20983 DHD_RING_ERR(("EMPTY RING\n"));
20984 return BCME_ERROR;
20985 }
20986 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
20987 DHD_RING_ERR(("NO LOCK POINT\n"));
20988 return BCME_ERROR;
20989 }
20990 return ring->lock_count;
20991}
20992
20993static inline void
20994__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
20995{
20996 if (ring->read_idx == DHD_RING_IDX_INVALID) {
20997 DHD_RING_ERR(("EMPTY RING\n"));
20998 return;
20999 }
21000 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21001 DHD_RING_ERR(("NO LOCK POINT\n"));
21002 return;
21003 }
21004
21005 ring->lock_count--;
21006 if (ring->lock_count <= 0) {
21007 ring->lock_idx = DHD_RING_IDX_INVALID;
21008 } else {
21009 ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
21010 }
21011 return;
21012}
21013
21014static inline void
21015__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
21016{
21017 ring->read_idx = idx;
21018}
21019
21020static inline void
21021__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
21022{
21023 ring->write_idx = idx;
21024}
21025
21026static inline uint32
21027__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
21028{
21029 return ring->read_idx;
21030}
21031
21032static inline uint32
21033__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
21034{
21035 return ring->write_idx;
21036}
21037
21038/* Sub functions for single index ring */
21039static inline void *
21040__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
21041{
21042 uint32 tmp_idx = 0;
21043
21044 if (ring->idx == DHD_RING_IDX_INVALID) {
21045 return NULL;
21046 }
21047
21048 if (ring->ring_overwrited) {
21049 tmp_idx = (ring->idx + 1) % ring->elem_cnt;
21050 }
21051
21052 return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
21053}
21054
21055static inline void *
21056__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
21057{
21058 if (ring->idx == DHD_RING_IDX_INVALID) {
21059 return NULL;
21060 }
21061
21062 return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
21063}
21064
21065static inline void *
21066__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
21067{
21068 if (ring->idx == DHD_RING_IDX_INVALID) {
21069 ring->idx = 0;
21070 return (uint8 *)ring->elem;
21071 }
21072
21073 /* check the lock is held */
21074 if (atomic_read(&ring->ring_locked)) {
21075 return NULL;
21076 }
21077
21078 /* check the index rollover */
21079 if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
21080 ring->ring_overwrited = 1;
21081 }
21082
21083 ring->idx = (ring->idx + 1) % ring->elem_cnt;
21084
21085 return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
21086}
21087
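/* Illustrative walk-through of __dhd_singleidx_ring_get_empty above
 * (values are examples, not from the source): with elem_cnt == 3 the
 * first call returns slot 0, the next calls return slots 1 and 2, and
 * the fourth call wraps back to slot 0 with ring_overwrited set,
 * silently recycling the oldest entry unless the ring is whole-locked.
 */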
21088static inline void *
21089__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
21090{
21091 uint32 cur_idx;
21092
21093 if (ring->idx == DHD_RING_IDX_INVALID) {
21094 DHD_RING_ERR(("EMPTY RING\n"));
21095 return NULL;
21096 }
21097
21098 cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
21099 if (cur_idx >= ring->elem_cnt) {
21100 return NULL;
21101 }
21102
21103 if (cur_idx == ring->idx) {
21104 /* no more new record */
21105 return NULL;
21106 }
21107
21108 cur_idx = (cur_idx + 1) % ring->elem_cnt;
21109
21110 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21111}
21112
21113static inline void *
21114__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
21115{
21116 uint32 cur_idx;
21117
21118 if (ring->idx == DHD_RING_IDX_INVALID) {
21119 DHD_RING_ERR(("EMPTY RING\n"));
21120 return NULL;
21121 }
21122 cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
21123 if (cur_idx >= ring->elem_cnt) {
21124 return NULL;
21125 }
21126
21127 if (!ring->ring_overwrited && cur_idx == 0) {
21128 /* no more new record */
21129 return NULL;
21130 }
21131
21132 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
21133 if (ring->ring_overwrited && cur_idx == ring->idx) {
21134 /* no more new record */
21135 return NULL;
21136 }
21137
21138 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21139}
21140
21141static inline void
21142__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
21143{
21144 if (!atomic_read(&ring->ring_locked)) {
21145 atomic_set(&ring->ring_locked, 1);
21146 }
21147}
21148
21149static inline void
21150__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
21151{
21152 if (atomic_read(&ring->ring_locked)) {
21153 atomic_set(&ring->ring_locked, 0);
21154 }
21155}
21156
21157/* Get first element : oldest element */
21158void *
21159dhd_ring_get_first(void *_ring)
21160{
21161 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21162 void *ret = NULL;
21163	unsigned long flags;
21164
21165 if (!ring || ring->magic != DHD_RING_MAGIC) {
21166 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21167 return NULL;
21168 }
21169
21170	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21171 if (ring->type == DHD_RING_TYPE_FIXED) {
21172 ret = __dhd_fixed_ring_get_first(&ring->fixed);
21173 }
21174 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21175 ret = __dhd_singleidx_ring_get_first(&ring->single);
21176 }
21177 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21178 return ret;
21179}
21180
21181/* Free first element : oldest element */
21182void
21183dhd_ring_free_first(void *_ring)
21184{
21185 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21186	unsigned long flags;
21187
21188 if (!ring || ring->magic != DHD_RING_MAGIC) {
21189 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21190 return;
21191 }
21192
21193	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21194 if (ring->type == DHD_RING_TYPE_FIXED) {
21195 __dhd_fixed_ring_free_first(&ring->fixed);
21196 }
21197 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21198}
21199
21200void
21201dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
21202{
21203 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21204 unsigned long flags;
21205
21206 if (!ring || ring->magic != DHD_RING_MAGIC) {
21207 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21208 return;
21209 }
21210
21211 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21212 if (ring->type == DHD_RING_TYPE_FIXED) {
21213 __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
21214 }
21215 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21216}
21217
21218void
21219dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
21220{
21221 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21222 unsigned long flags;
21223
21224 if (!ring || ring->magic != DHD_RING_MAGIC) {
21225 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21226 return;
21227 }
21228
21229 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21230 if (ring->type == DHD_RING_TYPE_FIXED) {
21231 __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
21232 }
21233 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21234}
21235
21236uint32
21237dhd_ring_get_read_idx(void *_ring)
21238{
21239 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21240 uint32 read_idx = DHD_RING_IDX_INVALID;
21241 unsigned long flags;
21242
21243 if (!ring || ring->magic != DHD_RING_MAGIC) {
21244 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21245 return read_idx;
21246 }
21247
21248 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21249 if (ring->type == DHD_RING_TYPE_FIXED) {
21250 read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
21251 }
21252 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21253
21254 return read_idx;
21255}
21256
21257uint32
21258dhd_ring_get_write_idx(void *_ring)
21259{
21260 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21261 uint32 write_idx = DHD_RING_IDX_INVALID;
21262 unsigned long flags;
21263
21264 if (!ring || ring->magic != DHD_RING_MAGIC) {
21265 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21266 return write_idx;
21267 }
21268
21269 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21270 if (ring->type == DHD_RING_TYPE_FIXED) {
21271 write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
21272 }
21273 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21274
21275 return write_idx;
21276}
21277
21278/* Get latest element */
21279void *
21280dhd_ring_get_last(void *_ring)
21281{
21282 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21283 void *ret = NULL;
21284	unsigned long flags;
21285
21286 if (!ring || ring->magic != DHD_RING_MAGIC) {
21287 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21288 return NULL;
21289 }
21290
21291	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21292 if (ring->type == DHD_RING_TYPE_FIXED) {
21293 ret = __dhd_fixed_ring_get_last(&ring->fixed);
21294 }
21295 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21296 ret = __dhd_singleidx_ring_get_last(&ring->single);
21297 }
21298 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21299 return ret;
21300}
21301
21302/* Get the next slot that can be written.
21303 * May overwrite an element that has not been read yet.
21304 * Returns NULL if the next slot is locked.
21305 */
21306void *
21307dhd_ring_get_empty(void *_ring)
21308{
21309 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21310 void *ret = NULL;
21311	unsigned long flags;
21312
21313 if (!ring || ring->magic != DHD_RING_MAGIC) {
21314 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21315 return NULL;
21316 }
21317
21318	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21319 if (ring->type == DHD_RING_TYPE_FIXED) {
21320 ret = __dhd_fixed_ring_get_empty(&ring->fixed);
21321 }
21322 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21323 ret = __dhd_singleidx_ring_get_empty(&ring->single);
21324 }
21325 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21326 return ret;
21327}
21328
21329void *
21330dhd_ring_get_next(void *_ring, void *cur)
21331{
21332 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21333 void *ret = NULL;
21334	unsigned long flags;
21335
21336 if (!ring || ring->magic != DHD_RING_MAGIC) {
21337 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21338 return NULL;
21339 }
21340
21341	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21342	if (ring->type == DHD_RING_TYPE_FIXED) {
21343 ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
21344 }
21345 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21346 ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
21347	}
21348	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21349 return ret;
21350}
21351
21352void *
21353dhd_ring_get_prev(void *_ring, void *cur)
21354{
21355 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21356 void *ret = NULL;
21357	unsigned long flags;
21358
21359 if (!ring || ring->magic != DHD_RING_MAGIC) {
21360 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21361 return NULL;
21362 }
21363
21364	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21365	if (ring->type == DHD_RING_TYPE_FIXED) {
21366 ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
21367 }
21368 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21369 ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
21370	}
21371	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21372 return ret;
21373}
21374
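/*
 * Illustrative usage sketch (not part of the driver): iterate a ring from
 * the oldest element to the newest using the accessors above. Elements are
 * treated as opaque pointers here; the real element type is whatever the
 * ring was allocated with.
 */
static inline void
dhd_ring_example_walk(void *ring)
{
	void *elem = dhd_ring_get_first(ring);

	while (elem) {
		/* consume the element here */
		elem = dhd_ring_get_next(ring, elem);
	}
}
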
21375int
21376dhd_ring_get_cur_size(void *_ring)
21377{
21378 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21379 int cnt = 0;
21380	unsigned long flags;
21381
21382 if (!ring || ring->magic != DHD_RING_MAGIC) {
21383 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21384 return cnt;
21385 }
21386
21387	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21388 if (ring->type == DHD_RING_TYPE_FIXED) {
21389 cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
21390 }
21391	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21392 return cnt;
21393}
21394
21395/* protect the elements between first_ptr and last_ptr (read/write idx when NULL) */
21396void
21397dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
21398{
21399 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21400	unsigned long flags;
21401
21402 if (!ring || ring->magic != DHD_RING_MAGIC) {
21403 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21404 return;
21405 }
21406
21407	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21408	if (ring->type == DHD_RING_TYPE_FIXED) {
21409		__dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
21410	}
21411	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21412}
21413
21414/* free all locks */
21415void
21416dhd_ring_lock_free(void *_ring)
21417{
21418 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21419	unsigned long flags;
21420
21421 if (!ring || ring->magic != DHD_RING_MAGIC) {
21422 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21423 return;
21424 }
21425
21426	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21427 if (ring->type == DHD_RING_TYPE_FIXED) {
21428 __dhd_fixed_ring_lock_free(&ring->fixed);
21429 }
21430	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21431}
21432
21433void *
21434dhd_ring_lock_get_first(void *_ring)
21435{
21436 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21437 void *ret = NULL;
21438	unsigned long flags;
21439
21440 if (!ring || ring->magic != DHD_RING_MAGIC) {
21441 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21442 return NULL;
21443 }
21444
21445	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21446 if (ring->type == DHD_RING_TYPE_FIXED) {
21447 ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
21448 }
21449	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21450 return ret;
21451}
21452
21453void *
21454dhd_ring_lock_get_last(void *_ring)
21455{
21456 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21457 void *ret = NULL;
21458	unsigned long flags;
21459
21460 if (!ring || ring->magic != DHD_RING_MAGIC) {
21461 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21462 return NULL;
21463 }
21464
21465	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21466 if (ring->type == DHD_RING_TYPE_FIXED) {
21467 ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
21468 }
21469	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21470 return ret;
21471}
21472
21473int
21474dhd_ring_lock_get_count(void *_ring)
21475{
21476 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21477 int ret = BCME_ERROR;
21478	unsigned long flags;
21479
21480 if (!ring || ring->magic != DHD_RING_MAGIC) {
21481 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21482 return ret;
21483 }
21484
21485	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21486 if (ring->type == DHD_RING_TYPE_FIXED) {
21487 ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
21488 }
21489	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21490 return ret;
21491}
21492
21493/* free first locked element */
21494void
21495dhd_ring_lock_free_first(void *_ring)
21496{
21497 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21498	unsigned long flags;
21499
21500 if (!ring || ring->magic != DHD_RING_MAGIC) {
21501 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21502 return;
21503 }
21504
21505	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21506 if (ring->type == DHD_RING_TYPE_FIXED) {
21507 __dhd_fixed_ring_lock_free_first(&ring->fixed);
21508 }
21509 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21510}
21511
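/*
 * Illustrative sketch (not part of the driver): pin the currently filled
 * span of a fixed ring with dhd_ring_lock(NULL, NULL), consume the locked
 * elements one by one, then release whatever remains locked.
 */
static inline void
dhd_ring_example_drain_locked(void *ring)
{
	int cnt;
	void *elem;

	dhd_ring_lock(ring, NULL, NULL);
	for (cnt = dhd_ring_lock_get_count(ring); cnt > 0; cnt--) {
		elem = dhd_ring_lock_get_first(ring);
		if (!elem) {
			break;
		}
		/* consume the element here */
		dhd_ring_lock_free_first(ring);
	}
	dhd_ring_lock_free(ring);
}
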
21512void
21513dhd_ring_whole_lock(void *_ring)
21514{
21515 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21516 unsigned long flags;
21517
21518 if (!ring || ring->magic != DHD_RING_MAGIC) {
21519 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21520 return;
21521 }
21522
21523 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21524 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21525 __dhd_singleidx_ring_whole_lock(&ring->single);
21526 }
21527 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21528}
21529
21530void
21531dhd_ring_whole_unlock(void *_ring)
21532{
21533 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21534 unsigned long flags;
21535
21536 if (!ring || ring->magic != DHD_RING_MAGIC) {
21537 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
21538 return;
21539 }
21540
21541 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
21542 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
21543 __dhd_singleidx_ring_whole_unlock(&ring->single);
21544 }
21545 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
21546}
21547
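/*
 * Illustrative sketch (not part of the driver): freeze a single-index ring
 * while snapshotting it, so a concurrent dhd_ring_get_empty() returns NULL
 * instead of overwriting entries, then resume normal operation.
 */
static inline void
dhd_ring_example_snapshot(void *ring)
{
	void *elem;

	dhd_ring_whole_lock(ring);
	for (elem = dhd_ring_get_first(ring); elem != NULL;
		elem = dhd_ring_get_next(ring, elem)) {
		/* copy the element out here */
	}
	dhd_ring_whole_unlock(ring);
}
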
21548#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
21549#define DHD_VFS_INODE(dir) (dir->d_inode)
21550#else
21551#define DHD_VFS_INODE(dir) d_inode(dir)
21552#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
21553
21554#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
21555#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
21556#else
21557#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
21558#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
21559int
21560dhd_file_delete(char *path)
21561{
21562 struct path file_path;
21563 int err;
21564 struct dentry *dir;
21565
21566 err = kern_path(path, 0, &file_path);
21567
21568 if (err < 0) {
21569		DHD_ERROR(("Failed to get kern-path to delete file: %s error: %d\n", path, err));
21570 return err;
21571 }
21572	if (
21573#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
21574 !d_is_file(file_path.dentry) ||
21575#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
21576 d_really_is_negative(file_path.dentry) ||
21577#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
21578#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
21579	FALSE)
21580 {
21581 err = -EINVAL;
21582 } else {
21583 dir = dget_parent(file_path.dentry);
21584
21585 if (!IS_ERR(dir)) {
21586 err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
21587 dput(dir);
21588 } else {
21589 err = PTR_ERR(dir);
21590 }
21591 }
21592
21593 path_put(&file_path);
21594
21595 if (err < 0) {
21596 DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
21597 }
21598
21599 return err;
21600}
21601#ifdef DHD_DUMP_MNGR
21602static int
21603dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
21604{
21605 int i;
21606 int fm_idx = -1;
21607
21608 for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
21609 if (strlen(fm_ptr->elems[i].type_name) == 0) {
21610 fm_idx = i;
21611 break;
21612 }
21613 if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
21614 fm_idx = i;
21615 break;
21616 }
21617 }
21618
21619 if (fm_idx == -1) {
21620 return fm_idx;
21621 }
21622
21623 if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
21624 strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
21625		fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
21626 fm_ptr->elems[fm_idx].file_idx = 0;
21627 }
21628
21629 return fm_idx;
21630}
21631
21632/*
21633 * dhd_dump_file_manage_enqueue - enqueue dump file path
21634 * and delete the oldest file if the file count is at its max.
21635*/
21636void
21637dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
21638{
21639 int fm_idx;
21640 int fp_idx;
21641 dhd_dump_file_manage_t *fm_ptr;
21642 DFM_elem_t *elem;
21643
21644 if (!dhd || !dhd->dump_file_manage) {
21645 DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
21646 __FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
21647 return;
21648 }
21649
21650 fm_ptr = dhd->dump_file_manage;
21651
21652 /* find file_manage idx */
21653 DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
21654 if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
21655 DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
21656 __FUNCTION__, fname));
21657 return;
21658 }
21659
21660 elem = &fm_ptr->elems[fm_idx];
21661 fp_idx = elem->file_idx;
21662 DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
21663 __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
21664
21665 /* delete oldest file */
21666 if (strlen(elem->file_path[fp_idx]) != 0) {
21667 if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
21668 DHD_ERROR(("%s(): Failed to delete file: %s\n",
21669 __FUNCTION__, elem->file_path[fp_idx]));
21670 } else {
21671			DHD_ERROR(("%s(): Succeeded to delete file: %s\n",
21672 __FUNCTION__, elem->file_path[fp_idx]));
21673 }
21674 }
21675
21676 /* save dump file path */
21677 strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
21678	elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
21679
21680 /* change file index to next file index */
21681 elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
21682}
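/*
 * Illustrative sketch (not part of the driver): after writing a new dump
 * file, register its path so older files of the same type are rotated out
 * once DHD_DUMP_FILE_COUNT_MAX is reached. The path and type name below
 * are example values only.
 */
static inline void
dhd_dump_file_manage_example(dhd_pub_t *dhdp)
{
	dhd_dump_file_manage_enqueue(dhdp, "/data/vendor/wifi/mem_dump.bin", "mem_dump");
}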
21683#endif /* DHD_DUMP_MNGR */
21684
21685#ifdef DHD_MAP_LOGGING
21686/* Will be called from SMMU fault handler */
21687void
21688dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
21689{
21690 dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
21691 uint32 irq = (uint32)-1;
21692
21693 DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
21694	DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx\n", __FUNCTION__, axid, fault_addr));
21695	dhdp->smmu_fault_occurred = TRUE;
21696#ifdef DNGL_AXI_ERROR_LOGGING
21697 dhdp->axi_error = TRUE;
21698 dhdp->axi_err_dump->axid = axid;
21699 dhdp->axi_err_dump->fault_address = fault_addr;
21700#endif /* DNGL_AXI_ERROR_LOGGING */
21701
21702 /* Disable PCIe IRQ */
21703 dhdpcie_get_pcieirq(dhdp->bus, &irq);
21704 if (irq != (uint32)-1) {
21705 disable_irq_nosync(irq);
21706 }
21707
21708	/* Take debug information first */
21709	DHD_OS_WAKE_LOCK(dhdp);
21710	dhd_prot_smmu_fault_dump(dhdp);
21711	DHD_OS_WAKE_UNLOCK(dhdp);
21712
21713 /* Take AXI information if possible */
21714#ifdef DNGL_AXI_ERROR_LOGGING
21715#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
21716 dhd_axi_error_dispatch(dhdp);
21717#else
21718 dhd_axi_error(dhdp);
21719#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
21720#endif /* DNGL_AXI_ERROR_LOGGING */
21721}
21722EXPORT_SYMBOL(dhd_smmu_fault_handler);
21723#endif /* DHD_MAP_LOGGING */
21724
21725#ifdef DHD_WIFI_SHUTDOWN
21726void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
21727{
21728 dhd_pub_t *dhd_pub = NULL;
21729 dhd_info_t *dhd_info = NULL;
21730 dhd_if_t *dhd_if = NULL;
21731
21732 DHD_ERROR(("%s enter\n", __FUNCTION__));
21733 dhd_pub = g_dhd_pub;
21734
21735 if (dhd_os_check_if_up(dhd_pub)) {
21736 dhd_info = (dhd_info_t *)dhd_pub->info;
21737 dhd_if = dhd_info->iflist[0];
21738 ASSERT(dhd_if);
21739 ASSERT(dhd_if->net);
21740 if (dhd_if && dhd_if->net) {
21741 dhd_stop(dhd_if->net);
21742 }
21743 }
21744}
21745#endif /* DHD_WIFI_SHUTDOWN */
21746
21747#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
21748int
21749compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
21750{
21751 return (int)kernel_read(file, addr, (size_t)count, &offset);
21752}
21753int
21754compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset)
21755{
21756 return (int)kernel_write(file, addr, count, offset);
21757}
21758#else
21759int
21760compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
21761{
21762 return kernel_read(file, offset, addr, count);
21763}
21764int
21765compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset)
21766{
21767 return (int)vfs_write(file, addr, count, offset);
21768}
21769#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
21770
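/*
 * Illustrative sketch (not part of the driver): read the first bytes of a
 * file through the version-independent wrapper above. filp_open/filp_close
 * are the standard kernel helpers; the function name and path handling are
 * example-only.
 */
static inline int
dhd_example_read_head(const char *path, char *buf, unsigned long len)
{
	struct file *fp = filp_open(path, O_RDONLY, 0);
	int ret;

	if (IS_ERR(fp)) {
		return (int)PTR_ERR(fp);
	}
	ret = compat_kernel_read(fp, 0, buf, len);
	filp_close(fp, NULL);
	return ret;
}
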
21771#ifdef DHDTCPSYNC_FLOOD_BLK
21772static void dhd_blk_tsfl_handler(struct work_struct * work)
21773{
21774 dhd_if_t *ifp = NULL;
21775 dhd_pub_t *dhdp = NULL;
21776 /* Ignore compiler warnings due to -Werror=cast-qual */
21777#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
21778#pragma GCC diagnostic push
21779#pragma GCC diagnostic ignored "-Wcast-qual"
21780#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
21781 ifp = container_of(work, dhd_if_t, blk_tsfl_work);
21782#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
21783#pragma GCC diagnostic pop
21784#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
21785 if (ifp) {
21786 dhdp = &ifp->info->pub;
21787 if (dhdp) {
21788 if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
21789 (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
21790 DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
21791 wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
21792 } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
21793 (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
21794				DHD_ERROR(("Disconnect due to TCP SYNC FLOOD ATTACK\n"));
21795 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
21796 }
21797 }
21798 }
21799}
21800void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
21801{
21802 ifp->tsync_rcvd = 0;
21803 ifp->tsyncack_txed = 0;
21804 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
21805}
21806void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
21807{
21808 dhd_if_t *ifp = NULL;
21809 if (dev) {
21810 ifp = DHD_DEV_IFP(dev);
21811 }
21812 if (ifp) {
21813 ifp->tsync_rcvd = 0;
21814 ifp->tsyncack_txed = 0;
21815 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
21816 }
21817}
21818#endif /* DHDTCPSYNC_FLOOD_BLK */
21819
21820#ifdef DHD_4WAYM4_FAIL_DISCONNECT
21821static void dhd_m4_state_handler(struct work_struct *work)
21822{
21823 dhd_if_t *ifp = NULL;
21824 /* Ignore compiler warnings due to -Werror=cast-qual */
21825#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
21826#pragma GCC diagnostic push
21827#pragma GCC diagnostic ignored "-Wcast-qual"
21828#endif // endif
21829 struct delayed_work *dw = to_delayed_work(work);
21830 ifp = container_of(dw, dhd_if_t, m4state_work);
21831#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
21832#pragma GCC diagnostic pop
21833#endif // endif
21834
21835 if (ifp && ifp->net &&
21836 (OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
21837 DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
21838 ifp->net->name));
21839 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
21840 }
21841}
21842
21843void
21844dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
21845{
21846 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
21847 struct ether_header *eh;
21848 uint16 type;
21849
21850 if (!success) {
21851 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
21852
21853 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
21854 type = ntoh16(eh->ether_type);
21855 if (type == ETHER_TYPE_802_1X) {
21856 if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
21857 dhd_if_t *ifp = NULL;
21858 ifp = dhd->iflist[ifidx];
21859 if (!ifp || !ifp->net) {
21860 return;
21861 }
21862
21863 DHD_INFO(("%s: M4 TX failed on %d.\n",
21864 __FUNCTION__, ifidx));
21865
21866 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
21867 schedule_delayed_work(&ifp->m4state_work,
21868 msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
21869 }
21870 }
21871 }
21872}
21873
21874void
21875dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
21876{
21877 dhd_info_t *dhdinfo;
21878 dhd_if_t *ifp;
21879
21880 if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
21881 DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
21882 return;
21883 }
21884
21885 dhdinfo = (dhd_info_t *)(dhdp->info);
21886 if (!dhdinfo) {
21887 DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
21888 return;
21889 }
21890
21891 ifp = dhdinfo->iflist[ifidx];
21892 if (ifp) {
21893 cancel_delayed_work_sync(&ifp->m4state_work);
21894 }
21895}
21896#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
21897
21898#ifdef DHD_HP2P
21899unsigned long
21900dhd_os_hp2plock(dhd_pub_t *pub)
21901{
21902 dhd_info_t *dhd;
21903 unsigned long flags = 0;
21904
21905 dhd = (dhd_info_t *)(pub->info);
21906
21907 if (dhd) {
21908 spin_lock_irqsave(&dhd->hp2p_lock, flags);
21909 }
21910
21911 return flags;
21912}
21913
21914void
21915dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
21916{
21917 dhd_info_t *dhd;
21918
21919 dhd = (dhd_info_t *)(pub->info);
21920
21921 if (dhd) {
21922 spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
21923 }
21924}
21925#endif /* DHD_HP2P */
21926#ifdef DNGL_AXI_ERROR_LOGGING
21927static void
21928dhd_axi_error_dump(void *handle, void *event_info, u8 event)
21929{
21930 dhd_info_t *dhd = (dhd_info_t *)handle;
21931 dhd_pub_t *dhdp = NULL;
21932
21933 if (!dhd) {
21934 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21935 goto exit;
21936 }
21937
21938 dhdp = &dhd->pub;
21939 if (!dhdp) {
21940 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
21941 goto exit;
21942 }
21943
21944 /**
21945 * First save axi error information to a file
21946 * because panic should happen right after this.
21947 * After dhd reset, dhd reads the file, and do hang event process
21948 * to send axi error stored on the file to Bigdata server
21949 */
21950 if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
21951 DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
21952 __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
21953 }
21954
21955 DHD_OS_WAKE_LOCK(dhdp);
21956#ifdef DHD_FW_COREDUMP
21957#ifdef DHD_SSSR_DUMP
21958 dhdp->collect_sssr = TRUE;
21959#endif /* DHD_SSSR_DUMP */
21960 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
21961 dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
21962#endif /* DHD_FW_COREDUMP */
21963 DHD_OS_WAKE_UNLOCK(dhdp);
21964
21965exit:
21966 /* Trigger kernel panic after taking necessary dumps */
21967 BUG_ON(1);
21968}
21969
21970void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
21971{
21972 DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
21973 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21974 type, DHD_WQ_WORK_AXI_ERROR_DUMP,
21975 dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
21976}
21977#endif /* DNGL_AXI_ERROR_LOGGING */
21978
21979#ifdef BCMPCIE
21980static void
21981dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
21982{
21983 dhd_info_t *dhd = handle;
21984 dhd_pub_t *dhdp = NULL;
21985
21986 if (!dhd) {
21987 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21988 BUG_ON(1);
21989 return;
21990 }
21991
21992 dhdp = &dhd->pub;
21993 dhdpcie_cto_recovery_handler(dhdp);
21994}
21995
21996void
21997dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
21998{
21999 DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
22000 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
22001 NULL, DHD_WQ_WORK_CTO_RECOVERY,
22002 dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
22003}
22004#endif /* BCMPCIE */
22005
22006#ifdef SUPPORT_SET_TID
22007/*
22008 * Set custom TID value for UDP frame based on UID value.
22009 * This will be triggered by android private command below.
22010 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
22011 * Mode 0(SET_TID_OFF) : Disable changing TID
22012 * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames
22013 * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
22014*/
22015void
22016dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
22017{
22018 struct ether_header *eh = NULL;
22019 struct sock *sk = NULL;
22020 uint8 *pktdata = NULL;
22021 uint8 *ip_hdr = NULL;
22022 uint8 cur_prio;
22023 uint8 prio;
22024 uint32 uid;
22025
22026 if (dhdp->tid_mode == SET_TID_OFF) {
22027 return;
22028 }
22029
22030 pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
22031 eh = (struct ether_header *) pktdata;
22032 ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
22033
22034 if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
22035 return;
22036 }
22037
22038 cur_prio = PKTPRIO(pkt);
22039 prio = dhdp->target_tid;
22040 uid = dhdp->target_uid;
22041
22042 if ((cur_prio == prio) ||
22043 (cur_prio != PRIO_8021D_BE)) {
22044 return;
22045 }
22046
22047 sk = ((struct sk_buff*)(pkt))->sk;
22048
22049 if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
22050 (sk && (uid == __kuid_val(sock_i_uid(sk))))) {
22051 PKTSETPRIO(pkt, prio);
22052 }
22053}
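
/*
 * Example (illustrative values only): the Android private command
 * "DRIVER SET_TID 2 10010 5" selects mode SET_TID_BASED_ON_UID so that
 * best-effort UDP frames owned by UID 10010 are retagged with TID 5;
 * "DRIVER SET_TID 1 0 5" would retag all best-effort UDP frames, and
 * "DRIVER SET_TID 0 0 0" disables the feature again.
 */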
22054#endif /* SUPPORT_SET_TID */
22055
22056void *dhd_get_pub(struct net_device *dev)
22057{
22058 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
22059 if (dhdinfo)
22060 return (void *)&dhdinfo->pub;
22061 else {
22062 printf("%s: null dhdinfo\n", __FUNCTION__);
22063 return NULL;
22064 }
22065}
22066
22067void *dhd_get_conf(struct net_device *dev)
22068{
22069 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
22070 if (dhdinfo)
22071 return (void *)dhdinfo->pub.conf;
22072 else {
22073 printf("%s: null dhdinfo\n", __FUNCTION__);
22074 return NULL;
22075 }
22076}
22077
22078bool dhd_os_wd_timer_enabled(void *bus)
22079{
22080 dhd_pub_t *pub = bus;
22081 dhd_info_t *dhd = (dhd_info_t *)pub->info;
22082
22083 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
22084 if (!dhd) {
22085 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
22086 return FALSE;
22087 }
22088 return dhd->wd_timer_valid;
22089}
22090
22091#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
22092/* Automatically add/delete an interface to/from the bridge device that the primary device is in */
22093static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
22094{
22095 struct net_device *primary_ndev = NULL, *br_dev = NULL;
22096 int cmd;
22097 struct ifreq ifr;
22098
22099 /* add new interface to bridge dev */
22100 if (dev) {
22101 int found = 0, i;
22102 DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
22103 for (i = 0 ; i < ifidx; i++) {
22104 DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
22105 /* search the primary interface */
22106 if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
22107 primary_ndev = dhd->pub.info->iflist[i]->net;
22108				DHD_ERROR(("%d is primary dev %s\n", i, primary_ndev->name));
22109 found = 1;
22110 break;
22111 }
22112 }
22113 if (found == 0) {
22114 DHD_ERROR(("Can not find primary dev %s\n", dev->name));
22115 return;
22116 }
22117 cmd = SIOCBRADDIF;
22118 ifr.ifr_ifindex = dev->ifindex;
22119 } else { /* del interface from bridge dev */
22120 primary_ndev = dhd->pub.info->iflist[ifidx]->net;
22121 cmd = SIOCBRDELIF;
22122 ifr.ifr_ifindex = primary_ndev->ifindex;
22123 }
22124 /* if primary net device is bridged */
22125 if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
22126 rtnl_lock();
22127 /* get bridge device */
22128 br_dev = netdev_master_upper_dev_get(primary_ndev);
22129 if (br_dev) {
22130 const struct net_device_ops *ops = br_dev->netdev_ops;
22131 DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
22132 if (ops) {
22133 if (cmd == SIOCBRADDIF) {
22134 DHD_ERROR(("br call ndo_add_slave\n"));
22135 ops->ndo_add_slave(br_dev, dev);
22136 /* Also bring wds0.x interface up automatically */
22137 dev_change_flags(dev, dev->flags | IFF_UP);
22138 }
22139 else {
22140 DHD_ERROR(("br call ndo_del_slave\n"));
22141 ops->ndo_del_slave(br_dev, primary_ndev);
22142 }
22143 }
22144 }
22145 else {
22146 DHD_ERROR(("no br dev\n"));
22147 }
22148 rtnl_unlock();
22149 }
22150 else {
22151 DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
22152 }
22153}
22154#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */