bcmdhd: if driver is not yet initialized, wait and retry
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.101.10.240.x / dhd_linux.c
1/*
2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface.
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
4 *
5 * Copyright (C) 2020, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 *
22 * <<Broadcom-WL-IPTag/Open:>>
23 *
24 * $Id$
25 */
26
27#include <typedefs.h>
28#include <linuxver.h>
29#include <osl.h>
30#ifdef SHOW_LOGTRACE
31#include <linux/syscalls.h>
32#include <event_log.h>
33#endif /* SHOW_LOGTRACE */
34
35#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE)
36#include <bcmmsgbuf.h>
37#endif /* PCIE_FULL_DONGLE || SHOW_LOGTRACE */
38
39#include <linux/init.h>
40#include <linux/kernel.h>
41#include <linux/slab.h>
42#include <linux/skbuff.h>
43#include <linux/netdevice.h>
44#include <linux/inetdevice.h>
45#include <linux/rtnetlink.h>
46#include <linux/etherdevice.h>
47#include <linux/random.h>
48#include <linux/spinlock.h>
49#include <linux/ethtool.h>
50#include <linux/fcntl.h>
51#include <linux/fs.h>
52#include <linux/ip.h>
53#include <linux/reboot.h>
54#include <linux/notifier.h>
55#include <linux/irq.h>
56#if defined(CONFIG_TIZEN)
57#include <linux/net_stat_tizen.h>
58#endif /* CONFIG_TIZEN */
59#include <net/addrconf.h>
60#ifdef ENABLE_ADAPTIVE_SCHED
61#include <linux/cpufreq.h>
62#endif /* ENABLE_ADAPTIVE_SCHED */
63#include <linux/rtc.h>
64#include <linux/namei.h>
65#include <asm/uaccess.h>
66#include <asm/unaligned.h>
67#include <dhd_linux_priv.h>
68
69#include <epivers.h>
70#include <bcmutils.h>
71#include <bcmendian.h>
72#include <bcmdevs.h>
73#include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
74#include <bcmiov.h>
75#include <bcmstdlib_s.h>
76
77#include <ethernet.h>
78#include <bcmevent.h>
79#include <vlan.h>
80#include <802.3.h>
81
82#include <dhd_linux_wq.h>
83#include <dhd.h>
84#include <dhd_linux.h>
85#include <dhd_linux_pktdump.h>
86#ifdef DHD_WET
87#include <dhd_wet.h>
88#endif /* DHD_WET */
89#ifdef PCIE_FULL_DONGLE
90#include <dhd_flowring.h>
91#endif
92#include <dhd_bus.h>
93#include <dhd_proto.h>
94#include <dhd_config.h>
95#ifdef WL_ESCAN
96#include <wl_escan.h>
97#endif
98#include <dhd_dbg.h>
99#include <dhd_dbg_ring.h>
100#include <dhd_debug.h>
101#ifdef CONFIG_HAS_WAKELOCK
102#include <linux/wakelock.h>
103#endif
104#if defined(WL_CFG80211)
105#include <wl_cfg80211.h>
106#ifdef WL_BAM
107#include <wl_bam.h>
108#endif /* WL_BAM */
109#endif /* WL_CFG80211 */
110#ifdef PNO_SUPPORT
111#include <dhd_pno.h>
112#endif
113#ifdef RTT_SUPPORT
114#include <dhd_rtt.h>
115#endif
116
117#include <dhd_linux_sock_qos.h>
118
119#ifdef CSI_SUPPORT
120#include <dhd_csi.h>
121#endif /* CSI_SUPPORT */
122
123#ifdef CONFIG_COMPAT
124#include <linux/compat.h>
125#endif
126
127#ifdef CONFIG_ARCH_EXYNOS
128#ifndef SUPPORT_EXYNOS7420
129#include <linux/exynos-pci-ctrl.h>
130#endif /* SUPPORT_EXYNOS7420 */
131#endif /* CONFIG_ARCH_EXYNOS */
132
133#ifdef DHD_L2_FILTER
134#include <bcmicmp.h>
135#include <bcm_l2_filter.h>
136#include <dhd_l2_filter.h>
137#endif /* DHD_L2_FILTER */
138
139#ifdef DHD_PSTA
140#include <dhd_psta.h>
141#endif /* DHD_PSTA */
142
143#ifdef AMPDU_VO_ENABLE
144/* XXX: Enabling VO AMPDU to reduce FER */
145#include <802.1d.h>
146#endif /* AMPDU_VO_ENABLE */
147
148#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
149#include <dhd_ip.h>
150#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
151#include <dhd_daemon.h>
152#ifdef DHD_PKT_LOGGING
153#include <dhd_pktlog.h>
154#endif /* DHD_PKT_LOGGING */
155#ifdef DHD_4WAYM4_FAIL_DISCONNECT
156#include <eapol.h>
157#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
158#ifdef DHD_DEBUG_PAGEALLOC
159typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
160void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
161extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
162#endif /* DHD_DEBUG_PAGEALLOC */
163
164#ifdef ENABLE_DHD_GRO
165#include <net/sch_generic.h>
166#endif /* ENABLE_DHD_GRO */
167
168#define IP_PROT_RESERVED 0xFF
169
170#ifdef DHD_MQ
171#define MQ_MAX_QUEUES AC_COUNT
172#define MQ_MAX_CPUS 16
173int enable_mq = TRUE;
174module_param(enable_mq, int, 0644);
175int mq_select_disable = FALSE;
176#endif
177
178#if defined(DHD_LB)
179#if !defined(PCIE_FULL_DONGLE)
180#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
181#endif /* !PCIE_FULL_DONGLE */
182#endif /* DHD_LB */
183
184#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
185 defined(DHD_LB_STATS)
186#if !defined(DHD_LB)
187#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
188#endif /* !DHD_LB */
189#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
190
191#ifdef DHD_4WAYM4_FAIL_DISCONNECT
192static void dhd_m4_state_handler(struct work_struct * work);
193#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
194
195#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
196static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
197#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
198
199#ifdef FIX_CPU_MIN_CLOCK
200#include <linux/pm_qos.h>
201#endif /* FIX_CPU_MIN_CLOCK */
202
203#ifdef ENABLE_ADAPTIVE_SCHED
204#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
205#ifndef CUSTOM_CPUFREQ_THRESH
206#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
207#endif /* CUSTOM_CPUFREQ_THRESH */
208#endif /* ENABLE_ADAPTIVE_SCHED */
209
210/* enable HOSTIP cache update from the host side when an eth0:N is up */
211#define AOE_IP_ALIAS_SUPPORT 1
212
213#ifdef PROP_TXSTATUS
214#include <wlfc_proto.h>
215#include <dhd_wlfc.h>
216#endif
217
218#include <wl_android.h>
219
220/* Maximum STA per radio */
221#define DHD_MAX_STA 32
222
223#ifdef CUSTOMER_HW_AMLOGIC
224#include <linux/amlogic/wifi_dt.h>
225#endif
226
227#ifdef DHD_EVENT_LOG_FILTER
228#include <dhd_event_log_filter.h>
229#endif /* DHD_EVENT_LOG_FILTER */
230
231#ifdef DHDTCPSYNC_FLOOD_BLK
232static void dhd_blk_tsfl_handler(struct work_struct * work);
233#endif /* DHDTCPSYNC_FLOOD_BLK */
234
235#ifdef WL_NATOE
236#include <dhd_linux_nfct.h>
237#endif /* WL_NATOE */
238
239#ifdef DHD_TX_PROFILE
240#include <bcmarp.h>
241#include <bcmicmp.h>
242#include <bcmudp.h>
243#include <bcmproto.h>
244#endif /* defined(DHD_TX_PROFILE) */
245
246#ifdef SET_RANDOM_MAC_SOFTAP
247#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
248#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
249#endif
250static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
251#endif /* SET_RANDOM_MAC_SOFTAP */
252
253/* XXX: where does this belong? */
254/* XXX: this needs to reviewed for host OS. */
255const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
256const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
257#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
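/*
 * Worked example of the two-stage lookup above: an 802.1D priority is first
 * mapped to a hardware FIFO by prio2fifo[], and the FIFO to a WME access
 * category by wme_fifo2ac[], e.g.
 *   WME_PRIO2AC(6) = wme_fifo2ac[prio2fifo[6]] = wme_fifo2ac[3] = 3
 *   WME_PRIO2AC(1) = wme_fifo2ac[prio2fifo[1]] = wme_fifo2ac[0] = 0
 */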
258
259#ifdef ARP_OFFLOAD_SUPPORT
260void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
261static int dhd_inetaddr_notifier_call(struct notifier_block *this,
262 unsigned long event, void *ptr);
263static struct notifier_block dhd_inetaddr_notifier = {
264 .notifier_call = dhd_inetaddr_notifier_call
265};
266/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
267 * created in the kernel notifier link list (with 'next' pointing to itself)
268 */
269static bool dhd_inetaddr_notifier_registered = FALSE;
270#endif /* ARP_OFFLOAD_SUPPORT */
271
272#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
273int dhd_inet6addr_notifier_call(struct notifier_block *this,
274 unsigned long event, void *ptr);
275static struct notifier_block dhd_inet6addr_notifier = {
276 .notifier_call = dhd_inet6addr_notifier_call
277};
278/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
279 * created in kernel notifier link list (with 'next' pointing to itself)
280 */
281static bool dhd_inet6addr_notifier_registered = FALSE;
282#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
283
284#if defined(CONFIG_PM_SLEEP)
285#include <linux/suspend.h>
286volatile bool dhd_mmc_suspend = FALSE;
287DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
288#ifdef ENABLE_WAKEUP_PKT_DUMP
289volatile bool dhd_mmc_wake = FALSE;
290long long temp_raw;
291#endif /* ENABLE_WAKEUP_PKT_DUMP */
292#endif /* defined(CONFIG_PM_SLEEP) */
293
294#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
295extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
296#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN) */
297static void dhd_hang_process(struct work_struct *work_data);
298MODULE_LICENSE("GPL and additional rights");
299
300#if defined(MULTIPLE_SUPPLICANT)
301#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
302DEFINE_MUTEX(_dhd_mutex_lock_);
303#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
304#endif
305static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force);
306
307#include <dhd_bus.h>
308
309/* XXX Set up an MTU change notifier per linux/notifier.h? */
310#ifndef PROP_TXSTATUS
311#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
312#else
313#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
314#endif
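/*
 * Example sizing with illustrative numbers: for a 1500-byte MTU, a 14-byte
 * Ethernet header and an assumed pub.hdrlen of 0, DBUS_RX_BUFFER_SIZE_DHD
 * evaluates to 1514 bytes without PROP_TXSTATUS and 1642 bytes with it;
 * the extra 128 bytes give headroom for PROP_TXSTATUS (wlfc) signaling data.
 */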
315
316#ifdef PROP_TXSTATUS
317extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
318extern void dhd_wlfc_plat_init(void *dhd);
319extern void dhd_wlfc_plat_deinit(void *dhd);
320#endif /* PROP_TXSTATUS */
321#ifdef USE_DYNAMIC_F2_BLKSIZE
322extern uint sd_f2_blocksize;
323extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
324#endif /* USE_DYNAMIC_F2_BLKSIZE */
325
326/* Linux wireless extension support */
327#if defined(WL_WIRELESS_EXT)
328#include <wl_iw.h>
329#endif /* defined(WL_WIRELESS_EXT) */
330
331#ifdef CONFIG_PARTIALSUSPEND_SLP
332/* XXX SLP uses a different earlysuspend header file and some different
333 * functions, but the meaning is largely the same as on Android
334 */
335#include <linux/partialsuspend_slp.h>
336#define CONFIG_HAS_EARLYSUSPEND
337#define DHD_USE_EARLYSUSPEND
338#define register_early_suspend register_pre_suspend
339#define unregister_early_suspend unregister_pre_suspend
340#define early_suspend pre_suspend
341#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
342#else
343#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
344#include <linux/earlysuspend.h>
345#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
346#endif /* CONFIG_PARTIALSUSPEND_SLP */
347
348#ifdef CONFIG_IRQ_HISTORY
349#include <linux/power/irq_history.h>
350#endif /* CONFIG_IRQ_HISTORY */
351
352#include <linux/nl80211.h>
353
354#if defined(PKT_FILTER_SUPPORT) && defined(APF)
355static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
356 u8* program, uint32 program_len);
357static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
358 uint32 mode, uint32 enable);
359static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
360#endif /* PKT_FILTER_SUPPORT && APF */
361
362#ifdef DHD_FW_COREDUMP
363static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
364#endif /* DHD_FW_COREDUMP */
365
366#ifdef DHD_LOG_DUMP
367
368struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
369
370/* Only header for log dump buffers is stored in array
371 * header for sections like 'dhd dump', 'ext trap'
372 * etc, is not in the array, because they are not log
373 * ring buffers
374 */
375dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
376 {GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
377 {PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
378 {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
379};
380static int dld_buf_size[DLD_BUFFER_NUM] = {
381 LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
382 LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
383 LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
384};
385
386static void dhd_log_dump_init(dhd_pub_t *dhd);
387static void dhd_log_dump_deinit(dhd_pub_t *dhd);
388static void dhd_log_dump(void *handle, void *event_info, u8 event);
389static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
390static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
391static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
392#endif /* DHD_LOG_DUMP */
393
394#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
395#include <linux/workqueue.h>
396#include <linux/pm_runtime.h>
397#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
398
399#ifdef DHD_DEBUG_UART
400#include <linux/kmod.h>
401#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
402static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
403static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
404#endif /* DHD_DEBUG_UART */
405
406static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
407static struct notifier_block dhd_reboot_notifier = {
408 .notifier_call = dhd_reboot_callback,
409 .priority = 1,
410};
411
412#ifdef BCMPCIE
413static int is_reboot = 0;
414#endif /* BCMPCIE */
415
416dhd_pub_t *g_dhd_pub = NULL;
417
418#if defined(BT_OVER_SDIO)
419#include "dhd_bt_interface.h"
420#endif /* defined (BT_OVER_SDIO) */
421
422#ifdef WL_STATIC_IF
423bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
424#endif /* WL_STATIC_IF */
425
426atomic_t exit_in_progress = ATOMIC_INIT(0);
427
428static void dhd_process_daemon_msg(struct sk_buff *skb);
429static void dhd_destroy_to_notifier_skt(void);
430static int dhd_create_to_notifier_skt(void);
431static struct sock *nl_to_event_sk = NULL;
432int sender_pid = 0;
433
434#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
435struct netlink_kernel_cfg dhd_netlink_cfg = {
436 .groups = 1,
437 .input = dhd_process_daemon_msg,
438};
439#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
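/*
 * A minimal sketch (not the actual implementation) of how
 * dhd_create_to_notifier_skt() can bring up a netlink socket with the
 * dhd_netlink_cfg above on >= 3.6 kernels; the netlink unit used here
 * (NETLINK_USERSOCK) is an assumption for illustration.
 */
#if 0 /* example only */
static int
dhd_create_to_notifier_skt(void)
{
	nl_to_event_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
		&dhd_netlink_cfg);
	return (nl_to_event_sk == NULL) ? BCME_ERROR : BCME_OK;
}
#endif /* example only */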
440
441#if defined(BT_OVER_SDIO)
442/* Flag to indicate if driver is initialized */
443uint dhd_driver_init_done = TRUE;
444#else
445/* Flag to indicate if driver is initialized */
446uint dhd_driver_init_done = FALSE;
447#endif
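/*
 * This flag is what the commit subject ("if driver is not yet initialized,
 * wait and retry") revolves around: callers that race with module init can
 * poll it with a bounded retry loop before touching the bus. A minimal
 * sketch follows; the helper name, retry count and sleep interval are
 * illustrative assumptions, not the actual patch.
 */
#if 0 /* example only */
static int
dhd_wait_for_driver_init(void)
{
	int retry = 100;	/* assumed bound: 100 x 10 ms = 1 s */

	while (!dhd_driver_init_done && retry-- > 0) {
		OSL_SLEEP(10);	/* ms */
	}
	return dhd_driver_init_done ? BCME_OK : BCME_NOTREADY;
}
#endif /* example only */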
448/* Flag to indicate if we should download firmware on driver load */
449uint dhd_download_fw_on_driverload = TRUE;
450
451/* Definitions providing the paths to the firmware and nvram,
452 * e.g. nvram_path[MOD_PARAM_PATHLEN] = "/projects/wlan/nvram.txt"
453 */
454char firmware_path[MOD_PARAM_PATHLEN];
455char nvram_path[MOD_PARAM_PATHLEN];
456char clm_path[MOD_PARAM_PATHLEN];
457char config_path[MOD_PARAM_PATHLEN];
458#ifdef DHD_UCODE_DOWNLOAD
459char ucode_path[MOD_PARAM_PATHLEN];
460#endif /* DHD_UCODE_DOWNLOAD */
461
462module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
463
464/* backup buffer for firmware and nvram path */
465char fw_bak_path[MOD_PARAM_PATHLEN];
466char nv_bak_path[MOD_PARAM_PATHLEN];
467
468/* information string to keep firmware, chip, and chip revision version info visible in the log */
469char info_string[MOD_PARAM_INFOLEN];
470module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
471int op_mode = 0;
472int disable_proptx = 0;
473module_param(op_mode, int, 0644);
474extern int wl_control_wl_start(struct net_device *dev);
475#if defined(BCMLXSDMMC) || defined(BCMDBUS)
476struct semaphore dhd_registration_sem;
477#endif /* BCMLXSDMMC || BCMDBUS */
478void dhd_generate_rand_mac_addr(struct ether_addr *ea_addr);
479
480#ifdef DHD_LOG_DUMP
481int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
482module_param(logdump_max_filesize, int, 0644);
483int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
484module_param(logdump_max_bufsize, int, 0644);
485int logdump_periodic_flush = FALSE;
486module_param(logdump_periodic_flush, int, 0644);
487#ifdef EWP_ECNTRS_LOGGING
488int logdump_ecntr_enable = TRUE;
489#else
490int logdump_ecntr_enable = FALSE;
491#endif /* EWP_ECNTRS_LOGGING */
492module_param(logdump_ecntr_enable, int, 0644);
493#ifdef EWP_RTT_LOGGING
494int logdump_rtt_enable = TRUE;
495#else
496int logdump_rtt_enable = FALSE;
497#endif /* EWP_RTT_LOGGING */
498int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
499#endif /* DHD_LOG_DUMP */
500
501#ifdef EWP_EDL
502int host_edl_support = TRUE;
503module_param(host_edl_support, int, 0644);
504#endif
505
506/* deferred handlers */
507static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
508static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
509static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
510static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
511#ifdef WL_NATOE
512static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
513static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
514#endif /* WL_NATOE */
515
516#ifdef DHD_UPDATE_INTF_MAC
517static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
518#endif /* DHD_UPDATE_INTF_MAC */
519#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
520static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
521#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
522#ifdef WL_CFG80211
523extern void dhd_netdev_free(struct net_device *ndev);
524#endif /* WL_CFG80211 */
525static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
526
527#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
528static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
529#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
530
531#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
532/* update rx_pkt_chainable state of dhd interface */
533static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
534#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
535
536/* Error bits */
537module_param(dhd_msg_level, int, 0);
538#if defined(WL_WIRELESS_EXT)
539module_param(iw_msg_level, int, 0);
540#endif
541#ifdef WL_CFG80211
542module_param(wl_dbg_level, int, 0);
543#endif
544module_param(android_msg_level, int, 0);
545module_param(config_msg_level, int, 0);
546
547#ifdef ARP_OFFLOAD_SUPPORT
548/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
549/* XXX ARP HOST Auto Reply can cause a dongle trap in a VSDB situation */
550/* XXX ARP OL SNOOP can be used for better quality */
551
552#ifdef ENABLE_ARP_SNOOP_MODE
553uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
554 ARP_OL_UPDATE_HOST_CACHE);
555#else
556uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
557#endif /* ENABLE_ARP_SNOOP_MODE */
558
559module_param(dhd_arp_mode, uint, 0);
560#endif /* ARP_OFFLOAD_SUPPORT */
561
562/* Disable Prop tx */
563module_param(disable_proptx, int, 0644);
564/* load firmware and/or nvram values from the filesystem */
565module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
566module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
567module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
568#ifdef DHD_UCODE_DOWNLOAD
569module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
570#endif /* DHD_UCODE_DOWNLOAD */
571
572/* wl event forwarding */
573#ifdef WL_EVENT_ENAB
574uint wl_event_enable = true;
575#else
576uint wl_event_enable = false;
577#endif /* WL_EVENT_ENAB */
578module_param(wl_event_enable, uint, 0660);
579
580/* forward firmware logtrace packets to the network stack */
581#ifdef LOGTRACE_PKT_SENDUP
582uint logtrace_pkt_sendup = true;
583#else
584uint logtrace_pkt_sendup = false;
585#endif /* LOGTRACE_PKT_SENDUP */
586module_param(logtrace_pkt_sendup, uint, 0660);
587
588/* Watchdog interval */
589/* extend watchdog expiration to 2 seconds when DPC is running */
590#define WATCHDOG_EXTEND_INTERVAL (2000)
591
592uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
593module_param(dhd_watchdog_ms, uint, 0);
594
595#ifdef DHD_PCIE_RUNTIMEPM
596uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
597#endif /* DHD_PCIE_RUNTIMEPM */
598#if defined(DHD_DEBUG)
599/* Console poll interval */
600uint dhd_console_ms = 0; /* XXX andrey by default no fw msg prints */
601module_param(dhd_console_ms, uint, 0644);
602#else
603uint dhd_console_ms = 0;
604#endif /* DHD_DEBUG */
605
606uint dhd_slpauto = TRUE;
607module_param(dhd_slpauto, uint, 0);
608
609#ifdef PKT_FILTER_SUPPORT
610/* Global Pkt filter enable control */
611uint dhd_pkt_filter_enable = TRUE;
612module_param(dhd_pkt_filter_enable, uint, 0);
613#endif
614
615/* Pkt filter init setup */
616uint dhd_pkt_filter_init = 0;
617module_param(dhd_pkt_filter_init, uint, 0);
618
619/* Pkt filter mode control */
620#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
621uint dhd_master_mode = FALSE;
622#else
623uint dhd_master_mode = FALSE;
624#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
625module_param(dhd_master_mode, uint, 0);
626
627int dhd_watchdog_prio = 0;
628module_param(dhd_watchdog_prio, int, 0);
629
630/* DPC thread priority */
631int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
632module_param(dhd_dpc_prio, int, 0);
633
634/* RX frame thread priority */
635int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
636module_param(dhd_rxf_prio, int, 0);
637
638#if !defined(BCMDBUS)
639extern int dhd_dongle_ramsize;
640module_param(dhd_dongle_ramsize, int, 0);
641#endif /* !BCMDBUS */
642
643#ifdef WL_CFG80211
644int passive_channel_skip = 0;
645module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
646#endif /* WL_CFG80211 */
647static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
648
649#ifdef DHD_MSI_SUPPORT
650uint enable_msi = TRUE;
651module_param(enable_msi, uint, 0);
652#endif /* DHD_MSI_SUPPORT */
653
654#ifdef DHD_SSSR_DUMP
655int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
656module_param(sssr_enab, uint, 0);
657module_param(fis_enab, uint, 0);
658#endif /* DHD_SSSR_DUMP */
659
660/* Keep track of number of instances */
661static int dhd_found = 0;
662static int instance_base = 0; /* Starting instance number */
663module_param(instance_base, int, 0644);
664
665#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
666/*
667 * The Rx path processes a budget (dhd_napi_weight) of packets in one go and
668 * hands the packets over to the network stack.
669 *
670 * The dhd_dpc tasklet is the producer (packets received from the dongle) and
671 * dhd_napi_poll() is the consumer. The maximum number of packets that can be
672 * received from the dongle at any given point in time is D2HRING_RXCMPLT_MAX_ITEM.
673 * Also, DHD always posts fresh rx buffers to the dongle while processing rx completions.
674 *
675 * The consumer must consume the packets at an equal or better rate than the producer,
676 * i.e. if dhd_napi_poll() does not process at the same rate as the producer (dhd_dpc),
677 * the rx_process_queue depth increases, which can even consume the entire system memory.
678 * In a UDP receive use case at 2Gbps, it was observed that the packets queued
679 * in rx_process_queue alone were taking 1.8GB when the budget was 64.
680 *
681 * Hence the budget must be at least D2HRING_RXCMPLT_MAX_ITEM. dhd_napi_weight also
682 * gets 50% more headroom, so that the scenario explained above is never hit.
683 */
684static int dhd_napi_weight = (D2HRING_RXCMPLT_MAX_ITEM + (D2HRING_RXCMPLT_MAX_ITEM / 2));
685module_param(dhd_napi_weight, int, 0644);
686#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
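/*
 * Example with illustrative numbers: if D2HRING_RXCMPLT_MAX_ITEM were 512
 * (the actual value is a build-time tunable), dhd_napi_weight would be
 * 512 + 256 = 768, so the NAPI budget always exceeds the largest possible
 * completion burst from the dongle by 50%.
 */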
687
688#ifdef PCIE_FULL_DONGLE
689extern int h2d_max_txpost;
690module_param(h2d_max_txpost, int, 0644);
691
692#if defined(DHD_HTPUT_TUNABLES)
693extern int h2d_htput_max_txpost;
694module_param(h2d_htput_max_txpost, int, 0644);
695#endif /* DHD_HTPUT_TUNABLES */
696
697extern uint dma_ring_indices;
698module_param(dma_ring_indices, uint, 0644);
699
700extern bool h2d_phase;
701module_param(h2d_phase, bool, 0644);
702extern bool force_trap_bad_h2d_phase;
703module_param(force_trap_bad_h2d_phase, bool, 0644);
704#endif /* PCIE_FULL_DONGLE */
705
706#ifdef FORCE_TPOWERON
707/*
708 * On Fire's reference platform, coming out of L1.2,
709 * there is a constant delay of 45us between CLKREQ# and stable REFCLK
710 * Due to this delay, with tPowerOn < 50
711 * there is a chance of the refclk sense to trigger on noise.
712 *
713 * 0x29 when written to L1SSControl2 translates to 50us.
714 */
715#define FORCE_TPOWERON_50US 0x29
716uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
717module_param(tpoweron_scale, uint, 0644);
718#endif /* FORCE_TPOWERON */
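/*
 * Decode of the 0x29 value above (PCIe L1 PM Substates, L1SSControl2):
 * bits [1:0] carry the T_PowerOn scale (01b = 10us units) and bits [7:3]
 * the T_PowerOn value, so 0x29 -> value 5 * 10us = 50us.
 */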
719
720#ifdef SHOW_LOGTRACE
721#if defined(CUSTOMER_HW4_DEBUG)
722#define WIFI_PATH "/etc/wifi/"
723static char *logstrs_path = VENDOR_PATH WIFI_PATH"logstrs.bin";
724char *st_str_file_path = VENDOR_PATH WIFI_PATH"rtecdc.bin";
725static char *map_file_path = VENDOR_PATH WIFI_PATH"rtecdc.map";
726static char *rom_st_str_file_path = VENDOR_PATH WIFI_PATH"roml.bin";
727static char *rom_map_file_path = VENDOR_PATH WIFI_PATH"roml.map";
728#else
729static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
730char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
731static char *map_file_path = PLATFORM_PATH"rtecdc.map";
732static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
733static char *rom_map_file_path = PLATFORM_PATH"roml.map";
734#endif /* CUSTOMER_HW4_DEBUG */
735
736static char *ram_file_str = "rtecdc";
737static char *rom_file_str = "roml";
738
739module_param(logstrs_path, charp, S_IRUGO);
740module_param(st_str_file_path, charp, S_IRUGO);
741module_param(map_file_path, charp, S_IRUGO);
742module_param(rom_st_str_file_path, charp, S_IRUGO);
743module_param(rom_map_file_path, charp, S_IRUGO);
744
745static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
746static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
747 uint32 *rodata_end);
748static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
749 char *map_file);
750#endif /* SHOW_LOGTRACE */
751
752#define DHD_MEMDUMP_TYPE_STR_LEN 32
753#define DHD_MEMDUMP_PATH_STR_LEN 128
754
755#ifdef DHD_TX_PROFILE
756/* process layer 3 headers, to ultimately determine if a
757 * dhd_tx_profile_protocol_t matches
758 */
759static int process_layer3_headers(uint8 **p, int plen, uint16 *type);
760
761/* process layer 2 headers, to ultimately determine if a
762 * dhd_tx_profile_protocol_t matches
763 */
764static int process_layer2_headers(uint8 **p, int *plen, uint16 *type);
765
766/* whether or not a dhd_tx_profile_protocol_t matches with data in a packet */
767bool dhd_protocol_matches_profile(uint8 *p, int plen, const
768 dhd_tx_profile_protocol_t *proto);
769#endif /* defined(DHD_TX_PROFILE) */
770
771#ifdef CUSTOMER_HW4_DEBUG
772#define PATH_BANDLOCK_INFO PLATFORM_PATH".bandlock.info"
773#elif defined(BOARD_HIKEY)
774#define PATH_BANDLOCK_INFO "/data/misc/wifi/.bandlock.info"
775#elif defined(__ARM_ARCH_7A__)
776#define PATH_BANDLOCK_INFO "/data/misc/wifi/.bandlock.info"
777#else
778#define PATH_BANDLOCK_INFO "/installmedia/.bandlock.info"
779#endif /* CUSTOMER_HW4_DEBUG */
780
781static void dhd_set_bandlock(dhd_pub_t * dhd);
782
783static void
784dhd_tx_stop_queues(struct net_device *net)
785{
786#ifdef DHD_MQ
787 netif_tx_stop_all_queues(net);
788#else
789 netif_stop_queue(net);
790#endif
791}
792
793static void
794dhd_tx_start_queues(struct net_device *net)
795{
796#ifdef DHD_MQ
797 netif_tx_wake_all_queues(net);
798#else
799 netif_wake_queue(net);
800#endif
801}
802
803#ifdef USE_WFA_CERT_CONF
804int g_frameburst = 1;
805#endif /* USE_WFA_CERT_CONF */
806
807static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
808
809#ifdef PCIE_FULL_DONGLE
810#define DHD_IF_STA_LIST_LOCK_INIT(lock) spin_lock_init(lock)
811
812#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
813static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
814 struct list_head *snapshot_list);
815static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
816#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
817#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
818#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
819#endif /* PCIE_FULL_DONGLE */
820
821/* Control fw roaming */
822#ifdef BCMCCX
823uint dhd_roam_disable = 0;
824#else
825uint dhd_roam_disable = 0;
826#endif /* BCMCCX */
827
828#ifdef BCMDBGFS
829extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
830extern void dhd_dbgfs_remove(void);
831#endif
832
833/* Enable TX status metadata report: 0=disable 1=enable 2=debug */
834static uint pcie_txs_metadata_enable = 0;
835module_param(pcie_txs_metadata_enable, uint, 0);
836
837/* Control radio state */
838uint dhd_radio_up = 1;
839
840/* Network interface name */
841char iface_name[IFNAMSIZ] = {'\0'};
842module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
843
844/* The following are specific to the SDIO dongle */
845
846/* IOCTL response timeout */
847int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
848
849/* DS Exit response timeout */
850int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
851
852/* Idle timeout for backplane clock */
853int dhd_idletime = DHD_IDLETIME_TICKS;
854module_param(dhd_idletime, int, 0);
855
856/* Use polling */
857uint dhd_poll = FALSE;
858module_param(dhd_poll, uint, 0);
859
860/* Use interrupts */
861uint dhd_intr = TRUE;
862module_param(dhd_intr, uint, 0);
863
864/* SDIO Drive Strength (in milliamps) */
865uint dhd_sdiod_drive_strength = 6;
866module_param(dhd_sdiod_drive_strength, uint, 0);
867
868#ifdef BCMSDIO
869/* Tx/Rx bounds */
870extern uint dhd_txbound;
871extern uint dhd_rxbound;
872module_param(dhd_txbound, uint, 0);
873module_param(dhd_rxbound, uint, 0);
874
875/* Deferred transmits */
876extern uint dhd_deferred_tx;
877module_param(dhd_deferred_tx, uint, 0);
878
879#endif /* BCMSDIO */
880
881#ifdef SDTEST
882/* Echo packet generator (pkts/s) */
883uint dhd_pktgen = 0;
884module_param(dhd_pktgen, uint, 0);
885
886/* Echo packet len (0 => sawtooth, max 2040) */
887uint dhd_pktgen_len = 0;
888module_param(dhd_pktgen_len, uint, 0);
889#endif /* SDTEST */
890
891#if defined(BCMSUP_4WAY_HANDSHAKE)
892/* Use the in-dongle supplicant for the 4-way handshake */
893#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
894/* Enable idsup by default (if supported in fw) */
895uint dhd_use_idsup = 1;
896#else
897uint dhd_use_idsup = 0;
898#endif /* WLFBT || WL_ENABLE_IDSUP */
899module_param(dhd_use_idsup, uint, 0);
900#endif /* BCMSUP_4WAY_HANDSHAKE */
901
902#ifndef BCMDBUS
903/* Allow delayed firmware download for debug purpose */
904int allow_delay_fwdl = FALSE;
905module_param(allow_delay_fwdl, int, 0);
906#endif /* !BCMDBUS */
907
908#ifdef GDB_PROXY
909/* Adds/replaces deadman_to= in NVRAM file with deadman_to=0 */
910static uint nodeadman = 0;
911module_param(nodeadman, uint, 0);
912#endif /* GDB_PROXY */
913
914#ifdef ECOUNTER_PERIODIC_DISABLE
915uint enable_ecounter = FALSE;
916#else
917uint enable_ecounter = TRUE;
918#endif
919module_param(enable_ecounter, uint, 0);
920
921/* TCM verification flag */
922uint dhd_tcm_test_enable = FALSE;
923module_param(dhd_tcm_test_enable, uint, 0644);
924
925extern char dhd_version[];
926extern char fw_version[];
927extern char clm_version[];
928
929int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
930static void dhd_net_if_lock_local(dhd_info_t *dhd);
931static void dhd_net_if_unlock_local(dhd_info_t *dhd);
932static void dhd_suspend_lock(dhd_pub_t *dhdp);
933static void dhd_suspend_unlock(dhd_pub_t *dhdp);
934
935/* Monitor interface */
936int dhd_monitor_init(void *dhd_pub);
937int dhd_monitor_uninit(void);
938
939#ifdef DHD_PM_CONTROL_FROM_FILE
940bool g_pm_control;
941#ifdef DHD_EXPORT_CNTL_FILE
942uint32 pmmode_val = 0xFF;
943#endif /* DHD_EXPORT_CNTL_FILE */
944void sec_control_pm(dhd_pub_t *dhd, uint *);
945#endif /* DHD_PM_CONTROL_FROM_FILE */
946
947#if defined(WL_WIRELESS_EXT)
948struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
949#endif /* defined(WL_WIRELESS_EXT) */
950
951#ifdef DHD_PM_OVERRIDE
952bool g_pm_override;
953#endif /* DHD_PM_OVERRIDE */
954
955#ifndef BCMDBUS
956static void dhd_dpc(ulong data);
957#endif /* !BCMDBUS */
958/* forward decl */
959extern int dhd_wait_pend8021x(struct net_device *dev);
960void dhd_os_wd_timer_extend(void *bus, bool extend);
961
962#ifdef TOE
963#ifndef BDC
964#error TOE requires BDC
965#endif /* !BDC */
966static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
967static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
968#endif /* TOE */
969
970static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
971 wl_event_msg_t *event_ptr, void **data_ptr);
972
973#if defined(CONFIG_PM_SLEEP)
974static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
975{
976 int ret = NOTIFY_DONE;
977 bool suspend = FALSE;
978	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, const dhd_info_t, pm_notifier);
979
980 BCM_REFERENCE(dhdinfo);
981 BCM_REFERENCE(suspend);
982
983 switch (action) {
984 case PM_HIBERNATION_PREPARE:
985 case PM_SUSPEND_PREPARE:
986 suspend = TRUE;
987 break;
988
989 case PM_POST_HIBERNATION:
990 case PM_POST_SUSPEND:
991 suspend = FALSE;
992 break;
993 }
994
995 printf("%s: action=%ld, suspend=%d, suspend_mode=%d\n",
996 __FUNCTION__, action, suspend, dhdinfo->pub.conf->suspend_mode);
997 if (suspend) {
998 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
999 if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
1000 dhd_suspend_resume_helper(dhdinfo, suspend, 0);
1001#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
1002 dhd_wlfc_suspend(&dhdinfo->pub);
1003#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
1004 if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
1005 dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
1006 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
1007 } else {
1008 if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
1009 dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
1010#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
1011 dhd_wlfc_resume(&dhdinfo->pub);
1012#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
1013 if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
1014 dhd_suspend_resume_helper(dhdinfo, suspend, 0);
1015 }
1016
1017#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
1018 KERNEL_VERSION(2, 6, 39))
1019 dhd_mmc_suspend = suspend;
1020 smp_mb();
1021#endif
1022
1023 return ret;
1024}
1025
1026/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
1027 * created in kernel notifier link list (with 'next' pointing to itself)
1028 */
1029static bool dhd_pm_notifier_registered = FALSE;
1030
1031extern int register_pm_notifier(struct notifier_block *nb);
1032extern int unregister_pm_notifier(struct notifier_block *nb);
1033#endif /* CONFIG_PM_SLEEP */
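/*
 * Sketch of how the callback above gets wired up (illustrative only; the
 * real registration in this driver happens during attach, guarded by
 * dhd_pm_notifier_registered, and the helper name and priority value are
 * assumptions):
 */
#if 0 /* example only */
static void
dhd_pm_notifier_register_example(dhd_info_t *dhd)
{
	dhd->pm_notifier.notifier_call = dhd_pm_callback;
	dhd->pm_notifier.priority = 10;	/* assumed priority */
	if (!dhd_pm_notifier_registered) {
		dhd_pm_notifier_registered = TRUE;
		register_pm_notifier(&dhd->pm_notifier);
	}
}
#endif /* example only */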
1034
1035/* Request scheduling of the bus rx frame */
1036static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
1037static void dhd_os_rxflock(dhd_pub_t *pub);
1038static void dhd_os_rxfunlock(dhd_pub_t *pub);
1039
1040#if defined(DHD_H2D_LOG_TIME_SYNC)
1041static void
1042dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
1043#endif /* DHD_H2D_LOG_TIME_SYNC */
1044
1045/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1046typedef struct dhd_dev_priv {
1047 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
1048 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
1049 int ifidx; /* interface index */
1050 void * lkup;
1051} dhd_dev_priv_t;
1052
1053#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
1054#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
1055#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1056#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1057#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1058#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
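/*
 * Sketch of how the private area is intended to be used (illustrative; the
 * netdev is really allocated elsewhere in this file with DHD_DEV_PRIV_SIZE
 * of private space, and the hypothetical function below only demonstrates
 * the accessors):
 */
#if 0 /* example only */
static void
dhd_dev_priv_example(dhd_info_t *dhd, dhd_if_t *ifp, int ifidx)
{
	struct net_device *net = alloc_etherdev(DHD_DEV_PRIV_SIZE);

	if (net != NULL) {
		dhd_dev_priv_clear(net);			/* ifidx becomes DHD_BAD_IF */
		dhd_dev_priv_save(net, dhd, ifp, ifidx);	/* cache the lookups */
		ASSERT(DHD_DEV_INFO(net) == dhd);
		free_netdev(net);
	}
}
#endif /* example only */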
1059
1060/** Clear the dhd net_device's private structure. */
1061static inline void
1062dhd_dev_priv_clear(struct net_device * dev)
1063{
1064 dhd_dev_priv_t * dev_priv;
1065 ASSERT(dev != (struct net_device *)NULL);
1066 dev_priv = DHD_DEV_PRIV(dev);
1067 dev_priv->dhd = (dhd_info_t *)NULL;
1068 dev_priv->ifp = (dhd_if_t *)NULL;
1069 dev_priv->ifidx = DHD_BAD_IF;
1070 dev_priv->lkup = (void *)NULL;
1071}
1072
1073/** Setup the dhd net_device's private structure. */
1074static inline void
1075dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1076 int ifidx)
1077{
1078 dhd_dev_priv_t * dev_priv;
1079 ASSERT(dev != (struct net_device *)NULL);
1080 dev_priv = DHD_DEV_PRIV(dev);
1081 dev_priv->dhd = dhd;
1082 dev_priv->ifp = ifp;
1083 dev_priv->ifidx = ifidx;
1084}
1085
1086/* Return interface pointer */
1087struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1088{
1089 ASSERT(ifidx < DHD_MAX_IFS);
1090
1091 if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
1092 return NULL;
1093
1094 return dhdp->info->iflist[ifidx];
1095}
1096
1097#ifdef PCIE_FULL_DONGLE
1098
1099/** Dummy objects are defined with state representing bad|down.
1100 * Performance gains come from reducing branch conditionals, instruction
1101 * parallelism, dual issue, fewer load shadows, and larger pipelines.
1102 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
1103 * is accessed via the dhd_sta_t.
1104 */
1105
1106/* Dummy dhd_info object */
1107dhd_info_t dhd_info_null = {
1108 .pub = {
1109 .info = &dhd_info_null,
1110#ifdef DHDTCPACK_SUPPRESS
1111 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
1112#endif /* DHDTCPACK_SUPPRESS */
1113 .up = FALSE,
1114 .busstate = DHD_BUS_DOWN
1115 }
1116};
1117#define DHD_INFO_NULL (&dhd_info_null)
1118#define DHD_PUB_NULL (&dhd_info_null.pub)
1119
1120/* Dummy netdevice object */
1121struct net_device dhd_net_dev_null = {
1122 .reg_state = NETREG_UNREGISTERED
1123};
1124#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1125
1126/* Dummy dhd_if object */
1127dhd_if_t dhd_if_null = {
1128#ifdef WMF
1129 .wmf = { .wmf_enable = TRUE },
1130#endif
1131 .info = DHD_INFO_NULL,
1132 .net = DHD_NET_DEV_NULL,
1133 .idx = DHD_BAD_IF
1134};
1135#define DHD_IF_NULL (&dhd_if_null)
1136
1137/* XXX should we use the sta_pool[0] object as DHD_STA_NULL? */
1138#define DHD_STA_NULL ((dhd_sta_t *)NULL)
1139
1140/** Interface STA list management. */
1141
1142/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1143static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1144static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1145
1146/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1147static void dhd_if_del_sta_list(dhd_if_t * ifp);
1148
1149/* Construct/Destruct a sta pool. */
1150static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1151static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1152/* Clear the pool of dhd_sta_t objects for built-in type driver */
1153static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1154
1155/** Reset a dhd_sta object and free into the dhd pool. */
1156static void
1157dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
1158{
1159 int prio;
1160
1161 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
1162
1163 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1164
1165	/*
1166	 * Flush and free all packets in all of this sta's flowring queues.
1167	 * Packets already in the flow ring itself will be flushed later.
1168	 */
1169 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1170 uint16 flowid = sta->flowid[prio];
1171
1172 if (flowid != FLOWID_INVALID) {
1173 unsigned long flags;
1174 flow_ring_node_t * flow_ring_node;
1175
1176#ifdef DHDTCPACK_SUPPRESS
1177 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
1178 * when there is a newly coming packet from network stack.
1179 */
1180 dhd_tcpack_info_tbl_clean(dhdp);
1181#endif /* DHDTCPACK_SUPPRESS */
1182
1183 flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1184 if (flow_ring_node) {
1185 flow_queue_t *queue = &flow_ring_node->queue;
1186
1187 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1188 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1189
1190 if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1191 void * pkt;
1192 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
1193 NULL) {
1194 PKTFREE(dhdp->osh, pkt, TRUE);
1195 }
1196 }
1197
1198 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1199 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1200 }
1201 }
1202
1203 sta->flowid[prio] = FLOWID_INVALID;
1204 }
1205
1206 id16_map_free(dhdp->staid_allocator, sta->idx);
1207 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1208 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1209 sta->ifidx = DHD_BAD_IF;
1210 bzero(sta->ea.octet, ETHER_ADDR_LEN);
1211 INIT_LIST_HEAD(&sta->list);
1212 sta->idx = ID16_INVALID; /* implying free */
1213}
1214
1215/** Allocate a dhd_sta object from the dhd pool. */
1216static dhd_sta_t *
1217dhd_sta_alloc(dhd_pub_t * dhdp)
1218{
1219 uint16 idx;
1220 dhd_sta_t * sta;
1221 dhd_sta_pool_t * sta_pool;
1222
1223 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1224
1225 idx = id16_map_alloc(dhdp->staid_allocator);
1226 if (idx == ID16_INVALID) {
1227 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1228 return DHD_STA_NULL;
1229 }
1230
1231 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1232 sta = &sta_pool[idx];
1233
1234 ASSERT((sta->idx == ID16_INVALID) &&
1235 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1236
1237 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1238
1239 sta->idx = idx; /* implying allocated */
1240
1241 return sta;
1242}
1243
1244/** Delete all STAs in an interface's STA list. */
1245static void
1246dhd_if_del_sta_list(dhd_if_t *ifp)
1247{
1248 dhd_sta_t *sta, *next;
1249 unsigned long flags;
1250
1251 DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1252 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1253 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1254 GCC_DIAGNOSTIC_POP();
1255 list_del(&sta->list);
1256 dhd_sta_free(&ifp->info->pub, sta);
1257 }
1258
1259 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1260
1261 return;
1262}
1263
1264/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1265static int
1266dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1267{
1268 int idx, prio, sta_pool_memsz;
1269 dhd_sta_t * sta;
1270 dhd_sta_pool_t * sta_pool;
1271 void * staid_allocator;
1272
1273 ASSERT(dhdp != (dhd_pub_t *)NULL);
1274 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1275
1276 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1277 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1278 if (staid_allocator == NULL) {
1279 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1280 return BCME_ERROR;
1281 }
1282
1283 /* Pre allocate a pool of dhd_sta objects (one extra). */
1284 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1285 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1286 if (sta_pool == NULL) {
1287 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1288 id16_map_fini(dhdp->osh, staid_allocator);
1289 return BCME_ERROR;
1290 }
1291
1292 dhdp->sta_pool = sta_pool;
1293 dhdp->staid_allocator = staid_allocator;
1294
1295 /* Initialize all sta(s) for the pre-allocated free pool. */
1296 bzero((uchar *)sta_pool, sta_pool_memsz);
1297 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1298 sta = &sta_pool[idx];
1299 sta->idx = id16_map_alloc(staid_allocator);
1300 ASSERT(sta->idx <= max_sta);
1301 }
1302
1303 /* Now place them into the pre-allocated free pool. */
1304 for (idx = 1; idx <= max_sta; idx++) {
1305 sta = &sta_pool[idx];
1306 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1307 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1308 }
1309 dhd_sta_free(dhdp, sta);
1310 }
1311
1312 return BCME_OK;
1313}
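/*
 * Note on the two loops above: the first pass drains the id16 allocator by
 * handing every sta a unique id, and the second pass returns each sta via
 * dhd_sta_free(), which both resets the object and releases its id back to
 * the allocator -- leaving a fully populated free pool. Id #0 is reserved
 * (the map starts at 1) so it can never collide with a valid staid.
 */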
1314
1315/** Destruct the pool of dhd_sta_t objects.
1316 * Caller must ensure that no STA objects are currently associated with an if.
1317 */
1318static void
1319dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1320{
1321 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1322
1323 if (sta_pool) {
1324 int idx;
1325 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1326 for (idx = 1; idx <= max_sta; idx++) {
1327 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1328 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1329 }
1330 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1331 }
1332
1333 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1334 dhdp->staid_allocator = NULL;
1335}
1336
1337/* Clear the pool of dhd_sta_t objects for built-in type driver */
1338static void
1339dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1340{
1341 int idx, prio, sta_pool_memsz;
1342 dhd_sta_t * sta;
1343 dhd_sta_pool_t * sta_pool;
1344 void *staid_allocator;
1345
1346 if (!dhdp) {
1347 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1348 return;
1349 }
1350
1351 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1352 staid_allocator = dhdp->staid_allocator;
1353
1354 if (!sta_pool) {
1355 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1356 return;
1357 }
1358
1359 if (!staid_allocator) {
1360 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1361 return;
1362 }
1363
1364 /* clear free pool */
1365 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1366 bzero((uchar *)sta_pool, sta_pool_memsz);
1367
1368 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1369 id16_map_clear(staid_allocator, max_sta, 1);
1370
1371 /* Initialize all sta(s) for the pre-allocated free pool. */
1372 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1373 sta = &sta_pool[idx];
1374 sta->idx = id16_map_alloc(staid_allocator);
1375 ASSERT(sta->idx <= max_sta);
1376 }
1377 /* Now place them into the pre-allocated free pool. */
1378 for (idx = 1; idx <= max_sta; idx++) {
1379 sta = &sta_pool[idx];
1380 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1381 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1382 }
1383 dhd_sta_free(dhdp, sta);
1384 }
1385}
1386
1387/** Find STA with MAC address ea in an interface's STA list. */
1388dhd_sta_t *
1389dhd_find_sta(void *pub, int ifidx, void *ea)
1390{
1391 dhd_sta_t *sta;
1392 dhd_if_t *ifp;
1393 unsigned long flags;
1394
1395 ASSERT(ea != NULL);
1396 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1397 if (ifp == NULL)
1398 return DHD_STA_NULL;
1399
1400 DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1401 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1402 list_for_each_entry(sta, &ifp->sta_list, list) {
1403 GCC_DIAGNOSTIC_POP();
1404 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1405 DHD_INFO(("%s: Found STA " MACDBG "\n",
1406 __FUNCTION__, MAC2STRDBG((char *)ea)));
1407 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1408 return sta;
1409 }
1410 }
1411
1412 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1413
1414 return DHD_STA_NULL;
1415}
1416
1417/** Add STA into the interface's STA list. */
1418dhd_sta_t *
1419dhd_add_sta(void *pub, int ifidx, void *ea)
1420{
1421 dhd_sta_t *sta;
1422 dhd_if_t *ifp;
1423 unsigned long flags;
1424
1425 ASSERT(ea != NULL);
1426 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1427 if (ifp == NULL)
1428 return DHD_STA_NULL;
1429
1430 if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
1431 DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
1432 return DHD_STA_NULL;
1433 }
1434
1435 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1436 if (sta == DHD_STA_NULL) {
1437 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1438 return DHD_STA_NULL;
1439 }
1440
1441 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1442
1443 /* link the sta and the dhd interface */
1444 sta->ifp = ifp;
1445 sta->ifidx = ifidx;
1446 INIT_LIST_HEAD(&sta->list);
1447
1448 DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1449
1450 list_add_tail(&sta->list, &ifp->sta_list);
1451
1452 DHD_ERROR(("%s: Adding STA " MACDBG "\n",
1453 __FUNCTION__, MAC2STRDBG((char *)ea)));
1454
1455 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1456
1457 return sta;
1458}
1459
1460/** Delete all STAs from the interface's STA list. */
1461void
1462dhd_del_all_sta(void *pub, int ifidx)
1463{
1464 dhd_sta_t *sta, *next;
1465 dhd_if_t *ifp;
1466 unsigned long flags;
1467
1468 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1469 if (ifp == NULL)
1470 return;
1471
1472 DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1473 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1474 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1475 GCC_DIAGNOSTIC_POP();
1476 list_del(&sta->list);
1477 dhd_sta_free(&ifp->info->pub, sta);
1478#ifdef DHD_L2_FILTER
1479 if (ifp->parp_enable) {
1480 /* clear Proxy ARP cache of specific Ethernet Address */
1481 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
1482 ifp->phnd_arp_table, FALSE,
1483 sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1484 }
1485#endif /* DHD_L2_FILTER */
1486 }
1487
1488 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1489
1490 return;
1491}
1492
1493/** Delete STA from the interface's STA list. */
1494void
1495dhd_del_sta(void *pub, int ifidx, void *ea)
1496{
1497 dhd_sta_t *sta, *next;
1498 dhd_if_t *ifp;
1499 unsigned long flags;
1500
1501 ASSERT(ea != NULL);
1502 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1503 if (ifp == NULL)
1504 return;
1505
1506 DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1507 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1508 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1509 GCC_DIAGNOSTIC_POP();
1510 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1511 DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
1512 __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
1513 list_del(&sta->list);
1514 dhd_sta_free(&ifp->info->pub, sta);
1515 }
1516 }
1517
1518 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1519#ifdef DHD_L2_FILTER
1520 if (ifp->parp_enable) {
1521 /* clear Proxy ARP cache of specific Ethernet Address */
1522 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1523 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1524 }
1525#endif /* DHD_L2_FILTER */
1526 return;
1527}
1528
1529/** Add STA if it doesn't exist. Not reentrant. */
1530dhd_sta_t*
1531dhd_findadd_sta(void *pub, int ifidx, void *ea)
1532{
1533 dhd_sta_t *sta;
1534
1535 sta = dhd_find_sta(pub, ifidx, ea);
1536
1537 if (!sta) {
1538 /* Add entry */
1539 sta = dhd_add_sta(pub, ifidx, ea);
1540 }
1541
1542 return sta;
1543}
1544
1545#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1546static struct list_head *
1547dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1548{
1549 unsigned long flags;
1550 dhd_sta_t *sta, *snapshot;
1551
1552 INIT_LIST_HEAD(snapshot_list);
1553
1554 DHD_IF_STA_LIST_LOCK(&ifp->sta_list_lock, flags);
1555
1556 list_for_each_entry(sta, &ifp->sta_list, list) {
1557 /* allocate one and add to snapshot */
1558 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1559 if (snapshot == NULL) {
1560 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1561 continue;
1562 }
1563
1564 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1565
1566 INIT_LIST_HEAD(&snapshot->list);
1567 list_add_tail(&snapshot->list, snapshot_list);
1568 }
1569
1570 DHD_IF_STA_LIST_UNLOCK(&ifp->sta_list_lock, flags);
1571
1572 return snapshot_list;
1573}
1574
1575static void
1576dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1577{
1578 dhd_sta_t *sta, *next;
1579
1580 list_for_each_entry_safe(sta, next, snapshot_list, list) {
1581 list_del(&sta->list);
1582 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1583 }
1584}
1585#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1586
1587#else
1588static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
1589static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
1590static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
1591static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
1592dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
1593dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
1594void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1595#endif /* PCIE_FULL_DONGLE */
1596
1597#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
1598void
1599dhd_axi_error_dispatch(dhd_pub_t *dhdp)
1600{
1601 dhd_info_t *dhd = dhdp->info;
1602 schedule_work(&dhd->axi_error_dispatcher_work);
1603}
1604
1605static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
1606{
1607 struct dhd_info *dhd =
1608 container_of(work, struct dhd_info, axi_error_dispatcher_work);
1609 dhd_axi_error(&dhd->pub);
1610}
1611#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
1612
1613/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
1614int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1615{
1616 dhd_if_t *ifp;
1617 dhd_info_t *dhd = dhdp->info;
1618 int i;
1619
1620 ASSERT(bssidx < DHD_MAX_IFS);
1621 ASSERT(dhdp);
1622
1623 for (i = 0; i < DHD_MAX_IFS; i++) {
1624 ifp = dhd->iflist[i];
1625 if (ifp && (ifp->bssidx == bssidx)) {
1626 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1627 ifp->name, bssidx, i));
1628 break;
1629 }
1630 }
1631 return i;
1632}
1633
1634static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1635{
1636 uint32 store_idx;
1637 uint32 sent_idx;
1638
1639 if (!skb) {
1640 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1641 return BCME_ERROR;
1642 }
1643
1644 dhd_os_rxflock(dhdp);
1645 store_idx = dhdp->store_idx;
1646 sent_idx = dhdp->sent_idx;
1647 if (dhdp->skbbuf[store_idx] != NULL) {
1648 /* Make sure the previous packets are processed */
1649 dhd_os_rxfunlock(dhdp);
1650 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1651 skb, store_idx, sent_idx));
1652 /* removed msleep here, should use wait_event_timeout if we
1653 * want to give rx frame thread a chance to run
1654 */
1655#if defined(WAIT_DEQUEUE)
1656 OSL_SLEEP(1);
1657#endif
1658 return BCME_ERROR;
1659 }
1660 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1661 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
1662 dhdp->skbbuf[store_idx] = skb;
1663 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1664 dhd_os_rxfunlock(dhdp);
1665
1666 return BCME_OK;
1667}
1668
1669static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1670{
1671 uint32 store_idx;
1672 uint32 sent_idx;
1673 void *skb;
1674
1675 dhd_os_rxflock(dhdp);
1676
1677 store_idx = dhdp->store_idx;
1678 sent_idx = dhdp->sent_idx;
1679 skb = dhdp->skbbuf[sent_idx];
1680
1681 if (skb == NULL) {
1682 dhd_os_rxfunlock(dhdp);
1683 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1684 store_idx, sent_idx));
1685 return NULL;
1686 }
1687
1688 dhdp->skbbuf[sent_idx] = NULL;
1689 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1690
1691 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1692 skb, sent_idx));
1693
1694 dhd_os_rxfunlock(dhdp);
1695
1696 return skb;
1697}
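/*
 * The enqueue/dequeue pair above implements a classic power-of-two ring:
 * store_idx advances on enqueue and sent_idx on dequeue, both wrapped with
 * "& (MAXSKBPEND - 1)", which only works because MAXSKBPEND is a power of
 * two. A NULL slot means "free", so no separate fill count is needed, e.g.
 * with MAXSKBPEND = 32 and store_idx = 31, the next slot is (31 + 1) & 31 = 0.
 */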
1698
1699int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1700{
1701 uint chipid = dhd_bus_chip_id(dhdp);
1702 uint revid = dhd_bus_chiprev_id(dhdp);
1703 int ret = BCME_OK;
1704 if (prepost) { /* pre process */
1705 ret = dhd_alloc_cis(dhdp);
1706 if (ret != BCME_OK) {
1707 return ret;
1708 }
1709 switch (chipid) {
1710 case BCM4389_CHIP_GRPID:
1711 if (revid == 3) {
1712				/* BCM4389A0 uses the legacy cisdump iovar */
1713 dhd_read_cis(dhdp);
1714 } else {
1715				/* BCM4389B0 or a higher rev uses the new otp iovar */
1716 dhd_read_otp_sw_rgn(dhdp);
1717 }
1718 break;
1719 default:
1720 dhd_read_cis(dhdp);
1721 break;
1722 }
1723 dhd_check_module_cid(dhdp);
1724 dhd_check_module_mac(dhdp);
1725 dhd_set_macaddr_from_file(dhdp);
1726 } else { /* post process */
1727 dhd_write_macaddr(&dhdp->mac);
1728 dhd_clear_cis(dhdp);
1729 }
1730
1731 return BCME_OK;
1732}
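/* A usage sketch of the pre/post contract above (illustrative only, never
 * compiled; the surrounding init sequence is assumed):
 */
#if 0
	dhd_process_cid_mac(dhdp, TRUE);	/* pre: read CIS/OTP, derive module CID/MAC */
	/* ... firmware download / dongle bring-up ... */
	dhd_process_cid_mac(dhdp, FALSE);	/* post: write MAC, free the CIS buffers */
#endif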
1733
1734// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
1735#if defined(PKT_FILTER_SUPPORT)
1736#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
1737static bool
1738_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
1739{
1740 bool _apply = FALSE;
1741 /* In case of IBSS mode, apply arp pkt filter */
1742 if (op_mode_param & DHD_FLAG_IBSS_MODE) {
1743 _apply = TRUE;
1744 goto exit;
1745 }
1746 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1747 if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
1748 _apply = TRUE;
1749 goto exit;
1750 }
1751
1752exit:
1753 return _apply;
1754}
1755#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1756
1757void
1758dhd_set_packet_filter(dhd_pub_t *dhd)
1759{
1760 int i;
1761
1762 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1763 if (dhd_pkt_filter_enable) {
1764 for (i = 0; i < dhd->pktfilter_count; i++) {
1765 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1766 }
1767 }
1768}
1769
1770void
1771dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
1772{
1773 int i;
1774
1775 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
1776 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
1777 !dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
1778 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
1779 return;
1780 }
1781 /* 1 - Enable packet filter, only allow unicast packet to send up */
1782 /* 0 - Disable packet filter */
1783 if (dhd_pkt_filter_enable && (!value ||
1784 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
1785 dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
1786 {
1787 for (i = 0; i < dhd->pktfilter_count; i++) {
1788// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
1789#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
1790 if (value && (i == DHD_ARP_FILTER_NUM) &&
1791 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
1792 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1793 "val %d, cnt %d, op_mode 0x%x\n",
1794 value, i, dhd->op_mode));
1795 continue;
1796 }
1797#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1798#ifdef APSTA_BLOCK_ARP_DURING_DHCP
1799 if (value && (i == DHD_BROADCAST_ARP_FILTER_NUM) &&
1800 dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM]) {
1801 /* XXX: BROADCAST_ARP_FILTER is only for the
1802 * STA/SoftAP concurrent mode (Please refer to RB:90348)
1803 * Remove the filter for other cases explicitly
1804 */
1805 DHD_ERROR(("%s: Remove the DHD_BROADCAST_ARP_FILTER\n",
1806 __FUNCTION__));
1807 dhd_packet_filter_add_remove(dhd, FALSE,
1808 DHD_BROADCAST_ARP_FILTER_NUM);
1809 }
1810#endif /* APSTA_BLOCK_ARP_DURING_DHCP */
1811 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
1812 value, dhd_master_mode);
1813 }
1814 }
1815}
1816
1817int
1818dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
1819{
1820 char *filterp = NULL;
1821 int filter_id = 0;
1822
1823 switch (num) {
1824 case DHD_BROADCAST_FILTER_NUM:
1825 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
1826 filter_id = 101;
1827 break;
1828 case DHD_MULTICAST4_FILTER_NUM:
1829 filter_id = 102;
1830 if (FW_SUPPORTED((dhdp), pf6)) {
1831 if (dhdp->pktfilter[num] != NULL) {
1832 dhd_pktfilter_offload_delete(dhdp, filter_id);
1833 dhdp->pktfilter[num] = NULL;
1834 }
1835 if (!add_remove) {
1836 filterp = DISCARD_IPV4_MCAST;
1837 add_remove = 1;
1838 break;
1839 }
1840			} /* XXX: intentionally omitting the else case */
1841 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
1842 break;
1843 case DHD_MULTICAST6_FILTER_NUM:
1844 filter_id = 103;
1845 if (FW_SUPPORTED((dhdp), pf6)) {
1846 if (dhdp->pktfilter[num] != NULL) {
1847 dhd_pktfilter_offload_delete(dhdp, filter_id);
1848 dhdp->pktfilter[num] = NULL;
1849 }
1850 if (!add_remove) {
1851 filterp = DISCARD_IPV6_MCAST;
1852 add_remove = 1;
1853 break;
1854 }
1855			} /* XXX: intentionally omitting the else case */
1856 filterp = "103 0 0 0 0xFFFF 0x3333";
1857 break;
1858 case DHD_MDNS_FILTER_NUM:
1859 filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
1860 filter_id = 104;
1861 break;
1862 case DHD_ARP_FILTER_NUM:
1863 filterp = "105 0 0 12 0xFFFF 0x0806";
1864 filter_id = 105;
1865 break;
1866 case DHD_BROADCAST_ARP_FILTER_NUM:
1867 filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
1868 " 0xFFFFFFFFFFFF0000000000000806";
1869 filter_id = 106;
1870 break;
1871 default:
1872 return -EINVAL;
1873 }
1874
1875 /* Add filter */
1876 if (add_remove) {
1877 dhdp->pktfilter[num] = filterp;
1878 dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
1879 } else { /* Delete filter */
1880 if (dhdp->pktfilter[num] != NULL) {
1881 dhd_pktfilter_offload_delete(dhdp, filter_id);
1882 dhdp->pktfilter[num] = NULL;
1883 }
1884 }
1885
1886 return 0;
1887}
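/* The filter strings above appear to follow the usual wl pkt_filter layout,
 * "<id> <polarity> <type> <offset> <bitmask> <pattern>"; e.g. the ARP filter
 * "105 0 0 12 0xFFFF 0x0806" matches ethertype 0x0806 at byte offset 12.
 * A usage sketch (illustrative only, never compiled):
 */
#if 0
	dhd_packet_filter_add_remove(dhdp, TRUE, DHD_ARP_FILTER_NUM);	/* install */
	dhd_packet_filter_add_remove(dhdp, FALSE, DHD_ARP_FILTER_NUM);	/* remove */
#endif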
1888#endif /* PKT_FILTER_SUPPORT */
1889
1890static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1891{
1892#ifndef SUPPORT_PM2_ONLY
1893 int power_mode = PM_MAX;
1894#endif /* SUPPORT_PM2_ONLY */
1895 /* wl_pkt_filter_enable_t enable_parm; */
1896 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1897 int ret = 0;
1898#ifdef DHD_USE_EARLYSUSPEND
1899#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
1900 int bcn_timeout = 0;
1901#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
1902#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
1903 int roam_time_thresh = 0; /* (ms) */
1904#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
1905#ifndef ENABLE_FW_ROAM_SUSPEND
1906 uint roamvar = 1;
1907#endif /* ENABLE_FW_ROAM_SUSPEND */
1908#ifdef ENABLE_BCN_LI_BCN_WAKEUP
1909 int bcn_li_bcn = 1;
1910#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
1911 uint nd_ra_filter = 0;
1912#ifdef ENABLE_IPMCAST_FILTER
1913 int ipmcast_l2filter;
1914#endif /* ENABLE_IPMCAST_FILTER */
1915#ifdef CUSTOM_EVENT_PM_WAKE
1916 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
1917#endif /* CUSTOM_EVENT_PM_WAKE */
1918#endif /* DHD_USE_EARLYSUSPEND */
1919#ifdef PASS_ALL_MCAST_PKTS
1920 struct dhd_info *dhdinfo;
1921 uint32 allmulti;
1922 uint i;
1923#endif /* PASS_ALL_MCAST_PKTS */
1924#ifdef DYNAMIC_SWOOB_DURATION
1925#ifndef CUSTOM_INTR_WIDTH
1926#define CUSTOM_INTR_WIDTH 100
1927 int intr_width = 0;
1928#endif /* CUSTOM_INTR_WIDTH */
1929#endif /* DYNAMIC_SWOOB_DURATION */
1930
1931#if defined(BCMPCIE)
1932 int lpas = 0;
1933 int dtim_period = 0;
1934 int bcn_interval = 0;
1935 int bcn_to_dly = 0;
1936#if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
1937 bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
1938#else
1939 int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
1940#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
1941#endif /* BCMPCIE */
1942
1943 if (!dhd)
1944 return -ENODEV;
1945
1946#ifdef PASS_ALL_MCAST_PKTS
1947 dhdinfo = dhd->info;
1948#endif /* PASS_ALL_MCAST_PKTS */
1949
1950 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1951 __FUNCTION__, value, dhd->in_suspend));
1952
1953 dhd_suspend_lock(dhd);
1954
1955#ifdef CUSTOM_SET_CPUCORE
1956 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1957 /* set specific cpucore */
1958 dhd_set_cpucore(dhd, TRUE);
1959#endif /* CUSTOM_SET_CPUCORE */
1960 if (dhd->up) {
1961 if (value && dhd->in_suspend) {
1962#ifdef PKT_FILTER_SUPPORT
1963 dhd->early_suspended = 1;
1964#endif
1965 /* Kernel suspended */
1966 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
1967
1968#ifndef SUPPORT_PM2_ONLY
1969 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1970 sizeof(power_mode), TRUE, 0);
1971#endif /* SUPPORT_PM2_ONLY */
1972
1973#ifdef PKT_FILTER_SUPPORT
1974 /* Enable packet filter,
1975 * only allow unicast packet to send up
1976 */
1977 dhd_enable_packet_filter(1, dhd);
1978#ifdef APF
1979 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
1980#endif /* APF */
1981#endif /* PKT_FILTER_SUPPORT */
1982#ifdef ARP_OFFLOAD_SUPPORT
1983 if (dhd->arpoe_enable) {
1984 dhd_arp_offload_enable(dhd, TRUE);
1985 }
1986#endif /* ARP_OFFLOAD_SUPPORT */
1987
1988#ifdef PASS_ALL_MCAST_PKTS
1989 allmulti = 0;
1990 for (i = 0; i < DHD_MAX_IFS; i++) {
1991				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
1992					ret = dhd_iovar(dhd, i, "allmulti",
1993						(char *)&allmulti,
1994						sizeof(allmulti),
1995						NULL, 0, TRUE);
1996					if (ret < 0) {
1997						DHD_ERROR(("%s allmulti failed %d\n",
1998							__FUNCTION__, ret));
1999					}
2000				}
			}
2001#endif /* PASS_ALL_MCAST_PKTS */
2002
2003			/* If DTIM skip is set up as default, force it to wake
2004			 * every third DTIM for better power savings. Note that
2005			 * one side effect is a chance of missing BC/MC packets.
2006			 */
2007#ifdef WLTDLS
2008			/* Do not set bcn_li_dtim in WFD mode */
2009 if (dhd->tdls_mode) {
2010 bcn_li_dtim = 0;
2011 } else
2012#endif /* WLTDLS */
2013#if defined(BCMPCIE)
2014 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
2015 &bcn_interval);
2016 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2017 sizeof(bcn_li_dtim), NULL, 0, TRUE);
2018 if (ret < 0) {
2019 DHD_ERROR(("%s bcn_li_dtim failed %d\n",
2020 __FUNCTION__, ret));
2021 }
2022 if ((bcn_li_dtim * dtim_period * bcn_interval) >=
2023 MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
2024 /*
2025 * Increase max roaming threshold from 2 secs to 8 secs
2026 * the real roam threshold is MIN(max_roam_threshold,
2027 * bcn_timeout/2)
2028 */
2029 lpas = 1;
2030 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
2031 NULL, 0, TRUE);
2032 if (ret < 0) {
2033 if (ret == BCME_UNSUPPORTED) {
2034 DHD_ERROR(("%s lpas, UNSUPPORTED\n",
2035 __FUNCTION__));
2036 } else {
2037 DHD_ERROR(("%s set lpas failed %d\n",
2038 __FUNCTION__, ret));
2039 }
2040 }
2041 bcn_to_dly = 1;
2042 /*
2043 * if bcn_to_dly is 1, the real roam threshold is
2044 * MIN(max_roam_threshold, bcn_timeout -1);
2045				 * notify the link down event after the roaming procedure completes,
2046				 * if we hit bcn_timeout while roaming is in progress.
2047 */
2048 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
2049 sizeof(bcn_to_dly), NULL, 0, TRUE);
2050 if (ret < 0) {
2051 if (ret == BCME_UNSUPPORTED) {
2052 DHD_ERROR(("%s bcn_to_dly, UNSUPPORTED\n",
2053 __FUNCTION__));
2054 } else {
2055 DHD_ERROR(("%s set bcn_to_dly failed %d\n",
2056 __FUNCTION__, ret));
2057 }
2058 }
2059				/* Increase beacon timeout to 6 secs, or keep the configured value if bigger */
2060 bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
2061 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2062 sizeof(bcn_timeout), NULL, 0, TRUE);
2063 if (ret < 0) {
2064 DHD_ERROR(("%s set bcn_timeout failed %d\n",
2065 __FUNCTION__, ret));
2066 }
2067 }
2068#else
2069 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2070 if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2071 sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
2072 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2073#endif /* BCMPCIE */
2074
2075#ifdef DHD_USE_EARLYSUSPEND
2076#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2077 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2078 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2079 sizeof(bcn_timeout), NULL, 0, TRUE);
2080 if (ret < 0) {
2081 DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
2082 ret));
2083 }
2084#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2085#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2086 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2087 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
2088 (char *)&roam_time_thresh,
2089 sizeof(roam_time_thresh), NULL, 0, TRUE);
2090 if (ret < 0) {
2091 DHD_ERROR(("%s roam_time_thresh failed %d\n",
2092 __FUNCTION__, ret));
2093 }
2094#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2095#ifndef ENABLE_FW_ROAM_SUSPEND
2096 /* Disable firmware roaming during suspend */
2097 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
2098 sizeof(roamvar), NULL, 0, TRUE);
2099 if (ret < 0) {
2100 DHD_ERROR(("%s roam_off failed %d\n",
2101 __FUNCTION__, ret));
2102 }
2103#endif /* ENABLE_FW_ROAM_SUSPEND */
2104#ifdef ENABLE_BCN_LI_BCN_WAKEUP
2105 if (bcn_li_dtim) {
2106 bcn_li_bcn = 0;
2107 }
2108 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
2109 sizeof(bcn_li_bcn), NULL, 0, TRUE);
2110 if (ret < 0) {
2111 DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
2112 }
2113#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2114#if defined(WL_CFG80211) && defined(WL_BCNRECV)
2115 ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
2116 if (ret != BCME_OK) {
2117 DHD_ERROR(("failed to stop beacon recv event on"
2118 " suspend state (%d)\n", ret));
2119 }
2120#endif /* WL_CFG80211 && WL_BCNRECV */
2121#ifdef NDO_CONFIG_SUPPORT
2122 if (dhd->ndo_enable) {
2123 if (!dhd->ndo_host_ip_overflow) {
2124 /* enable ND offload on suspend */
2125 ret = dhd_ndo_enable(dhd, TRUE);
2126 if (ret < 0) {
2127 DHD_ERROR(("%s: failed to enable NDO\n",
2128 __FUNCTION__));
2129 }
2130 } else {
2131 DHD_INFO(("%s: NDO disabled on suspend due to"
2132						" HW capacity\n", __FUNCTION__));
2133 }
2134 }
2135#endif /* NDO_CONFIG_SUPPORT */
2136#ifndef APF
2137 if (FW_SUPPORTED(dhd, ndoe))
2138#else
2139 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
2140#endif /* APF */
2141 {
2142 /* enable IPv6 RA filter in firmware during suspend */
2143 nd_ra_filter = 1;
2144 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
2145 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
2146 NULL, 0, TRUE);
2147 if (ret < 0)
2148 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2149 ret));
2150 }
2151 dhd_os_suppress_logging(dhd, TRUE);
2152#ifdef ENABLE_IPMCAST_FILTER
2153 ipmcast_l2filter = 1;
2154 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
2155 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
2156 NULL, 0, TRUE);
2157 if (ret < 0) {
2158 DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
2159 }
2160#endif /* ENABLE_IPMCAST_FILTER */
2161#ifdef DYNAMIC_SWOOB_DURATION
2162 intr_width = CUSTOM_INTR_WIDTH;
2163 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
2164 sizeof(intr_width), NULL, 0, TRUE);
2165 if (ret < 0) {
2166 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2167 }
2168#endif /* DYNAMIC_SWOOB_DURATION */
2169#ifdef CUSTOM_EVENT_PM_WAKE
2170 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
2171 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
2172 (char *)&pm_awake_thresh,
2173 sizeof(pm_awake_thresh), NULL, 0, TRUE);
2174 if (ret < 0) {
2175 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
2176 __FUNCTION__, ret));
2177 }
2178#endif /* CUSTOM_EVENT_PM_WAKE */
2179#ifdef CONFIG_SILENT_ROAM
2180 if (!dhd->sroamed) {
2181 ret = dhd_sroam_set_mon(dhd, TRUE);
2182 if (ret < 0) {
2183 DHD_ERROR(("%s set sroam failed %d\n",
2184 __FUNCTION__, ret));
2185 }
2186 }
2187 dhd->sroamed = FALSE;
2188#endif /* CONFIG_SILENT_ROAM */
2189#endif /* DHD_USE_EARLYSUSPEND */
2190 } else {
2191#ifdef PKT_FILTER_SUPPORT
2192 dhd->early_suspended = 0;
2193#endif
2194 /* Kernel resumed */
2195 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
2196#ifdef DYNAMIC_SWOOB_DURATION
2197 intr_width = 0;
2198 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
2199 sizeof(intr_width), NULL, 0, TRUE);
2200 if (ret < 0) {
2201 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2202 }
2203#endif /* DYNAMIC_SWOOB_DURATION */
2204#ifndef SUPPORT_PM2_ONLY
2205 power_mode = PM_FAST;
2206 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2207 sizeof(power_mode), TRUE, 0);
2208#endif /* SUPPORT_PM2_ONLY */
2209#if defined(WL_CFG80211) && defined(WL_BCNRECV)
2210 ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
2211 if (ret != BCME_OK) {
2212 DHD_ERROR(("failed to resume beacon recv state (%d)\n",
2213 ret));
2214 }
2215#endif /* WL_CFG80211 && WL_BCNRECV */
2216#ifdef ARP_OFFLOAD_SUPPORT
2217 if (dhd->arpoe_enable) {
2218 dhd_arp_offload_enable(dhd, FALSE);
2219 }
2220#endif /* ARP_OFFLOAD_SUPPORT */
2221#ifdef PKT_FILTER_SUPPORT
2222 /* disable pkt filter */
2223 dhd_enable_packet_filter(0, dhd);
2224#ifdef APF
2225 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
2226#endif /* APF */
2227#endif /* PKT_FILTER_SUPPORT */
2228#ifdef PASS_ALL_MCAST_PKTS
2229 allmulti = 1;
2230 for (i = 0; i < DHD_MAX_IFS; i++) {
2231				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
2232					ret = dhd_iovar(dhd, i, "allmulti",
2233						(char *)&allmulti,
2234						sizeof(allmulti), NULL,
2235						0, TRUE);
2236					if (ret < 0) {
2237						DHD_ERROR(("%s: allmulti failed:%d\n",
2238							__FUNCTION__, ret));
2239					}
2240				}
			}
2241#endif /* PASS_ALL_MCAST_PKTS */
2242#if defined(BCMPCIE)
2243 /* restore pre-suspend setting */
2244 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2245 sizeof(bcn_li_dtim), NULL, 0, TRUE);
2246 if (ret < 0) {
2247				DHD_ERROR(("%s:bcn_li_dtim failed:%d\n",
2248 __FUNCTION__, ret));
2249 }
2250 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
2251 0, TRUE);
2252 if (ret < 0) {
2253 if (ret == BCME_UNSUPPORTED) {
2254 DHD_ERROR(("%s lpas, UNSUPPORTED\n", __FUNCTION__));
2255 } else {
2256 DHD_ERROR(("%s set lpas failed %d\n",
2257 __FUNCTION__, ret));
2258 }
2259 }
2260 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
2261 sizeof(bcn_to_dly), NULL, 0, TRUE);
2262 if (ret < 0) {
2263 if (ret == BCME_UNSUPPORTED) {
2264 DHD_ERROR(("%s bcn_to_dly UNSUPPORTED\n",
2265 __FUNCTION__));
2266 } else {
2267 DHD_ERROR(("%s set bcn_to_dly failed %d\n",
2268 __FUNCTION__, ret));
2269 }
2270 }
2271 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2272 sizeof(bcn_timeout), NULL, 0, TRUE);
2273 if (ret < 0) {
2274 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
2275 __FUNCTION__, ret));
2276 }
2277#else
2278 /* restore pre-suspend setting for dtim_skip */
2279 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2280 sizeof(bcn_li_dtim), NULL, 0, TRUE);
2281 if (ret < 0) {
2282				DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
2283 }
2284#endif /* BCMPCIE */
2285#ifdef DHD_USE_EARLYSUSPEND
2286#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2287 bcn_timeout = CUSTOM_BCN_TIMEOUT;
2288 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2289 sizeof(bcn_timeout), NULL, 0, TRUE);
2290 if (ret < 0) {
2291 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
2292 __FUNCTION__, ret));
2293 }
2294#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2295#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2296 roam_time_thresh = 2000;
2297 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
2298 (char *)&roam_time_thresh,
2299 sizeof(roam_time_thresh), NULL, 0, TRUE);
2300 if (ret < 0) {
2301 DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
2302 __FUNCTION__, ret));
2303 }
2304
2305#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2306#ifndef ENABLE_FW_ROAM_SUSPEND
2307 roamvar = dhd_roam_disable;
2308 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
2309 sizeof(roamvar), NULL, 0, TRUE);
2310 if (ret < 0) {
2311 DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
2312 }
2313#endif /* ENABLE_FW_ROAM_SUSPEND */
2314#ifdef ENABLE_BCN_LI_BCN_WAKEUP
2315 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
2316 sizeof(bcn_li_bcn), NULL, 0, TRUE);
2317 if (ret < 0) {
2318 DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
2319 __FUNCTION__, ret));
2320 }
2321#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2322#ifdef NDO_CONFIG_SUPPORT
2323 if (dhd->ndo_enable) {
2324 /* Disable ND offload on resume */
2325 ret = dhd_ndo_enable(dhd, FALSE);
2326 if (ret < 0) {
2327 DHD_ERROR(("%s: failed to disable NDO\n",
2328 __FUNCTION__));
2329 }
2330 }
2331#endif /* NDO_CONFIG_SUPPORT */
2332#ifndef APF
2333 if (FW_SUPPORTED(dhd, ndoe))
2334#else
2335 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
2336#endif /* APF */
2337 {
2338				/* disable IPv6 RA filter in firmware on resume */
2339 nd_ra_filter = 0;
2340 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
2341 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
2342 NULL, 0, TRUE);
2343 if (ret < 0) {
2344 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2345 ret));
2346 }
2347 }
2348 dhd_os_suppress_logging(dhd, FALSE);
2349#ifdef ENABLE_IPMCAST_FILTER
2350 ipmcast_l2filter = 0;
2351 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
2352 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
2353 NULL, 0, TRUE);
2354 if (ret < 0) {
2355 DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
2356 }
2357#endif /* ENABLE_IPMCAST_FILTER */
2358#ifdef CUSTOM_EVENT_PM_WAKE
2359 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
2360 (char *)&pm_awake_thresh,
2361 sizeof(pm_awake_thresh), NULL, 0, TRUE);
2362 if (ret < 0) {
2363 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
2364 __FUNCTION__, ret));
2365 }
2366#endif /* CUSTOM_EVENT_PM_WAKE */
2367#ifdef CONFIG_SILENT_ROAM
2368 ret = dhd_sroam_set_mon(dhd, FALSE);
2369 if (ret < 0) {
2370 DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
2371 }
2372#endif /* CONFIG_SILENT_ROAM */
2373#endif /* DHD_USE_EARLYSUSPEND */
2374 }
2375 }
2376 dhd_suspend_unlock(dhd);
2377
2378 return 0;
2379}
2380
2381static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2382{
2383 dhd_pub_t *dhdp = &dhd->pub;
2384 int ret = 0;
2385
2386 DHD_OS_WAKE_LOCK(dhdp);
2387
2388 /* Set flag when early suspend was called */
2389 dhdp->in_suspend = val;
2390 if ((force || !dhdp->suspend_disable_flag) &&
2391 (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND)))
2392 {
2393 ret = dhd_set_suspend(val, dhdp);
2394 }
2395
2396 DHD_OS_WAKE_UNLOCK(dhdp);
2397 return ret;
2398}
2399
2400#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2401static void dhd_early_suspend(struct early_suspend *h)
2402{
2403 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2404 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2405
2406 if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
2407 dhd_suspend_resume_helper(dhd, 1, 0);
2408 dhd_conf_set_suspend_resume(&dhd->pub, 1);
2409 }
2410}
2411
2412static void dhd_late_resume(struct early_suspend *h)
2413{
2414 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2415 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2416
2417 if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
2418 dhd_conf_set_suspend_resume(&dhd->pub, 0);
2419 dhd_suspend_resume_helper(dhd, 0, 0);
2420 }
2421}
2422#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
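/* The handlers above are hooked through the Android early-suspend framework;
 * a registration sketch along the lines of what the attach path does (the
 * level value is an assumption; illustrative only, never compiled here):
 */
#if 0
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
#endif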
2423
2424/*
2425 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2426 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2427 *
2428 * dhd_timeout_start(&tmo, usec);
2429 * while (!dhd_timeout_expired(&tmo))
2430 * if (poll_something())
2431 * break;
2432 * if (dhd_timeout_expired(&tmo))
2433 * fatal();
2434 */
2435
2436void
2437dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2438{
2439 tmo->limit = usec;
2440 tmo->increment = 0;
2441 tmo->elapsed = 0;
2442 tmo->tick = 10 * USEC_PER_MSEC; /* 10 msec */
2443}
2444
2445int
2446dhd_timeout_expired(dhd_timeout_t *tmo)
2447{
2448	/* Do nothing on the first call */
2449 if (tmo->increment == 0) {
2450 tmo->increment = USEC_PER_MSEC; /* Start with 1 msec */
2451 return 0;
2452 }
2453
2454 if (tmo->elapsed >= tmo->limit)
2455 return 1;
2456
2457 DHD_INFO(("%s: CAN_SLEEP():%d tmo->increment=%ld msec\n",
2458 __FUNCTION__, CAN_SLEEP(), tmo->increment / USEC_PER_MSEC));
2459
2460 CAN_SLEEP() ? OSL_SLEEP(tmo->increment / USEC_PER_MSEC) : OSL_DELAY(tmo->increment);
2461
2462	/* The delay doubles each call until it reaches tmo->tick; after that it
2463	 * stays constant at tmo->tick (10 msec) until the timer elapses.
2464	 */
2465 tmo->increment = (tmo->increment >= tmo->tick) ? tmo->tick : (tmo->increment * 2);
2466
2467 /* Add the delay that's about to take place */
2468 tmo->elapsed += tmo->increment;
2469
2470 return 0;
2471}
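/* The back-off above sleeps 1, 2, 4, 8, then 10, 10, ... msec per call once
 * tmo->tick is reached. A standalone sketch of just the schedule arithmetic
 * (illustrative only, never compiled):
 */
#if 0
	uint increment = USEC_PER_MSEC;		/* first wait: 1 msec */
	uint tick = 10 * USEC_PER_MSEC;
	uint elapsed = 0, limit = 100 * USEC_PER_MSEC;

	while (elapsed < limit) {
		/* sleep/delay for 'increment' usec here */
		increment = (increment >= tick) ? tick : (increment * 2);
		elapsed += increment;		/* charge the next wait up front */
	}
#endif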
2472
2473int
2474dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2475{
2476 int i = 0;
2477
2478 if (!dhd) {
2479 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2480 return DHD_BAD_IF;
2481 }
2482
2483 while (i < DHD_MAX_IFS) {
2484 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2485 return i;
2486 i++;
2487 }
2488
2489 return DHD_BAD_IF;
2490}
2491
2492struct net_device * dhd_idx2net(void *pub, int ifidx)
2493{
2494 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2495 struct dhd_info *dhd_info;
2496
2497 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2498 return NULL;
2499 dhd_info = dhd_pub->info;
2500 if (dhd_info && dhd_info->iflist[ifidx])
2501 return dhd_info->iflist[ifidx]->net;
2502 return NULL;
2503}
2504
2505int
2506dhd_ifname2idx(dhd_info_t *dhd, char *name)
2507{
2508 int i = DHD_MAX_IFS;
2509
2510 ASSERT(dhd);
2511
2512 if (name == NULL || *name == '\0')
2513 return 0;
2514
2515 while (--i > 0)
2516 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2517 break;
2518
2519 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2520
2521 return i; /* default - the primary interface */
2522}
2523
2524char *
2525dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2526{
2527 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2528
2529 ASSERT(dhd);
2530
2531 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2532 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2533 return "<if_bad>";
2534 }
2535
2536 if (dhd->iflist[ifidx] == NULL) {
2537 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2538 return "<if_null>";
2539 }
2540
2541 if (dhd->iflist[ifidx]->net)
2542 return dhd->iflist[ifidx]->net->name;
2543
2544 return "<if_none>";
2545}
2546
2547uint8 *
2548dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2549{
2550 int i;
2551	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2552
2553 ASSERT(dhd);
2554 for (i = 0; i < DHD_MAX_IFS; i++)
2555 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2556 return dhd->iflist[i]->mac_addr;
2557
2558 return NULL;
2559}
2560
2561static void
2562_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
2563{
2564 struct net_device *dev;
2565 struct netdev_hw_addr *ha;
2566 uint32 allmulti, cnt;
2567
2568 wl_ioctl_t ioc;
2569 char *buf, *bufp;
2570 uint buflen;
2571 int ret;
2572
2573#ifdef MCAST_LIST_ACCUMULATION
2574 int i;
2575 uint32 cnt_iface[DHD_MAX_IFS];
2576 cnt = 0;
2577 allmulti = 0;
2578
2579 for (i = 0; i < DHD_MAX_IFS; i++) {
2580 if (dhd->iflist[i]) {
2581 dev = dhd->iflist[i]->net;
2582 if (!dev)
2583 continue;
2584 netif_addr_lock_bh(dev);
2585 cnt_iface[i] = netdev_mc_count(dev);
2586 cnt += cnt_iface[i];
2587 netif_addr_unlock_bh(dev);
2588
2589 /* Determine initial value of allmulti flag */
2590 allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2591 }
2592 }
2593#else /* !MCAST_LIST_ACCUMULATION */
2594 if (!dhd->iflist[ifidx]) {
2595 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
2596 return;
2597 }
2598 dev = dhd->iflist[ifidx]->net;
2599 if (!dev)
2600 return;
2601 netif_addr_lock_bh(dev);
2602 cnt = netdev_mc_count(dev);
2603 netif_addr_unlock_bh(dev);
2604
2605 /* Determine initial value of allmulti flag */
2606 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2607#endif /* MCAST_LIST_ACCUMULATION */
2608
2609#ifdef PASS_ALL_MCAST_PKTS
2610#ifdef PKT_FILTER_SUPPORT
2611 if (!dhd->pub.early_suspended)
2612#endif /* PKT_FILTER_SUPPORT */
2613 allmulti = TRUE;
2614#endif /* PASS_ALL_MCAST_PKTS */
2615
2616 /* Send down the multicast list first. */
2617
2618 /* XXX Not using MAXMULTILIST to avoid including wlc_pub.h; but
2619 * maybe we should? (Or should that be in wlioctl.h instead?)
2620 */
2621
2622 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
2623 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
2624 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
2625 dhd_ifname(&dhd->pub, ifidx), cnt));
2626 return;
2627 }
2628
2629 strlcpy(bufp, "mcast_list", buflen);
2630 bufp += strlen("mcast_list") + 1;
2631
2632 cnt = htol32(cnt);
2633 memcpy(bufp, &cnt, sizeof(cnt));
2634 bufp += sizeof(cnt);
2635
2636#ifdef MCAST_LIST_ACCUMULATION
2637 for (i = 0; i < DHD_MAX_IFS; i++) {
2638 if (dhd->iflist[i]) {
2639 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
2640 dev = dhd->iflist[i]->net;
2641
2642 netif_addr_lock_bh(dev);
2643 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
2644 netdev_for_each_mc_addr(ha, dev) {
2645 GCC_DIAGNOSTIC_POP();
2646 if (!cnt_iface[i])
2647 break;
2648 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2649 bufp += ETHER_ADDR_LEN;
2650 DHD_TRACE(("_dhd_set_multicast_list: cnt "
2651 "%d " MACDBG "\n",
2652 cnt_iface[i], MAC2STRDBG(ha->addr)));
2653 cnt_iface[i]--;
2654 }
2655 netif_addr_unlock_bh(dev);
2656 }
2657 }
2658#else /* !MCAST_LIST_ACCUMULATION */
2659 netif_addr_lock_bh(dev);
2660 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
2661 netdev_for_each_mc_addr(ha, dev) {
2662 GCC_DIAGNOSTIC_POP();
2663 if (!cnt)
2664 break;
2665 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2666 bufp += ETHER_ADDR_LEN;
2667 cnt--;
2668 }
2669 netif_addr_unlock_bh(dev);
2670#endif /* MCAST_LIST_ACCUMULATION */
2671
2672 memset(&ioc, 0, sizeof(ioc));
2673 ioc.cmd = WLC_SET_VAR;
2674 ioc.buf = buf;
2675 ioc.len = buflen;
2676 ioc.set = TRUE;
2677
2678 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2679 if (ret < 0) {
2680 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2681 dhd_ifname(&dhd->pub, ifidx), cnt));
2682 allmulti = cnt ? TRUE : allmulti;
2683 }
2684
2685 MFREE(dhd->pub.osh, buf, buflen);
2686
2687 /* Now send the allmulti setting. This is based on the setting in the
2688 * net_device flags, but might be modified above to be turned on if we
2689 * were trying to set some addresses and dongle rejected it...
2690 */
2691
2692 allmulti = htol32(allmulti);
2693 ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
2694 sizeof(allmulti), NULL, 0, TRUE);
2695 if (ret < 0) {
2696 DHD_ERROR(("%s: set allmulti %d failed\n",
2697 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2698 }
2699
2700 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
2701
2702#ifdef MCAST_LIST_ACCUMULATION
2703 allmulti = 0;
2704 for (i = 0; i < DHD_MAX_IFS; i++) {
2705 if (dhd->iflist[i]) {
2706 dev = dhd->iflist[i]->net;
2707 allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2708 }
2709 }
2710#else
2711 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2712#endif /* MCAST_LIST_ACCUMULATION */
2713
2714 allmulti = htol32(allmulti);
2715
2716 memset(&ioc, 0, sizeof(ioc));
2717 ioc.cmd = WLC_SET_PROMISC;
2718 ioc.buf = &allmulti;
2719 ioc.len = sizeof(allmulti);
2720 ioc.set = TRUE;
2721
2722 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2723 if (ret < 0) {
2724 DHD_ERROR(("%s: set promisc %d failed\n",
2725 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2726 }
2727}
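/* The "mcast_list" buffer assembled above is laid out as the NUL-terminated
 * iovar name, a little-endian 32-bit address count, then cnt back-to-back
 * 6-byte MAC addresses:
 *
 *   | "mcast_list\0" | cnt (LE32) | MAC[0] | MAC[1] | ... |
 *
 * A packing sketch for one fixed address (osh and mac0 are hypothetical,
 * never compiled):
 */
#if 0
	uint32 cnt = 1;
	uint buflen = sizeof("mcast_list") + sizeof(cnt) + cnt * ETHER_ADDR_LEN;
	char *buf = MALLOC(osh, buflen), *bufp = buf;

	strlcpy(bufp, "mcast_list", buflen);
	bufp += strlen("mcast_list") + 1;
	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);
	memcpy(bufp, mac0, ETHER_ADDR_LEN);
#endif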
2728
2729int
2730_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr, bool skip_stop)
2731{
2732 int ret;
2733
2734#ifdef DHD_NOTIFY_MAC_CHANGED
2735 if (skip_stop) {
2736 WL_MSG(dhd_ifname(&dhd->pub, ifidx), "close dev for mac changing\n");
2737 dhd->pub.skip_dhd_stop = TRUE;
2738 dev_close(dhd->iflist[ifidx]->net);
2739 }
2740#endif /* DHD_NOTIFY_MAC_CHANGED */
2741
2742 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
2743 ETHER_ADDR_LEN, NULL, 0, TRUE);
2744 if (ret < 0) {
2745 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2746#ifdef DHD_NOTIFY_MAC_CHANGED
2747 if (skip_stop)
2748 dhd->pub.skip_dhd_stop = FALSE;
2749 return ret;
2750#endif /* DHD_NOTIFY_MAC_CHANGED */
2751 } else {
2752 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2753 if (ifidx == 0)
2754 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2755 }
2756
2757#ifdef DHD_NOTIFY_MAC_CHANGED
2758 if (skip_stop) {
2759 dev_open(dhd->iflist[ifidx]->net);
2760 dhd->pub.skip_dhd_stop = FALSE;
2761 WL_MSG(dhd_ifname(&dhd->pub, ifidx), "notify mac changed done\n");
2762 }
2763#endif /* DHD_NOTIFY_MAC_CHANGED */
2764
2765 return ret;
2766}
2767
2768int dhd_update_rand_mac_addr(dhd_pub_t *dhd)
2769{
2770 struct ether_addr mac_addr;
2771 dhd_generate_rand_mac_addr(&mac_addr);
2772 if (_dhd_set_mac_address(dhd->info, 0, mac_addr.octet, TRUE) != 0) {
2773 DHD_ERROR(("randmac setting failed\n"));
2774#ifdef STA_RANDMAC_ENFORCED
2775 return BCME_BADADDR;
2776#endif /* STA_RANDMAC_ENFORCED */
2777 }
2778 return BCME_OK;
2779}
2780
2781#ifdef DHD_PSTA
2782/* Get psta/psr configuration */
2783int dhd_get_psta_mode(dhd_pub_t *dhdp)
2784{
2785 dhd_info_t *dhd = dhdp->info;
2786 return (int)dhd->psta_mode;
2787}
2788/* Set psta/psr configuration */
2789int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
2790{
2791 dhd_info_t *dhd = dhdp->info;
2792 dhd->psta_mode = val;
2793 return 0;
2794}
2795#endif /* DHD_PSTA */
2796
2797#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
2798static void
2799dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
2800{
2801 dhd_info_t *dhd = dhdp->info;
2802 dhd_if_t *ifp;
2803
2804 ASSERT(idx < DHD_MAX_IFS);
2805
2806 ifp = dhd->iflist[idx];
2807
2808 if (
2809#ifdef DHD_L2_FILTER
2810 (ifp->block_ping) ||
2811#endif
2812#ifdef DHD_WET
2813 (dhd->wet_mode) ||
2814#endif
2815#ifdef DHD_MCAST_REGEN
2816 (ifp->mcast_regen_bss_enable) ||
2817#endif
2818 FALSE) {
2819 ifp->rx_pkt_chainable = FALSE;
2820 }
2821}
2822#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
2823
2824#ifdef DHD_WET
2825/* Get wet mode configuration */
2826int dhd_get_wet_mode(dhd_pub_t *dhdp)
2827{
2828 dhd_info_t *dhd = dhdp->info;
2829 return (int)dhd->wet_mode;
2830}
2831
2832/* Set wet mode configuration */
2833int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
2834{
2835 dhd_info_t *dhd = dhdp->info;
2836 dhd->wet_mode = val;
2837 dhd_update_rx_pkt_chainable_state(dhdp, 0);
2838 return 0;
2839}
2840#endif /* DHD_WET */
2841
2842#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2843int32 dhd_role_to_nl80211_iftype(int32 role)
2844{
2845 switch (role) {
2846 case WLC_E_IF_ROLE_STA:
2847 return NL80211_IFTYPE_STATION;
2848 case WLC_E_IF_ROLE_AP:
2849 return NL80211_IFTYPE_AP;
2850 case WLC_E_IF_ROLE_WDS:
2851 return NL80211_IFTYPE_WDS;
2852 case WLC_E_IF_ROLE_P2P_GO:
2853 return NL80211_IFTYPE_P2P_GO;
2854 case WLC_E_IF_ROLE_P2P_CLIENT:
2855 return NL80211_IFTYPE_P2P_CLIENT;
2856 case WLC_E_IF_ROLE_IBSS:
2857 case WLC_E_IF_ROLE_NAN:
2858 return NL80211_IFTYPE_ADHOC;
2859 default:
2860 return NL80211_IFTYPE_UNSPECIFIED;
2861 }
2862}
2863#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2864
2865static void
2866dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
2867{
2868 dhd_info_t *dhd = handle;
2869 dhd_if_event_t *if_event = event_info;
2870 int ifidx, bssidx;
2871 int ret;
2872#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2873 struct wl_if_event_info info;
2874#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
2875 struct net_device *ndev = NULL;
2876#endif
2877#else
2878 struct net_device *ndev;
2879#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2880
2881 BCM_REFERENCE(ret);
2882 if (event != DHD_WQ_WORK_IF_ADD) {
2883 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2884 return;
2885 }
2886
2887 if (!dhd) {
2888 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2889 return;
2890 }
2891
2892 if (!if_event) {
2893 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2894 return;
2895 }
2896
2897 dhd_net_if_lock_local(dhd);
2898 DHD_OS_WAKE_LOCK(&dhd->pub);
2899
2900 ifidx = if_event->event.ifidx;
2901 bssidx = if_event->event.bssidx;
2902 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
2903
2904#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2905 if (if_event->event.ifidx > 0) {
2906 u8 *mac_addr;
2907 bzero(&info, sizeof(info));
2908 info.ifidx = ifidx;
2909 info.bssidx = bssidx;
2910 info.role = if_event->event.role;
2911 strlcpy(info.name, if_event->name, sizeof(info.name));
2912 if (is_valid_ether_addr(if_event->mac)) {
2913 mac_addr = if_event->mac;
2914 } else {
2915 mac_addr = NULL;
2916 }
2917
2918#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
2919 if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
2920 &info, mac_addr, NULL, true)) == NULL)
2921#else
2922 if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
2923 &info, mac_addr, NULL, true) == NULL)
2924#endif
2925 {
2926 /* Do the post interface create ops */
2927 DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
2928 goto done;
2929 }
2930 }
2931#else
2932	/* This path is for the non-Android case. */
2933	/* The interface name on the host and in the event msg are the same; */
2934	/* the name in the event msg is used to create the dongle i/f list on the host. */
2935 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2936 if_event->mac, bssidx, TRUE, if_event->name);
2937 if (!ndev) {
2938 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
2939 goto done;
2940 }
2941
2942 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2943 if (ret != BCME_OK) {
2944 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
2945 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2946 goto done;
2947 }
2948#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2949
2950#ifndef PCIE_FULL_DONGLE
2951 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2952 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
2953 uint32 var_int = 1;
2954 ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
2955 NULL, 0, TRUE);
2956 if (ret != BCME_OK) {
2957 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
2958 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2959 }
2960 }
2961#endif /* PCIE_FULL_DONGLE */
2962
2963done:
2964 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2965#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
2966 dhd_bridge_dev_set(dhd, ifidx, ndev);
2967#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
2968
2969 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2970 dhd_net_if_unlock_local(dhd);
2971}
2972
2973static void
2974dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2975{
2976 dhd_info_t *dhd = handle;
2977 int ifidx;
2978 dhd_if_event_t *if_event = event_info;
2979
2980 if (event != DHD_WQ_WORK_IF_DEL) {
2981 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2982 return;
2983 }
2984
2985 if (!dhd) {
2986 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2987 return;
2988 }
2989
2990 if (!if_event) {
2991 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2992 return;
2993 }
2994
2995 dhd_net_if_lock_local(dhd);
2996 DHD_OS_WAKE_LOCK(&dhd->pub);
2997
2998 ifidx = if_event->event.ifidx;
2999 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3000#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
3001 dhd_bridge_dev_set(dhd, ifidx, NULL);
3002#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
3003
3004 if (!dhd->pub.info->iflist[ifidx]) {
3005 /* No matching netdev found */
3006 DHD_ERROR(("Netdev not found! Do nothing.\n"));
3007 goto done;
3008 }
3009#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3010 if (if_event->event.ifidx > 0) {
3011 /* Do the post interface del ops */
3012 if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
3013 true, if_event->event.ifidx) != 0) {
3014 DHD_TRACE(("Post ifdel ops failed. Returning \n"));
3015 goto done;
3016 }
3017 }
3018#else
3019 /* For non-cfg80211 drivers */
3020 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3021#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3022
3023done:
3024 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3025 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3026 dhd_net_if_unlock_local(dhd);
3027}
3028
3029#ifdef DHD_UPDATE_INTF_MAC
3030static void
3031dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
3032{
3033 dhd_info_t *dhd = handle;
3034 int ifidx;
3035 dhd_if_event_t *if_event = event_info;
3036
3037 if (event != DHD_WQ_WORK_IF_UPDATE) {
3038 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3039 return;
3040 }
3041
3042 if (!dhd) {
3043 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3044 return;
3045 }
3046
3047 if (!if_event) {
3048 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3049 return;
3050 }
3051
3052 dhd_net_if_lock_local(dhd);
3053 DHD_OS_WAKE_LOCK(&dhd->pub);
3054
3055 ifidx = if_event->event.ifidx;
3056 DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));
3057
3058 dhd_op_if_update(&dhd->pub, ifidx);
3059
3060 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3061
3062 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3063 dhd_net_if_unlock_local(dhd);
3064}
3065
3066int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
3067{
3068 dhd_info_t * dhdinfo = NULL;
3069 dhd_if_t * ifp = NULL;
3070 int ret = 0;
3071 char buf[128];
3072
3073	if ((dhdpub == NULL) || (dhdpub->info == NULL)) {
3074 DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
3075 return -1;
3076 } else {
3077 dhdinfo = (dhd_info_t *)dhdpub->info;
3078 ifp = dhdinfo->iflist[ifidx];
3079		if (ifp == NULL) {
3080 DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
3081 return -2;
3082 }
3083 }
3084
3085 DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
3086 // Get MAC address
3087 strcpy(buf, "cur_etheraddr");
3088 ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
3089	if (ret < 0) {
3090		DHD_ERROR(("Failed to update the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
3091 // avoid collision
3092 dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
3093		// force a locally administered address
3094 ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
3095 } else {
3096 DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
3097 ifp->name, ifp->idx,
3098 (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
3099 (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
3100 memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
3101 if (dhdinfo->iflist[ifp->idx]->net) {
3102 memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
3103 }
3104 }
3105
3106 return ret;
3107}
3108#endif /* DHD_UPDATE_INTF_MAC */
3109
3110static void
3111dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3112{
3113 dhd_info_t *dhd = handle;
3114 dhd_if_t *ifp = event_info;
3115
3116	if (event != DHD_WQ_WORK_SET_MAC) {
3117		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
3118	}
3119
3120 if (!dhd) {
3121 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3122 return;
3123 }
3124
3125 dhd_net_if_lock_local(dhd);
3126 DHD_OS_WAKE_LOCK(&dhd->pub);
3127
3128 // terence 20160907: fix for not able to set mac when wlan0 is down
3129 if (ifp == NULL || !ifp->set_macaddress) {
3130 goto done;
3131 }
3132	if (!dhd->pub.up) {
3133 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3134 goto done;
3135 }
3136
3137 ifp->set_macaddress = FALSE;
3138
3139#ifdef DHD_NOTIFY_MAC_CHANGED
3140 rtnl_lock();
3141#endif /* DHD_NOTIFY_MAC_CHANGED */
3142
3143 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr, TRUE) == 0)
3144 WL_MSG(dhd_ifname(&dhd->pub, ifp->idx), "MACID is overwritten\n");
3145 else
3146 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3147
3148#ifdef DHD_NOTIFY_MAC_CHANGED
3149 rtnl_unlock();
3150#endif /* DHD_NOTIFY_MAC_CHANGED */
3151
3152done:
3153 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3154 dhd_net_if_unlock_local(dhd);
3155}
3156
3157static void
3158dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3159{
3160 dhd_info_t *dhd = handle;
3161 int ifidx = (int)((long int)event_info);
3162 dhd_if_t *ifp = NULL;
3163
3164 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3165 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3166 return;
3167 }
3168
3169 if (!dhd) {
3170 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3171 return;
3172 }
3173
3174 dhd_net_if_lock_local(dhd);
3175 DHD_OS_WAKE_LOCK(&dhd->pub);
3176
3177 ifp = dhd->iflist[ifidx];
3178
3179 if (ifp == NULL || !dhd->pub.up) {
3180 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3181 goto done;
3182 }
3183
3189 ifidx = ifp->idx;
3190
3191#ifdef MCAST_LIST_ACCUMULATION
3192 ifidx = 0;
3193#endif /* MCAST_LIST_ACCUMULATION */
3194
3195 _dhd_set_multicast_list(dhd, ifidx);
3196 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3197
3198done:
3199 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3200 dhd_net_if_unlock_local(dhd);
3201}
3202
3203static int
3204dhd_set_mac_address(struct net_device *dev, void *addr)
3205{
3206 int ret = 0;
3207
3208 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3209 struct sockaddr *sa = (struct sockaddr *)addr;
3210 int ifidx;
3211 dhd_if_t *dhdif;
3212#ifdef WL_STATIC_IF
3213 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
3214#endif /* WL_STATIC_IF */
3215 dhd_pub_t *dhdp = &dhd->pub;
3216
3217 BCM_REFERENCE(ifidx);
3218
3219 DHD_TRACE(("%s \n", __func__));
3220
3221 dhdif = dhd_get_ifp_by_ndev(dhdp, dev);
3222 if (!dhdif) {
3223 return -ENODEV;
3224 }
3225 ifidx = dhdif->idx;
3226 dhd_net_if_lock_local(dhd);
3227 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3228 dhdif->set_macaddress = TRUE;
3229 dhd_net_if_unlock_local(dhd);
3230
3231 WL_MSG(dev->name, "iftype = %d macaddr = "MACDBG"\n",
3232 dev->ieee80211_ptr->iftype, MAC2STRDBG(&dhdif->mac_addr));
3233#ifdef WL_CFG80211
3234 /* Check wdev->iftype for the role */
3235 if (wl_cfg80211_macaddr_sync_reqd(dev)) {
3236 /* Supplicant and certain user layer applications expect macaddress to be
3237 * set once the context returns. so set it from the same context
3238 */
3239#ifdef WL_STATIC_IF
3240 if (IS_CFG80211_STATIC_IF(cfg, dev) && !(dev->flags & IFF_UP)) {
3241 /* In softap case, the macaddress will be applied before interface up
3242 * and hence curether_addr can't be done at this stage (no fw iface
3243 * available). Store the address and return. macaddr will be applied
3244 * from interface create context.
3245 */
3246 (void)memcpy_s(dev->dev_addr, ETH_ALEN, dhdif->mac_addr, ETH_ALEN);
3247#ifdef DHD_NOTIFY_MAC_CHANGED
3248 dev_open(dev);
3249#endif /* DHD_NOTIFY_MAC_CHANGED */
3250 return ret;
3251 }
3252#endif /* WL_STATIC_IF */
3253 wl_cfg80211_handle_macaddr_change(dev, dhdif->mac_addr);
3254 return _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr, TRUE);
3255 }
3256#endif /* WL_CFG80211 */
3257
3258 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3259 dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
3260 return ret;
3261}
3262
3263static void
3264dhd_set_multicast_list(struct net_device *dev)
3265{
3266 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3267 int ifidx;
3268
3269 ifidx = dhd_net2idx(dhd, dev);
3270 if (ifidx == DHD_BAD_IF)
3271 return;
3272
3273 dhd->iflist[ifidx]->set_multicast = TRUE;
3274 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
3275 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
3276
3277 // terence 20160907: fix for not able to set mac when wlan0 is down
3278 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3279 DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
3280}
3281
3282#ifdef DHD_UCODE_DOWNLOAD
3283/* Get ucode path */
3284char *
3285dhd_get_ucode_path(dhd_pub_t *dhdp)
3286{
3287 dhd_info_t *dhd = dhdp->info;
3288 return dhd->uc_path;
3289}
3290#endif /* DHD_UCODE_DOWNLOAD */
3291
3292#ifdef PROP_TXSTATUS
3293int
3294dhd_os_wlfc_block(dhd_pub_t *pub)
3295{
3296 dhd_info_t *di = (dhd_info_t *)(pub->info);
3297 ASSERT(di != NULL);
3298 /* terence 20161229: don't do spin lock if proptx not enabled */
3299 if (disable_proptx)
3300 return 1;
3301#ifdef BCMDBUS
3302 spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
3303#else
3304 spin_lock_bh(&di->wlfc_spinlock);
3305#endif /* BCMDBUS */
3306 return 1;
3307}
3308
3309int
3310dhd_os_wlfc_unblock(dhd_pub_t *pub)
3311{
3312 dhd_info_t *di = (dhd_info_t *)(pub->info);
3313
3314 ASSERT(di != NULL);
3315 /* terence 20161229: don't do spin lock if proptx not enabled */
3316 if (disable_proptx)
3317 return 1;
3318#ifdef BCMDBUS
3319 spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
3320#else
3321 spin_unlock_bh(&di->wlfc_spinlock);
3322#endif /* BCMDBUS */
3323 return 1;
3324}
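/* Callers pair these as a critical section around wlfc state; a usage sketch
 * (illustrative only, never compiled):
 */
#if 0
	dhd_os_wlfc_block(dhdp);		/* no-op lock if proptx is disabled */
	/* ... read or modify wlfc state ... */
	dhd_os_wlfc_unblock(dhdp);
#endif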
3325
3326#endif /* PROP_TXSTATUS */
3327
3328#if defined(WL_MONITOR) && defined(BCMSDIO)
3329static void
3330dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx);
3331bool
3332dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
3333#endif /* WL_MONITOR && BCMSDIO */
3334
3335/* This routine does not support the packet chain feature; currently tested
3336 * for the proxy ARP feature.
3337 */
3338int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
3339{
3340 struct sk_buff *skb;
3341 void *skbhead = NULL;
3342 void *skbprev = NULL;
3343 dhd_if_t *ifp;
3344 ASSERT(!PKTISCHAINED(p));
3345 skb = PKTTONATIVE(dhdp->osh, p);
3346
3347 ifp = dhdp->info->iflist[ifidx];
3348 skb->dev = ifp->net;
3349 skb->protocol = eth_type_trans(skb, skb->dev);
3350
3351 if (in_interrupt()) {
3352 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3353 __FUNCTION__, __LINE__);
3354 netif_rx(skb);
3355 } else {
3356 if (dhdp->info->rxthread_enabled) {
3357 if (!skbhead) {
3358 skbhead = skb;
3359 } else {
3360 PKTSETNEXT(dhdp->osh, skbprev, skb);
3361 }
3362 skbprev = skb;
3363 } else {
3364 /* If the receive is not processed inside an ISR,
3365 * the softirqd must be woken explicitly to service
3366 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3367 * by netif_rx_ni(), but in earlier kernels, we need
3368 * to do it manually.
3369 */
3370 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3371 __FUNCTION__, __LINE__);
3372#if defined(WL_MONITOR) && defined(BCMSDIO)
3373 if (dhd_monitor_enabled(dhdp, ifidx))
3374 dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
3375 else
3376#endif /* WL_MONITOR && BCMSDIO */
3377 netif_rx_ni(skb);
3378 }
3379 }
3380
3381 if (dhdp->info->rxthread_enabled && skbhead)
3382 dhd_sched_rxf(dhdp, skbhead);
3383
3384 return BCME_OK;
3385}
3386
3387int
3388BCMFASTPATH(__dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3389{
3390 int ret = BCME_OK;
3391 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3392 struct ether_header *eh = NULL;
3393 bool pkt_ether_type_802_1x = FALSE;
3394 uint8 pkt_flow_prio;
3395 uint8 dhd_udr = FALSE;
3396
3397#if defined(DHD_L2_FILTER)
3398 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
3399#endif
3400
3401 /* Reject if down */
3402 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
3403 /* free the packet here since the caller won't */
3404 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3405 return -ENODEV;
3406 }
3407
3408#ifdef PCIE_FULL_DONGLE
3409 if (dhdp->busstate == DHD_BUS_SUSPEND) {
3410 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3411 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3412 return NETDEV_TX_BUSY;
3413 }
3414#endif /* PCIE_FULL_DONGLE */
3415
3416 /* Reject if pktlen > MAX_MTU_SZ */
3417 if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
3418 /* free the packet here since the caller won't */
3419 dhdp->tx_big_packets++;
3420 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3421 return BCME_ERROR;
3422 }
3423
3424#ifdef DHD_L2_FILTER
3425 /* if dhcp_unicast is enabled, we need to convert the */
3426 /* broadcast DHCP ACK/REPLY packets to Unicast. */
3427 if (ifp->dhcp_unicast) {
3428 uint8* mac_addr;
3429 uint8* ehptr = NULL;
3430 int ret;
3431 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
3432 if (ret == BCME_OK) {
3433 /* if given mac address having valid entry in sta list
3434 * copy the given mac address, and return with BCME_OK
3435 */
3436 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
3437 ehptr = PKTDATA(dhdp->osh, pktbuf);
3438 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
3439 }
3440 }
3441 }
3442
3443 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3444 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
3445 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3446 return BCME_ERROR;
3447 }
3448 }
3449
3450 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3451 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
3452
3453 /* Drop the packets if l2 filter has processed it already
3454 * otherwise continue with the normal path
3455 */
3456 if (ret == BCME_OK) {
3457 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3458 return BCME_ERROR;
3459 }
3460 }
3461#endif /* DHD_L2_FILTER */
3462 /* Update multicast statistic */
3463 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
3464 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
3465 eh = (struct ether_header *)pktdata;
3466
3467 if (ETHER_ISMULTI(eh->ether_dhost))
3468 dhdp->tx_multicast++;
3469 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
3470#ifdef DHD_LOSSLESS_ROAMING
3471 uint8 prio = (uint8)PKTPRIO(pktbuf);
3472
3473 /* back up 802.1x's priority */
3474 dhdp->prio_8021x = prio;
3475#endif /* DHD_LOSSLESS_ROAMING */
3476 pkt_ether_type_802_1x = TRUE;
3477 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
3478 atomic_inc(&dhd->pend_8021x_cnt);
3479#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
3480 wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
3481 pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
3482#endif /* WL_CFG80211 && WL_WPS_SYNC */
3483 }
3484 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
3485 if (dhd_check_dhcp(pktdata)) {
3486 dhd_udr = TRUE;
3487 } else if (dhd_check_dns(pktdata)) {
3488 dhd_udr = TRUE;
3489 }
3490 }
3491 if (ntoh16(eh->ether_type) == ETHER_TYPE_ARP) {
3492 if (dhd_check_arp(pktdata)) {
3493 dhd_udr = TRUE;
3494 }
3495 }
3496 dhd_dump_pkt(dhdp, ifidx, pktdata,
3497 (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
3498 } else {
3499 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3500 return BCME_ERROR;
3501 }
3502
3503 {
3504 /* Look into the packet and update the packet priority */
3505#ifndef PKTPRIO_OVERRIDE
3506 /* XXX RB:6270 Ignore skb->priority from TCP/IP stack */
3507 if (PKTPRIO(pktbuf) == 0)
3508#endif /* !PKTPRIO_OVERRIDE */
3509 {
3510#if defined(QOS_MAP_SET)
3511 pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
3512#else
3513 pktsetprio(pktbuf, FALSE);
3514#endif /* QOS_MAP_SET */
3515 }
3516#ifndef PKTPRIO_OVERRIDE
3517 else {
3518			/* Some protocols like OZMO use priority values from 256..263.
3519			 * These are magic values to indicate a specific 802.1d priority.
3520			 * Make sure the priority field is in the range 0..7.
3521			 */
3522 PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
3523 }
3524#endif /* !PKTPRIO_OVERRIDE */
3525 }
3526
3527 BCM_REFERENCE(pkt_ether_type_802_1x);
3528 BCM_REFERENCE(pkt_flow_prio);
3529 /* Intercept and create Socket level statistics */
3530	/*
3531	 * TODO: Somehow moving this code block above the pktsetprio code
3532	 * resets the priority back to 0, but this does not happen for
3533	 * packets generated from iperf using the -S option. Can't understand why.
3534	 */
3535 dhd_update_sock_flows(dhd, pktbuf);
3536
3537#ifdef SUPPORT_SET_TID
3538 dhd_set_tid_based_on_uid(dhdp, pktbuf);
3539#endif /* SUPPORT_SET_TID */
3540
3541#ifdef PCIE_FULL_DONGLE
3542 /*
3543	 * Look up the per-interface hash table for a matching flowring. If one is not
3544 * available, allocate a unique flowid and add a flowring entry.
3545 * The found or newly created flowid is placed into the pktbuf's tag.
3546 */
3547
3548#ifdef DHD_LOSSLESS_ROAMING
3549 /* For LLR override and use flowring with prio 7 for 802.1x packets */
3550 if (pkt_ether_type_802_1x) {
3551 pkt_flow_prio = PRIO_8021D_NC;
3552 } else
3553#endif /* DHD_LOSSLESS_ROAMING */
3554#ifdef DHD_TX_PROFILE
3555 if (dhdp->tx_profile_enab && dhdp->num_profiles > 0 &&
3556 dhd_protocol_matches_profile(PKTDATA(dhdp->osh, pktbuf),
3557 PKTLEN(dhdp->osh, pktbuf), dhdp->protocol_filters)) {
3558 /* we only have support for one tx_profile at the moment */
3559
3560 /* tagged packets must be put into TID 6 */
3561 pkt_flow_prio = PRIO_8021D_VO;
3562 } else
3563#endif /* defined(DHD_TX_PROFILE) */
3564 {
3565 pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
3566 }
3567
3568 ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
3569 if (ret != BCME_OK) {
3570 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
3571 return ret;
3572 }
3573#endif /* PCIE_FULL_DONGLE */
3574	/* terence 20150901: Micky add to adjust the 802.1X priority */
3575 /* Set the 802.1X packet with the highest priority 7 */
3576 if (dhdp->conf->pktprio8021x >= 0)
3577 pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
3578
3579#ifdef PROP_TXSTATUS
3580 if (dhd_wlfc_is_supported(dhdp)) {
3581 /* store the interface ID */
3582 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
3583
3584 /* store destination MAC in the tag as well */
3585 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
3586
3587 /* decide which FIFO this packet belongs to */
3588 if (ETHER_ISMULTI(eh->ether_dhost))
3589 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
3590 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
3591 else
3592 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
3593 } else
3594#endif /* PROP_TXSTATUS */
3595 {
3596 /* If the protocol uses a data header, apply it */
3597 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
3598 }
3599#ifdef PCIE_FULL_DONGLE
3600 DHD_PKTTAG_SET_PKT_UDR((dhd_pkttag_fr_t *)PKTTAG(pktbuf), dhd_udr);
3601#else
3602 BCM_REFERENCE(dhd_udr);
3603#endif /* PCIE_FULL_DONGLE */
3604
3605 /* Use bus module to send data frame */
3606#ifdef PROP_TXSTATUS
3607 {
3608 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
3609 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
3610 /* non-proptxstatus way */
3611#ifdef BCMPCIE
3612 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3613#else
3614 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3615#endif /* BCMPCIE */
3616 }
3617 }
3618#else
3619#ifdef BCMPCIE
3620 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3621#else
3622 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3623#endif /* BCMPCIE */
3624#endif /* PROP_TXSTATUS */
3625#ifdef BCMDBUS
3626 if (ret)
3627 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3628#endif /* BCMDBUS */
3629
3630 return ret;
3631}
3632
3633int
3634BCMFASTPATH(dhd_sendpkt)(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3635{
3636 int ret = 0;
3637 unsigned long flags;
3638 dhd_if_t *ifp;
3639
3640 DHD_GENERAL_LOCK(dhdp, flags);
3641 ifp = dhd_get_ifp(dhdp, ifidx);
3642 if (!ifp || ifp->del_in_progress) {
3643 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
3644 __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
3645 DHD_GENERAL_UNLOCK(dhdp, flags);
3646 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3647 return -ENODEV;
3648 }
3649 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3650 DHD_ERROR(("%s: returning as busstate=%d\n",
3651 __FUNCTION__, dhdp->busstate));
3652 DHD_GENERAL_UNLOCK(dhdp, flags);
3653 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3654 return -ENODEV;
3655 }
3656 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3657 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
3658 DHD_GENERAL_UNLOCK(dhdp, flags);
3659
3660#ifdef DHD_PCIE_RUNTIMEPM
3661 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
3662 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3663 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3664 ret = -EBUSY;
3665 goto exit;
3666 }
3667#endif /* DHD_PCIE_RUNTIMEPM */
3668
3669 DHD_GENERAL_LOCK(dhdp, flags);
3670 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
3671 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3672 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3673 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
3674 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3675 dhd_os_tx_completion_wake(dhdp);
3676 dhd_os_busbusy_wake(dhdp);
3677 DHD_GENERAL_UNLOCK(dhdp, flags);
3678 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3679 return -ENODEV;
3680 }
3681 DHD_GENERAL_UNLOCK(dhdp, flags);
3682
3683 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
3684
3685#ifdef DHD_PCIE_RUNTIMEPM
3686exit:
3687#endif
3688 DHD_GENERAL_LOCK(dhdp, flags);
3689 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
3690 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3691 dhd_os_tx_completion_wake(dhdp);
3692 dhd_os_busbusy_wake(dhdp);
3693 DHD_GENERAL_UNLOCK(dhdp, flags);
3694 return ret;
3695}
3696
3697#ifdef DHD_MQ
3698#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
3699static uint16
3700BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb,
3701 void *accel_priv, select_queue_fallback_t fallback)
3702#else
3703static uint16
3704BCMFASTPATH(dhd_select_queue)(struct net_device *net, struct sk_buff *skb)
3705#endif /* LINUX_VERSION_CODE */
3706{
3707 dhd_info_t *dhd_info = DHD_DEV_INFO(net);
3708 dhd_pub_t *dhdp = &dhd_info->pub;
3709 uint16 prio = 0;
3710
3711 BCM_REFERENCE(dhd_info);
3712 BCM_REFERENCE(dhdp);
3713 BCM_REFERENCE(prio);
3714
3715#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
3716 if (mq_select_disable) {
3717 /* if driver side queue selection is disabled via sysfs, call the kernel
3718 * supplied fallback function to select the queue, which is usually
3719 * '__netdev_pick_tx()' in net/core/dev.c
3720 */
3721 return fallback(net, skb);
3722 }
3723#endif /* LINUX_VERSION */
3724
3725 prio = dhdp->flow_prio_map[skb->priority];
3726 if (prio < AC_COUNT)
3727 return prio;
3728 else
3729 return AC_BK;
3730}
3731#endif /* DHD_MQ */
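/* Minimal sketch (hypothetical, kernel >= 3.14 style signature) of the
 * queue selection above: honor a disable knob via the kernel fallback,
 * otherwise map the packet priority to an access-category queue.
 */
#if 0
static u16 example_select_queue(struct net_device *net, struct sk_buff *skb,
	void *accel_priv, select_queue_fallback_t fallback)
{
	u16 prio;

	if (example_mq_disabled)		/* hypothetical sysfs knob */
		return fallback(net, skb);	/* usually __netdev_pick_tx() */

	prio = skb->priority & 0x7;		/* stand-in for flow_prio_map[] */
	return (prio < AC_COUNT) ? prio : AC_BK;
}
#endif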
3732
3733int
3734BCMFASTPATH(dhd_start_xmit)(struct sk_buff *skb, struct net_device *net)
3735{
3736 int ret;
3737 uint datalen;
3738 void *pktbuf;
3739 dhd_info_t *dhd = DHD_DEV_INFO(net);
3740 dhd_if_t *ifp = NULL;
3741 int ifidx;
3742 unsigned long flags;
3743 uint8 htsfdlystat_sz = 0;
3744#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
3745 int qidx = 0;
3746 int cpuid = 0;
3747 int prio = 0;
3748#endif /* DHD_MQ && DHD_MQ_STATS */
3749
3750 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3751
3752#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
3753 qidx = skb_get_queue_mapping(skb);
3754 /* In a non-preemptible context smp_processor_id() can be used;
3755 * otherwise get_cpu()/put_cpu() should be used
3756 */
3757 if (!CAN_SLEEP()) {
3758 cpuid = smp_processor_id();
3759 }
3760 else {
3761 cpuid = get_cpu();
3762 put_cpu();
3763 }
3764 prio = dhd->pub.flow_prio_map[skb->priority];
3765 DHD_TRACE(("%s: Q idx = %d, CPU = %d, prio = %d \n", __FUNCTION__,
3766 qidx, cpuid, prio));
3767 dhd->pktcnt_qac_histo[qidx][prio]++;
3768 dhd->pktcnt_per_ac[prio]++;
3769 dhd->cpu_qstats[qidx][cpuid]++;
3770#endif /* DHD_MQ && DHD_MQ_STATS */
3771
3772 if (dhd_query_bus_erros(&dhd->pub)) {
3773 return -ENODEV;
3774 }
3775
3776 DHD_GENERAL_LOCK(&dhd->pub, flags);
3777 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
3778 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3779
3780#ifdef DHD_PCIE_RUNTIMEPM
3781 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3782 /* To avoid packet loss, return NETDEV_TX_BUSY until runtime resume is done. */
3783 /* Stop the network queue temporarily until resume completes. */
3784 DHD_GENERAL_LOCK(&dhd->pub, flags);
3785 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3786 dhd_bus_stop_queue(dhd->pub.bus);
3787 }
3788 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3789 dhd_os_busbusy_wake(&dhd->pub);
3790 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3791 return NETDEV_TX_BUSY;
3792 }
3793#endif /* DHD_PCIE_RUNTIMEPM */
3794
3795 DHD_GENERAL_LOCK(&dhd->pub, flags);
3796#ifdef BCMPCIE
3797 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3798 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3799 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
3800 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3801#ifdef PCIE_FULL_DONGLE
3802 /* Stop tx queues if suspend is in progress */
3803 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3804 dhd_bus_stop_queue(dhd->pub.bus);
3805 }
3806#endif /* PCIE_FULL_DONGLE */
3807 dhd_os_busbusy_wake(&dhd->pub);
3808 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3809 return NETDEV_TX_BUSY;
3810 }
3811#else
3812 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3813 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3814 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
3815 }
3816#endif
3817
3818 DHD_OS_WAKE_LOCK(&dhd->pub);
3819
3820#if defined(DHD_HANG_SEND_UP_TEST)
3821 if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
3822 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
3823 dhd->pub.busstate = DHD_BUS_DOWN;
3824 }
3825#endif /* DHD_HANG_SEND_UP_TEST */
3826
3827 /* Reject if down */
3828 /* XXX Kernel panic was seen at first boot: rmmod without bringing the
3829 * interface down generated an unnecessary hang event.
3830 */
3831 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
3832 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3833 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3834 dhd_tx_stop_queues(net);
3835 /* Send Event when bus down detected during data session */
3836 if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
3837 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3838 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3839 net_os_send_hang_message(net);
3840 }
3841 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3842 dhd_os_busbusy_wake(&dhd->pub);
3843 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3844 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3845 return NETDEV_TX_BUSY;
3846 }
3847
3848 ifp = DHD_DEV_IFP(net);
3849 ifidx = DHD_DEV_IFIDX(net);
3850 if (ifidx == DHD_BAD_IF) {
3851 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
3852 dhd_tx_stop_queues(net);
3853 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3854 dhd_os_busbusy_wake(&dhd->pub);
3855 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3856 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3857 return NETDEV_TX_BUSY;
3858 }
3859
3860 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3861
3862 /* If tput test is in progress */
3863 if (dhd->pub.tput_data.tput_test_running) {
3864 return NETDEV_TX_BUSY;
3865 }
3866
3867 ASSERT(ifidx == dhd_net2idx(dhd, net));
3868 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
3869
3870 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3871
3872 /* re-align socket buffer if "skb->data" is odd address */
3873 if (((unsigned long)(skb->data)) & 0x1) {
3874 unsigned char *data = skb->data;
3875 uint32 length = skb->len;
3876 PKTPUSH(dhd->pub.osh, skb, 1);
3877 memmove(skb->data, data, length);
3878 PKTSETLEN(dhd->pub.osh, skb, length);
3879 }
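/* Standalone illustration (hypothetical buffer, plain C) of the one-byte
 * realignment above: grow headroom by one and slide the payload down so
 * the data pointer becomes even.
 */
#if 0
	unsigned char buf[8] = { 0, 'a', 'b', 'c' };
	unsigned char *p = &buf[1];	/* odd offset, like an odd skb->data */

	memmove(p - 1, p, 3);		/* the PKTPUSH(1) + memmove above */
	p -= 1;				/* payload "abc" now evenly aligned */
#endif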
3880
3881 datalen = PKTLEN(dhd->pub.osh, skb);
3882
3883#ifdef HOST_TPUT_TEST
3884 dhd_os_sdlock_txq(&dhd->pub);
3885 dhd->pub.net_len += datalen;
3886 dhd_os_sdunlock_txq(&dhd->pub);
3887 if ((dhd->pub.conf->data_drop_mode == XMIT_DROP) &&
3888 (PKTLEN(dhd->pub.osh, skb) > 500)) {
3889 dev_kfree_skb(skb);
3890 return NETDEV_TX_OK;
3891 }
3892#endif
3893 /* Make sure there's enough room for any header */
3894 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
3895 struct sk_buff *skb2;
3896
3897 DHD_INFO(("%s: insufficient headroom\n",
3898 dhd_ifname(&dhd->pub, ifidx)));
3899 dhd->pub.tx_realloc++;
3900
3901 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3902 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
3903
3904 dev_kfree_skb(skb);
3905 if ((skb = skb2) == NULL) {
3906 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
3907 dhd_ifname(&dhd->pub, ifidx)));
3908 ret = -ENOMEM;
3909 goto done;
3910 }
3911 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3912 }
3913
3914 /* moved from dhdsdio_sendfromq(): try to orphan the skb early */
3915 if (dhd->pub.conf->orphan_move == 2)
3916 PKTORPHAN(skb, dhd->pub.conf->tsq);
3917 else if (dhd->pub.conf->orphan_move == 3)
3918 skb_orphan(skb);
3919
3920 /* Convert to packet */
3921 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
3922 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
3923 dhd_ifname(&dhd->pub, ifidx)));
3924 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3925 dev_kfree_skb_any(skb);
3926 ret = -ENOMEM;
3927 goto done;
3928 }
3929
3930#ifdef DHD_WET
3931 /* wet related packet proto manipulation should be done in DHD
3932 since dongle doesn't have complete payload
3933 */
3934 if (WET_ENABLED(&dhd->pub) &&
3935 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
3936 DHD_INFO(("%s:%s: wet send proc failed\n",
3937 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
3938 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
3939 ret = -EFAULT;
3940 goto done;
3941 }
3942#endif /* DHD_WET */
3943
3944#ifdef DHD_PSTA
3945 /* PSR related packet proto manipulation should be done in DHD
3946 * since dongle doesn't have complete payload
3947 */
3948 if (PSR_ENABLED(&dhd->pub) &&
3949 (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
3950
3951 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
3952 dhd_ifname(&dhd->pub, ifidx)));
3953 }
3954#endif /* DHD_PSTA */
3955
3956#ifdef DHDTCPSYNC_FLOOD_BLK
3957 if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
3958 ifp->tsyncack_txed ++;
3959 }
3960#endif /* DHDTCPSYNC_FLOOD_BLK */
3961
3962#ifdef DHDTCPACK_SUPPRESS
3963 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
3964 /* If this packet has been hold or got freed, just return */
3965 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
3966 ret = 0;
3967 goto done;
3968 }
3969 } else {
3970 /* If this packet has replaced another packet and got freed, just return */
3971 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
3972 ret = 0;
3973 goto done;
3974 }
3975 }
3976#endif /* DHDTCPACK_SUPPRESS */
3977
3978 /*
3979 * If Load Balance is enabled queue the packet
3980 * else send directly from here.
3981 */
3982#if defined(DHD_LB_TXP)
3983 ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
3984#else
3985 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
3986#endif
3987
3988done:
3989 /* XXX Bus modules may have different "native" error spaces? */
3990 /* XXX USB is native linux and it'd be nice to retain errno */
3991 /* XXX meaning, but SDIO is not so we'd need an OSL_ERROR. */
3992 if (ret) {
3993 ifp->stats.tx_dropped++;
3994 dhd->pub.tx_dropped++;
3995 } else {
3996#ifdef PROP_TXSTATUS
3997 /* the tx_packets counter can be counted only when wlfc is disabled */
3998 if (!dhd_wlfc_is_supported(&dhd->pub))
3999#endif
4000 {
4001 dhd->pub.tx_packets++;
4002 ifp->stats.tx_packets++;
4003 ifp->stats.tx_bytes += datalen;
4004 }
4005 dhd->pub.actual_tx_pkts++;
4006 }
4007
4008 DHD_GENERAL_LOCK(&dhd->pub, flags);
4009 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4010 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
4011 dhd_os_tx_completion_wake(&dhd->pub);
4012 dhd_os_busbusy_wake(&dhd->pub);
4013 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4014 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4015 /* Return ok: we always eat the packet */
4016 return NETDEV_TX_OK;
4017}
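/* Note on the contract assumed above: returning NETDEV_TX_OK tells the
 * core the skb was consumed (even when it was only counted as dropped),
 * while NETDEV_TX_BUSY asks the core to requeue and retry, which is why
 * the paths returning it generally stop the queue first.
 */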
4018
4019#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4020void dhd_rx_wq_wakeup(struct work_struct *ptr)
4021{
4022 struct dhd_rx_tx_work *work;
4023 struct dhd_pub * pub;
4024
4025 work = container_of(ptr, struct dhd_rx_tx_work, work);
4026
4027 pub = work->pub;
4028
4029 DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
4030
4031 if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
4032 return;
4033 }
4034
4035 DHD_OS_WAKE_LOCK(pub);
4036 if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
4037
4038 // do nothing but wakeup the bus.
4039 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
4040 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
4041 }
4042 DHD_OS_WAKE_UNLOCK(pub);
4043 kfree(work);
4044}
4045
4046void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
4047{
4048 struct dhd_rx_tx_work *work;
4049 int ret = 0; /* stays 0 if the bus could not be woken, avoiding an uninitialized read below */
4050 dhd_info_t *dhd;
4051 struct dhd_bus * bus;
4052
4053 work = container_of(ptr, struct dhd_rx_tx_work, work);
4054
4055 dhd = DHD_DEV_INFO(work->net);
4056
4057 bus = dhd->pub.bus;
4058
4059 if (atomic_read(&dhd->pub.block_bus)) {
4060 kfree_skb(work->skb);
4061 kfree(work);
4062 dhd_netif_start_queue(bus);
4063 return;
4064 }
4065
4066 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
4067 ret = dhd_start_xmit(work->skb, work->net);
4068 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
4069 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
4070 }
4071 kfree(work);
4072 dhd_netif_start_queue(bus);
4073
4074 if (ret)
4075 netdev_err(work->net,
4076 "error: dhd_start_xmit():%d\n", ret);
4077}
4078
4079int
4080BCMFASTPATH(dhd_start_xmit_wrapper)(struct sk_buff *skb, struct net_device *net)
4081{
4082 struct dhd_rx_tx_work *start_xmit_work;
4083 int ret;
4084 dhd_info_t *dhd = DHD_DEV_INFO(net);
4085
4086 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
4087 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
4088
4089 dhd_netif_stop_queue(dhd->pub.bus);
4090
4091 start_xmit_work = (struct dhd_rx_tx_work*)
4092 kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
4093
4094 if (!start_xmit_work) {
4095 netdev_err(net,
4096 "error: failed to alloc start_xmit_work\n");
 dev_kfree_skb_any(skb); /* the queued worker would have consumed it */
 dhd_netif_start_queue(dhd->pub.bus); /* do not leave the queue stopped */
4097 ret = -ENOMEM;
4098 goto exit;
4099 }
4100
4101 INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
4102 start_xmit_work->skb = skb;
4103 start_xmit_work->net = net;
4104 queue_work(dhd->tx_wq, &start_xmit_work->work);
4105 ret = NET_XMIT_SUCCESS;
4106
4107 } else if (dhd->pub.busstate == DHD_BUS_DATA) {
4108 ret = dhd_start_xmit(skb, net);
4109 } else {
4110 /* when bus is down */
4111 ret = -ENODEV;
4112 }
4113
4114exit:
4115 return ret;
4116}
4117void
4118dhd_bus_wakeup_work(dhd_pub_t *dhdp)
4119{
4120 struct dhd_rx_tx_work *rx_work;
4121 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4122
4123 rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
4124 if (!rx_work) {
4125 DHD_ERROR(("%s: rx_work alloc error\n", __FUNCTION__));
4126 return;
4127 }
4128
4129 INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
4130 rx_work->pub = dhdp;
4131 queue_work(dhd->rx_wq, &rx_work->work);
4132
4133}
4134#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
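/* Minimal sketch (hypothetical names) of the alloc/INIT_WORK/queue_work
 * pattern used by the runtime-PM helpers above: embed a work_struct in a
 * container, recover it with container_of(), and free it in the handler.
 */
#if 0
struct example_work {
	struct work_struct work;
	void *payload;
};

static void example_work_fn(struct work_struct *w)
{
	struct example_work *ew = container_of(w, struct example_work, work);

	/* ... use ew->payload ... */
	kfree(ew);			/* the handler owns and frees the item */
}

static void example_queue(void *payload)
{
	struct example_work *ew = kmalloc(sizeof(*ew), GFP_ATOMIC);

	if (!ew)
		return;
	INIT_WORK(&ew->work, example_work_fn);
	ew->payload = payload;
	schedule_work(&ew->work);	/* or queue_work() on a private wq */
}
#endif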
4135
4136static void
4137__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
4138{
4139 if (state == ON) {
4140 if (!netif_queue_stopped(net)) {
4141 DHD_ERROR(("%s: Stop Netif Queue\n", __FUNCTION__));
4142 netif_stop_queue(net);
4143 } else {
4144 DHD_LOG_MEM(("%s: Netif Queue already stopped\n", __FUNCTION__));
4145 }
4146 }
4147
4148 if (state == OFF) {
4149 if (netif_queue_stopped(net)) {
4150 DHD_ERROR(("%s: Start Netif Queue\n", __FUNCTION__));
4151 netif_wake_queue(net);
4152 } else {
4153 DHD_LOG_MEM(("%s: Netif Queue already started\n", __FUNCTION__));
4154 }
4155 }
4156}
4157
4158void
4159dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4160{
4161 struct net_device *net;
4162 dhd_info_t *dhd = dhdp->info;
4163 int i;
4164
4165 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4166
4167 ASSERT(dhd);
4168
4169#ifdef DHD_LOSSLESS_ROAMING
4170 /* block flowcontrol during roaming */
4171 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4172 return;
4173 }
4174#endif
4175
4176 if (ifidx == ALL_INTERFACES) {
4177 for (i = 0; i < DHD_MAX_IFS; i++) {
4178 if (dhd->iflist[i]) {
4179 net = dhd->iflist[i]->net;
4180 __dhd_txflowcontrol(dhdp, net, state);
4181 }
4182 }
4183 } else {
4184 if (dhd->iflist[ifidx]) {
4185 net = dhd->iflist[ifidx]->net;
4186 __dhd_txflowcontrol(dhdp, net, state);
4187 }
4188 }
4189 dhdp->txoff = state;
4190}
4191
4192#ifdef DHD_MCAST_REGEN
4193/*
4194 * Description: This function is called to do the reverse translation
4195 *
4196 * Input eh - pointer to the ethernet header
4197 */
4198int32
4199dhd_mcast_reverse_translation(struct ether_header *eh)
4200{
4201 uint8 *iph;
4202 uint32 dest_ip;
4203
4204 iph = (uint8 *)eh + ETHER_HDR_LEN;
4205
4206 /* Only IP packets are handled */
4207 if (eh->ether_type != hton16(ETHER_TYPE_IP))
4208 return BCME_ERROR;
4209
4210 /* Non-IPv4 multicast packets are not handled */
4211 if (IP_VER(iph) != IP_VER_4)
4212 return BCME_ERROR;
4213
 /* Read the destination address only after the header checks above */
 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4214
4215 /*
4216 * The packet has a multicast IP and unicast MAC. That means
4217 * we have to do the reverse translation
4218 */
4219 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
4220 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
4221 return BCME_OK;
4222 }
4223
4224 return BCME_ERROR;
4225}
4226#endif /* DHD_MCAST_REGEN */
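/* Standalone illustration of the mapping ETHER_FILL_MCAST_ADDR_FROM_IP()
 * is assumed to perform above: the fixed 01:00:5e multicast OUI followed
 * by the low 23 bits of the destination IP,
 * e.g. 239.1.2.3 -> 01:00:5e:01:02:03.
 */
#if 0
static void example_ip_to_mcast_mac(uint32 dest_ip, uint8 mac[6])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (dest_ip >> 16) & 0x7f;	/* bit 23 of the IP is dropped */
	mac[4] = (dest_ip >> 8) & 0xff;
	mac[5] = dest_ip & 0xff;
}
#endif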
4227
4228#ifdef SHOW_LOGTRACE
4229static void
4230dhd_netif_rx_ni(struct sk_buff * skb)
4231{
4232 /* Do not call netif_receive_skb(), as this workqueue scheduler is
4233 * not from NAPI. Also, as we are not in interrupt context, do not
4234 * call netif_rx(); instead call netif_rx_ni() (kernel >= 2.6),
4235 * which calls netif_rx(), disables irqs, raises the NET_RX softirq
4236 * and re-enables interrupts
4237 */
4238 netif_rx_ni(skb);
4239}
4240
4241static int
4242dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
4243{
4244 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4245 int ret = BCME_OK;
4246 uint datalen;
4247 bcm_event_msg_u_t evu;
4248 void *data = NULL;
4249 void *pktdata = NULL;
4250 bcm_event_t *pvt_data;
4251 uint pktlen;
4252
4253 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
4254
4255 /* In dhd_rx_frame, header is stripped using skb_pull
4256 * of size ETH_HLEN, so adjust pktlen accordingly
4257 */
4258 pktlen = skb->len + ETH_HLEN;
4259
4260 pktdata = (void *)skb_mac_header(skb);
4261 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
4262
4263 if (ret != BCME_OK) {
4264 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
4265 __FUNCTION__, ret));
4266 goto exit;
4267 }
4268
4269 datalen = ntoh32(evu.event.datalen);
4270
4271 pvt_data = (bcm_event_t *)pktdata;
4272 data = &pvt_data[1];
4273
4274 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
4275
4276exit:
4277 return ret;
4278}
4279
4280/*
4281 * dhd_event_logtrace_process_items processes
4282 * each skb from evt_trace_queue.
4283 * Returns TRUE if more packets remain to be processed,
4284 * else returns FALSE
4285 */
4286
4287static int
4288dhd_event_logtrace_process_items(dhd_info_t *dhd)
4289{
4290 dhd_pub_t *dhdp;
4291 struct sk_buff *skb;
4292 uint32 qlen;
4293 uint32 process_len;
4294
4295 if (!dhd) {
4296 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4297 return 0;
4298 }
4299
4300 dhdp = &dhd->pub;
4301
4302 if (!dhdp) {
4303 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
4304 return 0;
4305 }
4306
4307 qlen = skb_queue_len(&dhd->evt_trace_queue);
4308 process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
4309
4310 /* Run while loop till bound is reached or skb queue is empty */
4311 while (process_len--) {
4312 int ifid = 0;
4313 skb = skb_dequeue(&dhd->evt_trace_queue);
4314 if (skb == NULL) {
4315 DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
4316 __FUNCTION__));
4317 break;
4318 }
4319 BCM_REFERENCE(ifid);
4320#ifdef PCIE_FULL_DONGLE
4321 /* Check if pkt is from INFO ring or WLC_E_TRACE */
4322 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
4323 if (ifid == DHD_DUMMY_INFO_IF) {
4324 /* Process logtrace from info rings */
4325 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
4326 } else
4327#endif /* PCIE_FULL_DONGLE */
4328 {
4329 /* Processing the WLC_E_TRACE case or the non-PCIE_FULL_DONGLE case */
4330 dhd_event_logtrace_pkt_process(dhdp, skb);
4331 }
4332
4333 /* Dummy sleep so that scheduler kicks in after processing any logprints */
4334 OSL_SLEEP(0);
4335
4336 /* Send packet up if logtrace_pkt_sendup is TRUE */
4337 if (dhdp->logtrace_pkt_sendup) {
4338#ifdef DHD_USE_STATIC_CTRLBUF
4339 /* If bufs are allocated via static buf pool
4340 * and logtrace_pkt_sendup enabled, make a copy,
4341 * free the local one and send the copy up.
4342 */
4343 void *npkt = PKTDUP(dhdp->osh, skb);
4344 /* Clone event and send it up */
4345 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4346 if (npkt) {
4347 skb = npkt;
4348 } else {
4349 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
4350 /* Packet is already freed, go to next packet */
4351 continue;
4352 }
4353#endif /* DHD_USE_STATIC_CTRLBUF */
4354#ifdef PCIE_FULL_DONGLE
4355 /* For infobuf packets (ifid is DHD_DUMMY_INFO_IF), assign
4356 * skb->dev to the primary interface's net device before
4357 * sending the skb up to the network layer
4358 */
4359 if (ifid == DHD_DUMMY_INFO_IF) {
4360 skb = PKTTONATIVE(dhdp->osh, skb);
4361 skb->dev = dhd->iflist[0]->net;
4362 }
4363#endif /* PCIE_FULL_DONGLE */
4364 /* Send pkt UP */
4365 dhd_netif_rx_ni(skb);
4366 } else {
4367 /* Don't send up. Free up the packet. */
4368#ifdef DHD_USE_STATIC_CTRLBUF
4369 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4370#else
4371 PKTFREE(dhdp->osh, skb, FALSE);
4372#endif /* DHD_USE_STATIC_CTRLBUF */
4373 }
4374 }
4375
4376 /* Reschedule if more packets to be processed */
4377 return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
4378}
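/* Minimal sketch (hypothetical) of the bounded-drain contract implemented
 * above: dequeue at most 'bound' items per pass and report, via the return
 * value, whether the caller should schedule another pass.
 */
#if 0
static int example_drain_bounded(struct sk_buff_head *q, uint32 bound)
{
	uint32 qlen = skb_queue_len(q);
	uint32 n = MIN(qlen, bound);
	struct sk_buff *skb;

	while (n-- && (skb = skb_dequeue(q)) != NULL)
		kfree_skb(skb);		/* stand-in for real processing */

	return (qlen >= bound);		/* nonzero: more work pending */
}
#endif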
4379
4380#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4381static int
4382dhd_logtrace_thread(void *data)
4383{
4384 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4385 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4386 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
4387 int ret;
4388
4389 while (1) {
4390 dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
4391 if (!binary_sema_down(tsk)) {
4392 dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
4393 SMP_RD_BARRIER_DEPENDS();
4394 if (dhd->pub.dongle_reset == FALSE) {
4395 do {
4396 /* Check terminated before processing the items */
4397 if (tsk->terminated) {
4398 DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
4399 goto exit;
4400 }
4401#ifdef EWP_EDL
4402 /* check if EDL is being used */
4403 if (dhd->pub.dongle_edl_support) {
4404 ret = dhd_prot_process_edl_complete(&dhd->pub,
4405 &dhd->event_data);
4406 } else {
4407 ret = dhd_event_logtrace_process_items(dhd);
4408 }
4409#else
4410 ret = dhd_event_logtrace_process_items(dhd);
4411#endif /* EWP_EDL */
4412 /* If ret > 0, the bound was reached, so yield the scheduler
4413 * to be fair to other processes.
4414 * The comment above yield()'s definition says:
4415 * If you want to use yield() to wait for something,
4416 * use wait_event().
4417 * If you want to use yield() to be 'nice' for others,
4418 * use cond_resched().
4419 * If you still want to use yield(), do not!
4420 */
4421 if (ret > 0) {
4422 cond_resched();
4423 OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
4424 } else if (ret < 0) {
4425 DHD_ERROR(("%s: ERROR should not reach here\n",
4426 __FUNCTION__));
4427 }
4428 } while (ret > 0);
4429 }
4430 if (tsk->flush_ind) {
4431 DHD_ERROR(("%s: flushed\n", __FUNCTION__));
4432 dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
4433 tsk->flush_ind = 0;
4434 complete(&tsk->flushed);
4435 }
4436 } else {
4437 DHD_ERROR(("%s: unexpected break\n", __FUNCTION__));
4438 dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
4439 break;
4440 }
4441 }
4442exit:
4443 dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
4444 complete_and_exit(&tsk->completed, 0); /* does not return, so record the timestamp first */
4445}
4446#else
4447static void
4448dhd_event_logtrace_process(struct work_struct * work)
4449{
4450 /* Ignore compiler warnings due to -Werror=cast-qual */
4451 struct delayed_work *dw = to_delayed_work(work);
4452 struct dhd_info *dhd;
 int ret; /* was missing: used by the EWP_EDL/logtrace calls below */
4453
4454 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
4455 dhd = container_of(dw, struct dhd_info, event_log_dispatcher_work);
4456 GCC_DIAGNOSTIC_POP();
4457
4458#ifdef EWP_EDL
4459 if (dhd->pub.dongle_edl_support) {
4460 ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
4461 } else {
4462 ret = dhd_event_logtrace_process_items(dhd);
4463 }
4464#else
4465 ret = dhd_event_logtrace_process_items(dhd);
4466#endif /* EWP_EDL */
4467
4468 if (ret > 0) {
4469 schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
4470 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
4471 }
4472 return;
4473}
4474#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
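/* Sketch (hypothetical names) of the self-rescheduling delayed-work
 * pattern used by dhd_event_logtrace_process() above.
 */
#if 0
static struct delayed_work example_dw;	/* INIT_DELAYED_WORK() at init time */

static void example_dw_fn(struct work_struct *work)
{
	/* example_process_items() is a hypothetical bounded pass */
	if (example_process_items() > 0)
		schedule_delayed_work(&example_dw, msecs_to_jiffies(10));
}
#endif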
4475
4476void
4477dhd_schedule_logtrace(void *dhd_info)
4478{
4479 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
4480
4481#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4482 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4483 binary_sema_up(&dhd->thr_logtrace_ctl);
4484 } else {
4485 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4486 dhd->thr_logtrace_ctl.thr_pid));
4487 }
4488#else
4489 schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
4490#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4491 return;
4492}
4493
4494void
4495dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
4496{
4497#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4498 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4499 PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4500 } else {
4501 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4502 dhd->thr_logtrace_ctl.thr_pid));
4503 }
4504#else
4505 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
4506#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4507}
4508
4509void
4510dhd_flush_logtrace_process(dhd_info_t *dhd)
4511{
4512#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4513 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4514 PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4515 } else {
4516 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4517 dhd->thr_logtrace_ctl.thr_pid));
4518 }
4519#else
4520 flush_delayed_work(&dhd->event_log_dispatcher_work);
4521#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4522}
4523
4524int
4525dhd_init_logtrace_process(dhd_info_t *dhd)
4526{
4527#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4528 dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
4529 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
4530 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4531 DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
4532 return BCME_ERROR;
4533 } else {
4534 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) inited\n", __FUNCTION__,
4535 dhd->thr_logtrace_ctl.thr_pid));
4536 }
4537#else
4538 INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
4539#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4540 return BCME_OK;
4541}
4542
4543int
4544dhd_reinit_logtrace_process(dhd_info_t *dhd)
4545{
4546#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4547 /* Re-init only if PROC_STOP from dhd_stop was called
4548 * which can be checked via thr_pid
4549 */
4550 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4551 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
4552 0, "dhd_logtrace_thread");
4553 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4554 DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
4555 return BCME_ERROR;
4556 } else {
4557 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) inited\n", __FUNCTION__,
4558 dhd->thr_logtrace_ctl.thr_pid));
4559 }
4560 }
4561#else
4562 /* No need to re-init for the WQ case, as cancel_delayed_work_sync()
4563 * does not delete the work queue
4564 */
4565#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4566 return BCME_OK;
4567}
4568
4569void
4570dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4571{
4572 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4573
4574#ifdef PCIE_FULL_DONGLE
4575 /* Add ifidx in the PKTTAG */
4576 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
4577#endif /* PCIE_FULL_DONGLE */
4578 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
4579
4580 dhd_schedule_logtrace(dhd);
4581}
4582
4583void
4584dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
4585{
4586 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4587 struct sk_buff *skb;
4588
4589 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
4590#ifdef DHD_USE_STATIC_CTRLBUF
4591 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4592#else
4593 PKTFREE(dhdp->osh, skb, FALSE);
4594#endif /* DHD_USE_STATIC_CTRLBUF */
4595 }
4596}
4597
4598#ifdef EWP_EDL
4599void
4600dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
4601{
4602 struct sk_buff *skb = NULL;
4603 uint32 pktsize = 0;
4604 void *pkt = NULL;
4605 info_buf_payload_hdr_t *infobuf = NULL;
4606 dhd_info_t *dhd = dhdp->info;
4607 uint8 *pktdata = NULL;
4608
4609 if (!msg)
4610 return;
4611
4612 /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
4613 infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
4614 pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
4615 sizeof(uint32));
4616 pkt = PKTGET(dhdp->osh, pktsize, FALSE);
4617 if (!pkt) {
4618 DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
4619 } else {
4620 PKTSETLEN(dhdp->osh, pkt, pktsize);
4621 pktdata = PKTDATA(dhdp->osh, pkt);
4622 memcpy(pktdata, msg, pktsize);
4623 /* For infobuf packets assign skb->dev with
4624 * Primary interface n/w device
4625 */
4626 skb = PKTTONATIVE(dhdp->osh, pkt);
4627 skb->dev = dhd->iflist[0]->net;
4628 /* Send pkt UP */
4629 dhd_netif_rx_ni(skb);
4630 }
4631}
4632#endif /* EWP_EDL */
4633#endif /* SHOW_LOGTRACE */
4634
4635#ifdef EWP_EDL
4636static void
4637dhd_edl_process_work(struct work_struct *work)
4638{
4639 struct delayed_work *dw = to_delayed_work(work);
4640 struct dhd_info *dhd_info;
4641 /* Ignore compiler warnings due to -Werror=cast-qual */
4642 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
4643 dhd_info = container_of(dw, struct dhd_info, edl_dispatcher_work);
4644 GCC_DIAGNOSTIC_POP();
4645
4646 if (dhd_info)
4647 dhd_prot_process_edl_complete(&dhd_info->pub, &dhd_info->event_data);
4648}
4649
4650void
4651dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms)
4652{
4653 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4654 schedule_delayed_work(&dhd->edl_dispatcher_work, msecs_to_jiffies(delay_ms));
4655}
4656#endif /* EWP_EDL */
4657
4658#ifdef ENABLE_WAKEUP_PKT_DUMP
4659static void
4660update_wake_pkt_info(struct sk_buff *skb)
4661{
4662 struct iphdr *ip_header;
4663 struct ipv6hdr *ipv6_header;
4664 struct udphdr *udp_header;
4665 struct tcphdr *tcp_header;
4666 uint16 dport = 0;
4667
4668 ip_header = (struct iphdr *)(skb->data);
4669
4670 temp_raw |= ((long long)ntoh16(skb->protocol)) << 48;
4671
4672 DHD_INFO(("eth_hdr(skb)->h_dest : %pM\n", eth_hdr(skb)->h_dest));
4673 if (eth_hdr(skb)->h_dest[0] & 0x01) {
4674 temp_raw |= (long long)1 << 39;
4675 }
4676
4677 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
4678 wl_event_msg_t event;
4679 bcm_event_msg_u_t evu;
4680 int ret;
4681 uint event_type;
4682
4683 ret = wl_host_event_get_data(
4684#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
4685 skb_mac_header(skb),
4686#else
4687 skb->mac.raw,
4688#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
4689 skb->len, &evu);
4690 if (ret != BCME_OK) {
4691 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
4692 __FUNCTION__, ret));
4693 }
4694
4695 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
4696 event_type = ntoh32_ua((void *)&event.event_type);
4697
4698 temp_raw |= (long long)event_type << 40;
4699 } else if (ntoh16(skb->protocol) == ETHER_TYPE_IP ||
4700 ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
4701 if (ip_header->version == 6) {
4702 ipv6_header = (struct ipv6hdr *)ip_header;
4703 temp_raw |= ((long long)ipv6_header->nexthdr) << 40;
4704 dport = 0;
4705
4706 if (ipv6_header->daddr.s6_addr[0] == 0xff) { /* ff00::/8: IPv6 multicast */
4707 temp_raw |= (long long)1 << 38;
4708 }
4709
4710 DHD_INFO(("IPv6 [%x]%pI6c > %pI6c:%d\n",
4711 ipv6_header->nexthdr, &(ipv6_header->saddr.s6_addr),
4712 &(ipv6_header->daddr.s6_addr), dport));
4713 } else if (ip_header->version == 4) {
4714 temp_raw |= ((long long)ip_header->protocol) << 40;
4715
4716#define IP_HDR_OFFSET ((char *)ip_header + IPV4_HLEN(ip_header))
4717 if (ip_header->protocol == IPPROTO_TCP) {
4718 tcp_header = (struct tcphdr *)IP_HDR_OFFSET;
4719 dport = ntohs(tcp_header->dest);
4720 }
4721 else if (ip_header->protocol == IPPROTO_UDP) {
4722 udp_header = (struct udphdr *)IP_HDR_OFFSET;
4723 dport = ntohs(udp_header->dest);
4724 }
4725
4726 if (ipv4_is_multicast(ip_header->daddr)) {
4727 temp_raw |= (long long)1 << 38;
4728 }
4729
4730 DHD_INFO(("IP [%x] %pI4 > %pI4:%d\n",
4731 ip_header->protocol, &(ip_header->saddr),
4732 &(ip_header->daddr), dport));
4733 }
4734
4735 temp_raw |= (long long)dport << 16;
4736 }
4737}
4738#endif /* ENABLE_WAKEUP_PKT_DUMP */
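/* Hedged reading of the temp_raw layout packed by update_wake_pkt_info()
 * above (bit positions taken from the shifts; the values here are
 * hypothetical): [63:48] ethertype, [47:40] IP protocol or event type,
 * [39] L2 multicast, [38] L3 multicast, [31:16] destination port.
 */
#if 0
static long long example_pack_wake_info(void)
{
	long long raw = 0;

	raw |= (long long)0x0800 << 48;	/* ETHER_TYPE_IP */
	raw |= (long long)17 << 40;	/* IPPROTO_UDP */
	raw |= (long long)53 << 16;	/* destination port 53 (DNS) */
	return raw;
}
#endif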
4739
4740/** Called when a frame is received by the dongle on interface 'ifidx' */
4741void
4742dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
4743{
4744 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4745 struct sk_buff *skb;
4746 uchar *eth;
4747 uint len;
4748 void *data, *pnext = NULL;
4749 int i;
4750 dhd_if_t *ifp;
4751 wl_event_msg_t event;
4752 int tout_rx = 0;
4753 int tout_ctrl = 0;
4754 void *skbhead = NULL;
4755 void *skbprev = NULL;
4756 uint16 protocol;
4757 unsigned char *dump_data;
4758#ifdef DHD_MCAST_REGEN
4759 uint8 interface_role;
4760 if_flow_lkup_t *if_flow_lkup;
4761 unsigned long flags;
4762#endif
4763#ifdef DHD_WAKE_STATUS
4764 int pkt_wake = 0;
4765 wake_counts_t *wcp = NULL;
4766#endif /* DHD_WAKE_STATUS */
4767#ifdef ENABLE_DHD_GRO
4768 bool dhd_gro_enable = TRUE;
4769#endif /* ENABLE_DHD_GRO */
4770
4771 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4772 BCM_REFERENCE(dump_data);
4773
4774#ifdef ENABLE_DHD_GRO
4775 if (ifidx < DHD_MAX_IFS) {
4776 ifp = dhd->iflist[ifidx];
4777 if (ifp && ifp->net->qdisc) {
4778 if (!ifp->net->qdisc->ops->cl_ops) {
4779 dhd_gro_enable = TRUE;
4780 DHD_TRACE(("%s: enable sw gro\n", __FUNCTION__));
4781 } else {
4782 dhd_gro_enable = FALSE;
4783 DHD_TRACE(("%s: disable sw gro because of qdisc traffic control\n",
4784 __FUNCTION__));
4785 }
4786 }
4787 }
4788#endif /* ENABLE_DHD_GRO */
4789
4790 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
4791 struct ether_header *eh;
4792
4793 pnext = PKTNEXT(dhdp->osh, pktbuf);
4794 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
4795
4796 /* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
4797 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
4798 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
4799 */
4800 if (ifidx == DHD_DUMMY_INFO_IF) {
4801 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
4802 * context in case of PCIe FD, in case of other bus this will be from
4803 * DPC context. If we get bunch of events from Dongle then printing all
4804 * of them from Tasklet/DPC context that too in data path is costly.
4805 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
4806 * events with type WLC_E_TRACE.
4807 * We'll print these console logs from the workqueue context by enqueueing
4808 * the SKB here; dequeuing is done in the workqueue, and the SKB is freed
4809 * only if logtrace_pkt_sendup is TRUE
4810 */
4811#ifdef SHOW_LOGTRACE
4812 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
4813#else /* !SHOW_LOGTRACE */
4814 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
4815 * free the PKT here itself
4816 */
4817#ifdef DHD_USE_STATIC_CTRLBUF
4818 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4819#else
4820 PKTFREE(dhdp->osh, pktbuf, FALSE);
4821#endif /* DHD_USE_STATIC_CTRLBUF */
4822#endif /* SHOW_LOGTRACE */
4823 continue;
4824 }
4825#ifdef DHD_WAKE_STATUS
4826#ifdef BCMDBUS
4827 wcp = NULL;
4828#else
4829 pkt_wake = dhd_bus_get_bus_wake(dhdp);
4830 wcp = dhd_bus_get_wakecount(dhdp);
4831#endif /* BCMDBUS */
4832 if (wcp == NULL) {
4833 /* If wakeinfo count buffer is null do not update wake count values */
4834 pkt_wake = 0;
4835 }
4836#endif /* DHD_WAKE_STATUS */
4837
4838 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4839 if (dhd->pub.tput_data.tput_test_running &&
4840 dhd->pub.tput_data.direction == TPUT_DIR_RX &&
4841 ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
4842 dhd_tput_test_rx(dhdp, pktbuf);
4843 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
4844 continue;
4845 }
4846
4847 if (ifidx >= DHD_MAX_IFS) {
4848 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
4849 __FUNCTION__, ifidx));
4850 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4851#ifdef DHD_USE_STATIC_CTRLBUF
4852 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4853#else
4854 PKTFREE(dhdp->osh, pktbuf, FALSE);
4855#endif /* DHD_USE_STATIC_CTRLBUF */
4856 } else {
4857 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4858 }
4859 continue;
4860 }
4861
4862 ifp = dhd->iflist[ifidx];
4863 if (ifp == NULL) {
4864 DHD_ERROR_RLMT(("%s: ifp is NULL. drop packet\n",
4865 __FUNCTION__));
4866 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4867#ifdef DHD_USE_STATIC_CTRLBUF
4868 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4869#else
4870 PKTFREE(dhdp->osh, pktbuf, FALSE);
4871#endif /* DHD_USE_STATIC_CTRLBUF */
4872 } else {
4873 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4874 }
4875 continue;
4876 }
4877
4878 /* Dropping only data packets before registering net device to avoid kernel panic */
4879#ifndef PROP_TXSTATUS_VSDB
4880 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
4881 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4882#else
4883 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
4884 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4885#endif /* PROP_TXSTATUS_VSDB */
4886 {
4887 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
4888 __FUNCTION__));
4889 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4890 continue;
4891 }
4892
4893#ifdef PROP_TXSTATUS
4894 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
4895 /* WLFC may send header only packet when
4896 there is an urgent message but no packet to
4897 piggy-back on
4898 */
4899 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4900 continue;
4901 }
4902#endif
4903#ifdef DHD_L2_FILTER
4904 /* If block_ping is enabled drop the ping packet */
4905 if (ifp->block_ping) {
4906 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
4907 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4908 continue;
4909 }
4910 }
4911 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
4912 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4913 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4914 continue;
4915 }
4916 }
4917 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4918 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
4919
4920 /* Drop the packets if l2 filter has processed it already
4921 * otherwise continue with the normal path
4922 */
4923 if (ret == BCME_OK) {
4924 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4925 continue;
4926 }
4927 }
4928 if (ifp->block_tdls) {
4929 if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
4930 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4931 continue;
4932 }
4933 }
4934#endif /* DHD_L2_FILTER */
4935
4936#ifdef DHD_MCAST_REGEN
4937 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
4938 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
4939 ASSERT(if_flow_lkup);
4940
4941 interface_role = if_flow_lkup[ifidx].role;
4942 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
4943
4944 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
4945 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
4946 ETHER_ISUCAST(eh->ether_dhost)) {
4947 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
4948#ifdef DHD_PSTA
4949 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
4950 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
4951 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
4952 if (ifidx != 0) {
4953 /* Let the primary in PSTA interface handle this
4954 * frame after unicast to Multicast conversion
4955 */
4956 ifp = dhd_get_ifp(dhdp, 0);
4957 ASSERT(ifp);
4958 }
4959 }
4960#endif /* DHD_PSTA */
4961 }
4962 }
4963#endif /* MCAST_REGEN */
4964
4965#ifdef DHDTCPSYNC_FLOOD_BLK
4966 if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
4967 int delta_sec;
4968 int delta_sync;
4969 int sync_per_sec;
4970 u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
4971 ifp->tsync_rcvd ++;
4972 delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
4973 delta_sec = curr_time - ifp->last_sync;
4974 if (delta_sec > 1) {
4975 sync_per_sec = delta_sync/delta_sec;
4976 if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
4977 schedule_work(&ifp->blk_tsfl_work);
4978 DHD_ERROR(("ifx %d TCP SYN flood attack suspected! "
4979 "SYN received %d pkt/sec\n",
4980 ifidx, sync_per_sec));
4981 ifp->tsync_per_sec = sync_per_sec;
4982 }
4983 dhd_reset_tcpsync_info_by_ifp(ifp);
4984 }
4985
4986 }
4987#endif /* DHDTCPSYNC_FLOOD_BLK */
4988
4989#ifdef DHDTCPACK_SUPPRESS
4990 dhd_tcpdata_info_get(dhdp, pktbuf);
4991#endif
4992 skb = PKTTONATIVE(dhdp->osh, pktbuf);
4993
4994 ASSERT(ifp);
4995 skb->dev = ifp->net;
4996#ifdef DHD_WET
4997 /* wet related packet proto manipulation should be done in DHD
4998 * since dongle doesn't have complete payload
4999 */
5000 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
5001 pktbuf) < 0)) {
5002 DHD_INFO(("%s:%s: wet recv proc failed\n",
5003 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
5004 }
5005#endif /* DHD_WET */
5006
5007#ifdef DHD_PSTA
5008 if (PSR_ENABLED(dhdp) &&
5009 (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
5010 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
5011 dhd_ifname(dhdp, ifidx)));
5012 }
5013#endif /* DHD_PSTA */
5014
5015#ifdef PCIE_FULL_DONGLE
5016 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
5017 (!ifp->ap_isolate)) {
5018 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5019 if (ETHER_ISUCAST(eh->ether_dhost)) {
5020 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
5021 dhd_sendpkt(dhdp, ifidx, pktbuf);
5022 continue;
5023 }
5024 } else {
5025 void *npktbuf = NULL;
5026 if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
5027 (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) {
5028 dhd_sendpkt(dhdp, ifidx, npktbuf);
5029 }
5030 }
5031 }
5032#endif /* PCIE_FULL_DONGLE */
5033#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
5034 if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
5035 (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
5036 (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
5037 DHD_ERROR(("%s: Reassoc is in progress. "
5038 "Drop EAPOL M1 frame\n", __FUNCTION__));
5039 PKTFREE(dhdp->osh, pktbuf, FALSE);
5040 continue;
5041 }
5042#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
5043 /* Get the protocol, maintaining the skb around eth_type_trans().
5044 * The main reason for this hack is a limitation of Linux 2.4,
5045 * where 'eth_type_trans' uses 'net->hard_header_len' instead of
5046 * ETH_HLEN for its internal skb_pull. To avoid copying packets
5047 * coming from the network stack when prepending the BDC,
5048 * hardware header etc., at network interface registration we set
5049 * 'net->hard_header_len' to ETH_HLEN plus the extra space those
5050 * headers require, and not just ETH_HLEN
5051 */
5052 eth = skb->data;
5053 len = skb->len;
5054 dump_data = skb->data;
5055 protocol = (skb->data[12] << 8) | skb->data[13];
5056
5057 if (protocol == ETHER_TYPE_802_1X) {
5058 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
5059#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
5060 wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
5061#endif /* WL_CFG80211 && WL_WPS_SYNC */
5062#ifdef DHD_4WAYM4_FAIL_DISCONNECT
5063 if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
5064 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
5065 }
5066#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
5067 }
5068 dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
5069
5070#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5071 if (pkt_wake) {
5072 prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
5073 DHD_ERROR(("config check in_suspend: %d ", dhdp->in_suspend));
5074#ifdef ARP_OFFLOAD_SUPPORT
5075 DHD_ERROR(("arp hmac_update:%d \n", dhdp->hmac_updated));
5076#endif /* ARP_OFFLOAD_SUPPORT */
5077 }
5078#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5079
5080 skb->protocol = eth_type_trans(skb, skb->dev);
5081
5082 if (skb->pkt_type == PACKET_MULTICAST) {
5083 dhd->pub.rx_multicast++;
5084 ifp->stats.multicast++;
5085 }
5086
5087 skb->data = eth;
5088 skb->len = len;
5089
5090 /* TODO: XXX: re-look into dropped packets. */
5091 DHD_DBG_PKT_MON_RX(dhdp, skb);
5092#ifdef DHD_PKT_LOGGING
5093 DHD_PKTLOG_RX(dhdp, skb);
5094#endif /* DHD_PKT_LOGGING */
5095 /* Strip header, count, deliver upward */
5096 skb_pull(skb, ETH_HLEN);
5097
5098#ifdef ENABLE_WAKEUP_PKT_DUMP
5099 if (dhd_mmc_wake) {
5100 DHD_INFO(("wake_pkt %s(%d)\n", __FUNCTION__, __LINE__));
5101 if (DHD_INFO_ON()) {
5102 prhex("wake_pkt", (char*) eth, MIN(len, 48));
5103 }
5104 update_wake_pkt_info(skb);
5105#ifdef CONFIG_IRQ_HISTORY
5106 add_irq_history(0, "WIFI");
5107#endif
5108 dhd_mmc_wake = FALSE;
5109 }
5110#endif /* ENABLE_WAKEUP_PKT_DUMP */
5111
5112 /* Process special event packets and then discard them */
5113 /* XXX Decide on a better way to fit this in */
5114 memset(&event, 0, sizeof(event));
5115
5116 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
5117 bcm_event_msg_u_t evu;
5118 int ret_event, event_type;
5119 void *pkt_data = skb_mac_header(skb);
5120
5121 ret_event = wl_host_event_get_data(pkt_data, len, &evu);
5122
5123 if (ret_event != BCME_OK) {
5124 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5125 __FUNCTION__, ret_event));
5126#ifdef DHD_USE_STATIC_CTRLBUF
5127 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5128#else
5129 PKTFREE(dhdp->osh, pktbuf, FALSE);
5130#endif
5131 continue;
5132 }
5133
5134 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5135 event_type = ntoh32_ua((void *)&event.event_type);
5136#ifdef SHOW_LOGTRACE
5137 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5138 * context in case of PCIe FD, in case of other bus this will be from
5139 * DPC context. If we get bunch of events from Dongle then printing all
5140 * of them from Tasklet/DPC context that too in data path is costly.
5141 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5142 * events with type WLC_E_TRACE.
5143 * We'll print these console logs from the workqueue context by enqueueing
5144 * the SKB here; dequeuing is done in the workqueue, and the SKB is freed
5145 * only if logtrace_pkt_sendup is true
5146 */
5147 if (event_type == WLC_E_TRACE) {
5148 DHD_EVENT(("%s: WLC_E_TRACE\n", __FUNCTION__));
5149 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5150 continue;
5151 }
5152#endif /* SHOW_LOGTRACE */
5153
5154 ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
5155
5156 wl_event_to_host_order(&event);
5157 if (!tout_ctrl)
5158 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
5159
5160#if defined(PNO_SUPPORT)
5161 if (event_type == WLC_E_PFN_NET_FOUND) {
5162 /* enforce custom wake lock to guarantee that the kernel is not suspended */
5163 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
5164 }
5165#endif /* PNO_SUPPORT */
5166 if (numpkt != 1) {
5167 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
5168 __FUNCTION__));
5169 }
5170
5171#ifdef DHD_WAKE_STATUS
5172 if (unlikely(pkt_wake)) {
5173#ifdef DHD_WAKE_EVENT_STATUS
5174 if (event.event_type < WLC_E_LAST) {
5175 wcp->rc_event[event.event_type]++;
5176 wcp->rcwake++;
5177 pkt_wake = 0;
5178 }
5179#endif /* DHD_WAKE_EVENT_STATUS */
5180 }
5181#endif /* DHD_WAKE_STATUS */
5182
5183 /* For delete virtual interface event, wl_host_event returns positive
5184 * i/f index, do not proceed. just free the pkt.
5185 */
5186 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
5187 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
5188 __FUNCTION__));
5189#ifdef DHD_USE_STATIC_CTRLBUF
5190 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5191#else
5192 PKTFREE(dhdp->osh, pktbuf, FALSE);
5193#endif
5194 continue;
5195 }
5196
5197 /*
5198 * For event packets there is a possibility of ifidx
5199 * getting modified, so update the ifp once again.
5201 */
5202 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
5203 ifp = dhd->iflist[ifidx];
5204#ifndef PROP_TXSTATUS_VSDB
5205 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
5206#else
5207 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
5208 dhd->pub.up))
5209#endif /* PROP_TXSTATUS_VSDB */
5210 {
5211 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
5212 __FUNCTION__));
5213#ifdef DHD_USE_STATIC_CTRLBUF
5214 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5215#else
5216 PKTFREE(dhdp->osh, pktbuf, FALSE);
5217#endif
5218 continue;
5219 }
5220
5221#ifdef SENDPROB
5222 if (dhdp->wl_event_enabled ||
5223 (dhdp->recv_probereq && (event.event_type == WLC_E_PROBREQ_MSG)))
5224#else
5225 if (dhdp->wl_event_enabled)
5226#endif
5227 {
5228#ifdef DHD_USE_STATIC_CTRLBUF
5229 /* If event bufs are allocated via static buf pool
5230 * and wl events are enabled, make a copy, free the
5231 * local one and send the copy up.
5232 */
5233 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
5234 /* Copy event and send it up */
5235 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5236 if (nskb) {
5237 skb = nskb;
5238 } else {
5239 DHD_ERROR(("skb clone failed. dropping event.\n"));
5240 continue;
5241 }
5242#endif /* DHD_USE_STATIC_CTRLBUF */
5243 } else {
5244 /* If event forwarding is not explicitly enabled, drop events */
5245#ifdef DHD_USE_STATIC_CTRLBUF
5246 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5247#else
5248 PKTFREE(dhdp->osh, pktbuf, FALSE);
5249#endif /* DHD_USE_STATIC_CTRLBUF */
5250 continue;
5251 }
5252 } else {
5253 tout_rx = DHD_PACKET_TIMEOUT_MS;
5254
5255#ifdef PROP_TXSTATUS
5256 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
5257#endif /* PROP_TXSTATUS */
5258
5259#ifdef DHD_WAKE_STATUS
5260 if (unlikely(pkt_wake)) {
5261 wcp->rxwake++;
5262#ifdef DHD_WAKE_RX_STATUS
5263#define ETHER_ICMP6_HEADER 20
5264#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
5265#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
5266#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
5267
5268 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
5269 wcp->rx_arp++;
5270 if (dump_data[0] == 0xFF) { /* Broadcast */
5271 wcp->rx_bcast++;
5272 } else if (dump_data[0] & 0x01) { /* Multicast */
5273 wcp->rx_mcast++;
5274 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
5275 wcp->rx_multi_ipv6++;
5276 if ((skb->len > ETHER_ICMP6_HEADER) &&
5277 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
5278 wcp->rx_icmpv6++;
5279 if (skb->len > ETHER_ICMPV6_TYPE) {
5280 switch (dump_data[ETHER_ICMPV6_TYPE]) {
5281 case NDISC_ROUTER_ADVERTISEMENT:
5282 wcp->rx_icmpv6_ra++;
5283 break;
5284 case NDISC_NEIGHBOUR_ADVERTISEMENT:
5285 wcp->rx_icmpv6_na++;
5286 break;
5287 case NDISC_NEIGHBOUR_SOLICITATION:
5288 wcp->rx_icmpv6_ns++;
5289 break;
5290 }
5291 }
5292 }
5293 } else if (dump_data[2] == 0x5E) {
5294 wcp->rx_multi_ipv4++;
5295 } else {
5296 wcp->rx_multi_other++;
5297 }
5298 } else { /* Unicast */
5299 wcp->rx_ucast++;
5300 }
5301#undef ETHER_ICMP6_HEADER
5302#undef ETHER_IPV6_SADDR
5303#undef ETHER_IPV6_DAADR
5304#undef ETHER_ICMPV6_TYPE
5305#endif /* DHD_WAKE_RX_STATUS */
5306 pkt_wake = 0;
5307 }
5308#endif /* DHD_WAKE_STATUS */
5309 }
5310
5311#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
5312 ifp->net->last_rx = jiffies;
5313#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
5314
5315 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
5316 dhdp->dstats.rx_bytes += skb->len;
5317 dhdp->rx_packets++; /* Local count */
5318 ifp->stats.rx_bytes += skb->len;
5319 ifp->stats.rx_packets++;
5320 }
5321
5322 /* XXX WL here makes sure data is 4-byte aligned? */
5323 if (in_interrupt()) {
5324 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5325 __FUNCTION__, __LINE__);
5326#if defined(DHD_LB_RXP)
5327#ifdef ENABLE_DHD_GRO
5328 /* The pktlog module clones an skb using skb_clone() and
5329 * stores the pointer in its ring buffer. Once the buffer
5330 * is full, PKTFREE is called to remove the oldest skb.
5331 * A kernel panic occurred when the pktlog module freed an
5332 * rx frame that had been handled by napi_gro_receive(),
5333 * so as a fix DHD does not use napi_gro_receive() for
5334 * packets the pktlog module may hold.
5336 */
5337 if (dhd_gro_enable && !skb_cloned(skb) &&
5338 ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
5339 napi_gro_receive(&dhd->rx_napi_struct, skb);
5340 } else {
5341 netif_receive_skb(skb);
5342 }
5343#else
5344#if defined(WL_MONITOR) && defined(BCMSDIO)
5345 if (dhd_monitor_enabled(dhdp, ifidx))
5346 dhd_rx_mon_pkt_sdio(dhdp, skb, ifidx);
5347 else
5348#endif /* WL_MONITOR && BCMSDIO */
5349 netif_receive_skb(skb);
5350#endif /* ENABLE_DHD_GRO */
5351#else /* !defined(DHD_LB_RXP) */
5352 netif_rx(skb);
5353#endif /* !defined(DHD_LB_RXP) */
5354 } else {
5355 if (dhd->rxthread_enabled) {
5356 if (!skbhead)
5357 skbhead = skb;
5358 else
5359 PKTSETNEXT(dhdp->osh, skbprev, skb);
5360 skbprev = skb;
5361 } else {
5362
5363 /* If the receive is not processed inside an ISR,
5364 * the softirqd must be woken explicitly to service
5365 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5366 * by netif_rx_ni(), but in earlier kernels, we need
5367 * to do it manually.
5368 */
5369 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5370 __FUNCTION__, __LINE__);
5371
5372#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
5373 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5374#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
5375#if defined(DHD_LB_RXP)
5376#ifdef ENABLE_DHD_GRO
5377 if (dhd_gro_enable && !skb_cloned(skb) &&
5378 ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
5379 napi_gro_receive(&dhd->rx_napi_struct, skb);
5380 } else {
5381 netif_receive_skb(skb);
5382 }
5383#else
5384 netif_receive_skb(skb);
5385#endif /* ENABLE_DHD_GRO */
5386#else /* !defined(DHD_LB_RXP) */
5387 netif_rx_ni(skb);
5388#endif /* !defined(DHD_LB_RXP) */
5389 }
5390 }
5391 }
5392
5393 if (dhd->rxthread_enabled && skbhead)
5394 dhd_sched_rxf(dhdp, skbhead);
5395
5396 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
5397 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
5398}
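
/* Illustrative sketch (not part of the driver): the GRO hand-off above can
 * be read as one eligibility predicate. dhd_gro_enable, ETHER_TYPE_BRCM and
 * the napi context come from the surrounding code; the helper name below is
 * hypothetical.
 */
#if 0 /* example only, never compiled */
static inline bool dhd_rx_gro_eligible(struct sk_buff *skb)
{
	/* GRO only when enabled, the skb is not shared with the pktlog
	 * ring (skb_cloned), and it is not an in-band BRCM event frame.
	 */
	return dhd_gro_enable && !skb_cloned(skb) &&
		ntoh16(skb->protocol) != ETHER_TYPE_BRCM;
}

/* Usage at the hand-off point:
 *	if (dhd_rx_gro_eligible(skb))
 *		napi_gro_receive(&dhd->rx_napi_struct, skb);
 *	else
 *		netif_receive_skb(skb);
 */
#endif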
5399
5400void
5401dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
5402{
5403	 /* Nothing to do in the Linux version */
5404 return;
5405}
5406
5407void
5408dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
5409{
5410 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
5411 struct ether_header *eh;
5412 uint16 type;
5413
5414 if (dhdp->tput_data.tput_test_running) {
5415
5416 dhdp->batch_tx_pkts_cmpl++;
5417
5418 /* don't count the stop pkt */
5419 if (success &&
5420 dhdp->batch_tx_pkts_cmpl <= dhdp->batch_tx_num_pkts)
5421 dhdp->tput_data.pkts_good++;
5422 else if (!success)
5423 dhdp->tput_data.pkts_bad++;
5424
5425	 /* we don't care about the stop packet in the tput test */
5426 if (dhdp->batch_tx_pkts_cmpl == dhdp->batch_tx_num_pkts) {
5427 dhdp->tput_stop_ts = OSL_SYSUPTIME_US();
5428 dhdp->tput_data.pkts_cmpl += dhdp->batch_tx_pkts_cmpl;
5429 dhdp->tput_data.num_pkts += dhdp->batch_tx_num_pkts;
5430 dhd_os_tput_test_wake(dhdp);
5431 }
5432 }
5433	 /* XXX where does this stuff belong? */
5434 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
5435
5436 /* XXX Use packet tag when it is available to identify its type */
5437
5438 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
5439 type = ntoh16(eh->ether_type);
5440
5441 if (type == ETHER_TYPE_802_1X) {
5442 atomic_dec(&dhd->pend_8021x_cnt);
5443 }
5444
5445#ifdef PROP_TXSTATUS
5446 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
5447 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
5448 uint datalen = PKTLEN(dhd->pub.osh, txp);
5449 if (ifp != NULL) {
5450 if (success) {
5451 dhd->pub.tx_packets++;
5452 ifp->stats.tx_packets++;
5453 ifp->stats.tx_bytes += datalen;
5454 } else {
5455 ifp->stats.tx_dropped++;
5456 }
5457 }
5458 }
5459#endif
5460 if (success) {
5461 dhd->pub.tot_txcpl++;
5462 }
5463}
5464
5465int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
5466 uint timeout_ms)
5467{
5468 int timeout;
5469
5470	 /* Convert timeout from milliseconds to jiffies */
5471 timeout = msecs_to_jiffies(timeout_ms);
5472 pub->tput_test_done = FALSE;
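	/* Note: the caller-supplied condition pointer is overridden below;
	 * the wait is always on pub->tput_test_done.
	 */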
5473 condition = (uint *)&pub->tput_test_done;
5474 timeout = wait_event_timeout(pub->tx_tput_test_wait,
5475 (*condition), timeout);
5476
5477 return timeout;
5478}
5479
5480int dhd_os_tput_test_wake(dhd_pub_t * pub)
5481{
5482 OSL_SMP_WMB();
5483 pub->tput_test_done = TRUE;
5484 OSL_SMP_WMB();
5485 wake_up(&(pub->tx_tput_test_wait));
5486 return 0;
5487}
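
/* Illustrative sketch (not part of the driver): how the wait/wake pair above
 * is meant to be used. The flag write in dhd_os_tput_test_wake() is fenced
 * with OSL_SMP_WMB() before wake_up(), so the waiter's condition check sees
 * it. The runner function below is hypothetical.
 */
#if 0 /* example only, never compiled */
static void tput_test_run(dhd_pub_t *pub)
{
	uint done = 0;

	/* ... queue the batch of tx packets here ... */

	/* The tx-completion path calls dhd_os_tput_test_wake() once the
	 * last batch packet completes; 0 means we timed out after 5s.
	 */
	if (dhd_os_tput_test_wait(pub, &done, 5000) == 0) {
		/* handle timeout */
	}
}
#endif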
5488
5489static struct net_device_stats *
5490dhd_get_stats(struct net_device *net)
5491{
5492 dhd_info_t *dhd = DHD_DEV_INFO(net);
5493 dhd_if_t *ifp;
5494
5495 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5496
5497 if (!dhd) {
5498 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
5499 goto error;
5500 }
5501
5502 ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
5503 if (!ifp) {
5504 /* return empty stats */
5505 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
5506 goto error;
5507 }
5508
5509 if (dhd->pub.up) {
5510 /* Use the protocol to get dongle stats */
5511 dhd_prot_dstats(&dhd->pub);
5512 }
5513 return &ifp->stats;
5514
5515error:
5516 memset(&net->stats, 0, sizeof(net->stats));
5517 return &net->stats;
5518}
5519
5520#ifndef BCMDBUS
5521static int
5522dhd_watchdog_thread(void *data)
5523{
5524 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5525 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5526 /* This thread doesn't need any user-level access,
5527 * so get rid of all our resources
5528 */
5529 if (dhd_watchdog_prio > 0) {
5530 struct sched_param param;
5531 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
5532 dhd_watchdog_prio:(MAX_RT_PRIO-1);
5533 setScheduler(current, SCHED_FIFO, &param);
5534 }
5535
5536 while (1) {
5537 if (down_interruptible (&tsk->sema) == 0) {
5538 unsigned long flags;
5539 unsigned long jiffies_at_start = jiffies;
5540 unsigned long time_lapse;
5541#ifdef BCMPCIE
5542 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5543#endif /* BCMPCIE */
5544
5545 SMP_RD_BARRIER_DEPENDS();
5546 if (tsk->terminated) {
5547#ifdef BCMPCIE
5548 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5549#endif /* BCMPCIE */
5550 break;
5551 }
5552
5553 if (dhd->pub.dongle_reset == FALSE) {
5554 DHD_TIMER(("%s:\n", __FUNCTION__));
5555 dhd_analyze_sock_flows(dhd, dhd_watchdog_ms);
5556 dhd_bus_watchdog(&dhd->pub);
5557
5558 DHD_GENERAL_LOCK(&dhd->pub, flags);
5559 /* Count the tick for reference */
5560 dhd->pub.tickcnt++;
5561#ifdef DHD_L2_FILTER
5562 dhd_l2_filter_watchdog(&dhd->pub);
5563#endif /* DHD_L2_FILTER */
5564 time_lapse = jiffies - jiffies_at_start;
5565
5566 /* Reschedule the watchdog */
5567 if (dhd->wd_timer_valid) {
5568 mod_timer(&dhd->timer,
5569 jiffies +
5570 msecs_to_jiffies(dhd_watchdog_ms) -
5571 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
5572 }
5573 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5574 }
5575#ifdef BCMPCIE
5576 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5577#endif /* BCMPCIE */
5578 } else {
5579 break;
5580 }
5581 }
5582
5583 complete_and_exit(&tsk->completed, 0);
5584}
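
/* Illustrative sketch (not part of the driver): the drift-compensated rearm
 * used by the watchdog thread above. If one tick took time_lapse jiffies to
 * process, the next expiry is pulled in by the same amount, clamped at zero
 * so the timer is never armed in the past. The helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static unsigned long dhd_wd_next_expiry(unsigned long period_ms,
	unsigned long time_lapse)
{
	unsigned long period = msecs_to_jiffies(period_ms);

	return jiffies + period - min(period, time_lapse);
}
#endif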
5585
5586static void dhd_watchdog(ulong data)
5587{
5588 dhd_info_t *dhd = (dhd_info_t *)data;
5589 unsigned long flags;
5590
5591 if (dhd->pub.dongle_reset) {
5592 return;
5593 }
5594
5595 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
5596 up(&dhd->thr_wdt_ctl.sema);
5597 return;
5598 }
5599
5600#ifdef BCMPCIE
5601 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5602#endif /* BCMPCIE */
5603 /* Call the bus module watchdog */
5604 dhd_bus_watchdog(&dhd->pub);
5605
5606 DHD_GENERAL_LOCK(&dhd->pub, flags);
5607 /* Count the tick for reference */
5608 dhd->pub.tickcnt++;
5609
5610#ifdef DHD_L2_FILTER
5611 dhd_l2_filter_watchdog(&dhd->pub);
5612#endif /* DHD_L2_FILTER */
5613 /* Reschedule the watchdog */
5614 if (dhd->wd_timer_valid)
5615 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
5616 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5617#ifdef BCMPCIE
5618 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5619#endif /* BCMPCIE */
5620}
5621
5622#ifdef DHD_PCIE_RUNTIMEPM
5623static int
5624dhd_rpm_state_thread(void *data)
5625{
5626 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5627 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5628
5629 while (1) {
5630 if (down_interruptible (&tsk->sema) == 0) {
5631 unsigned long flags;
5632 unsigned long jiffies_at_start = jiffies;
5633 unsigned long time_lapse;
5634
5635 SMP_RD_BARRIER_DEPENDS();
5636 if (tsk->terminated) {
5637 break;
5638 }
5639
5640 if (dhd->pub.dongle_reset == FALSE) {
5641 DHD_TIMER(("%s:\n", __FUNCTION__));
5642 if (dhd->pub.up) {
5643#if defined(PCIE_INB_DW)
5644 dhd_bus_dw_deassert(&dhd->pub);
5645#endif
5646 if (dhd_get_rpm_state(&dhd->pub)) {
5647 dhd_runtimepm_state(&dhd->pub);
5648 }
5649 }
5650 DHD_GENERAL_LOCK(&dhd->pub, flags);
5651 time_lapse = jiffies - jiffies_at_start;
5652
5653 /* Reschedule the watchdog */
5654 if (dhd->rpm_timer_valid) {
5655 mod_timer(&dhd->rpm_timer,
5656 jiffies +
5657 msecs_to_jiffies(dhd_runtimepm_ms) -
5658 min(msecs_to_jiffies(dhd_runtimepm_ms),
5659 time_lapse));
5660 }
5661 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5662 }
5663 } else {
5664 break;
5665 }
5666 }
5667
5668 complete_and_exit(&tsk->completed, 0);
5669}
5670
5671static void dhd_runtimepm(ulong data)
5672{
5673 dhd_info_t *dhd = (dhd_info_t *)data;
5674
5675 if (dhd->pub.dongle_reset) {
5676 return;
5677 }
5678
5679 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
5680 up(&dhd->thr_rpm_ctl.sema);
5681 return;
5682 }
5683}
5684
5685void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
5686{
5687 dhd_set_rpm_state(dhdp, FALSE);
5688 dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
5689}
5690
5691void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
5692{
5693 /* Enable Runtime PM except for MFG Mode */
5694 if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
5695 if (dhd_get_idletime(dhdp)) {
5696 dhd_set_rpm_state(dhdp, TRUE);
5697 }
5698 }
5699}
5700
5701#endif /* DHD_PCIE_RUNTIMEPM */
5702
5703#ifdef ENABLE_ADAPTIVE_SCHED
5704static void
5705dhd_sched_policy(int prio)
5706{
5707 struct sched_param param;
5708 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
5709 param.sched_priority = 0;
5710 setScheduler(current, SCHED_NORMAL, &param);
5711 } else {
5712 if (get_scheduler_policy(current) != SCHED_FIFO) {
5713 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
5714 setScheduler(current, SCHED_FIFO, &param);
5715 }
5716 }
5717}
5718#endif /* ENABLE_ADAPTIVE_SCHED */
5719#ifdef DEBUG_CPU_FREQ
5720static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
5721{
5722 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
5723 struct cpufreq_freqs *freq = data;
5724 if (dhd) {
5725 if (!dhd->new_freq)
5726 goto exit;
5727 if (val == CPUFREQ_POSTCHANGE) {
5728 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
5729 freq->new, freq->cpu));
5730 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
5731 }
5732 }
5733exit:
5734 return 0;
5735}
5736#endif /* DEBUG_CPU_FREQ */
5737
5738static int
5739dhd_dpc_thread(void *data)
5740{
5741 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5742 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5743
5744 /* This thread doesn't need any user-level access,
5745 * so get rid of all our resources
5746 */
5747 if (dhd_dpc_prio > 0)
5748 {
5749 struct sched_param param;
5750 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
5751 setScheduler(current, SCHED_FIFO, &param);
5752 }
5753
5754#ifdef CUSTOM_DPC_CPUCORE
5755 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
5756#endif
5757#ifdef CUSTOM_SET_CPUCORE
5758 dhd->pub.current_dpc = current;
5759#endif /* CUSTOM_SET_CPUCORE */
5760 /* Run until signal received */
5761 while (1) {
5762 if (dhd->pub.conf->dpc_cpucore >= 0) {
5763 printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
5764 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
5765 dhd->pub.conf->dpc_cpucore = -1;
5766 }
5767 if (!binary_sema_down(tsk)) {
5768#ifdef ENABLE_ADAPTIVE_SCHED
5769 dhd_sched_policy(dhd_dpc_prio);
5770#endif /* ENABLE_ADAPTIVE_SCHED */
5771 SMP_RD_BARRIER_DEPENDS();
5772 if (tsk->terminated) {
5773 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5774 break;
5775 }
5776
5777 /* Call bus dpc unless it indicated down (then clean stop) */
5778 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5779#ifdef DEBUG_DPC_THREAD_WATCHDOG
5780 int resched_cnt = 0;
5781#endif /* DEBUG_DPC_THREAD_WATCHDOG */
5782 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
5783 while (dhd_bus_dpc(dhd->pub.bus)) {
5784 /* process all data */
5785#ifdef DEBUG_DPC_THREAD_WATCHDOG
5786 resched_cnt++;
5787 if (resched_cnt > MAX_RESCHED_CNT) {
5788	 DHD_INFO(("%s Calling msleep to "
5789	 "let other processes run.\n",
5790	 __FUNCTION__));
5791 dhd->pub.dhd_bug_on = true;
5792 resched_cnt = 0;
5793 OSL_SLEEP(1);
5794 }
5795#endif /* DEBUG_DPC_THREAD_WATCHDOG */
5796 }
5797 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
5798 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5799 } else {
5800 if (dhd->pub.up)
5801 dhd_bus_stop(dhd->pub.bus, TRUE);
5802 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5803 }
5804 } else {
5805 break;
5806 }
5807 }
5808 complete_and_exit(&tsk->completed, 0);
5809}
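
/* Illustrative sketch (not part of the driver): binary_sema_up()/_down() are
 * defined elsewhere in this file; a binary semaphore of this kind is usually
 * an ordinary semaphore guarded by a flag so that at most one wakeup token
 * is ever pending. The struct and field names below are hypothetical.
 */
#if 0 /* example only, never compiled */
struct binary_sema {
	struct semaphore sema;
	atomic_t posted;		/* 1 while a token is pending */
};

static bool binary_up(struct binary_sema *bs)
{
	if (atomic_cmpxchg(&bs->posted, 0, 1) == 0) {
		up(&bs->sema);
		return true;	/* token posted; caller keeps the wake lock */
	}
	return false;		/* already pending; caller drops the wake lock */
}

static int binary_down(struct binary_sema *bs)
{
	int err = down_interruptible(&bs->sema);

	if (!err)
		atomic_set(&bs->posted, 0);
	return err;
}
#endif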
5810
5811static int
5812dhd_rxf_thread(void *data)
5813{
5814 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5815 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5816#if defined(WAIT_DEQUEUE)
5817#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
5818 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
5819#endif
5820 dhd_pub_t *pub = &dhd->pub;
5821
5822 /* This thread doesn't need any user-level access,
5823 * so get rid of all our resources
5824 */
5825 if (dhd_rxf_prio > 0)
5826 {
5827 struct sched_param param;
5828 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
5829 setScheduler(current, SCHED_FIFO, &param);
5830 }
5831
5832#ifdef CUSTOM_SET_CPUCORE
5833 dhd->pub.current_rxf = current;
5834#endif /* CUSTOM_SET_CPUCORE */
5835 /* Run until signal received */
5836 while (1) {
5837 if (dhd->pub.conf->rxf_cpucore >= 0) {
5838 printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
5839 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
5840 dhd->pub.conf->rxf_cpucore = -1;
5841 }
5842 if (down_interruptible(&tsk->sema) == 0) {
5843 void *skb;
5844#ifdef ENABLE_ADAPTIVE_SCHED
5845 dhd_sched_policy(dhd_rxf_prio);
5846#endif /* ENABLE_ADAPTIVE_SCHED */
5847
5848 SMP_RD_BARRIER_DEPENDS();
5849
5850 if (tsk->terminated) {
5851 DHD_OS_WAKE_UNLOCK(pub);
5852 break;
5853 }
5854 skb = dhd_rxf_dequeue(pub);
5855
5856 if (skb == NULL) {
5857 continue;
5858 }
5859 while (skb) {
5860 void *skbnext = PKTNEXT(pub->osh, skb);
5861 PKTSETNEXT(pub->osh, skb, NULL);
5862 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5863 __FUNCTION__, __LINE__);
5864#if defined(WL_MONITOR) && defined(BCMSDIO)
5865 if (dhd_monitor_enabled(pub, 0))
5866 dhd_rx_mon_pkt_sdio(pub, skb, 0);
5867 else
5868#endif /* WL_MONITOR && BCMSDIO */
5869 netif_rx_ni(skb);
5870 skb = skbnext;
5871 }
5872#if defined(WAIT_DEQUEUE)
5873 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
5874 OSL_SLEEP(1);
5875 watchdogTime = OSL_SYSUPTIME();
5876 }
5877#endif
5878
5879 DHD_OS_WAKE_UNLOCK(pub);
5880 } else {
5881 break;
5882 }
5883 }
5884 complete_and_exit(&tsk->completed, 0);
5885}
5886
5887#ifdef BCMPCIE
5888void dhd_dpc_enable(dhd_pub_t *dhdp)
5889{
5890#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
5891 dhd_info_t *dhd;
5892
5893 if (!dhdp || !dhdp->info)
5894 return;
5895 dhd = dhdp->info;
5896#endif /* DHD_LB_RXP || DHD_LB_TXP */
5897
5898#ifdef DHD_LB_RXP
5899 __skb_queue_head_init(&dhd->rx_pend_queue);
5900#endif /* DHD_LB_RXP */
5901
5902#ifdef DHD_LB_TXP
5903 skb_queue_head_init(&dhd->tx_pend_queue);
5904#endif /* DHD_LB_TXP */
5905}
5906#endif /* BCMPCIE */
5907
5908#ifdef BCMPCIE
5909void
5910dhd_dpc_kill(dhd_pub_t *dhdp)
5911{
5912 dhd_info_t *dhd;
5913
5914 if (!dhdp) {
5915 return;
5916 }
5917
5918 dhd = dhdp->info;
5919
5920 if (!dhd) {
5921 return;
5922 }
5923
5924 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5925 tasklet_kill(&dhd->tasklet);
5926 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
5927 }
5928
5929#ifdef DHD_LB
5930#ifdef DHD_LB_RXP
5931 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
5932 __skb_queue_purge(&dhd->rx_pend_queue);
5933#endif /* DHD_LB_RXP */
5934#ifdef DHD_LB_TXP
5935 cancel_work_sync(&dhd->tx_dispatcher_work);
5936 skb_queue_purge(&dhd->tx_pend_queue);
5937#endif /* DHD_LB_TXP */
5938
5939 /* Kill the Load Balancing Tasklets */
5940#if defined(DHD_LB_TXC)
5941 tasklet_kill(&dhd->tx_compl_tasklet);
5942#endif /* DHD_LB_TXC */
5943#if defined(DHD_LB_RXC)
5944 tasklet_kill(&dhd->rx_compl_tasklet);
5945#endif /* DHD_LB_RXC */
5946#if defined(DHD_LB_TXP)
5947 tasklet_kill(&dhd->tx_tasklet);
5948#endif /* DHD_LB_TXP */
5949#endif /* DHD_LB */
5950}
5951
5952void
5953dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
5954{
5955 dhd_info_t *dhd;
5956
5957 if (!dhdp) {
5958 return;
5959 }
5960
5961 dhd = dhdp->info;
5962
5963 if (!dhd) {
5964 return;
5965 }
5966
5967 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5968 tasklet_kill(&dhd->tasklet);
5969 }
5970}
5971#endif /* BCMPCIE */
5972
5973static void
5974dhd_dpc(ulong data)
5975{
5976 dhd_info_t *dhd;
5977
5978 dhd = (dhd_info_t *)data;
5979
5980	 /* This tasklet can be scheduled from dhd_sched_dpc() [dhd_linux.c]
5981	 * below, with the wake lock held;
5982	 * the tasklet itself is initialized in dhd_attach()
5983	 */
5984 /* Call bus dpc unless it indicated down (then clean stop) */
5985 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5986#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
5987 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5988#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
5989 if (dhd_bus_dpc(dhd->pub.bus)) {
5990 tasklet_schedule(&dhd->tasklet);
5991 }
5992 } else {
5993 dhd_bus_stop(dhd->pub.bus, TRUE);
5994 }
5995}
5996
5997void
5998dhd_sched_dpc(dhd_pub_t *dhdp)
5999{
6000 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6001
6002 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
6003 DHD_OS_WAKE_LOCK(dhdp);
6004	 /* If the semaphore is already up (no new token posted),
6005	 * the wake unlock must be done here
6006	 */
6007 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
6008 DHD_OS_WAKE_UNLOCK(dhdp);
6009 }
6010 return;
6011 } else {
6012 tasklet_schedule(&dhd->tasklet);
6013 }
6014}
6015#endif /* BCMDBUS */
6016
6017static void
6018dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
6019{
6020 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6021
6022 DHD_OS_WAKE_LOCK(dhdp);
6023
6024 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6025 do {
6026 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
6027 break;
6028 } while (1);
6029 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6030 up(&dhd->thr_rxf_ctl.sema);
6031 } else {
6032 DHD_OS_WAKE_UNLOCK(dhdp);
6033 }
6034 return;
6035}
6036
6037#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6038#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6039
6040#ifdef TOE
6041/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6042static int
6043dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
6044{
6045 char buf[32];
6046 int ret;
6047
6048 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6049
6050 if (ret < 0) {
6051 if (ret == -EIO) {
6052 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
6053 ifidx)));
6054 return -EOPNOTSUPP;
6055 }
6056
6057 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6058 return ret;
6059 }
6060
6061 memcpy(toe_ol, buf, sizeof(uint32));
6062 return 0;
6063}
6064
6065/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6066static int
6067dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
6068{
6069 int toe, ret;
6070
6071 /* Set toe_ol as requested */
6072 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
6073 if (ret < 0) {
6074 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6075 dhd_ifname(&dhd->pub, ifidx), ret));
6076 return ret;
6077 }
6078
6079 /* Enable toe globally only if any components are enabled. */
6080 toe = (toe_ol != 0);
6081 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
6082 if (ret < 0) {
6083 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6084 return ret;
6085 }
6086
6087 return 0;
6088}
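
/* Illustrative sketch (not part of the driver): a hypothetical caller that
 * combines the two helpers above to enable TX checksum offload while leaving
 * the other TOE components untouched. TOE_TX_CSUM_OL is the component bit
 * also used by the ethtool handler below.
 */
#if 0 /* example only, never compiled */
static int dhd_toe_enable_txcsum(dhd_info_t *dhd, int ifidx)
{
	uint32 toe_ol;
	int ret;

	ret = dhd_toe_get(dhd, ifidx, &toe_ol);
	if (ret < 0)
		return ret;

	return dhd_toe_set(dhd, ifidx, toe_ol | TOE_TX_CSUM_OL);
}
#endif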
6089#endif /* TOE */
6090
6091#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6092void dhd_set_scb_probe(dhd_pub_t *dhd)
6093{
6094 wl_scb_probe_t scb_probe;
6095 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
6096 int ret;
6097
6098 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6099 return;
6100 }
6101
6102 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
6103 if (ret < 0) {
6104 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
6105 }
6106
6107 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
6108
6109 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
6110
6111 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
6112 TRUE);
6113 if (ret < 0) {
6114 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
6115 return;
6116 }
6117}
6118#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6119
6120static void
6121dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
6122{
6123 dhd_info_t *dhd = DHD_DEV_INFO(net);
6124
6125 snprintf(info->driver, sizeof(info->driver), "wl");
6126 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
6127}
6128
6129struct ethtool_ops dhd_ethtool_ops = {
6130 .get_drvinfo = dhd_ethtool_get_drvinfo
6131};
6132
6133static int
6134dhd_ethtool(dhd_info_t *dhd, void *uaddr)
6135{
6136 struct ethtool_drvinfo info;
6137 char drvname[sizeof(info.driver)];
6138 uint32 cmd;
6139#ifdef TOE
6140 struct ethtool_value edata;
6141 uint32 toe_cmpnt, csum_dir;
6142 int ret;
6143#endif
6144
6145 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6146
6147 /* all ethtool calls start with a cmd word */
6148 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
6149 return -EFAULT;
6150
6151 switch (cmd) {
6152 case ETHTOOL_GDRVINFO:
6153 /* Copy out any request driver name */
6154 bzero(&info.driver, sizeof(info.driver));
6155 if (copy_from_user(&info, uaddr, sizeof(info)))
6156 return -EFAULT;
6157 if (info.driver[sizeof(info.driver) - 1] != '\0') {
6158	 DHD_ERROR(("%s: Exceeds the size of info.driver, "
6159	 "truncating last byte with null\n", __FUNCTION__));
6160 info.driver[sizeof(info.driver) - 1] = '\0';
6161 }
6162 strlcpy(drvname, info.driver, sizeof(drvname));
6163
6164 /* clear struct for return */
6165 memset(&info, 0, sizeof(info));
6166 info.cmd = cmd;
6167
6168 /* if dhd requested, identify ourselves */
6169 if (strcmp(drvname, "?dhd") == 0) {
6170 snprintf(info.driver, sizeof(info.driver), "dhd");
6171 strlcpy(info.version, EPI_VERSION_STR, sizeof(info.version));
6172 }
6173
6174 /* otherwise, require dongle to be up */
6175 else if (!dhd->pub.up) {
6176 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
6177 return -ENODEV;
6178 }
6179
6180 /* finally, report dongle driver type */
6181 else if (dhd->pub.iswl)
6182 snprintf(info.driver, sizeof(info.driver), "wl");
6183 else
6184 snprintf(info.driver, sizeof(info.driver), "xx");
6185
6186 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
6187 if (copy_to_user(uaddr, &info, sizeof(info)))
6188 return -EFAULT;
6189 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
6190 (int)sizeof(drvname), drvname, info.driver));
6191 break;
6192
6193#ifdef TOE
6194 /* Get toe offload components from dongle */
6195 case ETHTOOL_GRXCSUM:
6196 case ETHTOOL_GTXCSUM:
6197 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
6198 return ret;
6199
6200 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
6201
6202 edata.cmd = cmd;
6203 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
6204
6205 if (copy_to_user(uaddr, &edata, sizeof(edata)))
6206 return -EFAULT;
6207 break;
6208
6209 /* Set toe offload components in dongle */
6210 case ETHTOOL_SRXCSUM:
6211 case ETHTOOL_STXCSUM:
6212 if (copy_from_user(&edata, uaddr, sizeof(edata)))
6213 return -EFAULT;
6214
6215 /* Read the current settings, update and write back */
6216 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
6217 return ret;
6218
6219 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
6220
6221 if (edata.data != 0)
6222 toe_cmpnt |= csum_dir;
6223 else
6224 toe_cmpnt &= ~csum_dir;
6225
6226 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
6227 return ret;
6228
6229 /* If setting TX checksum mode, tell Linux the new mode */
6230 if (cmd == ETHTOOL_STXCSUM) {
6231 if (edata.data)
6232 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
6233 else
6234 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
6235 }
6236
6237 break;
6238#endif /* TOE */
6239
6240 default:
6241 return -EOPNOTSUPP;
6242 }
6243
6244 return 0;
6245}
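
/* Illustrative sketch (not part of the driver): a userspace probe matching
 * the ETHTOOL_GDRVINFO handling above. Writing "?dhd" into info.driver before
 * the ioctl asks the host driver to identify itself instead of reporting the
 * dongle type. The function name is hypothetical.
 */
#if 0 /* example only (userspace side), never compiled */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int query_dhd(int sock, const char *ifname, struct ethtool_drvinfo *info)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	memset(info, 0, sizeof(*info));
	info->cmd = ETHTOOL_GDRVINFO;
	strcpy(info->driver, "?dhd");	/* ask DHD to identify itself */
	ifr.ifr_data = (char *)info;
	return ioctl(sock, SIOCETHTOOL, &ifr);
}
#endif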
6246
6247/* XXX function to detect that FW is dead and send Event up */
6248static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
6249{
6250 if (!dhdp) {
6251 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
6252 return FALSE;
6253 }
6254
6255 if (!dhdp->up)
6256 return FALSE;
6257
6258#if !defined(BCMPCIE) && !defined(BCMDBUS)
6259 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
6260 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
6261 return FALSE;
6262 }
6263#endif /* !BCMPCIE && !BCMDBUS */
6264
6265 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
6266 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
6267#ifdef BCMPCIE
6268 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
6269 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
6270 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
6271#else
6272 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
6273 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
6274#endif /* BCMPCIE */
6275 if (dhdp->hang_reason == 0) {
6276 if (dhdp->dongle_trap_occured) {
6277 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
6278#ifdef BCMPCIE
6279 } else if (dhdp->d3ackcnt_timeout) {
6280 dhdp->hang_reason = dhdp->is_sched_error ?
6281 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
6282 HANG_REASON_D3_ACK_TIMEOUT;
6283#endif /* BCMPCIE */
6284 } else {
6285 dhdp->hang_reason = dhdp->is_sched_error ?
6286 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
6287 HANG_REASON_IOCTL_RESP_TIMEOUT;
6288 }
6289 }
6290 printf("%s\n", info_string);
6291 net_os_send_hang_message(net);
6292 return TRUE;
6293 }
6294 return FALSE;
6295}
6296
6297#ifdef WL_MONITOR
6298bool
6299dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
6300{
6301 return (dhd->info->monitor_type != 0);
6302}
6303
6304#ifdef BCMSDIO
6305static void
6306dhd_rx_mon_pkt_sdio(dhd_pub_t *dhdp, void *pkt, int ifidx)
6307{
6308 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6309
6310 if (!dhd->monitor_skb) {
6311 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
6312 return;
6313 }
6314
6315 if (dhd->monitor_type && dhd->monitor_dev)
6316 dhd->monitor_skb->dev = dhd->monitor_dev;
6317 else {
6318 PKTFREE(dhdp->osh, pkt, FALSE);
6319 dhd->monitor_skb = NULL;
6320 return;
6321 }
6322
6323 dhd->monitor_skb->protocol =
6324 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6325 dhd->monitor_len = 0;
6326
6327 netif_rx_ni(dhd->monitor_skb);
6328
6329 dhd->monitor_skb = NULL;
6330}
6331#elif defined(BCMPCIE)
6332void
6333dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
6334{
6335 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6336 {
6337 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
6338 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
6339 switch (amsdu_flag) {
6340 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
6341 default:
6342 if (!dhd->monitor_skb) {
6343 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
6344 == NULL)
6345 return;
6346 }
6347 if (dhd->monitor_type && dhd->monitor_dev)
6348 dhd->monitor_skb->dev = dhd->monitor_dev;
6349 else {
6350 PKTFREE(dhdp->osh, pkt, FALSE);
6351 dhd->monitor_skb = NULL;
6352 return;
6353 }
6354 dhd->monitor_skb->protocol =
6355 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6356 dhd->monitor_len = 0;
6357 break;
6358
6359 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
6360 if (!dhd->monitor_skb) {
6361 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
6362 == NULL)
6363 return;
6364 dhd->monitor_len = 0;
6365 }
6366 if (dhd->monitor_type && dhd->monitor_dev)
6367 dhd->monitor_skb->dev = dhd->monitor_dev;
6368 else {
6369 PKTFREE(dhdp->osh, pkt, FALSE);
6370	 dev_kfree_skb(dhd->monitor_skb); dhd->monitor_skb = NULL;
6371 return;
6372 }
6373 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
6374 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6375 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
6376 PKTFREE(dhdp->osh, pkt, FALSE);
6377 return;
6378
6379 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
6380 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
6381 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6382 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
6383 PKTFREE(dhdp->osh, pkt, FALSE);
6384 return;
6385
6386 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
6387 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
6388 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6389 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
6390 PKTFREE(dhdp->osh, pkt, FALSE);
6391 skb_put(dhd->monitor_skb, dhd->monitor_len);
6392 dhd->monitor_skb->protocol =
6393 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6394 dhd->monitor_len = 0;
6395 break;
6396 }
6397 }
6398
6399 /* XXX WL here makes sure data is 4-byte aligned? */
6400 if (in_interrupt()) {
6401	 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
6402 __FUNCTION__, __LINE__);
6403 netif_rx(dhd->monitor_skb);
6404 } else {
6405 /* If the receive is not processed inside an ISR,
6406 * the softirqd must be woken explicitly to service
6407 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6408 * by netif_rx_ni(), but in earlier kernels, we need
6409 * to do it manually.
6410 */
6411 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
6412 __FUNCTION__, __LINE__);
6413
6414 netif_rx_ni(dhd->monitor_skb);
6415 }
6416
6417 dhd->monitor_skb = NULL;
6418}
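
/* Summary of the monitor rx path above: it is a small reassembly state
 * machine keyed on the BCMPCIE_PKT_FLAGS_MONITOR_* flag of each completion:
 *
 *   NO_AMSDU : the rx buffer itself becomes the skb -> deliver as-is
 *   FIRST    : allocate an accumulator skb (MAX_MON_PKT_SIZE), copy, hold
 *   INTER    : append to the accumulator, hold
 *   LAST     : append, skb_put() the accumulated length, deliver
 *
 * dhd->monitor_skb is the accumulator and dhd->monitor_len the running
 * length; both are reset once the frame is handed to the stack.
 */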
6419#endif
6420
6421typedef struct dhd_mon_dev_priv {
6422 struct net_device_stats stats;
6423} dhd_mon_dev_priv_t;
6424
6425#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
6426#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
6427#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
6428
6429static int
6430dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
6431{
6432 PKTFREE(NULL, skb, FALSE);
6433 return 0;
6434}
6435
6436#if defined(BT_OVER_SDIO)
6437
6438void
6439dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
6440{
6441 dhdp->info->bus_user_count++;
6442}
6443
6444void
6445dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
6446{
6447 dhdp->info->bus_user_count--;
6448}
6449
6450/* Return values:
6451 * Success: Returns 0
6452 * Failure: Returns -1 or an errno code
6453 */
6454int
6455dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
6456{
6457 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6458 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6459 int ret = 0;
6460
6461 mutex_lock(&dhd->bus_user_lock);
6462 ++dhd->bus_user_count;
6463 if (dhd->bus_user_count < 0) {
6464 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6465 ret = -1;
6466 goto exit;
6467 }
6468
6469 if (dhd->bus_user_count == 1) {
6470
6471 dhd->pub.hang_was_sent = 0;
6472
6473 /* First user, turn on WL_REG, start the bus */
6474	 DHD_ERROR(("%s(): First user, turn on WL_REG & start the bus\n", __FUNCTION__));
6475
6476 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
6477 /* Enable F1 */
6478 ret = dhd_bus_resume(dhdp, 0);
6479 if (ret) {
6480 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
6481 __FUNCTION__, ret));
6482 goto exit;
6483 }
6484 }
6485
6486 /* XXX Some DHD modules (e.g. cfg80211) configures operation mode based on firmware
6487 * name. This is indeed a hack but we have to make it work properly before we have
6488 * a better solution
6489 */
6490 dhd_update_fw_nv_path(dhd);
6491 /* update firmware and nvram path to sdio bus */
6492 dhd_bus_update_fw_nv_path(dhd->pub.bus,
6493 dhd->fw_path, dhd->nv_path);
6494 /* download the firmware, Enable F2 */
6495 /* TODO: Should be done only in case of FW switch */
6496 ret = dhd_bus_devreset(dhdp, FALSE);
6497 dhd_bus_resume(dhdp, 1);
6498 if (!ret) {
6499 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
6500 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
6501 ret = -EFAULT;
6502 }
6503 } else {
6504 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
6505 }
6506 } else {
6507	 DHD_ERROR(("%s(): Bus is already acquired, just increasing the count %d\r\n",
6508 __FUNCTION__, dhd->bus_user_count));
6509 }
6510exit:
6511 mutex_unlock(&dhd->bus_user_lock);
6512 return ret;
6513}
6514EXPORT_SYMBOL(dhd_bus_get);
6515
6516/* Return values:
6517 * Success: Returns 0
6518 * Failure: Returns -1 or an errno code
6519 */
6520int
6521dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
6522{
6523 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6524 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6525 int ret = 0;
6526 BCM_REFERENCE(owner);
6527
6528 mutex_lock(&dhd->bus_user_lock);
6529 --dhd->bus_user_count;
6530 if (dhd->bus_user_count < 0) {
6531 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6532 dhd->bus_user_count = 0;
6533 ret = -1;
6534 goto exit;
6535 }
6536
6537 if (dhd->bus_user_count == 0) {
6538 /* Last user, stop the bus and turn Off WL_REG */
6539	 DHD_ERROR(("%s(): There are no owners left, turn off WL_REG & stop the bus\r\n",
6540 __FUNCTION__));
6541#ifdef PROP_TXSTATUS
6542 if (dhd->pub.wlfc_enabled) {
6543 dhd_wlfc_deinit(&dhd->pub);
6544 }
6545#endif /* PROP_TXSTATUS */
6546#ifdef PNO_SUPPORT
6547 if (dhd->pub.pno_state) {
6548 dhd_pno_deinit(&dhd->pub);
6549 }
6550#endif /* PNO_SUPPORT */
6551#ifdef RTT_SUPPORT
6552 if (dhd->pub.rtt_state) {
6553 dhd_rtt_deinit(&dhd->pub);
6554 }
6555#endif /* RTT_SUPPORT */
6556 ret = dhd_bus_devreset(dhdp, TRUE);
6557 if (!ret) {
6558 dhd_bus_suspend(dhdp);
6559 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
6560 }
6561 } else {
6562	 DHD_ERROR(("%s(): Other owners using the bus, decreasing the count %d\r\n",
6563 __FUNCTION__, dhd->bus_user_count));
6564 }
6565exit:
6566 mutex_unlock(&dhd->bus_user_lock);
6567 return ret;
6568}
6569EXPORT_SYMBOL(dhd_bus_put);
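
/* Illustrative sketch (not part of the driver): hypothetical BT-stack usage
 * of the shared-bus reference counting above. The first dhd_bus_get() powers
 * WL_REG and downloads firmware; the last dhd_bus_put() reverses it; nested
 * get/put pairs only move the counter. The BT_MODULE owner value and both
 * function names below are assumptions for the example.
 */
#if 0 /* example only, never compiled */
static int bt_attach(wlan_bt_handle_t handle)
{
	int err = dhd_bus_get(handle, BT_MODULE);

	if (err)
		return err;
	/* ... use the SDIO bus ... */
	return 0;
}

static void bt_detach(wlan_bt_handle_t handle)
{
	dhd_bus_put(handle, BT_MODULE);
}
#endif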
6570
6571int
6572dhd_net_bus_get(struct net_device *dev)
6573{
6574 dhd_info_t *dhd = DHD_DEV_INFO(dev);
6575 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
6576}
6577
6578int
6579dhd_net_bus_put(struct net_device *dev)
6580{
6581 dhd_info_t *dhd = DHD_DEV_INFO(dev);
6582 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
6583}
6584
6585/*
6586 * Function to enable the Bus Clock
6587 * Returns BCME_OK on success and BCME_xxx on failure
6588 *
6589 * This function must be called from a sleepable context
6590 */
6591int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
6592{
6593 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6594
6595 int ret;
6596
6597 dhd_os_sdlock(dhdp);
6598 /*
6599 * The second argument is TRUE, meaning we expect
6600 * the function to "wait" until the clocks are actually
6601 * available
6602 */
6603 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
6604 dhd_os_sdunlock(dhdp);
6605
6606 return ret;
6607}
6608EXPORT_SYMBOL(dhd_bus_clk_enable);
6609
6610/*
6611 * Function to disable the Bus Clock
6612 * Returns BCME_OK on success and BCME_xxx on failure
6613 *
6614 * This function must be called from a sleepable context
6615 */
6616int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
6617{
6618 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6619
6620 int ret;
6621
6622 dhd_os_sdlock(dhdp);
6623 /*
6624 * The second argument is TRUE, meaning we expect
6625 * the function to "wait" until the clocks are actually
6626 * disabled
6627 */
6628 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
6629 dhd_os_sdunlock(dhdp);
6630
6631 return ret;
6632}
6633EXPORT_SYMBOL(dhd_bus_clk_disable);
6634
6635/*
6636 * Function to reset bt_use_count counter to zero.
6637 *
6638 * This function must be called from a sleepable context
6639 */
6640void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
6641{
6642 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6643
6644 /* take the lock and reset bt use count */
6645 dhd_os_sdlock(dhdp);
6646 dhdsdio_reset_bt_use_count(dhdp->bus);
6647 dhd_os_sdunlock(dhdp);
6648}
6649EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
6650
6651void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
6652{
6653 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6654 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
6655
6656 dhdp->hang_was_sent = 0;
6657
6658 dhd_os_send_hang_message(&dhd->pub);
6659}
6660EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
6661
6662#endif /* BT_OVER_SDIO */
6663
6664static int
6665dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6666{
6667 return 0;
6668}
6669
6670static struct net_device_stats*
6671dhd_monitor_get_stats(struct net_device *dev)
6672{
6673 return &DHD_MON_DEV_STATS(dev);
6674}
6675
6676static const struct net_device_ops netdev_monitor_ops =
6677{
6678 .ndo_start_xmit = dhd_monitor_start,
6679 .ndo_get_stats = dhd_monitor_get_stats,
6680 .ndo_do_ioctl = dhd_monitor_ioctl
6681};
6682
6683static void
6684dhd_add_monitor_if(dhd_info_t *dhd)
6685{
6686 struct net_device *dev;
6687 char *devname;
6688 uint32 scan_suppress = FALSE;
6689 int ret = BCME_OK;
6690
6691 if (!dhd) {
6692 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6693 return;
6694 }
6695
6696 if (dhd->monitor_dev) {
6697	 DHD_ERROR(("%s: monitor i/f already exists\n", __FUNCTION__));
6698 return;
6699 }
6700
6701 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
6702 if (!dev) {
6703	 DHD_ERROR(("%s: alloc_etherdev failed\n", __FUNCTION__));
6704 return;
6705 }
6706
6707 devname = "radiotap";
6708
6709 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
6710
6711#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
6712#define ARPHRD_IEEE80211_PRISM 802
6713#endif
6714
6715#ifndef ARPHRD_IEEE80211_RADIOTAP
6716#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
6717#endif /* ARPHRD_IEEE80211_RADIOTAP */
6718
6719 dev->type = ARPHRD_IEEE80211_RADIOTAP;
6720
6721 dev->netdev_ops = &netdev_monitor_ops;
6722
6723 /* XXX: This is called from IOCTL path, in this case, rtnl_lock is already taken.
6724 * So, register_netdev() shouldn't be called. It leads to deadlock.
6725 * To avoid deadlock due to rtnl_lock(), register_netdevice() should be used.
6726 */
6727 if (register_netdevice(dev)) {
6728	 DHD_ERROR(("%s: register_netdevice failed for %s\n",
6729 __FUNCTION__, dev->name));
6730 free_netdev(dev);
6731 return;
6732 }
6733
6734 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6735#ifdef DHD_PCIE_RUNTIMEPM
6736 /* Disable RuntimePM in monitor mode */
6737 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
6738 DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__));
6739#endif /* DHD_PCIE_RUNTIMEPM */
6740 scan_suppress = TRUE;
6741 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
6742 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6743 sizeof(scan_suppress), NULL, 0, TRUE);
6744 if (ret < 0) {
6745 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
6746 }
6747 }
6748
6749 dhd->monitor_dev = dev;
6750}
6751
6752static void
6753dhd_del_monitor_if(dhd_info_t *dhd)
6754{
6755 int ret = BCME_OK;
6756 uint32 scan_suppress = FALSE;
6757
6758 if (!dhd) {
6759 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6760 return;
6761 }
6762
6763 if (!dhd->monitor_dev) {
6764 DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__));
6765 return;
6766 }
6767
6768 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6769#ifdef DHD_PCIE_RUNTIMEPM
6770 /* Enable RuntimePM */
6771 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
6772 DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__));
6773#endif /* DHD_PCIE_RUNTIMEPM */
6774 scan_suppress = FALSE;
6775 /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
6776 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6777 sizeof(scan_suppress), NULL, 0, TRUE);
6778 if (ret < 0) {
6779 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
6780 }
6781 }
6782
6783 if (dhd->monitor_dev) {
6784 if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
6785 free_netdev(dhd->monitor_dev);
6786 } else {
6787 if (rtnl_is_locked()) {
6788 unregister_netdevice(dhd->monitor_dev);
6789 } else {
6790 unregister_netdev(dhd->monitor_dev);
6791 }
6792 }
6793 dhd->monitor_dev = NULL;
6794 }
6795}
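
/* Illustrative sketch (not part of the driver): the rtnl-aware registration
 * rule both helpers above follow. From an IOCTL path rtnl_lock is already
 * held, so the *_netdevice() variants must be used; calling
 * register_netdev()/unregister_netdev() there would self-deadlock on
 * rtnl_lock(). The helper name is hypothetical.
 */
#if 0 /* example only, never compiled */
static void dhd_unregister_ndev_rtnl_aware(struct net_device *dev)
{
	if (rtnl_is_locked())
		unregister_netdevice(dev);
	else
		unregister_netdev(dev);
}
#endif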
6796
6797void
6798dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
6799{
6800 dhd_info_t *dhd = pub->info;
6801
6802 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
6803
6804 dhd_net_if_lock_local(dhd);
6805 if (!val) {
6806 /* Delete monitor */
6807 dhd_del_monitor_if(dhd);
6808 } else {
6809 /* Add monitor */
6810 dhd_add_monitor_if(dhd);
6811 }
6812 dhd->monitor_type = val;
6813 dhd_net_if_unlock_local(dhd);
6814}
6815#endif /* WL_MONITOR */
6816
6817#if defined(DHD_H2D_LOG_TIME_SYNC)
6818/*
6819 * Helper function:
6820 * Used for RTE console message time syncing with Host printk
6821 */
6822void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
6823{
6824 dhd_info_t *info = dhdp->info;
6825
6826	 /* Ideally the "state" should always be TRUE */
6827 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
6828 DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
6829 dhd_deferred_work_rte_log_time_sync,
6830 DHD_WQ_WORK_PRIORITY_LOW);
6831}
6832
6833void
6834dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
6835{
6836 dhd_info_t *dhd_info = handle;
6837 dhd_pub_t *dhd;
6838
6839 if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
6840 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
6841 return;
6842 }
6843
6844 if (!dhd_info) {
6845 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6846 return;
6847 }
6848
6849 dhd = &dhd_info->pub;
6850
6851 /*
6852 * Function to send IOVAR for console timesyncing
6853 * between Host and Dongle.
6854 * If the IOVAR fails,
6855 * 1. dhd_rte_time_sync_ms is set to 0 and
6856 * 2. HOST Dongle console time sync will *not* happen.
6857 */
6858 dhd_h2d_log_time_sync(dhd);
6859}
6860#endif /* DHD_H2D_LOG_TIME_SYNC */
6861
6862int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
6863{
6864 int bcmerror = BCME_OK;
6865 int buflen = 0;
6866 struct net_device *net;
6867
6868 net = dhd_idx2net(pub, ifidx);
6869 if (!net) {
6870 bcmerror = BCME_BADARG;
6871 /*
6872	 * A bad netdev pointer means the DHD can't communicate
6873	 * with higher layers, so just return from here
6874 */
6875 return bcmerror;
6876 }
6877
6878 /* check for local dhd ioctl and handle it */
6879 if (ioc->driver == DHD_IOCTL_MAGIC) {
6880 if (data_buf) {
6881 /* Return error if nvram size is too big */
6882 if (!bcmstricmp((char *)data_buf, "vars")) {
6883 DHD_ERROR(("%s: nvram len(%d) MAX_NVRAMBUF_SIZE(%d)\n",
6884 __FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE));
6885 if (ioc->len > MAX_NVRAMBUF_SIZE) {
6886 DHD_ERROR(("%s: nvram len(%d) > MAX_NVRAMBUF_SIZE(%d)\n",
6887 __FUNCTION__, ioc->len, MAX_NVRAMBUF_SIZE));
6888 bcmerror = BCME_BUFTOOLONG;
6889 goto done;
6890 }
6891 buflen = ioc->len;
6892 } else {
6893 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
6894 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
6895 }
6896 }
6897 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
6898 if (bcmerror)
6899 pub->bcmerror = bcmerror;
6900 goto done;
6901 }
6902
6903 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
6904 if (data_buf)
6905 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
6906
6907#ifndef BCMDBUS
6908 /* send to dongle (must be up, and wl). */
6909 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
6910 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
6911 int ret;
6912 if (atomic_read(&exit_in_progress)) {
6913 DHD_ERROR(("%s module exit in progress\n", __func__));
6914 bcmerror = BCME_DONGLE_DOWN;
6915 goto done;
6916 }
6917 ret = dhd_bus_start(pub);
6918 if (ret != 0) {
6919 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6920 bcmerror = BCME_DONGLE_DOWN;
6921 goto done;
6922 }
6923 } else {
6924 bcmerror = BCME_DONGLE_DOWN;
6925 goto done;
6926 }
6927 }
6928
6929 if (!pub->iswl) {
6930 bcmerror = BCME_DONGLE_DOWN;
6931 goto done;
6932 }
6933#endif /* !BCMDBUS */
6934
6935 /*
6936 * Flush the TX queue if required for proper message serialization:
6937 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
6938 * prevent M4 encryption and
6939 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
6940 * prevent disassoc frame being sent before WPS-DONE frame.
6941 */
6942 if (ioc->cmd == WLC_SET_KEY ||
6943 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6944 strncmp("wsec_key", data_buf, 9) == 0) ||
6945 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6946 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
6947 ioc->cmd == WLC_DISASSOC)
6948 dhd_wait_pend8021x(net);
6949
6950 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
6951 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
6952 bcmerror = BCME_UNSUPPORTED;
6953 goto done;
6954 }
6955
6956 /* XXX this typecast is BAD !!! */
6957 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
6958
6959done:
6960 dhd_check_hang(net, pub, bcmerror);
6961
6962 return bcmerror;
6963}
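
/* Illustrative sketch (not part of the driver): the userspace side that the
 * parsing above expects. A dhd_ioctl_t is handed through SIOCDEVPRIVATE; the
 * kernel copies the wl_ioctl_t-sized head first and then the trailing
 * 'driver' word to tell DHD ioctls (driver == DHD_IOCTL_MAGIC) apart from WL
 * ones. The wrapper name is hypothetical.
 */
#if 0 /* example only (userspace side), never compiled */
static int dhd_priv_ioctl(int sock, const char *ifname, dhd_ioctl_t *ioc)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)ioc;	/* ioc->driver set by the caller */
	return ioctl(sock, SIOCDEVPRIVATE, &ifr);
}
#endif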
6964
6965/* XXX For the moment, local ioctls will return BCM errors */
6966/* XXX Others return linux codes, need to be changed... */
6967/**
6968 * Called by the OS (optionally via a wrapper function).
6969 * @param net Linux per dongle instance
6970 * @param ifr Linux request structure
6971 * @param cmd e.g. SIOCETHTOOL
6972 */
6973static int
6974dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
6975{
6976 dhd_info_t *dhd = DHD_DEV_INFO(net);
6977 dhd_ioctl_t ioc;
6978 int bcmerror = 0;
6979 int ifidx;
6980 int ret;
6981 void *local_buf = NULL; /**< buffer in kernel space */
6982 void __user *ioc_buf_user = NULL; /**< buffer in user space */
6983 u16 buflen = 0;
6984
6985 if (atomic_read(&exit_in_progress)) {
6986 DHD_ERROR(("%s module exit in progress\n", __func__));
6987 bcmerror = BCME_DONGLE_DOWN;
6988 return OSL_ERROR(bcmerror);
6989 }
6990
6991 DHD_OS_WAKE_LOCK(&dhd->pub);
6992
6993 /* Interface up check for built-in type */
6994 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
6995 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
6996 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6997 return OSL_ERROR(BCME_NOTUP);
6998 }
6999
7000 ifidx = dhd_net2idx(dhd, net);
7001 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
7002
7003#if defined(WL_STATIC_IF)
7004 /* skip for static ndev when it is down */
7005 if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
7006 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7007 return -1;
7008 }
7009#endif /* WL_STATIC_IF */
7010
7011 if (ifidx == DHD_BAD_IF) {
7012 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
7013 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7014 return -1;
7015 }
7016
7017#if defined(WL_WIRELESS_EXT)
7018 /* linux wireless extensions */
7019 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
7020 /* may recurse, do NOT lock */
7021 ret = wl_iw_ioctl(net, ifr, cmd);
7022 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7023 return ret;
7024 }
7025#endif /* defined(WL_WIRELESS_EXT) */
7026
7027 if (cmd == SIOCETHTOOL) {
7028 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
7029 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7030 return ret;
7031 }
7032
7033 if (cmd == SIOCDEVPRIVATE+1) {
7034 ret = wl_android_priv_cmd(net, ifr);
7035 dhd_check_hang(net, &dhd->pub, ret);
7036 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7037 return ret;
7038 }
7039
7040 if (cmd != SIOCDEVPRIVATE) {
7041 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7042 return -EOPNOTSUPP;
7043 }
7044
7045 memset(&ioc, 0, sizeof(ioc));
7046
7047#ifdef CONFIG_COMPAT
7048#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7049 if (in_compat_syscall())
7050#else
7051 if (is_compat_task())
7052#endif /* LINUX_VER >= 4.6 */
7053 {
7054 compat_wl_ioctl_t compat_ioc;
7055 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
7056 bcmerror = BCME_BADADDR;
7057 goto done;
7058 }
7059 ioc.cmd = compat_ioc.cmd;
7060 if (ioc.cmd & WLC_SPEC_FLAG) {
7061 memset(&ioc, 0, sizeof(ioc));
7062 /* Copy the ioc control structure part of ioctl request */
7063 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
7064 bcmerror = BCME_BADADDR;
7065 goto done;
7066 }
7067 ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
7068
7069	 /* To differentiate between wl and dhd read 4 more bytes */
7070 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
7071 sizeof(uint)) != 0)) {
7072 bcmerror = BCME_BADADDR;
7073 goto done;
7074 }
7075
7076 } else { /* ioc.cmd & WLC_SPEC_FLAG */
7077 ioc.buf = compat_ptr(compat_ioc.buf);
7078 ioc.len = compat_ioc.len;
7079 ioc.set = compat_ioc.set;
7080 ioc.used = compat_ioc.used;
7081 ioc.needed = compat_ioc.needed;
7082	 /* To differentiate between wl and dhd read 4 more bytes */
7083 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
7084 sizeof(uint)) != 0)) {
7085 bcmerror = BCME_BADADDR;
7086 goto done;
7087 }
7088 } /* ioc.cmd & WLC_SPEC_FLAG */
7089 } else
7090#endif /* CONFIG_COMPAT */
7091 {
7092 /* Copy the ioc control structure part of ioctl request */
7093 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
7094 bcmerror = BCME_BADADDR;
7095 goto done;
7096 }
7097#ifdef CONFIG_COMPAT
7098	 ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure the flag is cleared when it isn't a compat task */
7099#endif
7100
7101	 /* To differentiate between wl and dhd read 4 more bytes */
7102 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
7103 sizeof(uint)) != 0)) {
7104 bcmerror = BCME_BADADDR;
7105 goto done;
7106 }
7107 }
7108
7109#ifndef CONFIG_VTS_SUPPORT
7110 if (!capable(CAP_NET_ADMIN)) {
7111 bcmerror = BCME_EPERM;
7112 goto done;
7113 }
7114#endif
7115
7116 /* Take backup of ioc.buf and restore later */
7117 ioc_buf_user = ioc.buf;
7118
7119 if (ioc.len > 0) {
7120 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
7121 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
7122 bcmerror = BCME_NOMEM;
7123 goto done;
7124 }
7125
7126 if (copy_from_user(local_buf, ioc.buf, buflen)) {
7127 bcmerror = BCME_BADADDR;
7128 goto done;
7129 }
7130
7131 *((char *)local_buf + buflen) = '\0';
7132
7133	 /* On some platforms, accessing the userspace memory
7134	 * behind ioc.buf causes a kernel panic, so to avoid that
7135	 * make ioc.buf point to the kernel-space buffer local_buf
7136 */
7137 ioc.buf = local_buf;
7138 }
7139
7140 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7141 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
7142 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
7143 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
7144 bcmerror = BCME_DONGLE_DOWN;
7145 goto done;
7146 }
7147
7148 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
7149
7150 /* Restore back userspace pointer to ioc.buf */
7151 ioc.buf = ioc_buf_user;
7152
7153 if (!bcmerror && buflen && local_buf && ioc.buf) {
7154 if (copy_to_user(ioc.buf, local_buf, buflen))
7155 bcmerror = -EFAULT;
7156 }
7157
7158done:
7159 if (local_buf)
7160 MFREE(dhd->pub.osh, local_buf, buflen+1);
7161
7162 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7163
7164 return OSL_ERROR(bcmerror);
7165}
7166
7167#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
7168/* Flags to indicate whether we distinguish the power-off policy when
7169 * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
7170 */
7171int trigger_deep_sleep = 0;
7172#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
7173
7174#ifdef FIX_CPU_MIN_CLOCK
7175static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
7176{
7177 if (dhd) {
7178 mutex_init(&dhd->cpufreq_fix);
7179 dhd->cpufreq_fix_status = FALSE;
7180 }
7181 return 0;
7182}
7183
7184static void dhd_fix_cpu_freq(dhd_info_t *dhd)
7185{
7186 mutex_lock(&dhd->cpufreq_fix);
7187 if (dhd && !dhd->cpufreq_fix_status) {
7188 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
7189#ifdef FIX_BUS_MIN_CLOCK
7190 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
7191#endif /* FIX_BUS_MIN_CLOCK */
7192 DHD_ERROR(("pm_qos_add_requests called\n"));
7193
7194 dhd->cpufreq_fix_status = TRUE;
7195 }
7196 mutex_unlock(&dhd->cpufreq_fix);
7197}
7198
7199static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
7200{
7201	 mutex_lock(&dhd->cpufreq_fix);
7202 if (dhd && dhd->cpufreq_fix_status != TRUE) {
7203 mutex_unlock(&dhd->cpufreq_fix);
7204 return;
7205 }
7206
7207 pm_qos_remove_request(&dhd->dhd_cpu_qos);
7208#ifdef FIX_BUS_MIN_CLOCK
7209 pm_qos_remove_request(&dhd->dhd_bus_qos);
7210#endif /* FIX_BUS_MIN_CLOCK */
7211	 DHD_ERROR(("pm_qos_remove_requests called\n"));
7212
7213 dhd->cpufreq_fix_status = FALSE;
7214 mutex_unlock(&dhd->cpufreq_fix);
7215}
7216#endif /* FIX_CPU_MIN_CLOCK */
7217
7218#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7219static int
7220dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
7221{
7222 int error;
7223 dhd_info_t *dhd = DHD_DEV_INFO(net);
7224
7225 if (atomic_read(&dhd->pub.block_bus))
7226 return -EHOSTDOWN;
7227
7228 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
7229 return BCME_ERROR;
7230
7231 error = dhd_ioctl_entry(net, ifr, cmd);
7232
7233 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7234 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7235
7236 return error;
7237}
7238#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7239
7240static int
7241dhd_stop(struct net_device *net)
7242{
7243 int ifidx = 0;
7244 bool skip_reset = false;
7245#ifdef WL_CFG80211
7246 unsigned long flags = 0;
7247#ifdef WL_STATIC_IF
7248 struct bcm_cfg80211 *cfg = wl_get_cfg(net);
7249#endif /* WL_STATIC_IF */
7250#endif /* WL_CFG80211 */
7251 dhd_info_t *dhd = DHD_DEV_INFO(net);
7252 DHD_OS_WAKE_LOCK(&dhd->pub);
7253 WL_MSG(net->name, "Enter\n");
7254 dhd->pub.rxcnt_timeout = 0;
7255 dhd->pub.txcnt_timeout = 0;
7256
7257#ifdef BCMPCIE
7258 dhd->pub.d3ackcnt_timeout = 0;
7259#endif /* BCMPCIE */
7260
7261 mutex_lock(&dhd->pub.ndev_op_sync);
7262 if (dhd->pub.up == 0) {
7263 goto exit;
7264 }
7265#if defined(DHD_HANG_SEND_UP_TEST)
7266 if (dhd->pub.req_hang_type) {
7267 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
7268 __FUNCTION__, dhd->pub.req_hang_type));
7269 dhd->pub.req_hang_type = 0;
7270 }
7271#endif /* DHD_HANG_SEND_UP_TEST */
7272
7273#ifdef FIX_CPU_MIN_CLOCK
7274 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
7275 dhd_rollback_cpu_freq(dhd);
7276#endif /* FIX_CPU_MIN_CLOCK */
7277
7278 ifidx = dhd_net2idx(dhd, net);
7279 BCM_REFERENCE(ifidx);
7280
7281 DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx));
7282
7283#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7284 /* If static if is operational, don't reset the chip */
7285 if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
7286 WL_MSG(net->name, "static if operational. skip chip reset.\n");
7287 skip_reset = true;
7288 wl_cfg80211_sta_ifdown(net);
7289 goto exit;
7290 }
7291#endif /* WL_STATIC_IF && WL_CFG80211 */
7292 if (dhd->pub.skip_dhd_stop) {
7293 WL_MSG(net->name, "skip chip reset.\n");
7294 skip_reset = true;
7295#if defined(WL_CFG80211)
7296 wl_cfg80211_sta_ifdown(net);
7297#endif /* WL_CFG80211 */
7298 goto exit;
7299 }
7300
7301#ifdef WL_CFG80211
7302 if (ifidx == 0) {
7303 dhd_if_t *ifp;
7304 wl_cfg80211_down(net);
7305
7306 DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
7307#ifdef WL_CFG80211
7308 /* Disable Runtime PM before interface down */
7309 DHD_STOP_RPM_TIMER(&dhd->pub);
7310
7311 DHD_UP_LOCK(&dhd->pub.up_lock, flags);
7312 dhd->pub.up = 0;
7313 DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
7314#else
7315 dhd->pub.up = 0;
7316#endif /* WL_CFG80211 */
7317
7318 ifp = dhd->iflist[0];
7319 /*
7320 * For CFG80211: Clean up all the left over virtual interfaces
7321 * when the primary Interface is brought down. [ifconfig wlan0 down]
7322 */
7323 if (!dhd_download_fw_on_driverload) {
7324 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
7325 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
7326 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7327 int i;
7328#ifdef DHD_4WAYM4_FAIL_DISCONNECT
7329 dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
7330#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
7331#ifdef DHD_PKTDUMP_ROAM
7332 dhd_dump_pkt_clear(&dhd->pub);
7333#endif /* DHD_PKTDUMP_ROAM */
7334
7335 dhd_net_if_lock_local(dhd);
7336 for (i = 1; i < DHD_MAX_IFS; i++)
7337 dhd_remove_if(&dhd->pub, i, FALSE);
7338
7339 if (ifp && ifp->net) {
7340 dhd_if_del_sta_list(ifp);
7341 }
7342#ifdef ARP_OFFLOAD_SUPPORT
7343 if (dhd_inetaddr_notifier_registered) {
7344 dhd_inetaddr_notifier_registered = FALSE;
7345 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
7346 }
7347#endif /* ARP_OFFLOAD_SUPPORT */
7348#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7349 if (dhd_inet6addr_notifier_registered) {
7350 dhd_inet6addr_notifier_registered = FALSE;
7351 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
7352 }
7353#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7354 dhd_net_if_unlock_local(dhd);
7355 }
7356#if 0
7357 // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
7358 cancel_work_sync(dhd->dhd_deferred_wq);
7359#endif
7360
7361#ifdef SHOW_LOGTRACE
7362 /* Wait till event logs work/kthread finishes */
7363 dhd_cancel_logtrace_process_sync(dhd);
7364#endif /* SHOW_LOGTRACE */
7365
7366#ifdef EWP_EDL
7367 cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
7368#endif
7369
7370#if defined(DHD_LB_RXP)
7371 __skb_queue_purge(&dhd->rx_pend_queue);
7372#endif /* DHD_LB_RXP */
7373
7374#if defined(DHD_LB_TXP)
7375 skb_queue_purge(&dhd->tx_pend_queue);
7376#endif /* DHD_LB_TXP */
7377 }
7378#ifdef DHDTCPACK_SUPPRESS
7379 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7380#endif /* DHDTCPACK_SUPPRESS */
7381#if defined(DHD_LB_RXP)
7382 if (ifp && ifp->net == dhd->rx_napi_netdev) {
7383 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
7384 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7385 skb_queue_purge(&dhd->rx_napi_queue);
7386 napi_disable(&dhd->rx_napi_struct);
7387 netif_napi_del(&dhd->rx_napi_struct);
7388 dhd->rx_napi_netdev = NULL;
7389 }
7390#endif /* DHD_LB_RXP */
7391 }
7392#endif /* WL_CFG80211 */
7393
7394 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
7395#ifdef DHD_SDTC_ETB_DUMP
7396 if (dhd->pub.sdtc_etb_inited) {
7397 dhd_sdtc_etb_deinit(&dhd->pub);
7398 }
7399#endif /* DHD_SDTC_ETB_DUMP */
7400
7401#ifdef PROP_TXSTATUS
7402 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
7403#endif
7404#ifdef SHOW_LOGTRACE
7405 if (!dhd_download_fw_on_driverload) {
7406 /* Release the skbs from queue for WLC_E_TRACE event */
7407 dhd_event_logtrace_flush_queue(&dhd->pub);
7408 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
7409 if (dhd->event_data.fmts) {
7410 MFREE(dhd->pub.osh, dhd->event_data.fmts,
7411 dhd->event_data.fmts_size);
7412 }
7413 if (dhd->event_data.raw_fmts) {
7414 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
7415 dhd->event_data.raw_fmts_size);
7416 }
7417 if (dhd->event_data.raw_sstr) {
7418 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
7419 dhd->event_data.raw_sstr_size);
7420 }
7421 if (dhd->event_data.rom_raw_sstr) {
7422 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
7423 dhd->event_data.rom_raw_sstr_size);
7424 }
7425 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
7426 }
7427 }
7428#endif /* SHOW_LOGTRACE */
7429#ifdef APF
7430 dhd_dev_apf_delete_filter(net);
7431#endif /* APF */
7432
7433 /* Stop the protocol module */
7434 dhd_prot_stop(&dhd->pub);
7435
7436 OLD_MOD_DEC_USE_COUNT;
7437exit:
7438 if (skip_reset == false) {
7439#if defined(WL_WIRELESS_EXT)
7440 if (ifidx == 0) {
7441 wl_iw_down(net, &dhd->pub);
7442 }
7443#endif /* defined(WL_WIRELESS_EXT) */
7444#ifdef WL_ESCAN
7445 if (ifidx == 0) {
7446 wl_escan_down(net, &dhd->pub);
7447 }
7448#endif /* WL_ESCAN */
7449 if (ifidx == 0 && !dhd_download_fw_on_driverload) {
7450#if defined(WLAN_ACCEL_BOOT)
7451 wl_android_wifi_accel_off(net, dhd->wl_accel_force_reg_on);
7452#else
7453#if defined(BT_OVER_SDIO)
7454 dhd_bus_put(&dhd->pub, WLAN_MODULE);
7455 wl_android_set_wifi_on_flag(FALSE);
7456#else
7457 wl_android_wifi_off(net, TRUE);
7458#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
7459#ifdef WL_EXT_IAPSTA
7460 wl_ext_iapsta_dettach_netdev(net, ifidx);
7461#endif /* WL_EXT_IAPSTA */
7462#ifdef WL_ESCAN
7463 wl_escan_event_dettach(net, &dhd->pub);
7464#endif /* WL_ESCAN */
7465 wl_ext_event_dettach_netdev(net, ifidx);
7466#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
7467#endif /* BT_OVER_SDIO */
7468#endif /* WLAN_ACCEL_BOOT */
7469 }
7470#ifdef SUPPORT_DEEP_SLEEP
7471 else {
7472			/* CSP#505233: Flags to indicate whether we distinguish
7473			 * the power-off policy when the user sets the menu
7474			 * "Keep Wi-Fi on during sleep" to "Never"
7475			 */
7476 if (trigger_deep_sleep) {
7477 dhd_deepsleep(net, 1);
7478 trigger_deep_sleep = 0;
7479 }
7480 }
7481#endif /* SUPPORT_DEEP_SLEEP */
7482 dhd->pub.hang_was_sent = 0;
7483 dhd->pub.hang_was_pending = 0;
7484
7485		/* Clear country spec for built-in type driver */
7486 if (!dhd_download_fw_on_driverload) {
7487 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
7488 dhd->pub.dhd_cspec.rev = 0;
7489 dhd->pub.dhd_cspec.ccode[0] = 0x00;
7490 }
7491
7492#ifdef BCMDBGFS
7493 dhd_dbgfs_remove();
7494#endif
7495 }
7496
7497 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7498
7499 /* Destroy wakelock */
7500 if (!dhd_download_fw_on_driverload &&
7501 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
7502 (skip_reset == false)) {
7503 DHD_OS_WAKE_LOCK_DESTROY(dhd);
7504 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
7505 }
7506 WL_MSG(net->name, "Exit\n");
7507
7508 mutex_unlock(&dhd->pub.ndev_op_sync);
7509 return 0;
7510}
7511
7512#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
7513 defined(USE_INITIAL_SHORT_DWELL_TIME))
7514extern bool g_first_broadcast_scan;
7515#endif /* WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
7516
7517#ifdef WL11U
7518static int dhd_interworking_enable(dhd_pub_t *dhd)
7519{
7520 uint32 enable = true;
7521 int ret = BCME_OK;
7522
7523 ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
7524 if (ret < 0) {
7525		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
7526 }
7527
7528 return ret;
7529}
7530#endif /* WL11U */
7531
7532#if defined(WLAN_ACCEL_BOOT)
7533void
7534dhd_verify_firmware_name_change(dhd_info_t *dhd)
7535{
7536 DHD_ERROR(("%s: firmware_path:%s dhd->fw_path=%s\n", __FUNCTION__,
7537 firmware_path, dhd->fw_path));
7538
7539 if (firmware_path[0] == '\0') {
7540 strlcpy(firmware_path, dhd->fw_path, sizeof(firmware_path));
7541 }
7542
7543	/*
7544	 * On phones, dhd->fw_path may have a chip version appended (e.g. bcmdhd_sta.bin_b0),
7545	 * so instead of a direct compare with the module param, search for substring presence.
7546	 */
7547 if (bcmstrnstr(dhd->fw_path, sizeof(dhd->fw_path), firmware_path, sizeof(firmware_path))
7548 == NULL) {
7549		DHD_ERROR(("%s: firmware path has changed, set force reg on\n", __FUNCTION__));
7550 dhd->wl_accel_force_reg_on = TRUE;
7551 }
7552}
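
/* Illustration (hypothetical values, comment only) of the substring check above:
 *   firmware_path = "bcmdhd_sta.bin"                 (module parameter)
 *   dhd->fw_path  = "/vendor/fw/bcmdhd_sta.bin_b0"   (chip revision appended)
 * bcmstrnstr() finds the module-param string inside dhd->fw_path, so
 * wl_accel_force_reg_on stays FALSE.
 */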
7553#endif /* WLAN_ACCEL_BOOT */
7554
7555int
7556dhd_open(struct net_device *net)
7557{
7558 dhd_info_t *dhd = DHD_DEV_INFO(net);
7559#ifdef TOE
7560 uint32 toe_ol;
7561#endif
7562 int ifidx;
7563 int32 ret = 0;
7564#if defined(OOB_INTR_ONLY)
7565 uint32 bus_type = -1;
7566 uint32 bus_num = -1;
7567 uint32 slot_num = -1;
7568 wifi_adapter_info_t *adapter = NULL;
7569#endif
7570#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
7571 int bytes_written = 0;
7572#endif
7573	int retry_init;
7574
7575#if defined(PREVENT_REOPEN_DURING_HANG)
7576 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
7577 if (dhd->pub.hang_was_sent == 1) {
7578 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
7579 /* Force to bring down WLAN interface in case dhd_stop() is not called
7580 * from the upper layer when HANG event is triggered.
7581 */
7582 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
7583 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
7584 dhd_stop(net);
7585 } else {
7586 return -1;
7587 }
7588 }
7589#endif /* PREVENT_REOPEN_DURING_HANG */
7590
7591 mutex_lock(&dhd->pub.ndev_op_sync);
7592
7593 if (dhd->pub.up == 1) {
7594 /* already up */
7595 WL_MSG(net->name, "Primary net_device is already up\n");
7596 mutex_unlock(&dhd->pub.ndev_op_sync);
7597 return BCME_OK;
7598 }
7599
7600 if (!dhd_download_fw_on_driverload) {
7601#if defined(WLAN_ACCEL_BOOT)
7602 if (dhd->wl_accel_boot_on_done == FALSE) {
7603 DHD_ERROR(("%s: WLAN accel boot not done yet\n", __FUNCTION__));
7604 mutex_unlock(&dhd->pub.ndev_op_sync);
7605 return -1;
7606 }
7607#endif /* WLAN_ACCEL_BOOT */
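		/* Module init (dhd_driver_init_done) can lag the first ifconfig-up:
		 * poll up to 3 times, sleeping 1 s between attempts, before failing.
		 */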
7608		for (retry_init = 1; ; retry_init++) {
7609 if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
7610 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
7611 if (retry_init > 3) {
7612 mutex_unlock(&dhd->pub.ndev_op_sync);
7613 return -1;
7614 } else {
7615 OSL_SLEEP(1000);
7616 }
7617 } else {
7618 break;
7619 }
7620 }
7621 }
7622
7623 WL_MSG(net->name, "Enter\n");
7624 DHD_MUTEX_LOCK();
7625 /* Init wakelock */
7626 if (!dhd_download_fw_on_driverload) {
7627 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
7628 DHD_OS_WAKE_LOCK_INIT(dhd);
7629 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7630 }
7631
7632#ifdef SHOW_LOGTRACE
7633 skb_queue_head_init(&dhd->evt_trace_queue);
7634
7635 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
7636 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
7637 if (ret == BCME_OK) {
7638 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7639 st_str_file_path, map_file_path);
7640 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7641 rom_st_str_file_path, rom_map_file_path);
7642 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
7643 }
7644 }
7645#endif /* SHOW_LOGTRACE */
7646 }
7647
7648 DHD_OS_WAKE_LOCK(&dhd->pub);
7649 dhd->pub.dongle_trap_occured = 0;
7650 dhd->pub.hang_was_sent = 0;
7651 dhd->pub.hang_was_pending = 0;
7652 dhd->pub.hang_reason = 0;
7653 dhd->pub.iovar_timeout_occured = 0;
7654#ifdef PCIE_FULL_DONGLE
7655 dhd->pub.d3ack_timeout_occured = 0;
7656 dhd->pub.livelock_occured = 0;
7657 dhd->pub.pktid_audit_failed = 0;
7658#endif /* PCIE_FULL_DONGLE */
7659 dhd->pub.iface_op_failed = 0;
7660 dhd->pub.scan_timeout_occurred = 0;
7661 dhd->pub.scan_busy_occurred = 0;
7662 dhd->pub.smmu_fault_occurred = 0;
7663#ifdef DHD_LOSSLESS_ROAMING
7664 dhd->pub.dequeue_prec_map = ALLPRIO;
7665#endif
7666#if 0
7667 /*
7668 * Force start if ifconfig_up gets called before START command
7669 * We keep WEXT's wl_control_wl_start to provide backward compatibility
7670 * This should be removed in the future
7671 */
7672 ret = wl_control_wl_start(net);
7673 if (ret != 0) {
7674 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7675 ret = -1;
7676 goto exit;
7677 }
7678
7679#endif
7680
7681 ifidx = dhd_net2idx(dhd, net);
7682 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
7683
7684 if (ifidx < 0) {
7685 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
7686 ret = -1;
7687 goto exit;
7688 }
7689
7690 if (!dhd->iflist[ifidx]) {
7691 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
7692 ret = -1;
7693 goto exit;
7694 }
7695
7696 DHD_ERROR(("%s: ######### called for ifidx=%d #########\n", __FUNCTION__, ifidx));
7697
7698#if defined(WLAN_ACCEL_BOOT)
7699 dhd_verify_firmware_name_change(dhd);
7700#endif /* WLAN_ACCEL_BOOT */
7701
7702 if (ifidx == 0) {
7703 atomic_set(&dhd->pend_8021x_cnt, 0);
7704 if (!dhd_download_fw_on_driverload) {
7705 DHD_ERROR(("\n%s\n", dhd_version));
7706 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
7707#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
7708 wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
7709#ifdef WL_ESCAN
7710 wl_escan_event_attach(net, &dhd->pub);
7711#endif /* WL_ESCAN */
7712#ifdef WL_EXT_IAPSTA
7713 wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
7714#endif /* WL_EXT_IAPSTA */
7715#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
7716#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
7717 g_first_broadcast_scan = TRUE;
7718#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
7719#ifdef SHOW_LOGTRACE
7720			/* dhd_cancel_logtrace_process_sync is called in dhd_stop
7721			 * for built-in models. The logtrace kthread must be started
7722			 * before wifi is turned on, because once wifi is on, EDL may
7723			 * become active at any moment, and FW event logs would be lost
7724			 * if the kthread were not running.
7725			 */
7726 if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
7727 goto exit;
7728 }
7729#endif /* SHOW_LOGTRACE */
7730#if defined(WLAN_ACCEL_BOOT)
7731 ret = wl_android_wifi_accel_on(net, dhd->wl_accel_force_reg_on);
7732 /* Enable wl_accel_force_reg_on if ON fails, else disable it */
7733 if (ret) {
7734 dhd->wl_accel_force_reg_on = TRUE;
7735 } else {
7736 dhd->wl_accel_force_reg_on = FALSE;
7737 }
7738#else
7739#if defined(BT_OVER_SDIO)
7740 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
7741 wl_android_set_wifi_on_flag(TRUE);
7742#else
7743 ret = wl_android_wifi_on(net);
7744#endif /* BT_OVER_SDIO */
7745#endif /* WLAN_ACCEL_BOOT */
7746 if (ret != 0) {
7747 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
7748 __FUNCTION__, ret));
7749 ret = -1;
7750 goto exit;
7751 }
7752 }
7753#ifdef SUPPORT_DEEP_SLEEP
7754 else {
7755			/* Flags to indicate whether we distinguish
7756			 * the power-off policy when the user sets the menu
7757			 * "Keep Wi-Fi on during sleep" to "Never"
7758			 */
7759 if (trigger_deep_sleep) {
7760#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
7761 g_first_broadcast_scan = TRUE;
7762#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
7763 dhd_deepsleep(net, 0);
7764 trigger_deep_sleep = 0;
7765 }
7766 }
7767#endif /* SUPPORT_DEEP_SLEEP */
7768#ifdef FIX_CPU_MIN_CLOCK
7769 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
7770 dhd_init_cpufreq_fix(dhd);
7771 dhd_fix_cpu_freq(dhd);
7772 }
7773#endif /* FIX_CPU_MIN_CLOCK */
7774#if defined(OOB_INTR_ONLY)
7775 if (dhd->pub.conf->dpc_cpucore >= 0) {
7776 dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
7777 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7778 if (adapter) {
7779 printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
7780 irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
7781 }
7782 }
7783#endif
7784
7785 if (dhd->pub.busstate != DHD_BUS_DATA) {
7786#ifdef BCMDBUS
7787 dhd_set_path(&dhd->pub);
7788 DHD_MUTEX_UNLOCK();
7789 wait_event_interruptible_timeout(dhd->adapter->status_event,
7790 wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
7791 msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
7792 DHD_MUTEX_LOCK();
7793 if ((ret = dbus_up(dhd->pub.bus)) != 0) {
7794 DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
7795 goto exit;
7796 } else {
7797 dhd->pub.busstate = DHD_BUS_DATA;
7798 }
7799 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7800 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7801 goto exit;
7802 }
7803#else
7804 /* try to bring up bus */
7805
7806#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7807 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
7808 ret = dhd_bus_start(&dhd->pub);
7809 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7810 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7811 }
7812#else
7813 ret = dhd_bus_start(&dhd->pub);
7814#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7815
7816 if (ret) {
7817 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7818 ret = -1;
7819 goto exit;
7820 }
7821#endif /* !BCMDBUS */
7822
7823 }
7824#ifdef WL_EXT_IAPSTA
7825 wl_ext_iapsta_attach_name(net, ifidx);
7826#endif
7827
7828#ifdef BT_OVER_SDIO
7829 if (dhd->pub.is_bt_recovery_required) {
7830 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
7831 bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
7832 }
7833 dhd->pub.is_bt_recovery_required = FALSE;
7834#endif
7835
7836 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
7837 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7838
7839#ifdef TOE
7840 /* Get current TOE mode from dongle */
7841 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
7842 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
7843 } else {
7844 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
7845 }
7846#endif /* TOE */
7847
7848#ifdef ENABLE_DHD_GRO
7849 dhd->iflist[ifidx]->net->features |= NETIF_F_GRO;
7850#endif /* ENABLE_DHD_GRO */
7851 netdev_update_features(net);
7852
7853#if defined(DHD_LB_RXP)
7854 __skb_queue_head_init(&dhd->rx_pend_queue);
7855 if (dhd->rx_napi_netdev == NULL) {
7856 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
7857 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
7858 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
7859 dhd_napi_poll, dhd_napi_weight);
7860 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
7861 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7862 napi_enable(&dhd->rx_napi_struct);
7863 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
7864 skb_queue_head_init(&dhd->rx_napi_queue);
7865 __skb_queue_head_init(&dhd->rx_process_queue);
7866 } /* rx_napi_netdev == NULL */
7867#endif /* DHD_LB_RXP */
7868
7869#if defined(DHD_LB_TXP)
7870 /* Use the variant that uses locks */
7871 skb_queue_head_init(&dhd->tx_pend_queue);
7872#endif /* DHD_LB_TXP */
7873
7874#ifdef DHD_PM_OVERRIDE
7875 g_pm_override = FALSE;
7876#endif /* DHD_PM_OVERRIDE */
7877#if defined(WL_CFG80211)
7878 if (unlikely(wl_cfg80211_up(net))) {
7879 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
7880 ret = -1;
7881 goto exit;
7882 }
7883 if (!dhd_download_fw_on_driverload) {
7884#ifdef ARP_OFFLOAD_SUPPORT
7885 dhd->pend_ipaddr = 0;
7886 if (!dhd_inetaddr_notifier_registered) {
7887 dhd_inetaddr_notifier_registered = TRUE;
7888 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7889 }
7890#endif /* ARP_OFFLOAD_SUPPORT */
7891#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7892 if (!dhd_inet6addr_notifier_registered) {
7893 dhd_inet6addr_notifier_registered = TRUE;
7894 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7895 }
7896#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7897 }
7898
7899#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
7900 dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
7901#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
7902#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
7903 dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
7904#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
7905#if defined(NUM_SCB_MAX_PROBE)
7906 dhd_set_scb_probe(&dhd->pub);
7907#endif /* NUM_SCB_MAX_PROBE */
7908#endif /* WL_CFG80211 */
7909#if defined(WL_WIRELESS_EXT)
7910 if (unlikely(wl_iw_up(net, &dhd->pub))) {
7911 DHD_ERROR(("%s: failed to bring up wext\n", __FUNCTION__));
7912 ret = -1;
7913 goto exit;
7914 }
7915#endif
7916#ifdef WL_ESCAN
7917 if (unlikely(wl_escan_up(net, &dhd->pub))) {
7918 DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
7919 ret = -1;
7920 goto exit;
7921 }
7922#endif /* WL_ESCAN */
7923#if defined(ISAM_PREINIT)
7924 if (!dhd_download_fw_on_driverload) {
7925 if (dhd->pub.conf) {
7926 wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written);
7927 wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written);
7928 wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written);
7929 }
7930 }
7931#endif
7932 }
7933
7934 dhd->pub.up = 1;
7935 DHD_START_RPM_TIMER(&dhd->pub);
7936
7937 if (wl_event_enable) {
7938 /* For wl utility to receive events */
7939 dhd->pub.wl_event_enabled = true;
7940 } else {
7941 dhd->pub.wl_event_enabled = false;
7942 }
7943
7944 if (logtrace_pkt_sendup) {
7945		/* For any daemon to receive logtrace */
7946 dhd->pub.logtrace_pkt_sendup = true;
7947 } else {
7948 dhd->pub.logtrace_pkt_sendup = false;
7949 }
7950
7951 OLD_MOD_INC_USE_COUNT;
7952
7953#ifdef BCMDBGFS
7954 dhd_dbgfs_init(&dhd->pub);
7955#endif
7956
7957exit:
7958 mutex_unlock(&dhd->pub.ndev_op_sync);
7959 if (ret) {
7960 dhd_stop(net);
7961 }
7962
7963 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7964 DHD_MUTEX_UNLOCK();
7965
7966 WL_MSG(net->name, "Exit ret=%d\n", ret);
7967 return ret;
7968}
7969
7970/*
7971 * ndo_open handler for primary ndev
7972 */
7973static int
7974dhd_pri_open(struct net_device *net)
7975{
7976 s32 ret;
7977
7978 ret = dhd_open(net);
7979 if (unlikely(ret)) {
7980 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
7981 return ret;
7982 }
7983
7984 /* Allow transmit calls */
7985 dhd_tx_start_queues(net);
7986 WL_MSG(net->name, "tx queue started\n");
7987 return ret;
7988}
7989
7990/*
7991 * ndo_stop handler for primary ndev
7992 */
7993static int
7994dhd_pri_stop(struct net_device *net)
7995{
7996 s32 ret;
7997
7998 /* Set state and stop OS transmissions */
7999 dhd_tx_stop_queues(net);
8000 WL_MSG(net->name, "tx queue stopped\n");
8001
8002 ret = dhd_stop(net);
8003 if (unlikely(ret)) {
8004 DHD_ERROR(("dhd_stop failed: %d\n", ret));
8005 return ret;
8006 }
8007
8008 return ret;
8009}
8010
8011#ifdef PCIE_INB_DW
8012bool
8013dhd_check_cfg_in_progress(dhd_pub_t *dhdp)
8014{
8015#if defined(WL_CFG80211)
8016 return wl_cfg80211_check_in_progress(dhd_linux_get_primary_netdev(dhdp));
8017#endif /* WL_CFG80211 */
8018 return FALSE;
8019}
8020#endif
8021
8022#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
8023/*
8024 * For static I/Fs, the firmware interface init
8025 * is done from the IFF_UP context.
8026 */
8027static int
8028dhd_static_if_open(struct net_device *net)
8029{
8030 s32 ret = 0;
8031 struct bcm_cfg80211 *cfg;
8032 struct net_device *primary_netdev = NULL;
8033
8034 cfg = wl_get_cfg(net);
8035 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
8036
8037 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
8038		DHD_TRACE(("non-static interface (%s), do nothing\n", net->name));
8039 ret = BCME_OK;
8040 goto done;
8041 }
8042
8043 WL_MSG(net->name, "Enter\n");
8044 /* Ensure fw is initialized. If it is already initialized,
8045 * dhd_open will return success.
8046 */
8047 ret = dhd_open(primary_netdev);
8048 if (unlikely(ret)) {
8049 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
8050 goto done;
8051 }
8052
8053 ret = wl_cfg80211_static_if_open(net);
8054 if (ret == BCME_OK) {
8055 /* Allow transmit calls */
8056 netif_start_queue(net);
8057 }
8058done:
8059 WL_MSG(net->name, "Exit ret=%d\n", ret);
8060 return ret;
8061}
8062
8063static int
8064dhd_static_if_stop(struct net_device *net)
8065{
8066 struct bcm_cfg80211 *cfg;
8067 struct net_device *primary_netdev = NULL;
8068 int ret = BCME_OK;
8069 dhd_info_t *dhd = DHD_DEV_INFO(net);
8070
8071 WL_MSG(net->name, "Enter\n");
8072
8073 cfg = wl_get_cfg(net);
8074 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
8075		DHD_TRACE(("non-static interface (%s), do nothing\n", net->name));
8076 return BCME_OK;
8077 }
8078 if (dhd->pub.skip_dhd_stop) {
8079 WL_MSG(net->name, "Exit skip stop\n");
8080 return BCME_OK;
8081 }
8082
8083 /* Ensure queue is disabled */
8084 netif_tx_disable(net);
8085
8086 ret = wl_cfg80211_static_if_close(net);
8087
8088 if (dhd->pub.up == 0) {
8089 /* If fw is down, return */
8090 DHD_ERROR(("fw down\n"));
8091 return BCME_OK;
8092 }
8093	/* If the STA interface is not operational, invoke dhd_stop from this
8094	 * context.
8095	 */
8096 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
8097 if (!(primary_netdev->flags & IFF_UP)) {
8098 ret = dhd_stop(primary_netdev);
8099 } else {
8100 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
8101 }
8102 WL_MSG(net->name, "Exit ret=%d\n", ret);
8103
8104 return ret;
8105}
8106#endif /* WL_STATIC_IF && WL_CFG80211 */
8107
8108int dhd_do_driver_init(struct net_device *net)
8109{
8110 dhd_info_t *dhd = NULL;
8111
8112 if (!net) {
8113 DHD_ERROR(("Primary Interface not initialized \n"));
8114 return -EINVAL;
8115 }
8116
8117 DHD_MUTEX_IS_LOCK_RETURN();
8118
8120 dhd = DHD_DEV_INFO(net);
8121
8122	/* If driver is already initialized, do nothing */
8124 if (dhd->pub.busstate == DHD_BUS_DATA) {
8125		DHD_TRACE(("Driver already initialized. Nothing to do\n"));
8126 return 0;
8127 }
8128
8129 if (dhd_open(net) < 0) {
8130 DHD_ERROR(("Driver Init Failed \n"));
8131 return -1;
8132 }
8133
8134 return 0;
8135}
8136
8137int
8138dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8139{
8140
8141#ifdef WL_CFG80211
8142 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8143 ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
8144 return BCME_OK;
8145#endif
8146
8147 /* handle IF event caused by wl commands, SoftAP, WEXT and
8148 * anything else. This has to be done asynchronously otherwise
8149 * DPC will be blocked (and iovars will timeout as DPC has no chance
8150 * to read the response back)
8151 */
8152 if (ifevent->ifidx > 0) {
8153 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8154 if (if_event == NULL) {
8155 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8156 MALLOCED(dhdinfo->pub.osh)));
8157 return BCME_NOMEM;
8158 }
8159
8160 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8161 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8162 strlcpy(if_event->name, name, sizeof(if_event->name));
8163 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
8164 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8165 }
8166
8167 return BCME_OK;
8168}
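
/* Illustrative only: rough shape of the deferred consumer for the IF-add
 * request queued above. The real handler, dhd_ifadd_event_handler(), is
 * defined elsewhere in this file; example_if_add_handler and its body are
 * hypothetical and compiled out.
 */
#if 0
static void
example_if_add_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhdinfo = handle;
	dhd_if_event_t *if_event = event_info;

	/* Runs in process context via the deferred work queue, so blocking
	 * calls (e.g. register_netdev) that must not run in DPC are safe here.
	 */

	/* ... create/register the OS interface for if_event->event.ifidx ... */

	MFREE(dhdinfo->pub.osh, if_event, sizeof(*if_event));
}
#endif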
8169
8170int
8171dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8172{
8173 dhd_if_event_t *if_event;
8174
8175#ifdef WL_CFG80211
8176 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8177 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8178 return BCME_OK;
8179#endif /* WL_CFG80211 */
8180
8181 /* handle IF event caused by wl commands, SoftAP, WEXT and
8182 * anything else
8183 */
8184 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8185 if (if_event == NULL) {
8186 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8187 MALLOCED(dhdinfo->pub.osh)));
8188 return BCME_NOMEM;
8189 }
8190 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8191 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8192 strlcpy(if_event->name, name, sizeof(if_event->name));
8193 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
8194 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8195
8196 return BCME_OK;
8197}
8198
8199int
8200dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8201{
8202#ifdef DHD_UPDATE_INTF_MAC
8203 dhd_if_event_t *if_event;
8204#endif /* DHD_UPDATE_INTF_MAC */
8205
8206#ifdef WL_CFG80211
8207 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8208 ifevent->ifidx, name, mac, ifevent->bssidx);
8209#endif /* WL_CFG80211 */
8210
8211#ifdef DHD_UPDATE_INTF_MAC
8212 /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
8213 * anything else
8214 */
8215 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8216 if (if_event == NULL) {
8217		DHD_ERROR(("dhd_event_ifchange: malloc failed for if_event, malloced %d bytes",
8218 MALLOCED(dhdinfo->pub.osh)));
8219 return BCME_NOMEM;
8220 }
8221 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8222 // construct a change event
8223 if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
8224 if_event->event.opcode = WLC_E_IF_CHANGE;
8225 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8226 strncpy(if_event->name, name, IFNAMSIZ);
8227 if_event->name[IFNAMSIZ - 1] = '\0';
8228 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
8229 dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8230#endif /* DHD_UPDATE_INTF_MAC */
8231
8232 return BCME_OK;
8233}
8234
8235#ifdef WL_NATOE
8236/* Handler to update natoe info and bind with new subscriptions if there is change in config */
8237static void
8238dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event)
8239{
8240 dhd_info_t *dhd = handle;
8241 wl_event_data_natoe_t *natoe = event_info;
8242	dhd_nfct_info_t *nfct;
8243
8244 if (event != DHD_WQ_WORK_NATOE_EVENT) {
8245 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8246 return;
8247 }
8248
8249 if (!dhd) {
8250 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8251 return;
8252	}
	nfct = dhd->pub.nfct; /* dereference only after the NULL check above */
8253 if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
8254 (natoe->start_port < natoe->end_port)) {
8255 /* Rebind subscriptions to start receiving notifications from groups */
8256 if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
8257 dhd_ct_close(nfct);
8258 }
8259 dhd_ct_send_dump_req(nfct);
8260 } else if (!natoe->natoe_active) {
8261 /* Rebind subscriptions to stop receiving notifications from groups */
8262 if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
8263 dhd_ct_close(nfct);
8264 }
8265 }
8266}
8267
8268/* When a NATOE enable/disable event is received, we have to bind to new NL subscriptions.
8269 * Schedule a workq to switch out of tasklet context, as the bind call may sleep in the handler.
8270 */
8271int
8272dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
8273{
8274 wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
8275
8276 if (dhd->nfct) {
8277 wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
8278 uint8 prev_enable = natoe->natoe_active;
8279
8280 spin_lock_bh(&dhd->nfct_lock);
8281 memcpy(natoe, event_data, sizeof(*event_data));
8282 spin_unlock_bh(&dhd->nfct_lock);
8283
8284 if (prev_enable != event_data->natoe_active) {
8285 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
8286 (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
8287 dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW);
8288 }
8289 return BCME_OK;
8290 }
8291 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
8292 return BCME_ERROR;
8293}
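
/* Note (from the comment above): this path can run in tasklet/softirq context,
 * which is why natoe_info is copied under spin_lock_bh() and the netlink
 * re-bind, which may sleep, is pushed out to the deferred work queue.
 */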
8294
8295/* Handler to send natoe ioctl to dongle */
8296static void
8297dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
8298{
8299 dhd_info_t *dhd = handle;
8300 dhd_ct_ioc_t *ct_ioc = event_info;
8301
8302 if (event != DHD_WQ_WORK_NATOE_IOCTL) {
8303 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8304 return;
8305 }
8306
8307 if (!dhd) {
8308 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8309 return;
8310 }
8311
8312 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
8313 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
8314 }
8315}
8316
8317/* When Netlink message contains port collision info, the info must be sent to dongle FW
8318 * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl
8319 */
8320void
8321dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
8322{
8323
8324 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
8325 DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
8326 DHD_WQ_WORK_PRIORITY_HIGH);
8327}
8328#endif /* WL_NATOE */
8329
8330/* This API maps ndev to ifp inclusive of static IFs */
8331static dhd_if_t *
8332dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
8333{
8334 dhd_if_t *ifp = NULL;
8335#ifdef WL_STATIC_IF
8336 u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
8337#else
8338 u32 ifidx = (DHD_MAX_IFS - 1);
8339#endif /* WL_STATIC_IF */
8340
8341 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
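	/* Scan from the highest slot down to 0 so that static interfaces,
	 * which live past DHD_MAX_IFS when WL_STATIC_IF is set, are matched too.
	 */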
8342 do {
8343 ifp = dhdinfo->iflist[ifidx];
8344 if (ifp && (ifp->net == ndev)) {
8345 DHD_TRACE(("match found for %s. ifidx:%d\n",
8346 ndev->name, ifidx));
8347 return ifp;
8348 }
8349 } while (ifidx--);
8350
8351 DHD_ERROR(("no entry found for %s\n", ndev->name));
8352 return NULL;
8353}
8354
8355bool
8356dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
8357{
8358 dhd_if_t *ifp = NULL;
8359
8360 if (!dhdp || !ndev) {
8361 DHD_ERROR(("wrong input\n"));
8362 ASSERT(0);
8363 return false;
8364 }
8365
8366 ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
8367 return (ifp && (ifp->static_if == true));
8368}
8369
8370#ifdef WL_STATIC_IF
8371/* In some cases, while registering an I/F, the actual ifidx, bssidx and dngl_name
8372 * are not yet known (e.g. the static i/f case). This function allows updating them
8373 * once they are known.
8374 */
8375s32
8376dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
8377 uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
8378{
8379 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
8380 dhd_if_t *ifp, *ifp_new;
8381 s32 cur_idx;
8382 dhd_dev_priv_t * dev_priv;
8383
8384 DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
8385 if_state, ifidx));
8386
8387 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
8388
8389 if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
8390 return -ENODEV;
8391 }
8392 cur_idx = ifp->idx;
8393
8394 if (if_state == NDEV_STATE_OS_IF_CREATED) {
8395 /* mark static if */
8396 ifp->static_if = TRUE;
8397 return BCME_OK;
8398 }
8399
8400 ifp_new = dhdinfo->iflist[ifidx];
8401 if (ifp_new && (ifp_new != ifp)) {
8402 /* There should be only one entry for a given ifidx. */
8403 DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
8404 ASSERT(0);
8405 dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
8406 net_os_send_hang_message(ifp->net);
8407 return -EINVAL;
8408 }
8409
8410 /* For static if delete case, cleanup the if before ifidx update */
8411 if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
8412 (if_state == NDEV_STATE_FW_IF_FAILED)) {
8413 dhd_cleanup_if(ifp->net);
8414 dev_priv = DHD_DEV_PRIV(ndev);
8415 dev_priv->ifidx = ifidx;
8416 }
8417
8418 /* update the iflist ifidx slot with cached info */
8419 dhdinfo->iflist[ifidx] = ifp;
8420 dhdinfo->iflist[cur_idx] = NULL;
8421
8422 /* update the values */
8423 ifp->idx = ifidx;
8424 ifp->bssidx = bssidx;
8425
8426 if (if_state == NDEV_STATE_FW_IF_CREATED) {
8427 dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
8428 /* initialize the dongle provided if name */
8429 if (dngl_name) {
8430 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8431 } else if (ndev->name[0] != '\0') {
8432 strncpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
8433 }
8434 if (mac != NULL && ifp->set_macaddress == FALSE) {
8435 /* To and fro locations have same size - ETHER_ADDR_LEN */
8436 (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
8437 }
8438#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
8439 wl_ext_event_attach_netdev(ndev, ifidx, bssidx);
8440#ifdef WL_ESCAN
8441 wl_escan_event_attach(ndev, dhdp);
8442#endif /* WL_ESCAN */
8443#ifdef WL_EXT_IAPSTA
8444 wl_ext_iapsta_ifadding(ndev, ifidx);
8445 wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx);
8446 wl_ext_iapsta_attach_name(ndev, ifidx);
8447#endif /* WL_EXT_IAPSTA */
8448 } else if (if_state == NDEV_STATE_FW_IF_DELETED) {
8449#ifdef WL_EXT_IAPSTA
8450 wl_ext_iapsta_dettach_netdev(ndev, cur_idx);
8451#endif /* WL_EXT_IAPSTA */
8452#ifdef WL_ESCAN
8453 wl_escan_event_dettach(ndev, dhdp);
8454#endif /* WL_ESCAN */
8455 wl_ext_event_dettach_netdev(ndev, cur_idx);
8456#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
8457 }
8458 DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
8459 ifidx, cur_idx, if_state));
8460 return BCME_OK;
8461}
8462#endif /* WL_STATIC_IF */
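
/* Example (hypothetical indices, comment only): a static ndev first parked in a
 * slot above DHD_MAX_IFS that the firmware later reports as ifidx 1 is moved by
 * dhd_update_iflist_info() so that dhdinfo->iflist[1] == ifp and the old slot is
 * NULLed; subsequent iflist[ifidx] lookups then resolve directly to this ifp.
 */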
8463
8464/* unregister and free the existing net_device interface (if any) in iflist and
8465 * allocate a new one. The slot is reused. This function does NOT register the
8466 * new interface with the Linux kernel; dhd_register_if does that job.
8467 */
8468struct net_device*
8469dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
8470 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
8471{
8472 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8473 dhd_if_t *ifp;
8474
8475 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
8476
8477 ifp = dhdinfo->iflist[ifidx];
8478
8479 if (ifp != NULL) {
8480 if (ifp->net != NULL) {
8481 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8482 __FUNCTION__, ifp->net->name, ifidx));
8483
8484 if (ifidx == 0) {
8485 /* For primary ifidx (0), there shouldn't be
8486 * any netdev present already.
8487 */
8488 DHD_ERROR(("Primary ifidx populated already\n"));
8489 ASSERT(0);
8490 return NULL;
8491 }
8492
8493 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
8494
8495 /* in unregister_netdev case, the interface gets freed by net->destructor
8496 * (which is set to free_netdev)
8497 */
8498#if defined(CONFIG_TIZEN)
8499 net_stat_tizen_unregister(ifp->net);
8500#endif /* CONFIG_TIZEN */
8501 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8502 free_netdev(ifp->net);
8503 } else {
8504 dhd_tx_stop_queues(ifp->net);
8505 if (need_rtnl_lock)
8506 unregister_netdev(ifp->net);
8507 else
8508 unregister_netdevice(ifp->net);
8509 }
8510 ifp->net = NULL;
8511 }
8512 } else {
8513 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
8514 if (ifp == NULL) {
8515 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
8516 return NULL;
8517 }
8518 }
8519
8520 memset(ifp, 0, sizeof(dhd_if_t));
8521 ifp->info = dhdinfo;
8522 ifp->idx = ifidx;
8523 ifp->bssidx = bssidx;
8524#ifdef DHD_MCAST_REGEN
8525 ifp->mcast_regen_bss_enable = FALSE;
8526#endif
8527	/* rx_pkt_chainable is set to TRUE at alloc time */
8528 ifp->rx_pkt_chainable = TRUE;
8529
8530 if (mac != NULL)
8531 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8532
8533 /* Allocate etherdev, including space for private structure */
8534#ifdef DHD_MQ
8535 if (enable_mq) {
8536 ifp->net = alloc_etherdev_mq(DHD_DEV_PRIV_SIZE, MQ_MAX_QUEUES);
8537 } else {
8538 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
8539 }
8540#else
8541 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
8542#endif /* DHD_MQ */
8543
8544 if (ifp->net == NULL) {
8545 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
8546 goto fail;
8547 }
8548
8549 /* Setup the dhd interface's netdevice private structure. */
8550 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
8551
8552 if (name && name[0]) {
8553 strlcpy(ifp->net->name, name, IFNAMSIZ);
8554 }
8555
8556#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
8557 /* as priv_destructor calls free_netdev, no need to set need_free_netdev */
8558 ifp->net->needs_free_netdev = 0;
8559#ifdef WL_CFG80211
8560 if (ifidx == 0)
8561 ifp->net->priv_destructor = free_netdev;
8562 else
8563 ifp->net->priv_destructor = dhd_netdev_free;
8564#else
8565 ifp->net->priv_destructor = free_netdev;
8566#endif /* WL_CFG80211 */
8567#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
8568#ifdef WL_CFG80211
8569 if (ifidx == 0)
8570 ifp->net->destructor = free_netdev;
8571 else
8572 ifp->net->destructor = dhd_netdev_free;
8573#else
8574 ifp->net->destructor = free_netdev;
8575#endif /* WL_CFG80211 */
8576#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9) */
8577 strlcpy(ifp->name, ifp->net->name, sizeof(ifp->name));
8578 dhdinfo->iflist[ifidx] = ifp;
8579
8580	/* initialize the dongle provided if name */
8581 if (dngl_name) {
8582 strlcpy(ifp->dngl_name, dngl_name, sizeof(ifp->dngl_name));
8583 } else if (name) {
8584 strlcpy(ifp->dngl_name, name, sizeof(ifp->dngl_name));
8585 }
8586
8587#ifdef PCIE_FULL_DONGLE
8588 /* Initialize STA info list */
8589 INIT_LIST_HEAD(&ifp->sta_list);
8590 DHD_IF_STA_LIST_LOCK_INIT(&ifp->sta_list_lock);
8591#endif /* PCIE_FULL_DONGLE */
8592
8593#ifdef DHD_L2_FILTER
8594 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
8595 ifp->parp_allnode = TRUE;
8596#endif /* DHD_L2_FILTER */
8597
8598 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8599
8600#ifdef DHD_4WAYM4_FAIL_DISCONNECT
8601 INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
8602#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
8603
8604#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
8605 ifp->recv_reassoc_evt = FALSE;
8606 ifp->post_roam_evt = FALSE;
8607#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
8608
8609#ifdef DHDTCPSYNC_FLOOD_BLK
8610 INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
8611 dhd_reset_tcpsync_info_by_ifp(ifp);
8612#endif /* DHDTCPSYNC_FLOOD_BLK */
8613
8614 return ifp->net;
8615
8616fail:
8617 if (ifp != NULL) {
8618 if (ifp->net != NULL) {
8619#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8620 if (ifp->net == dhdinfo->rx_napi_netdev) {
8621 napi_disable(&dhdinfo->rx_napi_struct);
8622 netif_napi_del(&dhdinfo->rx_napi_struct);
8623 skb_queue_purge(&dhdinfo->rx_napi_queue);
8624 dhdinfo->rx_napi_netdev = NULL;
8625 }
8626#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8627 dhd_dev_priv_clear(ifp->net);
8628 free_netdev(ifp->net);
8629 ifp->net = NULL;
8630 }
8631 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8632 }
8633 dhdinfo->iflist[ifidx] = NULL;
8634 return NULL;
8635}
8636
8637static void
8638dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
8639{
8640#ifdef PCIE_FULL_DONGLE
8641 s32 ifidx = 0;
8642 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8643#endif /* PCIE_FULL_DONGLE */
8644
8645 if (ifp != NULL) {
8646 if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
8647 DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
8648 ASSERT(0);
8649 return;
8650 }
8651#ifdef DHD_L2_FILTER
8652		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
8653			NULL, FALSE, dhdp->tickcnt);
8654		deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
8655 ifp->phnd_arp_table = NULL;
8656#endif /* DHD_L2_FILTER */
8657
8658 dhd_if_del_sta_list(ifp);
8659#ifdef PCIE_FULL_DONGLE
8660 /* Delete flowrings of virtual interface */
8661 ifidx = ifp->idx;
8662 if ((ifidx != 0) &&
8663 ((if_flow_lkup != NULL) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP))) {
8664 dhd_flow_rings_delete(dhdp, ifidx);
8665 }
8666#endif /* PCIE_FULL_DONGLE */
8667 }
8668}
8669
8670void
8671dhd_cleanup_if(struct net_device *net)
8672{
8673 dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
8674 dhd_pub_t *dhdp = &dhdinfo->pub;
8675 dhd_if_t *ifp;
8676
8677 ifp = dhd_get_ifp_by_ndev(dhdp, net);
8678 if (ifp) {
8679 if (ifp->idx >= DHD_MAX_IFS) {
8680 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp->idx));
8681 ASSERT(0);
8682 return;
8683 }
8684 dhd_cleanup_ifp(dhdp, ifp);
8685 }
8686}
8687
8688/* unregister and free the net_device interface associated with the indexed
8689 * slot; also free the slot memory and set the slot pointer to NULL
8690 */
8691#define DHD_TX_COMPLETION_TIMEOUT 5000
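/* timeout is in milliseconds; converted via msecs_to_jiffies() below */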
8692int
8693dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
8694{
8695 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8696 dhd_if_t *ifp;
8697 unsigned long flags;
8698 long timeout;
8699
8700 ifp = dhdinfo->iflist[ifidx];
8701
8702 if (ifp != NULL) {
8703#ifdef DHD_4WAYM4_FAIL_DISCONNECT
8704 cancel_delayed_work_sync(&ifp->m4state_work);
8705#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
8706
8707#ifdef DHDTCPSYNC_FLOOD_BLK
8708 cancel_work_sync(&ifp->blk_tsfl_work);
8709#endif /* DHDTCPSYNC_FLOOD_BLK */
8710
8711 dhd_cleanup_ifp(dhdpub, ifp);
8712#ifdef WL_STATIC_IF
8713 if (ifp->static_if) {
8714 /* static IF will be handled in detach */
8715 DHD_TRACE(("Skip del iface for static interface\n"));
8716 return BCME_OK;
8717 }
8718#endif /* WL_STATIC_IF */
8719 if (ifp->net != NULL) {
8720 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
8721
8722 DHD_GENERAL_LOCK(dhdpub, flags);
8723 ifp->del_in_progress = true;
8724 DHD_GENERAL_UNLOCK(dhdpub, flags);
8725
8726 /* If TX is in progress, hold the if del */
8727 if (DHD_IF_IS_TX_ACTIVE(ifp)) {
8728				DHD_INFO(("TX in progress. Wait for it to complete.\n"));
8729 timeout = wait_event_timeout(dhdpub->tx_completion_wait,
8730 ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
8731 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
8732 if (!timeout) {
8733 /* Tx completion timeout. Attempt proceeding ahead */
8734 DHD_ERROR(("Tx completion timed out!\n"));
8735 ASSERT(0);
8736 }
8737 } else {
8738 DHD_TRACE(("No outstanding TX!\n"));
8739 }
8740 dhdinfo->iflist[ifidx] = NULL;
8741 /* in unregister_netdev case, the interface gets freed by net->destructor
8742 * (which is set to free_netdev)
8743 */
8744 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8745 free_netdev(ifp->net);
8746 } else {
8747 netif_tx_disable(ifp->net);
8748
8749#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
8750 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
8751#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
8752 if (need_rtnl_lock)
8753 unregister_netdev(ifp->net);
8754 else
8755 unregister_netdevice(ifp->net);
8756#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
8757#ifdef WL_EXT_IAPSTA
8758 wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
8759#endif /* WL_EXT_IAPSTA */
8760#ifdef WL_ESCAN
8761 wl_escan_event_dettach(ifp->net, dhdpub);
8762#endif /* WL_ESCAN */
8763 wl_ext_event_dettach_netdev(ifp->net, ifidx);
8764#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
8765 }
8766 ifp->net = NULL;
8767 DHD_GENERAL_LOCK(dhdpub, flags);
8768 ifp->del_in_progress = false;
8769 DHD_GENERAL_UNLOCK(dhdpub, flags);
8770 }
8771 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8772
8773 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8774 ifp = NULL;
8775 }
8776
8777 return BCME_OK;
8778}
8779
8780static struct net_device_ops dhd_ops_pri = {
8781 .ndo_open = dhd_pri_open,
8782 .ndo_stop = dhd_pri_stop,
8783 .ndo_get_stats = dhd_get_stats,
8784#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8785 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8786 .ndo_start_xmit = dhd_start_xmit_wrapper,
8787#else
8788 .ndo_do_ioctl = dhd_ioctl_entry,
8789 .ndo_start_xmit = dhd_start_xmit,
8790#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8791 .ndo_set_mac_address = dhd_set_mac_address,
8792#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8793 .ndo_set_rx_mode = dhd_set_multicast_list,
8794#else
8795 .ndo_set_multicast_list = dhd_set_multicast_list,
8796#endif
8797#ifdef DHD_MQ
8798 .ndo_select_queue = dhd_select_queue
8799#endif
8800};
8801
8802static struct net_device_ops dhd_ops_virt = {
8803#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8804 .ndo_open = dhd_static_if_open,
8805 .ndo_stop = dhd_static_if_stop,
8806#endif
8807 .ndo_get_stats = dhd_get_stats,
8808#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8809 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8810 .ndo_start_xmit = dhd_start_xmit_wrapper,
8811#else
8812 .ndo_do_ioctl = dhd_ioctl_entry,
8813 .ndo_start_xmit = dhd_start_xmit,
8814#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8815 .ndo_set_mac_address = dhd_set_mac_address,
8816#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8817 .ndo_set_rx_mode = dhd_set_multicast_list,
8818#else
8819 .ndo_set_multicast_list = dhd_set_multicast_list,
8820#endif
8821};
8822
8823int
8824dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
8825 unsigned long buflen)
8826{
8827 loff_t wr_posn = *posn;
8828
8829 if (!fp || !buf || buflen == 0)
8830 return -1;
8831
8832 if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
8833 return -1;
8834
8835 *posn = wr_posn;
8836 return 0;
8837}
8838
8839#ifdef SHOW_LOGTRACE
8840int
8841dhd_os_read_file(void *file, char *buf, uint32 size)
8842{
8843 struct file *filep = (struct file *)file;
8844
8845 if (!file || !buf)
8846 return -1;
8847
8848 return vfs_read(filep, buf, size, &filep->f_pos);
8849}
8850
8851int
8852dhd_os_seek_file(void *file, int64 offset)
8853{
8854 struct file *filep = (struct file *)file;
8855 if (!file)
8856 return -1;
8857
8858 /* offset can be -ve */
8859 filep->f_pos = filep->f_pos + offset;
8860
8861 return 0;
8862}
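
/* Hedged usage sketch of the three file helpers above; fp stands for a
 * hypothetical struct file pointer obtained via filp_open(), and the
 * buffer/offset values are illustrative only (block compiled out).
 */
#if 0
	char buf[128];
	unsigned long posn = 0;

	(void)dhd_os_read_file(fp, buf, sizeof(buf));		/* advances fp->f_pos */
	(void)dhd_os_seek_file(fp, -(int64)sizeof(buf));	/* negative offset rewinds */
	(void)dhd_os_write_file_posn(fp, &posn, buf, sizeof(buf));
#endif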
8863
8864static int
8865dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
8866{
8867 struct file *filep = NULL;
8868 struct kstat stat;
8869 mm_segment_t fs;
8870 char *raw_fmts = NULL;
8871 int logstrs_size = 0;
8872 int error = 0;
8873
8874 if (control_logtrace != LOGTRACE_PARSED_FMT) {
8875 DHD_ERROR_NO_HW4(("%s : turned off logstr parsing\n", __FUNCTION__));
8876 return BCME_ERROR;
8877 }
8878
8879 fs = get_fs();
8880 set_fs(KERNEL_DS);
8881
8882 filep = filp_open(logstrs_path, O_RDONLY, 0);
8883
8884 if (IS_ERR(filep)) {
8885 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
8886 goto fail;
8887 }
8888 error = vfs_stat(logstrs_path, &stat);
8889 if (error) {
8890 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
8891 goto fail;
8892 }
8893 logstrs_size = (int) stat.size;
8894
8895 if (logstrs_size == 0) {
8896 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
8897 goto fail1;
8898 }
8899
8900 if (temp->raw_fmts != NULL) {
8901 raw_fmts = temp->raw_fmts; /* reuse already malloced raw_fmts */
8902 } else {
8903 raw_fmts = MALLOC(osh, logstrs_size);
8904 if (raw_fmts == NULL) {
8905 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
8906 goto fail;
8907 }
8908 }
8909
8910 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
8911 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
8912 goto fail;
8913 }
8914
8915 if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
8916 == BCME_OK) {
8917 filp_close(filep, NULL);
8918 set_fs(fs);
8919 return BCME_OK;
8920 }
8921
8922fail:
8923 if (raw_fmts) {
8924 MFREE(osh, raw_fmts, logstrs_size);
8925 }
8926 if (temp->fmts != NULL) {
8927 MFREE(osh, temp->fmts, temp->num_fmts * sizeof(char *));
8928 }
8929
8930fail1:
8931 if (!IS_ERR(filep))
8932 filp_close(filep, NULL);
8933
8934 set_fs(fs);
8935 temp->fmts = NULL;
8936 temp->raw_fmts = NULL;
8937
8938 return BCME_ERROR;
8939}
8940
8941static int
8942dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
8943 uint32 *rodata_end)
8944{
8945 struct file *filep = NULL;
8946 mm_segment_t fs;
8947 int err = BCME_ERROR;
8948
8949 if (fname == NULL) {
8950 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
8951 return BCME_ERROR;
8952 }
8953
8954 fs = get_fs();
8955 set_fs(KERNEL_DS);
8956
8957 filep = filp_open(fname, O_RDONLY, 0);
8958 if (IS_ERR(filep)) {
8959 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
8960 goto fail;
8961 }
8962
8963 if ((err = dhd_parse_map_file(osh, filep, ramstart,
8964 rodata_start, rodata_end)) < 0)
8965 goto fail;
8966
8967fail:
8968 if (!IS_ERR(filep))
8969 filp_close(filep, NULL);
8970
8971 set_fs(fs);
8972
8973 return err;
8974}
8975
8976static int
8977dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
8978{
8979 struct file *filep = NULL;
8980 mm_segment_t fs;
8981 char *raw_fmts = NULL;
8982 uint32 logstrs_size = 0;
8983 int error = 0;
8984 uint32 ramstart = 0;
8985 uint32 rodata_start = 0;
8986 uint32 rodata_end = 0;
8987 uint32 logfilebase = 0;
8988
8989 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
8990 if (error != BCME_OK) {
8991 DHD_ERROR(("readmap Error!! \n"));
8992		/* map read failed: clear the pointers so event log parsing is skipped */
8993 if (strstr(str_file, ram_file_str) != NULL) {
8994 temp->raw_sstr = NULL;
8995 } else if (strstr(str_file, rom_file_str) != NULL) {
8996 temp->rom_raw_sstr = NULL;
8997 }
8998 return error;
8999 }
9000 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9001 ramstart, rodata_start, rodata_end));
9002
9003 fs = get_fs();
9004 set_fs(KERNEL_DS);
9005
9006 filep = filp_open(str_file, O_RDONLY, 0);
9007 if (IS_ERR(filep)) {
9008 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
9009 goto fail;
9010 }
9011
9012 if (TRUE) {
9013 /* Full file size is huge. Just read required part */
9014 logstrs_size = rodata_end - rodata_start;
9015 logfilebase = rodata_start - ramstart;
9016 }
9017
9018 if (logstrs_size == 0) {
9019 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9020 goto fail1;
9021 }
9022
9023 if (strstr(str_file, ram_file_str) != NULL && temp->raw_sstr != NULL) {
9024 raw_fmts = temp->raw_sstr; /* reuse already malloced raw_fmts */
9025 } else if (strstr(str_file, rom_file_str) != NULL && temp->rom_raw_sstr != NULL) {
9026 raw_fmts = temp->rom_raw_sstr; /* reuse already malloced raw_fmts */
9027 } else {
9028 raw_fmts = MALLOC(osh, logstrs_size);
9029
9030 if (raw_fmts == NULL) {
9031 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
9032 goto fail;
9033 }
9034 }
9035
9036 if (TRUE) {
9037 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
9038 if (error < 0) {
9039 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
9040 goto fail;
9041 }
9042 }
9043
9044 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
9045 if (error != logstrs_size) {
9046 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
9047 goto fail;
9048 }
9049
9050 if (strstr(str_file, ram_file_str) != NULL) {
9051 temp->raw_sstr = raw_fmts;
9052 temp->raw_sstr_size = logstrs_size;
9053 temp->rodata_start = rodata_start;
9054 temp->rodata_end = rodata_end;
9055 } else if (strstr(str_file, rom_file_str) != NULL) {
9056 temp->rom_raw_sstr = raw_fmts;
9057 temp->rom_raw_sstr_size = logstrs_size;
9058 temp->rom_rodata_start = rodata_start;
9059 temp->rom_rodata_end = rodata_end;
9060 }
9061
9062 filp_close(filep, NULL);
9063 set_fs(fs);
9064
9065 return BCME_OK;
9066
9067fail:
9068 if (raw_fmts) {
9069 MFREE(osh, raw_fmts, logstrs_size);
9070 }
9071
9072fail1:
9073 if (!IS_ERR(filep))
9074 filp_close(filep, NULL);
9075
9076 set_fs(fs);
9077
9078 if (strstr(str_file, ram_file_str) != NULL) {
9079 temp->raw_sstr = NULL;
9080 } else if (strstr(str_file, rom_file_str) != NULL) {
9081 temp->rom_raw_sstr = NULL;
9082 }
9083
9084 return error;
9085} /* dhd_init_static_strs_array */
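
/* Worked example (hypothetical addresses) of the offset math above: with
 * ramstart = 0x180000, rodata_start = 0x1a0000 and rodata_end = 0x1a8000,
 * logstrs_size = 0x8000 bytes are read starting at file offset
 * logfilebase = 0x20000, i.e. only the rodata log-strings region of the
 * (much larger) image file is pulled in.
 */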
9086
9087#endif /* SHOW_LOGTRACE */
9088
9089#ifdef DHD_ERPOM
9090uint enable_erpom = 0;
9091module_param(enable_erpom, int, 0);
9092
9093int
9094dhd_wlan_power_off_handler(void *handler, unsigned char reason)
9095{
9096 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
9097 bool dongle_isolation = dhdp->dongle_isolation;
9098
9099 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
9100
9101 if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
9102#if defined(DHD_FW_COREDUMP)
9103 /* save core dump to a file */
9104 if (dhdp->memdump_enabled) {
9105#ifdef DHD_SSSR_DUMP
9106 DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
9107 dhdp->collect_sssr = TRUE;
9108#endif /* DHD_SSSR_DUMP */
9109 dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
9110 dhd_bus_mem_dump(dhdp);
9111 }
9112#endif /* DHD_FW_COREDUMP */
9113 }
9114
9115 /* pause data on all the interfaces */
9116 dhd_bus_stop_queue(dhdp->bus);
9117
9118 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
9119 dhdp->dongle_isolation = TRUE;
9120 dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
9121 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
9122 return 0;
9123}
9124
9125int
9126dhd_wlan_power_on_handler(void *handler, unsigned char reason)
9127{
9128 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
9129 bool dongle_isolation = dhdp->dongle_isolation;
9130
9131 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
9132 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
9133 dhdp->dongle_isolation = TRUE;
9134 dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
9135 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
9136 /* resume data on all the interfaces */
9137 dhd_bus_start_queue(dhdp->bus);
9138 return 0;
9139
9140}
9141
9142#endif /* DHD_ERPOM */
9143
9144#ifdef BCMDBUS
9145uint
9146dhd_get_rxsz(dhd_pub_t *pub)
9147{
9148 struct net_device *net = NULL;
9149 dhd_info_t *dhd = NULL;
9150 uint rxsz;
9151
9152 /* Assign rxsz for dbus_attach */
9153 dhd = pub->info;
9154 net = dhd->iflist[0]->net;
9155 net->hard_header_len = ETH_HLEN + pub->hdrlen;
9156 rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
9157
9158 return rxsz;
9159}
9160
9161void
9162dhd_set_path(dhd_pub_t *pub)
9163{
9164 dhd_info_t *dhd = NULL;
9165
9166 dhd = pub->info;
9167
9168 /* try to download image and nvram to the dongle */
9169 if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
9170 DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
9171 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
9172 dhd_bus_update_fw_nv_path(dhd->pub.bus,
9173 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
9174 }
9175}
9176#endif
9177
9178/** Called once for each hardware (dongle) instance that this DHD manages */
9179dhd_pub_t *
9180dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
9181#ifdef BCMDBUS
9182 , void *data
9183#endif
9184)
9185{
9186 dhd_info_t *dhd = NULL;
9187 struct net_device *net = NULL;
9188 char if_name[IFNAMSIZ] = {'\0'};
9189#ifdef SHOW_LOGTRACE
9190 int ret;
9191#endif /* SHOW_LOGTRACE */
9192#ifdef DHD_ERPOM
9193 pom_func_handler_t *pom_handler;
9194#endif /* DHD_ERPOM */
9195#if defined(BCMSDIO) || defined(BCMPCIE)
9196 uint32 bus_type = -1;
9197 uint32 bus_num = -1;
9198 uint32 slot_num = -1;
9199 wifi_adapter_info_t *adapter = NULL;
9200#elif defined(BCMDBUS)
9201 wifi_adapter_info_t *adapter = data;
9202#endif
9203#ifdef GET_CUSTOM_MAC_ENABLE
9204 char hw_ether[62];
9205#endif /* GET_CUSTOM_MAC_ENABLE */
9206
9207 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
9208 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9209
9210#ifdef PCIE_FULL_DONGLE
9211 ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
9212 ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
9213#endif /* PCIE_FULL_DONGLE */
9214
9215 /* will implement get_ids for DBUS later */
9216#if defined(BCMSDIO)
9217 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
9218#endif
9219#if defined(BCMSDIO) || defined(BCMPCIE)
9220 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
9221#endif
9222
9223 /* Allocate primary dhd_info */
9224 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
9225 if (dhd == NULL) {
9226 dhd = MALLOC(osh, sizeof(dhd_info_t));
9227 if (dhd == NULL) {
9228 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
9229 goto dhd_null_flag;
9230 }
9231 }
9232 memset(dhd, 0, sizeof(dhd_info_t));
9233 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
9234
9235 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
9236
9237 dhd->pub.osh = osh;
9238#ifdef DUMP_IOCTL_IOV_LIST
9239 dll_init(&(dhd->pub.dump_iovlist_head));
9240#endif /* DUMP_IOCTL_IOV_LIST */
9241
9242 dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
9243
9244 dhd->adapter = adapter;
9245 dhd->pub.adapter = (void *)adapter;
9246#ifdef BT_OVER_SDIO
9247 dhd->pub.is_bt_recovery_required = FALSE;
9248 mutex_init(&dhd->bus_user_lock);
9249#endif /* BT_OVER_SDIO */
9250
9251 g_dhd_pub = &dhd->pub;
9252
9253#ifdef DHD_DEBUG
9254 dll_init(&(dhd->pub.mw_list_head));
9255#endif /* DHD_DEBUG */
9256
9257#ifdef GET_CUSTOM_MAC_ENABLE
9258 wifi_platform_get_mac_addr(dhd->adapter, hw_ether, iface_name);
9259 bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
9260#endif /* GET_CUSTOM_MAC_ENABLE */
9261#ifdef CUSTOM_FORCE_NODFS_FLAG
9262 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
9263 dhd->pub.force_country_change = TRUE;
9264#endif /* CUSTOM_FORCE_NODFS_FLAG */
9265#ifdef CUSTOM_COUNTRY_CODE
9266 get_customized_country_code(dhd->adapter,
9267 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
9268 dhd->pub.dhd_cflags);
9269#endif /* CUSTOM_COUNTRY_CODE */
9270#ifndef BCMDBUS
9271 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
9272 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
9273#ifdef DHD_WET
9274 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
9275#endif /* DHD_WET */
9276 /* Initialize thread based operation and lock */
9277 sema_init(&dhd->sdsem, 1);
9278#endif /* !BCMDBUS */
9279 dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
9280
9281 /* Link to info module */
9282 dhd->pub.info = dhd;
9283
9284 /* Link to bus module */
9285 dhd->pub.bus = bus;
9286 dhd->pub.hdrlen = bus_hdrlen;
9287 dhd->pub.txoff = FALSE;
9288
9289	/* dhd_conf must be attached after linking dhd to dhd->pub.info,
9290	 * because dhd_detach will check whether .info is NULL.
9291	 */
9292 if (dhd_conf_attach(&dhd->pub) != 0) {
9293 DHD_ERROR(("dhd_conf_attach failed\n"));
9294 goto fail;
9295 }
9296#ifndef BCMDBUS
9297 dhd_conf_reset(&dhd->pub);
9298 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
9299 dhd_conf_preinit(&dhd->pub);
9300#endif /* !BCMDBUS */
9301
9302	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware
9303	 * name. This is indeed a hack, but we have to make it work properly until we have a
9304	 * better solution.
9305	 */
9306 dhd_update_fw_nv_path(dhd);
9307
9308 /* Set network interface name if it was provided as module parameter */
9309 if (iface_name[0]) {
9310 int len;
9311 char ch;
9312 strlcpy(if_name, iface_name, sizeof(if_name));
9313 len = strlen(if_name);
9314 ch = if_name[len - 1];
9315 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) {
9316 strcat(if_name, "%d");
9317 }
9318 }
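	/* Example of the rule above (illustrative): iface_name "wlan" has no
	 * trailing digit, so the template becomes "wlan%d" and the kernel picks
	 * the unit number (wlan0, wlan1, ...); "wlan0" is used verbatim.
	 */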
9319
9320 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
9321 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
9322 if (net == NULL) {
9323 goto fail;
9324 }
9325 mutex_init(&dhd->pub.ndev_op_sync);
9326
9327 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
9328#ifdef DHD_L2_FILTER
9329 /* initialize the l2_filter_cnt */
9330 dhd->pub.l2_filter_cnt = 0;
9331#endif
9332 net->netdev_ops = NULL;
9333
9334 mutex_init(&dhd->dhd_iovar_mutex);
9335 sema_init(&dhd->proto_sem, 1);
9336
9337#if defined(DHD_HANG_SEND_UP_TEST)
9338 dhd->pub.req_hang_type = 0;
9339#endif /* DHD_HANG_SEND_UP_TEST */
9340
9341#ifdef PROP_TXSTATUS
9342 spin_lock_init(&dhd->wlfc_spinlock);
9343
9344 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
9345 dhd->pub.plat_init = dhd_wlfc_plat_init;
9346 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
9347
9348#ifdef DHD_WLFC_THREAD
9349 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
9350 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
9351 if (IS_ERR(dhd->pub.wlfc_thread)) {
9352 DHD_ERROR(("create wlfc thread failed\n"));
9353 goto fail;
9354 } else {
9355 wake_up_process(dhd->pub.wlfc_thread);
9356 }
9357#endif /* DHD_WLFC_THREAD */
9358#endif /* PROP_TXSTATUS */
9359
9360 /* Initialize other structure content */
9361 /* XXX Some of this goes away, leftover from USB */
9362 /* XXX Some could also move to bus_init()? */
9363 init_waitqueue_head(&dhd->ioctl_resp_wait);
9364 init_waitqueue_head(&dhd->pub.tx_tput_test_wait);
9365 init_waitqueue_head(&dhd->d3ack_wait);
9366#ifdef PCIE_INB_DW
9367 init_waitqueue_head(&dhd->ds_exit_wait);
9368#endif /* PCIE_INB_DW */
9369 init_waitqueue_head(&dhd->ctrl_wait);
9370 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
9371 init_waitqueue_head(&dhd->dmaxfer_wait);
9372 init_waitqueue_head(&dhd->pub.tx_completion_wait);
9373 dhd->pub.dhd_bus_busy_state = 0;
9374 /* Initialize the spinlocks */
9375 spin_lock_init(&dhd->sdlock);
9376 spin_lock_init(&dhd->txqlock);
9377 spin_lock_init(&dhd->dhd_lock);
9378 spin_lock_init(&dhd->rxf_lock);
9379#ifdef WLTDLS
9380 spin_lock_init(&dhd->pub.tdls_lock);
9381#endif /* WLTDLS */
9382#if defined(RXFRAME_THREAD)
9383 dhd->rxthread_enabled = TRUE;
9384#endif /* defined(RXFRAME_THREAD) */
9385
9386#ifdef DHDTCPACK_SUPPRESS
9387 spin_lock_init(&dhd->tcpack_lock);
9388#endif /* DHDTCPACK_SUPPRESS */
9389
9390 /* Initialize Wakelock stuff */
9391 spin_lock_init(&dhd->wakelock_spinlock);
9392 spin_lock_init(&dhd->wakelock_evt_spinlock);
9393 DHD_OS_WAKE_LOCK_INIT(dhd);
9394 dhd->wakelock_counter = 0;
9395 /* wakelocks prevent a system from going into a low power state */
9396#ifdef CONFIG_HAS_WAKELOCK
9397	// terence 20161023: do not destroy wl_wifi when wlan is down, or a NULL pointer dereference will occur in dhd_ioctl_entry
9398 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
9399 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
9400#endif /* CONFIG_HAS_WAKELOCK */
9401
9402 mutex_init(&dhd->dhd_net_if_mutex);
9403 mutex_init(&dhd->dhd_suspend_mutex);
9404#if defined(PKT_FILTER_SUPPORT) && defined(APF)
9405 mutex_init(&dhd->dhd_apf_mutex);
9406#endif /* PKT_FILTER_SUPPORT && APF */
9407 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
9408
9409 /* Attach and link in the protocol */
9410 if (dhd_prot_attach(&dhd->pub) != 0) {
9411 DHD_ERROR(("dhd_prot_attach failed\n"));
9412 goto fail;
9413 }
9414 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
9415
9416#ifdef WL_CFG80211
9417 spin_lock_init(&dhd->pub.up_lock);
9418 /* Attach and link in the cfg80211 */
9419 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
9420 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9421 goto fail;
9422 }
9423
9424 dhd_monitor_init(&dhd->pub);
9425 dhd_state |= DHD_ATTACH_STATE_CFG80211;
9426#endif
9427
9428#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
9429 if (wl_ext_event_attach(net, &dhd->pub) != 0) {
9430 DHD_ERROR(("wl_ext_event_attach failed\n"));
9431 goto fail;
9432 }
9433#ifdef WL_ESCAN
9434 /* Attach and link in the escan */
9435 if (wl_escan_attach(net, &dhd->pub) != 0) {
9436 DHD_ERROR(("wl_escan_attach failed\n"));
9437 goto fail;
9438 }
9439#endif /* WL_ESCAN */
9440#ifdef WL_EXT_IAPSTA
9441 if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
9442 DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
9443 goto fail;
9444 }
9445#endif /* WL_EXT_IAPSTA */
9446#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
9447#if defined(WL_WIRELESS_EXT)
9448 /* Attach and link in the iw */
9449 if (wl_iw_attach(net, &dhd->pub) != 0) {
9450 DHD_ERROR(("wl_iw_attach failed\n"));
9451 goto fail;
9452 }
9453 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
9454#endif /* defined(WL_WIRELESS_EXT) */
9455
9456#ifdef SHOW_LOGTRACE
9457 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
9458 if (ret == BCME_OK) {
9459 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
9460 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
9461 rom_map_file_path);
9462 dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
9463 }
9464#endif /* SHOW_LOGTRACE */
9465
9466 /* attach debug if support */
9467 if (dhd_os_dbg_attach(&dhd->pub)) {
9468 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
9469 goto fail;
9470 }
9471#ifdef DEBUGABILITY
9472
9473#ifdef DBG_PKT_MON
9474 dhd->pub.dbg->pkt_mon_lock = osl_spin_lock_init(dhd->pub.osh);
9475#ifdef DBG_PKT_MON_INIT_DEFAULT
9476 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
9477#endif /* DBG_PKT_MON_INIT_DEFAULT */
9478#endif /* DBG_PKT_MON */
9479
9480#endif /* DEBUGABILITY */
9481
9482#ifdef DHD_MEM_STATS
9483 dhd->pub.mem_stats_lock = osl_spin_lock_init(dhd->pub.osh);
9484 dhd->pub.txpath_mem = 0;
9485 dhd->pub.rxpath_mem = 0;
9486#endif /* DHD_MEM_STATS */
9487
9488#ifdef DHD_STATUS_LOGGING
9489 dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
9490 MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
9491 if (dhd->pub.statlog == NULL) {
9492 DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
9493 }
9494#endif /* DHD_STATUS_LOGGING */
9495
9496#ifdef DHD_LOG_DUMP
9497 dhd_log_dump_init(&dhd->pub);
9498#endif /* DHD_LOG_DUMP */
9499#ifdef DHD_PKTDUMP_ROAM
9500 dhd_dump_pkt_init(&dhd->pub);
9501#endif /* DHD_PKTDUMP_ROAM */
9502#ifdef DHD_PKT_LOGGING
9503 dhd_os_attach_pktlog(&dhd->pub);
9504#endif /* DHD_PKT_LOGGING */
9505
9506#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9507 dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
9508 if (dhd->pub.hang_info == NULL) {
9509 DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
9510 }
9511#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
9512 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
9513		DHD_ERROR(("%s: Failed to initialize pool of %u stations\n", __FUNCTION__, DHD_MAX_STA));
9514 goto fail;
9515 }
9516
9517#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9518 dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
9519 if (!dhd->tx_wq) {
9520 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
9521 goto fail;
9522 }
9523 dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
9524 if (!dhd->rx_wq) {
9525 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
9526 destroy_workqueue(dhd->tx_wq);
9527 dhd->tx_wq = NULL;
9528 goto fail;
9529 }
9530#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9531
9532#ifndef BCMDBUS
9533 /* Set up the watchdog timer */
9534 init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
9535 dhd->default_wd_interval = dhd_watchdog_ms;
9536
9537 if (dhd_watchdog_prio >= 0) {
9538 /* Initialize watchdog thread */
9539 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
9540 if (dhd->thr_wdt_ctl.thr_pid < 0) {
9541 goto fail;
9542 }
9543
9544 } else {
9545 dhd->thr_wdt_ctl.thr_pid = -1;
9546 }
9547
9548#ifdef DHD_PCIE_RUNTIMEPM
9549	/* Set up the runtime PM idle-count timer */
9550 init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
9551 dhd->rpm_timer_valid = FALSE;
9552
9553 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
9554 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
9555 if (dhd->thr_rpm_ctl.thr_pid < 0) {
9556 goto fail;
9557 }
9558#endif /* DHD_PCIE_RUNTIMEPM */
9559
9560#ifdef SHOW_LOGTRACE
9561 skb_queue_head_init(&dhd->evt_trace_queue);
9562
9563 /* Create ring proc entries */
9564 dhd_dbg_ring_proc_create(&dhd->pub);
9565#endif /* SHOW_LOGTRACE */
9566
9567 /* Set up the bottom half handler */
9568 if (dhd_dpc_prio >= 0) {
9569 /* Initialize DPC thread */
9570 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
9571 if (dhd->thr_dpc_ctl.thr_pid < 0) {
9572 goto fail;
9573 }
9574 } else {
9575 /* use tasklet for dpc */
9576 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
9577 dhd->thr_dpc_ctl.thr_pid = -1;
9578 }
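	/* Design note: with dhd_dpc_prio >= 0 the bottom half runs in a
	 * dedicated kernel thread (whose priority can be raised and which can
	 * be scheduled and affinitized like any task); otherwise it runs as a
	 * tasklet in softirq context, which has lower latency but cannot sleep.
	 */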
9579
9580 if (dhd->rxthread_enabled) {
9581 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
9582 /* Initialize RXF thread */
9583 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
9584 if (dhd->thr_rxf_ctl.thr_pid < 0) {
9585 goto fail;
9586 }
9587 }
9588#endif /* !BCMDBUS */
9589
9590 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
9591
9592#if defined(CONFIG_PM_SLEEP)
9593 if (!dhd_pm_notifier_registered) {
9594 dhd_pm_notifier_registered = TRUE;
9595 dhd->pm_notifier.notifier_call = dhd_pm_callback;
9596 dhd->pm_notifier.priority = 10;
9597 register_pm_notifier(&dhd->pm_notifier);
9598 }
9599
9600#endif /* CONFIG_PM_SLEEP */
9601
9602#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9603 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
9604 dhd->early_suspend.suspend = dhd_early_suspend;
9605 dhd->early_suspend.resume = dhd_late_resume;
9606 register_early_suspend(&dhd->early_suspend);
9607 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
9608#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9609
9610#ifdef ARP_OFFLOAD_SUPPORT
9611 dhd->pend_ipaddr = 0;
9612 if (!dhd_inetaddr_notifier_registered) {
9613 dhd_inetaddr_notifier_registered = TRUE;
9614 register_inetaddr_notifier(&dhd_inetaddr_notifier);
9615 }
9616#endif /* ARP_OFFLOAD_SUPPORT */
9617
9618#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9619 if (!dhd_inet6addr_notifier_registered) {
9620 dhd_inet6addr_notifier_registered = TRUE;
9621 register_inet6addr_notifier(&dhd_inet6addr_notifier);
9622 }
9623#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9624 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
9625 INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
9626#ifdef DEBUG_CPU_FREQ
9627 dhd->new_freq = alloc_percpu(int);
9628 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
9629 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9630#endif
9631#ifdef DHDTCPACK_SUPPRESS
9632#ifdef BCMSDIO
9633 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
9634#elif defined(BCMPCIE)
9635	/* xxx: On PCIe-based Samsung Android projects, enable TCP ACK suppression
9636	 * when throughput is higher than the threshold, following the rps_cpus setting.
9637	 */
9638 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
9639#else
9640 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
9641#endif /* BCMSDIO */
9642#endif /* DHDTCPACK_SUPPRESS */
9643
9644#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9645#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9646
9647#ifdef DHD_DEBUG_PAGEALLOC
9648 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
9649#endif /* DHD_DEBUG_PAGEALLOC */
9650
9651#if defined(DHD_LB)
9652
9653 dhd_lb_set_default_cpus(dhd);
9654 DHD_LB_STATS_INIT(&dhd->pub);
9655
9656 /* Initialize the CPU Masks */
9657 if (dhd_cpumasks_init(dhd) == 0) {
9658 /* Now we have the current CPU maps, run through candidacy */
9659 dhd_select_cpu_candidacy(dhd);
9660
9661 /* Register the call backs to CPU Hotplug sub-system */
9662 dhd_register_cpuhp_callback(dhd);
9663
9664 } else {
9665		/*
9666		 * CPU masks could not be initialized, so the candidacy algorithm
9667		 * won't run; load balancing is still honoured, but based on the
9668		 * CPUs statically allocated to each job during init.
9669		 */
9670 dhd->cpu_notifier.notifier_call = NULL;
9671		DHD_ERROR(("%s(): dhd_cpumasks_init failed; CPUs for jobs will be static\n",
9672 __FUNCTION__));
9673 }
9674
9675#ifdef DHD_LB_TXP
9676#ifdef DHD_LB_TXP_DEFAULT_ENAB
9677	/* Turn ON the feature by default */
9678 atomic_set(&dhd->lb_txp_active, 1);
9679#else
9680	/* Turn OFF the feature by default */
9681 atomic_set(&dhd->lb_txp_active, 0);
9682#endif /* DHD_LB_TXP_DEFAULT_ENAB */
9683#endif /* DHD_LB_TXP */
9684
9685#ifdef DHD_LB_RXP
9686	/* Turn ON the feature by default */
9687 atomic_set(&dhd->lb_rxp_active, 1);
9688#endif /* DHD_LB_RXP */
9689
9690 /* Initialize the Load Balancing Tasklets and Napi object */
9691#if defined(DHD_LB_TXC)
9692 tasklet_init(&dhd->tx_compl_tasklet,
9693 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
9694 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
9695 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
9696#endif /* DHD_LB_TXC */
9697#if defined(DHD_LB_RXC)
9698 tasklet_init(&dhd->rx_compl_tasklet,
9699 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
9700 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
9701 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
9702#endif /* DHD_LB_RXC */
9703
9704#if defined(DHD_LB_RXP)
9705 __skb_queue_head_init(&dhd->rx_pend_queue);
9706 skb_queue_head_init(&dhd->rx_napi_queue);
9707 __skb_queue_head_init(&dhd->rx_process_queue);
9708 /* Initialize the work that dispatches NAPI job to a given core */
9709 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_work);
9710 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
9711#endif /* DHD_LB_RXP */
9712
9713#if defined(DHD_LB_TXP)
9714 INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
9715 skb_queue_head_init(&dhd->tx_pend_queue);
9716 /* Initialize the work that dispatches TX job to a given core */
9717 tasklet_init(&dhd->tx_tasklet,
9718 dhd_lb_tx_handler, (ulong)(dhd));
9719 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
9720#endif /* DHD_LB_TXP */
9721
9722 dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
9723#endif /* DHD_LB */
9724
9725#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
9726 INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
9727#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
9728
9729#if defined(BCMPCIE)
9730 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
9731 if (dhd->pub.extended_trap_data == NULL) {
9732 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
9733 }
9734#ifdef DNGL_AXI_ERROR_LOGGING
9735 dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
9736 if (dhd->pub.axi_err_dump == NULL) {
9737 DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
9738 }
9739#endif /* DNGL_AXI_ERROR_LOGGING */
9740#endif /* BCMPCIE */
9741
9742#ifdef SHOW_LOGTRACE
9743 if (dhd_init_logtrace_process(dhd) != BCME_OK) {
9744 goto fail;
9745 }
9746#endif /* SHOW_LOGTRACE */
9747
9748#ifdef EWP_EDL
9749 INIT_DELAYED_WORK(&dhd->edl_dispatcher_work, dhd_edl_process_work);
9750#endif
9751
9752 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
9753 DHD_SSSR_REG_INFO_INIT(&dhd->pub);
9754
9755#ifdef EWP_EDL
9756 if (host_edl_support) {
9757 if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
9758 host_edl_support = FALSE;
9759 }
9760 }
9761#endif /* EWP_EDL */
9762
9763 dhd_init_sock_flows_buf(dhd, dhd_watchdog_ms);
9764
9765 (void)dhd_sysfs_init(dhd);
9766
9767#ifdef WL_NATOE
9768 /* Open Netlink socket for NF_CONNTRACK notifications */
9769 dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
9770 CT_ALL);
9771#endif /* WL_NATOE */
9772#ifdef GDB_PROXY
9773 dhd->pub.gdb_proxy_nodeadman = nodeadman != 0;
9774#endif /* GDB_PROXY */
9775 dhd_state |= DHD_ATTACH_STATE_DONE;
9776 dhd->dhd_state = dhd_state;
9777
9778 dhd_found++;
9779
9780#ifdef CSI_SUPPORT
9781 dhd_csi_init(&dhd->pub);
9782#endif /* CSI_SUPPORT */
9783
9784#ifdef DHD_FW_COREDUMP
9785 /* Set memdump default values */
9786#ifdef CUSTOMER_HW4_DEBUG
9787 dhd->pub.memdump_enabled = DUMP_DISABLED;
9788#else
9789 dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
9790#endif /* CUSTOMER_HW4_DEBUG */
9791 /* Check the memdump capability */
9792 dhd_get_memdump_info(&dhd->pub);
9793#endif /* DHD_FW_COREDUMP */
9794
9795#ifdef DHD_ERPOM
9796 if (enable_erpom) {
9797 pom_handler = &dhd->pub.pom_wlan_handler;
9798 pom_handler->func_id = WLAN_FUNC_ID;
9799 pom_handler->handler = (void *)g_dhd_pub;
9800 pom_handler->power_off = dhd_wlan_power_off_handler;
9801 pom_handler->power_on = dhd_wlan_power_on_handler;
9802
9803 dhd->pub.pom_func_register = NULL;
9804 dhd->pub.pom_func_deregister = NULL;
9805 dhd->pub.pom_toggle_reg_on = NULL;
9806
9807 dhd->pub.pom_func_register = symbol_get(pom_func_register);
9808 dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
9809 dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
9810
9811 symbol_put(pom_func_register);
9812 symbol_put(pom_func_deregister);
9813 symbol_put(pom_toggle_reg_on);
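		/* Note: symbol_get() pins the module that exports each symbol;
		 * the function pointers were cached above, so the module
		 * references taken for the lookups are dropped again right away.
		 */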
9814
9815 if (!dhd->pub.pom_func_register ||
9816 !dhd->pub.pom_func_deregister ||
9817 !dhd->pub.pom_toggle_reg_on) {
9818 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
9819 "POM is not loaded\n", __FUNCTION__));
9820 ASSERT(0);
9821 goto fail;
9822 }
9823 dhd->pub.pom_func_register(pom_handler);
9824 dhd->pub.enable_erpom = TRUE;
9825
9826 }
9827#endif /* DHD_ERPOM */
9828
9829#ifdef DHD_DUMP_MNGR
9830 dhd->pub.dump_file_manage =
9831 (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
9832 if (unlikely(!dhd->pub.dump_file_manage)) {
9833 DHD_ERROR(("%s(): could not allocate memory for - "
9834 "dhd_dump_file_manage_t\n", __FUNCTION__));
9835 }
9836#endif /* DHD_DUMP_MNGR */
9837
9838#ifdef RTT_SUPPORT
9839 if (dhd_rtt_attach(&dhd->pub)) {
9840 DHD_ERROR(("dhd_rtt_attach has failed\n"));
9841 goto fail;
9842 }
9843#endif /* RTT_SUPPORT */
9844
9845#ifdef DHD_TX_PROFILE
9846 if (dhd_tx_profile_attach(&dhd->pub) != BCME_OK) {
9847 DHD_ERROR(("%s:\tdhd_tx_profile_attach has failed\n", __FUNCTION__));
9848 goto fail;
9849 }
9850#endif /* defined(DHD_TX_PROFILE) */
9851
9852 return &dhd->pub;
9853
9854fail:
9855 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
9856 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9857 __FUNCTION__, dhd_state, &dhd->pub));
9858 dhd->dhd_state = dhd_state;
9859 dhd_detach(&dhd->pub);
9860 dhd_free(&dhd->pub);
9861 }
9862
9863dhd_null_flag:
9864 return NULL;
9865}
9866
9867int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9868{
9869 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9870 return DHD_FLAG_HOSTAP_MODE;
9871 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9872 return DHD_FLAG_P2P_MODE;
9873 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9874 return DHD_FLAG_IBSS_MODE;
9875 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9876 return DHD_FLAG_MFG_MODE;
9877
9878 return DHD_FLAG_STA_MODE;
9879}
9880
9881int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9882{
9883 return dhd_get_fw_mode(dhdp->info);
9884}
9885
9886extern char * nvram_get(const char *name);
9887bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
9888{
9889 int fw_len;
9890 int nv_len;
9891 int clm_len;
9892 int conf_len;
9893 const char *fw = NULL;
9894 const char *nv = NULL;
9895 const char *clm = NULL;
9896 const char *conf = NULL;
9897#ifdef DHD_UCODE_DOWNLOAD
9898 int uc_len;
9899 const char *uc = NULL;
9900#endif /* DHD_UCODE_DOWNLOAD */
9901 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9902 int fw_path_len = sizeof(dhdinfo->fw_path);
9903 int nv_path_len = sizeof(dhdinfo->nv_path);
9904
9905 /* Update firmware and nvram path. The path may be from adapter info or module parameter
9906 * The path from adapter info is used for initialization only (as it won't change).
9907 *
9908 * The firmware_path/nvram_path module parameter may be changed by the system at run
9909 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
9910 * command may change dhdinfo->fw_path. As such we need to clear the path info in
9911 * module parameter after it is copied. We won't update the path until the module parameter
9912 * is changed again (first character is not '\0')
9913 */
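	/* Resulting precedence, lowest to highest: built-in CONFIG_BCMDHD_*_PATH
	 * defaults, then adapter info (consulted only while the corresponding
	 * dhdinfo path is still empty), then a non-empty module parameter, which
	 * always wins.
	 */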
9914
9915 /* set default firmware and nvram path for built-in type driver */
9916// if (!dhd_download_fw_on_driverload) {
9917#ifdef CONFIG_BCMDHD_FW_PATH
9918 fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
9919#endif /* CONFIG_BCMDHD_FW_PATH */
9920#ifdef CONFIG_BCMDHD_NVRAM_PATH
9921 nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
9922#endif /* CONFIG_BCMDHD_NVRAM_PATH */
9923// }
9924
9925 /* check if we need to initialize the path */
9926 if (dhdinfo->fw_path[0] == '\0') {
9927 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9928 fw = adapter->fw_path;
9929
9930 }
9931 if (dhdinfo->nv_path[0] == '\0') {
9932 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9933 nv = adapter->nv_path;
9934 }
9935 if (dhdinfo->clm_path[0] == '\0') {
9936 if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
9937 clm = adapter->clm_path;
9938 }
9939 if (dhdinfo->conf_path[0] == '\0') {
9940 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
9941 conf = adapter->conf_path;
9942 }
9943
9944 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9945 *
9946 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9947 */
9948 if (firmware_path[0] != '\0')
9949 fw = firmware_path;
9950
9951 if (nvram_path[0] != '\0')
9952 nv = nvram_path;
9953 if (clm_path[0] != '\0')
9954 clm = clm_path;
9955 if (config_path[0] != '\0')
9956 conf = config_path;
9957
9958#ifdef DHD_UCODE_DOWNLOAD
9959 if (ucode_path[0] != '\0')
9960 uc = ucode_path;
9961#endif /* DHD_UCODE_DOWNLOAD */
9962
9963 if (fw && fw[0] != '\0') {
9964 fw_len = strlen(fw);
9965 if (fw_len >= fw_path_len) {
9966 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
9967 return FALSE;
9968 }
9969 strlcpy(dhdinfo->fw_path, fw, fw_path_len);
9970 }
9971 if (nv && nv[0] != '\0') {
9972 nv_len = strlen(nv);
9973 if (nv_len >= nv_path_len) {
9974 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
9975 return FALSE;
9976 }
9977 memset(dhdinfo->nv_path, 0, nv_path_len);
9978 strlcpy(dhdinfo->nv_path, nv, nv_path_len);
9979#ifdef DHD_USE_SINGLE_NVRAM_FILE
9980 /* Remove "_net" or "_mfg" tag from current nvram path */
9981 {
9982 char *nvram_tag = "nvram_";
9983 char *ext_tag = ".txt";
9984 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
9985 bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
9986 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
9987 if (valid_buf) {
9988 char *sp = sp_nvram + strlen(nvram_tag) - 1;
9989 uint32 padding_size = (uint32)(dhdinfo->nv_path +
9990 nv_path_len - sp);
9991 memset(sp, 0, padding_size);
9992 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
9993 nv_len = strlen(dhdinfo->nv_path);
9994 DHD_INFO(("%s: new nvram path = %s\n",
9995 __FUNCTION__, dhdinfo->nv_path));
9996 } else if (sp_nvram) {
9997 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
9998 __FUNCTION__));
9999 return FALSE;
10000 } else {
10001 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10002 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
10003 }
10004 }
10005#endif /* DHD_USE_SINGLE_NVRAM_FILE */
10006 }
10007 if (clm && clm[0] != '\0') {
10008 clm_len = strlen(clm);
10009 if (clm_len >= sizeof(dhdinfo->clm_path)) {
10010 DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
10011 return FALSE;
10012 }
10013 strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
10014 if (dhdinfo->clm_path[clm_len-1] == '\n')
10015 dhdinfo->clm_path[clm_len-1] = '\0';
10016 }
10017 if (conf && conf[0] != '\0') {
10018 conf_len = strlen(conf);
10019 if (conf_len >= sizeof(dhdinfo->conf_path)) {
10020 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
10021 return FALSE;
10022 }
10023 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
10024 if (dhdinfo->conf_path[conf_len-1] == '\n')
10025 dhdinfo->conf_path[conf_len-1] = '\0';
10026 }
10027#ifdef DHD_UCODE_DOWNLOAD
10028 if (uc && uc[0] != '\0') {
10029 uc_len = strlen(uc);
10030 if (uc_len >= sizeof(dhdinfo->uc_path)) {
10031 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10032 return FALSE;
10033 }
10034 strlcpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
10035 }
10036#endif /* DHD_UCODE_DOWNLOAD */
10037
10038#if 0
10039 /* clear the path in module parameter */
10040 if (dhd_download_fw_on_driverload) {
10041 firmware_path[0] = '\0';
10042 nvram_path[0] = '\0';
10043 clm_path[0] = '\0';
10044 config_path[0] = '\0';
10045 }
10046#endif
10047#ifdef DHD_UCODE_DOWNLOAD
10048 ucode_path[0] = '\0';
10049 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
10050#endif /* DHD_UCODE_DOWNLOAD */
10051
10052 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10053 if (dhdinfo->fw_path[0] == '\0') {
10054 DHD_ERROR(("firmware path not found\n"));
10055 return FALSE;
10056 }
10057 if (dhdinfo->nv_path[0] == '\0') {
10058 DHD_ERROR(("nvram path not found\n"));
10059 return FALSE;
10060 }
10061
10062 return TRUE;
10063}
10064
10065#if defined(BT_OVER_SDIO)
10066extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
10067{
10068 int fw_len;
10069 const char *fw = NULL;
10070 wifi_adapter_info_t *adapter = dhdinfo->adapter;
10071
10072 /* Update bt firmware path. The path may be from adapter info or module parameter
10073 * The path from adapter info is used for initialization only (as it won't change).
10074 *
10075 * The btfw_path module parameter may be changed by the system at run
10076 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
10077 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
10078 * module parameter after it is copied. We won't update the path until the module parameter
10079 * is changed again (first character is not '\0')
10080 */
10081
10082 /* set default firmware and nvram path for built-in type driver */
10083 if (!dhd_download_fw_on_driverload) {
10084#ifdef CONFIG_BCMDHD_BTFW_PATH
10085 fw = CONFIG_BCMDHD_BTFW_PATH;
10086	#endif /* CONFIG_BCMDHD_BTFW_PATH */
10087 }
10088
10089 /* check if we need to initialize the path */
10090 if (dhdinfo->btfw_path[0] == '\0') {
10091 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
10092 fw = adapter->btfw_path;
10093 }
10094
10095 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10096 */
10097 if (btfw_path[0] != '\0')
10098 fw = btfw_path;
10099
10100 if (fw && fw[0] != '\0') {
10101 fw_len = strlen(fw);
10102 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
10103 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10104 return FALSE;
10105 }
10106 strlcpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
10107 }
10108
10109 /* clear the path in module parameter */
10110 btfw_path[0] = '\0';
10111
10112 if (dhdinfo->btfw_path[0] == '\0') {
10113 DHD_ERROR(("bt firmware path not found\n"));
10114 return FALSE;
10115 }
10116
10117 return TRUE;
10118}
10119#endif /* defined (BT_OVER_SDIO) */
10120
10121#ifdef CUSTOMER_HW4_DEBUG
10122bool dhd_validate_chipid(dhd_pub_t *dhdp)
10123{
10124 uint chipid = dhd_bus_chip_id(dhdp);
10125 uint config_chipid;
10126
10127#ifdef BCM4389_CHIP_DEF
10128 config_chipid = BCM4389_CHIP_ID;
10129#elif defined(BCM4375_CHIP)
10130 config_chipid = BCM4375_CHIP_ID;
10131#elif defined(BCM4361_CHIP)
10132 config_chipid = BCM4361_CHIP_ID;
10133#elif defined(BCM4359_CHIP)
10134 config_chipid = BCM4359_CHIP_ID;
10135#elif defined(BCM4358_CHIP)
10136 config_chipid = BCM4358_CHIP_ID;
10137#elif defined(BCM4354_CHIP)
10138 config_chipid = BCM4354_CHIP_ID;
10139#elif defined(BCM4339_CHIP)
10140 config_chipid = BCM4339_CHIP_ID;
10141#elif defined(BCM4335_CHIP)
10142 config_chipid = BCM4335_CHIP_ID;
10143#elif defined(BCM43430_CHIP)
10144 config_chipid = BCM43430_CHIP_ID;
10145#elif defined(BCM43018_CHIP)
10146 config_chipid = BCM43018_CHIP_ID;
10147#elif defined(BCM43455_CHIP)
10148 config_chipid = BCM4345_CHIP_ID;
10149#elif defined(BCM43454_CHIP)
10150 config_chipid = BCM43454_CHIP_ID;
10151#elif defined(BCM43012_CHIP_)
10152 config_chipid = BCM43012_CHIP_ID;
10153#elif defined(BCM43013_CHIP)
10154 config_chipid = BCM43012_CHIP_ID;
10155#else
10156	DHD_ERROR(("%s: Unknown chip id; if you use a new chipset,"
10157		" please add CONFIG_BCMXXXX to the kernel and a"
10158		" BCMXXXX_CHIP definition to the DHD driver\n",
10159 __FUNCTION__));
10160 config_chipid = 0;
10161
10162 return FALSE;
10163	#endif /* BCM4389_CHIP_DEF */
10164
10165#if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10166 if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
10167 return TRUE;
10168 }
10169#endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
10170#if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10171 if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
10172 return TRUE;
10173 }
10174#endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
10175#if defined(BCM4359_CHIP)
10176 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
10177 return TRUE;
10178 }
10179#endif /* BCM4359_CHIP */
10180#if defined(BCM4361_CHIP)
10181 if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
10182 return TRUE;
10183 }
10184#endif /* BCM4361_CHIP */
10185
10186 return config_chipid == chipid;
10187}
10188#endif /* CUSTOMER_HW4_DEBUG */
10189
10190#if defined(BT_OVER_SDIO)
10191wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
10192{
10193 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
10194 /* assuming that dhd_pub_t type pointer is available from a global variable */
10195 return (wlan_bt_handle_t) g_dhd_pub;
10196} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
10197
10198int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
10199{
10200 int ret = -1;
10201 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
10202 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10203
10204 /* Download BT firmware image to the dongle */
10205 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
10206 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
10207 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
10208 if (ret < 0) {
10209 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10210 __FUNCTION__, dhd->btfw_path));
10211 return ret;
10212 }
10213 }
10214 return ret;
10215} EXPORT_SYMBOL(dhd_download_btfw);
10216#endif /* defined (BT_OVER_SDIO) */
10217
10218#ifndef BCMDBUS
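/* Bring-up sequence implemented below: download firmware/NVRAM if the bus is
 * still DHD_BUS_DOWN, start the watchdog timer, initialize the bus, register
 * the OOB interrupt where configured, initialize PCIe flow rings, run protocol
 * init, and finally sync with the dongle; failure paths stop the watchdog
 * timer and release the wake lock as needed before returning.
 */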
10219int
10220dhd_bus_start(dhd_pub_t *dhdp)
10221{
10222 int ret = -1;
10223 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10224 unsigned long flags;
10225
10226#if defined(DHD_DEBUG) && defined(BCMSDIO)
10227 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
10228#endif /* DHD_DEBUG && BCMSDIO */
10229 ASSERT(dhd);
10230
10231 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
10232 dhdp->memdump_type = 0;
10233 dhdp->dongle_trap_occured = 0;
10234#ifdef DHD_SSSR_DUMP
10235 /* Flag to indicate sssr dump is collected */
10236 dhdp->sssr_dump_collected = 0;
10237#endif /* DHD_SSSR_DUMP */
10238 dhdp->iovar_timeout_occured = 0;
10239#ifdef PCIE_FULL_DONGLE
10240 dhdp->d3ack_timeout_occured = 0;
10241 dhdp->livelock_occured = 0;
10242 dhdp->pktid_audit_failed = 0;
10243#endif /* PCIE_FULL_DONGLE */
10244 dhd->pub.iface_op_failed = 0;
10245 dhd->pub.scan_timeout_occurred = 0;
10246 dhd->pub.scan_busy_occurred = 0;
10247 /* Clear induced error during initialize */
10248 dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
10249 dhd->pub.tput_test_done = FALSE;
10250
10251 /* try to download image and nvram to the dongle */
10252 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
10253 /* Indicate FW Download has not yet done */
10254 dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
10255 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
10256 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
10257#if defined(DHD_DEBUG) && defined(BCMSDIO)
10258 fw_download_start = OSL_SYSUPTIME();
10259#endif /* DHD_DEBUG && BCMSDIO */
10260 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
10261 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
10262#if defined(DHD_DEBUG) && defined(BCMSDIO)
10263 fw_download_end = OSL_SYSUPTIME();
10264#endif /* DHD_DEBUG && BCMSDIO */
10265 if (ret < 0) {
10266 DHD_ERROR(("%s: failed to download firmware %s\n",
10267 __FUNCTION__, dhd->fw_path));
10268 return ret;
10269 }
10270 /* Indicate FW Download has succeeded */
10271 dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
10272 }
10273 if (dhd->pub.busstate != DHD_BUS_LOAD) {
10274 return -ENETDOWN;
10275 }
10276
10277#ifdef BCMSDIO
10278 dhd_os_sdlock(dhdp);
10279#endif /* BCMSDIO */
10280
10281 /* Start the watchdog timer */
10282 dhd->pub.tickcnt = 0;
10283 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
10284
10285 /* Bring up the bus */
10286 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
10287
10288 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
10289#ifdef BCMSDIO
10290 dhd_os_sdunlock(dhdp);
10291#endif /* BCMSDIO */
10292 return ret;
10293 }
10294#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
10295 /* Host registration for OOB interrupt */
10296 if (dhd_bus_oob_intr_register(dhdp)) {
10297 /* deactivate timer and wait for the handler to finish */
10298#if !defined(BCMPCIE_OOB_HOST_WAKE)
10299 DHD_GENERAL_LOCK(&dhd->pub, flags);
10300 dhd->wd_timer_valid = FALSE;
10301 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10302 del_timer_sync(&dhd->timer);
10303
10304#endif /* !BCMPCIE_OOB_HOST_WAKE */
10305 DHD_STOP_RPM_TIMER(&dhd->pub);
10306
10307 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
10308 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10309 return -ENODEV;
10310 }
10311
10312#if defined(BCMPCIE_OOB_HOST_WAKE)
10313 dhd_bus_oob_intr_set(dhdp, TRUE);
10314#else
10315 /* Enable oob at firmware */
10316 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10317#endif /* BCMPCIE_OOB_HOST_WAKE */
10318#elif defined(FORCE_WOWLAN)
10319 /* Enable oob at firmware */
10320 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10321#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
10322#ifdef PCIE_FULL_DONGLE
10323 {
10324 /* max_h2d_rings includes H2D common rings */
10325 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
10326
10327 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
10328 max_h2d_rings));
10329 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
10330#ifdef BCMSDIO
10331 dhd_os_sdunlock(dhdp);
10332#endif /* BCMSDIO */
10333 return ret;
10334 }
10335 }
10336#endif /* PCIE_FULL_DONGLE */
10337
10338 /* set default value for now. Will be updated again in dhd_preinit_ioctls()
10339 * after querying FW
10340 */
10341 dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
10342 dhdp->event_log_max_sets_queried = FALSE;
10343
10344 dhdp->smmu_fault_occurred = 0;
10345#ifdef DNGL_AXI_ERROR_LOGGING
10346 dhdp->axi_error = FALSE;
10347#endif /* DNGL_AXI_ERROR_LOGGING */
10348
10349 /* Do protocol initialization necessary for IOCTL/IOVAR */
10350 ret = dhd_prot_init(&dhd->pub);
10351	if (unlikely(ret != BCME_OK)) {
10352 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10353 return ret;
10354 }
10355
10356 /* If bus is not ready, can't come up */
10357 if (dhd->pub.busstate != DHD_BUS_DATA) {
10358 DHD_GENERAL_LOCK(&dhd->pub, flags);
10359 dhd->wd_timer_valid = FALSE;
10360 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10361 del_timer_sync(&dhd->timer);
10362 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
10363 DHD_STOP_RPM_TIMER(&dhd->pub);
10364#ifdef BCMSDIO
10365 dhd_os_sdunlock(dhdp);
10366#endif /* BCMSDIO */
10367 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10368 return -ENODEV;
10369 }
10370
10371#ifdef BCMSDIO
10372 dhd_os_sdunlock(dhdp);
10373#endif /* BCMSDIO */
10374
10375 /* Bus is ready, query any dongle information */
10376 /* XXX Since dhd_sync_with_dongle can sleep, should module count surround it? */
10377#if defined(DHD_DEBUG) && defined(BCMSDIO)
10378 f2_sync_start = OSL_SYSUPTIME();
10379#endif /* DHD_DEBUG && BCMSDIO */
10380 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
10381 DHD_GENERAL_LOCK(&dhd->pub, flags);
10382 dhd->wd_timer_valid = FALSE;
10383 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10384 del_timer_sync(&dhd->timer);
10385 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
10386 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10387 return ret;
10388 }
10389
10390#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
10391#if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420)
10392 /* XXX: JIRA SWWLAN-139454: Added L1ss enable
10393 * after firmware download completion due to link down issue
10394 * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point
10395 */
10396 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
10397 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
10398#endif /* !CONFIG_SOC_EXYNOS8890 && !SUPPORT_EXYNOS7420 */
10399#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
10400#if defined(DHD_DEBUG) && defined(BCMSDIO)
10401 f2_sync_end = OSL_SYSUPTIME();
10402 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10403 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
10404#endif /* DHD_DEBUG && BCMSDIO */
10405
10406#ifdef ARP_OFFLOAD_SUPPORT
10407 if (dhd->pend_ipaddr) {
10408#ifdef AOE_IP_ALIAS_SUPPORT
10409		/* XXX Assume the pending IP address belongs to the primary interface */
10410 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
10411#endif /* AOE_IP_ALIAS_SUPPORT */
10412 dhd->pend_ipaddr = 0;
10413 }
10414#endif /* ARP_OFFLOAD_SUPPORT */
10415
10416 return 0;
10417}
10418#endif /* !BCMDBUS */
10419
10420#ifdef WLTDLS
10421int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
10422{
10423 uint32 tdls = tdls_on;
10424 int ret = 0;
10425 uint32 tdls_auto_op = 0;
10426 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
10427 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
10428 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
10429 uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
10430 uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
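	/* The auto-mode thresholds above come in high/low pairs (RSSI and packet
	 * count), which suggests hysteresis around link setup/teardown decisions;
	 * the exact semantics of these iovars are firmware-defined.
	 */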
10431
10432 BCM_REFERENCE(mac);
10433 if (!FW_SUPPORTED(dhd, tdls))
10434 return BCME_ERROR;
10435
10436 if (dhd->tdls_enable == tdls_on)
10437 goto auto_mode;
10438 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
10439 if (ret < 0) {
10440 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
10441 goto exit;
10442 }
10443 dhd->tdls_enable = tdls_on;
10444auto_mode:
10445
10446 tdls_auto_op = auto_on;
10447 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
10448 0, TRUE);
10449 if (ret < 0) {
10450 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
10451 goto exit;
10452 }
10453
10454 if (tdls_auto_op) {
10455 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
10456 sizeof(tdls_idle_time), NULL, 0, TRUE);
10457 if (ret < 0) {
10458 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
10459 goto exit;
10460 }
10461 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
10462 sizeof(tdls_rssi_high), NULL, 0, TRUE);
10463 if (ret < 0) {
10464 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
10465 goto exit;
10466 }
10467 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
10468 sizeof(tdls_rssi_low), NULL, 0, TRUE);
10469 if (ret < 0) {
10470 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
10471 goto exit;
10472 }
10473 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
10474 sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
10475 if (ret < 0) {
10476 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
10477 goto exit;
10478 }
10479 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
10480 sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
10481 if (ret < 0) {
10482 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
10483 goto exit;
10484 }
10485 }
10486
10487exit:
10488 return ret;
10489}
10490
10491int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
10492{
10493 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10494 int ret = 0;
10495 if (dhd)
10496 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
10497 else
10498 ret = BCME_ERROR;
10499 return ret;
10500}
10501
10502int
10503dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
10504{
10505 int ret = 0;
10506 bool auto_on = false;
10507 uint32 mode = wfd_mode;
10508
10509#ifdef ENABLE_TDLS_AUTO_MODE
10510 if (wfd_mode) {
10511 auto_on = false;
10512 } else {
10513 auto_on = true;
10514 }
10515#else
10516 auto_on = false;
10517#endif /* ENABLE_TDLS_AUTO_MODE */
10518 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
10519 if (ret < 0) {
10520 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
10521 return ret;
10522 }
10523
10524 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
10525 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
10526		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
10527 return ret;
10528 }
10529
10530 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
10531 if (ret < 0) {
10532 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
10533 return ret;
10534 }
10535
10536 dhd->tdls_mode = mode;
10537 return ret;
10538}
10539#ifdef PCIE_FULL_DONGLE
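/* Maintains the host-side TDLS peer table, a singly linked list guarded by
 * tdls_lock: a PEER_CONNECTED event prepends a node for the new peer, while a
 * PEER_DISCONNECTED event deletes the peer's flow rings and unlinks its node.
 */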
10540int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
10541{
10542 dhd_pub_t *dhd_pub = dhdp;
10543 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
10544 tdls_peer_node_t *new = NULL, *prev = NULL;
10545 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
10546 uint8 *da = (uint8 *)&event->addr.octet[0];
10547 bool connect = FALSE;
10548 uint32 reason = ntoh32(event->reason);
10549 unsigned long flags;
10550
10551 /* No handling needed for peer discovered reason */
10552 if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
10553 return BCME_ERROR;
10554 }
10555 if (reason == WLC_E_TDLS_PEER_CONNECTED)
10556 connect = TRUE;
10557 else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
10558 connect = FALSE;
10559 else
10560 {
10561 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
10562 return BCME_ERROR;
10563 }
10564 if (ifindex == DHD_BAD_IF)
10565 return BCME_ERROR;
10566
10567 if (connect) {
10568 while (cur != NULL) {
10569 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10570 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
10571 __FUNCTION__, __LINE__));
10572 return BCME_ERROR;
10573 }
10574 cur = cur->next;
10575 }
10576
10577 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
10578 if (new == NULL) {
10579 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
10580 return BCME_ERROR;
10581 }
10582 memcpy(new->addr, da, ETHER_ADDR_LEN);
10583 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10584 new->next = dhd_pub->peer_tbl.node;
10585 dhd_pub->peer_tbl.node = new;
10586 dhd_pub->peer_tbl.tdls_peer_count++;
10587 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10588
10589 } else {
10590 while (cur != NULL) {
10591 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10592 dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
10593 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10594 if (prev)
10595 prev->next = cur->next;
10596 else
10597 dhd_pub->peer_tbl.node = cur->next;
10598 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
10599 dhd_pub->peer_tbl.tdls_peer_count--;
10600 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10601 return BCME_OK;
10602 }
10603 prev = cur;
10604 cur = cur->next;
10605 }
10606 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
10607 }
10608 return BCME_OK;
10609}
10610#endif /* PCIE_FULL_DONGLE */
10611	#endif /* WLTDLS */
10612
10613bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
10614{
10615 if (!dhd)
10616 return FALSE;
10617
10618 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
10619 return TRUE;
10620 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
10621 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
10622 return TRUE;
10623 else
10624 return FALSE;
10625}
10626#if !defined(AP) && defined(WLP2P)
10627	/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the
10628	 * firmware is named fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the
10629	 * STA firmware and enable concurrent mode (apply P2P settings) accordingly. SoftAP
10630	 * firmware is still named fw_bcmdhd_apsta.
10631	 */
10632uint32
10633dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
10634{
10635 int32 ret = 0;
10636 char buf[WLC_IOCTL_SMLEN];
10637 bool mchan_supported = FALSE;
10638 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
10639 * test mode, that means we only will use the mode as it is
10640 */
10641 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
10642 return 0;
10643 if (FW_SUPPORTED(dhd, vsdb)) {
10644 mchan_supported = TRUE;
10645 }
10646 if (!FW_SUPPORTED(dhd, p2p)) {
10647 DHD_TRACE(("Chip does not support p2p\n"));
10648 return 0;
10649 } else {
10650		/* The chip supports p2p, but check whether p2p is actually implemented in the firmware */
10651 memset(buf, 0, sizeof(buf));
10652 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
10653 sizeof(buf), FALSE);
10654 if (ret < 0) {
10655 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
10656 return 0;
10657 } else {
10658 if (buf[0] == 1) {
10659 /* By default, chip supports single chan concurrency,
10660 * now lets check for mchan
10661 */
10662 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
10663 if (mchan_supported)
10664 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
10665 if (FW_SUPPORTED(dhd, rsdb)) {
10666 ret |= DHD_FLAG_RSDB_MODE;
10667 }
10668#ifdef WL_SUPPORT_MULTIP2P
10669 if (FW_SUPPORTED(dhd, mp2p)) {
10670 ret |= DHD_FLAG_MP2P_MODE;
10671 }
10672#endif /* WL_SUPPORT_MULTIP2P */
10673#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10674 return ret;
10675#else
10676 return 0;
10677#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10678 }
10679 }
10680 }
10681 return 0;
10682}
10683	#endif /* !defined(AP) && defined(WLP2P) */
10684
10685#ifdef WLAIBSS
10686int
10687dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
10688{
10689 int ret = BCME_OK;
10690 aibss_bcn_force_config_t bcn_config;
10691 uint32 aibss;
10692#ifdef WLAIBSS_PS
10693 uint32 aibss_ps;
10694 s32 atim;
10695#endif /* WLAIBSS_PS */
10696 int ibss_coalesce;
10697
10698 aibss = 1;
10699 ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
10700 if (ret < 0) {
10701 if (ret == BCME_UNSUPPORTED) {
10702 DHD_ERROR(("%s aibss , UNSUPPORTED\n", __FUNCTION__));
10703 return BCME_OK;
10704 } else {
10705 DHD_ERROR(("%s Set aibss to %d err(%d)\n", __FUNCTION__, aibss, ret));
10706 return ret;
10707 }
10708 }
10709
10710#ifdef WLAIBSS_PS
10711 aibss_ps = 1;
10712 ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
10713 if (ret < 0) {
10714 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
10715		__FUNCTION__, aibss_ps, ret));
10716 return ret;
10717 }
10718
10719 atim = 10;
10720 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
10721 (char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
10722 DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
10723 __FUNCTION__, ret));
10724 return ret;
10725 }
10726#endif /* WLAIBSS_PS */
10727
10728 memset(&bcn_config, 0, sizeof(bcn_config));
10729 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
10730 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
10731 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
10732 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
10733 bcn_config.len = sizeof(bcn_config);
10734
10735 ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
10736 sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
10737 if (ret < 0) {
10738 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
10739 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
10740 AIBSS_BCN_FLOOD_DUR, ret));
10741 return ret;
10742 }
10743
10744 ibss_coalesce = IBSS_COALESCE_DEFAULT;
10745 ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
10746 sizeof(ibss_coalesce), NULL, 0, TRUE);
10747 if (ret < 0) {
10748 DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
10749 __FUNCTION__, ret));
10750 return ret;
10751 }
10752
10753 dhd->op_mode |= DHD_FLAG_IBSS_MODE;
10754 return BCME_OK;
10755}
10756#endif /* WLAIBSS */
10757
10758#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10759#ifdef WL_BAM
10760static int
10761dhd_check_adps_bad_ap(dhd_pub_t *dhd)
10762{
10763 struct net_device *ndev;
10764 struct bcm_cfg80211 *cfg;
10765 struct wl_profile *profile;
10766 struct ether_addr bssid;
10767
10768 if (!dhd_is_associated(dhd, 0, NULL)) {
10769 DHD_ERROR(("%s - not associated\n", __FUNCTION__));
10770 return BCME_OK;
10771 }
10772
10773 ndev = dhd_linux_get_primary_netdev(dhd);
10774 if (!ndev) {
10775 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
10776 return -ENODEV;
10777 }
10778
10779 cfg = wl_get_cfg(ndev);
10780 if (!cfg) {
10781 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
10782 return -EINVAL;
10783 }
10784
10785 profile = wl_get_profile_by_netdev(cfg, ndev);
10786 memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
10787 if (wl_adps_bad_ap_check(cfg, &bssid)) {
10788 if (wl_adps_enabled(cfg, ndev)) {
10789 wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
10790 }
10791 }
10792
10793 return BCME_OK;
10794}
10795#endif /* WL_BAM */
10796
10797int
10798dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
10799{
10800 int i;
10801 int len;
10802 int ret = BCME_OK;
10803
10804 bcm_iov_buf_t *iov_buf = NULL;
10805 wl_adps_params_v1_t *data = NULL;
10806
10807 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
10808 iov_buf = MALLOC(dhd->osh, len);
10809 if (iov_buf == NULL) {
10810 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
10811 ret = BCME_NOMEM;
10812 goto exit;
10813 }
10814
10815 iov_buf->version = WL_ADPS_IOV_VER;
10816 iov_buf->len = sizeof(*data);
10817 iov_buf->id = WL_ADPS_IOV_MODE;
10818
10819 data = (wl_adps_params_v1_t *)iov_buf->data;
10820 data->version = ADPS_SUB_IOV_VERSION_1;
10821 data->length = sizeof(*data);
10822 data->mode = on;
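	/* Resulting buffer layout (field widths per the bcm_iov_buf_t
	 * definition): [version][len][id = WL_ADPS_IOV_MODE] followed by the
	 * wl_adps_params_v1_t payload; it is sent once per band via the "adps"
	 * iovar in the loop below.
	 */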
10823
10824 for (i = 1; i <= MAX_BANDS; i++) {
10825 data->band = i;
10826 ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
10827 if (ret < 0) {
10828 if (ret == BCME_UNSUPPORTED) {
10829 DHD_ERROR(("%s adps, UNSUPPORTED\n", __FUNCTION__));
10830 ret = BCME_OK;
10831 goto exit;
10832 }
10833 else {
10834 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10835 __FUNCTION__, on ? "On" : "Off", i, ret));
10836 goto exit;
10837 }
10838 }
10839 }
10840
10841#ifdef WL_BAM
10842 if (on) {
10843 dhd_check_adps_bad_ap(dhd);
10844 }
10845#endif /* WL_BAM */
10846
10847exit:
10848 if (iov_buf) {
10849 MFREE(dhd->osh, iov_buf, len);
10850 }
10851 return ret;
10852}
10853#endif /* WLADPS || WLADPS_PRIVATE_CMD */
10854
10855int
10856dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
10857{
10858 wl_el_set_type_t logset_type, logset_op;
10859 wl_el_set_all_type_v1_t *logset_all_type_op = NULL;
10860 bool use_logset_all_type = FALSE;
10861 int ret = BCME_ERROR;
10862 int err = 0;
10863 uint8 i = 0;
10864 int el_set_all_type_len;
10865
10866 if (!dhd || !logset_mask)
10867 return BCME_BADARG;
10868
10869 el_set_all_type_len = OFFSETOF(wl_el_set_all_type_v1_t, set_type) +
10870 (sizeof(wl_el_set_type_v1_t) * dhd->event_log_max_sets);
10871
10872 logset_all_type_op = (wl_el_set_all_type_v1_t *) MALLOC(dhd->osh, el_set_all_type_len);
10873 if (logset_all_type_op == NULL) {
10874 DHD_ERROR(("%s: failed to allocate %d bytes for logset_all_type_op\n",
10875 __FUNCTION__, el_set_all_type_len));
10876 return BCME_NOMEM;
10877 }
10878
10879 *logset_mask = 0;
10880 memset(&logset_type, 0, sizeof(logset_type));
10881 memset(&logset_op, 0, sizeof(logset_op));
10882 logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
10883 logset_type.len = htod16(sizeof(wl_el_set_type_t));
10884
10885	/* Try with set = event_log_max_sets; if that fails, use the legacy event_log_set_type */
10886 logset_type.set = dhd->event_log_max_sets;
10887 err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type, sizeof(logset_type),
10888 (char *)logset_all_type_op, el_set_all_type_len, FALSE);
10889 if (err == BCME_OK) {
10890		DHD_ERROR(("%s: using the optimised all-set logset type query\n", __FUNCTION__));
10891 use_logset_all_type = TRUE;
10892 }
10893
10894 for (i = 0; i < dhd->event_log_max_sets; i++) {
10895 if (use_logset_all_type) {
10896 logset_op.type = logset_all_type_op->set_type[i].type_val;
10897 } else {
10898 logset_type.set = i;
10899 err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
10900 sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
10901 }
10902		/* the iovar may return an 'unsupported' error if a log set number is not
10903		 * present in the fw, so do not return on error!
10904		 */
10905 if (err == BCME_OK &&
10906 logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
10907 *logset_mask |= 0x01u << i;
10908 ret = BCME_OK;
10909 DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
10910 }
10911 }
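	/* Example: if only log sets 0 and 2 are preserve-type, *logset_mask ends
	 * up as 0x5 (bit i set for preserve set i) and the function returns
	 * BCME_OK.
	 */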
10912
10913 MFREE(dhd->osh, logset_all_type_op, el_set_all_type_len);
10914 return ret;
10915}
10916
10917int
10918dhd_optimised_preinit_ioctls(dhd_pub_t * dhd)
10919{
10920 int ret = 0;
10921 /* Room for "event_msgs_ext" + '\0' + bitvec */
10922 char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
10923 uint32 event_log_max_sets = 0;
10924 char* iov_buf = NULL;
10925 /* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED,
10926 * based on FW build tag.
10927 */
10928 int ret2 = 0;
10929#if defined(BCMSUP_4WAY_HANDSHAKE)
10930 uint32 sup_wpa = 1;
10931#endif /* BCMSUP_4WAY_HANDSHAKE */
10932
10933 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
10934 uint wnm_bsstrans_resp = 0;
10935#ifdef DHD_PM_CONTROL_FROM_FILE
10936 uint power_mode = PM_FAST;
10937#endif /* DHD_PM_CONTROL_FROM_FILE */
10938 char buf[WLC_IOCTL_SMLEN];
10939 char *ptr;
10940#ifdef ROAM_ENABLE
10941 uint roamvar = 0;
10942#ifdef ROAM_AP_ENV_DETECTION
10943 int roam_env_mode = 0;
10944#endif /* ROAM_AP_ENV_DETECTION */
10945#endif /* ROAM_ENABLE */
10946#if defined(SOFTAP)
10947 uint dtim = 1;
10948#endif
10949/* XXX andrey: temporary fix for the dk8000 build error */
10950 struct ether_addr p2p_ea;
10951#ifdef GET_CUSTOM_MAC_ENABLE
10952 struct ether_addr ea_addr;
10953#endif /* GET_CUSTOM_MAC_ENABLE */
10954#ifdef BCMPCIE_OOB_HOST_WAKE
10955 uint32 hostwake_oob = 0;
10956#endif /* BCMPCIE_OOB_HOST_WAKE */
10957 wl_wlc_version_t wlc_ver;
10958
10959#ifdef PKT_FILTER_SUPPORT
10960 dhd_pkt_filter_enable = TRUE;
10961#ifdef APF
10962 dhd->apf_set = FALSE;
10963#endif /* APF */
10964#endif /* PKT_FILTER_SUPPORT */
10965 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
10966#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
10967 dhd->max_dtim_enable = TRUE;
10968#else
10969 dhd->max_dtim_enable = FALSE;
10970#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
10971 dhd->disable_dtim_in_suspend = FALSE;
10972#ifdef CUSTOM_SET_OCLOFF
10973 dhd->ocl_off = FALSE;
10974#endif /* CUSTOM_SET_OCLOFF */
10975#ifdef SUPPORT_SET_TID
10976 dhd->tid_mode = SET_TID_OFF;
10977 dhd->target_uid = 0;
10978 dhd->target_tid = 0;
10979#endif /* SUPPORT_SET_TID */
10980 DHD_TRACE(("Enter %s\n", __FUNCTION__));
10981 dhd->op_mode = 0;
10982
10983#ifdef ARP_OFFLOAD_SUPPORT
10984	/* arpoe will be applied from the suspend context */
10985 dhd->arpoe_enable = TRUE;
10986 dhd->arpol_configured = FALSE;
10987#endif /* ARP_OFFLOAD_SUPPORT */
10988
10989 /* clear AP flags */
10990#if defined(CUSTOM_COUNTRY_CODE)
10991 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
10992#endif /* CUSTOM_COUNTRY_CODE */
10993
10994#ifdef CUSTOMER_HW4_DEBUG
10995 if (!dhd_validate_chipid(dhd)) {
10996		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) are mismatched\n",
10997 __FUNCTION__, dhd_bus_chip_id(dhd)));
10998#ifndef SUPPORT_MULTIPLE_CHIPS
10999 ret = BCME_BADARG;
11000 goto done;
11001#endif /* !SUPPORT_MULTIPLE_CHIPS */
11002 }
11003#endif /* CUSTOMER_HW4_DEBUG */
11004
11005 /* query for 'ver' to get version info from firmware */
11006 memset(buf, 0, sizeof(buf));
11007 ptr = buf;
11008 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11009 if (ret < 0)
11010 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11011 else {
11012 bcmstrtok(&ptr, "\n", 0);
11013 /* Print fw version info */
11014 DHD_ERROR(("Firmware version = %s\n", buf));
11015 strncpy(fw_version, buf, FW_VER_STR_LEN);
11016 fw_version[FW_VER_STR_LEN-1] = '\0';
11017#if defined(BCMSDIO) || defined(BCMPCIE)
11018 dhd_set_version_info(dhd, buf);
11019#endif /* BCMSDIO || BCMPCIE */
11020 }
11021
11022 /* query for 'wlc_ver' to get version info from firmware */
11023	/* zero the structure before querying */
11024 memset_s(&wlc_ver, sizeof(wl_wlc_version_t), 0,
11025 sizeof(wl_wlc_version_t));
11026 ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
11027 sizeof(wl_wlc_version_t), FALSE);
11028 if (ret < 0)
11029 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11030 else {
11031 dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
11032 dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
11033 }
11034#ifdef BOARD_HIKEY
11035 /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
11036 if (strstr(fw_version, "WLTEST") != NULL) {
11037 DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
11038 __FUNCTION__));
11039 op_mode = DHD_FLAG_MFG_MODE;
11040 }
11041#endif /* BOARD_HIKEY */
11042
11043 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11044 (op_mode == DHD_FLAG_MFG_MODE)) {
11045 dhd->op_mode = DHD_FLAG_MFG_MODE;
11046#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11047 /* disable runtimePM by default in MFG mode. */
11048 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
11049#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11050#ifdef DHD_PCIE_RUNTIMEPM
11051 /* Disable RuntimePM in mfg mode */
11052 DHD_DISABLE_RUNTIME_PM(dhd);
11053		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
11054#endif /* DHD_PCIE_RUNTIMEPM */
11055		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
11056		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
11057		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
11058 __FUNCTION__));
11059
11060#if defined(ARP_OFFLOAD_SUPPORT)
11061 dhd->arpoe_enable = FALSE;
11062#endif /* ARP_OFFLOAD_SUPPORT */
11063#ifdef PKT_FILTER_SUPPORT
11064 dhd_pkt_filter_enable = FALSE;
11065#endif /* PKT_FILTER_SUPPORT */
11066#ifndef CUSTOM_SET_ANTNPM
11067 if (FW_SUPPORTED(dhd, rsdb)) {
11068 wl_config_t rsdb_mode;
11069 memset(&rsdb_mode, 0, sizeof(rsdb_mode));
11070 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
11071 NULL, 0, TRUE);
11072 if (ret < 0) {
11073 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
11074 __FUNCTION__, ret));
11075 }
11076 }
11077#endif /* !CUSTOM_SET_ANTNPM */
11078 } else {
11079 uint32 concurrent_mode = 0;
11080 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
11081 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
11082
11083 BCM_REFERENCE(concurrent_mode);
11084
11085 dhd->op_mode = DHD_FLAG_STA_MODE;
11086
11087 BCM_REFERENCE(p2p_ea);
11088#if !defined(AP) && defined(WLP2P)
11089 if ((concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
11090 dhd->op_mode |= concurrent_mode;
11091 }
11092
11093 /* Check if we are enabling p2p */
11094 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11095 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
11096 ETHER_SET_LOCALADDR(&p2p_ea);
11097 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
11098 NULL, 0, TRUE);
11099 if (ret < 0)
11100 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
11101 else
11102 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11103 }
11104#endif
11105
11106 }
11107
11108#ifdef BCMPCIE_OOB_HOST_WAKE
11109 ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
11110 sizeof(hostwake_oob), FALSE);
11111 if (ret < 0) {
11112 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
11113 } else {
11114 if (hostwake_oob == 0) {
11115 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
11116 __FUNCTION__));
11117 ret = BCME_UNSUPPORTED;
11118 goto done;
11119 } else {
11120 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
11121 }
11122 }
11123#endif /* BCMPCIE_OOB_HOST_WAKE */
11124
11125#ifdef DNGL_AXI_ERROR_LOGGING
11126 ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
11127 sizeof(dhd->axierror_logbuf_addr), FALSE);
11128 if (ret < 0) {
11129 DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
11130 dhd->axierror_logbuf_addr = 0;
11131 } else {
11132 DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
11133 __FUNCTION__, dhd->axierror_logbuf_addr));
11134 }
11135#endif /* DNGL_AXI_ERROR_LOGGING */
11136
11137#ifdef GET_CUSTOM_MAC_ENABLE
11138 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
11139 if (!ret) {
11140 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
11141 TRUE);
11142 if (ret < 0) {
11143 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
11144 ret = BCME_NOTUP;
11145 goto done;
11146 }
11147 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
11148 } else
11149#endif /* GET_CUSTOM_MAC_ENABLE */
11150 {
11151 /* Get the default device MAC address directly from firmware */
11152 ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11153 if (ret < 0) {
11154 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
11155 ret = BCME_NOTUP;
11156 goto done;
11157 }
11158
11159 DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
11160 __FUNCTION__, MAC2STRDBG(&buf)));
11161
11162#ifdef MACADDR_PROVISION_ENFORCED
11163 if (ETHER_IS_LOCALADDR(buf)) {
11164 DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
11165 ret = BCME_BADADDR;
11166 goto done;
11167 }
11168#endif /* MACADDR_PROVISION_ENFORCED */
11169
11170 /* Update public MAC address after reading from Firmware */
11171 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
11172 }
11173
11174 if (ETHER_ISNULLADDR(dhd->mac.octet)) {
11175 DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
11176 ret = BCME_BADADDR;
11177 goto done;
11178 } else {
11179 (void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
11180 dhd->mac.octet, ETHER_ADDR_LEN);
11181 }
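	/* Note: perm_addr now carries the MAC obtained from provisioning/FW; the
	 * randomization done in the legacy path below only rewrites cur_etheraddr,
	 * so perm_addr keeps reporting the original factory address.
	 */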
11182
11183 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
11184 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
11185 goto done;
11186 }
11187
11188	/* get capabilities from firmware */
11189 {
11190 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
11191 memset(dhd->fw_capabilities, 0, cap_buf_size);
11192 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
11193 FALSE);
11194 if (ret < 0) {
11195 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11196 __FUNCTION__, ret));
11197 return 0;
11198 }
11199
11200 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
11201 dhd->fw_capabilities[0] = ' ';
11202 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
11203 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
11204 }
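	/* The padding above turns fw_capabilities into " cap1 cap2 ... " so a
	 * feature test can do a whole-word substring match, e.g. (sketch):
	 *
	 *   strstr(dhd->fw_capabilities, " p2p ") != NULL
	 *
	 * which is how the FW_SUPPORTED()-style checks used below avoid false
	 * hits on longer capability names.
	 */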
11205
11206 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
11207 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
11208#if defined(DHD_BLOB_EXISTENCE_CHECK)
11209 if (!dhd->is_blob)
11210#endif /* DHD_BLOB_EXISTENCE_CHECK */
11211 {
11212 /* get a ccode and revision for the country code */
11213#if defined(CUSTOM_COUNTRY_CODE)
11214 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
11215 &dhd->dhd_cspec, dhd->dhd_cflags);
11216#else
11217 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
11218 &dhd->dhd_cspec);
11219#endif /* CUSTOM_COUNTRY_CODE */
11220 }
11221
11222#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11223 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
11224 dhd->info->rxthread_enabled = FALSE;
11225 else
11226 dhd->info->rxthread_enabled = TRUE;
11227#endif
11228 /* Set Country code */
11229 if (dhd->dhd_cspec.ccode[0] != 0) {
11230 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
11231 NULL, 0, TRUE);
11232 if (ret < 0)
11233 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
11234 }
11235
11236#if defined(ROAM_ENABLE)
11237 BCM_REFERENCE(roamvar);
11238#ifdef USE_WFA_CERT_CONF
11239 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
11240 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
11241 }
11242 /* roamvar is set to 0 by preinit fw, change only if roamvar is non-zero */
11243 if (roamvar != 0) {
11244		/* Disable built-in roaming to allow the ext supplicant to take care of roaming */
11245 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0,
11246 TRUE);
11247 if (ret < 0) {
11248 DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
11249 }
11250 }
11251#endif /* USE_WFA_CERT_CONF */
11252
11253#ifdef ROAM_AP_ENV_DETECTION
11254 /* Changed to GET iovar to read roam_env_mode */
11255 dhd->roam_env_detection = FALSE;
11256 ret = dhd_iovar(dhd, 0, "roam_env_detection", NULL, 0, (char *)&roam_env_mode,
11257 sizeof(roam_env_mode), FALSE);
11258 if (ret < 0) {
11259 DHD_ERROR(("%s: roam_env_detection IOVAR not present\n", __FUNCTION__));
11260 } else {
11261 if (roam_env_mode == AP_ENV_INDETERMINATE) {
11262 dhd->roam_env_detection = TRUE;
11263 }
11264 }
11265#endif /* ROAM_AP_ENV_DETECTION */
11266#ifdef CONFIG_ROAM_RSSI_LIMIT
11267 ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G);
11268 if (ret < 0) {
11269 DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret));
11270 }
11271#endif /* CONFIG_ROAM_RSSI_LIMIT */
11272#endif /* ROAM_ENABLE */
11273
11274#ifdef WLTDLS
11275 dhd->tdls_enable = FALSE;
11276	/* query tdls_enable */
11277 ret = dhd_iovar(dhd, 0, "tdls_enable", NULL, 0, (char *)&dhd->tdls_enable,
11278 sizeof(dhd->tdls_enable), FALSE);
11279 DHD_ERROR(("%s: tdls_enable=%d ret=%d\n", __FUNCTION__, dhd->tdls_enable, ret));
11280#endif /* WLTDLS */
11281
11282#ifdef DHD_PM_CONTROL_FROM_FILE
11283 sec_control_pm(dhd, &power_mode);
11284#endif /* DHD_PM_CONTROL_FROM_FILE */
11285
11286#ifdef MIMO_ANT_SETTING
11287 dhd_sel_ant_from_file(dhd);
11288#endif /* MIMO_ANT_SETTING */
11289
11290#if defined(SOFTAP)
11291 if (ap_fw_loaded == TRUE) {
11292 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
11293 }
11294#endif
11295
11296#if defined(KEEP_ALIVE)
11297 /* Set Keep Alive : be sure to use FW with -keepalive */
11298 if (!(dhd->op_mode &
11299 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
11300 if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
11301			DHD_ERROR(("%s set keepalive failed %d\n",
11302 __FUNCTION__, ret));
11303 }
11304#endif /* defined(KEEP_ALIVE) */
11305
11306 ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
11307 sizeof(event_log_max_sets), FALSE);
11308 if (ret == BCME_OK) {
11309 dhd->event_log_max_sets = event_log_max_sets;
11310 } else {
11311 dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
11312 }
11313 BCM_REFERENCE(iovbuf);
11314	/* Make sure max_sets is written first (with a wmb) and only then
11315	 * sets_queried; readers rely on this ordering when parsing logsets.
11316	 */
11317 OSL_SMP_WMB();
11318 dhd->event_log_max_sets_queried = TRUE;
11319 DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
11320 __FUNCTION__, dhd->event_log_max_sets, ret));
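	/* Pairing note (sketch): a reader should test event_log_max_sets_queried
	 * first and only then use event_log_max_sets, mirroring the OSL_SMP_WMB()
	 * publish order above:
	 *
	 *   if (dhd->event_log_max_sets_queried)
	 *       max_sets = dhd->event_log_max_sets;
	 */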
11321
11322#ifdef USE_WFA_CERT_CONF
11323#ifdef USE_WL_FRAMEBURST
11324 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
11325 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
11326 }
11327#endif /* USE_WL_FRAMEBURST */
11328 g_frameburst = frameburst;
11329#endif /* USE_WFA_CERT_CONF */
11330
11331#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11332	/* Disable Framebursting for SoftAP */
11333 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
11334 frameburst = 0;
11335 }
11336#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11337
11338 BCM_REFERENCE(frameburst);
11339#if defined(USE_WL_FRAMEBURST) || defined(DISABLE_WL_FRAMEBURST_SOFTAP)
11340	/* frameburst is set to 1 by preinit FW; change only if it differs */
11341 if (frameburst != 1) {
11342 /* Set frameburst to value */
11343 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
11344 sizeof(frameburst), TRUE, 0)) < 0) {
11345 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
11346 }
11347 }
11348#endif /* USE_WL_FRAMEBURST || DISABLE_WL_FRAMEBURST_SOFTAP */
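	/* Note: frameburst is toggled through the WLC_SET_FAKEFRAG ioctl
	 * (1 = enable, 0 = disable); unlike the legacy path below, this path
	 * skips the ioctl when the FW preinit default (1) is already correct.
	 */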
11349
11350 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
11351 if (iov_buf == NULL) {
11352 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
11353 ret = BCME_NOMEM;
11354 goto done;
11355 }
11356
11357#if defined(BCMSUP_4WAY_HANDSHAKE)
11358 /* Read 4-way handshake requirements */
11359 if (dhd_use_idsup == 1) {
11360 ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
11361 (char *)&iovbuf, sizeof(iovbuf), FALSE);
11362 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
11363 * in-dongle supplicant.
11364 */
11365 if (ret >= 0 || ret == BCME_NOTREADY)
11366 dhd->fw_4way_handshake = TRUE;
11367 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
11368 }
11369#endif /* BCMSUP_4WAY_HANDSHAKE */
11370
11371#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11372 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11373#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11374
11375#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11376 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11377#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11378
11379#ifdef ARP_OFFLOAD_SUPPORT
11380 DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
11381 dhd->arpoe_enable, dhd->arpol_configured));
11382#endif /* ARP_OFFLOAD_SUPPORT */
11383	/*
11384	 * Retaining pktfilter temporarily; once FW preinit includes this,
11385	 * it will be removed. Take care to skip the pktfilter check during
11386	 * each pktfilter removal.
11387	 */
11388#ifdef PKT_FILTER_SUPPORT
11389	/* Set up default definitions for pktfilter, enabled in suspend */
11390 dhd->pktfilter_count = 6;
11391 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11392 if (!FW_SUPPORTED(dhd, pf6)) {
11393 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11394 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11395 } else {
11396 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11397 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11398 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11399 }
11400 /* apply APP pktfilter */
11401 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
11402
11403#ifdef BLOCK_IPV6_PACKET
11404 /* Setup filter to allow only IPv4 unicast frames */
11405 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
11406 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
11407 " "
11408 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
11409#else
11410 /* Setup filter to allow only unicast */
11411 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11412#endif /* BLOCK_IPV6_PACKET */
11413
11414#ifdef PASS_IPV4_SUSPEND
11415	/* XXX customer wants to get IPv4 multicast packets */
11416 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
11417#else
11418	/* Add filter to pass multicast DNS packets and NOT filter them out as broadcast */
11419 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11420#endif /* PASS_IPV4_SUSPEND */
11421 if (FW_SUPPORTED(dhd, pf6)) {
11422		/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
11423		dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
11424		/* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
11425		dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
11426		/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
11427		dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
11428		/* Immediately pkt filter TYPE 6 Discard NETBIOS packet (port 137) */
11429 dhd->pktfilter[DHD_UDPNETBIOS_DROP_FILTER_NUM] = DISCARD_UDPNETBIOS;
11430 dhd->pktfilter_count = 11;
11431 }
11432
11433#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11434 dhd->pktfilter_count = 4;
11435 /* Setup filter to block broadcast and NAT Keepalive packets */
11436 /* discard all broadcast packets */
11437 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11438 /* discard NAT Keepalive packets */
11439 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11440 /* discard NAT Keepalive packets */
11441 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11442 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11443#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11444
11445#if defined(SOFTAP)
11446 if (ap_fw_loaded) {
11447		/* XXX Andrey: for SOFTAP disable pkt filters (if there were any) */
11448 dhd_enable_packet_filter(0, dhd);
11449 }
11450#endif /* defined(SOFTAP) */
11451 dhd_set_packet_filter(dhd);
11452#endif /* PKT_FILTER_SUPPORT */
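	/* Filter string format used above (sketch of the usual dhd pkt_filter
	 * pattern syntax): "<id> <polarity> <type> <offset> <bitmask> <pattern>".
	 * E.g. "105 0 0 12 0xFFFF 0x0806" matches ethertype 0x0806 (ARP) at
	 * frame offset 12.
	 */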
11453
11454 /* query for 'clmver' to get clm version info from firmware */
11455 bzero(buf, sizeof(buf));
11456 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11457 if (ret < 0)
11458 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11459 else {
11460 char *ver_temp_buf = NULL;
11461
11462 if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11463 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11464 } else {
11465 ptr = (ver_temp_buf + strlen("Data:"));
11466 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
11467				DHD_ERROR(("Couldn't find newline character\n"));
11468 } else {
11469 bzero(clm_version, CLM_VER_STR_LEN);
11470 strlcpy(clm_version, ver_temp_buf,
11471 MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN));
11472 DHD_INFO(("CLM version = %s\n", clm_version));
11473 }
11474 }
11475
11476#if defined(CUSTOMER_HW4_DEBUG)
11477 if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
11478 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
11479 } else {
11480 char tokenlim;
11481 ptr = (ver_temp_buf + strlen("Customization:"));
11482 if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
11483				DHD_ERROR(("Couldn't find project blob version "
11484					"or newline character\n"));
11485 } else if (tokenlim == '(') {
11486 snprintf(clm_version,
11487 CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
11488 clm_version, ver_temp_buf);
11489 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
11490 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
11491					DHD_ERROR(("Couldn't find newline character\n"));
11492 } else {
11493 snprintf(clm_version,
11494 strlen(clm_version) + strlen(ver_temp_buf),
11495 "%s%s", clm_version, ver_temp_buf);
11496 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
11497 clm_version));
11498
11499 }
11500 } else if (tokenlim == '\n') {
11501 snprintf(clm_version,
11502 strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
11503 "%s, Blob ver = Major : ", clm_version);
11504 snprintf(clm_version,
11505 strlen(clm_version) + strlen(ver_temp_buf) + 1,
11506 "%s%s", clm_version, ver_temp_buf);
11507 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
11508 }
11509 }
11510#endif /* CUSTOMER_HW4_DEBUG */
11511 if (strlen(clm_version)) {
11512 DHD_ERROR(("CLM version = %s\n", clm_version));
11513 } else {
11514 DHD_ERROR(("Couldn't find CLM version!\n"));
11515 }
11516
11517 }
11518
11519#ifdef WRITE_WLANINFO
11520 sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
11521#endif /* WRITE_WLANINFO */
11522
11523#ifdef GEN_SOFTAP_INFO_FILE
11524 sec_save_softap_info();
11525#endif /* GEN_SOFTAP_INFO_FILE */
11526
11527#ifdef PNO_SUPPORT
11528 if (!dhd->pno_state) {
11529 dhd_pno_init(dhd);
11530 }
11531#endif
11532
11533#ifdef RTT_SUPPORT
11534 if (dhd->rtt_state) {
11535 ret = dhd_rtt_init(dhd);
11536 if (ret < 0) {
11537 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
11538 }
11539 }
11540#endif
11541
11542#ifdef FILTER_IE
11543 /* Failure to configure filter IE is not a fatal error, ignore it. */
11544 if (FW_SUPPORTED(dhd, fie) &&
11545 !(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
11546 dhd_read_from_file(dhd);
11547 }
11548#endif /* FILTER_IE */
11549
11550#ifdef NDO_CONFIG_SUPPORT
11551 dhd->ndo_enable = FALSE;
11552 dhd->ndo_host_ip_overflow = FALSE;
11553 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
11554#endif /* NDO_CONFIG_SUPPORT */
11555
11556 /* ND offload version supported */
11557 dhd->ndo_version = dhd_ndo_get_version(dhd);
11558
11559 /* check dongle supports wbtext (product policy) or not */
11560 dhd->wbtext_support = FALSE;
11561 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
11562 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
11563 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
11564 }
11565 dhd->wbtext_policy = wnm_bsstrans_resp;
11566 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
11567 dhd->wbtext_support = TRUE;
11568 }
11569#ifndef WBTEXT
11570 /* driver can turn off wbtext feature through makefile */
11571 if (dhd->wbtext_support) {
11572 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
11573 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
11574 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
11575 DHD_ERROR(("failed to disable WBTEXT\n"));
11576 }
11577 }
11578#endif /* !WBTEXT */
11579
11580#ifdef DHD_NON_DMA_M2M_CORRUPTION
11581 /* check pcie non dma loopback */
11582 if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
11583 (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
11584 goto done;
11585 }
11586#endif /* DHD_NON_DMA_M2M_CORRUPTION */
11587
11588#ifdef CUSTOM_ASSOC_TIMEOUT
11589 /* set recreate_bi_timeout to increase assoc timeout :
11590 * 20 * 100TU * 1024 / 1000 = 2 secs
11591 * (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
11592 */
11593 if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
11594 CUSTOM_ASSOC_TIMEOUT,
11595 WLC_SET_VAR, TRUE, 0) != BCME_OK) {
11596 DHD_ERROR(("failed to set assoc timeout\n"));
11597 }
11598#endif /* CUSTOM_ASSOC_TIMEOUT */
11599
11600 BCM_REFERENCE(ret2);
11601#ifdef WL_MONITOR
11602 if (FW_SUPPORTED(dhd, monitor)) {
11603 dhd->monitor_enable = TRUE;
11604 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
11605 } else {
11606 dhd->monitor_enable = FALSE;
11607 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
11608 }
11609#endif /* WL_MONITOR */
11610
11611 /* store the preserve log set numbers */
11612 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
11613 != BCME_OK) {
11614 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
11615 }
11616
11617#ifdef CONFIG_SILENT_ROAM
11618 dhd->sroam_turn_on = TRUE;
11619 dhd->sroamed = FALSE;
11620#endif /* CONFIG_SILENT_ROAM */
11621
11622 dhd_set_bandlock(dhd);
11623
11624done:
11625 if (iov_buf) {
11626 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
11627 }
11628 return ret;
11629}
11630
11631int
11632dhd_legacy_preinit_ioctls(dhd_pub_t *dhd)
11633{
11634 int ret = 0;
11635 /* Room for "event_msgs_ext" + '\0' + bitvec */
11636// char iovbuf[WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE + 16];
11637 char *mask;
11638 uint32 buf_key_b4_m4 = 1;
11639 uint8 msglen;
11640 eventmsgs_ext_t *eventmask_msg = NULL;
11641 uint32 event_log_max_sets = 0;
11642 char* iov_buf = NULL;
11643 /* XXX: Use ret2 for return check of IOVARS that might return BCME_UNSUPPORTED,
11644 * based on FW build tag.
11645 */
11646 int ret2 = 0;
11647 uint32 wnm_cap = 0;
11648#if defined(BCMSUP_4WAY_HANDSHAKE)
11649 uint32 sup_wpa = 1;
11650#endif /* BCMSUP_4WAY_HANDSHAKE */
11651#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
11652 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
11653 uint32 ampdu_ba_wsize = 0;
11654#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
11655#if defined(CUSTOM_AMPDU_MPDU)
11656 int32 ampdu_mpdu = 0;
11657#endif
11658#if defined(CUSTOM_AMPDU_RELEASE)
11659 int32 ampdu_release = 0;
11660#endif
11661#if defined(CUSTOM_AMSDU_AGGSF)
11662 int32 amsdu_aggsf = 0;
11663#endif
11664
11665#if defined(BCMSDIO) || defined(BCMDBUS)
11666#ifdef PROP_TXSTATUS
11667 int wlfc_enable = TRUE;
11668#ifndef DISABLE_11N
11669 uint32 hostreorder = 1;
11670 uint wl_down = 1;
11671#endif /* DISABLE_11N */
11672#endif /* PROP_TXSTATUS */
11673#endif /* BCMSDIO || BCMDBUS */
11674
11675#ifndef PCIE_FULL_DONGLE
11676 uint32 wl_ap_isolate;
11677#endif /* PCIE_FULL_DONGLE */
11678 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
11679 uint wnm_bsstrans_resp = 0;
11680#ifdef SUPPORT_SET_CAC
11681 uint32 cac = 1;
11682#endif /* SUPPORT_SET_CAC */
11683#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11684 uint32 vht_features = 0; /* init to 0, will be set based on each support */
11685#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11686
11687#ifdef DHD_ENABLE_LPC
11688 uint32 lpc = 1;
11689#endif /* DHD_ENABLE_LPC */
11690 uint power_mode = PM_FAST;
11691#if defined(BCMSDIO)
11692 uint32 dongle_align = DHD_SDALIGN;
11693 uint32 glom = CUSTOM_GLOM_SETTING;
11694#endif /* defined(BCMSDIO) */
11695 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
11696 uint scancache_enab = TRUE;
11697#ifdef ENABLE_BCN_LI_BCN_WAKEUP
11698 uint32 bcn_li_bcn = 1;
11699#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11700 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
11701 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
11702 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
11703 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
11704 char buf[WLC_IOCTL_SMLEN];
11705 char *ptr;
11706 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
11707#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11708 wl_el_tag_params_t *el_tag = NULL;
11709#endif /* DHD_8021X_DUMP */
11710#ifdef ROAM_ENABLE
11711 uint roamvar = 0;
11712 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
11713 int roam_scan_period[2] = {10, WLC_BAND_ALL};
11714 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
11715#ifdef ROAM_AP_ENV_DETECTION
11716 int roam_env_mode = AP_ENV_INDETERMINATE;
11717#endif /* ROAM_AP_ENV_DETECTION */
11718#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
11719 int roam_fullscan_period = 60;
11720#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
11721 int roam_fullscan_period = 120;
11722#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
11723#ifdef DISABLE_BCNLOSS_ROAM
11724 uint roam_bcnloss_off = 1;
11725#endif /* DISABLE_BCNLOSS_ROAM */
11726#else
11727#ifdef DISABLE_BUILTIN_ROAM
11728 uint roamvar = 1;
11729#endif /* DISABLE_BUILTIN_ROAM */
11730#endif /* ROAM_ENABLE */
11731
11732#if defined(SOFTAP)
11733 uint dtim = 1;
11734#endif
11735/* XXX andrey: temporary fix for the dk8000 build error */
11736#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
11737 struct ether_addr p2p_ea;
11738#endif
11739#ifdef BCMCCX
11740 uint32 ccx = 1;
11741#endif
11742#ifdef SOFTAP_UAPSD_OFF
11743 uint32 wme_apsd = 0;
11744#endif /* SOFTAP_UAPSD_OFF */
11745#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
11746 uint32 apsta = 1; /* Enable APSTA mode */
11747#elif defined(SOFTAP_AND_GC)
11748 uint32 apsta = 0;
11749 int ap_mode = 1;
11750#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
11751#ifdef GET_CUSTOM_MAC_ENABLE
11752 struct ether_addr ea_addr;
11753 char hw_ether[62];
11754#endif /* GET_CUSTOM_MAC_ENABLE */
11755#ifdef OKC_SUPPORT
11756 uint32 okc = 1;
11757#endif
11758
11759#ifdef DISABLE_11N
11760 uint32 nmode = 0;
11761#endif /* DISABLE_11N */
11762
11763#ifdef USE_WL_TXBF
11764 uint32 txbf = 1;
11765#endif /* USE_WL_TXBF */
11766#ifdef DISABLE_TXBFR
11767 uint32 txbf_bfr_cap = 0;
11768#endif /* DISABLE_TXBFR */
11769#ifdef AMPDU_VO_ENABLE
11770 /* XXX: Enabling VO AMPDU to reduce FER */
11771 struct ampdu_tid_control tid;
11772#endif
11773#if defined(PROP_TXSTATUS)
11774#ifdef USE_WFA_CERT_CONF
11775 uint32 proptx = 0;
11776#endif /* USE_WFA_CERT_CONF */
11777#endif /* PROP_TXSTATUS */
11778#ifdef DHD_SET_FW_HIGHSPEED
11779 uint32 ack_ratio = 250;
11780 uint32 ack_ratio_depth = 64;
11781#endif /* DHD_SET_FW_HIGHSPEED */
11782#ifdef DISABLE_11N_PROPRIETARY_RATES
11783 uint32 ht_features = 0;
11784#endif /* DISABLE_11N_PROPRIETARY_RATES */
11785#ifdef CUSTOM_PSPRETEND_THR
11786 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
11787#endif
11788#ifdef CUSTOM_EVENT_PM_WAKE
11789 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
11790#endif /* CUSTOM_EVENT_PM_WAKE */
11791#ifdef DISABLE_PRUNED_SCAN
11792 uint32 scan_features = 0;
11793#endif /* DISABLE_PRUNED_SCAN */
11794#ifdef BCMPCIE_OOB_HOST_WAKE
11795 uint32 hostwake_oob = 0;
11796#endif /* BCMPCIE_OOB_HOST_WAKE */
11797#ifdef EVENT_LOG_RATE_HC
11798 /* threshold number of lines per second */
11799#define EVENT_LOG_RATE_HC_THRESHOLD 1000
11800 uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
11801#endif /* EVENT_LOG_RATE_HC */
11802#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
11803 uint32 btmdelta = WBTEXT_BTMDELTA;
11804#endif /* WBTEXT && WBTEXT_BTMDELTA */
11805#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
11806 uint32 rrm_bcn_req_thrtl_win = RRM_BCNREQ_MAX_CHAN_TIME * 2;
11807 uint32 rrm_bcn_req_max_off_chan_time = RRM_BCNREQ_MAX_CHAN_TIME;
11808#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
11809#ifdef PKT_FILTER_SUPPORT
11810 dhd_pkt_filter_enable = TRUE;
11811#ifdef APF
11812 dhd->apf_set = FALSE;
11813#endif /* APF */
11814#endif /* PKT_FILTER_SUPPORT */
11815 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
11816#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
11817 dhd->max_dtim_enable = TRUE;
11818#else
11819 dhd->max_dtim_enable = FALSE;
11820#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
11821 dhd->disable_dtim_in_suspend = FALSE;
11822#ifdef CUSTOM_SET_OCLOFF
11823 dhd->ocl_off = FALSE;
11824#endif /* CUSTOM_SET_OCLOFF */
11825#ifdef SUPPORT_SET_TID
11826 dhd->tid_mode = SET_TID_OFF;
11827 dhd->target_uid = 0;
11828 dhd->target_tid = 0;
11829#endif /* SUPPORT_SET_TID */
11830 DHD_TRACE(("Enter %s\n", __FUNCTION__));
11831
11832#ifdef DHDTCPACK_SUPPRESS
11833 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
11834#endif
11835 dhd->op_mode = 0;
11836#ifdef ARP_OFFLOAD_SUPPORT
11837	/* arpoe will be applied from the suspend context */
11838 dhd->arpoe_enable = TRUE;
11839 dhd->arpol_configured = FALSE;
11840#endif /* ARP_OFFLOAD_SUPPORT */
11841
11842 /* clear AP flags */
11843#if defined(CUSTOM_COUNTRY_CODE)
11844 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
11845#endif /* CUSTOM_COUNTRY_CODE */
11846
11847#ifdef CUSTOMER_HW4_DEBUG
11848 if (!dhd_validate_chipid(dhd)) {
11849		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) are mismatched\n",
11850 __FUNCTION__, dhd_bus_chip_id(dhd)));
11851#ifndef SUPPORT_MULTIPLE_CHIPS
11852 ret = BCME_BADARG;
11853 goto done;
11854#endif /* !SUPPORT_MULTIPLE_CHIPS */
11855 }
11856#endif /* CUSTOMER_HW4_DEBUG */
11857
11858 /* query for 'ver' to get version info from firmware */
11859 memset(buf, 0, sizeof(buf));
11860 ptr = buf;
11861 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11862 if (ret < 0)
11863 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11864 else {
11865 bcmstrtok(&ptr, "\n", 0);
11866 /* Print fw version info */
11867 strncpy(fw_version, buf, FW_VER_STR_LEN);
11868 fw_version[FW_VER_STR_LEN-1] = '\0';
11869 }
11870
11871#ifdef BOARD_HIKEY
11872 /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
11873 if (strstr(fw_version, "WLTEST") != NULL) {
11874 DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
11875 __FUNCTION__));
11876 op_mode = DHD_FLAG_MFG_MODE;
11877 }
11878#endif /* BOARD_HIKEY */
11879
11880 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11881 (op_mode == DHD_FLAG_MFG_MODE)) {
11882 dhd->op_mode = DHD_FLAG_MFG_MODE;
11883#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11884 /* disable runtimePM by default in MFG mode. */
11885 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
11886#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11887#ifdef DHD_PCIE_RUNTIMEPM
11888 /* Disable RuntimePM in mfg mode */
11889 DHD_DISABLE_RUNTIME_PM(dhd);
11890		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
11891#endif /* DHD_PCIE_RUNTIMEPM */
11892		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
11893		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
11894		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
11895 __FUNCTION__));
11896 } else {
11897 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
11898 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
11899 }
11900#ifdef BCMPCIE_OOB_HOST_WAKE
11901 ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
11902 sizeof(hostwake_oob), FALSE);
11903 if (ret < 0) {
11904 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
11905 } else {
11906 if (hostwake_oob == 0) {
11907 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
11908 __FUNCTION__));
11909 ret = BCME_UNSUPPORTED;
11910 goto done;
11911 } else {
11912 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
11913 }
11914 }
11915#endif /* BCMPCIE_OOB_HOST_WAKE */
11916
11917#ifdef DNGL_AXI_ERROR_LOGGING
11918 ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
11919 sizeof(dhd->axierror_logbuf_addr), FALSE);
11920 if (ret < 0) {
11921 DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
11922 dhd->axierror_logbuf_addr = 0;
11923 } else {
11924 DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n",
11925 __FUNCTION__, dhd->axierror_logbuf_addr));
11926 }
11927#endif /* DNGL_AXI_ERROR_LOGGING */
11928
11929#ifdef EVENT_LOG_RATE_HC
11930 ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
11931 sizeof(event_log_rate_hc), NULL, 0, TRUE);
11932 if (ret < 0) {
11933 DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
11934 } else {
11935 DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
11936 event_log_rate_hc));
11937 }
11938#endif /* EVENT_LOG_RATE_HC */
11939
11940#ifdef GET_CUSTOM_MAC_ENABLE
11941 memset(hw_ether, 0, sizeof(hw_ether));
11942 ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, iface_name);
11943#ifdef GET_CUSTOM_MAC_FROM_CONFIG
11944 if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
11945 ret = 0;
11946 } else
11947#endif
11948 if (!ret) {
11949 memset(buf, 0, sizeof(buf));
11950#ifdef GET_CUSTOM_MAC_FROM_CONFIG
11951 memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
11952#endif
11953 bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
11954 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
11955 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
11956 if (ret < 0) {
11957 memset(buf, 0, sizeof(buf));
11958 bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
11959 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
11960 if (ret) {
11961 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
11962 __FUNCTION__, MAC2STRDBG(hw_ether), ret));
11963 prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN], sizeof(hw_ether)-ETHER_ADDR_LEN);
11964 ret = BCME_NOTUP;
11965 goto done;
11966 }
11967 }
11968 } else {
11969 DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
11970 ret = BCME_NOTUP;
11971 goto done;
11972 }
11973#endif /* GET_CUSTOM_MAC_ENABLE */
11974 /* Get the default device MAC address directly from firmware */
11975 ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11976 if (ret < 0) {
11977 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
11978 ret = BCME_NOTUP;
11979 goto done;
11980 }
11981
11982 DHD_ERROR(("%s: use firmware generated mac_address "MACDBG"\n",
11983 __FUNCTION__, MAC2STRDBG(&buf)));
11984
11985#ifdef MACADDR_PROVISION_ENFORCED
11986 if (ETHER_IS_LOCALADDR(buf)) {
11987 DHD_ERROR(("%s: error! not using provision mac addr!\n", __FUNCTION__));
11988 ret = BCME_BADADDR;
11989 goto done;
11990 }
11991#endif /* MACADDR_PROVISION_ENFORCED */
11992
11993 /* Update public MAC address after reading from Firmware */
11994 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
11995
11996 if (ETHER_ISNULLADDR(dhd->mac.octet)) {
11997 DHD_ERROR(("%s: NULL MAC address during pre-init\n", __FUNCTION__));
11998 ret = BCME_BADADDR;
11999 goto done;
12000 } else {
12001 (void)memcpy_s(dhd_linux_get_primary_netdev(dhd)->perm_addr, ETHER_ADDR_LEN,
12002 dhd->mac.octet, ETHER_ADDR_LEN);
12003 }
12004#if defined(WL_STA_ASSOC_RAND) && defined(WL_STA_INIT_RAND)
12005	/* Set cur_etheraddr of the primary interface to a randomized address to
12006	 * ensure that any action frame transmission happens using the randomized
12007	 * macaddr; the primary netdev->perm_addr will hold the original factory MAC.
12008	 */
12009 {
12010 if ((ret = dhd_update_rand_mac_addr(dhd)) < 0) {
12011 DHD_ERROR(("%s: failed to set macaddress\n", __FUNCTION__));
12012 goto done;
12013 }
12014 }
12015#endif /* WL_STA_ASSOC_RAND && WL_STA_INIT_RAND */
12016
12017 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
12018 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
12019 goto done;
12020 }
12021
12022	/* get capabilities from firmware */
12023 {
12024 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
12025 memset(dhd->fw_capabilities, 0, cap_buf_size);
12026 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
12027 FALSE);
12028 if (ret < 0) {
12029 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
12030 __FUNCTION__, ret));
12031 return 0;
12032 }
12033
12034 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
12035 dhd->fw_capabilities[0] = ' ';
12036 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
12037 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
12038 }
12039
12040 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
12041 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
12042#ifdef SET_RANDOM_MAC_SOFTAP
12043 uint rand_mac;
12044#endif /* SET_RANDOM_MAC_SOFTAP */
12045 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
12046#ifdef PKT_FILTER_SUPPORT
12047 if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
12048 dhd_pkt_filter_enable = TRUE;
12049 else
12050 dhd_pkt_filter_enable = FALSE;
12051#endif
12052#ifdef SET_RANDOM_MAC_SOFTAP
12053 SRANDOM32((uint)jiffies);
12054 rand_mac = RANDOM32();
12055 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
12056 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
12057 iovbuf[2] = (unsigned char)vendor_oui;
12058 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
12059 iovbuf[4] = (unsigned char)(rand_mac >> 8);
12060 iovbuf[5] = (unsigned char)(rand_mac >> 16);
12061
12062 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
12063 TRUE);
12064 if (ret < 0) {
12065 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
12066 } else
12067 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
12068#endif /* SET_RANDOM_MAC_SOFTAP */
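	/* Layout of the randomized SoftAP MAC built above: bytes 0-2 carry the
	 * vendor OUI with the locally-administered bit (0x02) forced on, and
	 * bytes 3-5 come from RANDOM32(); byte 3 keeps its high nibble at 0xF
	 * (the | 0xF0 above), which is assumed to keep the generated range
	 * apart from other address allocations.
	 */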
12069#ifdef USE_DYNAMIC_F2_BLKSIZE
12070 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
12071#endif /* USE_DYNAMIC_F2_BLKSIZE */
12072#ifdef SOFTAP_UAPSD_OFF
12073 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
12074 TRUE);
12075 if (ret < 0) {
12076 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
12077 __FUNCTION__, ret));
12078 }
12079#endif /* SOFTAP_UAPSD_OFF */
12080
12081 /* set AP flag for specific country code of SOFTAP */
12082#if defined(CUSTOM_COUNTRY_CODE)
12083 dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
12084#endif /* CUSTOM_COUNTRY_CODE */
12085 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
12086 (op_mode == DHD_FLAG_MFG_MODE)) {
12087#if defined(ARP_OFFLOAD_SUPPORT)
12088 dhd->arpoe_enable = FALSE;
12089#endif /* ARP_OFFLOAD_SUPPORT */
12090#ifdef PKT_FILTER_SUPPORT
12091 dhd_pkt_filter_enable = FALSE;
12092#endif /* PKT_FILTER_SUPPORT */
12093 dhd->op_mode = DHD_FLAG_MFG_MODE;
12094#ifdef USE_DYNAMIC_F2_BLKSIZE
12095		/* XXX The 'wl counters' command triggers an SDIO bus error
12096		 * if the F2 block size is greater than 128 bytes with 4354A1
12097		 * manufacturing firmware. To avoid this problem, the F2 block
12098		 * size is set to 128 bytes only for DHD_FLAG_MFG_MODE.
12099		 * There is no problem for other chipsets since no big data
12100		 * transactions happen over the SDIO bus during the
12101		 * manufacturing test.
12102		 */
12103 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
12104#endif /* USE_DYNAMIC_F2_BLKSIZE */
12105#ifndef CUSTOM_SET_ANTNPM
12106 if (FW_SUPPORTED(dhd, rsdb)) {
12107 wl_config_t rsdb_mode;
12108 memset(&rsdb_mode, 0, sizeof(rsdb_mode));
12109 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
12110 NULL, 0, TRUE);
12111 if (ret < 0) {
12112 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
12113 __FUNCTION__, ret));
12114 }
12115 }
12116#endif /* !CUSTOM_SET_ANTNPM */
12117 } else {
12118 uint32 concurrent_mode = 0;
12119 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
12120 (op_mode == DHD_FLAG_P2P_MODE)) {
12121#ifdef PKT_FILTER_SUPPORT
12122 dhd_pkt_filter_enable = FALSE;
12123#endif
12124 dhd->op_mode = DHD_FLAG_P2P_MODE;
12125 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
12126 (op_mode == DHD_FLAG_IBSS_MODE)) {
12127 dhd->op_mode = DHD_FLAG_IBSS_MODE;
12128 } else
12129 dhd->op_mode = DHD_FLAG_STA_MODE;
12130#if !defined(AP) && defined(WLP2P)
12131 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
12132 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
12133 dhd->op_mode |= concurrent_mode;
12134 }
12135
12136 /* Check if we are enabling p2p */
12137 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
12138 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
12139 TRUE);
12140 if (ret < 0)
12141 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
12142
12143#if defined(SOFTAP_AND_GC)
12144 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
12145 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
12146 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
12147 }
12148#endif
12149 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
12150 ETHER_SET_LOCALADDR(&p2p_ea);
12151 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
12152 NULL, 0, TRUE);
12153 if (ret < 0)
12154 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
12155 else
12156 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
12157 }
12158#else
12159 (void)concurrent_mode;
12160#endif
12161 }
12162
12163#ifdef DISABLE_PRUNED_SCAN
12164 if (FW_SUPPORTED(dhd, rsdb)) {
12165 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
12166 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
12167 if (ret < 0) {
12168 if (ret == BCME_UNSUPPORTED) {
12169 DHD_ERROR(("%s get scan_features, UNSUPPORTED\n",
12170 __FUNCTION__));
12171 } else {
12172 DHD_ERROR(("%s get scan_features err(%d)\n",
12173 __FUNCTION__, ret));
12174 }
12175
12176 } else {
12177 memcpy(&scan_features, iovbuf, 4);
12178 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
12179 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
12180 sizeof(scan_features), NULL, 0, TRUE);
12181 if (ret < 0) {
12182 DHD_ERROR(("%s set scan_features err(%d)\n",
12183 __FUNCTION__, ret));
12184 }
12185 }
12186 }
12187#endif /* DISABLE_PRUNED_SCAN */
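	/* The block above is a read-modify-write: read scan_features, clear only
	 * RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM, then write it back, so the other
	 * scan feature bits are preserved.
	 */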
12188
12189 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
12190 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
12191#if defined(DHD_BLOB_EXISTENCE_CHECK)
12192 if (!dhd->is_blob)
12193#endif /* DHD_BLOB_EXISTENCE_CHECK */
12194 {
12195 /* get a ccode and revision for the country code */
12196#if defined(CUSTOM_COUNTRY_CODE)
12197 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
12198 &dhd->dhd_cspec, dhd->dhd_cflags);
12199#else
12200 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
12201 &dhd->dhd_cspec);
12202#endif /* CUSTOM_COUNTRY_CODE */
12203 }
12204
12205#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
12206 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
12207 dhd->info->rxthread_enabled = FALSE;
12208 else
12209 dhd->info->rxthread_enabled = TRUE;
12210#endif
12211 /* Set Country code */
12212 if (dhd->dhd_cspec.ccode[0] != 0) {
12213 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
12214 NULL, 0, TRUE);
12215 if (ret < 0)
12216 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
12217 }
12218
12219 /* Set Listen Interval */
12220 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
12221 NULL, 0, TRUE);
12222 if (ret < 0)
12223 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
12224
12225#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
12226#ifdef USE_WFA_CERT_CONF
12227 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
12228 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
12229 }
12230#endif /* USE_WFA_CERT_CONF */
12231	/* Disable built-in roaming to allow the ext supplicant to take care of roaming */
12232 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
12233 if (ret < 0) {
12234 DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
12235 }
12236#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
12237#if defined(ROAM_ENABLE)
12238#ifdef DISABLE_BCNLOSS_ROAM
12239 ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
12240 sizeof(roam_bcnloss_off), NULL, 0, TRUE);
12241 if (ret < 0) {
12242 DHD_ERROR(("%s roam_bcnloss_off failed %d\n", __FUNCTION__, ret));
12243 }
12244#endif /* DISABLE_BCNLOSS_ROAM */
12245 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
12246 sizeof(roam_trigger), TRUE, 0)) < 0)
12247 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
12248 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
12249 sizeof(roam_scan_period), TRUE, 0)) < 0)
12250 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
12251	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
12252 sizeof(roam_delta), TRUE, 0)) < 0)
12253 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
12254 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
12255 sizeof(roam_fullscan_period), NULL, 0, TRUE);
12256 if (ret < 0)
12257 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
12258#ifdef ROAM_AP_ENV_DETECTION
12259 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
12260 if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
12261 sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
12262 dhd->roam_env_detection = TRUE;
12263 else
12264 dhd->roam_env_detection = FALSE;
12265 }
12266#endif /* ROAM_AP_ENV_DETECTION */
12267#ifdef CONFIG_ROAM_RSSI_LIMIT
12268 ret = dhd_roam_rssi_limit_set(dhd, CUSTOM_ROAMRSSI_2G, CUSTOM_ROAMRSSI_5G);
12269 if (ret < 0) {
12270 DHD_ERROR(("%s set roam_rssi_limit failed ret %d\n", __FUNCTION__, ret));
12271 }
12272#endif /* CONFIG_ROAM_RSSI_LIMIT */
12273#endif /* ROAM_ENABLE */
12274
12275#ifdef CUSTOM_EVENT_PM_WAKE
12276 /* XXX need to check time value */
12277 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
12278 sizeof(pm_awake_thresh), NULL, 0, TRUE);
12279 if (ret < 0) {
12280 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
12281 }
12282#endif /* CUSTOM_EVENT_PM_WAKE */
12283#ifdef OKC_SUPPORT
12284 dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
12285#endif
12286#ifdef BCMCCX
12287 dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
12288#endif /* BCMCCX */
12289
12290#ifdef WLTDLS
12291 dhd->tdls_enable = FALSE;
12292 dhd_tdls_set_mode(dhd, false);
12293#endif /* WLTDLS */
12294
12295#ifdef DHD_ENABLE_LPC
12296 /* Set lpc 1 */
12297 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
12298 if (ret < 0) {
12299 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
12300
12301 if (ret == BCME_NOTDOWN) {
12302 uint wl_down = 1;
12303 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
12304 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
12305 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
12306
12307 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
12308 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
12309 }
12310 }
12311#endif /* DHD_ENABLE_LPC */
12312
12313#ifdef WLADPS
12314 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
12315 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK &&
12316 (ret != BCME_UNSUPPORTED)) {
12317 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
12318 __FUNCTION__, ret));
12319 }
12320 }
12321#endif /* WLADPS */
12322
12323#ifdef DHD_PM_CONTROL_FROM_FILE
12324 sec_control_pm(dhd, &power_mode);
12325#else
12326 /* Set PowerSave mode */
12327 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
12328#endif /* DHD_PM_CONTROL_FROM_FILE */
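	/* PM_FAST selects fast power-save; per the public wlioctl conventions the
	 * WLC_SET_PM values are 0 = CAM (always awake), 1 = PS (max power save),
	 * 2 = fast PS (PM_OFF/PM_MAX/PM_FAST).
	 */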
12329
12330#if defined(BCMSDIO)
12331 /* Match Host and Dongle rx alignment */
12332 ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
12333 NULL, 0, TRUE);
12334 if (ret < 0) {
12335 DHD_ERROR(("%s set bus:txglomalign failed %d\n", __FUNCTION__, ret));
12336 }
12337
12338#ifdef USE_WFA_CERT_CONF
12339 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
12340 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
12341 }
12342#endif /* USE_WFA_CERT_CONF */
12343 if (glom != DEFAULT_GLOM_VALUE) {
12344 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
12345 ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
12346 if (ret < 0) {
12347 DHD_ERROR(("%s set bus:txglom failed %d\n", __FUNCTION__, ret));
12348 }
12349 }
12350#endif /* defined(BCMSDIO) */
12351
12352 /* Setup timeout if Beacons are lost and roam is off to report link down */
12353 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout),
12354 NULL, 0, TRUE);
12355 if (ret < 0) {
12356 DHD_ERROR(("%s set bcn_timeout failed %d\n", __FUNCTION__, ret));
12357 }
12358
12359 /* Setup assoc_retry_max count to reconnect target AP in dongle */
12360 ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max),
12361 NULL, 0, TRUE);
12362 if (ret < 0) {
12363 DHD_ERROR(("%s set assoc_retry_max failed %d\n", __FUNCTION__, ret));
12364 }
12365
12366#if defined(AP) && !defined(WLP2P)
12367 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
12368 if (ret < 0) {
12369 DHD_ERROR(("%s set apsta failed %d\n", __FUNCTION__, ret));
12370 }
12371
12372#endif /* defined(AP) && !defined(WLP2P) */
12373
12374#ifdef MIMO_ANT_SETTING
12375 dhd_sel_ant_from_file(dhd);
12376#endif /* MIMO_ANT_SETTING */
12377
12378#if defined(SOFTAP)
12379 if (ap_fw_loaded == TRUE) {
12380 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
12381 }
12382#endif
12383
12384#if defined(KEEP_ALIVE)
12385 {
12386 /* Set Keep Alive : be sure to use FW with -keepalive */
12387 int res;
12388
12389#if defined(SOFTAP)
12390 if (ap_fw_loaded == FALSE)
12391#endif
12392 if (!(dhd->op_mode &
12393 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
12394 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
12395				DHD_ERROR(("%s set keepalive failed %d\n",
12396 __FUNCTION__, res));
12397 }
12398 }
12399#endif /* defined(KEEP_ALIVE) */
12400
12401#ifdef USE_WL_TXBF
12402 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
12403 if (ret < 0)
12404 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
12405
12406#endif /* USE_WL_TXBF */
12407
12408 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
12409 0, TRUE);
12410 if (ret < 0) {
12411 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
12412 }
12413
12414 ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
12415 sizeof(event_log_max_sets), FALSE);
12416 if (ret == BCME_OK) {
12417 dhd->event_log_max_sets = event_log_max_sets;
12418 } else {
12419 dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
12420 }
12421	/* Make sure max_sets is written first (with a wmb) and only then
12422	 * sets_queried; readers rely on this ordering when parsing logsets.
12423	 */
12424 OSL_SMP_WMB();
12425 dhd->event_log_max_sets_queried = TRUE;
12426 DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
12427 __FUNCTION__, dhd->event_log_max_sets, ret));
12428
12429#ifdef DISABLE_TXBFR
12430 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
12431 0, TRUE);
12432 if (ret < 0) {
12433 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
12434 }
12435#endif /* DISABLE_TXBFR */
12436
12437#ifdef USE_WFA_CERT_CONF
12438#ifdef USE_WL_FRAMEBURST
12439 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
12440 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
12441 }
12442#endif /* USE_WL_FRAMEBURST */
12443 g_frameburst = frameburst;
12444#endif /* USE_WFA_CERT_CONF */
12445#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
12446	/* Disable Framebursting for SoftAP */
12447 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
12448 frameburst = 0;
12449 }
12450#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
12451 /* Set frameburst to value */
12452 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
12453 sizeof(frameburst), TRUE, 0)) < 0) {
12454 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
12455 }
12456#ifdef DHD_SET_FW_HIGHSPEED
12457 /* Set ack_ratio */
12458 ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
12459 if (ret < 0) {
12460 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
12461 }
12462
12463 /* Set ack_ratio_depth */
12464 ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
12465 sizeof(ack_ratio_depth), NULL, 0, TRUE);
12466 if (ret < 0) {
12467 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
12468 }
12469#endif /* DHD_SET_FW_HIGHSPEED */
12470
12471 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
12472 if (iov_buf == NULL) {
12473 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
12474 ret = BCME_NOMEM;
12475 goto done;
12476 }
12477
12478 BCM_REFERENCE(ret2);
12479
12480#ifdef WLAIBSS
12481 /* Apply AIBSS configurations */
12482 if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
12483 DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
12484 __FUNCTION__, ret));
12485 goto done;
12486 }
12487#endif /* WLAIBSS */
12488
12489#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
12490 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
12491 /* Set ampdu ba wsize to 64 or 16 */
12492#ifdef CUSTOM_AMPDU_BA_WSIZE
12493 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
12494#endif
12495#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
12496 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
12497 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
12498#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
12499 if (ampdu_ba_wsize != 0) {
12500 ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
12501 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
12502 if (ret < 0) {
12503 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
12504 __FUNCTION__, ampdu_ba_wsize, ret));
12505 }
12506 }
12507#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
12508
12509#if defined(CUSTOM_AMPDU_MPDU)
12510 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
12511 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
12512 ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
12513 NULL, 0, TRUE);
12514 if (ret < 0) {
12515 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
12516 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
12517 }
12518 }
12519#endif /* CUSTOM_AMPDU_MPDU */
12520
12521#if defined(CUSTOM_AMPDU_RELEASE)
12522 ampdu_release = CUSTOM_AMPDU_RELEASE;
12523 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
12524 ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
12525 sizeof(ampdu_release), NULL, 0, TRUE);
12526 if (ret < 0) {
12527 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
12528 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
12529 }
12530 }
12531#endif /* CUSTOM_AMPDU_RELEASE */
12532
12533#if defined(CUSTOM_AMSDU_AGGSF)
12534 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
12535 if (amsdu_aggsf != 0) {
12536 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
12537 NULL, 0, TRUE);
12538 if (ret < 0) {
12539 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
12540 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
12541 }
12542 }
12543#endif /* CUSTOM_AMSDU_AGGSF */
12544
12545#if defined(BCMSUP_4WAY_HANDSHAKE)
12546 /* Read 4-way handshake requirements */
12547 if (dhd_use_idsup == 1) {
12548 ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
12549 (char *)&iovbuf, sizeof(iovbuf), FALSE);
12550 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
12551 * in-dongle supplicant.
12552 */
12553 if (ret >= 0 || ret == BCME_NOTREADY)
12554 dhd->fw_4way_handshake = TRUE;
12555 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
12556 }
12557#endif /* BCMSUP_4WAY_HANDSHAKE */
12558#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
12559 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
12560 (char *)&vht_features, sizeof(vht_features), FALSE);
12561 if (ret < 0) {
12562 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
12563 vht_features = 0;
12564 } else {
12565#ifdef SUPPORT_2G_VHT
12566 vht_features |= 0x3; /* 2G support */
12567#endif /* SUPPORT_2G_VHT */
12568#ifdef SUPPORT_5G_1024QAM_VHT
12569 vht_features |= 0x6; /* 5G 1024 QAM support */
12570#endif /* SUPPORT_5G_1024QAM_VHT */
12571 }
12572 if (vht_features) {
12573 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
12574 NULL, 0, TRUE);
12575 if (ret < 0) {
12576 if (ret == BCME_NOTDOWN) {
12577 uint wl_down = 1;
12578 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
12579 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
12580 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
12581 " vht_features = 0x%x\n",
12582 __FUNCTION__, ret, vht_features));
12583
12584 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
12585 sizeof(vht_features), NULL, 0, TRUE);
12586
12587 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
12588 }
12589 if (ret != BCME_BADOPTION) {
12590 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
12591 } else {
12592 DHD_INFO(("%s vht_features ret(%d) - need to check BANDLOCK\n",
12593 __FUNCTION__, ret));
12594 }
12595 }
12596 }
12597#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
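	/*
	 * The BCME_NOTDOWN handling above is a recurring pattern for iovars
	 * that can only be written while the WL interface is down: on
	 * BCME_NOTDOWN, force WLC_DOWN and retry the same iovar once.
	 * A minimal sketch of the pattern ("some_iovar" is a placeholder;
	 * error handling trimmed):
	 *
	 *   ret = dhd_iovar(dhd, 0, "some_iovar", (char *)&val, sizeof(val),
	 *           NULL, 0, TRUE);
	 *   if (ret == BCME_NOTDOWN) {
	 *       uint wl_down = 1;
	 *       dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
	 *               sizeof(wl_down), TRUE, 0);
	 *       ret = dhd_iovar(dhd, 0, "some_iovar", (char *)&val,
	 *               sizeof(val), NULL, 0, TRUE);
	 *   }
	 *
	 * The interface is expected to be brought back up later in init.
	 */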
12598#ifdef DISABLE_11N_PROPRIETARY_RATES
12599 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
12600 TRUE);
12601 if (ret < 0) {
12602 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
12603 }
12604#endif /* DISABLE_11N_PROPRIETARY_RATES */
12605#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
12606#if defined(DISABLE_HE_ENAB)
12607 /* XXX DISABLE_HE_ENAB has higher priority than CUSTOM_CONTROL_HE_ENAB */
12608 control_he_enab = 0;
12609#endif /* DISABLE_HE_ENAB */
12610 dhd_control_he_enab(dhd, control_he_enab);
12611#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
12612
12613#ifdef CUSTOM_PSPRETEND_THR
12614	/* Set the PS-pretend threshold (HostAPD/AP mode) */
12615 ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
12616 sizeof(pspretend_thr), NULL, 0, TRUE);
12617 if (ret < 0) {
12618 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
12619 __FUNCTION__, ret));
12620 }
12621#endif
12622
12623	/* XXX Enable firmware key buffering before sending 4-way M4 */
12624 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
12625 NULL, 0, TRUE);
12626 if (ret < 0) {
12627 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
12628 }
12629#ifdef SUPPORT_SET_CAC
12630 ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
12631 if (ret < 0) {
12632 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
12633 }
12634#endif /* SUPPORT_SET_CAC */
12635 /* make up event mask ext message iovar for event larger than 128 */
12636 msglen = WL_EVENTING_MASK_EXT_LEN + EVENTMSGS_EXT_STRUCT_SIZE;
12637 eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
12638 if (eventmask_msg == NULL) {
12639 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
12640 ret = BCME_NOMEM;
12641 goto done;
12642 }
12643 bzero(eventmask_msg, msglen);
12644 eventmask_msg->ver = EVENTMSGS_VER;
12645 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
12646
12647 /* Read event_msgs_ext mask */
12648 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
12649 WLC_IOCTL_SMLEN, FALSE);
12650
12651 /* event_msgs_ext must be supported */
12652 if (ret != BCME_OK) {
12653 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret));
12654 goto done;
12655 }
12656
12657 bcopy(iov_buf, eventmask_msg, msglen);
12658	/* point at the mask inside the copied structure for the edits below */
12659 mask = eventmask_msg->mask;
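	/*
	 * The extended event mask is managed read-modify-write: read the
	 * current mask from the firmware, flip only the bits of interest with
	 * setbit()/clrbit(), then write the whole mask back (see the
	 * EVENTMSGS_SET_MASK write further below). The mask is a plain
	 * byte-array bitmap, so e.g. setbit(mask, WLC_E_LINK) sets bit
	 * (WLC_E_LINK % NBBY) of byte (WLC_E_LINK / NBBY).
	 */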
12660
12661 /* Setup event_msgs */
12662 setbit(mask, WLC_E_SET_SSID);
12663 setbit(mask, WLC_E_PRUNE);
12664 setbit(mask, WLC_E_AUTH);
12665 setbit(mask, WLC_E_AUTH_IND);
12666 setbit(mask, WLC_E_ASSOC);
12667 setbit(mask, WLC_E_REASSOC);
12668 setbit(mask, WLC_E_REASSOC_IND);
12669 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
12670 setbit(mask, WLC_E_DEAUTH);
12671 setbit(mask, WLC_E_DEAUTH_IND);
12672 setbit(mask, WLC_E_DISASSOC_IND);
12673 setbit(mask, WLC_E_DISASSOC);
12674 setbit(mask, WLC_E_JOIN);
12675 setbit(mask, WLC_E_START);
12676 setbit(mask, WLC_E_ASSOC_IND);
12677 setbit(mask, WLC_E_PSK_SUP);
12678 setbit(mask, WLC_E_LINK);
12679 setbit(mask, WLC_E_MIC_ERROR);
12680 setbit(mask, WLC_E_ASSOC_REQ_IE);
12681 setbit(mask, WLC_E_ASSOC_RESP_IE);
12682#ifdef LIMIT_BORROW
12683 setbit(mask, WLC_E_ALLOW_CREDIT_BORROW);
12684#endif
12685#ifndef WL_CFG80211
12686 setbit(mask, WLC_E_PMKID_CACHE);
12687// setbit(mask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
12688#endif
12689 setbit(mask, WLC_E_JOIN_START);
12690// setbit(mask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
12691#ifdef DHD_DEBUG
12692 setbit(mask, WLC_E_SCAN_CONFIRM_IND);
12693#endif
12694#ifdef PNO_SUPPORT
12695 setbit(mask, WLC_E_PFN_NET_FOUND);
12696 setbit(mask, WLC_E_PFN_BEST_BATCHING);
12697 setbit(mask, WLC_E_PFN_BSSID_NET_FOUND);
12698 setbit(mask, WLC_E_PFN_BSSID_NET_LOST);
12699#endif /* PNO_SUPPORT */
12700 /* enable dongle roaming event */
12701#ifdef WL_CFG80211
12702#if !defined(ROAM_EVT_DISABLE)
12703 setbit(mask, WLC_E_ROAM);
12704#endif /* !ROAM_EVT_DISABLE */
12705 setbit(mask, WLC_E_BSSID);
12706#endif /* WL_CFG80211 */
12707#ifdef BCMCCX
12708 setbit(mask, WLC_E_ADDTS_IND);
12709 setbit(mask, WLC_E_DELTS_IND);
12710#endif /* BCMCCX */
12711#ifdef WLTDLS
12712 setbit(mask, WLC_E_TDLS_PEER_EVENT);
12713#endif /* WLTDLS */
12714#ifdef WL_ESCAN
12715 setbit(mask, WLC_E_ESCAN_RESULT);
12716#endif /* WL_ESCAN */
12717#ifdef CSI_SUPPORT
12718	setbit(mask, WLC_E_CSI);
12719#endif /* CSI_SUPPORT */
12720#ifdef RTT_SUPPORT
12721 setbit(mask, WLC_E_PROXD);
12722#endif /* RTT_SUPPORT */
12723#ifdef WL_CFG80211
12724 setbit(mask, WLC_E_ESCAN_RESULT);
12725 setbit(mask, WLC_E_AP_STARTED);
12726 setbit(mask, WLC_E_ACTION_FRAME_RX);
12727 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
12728 setbit(mask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
12729 }
12730#endif /* WL_CFG80211 */
12731#ifdef WLAIBSS
12732 setbit(mask, WLC_E_AIBSS_TXFAIL);
12733#endif /* WLAIBSS */
12734
12735#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
12736 if (dhd_logtrace_from_file(dhd)) {
12737 setbit(mask, WLC_E_TRACE);
12738 } else {
12739 clrbit(mask, WLC_E_TRACE);
12740 }
12741#elif defined(SHOW_LOGTRACE)
12742 setbit(mask, WLC_E_TRACE);
12743#else
12744 clrbit(mask, WLC_E_TRACE);
12745 if (dhd->conf->chip == BCM43752_CHIP_ID)
12746 setbit(mask, WLC_E_TRACE);
12747#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
12748
12749 setbit(mask, WLC_E_CSA_COMPLETE_IND);
12750#ifdef CUSTOM_EVENT_PM_WAKE
12751 setbit(mask, WLC_E_EXCESS_PM_WAKE_EVENT);
12752#endif /* CUSTOM_EVENT_PM_WAKE */
12753#ifdef DHD_LOSSLESS_ROAMING
12754 setbit(mask, WLC_E_ROAM_PREP);
12755#endif
12756 /* nan events */
12757 setbit(mask, WLC_E_NAN);
12758#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
12759 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
12760#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
12761
12762#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
12763 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
12764#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
12765
12766#ifdef RSSI_MONITOR_SUPPORT
12767 setbit(mask, WLC_E_RSSI_LQM);
12768#endif /* RSSI_MONITOR_SUPPORT */
12769#ifdef GSCAN_SUPPORT
12770 setbit(mask, WLC_E_PFN_GSCAN_FULL_RESULT);
12771 setbit(mask, WLC_E_PFN_SCAN_COMPLETE);
12772 setbit(mask, WLC_E_PFN_SSID_EXT);
12773 setbit(mask, WLC_E_ROAM_EXP_EVENT);
12774#endif /* GSCAN_SUPPORT */
12775 setbit(mask, WLC_E_RSSI_LQM);
12776#ifdef BT_WIFI_HANDOVER
12777 setbit(mask, WLC_E_BT_WIFI_HANDOVER_REQ);
12778#endif /* BT_WIFI_HANDOVER */
12779#ifdef DBG_PKT_MON
12780 setbit(mask, WLC_E_ROAM_PREP);
12781#endif /* DBG_PKT_MON */
12782#ifdef WL_NATOE
12783 setbit(mask, WLC_E_NATOE_NFCT);
12784#endif /* WL_NATOE */
12785#ifdef WL_NAN
12786 setbit(mask, WLC_E_SLOTTED_BSS_PEER_OP);
12787#endif /* WL_NAN */
12788#ifdef WL_BCNRECV
12789 setbit(mask, WLC_E_BCNRECV_ABORTED);
12790#endif /* WL_BCNRECV */
12791#ifdef WL_MBO
12792 setbit(mask, WLC_E_MBO);
12793#endif /* WL_MBO */
12794#ifdef WL_CLIENT_SAE
12795 setbit(eventmask_msg->mask, WLC_E_JOIN_START);
12796#endif /* WL_CLIENT_SAE */
12797#ifdef WL_CAC_TS
12798 setbit(mask, WLC_E_ADDTS_IND);
12799 setbit(mask, WLC_E_DELTS_IND);
12800#endif /* WL_CAC_TS */
12801
12802 /* Write updated Event mask */
12803 eventmask_msg->ver = EVENTMSGS_VER;
12804 eventmask_msg->command = EVENTMSGS_SET_MASK;
12805 eventmask_msg->len = WL_EVENTING_MASK_EXT_LEN;
12806 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
12807 TRUE);
12808 if (ret < 0) {
12809 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
12810 goto done;
12811 }
12812
12813#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12814 /* Enabling event log trace for EAP events */
12815 el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
12816 if (el_tag == NULL) {
12817 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
12818 (int)sizeof(wl_el_tag_params_t)));
12819 ret = BCME_NOMEM;
12820 goto done;
12821 }
12822 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
12823 el_tag->set = 1;
12824 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
12825 ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL,
12826 0, TRUE);
12827 if (ret < 0) {
12828 DHD_ERROR(("%s set event_log_tag_control fail %d\n", __FUNCTION__, ret));
12829 }
12830#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
12831
12832 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
12833 sizeof(scan_assoc_time), TRUE, 0);
12834 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
12835 sizeof(scan_unassoc_time), TRUE, 0);
12836 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
12837 sizeof(scan_passive_time), TRUE, 0);
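	/*
	 * The three scan-time ioctls above tune per-channel dwell times:
	 * active dwell while associated, active dwell while unassociated, and
	 * passive-scan dwell, respectively. The values are commonly documented
	 * as milliseconds per channel for the wl utility; treat the unit as an
	 * assumption here.
	 */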
12838
12839#ifdef ARP_OFFLOAD_SUPPORT
12840 DHD_ERROR(("arp_enable:%d arp_ol:%d\n",
12841 dhd->arpoe_enable, dhd->arpol_configured));
12842#endif /* ARP_OFFLOAD_SUPPORT */
12843
12844#ifdef PKT_FILTER_SUPPORT
12845	/* Set up default definitions for pktfilter; enable them in suspend */
12846 if (dhd_master_mode) {
12847 dhd->pktfilter_count = 6;
12848 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
12849 if (!FW_SUPPORTED(dhd, pf6)) {
12850 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
12851 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
12852 } else {
12853 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
12854 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
12855 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
12856 }
12857 /* apply APP pktfilter */
12858 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
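		/*
		 * Decoding the filter string above (wl pkt_filter_add syntax,
		 * assumed from common usage):
		 *   "<id> <polarity> <type> <offset> <bitmask> <pattern>"
		 * So "105 0 0 12 0xFFFF 0x0806" is filter id 105, normal
		 * polarity, pattern-match type 0, matching 2 bytes at offset 12
		 * (the Ethernet ethertype field) against 0x0806, i.e. ARP.
		 */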
12859
12860#ifdef BLOCK_IPV6_PACKET
12861 /* Setup filter to allow only IPv4 unicast frames */
12862 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
12863 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
12864 " "
12865 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
12866#else
12867 /* Setup filter to allow only unicast */
12868 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
12869#endif /* BLOCK_IPV6_PACKET */
12870
12871#ifdef PASS_IPV4_SUSPEND
12872 /* XXX customer want to get IPv4 multicast packets */
12873 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
12874#else
12875 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
12876 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
12877#endif /* PASS_IPV4_SUSPEND */
12878 if (FW_SUPPORTED(dhd, pf6)) {
12879			/* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
12880 dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
12881 dhd->pktfilter_count = 8;
12882 }
12883
12884#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
12885 dhd->pktfilter_count = 4;
12886 /* Setup filter to block broadcast and NAT Keepalive packets */
12887 /* discard all broadcast packets */
12888 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
12889 /* discard NAT Keepalive packets */
12890 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
12891 /* discard NAT Keepalive packets */
12892 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
12893 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
12894#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
12895 } else
12896 dhd_conf_discard_pkt_filter(dhd);
12897 dhd_conf_add_pkt_filter(dhd);
12898
12899#if defined(SOFTAP)
12900 if (ap_fw_loaded) {
12901		/* XXX Andrey: for SOFTAP, disable pkt filters (if there were any) */
12902 dhd_enable_packet_filter(0, dhd);
12903 }
12904#endif /* defined(SOFTAP) */
12905 dhd_set_packet_filter(dhd);
12906#endif /* PKT_FILTER_SUPPORT */
12907#ifdef DISABLE_11N
12908 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
12909 if (ret < 0)
12910 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
12911#endif /* DISABLE_11N */
12912
12913#ifdef ENABLE_BCN_LI_BCN_WAKEUP
12914 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn),
12915 NULL, 0, TRUE);
12916 if (ret < 0) {
12917 DHD_ERROR(("%s: set bcn_li_bcn failed %d\n", __FUNCTION__, ret));
12918 }
12919#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
12920#ifdef AMPDU_VO_ENABLE
12921 /* XXX: Enabling VO AMPDU to reduce FER */
12922 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
12923 tid.enable = TRUE;
12924 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
12925 if (ret < 0) {
12926 DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
12927 }
12928
12929	tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
12930 tid.enable = TRUE;
12931 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
12932 if (ret < 0) {
12933 DHD_ERROR(("%s ampdu_tid %d\n", __FUNCTION__, ret));
12934 }
12935#endif
12936 /* query for 'clmver' to get clm version info from firmware */
12937 bzero(buf, sizeof(buf));
12938 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
12939 if (ret < 0)
12940 DHD_ERROR(("%s clmver failed %d\n", __FUNCTION__, ret));
12941 else {
12942 char *ver_temp_buf = NULL, *ver_date_buf = NULL;
12943 int len;
12944
12945 if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
12946 DHD_ERROR(("Couldn't find \"Data:\"\n"));
12947 } else {
12948 ver_date_buf = bcmstrstr(buf, "Creation:");
12949 ptr = (ver_temp_buf + strlen("Data:"));
12950 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
12951 DHD_ERROR(("Couldn't find New line character\n"));
12952 } else {
12953 memset(clm_version, 0, CLM_VER_STR_LEN);
12954 len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
12955 if (ver_date_buf) {
12956 ptr = (ver_date_buf + strlen("Creation:"));
12957 ver_date_buf = bcmstrtok(&ptr, "\n", 0);
12958 if (ver_date_buf)
12959 snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
12960 " (%s)", ver_date_buf);
12961 }
12962 DHD_INFO(("CLM version = %s\n", clm_version));
12963 }
12964 }
12965
12966#if defined(CUSTOMER_HW4_DEBUG)
12967 if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
12968 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
12969 } else {
12970 char tokenlim;
12971 ptr = (ver_temp_buf + strlen("Customization:"));
12972 if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
12973				DHD_ERROR(("Couldn't find project blob version"
12974					" or newline character\n"));
12975 } else if (tokenlim == '(') {
12976 snprintf(clm_version,
12977 CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
12978 clm_version, ver_temp_buf);
12979 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
12980 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
12981 DHD_ERROR(("Couldn't find New line character\n"));
12982 } else {
12983 snprintf(clm_version,
12984 strlen(clm_version) + strlen(ver_temp_buf),
12985 "%s%s", clm_version, ver_temp_buf);
12986 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
12987 clm_version));
12988
12989 }
12990 } else if (tokenlim == '\n') {
12991 snprintf(clm_version,
12992 strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
12993 "%s, Blob ver = Major : ", clm_version);
12994 snprintf(clm_version,
12995 strlen(clm_version) + strlen(ver_temp_buf) + 1,
12996 "%s%s", clm_version, ver_temp_buf);
12997 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
12998 }
12999 }
13000#endif /* CUSTOMER_HW4_DEBUG */
13001 if (strlen(clm_version)) {
13002 DHD_INFO(("CLM version = %s\n", clm_version));
13003 } else {
13004 DHD_ERROR(("Couldn't find CLM version!\n"));
13005 }
13006 }
13007 dhd_set_version_info(dhd, fw_version);
13008
13009#ifdef WRITE_WLANINFO
13010 sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
13011#endif /* WRITE_WLANINFO */
13012#ifdef GEN_SOFTAP_INFO_FILE
13013 sec_save_softap_info();
13014#endif /* GEN_SOFTAP_INFO_FILE */
13015
13016#if defined(BCMSDIO)
13017 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
13018#endif /* defined(BCMSDIO) */
13019
13020#if defined(BCMSDIO) || defined(BCMDBUS)
13021#ifdef PROP_TXSTATUS
13022 if (disable_proptx ||
13023#ifdef PROP_TXSTATUS_VSDB
13024 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
13025 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
13026 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
13027#endif /* PROP_TXSTATUS_VSDB */
13028 FALSE) {
13029 wlfc_enable = FALSE;
13030 }
13031 ret = dhd_conf_get_disable_proptx(dhd);
13032	if (ret == 0) {
13033 disable_proptx = 0;
13034 wlfc_enable = TRUE;
13035 } else if (ret >= 1) {
13036 disable_proptx = 1;
13037 wlfc_enable = FALSE;
13038 /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
13039 hostreorder = 0;
13040 }
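	/*
	 * Relationship between the knobs above (as used in this file):
	 * disable_proptx=1 turns proptxstatus flow control off, which also
	 * forces ampdu_hostreorder=0, since host reordering relies on the
	 * WLFC TLV signalling; with proptx enabled, ampdu_hostreorder is
	 * re-enabled after dhd_wlfc_init() below.
	 */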
13041
13042#if defined(PROP_TXSTATUS)
13043#ifdef USE_WFA_CERT_CONF
13044 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
13045		DHD_ERROR(("%s, read proptx param=%d\n", __FUNCTION__, proptx));
13046 wlfc_enable = proptx;
13047 }
13048#endif /* USE_WFA_CERT_CONF */
13049#endif /* PROP_TXSTATUS */
13050
13051#ifndef DISABLE_11N
13052 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
13053 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
13054 NULL, 0, TRUE);
13055 if (ret2 < 0) {
13056 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
13057 if (ret2 != BCME_UNSUPPORTED)
13058 ret = ret2;
13059
13060 if (ret == BCME_NOTDOWN) {
13061 uint wl_down = 1;
13062 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
13063 sizeof(wl_down), TRUE, 0);
13064 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
13065 __FUNCTION__, ret2, hostreorder));
13066
13067 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
13068 sizeof(hostreorder), NULL, 0, TRUE);
13069 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
13070 if (ret2 != BCME_UNSUPPORTED)
13071 ret = ret2;
13072 }
13073 if (ret2 != BCME_OK)
13074 hostreorder = 0;
13075 }
13076#endif /* DISABLE_11N */
13077
13078 if (wlfc_enable) {
13079 dhd_wlfc_init(dhd);
13080 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
13081 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
13082 }
13083#ifndef DISABLE_11N
13084 else if (hostreorder)
13085 dhd_wlfc_hostreorder_init(dhd);
13086#endif /* DISABLE_11N */
13087#else
13088 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
13089	printf("%s: PROP_TXSTATUS not defined\n", __FUNCTION__);
13090 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
13091#endif /* PROP_TXSTATUS */
13092#endif /* BCMSDIO || BCMDBUS */
13093#ifndef PCIE_FULL_DONGLE
13094 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
13095 if (FW_SUPPORTED(dhd, ap)) {
13096 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
13097 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
13098 NULL, 0, TRUE);
13099 if (ret < 0)
13100 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
13101 }
13102#endif /* PCIE_FULL_DONGLE */
13103#ifdef PNO_SUPPORT
13104 if (!dhd->pno_state) {
13105 dhd_pno_init(dhd);
13106 }
13107#endif
13108
13109#ifdef RTT_SUPPORT
13110 if (dhd->rtt_state) {
13111 ret = dhd_rtt_init(dhd);
13112 if (ret < 0) {
13113 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
13114 }
13115 }
13116#endif
13117#ifdef FILTER_IE
13118 /* Failure to configure filter IE is not a fatal error, ignore it. */
13119 if (FW_SUPPORTED(dhd, fie) &&
13120 !(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
13121 dhd_read_from_file(dhd);
13122 }
13123#endif /* FILTER_IE */
13124#ifdef WL11U
13125 dhd_interworking_enable(dhd);
13126#endif /* WL11U */
13127
13128#ifdef NDO_CONFIG_SUPPORT
13129 dhd->ndo_enable = FALSE;
13130 dhd->ndo_host_ip_overflow = FALSE;
13131 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
13132#endif /* NDO_CONFIG_SUPPORT */
13133
13134 /* ND offload version supported */
13135 dhd->ndo_version = dhd_ndo_get_version(dhd);
13136 if (dhd->ndo_version > 0) {
13137 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
13138
13139#ifdef NDO_CONFIG_SUPPORT
13140 /* enable Unsolicited NA filter */
13141 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
13142 if (ret < 0) {
13143 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
13144 }
13145#endif /* NDO_CONFIG_SUPPORT */
13146 }
13147
13148 /* check dongle supports wbtext (product policy) or not */
13149 dhd->wbtext_support = FALSE;
13150 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
13151 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
13152 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
13153 }
13154 dhd->wbtext_policy = wnm_bsstrans_resp;
13155 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
13156 dhd->wbtext_support = TRUE;
13157 }
13158#ifndef WBTEXT
13159 /* driver can turn off wbtext feature through makefile */
13160 if (dhd->wbtext_support) {
13161 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
13162 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
13163 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
13164 DHD_ERROR(("failed to disable WBTEXT\n"));
13165 }
13166 }
13167#endif /* !WBTEXT */
13168
13169#ifdef DHD_NON_DMA_M2M_CORRUPTION
13170 /* check pcie non dma loopback */
13171 if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
13172 (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
13173 goto done;
13174 }
13175#endif /* DHD_NON_DMA_M2M_CORRUPTION */
13176
13177 /* WNM capabilities */
13178 wnm_cap = 0
13179#ifdef WL11U
13180 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
13181#endif
13182#ifdef WBTEXT
13183 | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
13184#endif
13185 ;
13186#if defined(WL_MBO) && defined(WL_OCE)
13187 if (FW_SUPPORTED(dhd, estm)) {
13188 wnm_cap |= WL_WNM_ESTM;
13189 }
13190#endif /* WL_MBO && WL_OCE */
13191 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
13192 DHD_ERROR(("failed to set WNM capabilities\n"));
13193 }
13194
13195#ifdef CUSTOM_ASSOC_TIMEOUT
13196 /* set recreate_bi_timeout to increase assoc timeout :
13197 * 20 * 100TU * 1024 / 1000 = 2 secs
13198 * (beacon wait time = recreate_bi_timeout * beacon_period * 1024 / 1000)
13199 */
13200 if (dhd_wl_ioctl_set_intiovar(dhd, "recreate_bi_timeout",
13201 CUSTOM_ASSOC_TIMEOUT,
13202 WLC_SET_VAR, TRUE, 0) != BCME_OK) {
13203 DHD_ERROR(("failed to set assoc timeout\n"));
13204 }
13205#endif /* CUSTOM_ASSOC_TIMEOUT */
13206
13207#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
13208 if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
13209 NULL, 0, TRUE) < 0) {
13210 DHD_ERROR(("failed to set BTM delta\n"));
13211 }
13212#endif /* WBTEXT && WBTEXT_BTMDELTA */
13213#if defined(WBTEXT) && defined(RRM_BCNREQ_MAX_CHAN_TIME)
13214 if (dhd_iovar(dhd, 0, "rrm_bcn_req_thrtl_win",
13215 (char *)&rrm_bcn_req_thrtl_win, sizeof(rrm_bcn_req_thrtl_win),
13216 NULL, 0, TRUE) < 0) {
13217 DHD_ERROR(("failed to set RRM BCN request thrtl_win\n"));
13218 }
13219 if (dhd_iovar(dhd, 0, "rrm_bcn_req_max_off_chan_time",
13220 (char *)&rrm_bcn_req_max_off_chan_time, sizeof(rrm_bcn_req_max_off_chan_time),
13221 NULL, 0, TRUE) < 0) {
13222 DHD_ERROR(("failed to set RRM BCN Request max_off_chan_time\n"));
13223 }
13224#endif /* WBTEXT && RRM_BCNREQ_MAX_CHAN_TIME */
13225
13226#ifdef WL_MONITOR
13227 if (FW_SUPPORTED(dhd, monitor)) {
13228 dhd->monitor_enable = TRUE;
13229 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
13230 } else {
13231 dhd->monitor_enable = FALSE;
13232 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
13233 }
13234#endif /* WL_MONITOR */
13235
13236 /* store the preserve log set numbers */
13237 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
13238 != BCME_OK) {
13239 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
13240 }
13241
13242 if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
13243 dhd_ecounter_configure(dhd, TRUE);
13244 }
13245
13246#ifdef CONFIG_SILENT_ROAM
13247 dhd->sroam_turn_on = TRUE;
13248 dhd->sroamed = FALSE;
13249#endif /* CONFIG_SILENT_ROAM */
13250 dhd_set_bandlock(dhd);
13251
13252 dhd_conf_postinit_ioctls(dhd);
13253done:
13254
13255 if (eventmask_msg) {
13256 MFREE(dhd->osh, eventmask_msg, msglen);
13257 }
13258 if (iov_buf) {
13259 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
13260 }
13261#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
13262 if (el_tag) {
13263 MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
13264 }
13265#endif /* DHD_8021X_DUMP */
13266 return ret;
13267}
13268
13269/* Enable preinit optimisation by default */
13270#define DHD_PREINIT_OPTIMISATION
13271
13272int
13273dhd_preinit_ioctls(dhd_pub_t *dhd)
13274{
13275 int ret = 0;
13276
13277#ifdef DHD_PREINIT_OPTIMISATION
13278 int preinit_status = 0;
13279 ret = dhd_iovar(dhd, 0, "preinit_status", NULL, 0, (char *)&preinit_status,
13280 sizeof(preinit_status), FALSE);
13281
13282 if (ret == BCME_OK) {
13283 DHD_ERROR(("%s: preinit_status IOVAR present, use optimised preinit\n",
13284 __FUNCTION__));
13285 dhd->fw_preinit = TRUE;
13286 ret = dhd_optimised_preinit_ioctls(dhd);
13287 } else if (ret == BCME_UNSUPPORTED) {
13288 DHD_ERROR(("%s: preinit_status IOVAR not supported, use legacy preinit\n",
13289 __FUNCTION__));
13290 dhd->fw_preinit = FALSE;
13291 ret = dhd_legacy_preinit_ioctls(dhd);
13292 } else {
13293 DHD_ERROR(("%s: preinit_status IOVAR returned err(%d), ABORT\n",
13294 __FUNCTION__, ret));
13295 }
13296#else
13297 dhd->fw_preinit = FALSE;
13298 ret = dhd_legacy_preinit_ioctls(dhd);
13299#endif /* DHD_PREINIT_OPTIMISATION */
13300 return ret;
13301}
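/*
 * dhd_preinit_ioctls() above is a probe-and-fallback wrapper: querying the
 * "preinit_status" iovar doubles as a capability probe. A rough sketch of
 * the decision table (return codes from dhd_iovar()):
 *
 *   BCME_OK          -> firmware did its own preinit; run the optimised path
 *   BCME_UNSUPPORTED -> older firmware; run the full legacy ioctl sequence
 *   anything else    -> bus/ioctl failure; abort and propagate the error
 */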
13302
13303int
13304dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
13305 uint cmd_len, char **resptr, uint resp_len)
13306{
13307 int len = resp_len;
13308 int ret;
13309 char *buf = *resptr;
13310 wl_ioctl_t ioc;
13311 if (resp_len > WLC_IOCTL_MAXLEN)
13312 return BCME_BADARG;
13313
13314 memset(buf, 0, resp_len);
13315
13316 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
13317 if (ret == 0) {
13318 return BCME_BUFTOOSHORT;
13319 }
13320
13321 memset(&ioc, 0, sizeof(ioc));
13322
13323 ioc.cmd = WLC_GET_VAR;
13324 ioc.buf = buf;
13325 ioc.len = len;
13326 ioc.set = 0;
13327
13328 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
13329
13330 return ret;
13331}
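/*
 * Typical usage of dhd_getiovar() (a hypothetical sketch; "cap" is only an
 * example iovar name and buf is caller-owned, at least resp_len bytes):
 *
 *   char *buf = caller_buf;
 *   ret = dhd_getiovar(pub, 0, "cap", NULL, 0, &buf, WLC_IOCTL_SMLEN);
 *
 * bcm_mkiovar() packs the name (plus optional cmd_buf parameters) into buf,
 * and the same buffer is reused for the WLC_GET_VAR response, which is why
 * resp_len is capped at WLC_IOCTL_MAXLEN above.
 */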
13332
13333int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
13334{
13335 struct dhd_info *dhd = dhdp->info;
13336 struct net_device *dev = NULL;
13337
13338 ASSERT(dhd && dhd->iflist[ifidx]);
13339 dev = dhd->iflist[ifidx]->net;
13340 ASSERT(dev);
13341
13342 if (netif_running(dev)) {
13343 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
13344 return BCME_NOTDOWN;
13345 }
13346
13347#define DHD_MIN_MTU 1500
13348#define DHD_MAX_MTU 1752
13349
13350 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
13351 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
13352 return BCME_BADARG;
13353 }
13354
13355 dev->mtu = new_mtu;
13356 return 0;
13357}
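/*
 * On the bounds above: DHD_MIN_MTU matches the standard Ethernet MTU (1500),
 * and DHD_MAX_MTU leaves 252 bytes of headroom for bus/dongle encapsulation;
 * the exact ceiling is a driver-specific choice, not a protocol constant.
 * Note the new MTU is only stored on the net_device here -- no ioctl is sent
 * to the dongle.
 */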
13358
13359#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
13360static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
13361{
13362 struct net_device *primary_ndev;
13363 struct bcm_cfg80211 *cfg;
13364 unsigned long flags = 0;
13365 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
13366
13367 if (!primary_ndev) {
13368 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
13369 return BCME_ERROR;
13370 }
13371 cfg = wl_get_cfg(primary_ndev);
13372
13373 if (!cfg) {
13374 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
13375 return BCME_ERROR;
13376 }
13377
13378 DHD_GENERAL_LOCK(dhdp, flags);
13379 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
13380 DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
13381 dhd_os_busbusy_wake(dhdp);
13382 DHD_GENERAL_UNLOCK(dhdp, flags);
13383 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
13384 return BCME_ERROR;
13385 }
13386 DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
13387 DHD_GENERAL_UNLOCK(dhdp, flags);
13388
13389 DHD_OS_WAKE_LOCK(dhdp);
13390	/* Check whether the HAL has started: only then send the urgent event;
	 * otherwise skip it (the dump state is cleared further below)
	 */
13391 if (wl_cfg80211_is_hal_started(cfg)) {
13392 int timeleft = 0;
13393
13394 DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
13395 dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
13396
13397 DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
13398 __FUNCTION__, dhdp->dhd_bus_busy_state));
13399 timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
13400 &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
13401 if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
13402 DHD_ERROR(("%s: Timed out(%d) dhd_bus_busy_state=0x%x\n",
13403 __FUNCTION__, timeleft, dhdp->dhd_bus_busy_state));
13404 }
13405 } else {
13406 DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
13407 }
13408 DHD_OS_WAKE_UNLOCK(dhdp);
13409 /* In case of dhd_os_busbusy_wait_bitmask() timeout,
13410 * hal dump bit will not be cleared. Hence clearing it here.
13411 */
13412 DHD_GENERAL_LOCK(dhdp, flags);
13413 DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
13414 dhd_os_busbusy_wake(dhdp);
13415 DHD_GENERAL_UNLOCK(dhdp, flags);
13416
13417 return BCME_OK;
13418}
13419#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
13420
13421#ifdef ARP_OFFLOAD_SUPPORT
13422/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
13423/* XXX the add operation is more efficient */
13424void
13425aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
13426{
13427 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
13428 int i;
13429 int ret;
13430
13431 bzero(ipv4_buf, sizeof(ipv4_buf));
13432
13433 /* display what we've got */
13434 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
13435 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
13436#ifdef AOE_DBG
13437 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
13438#endif
13439	/* now that the host_ip table is saved, clear it in the dongle AOE */
13440 dhd_aoe_hostip_clr(dhd_pub, idx);
13441
13442 if (ret) {
13443 DHD_ERROR(("%s failed\n", __FUNCTION__));
13444 return;
13445 }
13446
13447 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
13448 if (add && (ipv4_buf[i] == 0)) {
13449 ipv4_buf[i] = ipa;
13450 add = FALSE; /* added ipa to local table */
13451 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
13452 __FUNCTION__, i));
13453 } else if (ipv4_buf[i] == ipa) {
13454 ipv4_buf[i] = 0;
13455 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
13456 __FUNCTION__, ipa, i));
13457 }
13458
13459 if (ipv4_buf[i] != 0) {
13460 /* add back host_ip entries from our local cache */
13461 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
13462 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
13463 __FUNCTION__, ipv4_buf[i], i));
13464 }
13465 }
13466#ifdef AOE_DBG
13467 /* see the resulting hostip table */
13468 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
13469 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
13470 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
13471#endif
13472}
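/*
 * aoe_update_host_ipv4_table() above uses a read-clear-rewrite scheme rather
 * than editing entries in place: snapshot the dongle's host_ip table, clear
 * it, apply the add/remove to the local copy, then push every surviving
 * entry back one at a time. This keeps the dongle table compact (no holes)
 * at the cost of briefly leaving it empty; the table holds at most
 * MAX_IPV4_ENTRIES (8) addresses per interface.
 */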
13473
13474/* XXX this function is only for IP address */
13475/*
13476 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
13477 * whenever there is an event related to an IP address.
13478 * ptr : kernel provided pointer to IP address that has changed
13479 */
13480static int dhd_inetaddr_notifier_call(struct notifier_block *this,
13481 unsigned long event,
13482 void *ptr)
13483{
13484 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
13485
13486 dhd_info_t *dhd;
13487 dhd_pub_t *dhd_pub;
13488 int idx;
13489
13490 if (!ifa || !(ifa->ifa_dev->dev))
13491 return NOTIFY_DONE;
13492
13493 /* Filter notifications meant for non Broadcom devices */
13494 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
13495 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
13496#if defined(WL_ENABLE_P2P_IF)
13497 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
13498#endif /* WL_ENABLE_P2P_IF */
13499 return NOTIFY_DONE;
13500 }
13501
13502 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
13503 if (!dhd)
13504 return NOTIFY_DONE;
13505
13506 dhd_pub = &dhd->pub;
13507
13508 if (!dhd_pub->arpoe_enable) {
13509		DHD_ERROR(("arpoe_enable not set\n"));
13510 return NOTIFY_DONE;
13511 }
13512
13513 if (dhd_pub->arp_version == 1) {
13514 idx = 0;
13515 } else {
13516 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
13517 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
13518 break;
13519 }
13520 if (idx < DHD_MAX_IFS)
13521 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
13522 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
13523 else {
13524			DHD_ERROR(("Cannot find ifidx for (%s), set to 0\n", ifa->ifa_label));
13525 idx = 0;
13526 }
13527 }
13528
13529 switch (event) {
13530 case NETDEV_UP:
13531 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
13532 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
13533
13534 /*
13535 * Skip if Bus is not in a state to transport the IOVAR
13536 * (or) the Dongle is not ready.
13537 */
13538 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
13539 dhd->pub.busstate == DHD_BUS_LOAD) {
13540 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
13541 __FUNCTION__, dhd->pub.busstate));
13542 if (dhd->pend_ipaddr) {
13543 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
13544 __FUNCTION__, dhd->pend_ipaddr));
13545 }
13546 dhd->pend_ipaddr = ifa->ifa_address;
13547 break;
13548 }
13549
13550#ifdef AOE_IP_ALIAS_SUPPORT
13551			/* XXX HOSTAPD will be returned at first */
13552 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
13553 __FUNCTION__));
13554 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
13555#endif /* AOE_IP_ALIAS_SUPPORT */
13556 dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
13557 break;
13558
13559 case NETDEV_DOWN:
13560 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
13561 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
13562 dhd->pend_ipaddr = 0;
13563#ifdef AOE_IP_ALIAS_SUPPORT
13564			/* XXX HOSTAPD will be returned at first */
13565 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
13566 __FUNCTION__));
13567 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
13568 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
13569 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
13570 } else
13571#endif /* AOE_IP_ALIAS_SUPPORT */
13572 {
13573 /* XXX clear ALL arp and hostip tables */
13574 dhd_aoe_hostip_clr(&dhd->pub, idx);
13575 dhd_aoe_arp_clr(&dhd->pub, idx);
13576 }
13577 dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
13578 break;
13579
13580 default:
13581			DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
13582 __func__, ifa->ifa_label, event));
13583 break;
13584 }
13585 return NOTIFY_DONE;
13586}
13587#endif /* ARP_OFFLOAD_SUPPORT */
13588
13589#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
13590/* Neighbor Discovery Offload: deferred handler */
13591static void
13592dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
13593{
13594 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
13595 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
13596 dhd_pub_t *dhdp;
13597 int ret;
13598
13599 if (!dhd) {
13600 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
13601 goto done;
13602 }
13603 dhdp = &dhd->pub;
13604
13605 if (event != DHD_WQ_WORK_IPV6_NDO) {
13606 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
13607 goto done;
13608 }
13609
13610 if (!ndo_work) {
13611 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
13612 return;
13613 }
13614
13615 switch (ndo_work->event) {
13616 case NETDEV_UP:
13617#ifndef NDO_CONFIG_SUPPORT
13618 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
13619 ret = dhd_ndo_enable(dhdp, TRUE);
13620 if (ret < 0) {
13621 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
13622 }
13623#endif /* !NDO_CONFIG_SUPPORT */
13624 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
13625 if (dhdp->ndo_version > 0) {
13626 /* inet6 addr notifier called only for unicast address */
13627 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
13628 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
13629 } else {
13630 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
13631 ndo_work->if_idx);
13632 }
13633 if (ret < 0) {
13634 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
13635 __FUNCTION__, ret));
13636 }
13637 break;
13638 case NETDEV_DOWN:
13639 if (dhdp->ndo_version > 0) {
13640 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
13641 ret = dhd_ndo_remove_ip_by_addr(dhdp,
13642 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
13643 } else {
13644 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
13645 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
13646 }
13647 if (ret < 0) {
13648 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
13649 __FUNCTION__, ret));
13650 goto done;
13651 }
13652#ifdef NDO_CONFIG_SUPPORT
13653 if (dhdp->ndo_host_ip_overflow) {
13654 ret = dhd_dev_ndo_update_inet6addr(
13655 dhd_idx2net(dhdp, ndo_work->if_idx));
13656 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
13657 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
13658 __FUNCTION__, ret));
13659 goto done;
13660 }
13661 }
13662#else /* !NDO_CONFIG_SUPPORT */
13663 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
13664 ret = dhd_ndo_enable(dhdp, FALSE);
13665 if (ret < 0) {
13666 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
13667 goto done;
13668 }
13669#endif /* NDO_CONFIG_SUPPORT */
13670 break;
13671
13672 default:
13673 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
13674 break;
13675 }
13676done:
13677
13678 /* free ndo_work. alloced while scheduling the work */
13679 if (ndo_work) {
13680 kfree(ndo_work);
13681 }
13682
13683 return;
13684} /* dhd_inet6_work_handler */
13685
13686/*
13687 * Neighbor Discovery Offload: Called when an interface
13688 * is assigned with ipv6 address.
13689 * Handles only primary interface
13690 */
13691int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
13692{
13693 dhd_info_t *dhd;
13694 dhd_pub_t *dhdp;
13695 struct inet6_ifaddr *inet6_ifa = ptr;
13696 struct ipv6_work_info_t *ndo_info;
13697 int idx;
13698
13699 /* Filter notifications meant for non Broadcom devices */
13700 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
13701 return NOTIFY_DONE;
13702 }
13703
13704 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
13705 if (!dhd) {
13706 return NOTIFY_DONE;
13707 }
13708 dhdp = &dhd->pub;
13709
13710 /* Supports only primary interface */
13711 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
13712 if (idx != 0) {
13713 return NOTIFY_DONE;
13714 }
13715
13716 /* FW capability */
13717 if (!FW_SUPPORTED(dhdp, ndoe)) {
13718 return NOTIFY_DONE;
13719 }
13720
13721 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
13722 if (!ndo_info) {
13723 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
13724 return NOTIFY_DONE;
13725 }
13726
13727 /* fill up ndo_info */
13728 ndo_info->event = event;
13729 ndo_info->if_idx = idx;
13730 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
13731
13732 /* defer the work to thread as it may block kernel */
13733 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
13734 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
13735 return NOTIFY_DONE;
13736}
13737#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
13738
13739/* Network attach to be invoked from the bus probe handlers */
13740int
13741dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
13742{
13743 struct net_device *primary_ndev;
13744 BCM_REFERENCE(primary_ndev);
13745
13746 /* Register primary net device */
13747 if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
13748 return BCME_ERROR;
13749 }
13750
13751#if defined(WL_CFG80211)
13752 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
13753 if (wl_cfg80211_net_attach(primary_ndev) < 0) {
13754 /* fail the init */
13755 dhd_remove_if(dhdp, 0, TRUE);
13756 return BCME_ERROR;
13757 }
13758#endif /* WL_CFG80211 */
13759 return BCME_OK;
13760}
13761
13762#if defined(WLAN_ACCEL_BOOT)
13763
13764#ifndef DHD_FS_CHECK_RETRY_DELAY_MS
13765#define DHD_FS_CHECK_RETRY_DELAY_MS 3000
13766#endif
13767
13768#ifndef DHD_FS_CHECK_RETRIES
13769#define DHD_FS_CHECK_RETRIES 3
13770#endif
13771
13772static bool
13773dhd_check_filesystem_is_up(void)
13774{
13775 struct file *fp;
13776 const char *fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
13777 fp = filp_open(fw, O_RDONLY, 0);
13778
13779 if (IS_ERR(fp)) {
13780		DHD_ERROR(("%s: filp_open(%s) failed(%d), scheduling wl_accel_work\n",
13781			__FUNCTION__, fw, (int)PTR_ERR(fp)));
13782 return FALSE;
13783 }
13784 filp_close(fp, NULL);
13785
13786 return TRUE;
13787}
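/*
 * dhd_check_filesystem_is_up() is the "wait and retry" half of deferred
 * wifi-accel bringup: the firmware path may live on a partition that is not
 * yet mounted when the driver probes, so the work handler below simply
 * re-schedules itself (up to DHD_FS_CHECK_RETRIES times, spaced
 * DHD_FS_CHECK_RETRY_DELAY_MS apart) until filp_open() on the firmware file
 * succeeds, and only then proceeds with wl_android_wifi_on(). Note the probe
 * only tests existence and open permission; it does not validate the
 * firmware image itself.
 */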
13788
13789static void
13790dhd_wifi_accel_on_work_cb(struct work_struct *work)
13791{
13792 int ret = 0;
13793 struct delayed_work *dw = to_delayed_work(work);
13794 struct dhd_info *dhd;
13795 struct net_device *net;
13796
13797 /* Ignore compiler warnings due to -Werror=cast-qual */
13798 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13799 dhd = container_of(dw, struct dhd_info, wl_accel_work);
13800 GCC_DIAGNOSTIC_POP();
13801
13802 DHD_ERROR(("%s\n", __FUNCTION__));
13803
13804 if (!dhd_check_filesystem_is_up()) {
13805 if (!dhd->fs_check_retry--) {
13806 DHD_ERROR(("%s: max retry reached, BACKOFF\n", __FUNCTION__));
13807 return;
13808 }
13809 schedule_delayed_work(&dhd->wl_accel_work,
13810 msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
13811 return;
13812 }
13813
13814 net = dhd->iflist[0]->net;
13815
13816 /*
13817	 * Keep WLAN turned on and download the firmware during bootup
13818	 * by setting g_wifi_on = FALSE
13819 */
13820 wl_android_set_wifi_on_flag(FALSE);
13821 ret = wl_android_wifi_on(net);
13822 if (ret) {
13823 DHD_ERROR(("%s: wl_android_wifi_on failed(%d)\n", __FUNCTION__, ret));
13824 goto fail;
13825 }
13826
13827 /* After bootup keep in suspend state */
13828 ret = dhd_net_bus_suspend(net);
13829 if (ret) {
13830 DHD_ERROR(("%s: dhd_net_bus_suspend failed(%d)\n", __FUNCTION__, ret));
13831 goto fail;
13832 }
13833
13834	/* Initialise force reg_on to FALSE; it will be set for the Big Hammer case */
13835 dhd->wl_accel_force_reg_on = FALSE;
13836
13837 /* Mark wl_accel_boot_on_done */
13838 dhd->wl_accel_boot_on_done = TRUE;
13839 return;
13840
13841fail:
13842 DHD_ERROR(("%s: enable wl_accel_force_reg_on to recover\n", __FUNCTION__));
13843 /* Toggle REG_ON and download firmware during UP */
13844 dhd->wl_accel_force_reg_on = TRUE;
13845
13846 /* Reset wl_accel_boot_on_done */
13847 dhd->wl_accel_boot_on_done = FALSE;
13848 return;
13849
13850}
13851#endif /* WLAN_ACCEL_BOOT */
13852
13853int
13854dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
13855{
13856 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
13857 dhd_if_t *ifp;
13858 struct net_device *net = NULL;
13859 int err = 0;
13860 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
13861
13862 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
13863
13864 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
13865 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
13866 return BCME_ERROR;
13867 }
13868
13869 ASSERT(dhd && dhd->iflist[ifidx]);
13870 ifp = dhd->iflist[ifidx];
13871 net = ifp->net;
13872 ASSERT(net && (ifp->idx == ifidx));
13873
13874 ASSERT(!net->netdev_ops);
13875 net->netdev_ops = &dhd_ops_virt;
13876
13877 /* Ok, link into the network layer... */
13878 if (ifidx == 0) {
13879 /*
13880 * device functions for the primary interface only
13881 */
13882 net->netdev_ops = &dhd_ops_pri;
13883 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
13884 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
13885 } else {
13886 /*
13887 * We have to use the primary MAC for virtual interfaces
13888 */
13889 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
13890 /*
13891 * Android sets the locally administered bit to indicate that this is a
13892 * portable hotspot. This will not work in simultaneous AP/STA mode,
13893		 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
13894 */
13895 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
13896 ETHER_ADDR_LEN)) {
13897 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
13898 __func__, net->name));
13899 temp_addr[0] |= 0x02;
13900 }
13901 }
13902
13903 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
13904 net->ethtool_ops = &dhd_ethtool_ops;
13905
13906#if defined(WL_WIRELESS_EXT)
13907#if WIRELESS_EXT < 19
13908 net->get_wireless_stats = dhd_get_wireless_stats;
13909#endif /* WIRELESS_EXT < 19 */
13910#if WIRELESS_EXT > 12
13911 net->wireless_handlers = &wl_iw_handler_def;
13912#endif /* WIRELESS_EXT > 12 */
13913#endif /* defined(WL_WIRELESS_EXT) */
13914
13915 /* XXX Set up an MTU change notifier as per linux/notifier.h? */
13916 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
13917
13918#ifdef WLMESH
13919 if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
13920 temp_addr[4] ^= 0x80;
13921 temp_addr[4] += ifidx;
13922 temp_addr[5] += ifidx;
13923 }
13924#endif
13925 /*
13926 * XXX Linux 2.6.25 does not like a blank MAC address, so use a
13927 * dummy address until the interface is brought up.
13928 */
13929 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
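	/*
	 * Note: writing net->dev_addr with memcpy() is only valid on older
	 * kernels; once dev_addr became const (around v5.17) this would need
	 * eth_hw_addr_set(net, temp_addr) instead. Left as-is to match the
	 * kernel range this driver targets (an assumption).
	 */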
13930
13931 if (ifidx == 0)
13932 printf("%s\n", dhd_version);
13933 else {
13934#ifdef WL_EXT_IAPSTA
13935 wl_ext_iapsta_update_net_device(net, ifidx);
13936#endif /* WL_EXT_IAPSTA */
13937 if (dhd->pub.up == 1) {
13938 if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr, FALSE) == 0)
13939 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
13940 else
13941 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
13942 }
13943 }
13944
13945 if (need_rtnl_lock)
13946 err = register_netdev(net);
13947 else
13948 err = register_netdevice(net);
13949
13950 if (err != 0) {
13951 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
13952 goto fail;
13953 }
13954#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
13955 wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
13956#ifdef WL_ESCAN
13957 wl_escan_event_attach(net, dhdp);
13958#endif /* WL_ESCAN */
13959#ifdef WL_EXT_IAPSTA
13960 wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
13961 wl_ext_iapsta_attach_name(net, ifidx);
13962#endif /* WL_EXT_IAPSTA */
13963#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
13964
13965#if defined(CONFIG_TIZEN)
13966 net_stat_tizen_register(net);
13967#endif /* CONFIG_TIZEN */
13968
13969 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
13970#if defined(CUSTOMER_HW4_DEBUG)
13971 MAC2STRDBG(dhd->pub.mac.octet));
13972#else
13973 MAC2STRDBG(net->dev_addr));
13974#endif /* CUSTOMER_HW4_DEBUG */
13975
13976#if (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
13977 if (ifidx == 0) {
13978#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
13979 up(&dhd_registration_sem);
13980#endif /* BCMLXSDMMC */
13981 if (!dhd_download_fw_on_driverload) {
13982#ifdef WL_CFG80211
13983 wl_terminate_event_handler(net);
13984#endif /* WL_CFG80211 */
13985#if defined(DHD_LB_RXP)
13986 __skb_queue_purge(&dhd->rx_pend_queue);
13987#endif /* DHD_LB_RXP */
13988
13989#if defined(DHD_LB_TXP)
13990 skb_queue_purge(&dhd->tx_pend_queue);
13991#endif /* DHD_LB_TXP */
13992
13993#ifdef SHOW_LOGTRACE
13994 /* Release the skbs from queue for WLC_E_TRACE event */
13995 dhd_event_logtrace_flush_queue(dhdp);
13996#endif /* SHOW_LOGTRACE */
13997
13998#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
13999 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
14000#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
14001
14002#if defined(WLAN_ACCEL_BOOT)
14003 dhd->fs_check_retry = DHD_FS_CHECK_RETRIES;
14004 dhd->wl_accel_boot_on_done = FALSE;
14005 INIT_DELAYED_WORK(&dhd->wl_accel_work, dhd_wifi_accel_on_work_cb);
14006 schedule_delayed_work(&dhd->wl_accel_work,
14007 msecs_to_jiffies(DHD_FS_CHECK_RETRY_DELAY_MS));
14008#else
14009 /* Turn off Wifi after boot up */
14010#if defined(BT_OVER_SDIO)
14011 dhd_bus_put(&dhd->pub, WLAN_MODULE);
14012 wl_android_set_wifi_on_flag(FALSE);
14013#else
14014 wl_android_wifi_off(net, TRUE);
14015#endif /* BT_OVER_SDIO */
14016#endif /* WLAN_ACCEL_BOOT */
14017
14018 }
14019#if defined(WL_WIRELESS_EXT)
14020 wl_iw_down(net, &dhd->pub);
14021#endif /* defined(WL_WIRELESS_EXT) */
14022 }
14023#endif /* BCMPCIE || BCMLXSDMMC || BCMDBUS */
14024 return 0;
14025
14026fail:
14027 net->netdev_ops = NULL;
14028 return err;
14029}
14030
14031void
14032dhd_bus_detach(dhd_pub_t *dhdp)
14033{
14034 dhd_info_t *dhd;
14035
14036 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
14037
14038 if (dhdp) {
14039 dhd = (dhd_info_t *)dhdp->info;
14040 if (dhd) {
14041
14042 /*
14043 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
14044			 * calling stop again will cause SD read/write errors.
14045 */
14046 if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
14047 /* Stop the protocol module */
14048 dhd_prot_stop(&dhd->pub);
14049
14050 /* Stop the bus module */
14051#ifdef BCMDBUS
14052 /* Force Dongle terminated */
14053 if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
14054 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
14055 __FUNCTION__));
14056 dbus_stop(dhd->pub.bus);
14057 dhd->pub.busstate = DHD_BUS_DOWN;
14058#else
14059 dhd_bus_stop(dhd->pub.bus, TRUE);
14060#endif /* BCMDBUS */
14061 }
14062
14063#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
14064 dhd_bus_oob_intr_unregister(dhdp);
14065#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
14066 }
14067 }
14068}
14069
14070void dhd_detach(dhd_pub_t *dhdp)
14071{
14072 dhd_info_t *dhd;
14073 unsigned long flags;
14074 int timer_valid = FALSE;
14075 struct net_device *dev;
14076#ifdef WL_CFG80211
14077 struct bcm_cfg80211 *cfg = NULL;
14078#endif
14079 if (!dhdp)
14080 return;
14081
14082 dhd = (dhd_info_t *)dhdp->info;
14083 if (!dhd)
14084 return;
14085
14086 dev = dhd->iflist[0]->net;
14087
14088 if (dev) {
14089 rtnl_lock();
14090#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
14091 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
14092 cfg = wl_get_cfg(dev);
14093 if (cfg && cfg->static_ndev && (cfg->static_ndev->flags & IFF_UP)) {
14094 dev_close(cfg->static_ndev);
14095 }
14096 }
14097#endif /* WL_CFG80211 && WL_STATIC_IF */
14098 if (dev->flags & IFF_UP) {
14099 /* If IFF_UP is still up, it indicates that
14100 * "ifconfig wlan0 down" hasn't been called.
14101 * So invoke dev_close explicitly here to
14102 * bring down the interface.
14103 */
14104 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
14105 dev_close(dev);
14106 }
14107 rtnl_unlock();
14108 }
14109
14110 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
14111
14112	/* XXX kernel panic issue at first boot:
14113	 * rmmod without bringing the interface down generates an unnecessary hang event.
14114	 */
14115 DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
14116 dhd->pub.up = 0;
14117 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
14118 /* Give sufficient time for threads to start running in case
14119 * dhd_attach() has failed
14120 */
14121 OSL_SLEEP(100);
14122 }
14123#ifdef DHD_WET
14124 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
14125#endif /* DHD_WET */
14126#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
14127#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
14128
14129#ifdef PROP_TXSTATUS
14130#ifdef DHD_WLFC_THREAD
14131 if (dhd->pub.wlfc_thread) {
14132 kthread_stop(dhd->pub.wlfc_thread);
14133 dhdp->wlfc_thread_go = TRUE;
14134 wake_up_interruptible(&dhdp->wlfc_wqhead);
14135 }
14136 dhd->pub.wlfc_thread = NULL;
14137#endif /* DHD_WLFC_THREAD */
14138#endif /* PROP_TXSTATUS */
14139
14140#ifdef WL_CFG80211
14141 if (dev)
14142 wl_cfg80211_down(dev);
14143#endif /* WL_CFG80211 */
14144
14145 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
14146
14147 dhd_bus_detach(dhdp);
14148#ifdef BCMPCIE
14149 if (is_reboot == SYS_RESTART) {
14150 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
14151 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
14152 dhdpcie_bus_stop_host_dev(dhdp->bus);
14153 wifi_platform_set_power(dhd_wifi_platdata->adapters,
14154 FALSE, WIFI_TURNOFF_DELAY);
14155 }
14156 }
14157#endif /* BCMPCIE */
14158#ifndef PCIE_FULL_DONGLE
14159 if (dhdp->prot)
14160 dhd_prot_detach(dhdp);
14161#endif /* !PCIE_FULL_DONGLE */
14162 }
14163
14164#ifdef ARP_OFFLOAD_SUPPORT
14165 if (dhd_inetaddr_notifier_registered) {
14166 dhd_inetaddr_notifier_registered = FALSE;
14167 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
14168 }
14169#endif /* ARP_OFFLOAD_SUPPORT */
14170#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
14171 if (dhd_inet6addr_notifier_registered) {
14172 dhd_inet6addr_notifier_registered = FALSE;
14173 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
14174 }
14175#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
14176#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14177 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
14178 if (dhd->early_suspend.suspend)
14179 unregister_early_suspend(&dhd->early_suspend);
14180 }
14181#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
14182
14183#if defined(WL_WIRELESS_EXT)
14184 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
14185		/* Detach and unlink in the iw */
14186 wl_iw_detach(dev, dhdp);
14187 }
14188#endif /* defined(WL_WIRELESS_EXT) */
14189#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
14190#ifdef WL_EXT_IAPSTA
14191 wl_ext_iapsta_dettach(dhdp);
14192#endif /* WL_EXT_IAPSTA */
14193#ifdef WL_ESCAN
14194 wl_escan_detach(dev, dhdp);
14195#endif /* WL_ESCAN */
14196 wl_ext_event_dettach(dhdp);
14197#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
14198
14199 /* delete all interfaces, start with virtual */
14200 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
14201 int i = 1;
14202 dhd_if_t *ifp;
14203
14204 /* Cleanup virtual interfaces */
14205 dhd_net_if_lock_local(dhd);
14206 for (i = 1; i < DHD_MAX_IFS; i++) {
14207 if (dhd->iflist[i]) {
14208 dhd_remove_if(&dhd->pub, i, TRUE);
14209 }
14210 }
14211 dhd_net_if_unlock_local(dhd);
14212
14213 /* delete primary interface 0 */
14214 ifp = dhd->iflist[0];
14215 if (ifp && ifp->net) {
14216
14217#ifdef WL_CFG80211
14218 cfg = wl_get_cfg(ifp->net);
14219#endif
14220 /* in unregister_netdev case, the interface gets freed by net->destructor
14221 * (which is set to free_netdev)
14222 */
14223 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
14224 free_netdev(ifp->net);
14225 } else {
14226 netif_tx_disable(ifp->net);
14227 unregister_netdev(ifp->net);
14228 }
14229#ifdef PCIE_FULL_DONGLE
14230 ifp->net = DHD_NET_DEV_NULL;
14231#else
14232 ifp->net = NULL;
14233#endif /* PCIE_FULL_DONGLE */
14234
14235#ifdef DHD_L2_FILTER
14236 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
14237 NULL, FALSE, dhdp->tickcnt);
14238 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
14239 ifp->phnd_arp_table = NULL;
14240#endif /* DHD_L2_FILTER */
14241
14242 dhd_if_del_sta_list(ifp);
14243
14244 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
14245 dhd->iflist[0] = NULL;
14246#ifdef WL_CFG80211
14247 if (cfg && cfg->wdev)
14248 cfg->wdev->netdev = NULL;
14249#endif
14250 }
14251 }
14252
14253 /* Clear the watchdog timer */
14254 DHD_GENERAL_LOCK(&dhd->pub, flags);
14255 timer_valid = dhd->wd_timer_valid;
14256 dhd->wd_timer_valid = FALSE;
14257 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
14258 if (timer_valid)
14259 del_timer_sync(&dhd->timer);
14260 DHD_STOP_RPM_TIMER(&dhd->pub);
14261
14262#ifdef BCMDBUS
14263 tasklet_kill(&dhd->tasklet);
14264#else
14265 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
14266#ifdef DHD_PCIE_RUNTIMEPM
14267 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
14268 PROC_STOP(&dhd->thr_rpm_ctl);
14269 }
14270#endif /* DHD_PCIE_RUNTIMEPM */
14271 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
14272 PROC_STOP(&dhd->thr_wdt_ctl);
14273 }
14274
14275 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
14276 PROC_STOP(&dhd->thr_rxf_ctl);
14277 }
14278
14279 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
14280 PROC_STOP(&dhd->thr_dpc_ctl);
14281 } else
14282 {
14283 tasklet_kill(&dhd->tasklet);
14284 }
14285 }
14286#endif /* BCMDBUS */
14287
14288#ifdef WL_NATOE
14289 if (dhd->pub.nfct) {
14290 dhd_ct_close(dhd->pub.nfct);
14291 }
14292#endif /* WL_NATOE */
14293
14294#ifdef DHD_LB
14295 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
14296 /* Clear the flag first to avoid calling the cpu notifier */
14297 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
14298
14299 /* Kill the Load Balancing Tasklets */
14300#ifdef DHD_LB_RXP
14301 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
14302 __skb_queue_purge(&dhd->rx_pend_queue);
14303#endif /* DHD_LB_RXP */
14304#ifdef DHD_LB_TXP
14305 cancel_work_sync(&dhd->tx_dispatcher_work);
14306 tasklet_kill(&dhd->tx_tasklet);
14307 __skb_queue_purge(&dhd->tx_pend_queue);
14308#endif /* DHD_LB_TXP */
14309#ifdef DHD_LB_TXC
14310 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
14311 tasklet_kill(&dhd->tx_compl_tasklet);
14312#endif /* DHD_LB_TXC */
14313#ifdef DHD_LB_RXC
14314 tasklet_kill(&dhd->rx_compl_tasklet);
14315#endif /* DHD_LB_RXC */
14316
14317 /* Unregister from CPU Hotplug framework */
14318 dhd_unregister_cpuhp_callback(dhd);
14319
14320 dhd_cpumasks_deinit(dhd);
14321 DHD_LB_STATS_DEINIT(&dhd->pub);
14322 }
14323#endif /* DHD_LB */
14324
14325#ifdef CSI_SUPPORT
14326 dhd_csi_deinit(dhdp);
14327#endif /* CSI_SUPPORT */
14328
14329#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
14330 cancel_work_sync(&dhd->axi_error_dispatcher_work);
14331#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
14332
14333 DHD_SSSR_REG_INFO_DEINIT(&dhd->pub);
14334 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
14335
14336#ifdef EWP_EDL
14337 if (host_edl_support) {
14338 DHD_EDL_MEM_DEINIT(dhdp);
14339 host_edl_support = FALSE;
14340 }
14341#endif /* EWP_EDL */
14342
14343#ifdef WL_CFG80211
14344 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
14345 if (!cfg) {
14346 DHD_ERROR(("cfg NULL!\n"));
14347 ASSERT(0);
14348 } else {
14349 wl_cfg80211_detach(cfg);
14350 dhd_monitor_uninit();
14351 }
14352 }
14353#endif
14354
14355#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14356 destroy_workqueue(dhd->tx_wq);
14357 dhd->tx_wq = NULL;
14358 destroy_workqueue(dhd->rx_wq);
14359 dhd->rx_wq = NULL;
14360#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14361#ifdef DEBUGABILITY
14362 if (dhdp->dbg) {
14363#ifdef DBG_PKT_MON
14364 dhd_os_dbg_detach_pkt_monitor(dhdp);
14365 osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
14366#endif /* DBG_PKT_MON */
14367 }
14368#endif /* DEBUGABILITY */
14369 if (dhdp->dbg) {
14370 dhd_os_dbg_detach(dhdp);
14371 }
14372#ifdef DHD_MEM_STATS
14373 osl_spin_lock_deinit(dhd->pub.osh, dhd->pub.mem_stats_lock);
14374#endif /* DHD_MEM_STATS */
14375
14376#ifdef DHD_PKT_LOGGING
14377 dhd_os_detach_pktlog(dhdp);
14378#endif /* DHD_PKT_LOGGING */
14379#ifdef DHD_STATUS_LOGGING
14380 dhd_detach_statlog(dhdp);
14381#endif /* DHD_STATUS_LOGGING */
14382#ifdef DHD_PKTDUMP_ROAM
14383 dhd_dump_pkt_deinit(dhdp);
14384#endif /* DHD_PKTDUMP_ROAM */
14385#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
14386 if (dhd->pub.hang_info) {
14387 MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
14388 }
14389#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
14390#ifdef SHOW_LOGTRACE
14391 /* Release the skbs from queue for WLC_E_TRACE event */
14392 dhd_event_logtrace_flush_queue(dhdp);
14393
14394 /* Wait till event logtrace context finishes */
14395 dhd_cancel_logtrace_process_sync(dhd);
14396
14397 /* Remove ring proc entries */
14398 dhd_dbg_ring_proc_destroy(&dhd->pub);
14399
14400 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
14401 if (dhd->event_data.fmts) {
14402 MFREE(dhd->pub.osh, dhd->event_data.fmts,
14403 dhd->event_data.fmts_size);
14404 }
14405 if (dhd->event_data.raw_fmts) {
14406 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
14407 dhd->event_data.raw_fmts_size);
14408 }
14409 if (dhd->event_data.raw_sstr) {
14410 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
14411 dhd->event_data.raw_sstr_size);
14412 }
14413 if (dhd->event_data.rom_raw_sstr) {
14414 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
14415 dhd->event_data.rom_raw_sstr_size);
14416 }
14417 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
14418 }
14419#endif /* SHOW_LOGTRACE */
14420#ifdef PNO_SUPPORT
14421 if (dhdp->pno_state)
14422 dhd_pno_deinit(dhdp);
14423#endif
14424#ifdef RTT_SUPPORT
14425 if (dhdp->rtt_state) {
14426 dhd_rtt_detach(dhdp);
14427 }
14428#endif
14429#if defined(CONFIG_PM_SLEEP)
14430 if (dhd_pm_notifier_registered) {
14431 unregister_pm_notifier(&dhd->pm_notifier);
14432 dhd_pm_notifier_registered = FALSE;
14433 }
14434#endif /* CONFIG_PM_SLEEP */
14435
14436#ifdef DEBUG_CPU_FREQ
14437 if (dhd->new_freq)
14438 free_percpu(dhd->new_freq);
14439 dhd->new_freq = NULL;
14440 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
14441#endif
14442 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
14443#ifdef CONFIG_HAS_WAKELOCK
14444 dhd->wakelock_wd_counter = 0;
14445 wake_lock_destroy(&dhd->wl_wdwake);
14446	// terence 20161023: cannot destroy wl_wifi while wlan is down; a NULL pointer access would occur in dhd_ioctl_entry
14447 wake_lock_destroy(&dhd->wl_wifi);
14448#endif /* CONFIG_HAS_WAKELOCK */
14449 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
14450 DHD_OS_WAKE_LOCK_DESTROY(dhd);
14451 }
14452
14453#ifdef DHDTCPACK_SUPPRESS
14454 /* This will free all MEM allocated for TCPACK SUPPRESS */
14455 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
14456#endif /* DHDTCPACK_SUPPRESS */
14457
14458#ifdef PCIE_FULL_DONGLE
14459 dhd_flow_rings_deinit(dhdp);
14460 if (dhdp->prot)
14461 dhd_prot_detach(dhdp);
14462#endif
14463
14464#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
14465 dhd_free_tdls_peer_list(dhdp);
14466#endif
14467
14468#ifdef DUMP_IOCTL_IOV_LIST
14469 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
14470#endif /* DUMP_IOCTL_IOV_LIST */
14471#ifdef DHD_DEBUG
14472	/* memory waste feature list deletion */
14473 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
14474#endif /* DHD_DEBUG */
14475#ifdef WL_MONITOR
14476 dhd_del_monitor_if(dhd);
14477#endif /* WL_MONITOR */
14478
14479#ifdef DHD_ERPOM
14480 if (dhdp->enable_erpom) {
14481 dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
14482 }
14483#endif /* DHD_ERPOM */
14484
14485 cancel_work_sync(&dhd->dhd_hang_process_work);
14486
14487 /* Prefer adding de-init code above this comment unless necessary.
14488	 * The idea is to cancel the work queues, sysfs entries and flags at the end.
14489 */
14490 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
14491 dhd->dhd_deferred_wq = NULL;
14492
14493 /* log dump related buffers should be freed after wq is purged */
14494#ifdef DHD_LOG_DUMP
14495 dhd_log_dump_deinit(&dhd->pub);
14496#endif /* DHD_LOG_DUMP */
14497#if defined(BCMPCIE)
14498 if (dhdp->extended_trap_data)
14499 {
14500 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
14501 dhdp->extended_trap_data = NULL;
14502 }
14503#ifdef DNGL_AXI_ERROR_LOGGING
14504 if (dhdp->axi_err_dump)
14505 {
14506 MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
14507 dhdp->axi_err_dump = NULL;
14508 }
14509#endif /* DNGL_AXI_ERROR_LOGGING */
14510#endif /* BCMPCIE */
14511
14512#ifdef EWP_EDL
14513 cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
14514#endif
14515
14516 (void)dhd_deinit_sock_flows_buf(dhd);
14517
14518#ifdef DHD_DUMP_MNGR
14519 if (dhd->pub.dump_file_manage) {
14520 MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
14521 sizeof(dhd_dump_file_manage_t));
14522 }
14523#endif /* DHD_DUMP_MNGR */
14524
14525 dhd_sysfs_exit(dhd);
14526 dhd->pub.fw_download_status = FW_UNLOADED;
14527
14528#if defined(BT_OVER_SDIO)
14529 mutex_destroy(&dhd->bus_user_lock);
14530#endif /* BT_OVER_SDIO */
14531
14532#ifdef DHD_TX_PROFILE
14533 (void)dhd_tx_profile_detach(dhdp);
14534#endif /* defined(DHD_TX_PROFILE) */
14535 dhd_conf_detach(dhdp);
14536
14537} /* dhd_detach */
14538
14539void
14540dhd_free(dhd_pub_t *dhdp)
14541{
14542 dhd_info_t *dhd;
14543 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
14544
14545 if (dhdp) {
14546 int i;
14547 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
14548 if (dhdp->reorder_bufs[i]) {
14549 reorder_info_t *ptr;
14550 uint32 buf_size = sizeof(struct reorder_info);
14551
14552 ptr = dhdp->reorder_bufs[i];
14553
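				/* Note: a reorder buffer is a single allocation,
				 * the reorder_info header immediately followed by
				 * an array of (max_idx + 1) packet pointers, so
				 * the size to free is recomputed the same way it
				 * was sized at allocation time.
				 */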
14554 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
14555 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
14556 i, ptr->max_idx, buf_size));
14557
14558 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
14559 }
14560 }
14561
14562 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
14563
14564 dhd = (dhd_info_t *)dhdp->info;
14565 if (dhdp->soc_ram) {
14566#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
14567 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
14568#else
14569 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
14570#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
14571 }
14572 if (dhd != NULL) {
14573
14574 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
14575 if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
14576 DHD_PREALLOC_DHD_INFO, 0, FALSE))
14577 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
14578 dhd = NULL;
14579 }
14580 }
14581}
14582
14583void
14584dhd_clear(dhd_pub_t *dhdp)
14585{
14586 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
14587
14588 if (dhdp) {
14589 int i;
14590#ifdef DHDTCPACK_SUPPRESS
14591 /* Clean up timer/data structure for any remaining/pending packet or timer. */
14592 dhd_tcpack_info_tbl_clean(dhdp);
14593#endif /* DHDTCPACK_SUPPRESS */
14594 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
14595 if (dhdp->reorder_bufs[i]) {
14596 reorder_info_t *ptr;
14597 uint32 buf_size = sizeof(struct reorder_info);
14598
14599 ptr = dhdp->reorder_bufs[i];
14600
14601 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
14602 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
14603 i, ptr->max_idx, buf_size));
14604
14605 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
14606 }
14607 }
14608
14609 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
14610
14611 if (dhdp->soc_ram) {
14612#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
14613 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
14614#else
14615 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
14616#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
14617 dhdp->soc_ram = NULL;
14618 }
14619 }
14620}
14621
14622static void
14623dhd_module_cleanup(void)
14624{
14625 printf("%s: Enter\n", __FUNCTION__);
14626
14627 dhd_bus_unregister();
14628
14629 wl_android_exit();
14630
14631 dhd_wifi_platform_unregister_drv();
14632
14633#ifdef CUSTOMER_HW_AMLOGIC
14634#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
14635 wifi_teardown_dt();
14636#endif
14637#endif
14638 printf("%s: Exit\n", __FUNCTION__);
14639}
14640
14641static void __exit
14642dhd_module_exit(void)
14643{
14644 atomic_set(&exit_in_progress, 1);
14645 dhd_module_cleanup();
14646 unregister_reboot_notifier(&dhd_reboot_notifier);
14647 dhd_destroy_to_notifier_skt();
14648}
14649
14650static int
14651_dhd_module_init(void)
14652{
14653 int err;
14654 int retry = POWERUP_MAX_RETRY;
14655
14656 printf("%s: in %s\n", __FUNCTION__, dhd_version);
14657#ifdef CUSTOMER_HW_AMLOGIC
14658#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
14659 if (wifi_setup_dt()) {
14660 printf("wifi_dt : fail to setup dt\n");
14661 }
14662#endif
14663#endif
14664
14665 if (firmware_path[0] != '\0') {
14666 strlcpy(fw_bak_path, firmware_path, sizeof(fw_bak_path));
14667 }
14668
14669 if (nvram_path[0] != '\0') {
14670 strlcpy(nv_bak_path, nvram_path, sizeof(nv_bak_path));
14671 }
14672
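	/* Registering the platform driver can fail transiently at boot
	 * (e.g. the WLAN chip is not powered up or initialized yet), so
	 * retry up to POWERUP_MAX_RETRY times, restoring the firmware/NVRAM
	 * module-parameter paths that a failed attempt may have consumed.
	 */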
14673 do {
14674 err = dhd_wifi_platform_register_drv();
14675 if (!err) {
14676 register_reboot_notifier(&dhd_reboot_notifier);
14677 break;
14678 } else {
14679 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
14680 __FUNCTION__, retry));
14681 strlcpy(firmware_path, fw_bak_path, sizeof(firmware_path));
14682 strlcpy(nvram_path, nv_bak_path, sizeof(nvram_path));
14683 }
14684 } while (retry--);
14685
14686 dhd_create_to_notifier_skt();
14687
14688 if (err) {
14689#ifdef CUSTOMER_HW_AMLOGIC
14690#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
14691 wifi_teardown_dt();
14692#endif
14693#endif
14694		DHD_ERROR(("%s: Failed to load driver, max retry reached\n", __FUNCTION__));
14695 } else {
14696 if (!dhd_download_fw_on_driverload) {
14697 dhd_driver_init_done = TRUE;
14698 }
14699 }
14700
14701 printf("%s: Exit err=%d\n", __FUNCTION__, err);
14702 return err;
14703}
14704
14705static int __init
14706dhd_module_init(void)
14707{
14708 int err;
14709
14710 err = _dhd_module_init();
14711#ifdef DHD_SUPPORT_HDM
14712 if (err && !dhd_download_fw_on_driverload) {
14713 dhd_hdm_wlan_sysfs_init();
14714 }
14715#endif /* DHD_SUPPORT_HDM */
14716 return err;
14717
14718}
14719
14720#ifdef DHD_SUPPORT_HDM
14721bool hdm_trigger_init = FALSE;
14722struct delayed_work hdm_sysfs_wq;
14723
14724int
14725dhd_module_init_hdm(void)
14726{
14727 int err = 0;
14728
14729 hdm_trigger_init = TRUE;
14730
14731 if (dhd_driver_init_done) {
14732 DHD_INFO(("%s : Module is already inited\n", __FUNCTION__));
14733 return err;
14734 }
14735
14736 err = _dhd_module_init();
14737
14738	/* remove the sysfs file after the module has loaded properly */
14739 if (!err && !dhd_download_fw_on_driverload) {
14740 INIT_DELAYED_WORK(&hdm_sysfs_wq, dhd_hdm_wlan_sysfs_deinit);
14741 schedule_delayed_work(&hdm_sysfs_wq, msecs_to_jiffies(SYSFS_DEINIT_MS));
14742 }
14743
14744 hdm_trigger_init = FALSE;
14745 return err;
14746}
14747#endif /* DHD_SUPPORT_HDM */
14748
14749static int
14750dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
14751{
14752 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
14753 if (code == SYS_RESTART) {
14754#ifdef BCMPCIE
14755 is_reboot = code;
14756#endif /* BCMPCIE */
14757 }
14758 return NOTIFY_DONE;
14759}
14760
14761#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
14762/* XXX To decrease the device boot time, deferred_module_init() macro can be
14763 * used. The detailed principle and implementation of deferred_module_init()
14764 * are described at http://elinux.org/Deferred_Initcalls
14765 * To enable this feature for a module build, another deferred_module_init()
14766 * definition needs to be added to include/linux/init.h in the Linux kernel:
14767 * #define deferred_module_init(fn) module_init(fn)
14768 */
14769#if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
14770deferred_module_init_sync(dhd_module_init);
14771#else
14772deferred_module_init(dhd_module_init);
14773#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
14774#elif defined(USE_LATE_INITCALL_SYNC)
14775late_initcall_sync(dhd_module_init);
14776#else
14777late_initcall(dhd_module_init);
14778#endif /* USE_LATE_INITCALL_SYNC */
14779
14780module_exit(dhd_module_exit);
14781
14782/*
14783 * OS specific functions required to implement DHD driver in OS independent way
14784 */
14785int
14786dhd_os_proto_block(dhd_pub_t *pub)
14787{
14788 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14789
14790 if (dhd) {
14791 down(&dhd->proto_sem);
14792
14793 return 1;
14794 }
14795
14796 return 0;
14797}
14798
14799int
14800dhd_os_proto_unblock(dhd_pub_t *pub)
14801{
14802 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14803
14804 if (dhd) {
14805 up(&dhd->proto_sem);
14806 return 1;
14807 }
14808
14809 return 0;
14810}
14811
14812void
14813dhd_os_dhdiovar_lock(dhd_pub_t *pub)
14814{
14815 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14816
14817 if (dhd) {
14818 mutex_lock(&dhd->dhd_iovar_mutex);
14819 }
14820}
14821
14822void
14823dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
14824{
14825 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14826
14827 if (dhd) {
14828 mutex_unlock(&dhd->dhd_iovar_mutex);
14829 }
14830}
14831
14832void
14833dhd_os_logdump_lock(dhd_pub_t *pub)
14834{
14835 dhd_info_t *dhd = NULL;
14836
14837 if (!pub)
14838 return;
14839
14840 dhd = (dhd_info_t *)(pub->info);
14841
14842 if (dhd) {
14843 mutex_lock(&dhd->logdump_lock);
14844 }
14845}
14846
14847void
14848dhd_os_logdump_unlock(dhd_pub_t *pub)
14849{
14850 dhd_info_t *dhd = NULL;
14851
14852 if (!pub)
14853 return;
14854
14855 dhd = (dhd_info_t *)(pub->info);
14856
14857 if (dhd) {
14858 mutex_unlock(&dhd->logdump_lock);
14859 }
14860}
14861
14862unsigned long
14863dhd_os_dbgring_lock(void *lock)
14864{
14865 if (!lock)
14866 return 0;
14867
14868 mutex_lock((struct mutex *)lock);
14869
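	/* A mutex carries no saved IRQ flags, so always return 0; the caller
	 * hands the value back to dhd_os_dbgring_unlock(), which ignores it.
	 */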
14870 return 0;
14871}
14872
14873void
14874dhd_os_dbgring_unlock(void *lock, unsigned long flags)
14875{
14876 BCM_REFERENCE(flags);
14877
14878 if (!lock)
14879 return;
14880
14881 mutex_unlock((struct mutex *)lock);
14882}
14883
14884unsigned int
14885dhd_os_get_ioctl_resp_timeout(void)
14886{
14887 return ((unsigned int)dhd_ioctl_timeout_msec);
14888}
14889
14890void
14891dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
14892{
14893 dhd_ioctl_timeout_msec = (int)timeout_msec;
14894}
14895
14896int
14897dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
14898{
14899 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14900 int timeout;
14901
14902	/* Convert timeout in milliseconds to jiffies */
14903 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
14904
14905 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
14906
14907 return timeout;
14908}
14909
14910int
14911dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
14912{
14913 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
14914
14915 wake_up(&dhd->ioctl_resp_wait);
14916 return 0;
14917}
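/*
 * Illustrative sketch (not part of the driver): the wait/wake pair above
 * forms a simple request/response handshake. The IOCTL path clears a
 * completion flag, posts the request, and sleeps in dhd_os_ioctl_resp_wait();
 * the bus receive path sets the flag and calls dhd_os_ioctl_resp_wake().
 * The resp_received flag below is hypothetical.
 */
#if 0
static int
example_ioctl_handshake(dhd_pub_t *pub, uint *resp_received)
{
	*resp_received = 0;
	/* ... post the IOCTL request to the dongle here ... */
	if (dhd_os_ioctl_resp_wait(pub, resp_received) == 0) {
		return BCME_ERROR;	/* timed out waiting for the response */
	}
	return BCME_OK;
}
#endif /* 0 */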
14918
14919int
14920dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
14921{
14922 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14923 int timeout;
14924
14925	/* Convert timeout in milliseconds to jiffies */
14926 timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
14927
14928 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
14929
14930 return timeout;
14931}
14932
14933#ifdef PCIE_INB_DW
14934int
14935dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
14936{
14937 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14938 int timeout;
14939
14940	/* Convert timeout in milliseconds to jiffies */
14941 timeout = msecs_to_jiffies(ds_exit_timeout_msec);
14942
14943 timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
14944
14945 return timeout;
14946}
14947
14948int
14949dhd_os_ds_exit_wake(dhd_pub_t *pub)
14950{
14951 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
14952
14953 wake_up_all(&dhd->ds_exit_wait);
14954 return 0;
14955}
14956
14957#endif /* PCIE_INB_DW */
14958
14959int
14960dhd_os_d3ack_wake(dhd_pub_t *pub)
14961{
14962 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
14963
14964 wake_up(&dhd->d3ack_wait);
14965 return 0;
14966}
14967
14968int
14969dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
14970{
14971 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14972 int timeout;
14973
14974	/* Wait for bus usage contexts to gracefully exit within some timeout value.
14975	 * Set the timeout a little higher than dhd_ioctl_timeout_msec
14976	 * so that the IOCTL timeout is not affected.
14977 */
14978	/* Convert timeout in milliseconds to jiffies */
14979 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
14980
14981 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
14982
14983 return timeout;
14984}
14985
14986/*
14987 * Wait until the condition *var == condition is met.
14988 * Returns 0 if the @condition still evaluated to false after the timeout elapsed
14989 * Returns nonzero (at least 1) if the @condition evaluated to true
14990 */
14991int
14992dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
14993{
14994 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
14995 int timeout;
14996
14997	/* Convert timeout in milliseconds to jiffies */
14998 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
14999
15000 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
15001
15002 return timeout;
15003}
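/*
 * Illustrative usage sketch (not part of the driver): a caller that needs
 * the bus to go fully idle can wait for the whole busy-state word to reach
 * zero. The dhd_bus_busy_state field name is assumed from the surrounding
 * DHD code; adjust if the actual dhd_pub_t layout differs.
 */
#if 0
static int
example_wait_for_bus_idle(dhd_pub_t *pub)
{
	/* Block (up to DHD_BUS_BUSY_TIMEOUT ms) until *var == 0 */
	if (dhd_os_busbusy_wait_condition(pub, &pub->dhd_bus_busy_state, 0) == 0) {
		DHD_ERROR(("%s: bus still busy after timeout\n", __FUNCTION__));
		return BCME_BUSY;
	}
	return BCME_OK;
}
#endif /* 0 */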
15004
15005/*
15006 * Wait until the '(*var & bitmask) == condition' is met.
15007 * Returns 0 if the @condition still evaluated to false after the timeout elapsed
15008 * Returns nonzero (at least 1) if the @condition evaluated to true
15009 */
15010int
15011dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
15012 uint bitmask, uint condition)
15013{
15014 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
15015 int timeout;
15016
15017	/* Convert timeout in milliseconds to jiffies */
15018 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
15019
15020 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
15021 ((*var & bitmask) == condition), timeout);
15022
15023 return timeout;
15024}
15025
15026int
15027dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
15028{
15029 int ret = 0;
15030 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
15031 int timeout;
15032
15033 timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
15034
15035 ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
15036
15037 return ret;
15038
15039}
15040
15041int
15042dhd_os_dmaxfer_wake(dhd_pub_t *pub)
15043{
15044 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15045
15046 wake_up(&dhd->dmaxfer_wait);
15047 return 0;
15048}
15049
15050void
15051dhd_os_tx_completion_wake(dhd_pub_t *dhd)
15052{
15053	/* Call wmb() to make sure the event value gets updated before waking up the waiter */
15054 OSL_SMP_WMB();
15055 wake_up(&dhd->tx_completion_wait);
15056}
15057
15058/* Fix compilation error for FC11 */
15059INLINE int
15060dhd_os_busbusy_wake(dhd_pub_t *pub)
15061{
15062 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15063	/* Call wmb() to make sure the event value gets updated before waking up the waiter */
15064 OSL_SMP_WMB();
15065 wake_up(&dhd->dhd_bus_busy_state_wait);
15066 return 0;
15067}
15068
15069void
15070dhd_os_wd_timer_extend(void *bus, bool extend)
15071{
15072#ifndef BCMDBUS
15073 dhd_pub_t *pub = bus;
15074 dhd_info_t *dhd = (dhd_info_t *)pub->info;
15075
15076 if (extend)
15077 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
15078 else
15079 dhd_os_wd_timer(bus, dhd->default_wd_interval);
15080#endif /* !BCMDBUS */
15081}
15082
15083void
15084dhd_os_wd_timer(void *bus, uint wdtick)
15085{
15086#ifndef BCMDBUS
15087 dhd_pub_t *pub = bus;
15088 dhd_info_t *dhd = (dhd_info_t *)pub->info;
15089 unsigned long flags;
15090
15091 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
15092
15093 if (!dhd) {
15094 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
15095 return;
15096 }
15097
15098 DHD_GENERAL_LOCK(pub, flags);
15099
15100 /* don't start the wd until fw is loaded */
15101 if (pub->busstate == DHD_BUS_DOWN) {
15102 DHD_GENERAL_UNLOCK(pub, flags);
15103#ifdef BCMSDIO
15104 if (!wdtick) {
15105 DHD_OS_WD_WAKE_UNLOCK(pub);
15106 }
15107#endif /* BCMSDIO */
15108 return;
15109 }
15110
15111 /* Totally stop the timer */
15112 if (!wdtick && dhd->wd_timer_valid == TRUE) {
15113 dhd->wd_timer_valid = FALSE;
15114 DHD_GENERAL_UNLOCK(pub, flags);
15115 del_timer_sync(&dhd->timer);
15116#ifdef BCMSDIO
15117 DHD_OS_WD_WAKE_UNLOCK(pub);
15118#endif /* BCMSDIO */
15119 return;
15120 }
15121
15122 if (wdtick) {
15123#ifdef BCMSDIO
15124 DHD_OS_WD_WAKE_LOCK(pub);
15125 dhd_watchdog_ms = (uint)wdtick;
15126#endif /* BCMSDIO */
15127		/* Re-arm the timer with the last watchdog period */
15128 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
15129 dhd->wd_timer_valid = TRUE;
15130 }
15131 DHD_GENERAL_UNLOCK(pub, flags);
15132#endif /* !BCMDBUS */
15133}
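/*
 * Illustrative sketch (not part of the driver): dhd_os_wd_timer() is the
 * single entry point for both arming and cancelling the watchdog; a zero
 * tick cancels the timer (and, on SDIO, releases the watchdog wakelock).
 */
#if 0
static void
example_watchdog_control(dhd_pub_t *pub)
{
	dhd_os_wd_timer(pub, dhd_watchdog_ms);	/* arm or re-arm */
	dhd_os_wd_timer(pub, 0);		/* stop the watchdog */
}
#endif /* 0 */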
15134
15135#ifdef DHD_PCIE_RUNTIMEPM
15136void
15137dhd_os_runtimepm_timer(void *bus, uint tick)
15138{
15139 dhd_pub_t *pub = bus;
15140 dhd_info_t *dhd = (dhd_info_t *)pub->info;
15141 unsigned long flags;
15142
15143 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
15144
15145 if (!dhd) {
15146 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
15147 return;
15148 }
15149
15150 DHD_GENERAL_LOCK(pub, flags);
15151
15152 /* don't start the RPM until fw is loaded */
15153 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
15154 DHD_GENERAL_UNLOCK(pub, flags);
15155 return;
15156 }
15157
15158 /* If tick is non-zero, the request is to start the timer */
15159 if (tick) {
15160		/* Start the timer only if it's not already running */
15161 if (dhd->rpm_timer_valid == FALSE) {
15162 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
15163 dhd->rpm_timer_valid = TRUE;
15164 DHD_ERROR(("DHD Runtime PM Timer ON\n"));
15165 }
15166 } else {
15167 /* tick is zero, we have to stop the timer */
15168		/* Stop the timer only if it's running; otherwise we don't have to do anything */
15169 if (dhd->rpm_timer_valid == TRUE) {
15170 dhd->rpm_timer_valid = FALSE;
15171 DHD_GENERAL_UNLOCK(pub, flags);
15172 del_timer_sync(&dhd->rpm_timer);
15173 DHD_ERROR(("DHD Runtime PM Timer OFF \n"));
15174 /* we have already released the lock, so just go to exit */
15175 goto exit;
15176 }
15177 }
15178
15179 DHD_GENERAL_UNLOCK(pub, flags);
15180exit:
15181 return;
15182
15183}
15184
15185#endif /* DHD_PCIE_RUNTIMEPM */
15186
15187void *
15188dhd_os_open_image1(dhd_pub_t *pub, char *filename)
15189{
15190 struct file *fp;
15191 int size;
15192
15193 fp = filp_open(filename, O_RDONLY, 0);
15194 /*
15195 * 2.6.11 (FC4) supports filp_open() but later revs don't?
15196 * Alternative:
15197 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
15198 * ???
15199 */
15200 if (IS_ERR(fp)) {
15201 fp = NULL;
15202 goto err;
15203 }
15204
15205 if (!S_ISREG(file_inode(fp)->i_mode)) {
15206 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
15207 fp = NULL;
15208 goto err;
15209 }
15210
15211 size = i_size_read(file_inode(fp));
15212 if (size <= 0) {
15213 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
15214 fp = NULL;
15215 goto err;
15216 }
15217
15218 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
15219
15220err:
15221 return fp;
15222}
15223
15224int
15225dhd_os_get_image_block(char *buf, int len, void *image)
15226{
15227 struct file *fp = (struct file *)image;
15228 int rdlen;
15229 int size;
15230
15231 if (!image) {
15232 return 0;
15233 }
15234
15235 size = i_size_read(file_inode(fp));
15236 rdlen = kernel_read_compat(fp, fp->f_pos, buf, MIN(len, size));
15237
15238 if (len >= size && size != rdlen) {
15239 return -EIO;
15240 }
15241
15242 if (rdlen > 0) {
15243 fp->f_pos += rdlen;
15244 }
15245
15246 return rdlen;
15247}
15248
15249#if defined(BT_OVER_SDIO)
15250int
15251dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
15252{
15253 struct file *fp = (struct file *)image;
15254 int rd_len;
15255 uint str_len = 0;
15256 char *str_end = NULL;
15257
15258 if (!image)
15259 return 0;
15260
15261 rd_len = kernel_read_compat(fp, fp->f_pos, str, len);
15262 str_end = strnchr(str, len, '\n');
15263 if (str_end == NULL) {
15264 goto err;
15265 }
15266 str_len = (uint)(str_end - str);
15267
15268 /* Advance file pointer past the string length */
15269 fp->f_pos += str_len + 1;
15270 bzero(str_end, rd_len - str_len);
15271
15272err:
15273 return str_len;
15274}
15275#endif /* defined (BT_OVER_SDIO) */
15276
15277int
15278dhd_os_get_image_size(void *image)
15279{
15280 struct file *fp = (struct file *)image;
15281 int size;
15282 if (!image) {
15283 return 0;
15284 }
15285
15286 size = i_size_read(file_inode(fp));
15287
15288 return size;
15289}
15290
15291void
15292dhd_os_close_image1(dhd_pub_t *pub, void *image)
15293{
15294 if (image) {
15295 filp_close((struct file *)image, NULL);
15296 }
15297}
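/*
 * Illustrative sketch (not part of the driver): a typical consumer of the
 * image helpers above opens the file, pulls it in chunks until
 * dhd_os_get_image_block() returns 0 (EOF) or a negative error, then
 * closes it. Buffer management is simplified for the example.
 */
#if 0
static int
example_read_image(dhd_pub_t *pub, char *path, char *buf, int buflen)
{
	void *image = dhd_os_open_image1(pub, path);
	int total = 0, len = 0;

	if (image == NULL)
		return BCME_ERROR;
	while ((len = dhd_os_get_image_block(buf + total,
			buflen - total, image)) > 0) {
		total += len;
	}
	dhd_os_close_image1(pub, image);
	return (len < 0) ? len : total;
}
#endif /* 0 */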
15298
15299void
15300dhd_os_sdlock(dhd_pub_t *pub)
15301{
15302 dhd_info_t *dhd;
15303
15304 dhd = (dhd_info_t *)(pub->info);
15305
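	/* When the DPC runs as a real-time thread (dhd_dpc_prio >= 0) the
	 * lock holder may sleep, so a semaphore is used; when the DPC runs
	 * as a tasklet, sleeping is not allowed, so a BH spinlock is used
	 * instead.
	 */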
15306#ifdef BCMDBUS
15307 spin_lock_bh(&dhd->sdlock);
15308#else
15309 if (dhd_dpc_prio >= 0)
15310 down(&dhd->sdsem);
15311 else
15312 spin_lock_bh(&dhd->sdlock);
15313#endif /* !BCMDBUS */
15314}
15315
15316void
15317dhd_os_sdunlock(dhd_pub_t *pub)
15318{
15319 dhd_info_t *dhd;
15320
15321 dhd = (dhd_info_t *)(pub->info);
15322
15323#ifdef BCMDBUS
15324 spin_unlock_bh(&dhd->sdlock);
15325#else
15326 if (dhd_dpc_prio >= 0)
15327 up(&dhd->sdsem);
15328 else
15329 spin_unlock_bh(&dhd->sdlock);
15330#endif /* !BCMDBUS */
15331}
15332
15333void
15334dhd_os_sdlock_txq(dhd_pub_t *pub)
15335{
15336 dhd_info_t *dhd;
15337
15338 dhd = (dhd_info_t *)(pub->info);
15339#ifdef BCMDBUS
15340 spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
15341#else
15342 spin_lock_bh(&dhd->txqlock);
15343#endif /* BCMDBUS */
15344}
15345
15346void
15347dhd_os_sdunlock_txq(dhd_pub_t *pub)
15348{
15349 dhd_info_t *dhd;
15350
15351 dhd = (dhd_info_t *)(pub->info);
15352#ifdef BCMDBUS
15353 spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
15354#else
15355 spin_unlock_bh(&dhd->txqlock);
15356#endif /* BCMDBUS */
15357}
15358
15359void
15360dhd_os_sdlock_rxq(dhd_pub_t *pub)
15361{
15362}
15363
15364void
15365dhd_os_sdunlock_rxq(dhd_pub_t *pub)
15366{
15367}
15368
15369static void
15370dhd_os_rxflock(dhd_pub_t *pub)
15371{
15372 dhd_info_t *dhd;
15373
15374 dhd = (dhd_info_t *)(pub->info);
15375 spin_lock_bh(&dhd->rxf_lock);
15376
15377}
15378
15379static void
15380dhd_os_rxfunlock(dhd_pub_t *pub)
15381{
15382 dhd_info_t *dhd;
15383
15384 dhd = (dhd_info_t *)(pub->info);
15385 spin_unlock_bh(&dhd->rxf_lock);
15386}
15387
15388#ifdef DHDTCPACK_SUPPRESS
15389unsigned long
15390dhd_os_tcpacklock(dhd_pub_t *pub)
15391{
15392 dhd_info_t *dhd;
15393 unsigned long flags = 0;
15394
15395 dhd = (dhd_info_t *)(pub->info);
15396
15397 if (dhd) {
15398#ifdef BCMSDIO
15399 spin_lock_bh(&dhd->tcpack_lock);
15400#else
15401 flags = osl_spin_lock(&dhd->tcpack_lock);
15402#endif /* BCMSDIO */
15403 }
15404
15405 return flags;
15406}
15407
15408void
15409dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
15410{
15411 dhd_info_t *dhd;
15412
15413#ifdef BCMSDIO
15414 BCM_REFERENCE(flags);
15415#endif /* BCMSDIO */
15416
15417 dhd = (dhd_info_t *)(pub->info);
15418
15419 if (dhd) {
15420#ifdef BCMSDIO
15421 spin_unlock_bh(&dhd->tcpack_lock);
15422#else
15423 osl_spin_unlock(&dhd->tcpack_lock, flags);
15424#endif /* BCMSDIO */
15425 }
15426}
15427#endif /* DHDTCPACK_SUPPRESS */
15428
15429uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
15430{
15431 uint8* buf;
15432 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
15433
15434 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
15435 if (buf == NULL && kmalloc_if_fail)
15436 buf = kmalloc(size, flags);
15437
15438 return buf;
15439}
15440
15441void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
15442{
15443}
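/*
 * Note: dhd_os_prefree() is intentionally a no-op here. Buffers handed out
 * by wifi_platform_prealloc() are owned by the platform layer and reused
 * across driver reloads, so they must not be freed by DHD; only the
 * kmalloc() fallback in dhd_os_prealloc() would need an explicit kfree()
 * by its caller.
 */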
15444
15445#if defined(WL_WIRELESS_EXT)
15446struct iw_statistics *
15447dhd_get_wireless_stats(struct net_device *dev)
15448{
15449 int res = 0;
15450 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15451
15452 if (!dhd->pub.up) {
15453 return NULL;
15454 }
15455
15456 if (!(dev->flags & IFF_UP)) {
15457 return NULL;
15458 }
15459
15460 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
15461
15462 if (res == 0)
15463 return &dhd->iw.wstats;
15464 else
15465 return NULL;
15466}
15467#endif /* defined(WL_WIRELESS_EXT) */
15468
15469static int
15470dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
15471 wl_event_msg_t *event, void **data)
15472{
15473 int bcmerror = 0;
15474#ifdef WL_CFG80211
15475 unsigned long flags = 0;
15476#endif /* WL_CFG80211 */
15477 ASSERT(dhd != NULL);
15478
15479#ifdef SHOW_LOGTRACE
15480 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
15481 &dhd->event_data);
15482#else
15483 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
15484 NULL);
15485#endif /* SHOW_LOGTRACE */
15486 if (unlikely(bcmerror != BCME_OK)) {
15487 return bcmerror;
15488 }
15489
15490 if (ntoh32(event->event_type) == WLC_E_IF) {
15491 /* WLC_E_IF event types are consumed by wl_process_host_event.
15492 * For ifadd/del ops, the netdev ptr may not be valid at this
15493		 * point, so return before invoking the cfg80211/wext handlers.
15494 */
15495 return BCME_OK;
15496 }
15497
15498#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
15499 wl_ext_event_send(dhd->pub.event_params, event, *data);
15500#endif
15501
15502#ifdef WL_CFG80211
15503 if (dhd->iflist[ifidx]->net) {
15504 DHD_UP_LOCK(&dhd->pub.up_lock, flags);
15505 if (dhd->pub.up) {
15506 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
15507 }
15508 DHD_UP_UNLOCK(&dhd->pub.up_lock, flags);
15509 }
15510#endif /* defined(WL_CFG80211) */
15511
15512 return (bcmerror);
15513}
15514
15515/* send up locally generated event */
15516void
15517dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
15518{
15519 switch (ntoh32(event->event_type)) {
15520 /* Handle error case or further events here */
15521 default:
15522 break;
15523 }
15524}
15525
15526#ifdef LOG_INTO_TCPDUMP
15527void
15528dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
15529{
15530 struct sk_buff *p, *skb;
15531 uint32 pktlen;
15532 int len;
15533 dhd_if_t *ifp;
15534 dhd_info_t *dhd;
15535 uchar *skb_data;
15536 int ifidx = 0;
15537 struct ether_header eth;
15538
15539 pktlen = sizeof(eth) + data_len;
15540 dhd = dhdp->info;
15541
15542 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
15543 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
15544
15545 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
15546 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
15547 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
15548 eth.ether_type = hton16(ETHER_TYPE_BRCM);
15549
15550 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
15551 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
15552 skb = PKTTONATIVE(dhdp->osh, p);
15553 skb_data = skb->data;
15554 len = skb->len;
15555
15556 ifidx = dhd_ifname2idx(dhd, "wlan0");
15557 ifp = dhd->iflist[ifidx];
15558 if (ifp == NULL)
15559 ifp = dhd->iflist[0];
15560
15561 ASSERT(ifp);
15562 skb->dev = ifp->net;
15563 skb->protocol = eth_type_trans(skb, skb->dev);
15564 skb->data = skb_data;
15565 skb->len = len;
15566
15567 /* Strip header, count, deliver upward */
15568 skb_pull(skb, ETH_HLEN);
15569
15570 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
15571 __FUNCTION__, __LINE__);
15572 /* Send the packet */
15573 if (in_interrupt()) {
15574 netif_rx(skb);
15575 } else {
15576 netif_rx_ni(skb);
15577 }
15578 } else {
15579 /* Could not allocate a sk_buf */
15580 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
15581 }
15582}
15583#endif /* LOG_INTO_TCPDUMP */
15584
15585void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
15586{
15587#if defined(BCMSDIO)
15588 struct dhd_info *dhdinfo = dhd->info;
15589
15590 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
15591
15592 dhd_os_sdunlock(dhd);
15593 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
15594 dhd_os_sdlock(dhd);
15595#endif /* defined(BCMSDIO) */
15596 return;
15597} /* dhd_wait_for_event */
15598
15599void dhd_wait_event_wakeup(dhd_pub_t *dhd)
15600{
15601#if defined(BCMSDIO)
15602 struct dhd_info *dhdinfo = dhd->info;
15603 if (waitqueue_active(&dhdinfo->ctrl_wait))
15604 wake_up(&dhdinfo->ctrl_wait);
15605#endif
15606 return;
15607}
15608
15609#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
15610int
15611dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
15612{
15613 int ret;
15614
15615 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15616
15617#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
15618 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
15619 return BCME_ERROR;
15620#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
15621
15622 if (flag == TRUE) {
15623#ifndef WL_CFG80211
15624 /* Issue wl down command for non-cfg before resetting the chip */
15625 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
15626 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
15627 }
15628#endif /* !WL_CFG80211 */
15629#ifdef PROP_TXSTATUS
15630 if (dhd->pub.wlfc_enabled) {
15631 dhd_wlfc_deinit(&dhd->pub);
15632 }
15633#endif /* PROP_TXSTATUS */
15634#ifdef PNO_SUPPORT
15635 if (dhd->pub.pno_state) {
15636 dhd_pno_deinit(&dhd->pub);
15637 }
15638#endif
15639#ifdef RTT_SUPPORT
15640 if (dhd->pub.rtt_state) {
15641 dhd_rtt_deinit(&dhd->pub);
15642 }
15643#endif /* RTT_SUPPORT */
15644
15645/*
15646 * XXX Detach only if the module is not attached by default at dhd_attach.
15647 * If attached by default, we need to keep it till dhd_detach, so that
15648 * module is not detached at wifi on/off
15649 */
15650#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
15651 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
15652#endif /* DBG_PKT_MON */
15653 }
15654
15655#ifdef BCMSDIO
15656	/* XXX Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware name.
15657 * This is indeed a hack but we have to make it work properly before we have a better
15658 * solution
15659 */
15660 if (!flag) {
15661 dhd_update_fw_nv_path(dhd);
15662 /* update firmware and nvram path to sdio bus */
15663 dhd_bus_update_fw_nv_path(dhd->pub.bus,
15664 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
15665 }
15666#endif /* BCMSDIO */
15667#if defined(CONFIG_ARCH_EXYNOS) && defined(BCMPCIE)
15668#if !defined(CONFIG_SOC_EXYNOS8890) && !defined(SUPPORT_EXYNOS7420)
15669 /* XXX: JIRA SWWLAN-139454: Added L1ss enable
15670 * after firmware download completion due to link down issue
15671 * JIRA SWWLAN-142236: Amendment - Changed L1ss enable point
15672 */
15673 printk(KERN_ERR "%s Disable L1ss EP side\n", __FUNCTION__);
15674 if (flag == FALSE && dhd->pub.busstate == DHD_BUS_DOWN)
15675 exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
15676#endif /* !CONFIG_SOC_EXYNOS8890 && !defined(SUPPORT_EXYNOS7420) */
15677#endif /* CONFIG_ARCH_EXYNOS && BCMPCIE */
15678
15679 ret = dhd_bus_devreset(&dhd->pub, flag);
15680
15681#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
15682 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
15683 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
15684#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
15685
15686 if (flag) {
15687 /* Clear some flags for recovery logic */
15688 dhd->pub.dongle_trap_occured = 0;
15689 dhd->pub.iovar_timeout_occured = 0;
15690#ifdef PCIE_FULL_DONGLE
15691 dhd->pub.d3ack_timeout_occured = 0;
15692 dhd->pub.livelock_occured = 0;
15693 dhd->pub.pktid_audit_failed = 0;
15694#endif /* PCIE_FULL_DONGLE */
15695 dhd->pub.smmu_fault_occurred = 0;
15696 dhd->pub.iface_op_failed = 0;
15697 dhd->pub.scan_timeout_occurred = 0;
15698 dhd->pub.scan_busy_occurred = 0;
15699 }
15700
15701 if (ret) {
15702 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
15703 }
15704
15705 return ret;
15706}
15707
15708int
15709dhd_net_bus_suspend(struct net_device *dev)
15710{
15711 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15712 return dhd_bus_suspend(&dhd->pub);
15713}
15714
15715int
15716dhd_net_bus_resume(struct net_device *dev, uint8 stage)
15717{
15718 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15719 return dhd_bus_resume(&dhd->pub, stage);
15720}
15721
15722#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
15723
15724int net_os_set_suspend_disable(struct net_device *dev, int val)
15725{
15726 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15727 int ret = 0;
15728
15729 if (dhd) {
15730 ret = dhd->pub.suspend_disable_flag;
15731 dhd->pub.suspend_disable_flag = val;
15732 }
15733 return ret;
15734}
15735
15736int net_os_set_suspend(struct net_device *dev, int val, int force)
15737{
15738 int ret = 0;
15739 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15740
15741 if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
15742 if (!val)
15743 dhd_conf_set_suspend_resume(&dhd->pub, val);
15744#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
15745 ret = dhd_set_suspend(val, &dhd->pub);
15746#else
15747 ret = dhd_suspend_resume_helper(dhd, val, force);
15748#endif
15749#ifdef WL_CFG80211
15750 wl_cfg80211_update_power_mode(dev);
15751#endif
15752 if (val)
15753 dhd_conf_set_suspend_resume(&dhd->pub, val);
15754 }
15755 return ret;
15756}
15757
15758int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
15759{
15760 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15761
15762 if (dhd) {
15763 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
15764 __FUNCTION__, val));
15765 dhd->pub.suspend_bcn_li_dtim = val;
15766 }
15767
15768 return 0;
15769}
15770
15771int net_os_set_max_dtim_enable(struct net_device *dev, int val)
15772{
15773 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15774
15775 if (dhd) {
15776 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
15777 __FUNCTION__, (val ? "Enable" : "Disable")));
15778 if (val) {
15779 dhd->pub.max_dtim_enable = TRUE;
15780 } else {
15781 dhd->pub.max_dtim_enable = FALSE;
15782 }
15783 } else {
15784 return -1;
15785 }
15786
15787 return 0;
15788}
15789
15790#ifdef DISABLE_DTIM_IN_SUSPEND
15791int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
15792{
15793 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15794
15795 if (dhd) {
15796 DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
15797 __FUNCTION__, (val ? "Enable" : "Disable")));
15798 if (val) {
15799 dhd->pub.disable_dtim_in_suspend = TRUE;
15800 } else {
15801 dhd->pub.disable_dtim_in_suspend = FALSE;
15802 }
15803 } else {
15804 return BCME_ERROR;
15805 }
15806
15807 return BCME_OK;
15808}
15809#endif /* DISABLE_DTIM_IN_SUSPEND */
15810
15811#ifdef PKT_FILTER_SUPPORT
15812int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
15813{
15814 int ret = 0;
15815
15816#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
15817 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15818
15819 if (!dhd_master_mode)
15820 add_remove = !add_remove;
15821 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
15822 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
15823 return 0;
15824 }
15825
15826#ifdef BLOCK_IPV6_PACKET
15827	/* the customer wants to pass no IPv6 packets at all */
15828 if (num == DHD_MULTICAST6_FILTER_NUM) {
15829 return 0;
15830 }
15831#endif /* BLOCK_IPV6_PACKET */
15832
15833 if (num >= dhd->pub.pktfilter_count) {
15834 return -EINVAL;
15835 }
15836
15837 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
15838#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
15839
15840 return ret;
15841}
15842
15843/* XXX RB:4238 Change net_os_set_packet_filter() function name to net_os_enable_packet_filter()
15844 * previous code did 'set' & 'enable' in one function,
15845 * but from now on, we are going to separate the 'set' and 'enable' features.
15846 * - set : net_os_rxfilter_add_remove() -> dhd_set_packet_filter() -> dhd_pktfilter_offload_set()
15847 * - enable : net_os_enable_packet_filter() -> dhd_enable_packet_filter()
15848 * -> dhd_pktfilter_offload_enable()
15849 */
15850int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
15851
15852{
15853 int ret = 0;
15854
15855	/* Packet filtering is set only if we are still in early-suspend and
15856	 * we need either to turn it ON or turn it OFF.
15857	 * We can always turn it OFF in case of early-suspend, but we turn it
15858	 * back ON only if suspend_disable_flag was not set.
15859 */
15860 if (dhdp && dhdp->up) {
15861 if (dhdp->in_suspend) {
15862 if (!val || (val && !dhdp->suspend_disable_flag))
15863 dhd_enable_packet_filter(val, dhdp);
15864 }
15865 }
15866 return ret;
15867}
15868
15869/* function to enable/disable packet for Network device */
15870int net_os_enable_packet_filter(struct net_device *dev, int val)
15871{
15872 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15873
15874 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
15875 return dhd_os_enable_packet_filter(&dhd->pub, val);
15876}
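/*
 * Illustrative sketch (not part of the driver): with 'set' and 'enable'
 * separated as described above, an early-suspend path first installs a
 * filter and then switches filtering on; resume switches it off while
 * leaving the installed filter set intact. The filter slot number used
 * here is hypothetical.
 */
#if 0
static void
example_suspend_filters(struct net_device *dev, int suspend)
{
	if (suspend) {
		/* set: install the filter in slot 0 */
		net_os_rxfilter_add_remove(dev, TRUE, 0);
		/* enable: turn filtering on for the suspend window */
		net_os_enable_packet_filter(dev, 1);
	} else {
		net_os_enable_packet_filter(dev, 0);
	}
}
#endif /* 0 */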
15877#endif /* PKT_FILTER_SUPPORT */
15878
15879int
15880dhd_dev_init_ioctl(struct net_device *dev)
15881{
15882 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15883 int ret;
15884
15885 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
15886 goto done;
15887
15888done:
15889 return ret;
15890}
15891
15892int
15893dhd_dev_get_feature_set(struct net_device *dev)
15894{
15895 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
15896 dhd_pub_t *dhd = (&ptr->pub);
15897 int feature_set = 0;
15898
15899 if (FW_SUPPORTED(dhd, sta))
15900 feature_set |= WIFI_FEATURE_INFRA;
15901 if (FW_SUPPORTED(dhd, dualband))
15902 feature_set |= WIFI_FEATURE_INFRA_5G;
15903 if (FW_SUPPORTED(dhd, p2p))
15904 feature_set |= WIFI_FEATURE_P2P;
15905 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
15906 feature_set |= WIFI_FEATURE_SOFT_AP;
15907 if (FW_SUPPORTED(dhd, tdls))
15908 feature_set |= WIFI_FEATURE_TDLS;
15909 if (FW_SUPPORTED(dhd, vsdb))
15910 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
15911 if (FW_SUPPORTED(dhd, nan)) {
15912 feature_set |= WIFI_FEATURE_NAN;
15913		/* NAN is essential for d2d rtt */
15914 if (FW_SUPPORTED(dhd, rttd2d))
15915 feature_set |= WIFI_FEATURE_D2D_RTT;
15916 }
15917#ifdef RTT_SUPPORT
15918 if (dhd->rtt_supported) {
15919 feature_set |= WIFI_FEATURE_D2D_RTT;
15920 feature_set |= WIFI_FEATURE_D2AP_RTT;
15921 }
15922#endif /* RTT_SUPPORT */
15923 feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
15924#ifdef LINKSTAT_SUPPORT
15925 feature_set |= WIFI_FEATURE_LINKSTAT;
15926#endif /* LINKSTAT_SUPPORT */
15927
15928#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
15929 if (dhd_is_pno_supported(dhd)) {
15930 feature_set |= WIFI_FEATURE_PNO;
15931#ifdef BATCH_SCAN
15932 /* Deprecated */
15933 feature_set |= WIFI_FEATURE_BATCH_SCAN;
15934#endif /* BATCH_SCAN */
15935#ifdef GSCAN_SUPPORT
15936 /* terence 20171115: remove to get GTS PASS
15937 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
15938 */
15939// feature_set |= WIFI_FEATURE_GSCAN;
15940// feature_set |= WIFI_FEATURE_HAL_EPNO;
15941#endif /* GSCAN_SUPPORT */
15942 }
15943#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
15944#ifdef RSSI_MONITOR_SUPPORT
15945 if (FW_SUPPORTED(dhd, rssi_mon)) {
15946 feature_set |= WIFI_FEATURE_RSSI_MONITOR;
15947 }
15948#endif /* RSSI_MONITOR_SUPPORT */
15949#ifdef WL11U
15950 feature_set |= WIFI_FEATURE_HOTSPOT;
15951#endif /* WL11U */
15952#ifdef NDO_CONFIG_SUPPORT
15953 feature_set |= WIFI_FEATURE_CONFIG_NDO;
15954#endif /* NDO_CONFIG_SUPPORT */
15955#ifdef SUPPORT_RANDOM_MAC_SCAN
15956 feature_set |= WIFI_FEATURE_SCAN_RAND;
15957#endif /* SUPPORT_RANDOM_MAC_SCAN */
15958#ifdef FILTER_IE
15959 if (FW_SUPPORTED(dhd, fie)) {
15960 feature_set |= WIFI_FEATURE_FILTER_IE;
15961 }
15962#endif /* FILTER_IE */
15963#ifdef ROAMEXP_SUPPORT
15964 feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
15965#endif /* ROAMEXP_SUPPORT */
15966#ifdef WL_P2P_RAND
15967 feature_set |= WIFI_FEATURE_P2P_RAND_MAC;
15968#endif /* WL_P2P_RAND */
15969#ifdef WL_SAR_TX_POWER
15970 feature_set |= WIFI_FEATURE_SET_TX_POWER_LIMIT;
15971 feature_set |= WIFI_FEATURE_USE_BODY_HEAD_SAR;
15972#endif /* WL_SAR_TX_POWER */
15973#ifdef WL_STATIC_IF
15974 feature_set |= WIFI_FEATURE_AP_STA;
15975#endif /* WL_STATIC_IF */
15976 return feature_set;
15977}
15978
15979int
15980dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
15981{
15982 int feature_set_full;
15983 int ret = 0;
15984
15985 feature_set_full = dhd_dev_get_feature_set(dev);
15986
15987 /* Common feature set for all interface */
15988 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
15989 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
15990 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
15991 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
15992 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
15993 (feature_set_full & WIFI_FEATURE_EPR);
15994
15995 /* Specific feature group for each interface */
15996 switch (num) {
15997 case 0:
15998 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
15999 /* Not supported yet */
16000 /* (feature_set_full & WIFI_FEATURE_NAN) | */
16001 (feature_set_full & WIFI_FEATURE_TDLS) |
16002 (feature_set_full & WIFI_FEATURE_PNO) |
16003 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
16004 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
16005 (feature_set_full & WIFI_FEATURE_GSCAN) |
16006 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
16007 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
16008 break;
16009
16010 case 1:
16011 ret |= (feature_set_full & WIFI_FEATURE_P2P);
16012 /* Not yet verified NAN with P2P */
16013 /* (feature_set_full & WIFI_FEATURE_NAN) | */
16014 break;
16015
16016 case 2:
16017 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
16018 (feature_set_full & WIFI_FEATURE_TDLS) |
16019 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
16020 break;
16021
16022 default:
16023 ret = WIFI_FEATURE_INVALID;
16024 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
16025 break;
16026 }
16027
16028 return ret;
16029}
16030
16031#ifdef CUSTOM_FORCE_NODFS_FLAG
16032int
16033dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
16034{
16035 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16036
16037 if (nodfs)
16038 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
16039 else
16040 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
16041 dhd->pub.force_country_change = TRUE;
16042 return 0;
16043}
16044#endif /* CUSTOM_FORCE_NODFS_FLAG */
16045
16046#ifdef NDO_CONFIG_SUPPORT
16047int
16048dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
16049{
16050 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16051 dhd_pub_t *dhdp = &dhd->pub;
16052 int ret = 0;
16053
16054 if (enable) {
16055 /* enable ND offload feature (will be enabled in FW on suspend) */
16056 dhdp->ndo_enable = TRUE;
16057
16058 /* Update changes of anycast address & DAD failed address */
16059 ret = dhd_dev_ndo_update_inet6addr(dev);
16060 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
16061 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
16062 return ret;
16063 }
16064 } else {
16065 /* disable ND offload feature */
16066 dhdp->ndo_enable = FALSE;
16067
16068 /* disable ND offload in FW */
16069 ret = dhd_ndo_enable(dhdp, FALSE);
16070 if (ret < 0) {
16071 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
16072 }
16073 }
16074 return ret;
16075}
16076
16077static int
16078dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
16079{
16080 struct inet6_ifaddr *ifa;
16081 struct ifacaddr6 *acaddr = NULL;
16082 int addr_count = 0;
16083
16084 /* lock */
16085 read_lock_bh(&inet6->lock);
16086
16087 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
16088 /* Count valid unicast address */
16089 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
16090 GCC_DIAGNOSTIC_POP();
16091 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
16092 addr_count++;
16093 }
16094 }
16095
16096 /* Count anycast address */
16097 acaddr = inet6->ac_list;
16098 while (acaddr) {
16099 addr_count++;
16100 acaddr = acaddr->aca_next;
16101 }
16102
16103 /* unlock */
16104 read_unlock_bh(&inet6->lock);
16105
16106 return addr_count;
16107}
16108
16109int
16110dhd_dev_ndo_update_inet6addr(struct net_device *dev)
16111{
16112 dhd_info_t *dhd;
16113 dhd_pub_t *dhdp;
16114 struct inet6_dev *inet6;
16115 struct inet6_ifaddr *ifa;
16116 struct ifacaddr6 *acaddr = NULL;
16117 struct in6_addr *ipv6_addr = NULL;
16118 int cnt, i;
16119 int ret = BCME_OK;
16120
16121 /*
16122	 * this function evaluates the host IP addresses in struct inet6_dev:
16123	 * unicast addresses in inet6_dev->addr_list and
16124	 * anycast addresses in inet6_dev->ac_list.
16125	 * while evaluating inet6_dev, read_lock_bh() is required to prevent
16126	 * access to a NULL (freed) pointer.
16127 */
16128
16129 if (dev) {
16130 inet6 = dev->ip6_ptr;
16131 if (!inet6) {
16132 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
16133 return BCME_ERROR;
16134 }
16135
16136 dhd = DHD_DEV_INFO(dev);
16137 if (!dhd) {
16138 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
16139 return BCME_ERROR;
16140 }
16141 dhdp = &dhd->pub;
16142
16143 if (dhd_net2idx(dhd, dev) != 0) {
16144 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
16145 return BCME_ERROR;
16146 }
16147 } else {
16148 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
16149 return BCME_ERROR;
16150 }
16151
16152 /* Check host IP overflow */
16153 cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
16154 if (cnt > dhdp->ndo_max_host_ip) {
16155 if (!dhdp->ndo_host_ip_overflow) {
16156 dhdp->ndo_host_ip_overflow = TRUE;
16157 /* Disable ND offload in FW */
16158 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
16159 ret = dhd_ndo_enable(dhdp, FALSE);
16160 }
16161
16162 return ret;
16163 }
16164
16165	/*
16166	 * Allocate an ipv6 addr buffer to store the addresses to be added/removed.
16167	 * The driver needs to lock inet6_dev while accessing the structure, but it
16168	 * cannot issue an ioctl while inet6_dev is locked, since ioctls may schedule;
16169	 * hence, copy the addresses into the buffer and do the ioctl after unlocking.
16170	 */
16171 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
16172 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
16173 if (!ipv6_addr) {
16174 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
16175 return BCME_NOMEM;
16176 }
16177
16178	/* Find DAD-failed unicast addresses to be removed */
16179 cnt = 0;
16180 read_lock_bh(&inet6->lock);
16181 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
16182 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
16183 GCC_DIAGNOSTIC_POP();
16184 /* DAD failed unicast address */
16185 if ((ifa->flags & IFA_F_DADFAILED) &&
16186 (cnt < dhdp->ndo_max_host_ip)) {
16187 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
16188 cnt++;
16189 }
16190 }
16191 read_unlock_bh(&inet6->lock);
16192
16193	/* Remove DAD-failed unicast addresses */
16194 for (i = 0; i < cnt; i++) {
16195 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
16196 ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
16197 if (ret < 0) {
16198 goto done;
16199 }
16200 }
16201
16202	/* Remove all anycast addresses */
16203 ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
16204 if (ret < 0) {
16205 goto done;
16206 }
16207
16208	/*
16209	 * If ND offload was disabled due to host IP overflow,
16210	 * attempt to add the valid unicast addresses again.
16211	 */
16212 if (dhdp->ndo_host_ip_overflow) {
16214		/* Find valid unicast addresses */
16214 cnt = 0;
16215 read_lock_bh(&inet6->lock);
16216 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
16217 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
16218 GCC_DIAGNOSTIC_POP();
16219 /* valid unicast address */
16220 if (!(ifa->flags & IFA_F_DADFAILED) &&
16221 (cnt < dhdp->ndo_max_host_ip)) {
16222 memcpy(&ipv6_addr[cnt], &ifa->addr,
16223 sizeof(struct in6_addr));
16224 cnt++;
16225 }
16226 }
16227 read_unlock_bh(&inet6->lock);
16228
16229		/* Add valid unicast addresses */
16230 for (i = 0; i < cnt; i++) {
16231 ret = dhd_ndo_add_ip_with_type(dhdp,
16232 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
16233 if (ret < 0) {
16234 goto done;
16235 }
16236 }
16237 }
16238
16239	/* Find anycast addresses */
16240 cnt = 0;
16241 read_lock_bh(&inet6->lock);
16242 acaddr = inet6->ac_list;
16243 while (acaddr) {
16244 if (cnt < dhdp->ndo_max_host_ip) {
16245 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
16246 cnt++;
16247 }
16248 acaddr = acaddr->aca_next;
16249 }
16250 read_unlock_bh(&inet6->lock);
16251
16253	/* Add anycast addresses */
16253 for (i = 0; i < cnt; i++) {
16254 ret = dhd_ndo_add_ip_with_type(dhdp,
16255 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
16256 if (ret < 0) {
16257 goto done;
16258 }
16259 }
16260
16261	/* All host IP addresses were added successfully */
16262 if (dhdp->ndo_host_ip_overflow) {
16263 dhdp->ndo_host_ip_overflow = FALSE;
16264 if (dhdp->in_suspend) {
16265			/* driver is in (early) suspend state, need to enable ND offload in FW */
16266 DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
16267 ret = dhd_ndo_enable(dhdp, TRUE);
16268 }
16269 }
16270
16271done:
16272 if (ipv6_addr) {
16273 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
16274 }
16275
16276 return ret;
16277}
16278
16279#endif /* NDO_CONFIG_SUPPORT */
16280
16281#ifdef PNO_SUPPORT
16282/* Linux wrapper to call common dhd_pno_stop_for_ssid */
16283int
16284dhd_dev_pno_stop_for_ssid(struct net_device *dev)
16285{
16286 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16287
16288 return (dhd_pno_stop_for_ssid(&dhd->pub));
16289}
16290
16291/* Linux wrapper to call common dhd_pno_set_for_ssid */
16292int
16293dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
16294 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
16295{
16296 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16297
16298 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
16299 pno_repeat, pno_freq_expo_max, channel_list, nchan));
16300}
16301
16302/* Linux wrapper to call common dhd_pno_enable */
16303int
16304dhd_dev_pno_enable(struct net_device *dev, int enable)
16305{
16306 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16307
16308 return (dhd_pno_enable(&dhd->pub, enable));
16309}
16310
16311/* Linux wrapper to call common dhd_pno_set_for_hotlist */
16312int
16313dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
16314 struct dhd_pno_hotlist_params *hotlist_params)
16315{
16316 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16317 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
16318}
16319/* Linux wrapper to call common dhd_pno_stop_for_batch */
16320int
16321dhd_dev_pno_stop_for_batch(struct net_device *dev)
16322{
16323 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16324 return (dhd_pno_stop_for_batch(&dhd->pub));
16325}
16326
16327/* Linux wrapper to call common dhd_pno_set_for_batch */
16328int
16329dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
16330{
16331 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16332 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
16333}
16334
16336/* Linux wrapper to call common dhd_pno_get_for_batch */
16336int
16337dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
16338{
16339 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16340 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
16341}
16342#endif /* PNO_SUPPORT */
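/*
 * Usage sketch for the PNO wrappers above (hypothetical, not from this
 * file): a cfg80211 sched-scan handler would typically drive them roughly as
 *
 *	wlc_ssid_ext_t ssids[1];	// filled from the sched-scan request
 *	uint16 chans[2] = { 1, 6 };
 *	dhd_dev_pno_set_for_ssid(ndev, ssids, 1, 30, 0, 0, chans, 2);
 *	...
 *	dhd_dev_pno_stop_for_ssid(ndev);	// on sched-scan stop
 *
 * where scan_fr is the scan interval and pno_repeat/pno_freq_expo_max tune
 * the PNO engine's adaptive back-off.
 */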
16343
16344#if defined(PNO_SUPPORT)
16345#ifdef GSCAN_SUPPORT
16346bool
16347dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
16348{
16349 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16350
16351 return (dhd_is_legacy_pno_enabled(&dhd->pub));
16352}
16353
16354int
16355dhd_dev_set_epno(struct net_device *dev)
16356{
16357 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16358 if (!dhd) {
16359 return BCME_ERROR;
16360 }
16361 return dhd_pno_set_epno(&dhd->pub);
16362}
16363int
16364dhd_dev_flush_fw_epno(struct net_device *dev)
16365{
16366 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16367 if (!dhd) {
16368 return BCME_ERROR;
16369 }
16370 return dhd_pno_flush_fw_epno(&dhd->pub);
16371}
16372
16373/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
16374int
16375dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
16376 void *buf, bool flush)
16377{
16378 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16379
16380 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
16381}
16382
16383/* Linux wrapper to call common dhd_pno_get_gscan */
16384void *
16385dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
16386 void *info, uint32 *len)
16387{
16388 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16389
16390 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
16391}
16392
16393/* Linux wrapper to call common dhd_wait_batch_results_complete */
16394int
16395dhd_dev_wait_batch_results_complete(struct net_device *dev)
16396{
16397 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16398
16399 return (dhd_wait_batch_results_complete(&dhd->pub));
16400}
16401
16402/* Linux wrapper to call common dhd_pno_lock_batch_results */
16403int
16404dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
16405{
16406 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16407
16408 return (dhd_pno_lock_batch_results(&dhd->pub));
16409}
16410/* Linux wrapper to call common dhd_pno_unlock_batch_results */
16411void
16412dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
16413{
16414 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16415
16416 return (dhd_pno_unlock_batch_results(&dhd->pub));
16417}
16418
16419/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
16420int
16421dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
16422{
16423 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16424
16425 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
16426}
16427
16428/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
16429int
16430dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
16431{
16432 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16433
16434 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
16435}
16436
16437/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
16438void *
16439dhd_dev_hotlist_scan_event(struct net_device *dev,
16440 const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
16441{
16442 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16443
16444 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
16445}
16446
16447/* Linux wrapper to call common dhd_process_full_gscan_result */
16448void *
16449dhd_dev_process_full_gscan_result(struct net_device *dev,
16450const void *data, uint32 len, int *send_evt_bytes)
16451{
16452 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16453
16454 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
16455}
16456
16457void
16458dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
16459{
16460 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16461
16462 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
16463
16464 return;
16465}
16466
16467int
16468dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
16469{
16470 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16471
16472 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
16473}
16474
16475/* Linux wrapper to call common dhd_retreive_batch_scan_results */
16476int
16477dhd_dev_retrieve_batch_scan(struct net_device *dev)
16478{
16479 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16480
16481 return (dhd_retreive_batch_scan_results(&dhd->pub));
16482}
16483
16484/* Linux wrapper to call common dhd_pno_process_epno_result */
16485void * dhd_dev_process_epno_result(struct net_device *dev,
16486 const void *data, uint32 event, int *send_evt_bytes)
16487{
16488 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16489
16490 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
16491}
16492
16493int
16494dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
16495 wlc_roam_exp_params_t *roam_param)
16496{
16497 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16498 wl_roam_exp_cfg_t roam_exp_cfg;
16499 int err;
16500
16501 if (!roam_param) {
16502 return BCME_BADARG;
16503 }
16504
16505 DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
16506 roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
16507 DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
16508 roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
16509 roam_param->cur_bssid_boost));
16510 DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
16511 roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
16512
16513 memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
16514 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
16515 roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
16516 if (dhd->pub.lazy_roam_enable) {
16517 roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
16518 }
16519 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
16520 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
16521 TRUE);
16522 if (err < 0) {
16523 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
16524 }
16525 return err;
16526}
16527
16528int
16529dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
16530{
16531 int err;
16532 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16533 wl_roam_exp_cfg_t roam_exp_cfg;
16534
16535 memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
16536 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
16537 if (enable) {
16538 roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
16539 }
16540
16541 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
16542 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
16543 TRUE);
16544 if (err < 0) {
16545 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
16546 } else {
16547 dhd->pub.lazy_roam_enable = (enable != 0);
16548 }
16549 return err;
16550}
16551
16552int
16553dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
16554 wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
16555{
16556 int err;
16557 uint len;
16558 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16559
16560 bssid_pref->version = BSSID_PREF_LIST_VERSION;
16561	/* By default, programming the bssid pref list flushes out old values */
16562 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
16563 len = sizeof(wl_bssid_pref_cfg_t);
16564 if (bssid_pref->count) {
16565 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
16566 }
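	/* wl_bssid_pref_cfg_t presumably embeds one wl_bssid_pref_list_t entry,
	 * hence the (count - 1) sizing above.
	 */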
16567 err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
16568 (char *)bssid_pref, len, NULL, 0, TRUE);
16569 if (err != BCME_OK) {
16570 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
16571 }
16572 return err;
16573}
16574#endif /* GSCAN_SUPPORT */
16575
16576#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
16577int
16578dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
16579 uint32 len, uint32 flush)
16580{
16581 int err;
16582 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16583 int macmode;
16584
16585 if (blacklist) {
16586 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
16587 len, TRUE, 0);
16588 if (err != BCME_OK) {
16589 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
16590 return err;
16591 }
16592 }
16594	/* By default, programming the blacklist flushes out old values */
16594 macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
16595 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
16596 sizeof(macmode), TRUE, 0);
16597 if (err != BCME_OK) {
16598 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
16599 }
16600 return err;
16601}
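
/*
 * Semantics of the above: passing a NULL blacklist with flush set disables
 * MAC filtering entirely (WLC_MACMODE_DISABLED); otherwise the supplied list
 * is programmed and deny mode is enabled so the listed BSSIDs are avoided.
 */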
16602
16603int
16604dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
16605 uint32 len, uint32 flush)
16606{
16607 int err;
16608 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16609 wl_ssid_whitelist_t whitelist_ssid_flush;
16610
16611 if (!ssid_whitelist) {
16612 if (flush) {
16613 ssid_whitelist = &whitelist_ssid_flush;
16614 ssid_whitelist->ssid_count = 0;
16615 } else {
16616 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
16617 return BCME_BADARG;
16618 }
16619 }
16620 ssid_whitelist->version = SSID_WHITELIST_VERSION;
16621 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
16622 err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
16623 0, TRUE);
16624 if (err != BCME_OK) {
16625 if (err == BCME_UNSUPPORTED) {
16626			DHD_ERROR(("%s : roam_exp_ssid_whitelist, UNSUPPORTED \n", __FUNCTION__));
16627		} else {
16628			DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n",
16629 __FUNCTION__, err));
16630 }
16631 }
16632 return err;
16633}
16634#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
16635#endif /* PNO_SUPPORT */
16636
16637#ifdef RSSI_MONITOR_SUPPORT
16638int
16639dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
16640 int8 max_rssi, int8 min_rssi)
16641{
16642 int err;
16643 wl_rssi_monitor_cfg_t rssi_monitor;
16644 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16645
16646 rssi_monitor.version = RSSI_MONITOR_VERSION;
16647 rssi_monitor.max_rssi = max_rssi;
16648 rssi_monitor.min_rssi = min_rssi;
16649 rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
16650 err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
16651 NULL, 0, TRUE);
16652 if (err < 0 && err != BCME_UNSUPPORTED) {
16653 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
16654 }
16655 return err;
16656}
16657#endif /* RSSI_MONITOR_SUPPORT */
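/*
 * Usage sketch for dhd_dev_set_rssi_monitor_cfg() above (hypothetical
 * caller): start monitoring for RSSI excursions outside [min_rssi, max_rssi],
 * then stop:
 *
 *	dhd_dev_set_rssi_monitor_cfg(ndev, 1, -60, -85);	// start
 *	dhd_dev_set_rssi_monitor_cfg(ndev, 0, 0, 0);		// stop
 *
 * Note that BCME_UNSUPPORTED from older firmware is deliberately not logged
 * as an error above.
 */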
16658
16659#ifdef DHDTCPACK_SUPPRESS
16660int
16661dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
16662{
16663 int err;
16664 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16665
16666 err = dhd_tcpack_suppress_set(&dhd->pub, enable);
16667 if (err != BCME_OK) {
16668 DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
16669 }
16670 return err;
16671}
16672#endif /* DHDTCPACK_SUPPRESS */
16673
16674int
16675dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
16676{
16677 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16678 dhd_pub_t *dhdp = &dhd->pub;
16679
16680 if (!dhdp || !oui) {
16681 DHD_ERROR(("NULL POINTER : %s\n",
16682 __FUNCTION__));
16683 return BCME_ERROR;
16684 }
16685 if (ETHER_ISMULTI(oui)) {
16686 DHD_ERROR(("Expected unicast OUI\n"));
16687 return BCME_ERROR;
16688 } else {
16689 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
16690 memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
16691 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
16692 MACOUI2STRDBG(rand_mac_oui)));
16693 }
16694 return BCME_OK;
16695}
16696
16697int
16698dhd_set_rand_mac_oui(dhd_pub_t *dhd)
16699{
16700 int err;
16701 wl_pfn_macaddr_cfg_t wl_cfg;
16702 uint8 *rand_mac_oui = dhd->rand_mac_oui;
16703
16704 memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
16705 memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
16706 wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
16707 if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
16708 wl_cfg.flags = 0;
16709 } else {
16710 wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
16711 }
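	/* A zero OUI clears randomization (flags = 0); otherwise the mask bits
	 * presumably tell firmware to apply only the OUI on unassociated PFN
	 * scans, leaving the lower three MAC bytes randomized.
	 */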
16712
16713 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
16714 MACOUI2STRDBG(rand_mac_oui)));
16715
16716 err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
16717 if (err < 0) {
16718 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
16719 }
16720 return err;
16721}
16722
16723#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
16724/* Linux wrapper to call common dhd_rtt_set_cfg */
16725int
16726dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
16727{
16728 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16729
16730 return (dhd_rtt_set_cfg(&dhd->pub, buf));
16731}
16732
16733int
16734dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
16735{
16736 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16737
16738 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
16739}
16740
16741int
16742dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
16743{
16744 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16745
16746 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
16747}
16748
16749int
16750dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
16751{
16752 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16753
16754 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
16755}
16756
16757int
16758dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
16759{
16760 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16761
16762 return (dhd_rtt_capability(&dhd->pub, capa));
16763}
16764
16765int
16766dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
16767{
16768 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16769 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
16770}
16771
16772int
16773dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
16774{
16775 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16776 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
16777}
16778
16779int dhd_dev_rtt_cancel_responder(struct net_device *dev)
16780{
16781 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
16782 return (dhd_rtt_cancel_responder(&dhd->pub));
16783}
16784
16785#endif /* RTT_SUPPORT && WL_CFG80211 */
16786#if defined(PKT_FILTER_SUPPORT) && defined(APF)
16787static void _dhd_apf_lock_local(dhd_info_t *dhd)
16788{
16789 if (dhd) {
16790 mutex_lock(&dhd->dhd_apf_mutex);
16791 }
16792}
16793
16794static void _dhd_apf_unlock_local(dhd_info_t *dhd)
16795{
16796 if (dhd) {
16797 mutex_unlock(&dhd->dhd_apf_mutex);
16798 }
16799}
16800
16801static int
16802__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
16803 u8* program, uint32 program_len)
16804{
16805 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16806 dhd_pub_t *dhdp = &dhd->pub;
16807 wl_pkt_filter_t * pkt_filterp;
16808 wl_apf_program_t *apf_program;
16809 char *buf;
16810 u32 cmd_len, buf_len;
16811 int ifidx, ret;
16812 char cmd[] = "pkt_filter_add";
16813
16814 ifidx = dhd_net2idx(dhd, ndev);
16815 if (ifidx == DHD_BAD_IF) {
16816 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
16817 return -ENODEV;
16818 }
16819
16820 cmd_len = sizeof(cmd);
16821
16822	/* Bail out if program_len exceeds the maximum allowed APF program size
16823	 * or if the program pointer is NULL.
16824	 */
16825 if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
16826 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
16827 __FUNCTION__, program_len, program));
16828 return -EINVAL;
16829 }
16830 buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
16831 WL_APF_PROGRAM_FIXED_LEN + program_len;
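	/* iovar buffer layout: "pkt_filter_add\0" | wl_pkt_filter_t header |
	 * APF program bytes carried in the apf_program member.
	 */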
16832
16833 buf = MALLOCZ(dhdp->osh, buf_len);
16834 if (unlikely(!buf)) {
16835 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
16836 return -ENOMEM;
16837 }
16838
16839 memcpy(buf, cmd, cmd_len);
16840
16841 pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
16842 pkt_filterp->id = htod32(filter_id);
16843 pkt_filterp->negate_match = htod32(FALSE);
16844 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
16845
16846 apf_program = &pkt_filterp->u.apf_program;
16847 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
16848 apf_program->instr_len = htod16(program_len);
16849 memcpy(apf_program->instrs, program, program_len);
16850
16851 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
16852 if (unlikely(ret)) {
16853 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
16854 __FUNCTION__, filter_id, ret));
16855 }
16856
16857 if (buf) {
16858 MFREE(dhdp->osh, buf, buf_len);
16859 }
16860 return ret;
16861}
16862
16863static int
16864__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
16865 uint32 mode, uint32 enable)
16866{
16867 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16868 dhd_pub_t *dhdp = &dhd->pub;
16869 wl_pkt_filter_enable_t * pkt_filterp;
16870 char *buf;
16871 u32 cmd_len, buf_len;
16872 int ifidx, ret;
16873 char cmd[] = "pkt_filter_enable";
16874
16875 ifidx = dhd_net2idx(dhd, ndev);
16876 if (ifidx == DHD_BAD_IF) {
16877 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
16878 return -ENODEV;
16879 }
16880
16881 cmd_len = sizeof(cmd);
16882 buf_len = cmd_len + sizeof(*pkt_filterp);
16883
16884 buf = MALLOCZ(dhdp->osh, buf_len);
16885 if (unlikely(!buf)) {
16886 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
16887 return -ENOMEM;
16888 }
16889
16890 memcpy(buf, cmd, cmd_len);
16891
16892 pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
16893 pkt_filterp->id = htod32(filter_id);
16894 pkt_filterp->enable = htod32(enable);
16895
16896 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
16897 if (unlikely(ret)) {
16898 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
16899 __FUNCTION__, filter_id, ret));
16900 goto exit;
16901 }
16902
16903 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
16904 WLC_SET_VAR, TRUE, ifidx);
16905 if (unlikely(ret)) {
16906 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
16907 __FUNCTION__, filter_id, ret));
16908 }
16909
16910exit:
16911 if (buf) {
16912 MFREE(dhdp->osh, buf, buf_len);
16913 }
16914 return ret;
16915}
16916
16917static int
16918__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
16919{
16920 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
16921 dhd_pub_t *dhdp = &dhd->pub;
16922 int ifidx, ret;
16923
16924 ifidx = dhd_net2idx(dhd, ndev);
16925 if (ifidx == DHD_BAD_IF) {
16926 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
16927 return -ENODEV;
16928 }
16929
16930 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
16931 htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
16932 if (unlikely(ret)) {
16933 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
16934 __FUNCTION__, filter_id, ret));
16935 }
16936
16937 return ret;
16938}
16939
16940void dhd_apf_lock(struct net_device *dev)
16941{
16942 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16943 _dhd_apf_lock_local(dhd);
16944}
16945
16946void dhd_apf_unlock(struct net_device *dev)
16947{
16948 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16949 _dhd_apf_unlock_local(dhd);
16950}
16951
16952int
16953dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
16954{
16955 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16956 dhd_pub_t *dhdp = &dhd->pub;
16957 int ifidx, ret;
16958
16959 if (!FW_SUPPORTED(dhdp, apf)) {
16960 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
16961
16962 /*
16963 * Notify Android framework that APF is not supported by setting
16964 * version as zero.
16965 */
16966 *version = 0;
16967 return BCME_OK;
16968 }
16969
16970 ifidx = dhd_net2idx(dhd, ndev);
16971 if (ifidx == DHD_BAD_IF) {
16972 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
16973 return -ENODEV;
16974 }
16975
16976 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
16977 WLC_GET_VAR, FALSE, ifidx);
16978 if (unlikely(ret)) {
16979 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
16980 __FUNCTION__, ret));
16981 }
16982
16983 return ret;
16984}
16985
16986int
16987dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
16988{
16989 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
16990 dhd_pub_t *dhdp = &dhd->pub;
16991 int ifidx, ret;
16992
16993 if (!FW_SUPPORTED(dhdp, apf)) {
16994 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
16995 *max_len = 0;
16996 return BCME_OK;
16997 }
16998
16999 ifidx = dhd_net2idx(dhd, ndev);
17000 if (ifidx == DHD_BAD_IF) {
17001		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
17002 return -ENODEV;
17003 }
17004
17005 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
17006 WLC_GET_VAR, FALSE, ifidx);
17007 if (unlikely(ret)) {
17008 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
17009 __FUNCTION__, ret));
17010 }
17011
17012 return ret;
17013}
17014
17015int
17016dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
17017 uint32 program_len)
17018{
17019 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
17020 dhd_pub_t *dhdp = &dhd->pub;
17021 int ret;
17022
17023 DHD_APF_LOCK(ndev);
17024
17025	/* delete the old filter if one already exists */
17026 if (dhdp->apf_set) {
17027 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
17028 if (unlikely(ret)) {
17029 goto exit;
17030 }
17031 dhdp->apf_set = FALSE;
17032 }
17033
17034 ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
17035 if (ret) {
17036 goto exit;
17037 }
17038 dhdp->apf_set = TRUE;
17039
17040 if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
17041		/* Driver is still in (early) suspend state, re-enable the APF filter */
17042 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
17043 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
17044 }
17045exit:
17046 DHD_APF_UNLOCK(ndev);
17047
17048 return ret;
17049}
17050
17051int
17052dhd_dev_apf_enable_filter(struct net_device *ndev)
17053{
17054 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
17055 dhd_pub_t *dhdp = &dhd->pub;
17056 int ret = 0;
17057 bool nan_dp_active = false;
17058
17059 DHD_APF_LOCK(ndev);
17060#ifdef WL_NAN
17061 nan_dp_active = wl_cfgnan_is_dp_active(ndev);
17062#endif /* WL_NAN */
17063 if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
17064 !nan_dp_active)) {
17065 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
17066 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
17067 }
17068
17069 DHD_APF_UNLOCK(ndev);
17070
17071 return ret;
17072}
17073
17074int
17075dhd_dev_apf_disable_filter(struct net_device *ndev)
17076{
17077 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
17078 dhd_pub_t *dhdp = &dhd->pub;
17079 int ret = 0;
17080
17081 DHD_APF_LOCK(ndev);
17082
17083 if (dhdp->apf_set) {
17084 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
17085 PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
17086 }
17087
17088 DHD_APF_UNLOCK(ndev);
17089
17090 return ret;
17091}
17092
17093int
17094dhd_dev_apf_delete_filter(struct net_device *ndev)
17095{
17096 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
17097 dhd_pub_t *dhdp = &dhd->pub;
17098 int ret = 0;
17099
17100 DHD_APF_LOCK(ndev);
17101
17102 if (dhdp->apf_set) {
17103 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
17104 if (!ret) {
17105 dhdp->apf_set = FALSE;
17106 }
17107 }
17108
17109 DHD_APF_UNLOCK(ndev);
17110
17111 return ret;
17112}
17113#endif /* PKT_FILTER_SUPPORT && APF */
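
/*
 * APF lifecycle sketch (hypothetical framework flow, not from this file):
 * the Android Wi-Fi HAL installs one program at a time, roughly
 *
 *	dhd_dev_apf_get_version(ndev, &ver);
 *	dhd_dev_apf_get_max_len(ndev, &max_len);
 *	dhd_dev_apf_add_filter(ndev, program, program_len);	// <= max_len
 *	dhd_dev_apf_enable_filter(ndev);	// e.g. on suspend
 *	dhd_dev_apf_disable_filter(ndev);	// e.g. on resume
 *	dhd_dev_apf_delete_filter(ndev);
 *
 * All entry points above serialize on dhd_apf_mutex via DHD_APF_LOCK/UNLOCK.
 */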
17114
17115static void dhd_hang_process(struct work_struct *work_data)
17116{
17117 struct net_device *dev;
17118#ifdef IFACE_HANG_FORCE_DEV_CLOSE
17119 struct net_device *ndev;
17120 uint8 i = 0;
17121#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
17122 struct dhd_info *dhd;
17123 /* Ignore compiler warnings due to -Werror=cast-qual */
17124 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
17125 dhd = container_of(work_data, dhd_info_t, dhd_hang_process_work);
17126 GCC_DIAGNOSTIC_POP();
17127
17128 if (!dhd || !dhd->iflist[0])
17129 return;
17130 dev = dhd->iflist[0]->net;
17131
17132 if (dev) {
17133#if defined(WL_WIRELESS_EXT)
17134 wl_iw_send_priv_event(dev, "HANG");
17135#endif
17136#if defined(WL_CFG80211)
17137 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
17138#endif
17139 }
17140#ifdef IFACE_HANG_FORCE_DEV_CLOSE
17141	/*
17142	 * For HW2, dev_close needs to be done so the upper layer can
17143	 * recover after a hang. For Interposer, skip dev_close so that
17144	 * dhd iovars can still be used to take a socramdump after the
17145	 * crash; also skip it for HW4, as handling of the hang event
17146	 * is different there.
17147	 */
17148
17149 rtnl_lock();
17150 for (i = 0; i < DHD_MAX_IFS; i++) {
17151 ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
17152 if (ndev && (ndev->flags & IFF_UP)) {
17153 DHD_ERROR(("ndev->name : %s dev close\n",
17154 ndev->name));
17155 dev_close(ndev);
17156 }
17157 }
17158 rtnl_unlock();
17159#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
17160}
17161
17162#ifdef CONFIG_ARCH_EXYNOS
17163extern dhd_pub_t *link_recovery;
17164void dhd_host_recover_link(void)
17165{
17166 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
17167 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
17168 dhd_bus_set_linkdown(link_recovery, TRUE);
17169 dhd_os_send_hang_message(link_recovery);
17170}
17171EXPORT_SYMBOL(dhd_host_recover_link);
17172#endif /* CONFIG_ARCH_EXYNOS */
17173
17174#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
17175#define MAX_CONSECUTIVE_MFG_HANG_COUNT 2
17176#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
17177int dhd_os_send_hang_message(dhd_pub_t *dhdp)
17178{
17179 int ret = 0;
17180 dhd_info_t *dhd_info = NULL;
17181#ifdef WL_CFG80211
17182 struct net_device *primary_ndev;
17183 struct bcm_cfg80211 *cfg;
17184#endif /* WL_CFG80211 */
17185
17186 if (!dhdp) {
17187 DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
17188 return -EINVAL;
17189 }
17190
17191 if (!dhdp->hang_report) {
17192 DHD_ERROR(("%s: hang_report is disabled\n", __FUNCTION__));
17193 return BCME_ERROR;
17194 }
17195
17196 dhd_info = (dhd_info_t *)dhdp->info;
17197 BCM_REFERENCE(dhd_info);
17198
17199#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
17200 if (dhd_info->scheduled_memdump) {
17201 DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
17202 dhdp->hang_was_pending = 1;
17203 return BCME_OK;
17204 }
17205#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
17206
17207#ifdef WL_CFG80211
17208 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
17209 if (!primary_ndev) {
17210 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
17211 return -ENODEV;
17212 }
17213 cfg = wl_get_cfg(primary_ndev);
17214 if (!cfg) {
17215 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
17216 return -EINVAL;
17217 }
17218
17219 /* Skip sending HANG event to framework if driver is not ready */
17220 if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
17221 DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
17222 return -ENODEV;
17223 }
17224#endif /* WL_CFG80211 */
17225
17226#if defined(DHD_HANG_SEND_UP_TEST)
17227 if (dhdp->req_hang_type) {
17228 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
17229 __FUNCTION__, dhdp->req_hang_type));
17230 dhdp->req_hang_type = 0;
17231 }
17232#endif /* DHD_HANG_SEND_UP_TEST */
17233
17234 if (!dhdp->hang_was_sent) {
17235#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
17236 if (dhdp->op_mode & DHD_FLAG_MFG_MODE) {
17237 dhdp->hang_count++;
17238 if (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT) {
17239 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
17240 __FUNCTION__, dhdp->hang_count));
17241 BUG_ON(1);
17242 }
17243 }
17244#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
17245#ifdef DHD_DEBUG_UART
17246		/* If the PCIe lane has broken, execute the debug UART application
17247		 * to gather ramdump data from the dongle via UART
17248		 */
17249 if (!dhdp->info->duart_execute) {
17250 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
17251 (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
17252 dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
17253 }
17254#endif /* DHD_DEBUG_UART */
17255 dhdp->hang_was_sent = 1;
17256#ifdef BT_OVER_SDIO
17257 dhdp->is_bt_recovery_required = TRUE;
17258#endif
17259 schedule_work(&dhdp->info->dhd_hang_process_work);
17260
17261#if defined(WLAN_ACCEL_BOOT)
17262		DHD_ERROR(("%s: hang event sent, set force reg on\n", __FUNCTION__));
17263 dhd_info->wl_accel_force_reg_on = TRUE;
17264#endif /* WLAN_ACCEL_BOOT */
17265 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
17266 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
17267 }
17268 return ret;
17269}
17270
17271int net_os_send_hang_message(struct net_device *dev)
17272{
17273 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17274 int ret = 0;
17275
17276 if (dhd) {
17277 /* Report FW problem when enabled */
17278 if (dhd->pub.hang_report) {
17279#ifdef BT_OVER_SDIO
17280 if (netif_running(dev)) {
17281#endif /* BT_OVER_SDIO */
17282 ret = dhd_os_send_hang_message(&dhd->pub);
17283#ifdef BT_OVER_SDIO
17284 }
17285 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
17286 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
17287#endif /* BT_OVER_SDIO */
17288 } else {
17289 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
17290 __FUNCTION__));
17291 }
17292 }
17293 return ret;
17294}
17295
17296int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
17297{
17298 dhd_info_t *dhd = NULL;
17299 dhd_pub_t *dhdp = NULL;
17300 int reason;
17301
17302 dhd = DHD_DEV_INFO(dev);
17303 if (dhd) {
17304 dhdp = &dhd->pub;
17305 }
17306
17307 if (!dhd || !dhdp) {
17308 return 0;
17309 }
17310
17311 reason = bcm_strtoul(string_num, NULL, 0);
17312 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
17313
17314 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
17315 reason = 0;
17316 }
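	/* Reasons at or below HANG_REASON_MASK, or at/above HANG_REASON_MAX,
	 * fall outside the externally-triggerable range and are mapped to 0
	 * (unspecified).
	 */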
17317
17318 dhdp->hang_reason = reason;
17319
17320 return net_os_send_hang_message(dev);
17321}
17322
17323int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
17324{
17325 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17326 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
17327}
17328
17329int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on)
17330{
17331 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17332 unsigned long delay_msec = on ? WIFI_TURNON_DELAY : WIFI_TURNOFF_DELAY;
17333 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
17334}
17335
17336bool dhd_force_country_change(struct net_device *dev)
17337{
17338 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17339
17340 if (dhd && dhd->pub.up)
17341 return dhd->pub.force_country_change;
17342 return FALSE;
17343}
17344
17345void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
17346 wl_country_t *cspec)
17347{
17348 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17349#if defined(DHD_BLOB_EXISTENCE_CHECK)
17350 if (!dhd->pub.is_blob)
17351#endif /* DHD_BLOB_EXISTENCE_CHECK */
17352 {
17353#if defined(CUSTOM_COUNTRY_CODE)
17354 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
17355 dhd->pub.dhd_cflags);
17356#else
17357 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
17358#endif /* CUSTOM_COUNTRY_CODE */
17359 }
17360#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
17361 else {
17362		/* Replace the ccode with XZ if the country is undefined */
17363 if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
17364 strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
17365 strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
17366 strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
17367 DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
17368 }
17369 }
17370#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
17371
17372#ifdef KEEP_JP_REGREV
17373/* XXX Needed per customer request */
17374 if (strncmp(country_iso_code, "JP", 3) == 0) {
17375#if defined(DHD_BLOB_EXISTENCE_CHECK)
17376 if (dhd->pub.is_blob) {
17377 if (strncmp(dhd->pub.vars_ccode, "J1", 3) == 0) {
17378 memcpy(cspec->ccode, dhd->pub.vars_ccode,
17379 sizeof(dhd->pub.vars_ccode));
17380 }
17381 } else
17382#endif /* DHD_BLOB_EXISTENCE_CHECK */
17383 {
17384 if (strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) {
17385 cspec->rev = dhd->pub.vars_regrev;
17386 }
17387 }
17388 }
17389#endif /* KEEP_JP_REGREV */
17390 BCM_REFERENCE(dhd);
17391}
17392
17393void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
17394{
17395 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17396#ifdef WL_CFG80211
17397 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
17398#endif
17399
17400 if (dhd && dhd->pub.up) {
17401 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
17402#ifdef WL_CFG80211
17403 wl_update_wiphybands(cfg, notify);
17404#endif
17405 }
17406}
17407
17408void dhd_bus_band_set(struct net_device *dev, uint band)
17409{
17410 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17411#ifdef WL_CFG80211
17412 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
17413#endif
17414 if (dhd && dhd->pub.up) {
17415#ifdef WL_CFG80211
17416 wl_update_wiphybands(cfg, true);
17417#endif
17418 }
17419}
17420
17421int dhd_net_set_fw_path(struct net_device *dev, char *fw)
17422{
17423 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17424
17425 if (!fw || fw[0] == '\0')
17426 return -EINVAL;
17427
17428 strlcpy(dhd->fw_path, fw, sizeof(dhd->fw_path));
17429
17430#if defined(SOFTAP)
17431 if (strstr(fw, "apsta") != NULL) {
17432 DHD_INFO(("GOT APSTA FIRMWARE\n"));
17433 ap_fw_loaded = TRUE;
17434 } else {
17435 DHD_INFO(("GOT STA FIRMWARE\n"));
17436 ap_fw_loaded = FALSE;
17437 }
17438#endif
17439 return 0;
17440}
17441
17442void dhd_net_if_lock(struct net_device *dev)
17443{
17444 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17445 dhd_net_if_lock_local(dhd);
17446}
17447
17448void dhd_net_if_unlock(struct net_device *dev)
17449{
17450 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17451 dhd_net_if_unlock_local(dhd);
17452}
17453
17454static void dhd_net_if_lock_local(dhd_info_t *dhd)
17455{
17456 if (dhd)
17457 mutex_lock(&dhd->dhd_net_if_mutex);
17458}
17459
17460static void dhd_net_if_unlock_local(dhd_info_t *dhd)
17461{
17462 if (dhd)
17463 mutex_unlock(&dhd->dhd_net_if_mutex);
17464}
17465
17466static void dhd_suspend_lock(dhd_pub_t *pub)
17467{
17468 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17469 if (dhd)
17470 mutex_lock(&dhd->dhd_suspend_mutex);
17471}
17472
17473static void dhd_suspend_unlock(dhd_pub_t *pub)
17474{
17475 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17476 if (dhd)
17477 mutex_unlock(&dhd->dhd_suspend_mutex);
17478}
17479
17480unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
17481{
17482 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17483 unsigned long flags = 0;
17484
17485 if (dhd) {
17486 flags = osl_spin_lock(&dhd->dhd_lock);
17487 }
17488
17489 return flags;
17490}
17491
17492void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
17493{
17494 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17495
17496 if (dhd) {
17497 osl_spin_unlock(&dhd->dhd_lock, flags);
17498 }
17499}
17500
17501void *
17502dhd_os_dbgring_lock_init(osl_t *osh)
17503{
17504 struct mutex *mtx = NULL;
17505
17506 mtx = MALLOCZ(osh, sizeof(*mtx));
17507 if (mtx)
17508 mutex_init(mtx);
17509
17510 return mtx;
17511}
17512
17513void
17514dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
17515{
17516 if (mtx) {
17517 mutex_destroy(mtx);
17518 MFREE(osh, mtx, sizeof(struct mutex));
17519 }
17520}
17521
17522static int
17523dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
17524{
17525 return (atomic_read(&dhd->pend_8021x_cnt));
17526}
17527
17528#define MAX_WAIT_FOR_8021X_TX 100
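/* With the 10 ms sleep per iteration below, this bounds the total wait for
 * pending 802.1X frames to roughly one second.
 */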
17529
17530int
17531dhd_wait_pend8021x(struct net_device *dev)
17532{
17533 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17534 int timeout = msecs_to_jiffies(10);
17535 int ntimes = MAX_WAIT_FOR_8021X_TX;
17536 int pend = dhd_get_pend_8021x_cnt(dhd);
17537
17538 while (ntimes && pend) {
17539 if (pend) {
17540 set_current_state(TASK_INTERRUPTIBLE);
17541 schedule_timeout(timeout);
17542 set_current_state(TASK_RUNNING);
17543 ntimes--;
17544 }
17545 pend = dhd_get_pend_8021x_cnt(dhd);
17546 }
17547 if (ntimes == 0)
17548 {
17549 atomic_set(&dhd->pend_8021x_cnt, 0);
17550 WL_MSG(dev->name, "TIMEOUT\n");
17551 }
17552 return pend;
17553}
17554
17555#if defined(DHD_DEBUG)
17556int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
17557{
17558 int ret = 0;
17559 struct file *fp = NULL;
17560 mm_segment_t old_fs;
17561 loff_t pos = 0;
17562
17563 /* change to KERNEL_DS address limit */
17564 old_fs = get_fs();
17565 set_fs(KERNEL_DS);
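	/* Note: the get_fs()/set_fs() address-limit dance only exists on older
	 * kernels (it was removed around v5.10, where kernel_write() would be
	 * used instead).
	 */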
17566
17567 /* open file to write */
17568 fp = filp_open(file_name, flags, 0664);
17569 if (IS_ERR(fp)) {
17570 DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
17571 goto exit;
17572 }
17573
17574 /* Write buf to file */
17575 ret = vfs_write(fp, buf, size, &pos);
17576 if (ret < 0) {
17577 DHD_ERROR(("write file error, err = %d\n", ret));
17578 goto exit;
17579 }
17580
17581 /* Sync file from filesystem to physical media */
17582 ret = vfs_fsync(fp, 0);
17583 if (ret < 0) {
17584 DHD_ERROR(("sync file error, error = %d\n", ret));
17585 goto exit;
17586 }
17587 ret = BCME_OK;
17588
17589exit:
17590 /* close file before return */
17591 if (!IS_ERR(fp))
17592 filp_close(fp, current->files);
17593
17594 /* restore previous address limit */
17595 set_fs(old_fs);
17596
17597 return ret;
17598}
17599#endif
17600
17601#ifdef DHD_DEBUG
17602static void
17603dhd_convert_memdump_type_to_str(uint32 type, char *buf, size_t buf_len, int substr_type)
17604{
17605 char *type_str = NULL;
17606
17607 switch (type) {
17608 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
17609 type_str = "resumed_on_timeout";
17610 break;
17611 case DUMP_TYPE_D3_ACK_TIMEOUT:
17612 type_str = "D3_ACK_timeout";
17613 break;
17614 case DUMP_TYPE_DONGLE_TRAP:
17615 type_str = "Dongle_Trap";
17616 break;
17617 case DUMP_TYPE_MEMORY_CORRUPTION:
17618 type_str = "Memory_Corruption";
17619 break;
17620 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
17621 type_str = "PKTID_AUDIT_Fail";
17622 break;
17623 case DUMP_TYPE_PKTID_INVALID:
17624 type_str = "PKTID_INVALID";
17625 break;
17626 case DUMP_TYPE_SCAN_TIMEOUT:
17627 type_str = "SCAN_timeout";
17628 break;
17629 case DUMP_TYPE_SCAN_BUSY:
17630 type_str = "SCAN_Busy";
17631 break;
17632 case DUMP_TYPE_BY_SYSDUMP:
17633 if (substr_type == CMD_UNWANTED) {
17634 type_str = "BY_SYSDUMP_FORUSER_unwanted";
17635 } else if (substr_type == CMD_DISCONNECTED) {
17636 type_str = "BY_SYSDUMP_FORUSER_disconnected";
17637 } else {
17638 type_str = "BY_SYSDUMP_FORUSER";
17639 }
17640 break;
17641 case DUMP_TYPE_BY_LIVELOCK:
17642 type_str = "BY_LIVELOCK";
17643 break;
17644 case DUMP_TYPE_AP_LINKUP_FAILURE:
17645 type_str = "BY_AP_LINK_FAILURE";
17646 break;
17647 case DUMP_TYPE_AP_ABNORMAL_ACCESS:
17648 type_str = "INVALID_ACCESS";
17649 break;
17650 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
17651 type_str = "ERROR_RX_TIMED_OUT";
17652 break;
17653 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
17654 type_str = "ERROR_TX_TIMED_OUT";
17655 break;
17656 case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
17657 type_str = "CFG_VENDOR_TRIGGERED";
17658 break;
17659 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
17660 type_str = "BY_INVALID_RING_RDWR";
17661 break;
17662 case DUMP_TYPE_IFACE_OP_FAILURE:
17663 type_str = "BY_IFACE_OP_FAILURE";
17664 break;
17665 case DUMP_TYPE_TRANS_ID_MISMATCH:
17666 type_str = "BY_TRANS_ID_MISMATCH";
17667 break;
17668#ifdef DEBUG_DNGL_INIT_FAIL
17669 case DUMP_TYPE_DONGLE_INIT_FAILURE:
17670 type_str = "DONGLE_INIT_FAIL";
17671 break;
17672#endif /* DEBUG_DNGL_INIT_FAIL */
17673#ifdef SUPPORT_LINKDOWN_RECOVERY
17674 case DUMP_TYPE_READ_SHM_FAIL:
17675 type_str = "READ_SHM_FAIL";
17676 break;
17677#endif /* SUPPORT_LINKDOWN_RECOVERY */
17678 case DUMP_TYPE_DONGLE_HOST_EVENT:
17679 type_str = "BY_DONGLE_HOST_EVENT";
17680 break;
17681 case DUMP_TYPE_SMMU_FAULT:
17682 type_str = "SMMU_FAULT";
17683 break;
17684#ifdef DHD_ERPOM
17685 case DUMP_TYPE_DUE_TO_BT:
17686 type_str = "DUE_TO_BT";
17687 break;
17688#endif /* DHD_ERPOM */
17689 case DUMP_TYPE_BY_USER:
17690 type_str = "BY_USER";
17691 break;
17692 case DUMP_TYPE_LOGSET_BEYOND_RANGE:
17693 type_str = "LOGSET_BEYOND_RANGE";
17694 break;
17695 case DUMP_TYPE_CTO_RECOVERY:
17696 type_str = "CTO_RECOVERY";
17697 break;
17698 case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
17699 type_str = "SEQUENTIAL_PRIVCMD_ERROR";
17700 break;
17701 case DUMP_TYPE_PROXD_TIMEOUT:
17702 type_str = "PROXD_TIMEOUT";
17703 break;
17704 case DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE:
17705 type_str = "INBAND_DEVICE_WAKE_FAILURE";
17706 break;
17707 case DUMP_TYPE_PKTID_POOL_DEPLETED:
17708 type_str = "PKTID_POOL_DEPLETED";
17709 break;
17710 case DUMP_TYPE_ESCAN_SYNCID_MISMATCH:
17711 type_str = "ESCAN_SYNCID_MISMATCH";
17712 break;
17713 default:
17714 type_str = "Unknown_type";
17715 break;
17716 }
17717
17718 strlcpy(buf, type_str, buf_len);
17719}
17720
17721void
17722dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
17723{
17724 char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN];
17725 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
17726 dhd_pub_t *dhdp = &dhd->pub;
17727
17728 /* Init file name */
17729 memset(memdump_path, 0, len);
17730 memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN);
17731 dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN,
17732 dhdp->debug_dump_subcmd);
17733 clear_debug_dump_time(dhdp->debug_dump_time_str);
17734 get_debug_dump_time(dhdp->debug_dump_time_str);
17735 snprintf(memdump_path, len, "%s%s_%s_" "%s",
17736 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
17737
17738 if (strstr(fname, "sssr_dump")) {
17739 DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
17740 } else {
17741 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
17742 memdump_path, FILE_NAME_HAL_TAG));
17743 }
17744}
17745
17746int
17747write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
17748{
17749 int ret = 0;
17750 char memdump_path[DHD_MEMDUMP_PATH_STR_LEN];
17751 char memdump_type[DHD_MEMDUMP_TYPE_STR_LEN];
17752 uint32 file_mode;
17753
17754 /* Init file name */
17755 memset(memdump_path, 0, DHD_MEMDUMP_PATH_STR_LEN);
17756 memset(memdump_type, 0, DHD_MEMDUMP_TYPE_STR_LEN);
17757 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, DHD_MEMDUMP_TYPE_STR_LEN,
17758 dhd->debug_dump_subcmd);
17759 clear_debug_dump_time(dhd->debug_dump_time_str);
17760 get_debug_dump_time(dhd->debug_dump_time_str);
17761
17762 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
17763 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
17764#ifdef CUSTOMER_HW4_DEBUG
17765 file_mode = O_CREAT | O_WRONLY | O_SYNC;
17766#elif defined(BOARD_HIKEY)
17767 file_mode = O_CREAT | O_WRONLY | O_SYNC;
17768#elif defined(__ARM_ARCH_7A__)
17769 file_mode = O_CREAT | O_WRONLY;
17770#else
17771	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
17772	 * calling BUG_ON immediately after collecting the socram dump.
17773	 * So the file write operation should write the contents directly into the
17774	 * file instead of caching them. The O_TRUNC flag ensures that the file is
17775	 * rewritten instead of appended to.
17776	 */
17777 file_mode = O_CREAT | O_WRONLY | O_SYNC;
17778 {
17779 struct file *fp = filp_open(memdump_path, file_mode, 0664);
17780		/* Check if it is a live Brix image (has /installmedia); otherwise use /data */
17781 if (IS_ERR(fp)) {
17782 DHD_ERROR(("open file %s, try /data/\n", memdump_path));
17783 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
17784 "/data/", fname, memdump_type, dhd->debug_dump_time_str);
17785 } else {
17786 filp_close(fp, NULL);
17787 }
17788 }
17789#endif /* CUSTOMER_HW4_DEBUG */
17790
17791 /* print SOCRAM dump file path */
17792 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
17793
17794#ifdef DHD_LOG_DUMP
17795 dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
17796#endif /* DHD_LOG_DUMP */
17797
17798 /* Write file */
17799 ret = write_file(memdump_path, file_mode, buf, size);
17800
17801#ifdef DHD_DUMP_MNGR
17802 if (ret == BCME_OK) {
17803 dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
17804 }
17805#endif /* DHD_DUMP_MNGR */
17806
17807 return ret;
17808}
17809#endif /* DHD_DEBUG */
17810
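/* Arms any pending rx/ctrl wakelock timeouts and returns the larger of the
 * two requested durations (in ms); both request values are consumed (reset
 * to 0) under the wakelock spinlock.
 */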
17811int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
17812{
17813 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17814 unsigned long flags;
17815 int ret = 0;
17816
17817 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17818 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
17819 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
17820 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
17821#ifdef CONFIG_HAS_WAKELOCK
17822 if (dhd->wakelock_rx_timeout_enable)
17823 wake_lock_timeout(&dhd->wl_rxwake,
17824 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
17825 if (dhd->wakelock_ctrl_timeout_enable)
17826 wake_lock_timeout(&dhd->wl_ctrlwake,
17827 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
17828#endif
17829 dhd->wakelock_rx_timeout_enable = 0;
17830 dhd->wakelock_ctrl_timeout_enable = 0;
17831 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
17832 }
17833 return ret;
17834}
17835
17836int net_os_wake_lock_timeout(struct net_device *dev)
17837{
17838 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17839 int ret = 0;
17840
17841 if (dhd)
17842 ret = dhd_os_wake_lock_timeout(&dhd->pub);
17843 return ret;
17844}
17845
17846int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
17847{
17848 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17849 unsigned long flags;
17850
17851 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17852 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
17853 if (val > dhd->wakelock_rx_timeout_enable)
17854 dhd->wakelock_rx_timeout_enable = val;
17855 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
17856 }
17857 return 0;
17858}
17859
17860int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
17861{
17862 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17863 unsigned long flags;
17864
17865 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17866 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
17867 if (val > dhd->wakelock_ctrl_timeout_enable)
17868 dhd->wakelock_ctrl_timeout_enable = val;
17869 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
17870 }
17871 return 0;
17872}
17873
17874int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
17875{
17876 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17877 unsigned long flags;
17878
17879 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17880 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
17881 dhd->wakelock_ctrl_timeout_enable = 0;
17882#ifdef CONFIG_HAS_WAKELOCK
17883 if (wake_lock_active(&dhd->wl_ctrlwake))
17884 wake_unlock(&dhd->wl_ctrlwake);
17885#endif
17886 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
17887 }
17888 return 0;
17889}
17890
17891int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
17892{
17893 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17894 int ret = 0;
17895
17896 if (dhd)
17897 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
17898 return ret;
17899}
17900
17901int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
17902{
17903 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17904 int ret = 0;
17905
17906 if (dhd)
17907 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
17908 return ret;
17909}
17910
17911#if defined(DHD_TRACE_WAKE_LOCK)
17912#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17913#include <linux/hashtable.h>
17914#else
17915#include <linux/hash.h>
17916#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17917
17918#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17919/* Define a 2^5 = 32-bucket hash table */
17920DEFINE_HASHTABLE(wklock_history, 5);
17921#else
17922/* Define a 2^5 = 32-bucket hash table */
17923struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
17924#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17925
17926atomic_t trace_wklock_onoff;
17927typedef enum dhd_wklock_type {
17928 DHD_WAKE_LOCK,
17929 DHD_WAKE_UNLOCK,
17930 DHD_WAIVE_LOCK,
17931 DHD_RESTORE_LOCK
17932} dhd_wklock_t;
17933
17934struct wk_trace_record {
17935 unsigned long addr; /* Address of the instruction */
17936 dhd_wklock_t lock_type; /* lock_type */
17937 unsigned long long counter; /* counter information */
17938 struct hlist_node wklock_node; /* hash node */
17939};
17940
17941static struct wk_trace_record *find_wklock_entry(unsigned long addr)
17942{
17943 struct wk_trace_record *wklock_info;
17944#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17945 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
17946 hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
17947#else
17948 struct hlist_node *entry;
17949 int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
17950 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
17951 hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
17952#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17953 {
17954 GCC_DIAGNOSTIC_POP();
17955 if (wklock_info->addr == addr) {
17956 return wklock_info;
17957 }
17958 }
17959 return NULL;
17960}
17961
17962#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17963#define HASH_ADD(hashtable, node, key) \
17964 do { \
17965 hash_add(hashtable, node, key); \
17966 } while (0);
17967#else
17968#define HASH_ADD(hashtable, node, key) \
17969 do { \
17970 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
17971 hlist_add_head(node, &hashtable[index]); \
17972 } while (0);
17973#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
17974
17975#define STORE_WKLOCK_RECORD(wklock_type) \
17976 do { \
17977 struct wk_trace_record *wklock_info = NULL; \
17978 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
17979 wklock_info = find_wklock_entry(func_addr); \
17980 if (wklock_info) { \
17981 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
17982 wklock_info->counter = dhd->wakelock_counter; \
17983 } else { \
17984 wklock_info->counter++; \
17985 } \
17986 } else { \
17987 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
17988 if (!wklock_info) {\
17989 printk("Can't allocate wk_trace_record \n"); \
17990 } else { \
17991 wklock_info->addr = func_addr; \
17992 wklock_info->lock_type = wklock_type; \
17993 if (wklock_type == DHD_WAIVE_LOCK || \
17994 wklock_type == DHD_RESTORE_LOCK) { \
17995 wklock_info->counter = dhd->wakelock_counter; \
17996 } else { \
17997 wklock_info->counter++; \
17998 } \
17999 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
18000 } \
18001 } \
18002	} while (0)
18003
18004static inline void dhd_wk_lock_rec_dump(void)
18005{
18006 int bkt;
18007 struct wk_trace_record *wklock_info;
18008
18009#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
18010 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
18011 hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
18012#else
18013 struct hlist_node *entry = NULL;
18014 int max_index = ARRAY_SIZE(wklock_history);
18015 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
18016 for (bkt = 0; bkt < max_index; bkt++)
18017 hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
18018#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
18019 {
18020 GCC_DIAGNOSTIC_POP();
18021 switch (wklock_info->lock_type) {
18022 case DHD_WAKE_LOCK:
18023 printk("wakelock lock : %pS lock_counter : %llu \n",
18024 (void *)wklock_info->addr, wklock_info->counter);
18025 break;
18026 case DHD_WAKE_UNLOCK:
18027 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
18028 (void *)wklock_info->addr, wklock_info->counter);
18029 break;
18030 case DHD_WAIVE_LOCK:
18031 printk("wakelock waive : %pS before_waive : %llu \n",
18032 (void *)wklock_info->addr, wklock_info->counter);
18033 break;
18034 case DHD_RESTORE_LOCK:
18035 printk("wakelock restore : %pS, after_waive : %llu \n",
18036 (void *)wklock_info->addr, wklock_info->counter);
18037 break;
18038 }
18039 }
18040}
18041
18042static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
18043{
18044 unsigned long flags;
18045#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
18046 int i;
18047#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
18048
18049 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18050#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
18051 hash_init(wklock_history);
18052#else
18053 for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
18054 INIT_HLIST_HEAD(&wklock_history[i]);
18055#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
18056 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18057 atomic_set(&trace_wklock_onoff, 1);
18058}
18059
18060static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
18061{
18062 int bkt;
18063 struct wk_trace_record *wklock_info;
18064 struct hlist_node *tmp;
18065 unsigned long flags;
18066#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
18067 struct hlist_node *entry = NULL;
18068 int max_index = ARRAY_SIZE(wklock_history);
18069#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
18070
18071 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18072 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
18073#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
18074 hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
18075#else
18076 for (bkt = 0; bkt < max_index; bkt++)
18077 hlist_for_each_entry_safe(wklock_info, entry, tmp,
18078 &wklock_history[bkt], wklock_node)
18079#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
18080 {
18081 GCC_DIAGNOSTIC_POP();
18082#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
18083 hash_del(&wklock_info->wklock_node);
18084#else
18085 hlist_del_init(&wklock_info->wklock_node);
18086#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
18087 kfree(wklock_info);
18088 }
18089 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18090}
18091
18092void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
18093{
18094 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
18095 unsigned long flags;
18096
18097	printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record\n");
18098 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18099 dhd_wk_lock_rec_dump();
18100 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18101
18102}
18103#else
18104#define STORE_WKLOCK_RECORD(wklock_type)
18105#endif /* DHD_TRACE_WAKE_LOCK */
18106
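/*
 * dhd_os_wake_lock/dhd_os_wake_unlock implement a reference-counted wakelock:
 * the OS wakelock (or the SDIO bus PM vote) is taken only on the 0 -> 1
 * transition of wakelock_counter and dropped on the 1 -> 0 transition, so
 * calls must be balanced. A typical caller pattern (sketch) is:
 *
 *	DHD_OS_WAKE_LOCK(&dhd->pub);
 *	... do work that must not race with system suspend ...
 *	DHD_OS_WAKE_UNLOCK(&dhd->pub);
 */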
18107int dhd_os_wake_lock(dhd_pub_t *pub)
18108{
18109 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18110 unsigned long flags;
18111 int ret = 0;
18112
18113 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
18114 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18115 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
18116#ifdef CONFIG_HAS_WAKELOCK
18117 wake_lock(&dhd->wl_wifi);
18118#elif defined(BCMSDIO)
18119 dhd_bus_dev_pm_stay_awake(pub);
18120#endif
18121 }
18122#ifdef DHD_TRACE_WAKE_LOCK
18123 if (atomic_read(&trace_wklock_onoff)) {
18124 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
18125 }
18126#endif /* DHD_TRACE_WAKE_LOCK */
18127 dhd->wakelock_counter++;
18128 ret = dhd->wakelock_counter;
18129 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18130 }
18131
18132 return ret;
18133}
18134
18135void dhd_event_wake_lock(dhd_pub_t *pub)
18136{
18137 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18138
18139 if (dhd) {
18140#ifdef CONFIG_HAS_WAKELOCK
18141 wake_lock(&dhd->wl_evtwake);
18142#elif defined(BCMSDIO)
18143 dhd_bus_dev_pm_stay_awake(pub);
18144#endif
18145 }
18146}
18147
18148void
18149dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
18150{
18151#ifdef CONFIG_HAS_WAKELOCK
18152 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18153
18154 if (dhd) {
18155 wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
18156 }
18157#endif /* CONFIG_HAS_WAKELOCK */
18158}
18159
18160void
18161dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
18162{
18163#ifdef CONFIG_HAS_WAKELOCK
18164 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18165
18166 if (dhd) {
18167 wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
18168 }
18169#endif /* CONFIG_HAS_WAKELOCK */
18170}
18171
18172void
18173dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val)
18174{
18175#ifdef CONFIG_HAS_WAKELOCK
18176 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18177
18178 if (dhd) {
18179 wake_lock_timeout(&dhd->wl_nanwake, msecs_to_jiffies(val));
18180 }
18181#endif /* CONFIG_HAS_WAKELOCK */
18182}
18183
18184int net_os_wake_lock(struct net_device *dev)
18185{
18186 dhd_info_t *dhd = DHD_DEV_INFO(dev);
18187 int ret = 0;
18188
18189 if (dhd)
18190 ret = dhd_os_wake_lock(&dhd->pub);
18191 return ret;
18192}
18193
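/*
 * dhd_os_wake_unlock runs dhd_os_wake_lock_timeout() before dropping the
 * reference, so that any rx/ctrl wakelock-timeout requests recorded since the
 * last unlock (see the *_timeout_enable helpers above) are applied as timed
 * wakelocks first.
 */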
18194int dhd_os_wake_unlock(dhd_pub_t *pub)
18195{
18196 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18197 unsigned long flags;
18198 int ret = 0;
18199
18200 dhd_os_wake_lock_timeout(pub);
18201 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
18202 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18203
18204 if (dhd->wakelock_counter > 0) {
18205 dhd->wakelock_counter--;
18206#ifdef DHD_TRACE_WAKE_LOCK
18207 if (atomic_read(&trace_wklock_onoff)) {
18208 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
18209 }
18210#endif /* DHD_TRACE_WAKE_LOCK */
18211 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
18212#ifdef CONFIG_HAS_WAKELOCK
18213 wake_unlock(&dhd->wl_wifi);
18214#elif defined(BCMSDIO)
18215 dhd_bus_dev_pm_relax(pub);
18216#endif
18217 }
18218 ret = dhd->wakelock_counter;
18219 }
18220 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18221 }
18222 return ret;
18223}
18224
18225void dhd_event_wake_unlock(dhd_pub_t *pub)
18226{
18227 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18228
18229 if (dhd) {
18230#ifdef CONFIG_HAS_WAKELOCK
18231 wake_unlock(&dhd->wl_evtwake);
18232#elif defined(BCMSDIO)
18233 dhd_bus_dev_pm_relax(pub);
18234#endif
18235 }
18236}
18237
18238void dhd_pm_wake_unlock(dhd_pub_t *pub)
18239{
18240#ifdef CONFIG_HAS_WAKELOCK
18241 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18242
18243 if (dhd) {
18244 /* if wl_pmwake is active, unlock it */
18245 if (wake_lock_active(&dhd->wl_pmwake)) {
18246 wake_unlock(&dhd->wl_pmwake);
18247 }
18248 }
18249#endif /* CONFIG_HAS_WAKELOCK */
18250}
18251
18252void dhd_txfl_wake_unlock(dhd_pub_t *pub)
18253{
18254#ifdef CONFIG_HAS_WAKELOCK
18255 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18256
18257 if (dhd) {
18258 /* if wl_txflwake is active, unlock it */
18259 if (wake_lock_active(&dhd->wl_txflwake)) {
18260 wake_unlock(&dhd->wl_txflwake);
18261 }
18262 }
18263#endif /* CONFIG_HAS_WAKELOCK */
18264}
18265
18266void dhd_nan_wake_unlock(dhd_pub_t *pub)
18267{
18268#ifdef CONFIG_HAS_WAKELOCK
18269 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18270
18271 if (dhd) {
18272 /* if wl_nanwake is active, unlock it */
18273 if (wake_lock_active(&dhd->wl_nanwake)) {
18274 wake_unlock(&dhd->wl_nanwake);
18275 }
18276 }
18277#endif /* CONFIG_HAS_WAKELOCK */
18278}
18279
18280int dhd_os_check_wakelock(dhd_pub_t *pub)
18281{
18282#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
18283 dhd_info_t *dhd;
18284
18285 if (!pub)
18286 return 0;
18287 dhd = (dhd_info_t *)(pub->info);
18288#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
18289
18290#ifdef CONFIG_HAS_WAKELOCK
18291 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
18292 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
18293 (wake_lock_active(&dhd->wl_wdwake))))
18294 return 1;
18295#elif defined(BCMSDIO)
18296 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
18297 return 1;
18298#endif
18299 return 0;
18300}
18301
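/*
 * Suspend gate: returns 1 if any of the DHD wakelocks (l1..l10 below) is
 * still held, in which case the host's suspend attempt should be rejected.
 */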
18302int
18303dhd_os_check_wakelock_all(dhd_pub_t *pub)
18304{
18305#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
18306#if defined(CONFIG_HAS_WAKELOCK)
18307 int l1, l2, l3, l4, l7, l8, l9, l10;
18308 int l5 = 0, l6 = 0;
18309 int c, lock_active;
18310#endif /* CONFIG_HAS_WAKELOCK */
18311 dhd_info_t *dhd;
18312
18313 if (!pub) {
18314 return 0;
18315 }
18316 if (pub->up == 0) {
18317 DHD_ERROR(("%s: skip as down in progress\n", __FUNCTION__));
18318 return 0;
18319 }
18320 dhd = (dhd_info_t *)(pub->info);
18321 if (!dhd) {
18322 return 0;
18323 }
18324#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
18325
18326#ifdef CONFIG_HAS_WAKELOCK
18327 c = dhd->wakelock_counter;
18328 l1 = wake_lock_active(&dhd->wl_wifi);
18329 l2 = wake_lock_active(&dhd->wl_wdwake);
18330 l3 = wake_lock_active(&dhd->wl_rxwake);
18331 l4 = wake_lock_active(&dhd->wl_ctrlwake);
18332 l7 = wake_lock_active(&dhd->wl_evtwake);
18333#ifdef BCMPCIE_OOB_HOST_WAKE
18334 l5 = wake_lock_active(&dhd->wl_intrwake);
18335#endif /* BCMPCIE_OOB_HOST_WAKE */
18336#ifdef DHD_USE_SCAN_WAKELOCK
18337 l6 = wake_lock_active(&dhd->wl_scanwake);
18338#endif /* DHD_USE_SCAN_WAKELOCK */
18339 l8 = wake_lock_active(&dhd->wl_pmwake);
18340 l9 = wake_lock_active(&dhd->wl_txflwake);
18341 l10 = wake_lock_active(&dhd->wl_nanwake);
18342 lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9 || l10);
18343
18344 /* Indicate to the Host to avoid going to suspend if internal locks are up */
18345 if (lock_active) {
18346 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
18347 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d nan-%d\n",
18348 __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10));
18349 return 1;
18350 }
18351#elif defined(BCMSDIO)
18352 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
18353 return 1;
18354 }
18355#endif /* defined(BCMSDIO) */
18356 return 0;
18357}
18358
18359int net_os_wake_unlock(struct net_device *dev)
18360{
18361 dhd_info_t *dhd = DHD_DEV_INFO(dev);
18362 int ret = 0;
18363
18364 if (dhd)
18365 ret = dhd_os_wake_unlock(&dhd->pub);
18366 return ret;
18367}
18368
18369int dhd_os_wd_wake_lock(dhd_pub_t *pub)
18370{
18371 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18372 unsigned long flags;
18373 int ret = 0;
18374
18375 if (dhd) {
18376 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18377 if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
18378#ifdef CONFIG_HAS_WAKELOCK
18379			/* first watchdog lock reference: take the OS wakelock now */
18380 wake_lock(&dhd->wl_wdwake);
18381#endif
18382 }
18383 dhd->wakelock_wd_counter++;
18384 ret = dhd->wakelock_wd_counter;
18385 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18386 }
18387 return ret;
18388}
18389
18390int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
18391{
18392 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18393 unsigned long flags;
18394 int ret = 0;
18395
18396 if (dhd) {
18397 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18398 if (dhd->wakelock_wd_counter > 0) {
18399 dhd->wakelock_wd_counter = 0;
18400 if (!dhd->waive_wakelock) {
18401#ifdef CONFIG_HAS_WAKELOCK
18402 wake_unlock(&dhd->wl_wdwake);
18403#endif
18404 }
18405 }
18406 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18407 }
18408 return ret;
18409}
18410
18411#ifdef BCMPCIE_OOB_HOST_WAKE
18412void
18413dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
18414{
18415#ifdef CONFIG_HAS_WAKELOCK
18416 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18417
18418 if (dhd) {
18419 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
18420 }
18421#endif /* CONFIG_HAS_WAKELOCK */
18422}
18423
18424void
18425dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
18426{
18427#ifdef CONFIG_HAS_WAKELOCK
18428 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18429
18430 if (dhd) {
18431 /* if wl_intrwake is active, unlock it */
18432 if (wake_lock_active(&dhd->wl_intrwake)) {
18433 wake_unlock(&dhd->wl_intrwake);
18434 }
18435 }
18436#endif /* CONFIG_HAS_WAKELOCK */
18437}
18438#endif /* BCMPCIE_OOB_HOST_WAKE */
18439
18440#ifdef DHD_USE_SCAN_WAKELOCK
18441void
18442dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
18443{
18444#ifdef CONFIG_HAS_WAKELOCK
18445 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18446
18447 if (dhd) {
18448 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
18449 }
18450#endif /* CONFIG_HAS_WAKELOCK */
18451}
18452
18453void
18454dhd_os_scan_wake_unlock(dhd_pub_t *pub)
18455{
18456#ifdef CONFIG_HAS_WAKELOCK
18457 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18458
18459 if (dhd) {
18460 /* if wl_scanwake is active, unlock it */
18461 if (wake_lock_active(&dhd->wl_scanwake)) {
18462 wake_unlock(&dhd->wl_scanwake);
18463 }
18464 }
18465#endif /* CONFIG_HAS_WAKELOCK */
18466}
18467#endif /* DHD_USE_SCAN_WAKELOCK */
18468
18469/* Waive wakelocks for operations such as IOVARs in the suspend path; must be closed
18470 * by a paired call to dhd_os_wake_lock_restore(). Returns the watchdog wakelock counter.
18471 */
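/*
 * A minimal usage sketch (assuming a suspend-path caller):
 *
 *	dhd_os_wake_lock_waive(&dhd->pub);
 *	... issue IOVARs that would otherwise bump the wakelock counter ...
 *	dhd_os_wake_lock_restore(&dhd->pub);
 *
 * While waived, dhd_os_wake_lock/unlock still update wakelock_counter but do
 * not touch the underlying OS wakelock; restore reconciles the difference.
 */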
18472int dhd_os_wake_lock_waive(dhd_pub_t *pub)
18473{
18474 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18475 unsigned long flags;
18476 int ret = 0;
18477
18478 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
18479 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18480
18481		/* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
18482 if (dhd->waive_wakelock == FALSE) {
18483#ifdef DHD_TRACE_WAKE_LOCK
18484 if (atomic_read(&trace_wklock_onoff)) {
18485 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
18486 }
18487#endif /* DHD_TRACE_WAKE_LOCK */
18488 /* record current lock status */
18489 dhd->wakelock_before_waive = dhd->wakelock_counter;
18490 dhd->waive_wakelock = TRUE;
18491 }
18492 ret = dhd->wakelock_wd_counter;
18493 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18494 }
18495 return ret;
18496}
18497
18498int dhd_os_wake_lock_restore(dhd_pub_t *pub)
18499{
18500 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
18501 unsigned long flags;
18502 int ret = 0;
18503
18504 if (!dhd)
18505 return 0;
18506 if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
18507 return 0;
18508
18509 DHD_WAKE_SPIN_LOCK(&dhd->wakelock_spinlock, flags);
18510
18511	/* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
18512 if (!dhd->waive_wakelock)
18513 goto exit;
18514
18515 dhd->waive_wakelock = FALSE;
18516	/* If somebody acquired the wakelock between waive and restore, make up for it
18517	 * here by calling wake_lock or pm_stay_awake; if somebody released the lock in
18518	 * between, do the opposite by calling wake_unlock or pm_relax.
18519	 */
18520#ifdef DHD_TRACE_WAKE_LOCK
18521 if (atomic_read(&trace_wklock_onoff)) {
18522 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
18523 }
18524#endif /* DHD_TRACE_WAKE_LOCK */
18525
18526 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
18527#ifdef CONFIG_HAS_WAKELOCK
18528 wake_lock(&dhd->wl_wifi);
18529#elif defined(BCMSDIO)
18530 dhd_bus_dev_pm_stay_awake(&dhd->pub);
18531#endif
18532 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
18533#ifdef CONFIG_HAS_WAKELOCK
18534 wake_unlock(&dhd->wl_wifi);
18535#elif defined(BCMSDIO)
18536 dhd_bus_dev_pm_relax(&dhd->pub);
18537#endif
18538 }
18539 dhd->wakelock_before_waive = 0;
18540exit:
18541 ret = dhd->wakelock_wd_counter;
18542 DHD_WAKE_SPIN_UNLOCK(&dhd->wakelock_spinlock, flags);
18543 return ret;
18544}
18545
18546void dhd_os_wake_lock_init(struct dhd_info *dhd)
18547{
18548 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
18549 dhd->wakelock_counter = 0;
18550 dhd->wakelock_rx_timeout_enable = 0;
18551 dhd->wakelock_ctrl_timeout_enable = 0;
18552 /* wakelocks prevent a system from going into a low power state */
18553#ifdef CONFIG_HAS_WAKELOCK
18554	// terence 20161023: wl_wifi must not be destroyed on wlan down, or dhd_ioctl_entry hits a NULL pointer dereference
18555 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
18556 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
18557 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
18558 wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
18559 wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
18560#ifdef BCMPCIE_OOB_HOST_WAKE
18561 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
18562#endif /* BCMPCIE_OOB_HOST_WAKE */
18563#ifdef DHD_USE_SCAN_WAKELOCK
18564 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
18565#endif /* DHD_USE_SCAN_WAKELOCK */
18566 wake_lock_init(&dhd->wl_nanwake, WAKE_LOCK_SUSPEND, "wlan_nan_wake");
18567#endif /* CONFIG_HAS_WAKELOCK */
18568#ifdef DHD_TRACE_WAKE_LOCK
18569 dhd_wk_lock_trace_init(dhd);
18570#endif /* DHD_TRACE_WAKE_LOCK */
18571}
18572
18573void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
18574{
18575 DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
18576#ifdef CONFIG_HAS_WAKELOCK
18577 dhd->wakelock_counter = 0;
18578 dhd->wakelock_rx_timeout_enable = 0;
18579 dhd->wakelock_ctrl_timeout_enable = 0;
18580	// terence 20161023: wl_wifi must not be destroyed on wlan down, or dhd_ioctl_entry hits a NULL pointer dereference
18581 wake_lock_destroy(&dhd->wl_rxwake);
18582 wake_lock_destroy(&dhd->wl_ctrlwake);
18583 wake_lock_destroy(&dhd->wl_evtwake);
18584 wake_lock_destroy(&dhd->wl_pmwake);
18585 wake_lock_destroy(&dhd->wl_txflwake);
18586#ifdef BCMPCIE_OOB_HOST_WAKE
18587 wake_lock_destroy(&dhd->wl_intrwake);
18588#endif /* BCMPCIE_OOB_HOST_WAKE */
18589#ifdef DHD_USE_SCAN_WAKELOCK
18590 wake_lock_destroy(&dhd->wl_scanwake);
18591#endif /* DHD_USE_SCAN_WAKELOCK */
18592 wake_lock_destroy(&dhd->wl_nanwake);
18593#ifdef DHD_TRACE_WAKE_LOCK
18594 dhd_wk_lock_trace_deinit(dhd);
18595#endif /* DHD_TRACE_WAKE_LOCK */
18596#else /* !CONFIG_HAS_WAKELOCK */
18597 if (dhd->wakelock_counter > 0) {
18598 DHD_ERROR(("%s: wake lock count=%d\n",
18599 __FUNCTION__, dhd->wakelock_counter));
18600 while (dhd_os_wake_unlock(&dhd->pub));
18601 }
18602#endif /* CONFIG_HAS_WAKELOCK */
18603}
18604
18605bool dhd_os_check_if_up(dhd_pub_t *pub)
18606{
18607 if (!pub)
18608 return FALSE;
18609 return pub->up;
18610}
18611
18612/* function to collect firmware, chip id and chip version info */
18613void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
18614{
18615 int i;
18616
18617 i = snprintf(info_string, sizeof(info_string),
18618 " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
18619 printf("%s\n", info_string);
18620
18621 if (!dhdp)
18622 return;
18623
18624 i = snprintf(&info_string[i], sizeof(info_string) - i,
18625 "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
18626 dhd_conf_get_chiprev(dhdp));
18627}
18628
18629int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
18630{
18631 int ifidx;
18632 int ret = 0;
18633 dhd_info_t *dhd = NULL;
18634
18635 if (!net || !DEV_PRIV(net)) {
18636 DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
18637 __FUNCTION__, net, DEV_PRIV(net)));
18638 return -EINVAL;
18639 }
18640
18641 dhd = DHD_DEV_INFO(net);
18642 if (!dhd)
18643 return -EINVAL;
18644
18645 ifidx = dhd_net2idx(dhd, net);
18646 if (ifidx == DHD_BAD_IF) {
18647 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
18648 return -ENODEV;
18649 }
18650
18651 DHD_OS_WAKE_LOCK(&dhd->pub);
18652
18653 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
18654 dhd_check_hang(net, &dhd->pub, ret);
18655
18656 DHD_OS_WAKE_UNLOCK(&dhd->pub);
18657
18658 return ret;
18659}
18660
18661bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
18662{
18663 struct net_device *net;
18664
18665 net = dhd_idx2net(dhdp, ifidx);
18666 if (!net) {
18667 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
18668 return -EINVAL;
18669 }
18670
18671 return dhd_check_hang(net, dhdp, ret);
18672}
18673
18674/* Return instance */
18675int dhd_get_instance(dhd_pub_t *dhdp)
18676{
18677 return dhdp->info->unit;
18678}
18679
18680#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
18681#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
18682int dhd_deepsleep(struct net_device *dev, int flag)
18683{
18684 char iovbuf[20];
18685 uint powervar = 0;
18686 dhd_info_t *dhd;
18687 dhd_pub_t *dhdp;
18688 int cnt = 0;
18689 int ret = 0;
18690
18691 dhd = DHD_DEV_INFO(dev);
18692 dhdp = &dhd->pub;
18693
18694 switch (flag) {
18695 case 1 : /* Deepsleep on */
18696 DHD_ERROR(("[WiFi] Deepsleep On\n"));
18697 /* give some time to sysioc_work before deepsleep */
18698 OSL_SLEEP(200);
18699#ifdef PKT_FILTER_SUPPORT
18700 /* disable pkt filter */
18701 dhd_enable_packet_filter(0, dhdp);
18702#endif /* PKT_FILTER_SUPPORT */
18703 /* Disable MPC */
18704 powervar = 0;
18705 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
18706 0, TRUE);
18707 if (ret) {
18708 DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
18709 }
18710 /* Enable Deepsleep */
18711 powervar = 1;
18712 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
18713 NULL, 0, TRUE);
18714 if (ret) {
18715 DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
18716 }
18717 break;
18718
18719 case 0: /* Deepsleep Off */
18720 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
18721
18722 /* Disable Deepsleep */
18723 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
18724 powervar = 0;
18725 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
18726 sizeof(powervar), NULL, 0, TRUE);
18727 if (ret) {
18728 DHD_ERROR(("%s: deepsleep failed:%d\n", __FUNCTION__, ret));
18729 }
18730
18731 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
18732 sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
18733 if (ret < 0) {
18734					DHD_ERROR(("failed to read deepsleep status,"
18735						" ret: %d\n", ret));
18736 } else {
18737 if (!(*(int *)iovbuf)) {
18738 DHD_ERROR(("deepsleep mode is 0,"
18739 " count: %d\n", cnt));
18740 break;
18741 }
18742 }
18743 }
18744
18745 /* Enable MPC */
18746 powervar = 1;
18747 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar),
18748 NULL, 0, TRUE);
18749 if (ret) {
18750 DHD_ERROR(("%s: mpc failed:%d\n", __FUNCTION__, ret));
18751 }
18752 break;
18753 }
18754
18755 return 0;
18756}
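/*
 * Usage sketch: dhd_deepsleep(dev, 1) disables MPC and enters deepsleep;
 * dhd_deepsleep(dev, 0) writes deepsleep 0 and polls the "deepsleep" iovar up
 * to MAX_TRY_CNT times until it reads back 0, then re-enables MPC.
 */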
18757#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
18758
18759#ifdef PROP_TXSTATUS
18760
18761void dhd_wlfc_plat_init(void *dhd)
18762{
18763#ifdef USE_DYNAMIC_F2_BLKSIZE
18764 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
18765#endif /* USE_DYNAMIC_F2_BLKSIZE */
18766 return;
18767}
18768
18769void dhd_wlfc_plat_deinit(void *dhd)
18770{
18771#ifdef USE_DYNAMIC_F2_BLKSIZE
18772 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
18773#endif /* USE_DYNAMIC_F2_BLKSIZE */
18774 return;
18775}
18776
18777bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
18778{
18779#ifdef SKIP_WLFC_ON_CONCURRENT
18780
18781#ifdef WL_CFG80211
18782 struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
18783 if (net)
18784 /* enable flow control in vsdb mode */
18785 return !(wl_cfg80211_is_concurrent_mode(net));
18786#else
18787 return TRUE; /* skip flow control */
18788#endif /* WL_CFG80211 */
18789
18790#else
18791 return FALSE;
18792#endif /* SKIP_WLFC_ON_CONCURRENT */
18793 return FALSE;
18794}
18795#endif /* PROP_TXSTATUS */
18796
18797#ifdef BCMDBGFS
18798#include <linux/debugfs.h>
18799
18800typedef struct dhd_dbgfs {
18801 struct dentry *debugfs_dir;
18802 struct dentry *debugfs_mem;
18803 dhd_pub_t *dhdp;
18804 uint32 size;
18805} dhd_dbgfs_t;
18806
18807dhd_dbgfs_t g_dbgfs;
18808
18809extern uint32 dhd_readregl(void *bp, uint32 addr);
18810extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
18811
18812static int
18813dhd_dbg_state_open(struct inode *inode, struct file *file)
18814{
18815 file->private_data = inode->i_private;
18816 return 0;
18817}
18818
18819static ssize_t
18820dhd_dbg_state_read(struct file *file, char __user *ubuf,
18821 size_t count, loff_t *ppos)
18822{
18823 ssize_t rval;
18824 uint32 tmp;
18825 loff_t pos = *ppos;
18826 size_t ret;
18827
18828 if (pos < 0)
18829 return -EINVAL;
18830 if (pos >= g_dbgfs.size || !count)
18831 return 0;
18832 if (count > g_dbgfs.size - pos)
18833 count = g_dbgfs.size - pos;
18834
18835 /* XXX: The user can request any length they want, but they are getting 4 bytes */
18836 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
18837 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
18838
18839 ret = copy_to_user(ubuf, &tmp, 4);
18840 if (ret == count)
18841 return -EFAULT;
18842
18843 count -= ret;
18844 *ppos = pos + count;
18845 rval = count;
18846
18847 return rval;
18848}
18849
18850static ssize_t
18851dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
18852{
18853 loff_t pos = *ppos;
18854 size_t ret;
18855 uint32 buf;
18856
18857 if (pos < 0)
18858 return -EINVAL;
18859 if (pos >= g_dbgfs.size || !count)
18860 return 0;
18861 if (count > g_dbgfs.size - pos)
18862 count = g_dbgfs.size - pos;
18863
18864 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
18865 if (ret == count)
18866 return -EFAULT;
18867
18868 /* XXX: The user can request any length they want, but they are getting 4 bytes */
18869 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
18870 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
18871
18872 return count;
18873}
18874
18875loff_t
18876dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
18877{
18878 loff_t pos = -1;
18879
18880 switch (whence) {
18881 case 0:
18882 pos = off;
18883 break;
18884 case 1:
18885 pos = file->f_pos + off;
18886 break;
18887 case 2:
18888 pos = g_dbgfs.size - off;
18889 }
18890 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
18891}
18892
18893static const struct file_operations dhd_dbg_state_ops = {
18894 .read = dhd_dbg_state_read,
18895 .write = dhd_debugfs_write,
18896 .open = dhd_dbg_state_open,
18897 .llseek = dhd_debugfs_lseek
18898};
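/*
 * The "mem" node created below gives 4-byte, 4-byte-aligned register access
 * through dhd_readregl/dhd_writeregl. A hypothetical read from user space
 * (path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	dd if=/sys/kernel/debug/dhd/mem bs=4 count=1 skip=N
 */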
18899
18900static void dhd_dbgfs_create(void)
18901{
18902 if (g_dbgfs.debugfs_dir) {
18903 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
18904 NULL, &dhd_dbg_state_ops);
18905 }
18906}
18907
18908void dhd_dbgfs_init(dhd_pub_t *dhdp)
18909{
18910 g_dbgfs.dhdp = dhdp;
18911	g_dbgfs.size = 0x20000000; /* Allow access to the registers of the various cores */
18912
18913 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
18914 if (IS_ERR(g_dbgfs.debugfs_dir)) {
18915 g_dbgfs.debugfs_dir = NULL;
18916 return;
18917 }
18918
18919 dhd_dbgfs_create();
18920
18921 return;
18922}
18923
18924void dhd_dbgfs_remove(void)
18925{
18926 debugfs_remove(g_dbgfs.debugfs_mem);
18927 debugfs_remove(g_dbgfs.debugfs_dir);
18928
18929 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
18930}
18931#endif /* BCMDBGFS */
18932
18933#ifdef CUSTOM_SET_CPUCORE
18934void dhd_set_cpucore(dhd_pub_t *dhd, int set)
18935{
18936 int e_dpc = 0, e_rxf = 0, retry_set = 0;
18937
18938 if (!(dhd->chan_isvht80)) {
18939 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
18940 return;
18941 }
18942
18943 if (DPC_CPUCORE) {
18944 do {
18945 if (set == TRUE) {
18946 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
18947 cpumask_of(DPC_CPUCORE));
18948 } else {
18949 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
18950 cpumask_of(PRIMARY_CPUCORE));
18951 }
18952 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
18953 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
18954 return;
18955 }
18956 if (e_dpc < 0)
18957 OSL_SLEEP(1);
18958 } while (e_dpc < 0);
18959 }
18960 if (RXF_CPUCORE) {
18961 do {
18962 if (set == TRUE) {
18963 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
18964 cpumask_of(RXF_CPUCORE));
18965 } else {
18966 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
18967 cpumask_of(PRIMARY_CPUCORE));
18968 }
18969 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
18970 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
18971 return;
18972 }
18973 if (e_rxf < 0)
18974 OSL_SLEEP(1);
18975 } while (e_rxf < 0);
18976 }
18977 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
18978
18979 return;
18980}
18981#endif /* CUSTOM_SET_CPUCORE */
18982
18983#ifdef DHD_MCAST_REGEN
18984/* Get interface specific ap_isolate configuration */
18985int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
18986{
18987 dhd_info_t *dhd = dhdp->info;
18988 dhd_if_t *ifp;
18989
18990 ASSERT(idx < DHD_MAX_IFS);
18991
18992 ifp = dhd->iflist[idx];
18993
18994 return ifp->mcast_regen_bss_enable;
18995}
18996
18997/* Set interface specific mcast_regen configuration */
18998int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
18999{
19000 dhd_info_t *dhd = dhdp->info;
19001 dhd_if_t *ifp;
19002
19003 ASSERT(idx < DHD_MAX_IFS);
19004
19005 ifp = dhd->iflist[idx];
19006
19007 ifp->mcast_regen_bss_enable = val;
19008
19009 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
19010 * is enabled
19011 */
19012 dhd_update_rx_pkt_chainable_state(dhdp, idx);
19013 return BCME_OK;
19014}
19015#endif /* DHD_MCAST_REGEN */
19016
19017/* Get interface specific ap_isolate configuration */
19018int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
19019{
19020 dhd_info_t *dhd = dhdp->info;
19021 dhd_if_t *ifp;
19022
19023 ASSERT(idx < DHD_MAX_IFS);
19024
19025 ifp = dhd->iflist[idx];
19026
19027 return ifp->ap_isolate;
19028}
19029
19030/* Set interface specific ap_isolate configuration */
19031int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
19032{
19033 dhd_info_t *dhd = dhdp->info;
19034 dhd_if_t *ifp;
19035
19036 ASSERT(idx < DHD_MAX_IFS);
19037
19038 ifp = dhd->iflist[idx];
19039
19040 if (ifp)
19041 ifp->ap_isolate = val;
19042
19043 return 0;
19044}
19045
19046#ifdef DHD_FW_COREDUMP
19047void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
19048{
19049 dhd_dump_t *dump = NULL;
19050 unsigned long flags = 0;
19051 dhd_info_t *dhd_info = NULL;
19052#if defined(DHD_LOG_DUMP) && !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
19053 log_dump_type_t type = DLD_BUF_TYPE_ALL;
19054#endif /* DHD_LOG_DUMP && !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
19055
19056 dhd_info = (dhd_info_t *)dhdp->info;
19057 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
19058 if (dump == NULL) {
19059 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
19060 return;
19061 }
19062 dump->buf = buf;
19063 dump->bufsize = size;
19064#ifdef BCMPCIE
19065 dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
19066 (uint32 *)(&dump->hscb_bufsize));
19067#else
19068 dump->hscb_bufsize = 0;
19069#endif /* BCMPCIE */
19070
19071#ifdef DHD_LOG_DUMP
19072 dhd_print_buf_addr(dhdp, "memdump", buf, size);
19073#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
19074	/* Print out buffer information */
19075 dhd_log_dump_buf_addr(dhdp, &type);
19076#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
19077#endif /* DHD_LOG_DUMP */
19078
19079 if (dhdp->memdump_enabled == DUMP_MEMONLY) {
19080 BUG_ON(1);
19081 }
19082
19083 if ((dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
19084 (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
19085 (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
19086 {
19087 dhd_info->scheduled_memdump = FALSE;
19088 (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
19089 /* No need to collect debug dump for init failure */
19090 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) {
19091 return;
19092 }
19093#ifdef DHD_LOG_DUMP
19094 {
19095 log_dump_type_t *flush_type = NULL;
19096 /* for dongle init fail cases, 'dhd_mem_dump' does
19097 * not call 'dhd_log_dump', so call it here.
19098 */
19099 flush_type = MALLOCZ(dhdp->osh,
19100 sizeof(log_dump_type_t));
19101 if (flush_type) {
19102 *flush_type = DLD_BUF_TYPE_ALL;
19103 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
19104 dhd_log_dump(dhdp->info, flush_type, 0);
19105 }
19106 }
19107#endif /* DHD_LOG_DUMP */
19108 return;
19109 }
19110
19111 dhd_info->scheduled_memdump = TRUE;
19112
19113 /* bus busy bit for mem dump will be cleared in mem dump
19114 * work item context, after mem dump file is written
19115 */
19116 DHD_GENERAL_LOCK(dhdp, flags);
19117 DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
19118 DHD_GENERAL_UNLOCK(dhdp, flags);
19119 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
19120 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
19121 DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
19122}
19123
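/*
 * dhd_mem_dump runs either synchronously (the init-failure/BT/SMMU cases
 * handled above) or from the deferred work queue; dhd->scheduled_memdump
 * distinguishes the two so that log-dump flushing and busy-bit waits happen
 * only in the sleepable work-queue context. The IN_MEMDUMP bus-busy bit set
 * by the scheduler is cleared in the exit path below.
 */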
19124static int
19125dhd_mem_dump(void *handle, void *event_info, u8 event)
19126{
19127 dhd_info_t *dhd = handle;
19128 dhd_pub_t *dhdp = NULL;
19129 unsigned long flags = 0;
19130 int ret = 0;
19131 dhd_dump_t *dump = NULL;
19132
19133 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
19134
19135 if (!dhd) {
19136 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
19137 return -ENODEV;
19138 }
19139
19140 dhdp = &dhd->pub;
19141 if (!dhdp) {
19142 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
19143 return -ENODEV;
19144 }
19145
19146 DHD_GENERAL_LOCK(dhdp, flags);
19147 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
19148 DHD_GENERAL_UNLOCK(dhdp, flags);
19149 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
19150 ret = -ENODEV;
19151 goto exit;
19152 }
19153 DHD_GENERAL_UNLOCK(dhdp, flags);
19154
19155#ifdef DHD_SDTC_ETB_DUMP
19156 if (dhdp->collect_sdtc) {
19157 dhd_sdtc_etb_dump(dhdp);
19158 dhdp->collect_sdtc = FALSE;
19159 }
19160#endif /* DHD_SDTC_ETB_DUMP */
19161
19162#ifdef DHD_SSSR_DUMP
19163 if (sssr_enab && dhdp->sssr_inited && dhdp->collect_sssr) {
19164 if (fis_enab && dhdp->sssr_reg_info->rev3.fis_enab) {
19165 int bcmerror = dhd_bus_fis_trigger(dhdp);
19166
19167 if (bcmerror == BCME_OK) {
19168 dhd_bus_fis_dump(dhdp);
19169 } else {
19170 DHD_ERROR(("%s: FIS trigger failed: %d\n",
19171 __FUNCTION__, bcmerror));
19172 }
19173 } else {
19174 DHD_ERROR(("%s: FIS not enabled (%d:%d), collect legacy sssr\n",
19175 __FUNCTION__, fis_enab, dhdp->sssr_reg_info->rev3.fis_enab));
19176 dhdpcie_sssr_dump(dhdp);
19177 }
19178 }
19179 dhdp->collect_sssr = FALSE;
19180#endif /* DHD_SSSR_DUMP */
19181#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
19182 dhd_wait_for_file_dump(dhdp);
19183#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
19184
19185 dump = (dhd_dump_t *)event_info;
19186 if (!dump) {
19187 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
19188 ret = -EINVAL;
19189 goto exit;
19190 }
19191
19192	/*
19193	 * If the kernel does not have file write access enabled,
19194	 * then skip writing dumps to files.
19195	 * The dumps will instead be pushed to the HAL layer,
19196	 * which writes them into files.
19197	 */
19198#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
19199
19200 if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
19201 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
19202#ifdef DHD_DEBUG_UART
19203 dhd->pub.memdump_success = FALSE;
19204#endif /* DHD_DEBUG_UART */
19205 }
19206
19207 if (dump->hscb_buf && dump->hscb_bufsize) {
19208 if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
19209 dump->hscb_bufsize, "mem_dump_hscb")) {
19210 DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
19211#ifdef DHD_DEBUG_UART
19212 dhd->pub.memdump_success = FALSE;
19213#endif /* DHD_DEBUG_UART */
19214 }
19215 }
19216
19217#ifndef DHD_PKT_LOGGING
19218 clear_debug_dump_time(dhdp->debug_dump_time_str);
19219#endif /* !DHD_PKT_LOGGING */
19220
19221 /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
19222 * context, no need to schedule another work queue for log dump. In case of
19223 * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
19224 * cfg layer is itself scheduling the log_dump work queue.
19225 * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
19226 * collect debug_dump as it may be called from non-sleepable context.
19227 */
19228#ifdef DHD_LOG_DUMP
19229 if (dhd->scheduled_memdump &&
19230 dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
19231 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
19232 sizeof(log_dump_type_t));
19233 if (flush_type) {
19234 *flush_type = DLD_BUF_TYPE_ALL;
19235 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
19236 dhd_log_dump(dhd, flush_type, 0);
19237 }
19238 }
19239#endif /* DHD_LOG_DUMP */
19240
19241 /* before calling bug on, wait for other logs to be dumped.
19242 * we cannot wait in case dhd_mem_dump is called directly
19243 * as it may not be from a sleepable context
19244 */
19245 if (dhd->scheduled_memdump) {
19246 uint bitmask = 0;
19247 int timeleft = 0;
19248#ifdef DHD_SSSR_DUMP
19249 bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
19250#endif
19251 if (bitmask != 0) {
19252 DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
19253 __FUNCTION__, dhdp->dhd_bus_busy_state));
19254 timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
19255 &dhdp->dhd_bus_busy_state, bitmask, 0);
19256 if ((timeleft == 0) || (timeleft == 1)) {
19257 DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
19258 __FUNCTION__, dhdp->dhd_bus_busy_state));
19259 }
19260 }
19261 }
19262#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
19263
19264 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
19265#ifdef DHD_LOG_DUMP
19266 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
19267#endif /* DHD_LOG_DUMP */
19268 dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
19269#ifdef DHD_DEBUG_UART
19270 dhd->pub.memdump_success == TRUE &&
19271#endif /* DHD_DEBUG_UART */
19272#ifdef DNGL_EVENT_SUPPORT
19273 dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
19274#endif /* DNGL_EVENT_SUPPORT */
19275 dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
19276#ifdef SHOW_LOGTRACE
19277 /* Wait till logtrace context is flushed */
19278 dhd_flush_logtrace_process(dhd);
19279#endif /* SHOW_LOGTRACE */
19280
19281#ifdef EWP_EDL
19282 cancel_delayed_work_sync(&dhd->edl_dispatcher_work);
19283#endif
19284
19285 BUG_ON(1);
19286 }
19287
19288exit:
19289 if (dump) {
19290 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
19291 }
19292 DHD_GENERAL_LOCK(dhdp, flags);
19293 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
19294 dhd_os_busbusy_wake(dhdp);
19295 DHD_GENERAL_UNLOCK(dhdp, flags);
19296 dhd->scheduled_memdump = FALSE;
19297
19298 if (dhdp->hang_was_pending) {
19299 DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
19300 dhd_os_send_hang_message(dhdp);
19301 dhdp->hang_was_pending = 0;
19302 }
19303 DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
19304
19305 return ret;
19306}
19307#endif /* DHD_FW_COREDUMP */
19308
19309#ifdef DHD_SSSR_DUMP
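/*
 * The accessors below dispatch on dhdp->sssr_reg_info->rev2.version; this
 * relies on the version/length header fields sitting at the same offsets in
 * every revision of the sssr_reg_info union, so reading them through rev2 is
 * presumably safe for v0/v1 images as well.
 */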
19310uint
19311dhd_sssr_dig_buf_size(dhd_pub_t *dhdp)
19312{
19313 uint dig_buf_size = 0;
19314
19315	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
19316 switch (dhdp->sssr_reg_info->rev2.version) {
19317 case SSSR_REG_INFO_VER_3:
19318 /* intentional fall through */
19319 case SSSR_REG_INFO_VER_2 :
19320 if ((dhdp->sssr_reg_info->rev2.length >
19321 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
19322 dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
19323 dig_buf_size = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
19324 }
19325 break;
19326 case SSSR_REG_INFO_VER_1 :
19327 if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
19328 dig_buf_size = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
19329 } else if ((dhdp->sssr_reg_info->rev1.length >
19330 OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
19331 dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) {
19332 dig_buf_size = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
19333 }
19334 break;
19335 case SSSR_REG_INFO_VER_0 :
19336 if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
19337 dig_buf_size = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
19338 }
19339 break;
19340 default :
19341 DHD_ERROR(("invalid sssr_reg_ver"));
19342 return BCME_UNSUPPORTED;
19343 }
19344
19345 return dig_buf_size;
19346}
19347
19348uint
19349dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp)
19350{
19351 uint dig_buf_addr = 0;
19352
19353	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
19354 switch (dhdp->sssr_reg_info->rev2.version) {
19355 case SSSR_REG_INFO_VER_3 :
19356 /* intentional fall through */
19357 case SSSR_REG_INFO_VER_2 :
19358 if ((dhdp->sssr_reg_info->rev2.length >
19359 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
19360 dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
19361 dig_buf_addr = dhdp->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr;
19362 }
19363 break;
19364 case SSSR_REG_INFO_VER_1 :
19365 if (dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
19366 dig_buf_addr = dhdp->sssr_reg_info->rev1.vasip_regs.vasip_sr_addr;
19367 } else if ((dhdp->sssr_reg_info->rev1.length >
19368 OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
19369 dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_size) {
19370 dig_buf_addr = dhdp->sssr_reg_info->rev1.dig_mem_info.dig_sr_addr;
19371 }
19372 break;
19373 case SSSR_REG_INFO_VER_0 :
19374 if (dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
19375 dig_buf_addr = dhdp->sssr_reg_info->rev0.vasip_regs.vasip_sr_addr;
19376 }
19377 break;
19378 default :
19379 DHD_ERROR(("invalid sssr_reg_ver"));
19380 return BCME_UNSUPPORTED;
19381 }
19382
19383 return dig_buf_addr;
19384}
19385
19386uint
19387dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx)
19388{
19389 uint mac_buf_size = 0;
19390 uint8 num_d11cores;
19391
19392 num_d11cores = dhd_d11_slices_num_get(dhdp);
19393
19394	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
19395 if (core_idx < num_d11cores) {
19396 switch (dhdp->sssr_reg_info->rev2.version) {
19397 case SSSR_REG_INFO_VER_3 :
19398 /* intentional fall through */
19399 case SSSR_REG_INFO_VER_2 :
19400 mac_buf_size = dhdp->sssr_reg_info->rev2.mac_regs[core_idx].sr_size;
19401 break;
19402 case SSSR_REG_INFO_VER_1 :
19403 mac_buf_size = dhdp->sssr_reg_info->rev1.mac_regs[core_idx].sr_size;
19404 break;
19405 case SSSR_REG_INFO_VER_0 :
19406 mac_buf_size = dhdp->sssr_reg_info->rev0.mac_regs[core_idx].sr_size;
19407 break;
19408 default :
19409 DHD_ERROR(("invalid sssr_reg_ver"));
19410 return BCME_UNSUPPORTED;
19411 }
19412 }
19413
19414 return mac_buf_size;
19415}
19416
19417uint
19418dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx)
19419{
19420 uint xmtaddress = 0;
19421 uint8 num_d11cores;
19422
19423 num_d11cores = dhd_d11_slices_num_get(dhdp);
19424
19425	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
19426 if (core_idx < num_d11cores) {
19427 switch (dhdp->sssr_reg_info->rev2.version) {
19428 case SSSR_REG_INFO_VER_3 :
19429 /* intentional fall through */
19430 case SSSR_REG_INFO_VER_2 :
19431 xmtaddress = dhdp->sssr_reg_info->rev2.
19432 mac_regs[core_idx].base_regs.xmtaddress;
19433 break;
19434 case SSSR_REG_INFO_VER_1 :
19435 xmtaddress = dhdp->sssr_reg_info->rev1.
19436 mac_regs[core_idx].base_regs.xmtaddress;
19437 break;
19438 case SSSR_REG_INFO_VER_0 :
19439 xmtaddress = dhdp->sssr_reg_info->rev0.
19440 mac_regs[core_idx].base_regs.xmtaddress;
19441 break;
19442 default :
19443 DHD_ERROR(("invalid sssr_reg_ver"));
19444 return BCME_UNSUPPORTED;
19445 }
19446 }
19447
19448 return xmtaddress;
19449}
19450
19451uint
19452dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx)
19453{
19454 uint xmtdata = 0;
19455 uint8 num_d11cores;
19456
19457 num_d11cores = dhd_d11_slices_num_get(dhdp);
19458
19459	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
19460 if (core_idx < num_d11cores) {
19461 switch (dhdp->sssr_reg_info->rev2.version) {
19462 case SSSR_REG_INFO_VER_3 :
19463 /* intentional fall through */
19464 case SSSR_REG_INFO_VER_2 :
19465 xmtdata = dhdp->sssr_reg_info->rev2.
19466 mac_regs[core_idx].base_regs.xmtdata;
19467 break;
19468 case SSSR_REG_INFO_VER_1 :
19469 xmtdata = dhdp->sssr_reg_info->rev1.
19470 mac_regs[core_idx].base_regs.xmtdata;
19471 break;
19472 case SSSR_REG_INFO_VER_0 :
19473 xmtdata = dhdp->sssr_reg_info->rev0.
19474 mac_regs[core_idx].base_regs.xmtdata;
19475 break;
19476 default :
19477 DHD_ERROR(("invalid sssr_reg_ver"));
19478 return BCME_UNSUPPORTED;
19479 }
19480 }
19481
19482 return xmtdata;
19483}
19484
19485#ifdef DHD_SSSR_DUMP_BEFORE_SR
19486int
19487dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
19488{
19489 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
19490 dhd_pub_t *dhdp = &dhd_info->pub;
19491 int pos = 0, ret = BCME_ERROR;
19492 uint dig_buf_size = 0;
19493
19494 dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
19495
19496 if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
19497 ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
19498 NULL, user_buf, dig_buf_size, &pos);
19499 }
19500 return ret;
19501}
19502
19503int
19504dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
19505{
19506 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
19507 dhd_pub_t *dhdp = &dhd_info->pub;
19508 int pos = 0, ret = BCME_ERROR;
19509
19510 if (dhdp->sssr_d11_before[core] &&
19511 dhdp->sssr_d11_outofreset[core] &&
19512 (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
19513 ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
19514 NULL, user_buf, len, &pos);
19515 }
19516 return ret;
19517}
19518#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19519
19520int
19521dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
19522{
19523 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
19524 dhd_pub_t *dhdp = &dhd_info->pub;
19525 int pos = 0, ret = BCME_ERROR;
19526 uint dig_buf_size = 0;
19527
19528 dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
19529
19530 if (dhdp->sssr_dig_buf_after) {
19531 ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
19532 NULL, user_buf, dig_buf_size, &pos);
19533 }
19534 return ret;
19535}
19536
19537int
19538dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
19539{
19540 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
19541 dhd_pub_t *dhdp = &dhd_info->pub;
19542 int pos = 0, ret = BCME_ERROR;
19543
19544 if (dhdp->sssr_d11_after[core] &&
19545 dhdp->sssr_d11_outofreset[core]) {
19546 ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
19547 NULL, user_buf, len, &pos);
19548 }
19549 return ret;
19550}
19551
19552void
19553dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
19554{
19555 dhd_info_t *dhd = dhdinfo;
19556 dhd_pub_t *dhdp;
19557 int i;
19558#ifdef DHD_SSSR_DUMP_BEFORE_SR
19559 char before_sr_dump[128];
19560#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19561 char after_sr_dump[128];
19562 unsigned long flags = 0;
19563 uint dig_buf_size = 0;
19564 uint8 num_d11cores = 0;
19565 uint d11_buf_size = 0;
19566
19567 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
19568
19569 if (!dhd) {
19570 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
19571 return;
19572 }
19573
19574 dhdp = &dhd->pub;
19575
19576 DHD_GENERAL_LOCK(dhdp, flags);
19577 DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
19578 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
19579 DHD_GENERAL_UNLOCK(dhdp, flags);
19580 DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
19581 goto exit;
19582 }
19583 DHD_GENERAL_UNLOCK(dhdp, flags);
19584
19585 num_d11cores = dhd_d11_slices_num_get(dhdp);
19586
19587 for (i = 0; i < num_d11cores; i++) {
19588 /* Init file name */
19589#ifdef DHD_SSSR_DUMP_BEFORE_SR
19590 memset(before_sr_dump, 0, sizeof(before_sr_dump));
19591#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19592 memset(after_sr_dump, 0, sizeof(after_sr_dump));
19593
19594#ifdef DHD_SSSR_DUMP_BEFORE_SR
19595 snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
19596 "sssr_dump_core", i, "before_SR");
19597#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19598 snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
19599 "sssr_dump_core", i, "after_SR");
19600
19601 d11_buf_size = dhd_sssr_mac_buf_size(dhdp, i);
19602
19603#ifdef DHD_SSSR_DUMP_BEFORE_SR
19604 if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
19605 (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
19606 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
19607 d11_buf_size, before_sr_dump)) {
19608 DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
19609 __FUNCTION__));
19610 }
19611 }
19612#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19613
19614 if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
19615 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
19616 d11_buf_size, after_sr_dump)) {
19617 DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
19618 __FUNCTION__));
19619 }
19620 }
19621 }
19622
19623 dig_buf_size = dhd_sssr_dig_buf_size(dhdp);
19624
19625#ifdef DHD_SSSR_DUMP_BEFORE_SR
19626 if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
19627 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
19628 dig_buf_size, "sssr_dump_dig_before_SR")) {
19629 DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
19630 __FUNCTION__));
19631 }
19632 }
19633#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19634
19635 if (dhdp->sssr_dig_buf_after) {
19636 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
19637 dig_buf_size, "sssr_dump_dig_after_SR")) {
19638 DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
19639 __FUNCTION__));
19640 }
19641 }
19642
19643exit:
19644 DHD_GENERAL_LOCK(dhdp, flags);
19645 DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
19646 dhd_os_busbusy_wake(dhdp);
19647 DHD_GENERAL_UNLOCK(dhdp, flags);
19648}
19649
19650void
19651dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
19652{
19653#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
19654 dhdp->sssr_dump_mode = dump_mode;
19655#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
19656
19657	/*
19658	 * If the kernel does not have file write access enabled,
19659	 * then skip writing dumps to files.
19660	 * The dumps will instead be pushed to the HAL layer,
19661	 * which writes them into files.
19662	 */
19663#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
19664 return;
19665#else
19666	/*
19667	 * Call path: dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
19668	 * Without a workqueue -
19669	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT:
19670	 * these are invoked from their own handlers, not from interrupt context.
19671	 * With a workqueue - all other DUMP_TYPEs: dhd_mem_dump runs in the workqueue,
19672	 * so there is no need to dump SSSR in a separate workqueue.
19673	 */
19674 DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
19675 dhd_sssr_dump_to_file(dhdp->info);
19676#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
19677}
19678#endif /* DHD_SSSR_DUMP */
19679
19680#ifdef DHD_SDTC_ETB_DUMP
19681void
19682dhd_sdtc_etb_dump(dhd_pub_t *dhd)
19683{
19684 etb_info_t etb_info;
19685 uint8 *sdtc_etb_dump;
19686 uint8 *sdtc_etb_mempool;
19687 uint etb_dump_len;
19688 int ret = 0;
19689
19690 if (!dhd->sdtc_etb_inited) {
19691 DHD_ERROR(("%s, SDTC ETB dump not supported\n", __FUNCTION__));
19692 return;
19693 }
19694
19695 bzero(&etb_info, sizeof(etb_info));
19696
19697 if ((ret = dhd_bus_get_etb_info(dhd, dhd->etb_addr_info.etbinfo_addr, &etb_info))) {
19698 DHD_ERROR(("%s: failed to get etb info %d\n", __FUNCTION__, ret));
19699 return;
19700 }
19701
19702 if (etb_info.read_bytes == 0) {
19703		DHD_ERROR(("%s ETB is of zero size, hence do not collect SDTC ETB\n", __FUNCTION__));
19704 return;
19705 }
19706
19707 DHD_ERROR(("%s etb_info ver:%d len:%d rwp:%d etb_full:%d etb:addr:0x%x, len:%d\n",
19708 __FUNCTION__, etb_info.version, etb_info.len,
19709 etb_info.read_write_p, etb_info.etb_full,
19710 etb_info.addr, etb_info.read_bytes));
19711
19712 /*
19713 * etb mempool format = etb_info + etb
19714 */
19715 etb_dump_len = etb_info.read_bytes + sizeof(etb_info);
19716 if (etb_dump_len > DHD_SDTC_ETB_MEMPOOL_SIZE) {
19717		DHD_ERROR(("%s etb_dump_len: %d exceeds the allocated %d, hence cannot collect\n",
19718			__FUNCTION__, etb_dump_len, DHD_SDTC_ETB_MEMPOOL_SIZE));
19719 return;
19720 }
19721 sdtc_etb_mempool = dhd->sdtc_etb_mempool;
19722 memcpy(sdtc_etb_mempool, &etb_info, sizeof(etb_info));
19723 sdtc_etb_dump = sdtc_etb_mempool + sizeof(etb_info);
19724 if ((ret = dhd_bus_get_sdtc_etb(dhd, sdtc_etb_dump, etb_info.addr, etb_info.read_bytes))) {
19725		DHD_ERROR(("%s: failed to get SDTC ETB, ret: %d\n", __FUNCTION__, ret));
19726 return;
19727 }
19728
19729 if (write_dump_to_file(dhd, (uint8 *)sdtc_etb_mempool,
19730 etb_dump_len, "sdtc_etb_dump")) {
19731 DHD_ERROR(("%s: failed to dump sdtc_etb to file\n",
19732 __FUNCTION__));
19733 }
19734}
19735#endif /* DHD_SDTC_ETB_DUMP */
19736
19737#ifdef DHD_LOG_DUMP
19738static void
19739dhd_log_dump(void *handle, void *event_info, u8 event)
19740{
19741 dhd_info_t *dhd = handle;
19742 log_dump_type_t *type = (log_dump_type_t *)event_info;
19743
19744 if (!dhd || !type) {
19745 DHD_ERROR(("%s: dhd/type is NULL\n", __FUNCTION__));
19746 return;
19747 }
19748
19749#ifdef WL_CFG80211
19750 /* flush the fw preserve logs */
19751 wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
19752 FW_LOGSET_MASK_ALL);
19753#endif
19754
19755 /* there are currently 3 possible contexts from which
19756 * log dump can be scheduled -
19757 * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
19758 * 3.HEALTH CHECK event
19759 * The concise debug info buffer is a shared resource
19760 * and in case a trap is one of the contexts then both the
19761 * scheduled work queues need to run because trap data is
19762 * essential for debugging. Hence a mutex lock is acquired
19763 * before calling do_dhd_log_dump().
19764 */
19765 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
19766 dhd_os_logdump_lock(&dhd->pub);
19767 DHD_OS_WAKE_LOCK(&dhd->pub);
19768 if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
19769 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
19770 }
19771 DHD_OS_WAKE_UNLOCK(&dhd->pub);
19772 dhd_os_logdump_unlock(&dhd->pub);
19773}
19774
19775void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
19776{
19777 DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
19778
19779 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
19780 type, DHD_WQ_WORK_DHD_LOG_DUMP,
19781 dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
19782}
19783
19784static void
19785dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
19786{
19787#ifdef DHD_FW_COREDUMP
19788 if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
19789 (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
19790 (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
19791#ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
19792 (dhdp->op_mode & DHD_FLAG_MFG_MODE &&
19793 (dhdp->hang_count >= MAX_CONSECUTIVE_MFG_HANG_COUNT-1)) ||
19794#endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
19795 FALSE)
19796#else
19797 if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)
19798#endif
19799 {
19800#if defined(CONFIG_ARM64)
19801 DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
19802 name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
19803#elif defined(__ARM_ARCH_7A__)
19804 DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
19805 name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
19806#endif /* __ARM_ARCH_7A__ */
19807 }
19808}
19809
19810static void
19811dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
19812{
19813 int i;
19814 unsigned long wr_size = 0;
19815 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
19816 size_t log_size = 0;
19817 char buf_name[DHD_PRINT_BUF_NAME_LEN];
19818 dhd_dbg_ring_t *ring = NULL;
19819
19820 BCM_REFERENCE(ring);
19821
19822 for (i = 0; i < DLD_BUFFER_NUM; i++) {
19823 dld_buf = &g_dld_buf[i];
19824 log_size = (unsigned long)dld_buf->max -
19825 (unsigned long)dld_buf->buffer;
19826 if (dld_buf->wraparound) {
19827 wr_size = log_size;
19828 } else {
19829 wr_size = (unsigned long)dld_buf->present -
19830 (unsigned long)dld_buf->front;
19831 }
19832		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
19833		dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
19834		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
19835		dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
19836		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
19837		dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
19838		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
19839		dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
19840 }
19841
19842#ifdef DEBUGABILITY_ECNTRS_LOGGING
19843 /* periodic flushing of ecounters is NOT supported */
19844 if (*type == DLD_BUF_TYPE_ALL &&
19845 logdump_ecntr_enable &&
19846 dhdp->ecntr_dbg_ring) {
19847
19848 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
19849 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
19850 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
19851 LOG_DUMP_ECNTRS_MAX_BUFSIZE);
19852 }
19853#endif /* DEBUGABILITY_ECNTRS_LOGGING */
19854
19855#if defined(BCMPCIE)
19856 if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
19857 dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
19858 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
19859 }
19860#endif /* BCMPCIE */
19861
19862#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
19863 /* if health check event was received */
19864 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
19865 dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
19866 HEALTH_CHK_BUF_SIZE);
19867 }
19868#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
19869
19870 /* append the concise debug information */
19871 if (dhdp->concise_dbg_buf) {
19872 dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
19873 CONCISE_DUMP_BUFLEN);
19874 }
19875}
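/* Illustrative layout of a dhd_log_dump_buf as used above:
 *
 *   buffer/front                   present                      max
 *        |<--------- wr_size -------->|                          |
 *        [ logged data ............. ][ unwritten space ....... ]
 *
 * Once the write pointer has wrapped, 'wraparound' is set and the whole
 * [buffer, max) window is valid, so wr_size equals the full buffer size.
 */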
19876
19877#ifdef CUSTOMER_HW4_DEBUG
19878static void
19879dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
19880{
19881 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
19882 char *end = NULL;
19883 unsigned long plen = 0;
19884
19885 if (!bufptr || !len)
19886 return;
19887
19888 memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
19889 end = bufptr + len;
19890 while (bufptr < end) {
19891 if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
19892 memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
19893 tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
19894 printf("%s", tmp_buf);
19895 bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
19896 } else {
19897 plen = (unsigned long)end - (unsigned long)bufptr;
19898 memcpy(tmp_buf, bufptr, plen);
19899 tmp_buf[plen] = '\0';
19900 printf("%s", tmp_buf);
19901 bufptr += plen;
19902 }
19903 }
19904}
19905
19906static void
19907dhd_log_dump_print_tail(dhd_pub_t *dhdp,
19908 struct dhd_log_dump_buf *dld_buf,
19909 uint tail_len)
19910{
19911 char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
19912 unsigned long len_flush1 = 0, len_flush2 = 0;
19913 unsigned long flags = 0;
19914
19915	/* need to hold the lock before accessing the 'present' and 'front' ptrs */
19916 DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
19917 flush_ptr1 = dld_buf->present - tail_len;
19918 if (flush_ptr1 >= dld_buf->front) {
19919 /* tail content is within the buffer */
19920 flush_ptr2 = NULL;
19921 len_flush1 = tail_len;
19922 } else if (dld_buf->wraparound) {
19923		/* tail content spans the buffer length, i.e., wrap around */
19924 flush_ptr1 = dld_buf->front;
19925 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
19926 len_flush2 = (unsigned long)tail_len - len_flush1;
19927 flush_ptr2 = (char *)((unsigned long)dld_buf->max -
19928 (unsigned long)len_flush2);
19929 } else {
19930		/* amount of logs in the buffer is less than the tail size */
19931 flush_ptr1 = dld_buf->front;
19932 flush_ptr2 = NULL;
19933 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
19934 }
19935 DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
19936
19937 printf("\n================= LOG_DUMP tail =================\n");
19938 if (flush_ptr2) {
19939 dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
19940 }
19941 dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
19942 printf("\n===================================================\n");
19943}
19944#endif /* CUSTOMER_HW4_DEBUG */
19945
19946#ifdef DHD_SSSR_DUMP
19947int
19948dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
19949{
19950 int i = 0;
19951 uint dig_buf_size = 0;
19952
19953 DHD_ERROR(("%s\n", __FUNCTION__));
19954
19955 /* core 0 */
19956 i = 0;
19957#ifdef DHD_SSSR_DUMP_BEFORE_SR
19958 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
19959 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
19960
19961 arr_len[SSSR_C0_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
19962 DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
19963 arr_len[SSSR_C0_D11_BEFORE]));
19964#ifdef DHD_LOG_DUMP
19965 dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
19966 dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
19967#endif /* DHD_LOG_DUMP */
19968 }
19969#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19970 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
19971 arr_len[SSSR_C0_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
19972 DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
19973 arr_len[SSSR_C0_D11_AFTER]));
19974#ifdef DHD_LOG_DUMP
19975 dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
19976 dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
19977#endif /* DHD_LOG_DUMP */
19978 }
19979
19980 /* core 1 */
19981 i = 1;
19982#ifdef DHD_SSSR_DUMP_BEFORE_SR
19983 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
19984 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
19985 arr_len[SSSR_C1_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
19986 DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
19987 arr_len[SSSR_C1_D11_BEFORE]));
19988#ifdef DHD_LOG_DUMP
19989 dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
19990 dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
19991#endif /* DHD_LOG_DUMP */
19992 }
19993#endif /* DHD_SSSR_DUMP_BEFORE_SR */
19994 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
19995 arr_len[SSSR_C1_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
19996 DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
19997 arr_len[SSSR_C1_D11_AFTER]));
19998#ifdef DHD_LOG_DUMP
19999 dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
20000 dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
20001#endif /* DHD_LOG_DUMP */
20002 }
20003
20004 /* core 2 scan core */
20005 if (dhd->sssr_reg_info->rev2.version >= SSSR_REG_INFO_VER_2) {
20006 i = 2;
20007#ifdef DHD_SSSR_DUMP_BEFORE_SR
20008 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
20009 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
20010 arr_len[SSSR_C2_D11_BEFORE] = dhd_sssr_mac_buf_size(dhd, i);
20011 DHD_ERROR(("%s: arr_len[SSSR_C2_D11_BEFORE] : %d\n", __FUNCTION__,
20012 arr_len[SSSR_C2_D11_BEFORE]));
20013#ifdef DHD_LOG_DUMP
20014 dhd_print_buf_addr(dhd, "SSSR_C2_D11_BEFORE",
20015 dhd->sssr_d11_before[i], arr_len[SSSR_C2_D11_BEFORE]);
20016#endif /* DHD_LOG_DUMP */
20017 }
20018#endif /* DHD_SSSR_DUMP_BEFORE_SR */
20019 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
20020 arr_len[SSSR_C2_D11_AFTER] = dhd_sssr_mac_buf_size(dhd, i);
20021 DHD_ERROR(("%s: arr_len[SSSR_C2_D11_AFTER] : %d\n", __FUNCTION__,
20022 arr_len[SSSR_C2_D11_AFTER]));
20023#ifdef DHD_LOG_DUMP
20024 dhd_print_buf_addr(dhd, "SSSR_C2_D11_AFTER",
20025 dhd->sssr_d11_after[i], arr_len[SSSR_C2_D11_AFTER]);
20026#endif /* DHD_LOG_DUMP */
20027 }
20028 }
20029
20030 /* DIG core or VASIP */
20031 dig_buf_size = dhd_sssr_dig_buf_size(dhd);
20032#ifdef DHD_SSSR_DUMP_BEFORE_SR
20033 arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_dig_buf_before) ? dig_buf_size : 0;
20034 DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
20035 arr_len[SSSR_DIG_BEFORE]));
20036#ifdef DHD_LOG_DUMP
20037 if (dhd->sssr_dig_buf_before) {
20038 dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
20039 dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
20040 }
20041#endif /* DHD_LOG_DUMP */
20042#endif /* DHD_SSSR_DUMP_BEFORE_SR */
20043
20044 arr_len[SSSR_DIG_AFTER] = (dhd->sssr_dig_buf_after) ? dig_buf_size : 0;
20045 DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
20046 arr_len[SSSR_DIG_AFTER]));
20047#ifdef DHD_LOG_DUMP
20048 if (dhd->sssr_dig_buf_after) {
20049 dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
20050 dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
20051 }
20052#endif /* DHD_LOG_DUMP */
20053
20054 return BCME_OK;
20055}
20056
20057void
20058dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
20059{
20060 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20061 dhd_pub_t *dhdp = &dhd_info->pub;
20062
20063 if (dhdp->sssr_dump_collected) {
20064 dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
20065 }
20066}
20067#endif /* DHD_SSSR_DUMP */
20068
20069uint32
20070	dhd_get_time_str_len(void)
20071{
20072 char *ts = NULL, time_str[128];
20073
20074 ts = dhd_log_dump_get_timestamp();
20075 snprintf(time_str, sizeof(time_str),
20076 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
20077 return strlen(time_str);
20078}
20079
20080#if defined(BCMPCIE)
20081uint32
20082dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
20083{
20084 int length = 0;
20085 log_dump_section_hdr_t sec_hdr;
20086 dhd_info_t *dhd_info;
20087
20088 if (ndev) {
20089 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20090 dhdp = &dhd_info->pub;
20091 }
20092
20093 if (!dhdp)
20094 return length;
20095
20096 if (dhdp->extended_trap_data) {
20097 length = (strlen(EXT_TRAP_LOG_HDR)
20098 + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
20099 }
20100 return length;
20101}
20102#endif /* BCMPCIE */
20103
20104#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
20105uint32
20106dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
20107{
20108 int length = 0;
20109 log_dump_section_hdr_t sec_hdr;
20110 dhd_info_t *dhd_info;
20111
20112 if (ndev) {
20113 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20114 dhdp = &dhd_info->pub;
20115 }
20116
20117 if (!dhdp)
20118 return length;
20119
20120 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
20121 length = (strlen(HEALTH_CHK_LOG_HDR)
20122 + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
20123 }
20124 return length;
20125}
20126#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
20127
20128uint32
20129dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
20130{
20131 uint32 length = 0;
20132 log_dump_section_hdr_t sec_hdr;
20133 dhd_info_t *dhd_info;
20134 int remain_len = 0;
20135
20136 if (ndev) {
20137 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20138 dhdp = &dhd_info->pub;
20139 }
20140
20141 if (!dhdp)
20142 return length;
20143
20144 if (dhdp->concise_dbg_buf) {
20145 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20146 if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
20147 DHD_ERROR(("%s: error getting concise debug info !\n",
20148 __FUNCTION__));
20149 return length;
20150 }
20151
20152 length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len);
20153 }
20154
20155 length += (uint32)(strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr));
20156 return length;
20157}
20158
20159uint32
20160dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
20161{
20162 int length = 0;
20163 dhd_info_t *dhd_info;
20164
20165 if (ndev) {
20166 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20167 dhdp = &dhd_info->pub;
20168 }
20169
20170 if (!dhdp)
20171 return length;
20172
20173 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
20174 length = dhd_log_dump_cookie_len(dhdp);
20175 }
20176 return length;
20177
20178}
20179
20180#ifdef DHD_DUMP_PCIE_RINGS
20181uint32
20182dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
20183{
20184 uint32 length = 0;
20185 log_dump_section_hdr_t sec_hdr;
20186 dhd_info_t *dhd_info;
20187 uint16 h2d_flowrings_total;
20188 int remain_len = 0;
20189
20190 if (ndev) {
20191 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20192 dhdp = &dhd_info->pub;
20193 }
20194
20195 if (!dhdp)
20196 return length;
20197
20198 if (dhdp->concise_dbg_buf) {
20199 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20200 if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
20201 DHD_ERROR(("%s: error getting concise debug info !\n",
20202 __FUNCTION__));
20203 return length;
20204 }
20205
20206 length += (uint32)(CONCISE_DUMP_BUFLEN - remain_len);
20207 }
20208
20209 length += (uint32) strlen(FLOWRING_DUMP_HDR);
20210 length += (uint32) sizeof(sec_hdr);
20211 h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
20212 length += ((D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
20213 + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
20214 + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
20215 + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
20216 + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
20217#ifdef EWP_EDL
20218 + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
20219#else
20220 + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
20221 + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
20222#endif /* EWP_EDL */
20223
20224#if defined(DHD_HTPUT_TUNABLES)
20225 /* flowring lengths are different for HTPUT rings, handle accordingly */
20226 length += ((H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_htput_max_txpost(dhdp) *
20227 HTPUT_TOTAL_FLOW_RINGS) +
20228 (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) *
20229 (h2d_flowrings_total - HTPUT_TOTAL_FLOW_RINGS)));
20230#else
20231 length += (H2DRING_TXPOST_ITEMSIZE * dhd_prot_get_h2d_max_txpost(dhdp) *
20232 h2d_flowrings_total);
20233#endif /* DHD_HTPUT_TUNABLES */
20234
20235 return length;
20236}
20237#endif /* DHD_DUMP_PCIE_RINGS */
20238
20239#ifdef EWP_ECNTRS_LOGGING
20240uint32
20241dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
20242{
20243 dhd_info_t *dhd_info;
20244 log_dump_section_hdr_t sec_hdr;
20245 int length = 0;
20246 dhd_dbg_ring_t *ring;
20247
20248 if (ndev) {
20249 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20250 dhdp = &dhd_info->pub;
20251 }
20252
20253 if (!dhdp)
20254 return length;
20255
20256 if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
20257 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
20258 length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
20259 }
20260 return length;
20261}
20262#endif /* EWP_ECNTRS_LOGGING */
20263
20264int
20265dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20266 void *fp, uint32 len, int type, void *pos)
20267{
20268 int ret = BCME_OK;
20269 struct dhd_log_dump_buf *dld_buf;
20270 log_dump_section_hdr_t sec_hdr;
20271 dhd_info_t *dhd_info;
20272
20273 dld_buf = &g_dld_buf[type];
20274
20275 if (dev) {
20276 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20277 dhdp = &dhd_info->pub;
20278 } else if (!dhdp) {
20279 return BCME_ERROR;
20280 }
20281
20282 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
20283
20284 dhd_init_sec_hdr(&sec_hdr);
20285
20286 /* write the section header first */
20287 ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
20288 strlen(dld_hdrs[type].hdr_str), pos);
20289 if (ret < 0)
20290 goto exit;
20291 len -= (uint32)strlen(dld_hdrs[type].hdr_str);
20292 len -= (uint32)sizeof(sec_hdr);
20293 sec_hdr.type = dld_hdrs[type].sec_type;
20294 sec_hdr.length = len;
20295 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
20296 if (ret < 0)
20297 goto exit;
20298 ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
20299 if (ret < 0)
20300 goto exit;
20301
20302exit:
20303 return ret;
20304}
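/* Illustrative on-disk layout of each section written above:
 *
 *   +---------------------+------------------------+--------------------+
 *   | ASCII header string | log_dump_section_hdr_t | section payload    |
 *   | (hdr_str)           | (magic/type/len/time)  | (sec_hdr.length B) |
 *   +---------------------+------------------------+--------------------+
 *
 * The 'len' passed in covers all three parts; the two subtractions above
 * leave sec_hdr.length equal to just the payload size.
 */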
20305
20306static int
20307dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
20308{
20309 unsigned long flags = 0;
20310#ifdef EWP_EDL
20311 int i = 0;
20312#endif /* EWP_EDL */
20313 dhd_info_t *dhd_info = NULL;
20314
20315 BCM_REFERENCE(dhd_info);
20316
20317	/* if dhdp is NULL, it is extremely unlikely that a log dump will be
20318	 * scheduled, so not freeing 'type' here is OK; even if we wanted to
20319	 * free 'type' we could not do so, since 'dhdp->osh' is unavailable
20320	 * when dhdp is NULL
20321	 */
20322 if (!dhdp || !type) {
20323 if (dhdp) {
20324 DHD_GENERAL_LOCK(dhdp, flags);
20325 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
20326 dhd_os_busbusy_wake(dhdp);
20327 DHD_GENERAL_UNLOCK(dhdp, flags);
20328 }
20329 return BCME_ERROR;
20330 }
20331
20332#if defined(BCMPCIE)
20333 if (dhd_bus_get_linkdown(dhdp)) {
20334		/* As the link is down, do not collect any data over PCIe.
20335		 * Also return BCME_OK to the caller, so that the caller can
20336		 * dump all the outstanding data to the file
20337		 */
20338 return BCME_OK;
20339 }
20340#endif /* BCMPCIE */
20341
20342 dhd_info = (dhd_info_t *)dhdp->info;
20343 /* in case of trap get preserve logs from ETD */
20344#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
20345 if (dhdp->dongle_trap_occured &&
20346 dhdp->extended_trap_data) {
20347 dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
20348 &dhd_info->event_data);
20349 }
20350#endif /* BCMPCIE */
20351
20352 /* flush the event work items to get any fw events/logs
20353 * flush_work is a blocking call
20354 */
20355#ifdef SHOW_LOGTRACE
20356#ifdef EWP_EDL
20357 if (dhd_info->pub.dongle_edl_support) {
20358 /* wait till existing edl items are processed */
20359 dhd_flush_logtrace_process(dhd_info);
20360 /* dhd_flush_logtrace_process will ensure the work items in the ring
20361 * (EDL ring) from rd to wr are processed. But if wr had
20362 * wrapped around, only the work items from rd to ring-end are processed.
20363 * So to ensure that the work items at the
20364 * beginning of ring are also processed in the wrap around case, call
20365 * it twice
20366 */
20367 for (i = 0; i < 2; i++) {
20368 /* blocks till the edl items are processed */
20369 dhd_flush_logtrace_process(dhd_info);
20370 }
20371 } else {
20372 dhd_flush_logtrace_process(dhd_info);
20373 }
20374#else
20375 dhd_flush_logtrace_process(dhd_info);
20376#endif /* EWP_EDL */
20377#endif /* SHOW_LOGTRACE */
20378
20379#ifdef CUSTOMER_HW4_DEBUG
20380	/* print the last 'x' KB of preserve buffer data to the kmsg console;
20381	 * this is to address cases where debug_dump is not
20382	 * available for debugging
20383	 */
20384 dhd_log_dump_print_tail(dhdp,
20385 &g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
20386#endif /* CUSTOMER_HW4_DEBUG */
20387 return BCME_OK;
20388}
20389
20390int
20391dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
20392{
20393 dhd_info_t *dhd_info;
20394
20395 if (dev) {
20396 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20397 dhdp = &dhd_info->pub;
20398 }
20399
20400 if (!dhdp)
20401 return BCME_ERROR;
20402
20403 memset(dump_path, 0, size);
20404
20405 switch (dhdp->debug_dump_subcmd) {
20406 case CMD_UNWANTED:
20407 snprintf(dump_path, size, "%s",
20408 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
20409 DHD_DUMP_SUBSTR_UNWANTED);
20410 break;
20411 case CMD_DISCONNECTED:
20412 snprintf(dump_path, size, "%s",
20413 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
20414 DHD_DUMP_SUBSTR_DISCONNECTED);
20415 break;
20416 default:
20417 snprintf(dump_path, size, "%s",
20418 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
20419 }
20420
20421 if (!dhdp->logdump_periodic_flush) {
20422 get_debug_dump_time(dhdp->debug_dump_time_str);
20423 snprintf(dump_path + strlen(dump_path),
20424 size - strlen(dump_path),
20425 "_%s", dhdp->debug_dump_time_str);
20426 }
20427 return BCME_OK;
20428}
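/* Illustrative result (the prefix macros are platform-defined): for
 * CMD_UNWANTED with periodic flush disabled, dump_path becomes
 * <DHD_COMMON_DUMP_PATH><DHD_DEBUG_DUMP_TYPE><DHD_DUMP_SUBSTR_UNWANTED>
 * followed by "_<debug_dump_time_str>".
 */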
20429
20430uint32
20431dhd_get_dld_len(int log_type)
20432{
20433 unsigned long wr_size = 0;
20434 unsigned long buf_size = 0;
20435 unsigned long flags = 0;
20436 struct dhd_log_dump_buf *dld_buf;
20437 log_dump_section_hdr_t sec_hdr;
20438
20439 /* calculate the length of the log */
20440 dld_buf = &g_dld_buf[log_type];
20441 buf_size = (unsigned long)dld_buf->max -
20442 (unsigned long)dld_buf->buffer;
20443
20444 if (dld_buf->wraparound) {
20445 wr_size = buf_size;
20446 } else {
20447		/* need to hold the lock before accessing the 'present' and 'front' ptrs */
20448 DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
20449 wr_size = (unsigned long)dld_buf->present -
20450 (unsigned long)dld_buf->front;
20451 DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
20452 }
20453 return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
20454}
20455
20456static void
20457dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
20458{
20459 char *ts = NULL;
20460 memset(time_str, 0, size);
20461 ts = dhd_log_dump_get_timestamp();
20462 snprintf(time_str, size,
20463 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
20464}
20465
20466int
20467dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
20468{
20469 char *ts = NULL;
20470 int ret = 0;
20471 char time_str[128];
20472
20473 memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
20474 ts = dhd_log_dump_get_timestamp();
20475 snprintf(time_str, sizeof(time_str),
20476 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
20477
20478 /* write the timestamp hdr to the file first */
20479 ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
20480 if (ret < 0) {
20481 DHD_ERROR(("write file error, err = %d\n", ret));
20482 }
20483 return ret;
20484}
20485
20486#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
20487int
20488dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20489 void *fp, uint32 len, void *pos)
20490{
20491 int ret = BCME_OK;
20492 log_dump_section_hdr_t sec_hdr;
20493 dhd_info_t *dhd_info;
20494
20495 if (dev) {
20496 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20497 dhdp = &dhd_info->pub;
20498 }
20499
20500 if (!dhdp)
20501 return BCME_ERROR;
20502
20503 dhd_init_sec_hdr(&sec_hdr);
20504
20505 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
20506 /* write the section header first */
20507 ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
20508 strlen(HEALTH_CHK_LOG_HDR), pos);
20509 if (ret < 0)
20510 goto exit;
20511
20512 len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
20513 sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
20514 sec_hdr.length = HEALTH_CHK_BUF_SIZE;
20515 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
20516 if (ret < 0)
20517 goto exit;
20518
20519 len -= (uint32)sizeof(sec_hdr);
20520 /* write the log */
20521 ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
20522 user_buf, len, pos);
20523 if (ret < 0)
20524 goto exit;
20525 }
20526exit:
20527 return ret;
20528}
20529#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
20530
20531#if defined(BCMPCIE)
20532int
20533dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20534 void *fp, uint32 len, void *pos)
20535{
20536 int ret = BCME_OK;
20537 log_dump_section_hdr_t sec_hdr;
20538 dhd_info_t *dhd_info;
20539
20540 if (dev) {
20541 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20542 dhdp = &dhd_info->pub;
20543 }
20544
20545 if (!dhdp)
20546 return BCME_ERROR;
20547
20548 dhd_init_sec_hdr(&sec_hdr);
20549
20550 /* append extended trap data to the file in case of traps */
20551 if (dhdp->dongle_trap_occured &&
20552 dhdp->extended_trap_data) {
20553 /* write the section header first */
20554 ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
20555 strlen(EXT_TRAP_LOG_HDR), pos);
20556 if (ret < 0)
20557 goto exit;
20558
20559 len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
20560 sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
20561 sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
20562 ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
20563 if (ret < 0)
20564 goto exit;
20565
20566 len -= (uint32)sizeof(sec_hdr);
20567 /* write the log */
20568 ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
20569 user_buf, len, pos);
20570 if (ret < 0)
20571 goto exit;
20572 }
20573exit:
20574 return ret;
20575}
20576#endif /* BCMPCIE */
20577
20578int
20579dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20580 void *fp, uint32 len, void *pos)
20581{
20582 int ret = BCME_OK;
20583 log_dump_section_hdr_t sec_hdr;
20584 dhd_info_t *dhd_info;
20585
20586 if (dev) {
20587 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20588 dhdp = &dhd_info->pub;
20589 }
20590
20591 if (!dhdp)
20592 return BCME_ERROR;
20593
20594 dhd_init_sec_hdr(&sec_hdr);
20595
20596 ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
20597 if (ret < 0)
20598 goto exit;
20599
20600 len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
20601 sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
20602 sec_hdr.length = len;
20603 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
20604 if (ret < 0)
20605 goto exit;
20606
20607 len -= (uint32)sizeof(sec_hdr);
20608
20609 if (dhdp->concise_dbg_buf) {
20610 dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20611 ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
20612 if (ret < 0)
20613 goto exit;
20614 }
20615
20616exit:
20617 return ret;
20618}
20619
20620int
20621dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20622 void *fp, uint32 len, void *pos)
20623{
20624 int ret = BCME_OK;
20625 dhd_info_t *dhd_info;
20626
20627 if (dev) {
20628 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20629 dhdp = &dhd_info->pub;
20630 }
20631
20632 if (!dhdp)
20633 return BCME_ERROR;
20634
20635 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
20636 ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
20637 }
20638 return ret;
20639}
20640
20641#ifdef DHD_DUMP_PCIE_RINGS
20642int
20643dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20644 void *fp, uint32 len, void *pos)
20645{
20646 log_dump_section_hdr_t sec_hdr;
20647 int ret = BCME_OK;
20648 int remain_len = 0;
20649 dhd_info_t *dhd_info;
20650
20651 if (dev) {
20652 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20653 dhdp = &dhd_info->pub;
20654 }
20655
20656 if (!dhdp)
20657 return BCME_ERROR;
20658
20659 dhd_init_sec_hdr(&sec_hdr);
20660
20661 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20662 if (remain_len <= 0 || remain_len >= CONCISE_DUMP_BUFLEN) {
20663 DHD_ERROR(("%s: error getting concise debug info !\n",
20664 __FUNCTION__));
20665 return BCME_ERROR;
20666 }
20667 memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
20668
20669 /* write the section header first */
20670 ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
20671 strlen(FLOWRING_DUMP_HDR), pos);
20672 if (ret < 0)
20673 goto exit;
20674
20675 /* Write the ring summary */
20676 ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
20677 (CONCISE_DUMP_BUFLEN - remain_len), pos);
20678 if (ret < 0)
20679 goto exit;
20680
20681 sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
20682 sec_hdr.length = len;
20683 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
20684 if (ret < 0)
20685 goto exit;
20686
20687 /* write the log */
20688 ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
20689 if (ret < 0)
20690 goto exit;
20691
20692exit:
20693 return ret;
20694}
20695#endif /* DHD_DUMP_PCIE_RINGS */
20696
20697#ifdef EWP_ECNTRS_LOGGING
20698int
20699dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20700 void *fp, uint32 len, void *pos)
20701{
20702 log_dump_section_hdr_t sec_hdr;
20703 int ret = BCME_OK;
20704 dhd_info_t *dhd_info;
20705
20706 if (dev) {
20707 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20708 dhdp = &dhd_info->pub;
20709 }
20710
20711 if (!dhdp)
20712 return BCME_ERROR;
20713
20714 dhd_init_sec_hdr(&sec_hdr);
20715
20716 if (logdump_ecntr_enable &&
20717 dhdp->ecntr_dbg_ring) {
20718 sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
20719 ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
20720 user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
20721 }
20722 return ret;
20723
20724}
20725#endif /* EWP_ECNTRS_LOGGING */
20726
20727#ifdef EWP_RTT_LOGGING
20728int
20729dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20730 void *fp, uint32 len, void *pos)
20731{
20732 log_dump_section_hdr_t sec_hdr;
20733 int ret = BCME_OK;
20734 dhd_info_t *dhd_info;
20735
20736 if (dev) {
20737 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20738 dhdp = &dhd_info->pub;
20739 }
20740
20741 if (!dhdp)
20742 return BCME_ERROR;
20743
20744 dhd_init_sec_hdr(&sec_hdr);
20745
20746 if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
20747 ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
20748 user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
20749 }
20750 return ret;
20751
20752}
20753#endif /* EWP_RTT_LOGGING */
20754
20755#ifdef DHD_STATUS_LOGGING
20756int
20757dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
20758 void *fp, uint32 len, void *pos)
20759{
20760 dhd_info_t *dhd_info;
20761
20762 if (dev) {
20763 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
20764 dhdp = &dhd_info->pub;
20765 }
20766
20767 if (!dhdp) {
20768 return BCME_ERROR;
20769 }
20770
20771 return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
20772}
20773
20774uint32
20775dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
20776{
20777 dhd_info_t *dhd_info;
20778 uint32 length = 0;
20779
20780 if (ndev) {
20781 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
20782 dhdp = &dhd_info->pub;
20783 }
20784
20785 if (dhdp) {
20786 length = dhd_statlog_get_logbuf_len(dhdp);
20787 }
20788
20789 return length;
20790}
20791#endif /* DHD_STATUS_LOGGING */
20792
20793void
20794dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
20795{
20796 /* prep the section header */
20797 memset(sec_hdr, 0, sizeof(*sec_hdr));
20798 sec_hdr->magic = LOG_DUMP_MAGIC;
20799 sec_hdr->timestamp = local_clock();
20800}
20801
20802/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
20803static int
20804do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
20805{
20806 int ret = 0, i = 0;
20807 struct file *fp = NULL;
20808 mm_segment_t old_fs;
20809 loff_t pos = 0;
20810 char dump_path[128];
20811 uint32 file_mode;
20812 unsigned long flags = 0;
20813 size_t log_size = 0;
20814 size_t fspace_remain = 0;
20815 struct kstat stat;
20816 char time_str[128];
20817 unsigned int len = 0;
20818 log_dump_section_hdr_t sec_hdr;
20819
20820 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
20821
20822 DHD_GENERAL_LOCK(dhdp, flags);
20823 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
20824 DHD_GENERAL_UNLOCK(dhdp, flags);
20825 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
20826 goto exit1;
20827 }
20828 DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
20829 DHD_GENERAL_UNLOCK(dhdp, flags);
20830
20831 if ((ret = dhd_log_flush(dhdp, type)) < 0) {
20832 goto exit1;
20833 }
20834 /* change to KERNEL_DS address limit */
20835 old_fs = get_fs();
20836 set_fs(KERNEL_DS);
20837
20838 dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
20839
20840 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
20841 DHD_ERROR(("DHD version: %s\n", dhd_version));
20842 DHD_ERROR(("F/W version: %s\n", fw_version));
20843
20844 dhd_log_dump_buf_addr(dhdp, type);
20845
20846 dhd_get_time_str(dhdp, time_str, 128);
20847
20848 /* if this is the first time after dhd is loaded,
20849 * or, if periodic flush is disabled, clear the log file
20850 */
20851 if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
20852 file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
20853 else
20854 file_mode = O_CREAT | O_RDWR | O_SYNC;
20855
20856 fp = filp_open(dump_path, file_mode, 0664);
20857 if (IS_ERR(fp)) {
20858		/* On an installed Android image, try the '/data' directory */
20859#if defined(CONFIG_X86)
20860		DHD_ERROR(("%s: file open error on installed Android image, trying /data...\n",
20861			__FUNCTION__));
20862 snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
20863 if (!dhdp->logdump_periodic_flush) {
20864 snprintf(dump_path + strlen(dump_path),
20865 sizeof(dump_path) - strlen(dump_path),
20866 "_%s", dhdp->debug_dump_time_str);
20867 }
20868 fp = filp_open(dump_path, file_mode, 0664);
20869 if (IS_ERR(fp)) {
20870 ret = PTR_ERR(fp);
20871 DHD_ERROR(("open file error, err = %d\n", ret));
20872 goto exit2;
20873 }
20874 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
20875#else
20876 ret = PTR_ERR(fp);
20877 DHD_ERROR(("open file error, err = %d\n", ret));
20878 goto exit2;
20879#endif /* CONFIG_X86 */
20880 }
20881
20882 ret = vfs_stat(dump_path, &stat);
20883 if (ret < 0) {
20884 DHD_ERROR(("file stat error, err = %d\n", ret));
20885 goto exit2;
20886 }
20887
20888	/* if someone else has changed the file */
20889 if (dhdp->last_file_posn != 0 &&
20890 stat.size < dhdp->last_file_posn) {
20891 dhdp->last_file_posn = 0;
20892 }
20893
20894	/* XXX: periodic flush is disabled by default; if enabled,
20895	 * only periodic flushing of the 'GENERAL' log dump buffer
20896	 * is supported. It is not recommended to turn on periodic
20897	 * flushing, except for developer unit tests.
20898	 */
20899 if (dhdp->logdump_periodic_flush) {
20900 log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
20901 /* calculate the amount of space required to dump all logs */
20902 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
20903 if (*type != DLD_BUF_TYPE_ALL && i != *type)
20904 continue;
20905
20906 if (g_dld_buf[i].wraparound) {
20907 log_size += (unsigned long)g_dld_buf[i].max
20908 - (unsigned long)g_dld_buf[i].buffer;
20909 } else {
20910 DHD_LOG_DUMP_BUF_LOCK(&g_dld_buf[i].lock, flags);
20911 log_size += (unsigned long)g_dld_buf[i].present -
20912 (unsigned long)g_dld_buf[i].front;
20913 DHD_LOG_DUMP_BUF_UNLOCK(&g_dld_buf[i].lock, flags);
20914 }
20915 log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
20916
20917 if (*type != DLD_BUF_TYPE_ALL && i == *type)
20918 break;
20919 }
20920
20921 ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
20922 if (ret < 0) {
20923 DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
20924 goto exit2;
20925 }
20926 pos = fp->f_pos;
20927
20928 /* if the max file size is reached, wrap around to beginning of the file
20929 * we're treating the file as a large ring buffer
20930 */
20931 fspace_remain = logdump_max_filesize - pos;
20932 if (log_size > fspace_remain) {
20933 fp->f_pos -= pos;
20934 pos = fp->f_pos;
20935 }
20936 }
20937
20938 dhd_print_time_str(0, fp, len, &pos);
20939
20940 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
20941
20942 if (*type != DLD_BUF_TYPE_ALL && i != *type)
20943 continue;
20944
20945 len = dhd_get_dld_len(i);
20946 dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
20947 if (*type != DLD_BUF_TYPE_ALL)
20948 break;
20949 }
20950
20951#ifdef EWP_ECNTRS_LOGGING
20952 /* periodic flushing of ecounters is NOT supported */
20953 if (*type == DLD_BUF_TYPE_ALL &&
20954 logdump_ecntr_enable &&
20955 dhdp->ecntr_dbg_ring) {
20956 dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
20957 fp, (unsigned long *)&pos,
20958 &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
20959 }
20960#endif /* EWP_ECNTRS_LOGGING */
20961
20962#ifdef DHD_STATUS_LOGGING
20963 if (dhdp->statlog) {
20964 /* write the statlog */
20965 len = dhd_get_status_log_len(NULL, dhdp);
20966 if (len) {
20967 if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
20968 len, &pos) < 0) {
20969 goto exit2;
20970 }
20971 }
20972 }
20973#endif /* DHD_STATUS_LOGGING */
20974
20975#ifdef DHD_STATUS_LOGGING
20976 if (dhdp->statlog) {
20977 dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
20978 dhd_statlog_get_logbuf_len(dhdp));
20979 }
20980#endif /* DHD_STATUS_LOGGING */
20981
20982#ifdef EWP_RTT_LOGGING
20983 /* periodic flushing of rtt log is NOT supported */
20984 if (*type == DLD_BUF_TYPE_ALL &&
20985 logdump_rtt_enable &&
20986 dhdp->rtt_dbg_ring) {
20987 dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
20988 fp, (unsigned long *)&pos,
20989 &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
20990 }
20991#endif /* EWP_RTT_LOGGING */
20992
20993#ifdef BCMPCIE
20994 len = dhd_get_ext_trap_len(NULL, dhdp);
20995 if (len) {
20996 if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
20997 goto exit2;
20998 }
20999#endif /* BCMPCIE */
21000
21001#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
21002 len = dhd_get_health_chk_len(NULL, dhdp);
21003 if (len) {
21004 if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
21005 goto exit2;
21006 }
21007#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
21008
21009 len = dhd_get_dhd_dump_len(NULL, dhdp);
21010 if (len) {
21011 if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
21012 goto exit2;
21013 }
21014
21015 len = dhd_get_cookie_log_len(NULL, dhdp);
21016 if (len) {
21017 if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
21018 goto exit2;
21019 }
21020
21021#ifdef DHD_DUMP_PCIE_RINGS
21022 len = dhd_get_flowring_len(NULL, dhdp);
21023 if (len) {
21024 if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
21025 goto exit2;
21026 }
21027#endif
21028
21029 if (dhdp->logdump_periodic_flush) {
21030 /* store the last position written to in the file for future use */
21031 dhdp->last_file_posn = pos;
21032 }
21033
21034exit2:
21035 if (!IS_ERR(fp) && fp != NULL) {
21036 filp_close(fp, NULL);
21037 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
21038 __FUNCTION__, dump_path));
21039 }
21040 set_fs(old_fs);
21041exit1:
21042 if (type) {
21043 MFREE(dhdp->osh, type, sizeof(*type));
21044 }
21045 DHD_GENERAL_LOCK(dhdp, flags);
21046 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
21047 dhd_os_busbusy_wake(dhdp);
21048 DHD_GENERAL_UNLOCK(dhdp, flags);
21049
21050#ifdef DHD_DUMP_MNGR
21051 if (ret >= 0) {
21052 dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
21053 }
21054#endif /* DHD_DUMP_MNGR */
21055
21056 return (ret < 0) ? BCME_ERROR : BCME_OK;
21057}
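/* Illustrative overall layout of the debug_dump file assembled above:
 * timestamp header, DLD log buffers, then (when the respective features
 * are enabled) ecounters, status log, RTT log, extended trap data,
 * health-check data, DHD dump, logdump cookies and the flowring dump.
 */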
21058#endif /* DHD_LOG_DUMP */
21059
21060/* This function writes data to the file pointed by fp, OR
21061 * copies data to the user buffer sent by upper layer(HAL).
21062 */
21063int
21064dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos)
21065{
21066 int ret = BCME_OK;
21067
21068 if (fp) {
21069 ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
21070 if (ret < 0) {
21071 DHD_ERROR(("write file error, err = %d\n", ret));
21072 goto exit;
21073 }
21074 } else {
21075#ifdef CONFIG_COMPAT
21076#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
21077 if (in_compat_syscall())
21078#else
21079 if (is_compat_task())
21080#endif /* LINUX_VER >= 4.6 */
21081 {
21082 void * usr_ptr = compat_ptr((uintptr_t) user_buf);
21083 ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
21084 mem_buf, buf_len);
21085 if (ret) {
21086 DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
21087 goto exit;
21088 }
21089 }
21090 else
21091#endif /* CONFIG_COMPAT */
21092 {
21093 ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
21094 mem_buf, buf_len);
21095 if (ret) {
21096 DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
21097 goto exit;
21098 }
21099 }
21100 (*(int *)pos) += buf_len;
21101 }
21102exit:
21103 return ret;
21104}
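/* Illustrative sketch (not part of the driver) of the two export paths:
 * with a file pointer, 'pos' is advanced as a loff_t by vfs_write();
 * with a user buffer, 'pos' is an int offset advanced by the function.
 */
static int
example_export(struct file *fp, const void *user_buf)
{
	char msg[] = "example section\n";
	loff_t file_pos = 0;	/* kernel-file path position */
	int buf_pos = 0;	/* user-buffer path offset */
	int ret;

	/* write to a kernel file */
	ret = dhd_export_debug_data(msg, fp, NULL, sizeof(msg) - 1, &file_pos);
	if (ret < 0)
		return ret;
	/* or copy to a user-space buffer supplied by the HAL */
	return dhd_export_debug_data(msg, NULL, user_buf, sizeof(msg) - 1, &buf_pos);
}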
21105
21106/*
21107 * This call is to get the memdump size so that,
21108 * halutil can alloc that much buffer in user space.
21109 */
21110int
21111dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
21112{
21113 int ret = BCME_OK;
21114 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
21115 dhd_pub_t *dhdp = &dhd->pub;
21116
21117 if (dhdp->busstate == DHD_BUS_DOWN) {
21118 DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
21119 return BCME_ERROR;
21120 }
21121
21122 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
21123 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
21124 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
21125 return BCME_ERROR;
21126 }
21127#ifdef DHD_PCIE_RUNTIMEPM
21128 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
21129#endif /* DHD_PCIE_RUNTIMEPM */
21130 ret = dhd_common_socram_dump(dhdp);
21131 if (ret == BCME_OK) {
21132 *dump_size = dhdp->soc_ram_length;
21133 }
21134 return ret;
21135}
21136
21137/*
21138 * This is to get the actual memdup after getting the memdump size
21139 */
21140int
21141dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
21142{
21143 int ret = BCME_OK;
21144 int orig_len = 0;
21145 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
21146 dhd_pub_t *dhdp = &dhd->pub;
21147 if (buf == NULL)
21148 return BCME_ERROR;
21149 orig_len = *size;
21150 if (dhdp->soc_ram) {
21151 if (orig_len >= dhdp->soc_ram_length) {
21152 *buf = dhdp->soc_ram;
21153 *size = dhdp->soc_ram_length;
21154 } else {
21155 ret = BCME_BUFTOOSHORT;
21156			DHD_ERROR(("The buffer is too short to save the memory"
21157				" dump of length %d\n", dhdp->soc_ram_length));
21158 }
21159 } else {
21160		DHD_ERROR(("socram dump is not ready yet\n"));
21161 ret = BCME_NOTREADY;
21162 }
21163 return ret;
21164}
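/* Illustrative sketch (not part of the driver) of the two-step handshake
 * described above: query the dump size first, then fetch the dump pointer.
 */
static int
example_fetch_socram(struct net_device *ndev)
{
	uint32 size = 0;
	char *buf = NULL;
	int ret;

	ret = dhd_os_socram_dump(ndev, &size);	/* step 1: collect + size */
	if (ret != BCME_OK)
		return ret;
	/* step 2: 'size' is in (capacity) and out (actual dump length) */
	return dhd_os_get_socram_dump(ndev, &buf, &size);
}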
21165
21166#ifdef EWP_RTT_LOGGING
21167uint32
21168dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
21169{
21170 dhd_info_t *dhd_info;
21171 log_dump_section_hdr_t sec_hdr;
21172 int length = 0;
21173 dhd_dbg_ring_t *ring;
21174
21175 if (ndev) {
21176 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
21177 dhdp = &dhd_info->pub;
21178 }
21179
21180 if (!dhdp)
21181 return length;
21182
21183 if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
21184 ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
21185 length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
21186 }
21187 return length;
21188}
21189#endif /* EWP_RTT_LOGGING */
21190
21191int
21192dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
21193{
21194 char *fw_str;
21195
21196 if (size == 0)
21197 return BCME_BADARG;
21198
21199 fw_str = strstr(info_string, "Firmware: ");
21200 if (fw_str == NULL) {
21201 return BCME_ERROR;
21202 }
21203
21204 bzero(*buf, size);
21205 if (dhd_ver) {
21206 strlcpy(*buf, dhd_version, size);
21207 } else {
21208 strlcpy(*buf, fw_str, size);
21209 }
21210 return BCME_OK;
21211}
21212
21213#ifdef DHD_PKT_LOGGING
21214int
21215dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
21216{
21217 int ret = BCME_OK;
21218 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
21219 dhd_pub_t *dhdp = &dhd->pub;
21220 if (user_buf == NULL) {
21221 DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
21222 return BCME_ERROR;
21223 }
21224
21225 ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
21226 if (ret < 0) {
21227		DHD_ERROR(("%s(): failed to dump pktlog, err = %d\n", __FUNCTION__, ret));
21228 return ret;
21229 }
21230 return ret;
21231}
21232
21233uint32
21234dhd_os_get_pktlog_dump_size(struct net_device *dev)
21235{
21236 uint32 size = 0;
21237 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
21238 dhd_pub_t *dhdp = &dhd->pub;
21239
21240 size = dhd_pktlog_get_dump_length(dhdp);
21241 if (size == 0) {
21242		DHD_ERROR(("%s(): failed to get pktlog size, size = %d\n", __FUNCTION__, size));
21243 }
21244 return size;
21245}
21246
21247void
21248dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
21249{
21250 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
21251 dhd_pub_t *dhdp = &dhd->pub;
21252
21253 dhd_pktlog_get_filename(dhdp, dump_path, len);
21254}
21255#endif /* DHD_PKT_LOGGING */
21256#ifdef DNGL_AXI_ERROR_LOGGING
21257int
21258dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
21259{
21260 int ret = BCME_OK;
21261 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
21262 dhd_pub_t *dhdp = &dhd->pub;
21263 loff_t pos = 0;
21264 if (user_buf == NULL) {
21265 DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
21266 return BCME_ERROR;
21267 }
21268
21269 ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
21270 NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
21271
21272 if (ret < 0) {
21273		DHD_ERROR(("%s(): failed to dump axi error, err = %d\n", __FUNCTION__, ret));
21274 return ret;
21275 }
21276 return ret;
21277}
21278
21279int
21280dhd_os_get_axi_error_dump_size(struct net_device *dev)
21281{
21282 int size = -1;
21283
21284 size = sizeof(dhd_axi_error_dump_t);
21285 if (size < 0) {
21286 DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
21287 }
21288 return size;
21289}
21290
21291void
21292dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
21293{
21294 snprintf(dump_path, len, "%s",
21295 DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
21296}
21297#endif /* DNGL_AXI_ERROR_LOGGING */
21298
21299bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
21300{
21301 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
21302}
21303
21304#ifdef DHD_L2_FILTER
21305arp_table_t*
21306dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
21307{
21308 dhd_info_t *dhd = dhdp->info;
21309 dhd_if_t *ifp;
21310
21311 ASSERT(bssidx < DHD_MAX_IFS);
21312
21313 ifp = dhd->iflist[bssidx];
21314 return ifp->phnd_arp_table;
21315}
21316
21317int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
21318{
21319 dhd_info_t *dhd = dhdp->info;
21320 dhd_if_t *ifp;
21321
21322 ASSERT(idx < DHD_MAX_IFS);
21323
21324 ifp = dhd->iflist[idx];
21325
21326 if (ifp)
21327 return ifp->parp_enable;
21328 else
21329 return FALSE;
21330}
21331
21332/* Set interface specific proxy arp configuration */
21333int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
21334{
21335 dhd_info_t *dhd = dhdp->info;
21336 dhd_if_t *ifp;
21337 ASSERT(idx < DHD_MAX_IFS);
21338 ifp = dhd->iflist[idx];
21339
21340 if (!ifp)
21341 return BCME_ERROR;
21342
21343 /* At present all 3 variables are being
21344 * handled at once
21345 */
21346 ifp->parp_enable = val;
21347 ifp->parp_discard = val;
21348 ifp->parp_allnode = val;
21349
21350 /* Flush ARP entries when disabled */
21351 if (val == FALSE) {
21352 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
21353 FALSE, dhdp->tickcnt);
21354 }
21355 return BCME_OK;
21356}
21357
21358bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
21359{
21360 dhd_info_t *dhd = dhdp->info;
21361 dhd_if_t *ifp;
21362
21363 ASSERT(idx < DHD_MAX_IFS);
21364
21365 ifp = dhd->iflist[idx];
21366
21367 ASSERT(ifp);
21368 return ifp->parp_discard;
21369}
21370
21371bool
21372dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
21373{
21374 dhd_info_t *dhd = dhdp->info;
21375 dhd_if_t *ifp;
21376
21377 ASSERT(idx < DHD_MAX_IFS);
21378
21379 ifp = dhd->iflist[idx];
21380
21381 ASSERT(ifp);
21382
21383 return ifp->parp_allnode;
21384}
21385
21386int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
21387{
21388 dhd_info_t *dhd = dhdp->info;
21389 dhd_if_t *ifp;
21390
21391 ASSERT(idx < DHD_MAX_IFS);
21392
21393 ifp = dhd->iflist[idx];
21394
21395 ASSERT(ifp);
21396
21397 return ifp->dhcp_unicast;
21398}
21399
21400int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
21401{
21402 dhd_info_t *dhd = dhdp->info;
21403 dhd_if_t *ifp;
21404 ASSERT(idx < DHD_MAX_IFS);
21405 ifp = dhd->iflist[idx];
21406
21407 ASSERT(ifp);
21408
21409 ifp->dhcp_unicast = val;
21410 return BCME_OK;
21411}
21412
21413int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
21414{
21415 dhd_info_t *dhd = dhdp->info;
21416 dhd_if_t *ifp;
21417
21418 ASSERT(idx < DHD_MAX_IFS);
21419
21420 ifp = dhd->iflist[idx];
21421
21422 ASSERT(ifp);
21423
21424 return ifp->block_ping;
21425}
21426
21427int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
21428{
21429 dhd_info_t *dhd = dhdp->info;
21430 dhd_if_t *ifp;
21431 ASSERT(idx < DHD_MAX_IFS);
21432 ifp = dhd->iflist[idx];
21433
21434 ASSERT(ifp);
21435
21436 ifp->block_ping = val;
21437 /* Disable rx_pkt_chain feature for interface if block_ping option is
21438 * enabled
21439 */
21440 dhd_update_rx_pkt_chainable_state(dhdp, idx);
21441 return BCME_OK;
21442}
21443
21444int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
21445{
21446 dhd_info_t *dhd = dhdp->info;
21447 dhd_if_t *ifp;
21448
21449 ASSERT(idx < DHD_MAX_IFS);
21450
21451 ifp = dhd->iflist[idx];
21452
21453 ASSERT(ifp);
21454
21455 return ifp->grat_arp;
21456}
21457
21458int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
21459{
21460 dhd_info_t *dhd = dhdp->info;
21461 dhd_if_t *ifp;
21462 ASSERT(idx < DHD_MAX_IFS);
21463 ifp = dhd->iflist[idx];
21464
21465 ASSERT(ifp);
21466
21467 ifp->grat_arp = val;
21468
21469 return BCME_OK;
21470}
21471
21472int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
21473{
21474 dhd_info_t *dhd = dhdp->info;
21475 dhd_if_t *ifp;
21476
21477 ASSERT(idx < DHD_MAX_IFS);
21478
21479 ifp = dhd->iflist[idx];
21480
21481 ASSERT(ifp);
21482
21483 return ifp->block_tdls;
21484}
21485
21486int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
21487{
21488 dhd_info_t *dhd = dhdp->info;
21489 dhd_if_t *ifp;
21490 ASSERT(idx < DHD_MAX_IFS);
21491 ifp = dhd->iflist[idx];
21492
21493 ASSERT(ifp);
21494
21495 ifp->block_tdls = val;
21496
21497 return BCME_OK;
21498}
21499#endif /* DHD_L2_FILTER */
21500
21501#ifdef DHD_DEBUG_PAGEALLOC
21502	/* XXX Additional kernel implementation is needed to use this function at
21503	 * the top of the check_poison_mem() function in the mm/debug-pagealloc.c file.
21504	 * Please check whether the code below is implemented in your Linux kernel first.
21505 *
21506 * - mm/debug-pagealloc.c
21507 *
21508 * // for DHD_DEBUG_PAGEALLOC
21509 * typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, uint addr_len);
21510 * page_corrupt_cb_t corrupt_cb = NULL;
21511 * void *corrupt_cb_handle = NULL;
21512 *
21513 * void register_page_corrupt_cb(page_corrupt_cb_t cb, void *handle)
21514 * {
21515 * corrupt_cb = cb;
21516 * corrupt_cb_handle = handle;
21517 * }
21518 * EXPORT_SYMBOL(register_page_corrupt_cb);
21519 *
21520 * extern void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
21521 *
21522 * static void check_poison_mem(unsigned char *mem, size_t bytes)
21523 * {
21524 * ......
21525 *
21526 * if (!__ratelimit(&ratelimit))
21527 * return;
21528 * else if (start == end && single_bit_flip(*start, PAGE_POISON))
21529 * printk(KERN_ERR "pagealloc: single bit error\n");
21530 * else
21531 * printk(KERN_ERR "pagealloc: memory corruption\n");
21532 *
21533 * print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
21534 * end - start + 1, 1);
21535 *
21536 * // for DHD_DEBUG_PAGEALLOC
21537 * dhd_page_corrupt_cb(corrupt_cb_handle, start, end - start + 1);
21538 *
21539 * dump_stack();
21540 * }
21541 *
21542 */
21543
21544void
21545dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
21546{
21547 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
21548
21549 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
21550 __FUNCTION__, addr_corrupt, (uint32)len));
21551
21552 DHD_OS_WAKE_LOCK(dhdp);
21553 prhex("Page Corruption:", addr_corrupt, len);
21554 dhd_dump_to_kernelog(dhdp);
21555#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
21556 /* Load the dongle side dump to host memory and then BUG_ON() */
21557 dhdp->memdump_enabled = DUMP_MEMONLY;
21558 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
21559 dhd_bus_mem_dump(dhdp);
21560#endif /* BCMPCIE && DHD_FW_COREDUMP */
21561 DHD_OS_WAKE_UNLOCK(dhdp);
21562}
21563EXPORT_SYMBOL(dhd_page_corrupt_cb);
21564#endif /* DHD_DEBUG_PAGEALLOC */
21565
21566#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
21567void
21568dhd_pktid_error_handler(dhd_pub_t *dhdp)
21569{
21570 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
21571 DHD_OS_WAKE_LOCK(dhdp);
21572 dhd_dump_to_kernelog(dhdp);
21573#ifdef DHD_FW_COREDUMP
21574 /* Load the dongle side dump to host memory */
21575 if (dhdp->memdump_enabled == DUMP_DISABLED) {
21576 dhdp->memdump_enabled = DUMP_MEMFILE;
21577 }
21578 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
21579 dhd_bus_mem_dump(dhdp);
21580#endif /* DHD_FW_COREDUMP */
21581 /* XXX Send HANG event to Android Framework for recovery */
21582 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
21583 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
21584 DHD_OS_WAKE_UNLOCK(dhdp);
21585}
21586#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
21587
21588struct net_device *
21589dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
21590{
21591 dhd_info_t *dhd = dhdp->info;
21592
21593 if (dhd->iflist[0] && dhd->iflist[0]->net)
21594 return dhd->iflist[0]->net;
21595 else
21596 return NULL;
21597}
21598
21599fw_download_status_t
21600dhd_fw_download_status(dhd_pub_t * dhd_pub)
21601{
21602 return dhd_pub->fw_download_status;
21603}
21604
21605static int
21606dhd_create_to_notifier_skt(void)
21607{
21608#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
21609	/* From kernel 3.7 onwards this API accepts only 3 arguments. */
21610 /* Kernel version 3.6 is a special case which accepts 4 arguments */
21611 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
21612#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
21613 /* Kernel version 3.5 and below use this old API format */
21614 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
21615 dhd_process_daemon_msg, NULL, THIS_MODULE);
21616#else
21617 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
21618 &dhd_netlink_cfg);
21619#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
21620 if (!nl_to_event_sk)
21621 {
21622 printf("Error creating socket.\n");
21623 return -1;
21624 }
21625 DHD_INFO(("nl_to socket created successfully...\n"));
21626 return 0;
21627}
21628
21629void
21630dhd_destroy_to_notifier_skt(void)
21631{
21632 DHD_INFO(("Destroying nl_to socket\n"));
21633 netlink_kernel_release(nl_to_event_sk);
21634}
21635
21636static void
21637dhd_recv_msg_from_daemon(struct sk_buff *skb)
21638{
21639 struct nlmsghdr *nlh;
21640 bcm_to_info_t *cmd;
21641
21642 nlh = (struct nlmsghdr *)skb->data;
21643 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
21644 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
21645 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
21646 DHD_INFO(("DHD Daemon Started\n"));
21647 }
21648}
21649
21650int
21651dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
21652{
21653 struct nlmsghdr *nlh;
21654 struct sk_buff *skb_out;
21655 int ret = BCME_ERROR;
21656
21657 BCM_REFERENCE(skb);
21658 if (sender_pid == 0) {
21659 DHD_INFO(("Invalid PID 0\n"));
21660 skb_out = NULL;
21661 goto err;
21662 }
21663
21664 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
21665 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
21666 ret = BCME_NOMEM;
21667 goto err;
21668 }
21669 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
21670 if (nlh == NULL) {
21671 DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
21672 goto err;
21673 }
21674 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
21675 (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
21676
21677 if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
21678 DHD_ERROR(("Error sending message, ret:%d\n", ret));
21679 /* skb is already freed inside nlmsg_unicast() on error case */
21680 /* explicitly making skb_out to NULL to avoid double free */
21681 skb_out = NULL;
21682 goto err;
21683 }
21684 return BCME_OK;
21685err:
21686 if (skb_out) {
21687 nlmsg_free(skb_out);
21688 }
21689 return ret;
21690}
21691
21692static void
21693dhd_process_daemon_msg(struct sk_buff *skb)
21694{
21695 bcm_to_info_t to_info;
21696
21697 to_info.magic = BCM_TO_MAGIC;
21698 to_info.reason = REASON_DAEMON_STARTED;
21699 to_info.trap = NO_TRAP;
21700
21701 dhd_recv_msg_from_daemon(skb);
21702 dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
21703}
21704
21705#ifdef DHD_LOG_DUMP
21706bool
21707dhd_log_dump_ecntr_enabled(void)
21708{
21709 return (bool)logdump_ecntr_enable;
21710}
21711
21712bool
21713dhd_log_dump_rtt_enabled(void)
21714{
21715 return (bool)logdump_rtt_enable;
21716}
21717
21718void
21719dhd_log_dump_init(dhd_pub_t *dhd)
21720{
21721 struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
21722 int i = 0;
21723 uint8 *prealloc_buf = NULL, *bufptr = NULL;
21724#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
21725 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
21726#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
21727 int ret;
21728 dhd_dbg_ring_t *ring = NULL;
21729 unsigned long flags = 0;
21730 dhd_info_t *dhd_info = dhd->info;
21731#if defined(EWP_ECNTRS_LOGGING)
21732 void *cookie_buf = NULL;
21733#endif
21734
21735 BCM_REFERENCE(ret);
21736 BCM_REFERENCE(ring);
21737 BCM_REFERENCE(flags);
21738
21739 /* sanity check */
21740 if (logdump_prsrv_tailsize <= 0 ||
21741 logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
21742 logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
21743 }
21744 /* now adjust the preserve log flush size based on the
21745 * kernel printk log buffer size
21746 */
21747#ifdef CONFIG_LOG_BUF_SHIFT
21748 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
21749 " limit prsrv tail size to = %uKB\n",
21750 __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
21751 logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
21752
21753 if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
21754 logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
21755 }
21756#else
21757 DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
21758		__FUNCTION__, logdump_prsrv_tailsize/1024));
21759#endif /* CONFIG_LOG_BUF_SHIFT */
21760
21761 mutex_init(&dhd_info->logdump_lock);
21762 /* initialize log dump buf structures */
21763 memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
21764
21765 /* set the log dump buffer size based on the module_param */
21766 if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
21767 logdump_max_bufsize <= 0)
21768 dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
21769 else
21770 dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
21771
21772 /* pre-alloc the memory for the log buffers & 'special' buffer */
21773 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
21774#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
21775 prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
21776 dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
21777 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
21778#else
21779 prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
21780 dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
21781#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
21782
21783 if (!prealloc_buf) {
21784 DHD_ERROR(("Failed to allocate memory for log buffers\n"));
21785 goto fail;
21786 }
21787 if (!dld_buf_special->buffer) {
21788 DHD_ERROR(("Failed to allocate memory for special buffer\n"));
21789 goto fail;
21790 }
21791
21792 bufptr = prealloc_buf;
21793 for (i = 0; i < DLD_BUFFER_NUM; i++) {
21794 dld_buf = &g_dld_buf[i];
21795 dld_buf->dhd_pub = dhd;
21796 spin_lock_init(&dld_buf->lock);
21797 dld_buf->wraparound = 0;
21798 if (i != DLD_BUF_TYPE_SPECIAL) {
21799 dld_buf->buffer = bufptr;
21800 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
21801 bufptr = (uint8 *)dld_buf->max;
21802 } else {
21803 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
21804 }
21805 dld_buf->present = dld_buf->front = dld_buf->buffer;
21806 dld_buf->remain = dld_buf_size[i];
21807 dld_buf->enable = 1;
21808 }
21809#ifdef EWP_ECNTRS_LOGGING
21810 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
21811 dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
21812 if (!dhd->ecntr_dbg_ring)
21813 goto fail;
21814
21815 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
21816 ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
21817 ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
21818 bufptr, TRUE);
21819 if (ret != BCME_OK) {
21820 DHD_ERROR(("%s: unable to init ecntr ring !\n",
21821 __FUNCTION__));
21822 goto fail;
21823 }
21824 DHD_DBG_RING_LOCK(ring->lock, flags);
21825 ring->state = RING_ACTIVE;
21826 ring->threshold = 0;
21827 DHD_DBG_RING_UNLOCK(ring->lock, flags);
21828
21829 bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
21830#endif /* EWP_ECNTRS_LOGGING */
21831
21832#ifdef EWP_RTT_LOGGING
21833	/* now use the rest of the pre-alloc'd memory for the rtt log */
21834 dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
21835 if (!dhd->rtt_dbg_ring)
21836 goto fail;
21837
21838 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
21839 ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
21840 RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
21841 bufptr, TRUE);
21842 if (ret != BCME_OK) {
21843		DHD_ERROR(("%s: unable to init rtt ring !\n",
21844 __FUNCTION__));
21845 goto fail;
21846 }
21847 DHD_DBG_RING_LOCK(ring->lock, flags);
21848 ring->state = RING_ACTIVE;
21849 ring->threshold = 0;
21850 DHD_DBG_RING_UNLOCK(ring->lock, flags);
21851
21852 bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
21853#endif /* EWP_RTT_LOGGING */
21854
21855	/* The concise buffer is used as an intermediate buffer for two purposes:
21856	* a) to hold ecounters records temporarily before
21857	* writing them to file
21858	* b) to store 'dhd dump' data before putting it to file
21859	* Its size should therefore be
21860	* MAX(largest possible ecntr record, 'dhd dump' data size)
21861	*/
21862 dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
21863 if (!dhd->concise_dbg_buf) {
21864 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
21865 __FUNCTION__));
21866 goto fail;
21867 }
21868
21869#if defined(DHD_EVENT_LOG_FILTER)
21870 /* XXX init filter last, because filter use buffer which alloced by log dump */
21871 ret = dhd_event_log_filter_init(dhd,
21872 bufptr,
21873 LOG_DUMP_FILTER_MAX_BUFSIZE);
21874 if (ret != BCME_OK) {
21875 goto fail;
21876 }
21877#endif /* DHD_EVENT_LOG_FILTER */
21878
21879#if defined(EWP_ECNTRS_LOGGING)
21880 cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
21881 if (!cookie_buf) {
21882 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
21883 __FUNCTION__));
21884 goto fail;
21885 }
21886
21887 ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
21888 if (ret != BCME_OK) {
21889 MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
21890 goto fail;
21891 }
21892#endif /* EWP_ECNTRS_LOGGING */
21893 return;
21894
21895fail:
21896
21897#if defined(DHD_EVENT_LOG_FILTER)
21898 /* XXX deinit filter first, because filter use buffer which alloced by log dump */
21899 if (dhd->event_log_filter) {
21900 dhd_event_log_filter_deinit(dhd);
21901 }
21902#endif /* DHD_EVENT_LOG_FILTER */
21903
21904 if (dhd->concise_dbg_buf) {
21905 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
21906 }
21907
21908#ifdef EWP_ECNTRS_LOGGING
21909 if (dhd->logdump_cookie) {
21910 dhd_logdump_cookie_deinit(dhd);
21911 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
21912 dhd->logdump_cookie = NULL;
21913 }
21914 if (dhd->ecntr_dbg_ring) {
21915 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
21916 dhd_dbg_ring_deinit(dhd, ring);
21917 ring->ring_buf = NULL;
21918 ring->ring_size = 0;
21919 MFREE(dhd->osh, dhd->ecntr_dbg_ring, sizeof(dhd_dbg_ring_t));
21920 dhd->ecntr_dbg_ring = NULL;
21921 }
21922#endif /* EWP_ECNTRS_LOGGING */
21923
21924#ifdef EWP_RTT_LOGGING
21925 if (dhd->rtt_dbg_ring) {
21926 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
21927 dhd_dbg_ring_deinit(dhd, ring);
21928 ring->ring_buf = NULL;
21929 ring->ring_size = 0;
21930 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
21931 dhd->rtt_dbg_ring = NULL;
21932 }
21933#endif /* EWP_RTT_LOGGING */
21934
21935#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
21936 if (prealloc_buf) {
21937 DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
21938 }
21939 if (dld_buf_special->buffer) {
21940 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
21941 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
21942 }
21943#else
21944 if (prealloc_buf) {
21945 MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
21946 }
21947 if (dld_buf_special->buffer) {
21948 MFREE(dhd->osh, dld_buf_special->buffer,
21949 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
21950 }
21951#endif /* CONFIG_DHD_USE_STATIC_BUF */
21952 for (i = 0; i < DLD_BUFFER_NUM; i++) {
21953 dld_buf = &g_dld_buf[i];
21954 dld_buf->enable = 0;
21955 dld_buf->buffer = NULL;
21956 }
21957 mutex_destroy(&dhd_info->logdump_lock);
21958}
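/*
 * Resulting carve-up of the pre-allocated memory (a sketch; the order
 * follows the code above, and the 'special' buffer is a separate
 * allocation):
 *
 *	prealloc_buf: [ dld bufs (general etc., special skipped)          ]
 *	              [ ecntr ring - LOG_DUMP_ECNTRS_MAX_BUFSIZE          ]  (EWP_ECNTRS_LOGGING)
 *	              [ rtt ring - LOG_DUMP_RTT_MAX_BUFSIZE               ]  (EWP_RTT_LOGGING)
 *	              [ event log filter - LOG_DUMP_FILTER_MAX_BUFSIZE    ]  (DHD_EVENT_LOG_FILTER)
 */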
21959
21960void
21961dhd_log_dump_deinit(dhd_pub_t *dhd)
21962{
21963 struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
21964 int i = 0;
21965 dhd_info_t *dhd_info = dhd->info;
21966 dhd_dbg_ring_t *ring = NULL;
21967
21968 BCM_REFERENCE(ring);
21969
21970 if (dhd->concise_dbg_buf) {
21971 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
21972 dhd->concise_dbg_buf = NULL;
21973 }
21974
21975#ifdef EWP_ECNTRS_LOGGING
21976 if (dhd->logdump_cookie) {
21977 dhd_logdump_cookie_deinit(dhd);
21978 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
21979 dhd->logdump_cookie = NULL;
21980 }
21981
21982 if (dhd->ecntr_dbg_ring) {
21983 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
21984 dhd_dbg_ring_deinit(dhd, ring);
21985 ring->ring_buf = NULL;
21986 ring->ring_size = 0;
21987 MFREE(dhd->osh, dhd->ecntr_dbg_ring, sizeof(dhd_dbg_ring_t));
21988 dhd->ecntr_dbg_ring = NULL;
21989 }
21990#endif /* EWP_ECNTRS_LOGGING */
21991
21992#ifdef EWP_RTT_LOGGING
21993 if (dhd->rtt_dbg_ring) {
21994 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
21995 dhd_dbg_ring_deinit(dhd, ring);
21996 ring->ring_buf = NULL;
21997 ring->ring_size = 0;
21998 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
21999 dhd->rtt_dbg_ring = NULL;
22000 }
22001#endif /* EWP_RTT_LOGGING */
22002
22003 /* 'general' buffer points to start of the pre-alloc'd memory */
22004 dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
22005 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
22006#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
22007 if (dld_buf->buffer) {
22008 DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
22009 }
22010 if (dld_buf_special->buffer) {
22011 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
22012 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
22013 }
22014#else
22015 if (dld_buf->buffer) {
22016 MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
22017 }
22018 if (dld_buf_special->buffer) {
22019 MFREE(dhd->osh, dld_buf_special->buffer,
22020 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
22021 }
22022#endif /* CONFIG_DHD_USE_STATIC_BUF */
22023 for (i = 0; i < DLD_BUFFER_NUM; i++) {
22024 dld_buf = &g_dld_buf[i];
22025 dld_buf->enable = 0;
22026 dld_buf->buffer = NULL;
22027 }
22028 mutex_destroy(&dhd_info->logdump_lock);
22029}
22030
22031void
22032dhd_log_dump_write(int type, char *binary_data,
22033 int binary_len, const char *fmt, ...)
22034{
22035 int len = 0;
22036 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
22037 va_list args;
22038 unsigned long flags = 0;
22039 struct dhd_log_dump_buf *dld_buf = NULL;
22040 bool flush_log = FALSE;
22041
22042 if (type < 0 || type >= DLD_BUFFER_NUM) {
22043 DHD_INFO(("%s: Unsupported DHD_LOG_DUMP_BUF_TYPE(%d).\n",
22044 __FUNCTION__, type));
22045 return;
22046 }
22047
22048 dld_buf = &g_dld_buf[type];
22049 if (dld_buf->enable != 1) {
22050 return;
22051 }
22052
22053 va_start(args, fmt);
22054 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
22055	/* On truncation, a non-C99-compliant vsnprintf returns -1;
22056	 * a C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
22057	 */
22058 va_end(args);
22059 if (len < 0) {
22060 return;
22061 }
22062
22063 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
22064 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
22065 tmp_buf[len] = '\0';
22066 }
22067
22068 /* make a critical section to eliminate race conditions */
22069 DHD_LOG_DUMP_BUF_LOCK(&dld_buf->lock, flags);
22070 if (dld_buf->remain < len) {
22071 dld_buf->wraparound = 1;
22072 dld_buf->present = dld_buf->front;
22073 dld_buf->remain = dld_buf_size[type];
22074 /* if wrap around happens, flush the ring buffer to the file */
22075 flush_log = TRUE;
22076 }
22077
22078 memcpy(dld_buf->present, tmp_buf, len);
22079 dld_buf->remain -= len;
22080 dld_buf->present += len;
22081 DHD_LOG_DUMP_BUF_UNLOCK(&dld_buf->lock, flags);
22082
22083 /* double check invalid memory operation */
22084 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
22085
22086 if (dld_buf->dhd_pub) {
22087 dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
22088 dhdp->logdump_periodic_flush =
22089 logdump_periodic_flush;
22090 if (logdump_periodic_flush && flush_log) {
22091 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
22092 sizeof(log_dump_type_t));
22093 if (flush_type) {
22094 *flush_type = type;
22095 dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
22096 }
22097 }
22098 }
22099}
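/*
 * Usage sketch: callers format text straight into one of the ring
 * buffers. binary_data/binary_len are not consumed by the implementation
 * above, so NULL/0 is fine (arguments illustrative):
 *
 *	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0,
 *		"%s: sample entry, val=%d\n", __FUNCTION__, val);
 */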
22100
22101char*
22102dhd_log_dump_get_timestamp(void)
22103{
22104 static char buf[32];
22105 u64 ts_nsec;
22106 unsigned long rem_nsec;
22107
22108 ts_nsec = local_clock();
22109 rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
22110 snprintf(buf, sizeof(buf), "%5lu.%06lu",
22111 (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
22112
22113 return buf;
22114}
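/*
 * Example: local_clock() == 5123456789 ns yields ts_nsec = 5 (seconds)
 * and rem_nsec = 123456789, printed as "    5.123456" (microsecond
 * resolution). The buffer is static, so the result must be consumed
 * before the next call; this is not thread-safe.
 */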
22115#endif /* DHD_LOG_DUMP */
22116
22117#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
22118void
22119dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
22120{
22121 dhd_info_t * dhd;
22122
22123 if (dhdp) {
22124 dhd = dhdp->info;
22125 if (dhd) {
22126 flush_workqueue(dhd->tx_wq);
22127 flush_workqueue(dhd->rx_wq);
22128 }
22129 }
22130
22131 return;
22132}
22133#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
22134
22135#ifdef DHD_DEBUG_UART
22136bool
22137dhd_debug_uart_is_running(struct net_device *dev)
22138{
22139 dhd_info_t *dhd = DHD_DEV_INFO(dev);
22140
22141 if (dhd->duart_execute) {
22142 return TRUE;
22143 }
22144
22145 return FALSE;
22146}
22147
22148static void
22149dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
22150{
22151 dhd_pub_t *dhdp = handle;
22152 dhd_debug_uart_exec(dhdp, "rd");
22153}
22154
22155static void
22156dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
22157{
22158 int ret;
22159
22160 char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
22161 char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
22162
22163#ifdef DHD_FW_COREDUMP
22164 if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
22165#endif
22166 {
22167 if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
22168 dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
22169#ifdef DHD_FW_COREDUMP
22170 dhdp->memdump_success == FALSE ||
22171#endif
22172 FALSE) {
22173 dhdp->info->duart_execute = TRUE;
22174 DHD_ERROR(("DHD: %s - execute %s %s\n",
22175 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
22176 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
22177 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
22178 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
22179 dhdp->info->duart_execute = FALSE;
22180
22181#ifdef DHD_LOG_DUMP
22182 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
22183#endif
22184 {
22185 BUG_ON(1);
22186 }
22187 }
22188 }
22189}
22190#endif /* DHD_DEBUG_UART */
22191
22192#if defined(DHD_BLOB_EXISTENCE_CHECK)
22193void
22194dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
22195{
22196 struct file *fp;
22197 char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
22198
22199 fp = filp_open(filepath, O_RDONLY, 0);
22200 if (IS_ERR(fp)) {
22201 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
22202 filepath));
22203 dhdp->is_blob = FALSE;
22204 } else {
22205 DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
22206 dhdp->is_blob = TRUE;
22207#if defined(CONCATE_BLOB)
22208 strncat(fw_path, "_blob", strlen("_blob"));
22209#else
22210 BCM_REFERENCE(fw_path);
22211#endif /* CONCATE_BLOB */
22212 filp_close(fp, NULL);
22213 }
22214}
22215#endif /* DHD_BLOB_EXISTENCE_CHECK */
22216
22217#if defined(PCIE_FULL_DONGLE)
22218/** test / loopback */
22219void
22220dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
22221{
22222 dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
22223 dhd_info_t *dhd_info = (dhd_info_t *)handle;
22224
22225 if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
22226 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
22227 return;
22228 }
22229 if (dhd_info == NULL) {
22230 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
22231 return;
22232 }
22233 if (dmmap == NULL) {
22234 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
22235 return;
22236 }
22237 dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
22238}
22239
22240void
22241dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
22242{
22243 dhd_info_t *dhd_info = dhdp->info;
22244
22245 dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
22246 DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
22247}
22248#endif /* PCIE_FULL_DONGLE */
22249/* ---------------------------- End of sysfs implementation ------------------------------------- */
22250#ifdef SET_PCIE_IRQ_CPU_CORE
22251void
22252dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
22253{
22254 unsigned int pcie_irq = 0;
22255
22256 if (!dhdp) {
22257 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
22258 return;
22259 }
22260
22261 if (!dhdp->bus) {
22262 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
22263 return;
22264 }
22265
22266 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
22267
22268 if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
22269 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
22270 return;
22271 }
22272
22273 /*
22274		irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
22275		If the dedicated CPU core is not online,
22276		the PCIe interrupt is scheduled on CPU core 0.
22277 */
22278 switch (affinity_cmd) {
22279 case PCIE_IRQ_AFFINITY_OFF:
22280 break;
22281 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
22282#if defined(CONFIG_ARCH_SM8150) || defined(CONFIG_ARCH_KONA)
22283 irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
22284 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
22285#else /* Exynos and Others */
22286 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
22287#endif /* CONFIG_ARCH_SM8150 || CONFIG_ARCH_KONA */
22289 break;
22290#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
22291 defined(CONFIG_SOC_EXYNOS9830)
22292 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
22293 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
22294 __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
22295 irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
22296 break;
22297#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
22298 * CONFIG_SOC_EXYNOS9830
22299 */
22300 default:
22301 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
22302 __FUNCTION__, affinity_cmd));
22303 }
22304}
22305#endif /* SET_PCIE_IRQ_CPU_CORE */
22306
22307int
22308dhd_write_file(const char *filepath, char *buf, int buf_len)
22309{
22310 struct file *fp = NULL;
22311 mm_segment_t old_fs;
22312 int ret = 0;
22313
22314 /* change to KERNEL_DS address limit */
22315 old_fs = get_fs();
22316 set_fs(KERNEL_DS);
22317
22318 /* File is always created. */
22319 fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
22320 if (IS_ERR(fp)) {
22321 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
22322 __FUNCTION__, filepath, PTR_ERR(fp)));
22323 ret = BCME_ERROR;
22324 } else {
22325 if (fp->f_mode & FMODE_WRITE) {
22326 ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
22327 if (ret < 0) {
22328 DHD_ERROR(("%s: Couldn't write file '%s'\n",
22329 __FUNCTION__, filepath));
22330 ret = BCME_ERROR;
22331 } else {
22332 ret = BCME_OK;
22333 }
22334 }
22335 filp_close(fp, NULL);
22336 }
22337
22338 /* restore previous address limit */
22339 set_fs(old_fs);
22340
22341 return ret;
22342}
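/*
 * Note: the get_fs()/set_fs(KERNEL_DS) pattern above was removed from
 * mainline kernels around v5.10; on such kernels the equivalent write
 * (a sketch, not what this file builds) is:
 *
 *	loff_t pos = 0;
 *	ssize_t nwritten = kernel_write(fp, buf, buf_len, &pos);
 */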
22343
22344int
22345dhd_read_file(const char *filepath, char *buf, int buf_len)
22346{
22347 struct file *fp = NULL;
22348 mm_segment_t old_fs;
22349 int ret;
22350
22351 /* change to KERNEL_DS address limit */
22352 old_fs = get_fs();
22353 set_fs(KERNEL_DS);
22354
22355 fp = filp_open(filepath, O_RDONLY, 0);
22356 if (IS_ERR(fp)) {
22357 set_fs(old_fs);
22358 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
22359 return BCME_ERROR;
22360 }
22361
22362 ret = kernel_read_compat(fp, 0, buf, buf_len);
22363 filp_close(fp, NULL);
22364
22365 /* restore previous address limit */
22366 set_fs(old_fs);
22367
22368 /* Return the number of bytes read */
22369 if (ret > 0) {
22370 /* Success to read */
22371 ret = 0;
22372 } else {
22373 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
22374 __FUNCTION__, filepath, ret));
22375 ret = BCME_ERROR;
22376 }
22377
22378 return ret;
22379}
22380
22381int
22382dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
22383{
22384 int ret;
22385
22386 ret = dhd_write_file(filepath, buf, buf_len);
22387 if (ret < 0) {
22388 return ret;
22389 }
22390
22391 /* Read the file again and check if the file size is not zero */
22392 memset(buf, 0, buf_len);
22393 ret = dhd_read_file(filepath, buf, buf_len);
22394
22395 return ret;
22396}
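/*
 * Usage sketch (path and contents are illustrative, not values the
 * driver uses):
 *
 *	char status[8] = "OK";
 *	if (dhd_write_file_and_check("/data/.dhd_status", status,
 *			sizeof(status)) < 0) {
 *		DHD_ERROR(("status file write/verify failed\n"));
 *	}
 */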
22397
22398#ifdef FILTER_IE
22399int dhd_read_from_file(dhd_pub_t *dhd)
22400{
22401 int ret = 0, nread = 0;
22402 void *fd;
22403 uint8 *buf;
22404 NULL_CHECK(dhd, "dhd is NULL", ret);
22405
22406 buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
22407 if (!buf) {
22408		DHD_ERROR(("error: failed to allocate buf.\n"));
22409 return BCME_NOMEM;
22410 }
22411
22412 /* open file to read */
22413 fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
22414 if (!fd) {
22415		DHD_ERROR(("No filter file (not an error), filter path %s\n", FILTER_IE_PATH));
22416 ret = BCME_EPERM;
22417 goto exit;
22418 }
22419 nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
22420 if (nread > 0) {
22421 buf[nread] = '\0';
22422 if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
22423 DHD_ERROR(("error: failed to parse filter ie\n"));
22424 }
22425 } else {
22426		DHD_ERROR(("error: zero length file, failed to read\n"));
22427 ret = BCME_ERROR;
22428 }
22429 dhd_os_close_image1(dhd, fd);
22430exit:
22431 if (buf) {
22432 MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
22433 }
22434 return ret;
22435}
22436
22437int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
22438{
22439 uint8* pstr = buf;
22440 int element_count = 0;
22441
22442 if (buf == NULL) {
22443 return BCME_ERROR;
22444 }
22445
22446 while (*pstr != '\0') {
22447 if (*pstr == '\n') {
22448 element_count++;
22449 }
22450 pstr++;
22451 }
22452	/*
22453	 * No newline character follows the last line, so the
22454	 * last line must be counted explicitly.
22455	 */
22456 element_count++;
22457
22458 return element_count;
22459}
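/*
 * Example: a filter_ie file containing "221,00:50:F2,4\n127" (no
 * trailing newline) yields element_count = 2: one '\n' plus the final,
 * unterminated line.
 */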
22460
22461int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
22462{
22463 uint8 i, j, msb, lsb, oui_len = 0;
22464	/*
22465	 * An OUI can vary from 3 to 5 bytes.
22466	 * Read from the file as ASCII input, it takes
22467	 * a maximum of 14 bytes and a minimum of
22468	 * 8 bytes including the ":" separators.
22469	 * Example 5-byte OUI: <AB:DE:BE:CD:FA>
22470	 * Example 3-byte OUI: <AB:DC:EF>
22471	 */
22472
22473 if ((inbuf == NULL) || (len < 8) || (len > 14)) {
22474 DHD_ERROR(("error: failed to parse OUI \n"));
22475 return BCME_ERROR;
22476 }
22477
22478 for (j = 0, i = 0; i < len; i += 3, ++j) {
22479 if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
22480 DHD_ERROR(("error: invalid OUI format \n"));
22481 return BCME_ERROR;
22482 }
22483 msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
22484 lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
22485 'A' + 10 : inbuf[i + 1] - '0';
22486 oui[j] = (msb << 4) | lsb;
22487 }
22488	/* Size of the OUI; it can be 3, 4 or 5 bytes */
22489 oui_len = j;
22490
22491 return oui_len;
22492}
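/*
 * Example: inbuf = "00:50:F2" (len = 8) iterates i = 0, 3, 6 and
 * produces oui = {0x00, 0x50, 0xF2}, returning oui_len = 3.
 */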
22493
22494int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
22495{
22496 int i = 0;
22497
22498 while (i < len) {
22499 if (!bcm_isdigit(buf[i])) {
22500 DHD_ERROR(("error: non digit value found in filter_ie \n"));
22501 return BCME_ERROR;
22502 }
22503 i++;
22504 }
22505 if (bcm_atoi((char*)buf) > 255) {
22506 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
22507 return BCME_ERROR;
22508 }
22509
22510 return BCME_OK;
22511}
22512
22513int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
22514{
22515 int element_count = 0, i = 0, oui_size = 0, ret = 0;
22516 uint16 bufsize, buf_space_left, id = 0, len = 0;
22517 uint16 filter_iovsize, all_tlvsize;
22518 wl_filter_ie_tlv_t *p_ie_tlv = NULL;
22519 wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
22520 char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
22521 uint8 data[20];
22522
22523 element_count = dhd_get_filter_ie_count(dhd, buf);
22524 DHD_INFO(("total element count %d \n", element_count));
22525 /* Calculate the whole buffer size */
22526 filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
22527 p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
22528
22529 if (p_filter_iov == NULL) {
22530 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
22531 return BCME_ERROR;
22532 }
22533
22534 /* setup filter iovar header */
22535 p_filter_iov->version = WL_FILTER_IE_VERSION;
22536 p_filter_iov->len = filter_iovsize;
22537 p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
22538 p_filter_iov->pktflag = FC_PROBE_REQ;
22539 p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
22540 /* setup TLVs */
22541 bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
22542 p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
22543 buf_space_left = bufsize;
22544
22545 while ((i < element_count) && (buf != NULL)) {
22546 len = 0;
22547 /* token contains one line of input data */
22548 token = bcmstrtok((char**)&buf, "\n", NULL);
22549 if (token == NULL) {
22550 break;
22551 }
22552 if ((ele_token = bcmstrstr(token, ",")) == NULL) {
22553 /* only element id is present */
22554 if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
22555 DHD_ERROR(("error: Invalid element id \n"));
22556 ret = BCME_ERROR;
22557 goto exit;
22558 }
22559 id = bcm_atoi((char*)token);
22560 data[len++] = WL_FILTER_IE_SET;
22561 } else {
22562 /* oui is present */
22563 ele_token = bcmstrtok(&token, ",", NULL);
22564 if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
22565 strlen(ele_token)) == BCME_ERROR)) {
22566 DHD_ERROR(("error: Invalid element id \n"));
22567 ret = BCME_ERROR;
22568 goto exit;
22569 }
22570 id = bcm_atoi((char*)ele_token);
22571 data[len++] = WL_FILTER_IE_SET;
22572 if ((oui_token = bcmstrstr(token, ",")) == NULL) {
22573 oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
22574 if (oui_size == BCME_ERROR) {
22575 DHD_ERROR(("error: Invalid OUI \n"));
22576 ret = BCME_ERROR;
22577 goto exit;
22578 }
22579 len += oui_size;
22580 } else {
22581 /* type is present */
22582 oui_token = bcmstrtok(&token, ",", NULL);
22583 if ((oui_token == NULL) || ((oui_size =
22584 dhd_parse_oui(dhd, oui_token,
22585 &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
22586 DHD_ERROR(("error: Invalid OUI \n"));
22587 ret = BCME_ERROR;
22588 goto exit;
22589 }
22590 len += oui_size;
22591 if ((type = bcmstrstr(token, ",")) == NULL) {
22592 if (dhd_check_valid_ie(dhd, token,
22593 strlen(token)) == BCME_ERROR) {
22594 DHD_ERROR(("error: Invalid type \n"));
22595 ret = BCME_ERROR;
22596 goto exit;
22597 }
22598 data[len++] = bcm_atoi((char*)token);
22599 } else {
22600 /* subtype is present */
22601 type = bcmstrtok(&token, ",", NULL);
22602 if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
22603 strlen(type)) == BCME_ERROR)) {
22604 DHD_ERROR(("error: Invalid type \n"));
22605 ret = BCME_ERROR;
22606 goto exit;
22607 }
22608 data[len++] = bcm_atoi((char*)type);
22609 /* subtype is last element */
22610 if ((token == NULL) || (*token == '\0') ||
22611 (dhd_check_valid_ie(dhd, token,
22612 strlen(token)) == BCME_ERROR)) {
22613 DHD_ERROR(("error: Invalid subtype \n"));
22614 ret = BCME_ERROR;
22615 goto exit;
22616 }
22617 data[len++] = bcm_atoi((char*)token);
22618 }
22619 }
22620 }
22621 ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
22622 &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
22623 if (ret != BCME_OK) {
22624			DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed, "
22625 "status=%d\n", __FUNCTION__, ret));
22626 goto exit;
22627 }
22628 i++;
22629 }
22630 if (i == 0) {
22631 /* file is empty or first line is blank */
22632 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
22633 ret = BCME_ERROR;
22634 goto exit;
22635 }
22636 /* update the iov header, set len to include all TLVs + header */
22637 all_tlvsize = (bufsize - buf_space_left);
22638 p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
22639 ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
22640 p_filter_iov->len, NULL, 0, TRUE);
22641 if (ret != BCME_OK) {
22642 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
22643 }
22644exit:
22645 /* clean up */
22646 if (p_filter_iov) {
22647 MFREE(dhd->osh, p_filter_iov, filter_iovsize);
22648 }
22649 return ret;
22650}
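/*
 * Accepted input format, one filter per line (fields after the element
 * id are optional; example values are illustrative):
 *
 *	<element id>[,<OUI>[,<type>[,<subtype>]]]
 *
 *	e.g.	127
 *		221,00:50:F2,4,1
 */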
22651#endif /* FILTER_IE */
22652#ifdef DHD_WAKE_STATUS
22653wake_counts_t*
22654dhd_get_wakecount(dhd_pub_t *dhdp)
22655{
22656#ifdef BCMDBUS
22657 return NULL;
22658#else
22659 return dhd_bus_get_wakecount(dhdp);
22660#endif /* BCMDBUS */
22661}
22662#endif /* DHD_WAKE_STATUS */
22663
22664int
22665dhd_get_random_bytes(uint8 *buf, uint len)
22666{
22667#ifdef BCMPCIE
22668#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
22669 int rndlen = get_random_bytes_arch(buf, len);
22670 if (rndlen != len) {
22671 bzero(buf, len);
22672 get_random_bytes(buf, len);
22673 }
22674#else
22675 get_random_bytes_arch(buf, len);
22676#endif
22677#endif /* BCMPCIE */
22678 return BCME_OK;
22679}
22680
22681#if defined(DHD_HANG_SEND_UP_TEST)
22682void
22683dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
22684{
22685 dhd_info_t *dhd = NULL;
22686 dhd_pub_t *dhdp = NULL;
22687 uint reason = HANG_REASON_MAX;
22688 uint32 fw_test_code = 0;
22689 dhd = DHD_DEV_INFO(dev);
22690
22691 if (dhd) {
22692 dhdp = &dhd->pub;
22693 }
22694
22695 if (!dhd || !dhdp) {
22696 return;
22697 }
22698
22699 reason = (uint) bcm_strtoul(string_num, NULL, 0);
22700 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
22701
22702 if (reason == 0) {
22703 if (dhdp->req_hang_type) {
22704 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
22705 __FUNCTION__, dhdp->req_hang_type));
22706 dhdp->req_hang_type = 0;
22707 return;
22708 } else {
22709 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
22710 return;
22711 }
22712 } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
22713 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
22714 return;
22715 }
22716
22717 if (dhdp->req_hang_type != 0) {
22718 DHD_ERROR(("Already HANG requested for test\n"));
22719 return;
22720 }
22721
22722 switch (reason) {
22723 case HANG_REASON_IOCTL_RESP_TIMEOUT:
22724 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
22725 dhdp->req_hang_type = reason;
22726			fw_test_code = 102; /* resumed on timeout */
22727 (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
22728 WLC_SET_VAR, TRUE, 0);
22729 break;
22730 case HANG_REASON_DONGLE_TRAP:
22731 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
22732 dhdp->req_hang_type = reason;
22733 fw_test_code = 99; /* dongle trap */
22734 (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
22735 WLC_SET_VAR, TRUE, 0);
22736 break;
22737 case HANG_REASON_D3_ACK_TIMEOUT:
22738 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
22739 dhdp->req_hang_type = reason;
22740 break;
22741 case HANG_REASON_BUS_DOWN:
22742 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
22743 dhdp->req_hang_type = reason;
22744 break;
22745 case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT:
22746 case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT:
22747 case HANG_REASON_MSGBUF_LIVELOCK:
22748 dhdp->req_hang_type = 0;
22749 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
22750 break;
22751 case HANG_REASON_IFACE_DEL_FAILURE:
22752 dhdp->req_hang_type = 0;
22753 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
22754 break;
22755 case HANG_REASON_HT_AVAIL_ERROR:
22756 dhdp->req_hang_type = 0;
22757 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
22758 break;
22759 case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
22760 DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
22761 dhdp->req_hang_type = reason;
22762 break;
22763 default:
22764 dhdp->req_hang_type = 0;
22765 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
22766 break;
22767 }
22768}
22769#endif /* DHD_HANG_SEND_UP_TEST */
22770
22771#ifdef DHD_TX_PROFILE
22772static int
22773process_layer2_headers(uint8 **p, int *plen, uint16 *type)
22774{
22775 int err = BCME_OK;
22776
22777 if (*type < ETHER_TYPE_MIN) {
22778 struct dot3_mac_llc_snap_header *sh = (struct dot3_mac_llc_snap_header *)*p;
22779
22780 if (bcmp(&sh->dsap, llc_snap_hdr, SNAP_HDR_LEN) == 0) {
22781 *type = ntoh16(sh->type);
22782 if (*type == ETHER_TYPE_8021Q) {
22783 *p += sizeof(struct dot3_mac_llc_snap_header);
22784 if ((*plen -= sizeof(struct dot3_mac_llc_snap_header)) <= 0) {
22785 err = BCME_ERROR;
22786 }
22787 }
22788 else {
22789 struct dot3_mac_llc_snapvlan_header *svh = (struct
22790 dot3_mac_llc_snapvlan_header *)*p;
22791
22792 *type = ntoh16(svh->ether_type);
22793 *p += sizeof(struct dot3_mac_llc_snapvlan_header);
22794 if ((*plen -= sizeof(struct dot3_mac_llc_snapvlan_header)) <= 0) {
22795 err = BCME_ERROR;
22796 }
22797 }
22798 }
22799 else {
22800 err = BCME_ERROR;
22801 }
22802 }
22803 else {
22804 if (*type == ETHER_TYPE_8021Q) {
22805 struct ethervlan_header *evh = (struct ethervlan_header *)*p;
22806
22807 *type = ntoh16(evh->ether_type);
22808 *p += ETHERVLAN_HDR_LEN;
22809 if ((*plen -= ETHERVLAN_HDR_LEN) <= 0) {
22810 err = BCME_ERROR;
22811 }
22812 }
22813 else {
22814 *p += ETHER_HDR_LEN;
22815 if ((*plen -= ETHER_HDR_LEN) <= 0) {
22816 err = BCME_ERROR;
22817 }
22818 }
22819 }
22820
22821 return err;
22822}
22823
22824static int
22825process_layer3_headers(uint8 **p, int plen, uint16 *type)
22826{
22827 int err = BCME_OK;
22828
22829 if (*type == ETHER_TYPE_IP) {
22830 struct ipv4_hdr *iph = (struct ipv4_hdr *)*p;
22831 uint16 len = IPV4_HLEN(iph);
22832 if ((plen -= len) <= 0) {
22833 err = BCME_ERROR;
22834 } else if (IP_VER(iph) == IP_VER_4 && len >= IPV4_MIN_HEADER_LEN) {
22835 *type = IPV4_PROT(iph);
22836 *p += len;
22837 } else {
22838 err = BCME_ERROR;
22839 }
22840 } else if (*type == ETHER_TYPE_IPV6) {
22841 struct ipv6_hdr *ip6h = (struct ipv6_hdr *)*p;
22842 if ((plen -= IPV6_MIN_HLEN) <= 0) {
22843 err = BCME_ERROR;
22844 } else if (IP_VER(ip6h) == IP_VER_6) {
22845 *type = IPV6_PROT(ip6h);
22846 *p += IPV6_MIN_HLEN;
22847 if (IPV6_EXTHDR(*type)) {
22848 uint8 proto_6 = 0;
22849 int32 exth_len = ipv6_exthdr_len(*p, &proto_6);
22850 if (exth_len < 0 || ((plen -= exth_len) <= 0)) {
22851 err = BCME_ERROR;
22852 } else {
22853 *type = proto_6;
22854 *p += exth_len;
22855 }
22856 }
22857 } else {
22858 err = BCME_ERROR;
22859 }
22860 }
22861
22862 return err;
22863}
22864
22865bool
22866dhd_protocol_matches_profile(uint8 *p, int plen, const dhd_tx_profile_protocol_t
22867 *proto)
22868{
22869 struct ether_header *eh = (struct ether_header *)p;
22870 bool result = FALSE;
22871 uint16 type;
22872
22873 ASSERT(proto != NULL);
22874 ASSERT(p != NULL);
22875
22876 if (plen <= 0) {
22877 result = FALSE;
22878 } else {
22879 type = ntoh16(eh->ether_type);
22880
22881 if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER &&
22882 proto->protocol_number == type) {
22883 result = TRUE;
22884 } else if (process_layer2_headers(&p, &plen, &type) != BCME_OK) {
22885 result = FALSE;
22886 } else if (proto->layer == DHD_TX_PROFILE_DATA_LINK_LAYER) {
22887 result = proto->protocol_number == type;
22888 } else if (proto->layer != DHD_TX_PROFILE_NETWORK_LAYER) {
22889 result = FALSE;
22890 } else if (process_layer3_headers(&p, plen, &type) != BCME_OK) {
22891 result = FALSE;
22892 } else if (proto->protocol_number == type) {
22893 /* L4, only check TCP/UDP case */
22894 if ((type == IP_PROT_TCP) || (type == IP_PROT_UDP)) {
22895 /* src/dst port are the first two uint16 fields in both tcp/udp
22896 * hdr
22897 */
22898 struct bcmudp_hdr *hdr = (struct bcmudp_hdr *)p;
22899
22900 /* note that a src_port or dest_port of zero counts as a match
22901 */
22902 result = ((proto->src_port == 0) || (proto->src_port ==
22903 ntoh16(hdr->src_port))) && ((proto->dest_port == 0) ||
22904 (proto->dest_port == ntoh16(hdr->dst_port)));
22905 } else {
22906 /* at this point we know we are dealing with layer 3, and we
22907 * know we are not dealing with TCP or UDP; this is considered a
22908 * match
22909 */
22910 result = TRUE;
22911 }
22912 }
22913 }
22914
22915 return result;
22916}
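/*
 * Sketch of a profile this predicate would match for any IPv4/IPv6 UDP
 * packet to port 53 (field values are illustrative):
 *
 *	dhd_tx_profile_protocol_t p = {
 *		.layer = DHD_TX_PROFILE_NETWORK_LAYER,
 *		.protocol_number = IP_PROT_UDP,
 *		.src_port = 0,		(0 acts as a wildcard)
 *		.dest_port = 53,
 *	};
 */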
22917#endif /* defined(DHD_TX_PROFILE) */
22918
22919#ifdef BCMPCIE
22920#define KIRQ_PRINT_BUF_LEN 256
22921
22922void
22923dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
22924{
22925 unsigned long flags = 0;
22926 struct irq_desc *desc;
22927 int i; /* cpu iterator */
22928 struct bcmstrbuf strbuf;
22929 char tmp_buf[KIRQ_PRINT_BUF_LEN];
22930
22931#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
22932 desc = irq_to_desc(irq_num);
22933 if (!desc) {
22934 DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
22935 return;
22936 }
22937 bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
22938 raw_spin_lock_irqsave(&desc->lock, flags);
22939 bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
22940 for_each_online_cpu(i)
22941 bcm_bprintf(&strbuf, "%10u ",
22942 desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
22943 if (desc->irq_data.chip) {
22944 if (desc->irq_data.chip->name)
22945 bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
22946 else
22947 bcm_bprintf(&strbuf, " %8s", "-");
22948 } else {
22949 bcm_bprintf(&strbuf, " %8s", "None");
22950 }
22951#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
22952 if (desc->irq_data.domain)
22953 bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
22954#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
22955 bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
22956#endif
22957#endif /* LINUX VERSION > 3.1.0 */
22958
22959 if (desc->name)
22960 bcm_bprintf(&strbuf, "-%-8s", desc->name);
22961
22962 DHD_ERROR(("%s\n", strbuf.origbuf));
22963 raw_spin_unlock_irqrestore(&desc->lock, flags);
22964#endif /* LINUX VERSION > 2.6.28 */
22965}
22966#endif /* BCMPCIE */
22967
22968void
22969dhd_show_kirqstats(dhd_pub_t *dhd)
22970{
22971 unsigned int irq = -1;
22972#ifdef BCMPCIE
22973 dhdpcie_get_pcieirq(dhd->bus, &irq);
22974#endif /* BCMPCIE */
22975#ifdef BCMSDIO
22976 irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
22977#endif /* BCMSDIO */
22978 if (irq != -1) {
22979#ifdef BCMPCIE
22980 DHD_ERROR(("DUMP data kernel irq stats : \n"));
22981 dhd_print_kirqstats(dhd, irq);
22982#endif /* BCMPCIE */
22983#ifdef BCMSDIO
22984 DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
22985#endif /* BCMSDIO */
22986 }
22987#ifdef BCMPCIE_OOB_HOST_WAKE
22988 irq = dhd_bus_get_oob_irq_num(dhd);
22989 if (irq) {
22990 DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
22991 dhd_print_kirqstats(dhd, irq);
22992 }
22993#endif /* BCMPCIE_OOB_HOST_WAKE */
22994}
22995
22996void
22997dhd_print_tasklet_status(dhd_pub_t *dhd)
22998{
22999 dhd_info_t *dhdinfo;
23000
23001 if (!dhd) {
23002 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
23003 return;
23004 }
23005
23006 dhdinfo = dhd->info;
23007
23008 if (!dhdinfo) {
23009 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
23010 return;
23011 }
23012
23013 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
23014}
23015
23016#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
23017void
23018dhd_mqstats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
23019{
23020 dhd_info_t *dhd = NULL;
23021 int i = 0, j = 0;
23022
23023 if (!dhdp || !strbuf)
23024 return;
23025
23026 dhd = dhdp->info;
23027 bcm_bprintf(strbuf, "\nMQ STATS:\n=========\n");
23028
23029 bcm_bprintf(strbuf, "\nTx packet arrival AC histogram:\n");
23030 bcm_bprintf(strbuf, "AC_BE \tAC_BK \tAC_VI \tAC_VO\n");
23031 bcm_bprintf(strbuf, "----- \t----- \t----- \t-----\n");
23032 for (i = 0; i < AC_COUNT; i++)
23033 bcm_bprintf(strbuf, "%-10d\t", dhd->pktcnt_per_ac[i]);
23034
23035 bcm_bprintf(strbuf, "\n\nTx packet arrival Q-AC histogram:\n");
23036 bcm_bprintf(strbuf, "\tAC_BE \tAC_BK \tAC_VI \tAC_VO\n");
23037 bcm_bprintf(strbuf, "\t----- \t----- \t----- \t-----");
23038 for (i = 0; i < MQ_MAX_QUEUES; i++) {
23039 bcm_bprintf(strbuf, "\nQ%d\t", i);
23040 for (j = 0; j < AC_COUNT; j++)
23041 bcm_bprintf(strbuf, "%-8d\t", dhd->pktcnt_qac_histo[i][j]);
23042 }
23043
23044 bcm_bprintf(strbuf, "\n\nTx Q-CPU scheduling histogram:\n");
23045 bcm_bprintf(strbuf, "\t");
23046 for (i = 0; i < nr_cpu_ids; i++)
23047 bcm_bprintf(strbuf, "CPU%d \t", i);
23048 for (i = 0; i < MQ_MAX_QUEUES; i++) {
23049 bcm_bprintf(strbuf, "\nQ%d\t", i);
23050 for (j = 0; j < nr_cpu_ids; j++)
23051 bcm_bprintf(strbuf, "%-8d\t", dhd->cpu_qstats[i][j]);
23052 }
23053 bcm_bprintf(strbuf, "\n");
23054}
23055#endif /* DHD_MQ && DHD_MQ_STATS */
23056
23057#ifdef DHD_MAP_LOGGING
23058/* Will be called from SMMU fault handler */
23059void
23060dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
23061{
23062 dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
23063 uint32 irq = (uint32)-1;
23064
23065 DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
23066	DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx\n", __FUNCTION__, axid, fault_addr));
23067 dhdp->smmu_fault_occurred = TRUE;
23068#ifdef DNGL_AXI_ERROR_LOGGING
23069 dhdp->axi_error = TRUE;
23070 dhdp->axi_err_dump->axid = axid;
23071 dhdp->axi_err_dump->fault_address = fault_addr;
23072#endif /* DNGL_AXI_ERROR_LOGGING */
23073
23074 /* Disable PCIe IRQ */
23075 dhdpcie_get_pcieirq(dhdp->bus, &irq);
23076 if (irq != (uint32)-1) {
23077 disable_irq_nosync(irq);
23078 }
23079
23080 /* Take debug information first */
23081 DHD_OS_WAKE_LOCK(dhdp);
23082 dhd_prot_smmu_fault_dump(dhdp);
23083 DHD_OS_WAKE_UNLOCK(dhdp);
23084
23085 /* Take AXI information if possible */
23086#ifdef DNGL_AXI_ERROR_LOGGING
23087#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
23088 dhd_axi_error_dispatch(dhdp);
23089#else
23090 dhd_axi_error(dhdp);
23091#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
23092#endif /* DNGL_AXI_ERROR_LOGGING */
23093}
23094EXPORT_SYMBOL(dhd_smmu_fault_handler);
23095#endif /* DHD_MAP_LOGGING */
23096
23097#ifdef DHD_ERPOM
23098static void
23099dhd_error_recovery(void *handle, void *event_info, u8 event)
23100{
23101 dhd_info_t *dhd = handle;
23102 dhd_pub_t *dhdp;
23103 int ret = 0;
23104
23105 if (!dhd) {
23106 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
23107 return;
23108 }
23109
23110 dhdp = &dhd->pub;
23111
23112 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
23113 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
23114 __FUNCTION__));
23115 return;
23116 }
23117
23118 ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
23119 if (ret != BCME_DNGL_DEVRESET) {
23120 DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
23121			" toggle REG_ON\n", __FUNCTION__, ret));
23122 /* toggle REG_ON */
23123 dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
23124 return;
23125 }
23126}
23127
23128void
23129dhd_schedule_reset(dhd_pub_t *dhdp)
23130{
23131 if (dhdp->enable_erpom) {
23132 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
23133 DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
23134 }
23135}
23136#endif /* DHD_ERPOM */
23137
23138#ifdef DHD_PKT_LOGGING
23139int
23140dhd_pktlog_debug_dump(dhd_pub_t *dhdp)
23141{
23142 struct net_device *primary_ndev;
23143 struct bcm_cfg80211 *cfg;
23144 unsigned long flags = 0;
23145
23146 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
23147 if (!primary_ndev) {
23148 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
23149 return BCME_ERROR;
23150 }
23151
23152 cfg = wl_get_cfg(primary_ndev);
23153 if (!cfg) {
23154 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
23155 return BCME_ERROR;
23156 }
23157
23158 DHD_GENERAL_LOCK(dhdp, flags);
23159 if (DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp)) {
23160 DHD_GENERAL_UNLOCK(dhdp, flags);
23161 DHD_ERROR(("%s: HAL dump is already triggered \n", __FUNCTION__));
23162 return BCME_ERROR;
23163 }
23164
23165 DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
23166 DHD_GENERAL_UNLOCK(dhdp, flags);
23167 DHD_OS_WAKE_LOCK(dhdp);
23168
23169 if (wl_cfg80211_is_hal_started(cfg)) {
23170 dhdp->pktlog_debug = TRUE;
23171 dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
23172 } else {
23173 DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
23174 }
23175 DHD_OS_WAKE_UNLOCK(dhdp);
23176 /* In case of dhd_os_busbusy_wait_bitmask() timeout,
23177 * hal dump bit will not be cleared. Hence clearing it here.
23178 */
23179 DHD_GENERAL_LOCK(dhdp, flags);
23180 DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
23181 dhd_os_busbusy_wake(dhdp);
23182 DHD_GENERAL_UNLOCK(dhdp, flags);
23183
23184 return BCME_OK;
23185}
23186
23187void
23188dhd_pktlog_dump(void *handle, void *event_info, u8 event)
23189{
23190 dhd_info_t *dhd = handle;
23191
23192 if (!dhd) {
23193 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
23194 return;
23195 }
23196
23197 if (dhd_pktlog_dump_write_file(&dhd->pub)) {
23198 DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__));
23199 return;
23200 }
23201}
23202
23203void
23204dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
23205{
23206 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
23207 (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
23208 dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
23209}
23210#endif /* DHD_PKT_LOGGING */
23211
23212#ifdef DHDTCPSYNC_FLOOD_BLK
23213static void dhd_blk_tsfl_handler(struct work_struct * work)
23214{
23215 dhd_if_t *ifp = NULL;
23216 dhd_pub_t *dhdp = NULL;
23217 /* Ignore compiler warnings due to -Werror=cast-qual */
23218 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
23219 ifp = container_of(work, dhd_if_t, blk_tsfl_work);
23220 GCC_DIAGNOSTIC_POP();
23221
23222 if (ifp) {
23223 dhdp = &ifp->info->pub;
23224 if (dhdp) {
23225 if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
23226 (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
23227 DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
23228 wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
23229 } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
23230 (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
23231				DHD_ERROR(("Disconnect due to TCP SYNC FLOOD ATTACK\n"));
23232 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
23233 }
23234 ifp->disconnect_tsync_flood = TRUE;
23235 }
23236 }
23237}
23238void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
23239{
23240 ifp->tsync_rcvd = 0;
23241 ifp->tsyncack_txed = 0;
23242 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
23243}
23244void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
23245{
23246 dhd_if_t *ifp = NULL;
23247 if (dev) {
23248 ifp = DHD_DEV_IFP(dev);
23249 }
23250 if (ifp) {
23251 ifp->tsync_rcvd = 0;
23252 ifp->tsyncack_txed = 0;
23253 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
23254 ifp->tsync_per_sec = 0;
23255 ifp->disconnect_tsync_flood = FALSE;
23256 }
23257}
23258#endif /* DHDTCPSYNC_FLOOD_BLK */
23259
23260#ifdef DHD_4WAYM4_FAIL_DISCONNECT
23261static void dhd_m4_state_handler(struct work_struct *work)
23262{
23263 dhd_if_t *ifp = NULL;
23264 /* Ignore compiler warnings due to -Werror=cast-qual */
23265 struct delayed_work *dw = to_delayed_work(work);
23266 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
23267 ifp = container_of(dw, dhd_if_t, m4state_work);
23268 GCC_DIAGNOSTIC_POP();
23269
23270 if (ifp && ifp->net &&
23271		(OSL_ATOMIC_READ(ifp->info->pub.osh, &ifp->m4state) == M4_TXFAILED)) {
23272 DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
23273 ifp->net->name));
23274 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
23275 }
23276}
23277
23278void
23279dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
23280{
23281 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
23282 struct ether_header *eh;
23283 uint16 type;
23284
23285 if (!success) {
23286 /* XXX where does this stuff belong to? */
23287 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
23288
23289 /* XXX Use packet tag when it is available to identify its type */
23290 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
23291 type = ntoh16(eh->ether_type);
23292 if (type == ETHER_TYPE_802_1X) {
23293 if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
23294 dhd_if_t *ifp = NULL;
23295 ifp = dhd->iflist[ifidx];
23296 if (!ifp || !ifp->net) {
23297 return;
23298 }
23299
23300 DHD_INFO(("%s: M4 TX failed on %d.\n",
23301 __FUNCTION__, ifidx));
23302
23303 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
23304 schedule_delayed_work(&ifp->m4state_work,
23305 msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
23306 }
23307 }
23308 }
23309}
23310
23311void
23312dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
23313{
23314 dhd_info_t *dhdinfo;
23315 dhd_if_t *ifp;
23316
23317 if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
23318 DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
23319 return;
23320 }
23321
23322 dhdinfo = (dhd_info_t *)(dhdp->info);
23323 if (!dhdinfo) {
23324 DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
23325 return;
23326 }
23327
23328 ifp = dhdinfo->iflist[ifidx];
23329 if (ifp) {
23330 cancel_delayed_work_sync(&ifp->m4state_work);
23331 }
23332}
23333#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
23334
23335#ifdef BIGDATA_SOFTAP
23336void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
23337{
23338 struct bcm_cfg80211 *cfg;
23339 dhd_pub_t *dhdp;
23340 ap_sta_wq_data_t *p_wq_data;
23341
23342 if (!bcm_cfg || !ndev || !e) {
23343 WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
23344 return;
23345 }
23346
23347 cfg = (struct bcm_cfg80211 *)bcm_cfg;
23348 dhdp = (dhd_pub_t *)cfg->pub;
23349
23350 if (!dhdp || !cfg->ap_sta_info) {
23351 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
23352 return;
23353 }
23354
23355 p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
23356 if (unlikely(!p_wq_data)) {
23357 DHD_ERROR(("%s(): could not allocate memory for - "
23358 "ap_sta_wq_data_t\n", __FUNCTION__));
23359 return;
23360 }
23361
23362 mutex_lock(&cfg->ap_sta_info->wq_data_sync);
23363
23364 memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
23365 p_wq_data->dhdp = dhdp;
23366 p_wq_data->bcm_cfg = cfg;
23367 p_wq_data->ndev = (struct net_device *)ndev;
23368
23369 mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
23370
23371 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
23372 p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
23373 wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
23374
23375}
23376#endif /* BIGDATA_SOFTAP */
23377
23378void
23379get_debug_dump_time(char *str)
23380{
23381 struct osl_timespec curtime;
23382 unsigned long local_time;
23383 struct rtc_time tm;
23384
23385 if (!strlen(str)) {
23386 osl_do_gettimeofday(&curtime);
23387 local_time = (u32)(curtime.tv_sec -
23388 (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
23389 rtc_time_to_tm(local_time, &tm);
23390
23391 snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
23392 tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
23393 tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
23394 }
23395}
23396
23397void
23398clear_debug_dump_time(char *str)
23399{
23400 memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
23401}
23402#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
23403void
23404copy_debug_dump_time(char *dest, char *src)
23405{
23406 memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
23407}
23408#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
23409
23410/*
23411 * DHD RING
23412 */
23413#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
23414#define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
23415
23416#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
23417#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
23418
23419#define DHD_RING_MAGIC 0x20170910
23420#define DHD_RING_IDX_INVALID 0xffffffff
23421
23422#define DHD_RING_SYNC_LOCK_INIT(osh) osl_spin_lock_init(osh)
23423#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) osl_spin_lock_deinit(osh, lock)
23424#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
23425#define DHD_RING_SYNC_UNLOCK(lock, flags) osl_spin_unlock(lock, flags)
23426
23427typedef struct {
23428 uint32 elem_size;
23429 uint32 elem_cnt;
23430 uint32 write_idx; /* next write index, -1 : not started */
23431	uint32 read_idx; /* next read index, -1 : not started */
23432
23433 /* protected elements during serialization */
23434	int lock_idx; /* start index of the locked region; locked elements will not be overwritten */
23435	int lock_count; /* number of locked elements, counted from lock_idx */
23436
23437 /* saved data elements */
23438 void *elem;
23439} dhd_fixed_ring_info_t;
23440
23441typedef struct {
23442 uint32 elem_size;
23443 uint32 elem_cnt;
23444 uint32 idx; /* -1 : not started */
23445 uint32 rsvd; /* reserved for future use */
23446
23447 /* protected elements during serialization */
23448 atomic_t ring_locked;
23449	/* tracks whether older entries have been overwritten */
23450 uint32 ring_overwrited;
23451
23452 /* saved data elements */
23453 void *elem;
23454} dhd_singleidx_ring_info_t;
23455
23456typedef struct {
23457 uint32 magic;
23458 uint32 type;
23459 void *ring_sync; /* spinlock for sync */
23460 union {
23461 dhd_fixed_ring_info_t fixed;
23462 dhd_singleidx_ring_info_t single;
23463 };
23464} dhd_ring_info_t;
23465
23466uint32
23467dhd_ring_get_hdr_size(void)
23468{
23469 return sizeof(dhd_ring_info_t);
23470}
23471
23472void *
23473dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
23474 uint32 elem_cnt, uint32 type)
23475{
23476 dhd_ring_info_t *ret_ring;
23477
23478 if (!buf) {
23479 DHD_RING_ERR(("NO RING BUFFER\n"));
23480 return NULL;
23481 }
23482
23483 if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
23484 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
23485 return NULL;
23486 }
23487
23488 if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
23489 DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
23490 return NULL;
23491 }
23492
23493 ret_ring = (dhd_ring_info_t *)buf;
23494 ret_ring->type = type;
23495 ret_ring->ring_sync = (void *)DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
23496 ret_ring->magic = DHD_RING_MAGIC;
23497
23498 if (type == DHD_RING_TYPE_FIXED) {
23499 ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
23500 ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
23501 ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
23502 ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
23503 ret_ring->fixed.elem_size = elem_size;
23504 ret_ring->fixed.elem_cnt = elem_cnt;
23505 } else {
23506 ret_ring->single.idx = DHD_RING_IDX_INVALID;
23507 atomic_set(&ret_ring->single.ring_locked, 0);
23508 ret_ring->single.ring_overwrited = 0;
23509 ret_ring->single.rsvd = 0;
23510 ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
23511 ret_ring->single.elem_size = elem_size;
23512 ret_ring->single.elem_cnt = elem_cnt;
23513 }
23514
23515 return ret_ring;
23516}
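/*
 * Usage sketch: the caller supplies one contiguous buffer sized for the
 * header plus element storage (ELEM_SZ/ELEM_CNT are placeholders):
 *
 *	uint32 bufsz = dhd_ring_get_hdr_size() + ELEM_SZ * ELEM_CNT;
 *	uint8 *buf = MALLOCZ(dhdp->osh, bufsz);
 *	void *ring = buf ? dhd_ring_init(dhdp, buf, bufsz,
 *		ELEM_SZ, ELEM_CNT, DHD_RING_TYPE_FIXED) : NULL;
 */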

void
dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	if (!ring) {
		return;
	}

	if (ring->magic != DHD_RING_MAGIC) {
		return;
	}

	if (ring->type != DHD_RING_TYPE_FIXED &&
		ring->type != DHD_RING_TYPE_SINGLE_IDX) {
		return;
	}

	DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
	ring->ring_sync = NULL;
	if (ring->type == DHD_RING_TYPE_FIXED) {
		dhd_fixed_ring_info_t *fixed = &ring->fixed;
		memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
		fixed->elem_size = fixed->elem_cnt = 0;
	} else {
		dhd_singleidx_ring_info_t *single = &ring->single;
		memset(single->elem, 0, single->elem_size * single->elem_cnt);
		single->elem_size = single->elem_cnt = 0;
	}
	ring->type = 0;
	ring->magic = 0;
}

static inline uint32
__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
{
	uint32 diff;
	uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
	uint32 elem_size, elem_cnt;
	void *elem;

	if (type == DHD_RING_TYPE_FIXED) {
		dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
		elem_size = fixed->elem_size;
		elem_cnt = fixed->elem_cnt;
		elem = fixed->elem;
	} else if (type == DHD_RING_TYPE_SINGLE_IDX) {
		dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
		elem_size = single->elem_size;
		elem_cnt = single->elem_cnt;
		elem = single->elem;
	} else {
		DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
		return ret_idx;
	}

	if (ptr < elem) {
		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
		return ret_idx;
	}
	diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
	if (diff % elem_size != 0) {
		DHD_RING_ERR(("INVALID POINTER (not element-aligned) %s:%p, ring->elem:%p\n",
			sig, ptr, elem));
		return ret_idx;
	}
	ret_idx = diff / elem_size;
	if (ret_idx >= elem_cnt) {
		DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
	}
	return ret_idx;
}

/* Sub functions for fixed ring */
/* get the number of elements between two indexes, inclusive (internal only) */
static inline int
__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
{
	if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
		return 0;
	}

	return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
}
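
/*
 * Worked example for the inclusive count above: with elem_cnt = 8,
 * start = 6 and end = 1, the result is (8 + 1 - 6) % 8 + 1 = 4,
 * i.e. indexes 6, 7, 0 and 1.
 */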

static inline int
__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
{
	return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
}

static inline void *
__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
}

static inline void
__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
{
	uint32 next_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}

	next_idx = (ring->read_idx + 1) % ring->elem_cnt;
	if (ring->read_idx == ring->write_idx) {
		/* ring becomes empty */
		ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
		return;
	}

	ring->read_idx = next_idx;
	return;
}

static inline void *
__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
}

static inline void *
__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
{
	uint32 tmp_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		ring->read_idx = ring->write_idx = 0;
		return (uint8 *)ring->elem;
	}

	/* check that the next index is not locked */
	tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
	if (ring->lock_idx == tmp_idx) {
		return NULL;
	}

	ring->write_idx = tmp_idx;
	if (ring->write_idx == ring->read_idx) {
		/* ring is full, drop the oldest element */
		ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
}
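
/*
 * Overwrite example: with elem_cnt = 4, read_idx = 0 and write_idx = 3
 * (ring full), the next call moves write_idx to 0 and pushes read_idx to 1,
 * silently dropping the oldest unread element; only a lock_idx sitting on
 * the next slot makes the call return NULL instead.
 */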

static inline void *
__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}

	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}

	if (cur_idx == ring->write_idx) {
		/* no newer record */
		return NULL;
	}

	cur_idx = (cur_idx + 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void *
__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}
	if (cur_idx == ring->read_idx) {
		/* no older record */
		return NULL;
	}

	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void
__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
{
	uint32 first_idx;
	uint32 last_idx;
	uint32 ring_filled_cnt;
	uint32 tmp_cnt;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}

	if (first_ptr) {
		first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
		if (first_idx >= ring->elem_cnt) {
			return;
		}
	} else {
		first_idx = ring->read_idx;
	}

	if (last_ptr) {
		last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
		if (last_idx >= ring->elem_cnt) {
			return;
		}
	} else {
		last_idx = ring->write_idx;
	}

	ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
	if (tmp_cnt > ring_filled_cnt) {
		DHD_RING_ERR(("LOCK FIRST POINTS TO AN EMPTY ELEM: write: %d read: %d cur:%d\n",
			ring->write_idx, ring->read_idx, first_idx));
		return;
	}

	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
	if (tmp_cnt > ring_filled_cnt) {
		DHD_RING_ERR(("LOCK LAST POINTS TO AN EMPTY ELEM: write: %d read: %d cur:%d\n",
			ring->write_idx, ring->read_idx, last_idx));
		return;
	}

	ring->lock_idx = first_idx;
	ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
	return;
}
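
/*
 * Example: with elem_cnt = 8, read_idx = 2 and write_idx = 6, passing
 * first_ptr at index 3 and last_ptr at index 5 yields lock_idx = 3 and
 * lock_count = 3, so elements 3..5 are protected from being reclaimed by
 * __dhd_fixed_ring_get_empty() until the lock is released.
 */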

static inline void
__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}

	ring->lock_idx = DHD_RING_IDX_INVALID;
	ring->lock_count = 0;
	return;
}

static inline void *
__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return NULL;
	}
	return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
}

static inline void *
__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
{
	int lock_last_idx;
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return NULL;
	}

	lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
}

static inline int
__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return BCME_ERROR;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return BCME_ERROR;
	}
	return ring->lock_count;
}

static inline void
__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return;
	}

	ring->lock_count--;
	if (ring->lock_count <= 0) {
		ring->lock_idx = DHD_RING_IDX_INVALID;
	} else {
		ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
	}
	return;
}

static inline void
__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
{
	ring->read_idx = idx;
}

static inline void
__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
{
	ring->write_idx = idx;
}

static inline uint32
__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
{
	return ring->read_idx;
}

static inline uint32
__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
{
	return ring->write_idx;
}

/* Sub functions for single index ring */
static inline void *
__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
{
	uint32 tmp_idx = 0;

	if (ring->idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}

	if (ring->ring_overwrited) {
		tmp_idx = (ring->idx + 1) % ring->elem_cnt;
	}

	return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
}

static inline void *
__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
{
	if (ring->idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}

	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
}

static inline void *
__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
{
	if (ring->idx == DHD_RING_IDX_INVALID) {
		ring->idx = 0;
		return (uint8 *)ring->elem;
	}

	/* refuse new writes while the whole-ring lock is held */
	if (atomic_read(&ring->ring_locked)) {
		return NULL;
	}

	/* detect the index rollover */
	if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
		ring->ring_overwrited = 1;
	}

	ring->idx = (ring->idx + 1) % ring->elem_cnt;

	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
}
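
/*
 * Wrap example: with elem_cnt = 4, writes land on indexes 0, 1, 2, 3; the
 * write that moves idx from 3 back to 0 first sets ring_overwrited, after
 * which __dhd_singleidx_ring_get_first() returns the slot at idx + 1 (the
 * oldest surviving element) rather than index 0.
 */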

static inline void *
__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}

	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}

	if (cur_idx == ring->idx) {
		/* no newer record */
		return NULL;
	}

	cur_idx = (cur_idx + 1) % ring->elem_cnt;

	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void *
__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}

	if (!ring->ring_overwrited && cur_idx == 0) {
		/* no older record */
		return NULL;
	}

	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
	if (ring->ring_overwrited && cur_idx == ring->idx) {
		/* no older record */
		return NULL;
	}

	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void
__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
{
	if (!atomic_read(&ring->ring_locked)) {
		atomic_set(&ring->ring_locked, 1);
	}
}

static inline void
__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
{
	if (atomic_read(&ring->ring_locked)) {
		atomic_set(&ring->ring_locked, 0);
	}
}
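
/*
 * The whole-ring lock is a freeze switch: while ring_locked is set,
 * __dhd_singleidx_ring_get_empty() refuses new writes so a reader can walk
 * a stable snapshot. Sketch (illustrative only):
 *
 *	dhd_ring_whole_lock(ring);
 *	for (elem = dhd_ring_get_first(ring); elem;
 *		elem = dhd_ring_get_next(ring, elem)) {
 *		... dump elem ...
 *	}
 *	dhd_ring_whole_unlock(ring);
 */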

/* Get first element : oldest element */
void *
dhd_ring_get_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_first(&ring->fixed);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_first(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

/* Free first element : oldest element */
void
dhd_ring_free_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_free_first(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void
dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void
dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

uint32
dhd_ring_get_read_idx(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	uint32 read_idx = DHD_RING_IDX_INVALID;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return read_idx;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);

	return read_idx;
}

uint32
dhd_ring_get_write_idx(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	uint32 write_idx = DHD_RING_IDX_INVALID;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return write_idx;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);

	return write_idx;
}

/* Get latest element */
void *
dhd_ring_get_last(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_last(&ring->fixed);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_last(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

/* Get the next slot that can be written to.
 * Unread elements may be overwritten;
 * returns NULL if the next slot is locked.
 */
void *
dhd_ring_get_empty(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_empty(&ring->fixed);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_empty(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

void *
dhd_ring_get_next(void *_ring, void *cur)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

void *
dhd_ring_get_prev(void *_ring, void *cur)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

int
dhd_ring_get_cur_size(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	int cnt = 0;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return cnt;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return cnt;
}

/* protect elements from first_ptr to last_ptr against overwrite */
void
dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

/* release all locked elements */
void
dhd_ring_lock_free(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock_free(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void *
dhd_ring_lock_get_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

void *
dhd_ring_lock_get_last(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

int
dhd_ring_lock_get_count(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	int ret = BCME_ERROR;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return ret;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

/* free the first locked element */
void
dhd_ring_lock_free_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock_free_first(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void
dhd_ring_whole_lock(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		__dhd_singleidx_ring_whole_lock(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void
dhd_ring_whole_unlock(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}

	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		__dhd_singleidx_ring_whole_unlock(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

/* END of DHD RING */
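
/*
 * End-to-end usage sketch of the public ring API (illustrative only;
 * my_rec_t stands in for whatever element type the ring was created with):
 *
 *	my_rec_t *rec = (my_rec_t *)dhd_ring_get_empty(ring);
 *	if (rec) {
 *		... fill *rec ...
 *	}
 *
 *	while ((rec = (my_rec_t *)dhd_ring_get_first(ring)) != NULL) {
 *		... consume *rec ...
 *		dhd_ring_free_first(ring);	(fixed-type rings only)
 *	}
 */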

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
#define DHD_VFS_INODE(dir) (dir->d_inode)
#else
#define DHD_VFS_INODE(dir) d_inode(dir)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
#else
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */

int
dhd_file_delete(char *path)
{
	struct path file_path;
	int err;
	struct dentry *dir;

	err = kern_path(path, 0, &file_path);

	if (err < 0) {
		DHD_ERROR(("Failed to get kern-path of file to delete: %s error: %d\n",
			path, err));
		return err;
	}
	if (
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
		!d_is_file(file_path.dentry) ||
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
		d_really_is_negative(file_path.dentry) ||
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
		FALSE)	/* FALSE terminates the version-dependent condition chain */
	{
		err = -EINVAL;
	} else {
		dir = dget_parent(file_path.dentry);

		if (!IS_ERR(dir)) {
			err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
			dput(dir);
		} else {
			err = PTR_ERR(dir);
		}
	}

	path_put(&file_path);

	if (err < 0) {
		DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
	}

	return err;
}
#ifdef DHD_DUMP_MNGR
static int
dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
{
	int i;
	int fm_idx = -1;

	for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
		/* XXX dump file manager enqueues the type name into an empty slot,
		 * so an empty slot can never appear in the middle of the array.
		 */
		if (strlen(fm_ptr->elems[i].type_name) == 0) {
			fm_idx = i;
			break;
		}
		if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
			fm_idx = i;
			break;
		}
	}

	if (fm_idx == -1) {
		return fm_idx;
	}

	if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
		strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
		fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
		fm_ptr->elems[fm_idx].file_idx = 0;
	}

	return fm_idx;
}

/*
 * dhd_dump_file_manage_enqueue - enqueue a dump file path
 * and delete the oldest file if the file count is at its maximum.
 */
void
dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
{
	int fm_idx;
	int fp_idx;
	dhd_dump_file_manage_t *fm_ptr;
	DFM_elem_t *elem;

	if (!dhd || !dhd->dump_file_manage) {
		DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
		return;
	}

	fm_ptr = dhd->dump_file_manage;

	/* find the file_manage idx */
	DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
	if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
		DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
			__FUNCTION__, fname));
		return;
	}

	elem = &fm_ptr->elems[fm_idx];
	fp_idx = elem->file_idx;
	DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
		__FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));

	/* delete the oldest file */
	if (strlen(elem->file_path[fp_idx]) != 0) {
		if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
			DHD_ERROR(("%s(): Failed to delete file: %s\n",
				__FUNCTION__, elem->file_path[fp_idx]));
		} else {
			DHD_ERROR(("%s(): Successfully deleted file: %s\n",
				__FUNCTION__, elem->file_path[fp_idx]));
		}
	}

	/* save the dump file path */
	strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
	elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';

	/* advance to the next file index */
	elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
}
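
/*
 * Rotation example (value is illustrative): if DHD_DUMP_FILE_COUNT_MAX were
 * 3, successive enqueues for one dump type fill file_path[0..2]; the 4th
 * enqueue wraps file_idx back to 0, deletes the path recorded there (the
 * oldest dump on disk) and stores the new path in its place.
 */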
#endif /* DHD_DUMP_MNGR */

#ifdef DNGL_AXI_ERROR_LOGGING
static void
dhd_axi_error_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = (dhd_info_t *)handle;
	dhd_pub_t *dhdp = NULL;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		goto exit;
	}

	dhdp = &dhd->pub;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		goto exit;
	}

	/*
	 * First save the AXI error information to a file,
	 * because a kernel panic is triggered right after this.
	 * After the DHD reset, DHD reads the file back and runs the hang-event
	 * processing to forward the stored AXI error to the Bigdata server.
	 */
	if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
			__FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
	}

	DHD_OS_WAKE_LOCK(dhdp);
#ifdef DHD_FW_COREDUMP
#ifdef DHD_SSSR_DUMP
	DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
	dhdp->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
	DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);

exit:
	/* Trigger a kernel panic after taking the necessary dumps */
	BUG_ON(1);
}

void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
{
	DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		type, DHD_WQ_WORK_AXI_ERROR_DUMP,
		dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* DNGL_AXI_ERROR_LOGGING */

#ifdef SUPPORT_SET_TID
/*
 * Set a custom TID value for UDP frames based on the UID value.
 * This is triggered by the android private command below.
 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
 * Mode 0(SET_TID_OFF) : Disable changing the TID
 * Mode 1(SET_TID_ALL_UDP) : Change the TID for all UDP frames
 * Mode 2(SET_TID_BASED_ON_UID) : Change the TID for UDP frames based on the target UID
 */
void
dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
{
	struct ether_header *eh = NULL;
	struct sock *sk = NULL;
	uint8 *pktdata = NULL;
	uint8 *ip_hdr = NULL;
	uint8 cur_prio;
	uint8 prio;
	uint32 uid;

	if (dhdp->tid_mode == SET_TID_OFF) {
		return;
	}

	pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
	eh = (struct ether_header *) pktdata;
	ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;

	if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
		return;
	}

	cur_prio = PKTPRIO(pkt);
	prio = dhdp->target_tid;
	uid = dhdp->target_uid;

	if ((cur_prio == prio) ||
		(cur_prio != PRIO_8021D_BE)) {
		return;
	}

	sk = ((struct sk_buff*)(pkt))->sk;

	if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
		(sk && (uid == __kuid_val(sock_i_uid(sk))))) {
		PKTSETPRIO(pkt, prio);
	}
}
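
/*
 * Example (values are illustrative): after
 *	DRIVER SET_TID 2 10010 5
 * tid_mode is SET_TID_BASED_ON_UID, so only best-effort UDP frames whose
 * owning socket belongs to UID 10010 are remarked to priority 5; frames
 * already carrying a non-BE priority are left untouched.
 */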
#endif /* SUPPORT_SET_TID */

#ifdef BCMPCIE
static void
dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_pub_t *dhdp = NULL;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		BUG_ON(1);
		return;
	}

	dhdp = &dhd->pub;
	dhdpcie_cto_recovery_handler(dhdp);
}

void
dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		NULL, DHD_WQ_WORK_CTO_RECOVERY,
		dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* BCMPCIE */

#ifdef DHD_WIFI_SHUTDOWN
void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
{
	dhd_pub_t *dhd_pub = NULL;
	dhd_info_t *dhd_info = NULL;
	dhd_if_t *dhd_if = NULL;

	DHD_ERROR(("%s enter\n", __FUNCTION__));
	dhd_pub = g_dhd_pub;

	if (dhd_os_check_if_up(dhd_pub)) {
		dhd_info = (dhd_info_t *)dhd_pub->info;
		dhd_if = dhd_info->iflist[0];
		ASSERT(dhd_if);
		ASSERT(dhd_if->net);
		if (dhd_if && dhd_if->net) {
			dhd_stop(dhd_if->net);
		}
	}
}
#endif /* DHD_WIFI_SHUTDOWN */
#ifdef WL_AUTO_QOS
void
dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off)
{
	dhd_sock_qos_set_status(dhdp->info, on_off);
}
#endif /* WL_AUTO_QOS */

#ifdef DHD_CFG80211_SUSPEND_RESUME
void
dhd_cfg80211_suspend(dhd_pub_t *dhdp)
{
	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
	wl_cfg80211_suspend(cfg);
}

void
dhd_cfg80211_resume(dhd_pub_t *dhdp)
{
	struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, 0);
	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
	wl_cfg80211_resume(cfg);
}
#endif /* DHD_CFG80211_SUSPEND_RESUME */

void
dhd_generate_rand_mac_addr(struct ether_addr *ea_addr)
{
	RANDOM_BYTES(ea_addr->octet, ETHER_ADDR_LEN);
	/* force the mcast bit to 0 and the local-admin bit to 1 */
	ETHER_SET_UNICAST(ea_addr->octet);
	ETHER_SET_LOCALADDR(ea_addr->octet);
	DHD_ERROR(("%s: generated new MAC="MACDBG" \n",
		__FUNCTION__, MAC2STRDBG(ea_addr->octet)));
	return;
}
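
/*
 * Bit-layout note: in the first MAC octet, bit 0 is the multicast bit and
 * bit 1 is the locally-administered bit, so the two macros above typically
 * reduce to the following (an assumption about their expansion, shown for
 * clarity only):
 *
 *	ea_addr->octet[0] &= ~0x01;	// unicast
 *	ea_addr->octet[0] |= 0x02;	// locally administered
 */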

void *
dhd_get_roam_evt(dhd_pub_t *dhdp)
{
#if defined(DHD_PUB_ROAM_EVT)
	return (void *)&(dhdp->roam_evt);
#else
	return NULL;
#endif /* DHD_PUB_ROAM_EVT */
}

/* BANDLOCK_FILE is for Hikey only, and BANDLOCK takes priority over BANDLOCK_FILE */
static void dhd_set_bandlock(dhd_pub_t *dhd)
{
#if defined(BANDLOCK)
	int band = BANDLOCK;
	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
		DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
	}
#elif defined(BANDLOCK_FILE)
	int band;
	char val[2] = {0, 0};
	if (dhd_read_file(PATH_BANDLOCK_INFO, (char *)val, sizeof(char)) == BCME_OK) {
		band = bcm_atoi(val);
		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &band, sizeof(band), TRUE, 0) < 0) {
			DHD_ERROR(("%s: set band(%d) error\n", __FUNCTION__, band));
		}
	}
#endif /* BANDLOCK */
}
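
/*
 * PATH_BANDLOCK_INFO is expected to hold a single ASCII digit that is fed
 * straight to WLC_SET_BAND; by the usual Broadcom convention 0 selects
 * auto, 1 locks to 5 GHz and 2 locks to 2.4 GHz (stated as an assumption,
 * not verified against this firmware).
 */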

#ifdef PCIE_FULL_DONGLE
/* API to delete the flowrings and stations
 * corresponding to the interface (ndev)
 */
void
dhd_net_del_flowrings_sta(dhd_pub_t *dhd, struct net_device *ndev)
{
	dhd_if_t *ifp = NULL;

	ifp = dhd_get_ifp_by_ndev(dhd, ndev);
	if (ifp == NULL) {
		DHD_ERROR(("DHD Iface Info corresponding to %s not found\n", ndev->name));
		return;
	}

	/* For now this is called only in the iface delete path.
	 * Add reason codes if this API needs to be reused in any other paths.
	 */
	DHD_ERROR(("%s: Clean up IFACE idx %d due to interface delete\n",
		__FUNCTION__, ifp->idx));

	dhd_del_all_sta(dhd, ifp->idx);
	dhd_flow_rings_delete(dhd, ifp->idx);
}
#endif /* PCIE_FULL_DONGLE */

void *dhd_get_pub(struct net_device *dev)
{
	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
	if (dhdinfo) {
		return (void *)&dhdinfo->pub;
	} else {
		printf("%s: null dhdinfo\n", __FUNCTION__);
		return NULL;
	}
}

void *dhd_get_conf(struct net_device *dev)
{
	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
	if (dhdinfo) {
		return (void *)dhdinfo->pub.conf;
	} else {
		printf("%s: null dhdinfo\n", __FUNCTION__);
		return NULL;
	}
}

bool dhd_os_wd_timer_enabled(void *bus)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return FALSE;
	}
	return dhd->wd_timer_valid;
}

#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
/* Automatically add/del an interface to/from the bridge device that the primary device is in */
static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
{
	struct net_device *primary_ndev = NULL, *br_dev = NULL;
	int cmd;
	struct ifreq ifr;

	/* add the new interface to the bridge dev */
	if (dev) {
		int found = 0, i;
		DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
		for (i = 0; i < ifidx; i++) {
			DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
			/* search for the primary interface */
			if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
				primary_ndev = dhd->pub.info->iflist[i]->net;
				DHD_ERROR(("ifidx %d is the primary dev %s\n", i, primary_ndev->name));
				found = 1;
				break;
			}
		}
		if (found == 0) {
			DHD_ERROR(("Cannot find the primary dev %s\n", dev->name));
			return;
		}
		cmd = SIOCBRADDIF;
		ifr.ifr_ifindex = dev->ifindex;
	} else { /* del the interface from the bridge dev */
		primary_ndev = dhd->pub.info->iflist[ifidx]->net;
		cmd = SIOCBRDELIF;
		ifr.ifr_ifindex = primary_ndev->ifindex;
	}
	/* if the primary net device is bridged */
	if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
		rtnl_lock();
		/* get the bridge device */
		br_dev = netdev_master_upper_dev_get(primary_ndev);
		if (br_dev) {
			const struct net_device_ops *ops = br_dev->netdev_ops;
			DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
			if (ops) {
				if (cmd == SIOCBRADDIF) {
					DHD_ERROR(("br call ndo_add_slave\n"));
					ops->ndo_add_slave(br_dev, dev);
					/* Also bring the wds0.x interface up automatically */
					dev_change_flags(dev, dev->flags | IFF_UP);
				} else {
					DHD_ERROR(("br call ndo_del_slave\n"));
					ops->ndo_del_slave(br_dev, primary_ndev);
				}
			}
		} else {
			DHD_ERROR(("no br dev\n"));
		}
		rtnl_unlock();
	} else {
		DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
	}
}
#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */