source: G950FXXS5DSI1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] drivers/net/wireless/bcmdhd4361/dhd_linux.c
/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 818247 2019-05-07 04:15:13Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
#ifdef DHD_DUMP_MNGR
#include <linux/namei.h>
#endif /* DHD_DUMP_MNGR */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <dhd_linux_priv.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <bcmiov.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>

#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif // endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif // endif
#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
#ifdef WL_BAM
#include <wl_bam.h>
#endif /* WL_BAM */
#endif /* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif // endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif // endif

#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */

#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */

#ifdef AMPDU_VO_ENABLE
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */

#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */

#define IP_PROT_RESERVED	0xFF

#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct * work);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */

#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif // endif

#if defined(DHD_LB)
#if defined(DHD_LB_RXP)
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
#endif /* DHD_LB_RXP */
#if defined(DHD_LB_TXP)
static void dhd_lb_tx_handler(unsigned long data);
static void dhd_tx_dispatcher_work(struct work_struct * work);
static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
#endif /* DHD_LB_TXP */
#endif /* DHD_LB */

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL	0x001A11
#endif // endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */

#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH	1000000	/* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif // endif

#include <wl_android.h>

/* Maximum STA per radio */
#define DHD_MAX_STA	32

#ifdef DHD_EVENT_LOG_FILTER
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */

/*
 * Start of Host DMA whitelist region.
 */
uint32 wlreg_l = 0;
uint32 wlreg_h = 0;
module_param(wlreg_l, uint, 0644);
module_param(wlreg_h, uint, 0644);

/*
 * Size of the whitelist region. The dongle will allow DMA only to the range
 * wlreg to wlreg+wlreg_len. If the length of the whitelist region is zero,
 * the host will not program the whitelist region into the dongle.
 */
uint32 wlreg_len_h = 0;
uint32 wlreg_len_l = 0;

module_param(wlreg_len_l, uint, 0644);
module_param(wlreg_len_h, uint, 0644);
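/*
 * Usage sketch (hypothetical values): the 64-bit region base and length are
 * split into low/high 32-bit module parameters, so a load such as
 *   insmod dhd.ko wlreg_l=0x00200000 wlreg_h=0x0 wlreg_len_l=0x00100000 wlreg_len_h=0x0
 * would whitelist host DMA to [0x00200000, 0x00300000).
 */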

const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
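/*
 * Worked example (values taken from the tables above): 802.1D priority 6 ->
 * prio2fifo[6] == 3 -> wme_fifo2ac[3] == 3, while priority 0 ->
 * prio2fifo[0] == 1 -> wme_fifo2ac[1] == 1. So WME_PRIO2AC(6) == 3 and
 * WME_PRIO2AC(0) == 1.
 */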

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* To make sure we won't register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* To make sure we won't register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(struct work_struct *work_data);
#endif // endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */

#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */

#include <dhd_bus.h>

#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */

#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif // endif
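/*
 * Worked example (assumed values): for a 1500-byte MTU netdev with a 14-byte
 * Ethernet header and a hypothetical bus header length of 16,
 * DBUS_RX_BUFFER_SIZE_DHD() evaluates to 1500 + 14 + 16 = 1530 bytes, plus
 * the extra 128-byte pad when PROP_TXSTATUS is compiled in.
 */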

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
	return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend		register_pre_suspend
#define unregister_early_suspend	unregister_pre_suspend
#define early_suspend			pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN	50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
#include <linux/nl80211.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */

#if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
	defined(ARGOS_NOTIFY_CB)
/* ARGOS notifier data */
static struct notifier_block argos_wifi; /* STA */
static struct notifier_block argos_p2p; /* P2P */
argos_rps_ctrl argos_rps_ctrl_data;
#ifdef DYNAMIC_MUMIMO_CONTROL
argos_mumimo_ctrl argos_mumimo_ctrl_data;
#ifdef CONFIG_SPLIT_ARGOS_SET
static struct notifier_block argos_mimo; /* STA */
#endif /* CONFIG_SPLIT_ARGOS_SET */
#endif /* DYNAMIC_MUMIMO_CONTROL */
#endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */

#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */

#ifdef DHD_LOG_DUMP

struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];

/* Only the headers for the log dump ring buffers are stored in this array;
 * headers for sections like 'dhd dump' and 'ext trap' are not, because those
 * sections are not log ring buffers.
 */
dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
	{GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
	{PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
	{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};

static int dld_buf_size[DLD_BUFFER_NUM] = {
	LOG_DUMP_GENERAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_GENERAL */
	LOG_DUMP_PRESERVE_MAX_BUFSIZE,	/* DLD_BUF_TYPE_PRESERVE */
	LOG_DUMP_SPECIAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_SPECIAL */
};
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH	"/system/bin/wldu"
static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
#endif /* DHD_DEBUG_UART */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,
};

#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */

dhd_pub_t *g_dhd_pub = NULL;

#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
#endif /* defined (BT_OVER_SDIO) */

#ifdef SHOW_LOGTRACE
static int dhd_trace_open_proc(struct inode *inode, struct file *file);
ssize_t dhd_trace_read_proc(struct file *file, char *buffer, size_t tt, loff_t *loff);

static const struct file_operations proc_file_fops = {
	.read = dhd_trace_read_proc,
	.open = dhd_trace_open_proc,
	.release = seq_release,
};
#endif // endif

#ifdef WL_STATIC_IF
bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#endif /* WL_STATIC_IF */

atomic_t exit_in_progress = ATOMIC_INIT(0);

static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
static struct sock *nl_to_event_sk = NULL;
int sender_pid = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
struct netlink_kernel_cfg dhd_netlink_cfg = {
	.groups = 1,
	.input = dhd_process_daemon_msg,
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif // endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware and chip version info visible from the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

#ifdef DHD_LOG_DUMP
int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
module_param(logdump_max_filesize, int, 0644);
int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
module_param(logdump_max_bufsize, int, 0644);
int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
int logdump_periodic_flush = FALSE;
module_param(logdump_periodic_flush, int, 0644);
#ifdef DEBUGABILITY_ECNTRS_LOGGING
int logdump_ecntr_enable = TRUE;
#else
int logdump_ecntr_enable = FALSE;
#endif /* DEBUGABILITY_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
#endif /* DHD_LOG_DUMP */

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef WL_NATOE
static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
#endif /* WL_NATOE */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

/* Error bits */
module_param(dhd_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */

/* wl event forwarding */
#ifdef WL_EVENT_ENAB
uint wl_event_enable = true;
#else
uint wl_event_enable = false;
#endif /* WL_EVENT_ENAB */
module_param(wl_event_enable, uint, 0660);

/* wl event forwarding */
#ifdef LOGTRACE_PKT_SENDUP
uint logtrace_pkt_sendup = true;
#else
uint logtrace_pkt_sendup = false;
#endif /* LOGTRACE_PKT_SENDUP */
module_param(logtrace_pkt_sendup, uint, 0660);

/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif // endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = TRUE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */

#ifdef DHD_MSI_SUPPORT
uint enable_msi = TRUE;
module_param(enable_msi, uint, 0);
#endif /* DHD_MSI_SUPPORT */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);

extern uint dma_ring_indices;
module_param(dma_ring_indices, uint, 0644);

extern bool h2d_phase;
module_param(h2d_phase, bool, 0644);
extern bool force_trap_bad_h2d_phase;
module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_DHCP_DUMP
struct bootp_fmt {
	struct iphdr ip_header;
	struct udphdr udp_header;
	uint8 op;
	uint8 htype;
	uint8 hlen;
	uint8 hops;
	uint32 transaction_id;
	uint16 secs;
	uint16 flags;
	uint32 client_ip;
	uint32 assigned_ip;
	uint32 server_ip;
	uint32 relay_ip;
	uint8 hw_address[16];
	uint8 server_name[64];
	uint8 file_name[128];
	uint8 options[312];
};

static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
static const char dhcp_ops[][10] = {
	"NA", "REQUEST", "REPLY"
};
static const char dhcp_types[][10] = {
	"NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
};
static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_DHCP_DUMP */

#ifdef DHD_ICMP_DUMP
#include <net/icmp.h>
static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_ICMP_DUMP */

#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#else
static char *logstrs_path = "/installmedia/logstrs.bin";
char *st_str_file_path = "/installmedia/rtecdc.bin";
static char *map_file_path = "/installmedia/rtecdc.map";
static char *rom_st_str_file_path = "/installmedia/roml.bin";
static char *rom_map_file_path = "/installmedia/roml.map";
#endif // endif
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */

#ifdef D2H_MINIDUMP
void dhd_d2h_minidump(dhd_pub_t *dhdp);
#endif /* D2H_MINIDUMP */

#ifdef DHDTCPSYNC_FLOOD_BLK
extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#if defined(DHD_LB)

static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
	/* Default CPU allocation for the jobs */
	atomic_set(&dhd->rx_napi_cpu, 1);
	atomic_set(&dhd->rx_compl_cpu, 2);
	atomic_set(&dhd->tx_compl_cpu, 2);
	atomic_set(&dhd->tx_cpu, 2);
	atomic_set(&dhd->net_tx_cpu, 0);
}
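/*
 * Placement sketch implied by the defaults above: RX NAPI on CPU1, RX/TX
 * completions and TX processing on CPU2, and network-stack TX accounted to
 * CPU0. dhd_select_cpu_candidacy() rebalances these once the CPU masks are
 * initialized.
 */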

static void
dhd_cpumasks_deinit(dhd_info_t *dhd)
{
	free_cpumask_var(dhd->cpumask_curr_avail);
	free_cpumask_var(dhd->cpumask_primary);
	free_cpumask_var(dhd->cpumask_primary_new);
	free_cpumask_var(dhd->cpumask_secondary);
	free_cpumask_var(dhd->cpumask_secondary_new);
}

static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
	int id;
	uint32 cpus, num_cpus = num_possible_cpus();
	int ret = 0;

	DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));

	if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
	    !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
		DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
		ret = -ENOMEM;
		goto fail;
	}

	cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
	cpumask_clear(dhd->cpumask_primary);
	cpumask_clear(dhd->cpumask_secondary);

	if (num_cpus > 32) {
		DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
		ASSERT(0);
	}

	cpus = DHD_LB_PRIMARY_CPUS;
	for (id = 0; id < num_cpus; id++) {
		if (isset(&cpus, id))
			cpumask_set_cpu(id, dhd->cpumask_primary);
	}

	cpus = DHD_LB_SECONDARY_CPUS;
	for (id = 0; id < num_cpus; id++) {
		if (isset(&cpus, id))
			cpumask_set_cpu(id, dhd->cpumask_secondary);
	}

	return ret;
fail:
	dhd_cpumasks_deinit(dhd);
	return ret;
}

/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups
 * Primary Set - A CPU mask that carries the First Choice CPUs
 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of Job that need to be assigned to
 * the CPUs, from one of the above mentioned CPU groups. The Jobs are
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, Rx) - compl_cpu
 *
 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
 * algo tries to pick up the first available non-boot CPU (CPU0) for napi_cpu.
 * If there are more processors free, it assigns one to compl_cpu.
 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
 * CPU, as much as possible.
 *
 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
 * would allow Tx completion skb's to be released into a local free pool from
 * which the rx buffer posts could have been serviced. It is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
	uint32 tx_cpu = 0; /* cpu selected for tx processing job */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* In case there are no more CPUs, do completions & Tx in same CPU */
		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = tx_cpu;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (tx_cpu == 0) {
			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for tx processing, choose CPU 0 */
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		napi_cpu = 1;
		compl_cpu = 0;
		tx_cpu = 2;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);
	ASSERT(tx_cpu < nr_cpu_ids);

	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	atomic_set(&dhd->tx_cpu, tx_cpu);

	return;
}
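/*
 * Illustrative walk-through (hypothetical masks, not the configured values):
 * with DHD_LB_PRIMARY_CPUS == 0xF0, DHD_LB_SECONDARY_CPUS == 0x0E and only
 * CPUs 4 and 5 online, cpumask_primary_new = {4, 5}, so napi_cpu = 4,
 * tx_cpu = cpumask_next(4) = 5, and compl_cpu falls back to tx_cpu = 5
 * because no third primary CPU is available; the secondary mask contributes
 * nothing since none of its CPUs are online.
 */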

/*
 * Function to handle CPU Hotplug notifications.
 * One of the tasks it performs is to trigger the CPU candidacy algorithm
 * for load balancing.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned long int cpu = (unsigned long int)hcpu;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
		DHD_INFO(("%s(): LB data is not initialized yet.\n",
			__FUNCTION__));
		return NOTIFY_BAD;
	}

	switch (action)
	{
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
			cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;

		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
			cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
			dhd_select_cpu_candidacy(dhd);
			break;
		default:
			break;
	}

	return NOTIFY_OK;
}

#if defined(DHD_LB_STATS)
void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	int i, j, num_cpus = num_possible_cpus();
	int alloc_size = sizeof(uint32) * num_cpus;

	if (dhdp == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
			__FUNCTION__));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
	DHD_LB_STATS_CLR(dhd->napi_sched_cnt);

	dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->napi_percpu_run_cnt) {
		DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);

	DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);

	dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->rxc_percpu_run_cnt) {
		DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);

	DHD_LB_STATS_CLR(dhd->txc_sched_cnt);

	dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->txc_percpu_run_cnt) {
		DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);

	dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->cpu_online_cnt) {
		DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);

	dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->cpu_offline_cnt) {
		DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);

	dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->txp_percpu_run_cnt) {
		DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);

	dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
	if (!dhd->tx_start_percpu_run_cnt) {
		DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
			__FUNCTION__));
		return;
	}
	for (i = 0; i < num_cpus; i++)
		DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);

	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->napi_rx_hist[j]) {
			DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
		}
	}
#ifdef DHD_LB_TXC
	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->txc_hist[j]) {
			DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
		}
	}
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
	for (j = 0; j < HIST_BIN_SIZE; j++) {
		dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
		if (!dhd->rxc_hist[j]) {
			DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
				__FUNCTION__, j));
			return;
		}
		for (i = 0; i < num_cpus; i++) {
			DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
		}
	}
#endif /* DHD_LB_RXC */
	return;
}

void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	int j, num_cpus = num_possible_cpus();
	int alloc_size = sizeof(uint32) * num_cpus;

	if (dhdp == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
			__FUNCTION__));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	if (dhd->napi_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
		dhd->napi_percpu_run_cnt = NULL;
	}
	if (dhd->rxc_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
		dhd->rxc_percpu_run_cnt = NULL;
	}
	if (dhd->txc_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
		dhd->txc_percpu_run_cnt = NULL;
	}
	if (dhd->cpu_online_cnt) {
		MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
		dhd->cpu_online_cnt = NULL;
	}
	if (dhd->cpu_offline_cnt) {
		MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
		dhd->cpu_offline_cnt = NULL;
	}

	if (dhd->txp_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
		dhd->txp_percpu_run_cnt = NULL;
	}
	if (dhd->tx_start_percpu_run_cnt) {
		MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
		dhd->tx_start_percpu_run_cnt = NULL;
	}

	for (j = 0; j < HIST_BIN_SIZE; j++) {
		if (dhd->napi_rx_hist[j]) {
			MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
			dhd->napi_rx_hist[j] = NULL;
		}
#ifdef DHD_LB_TXC
		if (dhd->txc_hist[j]) {
			MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
			dhd->txc_hist[j] = NULL;
		}
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
		if (dhd->rxc_hist[j]) {
			MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
			dhd->rxc_hist[j] = NULL;
		}
#endif /* DHD_LB_RXC */
	}

	return;
}

static void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
	struct bcmstrbuf *strbuf, uint32 **hist)
{
	int i, j;
	uint32 *per_cpu_total;
	uint32 total = 0;
	uint32 num_cpus = num_possible_cpus();

	per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
	if (!per_cpu_total) {
		DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
		return;
	}
	bzero(per_cpu_total, sizeof(uint32) * num_cpus);

	bcm_bprintf(strbuf, "CPU: \t\t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%d\t", i);
	bcm_bprintf(strbuf, "\nBin\n");

	for (i = 0; i < HIST_BIN_SIZE; i++) {
		bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
		for (j = 0; j < num_cpus; j++) {
			bcm_bprintf(strbuf, "%d\t", hist[i][j]);
		}
		bcm_bprintf(strbuf, "\n");
	}
	bcm_bprintf(strbuf, "Per CPU Total \t");
	total = 0;
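	/* Bin j counts the batches whose size rounded up to 2^j, so weighting
	 * each count by (1 << j) reconstructs an approximate per-CPU packet
	 * total from the histogram.
	 */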
	for (i = 0; i < num_cpus; i++) {
		for (j = 0; j < HIST_BIN_SIZE; j++) {
			per_cpu_total[i] += (hist[j][i] * (1<<j));
		}
		bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
		total += per_cpu_total[i];
	}
	bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);

	if (per_cpu_total) {
		MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
		per_cpu_total = NULL;
	}
	return;
}

static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
{
	int i, num_cpus = num_possible_cpus();

	bcm_bprintf(strbuf, "CPU: \t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%d\t", i);
	bcm_bprintf(strbuf, "\n");

	bcm_bprintf(strbuf, "Val: \t");
	for (i = 0; i < num_cpus; i++)
		bcm_bprintf(strbuf, "%u\t", *(p+i));
	bcm_bprintf(strbuf, "\n");
	return;
}

void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

	bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);

#ifdef DHD_LB_RXP
	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */

#ifdef DHD_LB_TXC
	bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */

#ifdef DHD_LB_TXP
	bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);

	bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
#endif /* DHD_LB_TXP */
}

/* Given a number 'n', returns the next power of 2 >= n (n itself if it is
 * already a power of 2).
 */
static inline uint32 next_larger_power2(uint32 num)
{
	num--;
	num |= (num >> 1);
	num |= (num >> 2);
	num |= (num >> 4);
	num |= (num >> 8);
	num |= (num >> 16);

	return (num + 1);
}
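/*
 * Example values: next_larger_power2(17) == 32 and
 * next_larger_power2(120) == 128, while an exact power of 2 is returned
 * unchanged (e.g. 16 -> 16) because of the initial decrement.
 */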

static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
{
	uint32 bin_power;
	uint32 *p;
	bin_power = next_larger_power2(count);

	switch (bin_power) {
		case   1: p = bin[0] + cpu; break;
		case   2: p = bin[1] + cpu; break;
		case   4: p = bin[2] + cpu; break;
		case   8: p = bin[3] + cpu; break;
		case  16: p = bin[4] + cpu; break;
		case  32: p = bin[5] + cpu; break;
		case  64: p = bin[6] + cpu; break;
		case 128: p = bin[7] + cpu; break;
		default : p = bin[8] + cpu; break;
	}

	*p = *p + 1;
	return;
}
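/*
 * Example: a batch of 20 packets rounds up to 32, so the tally for this CPU
 * lands in bin[5]; any batch larger than 128 falls into the overflow bin[8]
 * (HIST_BIN_SIZE is assumed to be at least 9 for the bins indexed here).
 */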

extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
	int cpu;
	dhd_info_t *dhd = dhdp->info;

	cpu = get_cpu();
	put_cpu();
	dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);

	return;
}

extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}

extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}
#endif /* DHD_LB_STATS */

#endif /* DHD_LB */

#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT()			do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)		do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)	do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
#else
uint dhd_roam_disable = 0;
#endif /* BCMCCX */

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif // endif

static uint pcie_txs_metadata_enable = 0;	/* Enable TX status metadata report */
module_param(pcie_txs_metadata_enable, uint, 0);

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */

#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */

#if defined(BCMSUP_4WAY_HANDSHAKE)
/* Use in dongle supplicant for 4-way handshake */
#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
/* Enable idsup by default (if supported in fw) */
uint dhd_use_idsup = 1;
#else
uint dhd_use_idsup = 0;
#endif /* WLFBT || WL_ENABLE_IDSUP */
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */

/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);

#ifdef ECOUNTER_PERIODIC_DISABLE
uint enable_ecounter = FALSE;
#else
uint enable_ecounter = TRUE;
#endif // endif
module_param(enable_ecounter, uint, 0);

extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);

#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
#ifdef DHD_EXPORT_CNTL_FILE
int pmmode_val;
#endif /* DHD_EXPORT_CNTL_FILE */
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* DHD_PM_CONTROL_FROM_FILE */

#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		dhd_wlfc_resume(&dhdinfo->pub);
	}
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif // endif

	return ret;
}

/* To make sure we won't register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd;	/* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp;		/* cached pointer to dhd_if in netdevice priv */
	int ifidx;		/* interface index */
	void * lkup;
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE	(sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)	((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev)	(((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
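/*
 * Access sketch: given a struct net_device *net managed by this driver,
 *   dhd_info_t *dhd = DHD_DEV_INFO(net);
 *   dhd_if_t *ifp = DHD_DEV_IFP(net);
 * fetch the cached back-pointers stored by dhd_dev_priv_save() below.
 */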

#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
#endif /* defined(DHD_OF_SUPPORT) */
/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
	int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}

/* Return interface pointer */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);

	if (ifidx >= DHD_MAX_IFS)
		return NULL;

	return dhdp->info->iflist[ifidx];
}

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains come from reducing branch conditionals, improving
 * instruction parallelism and dual issue, reducing load shadows, and making
 * larger pipelines available.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif // endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);

1698 /** Reset a dhd_sta object and free into the dhd pool. */
1699 static void
1700 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
1701 {
1702 int prio;
1703
1704 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
1705
1706 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1707
1708 /*
1709 * Flush and free all packets queued in every flow ring belonging to the sta.
1710 * Packets already posted to the flow ring itself will be flushed later.
1711 */
1712 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1713 uint16 flowid = sta->flowid[prio];
1714
1715 if (flowid != FLOWID_INVALID) {
1716 unsigned long flags;
1717 flow_ring_node_t * flow_ring_node;
1718
1719 #ifdef DHDTCPACK_SUPPRESS
1720 /* Clean tcp_ack_info_tbl to prevent access to a flushed packet
1721 * when a new packet arrives from the network stack.
1722 */
1723 dhd_tcpack_info_tbl_clean(dhdp);
1724 #endif /* DHDTCPACK_SUPPRESS */
1725
1726 flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1727 if (flow_ring_node) {
1728 flow_queue_t *queue = &flow_ring_node->queue;
1729
1730 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1731 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1732
1733 if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1734 void * pkt;
1735 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
1736 NULL) {
1737 PKTFREE(dhdp->osh, pkt, TRUE);
1738 }
1739 }
1740
1741 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1742 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1743 }
1744 }
1745
1746 sta->flowid[prio] = FLOWID_INVALID;
1747 }
1748
1749 id16_map_free(dhdp->staid_allocator, sta->idx);
1750 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1751 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1752 sta->ifidx = DHD_BAD_IF;
1753 bzero(sta->ea.octet, ETHER_ADDR_LEN);
1754 INIT_LIST_HEAD(&sta->list);
1755 sta->idx = ID16_INVALID; /* implying free */
1756 }
1757
1758 /** Allocate a dhd_sta object from the dhd pool. */
1759 static dhd_sta_t *
1760 dhd_sta_alloc(dhd_pub_t * dhdp)
1761 {
1762 uint16 idx;
1763 dhd_sta_t * sta;
1764 dhd_sta_pool_t * sta_pool;
1765
1766 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1767
1768 idx = id16_map_alloc(dhdp->staid_allocator);
1769 if (idx == ID16_INVALID) {
1770 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1771 return DHD_STA_NULL;
1772 }
1773
1774 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1775 sta = &sta_pool[idx];
1776
1777 ASSERT((sta->idx == ID16_INVALID) &&
1778 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1779
1780 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1781
1782 sta->idx = idx; /* implying allocated */
1783
1784 return sta;
1785 }
1786
1787 /** Delete all STAs in an interface's STA list. */
1788 static void
1789 dhd_if_del_sta_list(dhd_if_t *ifp)
1790 {
1791 dhd_sta_t *sta, *next;
1792 unsigned long flags;
1793
1794 DHD_IF_STA_LIST_LOCK(ifp, flags);
1795 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1796 #pragma GCC diagnostic push
1797 #pragma GCC diagnostic ignored "-Wcast-qual"
1798 #endif // endif
1799 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1800 list_del(&sta->list);
1801 dhd_sta_free(&ifp->info->pub, sta);
1802 }
1803 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1804 #pragma GCC diagnostic pop
1805 #endif // endif
1806 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1807
1808 return;
1809 }
1810
1811 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database (no-op in this build). */
1812 static void
1813 dhd_if_flush_sta(dhd_if_t * ifp)
1814 {
1815 }
1816
1817 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1818 static int
1819 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1820 {
1821 int idx, prio, sta_pool_memsz;
1822 dhd_sta_t * sta;
1823 dhd_sta_pool_t * sta_pool;
1824 void * staid_allocator;
1825
1826 ASSERT(dhdp != (dhd_pub_t *)NULL);
1827 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1828
1829 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1830 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1831 if (staid_allocator == NULL) {
1832 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1833 return BCME_ERROR;
1834 }
1835
1836 /* Pre-allocate a pool of dhd_sta objects (one extra). */
1837 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1838 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1839 if (sta_pool == NULL) {
1840 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1841 id16_map_fini(dhdp->osh, staid_allocator);
1842 return BCME_ERROR;
1843 }
1844
1845 dhdp->sta_pool = sta_pool;
1846 dhdp->staid_allocator = staid_allocator;
1847
1848 /* Initialize all sta(s) for the pre-allocated free pool. */
1849 bzero((uchar *)sta_pool, sta_pool_memsz);
1850 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1851 sta = &sta_pool[idx];
1852 sta->idx = id16_map_alloc(staid_allocator);
1853 ASSERT(sta->idx <= max_sta);
1854 }
1855
1856 /* Now place them into the pre-allocated free pool. */
1857 for (idx = 1; idx <= max_sta; idx++) {
1858 sta = &sta_pool[idx];
1859 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1860 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1861 }
1862 dhd_sta_free(dhdp, sta);
1863 }
1864
1865 return BCME_OK;
1866 }
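
/*
 * Pool lifecycle sketch (illustrative, compiled out; error handling elided).
 * DHD_MAX_STA is assumed here to be the pool size used by the attach path.
 */
#if 0
static void
dhd_sta_pool_example(dhd_pub_t *dhdp)
{
	dhd_sta_t *sta;

	dhd_sta_pool_init(dhdp, DHD_MAX_STA);	/* allocator + free pool */
	sta = dhd_sta_alloc(dhdp);		/* sta->idx != ID16_INVALID */
	if (sta != DHD_STA_NULL)
		dhd_sta_free(dhdp, sta);	/* sta->idx == ID16_INVALID again */
	dhd_sta_pool_fini(dhdp, DHD_MAX_STA);	/* pool must be fully freed */
}
#endif /* 0 */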
1867
1868 /** Destruct the pool of dhd_sta_t objects.
1869 * Caller must ensure that no STA objects are currently associated with an if.
1870 */
1871 static void
1872 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1873 {
1874 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1875
1876 if (sta_pool) {
1877 int idx;
1878 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1879 for (idx = 1; idx <= max_sta; idx++) {
1880 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1881 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1882 }
1883 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1884 dhdp->sta_pool = NULL;
1885 }
1886
1887 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1888 dhdp->staid_allocator = NULL;
1889 }
1890
1891 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1892 static void
1893 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1894 {
1895 int idx, prio, sta_pool_memsz;
1896 dhd_sta_t * sta;
1897 dhd_sta_pool_t * sta_pool;
1898 void *staid_allocator;
1899
1900 if (!dhdp) {
1901 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1902 return;
1903 }
1904
1905 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1906 staid_allocator = dhdp->staid_allocator;
1907
1908 if (!sta_pool) {
1909 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1910 return;
1911 }
1912
1913 if (!staid_allocator) {
1914 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1915 return;
1916 }
1917
1918 /* clear free pool */
1919 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1920 bzero((uchar *)sta_pool, sta_pool_memsz);
1921
1922 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1923 id16_map_clear(staid_allocator, max_sta, 1);
1924
1925 /* Initialize all sta(s) for the pre-allocated free pool. */
1926 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1927 sta = &sta_pool[idx];
1928 sta->idx = id16_map_alloc(staid_allocator);
1929 ASSERT(sta->idx <= max_sta);
1930 }
1931 /* Now place them into the pre-allocated free pool. */
1932 for (idx = 1; idx <= max_sta; idx++) {
1933 sta = &sta_pool[idx];
1934 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1935 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1936 }
1937 dhd_sta_free(dhdp, sta);
1938 }
1939 }
1940
1941 /** Find STA with MAC address ea in an interface's STA list. */
1942 dhd_sta_t *
1943 dhd_find_sta(void *pub, int ifidx, void *ea)
1944 {
1945 dhd_sta_t *sta;
1946 dhd_if_t *ifp;
1947 unsigned long flags;
1948
1949 ASSERT(ea != NULL);
1950 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1951 if (ifp == NULL)
1952 return DHD_STA_NULL;
1953
1954 DHD_IF_STA_LIST_LOCK(ifp, flags);
1955 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1956 #pragma GCC diagnostic push
1957 #pragma GCC diagnostic ignored "-Wcast-qual"
1958 #endif // endif
1959 list_for_each_entry(sta, &ifp->sta_list, list) {
1960 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1961 DHD_INFO(("%s: Found STA " MACDBG "\n",
1962 __FUNCTION__, MAC2STRDBG((char *)ea)));
1963 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1964 return sta;
1965 }
1966 }
1967 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1968 #pragma GCC diagnostic pop
1969 #endif // endif
1970 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1971
1972 return DHD_STA_NULL;
1973 }
1974
1975 /** Add STA into the interface's STA list. */
1976 dhd_sta_t *
1977 dhd_add_sta(void *pub, int ifidx, void *ea)
1978 {
1979 dhd_sta_t *sta;
1980 dhd_if_t *ifp;
1981 unsigned long flags;
1982
1983 ASSERT(ea != NULL);
1984 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1985 if (ifp == NULL)
1986 return DHD_STA_NULL;
1987
1988 if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
1989 DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
1990 return DHD_STA_NULL;
1991 }
1992
1993 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1994 if (sta == DHD_STA_NULL) {
1995 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1996 return DHD_STA_NULL;
1997 }
1998
1999 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
2000
2001 /* link the sta and the dhd interface */
2002 sta->ifp = ifp;
2003 sta->ifidx = ifidx;
2004 INIT_LIST_HEAD(&sta->list);
2005
2006 DHD_IF_STA_LIST_LOCK(ifp, flags);
2007
2008 list_add_tail(&sta->list, &ifp->sta_list);
2009
2010 DHD_ERROR(("%s: Adding STA " MACDBG "\n",
2011 __FUNCTION__, MAC2STRDBG((char *)ea)));
2012
2013 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2014
2015 return sta;
2016 }
2017
2018 /** Delete all STAs from the interface's STA list. */
2019 void
2020 dhd_del_all_sta(void *pub, int ifidx)
2021 {
2022 dhd_sta_t *sta, *next;
2023 dhd_if_t *ifp;
2024 unsigned long flags;
2025
2026 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2027 if (ifp == NULL)
2028 return;
2029
2030 DHD_IF_STA_LIST_LOCK(ifp, flags);
2031 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2032 #pragma GCC diagnostic push
2033 #pragma GCC diagnostic ignored "-Wcast-qual"
2034 #endif // endif
2035 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
2036
2037 list_del(&sta->list);
2038 dhd_sta_free(&ifp->info->pub, sta);
2039 #ifdef DHD_L2_FILTER
2040 if (ifp->parp_enable) {
2041 /* clear Proxy ARP cache of specific Ethernet Address */
2042 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
2043 ifp->phnd_arp_table, FALSE,
2044 sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
2045 }
2046 #endif /* DHD_L2_FILTER */
2047 }
2048 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2049 #pragma GCC diagnostic pop
2050 #endif // endif
2051 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2052
2053 return;
2054 }
2055
2056 /** Delete STA from the interface's STA list. */
2057 void
2058 dhd_del_sta(void *pub, int ifidx, void *ea)
2059 {
2060 dhd_sta_t *sta, *next;
2061 dhd_if_t *ifp;
2062 unsigned long flags;
2063
2064 ASSERT(ea != NULL);
2065 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2066 if (ifp == NULL)
2067 return;
2068
2069 DHD_IF_STA_LIST_LOCK(ifp, flags);
2070 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2071 #pragma GCC diagnostic push
2072 #pragma GCC diagnostic ignored "-Wcast-qual"
2073 #endif // endif
2074 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
2075 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
2076 DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
2077 __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
2078 list_del(&sta->list);
2079 dhd_sta_free(&ifp->info->pub, sta);
2080 }
2081 }
2082 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2083 #pragma GCC diagnostic pop
2084 #endif // endif
2085 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2086 #ifdef DHD_L2_FILTER
2087 if (ifp->parp_enable) {
2088 /* clear Proxy ARP cache of specific Ethernet Address */
2089 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
2090 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
2091 }
2092 #endif /* DHD_L2_FILTER */
2093 return;
2094 }
2095
2096 /** Add STA if it doesn't exist. Not reentrant. */
2097 dhd_sta_t*
2098 dhd_findadd_sta(void *pub, int ifidx, void *ea)
2099 {
2100 dhd_sta_t *sta;
2101
2102 sta = dhd_find_sta(pub, ifidx, ea);
2103
2104 if (!sta) {
2105 /* Add entry */
2106 sta = dhd_add_sta(pub, ifidx, ea);
2107 }
2108
2109 return sta;
2110 }
2111
2112 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2113 static struct list_head *
2114 dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
2115 {
2116 unsigned long flags;
2117 dhd_sta_t *sta, *snapshot;
2118
2119 INIT_LIST_HEAD(snapshot_list);
2120
2121 DHD_IF_STA_LIST_LOCK(ifp, flags);
2122
2123 list_for_each_entry(sta, &ifp->sta_list, list) {
2124 /* allocate one and add to snapshot */
2125 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
2126 if (snapshot == NULL) {
2127 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
2128 continue;
2129 }
2130
2131 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
2132
2133 INIT_LIST_HEAD(&snapshot->list);
2134 list_add_tail(&snapshot->list, snapshot_list);
2135 }
2136
2137 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2138
2139 return snapshot_list;
2140 }
2141
2142 static void
2143 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
2144 {
2145 dhd_sta_t *sta, *next;
2146
2147 list_for_each_entry_safe(sta, next, snapshot_list, list) {
2148 list_del(&sta->list);
2149 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
2150 }
2151 }
2152 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2153
2154 #else
2155 static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
2156 static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
2157 static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
2158 static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
2159 static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
2160 dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
2161 dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
2162 void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2163 #endif /* PCIE_FULL_DONGLE */
2164
2165 #if defined(DHD_LB)
2166
2167 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) || \
2168 defined(DHD_LB_RXP)
2169 /**
2170 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2171 * CPU and schedules a tasklet.
2172 * @tasklet: opaque pointer to the tasklet
2173 */
2174 INLINE void
2175 dhd_tasklet_schedule(void *tasklet)
2176 {
2177 tasklet_schedule((struct tasklet_struct *)tasklet);
2178 }
2179 /**
2180 * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
2181 * @tasklet: tasklet to be scheduled
2182 * @on_cpu: cpu core id
2183 *
2184 * If the requested cpu is online, then an IPI is sent to this cpu via the
2185 * smp_call_function_single with no wait and the tasklet_schedule function
2186 * will be invoked to schedule the specified tasklet on the requested CPU.
2187 */
2188 INLINE void
2189 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2190 {
2191 const int wait = 0;
2192 smp_call_function_single(on_cpu,
2193 dhd_tasklet_schedule, (void *)tasklet, wait);
2194 }
2195
2196 /**
2197 * dhd_work_schedule_on - Executes the passed work on a given CPU
2198 * @work: work to be scheduled
2199 * @on_cpu: cpu core id
2200 *
2201 * The work item is queued on the requested CPU via schedule_work_on(),
2202 * and its work function
2203 * will then be invoked on that CPU.
2204 */
2205
2206 INLINE void
2207 dhd_work_schedule_on(struct work_struct *work, int on_cpu)
2208 {
2209 schedule_work_on(on_cpu, work);
2210 }
2211 #endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP || DHD_LB_RXP */
2212
2213 #if defined(DHD_LB_TXC)
2214 /**
2215 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2216 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2217 * freeing the packets placed in the tx_compl workq
2218 */
2219 void
2220 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2221 {
2222 dhd_info_t *dhd = dhdp->info;
2223 int curr_cpu, on_cpu;
2224
2225 if (dhd->rx_napi_netdev == NULL) {
2226 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2227 return;
2228 }
2229
2230 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2231 /*
2232 * If the destination CPU is NOT online or is the same as the current CPU,
2233 * there is no need to schedule the work
2234 */
2235 curr_cpu = get_cpu();
2236 put_cpu();
2237
2238 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2239
2240 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2241 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2242 } else {
2243 schedule_work(&dhd->tx_compl_dispatcher_work);
2244 }
2245 }
2246
2247 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2248 {
2249 struct dhd_info *dhd =
2250 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2251 int cpu;
2252
2253 get_online_cpus();
2254 cpu = atomic_read(&dhd->tx_compl_cpu);
2255 if (!cpu_online(cpu))
2256 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2257 else
2258 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2259 put_online_cpus();
2260 }
2261 #endif /* DHD_LB_TXC */
2262
2263 #if defined(DHD_LB_RXC)
2264 /**
2265 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2266 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2267 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2268 * placed in the rx_compl workq.
2269 *
2270 * @dhdp: pointer to dhd_pub object
2271 */
2272 void
2273 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2274 {
2275 dhd_info_t *dhd = dhdp->info;
2276 int curr_cpu, on_cpu;
2277
2278 if (dhd->rx_napi_netdev == NULL) {
2279 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2280 return;
2281 }
2282
2283 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2284 /*
2285 * If the destination CPU is NOT online or is the same as the current CPU,
2286 * there is no need to schedule the work
2287 */
2288 curr_cpu = get_cpu();
2289 put_cpu();
2290 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2291
2292 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2293 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2294 } else {
2295 schedule_work(&dhd->rx_compl_dispatcher_work);
2296 }
2297 }
2298
2299 static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2300 {
2301 struct dhd_info *dhd =
2302 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2303 int cpu;
2304
2305 get_online_cpus();
2306 cpu = atomic_read(&dhd->rx_compl_cpu);
2307 if (!cpu_online(cpu))
2308 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2309 else {
2310 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2311 }
2312 put_online_cpus();
2313 }
2314 #endif /* DHD_LB_RXC */
2315
2316 #if defined(DHD_LB_TXP)
2317 static void dhd_tx_dispatcher_work(struct work_struct * work)
2318 {
2319 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2320 #pragma GCC diagnostic push
2321 #pragma GCC diagnostic ignored "-Wcast-qual"
2322 #endif // endif
2323 struct dhd_info *dhd =
2324 container_of(work, struct dhd_info, tx_dispatcher_work);
2325 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2326 #pragma GCC diagnostic pop
2327 #endif // endif
2328 dhd_tasklet_schedule(&dhd->tx_tasklet);
2329 }
2330
2331 static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
2332 {
2333 int cpu;
2334 int net_tx_cpu;
2335 dhd_info_t *dhd = dhdp->info;
2336
2337 preempt_disable();
2338 cpu = atomic_read(&dhd->tx_cpu);
2339 net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
2340
2341 /*
2342 * Now if the NET_TX has pushed the packet on the same
2343 * CPU that is chosen for Tx processing, separate it out,
2344 * i.e. run the TX processing tasklet on the compl_cpu
2345 */
2346 if (net_tx_cpu == cpu)
2347 cpu = atomic_read(&dhd->tx_compl_cpu);
2348
2349 if (!cpu_online(cpu)) {
2350 /*
2351 * The chosen CPU is not online,
2352 * so do the job on the current CPU itself.
2353 */
2354 dhd_tasklet_schedule(&dhd->tx_tasklet);
2355 } else {
2356 /*
2357 * Schedule tx_dispatcher_work on the chosen CPU, which
2358 * in turn will schedule the tx_tasklet.
2359 */
2360 dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
2361 }
2362 preempt_enable();
2363 }
2364
2365 /**
2366 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
2367 * on another cpu. The tx_tasklet will take care of actually putting
2368 * the skbs into appropriate flow ring and ringing H2D interrupt
2369 *
2370 * @dhdp: pointer to dhd_pub object
2371 */
2372 static void
2373 dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
2374 {
2375 dhd_info_t *dhd = dhdp->info;
2376 int curr_cpu;
2377
2378 curr_cpu = get_cpu();
2379 put_cpu();
2380
2381 /* Record the CPU in which the TX request from Network stack came */
2382 atomic_set(&dhd->net_tx_cpu, curr_cpu);
2383
2384 /* Schedule the work to dispatch ... */
2385 dhd_tx_dispatcher_fn(dhdp);
2386 }
2387 #endif /* DHD_LB_TXP */
2388
2389 #if defined(DHD_LB_RXP)
2390 /**
2391 * dhd_napi_poll - Load balance napi poll function to process received
2392 * packets and send up the network stack using netif_receive_skb()
2393 *
2394 * @napi: napi object in which context this poll function is invoked
2395 * @budget: number of packets to be processed.
2396 *
2397 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2398 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
2399 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2400 * packet tag and sendup.
2401 */
2402 static int
2403 dhd_napi_poll(struct napi_struct *napi, int budget)
2404 {
2405 int ifid;
2406 const int pkt_count = 1;
2407 const int chan = 0;
2408 struct sk_buff * skb;
2409 unsigned long flags;
2410 struct dhd_info *dhd;
2411 int processed = 0;
2412 struct sk_buff_head rx_process_queue;
2413
2414 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2415 #pragma GCC diagnostic push
2416 #pragma GCC diagnostic ignored "-Wcast-qual"
2417 #endif // endif
2418 dhd = container_of(napi, struct dhd_info, rx_napi_struct);
2419 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2420 #pragma GCC diagnostic pop
2421 #endif // endif
2422
2423 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2424 __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
2425 __skb_queue_head_init(&rx_process_queue);
2426
2427 /* extract the entire rx_napi_queue into local rx_process_queue */
2428 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2429 skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
2430 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2431
2432 while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
2433 OSL_PREFETCH(skb->data);
2434
2435 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
2436
2437 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2438 __FUNCTION__, skb, ifid));
2439
2440 dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
2441 processed++;
2442 }
2443
2444 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
2445
2446 DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
2447 napi_complete(napi);
2448
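	/* Returning a value smaller than the budget tells the NAPI core that
	 * this poll is complete; napi_complete() was called above.
	 */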
2449 return budget - 1;
2450 }
2451
2452 /**
2453 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2454 * poll list. This function may be invoked via the smp_call_function_single
2455 * from a remote CPU.
2456 *
2457 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2458 * after the napi_struct is added to the softnet data's poll_list
2459 *
2460 * @info: pointer to a dhd_info struct
2461 */
2462 static void
2463 dhd_napi_schedule(void *info)
2464 {
2465 dhd_info_t *dhd = (dhd_info_t *)info;
2466
2467 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2468 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2469
2470 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2471 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2472 __napi_schedule(&dhd->rx_napi_struct);
2473 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2474 #ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
2475 raise_softirq(NET_RX_SOFTIRQ);
2476 #endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
2477 }
2478
2479 /*
2480 * If the rx_napi_struct was already running, then we let it complete
2481 * processing all its packets. The rx_napi_struct may only run on one
2482 * core at a time, to avoid out-of-order handling.
2483 */
2484 }
2485
2486 /**
2487 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2488 * action after placing the dhd's rx_process napi object in the remote CPU's
2489 * softnet data's poll_list.
2490 *
2491 * @dhd: dhd_info which has the rx_process napi object
2492 * @on_cpu: desired remote CPU id
2493 */
2494 static INLINE int
2495 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2496 {
2497 int wait = 0; /* asynchronous IPI */
2498 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2499 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2500
2501 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2502 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2503 __FUNCTION__, on_cpu));
2504 }
2505
2506 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2507
2508 return 0;
2509 }
2510
2511 /*
2512 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
2513 * Why should we do this?
2514 * The candidacy algorithm is run from the callback function
2515 * registered with the CPU hotplug notifier. That callback runs in worker
2516 * context, and dhd_napi_schedule_on also runs in worker context.
2517 * Note that both of these can run on two different CPUs at the same time,
2518 * so there is a possible window in which a given CPUn is being brought
2519 * down from CPUm while we try to run a function on CPUn.
2520 * To prevent this, it is better to execute the whole SMP
2521 * function call under get_online_cpus.
2522 * That call ensures that the hotplug mechanism does not kick in
2523 * until we are done dealing with online CPUs.
2524 * If the hotplug worker is already running, there is no problem: the
2525 * candidacy algorithm will then reflect the change in dhd->rx_napi_cpu.
2526 *
2527 * This code structure is proposed in
2528 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2529 * for the question
2530 * Q: I need to ensure that a particular cpu is not removed when there is
2531 * some work specific to this cpu in progress.
2532 *
2533 * According to the documentation, calling get_online_cpus is NOT required if
2534 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2535 * run from workqueue context, we have to call these functions.
2536 */
2537 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2538 {
2539 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2540 #pragma GCC diagnostic push
2541 #pragma GCC diagnostic ignored "-Wcast-qual"
2542 #endif // endif
2543 struct dhd_info *dhd =
2544 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2545 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2546 #pragma GCC diagnostic pop
2547 #endif // endif
2548
2549 dhd_napi_schedule(dhd);
2550 }
2551
2552 /**
2553 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2554 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2555 * the packets enqueued into the rx_napi_queue and sendup.
2556 * The producer's rx packet queue is appended to the rx_napi_queue before
2557 * dispatching the rx_napi_struct.
2558 */
2559 void
2560 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2561 {
2562 unsigned long flags;
2563 dhd_info_t *dhd = dhdp->info;
2564 int curr_cpu;
2565 int on_cpu;
2566 #ifdef DHD_LB_IRQSET
2567 cpumask_t cpus;
2568 #endif /* DHD_LB_IRQSET */
2569
2570 if (dhd->rx_napi_netdev == NULL) {
2571 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2572 return;
2573 }
2574
2575 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2576 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2577
2578 /* append the producer's queue of packets to the napi's rx process queue */
2579 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2580 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2581 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2582
2583 /*
2584 * If the destination CPU is NOT online or is the same as the current CPU,
2585 * there is no need to schedule the work
2586 */
2587 curr_cpu = get_cpu();
2588 put_cpu();
2589
2590 preempt_disable();
2591 on_cpu = atomic_read(&dhd->rx_napi_cpu);
2592 #ifdef DHD_LB_IRQSET
2593 if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
2594 (!cpu_online(on_cpu))) {
2595 #else
2596 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2597 #endif /* DHD_LB_IRQSET */
2598 DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
2599 curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
2600 dhd_napi_schedule(dhd);
2601 } else {
2602 DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
2603 __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
2604 dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
2605 }
2606 preempt_enable();
2607 }
2608
2609 /**
2610 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2611 */
2612 void
2613 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2614 {
2615 dhd_info_t *dhd = dhdp->info;
2616
2617 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2618 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2619 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2620 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2621 }
2622 #endif /* DHD_LB_RXP */
2623
2624 #ifdef DHD_LB_IRQSET
2625 void
2626 dhd_irq_set_affinity(dhd_pub_t *dhdp)
2627 {
2628 unsigned int irq = (unsigned int)-1;
2629 int err = BCME_OK;
2630
2631 if (!dhdp) {
2632 DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
2633 return;
2634 }
2635
2636 if (!dhdp->bus) {
2637 DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
2638 return;
2639 }
2640
2641 dhdpcie_get_pcieirq(dhdp->bus, &irq);
2642 err = irq_set_affinity(irq, dhdp->info->cpumask_primary);
2643 if (err)
2644 DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
2645 __FUNCTION__, *cpumask_bits(dhdp->info->cpumask_primary)));
2646 }
2647 #endif /* DHD_LB_IRQSET */
2648 #endif /* DHD_LB */
2649
2650 /** Returns the dhd iflist index corresponding to the bssidx provided by apps */
2651 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2652 {
2653 dhd_if_t *ifp;
2654 dhd_info_t *dhd = dhdp->info;
2655 int i;
2656
2657 ASSERT(bssidx < DHD_MAX_IFS);
2658 ASSERT(dhdp);
2659
2660 for (i = 0; i < DHD_MAX_IFS; i++) {
2661 ifp = dhd->iflist[i];
2662 if (ifp && (ifp->bssidx == bssidx)) {
2663 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2664 ifp->name, bssidx, i));
2665 break;
2666 }
2667 }
2668 return i;
2669 }
2670
2671 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
2672 {
2673 uint32 store_idx;
2674 uint32 sent_idx;
2675
2676 if (!skb) {
2677 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2678 return BCME_ERROR;
2679 }
2680
2681 dhd_os_rxflock(dhdp);
2682 store_idx = dhdp->store_idx;
2683 sent_idx = dhdp->sent_idx;
2684 if (dhdp->skbbuf[store_idx] != NULL) {
2685 /* Make sure the previous packets are processed */
2686 dhd_os_rxfunlock(dhdp);
2687 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2688 skb, store_idx, sent_idx));
2689 /* msleep was removed here; use wait_event_timeout if we
2690 * want to give the rx frame thread a chance to run
2691 */
2692 #if defined(WAIT_DEQUEUE)
2693 OSL_SLEEP(1);
2694 #endif // endif
2695 return BCME_ERROR;
2696 }
2697 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2698 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
2699 dhdp->skbbuf[store_idx] = skb;
2700 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
2701 dhd_os_rxfunlock(dhdp);
2702
2703 return BCME_OK;
2704 }
2705
2706 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2707 {
2708 uint32 store_idx;
2709 uint32 sent_idx;
2710 void *skb;
2711
2712 dhd_os_rxflock(dhdp);
2713
2714 store_idx = dhdp->store_idx;
2715 sent_idx = dhdp->sent_idx;
2716 skb = dhdp->skbbuf[sent_idx];
2717
2718 if (skb == NULL) {
2719 dhd_os_rxfunlock(dhdp);
2720 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2721 store_idx, sent_idx));
2722 return NULL;
2723 }
2724
2725 dhdp->skbbuf[sent_idx] = NULL;
2726 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2727
2728 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2729 skb, sent_idx));
2730
2731 dhd_os_rxfunlock(dhdp);
2732
2733 return skb;
2734 }
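
/*
 * Note on the rxf ring arithmetic above (illustrative): store_idx/sent_idx
 * wrap with a power-of-two mask, so MAXSKBPEND must be a power of two.
 * For example, with MAXSKBPEND == 8, index 7 advances to (7 + 1) & 7 == 0.
 * The ring is full when skbbuf[store_idx] is still non-NULL, i.e. the
 * consumer has not yet drained the slot the producer wants to reuse.
 */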
2735
2736 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2737 {
2738 if (prepost) { /* pre process */
2739 dhd_read_cis(dhdp);
2740 dhd_check_module_cid(dhdp);
2741 dhd_check_module_mac(dhdp);
2742 dhd_set_macaddr_from_file(dhdp);
2743 } else { /* post process */
2744 dhd_write_macaddr(&dhdp->mac);
2745 dhd_clear_cis(dhdp);
2746 }
2747
2748 return 0;
2749 }
2750
2751 #ifdef PKT_FILTER_SUPPORT
2752 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2753 static bool
2754 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
2755 {
2756 bool _apply = FALSE;
2757 /* In case of IBSS mode, apply arp pkt filter */
2758 if (op_mode_param & DHD_FLAG_IBSS_MODE) {
2759 _apply = TRUE;
2760 goto exit;
2761 }
2762 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2763 if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
2764 _apply = TRUE;
2765 goto exit;
2766 }
2767
2768 exit:
2769 return _apply;
2770 }
2771 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2772
2773 void
2774 dhd_set_packet_filter(dhd_pub_t *dhd)
2775 {
2776 int i;
2777
2778 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2779 if (dhd_pkt_filter_enable) {
2780 for (i = 0; i < dhd->pktfilter_count; i++) {
2781 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2782 }
2783 }
2784 }
2785
2786 void
2787 dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2788 {
2789 int i;
2790
2791 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2792 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2793 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2794 return;
2795 }
2796 /* 1 - Enable packet filter, only allow unicast packet to send up */
2797 /* 0 - Disable packet filter */
2798 if (dhd_pkt_filter_enable && (!value ||
2799 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2800 {
2801 for (i = 0; i < dhd->pktfilter_count; i++) {
2802 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
2803 if (value && (i == DHD_ARP_FILTER_NUM) &&
2804 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2805 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
2806 "val %d, cnt %d, op_mode 0x%x\n",
2807 value, i, dhd->op_mode));
2808 continue;
2809 }
2810 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
2811 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2812 value, dhd_master_mode);
2813 }
2814 }
2815 }
2816
2817 int
2818 dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
2819 {
2820 char *filterp = NULL;
2821 int filter_id = 0;
2822
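	/*
	 * The filter strings below follow the wl packet-filter pattern format
	 * (assumed here from the values used):
	 *   "<id> <polarity> <type> <offset> <bitmask> <pattern>"
	 * For example, "105 0 0 12 0xFFFF 0x0806" matches frames whose 16-bit
	 * field at byte offset 12 (the EtherType) equals 0x0806, i.e. ARP.
	 */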
2823 switch (num) {
2824 case DHD_BROADCAST_FILTER_NUM:
2825 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
2826 filter_id = 101;
2827 break;
2828 case DHD_MULTICAST4_FILTER_NUM:
2829 filter_id = 102;
2830 if (FW_SUPPORTED((dhdp), pf6)) {
2831 if (dhdp->pktfilter[num] != NULL) {
2832 dhd_pktfilter_offload_delete(dhdp, filter_id);
2833 dhdp->pktfilter[num] = NULL;
2834 }
2835 if (!add_remove) {
2836 filterp = DISCARD_IPV4_MCAST;
2837 add_remove = 1;
2838 break;
2839 }
2840 }
2841 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
2842 break;
2843 case DHD_MULTICAST6_FILTER_NUM:
2844 filter_id = 103;
2845 if (FW_SUPPORTED((dhdp), pf6)) {
2846 if (dhdp->pktfilter[num] != NULL) {
2847 dhd_pktfilter_offload_delete(dhdp, filter_id);
2848 dhdp->pktfilter[num] = NULL;
2849 }
2850 if (!add_remove) {
2851 filterp = DISCARD_IPV6_MCAST;
2852 add_remove = 1;
2853 break;
2854 }
2855 }
2856 filterp = "103 0 0 0 0xFFFF 0x3333";
2857 break;
2858 case DHD_MDNS_FILTER_NUM:
2859 filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
2860 filter_id = 104;
2861 break;
2862 case DHD_ARP_FILTER_NUM:
2863 filterp = "105 0 0 12 0xFFFF 0x0806";
2864 filter_id = 105;
2865 break;
2866 case DHD_BROADCAST_ARP_FILTER_NUM:
2867 filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
2868 " 0xFFFFFFFFFFFF0000000000000806";
2869 filter_id = 106;
2870 break;
2871 default:
2872 return -EINVAL;
2873 }
2874
2875 /* Add filter */
2876 if (add_remove) {
2877 dhdp->pktfilter[num] = filterp;
2878 dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
2879 } else { /* Delete filter */
2880 if (dhdp->pktfilter[num]) {
2881 dhd_pktfilter_offload_delete(dhdp, filter_id);
2882 dhdp->pktfilter[num] = NULL;
2883 }
2884 }
2885
2886 return 0;
2887 }
2888 #endif /* PKT_FILTER_SUPPORT */
2889
2890 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2891 {
2892 #ifndef SUPPORT_PM2_ONLY
2893 int power_mode = PM_MAX;
2894 #endif /* SUPPORT_PM2_ONLY */
2895 /* wl_pkt_filter_enable_t enable_parm; */
2896 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2897 int ret = 0;
2898 #ifdef DHD_USE_EARLYSUSPEND
2899 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2900 int bcn_timeout = 0;
2901 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2902 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2903 int roam_time_thresh = 0; /* (ms) */
2904 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2905 #ifndef ENABLE_FW_ROAM_SUSPEND
2906 uint roamvar = 1;
2907 #endif /* ENABLE_FW_ROAM_SUSPEND */
2908 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2909 int bcn_li_bcn = 1;
2910 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2911 uint nd_ra_filter = 0;
2912 #ifdef ENABLE_IPMCAST_FILTER
2913 int ipmcast_l2filter;
2914 #endif /* ENABLE_IPMCAST_FILTER */
2915 #ifdef CUSTOM_EVENT_PM_WAKE
2916 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
2917 #endif /* CUSTOM_EVENT_PM_WAKE */
2918 #endif /* DHD_USE_EARLYSUSPEND */
2919 #ifdef PASS_ALL_MCAST_PKTS
2920 struct dhd_info *dhdinfo;
2921 uint32 allmulti;
2922 uint i;
2923 #endif /* PASS_ALL_MCAST_PKTS */
2924 #ifdef DYNAMIC_SWOOB_DURATION
2925 #ifndef CUSTOM_INTR_WIDTH
2926 #define CUSTOM_INTR_WIDTH 100
2927 int intr_width = 0;
2928 #endif /* CUSTOM_INTR_WIDTH */
2929 #endif /* DYNAMIC_SWOOB_DURATION */
2930
2931 #if defined(BCMPCIE)
2932 int lpas = 0;
2933 int dtim_period = 0;
2934 int bcn_interval = 0;
2935 int bcn_to_dly = 0;
2936 #if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2937 bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
2938 #else
2939 int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
2940 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
2941 #endif /* BCMPCIE */
2942
2943 if (!dhd)
2944 return -ENODEV;
2945
2946 #ifdef PASS_ALL_MCAST_PKTS
2947 dhdinfo = dhd->info;
2948 #endif /* PASS_ALL_MCAST_PKTS */
2949
2950 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2951 __FUNCTION__, value, dhd->in_suspend));
2952
2953 dhd_suspend_lock(dhd);
2954
2955 #ifdef CUSTOM_SET_CPUCORE
2956 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2957 /* set specific cpucore */
2958 dhd_set_cpucore(dhd, TRUE);
2959 #endif /* CUSTOM_SET_CPUCORE */
2960 if (dhd->up) {
2961 if (value && dhd->in_suspend) {
2962 #ifdef PKT_FILTER_SUPPORT
2963 dhd->early_suspended = 1;
2964 #endif // endif
2965 /* Kernel suspended */
2966 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
2967
2968 #ifndef SUPPORT_PM2_ONLY
2969 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2970 sizeof(power_mode), TRUE, 0);
2971 #endif /* SUPPORT_PM2_ONLY */
2972
2973 #ifdef PKT_FILTER_SUPPORT
2974 /* Enable packet filter,
2975 * only allow unicast packet to send up
2976 */
2977 dhd_enable_packet_filter(1, dhd);
2978 #ifdef APF
2979 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
2980 #endif /* APF */
2981 #endif /* PKT_FILTER_SUPPORT */
2982
2983 #ifdef PASS_ALL_MCAST_PKTS
2984 allmulti = 0;
2985 for (i = 0; i < DHD_MAX_IFS; i++) {
2986 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2987 ret = dhd_iovar(dhd, i, "allmulti",
2988 (char *)&allmulti,
2989 sizeof(allmulti),
2990 NULL, 0, TRUE);
2991 if (ret < 0) {
2992 DHD_ERROR(("%s allmulti failed %d\n",
2993 __FUNCTION__, ret));
2994 }
2995 }
2996 #endif /* PASS_ALL_MCAST_PKTS */
2997
2998 /* If DTIM skip is set up as default, force it to wake
2999 * each third DTIM for better power savings. Note that
3000 * one side effect is a chance of missing BC/MC packets.
3001 */
3002 #ifdef WLTDLS
3003 /* Do not set bcn_li_dtim in WFD mode */
3004 if (dhd->tdls_mode) {
3005 bcn_li_dtim = 0;
3006 } else
3007 #endif /* WLTDLS */
3008 #if defined(BCMPCIE)
3009 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
3010 &bcn_interval);
3011 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3012 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3013 if (ret < 0) {
3014 DHD_ERROR(("%s bcn_li_dtim failed %d\n",
3015 __FUNCTION__, ret));
3016 }
3017 if ((bcn_li_dtim * dtim_period * bcn_interval) >=
3018 MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
3019 /*
3020 * Increase max roaming threshold from 2 secs to 8 secs
3021 * the real roam threshold is MIN(max_roam_threshold,
3022 * bcn_timeout/2)
3023 */
3024 lpas = 1;
3025 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
3026 NULL, 0, TRUE);
3027 if (ret < 0) {
3028 DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__,
3029 ret));
3030 }
3031 bcn_to_dly = 1;
3032 /*
3033 * if bcn_to_dly is 1, the real roam threshold is
3034 * MIN(max_roam_threshold, bcn_timeout -1);
3035 * the link down event is notified after the roaming procedure completes
3036 * if we hit bcn_timeout while roaming is in progress.
3037 */
3038 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
3039 sizeof(bcn_to_dly), NULL, 0, TRUE);
3040 if (ret < 0) {
3041 DHD_ERROR(("%s bcn_to_dly failed %d\n",
3042 __FUNCTION__, ret));
3043 }
3044 /* Increase beacon timeout to 6 secs, or keep the current value if bigger */
3045 bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
3046 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3047 sizeof(bcn_timeout), NULL, 0, TRUE);
3048 if (ret < 0) {
3049 DHD_ERROR(("%s bcn_timeout failed %d\n",
3050 __FUNCTION__, ret));
3051 }
3052 }
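			/*
			 * Worked example (illustrative values): with a 100 TU
			 * beacon interval, dtim_period 1 and bcn_li_dtim 3,
			 * the product is 300 TU (~307 ms) between firmware
			 * DTIM wakeups, which is what gets compared against
			 * MIN_DTIM_FOR_ROAM_THRES_EXTEND above.
			 */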
3053 #else
3054 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
3055 if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3056 sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
3057 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
3058 #endif /* BCMPCIE */
3059
3060 #ifdef DHD_USE_EARLYSUSPEND
3061 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3062 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
3063 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3064 sizeof(bcn_timeout), NULL, 0, TRUE);
3065 if (ret < 0) {
3066 DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
3067 ret));
3068 }
3069 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3070 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3071 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
3072 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
3073 (char *)&roam_time_thresh,
3074 sizeof(roam_time_thresh), NULL, 0, TRUE);
3075 if (ret < 0) {
3076 DHD_ERROR(("%s roam_time_thresh failed %d\n",
3077 __FUNCTION__, ret));
3078 }
3079 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3080 #ifndef ENABLE_FW_ROAM_SUSPEND
3081 /* Disable firmware roaming during suspend */
3082 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
3083 sizeof(roamvar), NULL, 0, TRUE);
3084 if (ret < 0) {
3085 DHD_ERROR(("%s roam_off failed %d\n",
3086 __FUNCTION__, ret));
3087 }
3088 #endif /* ENABLE_FW_ROAM_SUSPEND */
3089 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3090 if (bcn_li_dtim) {
3091 bcn_li_bcn = 0;
3092 }
3093 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
3094 sizeof(bcn_li_bcn), NULL, 0, TRUE);
3095 if (ret < 0) {
3096 DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
3097 }
3098 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3099 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
3100 ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
3101 if (ret != BCME_OK) {
3102 DHD_ERROR(("failed to stop beacon recv event on"
3103 " suspend state (%d)\n", ret));
3104 }
3105 #endif /* WL_CFG80211 && WL_BCNRECV */
3106 #ifdef NDO_CONFIG_SUPPORT
3107 if (dhd->ndo_enable) {
3108 if (!dhd->ndo_host_ip_overflow) {
3109 /* enable ND offload on suspend */
3110 ret = dhd_ndo_enable(dhd, TRUE);
3111 if (ret < 0) {
3112 DHD_ERROR(("%s: failed to enable NDO\n",
3113 __FUNCTION__));
3114 }
3115 } else {
3116 DHD_INFO(("%s: NDO disabled on suspend due to"
3117 "HW capacity\n", __FUNCTION__));
3118 }
3119 }
3120 #endif /* NDO_CONFIG_SUPPORT */
3121 #ifndef APF
3122 if (FW_SUPPORTED(dhd, ndoe)) {
3123 #else
3124 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
3125 #endif /* APF */
3126 /* enable IPv6 RA filter in firmware during suspend */
3127 nd_ra_filter = 1;
3128 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
3129 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
3130 NULL, 0, TRUE);
3131 if (ret < 0)
3132 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3133 ret));
3134 }
3135 dhd_os_suppress_logging(dhd, TRUE);
3136 #ifdef ENABLE_IPMCAST_FILTER
3137 ipmcast_l2filter = 1;
3138 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
3139 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
3140 NULL, 0, TRUE);
3141 if (ret < 0) {
3142 DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
3143 }
3144 #endif /* ENABLE_IPMCAST_FILTER */
3145 #ifdef DYNAMIC_SWOOB_DURATION
3146 intr_width = CUSTOM_INTR_WIDTH;
3147 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
3148 sizeof(intr_width), NULL, 0, TRUE);
3149 if (ret < 0) {
3150 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
3151 }
3152 #endif /* DYNAMIC_SWOOB_DURATION */
3153 #ifdef CUSTOM_EVENT_PM_WAKE
3154 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
3155 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
3156 (char *)&pm_awake_thresh,
3157 sizeof(pm_awake_thresh), NULL, 0, TRUE);
3158 if (ret < 0) {
3159 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
3160 __FUNCTION__, ret));
3161 }
3162 #endif /* CUSTOM_EVENT_PM_WAKE */
3163 #endif /* DHD_USE_EARLYSUSPEND */
3164 } else {
3165 #ifdef PKT_FILTER_SUPPORT
3166 dhd->early_suspended = 0;
3167 #endif // endif
3168 /* Kernel resumed */
3169 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
3170 #ifdef DYNAMIC_SWOOB_DURATION
3171 intr_width = 0;
3172 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
3173 sizeof(intr_width), NULL, 0, TRUE);
3174 if (ret < 0) {
3175 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
3176 }
3177 #endif /* DYNAMIC_SWOOB_DURATION */
3178 #ifndef SUPPORT_PM2_ONLY
3179 power_mode = PM_FAST;
3180 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
3181 sizeof(power_mode), TRUE, 0);
3182 #endif /* SUPPORT_PM2_ONLY */
3183 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
3184 ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
3185 if (ret != BCME_OK) {
3186 DHD_ERROR(("failed to resume beacon recv state (%d)\n",
3187 ret));
3188 }
3189 #endif /* WL_CFG80211 && WL_BCNRECV */
3190 #ifdef PKT_FILTER_SUPPORT
3191 /* disable pkt filter */
3192 dhd_enable_packet_filter(0, dhd);
3193 #ifdef APF
3194 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
3195 #endif /* APF */
3196 #endif /* PKT_FILTER_SUPPORT */
3197 #ifdef PASS_ALL_MCAST_PKTS
3198 allmulti = 1;
3199 for (i = 0; i < DHD_MAX_IFS; i++) {
3200 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
3201 ret = dhd_iovar(dhd, i, "allmulti",
3202 (char *)&allmulti,
3203 sizeof(allmulti), NULL,
3204 0, TRUE);
3205 if (ret < 0) {
3206 DHD_ERROR(("%s: allmulti failed:%d\n",
3207 __FUNCTION__, ret));
3208 }
3209 }
3210 #endif /* PASS_ALL_MCAST_PKTS */
3211 #if defined(BCMPCIE)
3212 /* restore pre-suspend setting */
3213 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3214 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3215 if (ret < 0) {
3216 DHD_ERROR(("%s:bcn_li_ditm failed:%d\n",
3217 __FUNCTION__, ret));
3218 }
3219 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
3220 0, TRUE);
3221 if (ret < 0) {
3222 DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
3223 }
3224 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
3225 sizeof(bcn_to_dly), NULL, 0, TRUE);
3226 if (ret < 0) {
3227 DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
3228 }
3229 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3230 sizeof(bcn_timeout), NULL, 0, TRUE);
3231 if (ret < 0) {
3232 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
3233 __FUNCTION__, ret));
3234 }
3235 #else
3236 /* restore pre-suspend setting for dtim_skip */
3237 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
3238 sizeof(bcn_li_dtim), NULL, 0, TRUE);
3239 if (ret < 0) {
3240 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
3241 }
3242 #endif /* BCMPCIE */
3243 #ifdef DHD_USE_EARLYSUSPEND
3244 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3245 bcn_timeout = CUSTOM_BCN_TIMEOUT;
3246 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
3247 sizeof(bcn_timeout), NULL, 0, TRUE);
3248 if (ret < 0) {
3249 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
3250 __FUNCTION__, ret));
3251 }
3252 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3253 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
3254 roam_time_thresh = 2000;
3255 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
3256 (char *)&roam_time_thresh,
3257 sizeof(roam_time_thresh), NULL, 0, TRUE);
3258 if (ret < 0) {
3259 DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
3260 __FUNCTION__, ret));
3261 }
3262
3263 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
3264 #ifndef ENABLE_FW_ROAM_SUSPEND
3265 roamvar = dhd_roam_disable;
3266 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
3267 sizeof(roamvar), NULL, 0, TRUE);
3268 if (ret < 0) {
3269 DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
3270 }
3271 #endif /* ENABLE_FW_ROAM_SUSPEND */
3272 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
3273 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
3274 sizeof(bcn_li_bcn), NULL, 0, TRUE);
3275 if (ret < 0) {
3276 DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
3277 __FUNCTION__, ret));
3278 }
3279 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
3280 #ifdef NDO_CONFIG_SUPPORT
3281 if (dhd->ndo_enable) {
3282 /* Disable ND offload on resume */
3283 ret = dhd_ndo_enable(dhd, FALSE);
3284 if (ret < 0) {
3285 DHD_ERROR(("%s: failed to disable NDO\n",
3286 __FUNCTION__));
3287 }
3288 }
3289 #endif /* NDO_CONFIG_SUPPORT */
3290 #ifndef APF
3291 if (FW_SUPPORTED(dhd, ndoe)) {
3292 #else
3293 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
3294 #endif /* APF */
3295 /* disable IPv6 RA filter in firmware during suspend */
3296 nd_ra_filter = 0;
3297 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
3298 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
3299 NULL, 0, TRUE);
3300 if (ret < 0) {
3301 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
3302 ret));
3303 }
3304 }
3305 dhd_os_suppress_logging(dhd, FALSE);
3306 #ifdef ENABLE_IPMCAST_FILTER
3307 ipmcast_l2filter = 0;
3308 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
3309 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
3310 NULL, 0, TRUE);
3311 if (ret < 0) {
3312 DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
3313 }
3314 #endif /* ENABLE_IPMCAST_FILTER */
3315 #ifdef CUSTOM_EVENT_PM_WAKE
3316 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
3317 (char *)&pm_awake_thresh,
3318 sizeof(pm_awake_thresh), NULL, 0, TRUE);
3319 if (ret < 0) {
3320 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
3321 __FUNCTION__, ret));
3322 }
3323 #endif /* CUSTOM_EVENT_PM_WAKE */
3324 #endif /* DHD_USE_EARLYSUSPEND */
3325 #ifdef DHD_LB_IRQSET
3326 dhd_irq_set_affinity(dhd);
3327 #endif /* DHD_LB_IRQSET */
3328 }
3329 }
3330 dhd_suspend_unlock(dhd);
3331
3332 return 0;
3333 }
3334
3335 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
3336 {
3337 dhd_pub_t *dhdp = &dhd->pub;
3338 int ret = 0;
3339
3340 DHD_OS_WAKE_LOCK(dhdp);
3341 DHD_PERIM_LOCK(dhdp);
3342
3343 /* Set flag when early suspend was called */
3344 dhdp->in_suspend = val;
3345 if ((force || !dhdp->suspend_disable_flag) &&
3346 dhd_support_sta_mode(dhdp))
3347 {
3348 ret = dhd_set_suspend(val, dhdp);
3349 }
3350
3351 DHD_PERIM_UNLOCK(dhdp);
3352 DHD_OS_WAKE_UNLOCK(dhdp);
3353 return ret;
3354 }
3355
3356 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3357 static void dhd_early_suspend(struct early_suspend *h)
3358 {
3359 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3360 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3361
3362 if (dhd)
3363 dhd_suspend_resume_helper(dhd, 1, 0);
3364 }
3365
3366 static void dhd_late_resume(struct early_suspend *h)
3367 {
3368 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3369 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3370
3371 if (dhd)
3372 dhd_suspend_resume_helper(dhd, 0, 0);
3373 }
3374 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
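
/*
 * Registration sketch (illustrative, compiled out): with
 * CONFIG_HAS_EARLYSUSPEND, these handlers are hooked into the Android
 * early-suspend framework roughly as below. The actual registration lives
 * elsewhere in this driver; the level value here is an assumption.
 */
#if 0
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
#endif /* 0 */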
3375
3376 /*
3377 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3378 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3379 *
3380 * dhd_timeout_start(&tmo, usec);
3381 * while (!dhd_timeout_expired(&tmo))
3382 * if (poll_something())
3383 * break;
3384 * if (dhd_timeout_expired(&tmo))
3385 * fatal();
3386 */
3387
3388 void
3389 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
3390 {
3391 tmo->limit = usec;
3392 tmo->increment = 0;
3393 tmo->elapsed = 0;
3394 tmo->tick = jiffies_to_usecs(1);
3395 }
3396
3397 int
3398 dhd_timeout_expired(dhd_timeout_t *tmo)
3399 {
3400 /* Does nothing on the first call */
3401 if (tmo->increment == 0) {
3402 tmo->increment = 1;
3403 return 0;
3404 }
3405
3406 if (tmo->elapsed >= tmo->limit)
3407 return 1;
3408
3409 /* Add the delay that's about to take place */
3410 tmo->elapsed += tmo->increment;
3411
3412 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
3413 OSL_DELAY(tmo->increment);
3414 tmo->increment *= 2;
3415 if (tmo->increment > tmo->tick)
3416 tmo->increment = tmo->tick;
3417 } else {
3418 /*
3419 * OSL_SLEEP() corresponds to usleep_range(). In non-atomic
3420 * context where the exact wakeup time is flexible, it is better
3421 * to use usleep_range() instead of udelay(), as it improves
3422 * responsiveness and reduces power consumption.
3423 */
3424 OSL_SLEEP(jiffies_to_msecs(1));
3425 }
3426
3427 return 0;
3428 }
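
/*
 * Back-off illustration (assuming a 4 ms jiffy, i.e. tmo->tick == 4000 us):
 * successive calls spin for 1, 2, 4, ... usec, doubling each time until the
 * increment reaches one jiffy; from then on each call sleeps for one jiffy
 * instead of spinning, so long polls do not burn CPU.
 */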
3429
3430 int
3431 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
3432 {
3433 int i = 0;
3434
3435 if (!dhd) {
3436 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
3437 return DHD_BAD_IF;
3438 }
3439
3440 while (i < DHD_MAX_IFS) {
3441 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
3442 return i;
3443 i++;
3444 }
3445
3446 return DHD_BAD_IF;
3447 }
3448
3449 struct net_device * dhd_idx2net(void *pub, int ifidx)
3450 {
3451 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
3452 struct dhd_info *dhd_info;
3453
3454 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
3455 return NULL;
3456 dhd_info = dhd_pub->info;
3457 if (dhd_info && dhd_info->iflist[ifidx])
3458 return dhd_info->iflist[ifidx]->net;
3459 return NULL;
3460 }
3461
3462 int
3463 dhd_ifname2idx(dhd_info_t *dhd, char *name)
3464 {
3465 int i = DHD_MAX_IFS;
3466
3467 ASSERT(dhd);
3468
3469 if (name == NULL || *name == '\0')
3470 return 0;
3471
3472 while (--i > 0)
3473 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
3474 break;
3475
3476 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
3477
3478 return i; /* default - the primary interface */
3479 }
3480
3481 char *
3482 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
3483 {
3484 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3485
3486 ASSERT(dhd);
3487
3488 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
3489 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
3490 return "<if_bad>";
3491 }
3492
3493 if (dhd->iflist[ifidx] == NULL) {
3494 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
3495 return "<if_null>";
3496 }
3497
3498 if (dhd->iflist[ifidx]->net)
3499 return dhd->iflist[ifidx]->net->name;
3500
3501 return "<if_none>";
3502 }
3503
3504 uint8 *
3505 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
3506 {
3507 int i;
3508 dhd_info_t *dhd = (dhd_info_t *)dhdp;
3509
3510 ASSERT(dhd);
3511 for (i = 0; i < DHD_MAX_IFS; i++)
3512 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
3513 return dhd->iflist[i]->mac_addr;
3514
3515 return NULL;
3516 }
3517
3518 static void
3519 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
3520 {
3521 struct net_device *dev;
3522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3523 struct netdev_hw_addr *ha;
3524 #else
3525 struct dev_mc_list *mclist;
3526 #endif // endif
3527 uint32 allmulti, cnt;
3528
3529 wl_ioctl_t ioc;
3530 char *buf, *bufp;
3531 uint buflen;
3532 int ret;
3533
3534 #ifdef MCAST_LIST_ACCUMULATION
3535 int i;
3536 uint32 cnt_iface[DHD_MAX_IFS];
3537 cnt = 0;
3538 allmulti = 0;
3539
3540 for (i = 0; i < DHD_MAX_IFS; i++) {
3541 if (dhd->iflist[i]) {
3542 dev = dhd->iflist[i]->net;
3543 if (!dev)
3544 continue;
3545 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3546 netif_addr_lock_bh(dev);
3547 #endif /* LINUX >= 2.6.27 */
3548 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3549 cnt_iface[i] = netdev_mc_count(dev);
3550 cnt += cnt_iface[i];
3551 #else
3552 cnt += dev->mc_count;
3553 #endif /* LINUX >= 2.6.35 */
3554 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3555 netif_addr_unlock_bh(dev);
3556 #endif /* LINUX >= 2.6.27 */
3557
3558 /* Determine initial value of allmulti flag */
3559 allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3560 }
3561 }
3562 #else /* !MCAST_LIST_ACCUMULATION */
3563 if (!dhd->iflist[ifidx]) {
3564 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3565 return;
3566 }
3567 dev = dhd->iflist[ifidx]->net;
3568 if (!dev)
3569 return;
3570 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3571 netif_addr_lock_bh(dev);
3572 #endif /* LINUX >= 2.6.27 */
3573 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3574 cnt = netdev_mc_count(dev);
3575 #else
3576 cnt = dev->mc_count;
3577 #endif /* LINUX >= 2.6.35 */
3578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3579 netif_addr_unlock_bh(dev);
3580 #endif /* LINUX >= 2.6.27 */
3581
3582 /* Determine initial value of allmulti flag */
3583 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3584 #endif /* MCAST_LIST_ACCUMULATION */
3585
3586 #ifdef PASS_ALL_MCAST_PKTS
3587 #ifdef PKT_FILTER_SUPPORT
3588 if (!dhd->pub.early_suspended)
3589 #endif /* PKT_FILTER_SUPPORT */
3590 allmulti = TRUE;
3591 #endif /* PASS_ALL_MCAST_PKTS */
3592
3593 /* Send down the multicast list first. */
3594
3595 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3596 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3597 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3598 dhd_ifname(&dhd->pub, ifidx), cnt));
3599 return;
3600 }
3601
3602 strncpy(bufp, "mcast_list", buflen - 1);
3603 bufp[buflen - 1] = '\0';
3604 bufp += strlen("mcast_list") + 1;
3605
3606 cnt = htol32(cnt);
3607 memcpy(bufp, &cnt, sizeof(cnt));
3608 bufp += sizeof(cnt);
3609
3610 #ifdef MCAST_LIST_ACCUMULATION
3611 for (i = 0; i < DHD_MAX_IFS; i++) {
3612 if (dhd->iflist[i]) {
3613 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
3614 dev = dhd->iflist[i]->net;
3615
3616 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3617 netif_addr_lock_bh(dev);
3618 #endif /* LINUX >= 2.6.27 */
3619 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3620 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3621 #pragma GCC diagnostic push
3622 #pragma GCC diagnostic ignored "-Wcast-qual"
3623 #endif // endif
3624 netdev_for_each_mc_addr(ha, dev) {
3625 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3626 #pragma GCC diagnostic pop
3627 #endif // endif
3628 if (!cnt_iface[i])
3629 break;
3630 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3631 bufp += ETHER_ADDR_LEN;
3632 DHD_TRACE(("_dhd_set_multicast_list: cnt "
3633 "%d " MACDBG "\n",
3634 cnt_iface[i], MAC2STRDBG(ha->addr)));
3635 cnt_iface[i]--;
3636 }
3637 #else /* LINUX < 2.6.35 */
3638 for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0));
3639 cnt_iface[i]--, mclist = mclist->next) {
3640 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3641 bufp += ETHER_ADDR_LEN;
3642 }
3643 #endif /* LINUX >= 2.6.35 */
3644 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3645 netif_addr_unlock_bh(dev);
3646 #endif /* LINUX >= 2.6.27 */
3647 }
3648 }
3649 #else /* !MCAST_LIST_ACCUMULATION */
3650 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3651 netif_addr_lock_bh(dev);
3652 #endif /* LINUX >= 2.6.27 */
3653 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3654 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3655 #pragma GCC diagnostic push
3656 #pragma GCC diagnostic ignored "-Wcast-qual"
3657 #endif // endif
3658 netdev_for_each_mc_addr(ha, dev) {
3659 if (!cnt)
3660 break;
3661 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3662 bufp += ETHER_ADDR_LEN;
3663 cnt--;
3664 }
3665 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3666 #pragma GCC diagnostic pop
3667 #endif // endif
3668 #else /* LINUX < 2.6.35 */
3669 for (mclist = dev->mc_list; (mclist && (cnt > 0));
3670 cnt--, mclist = mclist->next) {
3671 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3672 bufp += ETHER_ADDR_LEN;
3673 }
3674 #endif /* LINUX >= 2.6.35 */
3675 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3676 netif_addr_unlock_bh(dev);
3677 #endif /* LINUX >= 2.6.27 */
3678 #endif /* MCAST_LIST_ACCUMULATION */
3679
3680 memset(&ioc, 0, sizeof(ioc));
3681 ioc.cmd = WLC_SET_VAR;
3682 ioc.buf = buf;
3683 ioc.len = buflen;
3684 ioc.set = TRUE;
3685
3686 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3687 if (ret < 0) {
3688 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3689 dhd_ifname(&dhd->pub, ifidx), cnt));
3690 allmulti = cnt ? TRUE : allmulti;
3691 }
3692
3693 MFREE(dhd->pub.osh, buf, buflen);
3694
3695 /* Now send the allmulti setting. This is based on the setting in the
3696 * net_device flags, but might be modified above to be turned on if we
3697 * were trying to set some addresses and dongle rejected it...
3698 */
3699
3700 allmulti = htol32(allmulti);
3701 ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
3702 sizeof(allmulti), NULL, 0, TRUE);
3703 if (ret < 0) {
3704 DHD_ERROR(("%s: set allmulti %d failed\n",
3705 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3706 }
3707
3708 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3709
3710 #ifdef MCAST_LIST_ACCUMULATION
3711 allmulti = 0;
3712 for (i = 0; i < DHD_MAX_IFS; i++) {
3713 if (dhd->iflist[i]) {
3714 dev = dhd->iflist[i]->net;
3715 allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3716 }
3717 }
3718 #else
3719 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3720 #endif /* MCAST_LIST_ACCUMULATION */
3721
3722 allmulti = htol32(allmulti);
3723
3724 memset(&ioc, 0, sizeof(ioc));
3725 ioc.cmd = WLC_SET_PROMISC;
3726 ioc.buf = &allmulti;
3727 ioc.len = sizeof(allmulti);
3728 ioc.set = TRUE;
3729
3730 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3731 if (ret < 0) {
3732 DHD_ERROR(("%s: set promisc %d failed\n",
3733 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3734 }
3735 }
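
/*
 * Editorial sketch of the "mcast_list" iovar buffer built above, inferred
 * from the code: the NUL-terminated iovar name, a little-endian 32-bit
 * address count, then cnt 6-byte MAC addresses packed back to back.
 *
 *	+--------------+----+------------+--------------+--------------+---
 *	| "mcast_list" | \0 | cnt (le32) | addr[0] (6B) | addr[1] (6B) |...
 *	+--------------+----+------------+--------------+--------------+---
 *
 *	buflen = sizeof("mcast_list") + sizeof(cnt) + cnt * ETHER_ADDR_LEN
 */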
3736
3737 int
3738 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3739 {
3740 int ret;
3741
3742 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
3743 ETHER_ADDR_LEN, NULL, 0, TRUE);
3744 if (ret < 0) {
3745 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3746 } else {
3747 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3748 if (ifidx == 0)
3749 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3750 }
3751
3752 return ret;
3753 }
3754
3755 #ifdef SOFTAP
3756 extern struct net_device *ap_net_dev;
3757 extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
3758 #endif // endif
3759
3760 #ifdef DHD_PSTA
3761 /* Get psta/psr configuration */
3762 int dhd_get_psta_mode(dhd_pub_t *dhdp)
3763 {
3764 dhd_info_t *dhd = dhdp->info;
3765 return (int)dhd->psta_mode;
3766 }
3767 /* Set psta/psr configuration */
3768 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3769 {
3770 dhd_info_t *dhd = dhdp->info;
3771 dhd->psta_mode = val;
3772 return 0;
3773 }
3774 #endif /* DHD_PSTA */
3775
3776 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
3777 static void
3778 dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
3779 {
3780 dhd_info_t *dhd = dhdp->info;
3781 dhd_if_t *ifp;
3782
3783 ASSERT(idx < DHD_MAX_IFS);
3784
3785 ifp = dhd->iflist[idx];
3786
3787 if (
3788 #ifdef DHD_L2_FILTER
3789 (ifp->block_ping) ||
3790 #endif // endif
3791 #ifdef DHD_WET
3792 (dhd->wet_mode) ||
3793 #endif // endif
3794 #ifdef DHD_MCAST_REGEN
3795 (ifp->mcast_regen_bss_enable) ||
3796 #endif // endif
3797 FALSE) {
3798 ifp->rx_pkt_chainable = FALSE;
3799 }
3800 }
3801 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
3802
3803 #ifdef DHD_WET
3804 /* Get wet configuration */
3805 int dhd_get_wet_mode(dhd_pub_t *dhdp)
3806 {
3807 dhd_info_t *dhd = dhdp->info;
3808 return (int)dhd->wet_mode;
3809 }
3810
3811 /* Set wet configuration */
3812 int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
3813 {
3814 dhd_info_t *dhd = dhdp->info;
3815 dhd->wet_mode = val;
3816 dhd_update_rx_pkt_chainable_state(dhdp, 0);
3817 return 0;
3818 }
3819 #endif /* DHD_WET */
3820
3821 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3822 int32 dhd_role_to_nl80211_iftype(int32 role)
3823 {
3824 switch (role) {
3825 case WLC_E_IF_ROLE_STA:
3826 return NL80211_IFTYPE_STATION;
3827 case WLC_E_IF_ROLE_AP:
3828 return NL80211_IFTYPE_AP;
3829 case WLC_E_IF_ROLE_WDS:
3830 return NL80211_IFTYPE_WDS;
3831 case WLC_E_IF_ROLE_P2P_GO:
3832 return NL80211_IFTYPE_P2P_GO;
3833 case WLC_E_IF_ROLE_P2P_CLIENT:
3834 return NL80211_IFTYPE_P2P_CLIENT;
3835 case WLC_E_IF_ROLE_IBSS:
3836 case WLC_E_IF_ROLE_NAN:
3837 return NL80211_IFTYPE_ADHOC;
3838 default:
3839 return NL80211_IFTYPE_UNSPECIFIED;
3840 }
3841 }
3842 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3843
3844 static void
3845 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3846 {
3847 dhd_info_t *dhd = handle;
3848 dhd_if_event_t *if_event = event_info;
3849 int ifidx, bssidx;
3850 int ret;
3851 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3852 struct wl_if_event_info info;
3853 #else
3854 struct net_device *ndev;
3855 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3856
3857 BCM_REFERENCE(ret);
3858 if (event != DHD_WQ_WORK_IF_ADD) {
3859 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3860 return;
3861 }
3862
3863 if (!dhd) {
3864 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3865 return;
3866 }
3867
3868 if (!if_event) {
3869 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3870 return;
3871 }
3872
3873 dhd_net_if_lock_local(dhd);
3874 DHD_OS_WAKE_LOCK(&dhd->pub);
3875 DHD_PERIM_LOCK(&dhd->pub);
3876
3877 ifidx = if_event->event.ifidx;
3878 bssidx = if_event->event.bssidx;
3879 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3880
3881 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3882 if (if_event->event.ifidx > 0) {
3883 u8 *mac_addr;
3884 bzero(&info, sizeof(info));
3885 info.ifidx = ifidx;
3886 info.bssidx = bssidx;
3887 info.role = if_event->event.role;
3888 strncpy(info.name, if_event->name, IFNAMSIZ);
3889 if (is_valid_ether_addr(if_event->mac)) {
3890 mac_addr = if_event->mac;
3891 } else {
3892 mac_addr = NULL;
3893 }
3894
3895 if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
3896 &info, mac_addr, NULL, true) == NULL) {
3897 /* Do the post interface create ops */
3898 DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
3899 goto done;
3900 }
3901 }
3902 #else
3903 	/* This path is for the non-Android case */
3904 	/* The interface name on the host and in the event msg are the same; */
3905 	/* the name in the event msg is used to create the dongle i/f list on the host */
3906 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3907 if_event->mac, bssidx, TRUE, if_event->name);
3908 if (!ndev) {
3909 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
3910 goto done;
3911 }
3912
3913 DHD_PERIM_UNLOCK(&dhd->pub);
3914 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3915 DHD_PERIM_LOCK(&dhd->pub);
3916 if (ret != BCME_OK) {
3917 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3918 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3919 goto done;
3920 }
3921 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3922
3923 #ifndef PCIE_FULL_DONGLE
3924 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3925 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3926 uint32 var_int = 1;
3927 ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
3928 NULL, 0, TRUE);
3929 if (ret != BCME_OK) {
3930 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3931 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3932 }
3933 }
3934 #endif /* PCIE_FULL_DONGLE */
3935
3936 done:
3937 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3938
3939 DHD_PERIM_UNLOCK(&dhd->pub);
3940 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3941 dhd_net_if_unlock_local(dhd);
3942 }
3943
3944 static void
3945 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3946 {
3947 dhd_info_t *dhd = handle;
3948 int ifidx;
3949 dhd_if_event_t *if_event = event_info;
3950
3951 if (event != DHD_WQ_WORK_IF_DEL) {
3952 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3953 return;
3954 }
3955
3956 if (!dhd) {
3957 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3958 return;
3959 }
3960
3961 if (!if_event) {
3962 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3963 return;
3964 }
3965
3966 dhd_net_if_lock_local(dhd);
3967 DHD_OS_WAKE_LOCK(&dhd->pub);
3968 DHD_PERIM_LOCK(&dhd->pub);
3969
3970 ifidx = if_event->event.ifidx;
3971 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3972
3973 DHD_PERIM_UNLOCK(&dhd->pub);
3974 if (!dhd->pub.info->iflist[ifidx]) {
3975 /* No matching netdev found */
3976 DHD_ERROR(("Netdev not found! Do nothing.\n"));
3977 goto done;
3978 }
3979 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
3980 if (if_event->event.ifidx > 0) {
3981 /* Do the post interface del ops */
3982 if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
3983 true, if_event->event.ifidx) != 0) {
3984 DHD_TRACE(("Post ifdel ops failed. Returning \n"));
3985 goto done;
3986 }
3987 }
3988 #else
3989 /* For non-cfg80211 drivers */
3990 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3991 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
3992
3993 done:
3994 DHD_PERIM_LOCK(&dhd->pub);
3995 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3996 DHD_PERIM_UNLOCK(&dhd->pub);
3997 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3998 dhd_net_if_unlock_local(dhd);
3999 }
4000
4001 static void
4002 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
4003 {
4004 dhd_info_t *dhd = handle;
4005 dhd_if_t *ifp = event_info;
4006
4007 	if (event != DHD_WQ_WORK_SET_MAC) {
4008 		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;	/* bail out like the other deferred-work handlers */
4009 	}
4010
4011 if (!dhd) {
4012 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4013 return;
4014 }
4015
4016 dhd_net_if_lock_local(dhd);
4017 DHD_OS_WAKE_LOCK(&dhd->pub);
4018 DHD_PERIM_LOCK(&dhd->pub);
4019
4020 #ifdef SOFTAP
4021 {
4022 unsigned long flags;
4023 bool in_ap = FALSE;
4024 DHD_GENERAL_LOCK(&dhd->pub, flags);
4025 in_ap = (ap_net_dev != NULL);
4026 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4027
4028 if (in_ap) {
4029 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
4030 ifp->net->name));
4031 goto done;
4032 }
4033 }
4034 #endif /* SOFTAP */
4035
4036 if (ifp == NULL || !dhd->pub.up) {
4037 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4038 goto done;
4039 }
4040
4041 	DHD_ERROR(("%s: MAC address update requested\n", __FUNCTION__));
4042 ifp->set_macaddress = FALSE;
4043 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
4044 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
4045 else
4046 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
4047
4048 done:
4049 DHD_PERIM_UNLOCK(&dhd->pub);
4050 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4051 dhd_net_if_unlock_local(dhd);
4052 }
4053
4054 static void
4055 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
4056 {
4057 dhd_info_t *dhd = handle;
4058 int ifidx = (int)((long int)event_info);
4059 dhd_if_t *ifp = NULL;
4060
4061 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
4062 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4063 return;
4064 }
4065
4066 if (!dhd) {
4067 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4068 return;
4069 }
4070
4071 dhd_net_if_lock_local(dhd);
4072 DHD_OS_WAKE_LOCK(&dhd->pub);
4073 DHD_PERIM_LOCK(&dhd->pub);
4074
4075 ifp = dhd->iflist[ifidx];
4076
4077 if (ifp == NULL || !dhd->pub.up) {
4078 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4079 goto done;
4080 }
4081
4082 #ifdef SOFTAP
4083 {
4084 bool in_ap = FALSE;
4085 unsigned long flags;
4086 DHD_GENERAL_LOCK(&dhd->pub, flags);
4087 in_ap = (ap_net_dev != NULL);
4088 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4089
4090 if (in_ap) {
4091 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
4092 ifp->net->name));
4093 ifp->set_multicast = FALSE;
4094 goto done;
4095 }
4096 }
4097 #endif /* SOFTAP */
4098
4099 ifidx = ifp->idx;
4100
4101 #ifdef MCAST_LIST_ACCUMULATION
4102 ifidx = 0;
4103 #endif /* MCAST_LIST_ACCUMULATION */
4104
4105 _dhd_set_multicast_list(dhd, ifidx);
4106 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
4107
4108 done:
4109 DHD_PERIM_UNLOCK(&dhd->pub);
4110 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4111 dhd_net_if_unlock_local(dhd);
4112 }
4113
4114 static int
4115 dhd_set_mac_address(struct net_device *dev, void *addr)
4116 {
4117 int ret = 0;
4118
4119 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4120 struct sockaddr *sa = (struct sockaddr *)addr;
4121 int ifidx;
4122 dhd_if_t *dhdif;
4123
4124 ifidx = dhd_net2idx(dhd, dev);
4125 if (ifidx == DHD_BAD_IF)
4126 return -1;
4127
4128 dhdif = dhd->iflist[ifidx];
4129
4130 dhd_net_if_lock_local(dhd);
4131 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
4132 dhdif->set_macaddress = TRUE;
4133 dhd_net_if_unlock_local(dhd);
4134 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
4135 dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
4136 return ret;
4137 }
4138
4139 static void
4140 dhd_set_multicast_list(struct net_device *dev)
4141 {
4142 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4143 int ifidx;
4144
4145 ifidx = dhd_net2idx(dhd, dev);
4146 if (ifidx == DHD_BAD_IF)
4147 return;
4148
4149 dhd->iflist[ifidx]->set_multicast = TRUE;
4150 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
4151 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
4152 }
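
/*
 * Editorial note: dhd_set_mac_address() and dhd_set_multicast_list() above
 * are net_device callbacks that may run in atomic context, so they only
 * record the request and defer the firmware ioctls to the driver workqueue,
 * where the handlers are free to sleep. The shape of the deferral, as used
 * above:
 *
 *	dhdif->set_macaddress = TRUE;			// record the request
 *	dhd_deferred_schedule_work(dhd->dhd_deferred_wq,
 *		(void *)dhdif, DHD_WQ_WORK_SET_MAC,	// run the handler later
 *		dhd_set_mac_addr_handler,		// in process context
 *		DHD_WQ_WORK_PRIORITY_LOW);
 */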
4153
4154 #ifdef DHD_UCODE_DOWNLOAD
4155 /* Get ucode path */
4156 char *
4157 dhd_get_ucode_path(dhd_pub_t *dhdp)
4158 {
4159 dhd_info_t *dhd = dhdp->info;
4160 return dhd->uc_path;
4161 }
4162 #endif /* DHD_UCODE_DOWNLOAD */
4163
4164 #ifdef PROP_TXSTATUS
4165 int
4166 dhd_os_wlfc_block(dhd_pub_t *pub)
4167 {
4168 dhd_info_t *di = (dhd_info_t *)(pub->info);
4169 ASSERT(di != NULL);
4170 spin_lock_bh(&di->wlfc_spinlock);
4171 return 1;
4172 }
4173
4174 int
4175 dhd_os_wlfc_unblock(dhd_pub_t *pub)
4176 {
4177 dhd_info_t *di = (dhd_info_t *)(pub->info);
4178
4179 ASSERT(di != NULL);
4180 spin_unlock_bh(&di->wlfc_spinlock);
4181 return 1;
4182 }
4183
4184 #endif /* PROP_TXSTATUS */
4185
4186 /* This routine does not support the packet chain feature; it is currently
4187  * tested only for the proxy ARP feature
4188  */
4189 int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
4190 {
4191 struct sk_buff *skb;
4192 void *skbhead = NULL;
4193 void *skbprev = NULL;
4194 dhd_if_t *ifp;
4195 ASSERT(!PKTISCHAINED(p));
4196 skb = PKTTONATIVE(dhdp->osh, p);
4197
4198 ifp = dhdp->info->iflist[ifidx];
4199 skb->dev = ifp->net;
4200
4201 skb->protocol = eth_type_trans(skb, skb->dev);
4202
4203 if (in_interrupt()) {
4204 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4205 __FUNCTION__, __LINE__);
4206 netif_rx(skb);
4207 } else {
4208 if (dhdp->info->rxthread_enabled) {
4209 if (!skbhead) {
4210 skbhead = skb;
4211 } else {
4212 PKTSETNEXT(dhdp->osh, skbprev, skb);
4213 }
4214 skbprev = skb;
4215 } else {
4216 /* If the receive is not processed inside an ISR,
4217 * the softirqd must be woken explicitly to service
4218 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4219 * by netif_rx_ni(), but in earlier kernels, we need
4220 * to do it manually.
4221 */
4222 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4223 __FUNCTION__, __LINE__);
4224 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4225 netif_rx_ni(skb);
4226 #else
4227 ulong flags;
4228 netif_rx(skb);
4229 local_irq_save(flags);
4230 RAISE_RX_SOFTIRQ();
4231 local_irq_restore(flags);
4232 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4233 }
4234 }
4235
4236 if (dhdp->info->rxthread_enabled && skbhead)
4237 dhd_sched_rxf(dhdp, skbhead);
4238
4239 return BCME_OK;
4240 }
4241
4242 int BCMFASTPATH
4243 __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4244 {
4245 int ret = BCME_OK;
4246 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4247 struct ether_header *eh = NULL;
4248 #if defined(DHD_L2_FILTER)
4249 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
4250 #endif // endif
4251
4252 /* Reject if down */
4253 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
4254 /* free the packet here since the caller won't */
4255 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4256 return -ENODEV;
4257 }
4258
4259 #ifdef PCIE_FULL_DONGLE
4260 if (dhdp->busstate == DHD_BUS_SUSPEND) {
4261 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
4262 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4263 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4264 return -ENODEV;
4265 #else
4266 return NETDEV_TX_BUSY;
4267 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
4268 }
4269 #endif /* PCIE_FULL_DONGLE */
4270
4271 /* Reject if pktlen > MAX_MTU_SZ */
4272 if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
4273 /* free the packet here since the caller won't */
4274 dhdp->tx_big_packets++;
4275 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4276 return BCME_ERROR;
4277 }
4278
4279 #ifdef DHD_L2_FILTER
4280 /* if dhcp_unicast is enabled, we need to convert the */
4281 /* broadcast DHCP ACK/REPLY packets to Unicast. */
4282 if (ifp->dhcp_unicast) {
4283 uint8* mac_addr;
4284 uint8* ehptr = NULL;
4285 int ret;
4286 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
4287 if (ret == BCME_OK) {
4288 			/* if the given mac address has a valid entry in the
4289 			 * sta list, rewrite the destination with that address
4290 			 */
4291 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
4292 ehptr = PKTDATA(dhdp->osh, pktbuf);
4293 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
4294 }
4295 }
4296 }
4297
4298 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4299 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4300 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4301 return BCME_ERROR;
4302 }
4303 }
4304
4305 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4306 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
4307
4308 /* Drop the packets if l2 filter has processed it already
4309 * otherwise continue with the normal path
4310 */
4311 if (ret == BCME_OK) {
4312 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4313 return BCME_ERROR;
4314 }
4315 }
4316 #endif /* DHD_L2_FILTER */
4317 /* Update multicast statistic */
4318 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
4319 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
4320 eh = (struct ether_header *)pktdata;
4321
4322 if (ETHER_ISMULTI(eh->ether_dhost))
4323 dhdp->tx_multicast++;
4324 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
4325 #ifdef DHD_LOSSLESS_ROAMING
4326 uint8 prio = (uint8)PKTPRIO(pktbuf);
4327
4328 /* back up 802.1x's priority */
4329 dhdp->prio_8021x = prio;
4330 #endif /* DHD_LOSSLESS_ROAMING */
4331 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
4332 atomic_inc(&dhd->pend_8021x_cnt);
4333 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
4334 wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
4335 pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
4336 #endif /* WL_CFG80211 && WL_WPS_SYNC */
4337 #if defined(DHD_8021X_DUMP)
4338 dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4339 #endif /* DHD_8021X_DUMP */
4340 }
4341
4342 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
4343 #ifdef DHD_DHCP_DUMP
4344 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4345 #endif /* DHD_DHCP_DUMP */
4346 #ifdef DHD_ICMP_DUMP
4347 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
4348 #endif /* DHD_ICMP_DUMP */
4349 }
4350 } else {
4351 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4352 return BCME_ERROR;
4353 }
4354
4355 {
4356 /* Look into the packet and update the packet priority */
4357 #ifndef PKTPRIO_OVERRIDE
4358 if (PKTPRIO(pktbuf) == 0)
4359 #endif /* !PKTPRIO_OVERRIDE */
4360 {
4361 #if defined(QOS_MAP_SET)
4362 pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
4363 #else
4364 pktsetprio(pktbuf, FALSE);
4365 #endif /* QOS_MAP_SET */
4366 }
4367 #ifndef PKTPRIO_OVERRIDE
4368 else {
4369 			/* Some protocols like OZMO use priority values from 256..263.
4370 			 * These are magic values to indicate a specific 802.1d priority.
4371 			 * Make sure that the priority field is in the range of 0..7
4372 			 */
4373 PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
4374 }
4375 #endif /* !PKTPRIO_OVERRIDE */
4376 }
4377
4378 #ifdef SUPPORT_SET_TID
4379 dhd_set_tid_based_on_uid(dhdp, pktbuf);
4380 #endif /* SUPPORT_SET_TID */
4381
4382 #ifdef PCIE_FULL_DONGLE
4383 /*
4384 * Lkup the per interface hash table, for a matching flowring. If one is not
4385 * available, allocate a unique flowid and add a flowring entry.
4386 * The found or newly created flowid is placed into the pktbuf's tag.
4387 */
4388 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
4389 if (ret != BCME_OK) {
4390 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
4391 return ret;
4392 }
4393 #endif // endif
4394
4395 #ifdef PROP_TXSTATUS
4396 if (dhd_wlfc_is_supported(dhdp)) {
4397 /* store the interface ID */
4398 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
4399
4400 /* store destination MAC in the tag as well */
4401 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
4402
4403 /* decide which FIFO this packet belongs to */
4404 if (ETHER_ISMULTI(eh->ether_dhost))
4405 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
4406 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
4407 else
4408 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
4409 } else
4410 #endif /* PROP_TXSTATUS */
4411 {
4412 /* If the protocol uses a data header, apply it */
4413 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
4414 }
4415
4416 /* Use bus module to send data frame */
4417 #ifdef DYNAMIC_MUMIMO_CONTROL
4418 if (dhdp->reassoc_mumimo_sw &&
4419 dhd_check_eapol_4way_message(PKTDATA(dhdp->osh, pktbuf)) == EAPOL_4WAY_M4) {
4420 dhdp->reassoc_mumimo_sw = 0;
4421 DHD_ENABLE_RUNTIME_PM(dhdp);
4422 }
4423 #endif /* DYNAMIC_MUMIMO_CONTROL */
4424 #ifdef PROP_TXSTATUS
4425 {
4426 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
4427 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
4428 /* non-proptxstatus way */
4429 #ifdef BCMPCIE
4430 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
4431 #else
4432 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
4433 #endif /* BCMPCIE */
4434 }
4435 }
4436 #else
4437 #ifdef BCMPCIE
4438 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
4439 #else
4440 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
4441 #endif /* BCMPCIE */
4442 #endif /* PROP_TXSTATUS */
4443
4444 return ret;
4445 }
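
/*
 * Editorial reference for the priority handling in __dhd_sendpkt() above:
 * the 802.1d priority set by pktsetprio()/pktsetprio_qms() is mapped to a
 * WMM access category via WME_PRIO2AC(), following the standard mapping:
 *
 *	prio 1,2 -> AC_BK	prio 0,3 -> AC_BE
 *	prio 4,5 -> AC_VI	prio 6,7 -> AC_VO
 *
 * and proptxstatus reserves one extra queue index (AC_COUNT) for bc/mc
 * frames, as set with DHD_PKTTAG_SETFIFO() above.
 */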
4446
4447 int BCMFASTPATH
4448 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4449 {
4450 int ret = 0;
4451 unsigned long flags;
4452 dhd_if_t *ifp;
4453
4454 DHD_GENERAL_LOCK(dhdp, flags);
4455 ifp = dhd_get_ifp(dhdp, ifidx);
4456 if (!ifp || ifp->del_in_progress) {
4457 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
4458 __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
4459 DHD_GENERAL_UNLOCK(dhdp, flags);
4460 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4461 return -ENODEV;
4462 }
4463 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
4464 DHD_ERROR(("%s: returning as busstate=%d\n",
4465 __FUNCTION__, dhdp->busstate));
4466 DHD_GENERAL_UNLOCK(dhdp, flags);
4467 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4468 return -ENODEV;
4469 }
4470 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
4471 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
4472 DHD_GENERAL_UNLOCK(dhdp, flags);
4473
4474 #ifdef DHD_PCIE_RUNTIMEPM
4475 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
4476 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
4477 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4478 ret = -EBUSY;
4479 goto exit;
4480 }
4481 #endif /* DHD_PCIE_RUNTIMEPM */
4482
4483 DHD_GENERAL_LOCK(dhdp, flags);
4484 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
4485 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4486 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
4487 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
4488 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
4489 dhd_os_tx_completion_wake(dhdp);
4490 dhd_os_busbusy_wake(dhdp);
4491 DHD_GENERAL_UNLOCK(dhdp, flags);
4492 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4493 return -ENODEV;
4494 }
4495 DHD_GENERAL_UNLOCK(dhdp, flags);
4496
4497 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
4498
4499 #ifdef DHD_PCIE_RUNTIMEPM
4500 exit:
4501 #endif // endif
4502 DHD_GENERAL_LOCK(dhdp, flags);
4503 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
4504 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
4505 dhd_os_tx_completion_wake(dhdp);
4506 dhd_os_busbusy_wake(dhdp);
4507 DHD_GENERAL_UNLOCK(dhdp, flags);
4508 return ret;
4509 }
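
/*
 * Editorial note: dhd_sendpkt() brackets the transmit with bus-busy and
 * per-interface tx-active flags so suspend and interface-delete paths can
 * wait for in-flight transmits to drain. Every early exit must undo exactly
 * what it set, as the code above does:
 *
 *	DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);	// mark tx in flight
 *	DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
 *	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
 *	DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
 *	DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
 *	dhd_os_busbusy_wake(dhdp);		// wake any drain waiters
 */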
4510
4511 #if defined(DHD_LB_TXP)
4512
4513 int BCMFASTPATH
4514 dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
4515 int ifidx, void *skb)
4516 {
4517 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
4518
4519 /* If the feature is disabled run-time do TX from here */
4520 if (atomic_read(&dhd->lb_txp_active) == 0) {
4521 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
4522 return __dhd_sendpkt(&dhd->pub, ifidx, skb);
4523 }
4524
4525 /* Store the address of net device and interface index in the Packet tag */
4526 DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
4527 DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
4528
4529 /* Enqueue the skb into tx_pend_queue */
4530 skb_queue_tail(&dhd->tx_pend_queue, skb);
4531
4532 DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
4533
4534 /* Dispatch the Tx job to be processed by the tx_tasklet */
4535 dhd_lb_tx_dispatch(&dhd->pub);
4536
4537 return NETDEV_TX_OK;
4538 }
4539 #endif /* DHD_LB_TXP */
4540
4541 int BCMFASTPATH
4542 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
4543 {
4544 int ret;
4545 uint datalen;
4546 void *pktbuf;
4547 dhd_info_t *dhd = DHD_DEV_INFO(net);
4548 dhd_if_t *ifp = NULL;
4549 int ifidx;
4550 unsigned long flags;
4551 uint8 htsfdlystat_sz = 0;
4552
4553 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4554
4555 if (dhd_query_bus_erros(&dhd->pub)) {
4556 return -ENODEV;
4557 }
4558
4559 DHD_GENERAL_LOCK(&dhd->pub, flags);
4560 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
4561 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4562
4563 #ifdef DHD_PCIE_RUNTIMEPM
4564 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
4565 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
4566 /* stop the network queue temporarily until resume done */
4567 DHD_GENERAL_LOCK(&dhd->pub, flags);
4568 if (!dhdpcie_is_resume_done(&dhd->pub)) {
4569 dhd_bus_stop_queue(dhd->pub.bus);
4570 }
4571 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4572 dhd_os_busbusy_wake(&dhd->pub);
4573 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4574 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4575 return -ENODEV;
4576 #else
4577 return NETDEV_TX_BUSY;
4578 #endif // endif
4579 }
4580 #endif /* DHD_PCIE_RUNTIMEPM */
4581
4582 DHD_GENERAL_LOCK(&dhd->pub, flags);
4583 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
4584 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
4585 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
4586 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4587 #ifdef PCIE_FULL_DONGLE
4588 /* Stop tx queues if suspend is in progress */
4589 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
4590 dhd_bus_stop_queue(dhd->pub.bus);
4591 }
4592 #endif /* PCIE_FULL_DONGLE */
4593 dhd_os_busbusy_wake(&dhd->pub);
4594 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4595 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4596 return -ENODEV;
4597 #else
4598 return NETDEV_TX_BUSY;
4599 #endif // endif
4600 }
4601
4602 DHD_OS_WAKE_LOCK(&dhd->pub);
4603 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4604
4605 #if defined(DHD_HANG_SEND_UP_TEST)
4606 if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
4607 dhd->pub.busstate = DHD_BUS_DOWN;
4608 }
4609 #endif /* DHD_HANG_SEND_UP_TEST */
4610
4611 /* Reject if down */
4612 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
4613 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
4614 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
4615 netif_stop_queue(net);
4616 /* Send Event when bus down detected during data session */
4617 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
4618 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
4619 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
4620 net_os_send_hang_message(net);
4621 }
4622 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4623 dhd_os_busbusy_wake(&dhd->pub);
4624 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4625 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4626 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4627 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4628 return -ENODEV;
4629 #else
4630 return NETDEV_TX_BUSY;
4631 #endif // endif
4632 }
4633
4634 ifp = DHD_DEV_IFP(net);
4635 ifidx = DHD_DEV_IFIDX(net);
4636 if (!ifp || (ifidx == DHD_BAD_IF) ||
4637 ifp->del_in_progress) {
4638 DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
4639 __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
4640 netif_stop_queue(net);
4641 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4642 dhd_os_busbusy_wake(&dhd->pub);
4643 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4644 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4645 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4646 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4647 return -ENODEV;
4648 #else
4649 return NETDEV_TX_BUSY;
4650 #endif // endif
4651 }
4652
4653 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
4654 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4655
4656 ASSERT(ifidx == dhd_net2idx(dhd, net));
4657 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4658
4659 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4660
4661 /* re-align socket buffer if "skb->data" is odd address */
4662 if (((unsigned long)(skb->data)) & 0x1) {
4663 unsigned char *data = skb->data;
4664 uint32 length = skb->len;
4665 PKTPUSH(dhd->pub.osh, skb, 1);
4666 memmove(skb->data, data, length);
4667 PKTSETLEN(dhd->pub.osh, skb, length);
4668 }
4669
4670 datalen = PKTLEN(dhd->pub.osh, skb);
4671
4672 /* Make sure there's enough room for any header */
4673 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4674 struct sk_buff *skb2;
4675
4676 DHD_INFO(("%s: insufficient headroom\n",
4677 dhd_ifname(&dhd->pub, ifidx)));
4678 dhd->pub.tx_realloc++;
4679
4680 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4681 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4682
4683 dev_kfree_skb(skb);
4684 if ((skb = skb2) == NULL) {
4685 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4686 dhd_ifname(&dhd->pub, ifidx)));
4687 ret = -ENOMEM;
4688 goto done;
4689 }
4690 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4691 }
4692
4693 /* Convert to packet */
4694 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4695 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4696 dhd_ifname(&dhd->pub, ifidx)));
4697 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4698 dev_kfree_skb_any(skb);
4699 ret = -ENOMEM;
4700 goto done;
4701 }
4702
4703 #ifdef DHD_WET
4704 /* wet related packet proto manipulation should be done in DHD
4705 since dongle doesn't have complete payload
4706 */
4707 if (WET_ENABLED(&dhd->pub) &&
4708 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
4709 DHD_INFO(("%s:%s: wet send proc failed\n",
4710 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
4711 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
4712 ret = -EFAULT;
4713 goto done;
4714 }
4715 #endif /* DHD_WET */
4716
4717 #ifdef DHD_PSTA
4718 /* PSR related packet proto manipulation should be done in DHD
4719 * since dongle doesn't have complete payload
4720 */
4721 if (PSR_ENABLED(&dhd->pub) &&
4722 (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
4723
4724 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4725 dhd_ifname(&dhd->pub, ifidx)));
4726 }
4727 #endif /* DHD_PSTA */
4728
4729 #ifdef DHDTCPSYNC_FLOOD_BLK
4730 if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
4731 ifp->tsyncack_txed ++;
4732 }
4733 #endif /* DHDTCPSYNC_FLOOD_BLK */
4734
4735 #ifdef DHDTCPACK_SUPPRESS
4736 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4737 /* If this packet has been hold or got freed, just return */
4738 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4739 ret = 0;
4740 goto done;
4741 }
4742 } else {
4743 /* If this packet has replaced another packet and got freed, just return */
4744 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4745 ret = 0;
4746 goto done;
4747 }
4748 }
4749 #endif /* DHDTCPACK_SUPPRESS */
4750
4751 /*
4752 * If Load Balance is enabled queue the packet
4753 * else send directly from here.
4754 */
4755 #if defined(DHD_LB_TXP)
4756 ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
4757 #else
4758 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4759 #endif // endif
4760
4761 done:
4762 if (ret) {
4763 ifp->stats.tx_dropped++;
4764 dhd->pub.tx_dropped++;
4765 } else {
4766 #ifdef PROP_TXSTATUS
4767 		/* tx_packets counter is updated only when wlfc is disabled */
4768 if (!dhd_wlfc_is_supported(&dhd->pub))
4769 #endif // endif
4770 {
4771 dhd->pub.tx_packets++;
4772 ifp->stats.tx_packets++;
4773 ifp->stats.tx_bytes += datalen;
4774 }
4775 }
4776
4777 DHD_GENERAL_LOCK(&dhd->pub, flags);
4778 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
4779 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
4780 dhd_os_tx_completion_wake(&dhd->pub);
4781 dhd_os_busbusy_wake(&dhd->pub);
4782 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4783 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4784 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4785 /* Return ok: we always eat the packet */
4786 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4787 return 0;
4788 #else
4789 return NETDEV_TX_OK;
4790 #endif // endif
4791 }
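
/*
 * Editorial sketch of the re-alignment done in dhd_start_xmit() above: when
 * skb->data lands on an odd address, the payload is slid down one byte so
 * later (half)word accesses to the Ethernet header stay aligned. This
 * assumes at least one byte of headroom, which PKTPUSH() consumes:
 *
 *	before:	data = 0x...1, len = L
 *	PKTPUSH(osh, skb, 1);		// data = 0x...0, len = L + 1
 *	memmove(skb->data, data, L);	// slide payload down one byte
 *	PKTSETLEN(osh, skb, L);		// restore the original length
 */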
4792
4793 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4794 void dhd_rx_wq_wakeup(struct work_struct *ptr)
4795 {
4796 struct dhd_rx_tx_work *work;
4797 struct dhd_pub * pub;
4798
4799 work = container_of(ptr, struct dhd_rx_tx_work, work);
4800
4801 pub = work->pub;
4802
4803 DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
4804
4805 if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
4806 return;
4807 }
4808
4809 DHD_OS_WAKE_LOCK(pub);
4810 if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
4811
4812 // do nothing but wakeup the bus.
4813 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
4814 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
4815 }
4816 DHD_OS_WAKE_UNLOCK(pub);
4817 kfree(work);
4818 }
4819
4820 void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
4821 {
4822 struct dhd_rx_tx_work *work;
4823 	int ret = 0;
4824 dhd_info_t *dhd;
4825 struct dhd_bus * bus;
4826
4827 work = container_of(ptr, struct dhd_rx_tx_work, work);
4828
4829 dhd = DHD_DEV_INFO(work->net);
4830
4831 bus = dhd->pub.bus;
4832
4833 if (atomic_read(&dhd->pub.block_bus)) {
4834 kfree_skb(work->skb);
4835 kfree(work);
4836 dhd_netif_start_queue(bus);
4837 return;
4838 }
4839
4840 if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
4841 ret = dhd_start_xmit(work->skb, work->net);
4842 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
4843 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
4844 }
4845 	if (ret)
4846 		netdev_err(work->net,
4847 			"error: dhd_start_xmit():%d\n", ret);
4848 
4849 	kfree(work);
4850 	dhd_netif_start_queue(bus);
4851 }
4852
4853 int BCMFASTPATH
4854 dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
4855 {
4856 struct dhd_rx_tx_work *start_xmit_work;
4857 int ret;
4858 dhd_info_t *dhd = DHD_DEV_INFO(net);
4859
4860 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
4861 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
4862
4863 dhd_netif_stop_queue(dhd->pub.bus);
4864
4865 start_xmit_work = (struct dhd_rx_tx_work*)
4866 kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
4867
4868 if (!start_xmit_work) {
4869 netdev_err(net,
4870 "error: failed to alloc start_xmit_work\n");
4871 ret = -ENOMEM;
4872 goto exit;
4873 }
4874
4875 INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
4876 start_xmit_work->skb = skb;
4877 start_xmit_work->net = net;
4878 queue_work(dhd->tx_wq, &start_xmit_work->work);
4879 ret = NET_XMIT_SUCCESS;
4880
4881 } else if (dhd->pub.busstate == DHD_BUS_DATA) {
4882 ret = dhd_start_xmit(skb, net);
4883 } else {
4884 /* when bus is down */
4885 ret = -ENODEV;
4886 }
4887
4888 exit:
4889 return ret;
4890 }
4891 void
4892 dhd_bus_wakeup_work(dhd_pub_t *dhdp)
4893 {
4894 struct dhd_rx_tx_work *rx_work;
4895 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4896
4897 rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
4898 if (!rx_work) {
4899 DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
4900 return;
4901 }
4902
4903 INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
4904 rx_work->pub = dhdp;
4905 queue_work(dhd->rx_wq, &rx_work->work);
4906
4907 }
4908 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
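
/*
 * Editorial note on the runtime-PM pattern above: each worker takes a
 * runtime-PM reference to force the bus awake, does its work, then re-arms
 * autosuspend. These are the stock kernel APIs:
 *
 *	if (pm_runtime_get_sync(dev) >= 0) {	// resume bus, hold reference
 *		...				// bus guaranteed awake here
 *		pm_runtime_mark_last_busy(dev);	// restart autosuspend timer
 *		pm_runtime_put_autosuspend(dev);// drop ref; suspend later
 *	}
 */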
4909 void
4910 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4911 {
4912 struct net_device *net;
4913 dhd_info_t *dhd = dhdp->info;
4914 int i;
4915
4916 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4917
4918 ASSERT(dhd);
4919
4920 #ifdef DHD_LOSSLESS_ROAMING
4921 /* block flowcontrol during roaming */
4922 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4923 return;
4924 }
4925 #endif // endif
4926
4927 if (ifidx == ALL_INTERFACES) {
4928 /* Flow control on all active interfaces */
4929 dhdp->txoff = state;
4930 for (i = 0; i < DHD_MAX_IFS; i++) {
4931 if (dhd->iflist[i]) {
4932 net = dhd->iflist[i]->net;
4933 if (state == ON)
4934 netif_stop_queue(net);
4935 else
4936 netif_wake_queue(net);
4937 }
4938 }
4939 } else {
4940 if (dhd->iflist[ifidx]) {
4941 net = dhd->iflist[ifidx]->net;
4942 if (state == ON)
4943 netif_stop_queue(net);
4944 else
4945 netif_wake_queue(net);
4946 }
4947 }
4948 }
4949
4950 #ifdef DHD_RX_DUMP
4951 typedef struct {
4952 uint16 type;
4953 const char *str;
4954 } PKTTYPE_INFO;
4955
4956 static const PKTTYPE_INFO packet_type_info[] =
4957 {
4958 { ETHER_TYPE_IP, "IP" },
4959 { ETHER_TYPE_ARP, "ARP" },
4960 { ETHER_TYPE_BRCM, "BRCM" },
4961 { ETHER_TYPE_802_1X, "802.1X" },
4962 #ifdef BCMWAPI_WAI
4963 { ETHER_TYPE_WAI, "WAPI" },
4964 #endif /* BCMWAPI_WAI */
4965 { 0, ""}
4966 };
4967
4968 static const char *_get_packet_type_str(uint16 type)
4969 {
4970 int i;
4971 	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
4972
4973 for (i = 0; i < n; i++) {
4974 if (packet_type_info[i].type == type)
4975 return packet_type_info[i].str;
4976 }
4977
4978 return packet_type_info[n].str;
4979 }
4980 #endif /* DHD_RX_DUMP */
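
/*
 * Editorial usage sketch for the DHD_RX_DUMP helper above: the table is
 * sentinel-terminated with { 0, "" }, so an unknown ether type falls through
 * to the empty string instead of indexing out of bounds.
 *
 *	uint16 type = ntoh16(eh->ether_type);
 *	DHD_ERROR(("RX pkt type: %s\n", _get_packet_type_str(type)));
 */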
4981
4982 #ifdef DHD_MCAST_REGEN
4983 /*
4984 * Description: This function is called to do the reverse translation
4985 *
4986 * Input eh - pointer to the ethernet header
4987 */
4988 int32
4989 dhd_mcast_reverse_translation(struct ether_header *eh)
4990 {
4991 	uint8 *iph;
4992 	uint32 dest_ip;
4993 
4994 	iph = (uint8 *)eh + ETHER_HDR_LEN;
4995 
4996 	/* Only IP packets are handled */
4997 	if (eh->ether_type != hton16(ETHER_TYPE_IP))
4998 		return BCME_ERROR;
4999 
5000 	/* Non-IPv4 multicast packets are not handled */
5001 	if (IP_VER(iph) != IP_VER_4)
5002 		return BCME_ERROR;
5003 
5004 	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
5005 /*
5006 * The packet has a multicast IP and unicast MAC. That means
5007 * we have to do the reverse translation
5008 */
5009 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
5010 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
5011 return BCME_OK;
5012 }
5013
5014 return BCME_ERROR;
5015 }
5016 #endif /* DHD_MCAST_REGEN */
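
/*
 * Editorial worked example for the reverse translation above, assuming the
 * conventional IPv4 multicast mapping (01:00:5e + low 23 bits of the group
 * address) behind ETHER_FILL_MCAST_ADDR_FROM_IP():
 *
 *	dest_ip 239.1.2.3 (0xef010203)  ->  ether_dhost 01:00:5e:01:02:03
 *
 * A frame carrying that group IP but a unicast ether_dhost (e.g. one that
 * was translated by PSTA) is rewritten back to the multicast MAC so the
 * stack sees an ordinary multicast frame.
 */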
5017
5018 #ifdef SHOW_LOGTRACE
5019 static void
5020 dhd_netif_rx_ni(struct sk_buff * skb)
5021 {
5022 	/* Do not call netif_receive_skb, as this workqueue scheduler is
5023 	 * not from NAPI. Also, as we are not in INTR context, do not call
5024 	 * netif_rx; instead call netif_rx_ni (for kernel >= 2.6), which
5025 	 * does netif_rx, disables irqs, raises the NET_RX softirq and
5026 	 * enables interrupts back
5027 	 */
5028 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5029 netif_rx_ni(skb);
5030 #else
5031 ulong flags;
5032 netif_rx(skb);
5033 local_irq_save(flags);
5034 RAISE_RX_SOFTIRQ();
5035 local_irq_restore(flags);
5036 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5037 }
5038
5039 static int
5040 dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
5041 {
5042 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5043 int ret = BCME_OK;
5044 uint datalen;
5045 bcm_event_msg_u_t evu;
5046 void *data = NULL;
5047 void *pktdata = NULL;
5048 bcm_event_t *pvt_data;
5049 uint pktlen;
5050
5051 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
5052
5053 /* In dhd_rx_frame, header is stripped using skb_pull
5054 * of size ETH_HLEN, so adjust pktlen accordingly
5055 */
5056 pktlen = skb->len + ETH_HLEN;
5057
5058 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5059 pktdata = (void *)skb_mac_header(skb);
5060 #else
5061 pktdata = (void *)skb->mac.raw;
5062 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5063
5064 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5065
5066 if (ret != BCME_OK) {
5067 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5068 __FUNCTION__, ret));
5069 goto exit;
5070 }
5071
5072 datalen = ntoh32(evu.event.datalen);
5073
5074 pvt_data = (bcm_event_t *)pktdata;
5075 data = &pvt_data[1];
5076
5077 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
5078
5079 exit:
5080 return ret;
5081 }
5082
5083 #define DHD_EVENT_LOGTRACE_BOUND 12
5084 #define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 1
5085
5086 static void
5087 dhd_event_logtrace_process(struct work_struct * work)
5088 {
5089 /* Ignore compiler warnings due to -Werror=cast-qual */
5090 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5091 #pragma GCC diagnostic push
5092 #pragma GCC diagnostic ignored "-Wcast-qual"
5093 #endif // endif
5094 struct delayed_work *dw = to_delayed_work(work);
5095 struct dhd_info *dhd =
5096 container_of(dw, struct dhd_info, event_log_dispatcher_work);
5097 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
5098 #pragma GCC diagnostic pop
5099 #endif // endif
5100
5101 dhd_pub_t *dhdp;
5102 struct sk_buff *skb;
5103 uint32 qlen;
5104 uint32 process_len;
5105
5106 if (!dhd) {
5107 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
5108 return;
5109 }
5110
5111 dhdp = &dhd->pub;
5112
5113 if (!dhdp) {
5114 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
5115 return;
5116 }
5117
5118 qlen = skb_queue_len(&dhd->evt_trace_queue);
5119 process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
5120
5121 /* Run while loop till bound is reached or skb queue is empty */
5122 while (process_len--) {
5123 int ifid = 0;
5124 skb = skb_dequeue(&dhd->evt_trace_queue);
5125 if (skb == NULL) {
5126 DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
5127 __FUNCTION__));
5128 break;
5129 }
5130 BCM_REFERENCE(ifid);
5131 #ifdef PCIE_FULL_DONGLE
5132 /* Check if pkt is from INFO ring or WLC_E_TRACE */
5133 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
5134 if (ifid == DHD_DUMMY_INFO_IF) {
5135 /* Process logtrace from info rings */
5136 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
5137 } else
5138 #endif /* PCIE_FULL_DONGLE */
5139 {
5140 			/* Processing WLC_E_TRACE case OR non-PCIE_FULL_DONGLE case */
5141 dhd_event_logtrace_pkt_process(dhdp, skb);
5142 }
5143
5144 /* Send packet up if logtrace_pkt_sendup is TRUE */
5145 if (dhdp->logtrace_pkt_sendup) {
5146 #ifdef DHD_USE_STATIC_CTRLBUF
5147 /* If bufs are allocated via static buf pool
5148 * and logtrace_pkt_sendup enabled, make a copy,
5149 * free the local one and send the copy up.
5150 */
5151 void *npkt = PKTDUP(dhdp->osh, skb);
5152 /* Clone event and send it up */
5153 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5154 if (npkt) {
5155 skb = npkt;
5156 } else {
5157 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
5158 /* Packet is already freed, go to next packet */
5159 continue;
5160 }
5161 #endif /* DHD_USE_STATIC_CTRLBUF */
5162 #ifdef PCIE_FULL_DONGLE
5163 			/* For infobuf packets, since ifidx is DHD_DUMMY_INFO_IF,
5164 			 * assign skb->dev to the primary interface's net device
5165 			 * before handing the skb to the network layer
5166 			 */
5167 if (ifid == DHD_DUMMY_INFO_IF) {
5168 skb = PKTTONATIVE(dhdp->osh, skb);
5169 skb->dev = dhd->iflist[0]->net;
5170 }
5171 #endif /* PCIE_FULL_DONGLE */
5172 /* Send pkt UP */
5173 dhd_netif_rx_ni(skb);
5174 } else {
5175 /* Don't send up. Free up the packet. */
5176 #ifdef DHD_USE_STATIC_CTRLBUF
5177 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5178 #else
5179 PKTFREE(dhdp->osh, skb, FALSE);
5180 #endif /* DHD_USE_STATIC_CTRLBUF */
5181 }
5182 }
5183
5184 /* Reschedule the workqueue if more packets to be processed */
5185 if (qlen >= DHD_EVENT_LOGTRACE_BOUND) {
5186 schedule_delayed_work(&dhd->event_log_dispatcher_work,
5187 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
5188 }
5189 }
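
/*
 * Editorial note: the worker above drains at most DHD_EVENT_LOGTRACE_BOUND
 * (12) packets per invocation and, when the queue held at least that many,
 * reschedules itself after DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS (1 ms),
 * bounding the time a burst of WLC_E_TRACE events can hold the workqueue:
 *
 *	process_len = MIN(skb_queue_len(q), DHD_EVENT_LOGTRACE_BOUND);
 *	while (process_len--)
 *		handle(skb_dequeue(q));		// per-skb processing above
 *	if (qlen >= DHD_EVENT_LOGTRACE_BOUND)	// likely more pending
 *		schedule_delayed_work(&work, msecs_to_jiffies(1));
 */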
5190
5191 void
5192 dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
5193 {
5194 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5195
5196 #ifdef PCIE_FULL_DONGLE
5197 /* Add ifidx in the PKTTAG */
5198 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
5199 #endif /* PCIE_FULL_DONGLE */
5200 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
5201
5202 schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
5203 }
5204
5205 void
5206 dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
5207 {
5208 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5209 struct sk_buff *skb;
5210
5211 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
5212 #ifdef DHD_USE_STATIC_CTRLBUF
5213 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5214 #else
5215 PKTFREE(dhdp->osh, skb, FALSE);
5216 #endif /* DHD_USE_STATIC_CTRLBUF */
5217 }
5218 }
5219 #endif /* SHOW_LOGTRACE */
5220
5221 /** Called when a frame is received by the dongle on interface 'ifidx' */
5222 void
5223 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
5224 {
5225 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5226 struct sk_buff *skb;
5227 uchar *eth;
5228 uint len;
5229 void *data, *pnext = NULL;
5230 int i;
5231 dhd_if_t *ifp;
5232 wl_event_msg_t event;
5233 int tout_rx = 0;
5234 int tout_ctrl = 0;
5235 void *skbhead = NULL;
5236 void *skbprev = NULL;
5237 uint16 protocol;
5238 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5239 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5240 unsigned char *dump_data;
5241 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5242 #ifdef DHD_MCAST_REGEN
5243 uint8 interface_role;
5244 if_flow_lkup_t *if_flow_lkup;
5245 unsigned long flags;
5246 #endif // endif
5247 #ifdef DHD_WAKE_STATUS
5248 int pkt_wake = 0;
5249 wake_counts_t *wcp = NULL;
5250 #endif /* DHD_WAKE_STATUS */
5251
5252 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5253
5254 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
5255 struct ether_header *eh;
5256
5257 pnext = PKTNEXT(dhdp->osh, pktbuf);
5258 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
5259
5260 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5261 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
5262 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5263 */
5264 if (ifidx == DHD_DUMMY_INFO_IF) {
5265 		/* Event msg printing is called from dhd_rx_frame, which runs in Tasklet
5266 		 * context for PCIe FD; for other buses it runs from DPC context. If we
5267 		 * get a bunch of events from the dongle, printing all of them from
5268 		 * Tasklet/DPC context, in the data path no less, is costly.
5269 		 * Also, in newer dongle SW (4359, 4355 onwards) console prints come as
5270 		 * events with type WLC_E_TRACE.
5271 		 * We print these console logs from WorkQueue context by enqueuing the SKB
5272 		 * here; dequeuing is done in the WorkQueue, and the SKB is freed only if
5273 		 * logtrace_pkt_sendup is TRUE
5274 		 */
5275 #ifdef SHOW_LOGTRACE
5276 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5277 #else /* !SHOW_LOGTRACE */
5278 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
5279 * free the PKT here itself
5280 */
5281 #ifdef DHD_USE_STATIC_CTRLBUF
5282 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5283 #else
5284 PKTFREE(dhdp->osh, pktbuf, FALSE);
5285 #endif /* DHD_USE_STATIC_CTRLBUF */
5286 #endif /* SHOW_LOGTRACE */
5287 continue;
5288 }
5289 #ifdef DHD_WAKE_STATUS
5290 pkt_wake = dhd_bus_get_bus_wake(dhdp);
5291 wcp = dhd_bus_get_wakecount(dhdp);
5292 if (wcp == NULL) {
5293 /* If wakeinfo count buffer is null do not update wake count values */
5294 pkt_wake = 0;
5295 }
5296 #endif /* DHD_WAKE_STATUS */
5297
5298 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5299
5300 if (ifidx >= DHD_MAX_IFS) {
5301 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
5302 __FUNCTION__, ifidx));
5303 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
5304 #ifdef DHD_USE_STATIC_CTRLBUF
5305 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5306 #else
5307 PKTFREE(dhdp->osh, pktbuf, FALSE);
5308 #endif /* DHD_USE_STATIC_CTRLBUF */
5309 } else {
5310 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5311 }
5312 continue;
5313 }
5314
5315 ifp = dhd->iflist[ifidx];
5316 if (ifp == NULL) {
5317 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5318 __FUNCTION__));
5319 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
5320 #ifdef DHD_USE_STATIC_CTRLBUF
5321 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5322 #else
5323 PKTFREE(dhdp->osh, pktbuf, FALSE);
5324 #endif /* DHD_USE_STATIC_CTRLBUF */
5325 } else {
5326 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5327 }
5328 continue;
5329 }
5330
5331 /* Dropping only data packets before registering net device to avoid kernel panic */
5332 #ifndef PROP_TXSTATUS_VSDB
5333 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
5334 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5335 #else
5336 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
5337 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5338 #endif /* PROP_TXSTATUS_VSDB */
5339 {
5340 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5341 __FUNCTION__));
5342 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5343 continue;
5344 }
5345
5346 #ifdef PROP_TXSTATUS
5347 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
5348 /* WLFC may send header only packet when
5349 there is an urgent message but no packet to
5350 piggy-back on
5351 */
5352 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5353 continue;
5354 }
5355 #endif // endif
5356 #ifdef DHD_L2_FILTER
5357 /* If block_ping is enabled drop the ping packet */
5358 if (ifp->block_ping) {
5359 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
5360 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5361 continue;
5362 }
5363 }
5364 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
5365 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
5366 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5367 continue;
5368 }
5369 }
5370 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
5371 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
5372
5373 /* Drop the packet if the L2 filter has already processed it;
5374 * otherwise continue on the normal path
5375 */
5376 if (ret == BCME_OK) {
5377 PKTCFREE(dhdp->osh, pktbuf, TRUE);
5378 continue;
5379 }
5380 }
5381 if (ifp->block_tdls) {
5382 if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
5383 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5384 continue;
5385 }
5386 }
5387 #endif /* DHD_L2_FILTER */
5388
5389 #ifdef DHD_MCAST_REGEN
5390 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
5391 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
5392 ASSERT(if_flow_lkup);
5393
5394 interface_role = if_flow_lkup[ifidx].role;
5395 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
5396
5397 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
5398 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
5399 ETHER_ISUCAST(eh->ether_dhost)) {
5400 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
5401 #ifdef DHD_PSTA
5402 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5403 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
5404 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
5405 if (ifidx != 0) {
5406 /* Let the primary PSTA interface handle this
5407 * frame after the unicast-to-multicast conversion
5408 */
5409 ifp = dhd_get_ifp(dhdp, 0);
5410 ASSERT(ifp);
5411 }
5412 }
5413 }
5414 #endif /* DHD_PSTA */
5415 }
5416 #endif /* DHD_MCAST_REGEN */
5417
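/* SYN-flood heuristic: count received TCP SYNs against the SYN-ACKs we
 * transmitted and, once more than a second has elapsed, compare the
 * per-second delta with TCP_SYNC_FLOOD_LIMIT. For example, 400 SYNs
 * answered by only 40 SYN-ACKs over 2 seconds is 180 unanswered SYNs/sec,
 * which trips the check whenever the limit is configured below that rate.
 */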
5418 #ifdef DHDTCPSYNC_FLOOD_BLK
5419 if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
5420 int delta_sec;
5421 int delta_sync;
5422 int sync_per_sec;
5423 u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
5424 ifp->tsync_rcvd++;
5425 delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
5426 delta_sec = curr_time - ifp->last_sync;
5427 if (delta_sec > 1) {
5428 sync_per_sec = delta_sync/delta_sec;
5429 if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
5430 schedule_work(&ifp->blk_tsfl_work);
5431 DHD_ERROR(("ifidx %d TCP SYNC Flood attack suspected! "
5432 "sync received %d pkt/sec \n",
5433 ifidx, sync_per_sec));
5434 }
5435 dhd_reset_tcpsync_info_by_ifp(ifp);
5436 }
5437
5438 }
5439 #endif /* DHDTCPSYNC_FLOOD_BLK */
5440
5441 #ifdef DHDTCPACK_SUPPRESS
5442 dhd_tcpdata_info_get(dhdp, pktbuf);
5443 #endif // endif
5444 skb = PKTTONATIVE(dhdp->osh, pktbuf);
5445
5446 ASSERT(ifp);
5447 skb->dev = ifp->net;
5448 #ifdef DHD_WET
5449 /* WET-related packet proto manipulation should be done in DHD,
5450 * since the dongle doesn't have the complete payload
5451 */
5452 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
5453 pktbuf) < 0)) {
5454 DHD_INFO(("%s:%s: wet recv proc failed\n",
5455 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
5456 }
5457 #endif /* DHD_WET */
5458
5459 #ifdef DHD_PSTA
5460 if (PSR_ENABLED(dhdp) &&
5461 (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
5462 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
5463 dhd_ifname(dhdp, ifidx)));
5464 }
5465 #endif /* DHD_PSTA */
5466
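/* Intra-BSS forwarding for AP/P2P-GO roles when ap_isolate is off: a
 * unicast frame addressed to an associated STA is turned around and
 * transmitted instead of being passed up, while broadcast/multicast
 * frames (other than IAPP L2 updates) are duplicated so one copy goes
 * back over the air and the original still reaches the host stack.
 */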
5467 #ifdef PCIE_FULL_DONGLE
5468 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
5469 (!ifp->ap_isolate)) {
5470 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5471 if (ETHER_ISUCAST(eh->ether_dhost)) {
5472 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
5473 dhd_sendpkt(dhdp, ifidx, pktbuf);
5474 continue;
5475 }
5476 } else {
5477 void *npktbuf = NULL;
5478 if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
5479 (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) {
5480 dhd_sendpkt(dhdp, ifidx, npktbuf);
5481 }
5482 }
5483 }
5484 #endif /* PCIE_FULL_DONGLE */
5485 #ifdef DYNAMIC_MUMIMO_CONTROL
5486 if (dhdp->reassoc_mumimo_sw && dhdp->murx_block_eapol &&
5487 dhd_check_eapol_4way_message((void *)(skb->data)) == EAPOL_4WAY_M1) {
5488 DHD_ERROR(("%s: Reassoc is in progress..."
5489 " drop EAPOL M1 frame\n", __FUNCTION__));
5490 PKTFREE(dhdp->osh, pktbuf, FALSE);
5491 continue;
5492 }
5493 #endif /* DYNAMIC_MUMIMO_CONTROL */
5494
5495 /* Get the protocol, maintaining the skb around eth_type_trans().
5496 * The main reason for this hack is a limitation of Linux 2.4, where
5497 * 'eth_type_trans' performs its internal skb_pull using
5498 * 'net->hard_header_len' instead of ETH_HLEN. To avoid copying
5499 * packets coming from the network stack when adding the BDC header,
5500 * hardware header, etc., we set 'net->hard_header_len' during network
5501 * interface registration to ETH_HLEN plus the extra space those
5502 * headers require, not just ETH_HLEN.
5503 */
5504 eth = skb->data;
5505 len = skb->len;
5506
5507 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \
5508 defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) || defined(WL_WPS_SYNC)
5509 dump_data = skb->data;
5510 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */
5511
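/* The EtherType sits at bytes 12-13 of the still-unstripped Ethernet
 * header; e.g. bytes 0x88 0x8E here mean an EAPOL frame
 * (ETHER_TYPE_802_1X).
 */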
5512 protocol = (skb->data[12] << 8) | skb->data[13];
5513 if (protocol == ETHER_TYPE_802_1X) {
5514 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
5515 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
5516 wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
5517 #endif /* WL_CFG80211 && WL_WPS_SYNC */
5518 #ifdef DHD_8021X_DUMP
5519 dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5520 #endif /* DHD_8021X_DUMP */
5521 }
5522
5523 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
5524 #ifdef DHD_DHCP_DUMP
5525 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5526 #endif /* DHD_DHCP_DUMP */
5527 #ifdef DHD_ICMP_DUMP
5528 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5529 #endif /* DHD_ICMP_DUMP */
5530 }
5531 #ifdef DHD_RX_DUMP
5532 DHD_ERROR(("RX DUMP[%s] - %s\n",
5533 dhd_ifname(dhdp, ifidx), _get_packet_type_str(protocol)));
5534 if (protocol != ETHER_TYPE_BRCM) {
5535 if (dump_data[0] == 0xFF) {
5536 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
5537
5538 if ((dump_data[12] == 8) &&
5539 (dump_data[13] == 6)) {
5540 DHD_ERROR(("%s: ARP %d\n",
5541 __FUNCTION__, dump_data[0x15]));
5542 }
5543 } else if (dump_data[0] & 1) {
5544 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
5545 __FUNCTION__, MAC2STRDBG(dump_data)));
5546 }
5547 #ifdef DHD_RX_FULL_DUMP
5548 {
5549 int k;
5550 for (k = 0; k < skb->len; k++) {
5551 DHD_ERROR(("%02X ", dump_data[k]));
5552 if ((k & 15) == 15)
5553 DHD_ERROR(("\n"));
5554 }
5555 DHD_ERROR(("\n"));
5556 }
5557 #endif /* DHD_RX_FULL_DUMP */
5558 }
5559 #endif /* DHD_RX_DUMP */
5560
5561 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5562 if (pkt_wake) {
5563 prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
5564 }
5565 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5566
5567 skb->protocol = eth_type_trans(skb, skb->dev);
5568
5569 if (skb->pkt_type == PACKET_MULTICAST) {
5570 dhd->pub.rx_multicast++;
5571 ifp->stats.multicast++;
5572 }
5573
5574 skb->data = eth;
5575 skb->len = len;
5576
5577 DHD_DBG_PKT_MON_RX(dhdp, skb);
5578 #ifdef DHD_PKT_LOGGING
5579 DHD_PKTLOG_RX(dhdp, skb);
5580 #endif /* DHD_PKT_LOGGING */
5581 /* Strip header, count, deliver upward */
5582 skb_pull(skb, ETH_HLEN);
5583
5584 /* Process special event packets and then discard them */
5585 memset(&event, 0, sizeof(event));
5586
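/* Firmware events arrive as ETHER_TYPE_BRCM frames: parse them with
 * wl_host_event_get_data(), dispatch through dhd_wl_host_event(), and
 * forward the raw event packet to user space only when wl_event_enabled
 * is set; otherwise the packet is consumed here.
 */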
5587 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
5588 bcm_event_msg_u_t evu;
5589 int ret_event;
5590 int event_type;
5591
5592 ret_event = wl_host_event_get_data(
5593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5594 skb_mac_header(skb),
5595 #else
5596 skb->mac.raw,
5597 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5598 len, &evu);
5599
5600 if (ret_event != BCME_OK) {
5601 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5602 __FUNCTION__, ret_event));
5603 #ifdef DHD_USE_STATIC_CTRLBUF
5604 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5605 #else
5606 PKTFREE(dhdp->osh, pktbuf, FALSE);
5607 #endif // endif
5608 continue;
5609 }
5610
5611 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5612 event_type = ntoh32_ua((void *)&event.event_type);
5613 #ifdef SHOW_LOGTRACE
5614 /* Event msg printing is called from dhd_rx_frame, which runs in tasklet
5615 * context for PCIe FD; for other buses it runs in DPC context. If the
5616 * dongle sends a burst of events, printing them all from tasklet/DPC
5617 * context, in the data path, is costly.
5618 * In newer dongle firmware (4359, 4355 onwards) console prints also arrive
5619 * as events of type WLC_E_TRACE.
5620 * We therefore print these console logs from workqueue context by enqueuing
5621 * the SKB here; dequeuing happens in the workqueue, and the packet is freed
5622 * only if logtrace_pkt_sendup is true
5623 */
5624 if (event_type == WLC_E_TRACE) {
5625 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
5626 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5627 continue;
5628 }
5629 #endif /* SHOW_LOGTRACE */
5630
5631 ret_event = dhd_wl_host_event(dhd, ifidx,
5632 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5633 skb_mac_header(skb),
5634 #else
5635 skb->mac.raw,
5636 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5637 len, &event, &data);
5638
5639 wl_event_to_host_order(&event);
5640 if (!tout_ctrl)
5641 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
5642
5643 #if defined(PNO_SUPPORT)
5644 if (event_type == WLC_E_PFN_NET_FOUND) {
5645 /* enforce custom wake lock to guarantee that the kernel is not suspended */
5646 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
5647 }
5648 #endif /* PNO_SUPPORT */
5649 if (numpkt != 1) {
5650 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
5651 __FUNCTION__));
5652 }
5653
5654 #ifdef DHD_WAKE_STATUS
5655 if (unlikely(pkt_wake)) {
5656 #ifdef DHD_WAKE_EVENT_STATUS
5657 if (event.event_type < WLC_E_LAST) {
5658 wcp->rc_event[event.event_type]++;
5659 wcp->rcwake++;
5660 pkt_wake = 0;
5661 }
5662 #endif /* DHD_WAKE_EVENT_STATUS */
5663 }
5664 #endif /* DHD_WAKE_STATUS */
5665
5666 /* For a delete-virtual-interface event, wl_host_event returns a positive
5667 * i/f index; do not proceed, just free the pkt.
5668 */
5669 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
5670 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
5671 __FUNCTION__));
5672 #ifdef DHD_USE_STATIC_CTRLBUF
5673 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5674 #else
5675 PKTFREE(dhdp->osh, pktbuf, FALSE);
5676 #endif // endif
5677 continue;
5678 }
5679
5680 /*
5681 * For the event packets, there is a possibility
5682 * of ifidx getting modified. Thus update the ifp
5683 * once again.
5684 */
5685 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
5686 ifp = dhd->iflist[ifidx];
5687 #ifndef PROP_TXSTATUS_VSDB
5688 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
5689 #else
5690 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
5691 dhd->pub.up))
5692 #endif /* PROP_TXSTATUS_VSDB */
5693 {
5694 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
5695 __FUNCTION__));
5696 #ifdef DHD_USE_STATIC_CTRLBUF
5697 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5698 #else
5699 PKTFREE(dhdp->osh, pktbuf, FALSE);
5700 #endif // endif
5701 continue;
5702 }
5703
5704 if (dhdp->wl_event_enabled) {
5705 #ifdef DHD_USE_STATIC_CTRLBUF
5706 /* If event bufs are allocated via static buf pool
5707 * and wl events are enabled, make a copy, free the
5708 * local one and send the copy up.
5709 */
5710 void *npkt = PKTDUP(dhdp->osh, skb);
5711 /* Clone event and send it up */
5712 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5713 if (npkt) {
5714 skb = npkt;
5715 } else {
5716 DHD_ERROR(("skb clone failed. dropping event.\n"));
5717 continue;
5718 }
5719 #endif /* DHD_USE_STATIC_CTRLBUF */
5720 } else {
5721 /* If wl_event_enabled was not explicitly set, drop events */
5722 #ifdef DHD_USE_STATIC_CTRLBUF
5723 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5724 #else
5725 PKTFREE(dhdp->osh, pktbuf, FALSE);
5726 #endif /* DHD_USE_STATIC_CTRLBUF */
5727 continue;
5728 }
5729 } else {
5730 tout_rx = DHD_PACKET_TIMEOUT_MS;
5731
5732 #ifdef PROP_TXSTATUS
5733 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
5734 #endif /* PROP_TXSTATUS */
5735
5736 #ifdef DHD_WAKE_STATUS
5737 if (unlikely(pkt_wake)) {
5738 wcp->rxwake++;
5739 #ifdef DHD_WAKE_RX_STATUS
5740 #define ETHER_ICMP6_HEADER 20
5741 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
5742 #define ETHER_IPV6_DADDR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
5743 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DADDR + IPV6_ADDR_LEN)
5744
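/* Offsets are from the start of the Ethernet header, assuming an untagged
 * frame with no IPv6 extension headers: 14 + 6 = 20 is the IPv6 Next
 * Header byte, and 14 + 40 = 54 (ETHER_ICMPV6_TYPE) is the ICMPv6 type
 * byte that distinguishes the RA/NA/NS wakeups counted below.
 */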
5745 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
5746 wcp->rx_arp++;
5747 if (dump_data[0] == 0xFF) { /* Broadcast */
5748 wcp->rx_bcast++;
5749 } else if (dump_data[0] & 0x01) { /* Multicast */
5750 wcp->rx_mcast++;
5751 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
5752 wcp->rx_multi_ipv6++;
5753 if ((skb->len > ETHER_ICMP6_HEADER) &&
5754 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
5755 wcp->rx_icmpv6++;
5756 if (skb->len > ETHER_ICMPV6_TYPE) {
5757 switch (dump_data[ETHER_ICMPV6_TYPE]) {
5758 case NDISC_ROUTER_ADVERTISEMENT:
5759 wcp->rx_icmpv6_ra++;
5760 break;
5761 case NDISC_NEIGHBOUR_ADVERTISEMENT:
5762 wcp->rx_icmpv6_na++;
5763 break;
5764 case NDISC_NEIGHBOUR_SOLICITATION:
5765 wcp->rx_icmpv6_ns++;
5766 break;
5767 }
5768 }
5769 }
5770 } else if (dump_data[2] == 0x5E) {
5771 wcp->rx_multi_ipv4++;
5772 } else {
5773 wcp->rx_multi_other++;
5774 }
5775 } else { /* Unicast */
5776 wcp->rx_ucast++;
5777 }
5778 #undef ETHER_ICMP6_HEADER
5779 #undef ETHER_IPV6_SADDR
5780 #undef ETHER_IPV6_DADDR
5781 #undef ETHER_ICMPV6_TYPE
5782 #endif /* DHD_WAKE_RX_STATUS */
5783 pkt_wake = 0;
5784 }
5785 #endif /* DHD_WAKE_STATUS */
5786 }
5787
5788 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
5789 ifp->net->last_rx = jiffies;
5790 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
5791
5792 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
5793 dhdp->dstats.rx_bytes += skb->len;
5794 dhdp->rx_packets++; /* Local count */
5795 ifp->stats.rx_bytes += skb->len;
5796 ifp->stats.rx_packets++;
5797 }
5798
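/* Hand the frame to the network stack: directly from interrupt context,
 * chained onto skbhead for the rxf thread when it is enabled, or via
 * netif_rx_ni()/netif_rx() from process context otherwise.
 */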
5799 if (in_interrupt()) {
5800 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5801 __FUNCTION__, __LINE__);
5802 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5803 #if defined(DHD_LB_RXP)
5804 netif_receive_skb(skb);
5805 #else /* !defined(DHD_LB_RXP) */
5806 netif_rx(skb);
5807 #endif /* !defined(DHD_LB_RXP) */
5808 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5809 } else {
5810 if (dhd->rxthread_enabled) {
5811 if (!skbhead)
5812 skbhead = skb;
5813 else
5814 PKTSETNEXT(dhdp->osh, skbprev, skb);
5815 skbprev = skb;
5816 } else {
5817
5818 /* If the receive is not processed inside an ISR,
5819 * the softirqd must be woken explicitly to service
5820 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5821 * by netif_rx_ni(), but in earlier kernels, we need
5822 * to do it manually.
5823 */
5824 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5825 __FUNCTION__, __LINE__);
5826
5827 #if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
5828 defined(ARGOS_NOTIFY_CB)
5829 argos_register_notifier_deinit();
5830 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
5831 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
5832 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
5833 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
5834 #if defined(DHD_LB_RXP)
5835 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5836 netif_receive_skb(skb);
5837 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5838 #else /* !defined(DHD_LB_RXP) */
5839 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5840 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5841 netif_rx_ni(skb);
5842 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5843 #else
5844 ulong flags;
5845 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5846 netif_rx(skb);
5847 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5848 local_irq_save(flags);
5849 RAISE_RX_SOFTIRQ();
5850 local_irq_restore(flags);
5851 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
5852 #endif /* !defined(DHD_LB_RXP) */
5853 }
5854 }
5855 }
5856
5857 if (dhd->rxthread_enabled && skbhead)
5858 dhd_sched_rxf(dhdp, skbhead);
5859
5860 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
5861 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
5862 }
5863
5864 void
5865 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
5866 {
5867 /* Linux version has nothing to do */
5868 return;
5869 }
5870
5871 void
5872 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
5873 {
5874 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
5875 struct ether_header *eh;
5876 uint16 type;
5877
5878 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
5879
5880 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
5881 type = ntoh16(eh->ether_type);
5882
5883 if (type == ETHER_TYPE_802_1X) {
5884 atomic_dec(&dhd->pend_8021x_cnt);
5885 }
5886
5887 #ifdef PROP_TXSTATUS
5888 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
5889 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
5890 uint datalen = PKTLEN(dhd->pub.osh, txp);
5891 if (ifp != NULL) {
5892 if (success) {
5893 dhd->pub.tx_packets++;
5894 ifp->stats.tx_packets++;
5895 ifp->stats.tx_bytes += datalen;
5896 } else {
5897 ifp->stats.tx_dropped++;
5898 }
5899 }
5900 }
5901 #endif // endif
5902 }
5903
5904 static struct net_device_stats *
5905 dhd_get_stats(struct net_device *net)
5906 {
5907 dhd_info_t *dhd = DHD_DEV_INFO(net);
5908 dhd_if_t *ifp;
5909
5910 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5911
5912 if (!dhd) {
5913 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
5914 goto error;
5915 }
5916
5917 ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
5918 if (!ifp) {
5919 /* return empty stats */
5920 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
5921 goto error;
5922 }
5923
5924 if (dhd->pub.up) {
5925 /* Use the protocol to get dongle stats */
5926 dhd_prot_dstats(&dhd->pub);
5927 }
5928 return &ifp->stats;
5929
5930 error:
5931 memset(&net->stats, 0, sizeof(net->stats));
5932 return &net->stats;
5933 }
5934
5935 static int
5936 dhd_watchdog_thread(void *data)
5937 {
5938 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5939 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5940 /* This thread doesn't need any user-level access,
5941 * so get rid of all our resources
5942 */
5943 if (dhd_watchdog_prio > 0) {
5944 struct sched_param param;
5945 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
5946 dhd_watchdog_prio:(MAX_RT_PRIO-1);
5947 setScheduler(current, SCHED_FIFO, &param);
5948 }
5949
5950 while (1) {
5951 if (down_interruptible (&tsk->sema) == 0) {
5952 unsigned long flags;
5953 unsigned long jiffies_at_start = jiffies;
5954 unsigned long time_lapse;
5955 #ifdef BCMPCIE
5956 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5957 #endif /* BCMPCIE */
5958
5959 SMP_RD_BARRIER_DEPENDS();
5960 if (tsk->terminated) {
5961 #ifdef BCMPCIE
5962 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5963 #endif /* BCMPCIE */
5964 break;
5965 }
5966
5967 if (dhd->pub.dongle_reset == FALSE) {
5968 DHD_TIMER(("%s:\n", __FUNCTION__));
5969 dhd_bus_watchdog(&dhd->pub);
5970
5971 DHD_GENERAL_LOCK(&dhd->pub, flags);
5972 /* Count the tick for reference */
5973 dhd->pub.tickcnt++;
5974 #ifdef DHD_L2_FILTER
5975 dhd_l2_filter_watchdog(&dhd->pub);
5976 #endif /* DHD_L2_FILTER */
5977 time_lapse = jiffies - jiffies_at_start;
5978
5979 /* Reschedule the watchdog */
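/* Deduct the time spent in this pass so ticks keep a steady
 * dhd_watchdog_ms cadence; the min() below clamps the deduction
 * so the new expiry can never land in the past.
 */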
5980 if (dhd->wd_timer_valid) {
5981 mod_timer(&dhd->timer,
5982 jiffies +
5983 msecs_to_jiffies(dhd_watchdog_ms) -
5984 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
5985 }
5986 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5987 }
5988 #ifdef BCMPCIE
5989 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5990 #endif /* BCMPCIE */
5991 } else {
5992 break;
5993 }
5994 }
5995
5996 complete_and_exit(&tsk->completed, 0);
5997 }
5998
5999 static void dhd_watchdog(ulong data)
6000 {
6001 dhd_info_t *dhd = (dhd_info_t *)data;
6002 unsigned long flags;
6003
6004 if (dhd->pub.dongle_reset) {
6005 return;
6006 }
6007
6008 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
6009 up(&dhd->thr_wdt_ctl.sema);
6010 return;
6011 }
6012
6013 #ifdef BCMPCIE
6014 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6015 #endif /* BCMPCIE */
6016 /* Call the bus module watchdog */
6017 dhd_bus_watchdog(&dhd->pub);
6018
6019 DHD_GENERAL_LOCK(&dhd->pub, flags);
6020 /* Count the tick for reference */
6021 dhd->pub.tickcnt++;
6022
6023 #ifdef DHD_L2_FILTER
6024 dhd_l2_filter_watchdog(&dhd->pub);
6025 #endif /* DHD_L2_FILTER */
6026 /* Reschedule the watchdog */
6027 if (dhd->wd_timer_valid)
6028 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
6029 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6030 #ifdef BCMPCIE
6031 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6032 #endif /* BCMPCIE */
6033 }
6034
6035 #ifdef DHD_PCIE_RUNTIMEPM
6036 static int
6037 dhd_rpm_state_thread(void *data)
6038 {
6039 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6040 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6041
6042 while (1) {
6043 if (down_interruptible (&tsk->sema) == 0) {
6044 unsigned long flags;
6045 unsigned long jiffies_at_start = jiffies;
6046 unsigned long time_lapse;
6047
6048 SMP_RD_BARRIER_DEPENDS();
6049 if (tsk->terminated) {
6050 break;
6051 }
6052
6053 if (dhd->pub.dongle_reset == FALSE) {
6054 DHD_TIMER(("%s:\n", __FUNCTION__));
6055 if (dhd->pub.up) {
6056 dhd_runtimepm_state(&dhd->pub);
6057 }
6058
6059 DHD_GENERAL_LOCK(&dhd->pub, flags);
6060 time_lapse = jiffies - jiffies_at_start;
6061
6062 /* Reschedule the watchdog */
6063 if (dhd->rpm_timer_valid) {
6064 mod_timer(&dhd->rpm_timer,
6065 jiffies +
6066 msecs_to_jiffies(dhd_runtimepm_ms) -
6067 min(msecs_to_jiffies(dhd_runtimepm_ms),
6068 time_lapse));
6069 }
6070 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6071 }
6072 } else {
6073 break;
6074 }
6075 }
6076
6077 complete_and_exit(&tsk->completed, 0);
6078 }
6079
6080 static void dhd_runtimepm(ulong data)
6081 {
6082 dhd_info_t *dhd = (dhd_info_t *)data;
6083
6084 if (dhd->pub.dongle_reset) {
6085 return;
6086 }
6087
6088 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
6089 up(&dhd->thr_rpm_ctl.sema);
6090 return;
6091 }
6092 }
6093
6094 void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
6095 {
6096 dhd_os_runtimepm_timer(dhdp, 0);
6097 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
6098 DHD_ERROR(("DHD Runtime PM Disabled \n"));
6099 }
6100
6101 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
6102 {
6103 if (dhd_get_idletime(dhdp)) {
6104 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
6105 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6106 }
6107 }
6108
6109 #endif /* DHD_PCIE_RUNTIMEPM */
6110
6111 #ifdef ENABLE_ADAPTIVE_SCHED
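/* Drop to SCHED_NORMAL while the CPU runs at or below CUSTOM_CPUFREQ_THRESH
 * and promote back to SCHED_FIFO real-time priority once it speeds up, so
 * the DPC/RXF threads adapt to the current cpufreq operating point.
 */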
6112 static void
6113 dhd_sched_policy(int prio)
6114 {
6115 struct sched_param param;
6116 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
6117 param.sched_priority = 0;
6118 setScheduler(current, SCHED_NORMAL, &param);
6119 } else {
6120 if (get_scheduler_policy(current) != SCHED_FIFO) {
6121 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
6122 setScheduler(current, SCHED_FIFO, &param);
6123 }
6124 }
6125 }
6126 #endif /* ENABLE_ADAPTIVE_SCHED */
6127 #ifdef DEBUG_CPU_FREQ
6128 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
6129 {
6130 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
6131 struct cpufreq_freqs *freq = data;
6132 if (dhd) {
6133 if (!dhd->new_freq)
6134 goto exit;
6135 if (val == CPUFREQ_POSTCHANGE) {
6136 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6137 freq->new, freq->cpu));
6138 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
6139 }
6140 }
6141 exit:
6142 return 0;
6143 }
6144 #endif /* DEBUG_CPU_FREQ */
6145 static int
6146 dhd_dpc_thread(void *data)
6147 {
6148 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6149 !defined(CONFIG_SOC_EXYNOS7870)
6150 int ret = 0;
6151 unsigned long flags;
6152 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6153 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6154 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6155
6156 /* This thread doesn't need any user-level access,
6157 * so get rid of all our resources
6158 */
6159 if (dhd_dpc_prio > 0)
6160 {
6161 struct sched_param param;
6162 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
6163 setScheduler(current, SCHED_FIFO, &param);
6164 }
6165
6166 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6167 !defined(CONFIG_SOC_EXYNOS7870)
6168 if (!zalloc_cpumask_var(&dhd->pub.default_cpu_mask, GFP_KERNEL)) {
6169 DHD_ERROR(("dpc_thread, zalloc_cpumask_var error\n"));
6170 dhd->pub.affinity_isdpc = FALSE;
6171 } else {
6172 if (!zalloc_cpumask_var(&dhd->pub.dpc_affinity_cpu_mask, GFP_KERNEL)) {
6173 DHD_ERROR(("dpc_thread, dpc_affinity_cpu_mask error\n"));
6174 free_cpumask_var(dhd->pub.default_cpu_mask);
6175 dhd->pub.affinity_isdpc = FALSE;
6176 } else {
6177 cpumask_copy(dhd->pub.default_cpu_mask, &hmp_slow_cpu_mask);
6178 cpumask_or(dhd->pub.dpc_affinity_cpu_mask,
6179 dhd->pub.dpc_affinity_cpu_mask, cpumask_of(DPC_CPUCORE));
6180
6181 DHD_GENERAL_LOCK(&dhd->pub, flags);
6182 if ((ret = argos_task_affinity_setup_label(current, "WIFI",
6183 dhd->pub.dpc_affinity_cpu_mask,
6184 dhd->pub.default_cpu_mask)) < 0) {
6185 DHD_ERROR(("Failed to add CPU affinity(dpc) error=%d\n",
6186 ret));
6187 free_cpumask_var(dhd->pub.default_cpu_mask);
6188 free_cpumask_var(dhd->pub.dpc_affinity_cpu_mask);
6189 dhd->pub.affinity_isdpc = FALSE;
6190 } else {
6191 unsigned int irq = -1;
6192 #ifdef BCMPCIE
6193 if (dhdpcie_get_pcieirq(dhd->pub.bus, &irq)) {
6194 DHD_ERROR(("%s : Can't get interrupt number\n",
6195 __FUNCTION__));
6196 }
6197 #endif /* BCMPCIE */
6198 #ifdef BCMSDIO
6199 wifi_adapter_info_t *adapter = dhd->adapter;
6200 irq = adapter->irq_num;
6201 #endif /* BCMSDIO */
6202 DHD_ERROR(("Argos set Completed : dpcthread\n"));
6203 set_irq_cpucore(irq, dhd->pub.default_cpu_mask,
6204 dhd->pub.dpc_affinity_cpu_mask);
6205 dhd->pub.affinity_isdpc = TRUE;
6206 }
6207 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6208 }
6209 }
6210 #else /* ARGOS_CPU_SCHEDULER */
6211 #ifdef CUSTOM_DPC_CPUCORE
6212 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
6213 #endif // endif
6214 #ifdef CUSTOM_SET_CPUCORE
6215 dhd->pub.current_dpc = current;
6216 #endif /* CUSTOM_SET_CPUCORE */
6217 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6218 /* Run until signal received */
6219 while (1) {
6220 if (!binary_sema_down(tsk)) {
6221 #ifdef ENABLE_ADAPTIVE_SCHED
6222 dhd_sched_policy(dhd_dpc_prio);
6223 #endif /* ENABLE_ADAPTIVE_SCHED */
6224 SMP_RD_BARRIER_DEPENDS();
6225 if (tsk->terminated) {
6226 break;
6227 }
6228
6229 /* Call bus dpc unless it indicated down (then clean stop) */
6230 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6231 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6232 int resched_cnt = 0;
6233 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6234 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
6235 while (dhd_bus_dpc(dhd->pub.bus)) {
6236 /* process all data */
6237 #ifdef DEBUG_DPC_THREAD_WATCHDOG
6238 resched_cnt++;
6239 if (resched_cnt > MAX_RESCHED_CNT) {
6240 DHD_INFO(("%s Calling msleep to "
6241 "let other processes run. \n",
6242 __FUNCTION__));
6243 dhd->pub.dhd_bug_on = true;
6244 resched_cnt = 0;
6245 OSL_SLEEP(1);
6246 }
6247 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
6248 }
6249 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
6250 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6251 } else {
6252 if (dhd->pub.up)
6253 dhd_bus_stop(dhd->pub.bus, TRUE);
6254 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6255 }
6256 } else {
6257 break;
6258 }
6259 }
6260 #ifdef ARGOS_CPU_SCHEDULER
6261 if (dhd->pub.affinity_isdpc == TRUE) {
6262 free_cpumask_var(dhd->pub.default_cpu_mask);
6263 free_cpumask_var(dhd->pub.dpc_affinity_cpu_mask);
6264 dhd->pub.affinity_isdpc = FALSE;
6265 }
6266 #endif /* ARGOS_CPU_SCHEDULER */
6267 complete_and_exit(&tsk->completed, 0);
6268 }
6269
6270 static int
6271 dhd_rxf_thread(void *data)
6272 {
6273 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6274 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6275 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6276 !defined(CONFIG_SOC_EXYNOS7870)
6277 int ret = 0;
6278 unsigned long flags;
6279 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6280 #if defined(WAIT_DEQUEUE)
6281 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
6282 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
6283 #endif // endif
6284 dhd_pub_t *pub = &dhd->pub;
6285
6286 /* This thread doesn't need any user-level access,
6287 * so get rid of all our resources
6288 */
6289 if (dhd_rxf_prio > 0)
6290 {
6291 struct sched_param param;
6292 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
6293 setScheduler(current, SCHED_FIFO, &param);
6294 }
6295
6296 #if defined(ARGOS_CPU_SCHEDULER) && !defined(DHD_LB_IRQSET) && \
6297 !defined(CONFIG_SOC_EXYNOS7870)
6298 if (!zalloc_cpumask_var(&dhd->pub.rxf_affinity_cpu_mask, GFP_KERNEL)) {
6299 DHD_ERROR(("rxthread zalloc_cpumask_var error\n"));
6300 dhd->pub.affinity_isrxf = FALSE;
6301 } else {
6302 cpumask_or(dhd->pub.rxf_affinity_cpu_mask, dhd->pub.rxf_affinity_cpu_mask,
6303 cpumask_of(RXF_CPUCORE));
6304
6305 DHD_GENERAL_LOCK(&dhd->pub, flags);
6306 if ((ret = argos_task_affinity_setup_label(current, "WIFI",
6307 dhd->pub.rxf_affinity_cpu_mask, dhd->pub.default_cpu_mask)) < 0) {
6308 DHD_ERROR(("Failed to add CPU affinity(rxf) error=%d\n", ret));
6309 dhd->pub.affinity_isrxf = FALSE;
6310 free_cpumask_var(dhd->pub.rxf_affinity_cpu_mask);
6311 } else {
6312 DHD_ERROR(("RXthread affinity completed\n"));
6313 dhd->pub.affinity_isrxf = TRUE;
6314 }
6315 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6316 }
6317 #else /* ARGOS_CPU_SCHEDULER */
6318 #ifdef CUSTOM_SET_CPUCORE
6319 dhd->pub.current_rxf = current;
6320 #endif /* CUSTOM_SET_CPUCORE */
6321 #endif /* ARGOS_CPU_SCHEDULER && !DHD_LB_IRQSET && !CONFIG_SOC_EXYNOS7870 */
6322 /* Run until signal received */
6323 while (1) {
6324 if (down_interruptible(&tsk->sema) == 0) {
6325 void *skb;
6326 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
6327 ulong flags;
6328 #endif // endif
6329 #ifdef ENABLE_ADAPTIVE_SCHED
6330 dhd_sched_policy(dhd_rxf_prio);
6331 #endif /* ENABLE_ADAPTIVE_SCHED */
6332
6333 SMP_RD_BARRIER_DEPENDS();
6334
6335 if (tsk->terminated) {
6336 break;
6337 }
6338 skb = dhd_rxf_dequeue(pub);
6339
6340 if (skb == NULL) {
6341 continue;
6342 }
6343 while (skb) {
6344 void *skbnext = PKTNEXT(pub->osh, skb);
6345 PKTSETNEXT(pub->osh, skb, NULL);
6346 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6347 __FUNCTION__, __LINE__);
6348 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6349 netif_rx_ni(skb);
6350 #else
6351 netif_rx(skb);
6352 local_irq_save(flags);
6353 RAISE_RX_SOFTIRQ();
6354 local_irq_restore(flags);
6355
6356 #endif // endif
6357 skb = skbnext;
6358 }
6359 #if defined(WAIT_DEQUEUE)
6360 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
6361 OSL_SLEEP(1);
6362 watchdogTime = OSL_SYSUPTIME();
6363 }
6364 #endif // endif
6365
6366 DHD_OS_WAKE_UNLOCK(pub);
6367 } else {
6368 break;
6369 }
6370 }
6371 #ifdef ARGOS_CPU_SCHEDULER
6372 if (dhd->pub.affinity_isrxf == TRUE) {
6373 free_cpumask_var(dhd->pub.rxf_affinity_cpu_mask);
6374 dhd->pub.affinity_isrxf = FALSE;
6375 }
6376 #endif /* ARGOS_CPU_SCHEDULER */
6377 complete_and_exit(&tsk->completed, 0);
6378 }
6379
6380 #ifdef BCMPCIE
6381 void dhd_dpc_enable(dhd_pub_t *dhdp)
6382 {
6383 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
6384 dhd_info_t *dhd;
6385
6386 if (!dhdp || !dhdp->info)
6387 return;
6388 dhd = dhdp->info;
6389 #endif /* DHD_LB_RXP || DHD_LB_TXP */
6390
6391 #ifdef DHD_LB_RXP
6392 __skb_queue_head_init(&dhd->rx_pend_queue);
6393 #endif /* DHD_LB_RXP */
6394
6395 #ifdef DHD_LB_TXP
6396 skb_queue_head_init(&dhd->tx_pend_queue);
6397 #endif /* DHD_LB_TXP */
6398 }
6399 #endif /* BCMPCIE */
6400
6401 #ifdef BCMPCIE
6402 void
6403 dhd_dpc_kill(dhd_pub_t *dhdp)
6404 {
6405 dhd_info_t *dhd;
6406
6407 if (!dhdp) {
6408 return;
6409 }
6410
6411 dhd = dhdp->info;
6412
6413 if (!dhd) {
6414 return;
6415 }
6416
6417 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6418 tasklet_kill(&dhd->tasklet);
6419 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
6420 }
6421
6422 #ifdef DHD_LB
6423 #ifdef DHD_LB_RXP
6424 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
6425 __skb_queue_purge(&dhd->rx_pend_queue);
6426 #endif /* DHD_LB_RXP */
6427 #ifdef DHD_LB_TXP
6428 cancel_work_sync(&dhd->tx_dispatcher_work);
6429 skb_queue_purge(&dhd->tx_pend_queue);
6430 #endif /* DHD_LB_TXP */
6431
6432 /* Kill the Load Balancing Tasklets */
6433 #if defined(DHD_LB_TXC)
6434 tasklet_kill(&dhd->tx_compl_tasklet);
6435 #endif /* DHD_LB_TXC */
6436 #if defined(DHD_LB_RXC)
6437 tasklet_kill(&dhd->rx_compl_tasklet);
6438 #endif /* DHD_LB_RXC */
6439 #if defined(DHD_LB_TXP)
6440 tasklet_kill(&dhd->tx_tasklet);
6441 #endif /* DHD_LB_TXP */
6442 #endif /* DHD_LB */
6443 }
6444
6445 void
6446 dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
6447 {
6448 dhd_info_t *dhd;
6449
6450 if (!dhdp) {
6451 return;
6452 }
6453
6454 dhd = dhdp->info;
6455
6456 if (!dhd) {
6457 return;
6458 }
6459
6460 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6461 tasklet_kill(&dhd->tasklet);
6462 }
6463 }
6464 #endif /* BCMPCIE */
6465
6466 static void
6467 dhd_dpc(ulong data)
6468 {
6469 dhd_info_t *dhd;
6470
6471 dhd = (dhd_info_t *)data;
6472
6473 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
6474 * down below , wake lock is set,
6475 * the tasklet is initialized in dhd_attach()
6476 */
6477 /* Call bus dpc unless it indicated down (then clean stop) */
6478 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6479 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6480 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
6481 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6482 if (dhd_bus_dpc(dhd->pub.bus)) {
6483 tasklet_schedule(&dhd->tasklet);
6484 }
6485 } else {
6486 dhd_bus_stop(dhd->pub.bus, TRUE);
6487 }
6488 }
6489
6490 void
6491 dhd_sched_dpc(dhd_pub_t *dhdp)
6492 {
6493 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6494
6495 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
6496 DHD_OS_WAKE_LOCK(dhdp);
6497 /* If the semaphore could not be raised,
6498 * the wake unlock must be done here
6499 */
6500 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
6501 DHD_OS_WAKE_UNLOCK(dhdp);
6502 }
6503 return;
6504 } else {
6505 dhd_bus_set_dpc_sched_time(dhdp);
6506 tasklet_schedule(&dhd->tasklet);
6507 }
6508 }
6509
6510 static void
6511 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
6512 {
6513 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6514
6515 DHD_OS_WAKE_LOCK(dhdp);
6516
6517 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
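/* dhd_rxf_enqueue() may return non-OK while the rx queue has no room;
 * spin until it accepts the chain for the rxf thread to drain.
 */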
6518 do {
6519 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
6520 break;
6521 } while (1);
6522 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6523 up(&dhd->thr_rxf_ctl.sema);
6524 }
6525 return;
6526 }
6527
6528 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6529 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6530
6531 #ifdef TOE
6532 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6533 static int
6534 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
6535 {
6536 char buf[32];
6537 int ret;
6538
6539 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6540
6541 if (ret < 0) {
6542 if (ret == -EIO) {
6543 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
6544 ifidx)));
6545 return -EOPNOTSUPP;
6546 }
6547
6548 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6549 return ret;
6550 }
6551
6552 memcpy(toe_ol, buf, sizeof(uint32));
6553 return 0;
6554 }
6555
6556 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6557 static int
6558 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
6559 {
6560 int toe, ret;
6561
6562 /* Set toe_ol as requested */
6563 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
6564 if (ret < 0) {
6565 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6566 dhd_ifname(&dhd->pub, ifidx), ret));
6567 return ret;
6568 }
6569
6570 /* Enable toe globally only if any components are enabled. */
6571 toe = (toe_ol != 0);
6572 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
6573 if (ret < 0) {
6574 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6575 return ret;
6576 }
6577
6578 return 0;
6579 }
6580 #endif /* TOE */
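/* Illustrative only: with older ethtool binaries that still issue the
 * legacy checksum-offload ioctls, and assuming the interface is named
 * wlan0, these paths are exercised roughly as:
 *   ethtool -K wlan0 tx on   ->  ETHTOOL_STXCSUM -> dhd_toe_set()
 *   ethtool -k wlan0         ->  ETHTOOL_GTXCSUM -> dhd_toe_get()
 */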
6581
6582 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6583 void dhd_set_scb_probe(dhd_pub_t *dhd)
6584 {
6585 wl_scb_probe_t scb_probe;
6586 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
6587 int ret;
6588
6589 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6590 return;
6591 }
6592
6593 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
6594 if (ret < 0) {
6595 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
6596 }
6597
6598 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
6599
6600 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
6601
6602 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
6603 TRUE);
6604 if (ret < 0) {
6605 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
6606 return;
6607 }
6608 }
6609 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6610
6611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6612 static void
6613 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
6614 {
6615 dhd_info_t *dhd = DHD_DEV_INFO(net);
6616
6617 snprintf(info->driver, sizeof(info->driver), "wl");
6618 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
6619 }
6620
6621 struct ethtool_ops dhd_ethtool_ops = {
6622 .get_drvinfo = dhd_ethtool_get_drvinfo
6623 };
6624 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6625
6626 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
6627 static int
6628 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
6629 {
6630 struct ethtool_drvinfo info;
6631 char drvname[sizeof(info.driver)];
6632 uint32 cmd;
6633 #ifdef TOE
6634 struct ethtool_value edata;
6635 uint32 toe_cmpnt, csum_dir;
6636 int ret;
6637 #endif // endif
6638
6639 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6640
6641 /* all ethtool calls start with a cmd word */
6642 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
6643 return -EFAULT;
6644
6645 switch (cmd) {
6646 case ETHTOOL_GDRVINFO:
6647 /* Copy out any request driver name */
6648 if (copy_from_user(&info, uaddr, sizeof(info)))
6649 return -EFAULT;
6650 strncpy(drvname, info.driver, sizeof(info.driver));
6651 drvname[sizeof(info.driver)-1] = '\0';
6652
6653 /* clear struct for return */
6654 memset(&info, 0, sizeof(info));
6655 info.cmd = cmd;
6656
6657 /* if dhd requested, identify ourselves */
6658 if (strcmp(drvname, "?dhd") == 0) {
6659 snprintf(info.driver, sizeof(info.driver), "dhd");
6660 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
6661 info.version[sizeof(info.version) - 1] = '\0';
6662 }
6663
6664 /* otherwise, require dongle to be up */
6665 else if (!dhd->pub.up) {
6666 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
6667 return -ENODEV;
6668 }
6669
6670 /* finally, report dongle driver type */
6671 else if (dhd->pub.iswl)
6672 snprintf(info.driver, sizeof(info.driver), "wl");
6673 else
6674 snprintf(info.driver, sizeof(info.driver), "xx");
6675
6676 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
6677 if (copy_to_user(uaddr, &info, sizeof(info)))
6678 return -EFAULT;
6679 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
6680 (int)sizeof(drvname), drvname, info.driver));
6681 break;
6682
6683 #ifdef TOE
6684 /* Get toe offload components from dongle */
6685 case ETHTOOL_GRXCSUM:
6686 case ETHTOOL_GTXCSUM:
6687 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
6688 return ret;
6689
6690 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
6691
6692 edata.cmd = cmd;
6693 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
6694
6695 if (copy_to_user(uaddr, &edata, sizeof(edata)))
6696 return -EFAULT;
6697 break;
6698
6699 /* Set toe offload components in dongle */
6700 case ETHTOOL_SRXCSUM:
6701 case ETHTOOL_STXCSUM:
6702 if (copy_from_user(&edata, uaddr, sizeof(edata)))
6703 return -EFAULT;
6704
6705 /* Read the current settings, update and write back */
6706 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
6707 return ret;
6708
6709 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
6710
6711 if (edata.data != 0)
6712 toe_cmpnt |= csum_dir;
6713 else
6714 toe_cmpnt &= ~csum_dir;
6715
6716 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
6717 return ret;
6718
6719 /* If setting TX checksum mode, tell Linux the new mode */
6720 if (cmd == ETHTOOL_STXCSUM) {
6721 if (edata.data)
6722 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
6723 else
6724 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
6725 }
6726
6727 break;
6728 #endif /* TOE */
6729
6730 default:
6731 return -EOPNOTSUPP;
6732 }
6733
6734 return 0;
6735 }
6736 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
6737
6738 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
6739 {
6740 if (!dhdp) {
6741 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
6742 return FALSE;
6743 }
6744
6745 if (!dhdp->up)
6746 return FALSE;
6747
6748 #if !defined(BCMPCIE)
6749 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
6750 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
6751 return FALSE;
6752 }
6753 #endif // endif
6754
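/* An ioctl timeout (-ETIMEDOUT/-EREMOTEIO), or a bus that went down
 * without an explicit dongle reset, is treated as a firmware hang:
 * record a hang_reason and send the HANG event up so the framework
 * can restart the WLAN subsystem.
 */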
6755 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
6756 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
6757 #ifdef BCMPCIE
6758 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
6759 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
6760 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
6761 #else
6762 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
6763 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
6764 #endif /* BCMPCIE */
6765 if (dhdp->hang_reason == 0) {
6766 if (dhdp->dongle_trap_occured) {
6767 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
6768 #ifdef BCMPCIE
6769 } else if (dhdp->d3ackcnt_timeout) {
6770 dhdp->hang_reason = dhdp->is_sched_error ?
6771 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
6772 HANG_REASON_D3_ACK_TIMEOUT;
6773 #endif /* BCMPCIE */
6774 } else {
6775 dhdp->hang_reason = dhdp->is_sched_error ?
6776 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
6777 HANG_REASON_IOCTL_RESP_TIMEOUT;
6778 }
6779 }
6780 net_os_send_hang_message(net);
6781 return TRUE;
6782 }
6783 return FALSE;
6784 }
6785
6786 #ifdef WL_MONITOR
6787 bool
6788 dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
6789 {
6790 return (dhd->info->monitor_type != 0);
6791 }
6792
6793 void
6794 dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
6795 {
6796 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6797 {
6798 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
6799 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
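/* Reassemble monitor-mode AMSDUs: FIRST_PKT allocates monitor_skb and
 * copies the leading fragment, INTER_PKT appends, and LAST_PKT appends,
 * finalizes the length with skb_put(), and falls through to delivery.
 * The NO_AMSDU case uses the received packet as-is.
 */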
6800 switch (amsdu_flag) {
6801 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
6802 default:
6803 if (!dhd->monitor_skb) {
6804 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
6805 == NULL)
6806 return;
6807 }
6808 if (dhd->monitor_type && dhd->monitor_dev)
6809 dhd->monitor_skb->dev = dhd->monitor_dev;
6810 else {
6811 PKTFREE(dhdp->osh, pkt, FALSE);
6812 dhd->monitor_skb = NULL;
6813 return;
6814 }
6815 dhd->monitor_skb->protocol =
6816 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6817 dhd->monitor_len = 0;
6818 break;
6819
6820 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
6821 if (!dhd->monitor_skb) {
6822 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
6823 == NULL)
6824 return;
6825 dhd->monitor_len = 0;
6826 }
6827 if (dhd->monitor_type && dhd->monitor_dev)
6828 dhd->monitor_skb->dev = dhd->monitor_dev;
6829 else {
6830 PKTFREE(dhdp->osh, pkt, FALSE);
6831 dev_kfree_skb(dhd->monitor_skb);
6832 return;
6833 }
6834 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
6835 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6836 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
6837 PKTFREE(dhdp->osh, pkt, FALSE);
6838 return;
6839
6840 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
6841 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
6842 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6843 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
6844 PKTFREE(dhdp->osh, pkt, FALSE);
6845 return;
6846
6847 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
6848 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
6849 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
6850 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
6851 PKTFREE(dhdp->osh, pkt, FALSE);
6852 skb_put(dhd->monitor_skb, dhd->monitor_len);
6853 dhd->monitor_skb->protocol =
6854 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
6855 dhd->monitor_len = 0;
6856 break;
6857 }
6858 }
6859
6860 if (in_interrupt()) {
6861 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
6862 __FUNCTION__, __LINE__);
6863 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6864 netif_rx(dhd->monitor_skb);
6865 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6866 } else {
6867 /* If the receive is not processed inside an ISR,
6868 * the softirqd must be woken explicitly to service
6869 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6870 * by netif_rx_ni(), but in earlier kernels, we need
6871 * to do it manually.
6872 */
6873 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
6874 __FUNCTION__, __LINE__);
6875
6876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6877 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6878 netif_rx_ni(dhd->monitor_skb);
6879 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6880 #else
6881 ulong flags;
6882 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6883 netif_rx(dhd->monitor_skb);
6884 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6885 local_irq_save(flags);
6886 RAISE_RX_SOFTIRQ();
6887 local_irq_restore(flags);
6888 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6889 }
6890
6891 dhd->monitor_skb = NULL;
6892 }
6893
6894 typedef struct dhd_mon_dev_priv {
6895 struct net_device_stats stats;
6896 } dhd_mon_dev_priv_t;
6897
6898 #define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
6899 #define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
6900 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
6901
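/* The monitor interface is receive-only: anything user space tries to
 * transmit on it is silently dropped.
 */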
6902 static int
6903 dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
6904 {
6905 PKTFREE(NULL, skb, FALSE);
6906 return 0;
6907 }
6908
6909 #if defined(BT_OVER_SDIO)
6910
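/* With BT_OVER_SDIO the WLAN and BT stacks share one SDIO bus;
 * bus_user_count reference-counts the users so the bus is powered up on
 * the first dhd_bus_get() and powered down on the last dhd_bus_put().
 */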
6911 void
6912 dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
6913 {
6914 dhdp->info->bus_user_count++;
6915 }
6916
6917 void
6918 dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
6919 {
6920 dhdp->info->bus_user_count--;
6921 }
6922
6923 /* Return values:
6924 * Success: Returns 0
6925 * Failure: Returns -1 or an errno code
6926 */
6927 int
6928 dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
6929 {
6930 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6931 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6932 int ret = 0;
6933
6934 mutex_lock(&dhd->bus_user_lock);
6935 ++dhd->bus_user_count;
6936 if (dhd->bus_user_count < 0) {
6937 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6938 ret = -1;
6939 goto exit;
6940 }
6941
6942 if (dhd->bus_user_count == 1) {
6943
6944 dhd->pub.hang_was_sent = 0;
6945
6946 /* First user, turn on WL_REG, start the bus */
6947 DHD_ERROR(("%s(): First user, turn on WL_REG and start the bus\n", __FUNCTION__));
6948
6949 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
6950 /* Enable F1 */
6951 ret = dhd_bus_resume(dhdp, 0);
6952 if (ret) {
6953 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
6954 __FUNCTION__, ret));
6955 goto exit;
6956 }
6957 }
6958
6959 dhd_update_fw_nv_path(dhd);
6960 /* update firmware and nvram path to sdio bus */
6961 dhd_bus_update_fw_nv_path(dhd->pub.bus,
6962 dhd->fw_path, dhd->nv_path);
6963 /* download the firmware, Enable F2 */
6964 /* TODO: Should be done only in case of FW switch */
6965 ret = dhd_bus_devreset(dhdp, FALSE);
6966 dhd_bus_resume(dhdp, 1);
6967 if (!ret) {
6968 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
6969 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
6970 ret = -EFAULT;
6971 }
6972 } else {
6973 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
6974 }
6975 } else {
6976 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
6977 __FUNCTION__, dhd->bus_user_count));
6978 }
6979 exit:
6980 mutex_unlock(&dhd->bus_user_lock);
6981 return ret;
6982 }
6983 EXPORT_SYMBOL(dhd_bus_get);
6984
6985 /* Return values:
6986 * Success: Returns 0
6987 * Failure: Returns -1 or an errno code
6988 */
6989 int
6990 dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
6991 {
6992 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6993 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6994 int ret = 0;
6995 BCM_REFERENCE(owner);
6996
6997 mutex_lock(&dhd->bus_user_lock);
6998 --dhd->bus_user_count;
6999 if (dhd->bus_user_count < 0) {
7000 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
7001 dhd->bus_user_count = 0;
7002 ret = -1;
7003 goto exit;
7004 }
7005
7006 if (dhd->bus_user_count == 0) {
7007 /* Last user, stop the bus and turn Off WL_REG */
7008 DHD_ERROR(("%s(): There are no owners left; turn off WL_REG and stop the bus\r\n",
7009 __FUNCTION__));
7010 #ifdef PROP_TXSTATUS
7011 if (dhd->pub.wlfc_enabled) {
7012 dhd_wlfc_deinit(&dhd->pub);
7013 }
7014 #endif /* PROP_TXSTATUS */
7015 #ifdef PNO_SUPPORT
7016 if (dhd->pub.pno_state) {
7017 dhd_pno_deinit(&dhd->pub);
7018 }
7019 #endif /* PNO_SUPPORT */
7020 #ifdef RTT_SUPPORT
7021 if (dhd->pub.rtt_state) {
7022 dhd_rtt_deinit(&dhd->pub);
7023 }
7024 #endif /* RTT_SUPPORT */
7025 ret = dhd_bus_devreset(dhdp, TRUE);
7026 if (!ret) {
7027 dhd_bus_suspend(dhdp);
7028 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
7029 }
7030 } else {
7031 DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
7032 __FUNCTION__, dhd->bus_user_count));
7033 }
7034 exit:
7035 mutex_unlock(&dhd->bus_user_lock);
7036 return ret;
7037 }
7038 EXPORT_SYMBOL(dhd_bus_put);
7039
7040 int
7041 dhd_net_bus_get(struct net_device *dev)
7042 {
7043 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7044 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
7045 }
7046
7047 int
7048 dhd_net_bus_put(struct net_device *dev)
7049 {
7050 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7051 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
7052 }
7053
7054 /*
7055 * Function to enable the Bus Clock
7056 * Returns BCME_OK on success and BCME_xxx on failure
7057 *
7058 * This function is not callable from non-sleepable context
7059 */
7060 int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
7061 {
7062 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7063
7064 int ret;
7065
7066 dhd_os_sdlock(dhdp);
7067 /*
7068 * The second argument is TRUE, meaning we expect
7069 * the function to "wait" until the clocks are really
7070 * available
7071 */
7072 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
7073 dhd_os_sdunlock(dhdp);
7074
7075 return ret;
7076 }
7077 EXPORT_SYMBOL(dhd_bus_clk_enable);
7078
7079 /*
7080 * Function to disable the Bus Clock
7081 * Returns BCME_OK on success and BCME_xxx on failure
7082 *
7083 * This function is not callable from non-sleepable context
7084 */
7085 int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
7086 {
7087 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7088
7089 int ret;
7090
7091 dhd_os_sdlock(dhdp);
7092 /*
7093 * The second argument is TRUE, meaning we expect
7094 * the function to "wait" until the clocks are really
7095 * disabled
7096 */
7097 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
7098 dhd_os_sdunlock(dhdp);
7099
7100 return ret;
7101 }
7102 EXPORT_SYMBOL(dhd_bus_clk_disable);
7103
7104 /*
7105 * Function to reset bt_use_count counter to zero.
7106 *
7107 * This function is not callable from non-sleepable context
7108 */
7109 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
7110 {
7111 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7112
7113 /* take the lock and reset bt use count */
7114 dhd_os_sdlock(dhdp);
7115 dhdsdio_reset_bt_use_count(dhdp->bus);
7116 dhd_os_sdunlock(dhdp);
7117 }
7118 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
7119
7120 void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
7121 {
7122 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7123 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7124
7125 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7126 dhdp->hang_was_sent = 0;
7127
7128 dhd_os_send_hang_message(&dhd->pub);
7129 #else
7130 DHD_ERROR(("%s: unsupported\n", __FUNCTION__));
7131 #endif // endif
7132 }
7133 EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
7134
7135 #endif /* BT_OVER_SDIO */
7136
7137 static int
7138 dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7139 {
7140 return 0;
7141 }
7142
7143 static struct net_device_stats*
7144 dhd_monitor_get_stats(struct net_device *dev)
7145 {
7146 return &DHD_MON_DEV_STATS(dev);
7147 }
7148
7149 static const struct net_device_ops netdev_monitor_ops =
7150 {
7151 .ndo_start_xmit = dhd_monitor_start,
7152 .ndo_get_stats = dhd_monitor_get_stats,
7153 .ndo_do_ioctl = dhd_monitor_ioctl
7154 };
7155
7156 static void
7157 dhd_add_monitor_if(dhd_info_t *dhd)
7158 {
7159 struct net_device *dev;
7160 char *devname;
7161 uint32 scan_suppress = FALSE;
7162 int ret = BCME_OK;
7163
7164 if (!dhd) {
7165 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7166 return;
7167 }
7168
7169 if (dhd->monitor_dev) {
7170 DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
7171 return;
7172 }
7173
7174 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
7175 if (!dev) {
7176 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
7177 return;
7178 }
7179
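/* The capture netdev is exposed as "radiotap<unit>" and delivers 802.11
 * frames prefixed with a radiotap header, consumable by standard
 * sniffers (e.g. tcpdump or wireshark).
 */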
7180 devname = "radiotap";
7181
7182 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
7183
7184 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7185 #define ARPHRD_IEEE80211_PRISM 802
7186 #endif // endif
7187
7188 #ifndef ARPHRD_IEEE80211_RADIOTAP
7189 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7190 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7191
7192 dev->type = ARPHRD_IEEE80211_RADIOTAP;
7193
7194 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7195 dev->hard_start_xmit = dhd_monitor_start;
7196 dev->do_ioctl = dhd_monitor_ioctl;
7197 dev->get_stats = dhd_monitor_get_stats;
7198 #else
7199 dev->netdev_ops = &netdev_monitor_ops;
7200 #endif // endif
7201
7202 if (register_netdevice(dev)) {
7203 DHD_ERROR(("%s: register_netdevice failed for %s\n",
7204 __FUNCTION__, dev->name));
7205 free_netdev(dev);
7206 return;
7207 }
7208
7209 if (FW_SUPPORTED((&dhd->pub), monitor)) {
7210 #ifdef DHD_PCIE_RUNTIMEPM
7211 /* Disable RuntimePM in monitor mode */
7212 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7213 DHD_ERROR(("%s : Disable RuntimePM in Monitor Mode\n", __FUNCTION__));
7214 #endif /* DHD_PCIE_RUNTIMEPM */
7215 scan_suppress = TRUE;
7216 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
7217 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
7218 sizeof(scan_suppress), NULL, 0, TRUE);
7219 if (ret < 0) {
7220 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
7221 }
7222 }
7223
7224 dhd->monitor_dev = dev;
7225 }
7226
7227 static void
7228 dhd_del_monitor_if(dhd_info_t *dhd)
7229 {
7230
7231 if (!dhd) {
7232 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7233 return;
7234 }
7235
7236 if (!dhd->monitor_dev) {
7237 DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__));
7238 return;
7239 }
7240
7241 if (dhd->monitor_dev) {
7242 if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
7243 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7244 MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
7245 MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
7246 #else
7247 free_netdev(dhd->monitor_dev);
7248 #endif /* 2.6.24 */
7249 } else {
7250 unregister_netdevice(dhd->monitor_dev);
7251 }
7252 dhd->monitor_dev = NULL;
7253 }
7254 }
7255
7256 static void
7257 dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
7258 {
7259 dhd_info_t *dhd = pub->info;
7260
7261 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
7262
7263 dhd_net_if_lock_local(dhd);
7264 if (!val) {
7265 /* Delete monitor */
7266 dhd_del_monitor_if(dhd);
7267 } else {
7268 /* Add monitor */
7269 dhd_add_monitor_if(dhd);
7270 }
7271 dhd->monitor_type = val;
7272 dhd_net_if_unlock_local(dhd);
7273 }
7274 #endif /* WL_MONITOR */
7275
7276 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
7277 {
7278 int bcmerror = BCME_OK;
7279 int buflen = 0;
7280 struct net_device *net;
7281
7282 net = dhd_idx2net(pub, ifidx);
7283 if (!net) {
7284 bcmerror = BCME_BADARG;
7285 		/*
7286 		 * A bad netdev pointer means the DHD cannot communicate
7287 		 * with higher layers, so just return from here
7288 		 */
7289 return bcmerror;
7290 }
7291
7292 /* check for local dhd ioctl and handle it */
7293 if (ioc->driver == DHD_IOCTL_MAGIC) {
7294 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
7295 if (data_buf)
7296 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
7297 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
7298 if (bcmerror)
7299 pub->bcmerror = bcmerror;
7300 goto done;
7301 }
7302
7303 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
7304 if (data_buf)
7305 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
7306
7307 /* send to dongle (must be up, and wl). */
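	/* Bus not in DATA state yet: optionally attempt a deferred firmware
	 * download (allow_delay_fwdl) before failing the ioctl
	 */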
7308 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
7309 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
7310 int ret;
7311 if (atomic_read(&exit_in_progress)) {
7312 DHD_ERROR(("%s module exit in progress\n", __func__));
7313 bcmerror = BCME_DONGLE_DOWN;
7314 goto done;
7315 }
7316 ret = dhd_bus_start(pub);
7317 if (ret != 0) {
7318 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7319 bcmerror = BCME_DONGLE_DOWN;
7320 goto done;
7321 }
7322 } else {
7323 bcmerror = BCME_DONGLE_DOWN;
7324 goto done;
7325 }
7326 }
7327
7328 if (!pub->iswl) {
7329 bcmerror = BCME_DONGLE_DOWN;
7330 goto done;
7331 }
7332
7333 /*
7334 * Flush the TX queue if required for proper message serialization:
7335 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
7336 * prevent M4 encryption and
7337 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
7338 * prevent disassoc frame being sent before WPS-DONE frame.
7339 */
7340 if (ioc->cmd == WLC_SET_KEY ||
7341 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
7342 strncmp("wsec_key", data_buf, 9) == 0) ||
7343 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
7344 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
7345 ioc->cmd == WLC_DISASSOC)
7346 dhd_wait_pend8021x(net);
7347
7348 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
7349 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
7350 bcmerror = BCME_UNSUPPORTED;
7351 goto done;
7352 }
7353 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
7354
7355 #ifdef WL_MONITOR
7356 /* Intercept monitor ioctl here, add/del monitor if */
7357 if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
7358 int val = 0;
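		/* The monitor value may be passed as 1, 2 or 4 bytes; read it at the
		 * widest size the buffer allows
		 */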
7359 if (data_buf != NULL && buflen != 0) {
7360 if (buflen >= 4) {
7361 val = *(int*)data_buf;
7362 } else if (buflen >= 2) {
7363 val = *(short*)data_buf;
7364 } else {
7365 val = *(char*)data_buf;
7366 }
7367 }
7368 dhd_set_monitor(pub, ifidx, val);
7369 }
7370 #endif /* WL_MONITOR */
7371
7372 done:
7373 dhd_check_hang(net, pub, bcmerror);
7374
7375 return bcmerror;
7376 }
7377
7378 /**
7379 * Called by the OS (optionally via a wrapper function).
7380 * @param net Linux per dongle instance
7381 * @param ifr Linux request structure
7382 * @param cmd e.g. SIOCETHTOOL
7383 */
7384 static int
7385 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
7386 {
7387 dhd_info_t *dhd = DHD_DEV_INFO(net);
7388 dhd_ioctl_t ioc;
7389 int bcmerror = 0;
7390 int ifidx;
7391 int ret;
7392 void *local_buf = NULL; /**< buffer in kernel space */
7393 void __user *ioc_buf_user = NULL; /**< buffer in user space */
7394 u16 buflen = 0;
7395
7396 if (atomic_read(&exit_in_progress)) {
7397 DHD_ERROR(("%s module exit in progress\n", __func__));
7398 bcmerror = BCME_DONGLE_DOWN;
7399 return OSL_ERROR(bcmerror);
7400 }
7401
7402 DHD_OS_WAKE_LOCK(&dhd->pub);
7403 DHD_PERIM_LOCK(&dhd->pub);
7404
7405 /* Interface up check for built-in type */
7406 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
7407 DHD_TRACE(("%s: Interface is down \n", __FUNCTION__));
7408 DHD_PERIM_UNLOCK(&dhd->pub);
7409 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7410 return OSL_ERROR(BCME_NOTUP);
7411 }
7412
7413 ifidx = dhd_net2idx(dhd, net);
7414 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
7415
7416 #if defined(WL_STATIC_IF)
7417 /* skip for static ndev when it is down */
7418 if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
7419 DHD_PERIM_UNLOCK(&dhd->pub);
7420 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7421 return -1;
7422 }
7423 #endif /* WL_STATIC_IF */
7424
7425 if (ifidx == DHD_BAD_IF) {
7426 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
7427 DHD_PERIM_UNLOCK(&dhd->pub);
7428 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7429 return -1;
7430 }
7431
7432 #if defined(WL_WIRELESS_EXT)
7433 /* linux wireless extensions */
7434 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
7435 /* may recurse, do NOT lock */
7436 ret = wl_iw_ioctl(net, ifr, cmd);
7437 DHD_PERIM_UNLOCK(&dhd->pub);
7438 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7439 return ret;
7440 }
7441 #endif /* defined(WL_WIRELESS_EXT) */
7442
7443 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
7444 if (cmd == SIOCETHTOOL) {
7445 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
7446 DHD_PERIM_UNLOCK(&dhd->pub);
7447 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7448 return ret;
7449 }
7450 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7451
7452 if (cmd == SIOCDEVPRIVATE+1) {
7453 ret = wl_android_priv_cmd(net, ifr);
7454 dhd_check_hang(net, &dhd->pub, ret);
7455 DHD_PERIM_UNLOCK(&dhd->pub);
7456 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7457 return ret;
7458 }
7459
7460 if (cmd != SIOCDEVPRIVATE) {
7461 DHD_PERIM_UNLOCK(&dhd->pub);
7462 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7463 return -EOPNOTSUPP;
7464 }
7465
7466 memset(&ioc, 0, sizeof(ioc));
7467
7468 #ifdef CONFIG_COMPAT
7469 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
7470 if (in_compat_syscall()) {
7471 #else
7472 if (is_compat_task()) {
7473 #endif /* LINUX_VER >= 4.6 */
7474 compat_wl_ioctl_t compat_ioc;
7475 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
7476 bcmerror = BCME_BADADDR;
7477 goto done;
7478 }
7479 ioc.cmd = compat_ioc.cmd;
7480 ioc.buf = compat_ptr(compat_ioc.buf);
7481 ioc.len = compat_ioc.len;
7482 ioc.set = compat_ioc.set;
7483 ioc.used = compat_ioc.used;
7484 ioc.needed = compat_ioc.needed;
7485 		/* To differentiate between wl and dhd read 4 more bytes */
7486 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
7487 sizeof(uint)) != 0)) {
7488 bcmerror = BCME_BADADDR;
7489 goto done;
7490 }
7491 } else
7492 #endif /* CONFIG_COMPAT */
7493 {
7494 /* Copy the ioc control structure part of ioctl request */
7495 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
7496 bcmerror = BCME_BADADDR;
7497 goto done;
7498 }
7499
7500 		/* To differentiate between wl and dhd read 4 more bytes */
7501 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
7502 sizeof(uint)) != 0)) {
7503 bcmerror = BCME_BADADDR;
7504 goto done;
7505 }
7506 }
7507
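	/* Require CAP_NET_ADMIN before acting on the copied-in request */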
7508 if (!capable(CAP_NET_ADMIN)) {
7509 bcmerror = BCME_EPERM;
7510 goto done;
7511 }
7512
7513 /* Take backup of ioc.buf and restore later */
7514 ioc_buf_user = ioc.buf;
7515
7516 if (ioc.len > 0) {
7517 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
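		/* One extra byte is allocated so the kernel copy can always be
		 * NUL-terminated below
		 */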
7518 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
7519 bcmerror = BCME_NOMEM;
7520 goto done;
7521 }
7522
7523 DHD_PERIM_UNLOCK(&dhd->pub);
7524 if (copy_from_user(local_buf, ioc.buf, buflen)) {
7525 DHD_PERIM_LOCK(&dhd->pub);
7526 bcmerror = BCME_BADADDR;
7527 goto done;
7528 }
7529 DHD_PERIM_LOCK(&dhd->pub);
7530
7531 *((char *)local_buf + buflen) = '\0';
7532
7533 		/* On some platforms, accessing the userspace memory behind
7534 		 * ioc.buf causes a kernel panic, so point ioc.buf at the
7535 		 * kernel-space copy in local_buf instead
7536 		 */
7537 ioc.buf = local_buf;
7538 }
7539
7540 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
7541 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
7542 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
7543 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
7544 bcmerror = BCME_DONGLE_DOWN;
7545 goto done;
7546 }
7547
7548 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
7549
7550 /* Restore back userspace pointer to ioc.buf */
7551 ioc.buf = ioc_buf_user;
7552
7553 if (!bcmerror && buflen && local_buf && ioc.buf) {
7554 DHD_PERIM_UNLOCK(&dhd->pub);
7555 if (copy_to_user(ioc.buf, local_buf, buflen))
7556 bcmerror = -EFAULT;
7557 DHD_PERIM_LOCK(&dhd->pub);
7558 }
7559
7560 done:
7561 if (local_buf)
7562 MFREE(dhd->pub.osh, local_buf, buflen+1);
7563
7564 DHD_PERIM_UNLOCK(&dhd->pub);
7565 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7566
7567 return OSL_ERROR(bcmerror);
7568 }
7569
7570 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
7571 /* Flag to indicate whether we distinguish the power-off policy when the
7572  * user sets the menu "Keep Wi-Fi on during sleep" to "Never"
7573  */
7574 int trigger_deep_sleep = 0;
7575 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
7576
7577 #ifdef FIX_CPU_MIN_CLOCK
7578 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
7579 {
7580 if (dhd) {
7581 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7582 mutex_init(&dhd->cpufreq_fix);
7583 #endif // endif
7584 dhd->cpufreq_fix_status = FALSE;
7585 }
7586 return 0;
7587 }
7588
7589 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
7590 {
7591 	if (!dhd) return;
7592 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7593 	mutex_lock(&dhd->cpufreq_fix);
7594 #endif // endif
7595 	if (!dhd->cpufreq_fix_status) {
7596 		pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
7597 #ifdef FIX_BUS_MIN_CLOCK
7598 		pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
7599 #endif /* FIX_BUS_MIN_CLOCK */
7600 		DHD_ERROR(("pm_qos_add_requests called\n"));
7601 		dhd->cpufreq_fix_status = TRUE;
7602 	}
7603 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7604 	mutex_unlock(&dhd->cpufreq_fix);
7605 #endif // endif
7606 }
7607
7608 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
7609 {
7610 	if (!dhd)
7611 		return;
7612 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7613 	mutex_lock(&dhd->cpufreq_fix);
7614 #endif // endif
7615 	if (dhd->cpufreq_fix_status != TRUE) {
7616 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7617 		mutex_unlock(&dhd->cpufreq_fix);
7618 #endif // endif
7619 		return;
7620 	}
7621 	pm_qos_remove_request(&dhd->dhd_cpu_qos);
7622 #ifdef FIX_BUS_MIN_CLOCK
7623 	pm_qos_remove_request(&dhd->dhd_bus_qos);
7624 #endif /* FIX_BUS_MIN_CLOCK */
7625 	DHD_ERROR(("pm_qos_remove_requests called\n"));
7626 	dhd->cpufreq_fix_status = FALSE;
7627 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7628 	mutex_unlock(&dhd->cpufreq_fix);
7629 #endif // endif
7630 }
7631 #endif /* FIX_CPU_MIN_CLOCK */
7632
7633 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7634 static int
7635 dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
7636 {
7637 int error;
7638 dhd_info_t *dhd = DHD_DEV_INFO(net);
7639
7640 if (atomic_read(&dhd->pub.block_bus))
7641 return -EHOSTDOWN;
7642
7643 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
7644 return BCME_ERROR;
7645
7646 error = dhd_ioctl_entry(net, ifr, cmd);
7647
7648 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7649 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7650
7651 return error;
7652 }
7653 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7654
7655 static int
7656 dhd_stop(struct net_device *net)
7657 {
7658 int ifidx = 0;
7659 bool skip_reset = false;
7660 #if defined(WL_CFG80211)
7661 unsigned long flags = 0;
7662 #ifdef WL_STATIC_IF
7663 struct bcm_cfg80211 *cfg = wl_get_cfg(net);
7664 #endif /* WL_STATIC_IF */
7665 #endif /* WL_CFG80211 */
7666 dhd_info_t *dhd = DHD_DEV_INFO(net);
7667 DHD_OS_WAKE_LOCK(&dhd->pub);
7668 DHD_PERIM_LOCK(&dhd->pub);
7669 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
7670 dhd->pub.rxcnt_timeout = 0;
7671 dhd->pub.txcnt_timeout = 0;
7672
7673 #ifdef BCMPCIE
7674 dhd->pub.d3ackcnt_timeout = 0;
7675 #endif /* BCMPCIE */
7676
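	/* ndev_op_sync serializes dhd_open()/dhd_stop() so the up/down state
	 * checks below stay consistent
	 */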
7677 mutex_lock(&dhd->pub.ndev_op_sync);
7678
7679 if (dhd->pub.up == 0) {
7680 goto exit;
7681 }
7682 #if defined(DHD_HANG_SEND_UP_TEST)
7683 if (dhd->pub.req_hang_type) {
7684 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
7685 __FUNCTION__, dhd->pub.req_hang_type));
7686 dhd->pub.req_hang_type = 0;
7687 }
7688 #endif /* DHD_HANG_SEND_UP_TEST */
7689
7690 dhd_if_flush_sta(DHD_DEV_IFP(net));
7691
7692 #ifdef FIX_CPU_MIN_CLOCK
7693 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
7694 dhd_rollback_cpu_freq(dhd);
7695 #endif /* FIX_CPU_MIN_CLOCK */
7696
7697 ifidx = dhd_net2idx(dhd, net);
7698 BCM_REFERENCE(ifidx);
7699
7700 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7701 /* If static if is operational, don't reset the chip */
7702 if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
7703 DHD_INFO(("[STATIC_IF] static if operational. Avoiding chip reset!\n"));
7704 wl_cfg80211_sta_ifdown(net);
7705 skip_reset = true;
7706 goto exit;
7707 }
7708 #endif /* WL_STATIC_IF && WL_CFG80211 */
7709 #ifdef WL_CFG80211
7710
7711 /* Disable Runtime PM before interface down */
7712 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7713
7714 spin_lock_irqsave(&dhd->pub.up_lock, flags);
7715 dhd->pub.up = 0;
7716 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
7717 #else
7718 dhd->pub.up = 0;
7719 #endif /* WL_CFG80211 */
7720
7721 #ifdef WL_CFG80211
7722 if (ifidx == 0) {
7723 dhd_if_t *ifp;
7724 wl_cfg80211_down(net);
7725
7726 ifp = dhd->iflist[0];
7727 /*
7728 * For CFG80211: Clean up all the left over virtual interfaces
7729 * when the primary Interface is brought down. [ifconfig wlan0 down]
7730 */
7731 if (!dhd_download_fw_on_driverload) {
7732 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
7733 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7734 int i;
7735 #ifdef WL_CFG80211_P2P_DEV_IF
7736 wl_cfg80211_del_p2p_wdev(net);
7737 #endif /* WL_CFG80211_P2P_DEV_IF */
7738
7739 dhd_net_if_lock_local(dhd);
7740 for (i = 1; i < DHD_MAX_IFS; i++)
7741 dhd_remove_if(&dhd->pub, i, FALSE);
7742
7743 if (ifp && ifp->net) {
7744 dhd_if_del_sta_list(ifp);
7745 }
7746 #ifdef ARP_OFFLOAD_SUPPORT
7747 if (dhd_inetaddr_notifier_registered) {
7748 dhd_inetaddr_notifier_registered = FALSE;
7749 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
7750 }
7751 #endif /* ARP_OFFLOAD_SUPPORT */
7752 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7753 if (dhd_inet6addr_notifier_registered) {
7754 dhd_inet6addr_notifier_registered = FALSE;
7755 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
7756 }
7757 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7758 dhd_net_if_unlock_local(dhd);
7759 }
7760 cancel_work_sync(dhd->dhd_deferred_wq);
7761
7762 #ifdef SHOW_LOGTRACE
7763 /* Wait till event_log_dispatcher_work finishes */
7764 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
7765 #endif /* SHOW_LOGTRACE */
7766
7767 #if defined(DHD_LB_RXP)
7768 __skb_queue_purge(&dhd->rx_pend_queue);
7769 #endif /* DHD_LB_RXP */
7770
7771 #if defined(DHD_LB_TXP)
7772 skb_queue_purge(&dhd->tx_pend_queue);
7773 #endif /* DHD_LB_TXP */
7774 }
7775
7776 #if (defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)) || \
7777 defined(ARGOS_NOTIFY_CB)
7778 argos_register_notifier_deinit();
7779 #endif /* (ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER) || ARGOS_NOTIFY_CB */
7780 #ifdef DHDTCPACK_SUPPRESS
7781 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7782 #endif /* DHDTCPACK_SUPPRESS */
7783 #if defined(DHD_LB_RXP)
7784 if (ifp && ifp->net == dhd->rx_napi_netdev) {
7785 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
7786 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7787 skb_queue_purge(&dhd->rx_napi_queue);
7788 napi_disable(&dhd->rx_napi_struct);
7789 netif_napi_del(&dhd->rx_napi_struct);
7790 dhd->rx_napi_netdev = NULL;
7791 }
7792 #endif /* DHD_LB_RXP */
7793 }
7794 #endif /* WL_CFG80211 */
7795
7796 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
7797
7798 #ifdef PROP_TXSTATUS
7799 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
7800 #endif // endif
7801 #ifdef SHOW_LOGTRACE
7802 if (!dhd_download_fw_on_driverload) {
7803 /* Release the skbs from queue for WLC_E_TRACE event */
7804 dhd_event_logtrace_flush_queue(&dhd->pub);
7805 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
7806 if (dhd->event_data.fmts) {
7807 MFREE(dhd->pub.osh, dhd->event_data.fmts,
7808 dhd->event_data.fmts_size);
7809 dhd->event_data.fmts = NULL;
7810 }
7811 if (dhd->event_data.raw_fmts) {
7812 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
7813 dhd->event_data.raw_fmts_size);
7814 dhd->event_data.raw_fmts = NULL;
7815 }
7816 if (dhd->event_data.raw_sstr) {
7817 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
7818 dhd->event_data.raw_sstr_size);
7819 dhd->event_data.raw_sstr = NULL;
7820 }
7821 if (dhd->event_data.rom_raw_sstr) {
7822 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
7823 dhd->event_data.rom_raw_sstr_size);
7824 dhd->event_data.rom_raw_sstr = NULL;
7825 }
7826 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
7827 }
7828 }
7829 #endif /* SHOW_LOGTRACE */
7830 #ifdef APF
7831 dhd_dev_apf_delete_filter(net);
7832 #endif /* APF */
7833
7834 /* Stop the protocol module */
7835 dhd_prot_stop(&dhd->pub);
7836
7837 OLD_MOD_DEC_USE_COUNT;
7838 exit:
7839 if (skip_reset == false) {
7840 #if defined(WL_CFG80211)
7841 if (ifidx == 0 && !dhd_download_fw_on_driverload) {
7842 #if defined(BT_OVER_SDIO)
7843 dhd_bus_put(&dhd->pub, WLAN_MODULE);
7844 wl_android_set_wifi_on_flag(FALSE);
7845 #else
7846 wl_android_wifi_off(net, TRUE);
7847 #endif /* BT_OVER_SDIO */
7848 }
7849 #ifdef SUPPORT_DEEP_SLEEP
7850 else {
7851 			/* CSP#505233: Flag to indicate whether we distinguish the
7852 			 * power-off policy when the user sets the menu
7853 			 * "Keep Wi-Fi on during sleep" to "Never"
7854 			 */
7855 if (trigger_deep_sleep) {
7856 dhd_deepsleep(net, 1);
7857 trigger_deep_sleep = 0;
7858 }
7859 }
7860 #endif /* SUPPORT_DEEP_SLEEP */
7861 #endif // endif
7862 dhd->pub.hang_was_sent = 0;
7863
7864 		/* Clear country spec for built-in type driver */
7865 if (!dhd_download_fw_on_driverload) {
7866 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
7867 dhd->pub.dhd_cspec.rev = 0;
7868 dhd->pub.dhd_cspec.ccode[0] = 0x00;
7869 }
7870
7871 #ifdef BCMDBGFS
7872 dhd_dbgfs_remove();
7873 #endif // endif
7874 }
7875
7876 DHD_PERIM_UNLOCK(&dhd->pub);
7877 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7878
7879 /* Destroy wakelock */
7880 if (!dhd_download_fw_on_driverload &&
7881 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
7882 (skip_reset == false)) {
7883 DHD_OS_WAKE_LOCK_DESTROY(dhd);
7884 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
7885 }
7886
7887 mutex_unlock(&dhd->pub.ndev_op_sync);
7888 return 0;
7889 }
7890
7891 #if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
7892 defined(USE_INITIAL_SHORT_DWELL_TIME))
7893 extern bool g_first_broadcast_scan;
7894 #endif /* WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
7895
7896 #ifdef WL11U
7897 static int dhd_interworking_enable(dhd_pub_t *dhd)
7898 {
7899 uint32 enable = true;
7900 int ret = BCME_OK;
7901
7902 ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
7903 if (ret < 0) {
7904 		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
7905 }
7906
7907 return ret;
7908 }
7909 #endif /* WL11U */
7910
7911 static int
7912 dhd_open(struct net_device *net)
7913 {
7914 dhd_info_t *dhd = DHD_DEV_INFO(net);
7915 #ifdef TOE
7916 uint32 toe_ol;
7917 #endif // endif
7918 int ifidx;
7919 int32 ret = 0;
7920
7921 #if defined(PREVENT_REOPEN_DURING_HANG)
7922 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
7923 if (dhd->pub.hang_was_sent == 1) {
7924 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
7925 /* Force to bring down WLAN interface in case dhd_stop() is not called
7926 * from the upper layer when HANG event is triggered.
7927 */
7928 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
7929 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
7930 dhd_stop(net);
7931 } else {
7932 return -1;
7933 }
7934 }
7935 #endif /* PREVENT_REOPEN_DURING_HANG */
7936
7937 mutex_lock(&dhd->pub.ndev_op_sync);
7938
7939 if (dhd->pub.up == 1) {
7940 /* already up */
7941 DHD_ERROR(("Primary net_device is already up \n"));
7942 mutex_unlock(&dhd->pub.ndev_op_sync);
7943 return BCME_OK;
7944 }
7945
7946 if (!dhd_download_fw_on_driverload) {
7947 if (!dhd_driver_init_done) {
7948 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
7949 mutex_unlock(&dhd->pub.ndev_op_sync);
7950 return -1;
7951 }
7952 /* Init wakelock */
7953 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
7954 DHD_OS_WAKE_LOCK_INIT(dhd);
7955 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7956 }
7957
7958 #ifdef SHOW_LOGTRACE
7959 skb_queue_head_init(&dhd->evt_trace_queue);
7960
7961 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
7962 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
7963 if (ret == BCME_OK) {
7964 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7965 st_str_file_path, map_file_path);
7966 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7967 rom_st_str_file_path, rom_map_file_path);
7968 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
7969 }
7970 }
7971 #endif /* SHOW_LOGTRACE */
7972 }
7973
7974 #if defined(MULTIPLE_SUPPLICANT)
7975 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
7976 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
7977 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
7978 }
7979 mutex_lock(&_dhd_sdio_mutex_lock_);
7980 #endif // endif
7981 #endif /* MULTIPLE_SUPPLICANT */
7982
7983 DHD_OS_WAKE_LOCK(&dhd->pub);
7984 DHD_PERIM_LOCK(&dhd->pub);
7985 dhd->pub.dongle_trap_occured = 0;
7986 dhd->pub.hang_was_sent = 0;
7987 dhd->pub.hang_reason = 0;
7988 dhd->pub.iovar_timeout_occured = 0;
7989 #ifdef PCIE_FULL_DONGLE
7990 dhd->pub.d3ack_timeout_occured = 0;
7991 #endif /* PCIE_FULL_DONGLE */
7992 #ifdef DHD_MAP_LOGGING
7993 dhd->pub.smmu_fault_occurred = 0;
7994 #endif /* DHD_MAP_LOGGING */
7995
7996 #ifdef DHD_LOSSLESS_ROAMING
7997 dhd->pub.dequeue_prec_map = ALLPRIO;
7998 #endif // endif
7999
8000 #if !defined(WL_CFG80211)
8001 /*
8002 * Force start if ifconfig_up gets called before START command
8003 * We keep WEXT's wl_control_wl_start to provide backward compatibility
8004 * This should be removed in the future
8005 */
8006 ret = wl_control_wl_start(net);
8007 if (ret != 0) {
8008 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8009 ret = -1;
8010 goto exit;
8011 }
8012
8013 #endif // endif
8014
8015 ifidx = dhd_net2idx(dhd, net);
8016 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
8017
8018 if (ifidx < 0) {
8019 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
8020 ret = -1;
8021 goto exit;
8022 }
8023
8024 if (!dhd->iflist[ifidx]) {
8025 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
8026 ret = -1;
8027 goto exit;
8028 }
8029
8030 if (ifidx == 0) {
8031 atomic_set(&dhd->pend_8021x_cnt, 0);
8032 #if defined(WL_CFG80211)
8033 if (!dhd_download_fw_on_driverload) {
8034 DHD_ERROR(("\n%s\n", dhd_version));
8035 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8036 g_first_broadcast_scan = TRUE;
8037 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8038 #if defined(BT_OVER_SDIO)
8039 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
8040 wl_android_set_wifi_on_flag(TRUE);
8041 #else
8042 ret = wl_android_wifi_on(net);
8043 #endif /* BT_OVER_SDIO */
8044 if (ret != 0) {
8045 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
8046 __FUNCTION__, ret));
8047 ret = -1;
8048 goto exit;
8049 }
8050 }
8051 #ifdef SUPPORT_DEEP_SLEEP
8052 else {
8053 			/* Flag to indicate whether we distinguish the
8054 			 * power-off policy when the user sets the menu
8055 			 * "Keep Wi-Fi on during sleep" to "Never"
8056 			 */
8057 if (trigger_deep_sleep) {
8058 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
8059 g_first_broadcast_scan = TRUE;
8060 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
8061 dhd_deepsleep(net, 0);
8062 trigger_deep_sleep = 0;
8063 }
8064 }
8065 #endif /* SUPPORT_DEEP_SLEEP */
8066 #ifdef FIX_CPU_MIN_CLOCK
8067 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
8068 dhd_init_cpufreq_fix(dhd);
8069 dhd_fix_cpu_freq(dhd);
8070 }
8071 #endif /* FIX_CPU_MIN_CLOCK */
8072 #endif // endif
8073
8074 if (dhd->pub.busstate != DHD_BUS_DATA) {
8075
8076 /* try to bring up bus */
8077 DHD_PERIM_UNLOCK(&dhd->pub);
8078
8079 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8080 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
8081 ret = dhd_bus_start(&dhd->pub);
8082 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
8083 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
8084 }
8085 #else
8086 ret = dhd_bus_start(&dhd->pub);
8087 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8088
8089 DHD_PERIM_LOCK(&dhd->pub);
8090 if (ret) {
8091 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
8092 ret = -1;
8093 goto exit;
8094 }
8095
8096 }
8097
8098 #ifdef BT_OVER_SDIO
8099 if (dhd->pub.is_bt_recovery_required) {
8100 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
8101 bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
8102 }
8103 dhd->pub.is_bt_recovery_required = FALSE;
8104 #endif // endif
8105
8106 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
8107 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
8108
8109 #ifdef TOE
8110 /* Get current TOE mode from dongle */
8111 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
8112 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
8113 } else {
8114 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
8115 }
8116 #endif /* TOE */
8117
8118 #if defined(DHD_LB_RXP)
8119 __skb_queue_head_init(&dhd->rx_pend_queue);
8120 if (dhd->rx_napi_netdev == NULL) {
8121 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
8122 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
8123 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
8124 dhd_napi_poll, dhd_napi_weight);
8125 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
8126 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
8127 napi_enable(&dhd->rx_napi_struct);
8128 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
8129 skb_queue_head_init(&dhd->rx_napi_queue);
8130 } /* rx_napi_netdev == NULL */
8131 #endif /* DHD_LB_RXP */
8132 #ifdef DHD_LB_IRQSET
8133 dhd_irq_set_affinity(&dhd->pub);
8134 #endif /* DHD_LB_IRQSET */
8135
8136 #if defined(DHD_LB_TXP)
8137 /* Use the variant that uses locks */
8138 skb_queue_head_init(&dhd->tx_pend_queue);
8139 #endif /* DHD_LB_TXP */
8140
8141 #if defined(WL_CFG80211)
8142 if (unlikely(wl_cfg80211_up(net))) {
8143 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
8144 ret = -1;
8145 goto exit;
8146 }
8147 if (!dhd_download_fw_on_driverload) {
8148 #ifdef ARP_OFFLOAD_SUPPORT
8149 dhd->pend_ipaddr = 0;
8150 if (!dhd_inetaddr_notifier_registered) {
8151 dhd_inetaddr_notifier_registered = TRUE;
8152 register_inetaddr_notifier(&dhd_inetaddr_notifier);
8153 }
8154 #endif /* ARP_OFFLOAD_SUPPORT */
8155 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8156 if (!dhd_inet6addr_notifier_registered) {
8157 dhd_inet6addr_notifier_registered = TRUE;
8158 register_inet6addr_notifier(&dhd_inet6addr_notifier);
8159 }
8160 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8161 }
8162
8163 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
8164 defined(ARGOS_NOTIFY_CB)
8165 argos_register_notifier_init(net);
8166 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
8167 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
8168 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
8169 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8170 #else
8171 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8172 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
8173 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
8174 #if defined(NUM_SCB_MAX_PROBE)
8175 dhd_set_scb_probe(&dhd->pub);
8176 #endif /* NUM_SCB_MAX_PROBE */
8177 #endif /* WL_CFG80211 */
8178 }
8179
8180 dhd->pub.up = 1;
8181
8182 if (wl_event_enable) {
8183 /* For wl utility to receive events */
8184 dhd->pub.wl_event_enabled = true;
8185 } else {
8186 dhd->pub.wl_event_enabled = false;
8187 }
8188
8189 if (logtrace_pkt_sendup) {
8190 		/* For any daemon to receive logtrace */
8191 dhd->pub.logtrace_pkt_sendup = true;
8192 } else {
8193 dhd->pub.logtrace_pkt_sendup = false;
8194 }
8195
8196 OLD_MOD_INC_USE_COUNT;
8197
8198 #ifdef BCMDBGFS
8199 dhd_dbgfs_init(&dhd->pub);
8200 #endif // endif
8201
8202 exit:
8203 mutex_unlock(&dhd->pub.ndev_op_sync);
8204 if (ret) {
8205 dhd_stop(net);
8206 }
8207
8208 DHD_PERIM_UNLOCK(&dhd->pub);
8209 DHD_OS_WAKE_UNLOCK(&dhd->pub);
8210
8211 #if defined(MULTIPLE_SUPPLICANT)
8212 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8213 mutex_unlock(&_dhd_sdio_mutex_lock_);
8214 #endif // endif
8215 #endif /* MULTIPLE_SUPPLICANT */
8216
8217 return ret;
8218 }
8219
8220 /*
8221  * ndo_open handler for primary ndev
8222 */
8223 static int
8224 dhd_pri_open(struct net_device *net)
8225 {
8226 s32 ret;
8227
8228 ret = dhd_open(net);
8229 if (unlikely(ret)) {
8230 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
8231 return ret;
8232 }
8233
8234 /* Allow transmit calls */
8235 netif_start_queue(net);
8236 DHD_ERROR(("[%s] tx queue started\n", net->name));
8237 return ret;
8238 }
8239
8240 /*
8241 * ndo_stop handler for primary ndev
8242 */
8243 static int
8244 dhd_pri_stop(struct net_device *net)
8245 {
8246 s32 ret;
8247
8248 /* stop tx queue */
8249 netif_stop_queue(net);
8250 DHD_ERROR(("[%s] tx queue stopped\n", net->name));
8251
8252 ret = dhd_stop(net);
8253 if (unlikely(ret)) {
8254 DHD_ERROR(("dhd_stop failed: %d\n", ret));
8255 return ret;
8256 }
8257
8258 return ret;
8259 }
8260
8261 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
8262 /*
8263 * For static I/Fs, the firmware interface init
8264 * is done from the IFF_UP context.
8265 */
8266 static int
8267 dhd_static_if_open(struct net_device *net)
8268 {
8269 s32 ret = 0;
8270 struct bcm_cfg80211 *cfg;
8271 struct net_device *primary_netdev = NULL;
8272
8273 cfg = wl_get_cfg(net);
8274 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
8275
8276 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
8277 		DHD_TRACE(("non-static interface (%s), do nothing\n", net->name));
8278 ret = BCME_OK;
8279 goto done;
8280 }
8281
8282 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
8283 /* Ensure fw is initialized. If it is already initialized,
8284 * dhd_open will return success.
8285 */
8286 ret = dhd_open(primary_netdev);
8287 if (unlikely(ret)) {
8288 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
8289 goto done;
8290 }
8291
8292 ret = wl_cfg80211_static_if_open(net);
8293 if (!ret) {
8294 /* Allow transmit calls */
8295 netif_start_queue(net);
8296 }
8297 done:
8298 return ret;
8299 }
8300
8301 static int
8302 dhd_static_if_stop(struct net_device *net)
8303 {
8304 struct bcm_cfg80211 *cfg;
8305 struct net_device *primary_netdev = NULL;
8306 int ret = BCME_OK;
8307 dhd_info_t *dhd = DHD_DEV_INFO(net);
8308
8309 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
8310
8311 /* Ensure queue is disabled */
8312 netif_tx_disable(net);
8313
8314 cfg = wl_get_cfg(net);
8315 if (!IS_CFG80211_STATIC_IF(cfg, net)) {
8316 		DHD_TRACE(("non-static interface (%s), do nothing\n", net->name));
8317 return BCME_OK;
8318 }
8319
8320 ret = wl_cfg80211_static_if_close(net);
8321
8322 if (dhd->pub.up == 0) {
8323 /* If fw is down, return */
8324 DHD_ERROR(("fw down\n"));
8325 return BCME_OK;
8326 }
8327 	/* If the STA iface is not operational, invoke dhd_stop from this
8328 	 * context.
8329 	 */
8330 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
8331 if (!(primary_netdev->flags & IFF_UP)) {
8332 ret = dhd_stop(primary_netdev);
8333 } else {
8334 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
8335 }
8336
8337 return ret;
8338 }
8339 #endif /* WL_STATIC_IF && WL_CFG80211 */
8340
8341 int dhd_do_driver_init(struct net_device *net)
8342 {
8343 dhd_info_t *dhd = NULL;
8344
8345 if (!net) {
8346 DHD_ERROR(("Primary Interface not initialized \n"));
8347 return -EINVAL;
8348 }
8349
8350 #ifdef MULTIPLE_SUPPLICANT
8351 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(BCMSDIO)
8352 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
8353 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
8354 return 0;
8355 }
8356 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && BCMSDIO */
8357 #endif /* MULTIPLE_SUPPLICANT */
8358
8360 dhd = DHD_DEV_INFO(net);
8361
8362 /* If driver is already initialized, do nothing
8363 */
8364 if (dhd->pub.busstate == DHD_BUS_DATA) {
8365 		DHD_TRACE(("Driver already initialized. Nothing to do\n"));
8366 return 0;
8367 }
8368
8369 if (dhd_open(net) < 0) {
8370 DHD_ERROR(("Driver Init Failed \n"));
8371 return -1;
8372 }
8373
8374 return 0;
8375 }
8376
8377 int
8378 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8379 {
8380
8381 #ifdef WL_CFG80211
8382 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8383 ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
8384 return BCME_OK;
8385 #endif // endif
8386
8387 /* handle IF event caused by wl commands, SoftAP, WEXT and
8388 * anything else. This has to be done asynchronously otherwise
8389 * DPC will be blocked (and iovars will timeout as DPC has no chance
8390 * to read the response back)
8391 */
8392 if (ifevent->ifidx > 0) {
8393 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8394 if (if_event == NULL) {
8395 			DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes\n",
8396 MALLOCED(dhdinfo->pub.osh)));
8397 return BCME_NOMEM;
8398 }
8399
8400 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8401 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8402 strncpy(if_event->name, name, IFNAMSIZ);
8403 if_event->name[IFNAMSIZ - 1] = '\0';
8404 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
8405 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8406 }
8407
8408 return BCME_OK;
8409 }
8410
8411 int
8412 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8413 {
8414 dhd_if_event_t *if_event;
8415
8416 #ifdef WL_CFG80211
8417 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8418 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8419 return BCME_OK;
8420 #endif /* WL_CFG80211 */
8421
8422 /* handle IF event caused by wl commands, SoftAP, WEXT and
8423 * anything else
8424 */
8425 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8426 if (if_event == NULL) {
8427 		DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes\n",
8428 MALLOCED(dhdinfo->pub.osh)));
8429 return BCME_NOMEM;
8430 }
8431 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8432 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8433 strncpy(if_event->name, name, IFNAMSIZ);
8434 if_event->name[IFNAMSIZ - 1] = '\0';
8435 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
8436 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8437
8438 return BCME_OK;
8439 }
8440
8441 int
8442 dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8443 {
8444 #ifdef WL_CFG80211
8445 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8446 ifevent->ifidx, name, mac, ifevent->bssidx);
8447 #endif /* WL_CFG80211 */
8448 return BCME_OK;
8449 }
8450
8451 #ifdef WL_NATOE
8452 /* Handler to update natoe info and bind with new subscriptions if there is a change in config */
8453 static void
8454 dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event)
8455 {
8456 	dhd_info_t *dhd = handle;
8457 	wl_event_data_natoe_t *natoe = event_info;
8458 	dhd_nfct_info_t *nfct;
8459
8460 	if (event != DHD_WQ_WORK_NATOE_EVENT) {
8461 		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8462 		return;
8463 	}
8464 	if (!dhd) {
8465 		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8466 		return;
8467 	}
8468 	nfct = dhd->pub.nfct; /* safe to dereference only after the NULL check above */
8469 if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
8470 (natoe->start_port < natoe->end_port)) {
8471 /* Rebind subscriptions to start receiving notifications from groups */
8472 if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
8473 dhd_ct_close(nfct);
8474 }
8475 dhd_ct_send_dump_req(nfct);
8476 } else if (!natoe->natoe_active) {
8477 /* Rebind subscriptions to stop receiving notifications from groups */
8478 if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
8479 dhd_ct_close(nfct);
8480 }
8481 }
8482 }
8483
8484 /* When a NATOE enable/disable event is received, we have to bind to the new NL
8485  * subscriptions. Schedule a workq to switch out of tasklet context, as the bind call may sleep
8486  */
8487 int
8488 dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
8489 {
8490 wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
8491
8492 if (dhd->nfct) {
8493 wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
8494 uint8 prev_enable = natoe->natoe_active;
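		/* Cache the previous state so work is scheduled only on an actual
		 * enable/disable transition
		 */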
8495
8496 spin_lock_bh(&dhd->nfct_lock);
8497 memcpy(natoe, event_data, sizeof(*event_data));
8498 spin_unlock_bh(&dhd->nfct_lock);
8499
8500 if (prev_enable != event_data->natoe_active) {
8501 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
8502 (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
8503 dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW);
8504 }
8505 return BCME_OK;
8506 }
8507 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
8508 return BCME_ERROR;
8509 }
8510
8511 /* Handler to send natoe ioctl to dongle */
8512 static void
8513 dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
8514 {
8515 dhd_info_t *dhd = handle;
8516 dhd_ct_ioc_t *ct_ioc = event_info;
8517
8518 if (event != DHD_WQ_WORK_NATOE_IOCTL) {
8519 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
8520 return;
8521 }
8522
8523 if (!dhd) {
8524 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
8525 return;
8526 }
8527
8528 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
8529 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
8530 }
8531 }
8532
8533 /* When a Netlink message contains port collision info, it must be sent to the dongle FW.
8534  * Switch context out of softirq/tasklet by scheduling a workq for the natoe_ct ioctl
8535  */
8536 void
8537 dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
8538 {
8539
8540 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
8541 DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
8542 DHD_WQ_WORK_PRIORITY_HIGH);
8543 }
8544 #endif /* WL_NATOE */
8545
8546 /* This API maps ndev to ifp inclusive of static IFs */
8547 static dhd_if_t *
8548 dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
8549 {
8550 dhd_if_t *ifp = NULL;
8551 #ifdef WL_STATIC_IF
8552 u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
8553 #else
8554 u32 ifidx = (DHD_MAX_IFS - 1);
8555 #endif /* WL_STATIC_IF */
8556
8557 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
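	/* Scan from the highest slot downwards so static interfaces
	 * (stored beyond DHD_MAX_IFS) are matched as well
	 */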
8558 do {
8559 ifp = dhdinfo->iflist[ifidx];
8560 if (ifp && (ifp->net == ndev)) {
8561 DHD_TRACE(("match found for %s. ifidx:%d\n",
8562 ndev->name, ifidx));
8563 return ifp;
8564 }
8565 } while (ifidx--);
8566
8567 DHD_ERROR(("no entry found for %s\n", ndev->name));
8568 return NULL;
8569 }
8570
8571 bool
8572 dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
8573 {
8574 dhd_if_t *ifp = NULL;
8575
8576 if (!dhdp || !ndev) {
8577 DHD_ERROR(("wrong input\n"));
8578 ASSERT(0);
8579 return false;
8580 }
8581
8582 ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
8583 return (ifp && (ifp->static_if == true));
8584 }
8585
8586 #ifdef WL_STATIC_IF
8587 /* In some cases, while registering an I/F, the actual ifidx, bssidx and dngl_name
8588  * are not known (e.g. the static i/f case). This function allows updating them
8589  * once they are known.
8590  */
8591 s32
8592 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
8593 uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
8594 {
8595 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
8596 dhd_if_t *ifp, *ifp_new;
8597 s32 cur_idx;
8598 dhd_dev_priv_t * dev_priv;
8599
8600 DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
8601 if_state, ifidx));
8602
8603 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
8604
8605 if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
8606 return -ENODEV;
8607 }
8608 cur_idx = ifp->idx;
8609
8610 if (if_state == NDEV_STATE_OS_IF_CREATED) {
8611 /* mark static if */
8612 ifp->static_if = TRUE;
8613 return BCME_OK;
8614 }
8615
8616 ifp_new = dhdinfo->iflist[ifidx];
8617 if (ifp_new && (ifp_new != ifp)) {
8618 /* There should be only one entry for a given ifidx. */
8619 DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
8620 ASSERT(0);
8621 dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
8622 net_os_send_hang_message(ifp->net);
8623 return -EINVAL;
8624 }
8625
8626 /* For static if delete case, cleanup the if before ifidx update */
8627 if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
8628 (if_state == NDEV_STATE_FW_IF_FAILED)) {
8629 dhd_cleanup_if(ifp->net);
8630 dev_priv = DHD_DEV_PRIV(ndev);
8631 dev_priv->ifidx = ifidx;
8632 }
8633
8634 /* update the iflist ifidx slot with cached info */
8635 dhdinfo->iflist[ifidx] = ifp;
8636 dhdinfo->iflist[cur_idx] = NULL;
8637
8638 /* update the values */
8639 ifp->idx = ifidx;
8640 ifp->bssidx = bssidx;
8641
8642 if (if_state == NDEV_STATE_FW_IF_CREATED) {
8643 dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
8644 /* initialize the dongle provided if name */
8645 if (dngl_name) {
8646 strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8647 } else if (ndev->name[0] != '\0') {
8648 strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
8649 }
8650 if (mac != NULL)
8651 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8652 }
8653 DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
8654 ifidx, cur_idx, if_state));
8655 return BCME_OK;
8656 }
8657 #endif /* WL_STATIC_IF */
8658
8659 /* unregister and free the existing net_device interface (if any) in iflist and
8660  * allocate a new one. The slot is reused. This function does NOT register the
8661  * new interface with the Linux kernel; dhd_register_if does that job
8662  */
8663 struct net_device*
8664 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
8665 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
8666 {
8667 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8668 dhd_if_t *ifp;
8669
8670 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
8671
8672 ifp = dhdinfo->iflist[ifidx];
8673
8674 if (ifp != NULL) {
8675 if (ifp->net != NULL) {
8676 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8677 __FUNCTION__, ifp->net->name, ifidx));
8678
8679 if (ifidx == 0) {
8680 /* For primary ifidx (0), there shouldn't be
8681 * any netdev present already.
8682 */
8683 DHD_ERROR(("Primary ifidx populated already\n"));
8684 ASSERT(0);
8685 return NULL;
8686 }
8687
8688 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
8689
8690 /* in unregister_netdev case, the interface gets freed by net->destructor
8691 * (which is set to free_netdev)
8692 */
8693 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8694 free_netdev(ifp->net);
8695 } else {
8696 netif_stop_queue(ifp->net);
8697 if (need_rtnl_lock)
8698 unregister_netdev(ifp->net);
8699 else
8700 unregister_netdevice(ifp->net);
8701 }
8702 ifp->net = NULL;
8703 }
8704 } else {
8705 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
8706 if (ifp == NULL) {
8707 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
8708 return NULL;
8709 }
8710 }
8711
8712 memset(ifp, 0, sizeof(dhd_if_t));
8713 ifp->info = dhdinfo;
8714 ifp->idx = ifidx;
8715 ifp->bssidx = bssidx;
8716 #ifdef DHD_MCAST_REGEN
8717 ifp->mcast_regen_bss_enable = FALSE;
8718 #endif // endif
8719 /* set to TRUE rx_pkt_chainable at alloc time */
8720 ifp->rx_pkt_chainable = TRUE;
8721
8722 if (mac != NULL)
8723 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8724
8725 /* Allocate etherdev, including space for private structure */
8726 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
8727 if (ifp->net == NULL) {
8728 		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, (size_t)DHD_DEV_PRIV_SIZE));
8729 goto fail;
8730 }
8731
8732 /* Setup the dhd interface's netdevice private structure. */
8733 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
8734
8735 if (name && name[0]) {
8736 strncpy(ifp->net->name, name, IFNAMSIZ);
8737 ifp->net->name[IFNAMSIZ - 1] = '\0';
8738 }
8739
8740 #ifdef WL_CFG80211
8741 if (ifidx == 0)
8742 ifp->net->destructor = free_netdev;
8743 else
8744 ifp->net->destructor = dhd_netdev_free;
8745 #else
8746 ifp->net->destructor = free_netdev;
8747 #endif /* WL_CFG80211 */
8748 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
8749 ifp->name[IFNAMSIZ - 1] = '\0';
8750 dhdinfo->iflist[ifidx] = ifp;
8751
8752 /* initialize the dongle provided if name */
8753 if (dngl_name) {
8754 		strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8755 	} else if (name) {
8756 		strlcpy(ifp->dngl_name, name, IFNAMSIZ);
8757 }
8758
8759 #ifdef PCIE_FULL_DONGLE
8760 /* Initialize STA info list */
8761 INIT_LIST_HEAD(&ifp->sta_list);
8762 DHD_IF_STA_LIST_LOCK_INIT(ifp);
8763 #endif /* PCIE_FULL_DONGLE */
8764
8765 #ifdef DHD_L2_FILTER
8766 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
8767 ifp->parp_allnode = TRUE;
8768 #endif /* DHD_L2_FILTER */
8769
8770 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8771
8772 #ifdef DHDTCPSYNC_FLOOD_BLK
8773 INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
8774 dhd_reset_tcpsync_info_by_ifp(ifp);
8775 #endif /* DHDTCPSYNC_FLOOD_BLK */
8776
8777 return ifp->net;
8778
8779 fail:
8780 if (ifp != NULL) {
8781 if (ifp->net != NULL) {
8782 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8783 if (ifp->net == dhdinfo->rx_napi_netdev) {
8784 napi_disable(&dhdinfo->rx_napi_struct);
8785 netif_napi_del(&dhdinfo->rx_napi_struct);
8786 skb_queue_purge(&dhdinfo->rx_napi_queue);
8787 dhdinfo->rx_napi_netdev = NULL;
8788 }
8789 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8790 dhd_dev_priv_clear(ifp->net);
8791 free_netdev(ifp->net);
8792 ifp->net = NULL;
8793 }
8794 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8795 ifp = NULL;
8796 }
8797
8798 dhdinfo->iflist[ifidx] = NULL;
8799 return NULL;
8800 }
8801
8802 static void
8803 dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
8804 {
8805 #ifdef PCIE_FULL_DONGLE
8806 s32 ifidx = 0;
8807 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8808 #endif /* PCIE_FULL_DONGLE */
8809
8810 if (ifp != NULL) {
8811 if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
8812 DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
8813 ASSERT(0);
8814 return;
8815 }
8816 #ifdef DHD_L2_FILTER
8817 		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
8818 			NULL, FALSE, dhdp->tickcnt);
8819 		deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
8820 ifp->phnd_arp_table = NULL;
8821 #endif /* DHD_L2_FILTER */
8822
8823 dhd_if_del_sta_list(ifp);
8824 #ifdef PCIE_FULL_DONGLE
8825 /* Delete flowrings of virtual interface */
8826 ifidx = ifp->idx;
8827 if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
8828 dhd_flow_rings_delete(dhdp, ifidx);
8829 }
8830 #endif /* PCIE_FULL_DONGLE */
8831 }
8832 }
8833
8834 void
8835 dhd_cleanup_if(struct net_device *net)
8836 {
8837 dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
8838 dhd_pub_t *dhdp = &dhdinfo->pub;
8839 dhd_if_t *ifp;
8840
8841 if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
8842 (ifp->idx >= DHD_MAX_IFS)) {
8843 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
8844 ASSERT(0);
8845 return;
8846 }
8847
8848 dhd_cleanup_ifp(dhdp, ifp);
8849 }
8850
8851 /* unregister and free the net_device interface associated with the indexed
8852  * slot, also free the slot memory and set the slot pointer to NULL
8853  */
8854 #define DHD_TX_COMPLETION_TIMEOUT 5000 /* ms */
8855 int
8856 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
8857 {
8858 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8859 dhd_if_t *ifp;
8860 unsigned long flags;
8861 long timeout;
8862
8863 ifp = dhdinfo->iflist[ifidx];
8864
8865 if (ifp != NULL) {
8866 #ifdef DHDTCPSYNC_FLOOD_BLK
8867 cancel_work_sync(&ifp->blk_tsfl_work);
8868 #endif /* DHDTCPSYNC_FLOOD_BLK */
8869 #ifdef WL_STATIC_IF
8870 /* static IF will be handled in detach */
8871 if (ifp->static_if) {
8872 DHD_TRACE(("Skip del iface for static interface\n"));
8873 return BCME_OK;
8874 }
8875 #endif /* WL_STATIC_IF */
8876 if (ifp->net != NULL) {
8877 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
8878
8879 DHD_GENERAL_LOCK(dhdpub, flags);
8880 ifp->del_in_progress = true;
8881 DHD_GENERAL_UNLOCK(dhdpub, flags);
8882
8883 /* If TX is in progress, hold the if del */
8884 if (DHD_IF_IS_TX_ACTIVE(ifp)) {
8885 				DHD_INFO(("TX in progress. Wait for it to complete.\n"));
8886 timeout = wait_event_timeout(dhdpub->tx_completion_wait,
8887 ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
8888 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
8889 if (!timeout) {
8890 /* Tx completion timeout. Attempt proceeding ahead */
8891 DHD_ERROR(("Tx completion timed out!\n"));
8892 ASSERT(0);
8893 }
8894 } else {
8895 DHD_TRACE(("No outstanding TX!\n"));
8896 }
8897 dhdinfo->iflist[ifidx] = NULL;
8898 /* in unregister_netdev case, the interface gets freed by net->destructor
8899 * (which is set to free_netdev)
8900 */
8901 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8902 free_netdev(ifp->net);
8903 } else {
8904 netif_tx_disable(ifp->net);
8905
8906 #if defined(SET_RPS_CPUS)
8907 custom_rps_map_clear(ifp->net->_rx);
8908 #endif /* SET_RPS_CPUS */
8909 #if (defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL))
8910 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
8911 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
8912 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
8913 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
8914 if (need_rtnl_lock)
8915 unregister_netdev(ifp->net);
8916 else
8917 unregister_netdevice(ifp->net);
8918 }
8919 ifp->net = NULL;
8920 DHD_GENERAL_LOCK(dhdpub, flags);
8921 ifp->del_in_progress = false;
8922 DHD_GENERAL_UNLOCK(dhdpub, flags);
8923 }
8924 dhd_cleanup_ifp(dhdpub, ifp);
8925 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8926
8927 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8928 ifp = NULL;
8929 }
8930
8931 return BCME_OK;
8932 }
8933
8934 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
8935 static struct net_device_ops dhd_ops_pri = {
8936 .ndo_open = dhd_pri_open,
8937 .ndo_stop = dhd_pri_stop,
8938 .ndo_get_stats = dhd_get_stats,
8939 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8940 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8941 .ndo_start_xmit = dhd_start_xmit_wrapper,
8942 #else
8943 .ndo_do_ioctl = dhd_ioctl_entry,
8944 .ndo_start_xmit = dhd_start_xmit,
8945 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8946 .ndo_set_mac_address = dhd_set_mac_address,
8947 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8948 .ndo_set_rx_mode = dhd_set_multicast_list,
8949 #else
8950 .ndo_set_multicast_list = dhd_set_multicast_list,
8951 #endif // endif
8952 };
8953
8954 static struct net_device_ops dhd_ops_virt = {
8955 #if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8956 .ndo_open = dhd_static_if_open,
8957 .ndo_stop = dhd_static_if_stop,
8958 #endif // endif
8959 .ndo_get_stats = dhd_get_stats,
8960 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8961 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8962 .ndo_start_xmit = dhd_start_xmit_wrapper,
8963 #else
8964 .ndo_do_ioctl = dhd_ioctl_entry,
8965 .ndo_start_xmit = dhd_start_xmit,
8966 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8967 .ndo_set_mac_address = dhd_set_mac_address,
8968 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8969 .ndo_set_rx_mode = dhd_set_multicast_list,
8970 #else
8971 .ndo_set_multicast_list = dhd_set_multicast_list,
8972 #endif // endif
8973 };
8974 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
8975
8976 int
8977 dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
8978 unsigned long buflen)
8979 {
8980 loff_t wr_posn = *posn;
8981
8982 if (!fp || !buf || buflen == 0)
8983 return -1;
8984
8985 if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
8986 return -1;
8987
8988 *posn = wr_posn;
8989 return 0;
8990 }
8991
8992 #ifdef SHOW_LOGTRACE
8993 int
8994 dhd_os_read_file(void *file, char *buf, uint32 size)
8995 {
8996 struct file *filep = (struct file *)file;
8997
8998 if (!file || !buf)
8999 return -1;
9000
9001 return vfs_read(filep, buf, size, &filep->f_pos);
9002 }
9003
9004 int
9005 dhd_os_seek_file(void *file, int64 offset)
9006 {
9007 struct file *filep = (struct file *)file;
9008 if (!file)
9009 return -1;
9010
9011 	/* offset can be negative; the seek is relative to the current position */
9012 filep->f_pos = filep->f_pos + offset;
9013
9014 return 0;
9015 }
9016
9017 static int
9018 dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
9019 {
9020 struct file *filep = NULL;
9021 struct kstat stat;
9022 mm_segment_t fs;
9023 char *raw_fmts = NULL;
9024 int logstrs_size = 0;
9025 int error = 0;
9026
9027 fs = get_fs();
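	/* Lift the user/kernel address-limit check so the vfs_* calls below accept
	 * kernel buffers; restored via set_fs(fs) on every exit path
	 */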
9028 set_fs(KERNEL_DS);
9029
9030 filep = filp_open(logstrs_path, O_RDONLY, 0);
9031
9032 if (IS_ERR(filep)) {
9033 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
9034 goto fail;
9035 }
9036 error = vfs_stat(logstrs_path, &stat);
9037 if (error) {
9038 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
9039 goto fail;
9040 }
9041 logstrs_size = (int) stat.size;
9042
9043 if (logstrs_size == 0) {
9044 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9045 goto fail1;
9046 }
9047
9048 raw_fmts = MALLOC(osh, logstrs_size);
9049 if (raw_fmts == NULL) {
9050 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
9051 goto fail;
9052 }
9053
9054 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
9055 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
9056 goto fail;
9057 }
9058
9059 if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
9060 == BCME_OK) {
9061 filp_close(filep, NULL);
9062 set_fs(fs);
9063 return BCME_OK;
9064 }
9065
9066 fail:
9067 if (raw_fmts) {
9068 MFREE(osh, raw_fmts, logstrs_size);
9069 raw_fmts = NULL;
9070 }
9071
9072 fail1:
9073 if (!IS_ERR(filep))
9074 filp_close(filep, NULL);
9075
9076 set_fs(fs);
9077 temp->fmts = NULL;
9078 return BCME_ERROR;
9079 }
9080
9081 static int
9082 dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
9083 uint32 *rodata_end)
9084 {
9085 struct file *filep = NULL;
9086 mm_segment_t fs;
9087 int err = BCME_ERROR;
9088
9089 if (fname == NULL) {
9090 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
9091 return BCME_ERROR;
9092 }
9093
9094 fs = get_fs();
9095 set_fs(KERNEL_DS);
9096
9097 filep = filp_open(fname, O_RDONLY, 0);
9098 if (IS_ERR(filep)) {
9099 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
9100 goto fail;
9101 }
9102
9103 if ((err = dhd_parse_map_file(osh, filep, ramstart,
9104 rodata_start, rodata_end)) < 0)
9105 goto fail;
9106
9107 fail:
9108 if (!IS_ERR(filep))
9109 filp_close(filep, NULL);
9110
9111 set_fs(fs);
9112
9113 return err;
9114 }
9115
9116 static int
9117 dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
9118 {
9119 struct file *filep = NULL;
9120 mm_segment_t fs;
9121 char *raw_fmts = NULL;
9122 uint32 logstrs_size = 0;
9123 int error = 0;
9124 uint32 ramstart = 0;
9125 uint32 rodata_start = 0;
9126 uint32 rodata_end = 0;
9127 uint32 logfilebase = 0;
9128
9129 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
9130 if (error != BCME_OK) {
9131 DHD_ERROR(("readmap Error!! \n"));
9132 		/* skip event log parsing since the map file could not be read */
9133 if (strstr(str_file, ram_file_str) != NULL) {
9134 temp->raw_sstr = NULL;
9135 } else if (strstr(str_file, rom_file_str) != NULL) {
9136 temp->rom_raw_sstr = NULL;
9137 }
9138 return error;
9139 }
9140 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9141 ramstart, rodata_start, rodata_end));
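	/* The log-format strings live in the image's rodata section; its offset
	 * within the file is (rodata_start - ramstart), computed below
	 */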
9142
9143 fs = get_fs();
9144 set_fs(KERNEL_DS);
9145
9146 filep = filp_open(str_file, O_RDONLY, 0);
9147 if (IS_ERR(filep)) {
9148 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
9149 goto fail;
9150 }
9151
9152 if (TRUE) {
9153 /* Full file size is huge. Just read required part */
9154 logstrs_size = rodata_end - rodata_start;
9155 logfilebase = rodata_start - ramstart;
9156 }
9157
9158 if (logstrs_size == 0) {
9159 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9160 goto fail1;
9161 }
9162
9163 raw_fmts = MALLOC(osh, logstrs_size);
9164 if (raw_fmts == NULL) {
9165 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
9166 goto fail;
9167 }
9168
9169 if (TRUE) {
9170 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
9171 if (error < 0) {
9172 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
9173 goto fail;
9174 }
9175 }
9176
9177 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
9178 if (error != logstrs_size) {
9179 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
9180 goto fail;
9181 }
9182
9183 if (strstr(str_file, ram_file_str) != NULL) {
9184 temp->raw_sstr = raw_fmts;
9185 temp->raw_sstr_size = logstrs_size;
9186 temp->rodata_start = rodata_start;
9187 temp->rodata_end = rodata_end;
9188 } else if (strstr(str_file, rom_file_str) != NULL) {
9189 temp->rom_raw_sstr = raw_fmts;
9190 temp->rom_raw_sstr_size = logstrs_size;
9191 temp->rom_rodata_start = rodata_start;
9192 temp->rom_rodata_end = rodata_end;
9193 }
9194
9195 filp_close(filep, NULL);
9196 set_fs(fs);
9197
9198 return BCME_OK;
9199
9200 fail:
9201 if (raw_fmts) {
9202 MFREE(osh, raw_fmts, logstrs_size);
9203 raw_fmts = NULL;
9204 }
9205
9206 fail1:
9207 if (!IS_ERR(filep))
9208 filp_close(filep, NULL);
9209
9210 set_fs(fs);
9211
9212 if (strstr(str_file, ram_file_str) != NULL) {
9213 temp->raw_sstr = NULL;
9214 } else if (strstr(str_file, rom_file_str) != NULL) {
9215 temp->rom_raw_sstr = NULL;
9216 }
9217
9218 return error;
9219 } /* dhd_init_static_strs_array */
9220
9221 static int
9222 dhd_trace_open_proc(struct inode *inode, struct file *file)
9223 {
9224 return single_open(file, NULL, NULL);
9225 }
9226
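/*
 * /proc/dhd_trace read handler: copy one trace buffer chunk to user space
 * under dhd_trace_lock. Returns the chunk size, BUF_NOT_AVAILABLE when no
 * more data is pending, -EFAULT if the copy-out fails, or BCME_ERROR if the
 * temporary buffer cannot be allocated.
 */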
9227 ssize_t
9228 dhd_trace_read_proc(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
9229 {
9230 trace_buf_info_t *trace_buf_info;
9231 int ret = BCME_ERROR;
9232
9233 ASSERT(g_dhd_pub);
9234 mutex_lock(&g_dhd_pub->dhd_trace_lock);
9235 trace_buf_info = (trace_buf_info_t *)MALLOC(g_dhd_pub->osh,
9236 sizeof(trace_buf_info_t));
9237 if (trace_buf_info) {
9238 dhd_get_read_buf_ptr(g_dhd_pub, trace_buf_info);
9239 if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
9240 {
9241 ret = -EFAULT;
9242 goto exit;
9243 }
9244 if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
9245 ret = BUF_NOT_AVAILABLE;
9246 else
9247 ret = trace_buf_info->size;
9248 } else
9249 DHD_ERROR(("Memory allocation Failed\n"));
9250
9251 exit:
9252 if (trace_buf_info) {
9253 MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
9254 }
9255 mutex_unlock(&g_dhd_pub->dhd_trace_lock);
9256 return ret;
9257 }
9258 #endif /* SHOW_LOGTRACE */
9259
9260 #ifdef DHD_ERPOM
9261 uint enable_erpom = 0;
9262 module_param(enable_erpom, int, 0);
9263
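/*
 * ERPOM power-off callback. When the reset is requested on behalf of BT,
 * optionally capture SSSR and memory dumps first; then pause the TX queues
 * and reset the dongle with dongle_isolation set so that dhd_bus_devreset()
 * does not perform a second FLR.
 */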
9264 int
9265 dhd_wlan_power_off_handler(void *handler, unsigned char reason)
9266 {
9267 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
9268 bool dongle_isolation = dhdp->dongle_isolation;
9269
9270 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
9271
9272 if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
9273 #if defined(DHD_FW_COREDUMP)
9274 /* save core dump to a file */
9275 if (dhdp->memdump_enabled) {
9276 #ifdef DHD_SSSR_DUMP
9277 if (dhdp->sssr_inited) {
9278 dhdp->info->no_wq_sssrdump = TRUE;
9279 dhd_bus_sssr_dump(dhdp);
9280 dhdp->info->no_wq_sssrdump = FALSE;
9281 }
9282 #endif /* DHD_SSSR_DUMP */
9283 dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
9284 dhd_bus_mem_dump(dhdp);
9285 }
9286 #endif /* DHD_FW_COREDUMP */
9287 }
9288
9289 /* pause data on all the interfaces */
9290 dhd_bus_stop_queue(dhdp->bus);
9291
9292 /* The devreset below would perform another FLR; set dongle_isolation so it is skipped */
9293 dhdp->dongle_isolation = TRUE;
9294 dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
9295 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
9296 return 0;
9297 }
9298
9299 int
9300 dhd_wlan_power_on_handler(void *handler, unsigned char reason)
9301 {
9302 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
9303 bool dongle_isolation = dhdp->dongle_isolation;
9304
9305 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
9306 /* The devreset below would perform another FLR; set dongle_isolation so it is skipped */
9307 dhdp->dongle_isolation = TRUE;
9308 dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
9309 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
9310 /* resume data on all the interfaces */
9311 dhd_bus_start_queue(dhdp->bus);
9312 return 0;
9314 }
9315
9316 #endif /* DHD_ERPOM */
9317 /** Called once for each hardware (dongle) instance that this DHD manages */
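/*
 * Allocates and initializes the primary dhd_info/dhd_pub pair: the primary
 * network interface, locks, wait queues and wakelocks, the protocol and
 * cfg80211 layers, the watchdog/DPC/RXF kernel threads (or the DPC tasklet),
 * the PM and inetaddr notifiers, and the optional load-balancing machinery.
 * On failure, everything attached so far is torn down via dhd_detach().
 */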
9318 dhd_pub_t *
9319 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
9320 {
9321 dhd_info_t *dhd = NULL;
9322 struct net_device *net = NULL;
9323 char if_name[IFNAMSIZ] = {'\0'};
9324 uint32 bus_type = -1;
9325 uint32 bus_num = -1;
9326 uint32 slot_num = -1;
9327 #ifdef SHOW_LOGTRACE
9328 int ret;
9329 #endif /* SHOW_LOGTRACE */
9330 #ifdef DHD_ERPOM
9331 pom_func_handler_t *pom_handler;
9332 #endif /* DHD_ERPOM */
9333 wifi_adapter_info_t *adapter = NULL;
9334
9335 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
9336 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9337
9338 #ifdef PCIE_FULL_DONGLE
9339 ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
9340 ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
9341 #endif /* PCIE_FULL_DONGLE */
9342
9343 /* will implement get_ids for DBUS later */
9344 #if defined(BCMSDIO)
9345 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
9346 #endif // endif
9347 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
9348
9349 /* Allocate primary dhd_info */
9350 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
9351 if (dhd == NULL) {
9352 dhd = MALLOC(osh, sizeof(dhd_info_t));
9353 if (dhd == NULL) {
9354 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
9355 goto dhd_null_flag;
9356 }
9357 }
9358 memset(dhd, 0, sizeof(dhd_info_t));
9359 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
9360
9361 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
9362
9363 dhd->pub.osh = osh;
9364 #ifdef DUMP_IOCTL_IOV_LIST
9365 dll_init(&(dhd->pub.dump_iovlist_head));
9366 #endif /* DUMP_IOCTL_IOV_LIST */
9367 dhd->adapter = adapter;
9368 #ifdef BT_OVER_SDIO
9369 dhd->pub.is_bt_recovery_required = FALSE;
9370 mutex_init(&dhd->bus_user_lock);
9371 #endif /* BT_OVER_SDIO */
9372
9373 #ifdef DHD_DEBUG
9374 dll_init(&(dhd->pub.mw_list_head));
9375 #endif /* DHD_DEBUG */
9376
9377 #ifdef GET_CUSTOM_MAC_ENABLE
9378 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
9379 #endif /* GET_CUSTOM_MAC_ENABLE */
9380 #ifdef CUSTOM_FORCE_NODFS_FLAG
9381 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
9382 dhd->pub.force_country_change = TRUE;
9383 #endif /* CUSTOM_FORCE_NODFS_FLAG */
9384 #ifdef CUSTOM_COUNTRY_CODE
9385 get_customized_country_code(dhd->adapter,
9386 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
9387 dhd->pub.dhd_cflags);
9388 #endif /* CUSTOM_COUNTRY_CODE */
9389 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
9390 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
9391 #ifdef DHD_WET
9392 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
9393 #endif /* DHD_WET */
9394 /* Initialize thread based operation and lock */
9395 sema_init(&dhd->sdsem, 1);
9396
9397 /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the
9398 * firmware name. This is admittedly a hack, but it has to keep working until a
9399 * better solution is in place.
9400 */
9401 dhd_update_fw_nv_path(dhd);
9402 dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
9403
9404 /* Link to info module */
9405 dhd->pub.info = dhd;
9406
9407 /* Link to bus module */
9408 dhd->pub.bus = bus;
9409 dhd->pub.hdrlen = bus_hdrlen;
9410
9411 /* Set network interface name if it was provided as module parameter */
9412 if (iface_name[0]) {
9413 int len;
9414 char ch;
9415 strncpy(if_name, iface_name, IFNAMSIZ);
9416 if_name[IFNAMSIZ - 1] = 0;
9417 len = strlen(if_name);
9418 ch = if_name[len - 1];
9419 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
9420 strncat(if_name, "%d", 2);
9421 }
9422
9423 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
9424 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
9425 if (net == NULL) {
9426 goto fail;
9427 }
9428 mutex_init(&dhd->pub.ndev_op_sync);
9429 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
9430 /* Init ARGOS notifier data */
9431 argos_wifi.notifier_call = NULL;
9432 argos_p2p.notifier_call = NULL;
9433 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
9434
9435 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
9436 #ifdef DHD_L2_FILTER
9437 /* initialize the l2_filter_cnt */
9438 dhd->pub.l2_filter_cnt = 0;
9439 #endif // endif
9440 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9441 net->open = NULL;
9442 #else
9443 net->netdev_ops = NULL;
9444 #endif // endif
9445
9446 mutex_init(&dhd->dhd_iovar_mutex);
9447 sema_init(&dhd->proto_sem, 1);
9448 #ifdef DHD_ULP
9449 if (!(dhd_ulp_init(osh, &dhd->pub)))
9450 goto fail;
9451 #endif /* DHD_ULP */
9452
9453 #if defined(DHD_HANG_SEND_UP_TEST)
9454 dhd->pub.req_hang_type = 0;
9455 #endif /* DHD_HANG_SEND_UP_TEST */
9456
9457 #ifdef PROP_TXSTATUS
9458 spin_lock_init(&dhd->wlfc_spinlock);
9459
9460 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
9461 dhd->pub.plat_init = dhd_wlfc_plat_init;
9462 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
9463
9464 #ifdef DHD_WLFC_THREAD
9465 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
9466 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
9467 if (IS_ERR(dhd->pub.wlfc_thread)) {
9468 DHD_ERROR(("create wlfc thread failed\n"));
9469 goto fail;
9470 } else {
9471 wake_up_process(dhd->pub.wlfc_thread);
9472 }
9473 #endif /* DHD_WLFC_THREAD */
9474 #endif /* PROP_TXSTATUS */
9475
9476 /* Initialize other structure content */
9477 init_waitqueue_head(&dhd->ioctl_resp_wait);
9478 init_waitqueue_head(&dhd->d3ack_wait);
9479 init_waitqueue_head(&dhd->ctrl_wait);
9480 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
9481 init_waitqueue_head(&dhd->dmaxfer_wait);
9482 init_waitqueue_head(&dhd->pub.tx_completion_wait);
9483 dhd->pub.dhd_bus_busy_state = 0;
9484
9485 /* Initialize the spinlocks */
9486 spin_lock_init(&dhd->sdlock);
9487 spin_lock_init(&dhd->txqlock);
9488 spin_lock_init(&dhd->dhd_lock);
9489 spin_lock_init(&dhd->rxf_lock);
9490 #ifdef WLTDLS
9491 spin_lock_init(&dhd->pub.tdls_lock);
9492 #endif /* WLTDLS */
9493 #if defined(RXFRAME_THREAD)
9494 dhd->rxthread_enabled = TRUE;
9495 #endif /* defined(RXFRAME_THREAD) */
9496
9497 #ifdef DHDTCPACK_SUPPRESS
9498 spin_lock_init(&dhd->tcpack_lock);
9499 #endif /* DHDTCPACK_SUPPRESS */
9500
9501 /* Initialize Wakelock stuff */
9502 spin_lock_init(&dhd->wakelock_spinlock);
9503 spin_lock_init(&dhd->wakelock_evt_spinlock);
9504 DHD_OS_WAKE_LOCK_INIT(dhd);
9505 dhd->wakelock_counter = 0;
9506 /* wakelocks prevent a system from going into a low power state */
9507 #ifdef CONFIG_HAS_WAKELOCK
9508 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
9509 #endif /* CONFIG_HAS_WAKELOCK */
9510
9511 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
9512 mutex_init(&dhd->dhd_net_if_mutex);
9513 mutex_init(&dhd->dhd_suspend_mutex);
9514 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
9515 mutex_init(&dhd->dhd_apf_mutex);
9516 #endif /* PKT_FILTER_SUPPORT && APF */
9517 #endif // endif
9518 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
9519
9520 /* Attach and link in the protocol */
9521 if (dhd_prot_attach(&dhd->pub) != 0) {
9522 DHD_ERROR(("dhd_prot_attach failed\n"));
9523 goto fail;
9524 }
9525 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
9526
9527 #ifdef WL_CFG80211
9528 spin_lock_init(&dhd->pub.up_lock);
9529 /* Attach and link in the cfg80211 */
9530 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
9531 DHD_ERROR(("wl_cfg80211_attach failed\n"));
9532 goto fail;
9533 }
9534
9535 dhd_monitor_init(&dhd->pub);
9536 dhd_state |= DHD_ATTACH_STATE_CFG80211;
9537 #endif // endif
9538
9539 #if defined(WL_WIRELESS_EXT)
9540 /* Attach and link in the iw */
9541 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
9542 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
9543 DHD_ERROR(("wl_iw_attach failed\n"));
9544 goto fail;
9545 }
9546 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
9547 }
9548 #endif /* defined(WL_WIRELESS_EXT) */
9549
9550 #ifdef SHOW_LOGTRACE
9551 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
9552 if (ret == BCME_OK) {
9553 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
9554 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
9555 rom_map_file_path);
9556 dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
9557 }
9558 #endif /* SHOW_LOGTRACE */
9559
9560 #ifdef DEBUGABILITY
9561 /* attach debug if support */
9562 if (dhd_os_dbg_attach(&dhd->pub)) {
9563 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
9564 goto fail;
9565 }
9566
9567 #if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
9568 /* enable verbose ring to support dump_trace_buf */
9569 dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
9570 #endif /* SHOW_LOGTRACE && DBG_RING_LOG_INIT_DEFAULT */
9571
9572 #ifdef DBG_PKT_MON
9573 dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
9574 #ifdef DBG_PKT_MON_INIT_DEFAULT
9575 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
9576 #endif /* DBG_PKT_MON_INIT_DEFAULT */
9577 #endif /* DBG_PKT_MON */
9578 #endif /* DEBUGABILITY */
9579
9580 #ifdef DHD_LOG_DUMP
9581 dhd_log_dump_init(&dhd->pub);
9582 #endif /* DHD_LOG_DUMP */
9583
9584 #ifdef DHD_PKT_LOGGING
9585 dhd_os_attach_pktlog(&dhd->pub);
9586 #endif /* DHD_PKT_LOGGING */
9587 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
9588 dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
9589 if (dhd->pub.hang_info == NULL) {
9590 DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
9591 }
9592 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
9593 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
9594 DHD_ERROR(("%s: Failed to initialize pool of %u STAs\n", __FUNCTION__, DHD_MAX_STA));
9595 goto fail;
9596 }
9597
9598 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9599 dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
9600 if (!dhd->tx_wq) {
9601 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
9602 goto fail;
9603 }
9604 dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
9605 if (!dhd->rx_wq) {
9606 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
9607 destroy_workqueue(dhd->tx_wq);
9608 dhd->tx_wq = NULL;
9609 goto fail;
9610 }
9611 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9612
9613 /* Set up the watchdog timer */
9614 init_timer(&dhd->timer);
9615 dhd->timer.data = (ulong)dhd;
9616 dhd->timer.function = dhd_watchdog;
9617 dhd->default_wd_interval = dhd_watchdog_ms;
9618
9619 if (dhd_watchdog_prio >= 0) {
9620 /* Initialize watchdog thread */
9621 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
9622 if (dhd->thr_wdt_ctl.thr_pid < 0) {
9623 goto fail;
9624 }
9625
9626 } else {
9627 dhd->thr_wdt_ctl.thr_pid = -1;
9628 }
9629
9630 #ifdef DHD_PCIE_RUNTIMEPM
9631 /* Set up the runtime PM idle-count timer */
9632 init_timer(&dhd->rpm_timer);
9633 dhd->rpm_timer.data = (ulong)dhd;
9634 dhd->rpm_timer.function = dhd_runtimepm;
9635 dhd->rpm_timer_valid = FALSE;
9636
9637 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
9638 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
9639 if (dhd->thr_rpm_ctl.thr_pid < 0) {
9640 goto fail;
9641 }
9642 #endif /* DHD_PCIE_RUNTIMEPM */
9643
9644 #ifdef SHOW_LOGTRACE
9645 skb_queue_head_init(&dhd->evt_trace_queue);
9646 if (proc_create("dhd_trace", S_IRUSR, NULL, &proc_file_fops) == NULL)
9647 DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
9648 mutex_init(&dhd->pub.dhd_trace_lock);
9649 #endif /* SHOW_LOGTRACE */
9650
9651 /* Set up the bottom half handler */
9652 if (dhd_dpc_prio >= 0) {
9653 /* Initialize DPC thread */
9654 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
9655 if (dhd->thr_dpc_ctl.thr_pid < 0) {
9656 goto fail;
9657 }
9658 } else {
9659 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_DPC_TASKLET_CTL) && \
9660 !defined(DHD_LB_IRQSET)
9661 if (!zalloc_cpumask_var(&dhd->pub.default_cpu_mask, GFP_KERNEL)) {
9662 DHD_ERROR(("dpc tasklet, zalloc_cpumask_var error\n"));
9663 dhd->pub.affinity_isdpc = FALSE;
9664 } else {
9665 if (!zalloc_cpumask_var(&dhd->pub.dpc_affinity_cpu_mask, GFP_KERNEL)) {
9666 DHD_ERROR(("dpc thread, dpc_affinity_cpu_mask error\n"));
9667 free_cpumask_var(dhd->pub.default_cpu_mask);
9668 dhd->pub.affinity_isdpc = FALSE;
9669 } else {
9670 unsigned int irq = -1;
9671 #ifdef BCMPCIE
9672 if (dhdpcie_get_pcieirq(bus, &irq)) {
9673 DHD_ERROR(("%s : Can't get interrupt number\n",
9674 __FUNCTION__));
9675 goto fail;
9676 }
9677 #endif /* BCMPCIE */
9678 #ifdef BCMSDIO
9679 irq = adapter->irq_num;
9680 #endif /* BCMSDIO */
9681
9682 cpumask_copy(dhd->pub.default_cpu_mask, &hmp_slow_cpu_mask);
9683 cpumask_or(dhd->pub.dpc_affinity_cpu_mask,
9684 dhd->pub.dpc_affinity_cpu_mask,
9685 cpumask_of(TASKLET_CPUCORE));
9686
9687 set_irq_cpucore(irq, dhd->pub.default_cpu_mask,
9688 dhd->pub.dpc_affinity_cpu_mask);
9689 dhd->pub.affinity_isdpc = TRUE;
9690 }
9691 }
9692 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_DPC_TASKLET_CTL && !DHD_LB_IRQSET */
9693 /* use tasklet for dpc */
9694 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
9695 dhd->thr_dpc_ctl.thr_pid = -1;
9696 }
9697
9698 if (dhd->rxthread_enabled) {
9699 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
9700 /* Initialize RXF thread */
9701 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
9702 if (dhd->thr_rxf_ctl.thr_pid < 0) {
9703 goto fail;
9704 }
9705 }
9706
9707 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
9708
9709 #if defined(CONFIG_PM_SLEEP)
9710 if (!dhd_pm_notifier_registered) {
9711 dhd_pm_notifier_registered = TRUE;
9712 dhd->pm_notifier.notifier_call = dhd_pm_callback;
9713 dhd->pm_notifier.priority = 10;
9714 register_pm_notifier(&dhd->pm_notifier);
9715 }
9716
9717 #endif /* CONFIG_PM_SLEEP */
9718
9719 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9720 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
9721 dhd->early_suspend.suspend = dhd_early_suspend;
9722 dhd->early_suspend.resume = dhd_late_resume;
9723 register_early_suspend(&dhd->early_suspend);
9724 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
9725 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9726
9727 #ifdef ARP_OFFLOAD_SUPPORT
9728 dhd->pend_ipaddr = 0;
9729 if (!dhd_inetaddr_notifier_registered) {
9730 dhd_inetaddr_notifier_registered = TRUE;
9731 register_inetaddr_notifier(&dhd_inetaddr_notifier);
9732 }
9733 #endif /* ARP_OFFLOAD_SUPPORT */
9734
9735 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9736 if (!dhd_inet6addr_notifier_registered) {
9737 dhd_inet6addr_notifier_registered = TRUE;
9738 register_inet6addr_notifier(&dhd_inet6addr_notifier);
9739 }
9740 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9741 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
9742 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9743 INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
9744 #endif // endif
9745 #ifdef DEBUG_CPU_FREQ
9746 dhd->new_freq = alloc_percpu(int);
9747 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
9748 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9749 #endif // endif
9750 #ifdef DHDTCPACK_SUPPRESS
9751 #ifdef BCMSDIO
9752 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
9753 #elif defined(BCMPCIE)
9754 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
9755 #else
9756 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
9757 #endif /* BCMSDIO */
9758 #endif /* DHDTCPACK_SUPPRESS */
9759
9760 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9761 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9762
9763 #ifdef DHD_DEBUG_PAGEALLOC
9764 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
9765 #endif /* DHD_DEBUG_PAGEALLOC */
9766
9767 #if defined(DHD_LB)
9768
9769 dhd_lb_set_default_cpus(dhd);
9770
9771 /* Initialize the CPU Masks */
9772 if (dhd_cpumasks_init(dhd) == 0) {
9773 /* Now we have the current CPU maps, run through candidacy */
9774 dhd_select_cpu_candidacy(dhd);
9775 /*
9776 * If we are able to initialize CPU masks, lets register to the
9777 * CPU Hotplug framework to change the CPU for each job dynamically
9778 * using candidacy algorithm.
9779 */
9780 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
9781 register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
9782 } else {
9783 /*
9784 * We are unable to initialize CPU masks, so candidacy algorithm
9785 * won't run, but still Load Balancing will be honoured based
9786 * on the CPUs allocated for a given job statically during init
9787 */
9788 dhd->cpu_notifier.notifier_call = NULL;
9789 DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
9790 __FUNCTION__));
9791 }
9792
9793 #ifdef DHD_LB_TXP
9794 #ifdef DHD_LB_TXP_DEFAULT_ENAB
9795 /* Turn ON the feature by default */
9796 atomic_set(&dhd->lb_txp_active, 1);
9797 #else
9798 /* Turn OFF the feature by default */
9799 atomic_set(&dhd->lb_txp_active, 0);
9800 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
9801 #endif /* DHD_LB_TXP */
9802
9803 DHD_LB_STATS_INIT(&dhd->pub);
9804
9805 /* Initialize the Load Balancing Tasklets and Napi object */
9806 #if defined(DHD_LB_TXC)
9807 tasklet_init(&dhd->tx_compl_tasklet,
9808 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
9809 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
9810 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
9811 #endif /* DHD_LB_TXC */
9812
9813 #if defined(DHD_LB_RXC)
9814 tasklet_init(&dhd->rx_compl_tasklet,
9815 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
9816 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
9817 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
9818 #endif /* DHD_LB_RXC */
9819
9820 #if defined(DHD_LB_RXP)
9821 __skb_queue_head_init(&dhd->rx_pend_queue);
9822 skb_queue_head_init(&dhd->rx_napi_queue);
9823 /* Initialize the work that dispatches NAPI job to a given core */
9824 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
9825 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
9826 #endif /* DHD_LB_RXP */
9827
9828 #if defined(DHD_LB_TXP)
9829 INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
9830 skb_queue_head_init(&dhd->tx_pend_queue);
9831 /* Initialize the work that dispatches TX job to a given core */
9832 tasklet_init(&dhd->tx_tasklet,
9833 dhd_lb_tx_handler, (ulong)(dhd));
9834 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
9835 #endif /* DHD_LB_TXP */
9836
9837 dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
9838 #endif /* DHD_LB */
9839
9840 #if defined(BCMPCIE)
9841 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
9842 if (dhd->pub.extended_trap_data == NULL) {
9843 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
9844 }
9845 #endif /* BCMPCIE */
9846
9847 #ifdef SHOW_LOGTRACE
9848 INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
9849 #endif /* SHOW_LOGTRACE */
9850
9851 DHD_INFO(("%s: sssr mempool init\n", __FUNCTION__));
9852 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
9853
9854 (void)dhd_sysfs_init(dhd);
9855
9856 #ifdef WL_NATOE
9857 /* Open Netlink socket for NF_CONNTRACK notifications */
9858 dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
9859 CT_ALL);
9860 #endif /* WL_NATOE */
9861
9862 dhd_state |= DHD_ATTACH_STATE_DONE;
9863 dhd->dhd_state = dhd_state;
9864
9865 dhd_found++;
9866
9867 g_dhd_pub = &dhd->pub;
9868
9869 #ifdef DHD_DUMP_MNGR
9870 dhd->pub.dump_file_manage =
9871 (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
9872 if (unlikely(!dhd->pub.dump_file_manage)) {
9873 DHD_ERROR(("%s(): could not allocate memory for "
9874 "dhd_dump_file_manage_t\n", __FUNCTION__));
9875 }
9876 #endif /* DHD_DUMP_MNGR */
9877 #ifdef DHD_FW_COREDUMP
9878 /* Set memdump default values */
9879 #ifdef CUSTOMER_HW4_DEBUG
9880 dhd->pub.memdump_enabled = DUMP_DISABLED;
9881 #else
9882 dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
9883 #endif /* CUSTOMER_HW4_DEBUG */
9884 /* Check the memdump capability */
9885 dhd_get_memdump_info(&dhd->pub);
9886 #endif /* DHD_FW_COREDUMP */
9887
9888 #ifdef DHD_ERPOM
9889 if (enable_erpom) {
9890 pom_handler = &dhd->pub.pom_wlan_handler;
9891 pom_handler->func_id = WLAN_FUNC_ID;
9892 pom_handler->handler = (void *)g_dhd_pub;
9893 pom_handler->power_off = dhd_wlan_power_off_handler;
9894 pom_handler->power_on = dhd_wlan_power_on_handler;
9895
9896 dhd->pub.pom_func_register = NULL;
9897 dhd->pub.pom_func_deregister = NULL;
9898 dhd->pub.pom_toggle_reg_on = NULL;
9899
9900 dhd->pub.pom_func_register = symbol_get(pom_func_register);
9901 dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
9902 dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
9903
9904 symbol_put(pom_func_register);
9905 symbol_put(pom_func_deregister);
9906 symbol_put(pom_toggle_reg_on);
9907
9908 if (!dhd->pub.pom_func_register ||
9909 !dhd->pub.pom_func_deregister ||
9910 !dhd->pub.pom_toggle_reg_on) {
9911 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
9912 "POM is not loaded\n", __FUNCTION__));
9913 ASSERT(0);
9914 goto fail;
9915 }
9916 dhd->pub.pom_func_register(pom_handler);
9917 dhd->pub.enable_erpom = TRUE;
9918
9919 }
9920 #endif /* DHD_ERPOM */
9921 return &dhd->pub;
9922
9923 fail:
9924 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
9925 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9926 __FUNCTION__, dhd_state, &dhd->pub));
9927 dhd->dhd_state = dhd_state;
9928 dhd_detach(&dhd->pub);
9929 dhd_free(&dhd->pub);
9930 }
9931
9932 dhd_null_flag:
9933 return NULL;
9934 }
9935
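/* Infer the operating mode from the firmware file name suffix (_apsta, _p2p,
 * _ibss, _mfg); plain images default to STA mode.
 */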
9936 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9937 {
9938 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9939 return DHD_FLAG_HOSTAP_MODE;
9940 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9941 return DHD_FLAG_P2P_MODE;
9942 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9943 return DHD_FLAG_IBSS_MODE;
9944 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9945 return DHD_FLAG_MFG_MODE;
9946
9947 return DHD_FLAG_STA_MODE;
9948 }
9949
9950 int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9951 {
9952 return dhd_get_fw_mode(dhdp->info);
9953 }
9954
9955 extern char * nvram_get(const char *name);
9956 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
9957 {
9958 int fw_len;
9959 int nv_len;
9960 const char *fw = NULL;
9961 const char *nv = NULL;
9962 #ifdef DHD_UCODE_DOWNLOAD
9963 int uc_len;
9964 const char *uc = NULL;
9965 #endif /* DHD_UCODE_DOWNLOAD */
9966 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9967 int fw_path_len = sizeof(dhdinfo->fw_path);
9968 int nv_path_len = sizeof(dhdinfo->nv_path);
9969
9970 /* Update the firmware and nvram paths. A path may come from adapter info or from
9971 * a module parameter; the path from adapter info is used for initialization only
9972 * (it won't change).
9973 * The firmware_path/nvram_path module parameters may be changed by the system at
9974 * run time. When one changes we need to copy it to dhdinfo->fw_path; an Android
9975 * private command may also change dhdinfo->fw_path. We therefore clear the module
9976 * parameter after copying it, and we won't update the path again until the module
9977 * parameter changes again (its first character is no longer '\0').
9978 */
9979
9980 /* set default firmware and nvram path for built-in type driver */
9981 if (!dhd_download_fw_on_driverload) {
9982 #ifdef CONFIG_BCMDHD_FW_PATH
9983 fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
9984 #endif /* CONFIG_BCMDHD_FW_PATH */
9985 #ifdef CONFIG_BCMDHD_NVRAM_PATH
9986 nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
9987 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
9988 }
9989
9990 /* check if we need to initialize the path */
9991 if (dhdinfo->fw_path[0] == '\0') {
9992 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9993 fw = adapter->fw_path;
9994 }
9995 if (dhdinfo->nv_path[0] == '\0') {
9996 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9997 nv = adapter->nv_path;
9998 }
9999
10000 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10001 *
10002 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
10003 */
10004 if (firmware_path[0] != '\0')
10005 fw = firmware_path;
10006
10007 if (nvram_path[0] != '\0')
10008 nv = nvram_path;
10009
10010 #ifdef DHD_UCODE_DOWNLOAD
10011 if (ucode_path[0] != '\0')
10012 uc = ucode_path;
10013 #endif /* DHD_UCODE_DOWNLOAD */
10014
10015 if (fw && fw[0] != '\0') {
10016 fw_len = strlen(fw);
10017 if (fw_len >= fw_path_len) {
10018 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
10019 return FALSE;
10020 }
10021 strncpy(dhdinfo->fw_path, fw, fw_path_len);
10022 if (dhdinfo->fw_path[fw_len-1] == '\n')
10023 dhdinfo->fw_path[fw_len-1] = '\0';
10024 }
10025 if (nv && nv[0] != '\0') {
10026 nv_len = strlen(nv);
10027 if (nv_len >= nv_path_len) {
10028 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
10029 return FALSE;
10030 }
10031 memset(dhdinfo->nv_path, 0, nv_path_len);
10032 strncpy(dhdinfo->nv_path, nv, nv_path_len);
10033 dhdinfo->nv_path[nv_len] = '\0';
10034 #ifdef DHD_USE_SINGLE_NVRAM_FILE
10035 /* Remove "_net" or "_mfg" tag from current nvram path */
10036 {
10037 char *nvram_tag = "nvram_";
10038 char *ext_tag = ".txt";
10039 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
10040 bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
10041 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
10042 if (valid_buf) {
10043 char *sp = sp_nvram + strlen(nvram_tag) - 1;
10044 uint32 padding_size = (uint32)(dhdinfo->nv_path +
10045 nv_path_len - sp);
10046 memset(sp, 0, padding_size);
10047 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
10048 nv_len = strlen(dhdinfo->nv_path);
10049 DHD_INFO(("%s: new nvram path = %s\n",
10050 __FUNCTION__, dhdinfo->nv_path));
10051 } else if (sp_nvram) {
10052 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
10053 __FUNCTION__));
10054 return FALSE;
10055 } else {
10056 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
10057 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
10058 }
10059 }
10060 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
10061 if (dhdinfo->nv_path[nv_len-1] == '\n')
10062 dhdinfo->nv_path[nv_len-1] = '\0';
10063 }
10064 #ifdef DHD_UCODE_DOWNLOAD
10065 if (uc && uc[0] != '\0') {
10066 uc_len = strlen(uc);
10067 if (uc_len >= sizeof(dhdinfo->uc_path)) {
10068 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
10069 return FALSE;
10070 }
10071 strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
10072 if (dhdinfo->uc_path[uc_len-1] == '\n')
10073 dhdinfo->uc_path[uc_len-1] = '\0';
10074 }
10075 #endif /* DHD_UCODE_DOWNLOAD */
10076
10077 /* clear the path in module parameter */
10078 if (dhd_download_fw_on_driverload) {
10079 firmware_path[0] = '\0';
10080 nvram_path[0] = '\0';
10081 }
10082 #ifdef DHD_UCODE_DOWNLOAD
10083 ucode_path[0] = '\0';
10084 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
10085 #endif /* DHD_UCODE_DOWNLOAD */
10086
10087 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
10088 if (dhdinfo->fw_path[0] == '\0') {
10089 DHD_ERROR(("firmware path not found\n"));
10090 return FALSE;
10091 }
10092 if (dhdinfo->nv_path[0] == '\0') {
10093 DHD_ERROR(("nvram path not found\n"));
10094 return FALSE;
10095 }
10096
10097 return TRUE;
10098 }
10099
10100 #if defined(BT_OVER_SDIO)
10101 extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
10102 {
10103 int fw_len;
10104 const char *fw = NULL;
10105 wifi_adapter_info_t *adapter = dhdinfo->adapter;
10106
10107 /* Update the BT firmware path. The path may come from adapter info or from the
10108 * btfw_path module parameter; the path from adapter info is used for
10109 * initialization only (it won't change).
10110 * The btfw_path module parameter may be changed by the system at run time. When
10111 * it changes we need to copy it to dhdinfo->btfw_path; an Android private
10112 * command may also change dhdinfo->btfw_path. We therefore clear the module
10113 * parameter after copying it, and we won't update the path again until the
10114 * module parameter changes again (its first character is no longer '\0').
10115 */
10116
10117 /* set default firmware and nvram path for built-in type driver */
10118 if (!dhd_download_fw_on_driverload) {
10119 #ifdef CONFIG_BCMDHD_BTFW_PATH
10120 fw = CONFIG_BCMDHD_BTFW_PATH;
10121 #endif /* CONFIG_BCMDHD_BTFW_PATH */
10122 }
10123
10124 /* check if we need to initialize the path */
10125 if (dhdinfo->btfw_path[0] == '\0') {
10126 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
10127 fw = adapter->btfw_path;
10128 }
10129
10130 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
10131 */
10132 if (btfw_path[0] != '\0')
10133 fw = btfw_path;
10134
10135 if (fw && fw[0] != '\0') {
10136 fw_len = strlen(fw);
10137 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
10138 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
10139 return FALSE;
10140 }
10141 strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
10142 if (dhdinfo->btfw_path[fw_len-1] == '\n')
10143 dhdinfo->btfw_path[fw_len-1] = '\0';
10144 }
10145
10146 /* clear the path in module parameter */
10147 btfw_path[0] = '\0';
10148
10149 if (dhdinfo->btfw_path[0] == '\0') {
10150 DHD_ERROR(("bt firmware path not found\n"));
10151 return FALSE;
10152 }
10153
10154 return TRUE;
10155 }
10156 #endif /* defined (BT_OVER_SDIO) */
10157
10158 #ifdef CUSTOMER_HW4_DEBUG
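/*
 * Sanity-check that the chip id probed from the bus matches the id implied
 * by the build-time BCMxxxx_CHIP define, allowing the known multi-revision
 * pairs (4350/4354, 43569/4358, 4355/4359, 4347/4361).
 */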
10159 bool dhd_validate_chipid(dhd_pub_t *dhdp)
10160 {
10161 uint chipid = dhd_bus_chip_id(dhdp);
10162 uint config_chipid;
10163
10164 #ifdef BCM4375_CHIP
10165 config_chipid = BCM4375_CHIP_ID;
10166 #elif defined(BCM4361_CHIP)
10167 config_chipid = BCM4361_CHIP_ID;
10168 #elif defined(BCM4359_CHIP)
10169 config_chipid = BCM4359_CHIP_ID;
10170 #elif defined(BCM4358_CHIP)
10171 config_chipid = BCM4358_CHIP_ID;
10172 #elif defined(BCM4354_CHIP)
10173 config_chipid = BCM4354_CHIP_ID;
10174 #elif defined(BCM4339_CHIP)
10175 config_chipid = BCM4339_CHIP_ID;
10176 #elif defined(BCM4335_CHIP)
10177 config_chipid = BCM4335_CHIP_ID;
10178 #elif defined(BCM43430_CHIP)
10179 config_chipid = BCM43430_CHIP_ID;
10180 #elif defined(BCM43018_CHIP)
10181 config_chipid = BCM43018_CHIP_ID;
10182 #elif defined(BCM43455_CHIP) || defined(BCM43456_CHIP)
10183 config_chipid = BCM4345_CHIP_ID;
10184 #elif defined(BCM43454_CHIP)
10185 config_chipid = BCM43454_CHIP_ID;
10186 #elif defined(BCM43012_CHIP_)
10187 config_chipid = BCM43012_CHIP_ID;
10188 #else
10189 DHD_ERROR(("%s: Unknown chip id; if you use a new chipset,"
10190 " please add CONFIG_BCMXXXX to the kernel config and a"
10191 " BCMXXXX_CHIP definition to the DHD driver\n",
10192 __FUNCTION__));
10193 config_chipid = 0;
10194
10195 return FALSE;
10196 #endif /* BCM4375_CHIP */
10197
10198 #ifdef SUPPORT_MULTIPLE_CHIP_4345X
10199 if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) {
10200 return TRUE;
10201 }
10202 #endif /* SUPPORT_MULTIPLE_CHIP_4345X */
10203 #if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10204 if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
10205 return TRUE;
10206 }
10207 #endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
10208 #if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
10209 if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
10210 return TRUE;
10211 }
10212 #endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
10213 #if defined(BCM4359_CHIP)
10214 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
10215 return TRUE;
10216 }
10217 #endif /* BCM4359_CHIP */
10218 #if defined(BCM4361_CHIP)
10219 if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
10220 return TRUE;
10221 }
10222 #endif /* BCM4361_CHIP */
10223
10224 return config_chipid == chipid;
10225 }
10226 #endif /* CUSTOMER_HW4_DEBUG */
10227
10228 #if defined(BT_OVER_SDIO)
10229 wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
10230 {
10231 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
10232 /* assuming that dhd_pub_t type pointer is available from a global variable */
10233 return (wlan_bt_handle_t) g_dhd_pub;
10234 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
10235
10236 int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
10237 {
10238 int ret = -1;
10239 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
10240 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10241
10242 /* Download BT firmware image to the dongle */
10243 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
10244 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
10245 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
10246 if (ret < 0) {
10247 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10248 __FUNCTION__, dhd->btfw_path));
10249 return ret;
10250 }
10251 }
10252 return ret;
10253 } EXPORT_SYMBOL(dhd_download_btfw);
10254 #endif /* defined (BT_OVER_SDIO) */
10255
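/*
 * Bring the dongle up: download firmware/nvram if the bus is still down,
 * start the watchdog timer, initialize the bus, register the OOB interrupt
 * where configured, set up the flow rings (PCIe full dongle) and the
 * protocol layer, and finally sync with the dongle.
 */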
10256 int
10257 dhd_bus_start(dhd_pub_t *dhdp)
10258 {
10259 int ret = -1;
10260 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10261 unsigned long flags;
10262
10263 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10264 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
10265 #endif /* DHD_DEBUG && BCMSDIO */
10266 ASSERT(dhd);
10267
10268 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
10269 dhdp->dongle_trap_occured = 0;
10270 dhdp->iovar_timeout_occured = 0;
10271 #ifdef PCIE_FULL_DONGLE
10272 dhdp->d3ack_timeout_occured = 0;
10273 #endif /* PCIE_FULL_DONGLE */
10274 #ifdef DHD_MAP_LOGGING
10275 dhdp->smmu_fault_occurred = 0;
10276 #endif /* DHD_MAP_LOGGING */
10277
10278 DHD_PERIM_LOCK(dhdp);
10279 /* try to download image and nvram to the dongle */
10280 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
10281 /* Indicate FW Download has not yet done */
10282 dhd->pub.fw_download_done = FALSE;
10283 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
10284 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10285 fw_download_start = OSL_SYSUPTIME();
10286 #endif /* DHD_DEBUG && BCMSDIO */
10287 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
10288 dhd->fw_path, dhd->nv_path);
10289 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10290 fw_download_end = OSL_SYSUPTIME();
10291 #endif /* DHD_DEBUG && BCMSDIO */
10292 if (ret < 0) {
10293 DHD_ERROR(("%s: failed to download firmware %s\n",
10294 __FUNCTION__, dhd->fw_path));
10295 DHD_PERIM_UNLOCK(dhdp);
10296 return ret;
10297 }
10298 /* Indicate FW Download has succeeded */
10299 dhd->pub.fw_download_done = TRUE;
10300 }
10301 if (dhd->pub.busstate != DHD_BUS_LOAD) {
10302 DHD_PERIM_UNLOCK(dhdp);
10303 return -ENETDOWN;
10304 }
10305
10306 #ifdef BCMSDIO
10307 dhd_os_sdlock(dhdp);
10308 #endif /* BCMSDIO */
10309
10310 /* Start the watchdog timer */
10311 dhd->pub.tickcnt = 0;
10312 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
10313
10314 /* Bring up the bus */
10315 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
10317 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
10318 #ifdef BCMSDIO
10319 dhd_os_sdunlock(dhdp);
10320 #endif /* BCMSDIO */
10321 DHD_PERIM_UNLOCK(dhdp);
10322 return ret;
10323 }
10324
10325 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
10326
10327 #ifdef DHD_ULP
10328 dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
10329 #endif /* DHD_ULP */
10330 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
10331 /* Host registration for OOB interrupt */
10332 if (dhd_bus_oob_intr_register(dhdp)) {
10333 /* deactivate timer and wait for the handler to finish */
10334 #if !defined(BCMPCIE_OOB_HOST_WAKE)
10335 DHD_GENERAL_LOCK(&dhd->pub, flags);
10336 dhd->wd_timer_valid = FALSE;
10337 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10338 del_timer_sync(&dhd->timer);
10339
10340 #endif /* !BCMPCIE_OOB_HOST_WAKE */
10341 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10342 DHD_PERIM_UNLOCK(dhdp);
10343 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
10344 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10345 return -ENODEV;
10346 }
10347
10348 #if defined(BCMPCIE_OOB_HOST_WAKE)
10349 dhd_bus_oob_intr_set(dhdp, TRUE);
10350 #else
10351 /* Enable oob at firmware */
10352 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10353 #endif /* BCMPCIE_OOB_HOST_WAKE */
10354 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
10355 #ifdef PCIE_FULL_DONGLE
10356 {
10357 /* max_h2d_rings includes H2D common rings */
10358 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
10359
10360 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
10361 max_h2d_rings));
10362 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
10363 #ifdef BCMSDIO
10364 dhd_os_sdunlock(dhdp);
10365 #endif /* BCMSDIO */
10366 DHD_PERIM_UNLOCK(dhdp);
10367 return ret;
10368 }
10369 }
10370 #endif /* PCIE_FULL_DONGLE */
10371
10372 /* Do protocol initialization necessary for IOCTL/IOVAR */
10373 ret = dhd_prot_init(&dhd->pub);
10374 if (unlikely(ret) != BCME_OK) {
10375 DHD_PERIM_UNLOCK(dhdp);
10376 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10377 return ret;
10378 }
10379
10380 /* If bus is not ready, can't come up */
10381 if (dhd->pub.busstate != DHD_BUS_DATA) {
10382 DHD_GENERAL_LOCK(&dhd->pub, flags);
10383 dhd->wd_timer_valid = FALSE;
10384 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10385 del_timer_sync(&dhd->timer);
10386 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
10387 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10388 #ifdef BCMSDIO
10389 dhd_os_sdunlock(dhdp);
10390 #endif /* BCMSDIO */
10391 DHD_PERIM_UNLOCK(dhdp);
10392 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10393 return -ENODEV;
10394 }
10395
10396 #ifdef BCMSDIO
10397 dhd_os_sdunlock(dhdp);
10398 #endif /* BCMSDIO */
10399
10400 /* Bus is ready, query any dongle information */
10401 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10402 f2_sync_start = OSL_SYSUPTIME();
10403 #endif /* DHD_DEBUG && BCMSDIO */
10404 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
10405 DHD_GENERAL_LOCK(&dhd->pub, flags);
10406 dhd->wd_timer_valid = FALSE;
10407 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10408 del_timer_sync(&dhd->timer);
10409 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
10410 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10411 DHD_PERIM_UNLOCK(dhdp);
10412 return ret;
10413 }
10414 #if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810)
10415 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
10416 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
10417 #endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 */
10418
10419 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10420 f2_sync_end = OSL_SYSUPTIME();
10421 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10422 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
10423 #endif /* DHD_DEBUG && BCMSDIO */
10424
10425 #ifdef ARP_OFFLOAD_SUPPORT
10426 if (dhd->pend_ipaddr) {
10427 #ifdef AOE_IP_ALIAS_SUPPORT
10428 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
10429 #endif /* AOE_IP_ALIAS_SUPPORT */
10430 dhd->pend_ipaddr = 0;
10431 }
10432 #endif /* ARP_OFFLOAD_SUPPORT */
10433
10434 DHD_PERIM_UNLOCK(dhdp);
10435
10436 return 0;
10437 }
10438 #ifdef WLTDLS
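/*
 * Enable or disable TDLS in the firmware and configure tdls_auto_op. When
 * auto mode is on, also program the idle-time, RSSI, and packet-count
 * thresholds that trigger TDLS link setup and teardown.
 */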
10439 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
10440 {
10441 uint32 tdls = tdls_on;
10442 int ret = 0;
10443 uint32 tdls_auto_op = 0;
10444 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
10445 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
10446 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
10447 uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
10448 uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
10449
10450 BCM_REFERENCE(mac);
10451 if (!FW_SUPPORTED(dhd, tdls))
10452 return BCME_ERROR;
10453
10454 if (dhd->tdls_enable == tdls_on)
10455 goto auto_mode;
10456 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
10457 if (ret < 0) {
10458 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
10459 goto exit;
10460 }
10461 dhd->tdls_enable = tdls_on;
10462 auto_mode:
10463
10464 tdls_auto_op = auto_on;
10465 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
10466 0, TRUE);
10467 if (ret < 0) {
10468 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
10469 goto exit;
10470 }
10471
10472 if (tdls_auto_op) {
10473 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
10474 sizeof(tdls_idle_time), NULL, 0, TRUE);
10475 if (ret < 0) {
10476 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
10477 goto exit;
10478 }
10479 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
10480 sizeof(tdls_rssi_high), NULL, 0, TRUE);
10481 if (ret < 0) {
10482 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
10483 goto exit;
10484 }
10485 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
10486 sizeof(tdls_rssi_low), NULL, 0, TRUE);
10487 if (ret < 0) {
10488 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
10489 goto exit;
10490 }
10491 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
10492 sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
10493 if (ret < 0) {
10494 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
10495 goto exit;
10496 }
10497 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
10498 sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
10499 if (ret < 0) {
10500 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
10501 goto exit;
10502 }
10503 }
10504
10505 exit:
10506 return ret;
10507 }
10508 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
10509 {
10510 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10511 int ret = 0;
10512 if (dhd)
10513 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
10514 else
10515 ret = BCME_ERROR;
10516 return ret;
10517 }
10518 int
10519 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
10520 {
10521 int ret = 0;
10522 bool auto_on = false;
10523 uint32 mode = wfd_mode;
10524
10525 #ifdef ENABLE_TDLS_AUTO_MODE
10526 if (wfd_mode) {
10527 auto_on = false;
10528 } else {
10529 auto_on = true;
10530 }
10531 #else
10532 auto_on = false;
10533 #endif /* ENABLE_TDLS_AUTO_MODE */
10534 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
10535 if (ret < 0) {
10536 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
10537 return ret;
10538 }
10539
10540 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
10541 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
10542 DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
10543 return ret;
10544 }
10545
10546 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
10547 if (ret < 0) {
10548 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
10549 return ret;
10550 }
10551
10552 dhd->tdls_mode = mode;
10553 return ret;
10554 }
10555 #ifdef PCIE_FULL_DONGLE
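/*
 * Maintain the host-side TDLS peer table from TDLS peer events: add a node
 * on WLC_E_TDLS_PEER_CONNECTED; on WLC_E_TDLS_PEER_DISCONNECTED delete the
 * peer's flow rings and remove the node. The table is protected by
 * tdls_lock.
 */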
10556 int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
10557 {
10558 dhd_pub_t *dhd_pub = dhdp;
10559 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
10560 tdls_peer_node_t *new = NULL, *prev = NULL;
10561 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
10562 uint8 *da = (uint8 *)&event->addr.octet[0];
10563 bool connect = FALSE;
10564 uint32 reason = ntoh32(event->reason);
10565 unsigned long flags;
10566
10567 if (reason == WLC_E_TDLS_PEER_CONNECTED)
10568 connect = TRUE;
10569 else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
10570 connect = FALSE;
10571 else
10572 {
10573 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
10574 return BCME_ERROR;
10575 }
10576 if (ifindex == DHD_BAD_IF)
10577 return BCME_ERROR;
10578
10579 if (connect) {
10580 while (cur != NULL) {
10581 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10582 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
10583 __FUNCTION__, __LINE__));
10584 return BCME_ERROR;
10585 }
10586 cur = cur->next;
10587 }
10588
10589 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
10590 if (new == NULL) {
10591 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
10592 return BCME_ERROR;
10593 }
10594 memcpy(new->addr, da, ETHER_ADDR_LEN);
10595 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10596 new->next = dhd_pub->peer_tbl.node;
10597 dhd_pub->peer_tbl.node = new;
10598 dhd_pub->peer_tbl.tdls_peer_count++;
10599 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10600
10601 } else {
10602 while (cur != NULL) {
10603 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
10604 dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
10605 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
10606 if (prev)
10607 prev->next = cur->next;
10608 else
10609 dhd_pub->peer_tbl.node = cur->next;
10610 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
10611 dhd_pub->peer_tbl.tdls_peer_count--;
10612 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
10613 return BCME_OK;
10614 }
10615 prev = cur;
10616 cur = cur->next;
10617 }
10618 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
10619 }
10620 return BCME_OK;
10621 }
10622 #endif /* PCIE_FULL_DONGLE */
10623 #endif // endif
10624
10625 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
10626 {
10627 if (!dhd)
10628 return FALSE;
10629
10630 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
10631 return TRUE;
10632 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
10633 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
10634 return TRUE;
10635 else
10636 return FALSE;
10637 }
10638 #if !defined(AP) && defined(WLP2P)
10639 /* Since the Android Jelly Bean release, concurrent mode is enabled by default and
10640 * the firmware is named fw_bcmdhd.bin. So we need to determine whether P2P is
10641 * enabled in the STA firmware and, if so, enable concurrent mode (apply the P2P
10642 * settings). SoftAP firmware is still named fw_bcmdhd_apsta.
10643 */
10644 uint32
10645 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
10646 {
10647 int32 ret = 0;
10648 char buf[WLC_IOCTL_SMLEN];
10649 bool mchan_supported = FALSE;
10650 /* If dhd->op_mode is already set to HOSTAP or manufacturing
10651 * test mode, keep using that mode as it is.
10652 */
10653 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
10654 return 0;
10655 if (FW_SUPPORTED(dhd, vsdb)) {
10656 mchan_supported = TRUE;
10657 }
10658 if (!FW_SUPPORTED(dhd, p2p)) {
10659 DHD_TRACE(("Chip does not support p2p\n"));
10660 return 0;
10661 } else {
10662 /* The chip supports p2p, but verify that p2p is actually implemented in the firmware */
10663 memset(buf, 0, sizeof(buf));
10664 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
10665 sizeof(buf), FALSE);
10666 if (ret < 0) {
10667 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
10668 return 0;
10669 } else {
10670 if (buf[0] == 1) {
10671 /* By default the chip supports single-channel concurrency;
10672 * now check for multi-channel (mchan) support
10673 */
10674 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
10675 if (mchan_supported)
10676 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
10677 if (FW_SUPPORTED(dhd, rsdb)) {
10678 ret |= DHD_FLAG_RSDB_MODE;
10679 }
10680 #ifdef WL_SUPPORT_MULTIP2P
10681 if (FW_SUPPORTED(dhd, mp2p)) {
10682 ret |= DHD_FLAG_MP2P_MODE;
10683 }
10684 #endif /* WL_SUPPORT_MULTIP2P */
10685 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10686 return ret;
10687 #else
10688 return 0;
10689 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10690 }
10691 }
10692 }
10693 return 0;
10694 }
10695 #endif // endif
10696
10697 #ifdef WLAIBSS
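/*
 * Pre-init IOVARs for Advanced IBSS (AIBSS): enable the aibss feature (and,
 * when WLAIBSS_PS is built in, aibss_ps with a custom ATIM window), apply
 * the forced-beacon configuration, and allow IBSS coalescing.
 */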
10698 int
10699 dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
10700 {
10701 int ret = BCME_OK;
10702 aibss_bcn_force_config_t bcn_config;
10703 uint32 aibss;
10704 #ifdef WLAIBSS_PS
10705 uint32 aibss_ps;
10706 s32 atim;
10707 #endif /* WLAIBSS_PS */
10708 int ibss_coalesce;
10709
10710 aibss = 1;
10711 ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
10712 if (ret < 0) {
10713 if (ret == BCME_UNSUPPORTED) {
10714 DHD_ERROR(("%s aibss is not supported\n",
10715 __FUNCTION__));
10716 return BCME_OK;
10717 } else {
10718 DHD_ERROR(("%s Set aibss to %d failed %d\n",
10719 __FUNCTION__, aibss, ret));
10720 return ret;
10721 }
10722 }
10723
10724 #ifdef WLAIBSS_PS
10725 aibss_ps = 1;
10726 ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
10727 if (ret < 0) {
10728 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
10729 __FUNCTION__, aibss_ps, ret));
10730 return ret;
10731 }
10732
10733 atim = 10;
10734 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
10735 (char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
10736 DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
10737 __FUNCTION__, ret));
10738 return ret;
10739 }
10740 #endif /* WLAIBSS_PS */
10741
10742 memset(&bcn_config, 0, sizeof(bcn_config));
10743 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
10744 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
10745 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
10746 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
10747 bcn_config.len = sizeof(bcn_config);
10748
10749 ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
10750 sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
10751 if (ret < 0) {
10752 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
10753 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
10754 AIBSS_BCN_FLOOD_DUR, ret));
10755 return ret;
10756 }
10757
10758 ibss_coalesce = IBSS_COALESCE_DEFAULT;
10759 ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
10760 sizeof(ibss_coalesce), NULL, 0, TRUE);
10761 if (ret < 0) {
10762 DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
10763 __FUNCTION__, ret));
10764 return ret;
10765 }
10766
10767 dhd->op_mode |= DHD_FLAG_IBSS_MODE;
10768 return BCME_OK;
10769 }
10770 #endif /* WLAIBSS */
10771
10772 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10773 #ifdef WL_BAM
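/*
 * If the currently associated BSSID is on the bad-AP list, suspend ADPS for
 * this connection.
 */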
10774 static int
10775 dhd_check_adps_bad_ap(dhd_pub_t *dhd)
10776 {
10777 struct net_device *ndev;
10778 struct bcm_cfg80211 *cfg;
10779 struct wl_profile *profile;
10780 struct ether_addr bssid;
10781
10782 if (!dhd_is_associated(dhd, 0, NULL)) {
10783 DHD_ERROR(("%s - not associated\n", __FUNCTION__));
10784 return BCME_OK;
10785 }
10786
10787 ndev = dhd_linux_get_primary_netdev(dhd);
10788 if (!ndev) {
10789 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
10790 return -ENODEV;
10791 }
10792
10793 cfg = wl_get_cfg(ndev);
10794 if (!cfg) {
10795 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
10796 return -EINVAL;
10797 }
10798
10799 profile = wl_get_profile_by_netdev(cfg, ndev);
10800 memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
10801 if (wl_adps_bad_ap_check(cfg, &bssid)) {
10802 if (wl_adps_enabled(cfg, ndev)) {
10803 wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
10804 }
10805 }
10806
10807 return BCME_OK;
10808 }
10809 #endif /* WL_BAM */
10810
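/*
 * Toggle ADPS for every band. The "adps" IOVAR takes a bcm_iov_buf_t header
 * followed by a wl_adps_params_v1_t payload; the same buffer is reused with
 * data->band iterating from 1 to MAX_BANDS. BCME_UNSUPPORTED from the
 * firmware is treated as success.
 */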
10811 int
10812 dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
10813 {
10814 int i;
10815 int len;
10816 int ret = BCME_OK;
10817
10818 bcm_iov_buf_t *iov_buf = NULL;
10819 wl_adps_params_v1_t *data = NULL;
10820
10821 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
10822 iov_buf = MALLOC(dhd->osh, len);
10823 if (iov_buf == NULL) {
10824 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
10825 ret = BCME_NOMEM;
10826 goto exit;
10827 }
10828
10829 iov_buf->version = WL_ADPS_IOV_VER;
10830 iov_buf->len = sizeof(*data);
10831 iov_buf->id = WL_ADPS_IOV_MODE;
10832
10833 data = (wl_adps_params_v1_t *)iov_buf->data;
10834 data->version = ADPS_SUB_IOV_VERSION_1;
10835 data->length = sizeof(*data);
10836 data->mode = on;
10837
10838 for (i = 1; i <= MAX_BANDS; i++) {
10839 data->band = i;
10840 ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
10841 if (ret < 0) {
10842 if (ret == BCME_UNSUPPORTED) {
10843 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
10844 ret = BCME_OK;
10845 goto exit;
10846 }
10847 else {
10848 DHD_ERROR(("%s failed to set adps %s for band %d (%d)\n",
10849 __FUNCTION__, on ? "On" : "Off", i, ret));
10850 goto exit;
10851 }
10852 }
10853 }
10854
10855 #ifdef WL_BAM
10856 if (on) {
10857 dhd_check_adps_bad_ap(dhd);
10858 }
10859 #endif /* WL_BAM */
10860
10861 exit:
10862 if (iov_buf) {
10863 MFREE(dhd->osh, iov_buf, len);
10864 iov_buf = NULL;
10865 }
10866 return ret;
10867 }
10868 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
10869
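/*
 * One-time dongle configuration applied right after firmware download:
 * resolve the operating mode (STA/AP/P2P/IBSS/MFG), program the MAC address
 * and CLM blob, then push roaming, AMPDU, scan, event-mask and packet-filter
 * defaults to the dongle via iovars/ioctls.
 */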
10870 int
10871 dhd_preinit_ioctls(dhd_pub_t *dhd)
10872 {
10873 int ret = 0;
10874 char eventmask[WL_EVENTING_MASK_LEN];
10875 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10876 uint32 buf_key_b4_m4 = 1;
10877 uint8 msglen;
10878 eventmsgs_ext_t *eventmask_msg = NULL;
10879 char* iov_buf = NULL;
10880 int ret2 = 0;
10881 uint32 wnm_cap = 0;
10882 #if defined(BCMSUP_4WAY_HANDSHAKE)
10883 uint32 sup_wpa = 1;
10884 #endif /* BCMSUP_4WAY_HANDSHAKE */
10885 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
10886 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
10887 uint32 ampdu_ba_wsize = 0;
10888 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
10889 #if defined(CUSTOM_AMPDU_MPDU)
10890 int32 ampdu_mpdu = 0;
10891 #endif // endif
10892 #if defined(CUSTOM_AMPDU_RELEASE)
10893 int32 ampdu_release = 0;
10894 #endif // endif
10895 #if defined(CUSTOM_AMSDU_AGGSF)
10896 int32 amsdu_aggsf = 0;
10897 #endif // endif
10898
10899 #if defined(BCMSDIO)
10900 #ifdef PROP_TXSTATUS
10901 int wlfc_enable = TRUE;
10902 #ifndef DISABLE_11N
10903 uint32 hostreorder = 1;
10904 #endif /* DISABLE_11N */
10905 #endif /* PROP_TXSTATUS */
10906 #endif // endif
10907 #ifndef PCIE_FULL_DONGLE
10908 uint32 wl_ap_isolate;
10909 #endif /* PCIE_FULL_DONGLE */
10910 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
10911 uint wnm_bsstrans_resp = 0;
10912 #ifdef SUPPORT_SET_CAC
10913 #ifdef SUPPORT_CUSTOM_SET_CAC
10914 uint32 cac = 0;
10915 #else
10916 uint32 cac = 1;
10917 #endif /* SUPPORT_CUSTOM_SET_CAC */
10918 #endif /* SUPPORT_SET_CAC */
10919
10920 #if defined(DHD_NON_DMA_M2M_CORRUPTION)
10921 dhd_pcie_dmaxfer_lpbk_t pcie_dmaxfer_lpbk;
10922 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
10923
10924 #ifdef DHD_ENABLE_LPC
10925 uint32 lpc = 1;
10926 #endif /* DHD_ENABLE_LPC */
10927 uint power_mode = PM_FAST;
10928 #if defined(BCMSDIO)
10929 uint32 dongle_align = DHD_SDALIGN;
10930 uint32 glom = CUSTOM_GLOM_SETTING;
10931 #endif /* defined(BCMSDIO) */
10932 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
10933 uint scancache_enab = TRUE;
10934 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10935 uint32 bcn_li_bcn = 1;
10936 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10937 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
10938 #if defined(ARP_OFFLOAD_SUPPORT)
10939 int arpoe = 1;
10940 #endif // endif
10941 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
10942 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
10943 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
10944 char buf[WLC_IOCTL_SMLEN];
10945 char *ptr;
10946 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
10947 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10948 wl_el_tag_params_t *el_tag = NULL;
10949 #endif /* DHD_8021X_DUMP */
10950 #ifdef ROAM_ENABLE
10951 uint roamvar = 0;
10952 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
10953 int roam_scan_period[2] = {10, WLC_BAND_ALL};
10954 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
10955 #ifdef ROAM_AP_ENV_DETECTION
10956 int roam_env_mode = AP_ENV_INDETERMINATE;
10957 #endif /* ROAM_AP_ENV_DETECTION */
10958 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10959 int roam_fullscan_period = 60;
10960 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10961 int roam_fullscan_period = 120;
10962 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10963 #ifdef DISABLE_BCNLOSS_ROAM
10964 uint roam_bcnloss_off = 1;
10965 #endif /* DISABLE_BCNLOSS_ROAM */
10966 #else
10967 #ifdef DISABLE_BUILTIN_ROAM
10968 uint roamvar = 1;
10969 #endif /* DISABLE_BUILTIN_ROAM */
10970 #endif /* ROAM_ENABLE */
10971
10972 #if defined(SOFTAP)
10973 uint dtim = 1;
10974 #endif // endif
10975 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10976 struct ether_addr p2p_ea;
10977 #endif // endif
10978 #ifdef BCMCCX
10979 uint32 ccx = 1;
10980 #endif // endif
10981 #ifdef SOFTAP_UAPSD_OFF
10982 uint32 wme_apsd = 0;
10983 #endif /* SOFTAP_UAPSD_OFF */
10984 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10985 uint32 apsta = 1; /* Enable APSTA mode */
10986 #elif defined(SOFTAP_AND_GC)
10987 uint32 apsta = 0;
10988 int ap_mode = 1;
10989 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10990 #ifdef GET_CUSTOM_MAC_ENABLE
10991 struct ether_addr ea_addr;
10992 #endif /* GET_CUSTOM_MAC_ENABLE */
10993 #ifdef OKC_SUPPORT
10994 uint32 okc = 1;
10995 #endif // endif
10996
10997 #ifdef DISABLE_11N
10998 uint32 nmode = 0;
10999 #endif /* DISABLE_11N */
11000
11001 #ifdef USE_WL_TXBF
11002 uint32 txbf = 1;
11003 #endif /* USE_WL_TXBF */
11004 #ifdef DISABLE_TXBFR
11005 uint32 txbf_bfr_cap = 0;
11006 #endif /* DISABLE_TXBFR */
11007 #ifdef AMPDU_VO_ENABLE
11008 struct ampdu_tid_control tid;
11009 #endif // endif
11010 #if defined(PROP_TXSTATUS)
11011 #ifdef USE_WFA_CERT_CONF
11012 uint32 proptx = 0;
11013 #endif /* USE_WFA_CERT_CONF */
11014 #endif /* PROP_TXSTATUS */
11015 #ifdef DHD_SET_FW_HIGHSPEED
11016 uint32 ack_ratio = 250;
11017 uint32 ack_ratio_depth = 64;
11018 #endif /* DHD_SET_FW_HIGHSPEED */
11019 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11020 uint32 vht_features = 0; /* init to 0, will be set based on each support */
11021 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11022 #ifdef DISABLE_11N_PROPRIETARY_RATES
11023 uint32 ht_features = 0;
11024 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11025 #ifdef CUSTOM_EVENT_PM_WAKE
11026 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
11027 #endif /* CUSTOM_EVENT_PM_WAKE */
11028 #ifdef DISABLE_PRUNED_SCAN
11029 uint32 scan_features = 0;
11030 #endif /* DISABLE_PRUNED_SCAN */
11031 #ifdef DHD_2G_ONLY_SUPPORT
11032 uint band = WLC_BAND_2G;
11033 #endif /* DHD_2G_ONLY_SUPPORT */
11034 #ifdef BCMPCIE_OOB_HOST_WAKE
11035 uint32 hostwake_oob = 0;
11036 #endif /* BCMPCIE_OOB_HOST_WAKE */
11037 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
11038 uint32 btmdelta = WBTEXT_BTMDELTA;
11039 #endif /* WBTEXT && WBTEXT_BTMDELTA */
11040
11041 #ifdef PKT_FILTER_SUPPORT
11042 dhd_pkt_filter_enable = TRUE;
11043 #ifdef APF
11044 dhd->apf_set = FALSE;
11045 #endif /* APF */
11046 #endif /* PKT_FILTER_SUPPORT */
11047 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
11048 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
11049 dhd->max_dtim_enable = TRUE;
11050 #else
11051 dhd->max_dtim_enable = FALSE;
11052 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
11053 dhd->disable_dtim_in_suspend = FALSE;
11054 #ifdef CUSTOM_SET_OCLOFF
11055 dhd->ocl_off = FALSE;
11056 #endif /* CUSTOM_SET_OCLOFF */
11057 #ifdef SUPPORT_SET_TID
11058 dhd->tid_mode = SET_TID_OFF;
11059 dhd->target_uid = 0;
11060 dhd->target_tid = 0;
11061 #endif /* SUPPORT_SET_TID */
11062 DHD_TRACE(("Enter %s\n", __FUNCTION__));
11063 dhd->op_mode = 0;
11064
11065 #ifdef CUSTOMER_HW4_DEBUG
11066 if (!dhd_validate_chipid(dhd)) {
11067 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) are mismatched\n",
11068 __FUNCTION__, dhd_bus_chip_id(dhd)));
11069 #ifndef SUPPORT_MULTIPLE_CHIPS
11070 ret = BCME_BADARG;
11071 goto done;
11072 #endif /* !SUPPORT_MULTIPLE_CHIPS */
11073 }
11074 #endif /* CUSTOMER_HW4_DEBUG */
11075 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11076 (op_mode == DHD_FLAG_MFG_MODE)) {
11077 dhd->op_mode = DHD_FLAG_MFG_MODE;
11078 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
11079 /* disable runtimePM by default in MFG mode. */
11080 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
11081 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
11082 #ifdef DHD_PCIE_RUNTIMEPM
11083 /* Disable RuntimePM in mfg mode */
11084 DHD_DISABLE_RUNTIME_PM(dhd);
11085 DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
11086 #endif /* DHD_PCIE_RUNTIMEPM */
11087 /* Check and adjust IOCTL response timeout for Manufacturing firmware */
11088 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
11089 DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
11090 __FUNCTION__));
11091 } else {
11092 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
11093 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
11094 }
11095 #ifdef BCMPCIE_OOB_HOST_WAKE
11096 ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
11097 sizeof(hostwake_oob), FALSE);
11098 if (ret < 0) {
11099 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
11100 } else {
11101 if (hostwake_oob == 0) {
11102 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
11103 __FUNCTION__));
11104 ret = BCME_UNSUPPORTED;
11105 goto done;
11106 } else {
11107 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
11108 }
11109 }
11110 #endif /* BCMPCIE_OOB_HOST_WAKE */
11111 #ifdef GET_CUSTOM_MAC_ENABLE
11112 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
11113 if (!ret) {
11114 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
11115 TRUE);
11116 if (ret < 0) {
11117 DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
11118 ret = BCME_NOTUP;
11119 goto done;
11120 }
11121 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
11122 } else {
11123 #endif /* GET_CUSTOM_MAC_ENABLE */
11124 /* Get the default device MAC address directly from firmware */
11125 ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11126 if (ret < 0) {
11127 DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
11128 ret = BCME_NOTUP;
11129 goto done;
11130 }
11131 /* Update public MAC address after reading from Firmware */
11132 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
11133
11134 #ifdef GET_CUSTOM_MAC_ENABLE
11135 }
11136 #endif /* GET_CUSTOM_MAC_ENABLE */
11137
11138 #ifdef DHD_USE_CLMINFO_PARSER
11139 if ((ret = dhd_get_clminfo(dhd, clm_path)) < 0) {
11140 if (dhd->is_clm_mult_regrev) {
11141 DHD_ERROR(("%s: CLM Information load failed. Abort initialization.\n",
11142 __FUNCTION__));
11143 goto done;
11144 }
11145 }
11146 #endif /* DHD_USE_CLMINFO_PARSER */
11147 if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
11148 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
11149 goto done;
11150 }
11151
11152 /* get capabilities from firmware */
11153 {
11154 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
11155 memset(dhd->fw_capabilities, 0, cap_buf_size);
11156 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
11157 FALSE);
11158 if (ret < 0) {
11159 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11160 __FUNCTION__, ret));
11161 return 0;
11162 }
11163
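/*
 * Pad the capability string with a leading and a trailing space so that
 * substring checks (e.g. FW_SUPPORTED) can match whole, space-delimited
 * capability words.
 */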
11164 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
11165 dhd->fw_capabilities[0] = ' ';
11166 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
11167 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
11168 }
11169
11170 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
11171 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
11172 #ifdef SET_RANDOM_MAC_SOFTAP
11173 uint rand_mac;
11174 #endif /* SET_RANDOM_MAC_SOFTAP */
11175 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
11176 #if defined(ARP_OFFLOAD_SUPPORT)
11177 arpoe = 0;
11178 #endif // endif
11179 #ifdef PKT_FILTER_SUPPORT
11180 dhd_pkt_filter_enable = FALSE;
11181 #endif // endif
11182 #ifdef SET_RANDOM_MAC_SOFTAP
11183 SRANDOM32((uint)jiffies);
11184 rand_mac = RANDOM32();
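/* Compose a locally administered MAC: vendor OUI with the local-admin bit
 * set in bytes 0-2, rand_mac-derived bytes in 3-5 (high nibble of byte 3
 * forced to 0xF).
 */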
11185 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
11186 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
11187 iovbuf[2] = (unsigned char)vendor_oui;
11188 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
11189 iovbuf[4] = (unsigned char)(rand_mac >> 8);
11190 iovbuf[5] = (unsigned char)(rand_mac >> 16);
11191
11192 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
11193 TRUE);
11194 if (ret < 0) {
11195 DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
11196 } else
11197 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
11198 #endif /* SET_RANDOM_MAC_SOFTAP */
11199 #ifdef USE_DYNAMIC_F2_BLKSIZE
11200 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11201 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11202 #ifdef SOFTAP_UAPSD_OFF
11203 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
11204 TRUE);
11205 if (ret < 0) {
11206 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11207 __FUNCTION__, ret));
11208 }
11209 #endif /* SOFTAP_UAPSD_OFF */
11210 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11211 (op_mode == DHD_FLAG_MFG_MODE)) {
11212 #if defined(ARP_OFFLOAD_SUPPORT)
11213 arpoe = 0;
11214 #endif /* ARP_OFFLOAD_SUPPORT */
11215 #ifdef PKT_FILTER_SUPPORT
11216 dhd_pkt_filter_enable = FALSE;
11217 #endif /* PKT_FILTER_SUPPORT */
11218 dhd->op_mode = DHD_FLAG_MFG_MODE;
11219 #ifdef USE_DYNAMIC_F2_BLKSIZE
11220 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11221 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11222 #ifndef CUSTOM_SET_ANTNPM
11223 if (FW_SUPPORTED(dhd, rsdb)) {
11224 wl_config_t rsdb_mode;
11225 memset(&rsdb_mode, 0, sizeof(rsdb_mode));
11226 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
11227 NULL, 0, TRUE);
11228 if (ret < 0) {
11229 DHD_ERROR(("%s disabling rsdb_mode failed ret= %d\n",
11230 __FUNCTION__, ret));
11231 }
11232 }
11233 #endif /* !CUSTOM_SET_ANTNPM */
11234 } else {
11235 uint32 concurrent_mode = 0;
11236 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
11237 (op_mode == DHD_FLAG_P2P_MODE)) {
11238 #if defined(ARP_OFFLOAD_SUPPORT)
11239 arpoe = 0;
11240 #endif // endif
11241 #ifdef PKT_FILTER_SUPPORT
11242 dhd_pkt_filter_enable = FALSE;
11243 #endif // endif
11244 dhd->op_mode = DHD_FLAG_P2P_MODE;
11245 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
11246 (op_mode == DHD_FLAG_IBSS_MODE)) {
11247 dhd->op_mode = DHD_FLAG_IBSS_MODE;
11248 } else
11249 dhd->op_mode = DHD_FLAG_STA_MODE;
11250 #if !defined(AP) && defined(WLP2P)
11251 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
11252 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
11253 #if defined(ARP_OFFLOAD_SUPPORT)
11254 arpoe = 1;
11255 #endif // endif
11256 dhd->op_mode |= concurrent_mode;
11257 }
11258
11259 /* Check if we are enabling p2p */
11260 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11261 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
11262 TRUE);
11263 if (ret < 0)
11264 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
11265
11266 #if defined(SOFTAP_AND_GC)
11267 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
11268 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
11269 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
11270 }
11271 #endif // endif
11272 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
11273 ETHER_SET_LOCALADDR(&p2p_ea);
11274 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
11275 NULL, 0, TRUE);
11276 if (ret < 0)
11277 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
11278 else
11279 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11280 }
11281 #else
11282 (void)concurrent_mode;
11283 #endif // endif
11284 }
11285
11286 #ifdef DISABLE_PRUNED_SCAN
11287 if (FW_SUPPORTED(dhd, rsdb)) {
11288 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11289 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
11290 if (ret < 0) {
11291 DHD_ERROR(("%s get scan_features failed ret=%d\n",
11292 __FUNCTION__, ret));
11293 } else {
11294 memcpy(&scan_features, iovbuf, 4);
11295 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
11296 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11297 sizeof(scan_features), NULL, 0, TRUE);
11298 if (ret < 0) {
11299 DHD_ERROR(("%s set scan_features failed ret=%d\n",
11300 __FUNCTION__, ret));
11301 }
11302 }
11303 }
11304 #endif /* DISABLE_PRUNED_SCAN */
11305
11306 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
11307 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
11308
11309 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11310 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
11311 dhd->info->rxthread_enabled = FALSE;
11312 else
11313 dhd->info->rxthread_enabled = TRUE;
11314 #endif // endif
11315 /* Set Country code */
11316 if (dhd->dhd_cspec.ccode[0] != 0) {
11317 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
11318 NULL, 0, TRUE);
11319 if (ret < 0)
11320 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
11321 }
11322
11323 #ifdef DHD_2G_ONLY_SUPPORT
11324 DHD_ERROR(("Enabled DHD 2G only support!!\n"));
11325 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, (char *)&band, sizeof(band), TRUE, 0);
11326 if (ret < 0) {
11327 DHD_ERROR(("%s Set Band B failed %d\n", __FUNCTION__, ret));
11328 }
11329 #endif /* DHD_2G_ONLY_SUPPORT */
11330
11331 /* Set Listen Interval */
11332 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
11333 NULL, 0, TRUE);
11334 if (ret < 0)
11335 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
11336
11337 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11338 #ifdef USE_WFA_CERT_CONF
11339 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
11340 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
11341 }
11342 #endif /* USE_WFA_CERT_CONF */
11343 /* Disable built-in roaming to allow the ext supplicant to take care of roaming */
11344 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
11345 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11346 #if defined(ROAM_ENABLE)
11347 #ifdef DISABLE_BCNLOSS_ROAM
11348 ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
11349 sizeof(roam_bcnloss_off), NULL, 0, TRUE);
11350 #endif /* DISABLE_BCNLOSS_ROAM */
11351 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
11352 sizeof(roam_trigger), TRUE, 0)) < 0)
11353 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
11354 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
11355 sizeof(roam_scan_period), TRUE, 0)) < 0)
11356 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
11357 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
11358 sizeof(roam_delta), TRUE, 0)) < 0)
11359 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
11360 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
11361 sizeof(roam_fullscan_period), NULL, 0, TRUE);
11362 if (ret < 0)
11363 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
11364 #ifdef ROAM_AP_ENV_DETECTION
11365 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
11366 if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
11367 sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
11368 dhd->roam_env_detection = TRUE;
11369 else
11370 dhd->roam_env_detection = FALSE;
11371 }
11372 #endif /* ROAM_AP_ENV_DETECTION */
11373 #endif /* ROAM_ENABLE */
11374
11375 #ifdef CUSTOM_EVENT_PM_WAKE
11376 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
11377 sizeof(pm_awake_thresh), NULL, 0, TRUE);
11378 if (ret < 0) {
11379 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
11380 }
11381 #endif /* CUSTOM_EVENT_PM_WAKE */
11382 #ifdef OKC_SUPPORT
11383 ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
11384 #endif // endif
11385 #ifdef BCMCCX
11386 ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
11387 #endif /* BCMCCX */
11388
11389 #ifdef WLTDLS
11390 dhd->tdls_enable = FALSE;
11391 dhd_tdls_set_mode(dhd, false);
11392 #endif /* WLTDLS */
11393
11394 #ifdef DHD_ENABLE_LPC
11395 /* Set lpc 1 */
11396 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11397 if (ret < 0) {
11398 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
11399
11400 if (ret == BCME_NOTDOWN) {
11401 uint wl_down = 1;
11402 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11403 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11404 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
11405
11406 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11407 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
11408 }
11409 }
11410 #endif /* DHD_ENABLE_LPC */
11411
11412 #ifdef WLADPS
11413 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
11414 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
11415 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11416 __FUNCTION__, ret));
11417 }
11418 }
11419 #endif /* WLADPS */
11420
11421 #ifdef DHD_PM_CONTROL_FROM_FILE
11422 sec_control_pm(dhd, &power_mode);
11423 #else
11424 /* Set PowerSave mode */
11425 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
11426 #endif /* DHD_PM_CONTROL_FROM_FILE */
11427
11428 #if defined(BCMSDIO)
11429 /* Match Host and Dongle rx alignment */
11430 ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
11431 NULL, 0, TRUE);
11432
11433 #ifdef USE_WFA_CERT_CONF
11434 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
11435 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
11436 }
11437 #endif /* USE_WFA_CERT_CONF */
11438 if (glom != DEFAULT_GLOM_VALUE) {
11439 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
11440 ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
11441 }
11442 #endif /* defined(BCMSDIO) */
11443
11444 /* Set a timeout to report link down when beacons are lost and roaming is off */
11445 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0,
11446 TRUE);
11447
11448 /* Setup assoc_retry_max count to reconnect target AP in dongle */
11449 ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0,
11450 TRUE);
11451
11452 #if defined(AP) && !defined(WLP2P)
11453 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
11454
11455 #endif /* defined(AP) && !defined(WLP2P) */
11456
11457 #ifdef MIMO_ANT_SETTING
11458 dhd_sel_ant_from_file(dhd);
11459 #endif /* MIMO_ANT_SETTING */
11460
11461 #if defined(SOFTAP)
11462 if (ap_fw_loaded == TRUE) {
11463 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
11464 }
11465 #endif // endif
11466
11467 #if defined(KEEP_ALIVE)
11468 {
11469 /* Set Keep Alive : be sure to use FW with -keepalive */
11470 int res;
11471
11472 #if defined(SOFTAP)
11473 if (ap_fw_loaded == FALSE)
11474 #endif // endif
11475 if (!(dhd->op_mode &
11476 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
11477 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
11478 DHD_ERROR(("%s set keepalive failed %d\n",
11479 __FUNCTION__, res));
11480 }
11481 }
11482 #endif /* defined(KEEP_ALIVE) */
11483
11484 #ifdef USE_WL_TXBF
11485 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
11486 if (ret < 0)
11487 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
11488
11489 #endif /* USE_WL_TXBF */
11490
11491 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
11492 0, TRUE);
11493 if (ret < 0) {
11494 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
11495 }
11496
11497 #ifdef DISABLE_TXBFR
11498 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
11499 0, TRUE);
11500 if (ret < 0) {
11501 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
11502 }
11503 #endif /* DISABLE_TXBFR */
11504
11505 #ifdef USE_WFA_CERT_CONF
11506 #ifdef USE_WL_FRAMEBURST
11507 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
11508 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
11509 }
11510 #endif /* USE_WL_FRAMEBURST */
11511 g_frameburst = frameburst;
11512 #endif /* USE_WFA_CERT_CONF */
11513 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11514 /* Disable framebursting for SoftAP */
11515 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
11516 frameburst = 0;
11517 }
11518 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11519 /* Set frameburst to value */
11520 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
11521 sizeof(frameburst), TRUE, 0)) < 0) {
11522 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
11523 }
11524 #ifdef DHD_SET_FW_HIGHSPEED
11525 /* Set ack_ratio */
11526 ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
11527 if (ret < 0) {
11528 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
11529 }
11530
11531 /* Set ack_ratio_depth */
11532 ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
11533 sizeof(ack_ratio_depth), NULL, 0, TRUE);
11534 if (ret < 0) {
11535 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
11536 }
11537 #endif /* DHD_SET_FW_HIGHSPEED */
11538
11539 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
11540 if (iov_buf == NULL) {
11541 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
11542 ret = BCME_NOMEM;
11543 goto done;
11544 }
11545
11546 #ifdef WLAIBSS
11547 /* Apply AIBSS configurations */
11548 if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
11549 DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
11550 __FUNCTION__, ret));
11551 goto done;
11552 }
11553 #endif /* WLAIBSS */
11554
11555 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
11556 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
11557 /* Set ampdu ba wsize to 64 or 16 */
11558 #ifdef CUSTOM_AMPDU_BA_WSIZE
11559 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
11560 #endif // endif
11561 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
11562 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
11563 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
11564 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
11565 if (ampdu_ba_wsize != 0) {
11566 ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
11567 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
11568 if (ret < 0) {
11569 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11570 __FUNCTION__, ampdu_ba_wsize, ret));
11571 }
11572 }
11573 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
11574
11575 #if defined(CUSTOM_AMPDU_MPDU)
11576 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
11577 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
11578 ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
11579 NULL, 0, TRUE);
11580 if (ret < 0) {
11581 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11582 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
11583 }
11584 }
11585 #endif /* CUSTOM_AMPDU_MPDU */
11586
11587 #if defined(CUSTOM_AMPDU_RELEASE)
11588 ampdu_release = CUSTOM_AMPDU_RELEASE;
11589 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
11590 ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
11591 sizeof(ampdu_release), NULL, 0, TRUE);
11592 if (ret < 0) {
11593 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11594 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
11595 }
11596 }
11597 #endif /* CUSTOM_AMPDU_RELEASE */
11598
11599 #if defined(CUSTOM_AMSDU_AGGSF)
11600 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
11601 if (amsdu_aggsf != 0) {
11602 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
11603 NULL, 0, TRUE);
11604 if (ret < 0) {
11605 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11606 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
11607 }
11608 }
11609 #endif /* CUSTOM_AMSDU_AGGSF */
11610
11611 #if defined(BCMSUP_4WAY_HANDSHAKE)
11612 /* Read 4-way handshake requirements */
11613 if (dhd_use_idsup == 1) {
11614 ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
11615 (char *)&iovbuf, sizeof(iovbuf), FALSE);
11616 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
11617 * in-dongle supplicant.
11618 */
11619 if (ret >= 0 || ret == BCME_NOTREADY)
11620 dhd->fw_4way_handshake = TRUE;
11621 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
11622 }
11623 #endif /* BCMSUP_4WAY_HANDSHAKE */
11624 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
11625 ret = dhd_iovar(dhd, 0, "vht_features", NULL, 0,
11626 (char *)&vht_features, sizeof(vht_features), FALSE);
11627 if (ret < 0) {
11628 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
11629 vht_features = 0;
11630 } else {
11631 #ifdef SUPPORT_2G_VHT
11632 vht_features |= 0x3; /* 2G support */
11633 #endif /* SUPPORT_2G_VHT */
11634 #ifdef SUPPORT_5G_1024QAM_VHT
11635 vht_features |= 0x6; /* 5G 1024 QAM support */
11636 #endif /* SUPPORT_5G_1024QAM_VHT */
11637 }
11638 if (vht_features) {
11639 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
11640 NULL, 0, TRUE);
11641 if (ret < 0) {
11642 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
11643
11644 if (ret == BCME_NOTDOWN) {
11645 uint wl_down = 1;
11646 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11647 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11648 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11649 " vht_features = 0x%x\n",
11650 __FUNCTION__, ret, vht_features));
11651
11652 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
11653 sizeof(vht_features), NULL, 0, TRUE);
11654
11655 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
11656 }
11657 }
11658 }
11659 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
11660 #ifdef DISABLE_11N_PROPRIETARY_RATES
11661 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
11662 TRUE);
11663 if (ret < 0) {
11664 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
11665 }
11666 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11667 #ifdef DHD_DISABLE_VHTMODE
11668 dhd_disable_vhtmode(dhd);
11669 #endif /* DHD_DISABLE_VHTMODE */
11670
11671 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
11672 NULL, 0, TRUE);
11673 if (ret < 0) {
11674 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
11675 }
11676 #ifdef SUPPORT_SET_CAC
11677 ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
11678 if (ret < 0) {
11679 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
11680 }
11681 #endif /* SUPPORT_SET_CAC */
11682 #ifdef DHD_ULP
11683 /* Get the required details from dongle during preinit ioctl */
11684 dhd_ulp_preinit(dhd);
11685 #endif /* DHD_ULP */
11686
11687 /* Read event_msgs mask */
11688 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
11689 sizeof(iovbuf), FALSE);
11690 if (ret < 0) {
11691 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
11692 goto done;
11693 }
11694 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
11695
11696 /* Setup event_msgs */
11697 setbit(eventmask, WLC_E_SET_SSID);
11698 setbit(eventmask, WLC_E_PRUNE);
11699 setbit(eventmask, WLC_E_AUTH);
11700 setbit(eventmask, WLC_E_AUTH_IND);
11701 setbit(eventmask, WLC_E_ASSOC);
11702 setbit(eventmask, WLC_E_REASSOC);
11703 setbit(eventmask, WLC_E_REASSOC_IND);
11704 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
11705 setbit(eventmask, WLC_E_DEAUTH);
11706 setbit(eventmask, WLC_E_DEAUTH_IND);
11707 setbit(eventmask, WLC_E_DISASSOC_IND);
11708 setbit(eventmask, WLC_E_DISASSOC);
11709 setbit(eventmask, WLC_E_JOIN);
11710 setbit(eventmask, WLC_E_START);
11711 setbit(eventmask, WLC_E_ASSOC_IND);
11712 setbit(eventmask, WLC_E_PSK_SUP);
11713 setbit(eventmask, WLC_E_LINK);
11714 setbit(eventmask, WLC_E_MIC_ERROR);
11715 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
11716 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
11717 #ifdef LIMIT_BORROW
11718 setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
11719 #endif // endif
11720 #ifndef WL_CFG80211
11721 setbit(eventmask, WLC_E_PMKID_CACHE);
11722 setbit(eventmask, WLC_E_TXFAIL);
11723 #endif // endif
11724 setbit(eventmask, WLC_E_JOIN_START);
11725 setbit(eventmask, WLC_E_SCAN_COMPLETE);
11726 #ifdef DHD_DEBUG
11727 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
11728 #endif // endif
11729 #ifdef PNO_SUPPORT
11730 setbit(eventmask, WLC_E_PFN_NET_FOUND);
11731 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
11732 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
11733 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
11734 #endif /* PNO_SUPPORT */
11735 /* enable dongle roaming event */
11736 #ifdef WL_CFG80211
11737 #if !defined(ROAM_EVT_DISABLE)
11738 setbit(eventmask, WLC_E_ROAM);
11739 #endif /* !ROAM_EVT_DISABLE */
11740 setbit(eventmask, WLC_E_BSSID);
11741 #endif /* WL_CFG80211 */
11742 #ifdef BCMCCX
11743 setbit(eventmask, WLC_E_ADDTS_IND);
11744 setbit(eventmask, WLC_E_DELTS_IND);
11745 #endif /* BCMCCX */
11746 #ifdef WLTDLS
11747 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
11748 #endif /* WLTDLS */
11749 #ifdef RTT_SUPPORT
11750 setbit(eventmask, WLC_E_PROXD);
11751 #endif /* RTT_SUPPORT */
11752 #ifdef WL_CFG80211
11753 setbit(eventmask, WLC_E_ESCAN_RESULT);
11754 setbit(eventmask, WLC_E_AP_STARTED);
11755 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
11756 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11757 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
11758 }
11759 #endif /* WL_CFG80211 */
11760 #ifdef WLAIBSS
11761 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
11762 #endif /* WLAIBSS */
11763
11764 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11765 if (dhd_logtrace_from_file(dhd)) {
11766 setbit(eventmask, WLC_E_TRACE);
11767 } else {
11768 clrbit(eventmask, WLC_E_TRACE);
11769 }
11770 #elif defined(SHOW_LOGTRACE)
11771 setbit(eventmask, WLC_E_TRACE);
11772 #else
11773 clrbit(eventmask, WLC_E_TRACE);
11774 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11775
11776 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
11777 #ifdef CUSTOM_EVENT_PM_WAKE
11778 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
11779 #endif /* CUSTOM_EVENT_PM_WAKE */
11780 #ifdef DHD_LOSSLESS_ROAMING
11781 setbit(eventmask, WLC_E_ROAM_PREP);
11782 #endif // endif
11783 /* nan events */
11784 setbit(eventmask, WLC_E_NAN);
11785 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11786 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11787 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11788
11789 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11790 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11791 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11792
11793 /* Write updated Event mask */
11794 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
11795 if (ret < 0) {
11796 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
11797 goto done;
11798 }
11799
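/*
 * The legacy "event_msgs" bitmask above only covers WL_EVENTING_MASK_LEN
 * bytes worth of event IDs; anything beyond that is enabled through the
 * versioned "event_msgs_ext" iovar, whose payload is an eventmsgs_ext_t
 * header followed by a ROUNDUP(WLC_E_LAST, NBBY)/NBBY byte bitmap.
 */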
11800 /* make up event mask ext message iovar for events larger than 128 */
11801 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
11802 eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
11803 if (eventmask_msg == NULL) {
11804 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
11805 ret = BCME_NOMEM;
11806 goto done;
11807 }
11808 bzero(eventmask_msg, msglen);
11809 eventmask_msg->ver = EVENTMSGS_VER;
11810 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11811
11812 /* Read event_msgs_ext mask */
11813 ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
11814 WLC_IOCTL_SMLEN, FALSE);
11815
11816 if (ret2 == 0) { /* event_msgs_ext must be supported */
11817 bcopy(iov_buf, eventmask_msg, msglen);
11818 #ifdef RSSI_MONITOR_SUPPORT
11819 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11820 #endif /* RSSI_MONITOR_SUPPORT */
11821 #ifdef GSCAN_SUPPORT
11822 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
11823 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
11824 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
11825 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
11826 #endif /* GSCAN_SUPPORT */
11827 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11828 #ifdef BT_WIFI_HANDOVER
11829 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
11830 #endif /* BT_WIFI_HANDOVER */
11831 #ifdef DBG_PKT_MON
11832 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
11833 #endif /* DBG_PKT_MON */
11834 #ifdef DHD_ULP
11835 setbit(eventmask_msg->mask, WLC_E_ULP);
11836 #endif // endif
11837 #ifdef WL_NATOE
11838 setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
11839 #endif /* WL_NATOE */
11840 #ifdef WL_NAN
11841 setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
11842 #endif /* WL_NAN */
11843 #ifdef SUPPORT_EVT_SDB_LOG
11844 setbit(eventmask_msg->mask, WLC_E_SDB_TRANSITION);
11845 #endif /* SUPPORT_EVT_SDB_LOG */
11846 #ifdef WL_BCNRECV
11847 setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
11848 #endif /* WL_BCNRECV */
11849 /* Write updated Event mask */
11850 eventmask_msg->ver = EVENTMSGS_VER;
11851 eventmask_msg->command = EVENTMSGS_SET_MASK;
11852 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11853 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
11854 TRUE);
11855 if (ret < 0) {
11856 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
11857 goto done;
11858 }
11859 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
11860 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11861 DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n",
11862 __FUNCTION__, ret2));
11863 } else {
11864 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
11865 ret = ret2;
11866 goto done;
11867 }
11868
11869 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11870 /* Enabling event log trace for EAP events */
11871 el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
11872 if (el_tag == NULL) {
11873 DHD_ERROR(("failed to allocate %d bytes for el_tag\n",
11874 (int)sizeof(wl_el_tag_params_t)));
11875 ret = BCME_NOMEM;
11876 goto done;
11877 }
11878 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
11879 el_tag->set = 1;
11880 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
11881 ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL, 0,
11882 TRUE);
11883 #endif /* DHD_8021X_DUMP */
11884
11885 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
11886 sizeof(scan_assoc_time), TRUE, 0);
11887 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
11888 sizeof(scan_unassoc_time), TRUE, 0);
11889 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
11890 sizeof(scan_passive_time), TRUE, 0);
11891
11892 #ifdef ARP_OFFLOAD_SUPPORT
11893 /* Set and enable ARP offload feature for STA only */
11894 #if defined(SOFTAP)
11895 if (arpoe && !ap_fw_loaded) {
11896 #else
11897 if (arpoe) {
11898 #endif // endif
11899 dhd_arp_offload_enable(dhd, TRUE);
11900 dhd_arp_offload_set(dhd, dhd_arp_mode);
11901 } else {
11902 dhd_arp_offload_enable(dhd, FALSE);
11903 dhd_arp_offload_set(dhd, 0);
11904 }
11905 dhd_arp_enable = arpoe;
11906 #endif /* ARP_OFFLOAD_SUPPORT */
11907
11908 #ifdef PKT_FILTER_SUPPORT
11909 /* Setup default definitions for pktfilter, enable in suspend */
11910 dhd->pktfilter_count = 6;
11911 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11912 if (!FW_SUPPORTED(dhd, pf6)) {
11913 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11914 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11915 } else {
11916 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11917 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11918 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11919 }
11920 /* apply APP pktfilter */
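/* Filter spec layout (assumed dhd pkt_filter format):
 * "<id> <polarity> <type> <offset> <bitmask> <pattern>", so ID 105 below
 * matches ethertype 0x0806 (ARP) at byte offset 12.
 */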
11921 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
11922
11923 #ifdef BLOCK_IPV6_PACKET
11924 /* Setup filter to allow only IPv4 unicast frames */
11925 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
11926 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
11927 " "
11928 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
11929 #else
11930 /* Setup filter to allow only unicast */
11931 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11932 #endif /* BLOCK_IPV6_PACKET */
11933
11934 #ifdef PASS_IPV4_SUSPEND
11935 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
11936 #else
11937 /* Add filter to pass multicast DNS packets and NOT filter them out as broadcast */
11938 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11939 #endif /* PASS_IPV4_SUSPEND */
11940 if (FW_SUPPORTED(dhd, pf6)) {
11941 /* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
11942 dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
11943 /* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
11944 dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
11945 /* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
11946 dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
11947 dhd->pktfilter_count = 10;
11948 }
11949
11950 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11951 dhd->pktfilter_count = 4;
11952 /* Setup filter to block broadcast and NAT Keepalive packets */
11953 /* discard all broadcast packets */
11954 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11955 /* discard NAT Keepalive packets */
11956 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11957 /* discard NAT Keepalive packets */
11958 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11959 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11960 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11961
11962 #if defined(SOFTAP)
11963 if (ap_fw_loaded) {
11964 dhd_enable_packet_filter(0, dhd);
11965 }
11966 #endif /* defined(SOFTAP) */
11967 dhd_set_packet_filter(dhd);
11968 #endif /* PKT_FILTER_SUPPORT */
11969 #ifdef DISABLE_11N
11970 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
11971 if (ret < 0)
11972 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
11973 #endif /* DISABLE_11N */
11974
11975 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11976 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0,
11977 TRUE);
11978 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11979 #ifdef AMPDU_VO_ENABLE
11980 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
11981 tid.enable = TRUE;
11982 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11983
11984 tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
11985 tid.enable = TRUE;
11986 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11987 #endif // endif
11988 /* query for 'clmver' to get clm version info from firmware */
11989 memset(buf, 0, sizeof(buf));
11990 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11991 if (ret < 0)
11992 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11993 else {
11994 char *ver_temp_buf = NULL;
11995
11996 if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11997 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11998 } else {
11999 ptr = (ver_temp_buf + strlen("Data:"));
12000 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
12001 DHD_ERROR(("Couldn't find newline character\n"));
12002 } else {
12003 memset(clm_version, 0, CLM_VER_STR_LEN);
12004 strncpy(clm_version, ver_temp_buf,
12005 MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN - 1));
12006 DHD_INFO(("CLM version = %s\n", clm_version));
12007 }
12008 }
12009
12010 #if defined(CUSTOMER_HW4_DEBUG)
12011 if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
12012 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
12013 } else {
12014 char tokenlim;
12015 ptr = (ver_temp_buf + strlen("Customization:"));
12016 if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
12017 DHD_ERROR(("Couldn't find project blob version"
12018 " or newline character\n"));
12019 } else if (tokenlim == '(') {
12020 snprintf(clm_version,
12021 CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
12022 clm_version, ver_temp_buf);
12023 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
12024 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
12025 DHD_ERROR(("Couldn't find newline character\n"));
12026 } else {
12027 snprintf(clm_version,
12028 strlen(clm_version) + strlen(ver_temp_buf) + 1,
12029 "%s%s", clm_version, ver_temp_buf);
12030 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
12031 clm_version));
12032
12033 }
12034 } else if (tokenlim == '\n') {
12035 snprintf(clm_version,
12036 strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
12037 "%s, Blob ver = Major : ", clm_version);
12038 snprintf(clm_version,
12039 strlen(clm_version) + strlen(ver_temp_buf) + 1,
12040 "%s%s", clm_version, ver_temp_buf);
12041 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
12042 }
12043 }
12044 #endif /* CUSTOMER_HW4_DEBUG */
12045 if (strlen(clm_version)) {
12046 DHD_ERROR(("CLM version = %s\n", clm_version));
12047 } else {
12048 DHD_ERROR(("Couldn't find CLM version!\n"));
12049 }
12050 }
12051
12052 /* query for 'ver' to get version info from firmware */
12053 memset(buf, 0, sizeof(buf));
12054 ptr = buf;
12055 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
12056 if (ret < 0)
12057 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
12058 else {
12059 bcmstrtok(&ptr, "\n", 0);
12060 /* Print fw version info */
12061 DHD_ERROR(("Firmware version = %s\n", buf));
12062 strncpy(fw_version, buf, FW_VER_STR_LEN);
12063 fw_version[FW_VER_STR_LEN-1] = '\0';
12064 #if defined(BCMSDIO) || defined(BCMPCIE)
12065 dhd_set_version_info(dhd, buf);
12066 #endif /* BCMSDIO || BCMPCIE */
12067 #ifdef WRITE_WLANINFO
12068 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
12069 #endif /* WRITE_WLANINFO */
12070 }
12071 #ifdef GEN_SOFTAP_INFO_FILE
12072 sec_save_softap_info();
12073 #endif /* GEN_SOFTAP_INFO_FILE */
12074
12075 #if defined(BCMSDIO)
12076 dhd_txglom_enable(dhd, TRUE);
12077 #endif /* defined(BCMSDIO) */
12078
12079 #if defined(BCMSDIO)
12080 #ifdef PROP_TXSTATUS
12081 if (disable_proptx ||
12082 #ifdef PROP_TXSTATUS_VSDB
12083 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
12084 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
12085 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
12086 #endif /* PROP_TXSTATUS_VSDB */
12087 FALSE) {
12088 wlfc_enable = FALSE;
12089 }
12090
12091 #if defined(PROP_TXSTATUS)
12092 #ifdef USE_WFA_CERT_CONF
12093 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
12094 DHD_ERROR(("%s, read proptx param=%d\n", __FUNCTION__, proptx));
12095 wlfc_enable = proptx;
12096 }
12097 #endif /* USE_WFA_CERT_CONF */
12098 #endif /* PROP_TXSTATUS */
12099
12100 #ifndef DISABLE_11N
12101 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
12102 NULL, 0, TRUE);
12103 if (ret2 < 0) {
12104 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
12105 if (ret2 != BCME_UNSUPPORTED)
12106 ret = ret2;
12107
12108 if (ret == BCME_NOTDOWN) {
12109 uint wl_down = 1;
12110 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
12111 sizeof(wl_down), TRUE, 0);
12112 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
12113 __FUNCTION__, ret2, hostreorder));
12114
12115 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
12116 sizeof(hostreorder), NULL, 0, TRUE);
12117 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
12118 if (ret2 != BCME_UNSUPPORTED)
12119 ret = ret2;
12120 }
12121 if (ret2 != BCME_OK)
12122 hostreorder = 0;
12123 }
12124 #endif /* DISABLE_11N */
12125
12126 if (wlfc_enable)
12127 dhd_wlfc_init(dhd);
12128 #ifndef DISABLE_11N
12129 else if (hostreorder)
12130 dhd_wlfc_hostreorder_init(dhd);
12131 #endif /* DISABLE_11N */
12132
12133 #endif /* PROP_TXSTATUS */
12134 #endif /* BCMSDIO */
12135 #ifndef PCIE_FULL_DONGLE
12136 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
12137 if (FW_SUPPORTED(dhd, ap)) {
12138 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
12139 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
12140 NULL, 0, TRUE);
12141 if (ret < 0)
12142 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
12143 }
12144 #endif /* PCIE_FULL_DONGLE */
12145 #ifdef PNO_SUPPORT
12146 if (!dhd->pno_state) {
12147 dhd_pno_init(dhd);
12148 }
12149 #endif // endif
12150 #ifdef RTT_SUPPORT
12151 if (!dhd->rtt_state) {
12152 ret = dhd_rtt_init(dhd);
12153 if (ret < 0) {
12154 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
12155 }
12156 }
12157 #endif // endif
12158 #ifdef FILTER_IE
12159 /* Failure to configure filter IE is not a fatal error, ignore it. */
12160 if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
12161 dhd_read_from_file(dhd);
12162 #endif /* FILTER_IE */
12163 #ifdef WL11U
12164 dhd_interworking_enable(dhd);
12165 #endif /* WL11U */
12166
12167 #ifdef NDO_CONFIG_SUPPORT
12168 dhd->ndo_enable = FALSE;
12169 dhd->ndo_host_ip_overflow = FALSE;
12170 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
12171 #endif /* NDO_CONFIG_SUPPORT */
12172
12173 /* ND offload version supported */
12174 dhd->ndo_version = dhd_ndo_get_version(dhd);
12175 if (dhd->ndo_version > 0) {
12176 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
12177
12178 #ifdef NDO_CONFIG_SUPPORT
12179 /* enable Unsolicited NA filter */
12180 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
12181 if (ret < 0) {
12182 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
12183 }
12184 #endif /* NDO_CONFIG_SUPPORT */
12185 }
12186
12187 /* check dongle supports wbtext (product policy) or not */
12188 dhd->wbtext_support = FALSE;
12189 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
12190 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
12191 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12192 }
12193 dhd->wbtext_policy = wnm_bsstrans_resp;
12194 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
12195 dhd->wbtext_support = TRUE;
12196 }
12197 #ifndef WBTEXT
12198 /* driver can turn off wbtext feature through makefile */
12199 if (dhd->wbtext_support) {
12200 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
12201 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
12202 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
12203 DHD_ERROR(("failed to disable WBTEXT\n"));
12204 }
12205 }
12206 #endif /* !WBTEXT */
12207
12208 #if defined(DHD_NON_DMA_M2M_CORRUPTION)
12209 /* Sanity-check PCIe non-DMA (M2M) loopback in manufacturing mode */
12210 if (dhd->op_mode == DHD_FLAG_MFG_MODE) {
12211 memset(&pcie_dmaxfer_lpbk, 0, sizeof(dhd_pcie_dmaxfer_lpbk_t));
12212 pcie_dmaxfer_lpbk.u.length = PCIE_DMAXFER_LPBK_LENGTH;
12213 pcie_dmaxfer_lpbk.lpbkmode = M2M_NON_DMA_LPBK;
12214 pcie_dmaxfer_lpbk.wait = TRUE;
12215
12216 if ((ret = dhd_bus_iovar_op(dhd, "pcie_dmaxfer", NULL, 0,
12217 (char *)&pcie_dmaxfer_lpbk, sizeof(dhd_pcie_dmaxfer_lpbk_t),
12218 IOV_SET)) < 0) {
12219 DHD_ERROR(("PCIe Non DMA Loopback Test failed!!! Reason : %d\n",
12220 ret));
12221 goto done;
12222 }
12223
12224 if (pcie_dmaxfer_lpbk.u.status != BCME_OK) {
12225 DHD_ERROR(("PCIe Non DMA Loopback Test failed!!! Reason : %d"
12226 " Status : %d\n", ret, pcie_dmaxfer_lpbk.u.status));
12227 ret = BCME_ERROR;
12228 goto done;
12229 } else {
12231 DHD_ERROR(("PCIe Non DMA Loopback Test passed\n"));
12232 }
12233 }
12234 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
12235
12236 /* WNM capabilities */
12237 wnm_cap = 0
12238 #ifdef WL11U
12239 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
12240 #endif // endif
12241 #ifdef WBTEXT
12242 | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
12243 #endif // endif
12244 ;
12245 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
12246 DHD_ERROR(("failed to set WNM capabilities\n"));
12247 }
12248
12249 if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
12250 if (dhd_start_ecounters(dhd) != BCME_OK) {
12251 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
12252 } else if (dhd_start_event_ecounters(dhd) != BCME_OK) {
12253 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
12254 }
12255
12256 }
12257
12258 /* store the preserve log set numbers */
12259 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
12260 != BCME_OK) {
12261 DHD_ERROR(("%s: Failed to get preserve log numbers!\n", __FUNCTION__));
12262 }
12263
12264 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
12265 if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
12266 NULL, 0, TRUE) < 0) {
12267 DHD_ERROR(("failed to set BTM delta\n"));
12268 }
12269 #endif /* WBTEXT && WBTEXT_BTMDELTA */
12270
12271 #ifdef WL_MONITOR
12272 if (FW_SUPPORTED(dhd, monitor)) {
12273 dhd->monitor_enable = TRUE;
12274 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
12275 } else {
12276 dhd->monitor_enable = FALSE;
12277 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
12278 }
12279 #endif /* WL_MONITOR */
12280
12281 done:
12282
12283 if (eventmask_msg) {
12284 MFREE(dhd->osh, eventmask_msg, msglen);
12285 eventmask_msg = NULL;
12286 }
12287 if (iov_buf) {
12288 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
12289 iov_buf = NULL;
12290 }
12291 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12292 if (el_tag) {
12293 MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
12294 el_tag = NULL;
12295 }
12296 #endif /* DHD_8021X_DUMP */
12297 return ret;
12298 }
12299
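/*
 * Generic named-iovar wrapper around dhd_wl_ioctl(). SET callers pass the
 * value in param_buf and must leave res_buf NULL/res_len 0; GET callers pass
 * any input parameter in param_buf and receive the result in res_buf.
 * Typical calls from this file:
 *   dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
 *   dhd_iovar(dhd, 0, "ver", NULL, 0, buf, sizeof(buf), FALSE);
 */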
12300 int
12301 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
12302 uint res_len, int set)
12303 {
12304 char *buf = NULL;
12305 int input_len;
12306 wl_ioctl_t ioc;
12307 int ret;
12308
12309 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
12310 return BCME_BADARG;
12311
12312 input_len = strlen(name) + 1 + param_len;
12313 if (input_len > WLC_IOCTL_MAXLEN)
12314 return BCME_BADARG;
12315
12316 buf = NULL;
12317 if (set) {
12318 if (res_buf || res_len != 0) {
12319 DHD_ERROR(("%s: SET: wrong argument\n", __FUNCTION__));
12320 ret = BCME_BADARG;
12321 goto exit;
12322 }
12323 buf = MALLOCZ(pub->osh, input_len);
12324 if (!buf) {
12325 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12326 ret = BCME_NOMEM;
12327 goto exit;
12328 }
12329 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12330 if (!ret) {
12331 ret = BCME_NOMEM;
12332 goto exit;
12333 }
12334
12335 ioc.cmd = WLC_SET_VAR;
12336 ioc.buf = buf;
12337 ioc.len = input_len;
12338 ioc.set = set;
12339
12340 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12341 } else {
12342 if (!res_buf || !res_len) {
12343 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
12344 ret = BCME_BADARG;
12345 goto exit;
12346 }
12347
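/*
 * bcm_mkiovar() needs room for the iovar name, its NUL terminator and the
 * parameters. If the caller's response buffer is smaller than that, stage
 * the request in a scratch buffer and copy only res_len result bytes back.
 */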
12348 if (res_len < input_len) {
12349 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
12350 res_len, input_len));
12351 buf = MALLOCZ(pub->osh, input_len);
12352 if (!buf) {
12353 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12354 ret = BCME_NOMEM;
12355 goto exit;
12356 }
12357 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12358 if (!ret) {
12359 ret = BCME_NOMEM;
12360 goto exit;
12361 }
12362
12363 ioc.cmd = WLC_GET_VAR;
12364 ioc.buf = buf;
12365 ioc.len = input_len;
12366 ioc.set = set;
12367
12368 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12369
12370 if (ret == BCME_OK) {
12371 memcpy(res_buf, buf, res_len);
12372 }
12373 } else {
12374 memset(res_buf, 0, res_len);
12375 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
12376 if (!ret) {
12377 ret = BCME_NOMEM;
12378 goto exit;
12379 }
12380
12381 ioc.cmd = WLC_GET_VAR;
12382 ioc.buf = res_buf;
12383 ioc.len = res_len;
12384 ioc.set = set;
12385
12386 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12387 }
12388 }
12389 exit:
12390 if (buf) {
12391 MFREE(pub->osh, buf, input_len);
12392 buf = NULL;
12393 }
12394 return ret;
12395 }
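/*
 * Editor's usage sketch (illustrative, not part of the original source):
 * a SET call passes the parameter in param_buf and must leave the result
 * buffer empty, while a GET call passes a result buffer and gets the value
 * copied back. The iovar name "mpc" is only an example.
 *
 *	uint32 mpc = 0;
 *	// SET: param in, no result buffer allowed
 *	dhd_iovar(pub, 0, "mpc", (char *)&mpc, sizeof(mpc), NULL, 0, TRUE);
 *	// GET: no param, value returned in the result buffer
 *	dhd_iovar(pub, 0, "mpc", NULL, 0, (char *)&mpc, sizeof(mpc), FALSE);
 */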
12396
12397 int
12398 dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
12399 uint cmd_len, char **resptr, uint resp_len)
12400 {
12401 int len = resp_len;
12402 int ret;
12403 char *buf = *resptr;
12404 wl_ioctl_t ioc;
12405 if (resp_len > WLC_IOCTL_MAXLEN)
12406 return BCME_BADARG;
12407
12408 memset(buf, 0, resp_len);
12409
12410 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
12411 if (ret == 0) {
12412 return BCME_BUFTOOSHORT;
12413 }
12414
12415 memset(&ioc, 0, sizeof(ioc));
12416
12417 ioc.cmd = WLC_GET_VAR;
12418 ioc.buf = buf;
12419 ioc.len = len;
12420 ioc.set = 0;
12421
12422 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12423
12424 return ret;
12425 }
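/*
 * Editor's note (illustrative): unlike dhd_iovar(), dhd_getiovar() builds
 * the request directly in the caller-owned buffer passed via *resptr, and
 * the dongle's response comes back in that same buffer. A sketch, with
 * "my_buf" as a hypothetical caller buffer:
 *
 *	char *resp = my_buf;	// >= resp_len bytes, <= WLC_IOCTL_MAXLEN
 *	dhd_getiovar(pub, 0, "ver", NULL, 0, &resp, sizeof(my_buf));
 */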
12426
12427 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
12428 {
12429 struct dhd_info *dhd = dhdp->info;
12430 struct net_device *dev = NULL;
12431
12432 ASSERT(dhd && dhd->iflist[ifidx]);
12433 dev = dhd->iflist[ifidx]->net;
12434 ASSERT(dev);
12435
12436 if (netif_running(dev)) {
12437 DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
12438 return BCME_NOTDOWN;
12439 }
12440
12441 #define DHD_MIN_MTU 1500
12442 #define DHD_MAX_MTU 1752
12443
12444 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
12445 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
12446 return BCME_BADARG;
12447 }
12448
12449 dev->mtu = new_mtu;
12450 return 0;
12451 }
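/*
 * Editor's note (illustrative): the MTU can only be changed while the
 * interface is down, and only within [DHD_MIN_MTU, DHD_MAX_MTU]:
 *
 *	// after "ifconfig wlan0 down"
 *	dhd_change_mtu(dhdp, 1752, 0);	// accepted
 *	dhd_change_mtu(dhdp, 1400, 0);	// rejected with BCME_BADARG
 */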
12452
12453 #ifdef ARP_OFFLOAD_SUPPORT
12454 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
12455 void
12456 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
12457 {
12458 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
12459 int i;
12460 int ret;
12461
12462 bzero(ipv4_buf, sizeof(ipv4_buf));
12463
12464 /* display what we've got */
12465 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12466 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
12467 #ifdef AOE_DBG
12468 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12469 #endif // endif
12470 /* now that the host_ip table is saved locally, clear it in the dongle AOE */
12471 dhd_aoe_hostip_clr(dhd_pub, idx);
12472
12473 if (ret) {
12474 DHD_ERROR(("%s failed\n", __FUNCTION__));
12475 return;
12476 }
12477
12478 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
12479 if (add && (ipv4_buf[i] == 0)) {
12480 ipv4_buf[i] = ipa;
12481 add = FALSE; /* added ipa to local table */
12482 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12483 __FUNCTION__, i));
12484 } else if (ipv4_buf[i] == ipa) {
12485 ipv4_buf[i] = 0;
12486 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12487 __FUNCTION__, ipa, i));
12488 }
12489
12490 if (ipv4_buf[i] != 0) {
12491 /* add back host_ip entries from our local cache */
12492 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
12493 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12494 __FUNCTION__, ipv4_buf[i], i));
12495 }
12496 }
12497 #ifdef AOE_DBG
12498 /* see the resulting hostip table */
12499 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12500 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
12501 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12502 #endif // endif
12503 }
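/*
 * Editor's worked example (illustrative): with a dongle host_ip table of
 * {A, B, 0, ...}, removing B reads the table, clears it in the dongle,
 * zeroes B in the local copy and writes back {A}; adding C instead fills
 * the first free slot and writes back {A, B, C}.
 */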
12504
12505 /*
12506 * Notification mechanism from the kernel to our driver. This function is called by the Linux
12507 * kernel whenever there is an event related to an IP address.
12508 * ptr : kernel-provided pointer to the IP address that has changed
12509 */
12510 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
12511 unsigned long event,
12512 void *ptr)
12513 {
12514 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
12515
12516 dhd_info_t *dhd;
12517 dhd_pub_t *dhd_pub;
12518 int idx;
12519
12520 if (!dhd_arp_enable)
12521 return NOTIFY_DONE;
12522 if (!ifa || !(ifa->ifa_dev->dev))
12523 return NOTIFY_DONE;
12524
12525 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12526 /* Filter notifications meant for non-Broadcom devices */
12527 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
12528 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
12529 #if defined(WL_ENABLE_P2P_IF)
12530 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
12531 #endif /* WL_ENABLE_P2P_IF */
12532 return NOTIFY_DONE;
12533 }
12534 #endif /* LINUX_VERSION_CODE */
12535
12536 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
12537 if (!dhd)
12538 return NOTIFY_DONE;
12539
12540 dhd_pub = &dhd->pub;
12541
12542 if (dhd_pub->arp_version == 1) {
12543 idx = 0;
12544 } else {
12545 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
12546 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
12547 break;
12548 }
12549 if (idx < DHD_MAX_IFS)
12550 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
12551 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
12552 else {
12553 DHD_ERROR(("Cannot find ifidx for (%s), set to 0\n", ifa->ifa_label));
12554 idx = 0;
12555 }
12556 }
12557
12558 switch (event) {
12559 case NETDEV_UP:
12560 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
12561 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12562
12563 /*
12564 * Skip if the bus is not in a state to transport the IOVAR,
12565 * or the dongle is not ready.
12566 */
12567 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
12568 dhd->pub.busstate == DHD_BUS_LOAD) {
12569 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
12570 __FUNCTION__, dhd->pub.busstate));
12571 if (dhd->pend_ipaddr) {
12572 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
12573 __FUNCTION__, dhd->pend_ipaddr));
12574 }
12575 dhd->pend_ipaddr = ifa->ifa_address;
12576 break;
12577 }
12578
12579 #ifdef AOE_IP_ALIAS_SUPPORT
12580 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
12581 __FUNCTION__));
12582 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
12583 #endif /* AOE_IP_ALIAS_SUPPORT */
12584 break;
12585
12586 case NETDEV_DOWN:
12587 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
12588 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
12589 dhd->pend_ipaddr = 0;
12590 #ifdef AOE_IP_ALIAS_SUPPORT
12591 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
12592 __FUNCTION__));
12593 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
12594 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
12595 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
12596 } else
12597 #endif /* AOE_IP_ALIAS_SUPPORT */
12598 {
12599 dhd_aoe_hostip_clr(&dhd->pub, idx);
12600 dhd_aoe_arp_clr(&dhd->pub, idx);
12601 }
12602 break;
12603
12604 default:
12605 DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
12606 __func__, ifa->ifa_label, event));
12607 break;
12608 }
12609 return NOTIFY_DONE;
12610 }
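/*
 * Editor's sketch (illustrative): this callback is hooked into the
 * kernel's inetaddr notifier chain elsewhere in this file in the usual
 * way:
 *
 *	static struct notifier_block dhd_inetaddr_notifier = {
 *		.notifier_call = dhd_inetaddr_notifier_call,
 *	};
 *	register_inetaddr_notifier(&dhd_inetaddr_notifier);
 */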
12611 #endif /* ARP_OFFLOAD_SUPPORT */
12612
12613 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12614 /* Neighbor Discovery Offload: deferred handler */
12615 static void
12616 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
12617 {
12618 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
12619 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
12620 dhd_pub_t *dhdp;
12621 int ret;
12622
12623 if (!dhd) {
12624 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
12625 goto done;
12626 }
12627 dhdp = &dhd->pub;
12628
12629 if (event != DHD_WQ_WORK_IPV6_NDO) {
12630 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
12631 goto done;
12632 }
12633
12634 if (!ndo_work) {
12635 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
12636 return;
12637 }
12638
12639 switch (ndo_work->event) {
12640 case NETDEV_UP:
12641 #ifndef NDO_CONFIG_SUPPORT
12642 DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
12643 ret = dhd_ndo_enable(dhdp, TRUE);
12644 if (ret < 0) {
12645 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
12646 }
12647 #endif /* !NDO_CONFIG_SUPPORT */
12648 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
12649 if (dhdp->ndo_version > 0) {
12650 /* inet6 addr notifier called only for unicast address */
12651 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
12652 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
12653 } else {
12654 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
12655 ndo_work->if_idx);
12656 }
12657 if (ret < 0) {
12658 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
12659 __FUNCTION__, ret));
12660 }
12661 break;
12662 case NETDEV_DOWN:
12663 if (dhdp->ndo_version > 0) {
12664 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
12665 ret = dhd_ndo_remove_ip_by_addr(dhdp,
12666 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
12667 } else {
12668 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
12669 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
12670 }
12671 if (ret < 0) {
12672 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
12673 __FUNCTION__, ret));
12674 goto done;
12675 }
12676 #ifdef NDO_CONFIG_SUPPORT
12677 if (dhdp->ndo_host_ip_overflow) {
12678 ret = dhd_dev_ndo_update_inet6addr(
12679 dhd_idx2net(dhdp, ndo_work->if_idx));
12680 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
12681 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
12682 __FUNCTION__, ret));
12683 goto done;
12684 }
12685 }
12686 #else /* !NDO_CONFIG_SUPPORT */
12687 DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
12688 ret = dhd_ndo_enable(dhdp, FALSE);
12689 if (ret < 0) {
12690 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
12691 goto done;
12692 }
12693 #endif /* NDO_CONFIG_SUPPORT */
12694 break;
12695
12696 default:
12697 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
12698 break;
12699 }
12700 done:
12701
12702 /* free ndo_work, allocated while scheduling the work */
12703 if (ndo_work) {
12704 kfree(ndo_work);
12705 }
12706
12707 return;
12708 } /* dhd_inet6_work_handler */
12709
12710 /*
12711 * Neighbor Discovery Offload: called when an interface
12712 * is assigned an IPv6 address.
12713 * Handles only the primary interface.
12714 */
12715 int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
12716 {
12717 dhd_info_t *dhd;
12718 dhd_pub_t *dhdp;
12719 struct inet6_ifaddr *inet6_ifa = ptr;
12720 struct ipv6_work_info_t *ndo_info;
12721 int idx;
12722
12723 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12724 /* Filter notifications meant for non-Broadcom devices */
12725 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
12726 return NOTIFY_DONE;
12727 }
12728 #endif /* LINUX_VERSION_CODE */
12729
12730 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
12731 if (!dhd) {
12732 return NOTIFY_DONE;
12733 }
12734 dhdp = &dhd->pub;
12735
12736 /* Supports only primary interface */
12737 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
12738 if (idx != 0) {
12739 return NOTIFY_DONE;
12740 }
12741
12742 /* FW capability */
12743 if (!FW_SUPPORTED(dhdp, ndoe)) {
12744 return NOTIFY_DONE;
12745 }
12746
12747 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
12748 if (!ndo_info) {
12749 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
12750 return NOTIFY_DONE;
12751 }
12752
12753 /* fill up ndo_info */
12754 ndo_info->event = event;
12755 ndo_info->if_idx = idx;
12756 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
12757
12758 /* defer the work to a thread, as it may block the kernel */
12759 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
12760 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
12761 return NOTIFY_DONE;
12762 }
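/*
 * Editor's note (illustrative): the inet6 notifier may run in atomic
 * context, so the iovar work is queued to the deferred-work thread with a
 * GFP_ATOMIC allocation instead of being issued inline; the handler
 * kfree()s the ipv6_work_info_t once it has been consumed.
 */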
12763 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12764
12765 /* Network attach to be invoked from the bus probe handlers */
12766 int
12767 dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
12768 {
12769 struct net_device *primary_ndev;
12770 BCM_REFERENCE(primary_ndev);
12771
12772 /* Register primary net device */
12773 if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
12774 return BCME_ERROR;
12775 }
12776
12777 #if defined(WL_CFG80211)
12778 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
12779 if (wl_cfg80211_net_attach(primary_ndev) < 0) {
12780 /* fail the init */
12781 dhd_remove_if(dhdp, 0, TRUE);
12782 return BCME_ERROR;
12783 }
12784 #endif /* WL_CFG80211 */
12785 return BCME_OK;
12786 }
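/*
 * Editor's note (illustrative): attach is two-step -- register the primary
 * netdev first, then attach cfg80211; if the latter fails, the netdev is
 * torn down again via dhd_remove_if() so a probe never leaves a
 * half-attached interface behind.
 */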
12787
12788 int
12789 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
12790 {
12791 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12792 dhd_if_t *ifp;
12793 struct net_device *net = NULL;
12794 int err = 0;
12795 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12796
12797 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
12798
12799 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
12800 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
12801 return BCME_ERROR;
12802 }
12803
12804 ASSERT(dhd && dhd->iflist[ifidx]);
12805 ifp = dhd->iflist[ifidx];
12806 net = ifp->net;
12807 ASSERT(net && (ifp->idx == ifidx));
12808
12809 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12810 ASSERT(!net->open);
12811 net->get_stats = dhd_get_stats;
12812 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
12813 net->do_ioctl = dhd_ioctl_entry_wrapper;
12814 net->hard_start_xmit = dhd_start_xmit_wrapper;
12815 #else
12816 net->do_ioctl = dhd_ioctl_entry;
12817 net->hard_start_xmit = dhd_start_xmit;
12818 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
12819
12820 net->set_mac_address = dhd_set_mac_address;
12821 net->set_multicast_list = dhd_set_multicast_list;
12822 net->open = net->stop = NULL;
12823 #else
12824 ASSERT(!net->netdev_ops);
12825 net->netdev_ops = &dhd_ops_virt;
12826 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12827
12828 /* Ok, link into the network layer... */
12829 if (ifidx == 0) {
12830 /*
12831 * device functions for the primary interface only
12832 */
12833 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
12834 net->open = dhd_pri_open;
12835 net->stop = dhd_pri_stop;
12836 #else
12837 net->netdev_ops = &dhd_ops_pri;
12838 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
12839 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
12840 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12841 } else {
12842 /*
12843 * We have to use the primary MAC for virtual interfaces
12844 */
12845 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
12846 /*
12847 * Android sets the locally administered bit to indicate that this is a
12848 * portable hotspot. This will not work in simultaneous AP/STA mode,
12849 * nor with P2P. We need to set the Dongle's MAC address and then use that.
12850 */
12851 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
12852 ETHER_ADDR_LEN)) {
12853 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12854 __func__, net->name));
12855 temp_addr[0] |= 0x02;
12856 }
12857 }
12858
12859 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
12860 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
12861 net->ethtool_ops = &dhd_ethtool_ops;
12862 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
12863
12864 #if defined(WL_WIRELESS_EXT)
12865 #if WIRELESS_EXT < 19
12866 net->get_wireless_stats = dhd_get_wireless_stats;
12867 #endif /* WIRELESS_EXT < 19 */
12868 #if WIRELESS_EXT > 12
12869 net->wireless_handlers = &wl_iw_handler_def;
12870 #endif /* WIRELESS_EXT > 12 */
12871 #endif /* defined(WL_WIRELESS_EXT) */
12872
12873 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
12874
12875 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
12876
12877 if (ifidx == 0)
12878 printf("%s\n", dhd_version);
12879
12880 if (need_rtnl_lock)
12881 err = register_netdev(net);
12882 else
12883 err = register_netdevice(net);
12884
12885 if (err != 0) {
12886 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
12887 goto fail;
12888 }
12889
12890 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
12891 #if defined(CUSTOMER_HW4_DEBUG)
12892 MAC2STRDBG(dhd->pub.mac.octet));
12893 #else
12894 MAC2STRDBG(net->dev_addr));
12895 #endif /* CUSTOMER_HW4_DEBUG */
12896
12897 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
12898 wl_iw_iscan_set_scan_broadcast_prep(net, 1);
12899 #endif // endif
12900
12901 #if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
12902 KERNEL_VERSION(2, 6, 27))))
12903 if (ifidx == 0) {
12904 #ifdef BCMLXSDMMC
12905 up(&dhd_registration_sem);
12906 #endif /* BCMLXSDMMC */
12907 if (!dhd_download_fw_on_driverload) {
12908 #ifdef WL_CFG80211
12909 wl_terminate_event_handler(net);
12910 #endif /* WL_CFG80211 */
12911 #if defined(DHD_LB_RXP)
12912 __skb_queue_purge(&dhd->rx_pend_queue);
12913 #endif /* DHD_LB_RXP */
12914
12915 #if defined(DHD_LB_TXP)
12916 skb_queue_purge(&dhd->tx_pend_queue);
12917 #endif /* DHD_LB_TXP */
12918
12919 #ifdef SHOW_LOGTRACE
12920 /* Release the skbs from queue for WLC_E_TRACE event */
12921 dhd_event_logtrace_flush_queue(dhdp);
12922 #endif /* SHOW_LOGTRACE */
12923
12924 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
12925 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
12926 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
12927 dhd_net_bus_devreset(net, TRUE);
12928 #ifdef BCMLXSDMMC
12929 dhd_net_bus_suspend(net);
12930 #endif /* BCMLXSDMMC */
12931 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
12932 #if defined(BT_OVER_SDIO)
12933 dhd->bus_user_count--;
12934 #endif /* BT_OVER_SDIO */
12935 }
12936 }
12937 #endif /* BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27) */
12938 return 0;
12939
12940 fail:
12941 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
12942 net->open = NULL;
12943 #else
12944 net->netdev_ops = NULL;
12945 #endif // endif
12946 return err;
12947 }
12948
12949 void
12950 dhd_bus_detach(dhd_pub_t *dhdp)
12951 {
12952 dhd_info_t *dhd;
12953
12954 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12955
12956 if (dhdp) {
12957 dhd = (dhd_info_t *)dhdp->info;
12958 if (dhd) {
12959
12960 /*
12961 * In case of the Android cfg80211 driver, the bus is already down in dhd_stop;
12962 * calling stop again would cause SD read/write errors.
12963 */
12964 if (dhd->pub.busstate != DHD_BUS_DOWN) {
12965 /* Stop the protocol module */
12966 dhd_prot_stop(&dhd->pub);
12967
12968 /* Stop the bus module */
12969 dhd_bus_stop(dhd->pub.bus, TRUE);
12970 }
12971
12972 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12973 dhd_bus_oob_intr_unregister(dhdp);
12974 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12975 }
12976 }
12977 }
12978
12979 void dhd_detach(dhd_pub_t *dhdp)
12980 {
12981 dhd_info_t *dhd;
12982 unsigned long flags;
12983 int timer_valid = FALSE;
12984 struct net_device *dev;
12985 #ifdef WL_CFG80211
12986 struct bcm_cfg80211 *cfg = NULL;
12987 #endif // endif
12988 if (!dhdp)
12989 return;
12990
12991 dhd = (dhd_info_t *)dhdp->info;
12992 if (!dhd)
12993 return;
12994
12995 dev = dhd->iflist[0]->net;
12996
12997 if (dev) {
12998 rtnl_lock();
12999 if (dev->flags & IFF_UP) {
13000 /* If IFF_UP is still up, it indicates that
13001 * "ifconfig wlan0 down" hasn't been called.
13002 * So invoke dev_close explicitly here to
13003 * bring down the interface.
13004 */
13005 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
13006 dev_close(dev);
13007 }
13008 rtnl_unlock();
13009 }
13010
13011 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
13012
13013 dhd->pub.up = 0;
13014 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
13015 /* Give sufficient time for threads to start running in case
13016 * dhd_attach() has failed
13017 */
13018 OSL_SLEEP(100);
13019 }
13020 #ifdef DHD_WET
13021 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
13022 #endif /* DHD_WET */
13023 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
13024 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
13025
13026 #ifdef PROP_TXSTATUS
13027 #ifdef DHD_WLFC_THREAD
13028 if (dhd->pub.wlfc_thread) {
13029 kthread_stop(dhd->pub.wlfc_thread);
13030 dhdp->wlfc_thread_go = TRUE;
13031 wake_up_interruptible(&dhdp->wlfc_wqhead);
13032 }
13033 dhd->pub.wlfc_thread = NULL;
13034 #endif /* DHD_WLFC_THREAD */
13035 #endif /* PROP_TXSTATUS */
13036
13037 #ifdef WL_CFG80211
13038 if (dev)
13039 wl_cfg80211_down(dev);
13040 #endif /* WL_CFG80211 */
13041
13042 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
13043
13044 dhd_bus_detach(dhdp);
13045 #ifdef BCMPCIE
13046 if (is_reboot == SYS_RESTART) {
13047 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
13048 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
13049 dhdpcie_bus_clock_stop(dhdp->bus);
13050 wifi_platform_set_power(dhd_wifi_platdata->adapters,
13051 FALSE, WIFI_TURNOFF_DELAY);
13052 }
13053 }
13054 #endif /* BCMPCIE */
13055 #ifndef PCIE_FULL_DONGLE
13056 if (dhdp->prot)
13057 dhd_prot_detach(dhdp);
13058 #endif /* !PCIE_FULL_DONGLE */
13059 }
13060
13061 #ifdef ARP_OFFLOAD_SUPPORT
13062 if (dhd_inetaddr_notifier_registered) {
13063 dhd_inetaddr_notifier_registered = FALSE;
13064 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
13065 }
13066 #endif /* ARP_OFFLOAD_SUPPORT */
13067 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
13068 if (dhd_inet6addr_notifier_registered) {
13069 dhd_inet6addr_notifier_registered = FALSE;
13070 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
13071 }
13072 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
13073 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
13074 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
13075 if (dhd->early_suspend.suspend)
13076 unregister_early_suspend(&dhd->early_suspend);
13077 }
13078 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
13079
13080 #if defined(WL_WIRELESS_EXT)
13081 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
13082 /* Detach and unlink in the iw */
13083 wl_iw_detach();
13084 }
13085 #endif /* defined(WL_WIRELESS_EXT) */
13086
13087 #ifdef DHD_ULP
13088 dhd_ulp_deinit(dhd->pub.osh, dhdp);
13089 #endif /* DHD_ULP */
13090
13091 /* delete all interfaces, start with virtual */
13092 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
13093 int i = 1;
13094 dhd_if_t *ifp;
13095
13096 /* Cleanup virtual interfaces */
13097 dhd_net_if_lock_local(dhd);
13098 for (i = 1; i < DHD_MAX_IFS; i++) {
13099 if (dhd->iflist[i]) {
13100 dhd_remove_if(&dhd->pub, i, TRUE);
13101 }
13102 }
13103 dhd_net_if_unlock_local(dhd);
13104
13105 /* delete primary interface 0 */
13106 ifp = dhd->iflist[0];
13107 if (ifp && ifp->net) {
13108
13109 #ifdef WL_CFG80211
13110 cfg = wl_get_cfg(ifp->net);
13111 #endif // endif
13112 /* in unregister_netdev case, the interface gets freed by net->destructor
13113 * (which is set to free_netdev)
13114 */
13115 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
13116 free_netdev(ifp->net);
13117 } else {
13118 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
13119 defined(ARGOS_NOTIFY_CB)
13120 argos_register_notifier_deinit();
13121 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
13122 #ifdef SET_RPS_CPUS
13123 custom_rps_map_clear(ifp->net->_rx);
13124 #endif /* SET_RPS_CPUS */
13125 netif_tx_disable(ifp->net);
13126 unregister_netdev(ifp->net);
13127 }
13128 #ifdef PCIE_FULL_DONGLE
13129 ifp->net = DHD_NET_DEV_NULL;
13130 #else
13131 ifp->net = NULL;
13132 #endif /* PCIE_FULL_DONGLE */
13133
13134 #ifdef DHD_L2_FILTER
13135 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
13136 NULL, FALSE, dhdp->tickcnt);
13137 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
13138 ifp->phnd_arp_table = NULL;
13139 #endif /* DHD_L2_FILTER */
13140
13141 dhd_if_del_sta_list(ifp);
13142
13143 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
13144 dhd->iflist[0] = NULL;
13145 }
13146 }
13147
13148 /* Clear the watchdog timer */
13149 DHD_GENERAL_LOCK(&dhd->pub, flags);
13150 timer_valid = dhd->wd_timer_valid;
13151 dhd->wd_timer_valid = FALSE;
13152 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
13153 if (timer_valid)
13154 del_timer_sync(&dhd->timer);
13155 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
13156
13157 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
13158 #ifdef DHD_PCIE_RUNTIMEPM
13159 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
13160 PROC_STOP(&dhd->thr_rpm_ctl);
13161 }
13162 #endif /* DHD_PCIE_RUNTIMEPM */
13163 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
13164 PROC_STOP(&dhd->thr_wdt_ctl);
13165 }
13166
13167 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
13168 PROC_STOP(&dhd->thr_rxf_ctl);
13169 }
13170
13171 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
13172 PROC_STOP(&dhd->thr_dpc_ctl);
13173 } else
13174 {
13175 tasklet_kill(&dhd->tasklet);
13176 }
13177 }
13178
13179 #ifdef WL_NATOE
13180 if (dhd->pub.nfct) {
13181 dhd_ct_close(dhd->pub.nfct);
13182 }
13183 #endif /* WL_NATOE */
13184
13185 #ifdef DHD_LB
13186 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
13187 /* Clear the flag first to avoid calling the cpu notifier */
13188 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
13189
13190 /* Kill the Load Balancing Tasklets */
13191 #ifdef DHD_LB_RXP
13192 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
13193 __skb_queue_purge(&dhd->rx_pend_queue);
13194 #endif /* DHD_LB_RXP */
13195 #ifdef DHD_LB_TXP
13196 cancel_work_sync(&dhd->tx_dispatcher_work);
13197 tasklet_kill(&dhd->tx_tasklet);
13198 __skb_queue_purge(&dhd->tx_pend_queue);
13199 #endif /* DHD_LB_TXP */
13200 #ifdef DHD_LB_TXC
13201 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
13202 tasklet_kill(&dhd->tx_compl_tasklet);
13203 #endif /* DHD_LB_TXC */
13204 #ifdef DHD_LB_RXC
13205 tasklet_kill(&dhd->rx_compl_tasklet);
13206 #endif /* DHD_LB_RXC */
13207
13208 if (dhd->cpu_notifier.notifier_call != NULL) {
13209 unregister_cpu_notifier(&dhd->cpu_notifier);
13210 }
13211 dhd_cpumasks_deinit(dhd);
13212 DHD_LB_STATS_DEINIT(&dhd->pub);
13213 }
13214 #endif /* DHD_LB */
13215
13216 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
13217
13218 #ifdef WL_CFG80211
13219 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
13220 if (!cfg) {
13221 DHD_ERROR(("cfg NULL!\n"));
13222 ASSERT(0);
13223 } else {
13224 wl_cfg80211_detach(cfg);
13225 dhd_monitor_uninit();
13226 }
13227 }
13228 #endif // endif
13229
13230 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13231 destroy_workqueue(dhd->tx_wq);
13232 dhd->tx_wq = NULL;
13233 destroy_workqueue(dhd->rx_wq);
13234 dhd->rx_wq = NULL;
13235 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13236 #ifdef DEBUGABILITY
13237 if (dhdp->dbg) {
13238 #ifdef DBG_PKT_MON
13239 dhd_os_dbg_detach_pkt_monitor(dhdp);
13240 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
13241 #endif /* DBG_PKT_MON */
13242 dhd_os_dbg_detach(dhdp);
13243 }
13244 #endif /* DEBUGABILITY */
13245 #ifdef DHD_PKT_LOGGING
13246 dhd_os_detach_pktlog(dhdp);
13247 #endif /* DHD_PKT_LOGGING */
13248 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13249 if (dhd->pub.hang_info) {
13250 MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
13251 }
13252 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13253 #ifdef SHOW_LOGTRACE
13254 /* Release the skbs from queue for WLC_E_TRACE event */
13255 dhd_event_logtrace_flush_queue(dhdp);
13256
13257 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
13258 if (dhd->event_data.fmts) {
13259 MFREE(dhd->pub.osh, dhd->event_data.fmts,
13260 dhd->event_data.fmts_size);
13261 dhd->event_data.fmts = NULL;
13262 }
13263 if (dhd->event_data.raw_fmts) {
13264 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
13265 dhd->event_data.raw_fmts_size);
13266 dhd->event_data.raw_fmts = NULL;
13267 }
13268 if (dhd->event_data.raw_sstr) {
13269 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
13270 dhd->event_data.raw_sstr_size);
13271 dhd->event_data.raw_sstr = NULL;
13272 }
13273 if (dhd->event_data.rom_raw_sstr) {
13274 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
13275 dhd->event_data.rom_raw_sstr_size);
13276 dhd->event_data.rom_raw_sstr = NULL;
13277 }
13278 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
13279 }
13280 #endif /* SHOW_LOGTRACE */
13281 #ifdef PNO_SUPPORT
13282 if (dhdp->pno_state)
13283 dhd_pno_deinit(dhdp);
13284 #endif // endif
13285 #ifdef RTT_SUPPORT
13286 if (dhdp->rtt_state) {
13287 dhd_rtt_deinit(dhdp);
13288 }
13289 #endif // endif
13290 #if defined(CONFIG_PM_SLEEP)
13291 if (dhd_pm_notifier_registered) {
13292 unregister_pm_notifier(&dhd->pm_notifier);
13293 dhd_pm_notifier_registered = FALSE;
13294 }
13295 #endif /* CONFIG_PM_SLEEP */
13296
13297 #ifdef DEBUG_CPU_FREQ
13298 if (dhd->new_freq)
13299 free_percpu(dhd->new_freq);
13300 dhd->new_freq = NULL;
13301 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
13302 #endif // endif
13303 #ifdef CONFIG_HAS_WAKELOCK
13304 dhd->wakelock_wd_counter = 0;
13305 wake_lock_destroy(&dhd->wl_wdwake);
13306 #endif /* CONFIG_HAS_WAKELOCK */
13307 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
13308 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
13309 DHD_OS_WAKE_LOCK_DESTROY(dhd);
13310 }
13311
13312 #ifdef ARGOS_CPU_SCHEDULER
13313 if (dhd->pub.affinity_isdpc == TRUE) {
13314 free_cpumask_var(dhd->pub.default_cpu_mask);
13315 free_cpumask_var(dhd->pub.dpc_affinity_cpu_mask);
13316 dhd->pub.affinity_isdpc = FALSE;
13317 }
13318 #endif /* ARGOS_CPU_SCHEDULER */
13319
13320 #ifdef DHDTCPACK_SUPPRESS
13321 /* This will free all MEM allocated for TCPACK SUPPRESS */
13322 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
13323 #endif /* DHDTCPACK_SUPPRESS */
13324
13325 #ifdef PCIE_FULL_DONGLE
13326 dhd_flow_rings_deinit(dhdp);
13327 if (dhdp->prot)
13328 dhd_prot_detach(dhdp);
13329 #endif // endif
13330
13331 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13332 dhd_free_tdls_peer_list(dhdp);
13333 #endif // endif
13334
13335 #ifdef DUMP_IOCTL_IOV_LIST
13336 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
13337 #endif /* DUMP_IOCTL_IOV_LIST */
13338 #ifdef DHD_DEBUG
13339 /* delete the memory waste feature list */
13340 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
13341 #endif /* DHD_DEBUG */
13342 #ifdef WL_MONITOR
13343 dhd_del_monitor_if(dhd);
13344 #endif /* WL_MONITOR */
13345
13346 #ifdef DHD_ERPOM
13347 if (dhdp->enable_erpom) {
13348 dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
13349 }
13350 #endif /* DHD_ERPOM */
13351
13352 cancel_work_sync(&dhd->dhd_hang_process_work);
13353
13354 /* Prefer adding de-init code above this comment unless necessary.
13355 * The idea is to cancel the work queue, sysfs entries and flags at the end.
13356 */
13357 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
13358 dhd->dhd_deferred_wq = NULL;
13359
13360 /* log dump related buffers should be freed after wq is purged */
13361 #ifdef DHD_LOG_DUMP
13362 dhd_log_dump_deinit(&dhd->pub);
13363 #endif /* DHD_LOG_DUMP */
13364 #if defined(BCMPCIE)
13365 if (dhdp->extended_trap_data)
13366 {
13367 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
13368 dhdp->extended_trap_data = NULL;
13369 }
13370 #endif /* BCMPCIE */
13371
13372 #ifdef SHOW_LOGTRACE
13373 /* Wait till event_log_dispatcher_work finishes */
13374 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
13375 mutex_lock(&dhd->pub.dhd_trace_lock);
13376 remove_proc_entry("dhd_trace", NULL);
13377 mutex_unlock(&dhd->pub.dhd_trace_lock);
13378 #endif /* SHOW_LOGTRACE */
13379
13380 #ifdef DHD_DUMP_MNGR
13381 if (dhd->pub.dump_file_manage) {
13382 MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
13383 sizeof(dhd_dump_file_manage_t));
13384 }
13385 #endif /* DHD_DUMP_MNGR */
13386 dhd_sysfs_exit(dhd);
13387 dhd->pub.fw_download_done = FALSE;
13388
13389 #if defined(BT_OVER_SDIO)
13390 mutex_destroy(&dhd->bus_user_lock);
13391 #endif /* BT_OVER_SDIO */
13392
13393 } /* dhd_detach */
13394
13395 void
13396 dhd_free(dhd_pub_t *dhdp)
13397 {
13398 dhd_info_t *dhd;
13399 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13400
13401 if (dhdp) {
13402 int i;
13403 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
13404 if (dhdp->reorder_bufs[i]) {
13405 reorder_info_t *ptr;
13406 uint32 buf_size = sizeof(struct reorder_info);
13407
13408 ptr = dhdp->reorder_bufs[i];
13409
13410 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
13411 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13412 i, ptr->max_idx, buf_size));
13413
13414 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
13415 dhdp->reorder_bufs[i] = NULL;
13416 }
13417 }
13418
13419 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
13420
13421 dhd = (dhd_info_t *)dhdp->info;
13422 if (dhdp->soc_ram) {
13423 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13424 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
13425 #else
13426 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
13427 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13428 dhdp->soc_ram = NULL;
13429 }
13430 if (dhd != NULL) {
13431
13432 /* If the pointer was allocated by dhd_os_prealloc, avoid MFREE */
13433 if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
13434 DHD_PREALLOC_DHD_INFO, 0, FALSE))
13435 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
13436 dhd = NULL;
13437 }
13438 }
13439 }
13440
13441 void
13442 dhd_clear(dhd_pub_t *dhdp)
13443 {
13444 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13445
13446 if (dhdp) {
13447 int i;
13448 #ifdef DHDTCPACK_SUPPRESS
13449 /* Clean up timer/data structure for any remaining/pending packet or timer. */
13450 dhd_tcpack_info_tbl_clean(dhdp);
13451 #endif /* DHDTCPACK_SUPPRESS */
13452 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
13453 if (dhdp->reorder_bufs[i]) {
13454 reorder_info_t *ptr;
13455 uint32 buf_size = sizeof(struct reorder_info);
13456
13457 ptr = dhdp->reorder_bufs[i];
13458
13459 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
13460 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
13461 i, ptr->max_idx, buf_size));
13462
13463 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
13464 dhdp->reorder_bufs[i] = NULL;
13465 }
13466 }
13467
13468 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
13469
13470 if (dhdp->soc_ram) {
13471 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13472 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
13473 #else
13474 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
13475 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13476 dhdp->soc_ram = NULL;
13477 }
13478 }
13479 }
13480
13481 static void
13482 dhd_module_cleanup(void)
13483 {
13484 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13485
13486 dhd_bus_unregister();
13487
13488 wl_android_exit();
13489
13490 dhd_wifi_platform_unregister_drv();
13491 }
13492
13493 static void __exit
13494 dhd_module_exit(void)
13495 {
13496 atomic_set(&exit_in_progress, 1);
13497 dhd_module_cleanup();
13498 unregister_reboot_notifier(&dhd_reboot_notifier);
13499 dhd_destroy_to_notifier_skt();
13500 }
13501
13502 static int __init
13503 dhd_module_init(void)
13504 {
13505 int err;
13506 int retry = POWERUP_MAX_RETRY;
13507
13508 DHD_ERROR(("%s in\n", __FUNCTION__));
13509
13510 DHD_PERIM_RADIO_INIT();
13511
13512 if (firmware_path[0] != '\0') {
13513 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
13514 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13515 }
13516
13517 if (nvram_path[0] != '\0') {
13518 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
13519 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
13520 }
13521
13522 do {
13523 err = dhd_wifi_platform_register_drv();
13524 if (!err) {
13525 register_reboot_notifier(&dhd_reboot_notifier);
13526 break;
13527 } else {
13528 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
13529 __FUNCTION__, retry));
13530 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
13531 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
13532 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
13533 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
13534 }
13535 } while (retry--);
13536
13537 dhd_create_to_notifier_skt();
13538
13539 if (err) {
13540 DHD_ERROR(("%s: Failed to load driver, max retry reached\n", __FUNCTION__));
13541 } else {
13542 if (!dhd_download_fw_on_driverload) {
13543 dhd_driver_init_done = TRUE;
13544 }
13545 }
13546
13547 DHD_ERROR(("%s out\n", __FUNCTION__));
13548
13549 return err;
13550 }
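/*
 * Editor's note (illustrative): firmware_path/nvram_path are backed up on
 * entry because a failed platform-register attempt may leave them
 * modified; each retry restores the saved copies before trying again.
 */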
13551
13552 static int
13553 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
13554 {
13555 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
13556 if (code == SYS_RESTART) {
13557 #ifdef BCMPCIE
13558 is_reboot = code;
13559 #endif /* BCMPCIE */
13560 }
13561 return NOTIFY_DONE;
13562 }
13563
13564 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
13565 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
13566 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
13567 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
13568 defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
13569 defined(CONFIG_ARCH_SDM845)
13570 deferred_module_init_sync(dhd_module_init);
13571 #else
13572 deferred_module_init(dhd_module_init);
13573 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
13574 * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895 ||
13575 * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845
13576 */
13577 #elif defined(USE_LATE_INITCALL_SYNC)
13578 late_initcall_sync(dhd_module_init);
13579 #else
13580 late_initcall(dhd_module_init);
13581 #endif /* USE_LATE_INITCALL_SYNC */
13582 #else
13583 module_init(dhd_module_init);
13584 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
13585
13586 module_exit(dhd_module_exit);
13587
13588 /*
13589 * OS specific functions required to implement DHD driver in OS independent way
13590 */
13591 int
13592 dhd_os_proto_block(dhd_pub_t *pub)
13593 {
13594 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13595
13596 if (dhd) {
13597 DHD_PERIM_UNLOCK(pub);
13598
13599 down(&dhd->proto_sem);
13600
13601 DHD_PERIM_LOCK(pub);
13602 return 1;
13603 }
13604
13605 return 0;
13606 }
13607
13608 int
13609 dhd_os_proto_unblock(dhd_pub_t *pub)
13610 {
13611 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13612
13613 if (dhd) {
13614 up(&dhd->proto_sem);
13615 return 1;
13616 }
13617
13618 return 0;
13619 }
13620
13621 void
13622 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
13623 {
13624 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13625
13626 if (dhd) {
13627 mutex_lock(&dhd->dhd_iovar_mutex);
13628 }
13629 }
13630
13631 void
13632 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
13633 {
13634 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13635
13636 if (dhd) {
13637 mutex_unlock(&dhd->dhd_iovar_mutex);
13638 }
13639 }
13640
13641 void
13642 dhd_os_logdump_lock(dhd_pub_t *pub)
13643 {
13644 dhd_info_t *dhd = NULL;
13645
13646 if (!pub)
13647 return;
13648
13649 dhd = (dhd_info_t *)(pub->info);
13650
13651 if (dhd) {
13652 mutex_lock(&dhd->logdump_lock);
13653 }
13654 }
13655
13656 void
13657 dhd_os_logdump_unlock(dhd_pub_t *pub)
13658 {
13659 dhd_info_t *dhd = NULL;
13660
13661 if (!pub)
13662 return;
13663
13664 dhd = (dhd_info_t *)(pub->info);
13665
13666 if (dhd) {
13667 mutex_unlock(&dhd->logdump_lock);
13668 }
13669 }
13670
13671 unsigned long
13672 dhd_os_dbgring_lock(void *lock)
13673 {
13674 if (!lock)
13675 return 0;
13676
13677 mutex_lock((struct mutex *)lock);
13678
13679 return 0;
13680 }
13681
13682 void
13683 dhd_os_dbgring_unlock(void *lock, unsigned long flags)
13684 {
13685 BCM_REFERENCE(flags);
13686
13687 if (!lock)
13688 return;
13689
13690 mutex_unlock((struct mutex *)lock);
13691 }
13692
13693 unsigned int
13694 dhd_os_get_ioctl_resp_timeout(void)
13695 {
13696 return ((unsigned int)dhd_ioctl_timeout_msec);
13697 }
13698
13699 void
13700 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
13701 {
13702 dhd_ioctl_timeout_msec = (int)timeout_msec;
13703 }
13704
13705 int
13706 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
13707 {
13708 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13709 int timeout;
13710
13711 /* Convert timeout from milliseconds to jiffies */
13712 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13713 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
13714 #else
13715 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
13716 #endif // endif
13717
13718 DHD_PERIM_UNLOCK(pub);
13719
13720 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
13721
13722 DHD_PERIM_LOCK(pub);
13723
13724 return timeout;
13725 }
13726
13727 int
13728 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
13729 {
13730 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13731
13732 wake_up(&dhd->ioctl_resp_wait);
13733 return 0;
13734 }
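/*
 * Editor's sketch (illustrative; the field name is hypothetical): the
 * wait/wake pair follows the usual Linux pattern -- the completion path
 * updates the condition before waking, and the waiter re-checks it under
 * wait_event_timeout():
 *
 *	// completion path, e.g. when the ioctl response arrives
 *	prot->ioctl_received = 1;
 *	dhd_os_ioctl_resp_wake(pub);
 *
 *	// waiting path
 *	if (dhd_os_ioctl_resp_wait(pub, &prot->ioctl_received) == 0)
 *		// timed out, no response within dhd_ioctl_timeout_msec
 */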
13735
13736 int
13737 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
13738 {
13739 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13740 int timeout;
13741
13742 /* Convert timeout from milliseconds to jiffies */
13743 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13744 timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
13745 #else
13746 timeout = D3_ACK_RESP_TIMEOUT * HZ / 1000;
13747 #endif // endif
13748
13749 DHD_PERIM_UNLOCK(pub);
13750
13751 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
13752
13753 DHD_PERIM_LOCK(pub);
13754
13755 return timeout;
13756 }
13757
13758 int
13759 dhd_os_d3ack_wake(dhd_pub_t *pub)
13760 {
13761 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13762
13763 wake_up(&dhd->d3ack_wait);
13764 return 0;
13765 }
13766
13767 int
13768 dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
13769 {
13770 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13771 int timeout;
13772
13773 /* Wait for bus usage contexts to gracefully exit within some timeout value.
13774 * Set the timeout a little higher than dhd_ioctl_timeout_msec
13775 * so that the IOCTL timeout is not affected.
13776 */
13777 /* Convert timeout from milliseconds to jiffies */
13778 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13779 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13780 #else
13781 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13782 #endif // endif
13783
13784 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
13785
13786 return timeout;
13787 }
13788
13789 /*
13790 * Wait until the condition *var == condition is met.
13791 * Returns 0 if the @condition evaluated to false after the timeout elapsed.
13792 * Returns non-zero if the @condition evaluated to true.
13793 */
13794 int
13795 dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
13796 {
13797 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13798 int timeout;
13799
13800 /* Convert timeout from milliseconds to jiffies */
13801 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13802 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13803 #else
13804 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13805 #endif // endif
13806
13807 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
13808
13809 return timeout;
13810 }
13811
13812 /*
13813 * Wait until the '(*var & bitmask) == condition' is met.
13814 * Returns 0 if the @condition evaluated to false after the timeout elapsed.
13815 * Returns non-zero if the @condition evaluated to true.
13816 */
13817 int
13818 dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
13819 uint bitmask, uint condition)
13820 {
13821 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13822 int timeout;
13823
13824 /* Convert timeout from milliseconds to jiffies */
13825 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
13826 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13827 #else
13828 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
13829 #endif // endif
13830
13831 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
13832 ((*var & bitmask) == condition), timeout);
13833
13834 return timeout;
13835 }
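/*
 * Editor's sketch (illustrative; the flag name is an example, not
 * verified against dhd.h): waiting for a single busy bit to clear looks
 * like
 *
 *	dhd_os_busbusy_wait_bitmask(pub, &pub->dhd_bus_busy_state,
 *		DHD_BUS_BUSY_IN_TX, 0);
 *
 * which returns 0 if the bit did not clear within DHD_BUS_BUSY_TIMEOUT.
 */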
13836
13837 int
13838 dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
13839 {
13840 int ret = 0;
13841 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13842
13843 DHD_PERIM_UNLOCK(pub);
13844 ret = wait_event_interruptible(dhd->dmaxfer_wait, (*condition));
13845 DHD_PERIM_LOCK(pub);
13846
13847 return ret;
13848
13849 }
13850
13851 int
13852 dhd_os_dmaxfer_wake(dhd_pub_t *pub)
13853 {
13854 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13855
13856 wake_up(&dhd->dmaxfer_wait);
13857 return 0;
13858 }
13859
13860 void
13861 dhd_os_tx_completion_wake(dhd_pub_t *dhd)
13862 {
13863 /* Call wmb() to make sure the event value is updated before waking up the waiter */
13864 OSL_SMP_WMB();
13865 wake_up(&dhd->tx_completion_wait);
13866 }
13867
13868 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
13869 /* Fix compilation error for FC11 */
13870 INLINE
13871 #endif // endif
13872 int
13873 dhd_os_busbusy_wake(dhd_pub_t *pub)
13874 {
13875 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13876 /* Call wmb() to make sure the event value is updated before waking up the waiter */
13877 OSL_SMP_WMB();
13878 wake_up(&dhd->dhd_bus_busy_state_wait);
13879 return 0;
13880 }
13881
13882 void
13883 dhd_os_wd_timer_extend(void *bus, bool extend)
13884 {
13885 dhd_pub_t *pub = bus;
13886 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13887
13888 if (extend)
13889 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
13890 else
13891 dhd_os_wd_timer(bus, dhd->default_wd_interval);
13892 }
13893
13894 void
13895 dhd_os_wd_timer(void *bus, uint wdtick)
13896 {
13897 dhd_pub_t *pub = bus;
13898 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13899 unsigned long flags;
13900
13901 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13902
13903 if (!dhd) {
13904 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13905 return;
13906 }
13907
13908 DHD_GENERAL_LOCK(pub, flags);
13909
13910 /* don't start the wd until fw is loaded */
13911 if (pub->busstate == DHD_BUS_DOWN) {
13912 DHD_GENERAL_UNLOCK(pub, flags);
13913 #ifdef BCMSDIO
13914 if (!wdtick) {
13915 DHD_OS_WD_WAKE_UNLOCK(pub);
13916 }
13917 #endif /* BCMSDIO */
13918 return;
13919 }
13920
13921 /* Totally stop the timer */
13922 if (!wdtick && dhd->wd_timer_valid == TRUE) {
13923 dhd->wd_timer_valid = FALSE;
13924 DHD_GENERAL_UNLOCK(pub, flags);
13925 del_timer_sync(&dhd->timer);
13926 #ifdef BCMSDIO
13927 DHD_OS_WD_WAKE_UNLOCK(pub);
13928 #endif /* BCMSDIO */
13929 return;
13930 }
13931
13932 if (wdtick) {
13933 #ifdef BCMSDIO
13934 DHD_OS_WD_WAKE_LOCK(pub);
13935 dhd_watchdog_ms = (uint)wdtick;
13936 #endif /* BCMSDIO */
13937 /* Re-arm the timer with the last watchdog period */
13938 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
13939 dhd->wd_timer_valid = TRUE;
13940 }
13941 DHD_GENERAL_UNLOCK(pub, flags);
13942 }
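/*
 * Editor's note (illustrative): callers arm the watchdog by passing a
 * period in ms and cancel it by passing 0:
 *
 *	dhd_os_wd_timer(bus, dhd_watchdog_ms);	// arm / re-arm
 *	dhd_os_wd_timer(bus, 0);		// cancel via del_timer_sync()
 */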
13943
13944 #ifdef DHD_PCIE_RUNTIMEPM
13945 void
13946 dhd_os_runtimepm_timer(void *bus, uint tick)
13947 {
13948 dhd_pub_t *pub = bus;
13949 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13950 unsigned long flags;
13951
13952 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13953
13954 if (!dhd) {
13955 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13956 return;
13957 }
13958
13959 DHD_GENERAL_LOCK(pub, flags);
13960
13961 /* don't start the RPM until fw is loaded */
13962 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
13963 DHD_GENERAL_UNLOCK(pub, flags);
13964 return;
13965 }
13966
13967 /* If tick is non-zero, the request is to start the timer */
13968 if (tick) {
13969 /* Start the timer only if it's not already running */
13970 if (dhd->rpm_timer_valid == FALSE) {
13971 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
13972 dhd->rpm_timer_valid = TRUE;
13973 }
13974 } else {
13975 /* tick is zero, we have to stop the timer */
13976 /* Stop the timer only if it's running, otherwise there is nothing to do */
13977 if (dhd->rpm_timer_valid == TRUE) {
13978 dhd->rpm_timer_valid = FALSE;
13979 DHD_GENERAL_UNLOCK(pub, flags);
13980 del_timer_sync(&dhd->rpm_timer);
13981 /* we have already released the lock, so just go to exit */
13982 goto exit;
13983 }
13984 }
13985
13986 DHD_GENERAL_UNLOCK(pub, flags);
13987 exit:
13988 return;
13989
13990 }
13991
13992 #endif /* DHD_PCIE_RUNTIMEPM */
13993
13994 void *
13995 dhd_os_open_image1(dhd_pub_t *pub, char *filename)
13996 {
13997 struct file *fp;
13998 int size;
13999
14000 fp = filp_open(filename, O_RDONLY, 0);
14001 /*
14002 * 2.6.11 (FC4) supports filp_open() but later revs don't?
14003 * Alternative:
14004 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
14005 * ???
14006 */
14007 if (IS_ERR(fp)) {
14008 fp = NULL;
14009 goto err;
14010 }
14011
14012 if (!S_ISREG(file_inode(fp)->i_mode)) {
14013 DHD_ERROR(("%s: %s is not a regular file\n", __FUNCTION__, filename));
14014 fp = NULL;
14015 goto err;
14016 }
14017
14018 size = i_size_read(file_inode(fp));
14019 if (size <= 0) {
14020 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
14021 fp = NULL;
14022 goto err;
14023 }
14024
14025 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
14026
14027 err:
14028 return fp;
14029 }
14030
14031 int
14032 dhd_os_get_image_block(char *buf, int len, void *image)
14033 {
14034 struct file *fp = (struct file *)image;
14035 int rdlen;
14036 int size;
14037
14038 if (!image) {
14039 return 0;
14040 }
14041
14042 size = i_size_read(file_inode(fp));
14043 rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
14044
14045 if (len >= size && size != rdlen) {
14046 return -EIO;
14047 }
14048
14049 if (rdlen > 0) {
14050 fp->f_pos += rdlen;
14051 }
14052
14053 return rdlen;
14054 }
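/*
 * Editor's sketch (illustrative; the path is hypothetical): the image
 * helpers form a simple open / read-loop / close triple when pulling
 * firmware or NVRAM from a file:
 *
 *	void *img = dhd_os_open_image1(pub, "/vendor/firmware/fw.bin");
 *	if (img) {
 *		while ((len = dhd_os_get_image_block(buf, sizeof(buf), img)) > 0) {
 *			// consume len bytes from buf
 *		}
 *		dhd_os_close_image1(pub, img);
 *	}
 */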
14055
14056 #if defined(BT_OVER_SDIO)
14057 int
14058 dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
14059 {
14060 struct file *fp = (struct file *)image;
14061 int rd_len;
14062 uint str_len = 0;
14063 char *str_end = NULL;
14064
14065 if (!image)
14066 return 0;
14067
14068 rd_len = kernel_read(fp, fp->f_pos, str, len);
14069 str_end = strnchr(str, len, '\n');
14070 if (str_end == NULL) {
14071 goto err;
14072 }
14073 str_len = (uint)(str_end - str);
14074
14075 /* Advance file pointer past the string length */
14076 fp->f_pos += str_len + 1;
14077 bzero(str_end, rd_len - str_len);
14078
14079 err:
14080 return str_len;
14081 }
14082 #endif /* defined (BT_OVER_SDIO) */
14083
14084 int
14085 dhd_os_get_image_size(void *image)
14086 {
14087 struct file *fp = (struct file *)image;
14088 int size;
14089 if (!image) {
14090 return 0;
14091 }
14092
14093 size = i_size_read(file_inode(fp));
14094
14095 return size;
14096 }
14097
14098 void
14099 dhd_os_close_image1(dhd_pub_t *pub, void *image)
14100 {
14101 if (image) {
14102 filp_close((struct file *)image, NULL);
14103 }
14104 }
14105
14106 void
14107 dhd_os_sdlock(dhd_pub_t *pub)
14108 {
14109 dhd_info_t *dhd;
14110
14111 dhd = (dhd_info_t *)(pub->info);
14112
14113 if (dhd_dpc_prio >= 0)
14114 down(&dhd->sdsem);
14115 else
14116 spin_lock_bh(&dhd->sdlock);
14117 }
14118
14119 void
14120 dhd_os_sdunlock(dhd_pub_t *pub)
14121 {
14122 dhd_info_t *dhd;
14123
14124 dhd = (dhd_info_t *)(pub->info);
14125
14126 if (dhd_dpc_prio >= 0)
14127 up(&dhd->sdsem);
14128 else
14129 spin_unlock_bh(&dhd->sdlock);
14130 }
14131
14132 void
14133 dhd_os_sdlock_txq(dhd_pub_t *pub)
14134 {
14135 dhd_info_t *dhd;
14136
14137 dhd = (dhd_info_t *)(pub->info);
14138 spin_lock_bh(&dhd->txqlock);
14139 }
14140
14141 void
14142 dhd_os_sdunlock_txq(dhd_pub_t *pub)
14143 {
14144 dhd_info_t *dhd;
14145
14146 dhd = (dhd_info_t *)(pub->info);
14147 spin_unlock_bh(&dhd->txqlock);
14148 }
14149
14150 void
14151 dhd_os_sdlock_rxq(dhd_pub_t *pub)
14152 {
14153 }
14154
14155 void
14156 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
14157 {
14158 }
14159
14160 static void
14161 dhd_os_rxflock(dhd_pub_t *pub)
14162 {
14163 dhd_info_t *dhd;
14164
14165 dhd = (dhd_info_t *)(pub->info);
14166 spin_lock_bh(&dhd->rxf_lock);
14167
14168 }
14169
14170 static void
14171 dhd_os_rxfunlock(dhd_pub_t *pub)
14172 {
14173 dhd_info_t *dhd;
14174
14175 dhd = (dhd_info_t *)(pub->info);
14176 spin_unlock_bh(&dhd->rxf_lock);
14177 }
14178
14179 #ifdef DHDTCPACK_SUPPRESS
14180 unsigned long
14181 dhd_os_tcpacklock(dhd_pub_t *pub)
14182 {
14183 dhd_info_t *dhd;
14184 unsigned long flags = 0;
14185
14186 dhd = (dhd_info_t *)(pub->info);
14187
14188 if (dhd) {
14189 #ifdef BCMSDIO
14190 spin_lock_bh(&dhd->tcpack_lock);
14191 #else
14192 spin_lock_irqsave(&dhd->tcpack_lock, flags);
14193 #endif /* BCMSDIO */
14194 }
14195
14196 return flags;
14197 }
14198
14199 void
14200 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
14201 {
14202 dhd_info_t *dhd;
14203
14204 #ifdef BCMSDIO
14205 BCM_REFERENCE(flags);
14206 #endif /* BCMSDIO */
14207
14208 dhd = (dhd_info_t *)(pub->info);
14209
14210 if (dhd) {
14211 #ifdef BCMSDIO
14212 spin_unlock_bh(&dhd->tcpack_lock);
14213 #else
14214 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
14215 #endif /* BCMSDIO */
14216 }
14217 }
14218 #endif /* DHDTCPACK_SUPPRESS */
14219
14220 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
14221 {
14222 uint8* buf;
14223 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
14224
14225 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
14226 if (buf == NULL && kmalloc_if_fail)
14227 buf = kmalloc(size, flags);
14228
14229 return buf;
14230 }
14231
14232 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
14233 {
14234 }
14235
14236 #if defined(WL_WIRELESS_EXT)
14237 struct iw_statistics *
14238 dhd_get_wireless_stats(struct net_device *dev)
14239 {
14240 int res = 0;
14241 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14242
14243 if (!dhd->pub.up) {
14244 return NULL;
14245 }
14246
14247 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
14248
14249 if (res == 0)
14250 return &dhd->iw.wstats;
14251 else
14252 return NULL;
14253 }
14254 #endif /* defined(WL_WIRELESS_EXT) */
14255
14256 static int
14257 dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
14258 wl_event_msg_t *event, void **data)
14259 {
14260 int bcmerror = 0;
14261 #ifdef WL_CFG80211
14262 unsigned long flags = 0;
14263 #ifdef DYNAMIC_MUMIMO_CONTROL
14264 static uint32 reassoc_err = 0;
14265 #endif /* DYNAMIC_MUMIMO_CONTROL */
14266 #endif /* WL_CFG80211 */
14267 ASSERT(dhd != NULL);
14268
14269 #ifdef SHOW_LOGTRACE
14270 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
14271 &dhd->event_data);
14272 #else
14273 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
14274 NULL);
14275 #endif /* SHOW_LOGTRACE */
14276 if (unlikely(bcmerror != BCME_OK)) {
14277 return bcmerror;
14278 }
14279
14280 if (ntoh32(event->event_type) == WLC_E_IF) {
14281 /* WLC_E_IF event types are consumed by wl_process_host_event.
14282 * For ifadd/del ops, the netdev ptr may not be valid at this
14283 * point. so return before invoking cfg80211/wext handlers.
14284 */
14285 return BCME_OK;
14286 }
14287
14288 #if defined(WL_WIRELESS_EXT)
14289 if (event->bsscfgidx == 0) {
14290 /*
14291 * Wireless ext is on primary interface only
14292 */
14293 ASSERT(dhd->iflist[ifidx] != NULL);
14294 ASSERT(dhd->iflist[ifidx]->net != NULL);
14295
14296 if (dhd->iflist[ifidx]->net) {
14297 wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
14298 }
14299 }
14300 #endif /* defined(WL_WIRELESS_EXT) */
14301
14302 #ifdef WL_CFG80211
14303 if (dhd->iflist[ifidx]->net) {
14304 spin_lock_irqsave(&dhd->pub.up_lock, flags);
14305 if (dhd->pub.up) {
14306 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
14307 }
14308 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
14309 }
14310 #ifdef DYNAMIC_MUMIMO_CONTROL
14311 #define REASSOC_ERROR_RETRY_LIMIT 1
14312 if (dhd->pub.reassoc_mumimo_sw) {
14313 uint event_type = ntoh32(event->event_type);
14314 uint status = ntoh32(event->status);
14315
14316 if (event_type == WLC_E_REASSOC) {
14317 if (status == WLC_E_STATUS_SUCCESS) {
14318 reassoc_err = 0;
14319 } else {
14320 reassoc_err++;
14321 }
14322
14323 if (reassoc_err > REASSOC_ERROR_RETRY_LIMIT) {
14324 dhd->pub.reassoc_mumimo_sw = FALSE;
14325 dhd->pub.murx_block_eapol = FALSE;
14326 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
14327 dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, OFF);
14328 }
14329 }
14330 }
14331 #undef REASSOC_ERROR_RETRY_LIMIT
14332 #endif /* DYNAMIC_MUMIMO_CONTROL */
14333 #endif /* defined(WL_CFG80211) */
14334
14335 return (bcmerror);
14336 }
14337
14338 /* send up locally generated event */
14339 void
14340 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
14341 {
14342 switch (ntoh32(event->event_type)) {
14343 /* Handle error case or further events here */
14344 default:
14345 break;
14346 }
14347 }
14348
14349 #ifdef LOG_INTO_TCPDUMP
14350 void
14351 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
14352 {
14353 struct sk_buff *p, *skb;
14354 uint32 pktlen;
14355 int len;
14356 dhd_if_t *ifp;
14357 dhd_info_t *dhd;
14358 uchar *skb_data;
14359 int ifidx = 0;
14360 struct ether_header eth;
14361
14362 pktlen = sizeof(eth) + data_len;
14363 dhd = dhdp->info;
14364
14365 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
14366 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
14367
14368 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
14369 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
14370 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
14371 eth.ether_type = hton16(ETHER_TYPE_BRCM);
14372
14373 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
14374 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
14375 skb = PKTTONATIVE(dhdp->osh, p);
14376 skb_data = skb->data;
14377 len = skb->len;
14378
14379 ifidx = dhd_ifname2idx(dhd, "wlan0");
14380 ifp = dhd->iflist[ifidx];
14381 if (ifp == NULL)
14382 ifp = dhd->iflist[0];
14383
14384 ASSERT(ifp);
14385 skb->dev = ifp->net;
14386 skb->protocol = eth_type_trans(skb, skb->dev);
14387 skb->data = skb_data;
14388 skb->len = len;
14389
14390 /* Strip header, count, deliver upward */
14391 skb_pull(skb, ETH_HLEN);
14392
14393 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
14394 __FUNCTION__, __LINE__);
14395 /* Send the packet */
14396 if (in_interrupt()) {
14397 netif_rx(skb);
14398 } else {
14399 netif_rx_ni(skb);
14400 }
14401 } else {
14402 /* Could not allocate an sk_buff */
14403 DHD_ERROR(("%s: unable to alloc sk_buff\n", __FUNCTION__));
14404 }
14405 }
14406 #endif /* LOG_INTO_TCPDUMP */
14407
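/*
* Sleep until *lockvar clears or the IOCTL response timeout expires.
* The SDIO lock is dropped across the wait so the bus can make progress.
*/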
14408 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
14409 {
14410 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14411 struct dhd_info *dhdinfo = dhd->info;
14412
14413 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
14414 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
14415 #else
14416 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
14417 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
14418
14419 dhd_os_sdunlock(dhd);
14420 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
14421 dhd_os_sdlock(dhd);
14422 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
14423 return;
14424 } /* dhd_wait_for_event */
14425
14426 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
14427 {
14428 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14429 struct dhd_info *dhdinfo = dhd->info;
14430 if (waitqueue_active(&dhdinfo->ctrl_wait))
14431 wake_up(&dhdinfo->ctrl_wait);
14432 #endif // endif
14433 return;
14434 }
14435
14436 #if defined(BCMSDIO) || defined(BCMPCIE)
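/*
* Reset the dongle over the bus. flag == TRUE takes the device down after
* quiescing wl, wlfc, PNO, and RTT state; flag == FALSE brings it back up
* with the current firmware/NVRAM paths.
*/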
14437 int
14438 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
14439 {
14440 int ret;
14441
14442 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14443
14444 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14445 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
14446 return BCME_ERROR;
14447 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14448
14449 if (flag == TRUE) {
14450 /* Issue wl down command before resetting the chip */
14451 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
14452 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
14453 }
14454 #ifdef PROP_TXSTATUS
14455 if (dhd->pub.wlfc_enabled) {
14456 dhd_wlfc_deinit(&dhd->pub);
14457 }
14458 #endif /* PROP_TXSTATUS */
14459 #ifdef PNO_SUPPORT
14460 if (dhd->pub.pno_state) {
14461 dhd_pno_deinit(&dhd->pub);
14462 }
14463 #endif // endif
14464 #ifdef RTT_SUPPORT
14465 if (dhd->pub.rtt_state) {
14466 dhd_rtt_deinit(&dhd->pub);
14467 }
14468 #endif /* RTT_SUPPORT */
14469
14470 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
14471 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
14472 #endif /* DBG_PKT_MON && !DBG_PKT_MON_INIT_DEFAULT */
14473 }
14474
14475 #ifdef BCMSDIO
14476 if (!flag) {
14477 dhd_update_fw_nv_path(dhd);
14478 /* update firmware and nvram path to sdio bus */
14479 dhd_bus_update_fw_nv_path(dhd->pub.bus,
14480 dhd->fw_path, dhd->nv_path);
14481 }
14482 #endif /* BCMSDIO */
14483
14484 ret = dhd_bus_devreset(&dhd->pub, flag);
14485
14486 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
14487 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
14488 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
14489 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
14490
14491 if (flag) {
14492 /* Clear some flags for recovery logic */
14493 dhd->pub.dongle_trap_occured = 0;
14494 dhd->pub.iovar_timeout_occured = 0;
14495 #ifdef PCIE_FULL_DONGLE
14496 dhd->pub.d3ack_timeout_occured = 0;
14497 #endif /* PCIE_FULL_DONGLE */
14498 #ifdef DHD_MAP_LOGGING
14499 dhd->pub.smmu_fault_occurred = 0;
14500 #endif /* DHD_MAP_LOGGING */
14501 }
14502
14503 if (ret) {
14504 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
14505 }
14506
14507 return ret;
14508 }
14509
14510 #ifdef BCMSDIO
14511 int
14512 dhd_net_bus_suspend(struct net_device *dev)
14513 {
14514 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14515 return dhd_bus_suspend(&dhd->pub);
14516 }
14517
14518 int
14519 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
14520 {
14521 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14522 return dhd_bus_resume(&dhd->pub, stage);
14523 }
14524
14525 #endif /* BCMSDIO */
14526 #endif /* BCMSDIO || BCMPCIE */
14527
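/* Set the suspend-disable flag and return its previous value. */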
14528 int net_os_set_suspend_disable(struct net_device *dev, int val)
14529 {
14530 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14531 int ret = 0;
14532
14533 if (dhd) {
14534 ret = dhd->pub.suspend_disable_flag;
14535 dhd->pub.suspend_disable_flag = val;
14536 }
14537 return ret;
14538 }
14539
14540 int net_os_set_suspend(struct net_device *dev, int val, int force)
14541 {
14542 int ret = 0;
14543 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14544
14545 if (dhd) {
14546 #ifdef CONFIG_MACH_UNIVERSAL7420
14547 #if defined(ARGOS_RPS_CPU_CTL) && defined(DHD_LB_RXP)
14548 if (!val) {
14549 /* Force rps_cpus onto a specific CPU core */
14550 dhd_rps_cpus_enable(dev, TRUE);
14551 }
14552 #endif /* ARGOS_RPS_CPU_CTL && DHD_LB_RXP */
14553 #endif /* CONFIG_MACH_UNIVERSAL7420 */
14554 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14555 ret = dhd_set_suspend(val, &dhd->pub);
14556 #else
14557 ret = dhd_suspend_resume_helper(dhd, val, force);
14558 #endif // endif
14559 #ifdef WL_CFG80211
14560 wl_cfg80211_update_power_mode(dev);
14561 #endif // endif
14562 }
14563 return ret;
14564 }
14565
14566 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
14567 {
14568 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14569
14570 if (dhd) {
14571 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
14572 __FUNCTION__, val));
14573 dhd->pub.suspend_bcn_li_dtim = val;
14574 }
14575
14576 return 0;
14577 }
14578
14579 int net_os_set_max_dtim_enable(struct net_device *dev, int val)
14580 {
14581 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14582
14583 if (dhd) {
14584 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14585 __FUNCTION__, (val ? "Enable" : "Disable")));
14586 if (val) {
14587 dhd->pub.max_dtim_enable = TRUE;
14588 } else {
14589 dhd->pub.max_dtim_enable = FALSE;
14590 }
14591 } else {
14592 return -1;
14593 }
14594
14595 return 0;
14596 }
14597
14598 int
14599 net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
14600 {
14601 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14602
14603 if (dhd) {
14604 DHD_ERROR(("%s: Disable bcn_li_dtim in suspend : %s\n",
14605 __FUNCTION__, (val ? "Enable" : "Disable")));
14606 if (val) {
14607 dhd->pub.disable_dtim_in_suspend = TRUE;
14608 } else {
14609 dhd->pub.disable_dtim_in_suspend = FALSE;
14610 }
14611 } else {
14612 return -1;
14613 }
14614
14615 return 0;
14616 }
14617
14618 #ifdef PKT_FILTER_SUPPORT
14619 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
14620 {
14621 int ret = 0;
14622
14623 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14624 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14625
14626 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
14627 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
14628 return 0;
14629 }
14630
14631 #ifdef BLOCK_IPV6_PACKET
14632 /* Customer wants no IPv6 packets at all */
14633 if (num == DHD_MULTICAST6_FILTER_NUM) {
14634 return 0;
14635 }
14636 #endif /* BLOCK_IPV6_PACKET */
14637
14638 if (num >= dhd->pub.pktfilter_count) {
14639 return -EINVAL;
14640 }
14641
14642 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
14643 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14644
14645 return ret;
14646 }
14647
14648 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
14649
14650 {
14651 int ret = 0;
14652
14653 /* Packet filtering is changed only while we are still in early-suspend,
14654 * either to turn it ON or to turn it OFF.
14655 * It can always be turned OFF in early-suspend, but it is turned back
14656 * ON only if suspend_disable_flag was not set.
14657 */
14658 if (dhdp && dhdp->up) {
14659 if (dhdp->in_suspend) {
14660 if (!val || !dhdp->suspend_disable_flag)
14661 dhd_enable_packet_filter(val, dhdp);
14662 }
14663 }
14664 return ret;
14665 }
14666
14667 /* function to enable/disable packet for Network device */
14668 int net_os_enable_packet_filter(struct net_device *dev, int val)
14669 {
14670 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14671
14672 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
14673 return dhd_os_enable_packet_filter(&dhd->pub, val);
14674 }
14675 #endif /* PKT_FILTER_SUPPORT */
14676
14677 int
14678 dhd_dev_init_ioctl(struct net_device *dev)
14679 {
14680 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14681 int ret;
14682
14683 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
14684 goto done;
14685
14686 done:
14687 return ret;
14688 }
14689
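/*
* Build the Android Wi-Fi HAL feature bitmap from the firmware capability
* flags (FW_SUPPORTED) plus the compile-time options below.
*/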
14690 int
14691 dhd_dev_get_feature_set(struct net_device *dev)
14692 {
14693 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
14694 dhd_pub_t *dhd = (&ptr->pub);
14695 int feature_set = 0;
14696
14697 if (FW_SUPPORTED(dhd, sta))
14698 feature_set |= WIFI_FEATURE_INFRA;
14699 if (FW_SUPPORTED(dhd, dualband))
14700 feature_set |= WIFI_FEATURE_INFRA_5G;
14701 if (FW_SUPPORTED(dhd, p2p))
14702 feature_set |= WIFI_FEATURE_P2P;
14703 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
14704 feature_set |= WIFI_FEATURE_SOFT_AP;
14705 if (FW_SUPPORTED(dhd, tdls))
14706 feature_set |= WIFI_FEATURE_TDLS;
14707 if (FW_SUPPORTED(dhd, vsdb))
14708 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
14709 if (FW_SUPPORTED(dhd, nan)) {
14710 feature_set |= WIFI_FEATURE_NAN;
14711 /* NAN is essential for D2D RTT */
14712 if (FW_SUPPORTED(dhd, rttd2d))
14713 feature_set |= WIFI_FEATURE_D2D_RTT;
14714 }
14715 #ifdef RTT_SUPPORT
14716 feature_set |= WIFI_FEATURE_D2D_RTT;
14717 feature_set |= WIFI_FEATURE_D2AP_RTT;
14718 #endif /* RTT_SUPPORT */
14719 #ifdef LINKSTAT_SUPPORT
14720 feature_set |= WIFI_FEATURE_LINKSTAT;
14721 #endif /* LINKSTAT_SUPPORT */
14722
14723 #if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
14724 if (dhd_is_pno_supported(dhd)) {
14725 feature_set |= WIFI_FEATURE_PNO;
14726 #ifdef GSCAN_SUPPORT
14727 feature_set |= WIFI_FEATURE_GSCAN;
14728 feature_set |= WIFI_FEATURE_HAL_EPNO;
14729 #endif /* GSCAN_SUPPORT */
14730 }
14731 #endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
14732 #ifdef RSSI_MONITOR_SUPPORT
14733 if (FW_SUPPORTED(dhd, rssi_mon)) {
14734 feature_set |= WIFI_FEATURE_RSSI_MONITOR;
14735 }
14736 #endif /* RSSI_MONITOR_SUPPORT */
14737 #ifdef WL11U
14738 feature_set |= WIFI_FEATURE_HOTSPOT;
14739 #endif /* WL11U */
14740 #ifdef NDO_CONFIG_SUPPORT
14741 feature_set |= WIFI_FEATURE_CONFIG_NDO;
14742 #endif /* NDO_CONFIG_SUPPORT */
14743 #ifdef KEEP_ALIVE
14744 feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
14745 #endif /* KEEP_ALIVE */
14746 #ifdef SUPPORT_RANDOM_MAC_SCAN
14747 feature_set |= WIFI_FEATURE_SCAN_RAND;
14748 #endif /* SUPPORT_RANDOM_MAC_SCAN */
14749 #ifdef FILTER_IE
14750 if (FW_SUPPORTED(dhd, fie)) {
14751 feature_set |= WIFI_FEATURE_FILTER_IE;
14752 }
14753 #endif /* FILTER_IE */
14754 #ifdef ROAMEXP_SUPPORT
14755 /* Check if the Android O roam feature is supported by FW */
14756 if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
14757 feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
14758 }
14759 #endif /* ROAMEXP_SUPPORT */
14760 return feature_set;
14761 }
14762
14763 int
14764 dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
14765 {
14766 int feature_set_full;
14767 int ret = 0;
14768
14769 feature_set_full = dhd_dev_get_feature_set(dev);
14770
14771 /* Common feature set for all interfaces */
14772 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
14773 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
14774 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
14775 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
14776 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
14777 (feature_set_full & WIFI_FEATURE_EPR);
14778
14779 /* Specific feature group for each interface */
14780 switch (num) {
14781 case 0:
14782 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
14783 /* Not supported yet */
14784 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14785 (feature_set_full & WIFI_FEATURE_TDLS) |
14786 (feature_set_full & WIFI_FEATURE_PNO) |
14787 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
14788 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
14789 (feature_set_full & WIFI_FEATURE_GSCAN) |
14790 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
14791 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
14792 break;
14793
14794 case 1:
14795 ret |= (feature_set_full & WIFI_FEATURE_P2P);
14796 /* Not yet verified NAN with P2P */
14797 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14798 break;
14799
14800 case 2:
14801 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
14802 (feature_set_full & WIFI_FEATURE_TDLS) |
14803 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
14804 break;
14805
14806 default:
14807 ret = WIFI_FEATURE_INVALID;
14808 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
14809 break;
14810 }
14811
14812 return ret;
14813 }
14814 #ifdef CUSTOM_FORCE_NODFS_FLAG
14815 int
14816 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
14817 {
14818 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14819
14820 if (nodfs)
14821 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
14822 else
14823 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
14824 dhd->pub.force_country_change = TRUE;
14825 return 0;
14826 }
14827 #endif /* CUSTOM_FORCE_NODFS_FLAG */
14828 #ifdef NDO_CONFIG_SUPPORT
14829 int
14830 dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
14831 {
14832 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14833 dhd_pub_t *dhdp = &dhd->pub;
14834 int ret = 0;
14835
14836 if (enable) {
14837 /* enable ND offload feature (will be enabled in FW on suspend) */
14838 dhdp->ndo_enable = TRUE;
14839
14840 /* Update changes of anycast address & DAD failed address */
14841 ret = dhd_dev_ndo_update_inet6addr(dev);
14842 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
14843 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
14844 return ret;
14845 }
14846 } else {
14847 /* disable ND offload feature */
14848 dhdp->ndo_enable = FALSE;
14849
14850 /* disable ND offload in FW */
14851 ret = dhd_ndo_enable(dhdp, FALSE);
14852 if (ret < 0) {
14853 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
14854 }
14855 }
14856 return ret;
14857 }
14858
14859 /* #pragma used as a WAR to fix build failure,
14860 * ignore dropping of 'const' qualifier in 'list_entry' macro
14861 * this pragma disables the warning only for the following function
14862 */
14863 #pragma GCC diagnostic push
14864 #pragma GCC diagnostic ignored "-Wcast-qual"
14865
14866 static int
14867 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
14868 {
14869 struct inet6_ifaddr *ifa;
14870 struct ifacaddr6 *acaddr = NULL;
14871 int addr_count = 0;
14872
14873 /* lock */
14874 read_lock_bh(&inet6->lock);
14875
14876 /* Count valid unicast address */
14877 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14878 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
14879 addr_count++;
14880 }
14881 }
14882
14883 /* Count anycast address */
14884 acaddr = inet6->ac_list;
14885 while (acaddr) {
14886 addr_count++;
14887 acaddr = acaddr->aca_next;
14888 }
14889
14890 /* unlock */
14891 read_unlock_bh(&inet6->lock);
14892
14893 return addr_count;
14894 }
14895
14896 int
14897 dhd_dev_ndo_update_inet6addr(struct net_device *dev)
14898 {
14899 dhd_info_t *dhd;
14900 dhd_pub_t *dhdp;
14901 struct inet6_dev *inet6;
14902 struct inet6_ifaddr *ifa;
14903 struct ifacaddr6 *acaddr = NULL;
14904 struct in6_addr *ipv6_addr = NULL;
14905 int cnt, i;
14906 int ret = BCME_OK;
14907
14908 /*
14909 * This function evaluates the host IP addresses held in struct inet6_dev:
14910 * unicast addresses in inet6_dev->addr_list and
14911 * anycast addresses in inet6_dev->ac_list.
14912 * While walking inet6_dev, read_lock_bh() is required to prevent
14913 * access to a NULL (freed) pointer.
14914 */
14915
14916 if (dev) {
14917 inet6 = dev->ip6_ptr;
14918 if (!inet6) {
14919 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
14920 return BCME_ERROR;
14921 }
14922
14923 dhd = DHD_DEV_INFO(dev);
14924 if (!dhd) {
14925 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
14926 return BCME_ERROR;
14927 }
14928 dhdp = &dhd->pub;
14929
14930 if (dhd_net2idx(dhd, dev) != 0) {
14931 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
14932 return BCME_ERROR;
14933 }
14934 } else {
14935 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
14936 return BCME_ERROR;
14937 }
14938
14939 /* Check host IP overflow */
14940 cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
14941 if (cnt > dhdp->ndo_max_host_ip) {
14942 if (!dhdp->ndo_host_ip_overflow) {
14943 dhdp->ndo_host_ip_overflow = TRUE;
14944 /* Disable ND offload in FW */
14945 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
14946 ret = dhd_ndo_enable(dhdp, FALSE);
14947 }
14948
14949 return ret;
14950 }
14951
14952 /*
14953 * Allocate an IPv6 address buffer to stage the addresses to be added or
14954 * removed. The driver must lock inet6_dev while accessing it, but cannot
14955 * issue an ioctl while inet6_dev is locked, since ioctls may schedule;
14956 * hence, copy the addresses into the buffer and do the ioctl after unlock.
14957 */
14958 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
14959 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14960 if (!ipv6_addr) {
14961 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
14962 return BCME_NOMEM;
14963 }
14964
14965 /* Find DAD failed unicast address to be removed */
14966 cnt = 0;
14967 read_lock_bh(&inet6->lock);
14968 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14969 /* DAD failed unicast address */
14970 if ((ifa->flags & IFA_F_DADFAILED) &&
14971 (cnt < dhdp->ndo_max_host_ip)) {
14972 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
14973 cnt++;
14974 }
14975 }
14976 read_unlock_bh(&inet6->lock);
14977
14978 /* Remove DAD failed unicast address */
14979 for (i = 0; i < cnt; i++) {
14980 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
14981 ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
14982 if (ret < 0) {
14983 goto done;
14984 }
14985 }
14986
14987 /* Remove all anycast address */
14988 ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14989 if (ret < 0) {
14990 goto done;
14991 }
14992
14993 /*
14994 * if ND offload was disabled due to host ip overflow,
14995 * attempt to add valid unicast address.
14996 */
14997 if (dhdp->ndo_host_ip_overflow) {
14998 /* Find valid unicast address */
14999 cnt = 0;
15000 read_lock_bh(&inet6->lock);
15001 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
15002 /* valid unicast address */
15003 if (!(ifa->flags & IFA_F_DADFAILED) &&
15004 (cnt < dhdp->ndo_max_host_ip)) {
15005 memcpy(&ipv6_addr[cnt], &ifa->addr,
15006 sizeof(struct in6_addr));
15007 cnt++;
15008 }
15009 }
15010 read_unlock_bh(&inet6->lock);
15011
15012 /* Add valid unicast address */
15013 for (i = 0; i < cnt; i++) {
15014 ret = dhd_ndo_add_ip_with_type(dhdp,
15015 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
15016 if (ret < 0) {
15017 goto done;
15018 }
15019 }
15020 }
15021
15022 /* Find anycast address */
15023 cnt = 0;
15024 read_lock_bh(&inet6->lock);
15025 acaddr = inet6->ac_list;
15026 while (acaddr) {
15027 if (cnt < dhdp->ndo_max_host_ip) {
15028 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
15029 cnt++;
15030 }
15031 acaddr = acaddr->aca_next;
15032 }
15033 read_unlock_bh(&inet6->lock);
15034
15035 /* Add anycast address */
15036 for (i = 0; i < cnt; i++) {
15037 ret = dhd_ndo_add_ip_with_type(dhdp,
15038 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
15039 if (ret < 0) {
15040 goto done;
15041 }
15042 }
15043
15044 /* All host IP addresses have now been added successfully */
15045 if (dhdp->ndo_host_ip_overflow) {
15046 dhdp->ndo_host_ip_overflow = FALSE;
15047 if (dhdp->in_suspend) {
15048 /* driver is in (early) suspend state; need to enable ND offload in FW */
15049 DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
15050 ret = dhd_ndo_enable(dhdp, TRUE);
15051 }
15052 }
15053
15054 done:
15055 if (ipv6_addr) {
15056 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
15057 }
15058
15059 return ret;
15060 }
15061 #pragma GCC diagnostic pop
15062
15063 #endif /* NDO_CONFIG_SUPPORT */
15064
15065 #ifdef PNO_SUPPORT
15066 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
15067 int
15068 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
15069 {
15070 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15071
15072 return (dhd_pno_stop_for_ssid(&dhd->pub));
15073 }
15074 /* Linux wrapper to call common dhd_pno_set_for_ssid */
15075 int
15076 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
15077 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
15078 {
15079 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15080
15081 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
15082 pno_repeat, pno_freq_expo_max, channel_list, nchan));
15083 }
15084
15085 /* Linux wrapper to call common dhd_pno_enable */
15086 int
15087 dhd_dev_pno_enable(struct net_device *dev, int enable)
15088 {
15089 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15090
15091 return (dhd_pno_enable(&dhd->pub, enable));
15092 }
15093
15094 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
15095 int
15096 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
15097 struct dhd_pno_hotlist_params *hotlist_params)
15098 {
15099 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15100 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
15101 }
15102 /* Linux wrapper to call common dhd_pno_stop_for_batch */
15103 int
15104 dhd_dev_pno_stop_for_batch(struct net_device *dev)
15105 {
15106 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15107 return (dhd_pno_stop_for_batch(&dhd->pub));
15108 }
15109 /* Linux wrapper to call common dhd_pno_set_for_batch */
15110 int
15111 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
15112 {
15113 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15114 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
15115 }
15116 /* Linux wrapper to call common dhd_pno_get_for_batch */
15117 int
15118 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
15119 {
15120 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15121 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
15122 }
15123 #endif /* PNO_SUPPORT */
15124
15125 #if defined(PNO_SUPPORT)
15126 #ifdef GSCAN_SUPPORT
15127 bool
15128 dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
15129 {
15130 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15131
15132 return (dhd_is_legacy_pno_enabled(&dhd->pub));
15133 }
15134
15135 int
15136 dhd_dev_set_epno(struct net_device *dev)
15137 {
15138 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15139 if (!dhd) {
15140 return BCME_ERROR;
15141 }
15142 return dhd_pno_set_epno(&dhd->pub);
15143 }
15144 int
15145 dhd_dev_flush_fw_epno(struct net_device *dev)
15146 {
15147 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15148 if (!dhd) {
15149 return BCME_ERROR;
15150 }
15151 return dhd_pno_flush_fw_epno(&dhd->pub);
15152 }
15153
15154 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15155 int
15156 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
15157 void *buf, bool flush)
15158 {
15159 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15160
15161 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
15162 }
15163
15164 /* Linux wrapper to call common dhd_wait_batch_results_complete */
15165 int
15166 dhd_dev_wait_batch_results_complete(struct net_device *dev)
15167 {
15168 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15169
15170 return (dhd_wait_batch_results_complete(&dhd->pub));
15171 }
15172
15173 /* Linux wrapper to call common dhd_pno_lock_batch_results */
15174 int
15175 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
15176 {
15177 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15178
15179 return (dhd_pno_lock_batch_results(&dhd->pub));
15180 }
15181 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
15182 void
15183 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
15184 {
15185 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15186
15187 return (dhd_pno_unlock_batch_results(&dhd->pub));
15188 }
15189
15190 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
15191 int
15192 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
15193 {
15194 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15195
15196 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
15197 }
15198
15199 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
15200 int
15201 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
15202 {
15203 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15204
15205 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
15206 }
15207
15208 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
15209 void *
15210 dhd_dev_hotlist_scan_event(struct net_device *dev,
15211 const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
15212 {
15213 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15214
15215 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
15216 }
15217
15218 /* Linux wrapper to call common dhd_process_full_gscan_result */
15219 void *
15220 dhd_dev_process_full_gscan_result(struct net_device *dev,
15221 const void *data, uint32 len, int *send_evt_bytes)
15222 {
15223 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15224
15225 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
15226 }
15227
15228 void
15229 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
15230 {
15231 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15232
15233 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
15234
15235 return;
15236 }
15237
15238 int
15239 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
15240 {
15241 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15242
15243 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
15244 }
15245
15246 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
15247 int
15248 dhd_dev_retrieve_batch_scan(struct net_device *dev)
15249 {
15250 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15251
15252 return (dhd_retreive_batch_scan_results(&dhd->pub));
15253 }
15254 /* Linux wrapper to call common dhd_pno_process_epno_result */
15255 void * dhd_dev_process_epno_result(struct net_device *dev,
15256 const void *data, uint32 event, int *send_evt_bytes)
15257 {
15258 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15259
15260 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
15261 }
15262
15263 int
15264 dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
15265 wlc_roam_exp_params_t *roam_param)
15266 {
15267 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15268 wl_roam_exp_cfg_t roam_exp_cfg;
15269 int err;
15270
15271 if (!roam_param) {
15272 return BCME_BADARG;
15273 }
15274
15275 DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
15276 roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
15277 DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
15278 roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
15279 roam_param->cur_bssid_boost));
15280 DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
15281 roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
15282
15283 memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
15284 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
15285 roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
15286 if (dhd->pub.lazy_roam_enable) {
15287 roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
15288 }
15289 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
15290 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
15291 TRUE);
15292 if (err < 0) {
15293 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
15294 }
15295 return err;
15296 }
15297
15298 int
15299 dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
15300 {
15301 int err;
15302 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15303 wl_roam_exp_cfg_t roam_exp_cfg;
15304
15305 memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
15306 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
15307 if (enable) {
15308 roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
15309 }
15310
15311 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
15312 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
15313 TRUE);
15314 if (err < 0) {
15315 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
15316 } else {
15317 dhd->pub.lazy_roam_enable = (enable != 0);
15318 }
15319 return err;
15320 }
15321 int
15322 dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
15323 wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
15324 {
15325 int err;
15326 uint len;
15327 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15328
15329 bssid_pref->version = BSSID_PREF_LIST_VERSION;
15330 /* By default programming bssid pref flushes out old values */
15331 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
15332 len = sizeof(wl_bssid_pref_cfg_t);
15333 if (bssid_pref->count) {
15334 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
15335 }
15336 err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
15337 (char *)bssid_pref, len, NULL, 0, TRUE);
15338 if (err != BCME_OK) {
15339 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
15340 }
15341 return err;
15342 }
15343 #endif /* GSCAN_SUPPORT */
15344 #if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
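/*
* Program the firmware MAC deny list in two steps: push the blacklist
* entries with WLC_SET_MACLIST, then arm the filter with WLC_SET_MACMODE
* (or disarm it when flushing with a NULL list).
*/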
15345 int
15346 dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
15347 uint32 len, uint32 flush)
15348 {
15349 int err;
15350 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15351 int macmode;
15352
15353 if (blacklist) {
15354 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
15355 len, TRUE, 0);
15356 if (err != BCME_OK) {
15357 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
15358 return err;
15359 }
15360 }
15361 /* By default programming blacklist flushes out old values */
15362 macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
15363 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
15364 sizeof(macmode), TRUE, 0);
15365 if (err != BCME_OK) {
15366 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
15367 }
15368 return err;
15369 }
15370 int
15371 dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
15372 uint32 len, uint32 flush)
15373 {
15374 int err;
15375 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15376 wl_ssid_whitelist_t whitelist_ssid_flush;
15377
15378 if (!ssid_whitelist) {
15379 if (flush) {
15380 ssid_whitelist = &whitelist_ssid_flush;
15381 ssid_whitelist->ssid_count = 0;
15382 } else {
15383 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
15384 return BCME_BADARG;
15385 }
15386 }
15387 ssid_whitelist->version = SSID_WHITELIST_VERSION;
15388 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
15389 err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
15390 0, TRUE);
15391 if (err != BCME_OK) {
15392 DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n", __FUNCTION__, err));
15393 }
15394 return err;
15395 }
15396 #endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
15397 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15398 /* Linux wrapper to call common dhd_pno_get_gscan */
15399 void *
15400 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
15401 void *info, uint32 *len)
15402 {
15403 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15404
15405 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
15406 }
15407 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15408 #endif // endif
15409
15410 #ifdef RSSI_MONITOR_SUPPORT
15411 int
15412 dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
15413 int8 max_rssi, int8 min_rssi)
15414 {
15415 int err;
15416 wl_rssi_monitor_cfg_t rssi_monitor;
15417 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15418
15419 rssi_monitor.version = RSSI_MONITOR_VERSION;
15420 rssi_monitor.max_rssi = max_rssi;
15421 rssi_monitor.min_rssi = min_rssi;
15422 rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
15423 err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
15424 NULL, 0, TRUE);
15425 if (err < 0 && err != BCME_UNSUPPORTED) {
15426 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
15427 }
15428 return err;
15429 }
15430 #endif /* RSSI_MONITOR_SUPPORT */
15431
15432 #ifdef DHDTCPACK_SUPPRESS
15433 int
15434 dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
15435 {
15436 int err;
15437 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15438
15439 err = dhd_tcpack_suppress_set(&dhd->pub, enable);
15440 if (err != BCME_OK) {
15441 DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
15442 }
15443 return err;
15444 }
15445 #endif /* DHDTCPACK_SUPPRESS */
15446
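/* Validate the OUI and cache it for later PNO MAC randomization. */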
15447 int
15448 dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
15449 {
15450 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15451 dhd_pub_t *dhdp = &dhd->pub;
15452
15453 if (!dhdp || !oui) {
15454 DHD_ERROR(("NULL POINTER : %s\n",
15455 __FUNCTION__));
15456 return BCME_ERROR;
15457 }
15458 if (ETHER_ISMULTI(oui)) {
15459 DHD_ERROR(("Expected unicast OUI\n"));
15460 return BCME_ERROR;
15461 } else {
15462 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
15463 memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
15464 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
15465 MACOUI2STRDBG(rand_mac_oui)));
15466 }
15467 return BCME_OK;
15468 }
15469
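/*
* Push the cached OUI to firmware via the "pfn_macaddr" iovar. Judging by
* the flag names, WL_PFN_MAC_OUI_ONLY_MASK asks firmware to randomize only
* the non-OUI bytes, and WL_PFN_SET_MAC_UNASSOC_MASK restricts the random
* MAC to the unassociated (PNO scan) state.
*/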
15470 int
15471 dhd_set_rand_mac_oui(dhd_pub_t *dhd)
15472 {
15473 int err;
15474 wl_pfn_macaddr_cfg_t wl_cfg;
15475 uint8 *rand_mac_oui = dhd->rand_mac_oui;
15476
15477 memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
15478 memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
15479 wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
15480 if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
15481 wl_cfg.flags = 0;
15482 } else {
15483 wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
15484 }
15485
15486 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
15487 MACOUI2STRDBG(rand_mac_oui)));
15488
15489 err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
15490 if (err < 0) {
15491 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
15492 }
15493 return err;
15494 }
15495
15496 #ifdef RTT_SUPPORT
15497 /* Linux wrapper to call common dhd_rtt_set_cfg */
15498 int
15499 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
15500 {
15501 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15502
15503 return (dhd_rtt_set_cfg(&dhd->pub, buf));
15504 }
15505
15506 int
15507 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
15508 {
15509 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15510
15511 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
15512 }
15513
15514 int
15515 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
15516 {
15517 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15518
15519 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
15520 }
15521
15522 int
15523 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
15524 {
15525 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15526
15527 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
15528 }
15529
15530 int
15531 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
15532 {
15533 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15534
15535 return (dhd_rtt_capability(&dhd->pub, capa));
15536 }
15537
15538 int
15539 dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
15540 {
15541 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15542 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
15543 }
15544
15545 int
15546 dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
15547 {
15548 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15549 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
15550 }
15551
15552 int dhd_dev_rtt_cancel_responder(struct net_device *dev)
15553 {
15554 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15555 return (dhd_rtt_cancel_responder(&dhd->pub));
15556 }
15557
15558 #endif /* RTT_SUPPORT */
15559
15560 #ifdef KEEP_ALIVE
15561 #define KA_TEMP_BUF_SIZE 512
15562 #define KA_FRAME_SIZE 300
15563
15564 int
15565 dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
15566 uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
15567 {
15568 const int ETHERTYPE_LEN = 2;
15569 char *pbuf = NULL;
15570 const char *str;
15571 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15572 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15573 int buf_len = 0;
15574 int str_len = 0;
15575 int res = BCME_ERROR;
15576 int len_bytes = 0;
15577 int i = 0;
15578
15579 /* Ethernet frame buffer sized for the max IP pkt (256 bytes) plus the ether header */
15580 char *pmac_frame = NULL;
15581 char *pmac_frame_begin = NULL;
15582
15583 /*
15584 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15585 * dongle shall reject a mkeep_alive request.
15586 */
15587 if (!dhd_support_sta_mode(dhd_pub))
15588 return res;
15589
15590 DHD_TRACE(("%s execution\n", __FUNCTION__));
15591
15592 if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15593 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15594 res = BCME_NOMEM;
15595 return res;
15596 }
15597
15598 if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
15599 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
15600 res = BCME_NOMEM;
15601 goto exit;
15602 }
15603 pmac_frame_begin = pmac_frame;
15604
15605 /*
15606 * Get current mkeep-alive status.
15607 */
15608 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
15609 KA_TEMP_BUF_SIZE, FALSE);
15610 if (res < 0) {
15611 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15612 goto exit;
15613 } else {
15614 /* Check whether the requested ID is already occupied */
15615 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15616 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15617 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15618 __FUNCTION__, mkeep_alive_id));
15619
15620 /* Current occupied ID info */
15621 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
15622 DHD_ERROR((" Id : %d\n"
15623 " Period: %d msec\n"
15624 " Length: %d\n"
15625 " Packet: 0x",
15626 mkeep_alive_pktp->keep_alive_id,
15627 dtoh32(mkeep_alive_pktp->period_msec),
15628 dtoh16(mkeep_alive_pktp->len_bytes)));
15629
15630 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15631 DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
15632 }
15633 DHD_ERROR(("\n"));
15634
15635 res = BCME_NOTFOUND;
15636 goto exit;
15637 }
15638 }
15639
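/*
* Rough layout of the iovar buffer assembled below (a sketch of what this
* code builds, not an extra structure):
* "mkeep_alive\0" | wl_mkeep_alive_pkt_t fixed part (version, length,
* period, ID) | Ethernet frame (dst mac, src mac, ether type, IP pkt)
*/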
15640 /* Request the specified ID */
15641 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15642 memset(pbuf, 0, KA_TEMP_BUF_SIZE);
15643 str = "mkeep_alive";
15644 str_len = strlen(str);
15645 strncpy(pbuf, str, str_len);
15646 pbuf[str_len] = '\0';
15647
15648 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
15649 mkeep_alive_pkt.period_msec = htod32(period_msec);
15650 buf_len = str_len + 1;
15651 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15652 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15653
15654 /* ID assigned */
15655 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15656
15657 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
15658
15659 /*
15660 * Build up Ethernet Frame
15661 */
15662
15663 /* Mapping dest mac addr */
15664 memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
15665 pmac_frame += ETHER_ADDR_LEN;
15666
15667 /* Mapping src mac addr */
15668 memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
15669 pmac_frame += ETHER_ADDR_LEN;
15670
15671 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15672 *(pmac_frame++) = 0x08;
15673 *(pmac_frame++) = 0x00;
15674
15675 /* Mapping IP pkt */
15676 memcpy(pmac_frame, ip_pkt, ip_pkt_len);
15677 pmac_frame += ip_pkt_len;
15678
15679 /*
15680 * Length of the Ethernet frame
15681 * = src mac + dst mac + ether type + IP pkt len
15682 */
15683 len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
15684 memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
15685 buf_len += len_bytes;
15686 mkeep_alive_pkt.len_bytes = htod16(len_bytes);
15687
15688 /*
15689 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15690 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15691 * guarantee that the buffer is properly aligned.
15692 */
15693 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
15694
15695 res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
15696 exit:
15697 if (pmac_frame_begin) {
15698 MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
15699 pmac_frame_begin = NULL;
15700 }
15701 if (pbuf) {
15702 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15703 pbuf = NULL;
15704 }
15705 return res;
15706 }
15707
15708 int
15709 dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
15710 {
15711 char *pbuf = NULL;
15712 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15713 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15714 int res = BCME_ERROR;
15715 int i = 0;
15716
15717 /*
15718 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15719 * dongle shall reject a mkeep_alive request.
15720 */
15721 if (!dhd_support_sta_mode(dhd_pub))
15722 return res;
15723
15724 DHD_TRACE(("%s execution\n", __FUNCTION__));
15725
15726 /*
15727 * Get current mkeep-alive status. Skip ID 0, which is used for the NULL pkt.
15728 */
15729 if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15730 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15731 return res;
15732 }
15733
15734 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
15735 sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
15736 if (res < 0) {
15737 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15738 goto exit;
15739 } else {
15740 /* Check occupied ID */
15741 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15742 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
15743 DHD_INFO((" Id : %d\n"
15744 " Period: %d msec\n"
15745 " Length: %d\n"
15746 " Packet: 0x",
15747 mkeep_alive_pktp->keep_alive_id,
15748 dtoh32(mkeep_alive_pktp->period_msec),
15749 dtoh16(mkeep_alive_pktp->len_bytes)));
15750
15751 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15752 DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
15753 }
15754 DHD_INFO(("\n"));
15755 }
15756
15757 /* Stop it if the ID is in use */
15758 if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15759 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
15760 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15761
15762 mkeep_alive_pkt.period_msec = 0;
15763 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15764 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15765 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15766
15767 res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
15768 (char *)&mkeep_alive_pkt,
15769 WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
15770 } else {
15771 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
15772 res = BCME_NOTFOUND;
15773 }
15774 exit:
15775 if (pbuf) {
15776 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15777 pbuf = NULL;
15778 }
15779 return res;
15780 }
15781 #endif /* KEEP_ALIVE */
15782
15783 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15784 static void _dhd_apf_lock_local(dhd_info_t *dhd)
15785 {
15786 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15787 if (dhd) {
15788 mutex_lock(&dhd->dhd_apf_mutex);
15789 }
15790 #endif // endif
15791 }
15792
15793 static void _dhd_apf_unlock_local(dhd_info_t *dhd)
15794 {
15795 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15796 if (dhd) {
15797 mutex_unlock(&dhd->dhd_apf_mutex);
15798 }
15799 #endif // endif
15800 }
15801
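/*
* Install an APF program as firmware packet filter PKT_FILTER_APF_ID: the
* iovar buffer is the "pkt_filter_add" command string followed by a
* wl_pkt_filter_t whose APF payload carries the program bytes.
*/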
15802 static int
15803 __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
15804 u8* program, uint32 program_len)
15805 {
15806 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15807 dhd_pub_t *dhdp = &dhd->pub;
15808 wl_pkt_filter_t * pkt_filterp;
15809 wl_apf_program_t *apf_program;
15810 char *buf;
15811 u32 cmd_len, buf_len;
15812 int ifidx, ret;
15813 char cmd[] = "pkt_filter_add";
15814
15815 ifidx = dhd_net2idx(dhd, ndev);
15816 if (ifidx == DHD_BAD_IF) {
15817 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15818 return -ENODEV;
15819 }
15820
15821 cmd_len = sizeof(cmd);
15822
15823 /* Bail out if the program exceeds the maximum supported size
15824 * or if the program pointer is NULL.
15825 */
15826 if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
15827 DHD_ERROR(("%s: Invalid program_len: %d, program: %pK\n",
15828 __FUNCTION__, program_len, program));
15829 return -EINVAL;
15830 }
15831 buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
15832 WL_APF_PROGRAM_FIXED_LEN + program_len;
15833
15834 buf = MALLOCZ(dhdp->osh, buf_len);
15835 if (unlikely(!buf)) {
15836 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15837 return -ENOMEM;
15838 }
15839
15840 memcpy(buf, cmd, cmd_len);
15841
15842 pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
15843 pkt_filterp->id = htod32(filter_id);
15844 pkt_filterp->negate_match = htod32(FALSE);
15845 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
15846
15847 apf_program = &pkt_filterp->u.apf_program;
15848 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
15849 apf_program->instr_len = htod16(program_len);
15850 memcpy(apf_program->instrs, program, program_len);
15851
15852 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15853 if (unlikely(ret)) {
15854 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15855 __FUNCTION__, filter_id, ret));
15856 }
15857
15858 if (buf) {
15859 MFREE(dhdp->osh, buf, buf_len);
15860 }
15861 return ret;
15862 }
15863
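/*
* Enable or disable the installed APF filter via "pkt_filter_enable",
* then reassert the global pkt_filter_mode.
*/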
15864 static int
15865 __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
15866 uint32 mode, uint32 enable)
15867 {
15868 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15869 dhd_pub_t *dhdp = &dhd->pub;
15870 wl_pkt_filter_enable_t * pkt_filterp;
15871 char *buf;
15872 u32 cmd_len, buf_len;
15873 int ifidx, ret;
15874 char cmd[] = "pkt_filter_enable";
15875
15876 ifidx = dhd_net2idx(dhd, ndev);
15877 if (ifidx == DHD_BAD_IF) {
15878 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15879 return -ENODEV;
15880 }
15881
15882 cmd_len = sizeof(cmd);
15883 buf_len = cmd_len + sizeof(*pkt_filterp);
15884
15885 buf = MALLOCZ(dhdp->osh, buf_len);
15886 if (unlikely(!buf)) {
15887 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15888 return -ENOMEM;
15889 }
15890
15891 memcpy(buf, cmd, cmd_len);
15892
15893 pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
15894 pkt_filterp->id = htod32(filter_id);
15895 pkt_filterp->enable = htod32(enable);
15896
15897 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15898 if (unlikely(ret)) {
15899 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15900 __FUNCTION__, filter_id, ret));
15901 goto exit;
15902 }
15903
15904 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
15905 WLC_SET_VAR, TRUE, ifidx);
15906 if (unlikely(ret)) {
15907 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15908 __FUNCTION__, filter_id, ret));
15909 }
15910
15911 exit:
15912 if (buf) {
15913 MFREE(dhdp->osh, buf, buf_len);
15914 }
15915 return ret;
15916 }
15917
15918 static int
15919 __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
15920 {
15921 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15922 dhd_pub_t *dhdp = &dhd->pub;
15923 int ifidx, ret;
15924
15925 ifidx = dhd_net2idx(dhd, ndev);
15926 if (ifidx == DHD_BAD_IF) {
15927 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15928 return -ENODEV;
15929 }
15930
15931 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
15932 htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
15933 if (unlikely(ret)) {
15934 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15935 __FUNCTION__, filter_id, ret));
15936 }
15937
15938 return ret;
15939 }
15940
15941 void dhd_apf_lock(struct net_device *dev)
15942 {
15943 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15944 _dhd_apf_lock_local(dhd);
15945 }
15946
15947 void dhd_apf_unlock(struct net_device *dev)
15948 {
15949 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15950 _dhd_apf_unlock_local(dhd);
15951 }
15952
15953 int
15954 dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
15955 {
15956 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15957 dhd_pub_t *dhdp = &dhd->pub;
15958 int ifidx, ret;
15959
15960 if (!FW_SUPPORTED(dhdp, apf)) {
15961 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15962
15963 /*
15964 * Notify Android framework that APF is not supported by setting
15965 * version as zero.
15966 */
15967 *version = 0;
15968 return BCME_OK;
15969 }
15970
15971 ifidx = dhd_net2idx(dhd, ndev);
15972 if (ifidx == DHD_BAD_IF) {
15973 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15974 return -ENODEV;
15975 }
15976
15977 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
15978 WLC_GET_VAR, FALSE, ifidx);
15979 if (unlikely(ret)) {
15980 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15981 __FUNCTION__, ret));
15982 }
15983
15984 return ret;
15985 }
15986
15987 int
15988 dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
15989 {
15990 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15991 dhd_pub_t *dhdp = &dhd->pub;
15992 int ifidx, ret;
15993
15994 if (!FW_SUPPORTED(dhdp, apf)) {
15995 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15996 *max_len = 0;
15997 return BCME_OK;
15998 }
15999
16000 ifidx = dhd_net2idx(dhd, ndev);
16001 if (ifidx == DHD_BAD_IF) {
16002 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
16003 return -ENODEV;
16004 }
16005
16006 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
16007 WLC_GET_VAR, FALSE, ifidx);
16008 if (unlikely(ret)) {
16009 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
16010 __FUNCTION__, ret));
16011 }
16012
16013 return ret;
16014 }
16015
16016 int
16017 dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
16018 uint32 program_len)
16019 {
16020 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16021 dhd_pub_t *dhdp = &dhd->pub;
16022 int ret;
16023
16024 DHD_APF_LOCK(ndev);
16025
16026 /* delete, if filter already exists */
16027 if (dhdp->apf_set) {
16028 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
16029 if (unlikely(ret)) {
16030 goto exit;
16031 }
16032 dhdp->apf_set = FALSE;
16033 }
16034
16035 ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
16036 if (ret) {
16037 goto exit;
16038 }
16039 dhdp->apf_set = TRUE;
16040
16041 if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
16042 /* Driver is still in (early) suspend state; turn the APF filter back on */
16043 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
16044 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
16045 }
16046 exit:
16047 DHD_APF_UNLOCK(ndev);
16048
16049 return ret;
16050 }
16051
16052 int
16053 dhd_dev_apf_enable_filter(struct net_device *ndev)
16054 {
16055 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16056 dhd_pub_t *dhdp = &dhd->pub;
16057 int ret = 0;
16058 bool nan_dp_active = false;
16059
16060 DHD_APF_LOCK(ndev);
16061 #ifdef WL_NAN
16062 nan_dp_active = wl_cfgnan_is_dp_active(ndev);
16063 #endif /* WL_NAN */
16064 if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
16065 !nan_dp_active)) {
16066 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
16067 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
16068 }
16069
16070 DHD_APF_UNLOCK(ndev);
16071
16072 return ret;
16073 }
16074
16075 int
16076 dhd_dev_apf_disable_filter(struct net_device *ndev)
16077 {
16078 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16079 dhd_pub_t *dhdp = &dhd->pub;
16080 int ret = 0;
16081
16082 DHD_APF_LOCK(ndev);
16083
16084 if (dhdp->apf_set) {
16085 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
16086 PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
16087 }
16088
16089 DHD_APF_UNLOCK(ndev);
16090
16091 return ret;
16092 }
16093
16094 int
16095 dhd_dev_apf_delete_filter(struct net_device *ndev)
16096 {
16097 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
16098 dhd_pub_t *dhdp = &dhd->pub;
16099 int ret = 0;
16100
16101 DHD_APF_LOCK(ndev);
16102
16103 if (dhdp->apf_set) {
16104 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
16105 if (!ret) {
16106 dhdp->apf_set = FALSE;
16107 }
16108 }
16109
16110 DHD_APF_UNLOCK(ndev);
16111
16112 return ret;
16113 }
16114 #endif /* PKT_FILTER_SUPPORT && APF */
16115
16116 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16117 static void dhd_hang_process(struct work_struct *work_data)
16118 {
16119 struct net_device *dev;
16120 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
16121 struct net_device *ndev;
16122 uint8 i = 0;
16123 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16124 /* Ignore compiler warnings due to -Werror=cast-qual */
16125 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16126 #pragma GCC diagnostic push
16127 #pragma GCC diagnostic ignored "-Wcast-qual"
16128 #endif // endif
16129 struct dhd_info *dhd =
16130 container_of(work_data, dhd_info_t, dhd_hang_process_work);
16131 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
16132 #pragma GCC diagnostic pop
16133 #endif // endif
16134
16135 dev = dhd->iflist[0]->net;
16136
16137 if (dev) {
16138 #if defined(WL_WIRELESS_EXT)
16139 wl_iw_send_priv_event(dev, "HANG");
16140 #endif // endif
16141 #if defined(WL_CFG80211)
16142 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
16143 #endif // endif
16144 }
16145 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
16146 /*
16147 * For HW2, dev_close needs to be done so the upper layer
16148 * can recover after a hang. For Interposer, skip dev_close
16149 * so that dhd iovars can be used to take a socramdump after
16150 * the crash; also skip it for HW4, as its handling of the
16151 * hang event is different.
16152 */
16153
16154 rtnl_lock();
16155 for (i = 0; i < DHD_MAX_IFS; i++) {
16156 ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
16157 if (ndev && (ndev->flags & IFF_UP)) {
16158 DHD_ERROR(("ndev->name : %s dev close\n",
16159 ndev->name));
16160 dev_close(ndev);
16161 }
16162 }
16163 rtnl_unlock();
16164 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
16165 }
16166
16167 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
16168 extern dhd_pub_t *link_recovery;
16169 void dhd_host_recover_link(void)
16170 {
16171 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
16172 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
16173 dhd_bus_set_linkdown(link_recovery, TRUE);
16174 dhd_os_send_hang_message(link_recovery);
16175 }
16176 EXPORT_SYMBOL(dhd_host_recover_link);
16177 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
16178
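/*
* Send a HANG notification at most once per hang: verify the primary
* netdev and cfg are ready, then schedule dhd_hang_process() to run in
* process context.
*/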
16179 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
16180 {
16181 int ret = 0;
16182
16183 if (dhdp) {
16184 #ifdef WL_CFG80211
16185 struct net_device *primary_ndev;
16186 struct bcm_cfg80211 *cfg;
16187
16188 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
16189 if (!primary_ndev) {
16190 DHD_ERROR(("%s: Cannot find primary netdev\n",
16191 __FUNCTION__));
16192 return -ENODEV;
16193 }
16194
16195 cfg = wl_get_cfg(primary_ndev);
16196 if (!cfg) {
16197 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
16198 return -EINVAL;
16199 }
16200
16201 /* Skip sending HANG event to framework if driver is not ready */
16202 if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
16203 DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
16204 return -ENODEV;
16205 }
16206 #endif /* WL_CFG80211 */
16207
16208 #if defined(DHD_HANG_SEND_UP_TEST)
16209 if (dhdp->req_hang_type) {
16210 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
16211 __FUNCTION__, dhdp->req_hang_type));
16212 dhdp->req_hang_type = 0;
16213 }
16214 #endif /* DHD_HANG_SEND_UP_TEST */
16215
16216 if (!dhdp->hang_was_sent) {
16217 #if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
16218 dhdp->hang_counts++;
16219 if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
16220 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
16221 __func__, dhdp->hang_counts));
16222 BUG_ON(1);
16223 }
16224 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
16225 #ifdef DHD_DEBUG_UART
16226 		/* If the PCIe link has gone down, execute the debug UART
16227 		 * application to gather ramdump data from the dongle via UART.
16228 		 */
16229 if (!dhdp->info->duart_execute) {
16230 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
16231 (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
16232 dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
16233 }
16234 #endif /* DHD_DEBUG_UART */
16235 dhdp->hang_was_sent = 1;
16236 #ifdef BT_OVER_SDIO
16237 dhdp->is_bt_recovery_required = TRUE;
16238 #endif // endif
16239 schedule_work(&dhdp->info->dhd_hang_process_work);
16240
16241 }
16242 }
16243 return ret;
16244 }
16245
16246 int net_os_send_hang_message(struct net_device *dev)
16247 {
16248 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16249 int ret = 0;
16250
16251 if (dhd) {
16252 /* Report FW problem when enabled */
16253 if (dhd->pub.hang_report) {
16254 #ifdef BT_OVER_SDIO
16255 if (netif_running(dev)) {
16256 #endif /* BT_OVER_SDIO */
16257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
16258 ret = dhd_os_send_hang_message(&dhd->pub);
16259 #else
16260 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
16261 #endif // endif
16262 #ifdef BT_OVER_SDIO
16263 }
16264 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
16265 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
16266 #endif /* BT_OVER_SDIO */
16267 } else {
16268 			DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
16269 __FUNCTION__));
16270 }
16271 }
16272 return ret;
16273 }
16274
16275 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
16276 {
16277 dhd_info_t *dhd = NULL;
16278 dhd_pub_t *dhdp = NULL;
16279 int reason;
16280
16281 dhd = DHD_DEV_INFO(dev);
16282 if (dhd) {
16283 dhdp = &dhd->pub;
16284 }
16285
16286 if (!dhd || !dhdp) {
16287 return 0;
16288 }
16289
16290 reason = bcm_strtoul(string_num, NULL, 0);
16291 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
16292
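	/* Out-of-range reason codes are treated as unspecified (0) below */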
16293 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
16294 reason = 0;
16295 }
16296
16297 dhdp->hang_reason = reason;
16298
16299 return net_os_send_hang_message(dev);
16300 }
16301 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
16302
16303 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
16304 {
16305 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16306 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
16307 }
16308
16309 bool dhd_force_country_change(struct net_device *dev)
16310 {
16311 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16312
16313 if (dhd && dhd->pub.up)
16314 return dhd->pub.force_country_change;
16315 return FALSE;
16316 }
16317
16318 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
16319 wl_country_t *cspec)
16320 {
16321 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16322 dhd_pub_t *dhdp = &dhd->pub;
16323
16324 BCM_REFERENCE(dhdp);
16325 if (!CHECK_IS_BLOB(dhdp) || CHECK_IS_MULT_REGREV(dhdp)) {
16326 #if defined(CUSTOM_COUNTRY_CODE)
16327 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
16328 dhd->pub.dhd_cflags);
16329 #else
16330 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
16331 #endif /* CUSTOM_COUNTRY_CODE */
16332 }
16333 #if !defined(CUSTOM_COUNTRY_CODE)
16334 else {
16335 			/* Replace the ccode with XZ if the ccode is an undefined country */
16336 if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
16337 strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
16338 strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
16339 strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
16340 DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
16341 }
16342 }
16343 #endif /* !CUSTOM_COUNTRY_CODE */
16344
16345 #if defined(KEEP_KR_REGREV)
16346 if (strncmp(country_iso_code, "KR", 3) == 0) {
16347 if (!CHECK_IS_BLOB(dhdp) || CHECK_IS_MULT_REGREV(dhdp)) {
16348 if (strncmp(dhd->pub.vars_ccode, "KR", 3) == 0) {
16349 cspec->rev = dhd->pub.vars_regrev;
16350 }
16351 }
16352 }
16353 #endif /* KEEP_KR_REGREV */
16354
16355 #ifdef KEEP_JP_REGREV
16356 if (strncmp(country_iso_code, "JP", 3) == 0) {
16357 if (CHECK_IS_BLOB(dhdp) && !CHECK_IS_MULT_REGREV(dhdp)) {
16358 if (strncmp(dhd->pub.vars_ccode, "J1", 3) == 0) {
16359 memcpy(cspec->ccode, dhd->pub.vars_ccode,
16360 sizeof(dhd->pub.vars_ccode));
16361 }
16362 } else {
16363 if (strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) {
16364 cspec->rev = dhd->pub.vars_regrev;
16365 }
16366 }
16367 }
16368 #endif /* KEEP_JP_REGREV */
16369 BCM_REFERENCE(dhd);
16370 }
16371 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
16372 {
16373 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16374 #ifdef WL_CFG80211
16375 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
16376 #endif // endif
16377
16378 if (dhd && dhd->pub.up) {
16379 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
16380 #ifdef DHD_DISABLE_VHTMODE
16381 dhd_disable_vhtmode(&dhd->pub);
16382 #endif /* DHD_DISABLE_VHTMODE */
16383
16384 #ifdef WL_CFG80211
16385 wl_update_wiphybands(cfg, notify);
16386 #endif // endif
16387 }
16388 }
16389
16390 #ifdef DHD_DISABLE_VHTMODE
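/*
 * Force VHT (11ac) mode off in the dongle: read back "vhtmode" and clear
 * it if set. If the set is rejected while the interface is up
 * (BCME_NOTDOWN), retry the set across a WLC_DOWN/WLC_UP cycle.
 */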
16391 void
16392 dhd_disable_vhtmode(dhd_pub_t *dhd)
16393 {
16394 int ret = 0;
16395 uint32 vhtmode = FALSE;
16396 char buf[32];
16397
16398 /* Get vhtmode */
16399 ret = dhd_iovar(dhd, 0, "vhtmode", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
16400 if (ret < 0) {
16401 DHD_ERROR(("%s Get vhtmode Fail ret %d\n", __FUNCTION__, ret));
16402 return;
16403 }
16404 memcpy(&vhtmode, buf, sizeof(uint32));
16405 if (vhtmode == 0) {
16406 		DHD_ERROR(("%s vhtmode is already 0\n", __FUNCTION__));
16407 return;
16408 }
16409 vhtmode = FALSE;
16410
16411 /* Set vhtmode */
16412 ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode, sizeof(vhtmode), NULL, 0, TRUE);
16413 if (ret == 0) {
16414 DHD_ERROR(("%s Set vhtmode Success %d\n", __FUNCTION__, vhtmode));
16415 } else {
16416 if (ret == BCME_NOTDOWN) {
16417 uint wl_down = 1;
16418 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
16419 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
16420 if (ret) {
16421 DHD_ERROR(("%s WL_DOWN Fail ret %d\n", __FUNCTION__, ret));
16422 return;
16423 }
16424
16425 ret = dhd_iovar(dhd, 0, "vhtmode", (char *)&vhtmode,
16426 sizeof(vhtmode), NULL, 0, TRUE);
16427 DHD_ERROR(("%s Set vhtmode %d, ret %d\n", __FUNCTION__, vhtmode, ret));
16428
16429 ret = dhd_wl_ioctl_cmd(dhd, WLC_UP,
16430 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
16431 if (ret) {
16432 DHD_ERROR(("%s WL_UP Fail ret %d\n", __FUNCTION__, ret));
16433 }
16434 } else {
16435 DHD_ERROR(("%s Set vhtmode 0 failed %d\n", __FUNCTION__, ret));
16436 }
16437 }
16438 }
16439 #endif /* DHD_DISABLE_VHTMODE */
16440
16441 void dhd_bus_band_set(struct net_device *dev, uint band)
16442 {
16443 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16444 #ifdef WL_CFG80211
16445 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
16446 #endif // endif
16447 if (dhd && dhd->pub.up) {
16448 #ifdef WL_CFG80211
16449 wl_update_wiphybands(cfg, true);
16450 #endif // endif
16451 }
16452 }
16453
16454 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
16455 {
16456 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16457
16458 if (!fw || fw[0] == '\0')
16459 return -EINVAL;
16460
16461 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
16462 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
16463
16464 #if defined(SOFTAP)
16465 if (strstr(fw, "apsta") != NULL) {
16466 DHD_INFO(("GOT APSTA FIRMWARE\n"));
16467 ap_fw_loaded = TRUE;
16468 } else {
16469 DHD_INFO(("GOT STA FIRMWARE\n"));
16470 ap_fw_loaded = FALSE;
16471 }
16472 #endif // endif
16473 return 0;
16474 }
16475
16476 void dhd_net_if_lock(struct net_device *dev)
16477 {
16478 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16479 dhd_net_if_lock_local(dhd);
16480 }
16481
16482 void dhd_net_if_unlock(struct net_device *dev)
16483 {
16484 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16485 dhd_net_if_unlock_local(dhd);
16486 }
16487
16488 static void dhd_net_if_lock_local(dhd_info_t *dhd)
16489 {
16490 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16491 if (dhd)
16492 mutex_lock(&dhd->dhd_net_if_mutex);
16493 #endif // endif
16494 }
16495
16496 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
16497 {
16498 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16499 if (dhd)
16500 mutex_unlock(&dhd->dhd_net_if_mutex);
16501 #endif // endif
16502 }
16503
16504 static void dhd_suspend_lock(dhd_pub_t *pub)
16505 {
16506 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16507 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16508 if (dhd)
16509 mutex_lock(&dhd->dhd_suspend_mutex);
16510 #endif // endif
16511 }
16512
16513 static void dhd_suspend_unlock(dhd_pub_t *pub)
16514 {
16515 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16516 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16517 if (dhd)
16518 mutex_unlock(&dhd->dhd_suspend_mutex);
16519 #endif // endif
16520 }
16521
16522 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
16523 {
16524 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16525 unsigned long flags = 0;
16526
16527 if (dhd)
16528 spin_lock_irqsave(&dhd->dhd_lock, flags);
16529
16530 return flags;
16531 }
16532
16533 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
16534 {
16535 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16536
16537 if (dhd)
16538 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
16539 }
16540
16541 /* Linux specific multipurpose spinlock API */
16542 void *
16543 dhd_os_spin_lock_init(osl_t *osh)
16544 {
16545 	/* Add 4 bytes since sizeof(spinlock_t) can be 0 when CONFIG_SMP and
16546 	 * CONFIG_DEBUG_SPINLOCK are not defined, which results in kernel
16547 	 * asserts in internal builds. */
16548 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
16549 if (lock)
16550 spin_lock_init(lock);
16551 return ((void *)lock);
16552 }
16553 void
16554 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
16555 {
16556 if (lock)
16557 MFREE(osh, lock, sizeof(spinlock_t) + 4);
16558 }
16559 unsigned long
16560 dhd_os_spin_lock(void *lock)
16561 {
16562 unsigned long flags = 0;
16563
16564 if (lock)
16565 spin_lock_irqsave((spinlock_t *)lock, flags);
16566
16567 return flags;
16568 }
16569 void
16570 dhd_os_spin_unlock(void *lock, unsigned long flags)
16571 {
16572 if (lock)
16573 spin_unlock_irqrestore((spinlock_t *)lock, flags);
16574 }
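/*
 * Illustrative usage of the multipurpose spinlock API above (a sketch,
 * not driver code):
 *
 *	void *lk = dhd_os_spin_lock_init(osh);
 *	if (lk) {
 *		unsigned long flags = dhd_os_spin_lock(lk);
 *		... critical section ...
 *		dhd_os_spin_unlock(lk, flags);
 *		dhd_os_spin_lock_deinit(osh, lk);
 *	}
 */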
16575
16576 void *
16577 dhd_os_dbgring_lock_init(osl_t *osh)
16578 {
16579 struct mutex *mtx = NULL;
16580
16581 mtx = MALLOCZ(osh, sizeof(*mtx));
16582 if (mtx)
16583 mutex_init(mtx);
16584
16585 return mtx;
16586 }
16587
16588 void
16589 dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
16590 {
16591 if (mtx) {
16592 mutex_destroy(mtx);
16593 MFREE(osh, mtx, sizeof(struct mutex));
16594 }
16595 }
16596
16597 static int
16598 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
16599 {
16600 return (atomic_read(&dhd->pend_8021x_cnt));
16601 }
16602
16603 #define MAX_WAIT_FOR_8021X_TX 100
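/*
 * Worst case, dhd_wait_pend8021x() below blocks for roughly
 * MAX_WAIT_FOR_8021X_TX * 10 ms = 1 s before force-clearing the
 * pending-frame counter and reporting a timeout.
 */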
16604
16605 int
16606 dhd_wait_pend8021x(struct net_device *dev)
16607 {
16608 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16609 int timeout = msecs_to_jiffies(10);
16610 int ntimes = MAX_WAIT_FOR_8021X_TX;
16611 int pend = dhd_get_pend_8021x_cnt(dhd);
16612
16613 while (ntimes && pend) {
16614 if (pend) {
16615 set_current_state(TASK_INTERRUPTIBLE);
16616 DHD_PERIM_UNLOCK(&dhd->pub);
16617 schedule_timeout(timeout);
16618 DHD_PERIM_LOCK(&dhd->pub);
16619 set_current_state(TASK_RUNNING);
16620 ntimes--;
16621 }
16622 pend = dhd_get_pend_8021x_cnt(dhd);
16623 }
16624 if (ntimes == 0)
16625 {
16626 atomic_set(&dhd->pend_8021x_cnt, 0);
16627 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
16628 }
16629 return pend;
16630 }
16631
16632 #if defined(DHD_DEBUG)
16633 int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
16634 {
16635 int ret = 0;
16636 struct file *fp = NULL;
16637 mm_segment_t old_fs;
16638 loff_t pos = 0;
16639 /* change to KERNEL_DS address limit */
16640 old_fs = get_fs();
16641 set_fs(KERNEL_DS);
16642
16643 /* open file to write */
16644 fp = filp_open(file_name, flags, 0664);
16645 if (IS_ERR(fp)) {
16646 DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
16647 goto exit;
16648 }
16649
16650 /* Write buf to file */
16651 ret = vfs_write(fp, buf, size, &pos);
16652 if (ret < 0) {
16653 DHD_ERROR(("write file error, err = %d\n", ret));
16654 goto exit;
16655 }
16656
16657 /* Sync file from filesystem to physical media */
16658 ret = vfs_fsync(fp, 0);
16659 if (ret < 0) {
16660 DHD_ERROR(("sync file error, error = %d\n", ret));
16661 goto exit;
16662 }
16663 ret = BCME_OK;
16664
16665 exit:
16666 /* close file before return */
16667 if (!IS_ERR(fp))
16668 filp_close(fp, current->files);
16669
16670 /* restore previous address limit */
16671 set_fs(old_fs);
16672
16673 return ret;
16674 }
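/*
 * Illustrative call (a sketch; the path and flags are hypothetical):
 *
 *	ret = write_file("/data/dump.bin", O_CREAT | O_WRONLY | O_TRUNC,
 *		buf, len);
 */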
16675 #endif // endif
16676
16677 #ifdef DHD_DEBUG
16678 static void
16679 dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
16680 {
16681 char *type_str = NULL;
16682
16683 switch (type) {
16684 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
16685 type_str = "resumed_on_timeout";
16686 break;
16687 case DUMP_TYPE_D3_ACK_TIMEOUT:
16688 type_str = "D3_ACK_timeout";
16689 break;
16690 case DUMP_TYPE_DONGLE_TRAP:
16691 type_str = "Dongle_Trap";
16692 break;
16693 case DUMP_TYPE_MEMORY_CORRUPTION:
16694 type_str = "Memory_Corruption";
16695 break;
16696 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
16697 type_str = "PKTID_AUDIT_Fail";
16698 break;
16699 case DUMP_TYPE_PKTID_INVALID:
16700 type_str = "PKTID_INVALID";
16701 break;
16702 case DUMP_TYPE_SCAN_TIMEOUT:
16703 type_str = "SCAN_timeout";
16704 break;
16705 case DUMP_TYPE_SCAN_BUSY:
16706 type_str = "SCAN_Busy";
16707 break;
16708 case DUMP_TYPE_BY_SYSDUMP:
16709 if (substr_type == CMD_UNWANTED) {
16710 type_str = "BY_SYSDUMP_FORUSER_unwanted";
16711 } else if (substr_type == CMD_DISCONNECTED) {
16712 type_str = "BY_SYSDUMP_FORUSER_disconnected";
16713 } else {
16714 type_str = "BY_SYSDUMP_FORUSER";
16715 }
16716 break;
16717 case DUMP_TYPE_BY_LIVELOCK:
16718 type_str = "BY_LIVELOCK";
16719 break;
16720 case DUMP_TYPE_AP_LINKUP_FAILURE:
16721 type_str = "BY_AP_LINK_FAILURE";
16722 break;
16723 case DUMP_TYPE_AP_ABNORMAL_ACCESS:
16724 type_str = "INVALID_ACCESS";
16725 break;
16726 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
16727 type_str = "ERROR_RX_TIMED_OUT";
16728 break;
16729 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
16730 type_str = "ERROR_TX_TIMED_OUT";
16731 break;
16732 case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
16733 type_str = "CFG_VENDOR_TRIGGERED";
16734 break;
16735 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
16736 type_str = "BY_INVALID_RING_RDWR";
16737 break;
16738 case DUMP_TYPE_IFACE_OP_FAILURE:
16739 type_str = "BY_IFACE_OP_FAILURE";
16740 break;
16741 case DUMP_TYPE_TRANS_ID_MISMATCH:
16742 type_str = "BY_TRANS_ID_MISMATCH";
16743 break;
16744 #ifdef DEBUG_DNGL_INIT_FAIL
16745 case DUMP_TYPE_DONGLE_INIT_FAILURE:
16746 type_str = "DONGLE_INIT_FAIL";
16747 break;
16748 #endif /* DEBUG_DNGL_INIT_FAIL */
16749 #ifdef SUPPORT_LINKDOWN_RECOVERY
16750 case DUMP_TYPE_READ_SHM_FAIL:
16751 type_str = "READ_SHM_FAIL";
16752 break;
16753 #endif /* SUPPORT_LINKDOWN_RECOVERY */
16754 case DUMP_TYPE_DONGLE_HOST_EVENT:
16755 type_str = "BY_DONGLE_HOST_EVENT";
16756 break;
16757 case DUMP_TYPE_SMMU_FAULT:
16758 type_str = "SMMU_FAULT";
16759 break;
16760 case DUMP_TYPE_BY_USER:
16761 type_str = "BY_USER";
16762 break;
16763 #ifdef DHD_ERPOM
16764 case DUMP_TYPE_DUE_TO_BT:
16765 type_str = "DUE_TO_BT";
16766 break;
16767 #endif /* DHD_ERPOM */
16768 case DUMP_TYPE_LOGSET_BEYOND_RANGE:
16769 type_str = "LOGSET_BEYOND_RANGE";
16770 break;
16771 case DUMP_TYPE_CTO_RECOVERY:
16772 type_str = "CTO_RECOVERY";
16773 break;
16774 case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
16775 type_str = "SEQUENTIAL_PRIVCMD_ERROR";
16776 break;
16777 default:
16778 type_str = "Unknown_type";
16779 break;
16780 }
16781
16782 strncpy(buf, type_str, strlen(type_str));
16783 buf[strlen(type_str)] = 0;
16784 }
16785
16786 int
16787 write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
16788 {
16789 int ret = 0;
16790 char memdump_path[128];
16791 char memdump_type[32];
16792 struct timeval curtime;
16793 uint32 file_mode;
16794
16795 /* Init file name */
16796 memset(memdump_path, 0, sizeof(memdump_path));
16797 memset(memdump_type, 0, sizeof(memdump_type));
16798 do_gettimeofday(&curtime);
16799 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
16800 #ifdef CUSTOMER_HW4_DEBUG
16801 get_debug_dump_time(dhd->debug_dump_time_str);
16802 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16803 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16804 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16805 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
16806 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
16807 "/data/misc/wifi/", fname, memdump_type,
16808 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
16809 file_mode = O_CREAT | O_WRONLY;
16810 #else
16811 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
16812 "/installmedia/", fname, memdump_type,
16813 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
16814 	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we
16815 	 * call BUG_ON immediately after collecting the socram dump.
16816 	 * The file write operation should therefore write the contents directly
16817 	 * into the file instead of caching them. The O_TRUNC flag ensures that
16818 	 * the file is rewritten instead of appended to.
16819 	 */
16820 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16821 {
16822 struct file *fp = filp_open(memdump_path, file_mode, 0664);
16823 /* Check if it is live Brix image having /installmedia, else use /data */
16824 if (IS_ERR(fp)) {
16825 			DHD_ERROR(("cannot open file %s, trying /data/\n", memdump_path));
16826 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
16827 "/data/", fname, memdump_type,
16828 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
16829 } else {
16830 filp_close(fp, NULL);
16831 }
16832 }
16833 #endif /* CUSTOMER_HW4_DEBUG */
16834
16835 /* print SOCRAM dump file path */
16836 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
16837
16838 #ifdef DHD_LOG_DUMP
16839 dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
16840 #endif /* DHD_LOG_DUMP */
16841
16842 /* Write file */
16843 ret = write_file(memdump_path, file_mode, buf, size);
16844
16845 #ifdef DHD_DUMP_MNGR
16846 if (ret == BCME_OK) {
16847 dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
16848 }
16849 #endif /* DHD_DUMP_MNGR */
16850
16851 return ret;
16852 }
16853 #endif /* DHD_DEBUG */
16854
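/*
 * RX/ctrl wakelock timeouts are armed by the *_timeout_enable() setters
 * further below and consumed here: pending values are turned into
 * wake_lock_timeout() calls and then cleared under wakelock_spinlock.
 */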
16855 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
16856 {
16857 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16858 unsigned long flags;
16859 int ret = 0;
16860
16861 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16862 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16863 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
16864 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
16865 #ifdef CONFIG_HAS_WAKELOCK
16866 if (dhd->wakelock_rx_timeout_enable)
16867 wake_lock_timeout(&dhd->wl_rxwake,
16868 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
16869 if (dhd->wakelock_ctrl_timeout_enable)
16870 wake_lock_timeout(&dhd->wl_ctrlwake,
16871 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
16872 #endif // endif
16873 dhd->wakelock_rx_timeout_enable = 0;
16874 dhd->wakelock_ctrl_timeout_enable = 0;
16875 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16876 }
16877 return ret;
16878 }
16879
16880 int net_os_wake_lock_timeout(struct net_device *dev)
16881 {
16882 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16883 int ret = 0;
16884
16885 if (dhd)
16886 ret = dhd_os_wake_lock_timeout(&dhd->pub);
16887 return ret;
16888 }
16889
16890 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
16891 {
16892 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16893 unsigned long flags;
16894
16895 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16896 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16897 if (val > dhd->wakelock_rx_timeout_enable)
16898 dhd->wakelock_rx_timeout_enable = val;
16899 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16900 }
16901 return 0;
16902 }
16903
16904 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
16905 {
16906 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16907 unsigned long flags;
16908
16909 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16910 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16911 if (val > dhd->wakelock_ctrl_timeout_enable)
16912 dhd->wakelock_ctrl_timeout_enable = val;
16913 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16914 }
16915 return 0;
16916 }
16917
16918 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
16919 {
16920 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16921 unsigned long flags;
16922
16923 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16924 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16925 dhd->wakelock_ctrl_timeout_enable = 0;
16926 #ifdef CONFIG_HAS_WAKELOCK
16927 if (wake_lock_active(&dhd->wl_ctrlwake))
16928 wake_unlock(&dhd->wl_ctrlwake);
16929 #endif // endif
16930 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16931 }
16932 return 0;
16933 }
16934
16935 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
16936 {
16937 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16938 int ret = 0;
16939
16940 if (dhd)
16941 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
16942 return ret;
16943 }
16944
16945 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
16946 {
16947 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16948 int ret = 0;
16949
16950 if (dhd)
16951 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
16952 return ret;
16953 }
16954
16955 #if defined(DHD_TRACE_WAKE_LOCK)
16956 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16957 #include <linux/hashtable.h>
16958 #else
16959 #include <linux/hash.h>
16960 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16961
16962 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16963 /* Define 2^5 = 32 bucket size hash table */
16964 DEFINE_HASHTABLE(wklock_history, 5);
16965 #else
16966 /* Define 2^5 = 32 bucket size hash table */
16967 struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
16968 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16969
16970 atomic_t trace_wklock_onoff;
16971 typedef enum dhd_wklock_type {
16972 DHD_WAKE_LOCK,
16973 DHD_WAKE_UNLOCK,
16974 DHD_WAIVE_LOCK,
16975 DHD_RESTORE_LOCK
16976 } dhd_wklock_t;
16977
16978 struct wk_trace_record {
16979 unsigned long addr; /* Address of the instruction */
16980 dhd_wklock_t lock_type; /* lock_type */
16981 unsigned long long counter; /* counter information */
16982 struct hlist_node wklock_node; /* hash node */
16983 };
16984
16985 static struct wk_trace_record *find_wklock_entry(unsigned long addr)
16986 {
16987 struct wk_trace_record *wklock_info;
16988 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16989 hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
16990 #else
16991 struct hlist_node *entry;
16992 int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
16993 hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
16994 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16995 {
16996 if (wklock_info->addr == addr) {
16997 return wklock_info;
16998 }
16999 }
17000 return NULL;
17001 }
17002
17003 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17004 #define HASH_ADD(hashtable, node, key) \
17005 do { \
17006 hash_add(hashtable, node, key); \
17007 } while (0);
17008 #else
17009 #define HASH_ADD(hashtable, node, key) \
17010 do { \
17011 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
17012 hlist_add_head(node, &hashtable[index]); \
17013 } while (0);
17014 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
17015
17016 #define STORE_WKLOCK_RECORD(wklock_type) \
17017 do { \
17018 struct wk_trace_record *wklock_info = NULL; \
17019 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
17020 wklock_info = find_wklock_entry(func_addr); \
17021 if (wklock_info) { \
17022 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
17023 wklock_info->counter = dhd->wakelock_counter; \
17024 } else { \
17025 wklock_info->counter++; \
17026 } \
17027 } else { \
17028 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
17029 if (!wklock_info) {\
17030 printk("Can't allocate wk_trace_record \n"); \
17031 } else { \
17032 wklock_info->addr = func_addr; \
17033 wklock_info->lock_type = wklock_type; \
17034 if (wklock_type == DHD_WAIVE_LOCK || \
17035 wklock_type == DHD_RESTORE_LOCK) { \
17036 wklock_info->counter = dhd->wakelock_counter; \
17037 } else { \
17038 wklock_info->counter++; \
17039 } \
17040 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
17041 } \
17042 } \
17043 } while (0);
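/*
 * STORE_WKLOCK_RECORD keys each record by the caller's return address
 * (__builtin_return_address(0)), so every lock/unlock call site gets its
 * own counter in wklock_history.
 */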
17044
17045 static inline void dhd_wk_lock_rec_dump(void)
17046 {
17047 int bkt;
17048 struct wk_trace_record *wklock_info;
17049
17050 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17051 hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
17052 #else
17053 struct hlist_node *entry = NULL;
17054 int max_index = ARRAY_SIZE(wklock_history);
17055 for (bkt = 0; bkt < max_index; bkt++)
17056 hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
17057 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17058 {
17059 switch (wklock_info->lock_type) {
17060 case DHD_WAKE_LOCK:
17061 printk("wakelock lock : %pS lock_counter : %llu \n",
17062 (void *)wklock_info->addr, wklock_info->counter);
17063 break;
17064 case DHD_WAKE_UNLOCK:
17065 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
17066 (void *)wklock_info->addr, wklock_info->counter);
17067 break;
17068 case DHD_WAIVE_LOCK:
17069 printk("wakelock waive : %pS before_waive : %llu \n",
17070 (void *)wklock_info->addr, wklock_info->counter);
17071 break;
17072 case DHD_RESTORE_LOCK:
17073 printk("wakelock restore : %pS, after_waive : %llu \n",
17074 (void *)wklock_info->addr, wklock_info->counter);
17075 break;
17076 }
17077 }
17078 }
17079
17080 static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
17081 {
17082 unsigned long flags;
17083 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17084 int i;
17085 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17086
17087 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17088 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17089 hash_init(wklock_history);
17090 #else
17091 for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
17092 INIT_HLIST_HEAD(&wklock_history[i]);
17093 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17094 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17095 atomic_set(&trace_wklock_onoff, 1);
17096 }
17097
17098 static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
17099 {
17100 int bkt;
17101 struct wk_trace_record *wklock_info;
17102 struct hlist_node *tmp;
17103 unsigned long flags;
17104 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
17105 struct hlist_node *entry = NULL;
17106 int max_index = ARRAY_SIZE(wklock_history);
17107 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
17108
17109 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17110 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17111 hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
17112 #else
17113 for (bkt = 0; bkt < max_index; bkt++)
17114 hlist_for_each_entry_safe(wklock_info, entry, tmp,
17115 &wklock_history[bkt], wklock_node)
17116 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
17117 {
17118 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
17119 hash_del(&wklock_info->wklock_node);
17120 #else
17121 hlist_del_init(&wklock_info->wklock_node);
17122 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
17123 kfree(wklock_info);
17124 }
17125 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17126 }
17127
17128 void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
17129 {
17130 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
17131 unsigned long flags;
17132
17133 	printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record\n");
17134 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17135 dhd_wk_lock_rec_dump();
17136 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17137
17138 }
17139 #else
17140 #define STORE_WKLOCK_RECORD(wklock_type)
17141 #endif /* ! DHD_TRACE_WAKE_LOCK */
17142
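/*
 * Wakelocks are reference counted: the first dhd_os_wake_lock() takes
 * the underlying OS lock, nested calls only bump wakelock_counter, and
 * waive_wakelock defers the OS lock entirely (see waive/restore below).
 */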
17143 int dhd_os_wake_lock(dhd_pub_t *pub)
17144 {
17145 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17146 unsigned long flags;
17147 int ret = 0;
17148
17149 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17150 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17151 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
17152 #ifdef CONFIG_HAS_WAKELOCK
17153 wake_lock(&dhd->wl_wifi);
17154 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17155 dhd_bus_dev_pm_stay_awake(pub);
17156 #endif // endif
17157 }
17158 #ifdef DHD_TRACE_WAKE_LOCK
17159 if (atomic_read(&trace_wklock_onoff)) {
17160 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
17161 }
17162 #endif /* DHD_TRACE_WAKE_LOCK */
17163 dhd->wakelock_counter++;
17164 ret = dhd->wakelock_counter;
17165 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17166 }
17167
17168 return ret;
17169 }
17170
17171 void dhd_event_wake_lock(dhd_pub_t *pub)
17172 {
17173 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17174
17175 if (dhd) {
17176 #ifdef CONFIG_HAS_WAKELOCK
17177 wake_lock(&dhd->wl_evtwake);
17178 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17179 dhd_bus_dev_pm_stay_awake(pub);
17180 #endif // endif
17181 }
17182 }
17183
17184 void
17185 dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
17186 {
17187 #ifdef CONFIG_HAS_WAKELOCK
17188 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17189
17190 if (dhd) {
17191 wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
17192 }
17193 #endif /* CONFIG_HAS_WAKE_LOCK */
17194 }
17195
17196 void
17197 dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
17198 {
17199 #ifdef CONFIG_HAS_WAKELOCK
17200 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17201
17202 if (dhd) {
17203 wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
17204 }
17205 #endif /* CONFIG_HAS_WAKE_LOCK */
17206 }
17207
17208 int net_os_wake_lock(struct net_device *dev)
17209 {
17210 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17211 int ret = 0;
17212
17213 if (dhd)
17214 ret = dhd_os_wake_lock(&dhd->pub);
17215 return ret;
17216 }
17217
17218 int dhd_os_wake_unlock(dhd_pub_t *pub)
17219 {
17220 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17221 unsigned long flags;
17222 int ret = 0;
17223
17224 dhd_os_wake_lock_timeout(pub);
17225 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17226 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17227
17228 if (dhd->wakelock_counter > 0) {
17229 dhd->wakelock_counter--;
17230 #ifdef DHD_TRACE_WAKE_LOCK
17231 if (atomic_read(&trace_wklock_onoff)) {
17232 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
17233 }
17234 #endif /* DHD_TRACE_WAKE_LOCK */
17235 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
17236 #ifdef CONFIG_HAS_WAKELOCK
17237 wake_unlock(&dhd->wl_wifi);
17238 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17239 dhd_bus_dev_pm_relax(pub);
17240 #endif // endif
17241 }
17242 ret = dhd->wakelock_counter;
17243 }
17244 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17245 }
17246 return ret;
17247 }
17248
17249 void dhd_event_wake_unlock(dhd_pub_t *pub)
17250 {
17251 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17252
17253 if (dhd) {
17254 #ifdef CONFIG_HAS_WAKELOCK
17255 wake_unlock(&dhd->wl_evtwake);
17256 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17257 dhd_bus_dev_pm_relax(pub);
17258 #endif // endif
17259 }
17260 }
17261
17262 void dhd_pm_wake_unlock(dhd_pub_t *pub)
17263 {
17264 #ifdef CONFIG_HAS_WAKELOCK
17265 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17266
17267 if (dhd) {
17268 /* if wl_pmwake is active, unlock it */
17269 if (wake_lock_active(&dhd->wl_pmwake)) {
17270 wake_unlock(&dhd->wl_pmwake);
17271 }
17272 }
17273 #endif /* CONFIG_HAS_WAKELOCK */
17274 }
17275
17276 void dhd_txfl_wake_unlock(dhd_pub_t *pub)
17277 {
17278 #ifdef CONFIG_HAS_WAKELOCK
17279 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17280
17281 if (dhd) {
17282 /* if wl_txflwake is active, unlock it */
17283 if (wake_lock_active(&dhd->wl_txflwake)) {
17284 wake_unlock(&dhd->wl_txflwake);
17285 }
17286 }
17287 #endif /* CONFIG_HAS_WAKELOCK */
17288 }
17289
17290 int dhd_os_check_wakelock(dhd_pub_t *pub)
17291 {
17292 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17293 KERNEL_VERSION(2, 6, 36)))
17294 dhd_info_t *dhd;
17295
17296 if (!pub)
17297 return 0;
17298 dhd = (dhd_info_t *)(pub->info);
17299 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17300
17301 #ifdef CONFIG_HAS_WAKELOCK
17302 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
17303 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
17304 (wake_lock_active(&dhd->wl_wdwake))))
17305 return 1;
17306 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17307 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
17308 return 1;
17309 #endif // endif
17310 return 0;
17311 }
17312
17313 int
17314 dhd_os_check_wakelock_all(dhd_pub_t *pub)
17315 {
17316 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
17317 KERNEL_VERSION(2, 6, 36)))
17318 #if defined(CONFIG_HAS_WAKELOCK)
17319 int l1, l2, l3, l4, l7, l8, l9;
17320 int l5 = 0, l6 = 0;
17321 int c, lock_active;
17322 #endif /* CONFIG_HAS_WAKELOCK */
17323 dhd_info_t *dhd;
17324
17325 if (!pub) {
17326 return 0;
17327 }
17328 dhd = (dhd_info_t *)(pub->info);
17329 if (!dhd) {
17330 return 0;
17331 }
17332 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
17333
17334 #ifdef CONFIG_HAS_WAKELOCK
17335 c = dhd->wakelock_counter;
17336 l1 = wake_lock_active(&dhd->wl_wifi);
17337 l2 = wake_lock_active(&dhd->wl_wdwake);
17338 l3 = wake_lock_active(&dhd->wl_rxwake);
17339 l4 = wake_lock_active(&dhd->wl_ctrlwake);
17340 l7 = wake_lock_active(&dhd->wl_evtwake);
17341 #ifdef BCMPCIE_OOB_HOST_WAKE
17342 l5 = wake_lock_active(&dhd->wl_intrwake);
17343 #endif /* BCMPCIE_OOB_HOST_WAKE */
17344 #ifdef DHD_USE_SCAN_WAKELOCK
17345 l6 = wake_lock_active(&dhd->wl_scanwake);
17346 #endif /* DHD_USE_SCAN_WAKELOCK */
17347 l8 = wake_lock_active(&dhd->wl_pmwake);
17348 l9 = wake_lock_active(&dhd->wl_txflwake);
17349 lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
17350
17351 /* Indicate to the Host to avoid going to suspend if internal locks are up */
17352 if (lock_active) {
17353 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
17354 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
17355 __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
17356 return 1;
17357 }
17358 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17359 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
17360 return 1;
17361 }
17362 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
17363 return 0;
17364 }
17365
17366 int net_os_wake_unlock(struct net_device *dev)
17367 {
17368 dhd_info_t *dhd = DHD_DEV_INFO(dev);
17369 int ret = 0;
17370
17371 if (dhd)
17372 ret = dhd_os_wake_unlock(&dhd->pub);
17373 return ret;
17374 }
17375
17376 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
17377 {
17378 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17379 unsigned long flags;
17380 int ret = 0;
17381
17382 if (dhd) {
17383 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17384 if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
17385 #ifdef CONFIG_HAS_WAKELOCK
17386 			/* if wakelock_wd_counter was never used, lock it at once */
17387 wake_lock(&dhd->wl_wdwake);
17388 #endif // endif
17389 }
17390 dhd->wakelock_wd_counter++;
17391 ret = dhd->wakelock_wd_counter;
17392 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17393 }
17394 return ret;
17395 }
17396
17397 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
17398 {
17399 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17400 unsigned long flags;
17401 int ret = 0;
17402
17403 if (dhd) {
17404 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17405 if (dhd->wakelock_wd_counter > 0) {
17406 dhd->wakelock_wd_counter = 0;
17407 if (!dhd->waive_wakelock) {
17408 #ifdef CONFIG_HAS_WAKELOCK
17409 wake_unlock(&dhd->wl_wdwake);
17410 #endif // endif
17411 }
17412 }
17413 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17414 }
17415 return ret;
17416 }
17417
17418 #ifdef BCMPCIE_OOB_HOST_WAKE
17419 void
17420 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
17421 {
17422 #ifdef CONFIG_HAS_WAKELOCK
17423 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17424
17425 if (dhd) {
17426 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
17427 }
17428 #endif /* CONFIG_HAS_WAKELOCK */
17429 }
17430
17431 void
17432 dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
17433 {
17434 #ifdef CONFIG_HAS_WAKELOCK
17435 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17436
17437 if (dhd) {
17438 /* if wl_intrwake is active, unlock it */
17439 if (wake_lock_active(&dhd->wl_intrwake)) {
17440 wake_unlock(&dhd->wl_intrwake);
17441 }
17442 }
17443 #endif /* CONFIG_HAS_WAKELOCK */
17444 }
17445 #endif /* BCMPCIE_OOB_HOST_WAKE */
17446
17447 #ifdef DHD_USE_SCAN_WAKELOCK
17448 void
17449 dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
17450 {
17451 #ifdef CONFIG_HAS_WAKELOCK
17452 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17453
17454 if (dhd) {
17455 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
17456 }
17457 #endif /* CONFIG_HAS_WAKELOCK */
17458 }
17459
17460 void
17461 dhd_os_scan_wake_unlock(dhd_pub_t *pub)
17462 {
17463 #ifdef CONFIG_HAS_WAKELOCK
17464 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17465
17466 if (dhd) {
17467 /* if wl_scanwake is active, unlock it */
17468 if (wake_lock_active(&dhd->wl_scanwake)) {
17469 wake_unlock(&dhd->wl_scanwake);
17470 }
17471 }
17472 #endif /* CONFIG_HAS_WAKELOCK */
17473 }
17474 #endif /* DHD_USE_SCAN_WAKELOCK */
17475
17476 /* Waive wakelocks for operations such as IOVARs in the suspend path; must be
17477  * closed by a paired call to dhd_os_wake_lock_restore(). Returns the current wakelock counter.
17478  */
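/*
 * Illustrative pairing in a suspend path (a sketch, not driver code):
 *
 *	dhd_os_wake_lock_waive(pub);
 *	... issue IOVARs that take/release wakelocks internally ...
 *	dhd_os_wake_lock_restore(pub);
 */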
17479 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
17480 {
17481 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17482 unsigned long flags;
17483 int ret = 0;
17484
17485 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
17486 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17487
17488 		/* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
17489 if (dhd->waive_wakelock == FALSE) {
17490 #ifdef DHD_TRACE_WAKE_LOCK
17491 if (atomic_read(&trace_wklock_onoff)) {
17492 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
17493 }
17494 #endif /* DHD_TRACE_WAKE_LOCK */
17495 /* record current lock status */
17496 dhd->wakelock_before_waive = dhd->wakelock_counter;
17497 dhd->waive_wakelock = TRUE;
17498 }
17499 ret = dhd->wakelock_wd_counter;
17500 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17501 }
17502 return ret;
17503 }
17504
17505 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
17506 {
17507 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
17508 unsigned long flags;
17509 int ret = 0;
17510
17511 if (!dhd)
17512 return 0;
17513 if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
17514 return 0;
17515
17516 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
17517
17518 	/* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
17519 if (!dhd->waive_wakelock)
17520 goto exit;
17521
17522 dhd->waive_wakelock = FALSE;
17523 	/* If somebody else acquires a wakelock between waive and restore, we need
17524 	 * to make it up by calling wake_lock or pm_stay_awake; or if somebody
17525 	 * releases the lock in between, do the same by calling wake_unlock or pm_relax.
17526 	 */
17527 #ifdef DHD_TRACE_WAKE_LOCK
17528 if (atomic_read(&trace_wklock_onoff)) {
17529 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
17530 }
17531 #endif /* DHD_TRACE_WAKE_LOCK */
17532
17533 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
17534 #ifdef CONFIG_HAS_WAKELOCK
17535 wake_lock(&dhd->wl_wifi);
17536 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17537 dhd_bus_dev_pm_stay_awake(&dhd->pub);
17538 #endif // endif
17539 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
17540 #ifdef CONFIG_HAS_WAKELOCK
17541 wake_unlock(&dhd->wl_wifi);
17542 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
17543 dhd_bus_dev_pm_relax(&dhd->pub);
17544 #endif // endif
17545 }
17546 dhd->wakelock_before_waive = 0;
17547 exit:
17548 ret = dhd->wakelock_wd_counter;
17549 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
17550 return ret;
17551 }
17552
17553 void dhd_os_wake_lock_init(struct dhd_info *dhd)
17554 {
17555 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
17556 dhd->wakelock_counter = 0;
17557 dhd->wakelock_rx_timeout_enable = 0;
17558 dhd->wakelock_ctrl_timeout_enable = 0;
17559 /* wakelocks prevent a system from going into a low power state */
17560 #ifdef CONFIG_HAS_WAKELOCK
17561 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
17562 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
17563 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
17564 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
17565 wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
17566 wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
17567 #ifdef BCMPCIE_OOB_HOST_WAKE
17568 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
17569 #endif /* BCMPCIE_OOB_HOST_WAKE */
17570 #ifdef DHD_USE_SCAN_WAKELOCK
17571 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
17572 #endif /* DHD_USE_SCAN_WAKELOCK */
17573 #endif /* CONFIG_HAS_WAKELOCK */
17574 #ifdef DHD_TRACE_WAKE_LOCK
17575 dhd_wk_lock_trace_init(dhd);
17576 #endif /* DHD_TRACE_WAKE_LOCK */
17577 }
17578
17579 void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
17580 {
17581 DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
17582 #ifdef CONFIG_HAS_WAKELOCK
17583 dhd->wakelock_counter = 0;
17584 dhd->wakelock_rx_timeout_enable = 0;
17585 dhd->wakelock_ctrl_timeout_enable = 0;
17586 wake_lock_destroy(&dhd->wl_wifi);
17587 wake_lock_destroy(&dhd->wl_rxwake);
17588 wake_lock_destroy(&dhd->wl_ctrlwake);
17589 wake_lock_destroy(&dhd->wl_evtwake);
17590 wake_lock_destroy(&dhd->wl_pmwake);
17591 wake_lock_destroy(&dhd->wl_txflwake);
17592 #ifdef BCMPCIE_OOB_HOST_WAKE
17593 wake_lock_destroy(&dhd->wl_intrwake);
17594 #endif /* BCMPCIE_OOB_HOST_WAKE */
17595 #ifdef DHD_USE_SCAN_WAKELOCK
17596 wake_lock_destroy(&dhd->wl_scanwake);
17597 #endif /* DHD_USE_SCAN_WAKELOCK */
17598 #ifdef DHD_TRACE_WAKE_LOCK
17599 dhd_wk_lock_trace_deinit(dhd);
17600 #endif /* DHD_TRACE_WAKE_LOCK */
17601 #endif /* CONFIG_HAS_WAKELOCK */
17602 }
17603
17604 bool dhd_os_check_if_up(dhd_pub_t *pub)
17605 {
17606 if (!pub)
17607 return FALSE;
17608 return pub->up;
17609 }
17610
17611 #if defined(BCMSDIO) || defined(BCMPCIE)
17612 /* function to collect firmware, chip id and chip version info */
17613 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
17614 {
17615 int i;
17616
17617 i = snprintf(info_string, sizeof(info_string),
17618 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
17619
17620 if (!dhdp)
17621 return;
17622
17623 i = snprintf(&info_string[i], sizeof(info_string) - i,
17624 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
17625 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
17626 }
17627 #endif /* BCMSDIO || BCMPCIE */
17628 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
17629 {
17630 int ifidx;
17631 int ret = 0;
17632 dhd_info_t *dhd = NULL;
17633
17634 if (!net || !DEV_PRIV(net)) {
17635 DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
17636 __FUNCTION__, net, DEV_PRIV(net)));
17637 return -EINVAL;
17638 }
17639
17640 dhd = DHD_DEV_INFO(net);
17641 if (!dhd)
17642 return -EINVAL;
17643
17644 ifidx = dhd_net2idx(dhd, net);
17645 if (ifidx == DHD_BAD_IF) {
17646 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
17647 return -ENODEV;
17648 }
17649
17650 DHD_OS_WAKE_LOCK(&dhd->pub);
17651 DHD_PERIM_LOCK(&dhd->pub);
17652
17653 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
17654 dhd_check_hang(net, &dhd->pub, ret);
17655
17656 DHD_PERIM_UNLOCK(&dhd->pub);
17657 DHD_OS_WAKE_UNLOCK(&dhd->pub);
17658
17659 return ret;
17660 }
17661
17662 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
17663 {
17664 struct net_device *net;
17665
17666 net = dhd_idx2net(dhdp, ifidx);
17667 if (!net) {
17668 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
17669 return -EINVAL;
17670 }
17671
17672 return dhd_check_hang(net, dhdp, ret);
17673 }
17674
17675 /* Return instance */
17676 int dhd_get_instance(dhd_pub_t *dhdp)
17677 {
17678 return dhdp->info->unit;
17679 }
17680
17681 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
17682 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
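/*
 * dhd_deepsleep(dev, 1) disables packet filtering and MPC before setting
 * the "deepsleep" iovar; dhd_deepsleep(dev, 0) clears it, polling the
 * readback up to MAX_TRY_CNT times until it reports 0, then re-enables
 * MPC.
 */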
17683 int dhd_deepsleep(struct net_device *dev, int flag)
17684 {
17685 char iovbuf[20];
17686 uint powervar = 0;
17687 dhd_info_t *dhd;
17688 dhd_pub_t *dhdp;
17689 int cnt = 0;
17690 int ret = 0;
17691
17692 dhd = DHD_DEV_INFO(dev);
17693 dhdp = &dhd->pub;
17694
17695 switch (flag) {
17696 case 1 : /* Deepsleep on */
17697 DHD_ERROR(("[WiFi] Deepsleep On\n"));
17698 /* give some time to sysioc_work before deepsleep */
17699 OSL_SLEEP(200);
17700 #ifdef PKT_FILTER_SUPPORT
17701 /* disable pkt filter */
17702 dhd_enable_packet_filter(0, dhdp);
17703 #endif /* PKT_FILTER_SUPPORT */
17704 /* Disable MPC */
17705 powervar = 0;
17706 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
17707 0, TRUE);
17708
17709 /* Enable Deepsleep */
17710 powervar = 1;
17711 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
17712 NULL, 0, TRUE);
17713 break;
17714
17715 case 0: /* Deepsleep Off */
17716 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
17717
17718 /* Disable Deepsleep */
17719 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
17720 powervar = 0;
17721 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
17722 sizeof(powervar), NULL, 0, TRUE);
17723
17724 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
17725 sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
17726 if (ret < 0) {
17727 					DHD_ERROR(("error reading dhd deepsleep status,"
17728 						" ret: %d\n", ret));
17729 } else {
17730 if (!(*(int *)iovbuf)) {
17731 DHD_ERROR(("deepsleep mode is 0,"
17732 " count: %d\n", cnt));
17733 break;
17734 }
17735 }
17736 }
17737
17738 /* Enable MPC */
17739 powervar = 1;
17740 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
17741 0, TRUE);
17742 break;
17743 }
17744
17745 return 0;
17746 }
17747 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
17748
17749 #ifdef PROP_TXSTATUS
17750
17751 void dhd_wlfc_plat_init(void *dhd)
17752 {
17753 #ifdef USE_DYNAMIC_F2_BLKSIZE
17754 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
17755 #endif /* USE_DYNAMIC_F2_BLKSIZE */
17756 return;
17757 }
17758
17759 void dhd_wlfc_plat_deinit(void *dhd)
17760 {
17761 #ifdef USE_DYNAMIC_F2_BLKSIZE
17762 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
17763 #endif /* USE_DYNAMIC_F2_BLKSIZE */
17764 return;
17765 }
17766
17767 bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
17768 {
17769 #ifdef SKIP_WLFC_ON_CONCURRENT
17770
17771 #ifdef WL_CFG80211
17772 struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
17773 if (net)
17774 /* enable flow control in vsdb mode */
17775 return !(wl_cfg80211_is_concurrent_mode(net));
17776 #else
17777 return TRUE; /* skip flow control */
17778 #endif /* WL_CFG80211 */
17779
17780 #else
17781 return FALSE;
17782 #endif /* SKIP_WLFC_ON_CONCURRENT */
17783 return FALSE;
17784 }
17785 #endif /* PROP_TXSTATUS */
17786
17787 #ifdef BCMDBGFS
17788 #include <linux/debugfs.h>
17789
17790 typedef struct dhd_dbgfs {
17791 struct dentry *debugfs_dir;
17792 struct dentry *debugfs_mem;
17793 dhd_pub_t *dhdp;
17794 uint32 size;
17795 } dhd_dbgfs_t;
17796
17797 dhd_dbgfs_t g_dbgfs;
17798
17799 extern uint32 dhd_readregl(void *bp, uint32 addr);
17800 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
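/*
 * The "mem" debugfs file implemented below exposes a g_dbgfs.size-byte
 * register window: each read or write is a single 4-byte backplane
 * access at file->f_pos rounded down to a 4-byte boundary.
 */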
17801
17802 static int
17803 dhd_dbg_state_open(struct inode *inode, struct file *file)
17804 {
17805 file->private_data = inode->i_private;
17806 return 0;
17807 }
17808
17809 static ssize_t
17810 dhd_dbg_state_read(struct file *file, char __user *ubuf,
17811 size_t count, loff_t *ppos)
17812 {
17813 ssize_t rval;
17814 uint32 tmp;
17815 loff_t pos = *ppos;
17816 size_t ret;
17817
17818 if (pos < 0)
17819 return -EINVAL;
17820 if (pos >= g_dbgfs.size || !count)
17821 return 0;
17822 if (count > g_dbgfs.size - pos)
17823 count = g_dbgfs.size - pos;
17824
17825 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
17826 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
17827
17828 ret = copy_to_user(ubuf, &tmp, 4);
17829 if (ret == count)
17830 return -EFAULT;
17831
17832 count -= ret;
17833 *ppos = pos + count;
17834 rval = count;
17835
17836 return rval;
17837 }
17838
17839 static ssize_t
17840 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
17841 {
17842 loff_t pos = *ppos;
17843 size_t ret;
17844 uint32 buf;
17845
17846 if (pos < 0)
17847 return -EINVAL;
17848 if (pos >= g_dbgfs.size || !count)
17849 return 0;
17850 if (count > g_dbgfs.size - pos)
17851 count = g_dbgfs.size - pos;
17852
17853 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
17854 if (ret == count)
17855 return -EFAULT;
17856
17857 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
17858 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
17859
17860 return count;
17861 }
17862
17863 loff_t
17864 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
17865 {
17866 loff_t pos = -1;
17867
17868 switch (whence) {
17869 		case 0: /* SEEK_SET */
17870 			pos = off;
17871 			break;
17872 		case 1: /* SEEK_CUR */
17873 			pos = file->f_pos + off;
17874 			break;
17875 		case 2: /* SEEK_END */
17876 			pos = g_dbgfs.size - off;
17877 }
17878 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
17879 }
17880
17881 static const struct file_operations dhd_dbg_state_ops = {
17882 .read = dhd_dbg_state_read,
17883 .write = dhd_debugfs_write,
17884 .open = dhd_dbg_state_open,
17885 .llseek = dhd_debugfs_lseek
17886 };
17887
17888 static void dhd_dbgfs_create(void)
17889 {
17890 if (g_dbgfs.debugfs_dir) {
17891 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
17892 NULL, &dhd_dbg_state_ops);
17893 }
17894 }
17895
17896 void dhd_dbgfs_init(dhd_pub_t *dhdp)
17897 {
17898 g_dbgfs.dhdp = dhdp;
17899 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
17900
17901 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
17902 if (IS_ERR(g_dbgfs.debugfs_dir)) {
17903 g_dbgfs.debugfs_dir = NULL;
17904 return;
17905 }
17906
17907 dhd_dbgfs_create();
17908
17909 return;
17910 }
17911
17912 void dhd_dbgfs_remove(void)
17913 {
17914 debugfs_remove(g_dbgfs.debugfs_mem);
17915 debugfs_remove(g_dbgfs.debugfs_dir);
17916
17917 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
17918 }
17919 #endif /* BCMDBGFS */
17920
17921 #ifdef CUSTOM_SET_CPUCORE
17922 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
17923 {
17924 int e_dpc = 0, e_rxf = 0, retry_set = 0;
17925
17926 if (!(dhd->chan_isvht80)) {
17927 		DHD_ERROR(("%s: chan_isvht80(%d), skip cpucore setting\n", __FUNCTION__, dhd->chan_isvht80));
17928 return;
17929 }
17930
17931 if (DPC_CPUCORE) {
17932 do {
17933 if (set == TRUE) {
17934 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17935 cpumask_of(DPC_CPUCORE));
17936 } else {
17937 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17938 cpumask_of(PRIMARY_CPUCORE));
17939 }
17940 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17941 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
17942 return;
17943 }
17944 if (e_dpc < 0)
17945 OSL_SLEEP(1);
17946 } while (e_dpc < 0);
17947 }
17948 if (RXF_CPUCORE) {
17949 do {
17950 if (set == TRUE) {
17951 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17952 cpumask_of(RXF_CPUCORE));
17953 } else {
17954 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17955 cpumask_of(PRIMARY_CPUCORE));
17956 }
17957 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17958 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
17959 return;
17960 }
17961 if (e_rxf < 0)
17962 OSL_SLEEP(1);
17963 } while (e_rxf < 0);
17964 }
17965 #ifdef DHD_OF_SUPPORT
17966 interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
17967 #endif /* DHD_OF_SUPPORT */
17968 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
17969
17970 return;
17971 }
17972 #endif /* CUSTOM_SET_CPUCORE */
17973
17974 #ifdef DHD_MCAST_REGEN
17975 /* Get interface specific mcast_regen_bss_enable configuration */
17976 int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
17977 {
17978 dhd_info_t *dhd = dhdp->info;
17979 dhd_if_t *ifp;
17980
17981 ASSERT(idx < DHD_MAX_IFS);
17982
17983 ifp = dhd->iflist[idx];
17984
17985 return ifp->mcast_regen_bss_enable;
17986 }
17987
17988 /* Set interface specific mcast_regen configuration */
17989 int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
17990 {
17991 dhd_info_t *dhd = dhdp->info;
17992 dhd_if_t *ifp;
17993
17994 ASSERT(idx < DHD_MAX_IFS);
17995
17996 ifp = dhd->iflist[idx];
17997
17998 ifp->mcast_regen_bss_enable = val;
17999
18000 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
18001 * is enabled
18002 */
18003 dhd_update_rx_pkt_chainable_state(dhdp, idx);
18004 return BCME_OK;
18005 }
18006 #endif /* DHD_MCAST_REGEN */
18007
18008 /* Get interface specific ap_isolate configuration */
18009 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
18010 {
18011 dhd_info_t *dhd = dhdp->info;
18012 dhd_if_t *ifp;
18013
18014 ASSERT(idx < DHD_MAX_IFS);
18015
18016 ifp = dhd->iflist[idx];
18017
18018 return ifp->ap_isolate;
18019 }
18020
18021 /* Set interface specific ap_isolate configuration */
18022 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
18023 {
18024 dhd_info_t *dhd = dhdp->info;
18025 dhd_if_t *ifp;
18026
18027 ASSERT(idx < DHD_MAX_IFS);
18028
18029 ifp = dhd->iflist[idx];
18030
18031 if (ifp)
18032 ifp->ap_isolate = val;
18033
18034 return 0;
18035 }
18036
18037 #ifdef DHD_FW_COREDUMP
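/*
 * Schedule a SOCRAM dump. DUMP_MEMONLY panics immediately via BUG_ON;
 * dongle-init-failure and BT-triggered dumps are taken synchronously;
 * everything else is deferred to the dhd_mem_dump work item, which also
 * clears the bus-busy memdump bit once the dump file is written.
 */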
18038 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
18039 {
18040 unsigned long flags = 0;
18041 dhd_dump_t *dump = NULL;
18042 dhd_info_t *dhd_info = NULL;
18043 dhd_info = (dhd_info_t *)dhdp->info;
18044 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
18045 if (dump == NULL) {
18046 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
18047 return;
18048 }
18049 dump->buf = buf;
18050 dump->bufsize = size;
18051 #ifdef DHD_LOG_DUMP
18052 dhd_print_buf_addr(dhdp, "memdump", buf, size);
18053 #endif /* DHD_LOG_DUMP */
18054
18055 if (dhdp->memdump_enabled == DUMP_MEMONLY) {
18056 BUG_ON(1);
18057 }
18058
18059 #if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM)
18060 if (
18061 #if defined(DEBUG_DNGL_INIT_FAIL)
18062 (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
18063 #endif /* DEBUG_DNGL_INIT_FAIL */
18064 #ifdef DHD_ERPOM
18065 (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
18066 #endif /* DHD_ERPOM */
18067 FALSE)
18068 {
18069 #ifdef DHD_LOG_DUMP
18070 log_dump_type_t *flush_type = NULL;
18071 #endif // endif
18072 dhd_info->scheduled_memdump = FALSE;
18073 dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
18074 /* for dongle init fail cases, 'dhd_mem_dump' does
18075 * not call 'dhd_log_dump', so call it here.
18076 */
18077 #ifdef DHD_LOG_DUMP
18078 flush_type = MALLOCZ(dhdp->osh,
18079 sizeof(log_dump_type_t));
18080 if (flush_type) {
18081 *flush_type = DLD_BUF_TYPE_ALL;
18082 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
18083 dhd_log_dump(dhdp->info, flush_type, 0);
18084 }
18085 #endif /* DHD_LOG_DUMP */
18086 return;
18087 }
18088 #endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM */
18089
18090 dhd_info->scheduled_memdump = TRUE;
18091 /* bus busy bit for mem dump will be cleared in mem dump
18092 * work item context, after mem dump file is written
18093 */
18094 DHD_GENERAL_LOCK(dhdp, flags);
18095 DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
18096 DHD_GENERAL_UNLOCK(dhdp, flags);
18097 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
18098 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
18099 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
18100 }
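/* Trigger-path sketch (illustrative; based on the callers later in this
 * file, e.g. dhd_pktid_error_handler): a caller records why the dump is
 * being taken and asks the bus layer to pull SOC RAM, which then hands
 * the buffer to dhd_schedule_memdump() above.
 */
#if 0
	dhdp->memdump_enabled = DUMP_MEMFILE;		/* write the dump to a file */
	dhdp->memdump_type = DUMP_TYPE_BY_USER;		/* reason recorded with the dump */
	dhd_bus_mem_dump(dhdp);				/* collects SOC RAM */
#endif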
18101 static void
18102 dhd_mem_dump(void *handle, void *event_info, u8 event)
18103 {
18104 dhd_info_t *dhd = handle;
18105 dhd_pub_t *dhdp = NULL;
18106 dhd_dump_t *dump = event_info;
18107 unsigned long flags = 0;
18108
18109 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18110
18111 if (!dhd) {
18112 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
18113 return;
18114 }
18115
18116 dhdp = &dhd->pub;
18117
18118 DHD_GENERAL_LOCK(dhdp, flags);
18119 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
18120 DHD_GENERAL_UNLOCK(dhdp, flags);
18121 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
18122 goto exit;
18123 }
18124 DHD_GENERAL_UNLOCK(dhdp, flags);
18125
18126 #ifdef D2H_MINIDUMP
18127 /* dump minidump */
18128 if (dhd_bus_is_minidump_enabled(dhdp)) {
18129 dhd_d2h_minidump(&dhd->pub);
18130 } else {
18131 DHD_ERROR(("minidump is not enabled\n"));
18132 }
18133 #endif /* D2H_MINIDUMP */
18134
18135 if (!dump) {
18136 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
18137 goto exit;
18138 }
18139
18140 if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
18141 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
18142 #ifdef DHD_DEBUG_UART
18143 dhd->pub.memdump_success = FALSE;
18144 #endif /* DHD_DEBUG_UART */
18145 }
18146
18147 	/* Call dhd_log_dump directly for debug_dump collection from the mem_dump
18148 	 * work queue context; no need to schedule another work item for log dump.
18149 	 * For a user-initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
18150 	 * the cfg layer schedules the log_dump work itself, and that path is left
18151 	 * undisturbed. If 'dhd_mem_dump' is called directly, debug_dump is not
18152 	 * collected, since the caller may be in a non-sleepable context.
18153 	 */
18154 #ifdef DHD_LOG_DUMP
18155 if (dhd->scheduled_memdump &&
18156 dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
18157 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
18158 sizeof(log_dump_type_t));
18159 if (flush_type) {
18160 *flush_type = DLD_BUF_TYPE_ALL;
18161 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
18162 dhd_log_dump(dhd, flush_type, 0);
18163 }
18164 }
18165 #endif /* DHD_LOG_DUMP */
18166
18167 #ifdef DHD_PKT_LOGGING
18168 copy_debug_dump_time(dhdp->debug_dump_time_pktlog_str, dhdp->debug_dump_time_str);
18169 #endif /* DHD_PKT_LOGGING */
18170 clear_debug_dump_time(dhdp->debug_dump_time_str);
18171
18172 	/* Before calling BUG_ON(), wait for the other logs to be dumped.
18173 	 * We cannot wait when dhd_mem_dump is called directly,
18174 	 * as the caller may not be in a sleepable context.
18175 	 */
18176 if (dhd->scheduled_memdump) {
18177 uint bitmask = 0;
18178 int timeleft = 0;
18179 #ifdef DHD_SSSR_DUMP
18180 bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
18181 #endif // endif
18182 if (bitmask != 0) {
18183 timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
18184 &dhdp->dhd_bus_busy_state, bitmask, 0);
18185 if ((timeleft == 0) || (timeleft == 1)) {
18186 DHD_ERROR(("%s: Timed out on sssr dump, dhd_bus_busy_state=0x%x\n",
18187 __FUNCTION__, dhdp->dhd_bus_busy_state));
18188 }
18189 }
18190 }
18191
18192 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
18193 #ifdef DHD_LOG_DUMP
18194 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
18195 #endif /* DHD_LOG_DUMP */
18196 dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
18197 #ifdef DHD_DEBUG_UART
18198 dhd->pub.memdump_success == TRUE &&
18199 #endif /* DHD_DEBUG_UART */
18200 #ifdef DNGL_EVENT_SUPPORT
18201 dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
18202 #endif /* DNGL_EVENT_SUPPORT */
18203 dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
18204
18205 #ifdef SHOW_LOGTRACE
18206 /* Wait till event_log_dispatcher_work finishes */
18207 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
18208 #endif /* SHOW_LOGTRACE */
18209
18210 BUG_ON(1);
18211 }
18212
18213 exit:
18214 if (dump)
18215 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
18216 DHD_GENERAL_LOCK(dhdp, flags);
18217 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
18218 dhd_os_busbusy_wake(dhdp);
18219 DHD_GENERAL_UNLOCK(dhdp, flags);
18220 dhd->scheduled_memdump = FALSE;
18221 }
18222 #endif /* DHD_FW_COREDUMP */
18223
18224 #ifdef D2H_MINIDUMP
18225 void
18226 dhd_d2h_minidump(dhd_pub_t *dhdp)
18227 {
18228 char d2h_minidump[128];
18229 dhd_dma_buf_t *minidump_buf;
18230
18231 minidump_buf = dhd_prot_get_minidump_buf(dhdp);
18232 if (minidump_buf->va == NULL) {
18233 DHD_ERROR(("%s: minidump_buf->va is NULL\n", __FUNCTION__));
18234 return;
18235 }
18236
18237 /* Init file name */
18238 memset(d2h_minidump, 0, sizeof(d2h_minidump));
18239 snprintf(d2h_minidump, sizeof(d2h_minidump), "%s", "d2h_minidump");
18240
18241 if (write_dump_to_file(dhdp, (uint8 *)minidump_buf->va,
18242 BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN, d2h_minidump)) {
18243 DHD_ERROR(("%s: failed to dump d2h_minidump to file\n",
18244 __FUNCTION__));
18245 }
18246 }
18247 #endif /* D2H_MINIDUMP */
18248
18249 #ifdef DHD_SSSR_DUMP
18250
18251 static void
18252 dhd_sssr_dump(void *handle, void *event_info, u8 event)
18253 {
18254 dhd_info_t *dhd = handle;
18255 dhd_pub_t *dhdp;
18256 int i;
18257 char before_sr_dump[128];
18258 char after_sr_dump[128];
18259 unsigned long flags = 0;
18260 uint dig_buf_size = 0;
18261
18262 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18263
18264 if (!dhd) {
18265 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
18266 return;
18267 }
18268
18269 dhdp = &dhd->pub;
18270
18271 DHD_GENERAL_LOCK(dhdp, flags);
18272 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
18273 DHD_GENERAL_UNLOCK(dhdp, flags);
18274 DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
18275 goto exit;
18276 }
18277 DHD_GENERAL_UNLOCK(dhdp, flags);
18278
18279 for (i = 0; i < MAX_NUM_D11CORES; i++) {
18280 /* Init file name */
18281 memset(before_sr_dump, 0, sizeof(before_sr_dump));
18282 memset(after_sr_dump, 0, sizeof(after_sr_dump));
18283
18284 snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
18285 "sssr_core", i, "before_SR");
18286 snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
18287 "sssr_core", i, "after_SR");
18288
18289 if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
18290 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
18291 dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
18292 DHD_ERROR(("%s: writing SSSR core %d dump (before SR) to file failed\n",
18293 __FUNCTION__, i));
18294 }
18295 }
18296 if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
18297 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
18298 dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
18299 DHD_ERROR(("%s: writing SSSR core %d dump (after SR) to file failed\n",
18300 __FUNCTION__, i));
18301 }
18302 }
18303 }
18304
18305 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
18306 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
18307 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
18308 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
18309 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
18310 }
18311
18312 if (dhdp->sssr_dig_buf_before) {
18313 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
18314 dig_buf_size, "sssr_dig_before_SR")) {
18315 DHD_ERROR(("%s: writing SSSR Dig dump (before SR) to the file failed\n",
18316 __FUNCTION__));
18317 }
18318 }
18319
18320 if (dhdp->sssr_dig_buf_after) {
18321 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
18322 dig_buf_size, "sssr_dig_after_SR")) {
18323 DHD_ERROR(("%s: writing SSSR Dig dump (after SR) to the file failed\n",
18324 __FUNCTION__));
18325 }
18326 }
18327
18328 exit:
18329 DHD_GENERAL_LOCK(dhdp, flags);
18330 DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
18331 dhd_os_busbusy_wake(dhdp);
18332 DHD_GENERAL_UNLOCK(dhdp, flags);
18333 }
18334
18335 void
18336 dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
18337 {
18338 unsigned long flags = 0;
18339
18340 /* bus busy bit for sssr dump will be cleared in sssr dump
18341 * work item context, after sssr dump files are created
18342 */
18343 DHD_GENERAL_LOCK(dhdp, flags);
18344 DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
18345 DHD_GENERAL_UNLOCK(dhdp, flags);
18346
18347 if (dhdp->info->no_wq_sssrdump) {
18348 dhd_sssr_dump(dhdp->info, 0, 0);
18349 return;
18350 }
18351
18352 DHD_ERROR(("%s: scheduling sssr dump.. \n", __FUNCTION__));
18353 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
18354 DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
18355 }
18356 #endif /* DHD_SSSR_DUMP */
18357
18358 #ifdef DHD_LOG_DUMP
18359 static void
18360 dhd_log_dump(void *handle, void *event_info, u8 event)
18361 {
18362 dhd_info_t *dhd = handle;
18363 log_dump_type_t *type = (log_dump_type_t *)event_info;
18364
18365 if (!dhd || !type) {
18366 DHD_ERROR(("%s: dhd or type is NULL\n", __FUNCTION__));
18367 return;
18368 }
18369
18370 #ifdef WL_CFG80211
18371 /* flush the fw side logs */
18372 wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
18373 FW_LOGSET_MASK_ALL);
18374 #endif // endif
18375 /* There are currently three possible contexts from which
18376 * a log dump can be scheduled:
18377 * 1. TRAP, 2. the supplicant DEBUG_DUMP private driver command,
18378 * 3. a HEALTH CHECK event.
18379 * The concise debug info buffer is a shared resource,
18380 * and when a trap is one of the contexts, both of the
18381 * scheduled work items need to run, because trap data is
18382 * essential for debugging. Hence a mutex lock is acquired
18383 * before calling do_dhd_log_dump().
18384 */
18385 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
18386 dhd_os_logdump_lock(&dhd->pub);
18387 DHD_OS_WAKE_LOCK(&dhd->pub);
18388 if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
18389 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
18390 }
18391 DHD_OS_WAKE_UNLOCK(&dhd->pub);
18392 dhd_os_logdump_unlock(&dhd->pub);
18393 }
18394
18395 void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
18396 {
18397 DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
18398 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
18399 type, DHD_WQ_WORK_DHD_LOG_DUMP,
18400 dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
18401 }
18402
18403 static void
18404 dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
18405 {
18406 if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
18407 (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)) {
18408 #if defined(CONFIG_ARM64)
18409 DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
18410 name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
18411 #elif defined(__ARM_ARCH_7A__)
18412 DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
18413 name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
18414 #endif /* CONFIG_ARM64 || __ARM_ARCH_7A__ */
18415 }
18416 }
18417
18418 static void
18419 dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
18420 {
18421 int i;
18422 unsigned long wr_size = 0;
18423 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
18424 size_t log_size = 0;
18425 char buf_name[DHD_PRINT_BUF_NAME_LEN];
18426 dhd_dbg_ring_t *ring = NULL;
18427
18428 BCM_REFERENCE(ring);
18429
18430 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18431 dld_buf = &g_dld_buf[i];
18432 log_size = (unsigned long)dld_buf->max -
18433 (unsigned long)dld_buf->buffer;
18434 if (dld_buf->wraparound) {
18435 wr_size = log_size;
18436 } else {
18437 wr_size = (unsigned long)dld_buf->present -
18438 (unsigned long)dld_buf->front;
18439 }
18440 scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
18441 dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
18442 scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
18443 dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
18444 scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
18445 dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
18446 scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
18447 dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
18448 }
18449
18450 #ifdef DEBUGABILITY_ECNTRS_LOGGING
18451 /* periodic flushing of ecounters is NOT supported */
18452 if (*type == DLD_BUF_TYPE_ALL &&
18453 logdump_ecntr_enable &&
18454 dhdp->ecntr_dbg_ring) {
18455
18456 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
18457 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
18458 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
18459 LOG_DUMP_ECNTRS_MAX_BUFSIZE);
18460 }
18461 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
18462
18463 #ifdef BCMPCIE
18464 if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
18465 dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
18466 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
18467 }
18468 #endif /* BCMPCIE */
18469
18470 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18471 /* if health check event was received */
18472 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18473 dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
18474 HEALTH_CHK_BUF_SIZE);
18475 }
18476 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18477
18478 /* append the concise debug information */
18479 if (dhdp->concise_dbg_buf) {
18480 dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
18481 CONCISE_DUMP_BUFLEN);
18482 }
18483 }
18484
18485 #ifdef CUSTOMER_HW4_DEBUG
18486 static void
18487 dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
18488 {
18489 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
18490 char *end = NULL;
18491 unsigned long plen = 0;
18492
18493 if (!bufptr || !len)
18494 return;
18495
18496 memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
18497 end = bufptr + len;
18498 while (bufptr < end) {
18499 if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
18500 memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
18501 tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
18502 printf("%s", tmp_buf);
18503 bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
18504 } else {
18505 plen = (unsigned long)end - (unsigned long)bufptr;
18506 memcpy(tmp_buf, bufptr, plen);
18507 tmp_buf[plen] = '\0';
18508 printf("%s", tmp_buf);
18509 bufptr += plen;
18510 }
18511 }
18512 }
18513
18514 static void
18515 dhd_log_dump_print_tail(dhd_pub_t *dhdp,
18516 struct dhd_log_dump_buf *dld_buf,
18517 uint tail_len)
18518 {
18519 char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
18520 unsigned long len_flush1 = 0, len_flush2 = 0;
18521 unsigned long flags = 0;
18522
18523 /* need to hold the lock before accessing the 'present' and 'front' ptrs */
18524 spin_lock_irqsave(&dld_buf->lock, flags);
18525 flush_ptr1 = dld_buf->present - tail_len;
18526 if (flush_ptr1 >= dld_buf->front) {
18527 /* tail content is within the buffer */
18528 flush_ptr2 = NULL;
18529 len_flush1 = tail_len;
18530 } else if (dld_buf->wraparound) {
18531 /* tail content spans the buffer length i.e, wrap around */
18532 flush_ptr1 = dld_buf->front;
18533 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
18534 len_flush2 = (unsigned long)tail_len - len_flush1;
18535 flush_ptr2 = (char *)((unsigned long)dld_buf->max -
18536 (unsigned long)len_flush2);
18537 } else {
18538 /* amount of log data in the buffer is less than the tail size */
18539 flush_ptr1 = dld_buf->front;
18540 flush_ptr2 = NULL;
18541 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
18542 }
18543 spin_unlock_irqrestore(&dld_buf->lock, flags);
18544
18545 printf("\n================= LOG_DUMP tail =================\n");
18546 if (flush_ptr2) {
18547 dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
18548 }
18549 dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
18550 printf("\n===================================================\n");
18551 }
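/* Worked example (illustrative): with front=buf, max=buf+1024,
 * present=buf+100 and tail_len=200, flush_ptr1 (= present - 200) falls
 * below 'front'. If the buffer has wrapped, the tail spans two chunks:
 * len_flush2 = 100 bytes ending at 'max' are printed first, followed by
 * len_flush1 = 100 bytes starting at 'front'. Without a wraparound, only
 * the 100 valid bytes from 'front' are printed.
 */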
18552 #endif /* CUSTOMER_HW4_DEBUG */
18553
18554 /* Must hold 'dhd_os_logdump_lock' before calling this function ! */
18555 static int
18556 do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
18557 {
18558 int ret = 0, i = 0;
18559 struct file *fp = NULL;
18560 mm_segment_t old_fs;
18561 loff_t pos = 0;
18562 unsigned int wr_size = 0;
18563 char dump_path[128];
18564 uint32 file_mode;
18565 unsigned long flags = 0;
18566 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
18567 size_t log_size = 0;
18568 size_t fspace_remain = 0;
18569 struct kstat stat;
18570 char time_str[128];
18571 char *ts = NULL;
18572 int remain_len = 0;
18573 log_dump_section_hdr_t sec_hdr;
18574 dhd_info_t *dhd_info = NULL;
18575
18576 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18577
18578 /* If dhdp is NULL, it is extremely unlikely that a log dump will be
18579 * scheduled, so not freeing 'type' here is OK. Even if we wanted to
18580 * free 'type', we could not, since 'dhdp->osh' is unavailable
18581 * when dhdp is NULL.
18582 */
18583 if (!dhdp || !type) {
18584 if (dhdp) {
18585 DHD_GENERAL_LOCK(dhdp, flags);
18586 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18587 dhd_os_busbusy_wake(dhdp);
18588 DHD_GENERAL_UNLOCK(dhdp, flags);
18589 }
18590 return BCME_ERROR;
18591 }
18592
18593 DHD_GENERAL_LOCK(dhdp, flags);
18594 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
18595 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18596 dhd_os_busbusy_wake(dhdp);
18597 DHD_GENERAL_UNLOCK(dhdp, flags);
18598 MFREE(dhdp->osh, type, sizeof(*type));
18599 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
18600 return BCME_ERROR;
18601 }
18602 DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
18603 DHD_GENERAL_UNLOCK(dhdp, flags);
18604
18605 dhd_info = (dhd_info_t *)dhdp->info;
18606 BCM_REFERENCE(dhd_info);
18607
18608 /* in case of trap get preserve logs from ETD */
18609 #if defined(BCMPCIE) && defined(DEBUGABILITY_ETD_PRSRV_LOGS)
18610 if (dhdp->dongle_trap_occured &&
18611 dhdp->extended_trap_data) {
18612 dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
18613 &dhd_info->event_data);
18614 }
18615 #endif /* BCMPCIE && DEBUGABILITY_ETD_PRSRV_LOGS */
18616
18617 #ifdef SHOW_LOGTRACE
18618 /* flush the event work items to get any fw events/logs;
18619 * flush_delayed_work() is a blocking call
18620 */
18621 flush_delayed_work(&dhd_info->event_log_dispatcher_work);
18622 #endif /* SHOW_LOGTRACE */
18623
18624 #ifdef CUSTOMER_HW4_DEBUG
18625 /* Print the last 'x' KB of preserve buffer data to the kmsg console;
18626 * this addresses cases where the debug_dump file is not
18627 * available for debugging.
18628 */
18629 dhd_log_dump_print_tail(dhdp,
18630 &g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
18631 #endif /* CUSTOMER_HW4_DEBUG */
18632
18633 /* change to KERNEL_DS address limit */
18634 old_fs = get_fs();
18635 set_fs(KERNEL_DS);
18636
18637 /* Init file name */
18638 memset(dump_path, 0, sizeof(dump_path));
18639 switch (dhdp->debug_dump_subcmd) {
18640 case CMD_UNWANTED:
18641 snprintf(dump_path, sizeof(dump_path), "%s",
18642 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18643 DHD_DUMP_SUBSTR_UNWANTED);
18644 break;
18645 case CMD_DISCONNECTED:
18646 snprintf(dump_path, sizeof(dump_path), "%s",
18647 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18648 DHD_DUMP_SUBSTR_DISCONNECTED);
18649 break;
18650 default:
18651 snprintf(dump_path, sizeof(dump_path), "%s",
18652 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
18653 }
18654
18655 if (!dhdp->logdump_periodic_flush) {
18656 get_debug_dump_time(dhdp->debug_dump_time_str);
18657 snprintf(dump_path + strlen(dump_path),
18658 sizeof(dump_path) - strlen(dump_path),
18659 "_%s", dhdp->debug_dump_time_str);
18660 }
18661
18662 memset(time_str, 0, sizeof(time_str));
18663 ts = dhd_log_dump_get_timestamp();
18664 snprintf(time_str, sizeof(time_str),
18665 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
18666
18667 DHD_ERROR(("DHD version: %s\n", dhd_version));
18668 DHD_ERROR(("F/W version: %s\n", fw_version));
18669 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
18670
18671 dhd_log_dump_buf_addr(dhdp, type);
18672
18673 /* if this is the first time after dhd is loaded,
18674 * or, if periodic flush is disabled, clear the log file
18675 */
18676 if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
18677 file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
18678 else
18679 file_mode = O_CREAT | O_RDWR | O_SYNC;
18680
18681 fp = filp_open(dump_path, file_mode, 0664);
18682 if (IS_ERR(fp)) {
18683 /* On an installed Android image, fall back to the '/data' directory */
18684 #if defined(CONFIG_X86)
18685 DHD_ERROR(("%s: file open error on installed Android image, trying /data...\n",
18686 __FUNCTION__));
18687 snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
18688 if (!dhdp->logdump_periodic_flush) {
18689 snprintf(dump_path + strlen(dump_path),
18690 sizeof(dump_path) - strlen(dump_path),
18691 "_%s", dhdp->debug_dump_time_str);
18692 }
18693 fp = filp_open(dump_path, file_mode, 0664);
18694 if (IS_ERR(fp)) {
18695 ret = PTR_ERR(fp);
18696 DHD_ERROR(("open file error, err = %d\n", ret));
18697 goto exit;
18698 }
18699 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
18700 #else
18701 ret = PTR_ERR(fp);
18702 DHD_ERROR(("open file error, err = %d\n", ret));
18703 goto exit;
18704 #endif /* CONFIG_X86 */
18705 }
18706
18707 ret = vfs_stat(dump_path, &stat);
18708 if (ret < 0) {
18709 DHD_ERROR(("file stat error, err = %d\n", ret));
18710 goto exit;
18711 }
18712
18713 /* if someone else has changed the file */
18714 if (dhdp->last_file_posn != 0 &&
18715 stat.size < dhdp->last_file_posn) {
18716 dhdp->last_file_posn = 0;
18717 }
18718
18719 if (dhdp->logdump_periodic_flush) {
18720 log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
18721 /* calculate the amount of space required to dump all logs */
18722 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
18723 if (*type != DLD_BUF_TYPE_ALL && i != *type)
18724 continue;
18725
18726 if (g_dld_buf[i].wraparound) {
18727 log_size += (unsigned long)g_dld_buf[i].max
18728 - (unsigned long)g_dld_buf[i].buffer;
18729 } else {
18730 spin_lock_irqsave(&g_dld_buf[i].lock, flags);
18731 log_size += (unsigned long)g_dld_buf[i].present -
18732 (unsigned long)g_dld_buf[i].front;
18733 spin_unlock_irqrestore(&g_dld_buf[i].lock, flags);
18734 }
18735 log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
18736
18737 if (*type != DLD_BUF_TYPE_ALL && i == *type)
18738 break;
18739 }
18740
18741 ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
18742 if (ret < 0) {
18743 DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
18744 goto exit;
18745 }
18746 pos = fp->f_pos;
18747
18748 /* if the max file size is reached, wrap around to beginning of the file
18749 * we're treating the file as a large ring buffer
18750 */
18751 fspace_remain = logdump_max_filesize - pos;
18752 if (log_size > fspace_remain) {
18753 fp->f_pos -= pos;
18754 pos = fp->f_pos;
18755 }
18756 }
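/* Worked example (illustrative): with logdump_max_filesize = 256KB, a
 * current position of 250KB and log_size = 10KB, fspace_remain is 6KB,
 * so f_pos is rewound to offset 0 and the oldest records at the head of
 * the file are overwritten; the file behaves as a large ring buffer.
 */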
18757 /* write the timestamp hdr to the file first */
18758 ret = vfs_write(fp, time_str, strlen(time_str), &pos);
18759 if (ret < 0) {
18760 DHD_ERROR(("write file error, err = %d\n", ret));
18761 goto exit;
18762 }
18763
18764 /* prep the section header */
18765 memset(&sec_hdr, 0, sizeof(sec_hdr));
18766 sec_hdr.magic = LOG_DUMP_MAGIC;
18767 sec_hdr.timestamp = local_clock();
18768
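/* On-disk layout produced by the writes below (a sketch; optional
 * sections appear only when the corresponding feature/condition holds):
 *
 *   [time_str]
 *   [hdr_str][log_dump_section_hdr_t][buffer contents]    (per DLD buffer)
 *   [EXT_TRAP_LOG_HDR][sec_hdr][extended trap data]       (BCMPCIE, on trap)
 *   [HEALTH_CHK_LOG_HDR][sec_hdr][health check data]      (on HC event)
 *   [FLOWRING_DUMP_HDR][ring summary][sec_hdr][ring data] (DHD_DUMP_PCIE_RINGS)
 *   [DHD_DUMP_LOG_HDR][sec_hdr][concise debug info]
 *
 * Each section header carries LOG_DUMP_MAGIC, which lets an offline
 * parser locate section boundaries.
 */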
18769 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
18770 unsigned int buf_size = 0;
18771
18772 if (*type != DLD_BUF_TYPE_ALL && i != *type)
18773 continue;
18774
18775 /* calculate the length of the log */
18776 dld_buf = &g_dld_buf[i];
18777 buf_size = (unsigned long)dld_buf->max -
18778 (unsigned long)dld_buf->buffer;
18779 if (dld_buf->wraparound) {
18780 wr_size = buf_size;
18781 } else {
18782 /* need to hold the lock before accessing the 'present' and 'front' ptrs */
18783 spin_lock_irqsave(&dld_buf->lock, flags);
18784 wr_size = (unsigned long)dld_buf->present -
18785 (unsigned long)dld_buf->front;
18786 spin_unlock_irqrestore(&dld_buf->lock, flags);
18787 }
18788
18789 /* write the section header first */
18790 sec_hdr.type = dld_hdrs[i].sec_type;
18791 sec_hdr.length = wr_size;
18792 vfs_write(fp, dld_hdrs[i].hdr_str, strlen(dld_hdrs[i].hdr_str), &pos);
18793 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18794 /* write the log */
18795 ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
18796 if (ret < 0) {
18797 DHD_ERROR(("write file error, err = %d\n", ret));
18798 goto exit;
18799 }
18800
18801 /* re-init dhd_log_dump_buf structure */
18802 spin_lock_irqsave(&dld_buf->lock, flags);
18803 dld_buf->wraparound = 0;
18804 dld_buf->present = dld_buf->front;
18805 dld_buf->remain = buf_size;
18806 bzero(dld_buf->buffer, buf_size);
18807 spin_unlock_irqrestore(&dld_buf->lock, flags);
18808
18809 if (*type != DLD_BUF_TYPE_ALL)
18810 break;
18811 }
18812
18813 #ifdef DEBUGABILITY_ECNTRS_LOGGING
18814 /* periodic flushing of ecounters is NOT supported */
18815 if (*type == DLD_BUF_TYPE_ALL &&
18816 logdump_ecntr_enable &&
18817 dhdp->ecntr_dbg_ring) {
18818 dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
18819 fp, (unsigned long *)&pos, &sec_hdr);
18820 }
18821 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
18822
18823 #ifdef BCMPCIE
18824 /* append extended trap data to the file in case of traps */
18825 if (dhdp->dongle_trap_occured &&
18826 dhdp->extended_trap_data) {
18827 /* write the section header first */
18828 vfs_write(fp, EXT_TRAP_LOG_HDR, strlen(EXT_TRAP_LOG_HDR), &pos);
18829 sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
18830 sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
18831 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18832 /* write the log */
18833 ret = vfs_write(fp, (char *)dhdp->extended_trap_data,
18834 BCMPCIE_EXT_TRAP_DATA_MAXLEN, &pos);
18835 if (ret < 0) {
18836 DHD_ERROR(("write file error of ext trap info,"
18837 " err = %d\n", ret));
18838 goto exit;
18839 }
18840 }
18841 #endif /* BCMPCIE */
18842
18843 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18844 /* if health check event was received, dump to file */
18845 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18846 /* write the section header first */
18847 vfs_write(fp, HEALTH_CHK_LOG_HDR, strlen(HEALTH_CHK_LOG_HDR), &pos);
18848 sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
18849 sec_hdr.length = HEALTH_CHK_BUF_SIZE;
18850 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18851 /* write the log */
18852 ret = vfs_write(fp, (char *)dhdp->health_chk_event_data,
18853 HEALTH_CHK_BUF_SIZE, &pos);
18854 if (ret < 0) {
18855 DHD_ERROR(("write file error of health chk info,"
18856 " err = %d\n", ret));
18857 goto exit;
18858 }
18859 }
18860 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18861
18862 #ifdef DHD_DUMP_PCIE_RINGS
18863 /* write the section header first */
18864 vfs_write(fp, FLOWRING_DUMP_HDR, strlen(FLOWRING_DUMP_HDR), &pos);
18865 /* Write the ring summary */
18866 ret = vfs_write(fp, dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN - remain_len, &pos);
18867 if (ret < 0) {
18868 DHD_ERROR(("write file error of concise debug info,"
18869 " err = %d\n", ret));
18870 goto exit;
18871 }
18872 sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
18873 sec_hdr.length = ((H2DRING_TXPOST_ITEMSIZE
18874 * H2DRING_TXPOST_MAX_ITEM)
18875 + (D2HRING_TXCMPLT_ITEMSIZE
18876 * D2HRING_TXCMPLT_MAX_ITEM)
18877 + (H2DRING_RXPOST_ITEMSIZE
18878 * H2DRING_RXPOST_MAX_ITEM)
18879 + (D2HRING_RXCMPLT_ITEMSIZE
18880 * D2HRING_RXCMPLT_MAX_ITEM)
18881 + (H2DRING_CTRL_SUB_ITEMSIZE
18882 * H2DRING_CTRL_SUB_MAX_ITEM)
18883 + (D2HRING_CTRL_CMPLT_ITEMSIZE
18884 * D2HRING_CTRL_CMPLT_MAX_ITEM)
18885 + (H2DRING_INFO_BUFPOST_ITEMSIZE
18886 * H2DRING_DYNAMIC_INFO_MAX_ITEM)
18887 + (D2HRING_INFO_BUFCMPLT_ITEMSIZE
18888 * D2HRING_DYNAMIC_INFO_MAX_ITEM));
18889 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18890 /* write the log */
18891 ret = dhd_d2h_h2d_ring_dump(dhdp, fp, (unsigned long *)&pos);
18892 if (ret < 0) {
18893 DHD_ERROR(("%s: error dumping ring data!\n",
18894 __FUNCTION__));
18895 goto exit;
18896 }
18897 #endif /* DHD_DUMP_PCIE_RINGS */
18898
18899 /* append the concise debug information to the file.
18900 * This is the information which is seen
18901 * when a 'dhd dump' iovar is fired
18902 */
18903 if (dhdp->concise_dbg_buf) {
18904 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18905 if (remain_len <= 0) {
18906 DHD_ERROR(("%s: error getting concise debug info !\n",
18907 __FUNCTION__));
18908 goto exit;
18909 } else {
18910 /* write the section header first */
18911 vfs_write(fp, DHD_DUMP_LOG_HDR, strlen(DHD_DUMP_LOG_HDR), &pos);
18912 sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
18913 sec_hdr.length = CONCISE_DUMP_BUFLEN - remain_len;
18914 vfs_write(fp, (char *)&sec_hdr, sizeof(sec_hdr), &pos);
18915 /* write the log */
18916 ret = vfs_write(fp, dhdp->concise_dbg_buf,
18917 CONCISE_DUMP_BUFLEN - remain_len, &pos);
18918 if (ret < 0) {
18919 DHD_ERROR(("write file error of concise debug info,"
18920 " err = %d\n", ret));
18921 goto exit;
18922 }
18923 }
18924 }
18925
18926 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
18927 ret = dhd_log_dump_cookie_to_file(dhdp, fp, (unsigned long *)&pos);
18928 if (ret < 0) {
18929 DHD_ERROR(("write file error of cookie info, err = %d\n", ret));
18930 goto exit;
18931 }
18932 }
18933
18934 if (dhdp->logdump_periodic_flush) {
18935 /* store the last position written to in the file for future use */
18936 dhdp->last_file_posn = pos;
18937 }
18938
18939 exit:
18940 MFREE(dhdp->osh, type, sizeof(*type));
18941 if (!IS_ERR(fp) && fp != NULL) {
18942 filp_close(fp, NULL);
18943 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
18944 __FUNCTION__, dump_path));
18945 }
18946 set_fs(old_fs);
18947 DHD_GENERAL_LOCK(dhdp, flags);
18948 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18949 dhd_os_busbusy_wake(dhdp);
18950 DHD_GENERAL_UNLOCK(dhdp, flags);
18951
18952 #ifdef DHD_DUMP_MNGR
18953 if (ret >= 0) {
18954 dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
18955 }
18956 #endif /* DHD_DUMP_MNGR */
18957
18958 return (ret < 0) ? BCME_ERROR : BCME_OK;
18959 }
18960 #endif /* DHD_LOG_DUMP */
18961
18962 /*
18963 * This call returns the memdump size so that
18964 * halutil can allocate a buffer of that size in user space.
18965 */
18966 int
18967 dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
18968 {
18969 int ret = BCME_OK;
18970 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18971 dhd_pub_t *dhdp = &dhd->pub;
18972
18973 if (dhdp->busstate == DHD_BUS_DOWN) {
18974 DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
18975 return BCME_ERROR;
18976 }
18977
18978 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
18979 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18980 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
18981 return BCME_ERROR;
18982 }
18983 #ifdef DHD_PCIE_RUNTIMEPM
18984 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
18985 #endif /* DHD_PCIE_RUNTIMEPM */
18986 ret = dhd_common_socram_dump(dhdp);
18987 if (ret == BCME_OK) {
18988 *dump_size = dhdp->soc_ram_length;
18989 }
18990 return ret;
18991 }
18992
18993 /*
18994 * This is to get the actual memdump after querying the memdump size
18995 */
18996 int
18997 dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
18998 {
18999 int ret = BCME_OK;
19000 int orig_len = 0;
19001 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19002 dhd_pub_t *dhdp = &dhd->pub;
19003 if (buf == NULL)
19004 return BCME_ERROR;
19005 orig_len = *size;
19006 if (dhdp->soc_ram) {
19007 if (orig_len >= dhdp->soc_ram_length) {
19008 memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
19009 /* reset the storage of dump */
19010 memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
19011 *size = dhdp->soc_ram_length;
19012 } else {
19013 ret = BCME_BUFTOOSHORT;
19014 DHD_ERROR(("The buffer is too short to save the memory dump:"
19015 " %d bytes required\n", dhdp->soc_ram_length));
19016 }
19017 } else {
19018 DHD_ERROR(("socram dump is not ready yet\n"));
19019 ret = BCME_NOTREADY;
19020 }
19021 return ret;
19022 }
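/* Usage sketch (illustrative; an in-kernel caller is shown only for
 * brevity, halutil performs the equivalent two-step sequence from user
 * space):
 */
#if 0
	uint32 size = 0;
	char *buf = NULL;

	if (dhd_os_socram_dump(dev, &size) == BCME_OK) {
		buf = vmalloc(size);	/* SOC RAM can be several MB */
		if (buf && dhd_os_get_socram_dump(dev, &buf, &size) == BCME_OK) {
			/* 'buf' now holds 'size' bytes of SOC RAM */
		}
		if (buf)
			vfree(buf);
	}
#endif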
19023
19024 int
19025 dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
19026 {
19027 char *fw_str;
19028
19029 if (size == 0)
19030 return BCME_BADARG;
19031
19032 fw_str = strstr(info_string, "Firmware: ");
19033 if (fw_str == NULL) {
19034 return BCME_ERROR;
19035 }
19036
19037 memset(*buf, 0, size);
19038 if (dhd_ver) {
19039 strncpy(*buf, dhd_version, size - 1);
19040 } else {
19041 strncpy(*buf, fw_str, size - 1);
19042 }
19043 return BCME_OK;
19044 }
19045
19046 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
19047 {
19048 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
19049 }
19050
19051 #ifdef DHD_L2_FILTER
19052 arp_table_t*
19053 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
19054 {
19055 dhd_info_t *dhd = dhdp->info;
19056 dhd_if_t *ifp;
19057
19058 ASSERT(bssidx < DHD_MAX_IFS);
19059
19060 ifp = dhd->iflist[bssidx];
19061 return ifp->phnd_arp_table;
19062 }
19063
19064 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
19065 {
19066 dhd_info_t *dhd = dhdp->info;
19067 dhd_if_t *ifp;
19068
19069 ASSERT(idx < DHD_MAX_IFS);
19070
19071 ifp = dhd->iflist[idx];
19072
19073 if (ifp)
19074 return ifp->parp_enable;
19075 else
19076 return FALSE;
19077 }
19078
19079 /* Set interface specific proxy arp configuration */
19080 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
19081 {
19082 dhd_info_t *dhd = dhdp->info;
19083 dhd_if_t *ifp;
19084 ASSERT(idx < DHD_MAX_IFS);
19085 ifp = dhd->iflist[idx];
19086
19087 if (!ifp)
19088 return BCME_ERROR;
19089
19090 /* At present all three proxy-ARP flags are
19091 * toggled together
19092 */
19093 ifp->parp_enable = val;
19094 ifp->parp_discard = val;
19095 ifp->parp_allnode = val;
19096
19097 /* Flush ARP entries when disabled */
19098 if (val == FALSE) {
19099 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
19100 FALSE, dhdp->tickcnt);
19101 }
19102 return BCME_OK;
19103 }
19104
19105 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
19106 {
19107 dhd_info_t *dhd = dhdp->info;
19108 dhd_if_t *ifp;
19109
19110 ASSERT(idx < DHD_MAX_IFS);
19111
19112 ifp = dhd->iflist[idx];
19113
19114 ASSERT(ifp);
19115 return ifp->parp_discard;
19116 }
19117
19118 bool
19119 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
19120 {
19121 dhd_info_t *dhd = dhdp->info;
19122 dhd_if_t *ifp;
19123
19124 ASSERT(idx < DHD_MAX_IFS);
19125
19126 ifp = dhd->iflist[idx];
19127
19128 ASSERT(ifp);
19129
19130 return ifp->parp_allnode;
19131 }
19132
19133 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
19134 {
19135 dhd_info_t *dhd = dhdp->info;
19136 dhd_if_t *ifp;
19137
19138 ASSERT(idx < DHD_MAX_IFS);
19139
19140 ifp = dhd->iflist[idx];
19141
19142 ASSERT(ifp);
19143
19144 return ifp->dhcp_unicast;
19145 }
19146
19147 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
19148 {
19149 dhd_info_t *dhd = dhdp->info;
19150 dhd_if_t *ifp;
19151 ASSERT(idx < DHD_MAX_IFS);
19152 ifp = dhd->iflist[idx];
19153
19154 ASSERT(ifp);
19155
19156 ifp->dhcp_unicast = val;
19157 return BCME_OK;
19158 }
19159
19160 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
19161 {
19162 dhd_info_t *dhd = dhdp->info;
19163 dhd_if_t *ifp;
19164
19165 ASSERT(idx < DHD_MAX_IFS);
19166
19167 ifp = dhd->iflist[idx];
19168
19169 ASSERT(ifp);
19170
19171 return ifp->block_ping;
19172 }
19173
19174 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
19175 {
19176 dhd_info_t *dhd = dhdp->info;
19177 dhd_if_t *ifp;
19178 ASSERT(idx < DHD_MAX_IFS);
19179 ifp = dhd->iflist[idx];
19180
19181 ASSERT(ifp);
19182
19183 ifp->block_ping = val;
19184 /* Disable rx_pkt_chain feature for interface if block_ping option is
19185 * enabled
19186 */
19187 dhd_update_rx_pkt_chainable_state(dhdp, idx);
19188 return BCME_OK;
19189 }
19190
19191 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
19192 {
19193 dhd_info_t *dhd = dhdp->info;
19194 dhd_if_t *ifp;
19195
19196 ASSERT(idx < DHD_MAX_IFS);
19197
19198 ifp = dhd->iflist[idx];
19199
19200 ASSERT(ifp);
19201
19202 return ifp->grat_arp;
19203 }
19204
19205 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
19206 {
19207 dhd_info_t *dhd = dhdp->info;
19208 dhd_if_t *ifp;
19209 ASSERT(idx < DHD_MAX_IFS);
19210 ifp = dhd->iflist[idx];
19211
19212 ASSERT(ifp);
19213
19214 ifp->grat_arp = val;
19215
19216 return BCME_OK;
19217 }
19218
19219 int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
19220 {
19221 dhd_info_t *dhd = dhdp->info;
19222 dhd_if_t *ifp;
19223
19224 ASSERT(idx < DHD_MAX_IFS);
19225
19226 ifp = dhd->iflist[idx];
19227
19228 ASSERT(ifp);
19229
19230 return ifp->block_tdls;
19231 }
19232
19233 int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
19234 {
19235 dhd_info_t *dhd = dhdp->info;
19236 dhd_if_t *ifp;
19237 ASSERT(idx < DHD_MAX_IFS);
19238 ifp = dhd->iflist[idx];
19239
19240 ASSERT(ifp);
19241
19242 ifp->block_tdls = val;
19243
19244 return BCME_OK;
19245 }
19246 #endif /* DHD_L2_FILTER */
19247
19248 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
19249 int dhd_rps_cpus_enable(struct net_device *net, int enable)
19250 {
19251 dhd_info_t *dhd = DHD_DEV_INFO(net);
19252 dhd_if_t *ifp;
19253 int ifidx;
19254 char *RPS_CPU_SETBUF;
19255
19256 ifidx = dhd_net2idx(dhd, net);
19257 if (ifidx == DHD_BAD_IF) {
19258 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
19259 return -ENODEV;
19260 }
19261
19262 if (ifidx == PRIMARY_INF) {
19263 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
19264 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
19265 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
19266 } else {
19267 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
19268 RPS_CPU_SETBUF = RPS_CPUS_MASK;
19269 }
19270 } else if (ifidx == VIRTUAL_INF) {
19271 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
19272 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
19273 } else {
19274 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
19275 return -EINVAL;
19276 }
19277
19278 ifp = dhd->iflist[ifidx];
19279 if (ifp) {
19280 if (enable) {
19281 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
19282 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
19283 } else {
19284 custom_rps_map_clear(ifp->net->_rx);
19285 }
19286 } else {
19287 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
19288 return -ENODEV;
19289 }
19290 return BCME_OK;
19291 }
19292
19293 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
19294 {
19295 struct rps_map *old_map, *map;
19296 cpumask_var_t mask;
19297 int err, cpu, i;
19298 static DEFINE_SPINLOCK(rps_map_lock);
19299
19300 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19301
19302 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
19303 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
19304 return -ENOMEM;
19305 }
19306
19307 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
19308 if (err) {
19309 free_cpumask_var(mask);
19310 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
19311 return err;
19312 }
19313
19314 map = kzalloc(max_t(unsigned int,
19315 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
19316 GFP_KERNEL);
19317 if (!map) {
19318 free_cpumask_var(mask);
19319 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
19320 return -ENOMEM;
19321 }
19322
19323 i = 0;
19324 for_each_cpu(cpu, mask) {
19325 map->cpus[i++] = cpu;
19326 }
19327
19328 if (i) {
19329 map->len = i;
19330 } else {
19331 kfree(map);
19332 map = NULL;
19333 free_cpumask_var(mask);
19334 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
19335 return -1;
19336 }
19337
19338 spin_lock(&rps_map_lock);
19339 old_map = rcu_dereference_protected(queue->rps_map,
19340 lockdep_is_held(&rps_map_lock));
19341 rcu_assign_pointer(queue->rps_map, map);
19342 spin_unlock(&rps_map_lock);
19343
19344 if (map) {
19345 static_key_slow_inc(&rps_needed);
19346 }
19347 if (old_map) {
19348 kfree_rcu(old_map, rcu);
19349 static_key_slow_dec(&rps_needed);
19350 }
19351 free_cpumask_var(mask);
19352
19353 DHD_INFO(("%s : Done. Mapped cpu number : %d\n", __FUNCTION__, map->len));
19354 return map->len;
19355 }
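/* Usage sketch (illustrative; 'net' stands for any net_device owned by
 * this driver). The mask is a hex cpumask string parsed by bitmap_parse(),
 * so "6" (0b110) selects CPUs 1 and 2:
 */
#if 0
	char mask[] = "6";
	int ret = custom_rps_map_set(net->_rx, mask, strlen(mask));

	if (ret < 0)
		DHD_ERROR(("rps map set failed, err = %d\n", ret));
	else
		custom_rps_map_clear(net->_rx);	/* undo when no longer needed */
#endif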
19356
19357 void custom_rps_map_clear(struct netdev_rx_queue *queue)
19358 {
19359 struct rps_map *map;
19360
19361 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19362
19363 map = rcu_dereference_protected(queue->rps_map, 1);
19364 if (map) {
19365 RCU_INIT_POINTER(queue->rps_map, NULL);
19366 kfree_rcu(map, rcu);
19367 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
19368 }
19369 }
19370 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
19371
19372 #if (defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)) || \
19373 defined(ARGOS_NOTIFY_CB)
19374
19375 static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19376 unsigned long speed, void *v);
19377 static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19378 unsigned long speed, void *v);
19379 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19380 static int argos_status_notifier_config_mumimo_cb(struct notifier_block *notifier,
19381 unsigned long speed, void *v);
19382 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19383
19384 #ifdef DYNAMIC_MUMIMO_CONTROL
19385 #define MUMIMO_CONTROL_TIMER_INTERVAL_MS 5000
19386
19387 void
19388 argos_config_mumimo_timer(unsigned long data)
19389 {
19390 argos_mumimo_ctrl *ctrl_data = (argos_mumimo_ctrl *)data;
19391
19392 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
19393 schedule_work(&ctrl_data->mumimo_ctrl_work);
19394 }
19395
19396 void
19397 argos_config_mumimo_handler(struct work_struct *work)
19398 {
19399 argos_mumimo_ctrl *ctrl_data;
19400 struct net_device *dev;
19401 int err;
19402 int new_cap;
19403
19404 ctrl_data = container_of(work, argos_mumimo_ctrl, mumimo_ctrl_work);
19405
19406 dev = ctrl_data->dev;
19407
19408 if (!dev) {
19409 return;
19410 }
19411
19412 new_cap = ctrl_data->cur_murx_bfe_cap;
19413 err = wl_set_murx_bfe_cap(dev, new_cap, TRUE);
19414 if (err) {
19415 DHD_ERROR(("%s: Failed to set murx_bfe_cap to %d, err=%d\n",
19416 __FUNCTION__, new_cap, err));
19417 } else {
19418 DHD_ERROR(("%s: Newly configured murx_bfe_cap = %d\n",
19419 __FUNCTION__, new_cap));
19420 }
19421 }
19422
19423 void
19424 argos_status_notifier_config_mumimo(struct notifier_block *notifier,
19425 unsigned long speed, void *v)
19426 {
19427 struct net_device *dev;
19428 int prev_murx_bfe_cap;
19429 int cap;
19430 dhd_info_t *dhd;
19431
19432 dev = argos_mumimo_ctrl_data.dev;
19433 if (!dev) {
19434 return;
19435 }
19436
19437 dhd = DHD_DEV_INFO(dev);
19438 if (!dhd) {
19439 return;
19440 }
19441
19442 /* Check if the STA is reassociating with the AP after murx configuration */
19443 if (dhd->pub.reassoc_mumimo_sw) {
19444 /* Cancel the MU-MIMO control timer */
19445 if (timer_pending(&argos_mumimo_ctrl_data.config_timer)) {
19446 del_timer_sync(&argos_mumimo_ctrl_data.config_timer);
19447 }
19448
19449 DHD_ERROR(("%s: Reassociation is in progress...\n", __FUNCTION__));
19450 return;
19451 }
19452
19453 /* Bail out if the associated AP does not support MU-MIMO or if the
19454 * current Tput is inside the hysteresis window (no reconfiguration needed)
19455 */
19456 if ((wl_check_bss_support_mumimo(dev) <= 0) ||
19457 ((speed < MUMIMO_TO_SUMIMO_TPUT_THRESHOLD) &&
19458 (speed >= SUMIMO_TO_MUMIMO_TPUT_THRESHOLD))) {
19459 return;
19460 }
19461
19462 prev_murx_bfe_cap = argos_mumimo_ctrl_data.cur_murx_bfe_cap;
19463
19464 /* Check the TPut condition */
19465 if (speed >= MUMIMO_TO_SUMIMO_TPUT_THRESHOLD) {
19466 cap = 0;
19467 } else {
19468 cap = 1;
19469 }
19470
19471 if (prev_murx_bfe_cap != cap) {
19472 /* Cancel the MU-MIMO control timer */
19473 if (timer_pending(&argos_mumimo_ctrl_data.config_timer)) {
19474 del_timer_sync(&argos_mumimo_ctrl_data.config_timer);
19475 }
19476
19477 /* Update the new value */
19478 argos_mumimo_ctrl_data.cur_murx_bfe_cap = cap;
19479
19480 /* Arm the MU-MIMO control timer */
19481 mod_timer(&argos_mumimo_ctrl_data.config_timer,
19482 jiffies + msecs_to_jiffies(MUMIMO_CONTROL_TIMER_INTERVAL_MS));
19483
19484 DHD_ERROR(("%s: Arm the MU-MIMO control timer, cur_murx_bfe_cap=%d\n",
19485 __FUNCTION__, cap));
19486 }
19487 }
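/* Threshold behaviour (illustrative): SUMIMO_TO_MUMIMO_TPUT_THRESHOLD and
 * MUMIMO_TO_SUMIMO_TPUT_THRESHOLD form a hysteresis window. Speeds at or
 * above the upper threshold request SU-MIMO (cap = 0), speeds below the
 * lower threshold request MU-MIMO (cap = 1), and speeds inside the window
 * leave the current capability untouched. A change is applied only after
 * the MUMIMO_CONTROL_TIMER_INTERVAL_MS timer fires, which debounces
 * short-lived throughput spikes.
 */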
19488
19489 void
19490 argos_config_mumimo_init(struct net_device *dev)
19491 {
19492 init_timer(&argos_mumimo_ctrl_data.config_timer);
19493 argos_mumimo_ctrl_data.config_timer.data = (unsigned long)&argos_mumimo_ctrl_data;
19494 argos_mumimo_ctrl_data.config_timer.function = argos_config_mumimo_timer;
19495 argos_mumimo_ctrl_data.dev = dev;
19496 INIT_WORK(&argos_mumimo_ctrl_data.mumimo_ctrl_work, argos_config_mumimo_handler);
19497 argos_mumimo_ctrl_data.cur_murx_bfe_cap = -1;
19498 }
19499
19500 void
19501 argos_config_mumimo_deinit(void)
19502 {
19503 argos_mumimo_ctrl_data.dev = NULL;
19504 if (timer_pending(&argos_mumimo_ctrl_data.config_timer)) {
19505 del_timer_sync(&argos_mumimo_ctrl_data.config_timer);
19506 }
19507
19508 cancel_work_sync(&argos_mumimo_ctrl_data.mumimo_ctrl_work);
19509 }
19510
19511 void
19512 argos_config_mumimo_reset(void)
19513 {
19514 argos_mumimo_ctrl_data.cur_murx_bfe_cap = -1;
19515 }
19516 #endif /* DYNAMIC_MUMIMO_CONTROL */
19517
19518 int
19519 argos_register_notifier_init(struct net_device *net)
19520 {
19521 int ret = 0;
19522
19523 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19524 argos_rps_ctrl_data.wlan_primary_netdev = net;
19525 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19526 #ifdef DYNAMIC_MUMIMO_CONTROL
19527 argos_config_mumimo_init(net);
19528 #endif /* DYNAMIC_MUMIMO_CONTROL */
19529
19530 if (argos_wifi.notifier_call == NULL) {
19531 argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
19532 ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19533 if (ret < 0) {
19534 DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
19535 goto exit;
19536 }
19537 }
19538
19539 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19540 if (argos_mimo.notifier_call == NULL) {
19541 argos_mimo.notifier_call = argos_status_notifier_config_mumimo_cb;
19542 ret = sec_argos_register_notifier(&argos_mimo, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL);
19543 if (ret < 0) {
19544 DHD_ERROR(("DHD:Failed to register WIFI for MIMO notifier, ret=%d\n", ret));
19545 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19546 goto exit;
19547 }
19548 }
19549 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19550
19551 if (argos_p2p.notifier_call == NULL) {
19552 argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
19553 ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19554 if (ret < 0) {
19555 DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
19556 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19557 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19558 sec_argos_unregister_notifier(&argos_mimo, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL);
19559 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19560 goto exit;
19561 }
19562 }
19563
19564 return 0;
19565
19566 exit:
19567 if (argos_wifi.notifier_call) {
19568 argos_wifi.notifier_call = NULL;
19569 }
19570
19571 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19572 if (argos_mimo.notifier_call) {
19573 argos_mimo.notifier_call = NULL;
19574 }
19575 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19576
19577 if (argos_p2p.notifier_call) {
19578 argos_p2p.notifier_call = NULL;
19579 }
19580
19581 return ret;
19582 }
19583
19584 int
19585 argos_register_notifier_deinit(void)
19586 {
19587 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19588
19589 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19590 DHD_ERROR(("DHD: %s: primary netdev is null\n", __FUNCTION__));
19591 return -1;
19592 }
19593
19594 #ifdef DYNAMIC_MUMIMO_CONTROL
19595 argos_config_mumimo_deinit();
19596 #endif /* DYNAMIC_MUMIMO_CONTROL */
19597
19598 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19599 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19600 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19601
19602 if (argos_p2p.notifier_call) {
19603 sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19604 argos_p2p.notifier_call = NULL;
19605 }
19606
19607 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19608 if (argos_mimo.notifier_call) {
19609 sec_argos_unregister_notifier(&argos_mimo, ARGOS_WIFI_TABLE_FOR_MIMO_LABEL);
19610 argos_mimo.notifier_call = NULL;
19611 }
19612 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19613
19614 if (argos_wifi.notifier_call) {
19615 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19616 argos_wifi.notifier_call = NULL;
19617 }
19618
19619 argos_rps_ctrl_data.wlan_primary_netdev = NULL;
19620 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19621
19622 return 0;
19623 }
19624
19625 int
19626 argos_status_notifier_cb(struct notifier_block *notifier,
19627 unsigned long speed, void *v)
19628 {
19629 dhd_info_t *dhd;
19630 dhd_pub_t *dhdp;
19631
19632 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19633
19634 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19635 goto exit;
19636 }
19637
19638 dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
19639 if (dhd == NULL) {
19640 goto exit;
19641 }
19642
19643 dhdp = &dhd->pub;
19644 if (dhdp == NULL || !dhdp->up) {
19645 goto exit;
19646 }
19647 /* Check if reported TPut value is more than threshold value */
19648 if (speed > RPS_TPUT_THRESHOLD) {
19649 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
19650 /* No need to configure rps_cpus
19651 * if Load Balancing is enabled
19652 */
19653 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19654 int err = 0;
19655
19656 if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
19657 err = custom_rps_map_set(
19658 argos_rps_ctrl_data.wlan_primary_netdev->_rx,
19659 RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
19660 } else {
19661 DHD_ERROR(("DHD: %s: RPS_Set fail,"
19662 " Core=%d Offline\n", __FUNCTION__,
19663 RPS_CPUS_WLAN_CORE_ID));
19664 err = -1;
19665 }
19666
19667 if (err < 0) {
19668 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
19669 "speed=%ld, error=%d\n",
19670 __FUNCTION__, speed, err));
19671 } else {
19672 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19673 #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19674 if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
19675 DHD_ERROR(("%s : set ack suppress mode to TCPACK_SUP_HOLD(%d)\n",
19676 __FUNCTION__, TCPACK_SUP_HOLD));
19677 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
19678 }
19679 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19680 argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
19681 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19682 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
19683 __FUNCTION__, speed));
19684 }
19685 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19686 }
19687 } else {
19688 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
19689 #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)
19690 if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
19691 DHD_ERROR(("%s : set ack suppress mode to TCPACK_SUP_OFF\n",
19692 __FUNCTION__));
19693 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
19694 }
19695 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19696 #if !defined(DHD_LB) && defined(ARGOS_RPS_CPU_CTL)
19697 /* No need to configure rps_cpus
19698 * if Load Balancing is enabled
19699 */
19700 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19701 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
19702 OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
19703 #endif /* !DHD_LB && ARGOS_RPS_CPU_CTL */
19704 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19705 }
19706 }
19707
19708 exit:
19709 return NOTIFY_OK;
19710 }
19711
19712 int
19713 argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19714 unsigned long speed, void *v)
19715 {
19716 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19717 argos_status_notifier_cb(notifier, speed, v);
19718 #if !defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19719 argos_status_notifier_config_mumimo(notifier, speed, v);
19720 #endif /* !CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19721
19722 return NOTIFY_OK;
19723 }
19724
19725 #if defined(CONFIG_SPLIT_ARGOS_SET) && defined(DYNAMIC_MUMIMO_CONTROL)
19726 int
19727 argos_status_notifier_config_mumimo_cb(struct notifier_block *notifier,
19728 unsigned long speed, void *v)
19729 {
19730 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19731 argos_status_notifier_config_mumimo(notifier, speed, v);
19732
19733 return NOTIFY_OK;
19734 }
19735 #endif /* CONFIG_SPLIT_ARGOS_SET && DYNAMIC_MUMIMO_CONTROL */
19736
19737 int
19738 argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19739 unsigned long speed, void *v)
19740 {
19741 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19742 argos_status_notifier_cb(notifier, speed, v);
19743
19744 return NOTIFY_OK;
19745 }
19746 #endif /* (ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL) || ARGOS_NOTIFY_CB */
19747
19748 #ifdef DHD_DEBUG_PAGEALLOC
19749
19750 void
19751 dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
19752 {
19753 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
19754
19755 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
19756 __FUNCTION__, addr_corrupt, (uint32)len));
19757
19758 DHD_OS_WAKE_LOCK(dhdp);
19759 prhex("Page Corruption:", addr_corrupt, len);
19760 dhd_dump_to_kernelog(dhdp);
19761 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
19762 /* Load the dongle side dump to host memory and then BUG_ON() */
19763 dhdp->memdump_enabled = DUMP_MEMONLY;
19764 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
19765 dhd_bus_mem_dump(dhdp);
19766 #endif /* BCMPCIE && DHD_FW_COREDUMP */
19767 DHD_OS_WAKE_UNLOCK(dhdp);
19768 }
19769 EXPORT_SYMBOL(dhd_page_corrupt_cb);
19770 #endif /* DHD_DEBUG_PAGEALLOC */
19771
19772 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
19773 void
19774 dhd_pktid_error_handler(dhd_pub_t *dhdp)
19775 {
19776 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
19777 DHD_OS_WAKE_LOCK(dhdp);
19778 dhd_dump_to_kernelog(dhdp);
19779 #ifdef DHD_FW_COREDUMP
19780 /* Load the dongle side dump to host memory */
19781 if (dhdp->memdump_enabled == DUMP_DISABLED) {
19782 dhdp->memdump_enabled = DUMP_MEMFILE;
19783 }
19784 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
19785 dhd_bus_mem_dump(dhdp);
19786 #endif /* DHD_FW_COREDUMP */
19787 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
19788 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
19789 DHD_OS_WAKE_UNLOCK(dhdp);
19790 }
19791 #endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
19792
19793 struct net_device *
19794 dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
19795 {
19796 dhd_info_t *dhd = dhdp->info;
19797
19798 if (dhd->iflist[0] && dhd->iflist[0]->net)
19799 return dhd->iflist[0]->net;
19800 else
19801 return NULL;
19802 }
19803
19804 #ifdef DHD_DHCP_DUMP
19805 static void
19806 dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
19807 {
19808 struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
19809 struct iphdr *h = &b->ip_header;
19810 uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
19811 int dhcp_type = 0, len, opt_len;
19812
19813 /* check IP header */
19814 if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
19815 return;
19816 }
19817
19818 /* check UDP port for bootp (67, 68) */
19819 if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
19820 b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
19821 return;
19822 }
19823
19824 /* check header length */
19825 if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
19826 return;
19827 }
19828
19829 len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
19830 opt_len = len
19831 - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
19832
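/* DHCP options are TLV-encoded after the 4-byte magic cookie: a one-byte
 * option code, a one-byte length, then 'length' bytes of data. For example,
 * a DHCPDISCOVER carries option 53 (message type) encoded as 35 01 01
 * (code=53, len=1, type=1). In the walk below, after 'opt = ptr++' the
 * ptr points at the length byte, so 'ptr += *ptr + 1' advances past the
 * length byte and the option data to the next option.
 */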
19833 /* parse bootp options */
19834 if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
19835 ptr = &b->options[4];
19836 while (ptr < end && *ptr != 0xff) {
19837 opt = ptr++;
19838 if (*opt == 0) {
19839 continue;
19840 }
19841 ptr += *ptr + 1;
19842 if (ptr >= end) {
19843 break;
19844 }
19845 /* 53 is dhcp type */
19846 if (*opt == 53) {
19847 if (opt[1]) {
19848 dhcp_type = opt[2];
19849 DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
19850 ifname, dhcp_types[dhcp_type],
19851 tx ? "TX" : "RX", dhcp_ops[b->op]));
19852 break;
19853 }
19854 }
19855 }
19856 }
19857 }
19858 #endif /* DHD_DHCP_DUMP */
19859
19860 #ifdef DHD_ICMP_DUMP
19861 static void
19862 dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
19863 {
19864 uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
19865 struct iphdr *iph = (struct iphdr *)pkt;
19866 struct icmphdr *icmph;
19867
19868 /* check IP header */
19869 if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
19870 return;
19871 }
19872
19873 icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
19874 if (icmph->type == ICMP_ECHO) {
19875 DHD_ERROR_MEM(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
19876 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
19877 } else if (icmph->type == ICMP_ECHOREPLY) {
19878 DHD_ERROR_MEM(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
19879 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
19880 } else {
19881 DHD_ERROR_MEM(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
19882 ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
19883 }
19884 }
19885 #endif /* DHD_ICMP_DUMP */
19886
19887 #ifdef SHOW_LOGTRACE
19888 void
19889 dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
19890 {
19891 dhd_dbg_ring_status_t ring_status;
19892 uint32 rlen = 0;
19893 #if defined(DEBUGABILITY)
19894 rlen = dhd_dbg_pull_single_from_ring(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
19895 TRACE_LOG_BUF_MAX_SIZE, TRUE);
19896 #elif defined(DEBUGABILITY_ECNTRS_LOGGING)
19897 rlen = dhd_dbg_ring_pull_single(dhd_pub->ecntr_dbg_ring, trace_buf_info->buf,
19898 TRACE_LOG_BUF_MAX_SIZE, TRUE);
19899 #else
19900 ASSERT(0);
19901 #endif /* DEBUGABILITY */
19902
19903 trace_buf_info->size = rlen;
19904 trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
19905 if (rlen == 0) {
19906 trace_buf_info->availability = BUF_NOT_AVAILABLE;
19907 return;
19908 }
19909 dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
19910 if (ring_status.written_bytes != ring_status.read_bytes) {
19911 trace_buf_info->availability = NEXT_BUF_AVAIL;
19912 }
19913 }
19914 #endif /* SHOW_LOGTRACE */
19915
19916 bool
19917 dhd_fw_download_status(dhd_pub_t * dhd_pub)
19918 {
19919 return dhd_pub->fw_download_done;
19920 }
19921
19922 int
19923 dhd_create_to_notifier_skt(void)
19924 {
19925 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
19926 /* From kernel 3.7 onwards this API accepts only 3 arguments. */
19927 /* Kernel version 3.6 is a special case which accepts 4 arguments. */
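/* Note: dhd_netlink_cfg is presumably a struct netlink_kernel_cfg
 * (defined elsewhere in this file) that carries the receive callback, e.g.:
 *   static struct netlink_kernel_cfg dhd_netlink_cfg = {
 *           .input = dhd_process_daemon_msg,
 *   };
 * (a sketch; on pre-3.6 kernels the callback is passed directly, below)
 */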
19928 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
19929 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
19930 /* Kernel version 3.5 and below use this old API format */
19931 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
19932 dhd_process_daemon_msg, NULL, THIS_MODULE);
19933 #else
19934 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
19935 &dhd_netlink_cfg);
19936 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
19937 if (!nl_to_event_sk)
19938 {
19939 printf("Error creating socket.\n");
19940 return -1;
19941 }
19942 DHD_INFO(("nl_to socket created successfully...\n"));
19943 return 0;
19944 }
19945
19946 void
19947 dhd_destroy_to_notifier_skt(void)
19948 {
19949 DHD_INFO(("Destroying nl_to socket\n"));
19950 netlink_kernel_release(nl_to_event_sk);
19951 }
19952
19953 static void
19954 dhd_recv_msg_from_daemon(struct sk_buff *skb)
19955 {
19956 struct nlmsghdr *nlh;
19957 bcm_to_info_t *cmd;
19958
19959 nlh = (struct nlmsghdr *)skb->data;
19960 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
19961 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
19962 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
19963 DHD_INFO(("DHD Daemon Started\n"));
19964 }
19965 }
19966
19967 int
19968 dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
19969 {
19970 struct nlmsghdr *nlh;
19971 struct sk_buff *skb_out;
19972
19973 BCM_REFERENCE(skb);
19974 if (sender_pid == 0) {
19975 DHD_INFO(("Invalid PID 0\n"));
19976 return -1;
19977 }
19978
19979 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
19980 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
19981 return -1;
19982 }
19983 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
19984 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
19985 memcpy(nlmsg_data(nlh), (char *)data, size);
19986
19987 if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
19988 DHD_INFO(("Error sending message\n"));
19989 }
19990 return 0;
19991 }
19992
19993 static void
19994 dhd_process_daemon_msg(struct sk_buff *skb)
19995 {
19996 bcm_to_info_t to_info;
19997
19998 to_info.magic = BCM_TO_MAGIC;
19999 to_info.reason = REASON_DAEMON_STARTED;
20000 to_info.trap = NO_TRAP;
20001
20002 dhd_recv_msg_from_daemon(skb);
20003 dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
20004 }
20005
20006 #ifdef DHD_LOG_DUMP
20007 bool
20008 dhd_log_dump_ecntr_enabled(void)
20009 {
20010 return (bool)logdump_ecntr_enable;
20011 }
20012
20013 void
20014 dhd_log_dump_init(dhd_pub_t *dhd)
20015 {
20016 struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
20017 int i = 0;
20018 uint8 *prealloc_buf = NULL, *bufptr = NULL;
20019 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20020 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
20021 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20022 int ret;
20023 dhd_dbg_ring_t *ring = NULL;
20024 unsigned long flags = 0;
20025 dhd_info_t *dhd_info = dhd->info;
20026 void *cookie_buf = NULL;
20027
20028 BCM_REFERENCE(ret);
20029 BCM_REFERENCE(ring);
20030 BCM_REFERENCE(flags);
20031
20032 /* sanity check */
20033 if (logdump_prsrv_tailsize <= 0 ||
20034 logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
20035 logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
20036 }
20037 /* now adjust the preserve log flush size based on the
20038 * kernel printk log buffer size
20039 */
20040 #ifdef CONFIG_LOG_BUF_SHIFT
20041 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
20042 " limit prsrv tail size to = %uKB\n",
20043 __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
20044 logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
20045
20046 if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
20047 logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
20048 }
20049 #else
20050 DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB\n",
20051 __FUNCTION__, logdump_prsrv_tailsize/1024));
20052 #endif /* CONFIG_LOG_BUF_SHIFT */
20053
20054 mutex_init(&dhd_info->logdump_lock);
20055
20056 /* initialize log dump buf structures */
20057 memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
20058
20059 /* set the log dump buffer size based on the module_param */
20060 if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
20061 logdump_max_bufsize <= 0)
20062 dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
20063 else
20064 dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
20065
20066 /* pre-alloc the memory for the log buffers & 'special' buffer */
20067 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
20068 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20069 DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
20070 __FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE));
20071 prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
20072 dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
20073 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20074 #else
20075 prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
20076 dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20077 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20078 if (!prealloc_buf) {
20079 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
20080 goto fail;
20081 }
20082 if (!dld_buf_special->buffer) {
20083 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
20084 goto fail;
20085 }
20086
20087 bufptr = prealloc_buf;
20088 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20089 dld_buf = &g_dld_buf[i];
20090 dld_buf->dhd_pub = dhd;
20091 spin_lock_init(&dld_buf->lock);
20092 dld_buf->wraparound = 0;
20093 if (i != DLD_BUF_TYPE_SPECIAL) {
20094 dld_buf->buffer = bufptr;
20095 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20096 bufptr = (uint8 *)dld_buf->max;
20097 } else {
20098 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20099 }
20100 dld_buf->present = dld_buf->front = dld_buf->buffer;
20101 dld_buf->remain = dld_buf_size[i];
20102 dld_buf->enable = 1;
20103 }
20104
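/* At this point the pre-allocated block has been carved up roughly as
 *   [general dld buffers ...][ecntr ring][event log filter]
 * (sketch; the 'special' buffer lives in its own allocation). bufptr now
 * points just past the last dld buffer and feeds the sections below.
 */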
20105 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20106 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
20107 dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
20108 if (!dhd->ecntr_dbg_ring)
20109 goto fail;
20110
20111 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20112 ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
20113 ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
20114 bufptr);
20115 if (ret != BCME_OK) {
20116 DHD_ERROR(("%s: unable to init ecntr ring !\n",
20117 __FUNCTION__));
20118 goto fail;
20119 }
20120 DHD_DBG_RING_LOCK(ring->lock, flags);
20121 ring->state = RING_ACTIVE;
20122 ring->threshold = 0;
20123 DHD_DBG_RING_UNLOCK(ring->lock, flags);
20124
20125 bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
20126 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20127
20128 /* The concise buffer is used as an intermediate buffer for two purposes:
20129 * a) to hold ecounters records temporarily before
20130 * writing them to file
20131 * b) to store 'dhd dump' data before writing it to file
20132 * It should have a size equal to
20133 * MAX(largest possible ecntr record, 'dhd dump' data size)
20134 */
20135 dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
20136 if (!dhd->concise_dbg_buf) {
20137 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
20138 __FUNCTION__));
20139 goto fail;
20140 }
20141
20142 #if defined(DHD_EVENT_LOG_FILTER)
20143 ret = dhd_event_log_filter_init(dhd,
20144 bufptr,
20145 LOG_DUMP_FILTER_MAX_BUFSIZE);
20146 if (ret != BCME_OK) {
20147 goto fail;
20148 }
20149 #endif /* DHD_EVENT_LOG_FILTER */
20150
20151 cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
20152 if (!cookie_buf) {
20153 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
20154 __FUNCTION__));
20155 goto fail;
20156 }
20157 ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20158 if (ret != BCME_OK) {
20159 MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20160 goto fail;
20161 }
20162 return;
20163
20164 fail:
20165
20166 if (dhd->logdump_cookie) {
20167 dhd_logdump_cookie_deinit(dhd);
20168 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20169 dhd->logdump_cookie = NULL;
20170 }
20171 #if defined(DHD_EVENT_LOG_FILTER)
20172 if (dhd->event_log_filter) {
20173 dhd_event_log_filter_deinit(dhd);
20174 }
20175 #endif /* DHD_EVENT_LOG_FILTER */
20176
20177 if (dhd->concise_dbg_buf) {
20178 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20179 }
20180
20181 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20182 if (dhd->ecntr_dbg_ring) {
20183 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20184 dhd_dbg_ring_deinit(dhd, ring);
20185 ring->ring_buf = NULL;
20186 ring->ring_size = 0;
20187 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20188 dhd->ecntr_dbg_ring = NULL;
20189 }
20190 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20191
20192 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20193 if (prealloc_buf) {
20194 DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20195 }
20196 if (dld_buf_special->buffer) {
20197 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20198 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20199 }
20200 #else
20201 if (prealloc_buf) {
20202 MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20203 }
20204 if (dld_buf_special->buffer) {
20205 MFREE(dhd->osh, dld_buf_special->buffer,
20206 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20207 }
20208 #endif /* CONFIG_DHD_USE_STATIC_BUF */
20209 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20210 dld_buf = &g_dld_buf[i];
20211 dld_buf->enable = 0;
20212 dld_buf->buffer = NULL;
20213 }
20214
20215 mutex_destroy(&dhd_info->logdump_lock);
20216 }
20217
20218 void
20219 dhd_log_dump_deinit(dhd_pub_t *dhd)
20220 {
20221 struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
20222 int i = 0;
20223 dhd_info_t *dhd_info = dhd->info;
20224 dhd_dbg_ring_t *ring = NULL;
20225
20226 BCM_REFERENCE(ring);
20227
20228 if (dhd->concise_dbg_buf) {
20229 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20230 dhd->concise_dbg_buf = NULL;
20231 }
20232
20233 if (dhd->logdump_cookie) {
20234 dhd_logdump_cookie_deinit(dhd);
20235 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20236 dhd->logdump_cookie = NULL;
20237 }
20238
20239 #if defined(DHD_EVENT_LOG_FILTER)
20240 if (dhd->event_log_filter) {
20241 dhd_event_log_filter_deinit(dhd);
20242 }
20243 #endif /* DHD_EVENT_LOG_FILTER */
20244
20245 #ifdef DEBUGABILITY_ECNTRS_LOGGING
20246 if (dhd->ecntr_dbg_ring) {
20247 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20248 dhd_dbg_ring_deinit(dhd, ring);
20249 ring->ring_buf = NULL;
20250 ring->ring_size = 0;
20251 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20252 dhd->ecntr_dbg_ring = NULL;
20253 }
20254 #endif /* DEBUGABILITY_ECNTRS_LOGGING */
20255
20256 /* 'general' buffer points to start of the pre-alloc'd memory */
20257 dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
20258 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
20259 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20260 if (dld_buf->buffer) {
20261 DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20262 }
20263 if (dld_buf_special->buffer) {
20264 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20265 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20266 }
20267 #else
20268 if (dld_buf->buffer) {
20269 MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20270 }
20271 if (dld_buf_special->buffer) {
20272 MFREE(dhd->osh, dld_buf_special->buffer,
20273 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20274 }
20275 #endif /* CONFIG_DHD_USE_STATIC_BUF */
20276 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20277 dld_buf = &g_dld_buf[i];
20278 dld_buf->enable = 0;
20279 dld_buf->buffer = NULL;
20280 }
20281
20282 mutex_destroy(&dhd_info->logdump_lock);
20283 }
20284
20285 void
20286 dhd_log_dump_write(int type, char *binary_data,
20287 int binary_len, const char *fmt, ...)
20288 {
20289 int len = 0;
20290 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
20291 va_list args;
20292 unsigned long flags = 0;
20293 struct dhd_log_dump_buf *dld_buf = NULL;
20294 bool flush_log = FALSE;
20295
20296 if (type < 0 || type >= DLD_BUFFER_NUM) {
20297 DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
20298 __FUNCTION__, type));
20299 return;
20300 }
20301
20302 dld_buf = &g_dld_buf[type];
20303
20304 if (dld_buf->enable != 1) {
20305 return;
20306 }
20307
20308 va_start(args, fmt);
20309 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
20310 /* A non-C99-compliant vsnprintf returns -1 on truncation, while a
20311 * C99-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
20312 */
20313 va_end(args);
20314 if (len < 0) {
20315 return;
20316 }
20317
20318 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
20319 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
20320 tmp_buf[len] = '\0';
20321 }
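/* Illustrative: with a 64-byte temp buffer, formatting a 100-char string
 * makes a C99 vsnprintf return 100, so len is clamped to 63 and the
 * buffer is explicitly NUL-terminated before the copy below.
 */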
20322
20323 /* make a critical section to eliminate race conditions */
20324 spin_lock_irqsave(&dld_buf->lock, flags);
20325 if (dld_buf->remain < len) {
20326 dld_buf->wraparound = 1;
20327 dld_buf->present = dld_buf->front;
20328 dld_buf->remain = dld_buf_size[type];
20329 /* if wrap around happens, flush the ring buffer to the file */
20330 flush_log = TRUE;
20331 }
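/* Wrap sketch: if remain=10 and len=16, 'present' rewinds to 'front',
 * 'remain' resets to the full buffer size, and the wrapped buffer is
 * scheduled for a file flush below when periodic flush is enabled.
 */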
20332
20333 memcpy(dld_buf->present, tmp_buf, len);
20334 dld_buf->remain -= len;
20335 dld_buf->present += len;
20336 spin_unlock_irqrestore(&dld_buf->lock, flags);
20337
20338 /* double check invalid memory operation */
20339 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
20340
20341 if (dld_buf->dhd_pub) {
20342 dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
20343 dhdp->logdump_periodic_flush =
20344 logdump_periodic_flush;
20345 if (logdump_periodic_flush && flush_log) {
20346 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
20347 sizeof(log_dump_type_t));
20348 if (flush_type) {
20349 *flush_type = type;
20350 dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
20351 }
20352 }
20353 }
20354 }
20355
20356 char*
20357 dhd_log_dump_get_timestamp(void)
20358 {
20359 static char buf[16];
20360 u64 ts_nsec;
20361 unsigned long rem_nsec;
20362
20363 ts_nsec = local_clock();
20364 rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
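/* DIV_AND_MOD_U64_BY_U32 presumably divides ts_nsec by NSEC_PER_SEC in
 * place and returns the remainder, so e.g. ts_nsec=5123456789 prints as
 * "    5.123456" (seconds.microseconds), like a printk timestamp.
 */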
20365 snprintf(buf, sizeof(buf), "%5lu.%06lu",
20366 (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
20367
20368 return buf;
20369 }
20370 #endif /* DHD_LOG_DUMP */
20371
20372 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
20373 void
20374 dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
20375 {
20376 dhd_info_t * dhd;
20377
20378 if (dhdp) {
20379 dhd = dhdp->info;
20380 if (dhd) {
20381 flush_workqueue(dhd->tx_wq);
20382 flush_workqueue(dhd->rx_wq);
20383 }
20384 }
20385
20386 return;
20387 }
20388 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
20389
20390 #ifdef DHD_LB_TXP
20391 #define DHD_LB_TXBOUND 64
20392 /*
20393 * Function that performs the TX processing on a given CPU
20394 */
20395 bool
20396 dhd_lb_tx_process(dhd_info_t *dhd)
20397 {
20398 struct sk_buff *skb;
20399 int cnt = 0;
20400 struct net_device *net;
20401 int ifidx;
20402 bool resched = FALSE;
20403
20404 DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
20405 if (dhd == NULL) {
20406 DHD_ERROR((" Null pointer DHD \r\n"));
20407 return resched;
20408 }
20409
20410 BCM_REFERENCE(net);
20411
20412 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
20413
20414 /* Base Loop to perform the actual Tx */
20415 do {
20416 skb = skb_dequeue(&dhd->tx_pend_queue);
20417 if (skb == NULL) {
20418 DHD_TRACE(("Dequeued a Null Packet \r\n"));
20419 break;
20420 }
20421 cnt++;
20422
20423 net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
20424 ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
20425
20426 DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
20427 net, ifidx));
20428
20429 __dhd_sendpkt(&dhd->pub, ifidx, skb);
20430
20431 if (cnt >= DHD_LB_TXBOUND) {
20432 resched = TRUE;
20433 break;
20434 }
20435
20436 } while (1);
20437
20438 DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
20439
20440 return resched;
20441 }
20442
20443 void
20444 dhd_lb_tx_handler(unsigned long data)
20445 {
20446 dhd_info_t *dhd = (dhd_info_t *)data;
20447
20448 if (dhd_lb_tx_process(dhd)) {
20449 dhd_tasklet_schedule(&dhd->tx_tasklet);
20450 }
20451 }
20452
20453 #endif /* DHD_LB_TXP */
20454
20455 #ifdef DHD_DEBUG_UART
20456 bool
20457 dhd_debug_uart_is_running(struct net_device *dev)
20458 {
20459 dhd_info_t *dhd = DHD_DEV_INFO(dev);
20460
20461 if (dhd->duart_execute) {
20462 return TRUE;
20463 }
20464
20465 return FALSE;
20466 }
20467
20468 static void
20469 dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
20470 {
20471 dhd_pub_t *dhdp = handle;
20472 dhd_debug_uart_exec(dhdp, "rd");
20473 }
20474
20475 static void
20476 dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
20477 {
20478 int ret;
20479
20480 char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
20481 char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
20482
20483 #ifdef DHD_FW_COREDUMP
20484 if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
20485 #endif // endif
20486 {
20487 if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
20488 #ifdef DHD_FW_COREDUMP
20489 dhdp->memdump_success == FALSE ||
20490 #endif // endif
20491 FALSE) {
20492 dhdp->info->duart_execute = TRUE;
20493 DHD_ERROR(("DHD: %s - execute %s %s\n",
20494 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
20495 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
20496 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
20497 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
20498 dhdp->info->duart_execute = FALSE;
20499
20500 #ifdef DHD_LOG_DUMP
20501 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
20502 #endif // endif
20503 {
20504 BUG_ON(1);
20505 }
20506 }
20507 }
20508 }
20509 #endif /* DHD_DEBUG_UART */
20510
20511 #if defined(DHD_BLOB_EXISTENCE_CHECK)
20512 void
20513 dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
20514 {
20515 struct file *fp;
20516 char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
20517 fp = filp_open(filepath, O_RDONLY, 0);
20518 if (IS_ERR(fp)) {
20519 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
20520 filepath));
20521 dhdp->is_blob = FALSE;
20522 } else {
20523 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath));
20524 dhdp->is_blob = TRUE;
20525 #if defined(CONCATE_BLOB)
20526 strncat(fw_path, "_blob", strlen("_blob"));
20527 #else
20528 BCM_REFERENCE(fw_path);
20529 #endif /* CONCATE_BLOB */
20530 filp_close(fp, NULL);
20531 }
20532 }
20533 #endif /* DHD_BLOB_EXISTENCE_CHECK */
20534
20535 #if defined(PCIE_FULL_DONGLE)
20536 /** test / loopback */
20537 void
20538 dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
20539 {
20540 dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
20541 dhd_info_t *dhd_info = (dhd_info_t *)handle;
20542
20543 if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
20544 DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__));
20545 return;
20546 }
20547 if (dhd_info == NULL) {
20548 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
20549 return;
20550 }
20551 if (dmmap == NULL) {
20552 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
20553 return;
20554 }
20555 dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
20556 }
20557
20558 void
20559 dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
20560 {
20561 dhd_info_t *dhd_info = dhdp->info;
20562
20563 dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
20564 DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
20565 }
20566 #endif /* PCIE_FULL_DONGLE */
20567 /* ---------------------------- End of sysfs implementation ------------------------------------- */
20568
20569 #ifdef SET_PCIE_IRQ_CPU_CORE
20570 void
20571 dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
20572 {
20573 unsigned int pcie_irq = 0;
20574
20575 if (!dhdp) {
20576 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
20577 return;
20578 }
20579
20580 if (!dhdp->bus) {
20581 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
20582 return;
20583 }
20584
20585 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
20586
20587 if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
20588 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
20589 return;
20590 }
20591
20592 /*
20593 irq_set_affinity() assigns the PCIe interrupt to a dedicated CPU core.
20594 If the dedicated CPU core is not online,
20595 the PCIe interrupt is scheduled on CPU core 0.
20596 */
20597 switch (affinity_cmd) {
20598 case PCIE_IRQ_AFFINITY_OFF:
20599 break;
20600 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
20601 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20602 break;
20603 #ifdef CONFIG_SOC_EXYNOS9810
20604 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
20605 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
20606 __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
20607 irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
20608 break;
20609 #endif /* CONFIG_SOC_EXYNOS9810 */
20610 default:
20611 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
20612 __FUNCTION__, affinity_cmd));
20613 }
20614 }
20615 #endif /* SET_PCIE_IRQ_CPU_CORE */
20616
20617 int
20618 dhd_write_file(const char *filepath, char *buf, int buf_len)
20619 {
20620 struct file *fp = NULL;
20621 mm_segment_t old_fs;
20622 int ret = 0;
20623
20624 /* change to KERNEL_DS address limit */
20625 old_fs = get_fs();
20626 set_fs(KERNEL_DS);
20627
20628 /* File is always created. */
20629 fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
20630 if (IS_ERR(fp)) {
20631 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
20632 __FUNCTION__, filepath, PTR_ERR(fp)));
20633 ret = BCME_ERROR;
20634 } else {
20635 if (fp->f_mode & FMODE_WRITE) {
20636 ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
20637 if (ret < 0) {
20638 DHD_ERROR(("%s: Couldn't write file '%s'\n",
20639 __FUNCTION__, filepath));
20640 ret = BCME_ERROR;
20641 } else {
20642 ret = BCME_OK;
20643 }
20644 }
20645 filp_close(fp, NULL);
20646 }
20647
20648 /* restore previous address limit */
20649 set_fs(old_fs);
20650
20651 return ret;
20652 }
20653
20654 int
20655 dhd_read_file(const char *filepath, char *buf, int buf_len)
20656 {
20657 struct file *fp = NULL;
20658 mm_segment_t old_fs;
20659 int ret;
20660
20661 /* change to KERNEL_DS address limit */
20662 old_fs = get_fs();
20663 set_fs(KERNEL_DS);
20664
20665 fp = filp_open(filepath, O_RDONLY, 0);
20666 if (IS_ERR(fp)) {
20667 set_fs(old_fs);
20668 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
20669 return BCME_ERROR;
20670 }
20671
20672 ret = kernel_read(fp, 0, buf, buf_len);
20673 filp_close(fp, NULL);
20674
20675 /* restore previous address limit */
20676 set_fs(old_fs);
20677
20678 /* Return the number of bytes read */
20679 if (ret > 0) {
20680 /* Success to read */
20681 ret = 0;
20682 } else {
20683 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
20684 __FUNCTION__, filepath, ret));
20685 ret = BCME_ERROR;
20686 }
20687
20688 return ret;
20689 }
20690
20691 int
20692 dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
20693 {
20694 int ret;
20695
20696 ret = dhd_write_file(filepath, buf, buf_len);
20697 if (ret < 0) {
20698 return ret;
20699 }
20700
20701 /* Read the file again and check if the file size is not zero */
20702 memset(buf, 0, buf_len);
20703 ret = dhd_read_file(filepath, buf, buf_len);
20704
20705 return ret;
20706 }
20707
20708 #ifdef FILTER_IE
20709 int dhd_read_from_file(dhd_pub_t *dhd)
20710 {
20711 int ret = 0, nread = 0;
20712 void *fd;
20713 uint8 *buf;
20714 NULL_CHECK(dhd, "dhd is NULL", ret);
20715
20716 buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
20717 if (!buf) {
20718 DHD_ERROR(("error: failed to allocate buf.\n"));
20719 return BCME_NOMEM;
20720 }
20721
20722 /* open file to read */
20723 fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
20724 if (!fd) {
20725 DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH));
20726 ret = BCME_EPERM;
20727 goto exit;
20728 }
20729 nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
20730 if (nread > 0) {
20731 buf[nread] = '\0';
20732 if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
20733 DHD_ERROR(("error: failed to parse filter ie\n"));
20734 }
20735 } else {
20736 DHD_ERROR(("error: zero-length file, failed to read\n"));
20737 ret = BCME_ERROR;
20738 }
20739 dhd_os_close_image1(dhd, fd);
20740 exit:
20741 if (buf) {
20742 MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
20743 buf = NULL;
20744 }
20745 return ret;
20746 }
20747
20748 int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
20749 {
20750 uint8* pstr = buf;
20751 int element_count = 0;
20752
20753 if (buf == NULL) {
20754 return BCME_ERROR;
20755 }
20756
20757 while (*pstr != '\0') {
20758 if (*pstr == '\n') {
20759 element_count++;
20760 }
20761 pstr++;
20762 }
20763 /*
20764 * A newline character is not expected after the last line,
20765 * so the last line is counted separately here.
20766 */
20767 element_count++;
20768
20769 return element_count;
20770 }
20771
20772 int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
20773 {
20774 uint8 i, j, msb, lsb, oui_len = 0;
20775 /*
20776 * An OUI can vary from 3 bytes to 5 bytes.
20777 * When read from the file as ASCII input it can
20778 * take a maximum size of 14 bytes and a minimum size of
20779 * 8 bytes, including the ":" separators.
20780 * Example 5byte OUI <AB:DE:BE:CD:FA>
20781 * Example 3byte OUI <AB:DC:EF>
20782 */
20783
20784 if ((inbuf == NULL) || (len < 8) || (len > 14)) {
20785 DHD_ERROR(("error: failed to parse OUI \n"));
20786 return BCME_ERROR;
20787 }
20788
20789 for (j = 0, i = 0; i < len; i += 3, ++j) {
20790 if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
20791 DHD_ERROR(("error: invalid OUI format \n"));
20792 return BCME_ERROR;
20793 }
20794 msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
20795 lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
20796 'A' + 10 : inbuf[i + 1] - '0';
20797 oui[j] = (msb << 4) | lsb;
20798 }
20799 /* Size of oui.It can vary from 3/4/5 */
20800 oui_len = j;
20801
20802 return oui_len;
20803 }
20804
20805 int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
20806 {
20807 int i = 0;
20808
20809 while (i < len) {
20810 if (!bcm_isdigit(buf[i])) {
20811 DHD_ERROR(("error: non digit value found in filter_ie \n"));
20812 return BCME_ERROR;
20813 }
20814 i++;
20815 }
20816 if (bcm_atoi((char*)buf) > 255) {
20817 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
20818 return BCME_ERROR;
20819 }
20820
20821 return BCME_OK;
20822 }
20823
20824 int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
20825 {
20826 int element_count = 0, i = 0, oui_size = 0, ret = 0;
20827 uint16 bufsize, buf_space_left, id = 0, len = 0;
20828 uint16 filter_iovsize, all_tlvsize;
20829 wl_filter_ie_tlv_t *p_ie_tlv = NULL;
20830 wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
20831 char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
20832 uint8 data[20];
20833
20834 element_count = dhd_get_filter_ie_count(dhd, buf);
20835 DHD_INFO(("total element count %d \n", element_count));
20836 /* Calculate the whole buffer size */
20837 filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
20838 p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
20839
20840 if (p_filter_iov == NULL) {
20841 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
20842 return BCME_ERROR;
20843 }
20844
20845 /* setup filter iovar header */
20846 p_filter_iov->version = WL_FILTER_IE_VERSION;
20847 p_filter_iov->len = filter_iovsize;
20848 p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
20849 p_filter_iov->pktflag = FC_PROBE_REQ;
20850 p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
20851 /* setup TLVs */
20852 bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
20853 p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
20854 buf_space_left = bufsize;
20855
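/* Each input line is expected as "id[,oui[,type[,subtype]]]", e.g.
 *   221,00:50:F2,4    -> element id 221, OUI 00:50:F2, type 4
 *   127               -> element id 127 only
 * (illustrative values; the parsing below follows this grammar)
 */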
20856 while ((i < element_count) && (buf != NULL)) {
20857 len = 0;
20858 /* token contains one line of input data */
20859 token = bcmstrtok((char**)&buf, "\n", NULL);
20860 if (token == NULL) {
20861 break;
20862 }
20863 if ((ele_token = bcmstrstr(token, ",")) == NULL) {
20864 /* only element id is present */
20865 if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
20866 DHD_ERROR(("error: Invalid element id \n"));
20867 ret = BCME_ERROR;
20868 goto exit;
20869 }
20870 id = bcm_atoi((char*)token);
20871 data[len++] = WL_FILTER_IE_SET;
20872 } else {
20873 /* oui is present */
20874 ele_token = bcmstrtok(&token, ",", NULL);
20875 if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
20876 strlen(ele_token)) == BCME_ERROR)) {
20877 DHD_ERROR(("error: Invalid element id \n"));
20878 ret = BCME_ERROR;
20879 goto exit;
20880 }
20881 id = bcm_atoi((char*)ele_token);
20882 data[len++] = WL_FILTER_IE_SET;
20883 if ((oui_token = bcmstrstr(token, ",")) == NULL) {
20884 oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
20885 if (oui_size == BCME_ERROR) {
20886 DHD_ERROR(("error: Invalid OUI \n"));
20887 ret = BCME_ERROR;
20888 goto exit;
20889 }
20890 len += oui_size;
20891 } else {
20892 /* type is present */
20893 oui_token = bcmstrtok(&token, ",", NULL);
20894 if ((oui_token == NULL) || ((oui_size =
20895 dhd_parse_oui(dhd, oui_token,
20896 &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
20897 DHD_ERROR(("error: Invalid OUI \n"));
20898 ret = BCME_ERROR;
20899 goto exit;
20900 }
20901 len += oui_size;
20902 if ((type = bcmstrstr(token, ",")) == NULL) {
20903 if (dhd_check_valid_ie(dhd, token,
20904 strlen(token)) == BCME_ERROR) {
20905 DHD_ERROR(("error: Invalid type \n"));
20906 ret = BCME_ERROR;
20907 goto exit;
20908 }
20909 data[len++] = bcm_atoi((char*)token);
20910 } else {
20911 /* subtype is present */
20912 type = bcmstrtok(&token, ",", NULL);
20913 if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
20914 strlen(type)) == BCME_ERROR)) {
20915 DHD_ERROR(("error: Invalid type \n"));
20916 ret = BCME_ERROR;
20917 goto exit;
20918 }
20919 data[len++] = bcm_atoi((char*)type);
20920 /* subtype is last element */
20921 if ((token == NULL) || (*token == '\0') ||
20922 (dhd_check_valid_ie(dhd, token,
20923 strlen(token)) == BCME_ERROR)) {
20924 DHD_ERROR(("error: Invalid subtype \n"));
20925 ret = BCME_ERROR;
20926 goto exit;
20927 }
20928 data[len++] = bcm_atoi((char*)token);
20929 }
20930 }
20931 }
20932 ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
20933 &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
20934 if (ret != BCME_OK) {
20935 DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
20936 "status=%d\n", __FUNCTION__, ret));
20937 goto exit;
20938 }
20939 i++;
20940 }
20941 if (i == 0) {
20942 /* file is empty or first line is blank */
20943 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
20944 ret = BCME_ERROR;
20945 goto exit;
20946 }
20947 /* update the iov header, set len to include all TLVs + header */
20948 all_tlvsize = (bufsize - buf_space_left);
20949 p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
20950 ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
20951 p_filter_iov->len, NULL, 0, TRUE);
20952 if (ret != BCME_OK) {
20953 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
20954 }
20955 exit:
20956 /* clean up */
20957 if (p_filter_iov) {
20958 MFREE(dhd->osh, p_filter_iov, filter_iovsize);
20959 p_filter_iov = NULL;
20960 }
20961 return ret;
20962 }
20963 #endif /* FILTER_IE */
20964 #ifdef DHD_WAKE_STATUS
20965 wake_counts_t*
20966 dhd_get_wakecount(dhd_pub_t *dhdp)
20967 {
20968 return dhd_bus_get_wakecount(dhdp);
20969 }
20970 #endif /* DHD_WAKE_STATUS */
20971
20972 int
20973 dhd_get_random_bytes(uint8 *buf, uint len)
20974 {
20975 #ifdef BCMPCIE
20976 get_random_bytes_arch(buf, len);
20977 #endif /* BCMPCIE */
20978 return BCME_OK;
20979 }
20980
20981 #if defined(DHD_HANG_SEND_UP_TEST)
20982 void
20983 dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
20984 {
20985 dhd_info_t *dhd = NULL;
20986 dhd_pub_t *dhdp = NULL;
20987 uint reason = HANG_REASON_MAX;
20988 uint32 fw_test_code = 0;
20989 dhd = DHD_DEV_INFO(dev);
20990
20991 if (dhd) {
20992 dhdp = &dhd->pub;
20993 }
20994
20995 if (!dhd || !dhdp) {
20996 return;
20997 }
20998
20999 reason = (uint) bcm_strtoul(string_num, NULL, 0);
21000 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
21001
21002 if (reason == 0) {
21003 if (dhdp->req_hang_type) {
21004 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
21005 __FUNCTION__, dhdp->req_hang_type));
21006 dhdp->req_hang_type = 0;
21007 return;
21008 } else {
21009 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
21010 return;
21011 }
21012 } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
21013 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
21014 return;
21015 }
21016
21017 if (dhdp->req_hang_type != 0) {
21018 DHD_ERROR(("Already HANG requested for test\n"));
21019 return;
21020 }
21021
21022 switch (reason) {
21023 case HANG_REASON_IOCTL_RESP_TIMEOUT:
21024 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
21025 dhdp->req_hang_type = reason;
21026 fw_test_code = 102; /* resumed on timeout */
21027 dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
21028 WLC_SET_VAR, TRUE, 0);
21029 break;
21030 case HANG_REASON_DONGLE_TRAP:
21031 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
21032 dhdp->req_hang_type = reason;
21033 fw_test_code = 99; /* dongle trap */
21034 dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
21035 WLC_SET_VAR, TRUE, 0);
21036 break;
21037 case HANG_REASON_D3_ACK_TIMEOUT:
21038 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
21039 dhdp->req_hang_type = reason;
21040 break;
21041 case HANG_REASON_BUS_DOWN:
21042 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
21043 dhdp->req_hang_type = reason;
21044 break;
21045 case HANG_REASON_PCIE_LINK_DOWN:
21046 case HANG_REASON_MSGBUF_LIVELOCK:
21047 dhdp->req_hang_type = 0;
21048 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
21049 break;
21050 case HANG_REASON_IFACE_DEL_FAILURE:
21051 dhdp->req_hang_type = 0;
21052 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
21053 break;
21054 case HANG_REASON_HT_AVAIL_ERROR:
21055 dhdp->req_hang_type = 0;
21056 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
21057 break;
21058 case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
21059 DHD_ERROR(("Make HANG!!!: Link Up(0x%x)\n", reason));
21060 dhdp->req_hang_type = reason;
21061 break;
21062 default:
21063 dhdp->req_hang_type = 0;
21064 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
21065 break;
21066 }
21067 }
21068 #endif /* DHD_HANG_SEND_UP_TEST */
21069
21070 #ifdef DHD_ERPOM
21071 static void
21072 dhd_error_recovery(void *handle, void *event_info, u8 event)
21073 {
21074 dhd_info_t *dhd = handle;
21075 dhd_pub_t *dhdp;
21076 int ret = 0;
21077
21078 if (!dhd) {
21079 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21080 return;
21081 }
21082
21083 dhdp = &dhd->pub;
21084
21085 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
21086 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
21087 __FUNCTION__));
21088 return;
21089 }
21090
21091 ret = dhd_bus_perform_flr_with_quiesce(dhdp);
21092 if (ret != BCME_DNGL_DEVRESET) {
21093 DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
21094 "toggle REG_ON\n", __FUNCTION__, ret));
21095 /* toggle REG_ON */
21096 dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
21097 return;
21098 }
21099 }
21100
21101 void
21102 dhd_schedule_reset(dhd_pub_t *dhdp)
21103 {
21104 if (dhdp->enable_erpom) {
21105 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
21106 DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
21107 }
21108 }
21109 #endif /* DHD_ERPOM */
21110
21111 #ifdef DHD_PKT_LOGGING
21112 void
21113 dhd_pktlog_dump(void *handle, void *event_info, u8 event)
21114 {
21115 dhd_info_t *dhd = handle;
21116
21117 if (!dhd) {
21118 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21119 return;
21120 }
21121
21122 if (dhd_pktlog_write_file(&dhd->pub)) {
21123 DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
21124 return;
21125 }
21126 }
21127
21128 void
21129 dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
21130 {
21131 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21132 (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
21133 dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
21134 }
21135 #endif /* DHD_PKT_LOGGING */
21136
21137 #ifdef BIGDATA_SOFTAP
21138 void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
21139 {
21140 struct bcm_cfg80211 *cfg;
21141 dhd_pub_t *dhdp;
21142 ap_sta_wq_data_t *p_wq_data;
21143
21144 if (!bcm_cfg || !ndev || !e) {
21145 WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
21146 return;
21147 }
21148
21149 cfg = (struct bcm_cfg80211 *)bcm_cfg;
21150 dhdp = (dhd_pub_t *)cfg->pub;
21151
21152 if (!dhdp || !cfg->ap_sta_info) {
21153 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
21154 return;
21155 }
21156
21157 p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
21158 if (unlikely(!p_wq_data)) {
21159 DHD_ERROR(("%s(): could not allocate memory for "
21160 "ap_sta_wq_data_t\n", __FUNCTION__));
21161 return;
21162 }
21163
21164 mutex_lock(&cfg->ap_sta_info->wq_data_sync);
21165
21166 memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
21167 p_wq_data->dhdp = dhdp;
21168 p_wq_data->bcm_cfg = cfg;
21169 p_wq_data->ndev = (struct net_device *)ndev;
21170
21171 mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
21172
21173 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21174 p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
21175 wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
21176
21177 }
21178 #endif /* BIGDATA_SOFTAP */
21179
21180 void
21181 get_debug_dump_time(char *str)
21182 {
21183 struct timeval curtime;
21184 unsigned long local_time;
21185 struct rtc_time tm;
21186
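/* Fills str only if it is still empty; the result is a local-time
 * "YYMMDDHHMMSS + msec" stamp per DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS
 * (the exact layout comes from that format macro).
 */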
21187 if (!strlen(str)) {
21188 do_gettimeofday(&curtime);
21189 local_time = (u32)(curtime.tv_sec -
21190 (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
21191 rtc_time_to_tm(local_time, &tm);
21192
21193 snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
21194 tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
21195 tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
21196 }
21197 }
21198
21199 void
21200 clear_debug_dump_time(char *str)
21201 {
21202 memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
21203 }
21204 #if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
21205 void
21206 copy_debug_dump_time(char *dest, char *src)
21207 {
21208 memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
21209 }
21210 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
21211
21212 #define KIRQ_PRINT_BUF_LEN 256
21213
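/* Prints a /proc/interrupts-style line for the given IRQ, e.g.
 * (illustrative): dhd irq 277:   123   0   45   0   GIC  38 Level -pcie
 */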
21214 void
21215 dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
21216 {
21217 unsigned long flags = 0;
21218 struct irq_desc *desc;
21219 int i; /* cpu iterator */
21220 struct bcmstrbuf strbuf;
21221 char tmp_buf[KIRQ_PRINT_BUF_LEN];
21222
21223 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
21224 desc = irq_to_desc(irq_num);
21225 if (!desc) {
21226 DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
21227 return;
21228 }
21229 bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
21230 raw_spin_lock_irqsave(&desc->lock, flags);
21231 bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
21232 for_each_online_cpu(i)
21233 bcm_bprintf(&strbuf, "%10u ",
21234 desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
21235 if (desc->irq_data.chip) {
21236 if (desc->irq_data.chip->name)
21237 bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
21238 else
21239 bcm_bprintf(&strbuf, " %8s", "-");
21240 } else {
21241 bcm_bprintf(&strbuf, " %8s", "None");
21242 }
21243 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
21244 if (desc->irq_data.domain)
21245 bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
21246 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
21247 bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
21248 #endif // endif
21249 #endif /* LINUX VERSION > 3.1.0 */
21250
21251 if (desc->name)
21252 bcm_bprintf(&strbuf, "-%-8s", desc->name);
21253
21254 DHD_ERROR(("%s\n", strbuf.origbuf));
21255 raw_spin_unlock_irqrestore(&desc->lock, flags);
21256 #endif /* LINUX VERSION > 2.6.28 */
21257 }
21258
21259 void
21260 dhd_show_kirqstats(dhd_pub_t *dhd)
21261 {
21262 unsigned int irq = -1;
21263 #ifdef BCMPCIE
21264 dhdpcie_get_pcieirq(dhd->bus, &irq);
21265 #endif /* BCMPCIE */
21266 #ifdef BCMSDIO
21267 irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
21268 #endif /* BCMSDIO */
21269 if (irq != -1) {
21270 #ifdef BCMPCIE
21271 DHD_ERROR(("DUMP data kernel irq stats : \n"));
21272 #endif /* BCMPCIE */
21273 #ifdef BCMSDIO
21274 DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
21275 #endif /* BCMSDIO */
21276 dhd_print_kirqstats(dhd, irq);
21277 }
21278 #ifdef BCMPCIE_OOB_HOST_WAKE
21279 irq = dhdpcie_get_oob_irq_num(dhd->bus);
21280 if (irq) {
21281 DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
21282 dhd_print_kirqstats(dhd, irq);
21283 }
21284 #endif /* BCMPCIE_OOB_HOST_WAKE */
21285 }
21286
21287 void
21288 dhd_print_tasklet_status(dhd_pub_t *dhd)
21289 {
21290 dhd_info_t *dhdinfo;
21291
21292 if (!dhd) {
21293 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
21294 return;
21295 }
21296
21297 dhdinfo = dhd->info;
21298
21299 if (!dhdinfo) {
21300 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
21301 return;
21302 }
21303
21304 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
21305 }
21306
21307 /*
21308 * DHD RING
21309 */
21310 #define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
21311 #define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
21312
21313 #define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
21314 #define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
21315
21316 #define DHD_RING_MAGIC 0x20170910
21317 #define DHD_RING_IDX_INVALID 0xffffffff
21318
21319 typedef struct {
21320 uint32 elem_size;
21321 uint32 elem_cnt;
21322 uint32 write_idx; /* next write index, -1 : not started */
21323 uint32 read_idx; /* next read index, -1 : not started */
21324
21325 /* protected elements during serialization */
21326 int lock_idx; /* start index of the locked region; locked elements are not overwritten */
21327 int lock_count; /* number of locked elements, counted from lock_idx */
21328
21329 /* saved data elements */
21330 void *elem;
21331 } dhd_fixed_ring_info_t;
21332
21333 typedef struct {
21334 uint32 magic;
21335 uint32 type;
21336 struct mutex ring_sync; /* pointer to mutex */
21337 union {
21338 dhd_fixed_ring_info_t fixed;
21339 };
21340 } dhd_ring_info_t;
21341
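/* Buffer layout produced by dhd_ring_init() (sketch):
 *
 *   buf -> +------------------+
 *          | dhd_ring_info_t  |  header, dhd_ring_get_hdr_size() bytes
 *          +------------------+
 *          | elem[0]          |  elem_size bytes each
 *          | ...              |
 *          | elem[elem_cnt-1] |
 *          +------------------+
 *
 * read_idx/write_idx index into the element array, or are
 * DHD_RING_IDX_INVALID while the ring is empty.
 */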
21342 uint32
21343 dhd_ring_get_hdr_size(void)
21344 {
21345 return sizeof(dhd_ring_info_t);
21346 }
21347
21348 void *
21349 dhd_ring_init(uint8 *buf, uint32 buf_size, uint32 elem_size, uint32 elem_cnt)
21350 {
21351 dhd_ring_info_t *ret_ring;
21352
21353 if (!buf) {
21354 DHD_RING_ERR(("NO RING BUFFER\n"));
21355 return NULL;
21356 }
21357 if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
21358 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
21359 return NULL;
21360 }
21361
21362 ret_ring = (dhd_ring_info_t *)buf;
21363 ret_ring->type = DHD_RING_TYPE_FIXED;
21364 mutex_init(&ret_ring->ring_sync);
21365 ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
21366 ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
21367 ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
21368 ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
21369 ret_ring->fixed.elem_size = elem_size;
21370 ret_ring->fixed.elem_cnt = elem_cnt;
21371 ret_ring->magic = DHD_RING_MAGIC;
21372 return ret_ring;
21373 }
21374
21375 void
21376 dhd_ring_deinit(void *_ring)
21377 {
21378 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21379 dhd_fixed_ring_info_t *fixed;
21380 if (!ring) {
21381 return;
21382 }
21383
21384 if (ring->magic != DHD_RING_MAGIC) {
21385 return;
21386 }
21387
21388 mutex_destroy(&ring->ring_sync);
21389 fixed = &ring->fixed;
21390 memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
21391 fixed->elem_size = fixed->elem_cnt = 0;
21392 ring->type = 0;
21393 ring->magic = 0;
21394 return;
21395 }
21396
21397 /* get counts between two indexes of ring buffer (internal only) */
21398 static inline int
21399 __dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
21400 {
21401 if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
21402 return 0;
21403 }
21404
21405 return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
21406 }
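/* Example with elem_cnt=8: start=6, end=1 gives (8 + 1 - 6) % 8 + 1 = 4,
 * i.e. elements 6, 7, 0, 1 -- the count includes both endpoints.
 */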
21407
21408 static inline int
21409 __dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
21410 {
21411 return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21412 }
21413
21414 static inline void *
21415 __dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
21416 {
21417 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21418 return NULL;
21419 }
21420 return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
21421 }
21422
21423 static inline void
21424 __dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
21425 {
21426 uint32 next_idx;
21427
21428 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21429 DHD_RING_ERR(("EMPTY RING\n"));
21430 return;
21431 }
21432
21433 next_idx = (ring->read_idx + 1) % ring->elem_cnt;
21434 if (ring->read_idx == ring->write_idx) {
21435 /* Become empty */
21436 ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
21437 return;
21438 }
21439
21440 ring->read_idx = next_idx;
21441 return;
21442 }
21443
21444 static inline void *
21445 __dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
21446 {
21447 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21448 return NULL;
21449 }
21450 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21451 }
21452
21453 static inline void *
21454 __dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
21455 {
21456 uint32 tmp_idx;
21457
21458 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21459 ring->read_idx = ring->write_idx = 0;
21460 return (uint8 *)ring->elem;
21461 }
21462
21463 /* check next index is not locked */
21464 tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
21465 if (ring->lock_idx == tmp_idx) {
21466 return NULL;
21467 }
21468
21469 ring->write_idx = tmp_idx;
21470 if (ring->write_idx == ring->read_idx) {
21471 /* record is full, drop oldest one */
21472 ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
21473
21474 }
21475 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21476 }
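/* Full-ring example with elem_cnt=4, read_idx=2, write_idx=1: the next
 * call moves write_idx to 2 and bumps read_idx to 3, silently dropping
 * the oldest unread element (index 2).
 */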
21477
21478 static inline uint32
21479 __dhd_fixed_ring_ptr2idx(dhd_fixed_ring_info_t *ring, void *ptr, char *sig)
21480 {
21481 uint32 diff;
21482 uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
21483
21484 if (ptr < ring->elem) {
21485 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
21486 return ret_idx;
21487 }
21488 diff = (uint32)((uint8 *)ptr - (uint8 *)ring->elem);
21489 if (diff % ring->elem_size != 0) {
21490 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, ring->elem));
21491 return ret_idx;
21492 }
21493 ret_idx = diff / ring->elem_size;
21494 if (ret_idx >= ring->elem_cnt) {
21495 DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", ring->elem_cnt, ret_idx));
21496 }
21497 return ret_idx;
21498 }
21499
21500 static inline void *
21501 __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev)
21502 {
21503 uint32 cur_idx;
21504
21505 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21506 DHD_RING_ERR(("EMPTY RING\n"));
21507 return NULL;
21508 }
21509
21510 cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "NEXT");
21511 if (cur_idx >= ring->elem_cnt) {
21512 return NULL;
21513 }
21514
21515 if (cur_idx == ring->write_idx) {
21516 /* no more new record */
21517 return NULL;
21518 }
21519
21520 cur_idx = (cur_idx + 1) % ring->elem_cnt;
21521 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21522 }
21523
21524 static inline void *
21525 __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev)
21526 {
21527 uint32 cur_idx;
21528
21529 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21530 DHD_RING_ERR(("EMPTY RING\n"));
21531 return NULL;
21532 }
21533 cur_idx = __dhd_fixed_ring_ptr2idx(ring, prev, "PREV");
21534 if (cur_idx >= ring->elem_cnt) {
21535 return NULL;
21536 }
21537 if (cur_idx == ring->read_idx) {
21538 /* no more new record */
21539 return NULL;
21540 }
21541
21542 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
21543 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21544 }
21545
21546 static inline void
21547 __dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr)
21548 {
21549 uint32 first_idx;
21550 uint32 last_idx;
21551 uint32 ring_filled_cnt;
21552 uint32 tmp_cnt;
21553
21554 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21555 DHD_RING_ERR(("EMPTY RING\n"));
21556 return;
21557 }
21558
21559 if (first_ptr) {
21560 first_idx = __dhd_fixed_ring_ptr2idx(ring, first_ptr, "LCK FIRST");
21561 if (first_idx >= ring->elem_cnt) {
21562 return;
21563 }
21564 } else {
21565 first_idx = ring->read_idx;
21566 }
21567
21568 if (last_ptr) {
21569 last_idx = __dhd_fixed_ring_ptr2idx(ring, last_ptr, "LCK LAST");
21570 if (last_idx >= ring->elem_cnt) {
21571 return;
21572 }
21573 } else {
21574 last_idx = ring->write_idx;
21575 }
21576
21577 ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21578 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
21579 if (tmp_cnt > ring_filled_cnt) {
21580 DHD_RING_ERR(("LOCK FIRST POINTS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21581 ring->write_idx, ring->read_idx, first_idx));
21582 return;
21583 }
21584
21585 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
21586 if (tmp_cnt > ring_filled_cnt) {
21587 DHD_RING_ERR(("LOCK LAST POINTS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21588 ring->write_idx, ring->read_idx, last_idx));
21589 return;
21590 }
21591
21592 ring->lock_idx = first_idx;
21593 ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
21594 return;
21595 }
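/* Lock example: with read_idx=3 and write_idx=7, locking first=4, last=6
 * sets lock_idx=4 and lock_count=3; __dhd_fixed_ring_get_empty() then
 * refuses to advance write_idx onto index 4 until the lock is freed.
 */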
21596
21597 static inline void
21598 __dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
21599 {
21600 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21601 DHD_RING_ERR(("EMPTY RING\n"));
21602 return;
21603 }
21604
21605 ring->lock_idx = DHD_RING_IDX_INVALID;
21606 ring->lock_count = 0;
21607 return;
21608 }
21609 static inline void *
21610 __dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
21611 {
21612 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21613 DHD_RING_ERR(("EMPTY RING\n"));
21614 return NULL;
21615 }
21616 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21617 DHD_RING_ERR(("NO LOCK POINT\n"));
21618 return NULL;
21619 }
21620 return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
21621 }
21622
21623 static inline void *
21624 __dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
21625 {
21626 int lock_last_idx;
21627 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21628 DHD_RING_ERR(("EMPTY RING\n"));
21629 return NULL;
21630 }
21631 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21632 DHD_RING_ERR(("NO LOCK POINT\n"));
21633 return NULL;
21634 }
21635
21636 lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
21637 return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
21638 }
21639
21640 static inline int
21641 __dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
21642 {
21643 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21644 DHD_RING_ERR(("EMPTY RING\n"));
21645 return BCME_ERROR;
21646 }
21647 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21648 DHD_RING_ERR(("NO LOCK POINT\n"));
21649 return BCME_ERROR;
21650 }
21651 return ring->lock_count;
21652 }
21653
21654 static inline void
21655 __dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
21656 {
21657 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21658 DHD_RING_ERR(("EMPTY RING\n"));
21659 return;
21660 }
21661 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21662 DHD_RING_ERR(("NO LOCK POINT\n"));
21663 return;
21664 }
21665
21666 ring->lock_count--;
21667 if (ring->lock_count <= 0) {
21668 ring->lock_idx = DHD_RING_IDX_INVALID;
21669 } else {
21670 ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
21671 }
21672 return;
21673 }
21674
21675 /* Get first element : oldest element */
21676 void *
21677 dhd_ring_get_first(void *_ring)
21678 {
21679 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21680 void *ret = NULL;
21681
21682 if (!ring || ring->magic != DHD_RING_MAGIC) {
21683 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21684 return NULL;
21685 }
21686
21687 mutex_lock(&ring->ring_sync);
21688 if (ring->type == DHD_RING_TYPE_FIXED) {
21689 ret = __dhd_fixed_ring_get_first(&ring->fixed);
21690 }
21691 mutex_unlock(&ring->ring_sync);
21692 return ret;
21693 }
21694
21695 /* Free first element : oldest element */
21696 void
21697 dhd_ring_free_first(void *_ring)
21698 {
21699 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21700
21701 if (!ring || ring->magic != DHD_RING_MAGIC) {
21702 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21703 return;
21704 }
21705
21706 mutex_lock(&ring->ring_sync);
21707 if (ring->type == DHD_RING_TYPE_FIXED) {
21708 __dhd_fixed_ring_free_first(&ring->fixed);
21709 }
21710 mutex_unlock(&ring->ring_sync);
21711 return;
21712 }
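
/*
 * Illustrative sketch (editorial addition, not driver code): draining a
 * ring oldest-first with the two calls above. example_ring_drain and its
 * consume step are hypothetical; the ring handle comes from the ring
 * create routine earlier in this file.
 */
#if 0
static void
example_ring_drain(void *ring)
{
	void *elem;

	/* dhd_ring_get_first() returns the oldest element, or NULL when empty */
	while ((elem = dhd_ring_get_first(ring)) != NULL) {
		/* ... consume the element contents here ... */
		dhd_ring_free_first(ring);
	}
}
#endif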
21713
21714 /* Get latest element */
21715 void *
21716 dhd_ring_get_last(void *_ring)
21717 {
21718 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21719 void *ret = NULL;
21720
21721 if (!ring || ring->magic != DHD_RING_MAGIC) {
21722 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21723 return NULL;
21724 }
21725
21726 mutex_lock(&ring->ring_sync);
21727 if (ring->type == DHD_RING_TYPE_FIXED) {
21728 ret = __dhd_fixed_ring_get_last(&ring->fixed);
21729 }
21730 mutex_unlock(&ring->ring_sync);
21731 return ret;
21732 }
21733
21734 /* Get the next slot that can be written to.
21735 * Unread elements may be overwritten.
21736 * Returns NULL if the next slot is locked.
21737 */
21738 void *
21739 dhd_ring_get_empty(void *_ring)
21740 {
21741 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21742 void *ret = NULL;
21743
21744 if (!ring || ring->magic != DHD_RING_MAGIC) {
21745 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21746 return NULL;
21747 }
21748
21749 mutex_lock(&ring->ring_sync);
21750 if (ring->type == DHD_RING_TYPE_FIXED) {
21751 ret = __dhd_fixed_ring_get_empty(&ring->fixed);
21752 }
21753 mutex_unlock(&ring->ring_sync);
21754 return ret;
21755 }
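
/*
 * Illustrative sketch (editorial addition, not driver code): writing into
 * the ring. example_ring_produce is hypothetical; elem_size must match the
 * element size the ring was created with.
 */
#if 0
static int
example_ring_produce(void *ring, const void *src, uint32 elem_size)
{
	void *slot = dhd_ring_get_empty(ring);

	if (slot == NULL) {
		/* The next slot lies in a locked region; drop or retry later */
		return BCME_ERROR;
	}
	/* May overwrite the oldest unread element when the ring is full */
	memcpy(slot, src, elem_size);
	return BCME_OK;
}
#endif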
21756
21757 void *
21758 dhd_ring_get_next(void *_ring, void *cur)
21759 {
21760 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21761 void *ret = NULL;
21762
21763 if (!ring || ring->magic != DHD_RING_MAGIC) {
21764 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21765 return NULL;
21766 }
21767
21768 mutex_lock(&ring->ring_sync);
21769 if (ring->type == DHD_RING_TYPE_FIXED) {
21770 ret = __dhd_fixed_ring_get_next(&ring->fixed, cur);
21771 }
21772 mutex_unlock(&ring->ring_sync);
21773 return ret;
21774 }
21775
21776 void *
21777 dhd_ring_get_prev(void *_ring, void *cur)
21778 {
21779 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21780 void *ret = NULL;
21781
21782 if (!ring || ring->magic != DHD_RING_MAGIC) {
21783 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21784 return NULL;
21785 }
21786
21787 mutex_lock(&ring->ring_sync);
21788 if (ring->type == DHD_RING_TYPE_FIXED) {
21789 ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur);
21790 }
21791 mutex_unlock(&ring->ring_sync);
21792 return ret;
21793 }
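
/*
 * Illustrative sketch (editorial addition, not driver code): walking the
 * current contents without consuming them. Each accessor takes the ring
 * mutex internally, so elements can be overwritten between steps unless
 * the region is locked first (see dhd_ring_lock below).
 */
#if 0
static void
example_ring_walk(void *ring)
{
	void *cur;

	for (cur = dhd_ring_get_first(ring); cur != NULL;
		cur = dhd_ring_get_next(ring, cur)) {
		/* ... inspect the element at cur here ... */
	}
}
#endif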
21794
21795 int
21796 dhd_ring_get_cur_size(void *_ring)
21797 {
21798 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21799 int cnt = 0;
21800
21801 if (!ring || ring->magic != DHD_RING_MAGIC) {
21802 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21803 return cnt;
21804 }
21805
21806 mutex_lock(&ring->ring_sync);
21807 if (ring->type == DHD_RING_TYPE_FIXED) {
21808 cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
21809 }
21810 mutex_unlock(&ring->ring_sync);
21811 return cnt;
21812 }
21813
21814 /* Protect the elements from first_ptr to last_ptr (NULL defaults to read_idx / write_idx) */
21815 void
21816 dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
21817 {
21818 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21819
21820 if (!ring || ring->magic != DHD_RING_MAGIC) {
21821 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21822 return;
21823 }
21824
21825 mutex_lock(&ring->ring_sync);
21826 if (ring->type == DHD_RING_TYPE_FIXED) {
21827 __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr);
21828 }
21829 mutex_unlock(&ring->ring_sync);
21830 return;
21831 }
21832
21833 /* Release the entire locked region */
21834 void
21835 dhd_ring_lock_free(void *_ring)
21836 {
21837 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21838
21839 if (!ring || ring->magic != DHD_RING_MAGIC) {
21840 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21841 return;
21842 }
21843
21844 mutex_lock(&ring->ring_sync);
21845 if (ring->type == DHD_RING_TYPE_FIXED) {
21846 __dhd_fixed_ring_lock_free(&ring->fixed);
21847 }
21848 mutex_unlock(&ring->ring_sync);
21849 return;
21850 }
21851
21852 void *
21853 dhd_ring_lock_get_first(void *_ring)
21854 {
21855 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21856 void *ret = NULL;
21857
21858 if (!ring || ring->magic != DHD_RING_MAGIC) {
21859 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21860 return NULL;
21861 }
21862
21863 mutex_lock(&ring->ring_sync);
21864 if (ring->type == DHD_RING_TYPE_FIXED) {
21865 ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
21866 }
21867 mutex_unlock(&ring->ring_sync);
21868 return ret;
21869 }
21870
21871 void *
21872 dhd_ring_lock_get_last(void *_ring)
21873 {
21874 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21875 void *ret = NULL;
21876
21877 if (!ring || ring->magic != DHD_RING_MAGIC) {
21878 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21879 return NULL;
21880 }
21881
21882 mutex_lock(&ring->ring_sync);
21883 if (ring->type == DHD_RING_TYPE_FIXED) {
21884 ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
21885 }
21886 mutex_unlock(&ring->ring_sync);
21887 return ret;
21888 }
21889
21890 int
21891 dhd_ring_lock_get_count(void *_ring)
21892 {
21893 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21894 int ret = BCME_ERROR;
21895
21896 if (!ring || ring->magic != DHD_RING_MAGIC) {
21897 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21898 return ret;
21899 }
21900
21901 mutex_lock(&ring->ring_sync);
21902 if (ring->type == DHD_RING_TYPE_FIXED) {
21903 ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
21904 }
21905 mutex_unlock(&ring->ring_sync);
21906 return ret;
21907 }
21908
21909 /* free first locked element */
21910 void
21911 dhd_ring_lock_free_first(void *_ring)
21912 {
21913 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21914
21915 if (!ring || ring->magic != DHD_RING_MAGIC) {
21916 DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
21917 return;
21918 }
21919
21920 mutex_lock(&ring->ring_sync);
21921 if (ring->type == DHD_RING_TYPE_FIXED) {
21922 __dhd_fixed_ring_lock_free_first(&ring->fixed);
21923 }
21924 mutex_unlock(&ring->ring_sync);
21925 return;
21926 }
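
/*
 * Illustrative sketch (editorial addition, not driver code): a full lock
 * lifecycle using the API above. Passing NULL/NULL to dhd_ring_lock()
 * pins everything between read_idx and write_idx, as implemented in
 * __dhd_fixed_ring_lock(). example_ring_flush_locked is hypothetical.
 */
#if 0
static void
example_ring_flush_locked(void *ring)
{
	void *elem;
	int remain;

	/* Pin the currently filled region against overwrites */
	dhd_ring_lock(ring, NULL, NULL);

	for (remain = dhd_ring_lock_get_count(ring); remain > 0; remain--) {
		elem = dhd_ring_lock_get_first(ring);
		if (elem == NULL) {
			break;
		}
		/* ... copy the element out to its final destination ... */
		dhd_ring_lock_free_first(ring);
	}

	/* Drop whatever lock state remains */
	dhd_ring_lock_free(ring);
}
#endif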
21927
21928 #ifdef DHD_DUMP_MNGR
21929 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
21930 #define DHD_VFS_INODE(dir) (dir->d_inode)
21931 #else
21932 #define DHD_VFS_INODE(dir) d_inode(dir)
21933 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
21934
21935 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
21936 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
21937 #else
21938 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
21939 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
21940
21941 static int
21942 dhd_file_delete(char *path)
21943 {
21944 struct path file_path;
21945 int err;
21946 struct dentry *dir;
21947
21948 err = kern_path(path, 0, &file_path);
21949
21950 if (err < 0) {
21951 return err;
21952 }
21953 if (
21954 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
21955 !d_is_file(file_path.dentry) ||
21956 #if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
21957 d_really_is_negative(file_path.dentry) ||
21958 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
21959 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
21960 FALSE) /* trailing FALSE keeps the '||' chain valid for every kernel version */
21961 {
21962 err = -EINVAL;
21963 } else {
21964 dir = dget_parent(file_path.dentry);
21965
21966 if (!IS_ERR(dir)) {
21967 err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
21968 dput(dir);
21969 } else {
21970 err = PTR_ERR(dir);
21971 }
21972 }
21973
21974 path_put(&file_path);
21975
21976 if (err < 0) {
21977 DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
21978 }
21979
21980 return err;
21981 }
21982
21983 static int
21984 dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
21985 {
21986 int i;
21987 int fm_idx = -1;
21988
21989 for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
21990 if (strlen(fm_ptr->elems[i].type_name) == 0) {
21991 fm_idx = i;
21992 break;
21993 }
21994 if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
21995 fm_idx = i;
21996 break;
21997 }
21998 }
21999
22000 if (fm_idx == -1) {
22001 return fm_idx;
22002 }
22003
22004 if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
22005 strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
22006 fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
22007 fm_ptr->elems[fm_idx].file_idx = 0;
22008 }
22009
22010 return fm_idx;
22011 }
22012
22013 /*
22014 * dhd_dump_file_manage_enqueue - enqueue a dump file path
22015 * and delete the oldest file once the file count reaches its maximum.
22016 */
22017 void
22018 dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
22019 {
22020 int fm_idx;
22021 int fp_idx;
22022 dhd_dump_file_manage_t *fm_ptr;
22023 DFM_elem_t *elem;
22024
22025 if (!dhd || !dhd->dump_file_manage) {
22026 DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
22027 __FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
22028 return;
22029 }
22030
22031 fm_ptr = dhd->dump_file_manage;
22032
22033 /* find file_manage idx */
22034 DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
22035 if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
22036 DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
22037 __FUNCTION__, fname));
22038 return;
22039 }
22040
22041 elem = &fm_ptr->elems[fm_idx];
22042 fp_idx = elem->file_idx;
22043 DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
22044 __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
22045
22046 /* delete oldest file */
22047 if (strlen(elem->file_path[fp_idx]) != 0) {
22048 if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
22049 DHD_ERROR(("%s(): Failed to delete file: %s\n",
22050 __FUNCTION__, elem->file_path[fp_idx]));
22051 } else {
22052 DHD_ERROR(("%s(): Successfully deleted file: %s\n",
22053 __FUNCTION__, elem->file_path[fp_idx]));
22054 }
22055 }
22056
22057 /* save dump file path */
22058 strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
22059 elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
22060
22061 /* change file index to next file index */
22062 elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
22063 }
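
/*
 * Editorial note: worked example of the rotation above. If
 * DHD_DUMP_FILE_COUNT_MAX were 5, the sixth enqueue for a given dump
 * type wraps file_idx back to 0, so the path recorded by the first
 * enqueue is deleted just before its slot is reused. Each dump type
 * in fm_ptr->elems rotates independently.
 */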
22064 #endif /* DHD_DUMP_MNGR */
22065
22066 #ifdef DHD_MAP_LOGGING
22067 /* Will be called from SMMU fault handler */
22068 void
22069 dhd_debug_info_dump(void)
22070 {
22071 dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
22072 uint32 irq = (uint32)-1;
22073
22074 DHD_ERROR(("%s: SMMU fault triggered, dumping debug info\n", __FUNCTION__));
22075 dhdp->smmu_fault_occurred = TRUE;
22076
22077 /* Disable PCIe IRQ */
22078 dhdpcie_get_pcieirq(dhdp->bus, &irq);
22079 if (irq != (uint32)-1) {
22080 disable_irq_nosync(irq);
22081 }
22082
22083 DHD_OS_WAKE_LOCK(dhdp);
22084 dhd_prot_debug_info_print(dhdp);
22085 osl_dma_map_dump(dhdp->osh);
22086 #ifdef DHD_MAP_PKTID_LOGGING
22087 dhd_pktid_logging_dump(dhdp);
22088 #endif /* DHD_MAP_PKTID_LOGGING */
22089 #ifdef DHD_FW_COREDUMP
22090 /* Load the dongle side dump to host memory */
22091 dhdp->memdump_enabled = DUMP_MEMONLY;
22092 dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
22093 dhd_bus_mem_dump(dhdp);
22094 #endif /* DHD_FW_COREDUMP */
22095 DHD_OS_WAKE_UNLOCK(dhdp);
22096 }
22097 EXPORT_SYMBOL(dhd_debug_info_dump);
22098 #endif /* DHD_MAP_LOGGING */
22099 int
22100 dhd_get_host_whitelist_region(void *buf, uint len)
22101 {
22102 dma_wl_addr_region_host_t *host_reg;
22103 uint64 wl_end;
22104
22105 if ((wlreg_len_h == 0) && (wlreg_len_l == 0)) {
22106 return BCME_RANGE;
22107 }
22108
22109 host_reg = (dma_wl_addr_region_host_t *)buf;
22110 wl_end = wlreg_len_h + wlreg_h;
22111 wl_end = (wl_end & MASK_32_BITS) << 32;
22112 wl_end += wlreg_l;
22113 wl_end += wlreg_len_l;
22114 /* Now write whitelist region(s) */
22115 host_reg->hreg_start.addr_low = wlreg_l;
22116 host_reg->hreg_start.addr_high = wlreg_h;
22117 host_reg->hreg_end.addr_low = EXTRACT_LOW32(wl_end);
22118 host_reg->hreg_end.addr_high = EXTRACT_HIGH32(wl_end);
22119 return BCME_OK;
22120 }
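
/*
 * Editorial note: the region end above is composed from 32-bit halves as
 *	wl_end = ((wlreg_len_h + wlreg_h) << 32) + wlreg_l + wlreg_len_l
 * For example (hypothetical values), wlreg_h = 0x1, wlreg_l = 0xF0000000,
 * wlreg_len_h = 0x0 and wlreg_len_l = 0x20000000 give
 * wl_end = 0x210000000, which EXTRACT_HIGH32()/EXTRACT_LOW32() split back
 * into hreg_end.addr_high = 0x2 and hreg_end.addr_low = 0x10000000.
 */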
22121
22122 #ifdef SUPPORT_SET_TID
22123 /*
22124 * Set a custom TID value for UDP frames based on the owning UID.
22125 * Triggered by the Android private command below:
22126 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
22127 * Mode 0 (SET_TID_OFF)          : Disable TID rewriting
22128 * Mode 1 (SET_TID_ALL_UDP)      : Rewrite the TID of all UDP frames
22129 * Mode 2 (SET_TID_BASED_ON_UID) : Rewrite the TID of UDP frames from the target UID
22130 */
22131 void
22132 dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
22133 {
22134 struct ether_header *eh = NULL;
22135 struct sock *sk = NULL;
22136 uint8 *pktdata = NULL;
22137 uint8 *ip_hdr = NULL;
22138 uint8 cur_prio;
22139 uint8 prio;
22140 uint32 uid;
22141
22142 if (dhdp->tid_mode == SET_TID_OFF) {
22143 return;
22144 }
22145
22146 pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
22147 eh = (struct ether_header *) pktdata;
22148 ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
22149
22150 if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
22151 return;
22152 }
22153
22154 cur_prio = PKTPRIO(pkt);
22155 prio = dhdp->target_tid;
22156 uid = dhdp->target_uid;
22157
22158 if ((cur_prio == prio) ||
22159 (cur_prio != PRIO_8021D_BE)) {
22160 return;
22161 }
22162
22163 sk = ((struct sk_buff*)(pkt))->sk;
22164
22165 if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
22166 (sk && (uid == __kuid_val(sock_i_uid(sk))))) {
22167 PKTSETPRIO(pkt, prio);
22168 }
22169 }
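
/*
 * Editorial note: worked example with hypothetical values. The private
 * command "DRIVER SET_TID 2 10086 5" selects SET_TID_BASED_ON_UID, so a
 * best-effort (PRIO_8021D_BE) UDP frame owned by UID 10086 is re-tagged
 * with TID 5 above, while non-UDP frames, frames from other UIDs, and
 * frames already carrying a non-BE priority pass through unchanged.
 */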
22170 #endif /* SUPPORT_SET_TID */
22171 #ifdef DHDTCPSYNC_FLOOD_BLK
22172 static void dhd_blk_tsfl_handler(struct work_struct *work)
22173 {
22174 dhd_if_t *ifp = NULL;
22175 dhd_pub_t *dhdp = NULL;
22176 /* Ignore compiler warnings due to -Werror=cast-qual */
22177 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22178 #pragma GCC diagnostic push
22179 #pragma GCC diagnostic ignored "-Wcast-qual"
22180 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22181 ifp = container_of(work, dhd_if_t, blk_tsfl_work);
22182 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22183 #pragma GCC diagnostic pop
22184 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22185 if (ifp) {
22186 dhdp = &ifp->info->pub;
22187 if (dhdp) {
22188 if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE) ||
22189 (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
22190 DHD_ERROR(("Disassoc due to TCP SYN FLOOD ATTACK\n"));
22191 wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
22192 } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE) ||
22193 (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
22194 DHD_ERROR(("Disconnect due to TCP SYN FLOOD ATTACK\n"));
22195 wl_cfg80211_disassoc(ifp->net);
22196 }
22197 }
22198 }
22199 }
22200 void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
22201 {
22202 ifp->tsync_rcvd = 0;
22203 ifp->tsyncack_txed = 0;
22204 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
22205 }
22206 void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
22207 {
22208 dhd_if_t *ifp = NULL;
22209 if (dev) {
22210 ifp = DHD_DEV_IFP(dev);
22211 }
22212 if (ifp) {
22213 ifp->tsync_rcvd = 0;
22214 ifp->tsyncack_txed = 0;
22215 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
22216 }
22217 }
22218 #endif /* DHDTCPSYNC_FLOOD_BLK */