0264224a271d97adaa6f9abb028368d5fe141140
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.1.579.77.41.1.cn / dhd_linux.c
1 /*
2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
4 *
5 * Copyright (C) 1999-2017, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $
29 */
30
31 #include <typedefs.h>
32 #include <linuxver.h>
33 #include <osl.h>
34 #ifdef SHOW_LOGTRACE
35 #include <linux/syscalls.h>
36 #include <event_log.h>
37 #endif /* SHOW_LOGTRACE */
38
39 #include <linux/init.h>
40 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/inetdevice.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/etherdevice.h>
47 #include <linux/random.h>
48 #include <linux/spinlock.h>
49 #include <linux/ethtool.h>
50 #include <linux/fcntl.h>
51 #include <linux/fs.h>
52 #include <linux/ip.h>
53 #include <linux/reboot.h>
54 #include <linux/notifier.h>
55 #include <net/addrconf.h>
56 #ifdef ENABLE_ADAPTIVE_SCHED
57 #include <linux/cpufreq.h>
58 #endif /* ENABLE_ADAPTIVE_SCHED */
59
60 #include <asm/uaccess.h>
61 #include <asm/unaligned.h>
62
63 #include <epivers.h>
64 #include <bcmutils.h>
65 #include <bcmendian.h>
66 #include <bcmdevs.h>
67
68
69 #include <ethernet.h>
70 #include <bcmevent.h>
71 #include <vlan.h>
72 #include <802.3.h>
73
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
76 #include <dhd.h>
77 #include <dhd_linux.h>
78 #ifdef DHD_WET
79 #include <dhd_wet.h>
80 #endif /* DHD_WET */
81 #ifdef PCIE_FULL_DONGLE
82 #include <dhd_flowring.h>
83 #endif
84 #include <dhd_bus.h>
85 #include <dhd_proto.h>
86 #include <dhd_config.h>
87 #ifdef WL_ESCAN
88 #include <wl_escan.h>
89 #endif
90 #include <dhd_dbg.h>
91 #include <dhd_debug.h>
92 #ifdef CONFIG_HAS_WAKELOCK
93 #include <linux/wakelock.h>
94 #endif
95 #ifdef WL_CFG80211
96 #include <wl_cfg80211.h>
97 #endif
98 #ifdef PNO_SUPPORT
99 #include <dhd_pno.h>
100 #endif
101 #ifdef RTT_SUPPORT
102 #include <dhd_rtt.h>
103 #endif
104 #ifdef DHD_TIMESYNC
105 #include <dhd_timesync.h>
106 #endif /* DHD_TIMESYNC */
107
108 #ifdef CONFIG_COMPAT
109 #include <linux/compat.h>
110 #endif
111
112 #if defined(CONFIG_SOC_EXYNOS8895)
113 #include <linux/exynos-pci-ctrl.h>
114 #endif /* CONFIG_SOC_EXYNOS8895 */
115
116 #ifdef DHD_WMF
117 #include <dhd_wmf_linux.h>
118 #endif /* DHD_WMF */
119
120 #ifdef DHD_L2_FILTER
121 #include <bcmicmp.h>
122 #include <bcm_l2_filter.h>
123 #include <dhd_l2_filter.h>
124 #endif /* DHD_L2_FILTER */
125
126 #ifdef DHD_PSTA
127 #include <dhd_psta.h>
128 #endif /* DHD_PSTA */
129
130
131 #ifdef DHDTCPACK_SUPPRESS
132 #include <dhd_ip.h>
133 #endif /* DHDTCPACK_SUPPRESS */
134 #include <dhd_daemon.h>
135 #ifdef DHD_PKT_LOGGING
136 #include <dhd_pktlog.h>
137 #endif /* DHD_PKT_LOGGING */
138 #if defined(STAT_REPORT)
139 #include <wl_statreport.h>
140 #endif /* STAT_REPORT */
141 #ifdef DHD_DEBUG_PAGEALLOC
142 typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
143 void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
144 extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
145 #endif /* DHD_DEBUG_PAGEALLOC */
146
147
148 #if defined(DHD_LB)
149 #if !defined(PCIE_FULL_DONGLE)
150 #error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
151 #endif /* !PCIE_FULL_DONGLE */
152 #endif /* DHD_LB */
153
154 #if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
155 defined(DHD_LB_STATS)
156 #if !defined(DHD_LB)
157 #error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
158 #endif /* !DHD_LB */
159 #endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
160
161 #if defined(DHD_LB)
162 /* Dynamic CPU selection for load balancing */
163 #include <linux/cpu.h>
164 #include <linux/cpumask.h>
165 #include <linux/notifier.h>
166 #include <linux/workqueue.h>
167 #include <asm/atomic.h>
168
169 #if !defined(DHD_LB_PRIMARY_CPUS)
170 #define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
171 #endif
172 #if !defined(DHD_LB_SECONDARY_CPUS)
173 #define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
174 #endif
175
176 #define HIST_BIN_SIZE 9
177
178 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
179
180 #if defined(DHD_LB_TXP)
181 static void dhd_lb_tx_handler(unsigned long data);
182 static void dhd_tx_dispatcher_work(struct work_struct * work);
183 static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
184 static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
185
186 /* Pkttag not compatible with PROP_TXSTATUS or WLFC */
187 typedef struct dhd_tx_lb_pkttag_fr {
188 struct net_device *net;
189 int ifidx;
190 } dhd_tx_lb_pkttag_fr_t;
191
192 #define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
193 #define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
194
195 #define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
196 #define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
197 #endif /* DHD_LB_TXP */
198 #endif /* DHD_LB */
199
200 #ifdef HOFFLOAD_MODULES
201 #include <linux/firmware.h>
202 #endif
203
204 #ifdef WLMEDIA_HTSF
205 #include <linux/time.h>
206 #include <htsf.h>
207
208 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
209 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
210 #define TSMAX 1000 /* max no. of timing record kept */
211 #define NUMBIN 34
212
213 static uint32 tsidx = 0;
214 static uint32 htsf_seqnum = 0;
215 uint32 tsfsync;
216 struct timeval tsync;
217 static uint32 tsport = 5010;
218
219 typedef struct histo_ {
220 uint32 bin[NUMBIN];
221 } histo_t;
222
223 #if !ISPOWEROF2(DHD_SDALIGN)
224 #error DHD_SDALIGN is not a power of 2!
225 #endif
226
227 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
228 #endif /* WLMEDIA_HTSF */
229
230 #ifdef WL_MONITOR
231 #include <bcmmsgbuf.h>
232 #include <bcmwifi_monitor.h>
233 #endif
234
235 #define htod32(i) (i)
236 #define htod16(i) (i)
237 #define dtoh32(i) (i)
238 #define dtoh16(i) (i)
239 #define htodchanspec(i) (i)
240 #define dtohchanspec(i) (i)
241
242 #ifdef STBLINUX
243 #ifdef quote_str
244 #undef quote_str
245 #endif /* quote_str */
246 #ifdef to_str
247 #undef to_str
#endif /* to_str */
249 #define to_str(s) #s
250 #define quote_str(s) to_str(s)
251
252 static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
253 #endif /* STBLINUX */
254
255
256
257 #if defined(SOFTAP)
258 extern bool ap_cfg_running;
259 extern bool ap_fw_loaded;
260 #endif
261
262 #ifdef DHD_8021X_DUMP
263 extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
264 #endif /* DHD_8021X_DUMP */
265
266 #ifdef FIX_CPU_MIN_CLOCK
267 #include <linux/pm_qos.h>
268 #endif /* FIX_CPU_MIN_CLOCK */
269
270 #ifdef SET_RANDOM_MAC_SOFTAP
271 #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
272 #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
273 #endif
274 static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
275 #endif /* SET_RANDOM_MAC_SOFTAP */
276
277 #ifdef ENABLE_ADAPTIVE_SCHED
278 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
279 #ifndef CUSTOM_CPUFREQ_THRESH
280 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
281 #endif /* CUSTOM_CPUFREQ_THRESH */
282 #endif /* ENABLE_ADAPTIVE_SCHED */
283
284 /* enable HOSTIP cache update from the host side when an eth0:N is up */
285 #define AOE_IP_ALIAS_SUPPORT 1
286
287 #ifdef BCM_FD_AGGR
288 #include <bcm_rpc.h>
289 #include <bcm_rpc_tp.h>
290 #endif
291 #ifdef PROP_TXSTATUS
292 #include <wlfc_proto.h>
293 #include <dhd_wlfc.h>
294 #endif
295
296 #include <wl_android.h>
297
298 /* Maximum STA per radio */
299 #define DHD_MAX_STA 32
300
301 #ifdef CUSTOMER_HW_AMLOGIC
302 #include <linux/amlogic/wifi_dt.h>
303 #endif
304
305
306 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
307 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
308 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
309
310 #ifdef ARP_OFFLOAD_SUPPORT
311 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
312 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
313 unsigned long event, void *ptr);
314 static struct notifier_block dhd_inetaddr_notifier = {
315 .notifier_call = dhd_inetaddr_notifier_call
316 };
317 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
318 * created in kernel notifier link list (with 'next' pointing to itself)
319 */
320 static bool dhd_inetaddr_notifier_registered = FALSE;
321 #endif /* ARP_OFFLOAD_SUPPORT */
322
323 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
324 int dhd_inet6addr_notifier_call(struct notifier_block *this,
325 unsigned long event, void *ptr);
326 static struct notifier_block dhd_inet6addr_notifier = {
327 .notifier_call = dhd_inet6addr_notifier_call
328 };
329 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
330 * created in kernel notifier link list (with 'next' pointing to itself)
331 */
332 static bool dhd_inet6addr_notifier_registered = FALSE;
333 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
334
335 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
336 #include <linux/suspend.h>
337 volatile bool dhd_mmc_suspend = FALSE;
338 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
339 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
340
341 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
342 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
343 #endif
344 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
345 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
346 #endif
347 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
348 MODULE_LICENSE("GPL and additional rights");
349 #endif /* LinuxVer */
350
351 #if defined(MULTIPLE_SUPPLICANT)
352 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
353 DEFINE_MUTEX(_dhd_mutex_lock_);
354 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
355 #endif
356
357 #ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
358 #define MAX_CONSECUTIVE_HANG_COUNTS 5
359 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
360
361 #include <dhd_bus.h>
362
363 #ifdef DHD_ULP
364 #include <dhd_ulp.h>
365 #endif /* DHD_ULP */
366
367 #ifdef BCM_FD_AGGR
368 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
369 #else
370 #ifndef PROP_TXSTATUS
371 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
372 #else
373 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
374 #endif
375 #endif /* BCM_FD_AGGR */
376
377 #ifdef PROP_TXSTATUS
378 extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
379 extern void dhd_wlfc_plat_init(void *dhd);
380 extern void dhd_wlfc_plat_deinit(void *dhd);
381 #endif /* PROP_TXSTATUS */
382 extern uint sd_f2_blocksize;
383 extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
384
385 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * Stub for the kernel's print_tainted() symbol, which is referenced by
 * debug macros but not exported on 2.6.15 kernels.
 * Always reports an untainted kernel (empty string).
 *
 * Fix: declare as a proper prototype with (void) instead of the
 * old-style empty parameter list ().
 */
const char *
print_tainted(void)
{
	return "";
}
391 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
392
393 /* Linux wireless extension support */
394 #if defined(WL_WIRELESS_EXT)
395 #include <wl_iw.h>
396 extern wl_iw_extra_params_t g_wl_iw_params;
397 #endif /* defined(WL_WIRELESS_EXT) */
398
399 #ifdef CONFIG_PARTIALSUSPEND_SLP
400 #include <linux/partialsuspend_slp.h>
401 #define CONFIG_HAS_EARLYSUSPEND
402 #define DHD_USE_EARLYSUSPEND
403 #define register_early_suspend register_pre_suspend
404 #define unregister_early_suspend unregister_pre_suspend
405 #define early_suspend pre_suspend
406 #define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
407 #else
408 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
409 #include <linux/earlysuspend.h>
410 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
411 #endif /* CONFIG_PARTIALSUSPEND_SLP */
412
413 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
414 #include <linux/nl80211.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
416
417 #if defined(BCMPCIE)
418 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
419 #else
420 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
#endif /* BCMPCIE */
422
423 #ifdef PKT_FILTER_SUPPORT
424 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
425 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
426 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
427 #endif
428
429 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
430 static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
431 u8* program, uint32 program_len);
432 static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
433 uint32 mode, uint32 enable);
434 static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
435 #endif /* PKT_FILTER_SUPPORT && APF */
436
437
438
439 static INLINE int argos_register_notifier_init(struct net_device *net) { return 0;}
440 static INLINE int argos_register_notifier_deinit(void) { return 0;}
441
442 #if defined(BT_OVER_SDIO)
443 extern void wl_android_set_wifi_on_flag(bool enable);
444 #endif /* BT_OVER_SDIO */
445
446
447 #if defined(TRAFFIC_MGMT_DWM)
448 void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf);
449 #endif
450
451 #ifdef DHD_FW_COREDUMP
452 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
453 #endif /* DHD_FW_COREDUMP */
454 #ifdef DHD_LOG_DUMP
455 #define DLD_BUFFER_NUM 2
456 /* [0]: General, [1]: Special */
457 struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
458 static const int dld_buf_size[] = {
459 (1024 * 1024), /* DHD_LOG_DUMP_BUFFER_SIZE */
460 (8 * 1024) /* DHD_LOG_DUMP_BUFFER_EX_SIZE */
461 };
462 static void dhd_log_dump_init(dhd_pub_t *dhd);
463 static void dhd_log_dump_deinit(dhd_pub_t *dhd);
464 static void dhd_log_dump(void *handle, void *event_info, u8 event);
465 void dhd_schedule_log_dump(dhd_pub_t *dhdp);
466 static int do_dhd_log_dump(dhd_pub_t *dhdp);
467 #endif /* DHD_LOG_DUMP */
468
469 #ifdef DHD_DEBUG_UART
470 #include <linux/kmod.h>
471 #define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
472 static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
473 static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
474 #endif /* DHD_DEBUG_UART */
475
476 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
477 static struct notifier_block dhd_reboot_notifier = {
478 .notifier_call = dhd_reboot_callback,
479 .priority = 1,
480 };
481
482 #ifdef BCMPCIE
483 static int is_reboot = 0;
484 #endif /* BCMPCIE */
485
486 #if defined(BT_OVER_SDIO)
487 #include "dhd_bt_interface.h"
488 dhd_pub_t *g_dhd_pub = NULL;
489 #endif /* defined (BT_OVER_SDIO) */
490
491 atomic_t exit_in_progress = ATOMIC_INIT(0);
492
/* Queued firmware interface (WLC_E_IF) event; consumed by the deferred
 * ifadd/ifdel event handlers.
 */
typedef struct dhd_if_event {
	struct list_head list; /* linkage in the pending-event list */
	wl_event_data_if_t event; /* raw interface event payload from firmware */
	char name[IFNAMSIZ+1]; /* linux interface name (NUL-terminated) */
	uint8 mac[ETHER_ADDR_LEN]; /* MAC address carried with the event */
} dhd_if_event_t;
499
/* Interface control information: per-virtual-interface (per-BSS) state */
typedef struct dhd_if {
	struct dhd_info *info; /* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net; /* linux net device for this interface */
	int idx; /* iface idx in dongle */
	uint subunit; /* subunit */
	uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
	bool set_macaddress; /* MAC address change pending (applied by deferred handler) */
	bool set_multicast; /* multicast list update pending (applied by deferred handler) */
	uint8 bssidx; /* bsscfg index for the interface */
	bool attached; /* Delayed attachment when unset */
	bool txflowcontrol; /* Per interface flow control indicator */
	char name[IFNAMSIZ+1]; /* linux interface name */
	char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
	struct net_device_stats stats; /* counters reported to the network stack */
#ifdef DHD_WMF
	dhd_wmf_t wmf; /* per bsscfg wmf setting */
	bool wmf_psta_disable; /* enable/disable MC pkt to each mac
	                        * of MC group behind PSTA
	                        */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
	struct list_head sta_list; /* sll of associated stations */
#if !defined(BCM_GMAC3)
	spinlock_t sta_list_lock; /* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
	uint32 ap_isolate; /* ap-isolation settings */
#ifdef DHD_L2_FILTER
	bool parp_enable; /* proxy-ARP enabled on this BSS */
	bool parp_discard; /* proxy-ARP discard policy -- see dhd_l2_filter for semantics */
	bool parp_allnode; /* proxy-ARP all-node behavior -- see dhd_l2_filter */
	arp_table_t *phnd_arp_table; /* proxy-ARP table handle */
	/* for Per BSS modification */
	bool dhcp_unicast; /* DHCP unicast conversion -- TODO confirm exact behavior */
	bool block_ping; /* ping blocking filter enabled */
	bool grat_arp; /* gratuitous-ARP filter setting -- TODO confirm */
#endif /* DHD_L2_FILTER */
#ifdef DHD_MCAST_REGEN
	bool mcast_regen_bss_enable; /* multicast regeneration enabled for this BSS */
#endif
	bool rx_pkt_chainable; /* set all rx packet to chainable config by default */
	cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */
} dhd_if_t;
545
546 #ifdef WLMEDIA_HTSF
/* 64-bit TSF value split into two 32-bit halves */
typedef struct {
	uint32 low;
	uint32 high;
} tsf_t;

/* Host timestamp (HTSF) clock-correlation state */
typedef struct {
	uint32 last_cycle;
	uint32 last_sec;
	uint32 last_tsf;
	uint32 coef; /* scaling factor */
	uint32 coefdec1; /* first decimal */
	uint32 coefdec2; /* second decimal */
} htsf_t;

/* Four-point packet timestamp record (t1..t4) */
typedef struct {
	uint32 t1;
	uint32 t2;
	uint32 t3;
	uint32 t4;
} tstamp_t;

/* Ring of up to TSMAX timestamp records plus worst-case-delay tracking */
static tstamp_t ts[TSMAX];
static tstamp_t maxdelayts; /* record that produced the largest delay seen */
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
571
572 #endif /* WLMEDIA_HTSF */
573
/* Deferred-work payload carrying an IPv6 address notifier event */
struct ipv6_work_info_t {
	uint8 if_idx; /* dhd interface index */
	char ipv6_addr[IPV6_ADDR_LEN]; /* address bytes -- assumed binary/network form; confirm */
	unsigned long event; /* notifier event code -- presumably NETDEV_UP/DOWN; confirm */
};
579 static void dhd_process_daemon_msg(struct sk_buff *skb);
580 static void dhd_destroy_to_notifier_skt(void);
581 static int dhd_create_to_notifier_skt(void);
582 static struct sock *nl_to_event_sk = NULL;
583 int sender_pid = 0;
584
585 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
586 struct netlink_kernel_cfg g_cfg = {
587 .groups = 1,
588 .input = dhd_process_daemon_msg,
589 };
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
591
/* Buffer descriptor for a scheduled memory dump (see dhd_mem_dump above) */
typedef struct dhd_dump {
	uint8 *buf; /* dump data */
	int bufsize; /* length of buf in bytes */
} dhd_dump_t;
596
597
598 /* When Perimeter locks are deployed, any blocking calls must be preceeded
599 * with a PERIM UNLOCK and followed by a PERIM LOCK.
600 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
601 * wait_event_timeout().
602 */
603
/* Local private structure (extension of pub): Linux-specific per-adapter state */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t iw; /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub; /* OS-independent DHD state shared with the common layer */
	dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */

	wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX]; /* path to firmware image */
	char nv_path[PATH_MAX]; /* path to nvram vars file */
	char clm_path[PATH_MAX]; /* path to clm vars file */
	char conf_path[PATH_MAX]; /* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX]; /* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem; /* serializes protocol-level transactions */
#ifdef PROP_TXSTATUS
	spinlock_t wlfc_spinlock;

#ifdef BCMDBUS
	ulong wlfc_lock_flags;
	ulong wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
	htsf_t htsf;
#endif
	wait_queue_head_t ioctl_resp_wait; /* woken on ioctl response from dongle */
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	uint32 default_wd_interval; /* default watchdog timer interval */

	struct timer_list timer; /* watchdog timer */
	bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	struct timer_list rpm_timer;
	bool rpm_timer_valid;
	tsk_ctl_t thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet; /* DPC tasklet */
	spinlock_t sdlock;
	spinlock_t txqlock;
	spinlock_t rxqlock;
	spinlock_t dhd_lock;
#ifdef BCMDBUS
	ulong txqlock_flags;
#else

	struct semaphore sdsem;
	tsk_ctl_t thr_dpc_ctl; /* DPC thread control */
	tsk_ctl_t thr_wdt_ctl; /* watchdog thread control */
#endif /* BCMDBUS */

	tsk_ctl_t thr_rxf_ctl; /* rx frame thread control */
	spinlock_t rxf_lock;
	bool rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct wake_lock wl_wifi; /* Wifi wakelock */
	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
	struct wake_lock wl_evtwake; /* Wifi event wakelock */
	struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
	struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake; /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif
	spinlock_t wakelock_spinlock; /* protects the wakelock counters below */
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt; /* count of in-flight 802.1X (EAPOL) frames */
	dhd_attach_states_t dhd_state; /* attach progress, used for teardown ordering */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr; /* IPv4 address pending ARP-offload table update */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
	void *rpc_th;
	void *rpc_osh;
	struct timer_list rpcth_timer;
	bool rpcth_timer_active;
	uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void *dhd_deferred_wq; /* deferred work queue handle (dhd_linux_wq) */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif
	unsigned int unit; /* adapter unit number */
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32 psta_mode; /* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32 wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	struct timer_list join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	struct timer_list scan_timer;
	bool scan_timer_active;
#endif
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t tx_compl_cpu;

	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t rx_compl_cpu;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run to rx_napi_cpu.
	 */
	struct sk_buff_head rx_pend_queue ____cacheline_aligned;
	struct sk_buff_head rx_napi_queue ____cacheline_aligned;
	struct napi_struct rx_napi_struct ____cacheline_aligned;
	atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
	struct net_device *rx_napi_netdev; /* netdev of primary interface */

	struct work_struct rx_napi_dispatcher_work;
	struct work_struct tx_compl_dispatcher_work;
	struct work_struct tx_dispatcher_work;

	/* Number of times DPC Tasklet ran */
	uint32 dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32 napi_sched_cnt;
	/* Number of times NAPI processing ran on each available core */
	uint32 *napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32 rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32 *rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32 txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32 *txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32 *cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32 *cpu_offline_cnt;

	/* Number of times TX processing run on each core */
	uint32 *txp_percpu_run_cnt;
	/* Number of times TX start run on each core */
	uint32 *tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the Dongle can send a bunch of rx completions,
	 * hence we took a 3 queue approach
	 * enque - adds the skbs to rx_pend_queue
	 * dispatch - uses a lock and adds the list of skbs from pend queue to
	 * napi queue
	 * napi processing - copies the pend_queue into a local queue and works
	 * on it.
	 * But for TX its going to be 1 skb at a time, so we are just thinking
	 * of using only one queue and use the lock supported skb queue functions
	 * to add and process it. If its in-efficient we'll re-visit the queue
	 * design.
	 */

	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
	/* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
	/*
	 * From the Tasklet that actually sends out data
	 * copy the list tx_pend_queue into tx_active_queue. There by we need
	 * to spinlock to only perform the copy the rest of the code ie to
	 * construct the tx_pend_queue and the code to process tx_active_queue
	 * can be lockless. The concept is borrowed as is from RX processing
	 */
	/* struct sk_buff_head tx_active_queue ____cacheline_aligned; */

	/* Control TXP in runtime, enable by default */
	atomic_t lb_txp_active;

	/*
	 * When the NET_TX tries to send a TX packet put it into tx_pend_queue
	 * For now, the processing tasklet will also directly operate on this
	 * queue
	 */
	struct sk_buff_head tx_pend_queue ____cacheline_aligned;

	/* cpu on which the DHD Tx is happening */
	atomic_t tx_cpu;

	/* CPU on which the Network stack is calling the DHD's xmit function */
	atomic_t net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On Each CPU, when the NAPI RX Packet processing call back was invoked
	 * how many packets were processed is captured in this data structure.
	 * Now its difficult to capture the "exact" number of packets processed.
	 * So considering the packet counter to be a 32 bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
	 * processed is rounded off to the next power of 2 and put in the
	 * appropriate "bin" the value in the bin gets incremented.
	 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
#endif /* DHD_LB */

#ifdef SHOW_LOGTRACE
	struct work_struct event_log_dispatcher_work;
#endif /* SHOW_LOGTRACE */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj; /* sysfs object for this driver instance */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head evt_trace_queue ____cacheline_aligned;
#endif
	struct timer_list timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX]; /* path to BT firmware image */
#endif /* defined (BT_OVER_SDIO) */

#ifdef WL_MONITOR
	struct net_device *monitor_dev; /* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint monitor_len;
	uint monitor_type; /* monitor pseudo device */
	monitor_info_t *monitor_info;
#endif /* WL_MONITOR */
	uint32 shub_enable;
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
	int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif
#ifdef PCIE_INB_DW
	wait_queue_head_t ds_exit_wait;
#endif /* PCIE_INB_DW */
} dhd_info_t;
918
919 #ifdef WL_MONITOR
920 #define MONPKT_EXTRA_LEN 48
921 #endif
922
923 #define DHDIF_FWDER(dhdif) FALSE
924
925 #if defined(BT_OVER_SDIO)
926 /* Flag to indicate if driver is initialized */
927 uint dhd_driver_init_done = TRUE;
928 #else
929 /* Flag to indicate if driver is initialized */
930 uint dhd_driver_init_done = FALSE;
931 #endif
932 /* Flag to indicate if we should download firmware on driver load */
933 uint dhd_download_fw_on_driverload = TRUE;
934
/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);


/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware/chip version info visible from log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
/* signalled when bus registration completes during module load */
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (BCMLXSDMMC || BCMDBUS) */
963
/* deferred handlers (run from the driver's deferred work queue, not from
 * the caller's context)
 */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);

#ifdef DHD_UPDATE_INTF_MAC
static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
#endif /* DHD_UPDATE_INTF_MAC */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

#ifdef HOFFLOAD_MODULES
char dhd_hmem_module_string[MOD_PARAM_SRLEN];
module_param_string(dhd_hmem_module_string, dhd_hmem_module_string, MOD_PARAM_SRLEN, 0660);
#endif
/* Error bits: per-subsystem debug message-level knobs (variables defined elsewhere) */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);
999
#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0)
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */
1025
/* Watchdog interval */

/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval (0 disables dongle console polling) */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
/* NOTE(review): both branches of this #ifdef select FALSE, making the
 * conditional redundant — confirm whether GAN_LITE builds intended TRUE.
 */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

/* Watchdog thread priority (0 = default scheduling) */
int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDBUS)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* !BCMDBUS */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);
#endif /* PCIE_FULL_DONGLE */
1101
#ifdef DHD_DHCP_DUMP
/* On-the-wire BOOTP/DHCP message layout, including the preceding IP and UDP
 * headers, used to parse packets for the debug DHCP dump.
 */
struct bootp_fmt {
	struct iphdr ip_header;
	struct udphdr udp_header;
	uint8 op;
	uint8 htype;
	uint8 hlen;
	uint8 hops;
	uint32 transaction_id;
	uint16 secs;
	uint16 flags;
	uint32 client_ip;
	uint32 assigned_ip;
	uint32 server_ip;
	uint32 relay_ip;
	uint8 hw_address[16];
	uint8 server_name[64];
	uint8 file_name[128];
	uint8 options[312];
};

/* RFC 2131 magic cookie that prefixes the DHCP options field */
static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
/* Printable names indexed by BOOTP 'op' code */
static const char dhcp_ops[][10] = {
	"NA", "REQUEST", "REPLY"
};
/* Printable names indexed by DHCP message-type option value */
static const char dhcp_types[][10] = {
	"NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
};
static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_DHCP_DUMP */
1132
#ifdef DHD_ICMP_DUMP
#include <net/icmp.h>
static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
#endif /* DHD_ICMP_DUMP */

/* Functions to manage sysfs interface for dhd */
static int dhd_sysfs_init(dhd_info_t *dhd);
static void dhd_sysfs_exit(dhd_info_t *dhd);
1141
#ifdef SHOW_LOGTRACE
/* Default locations of the firmware log-string and symbol-map files used to
 * decode event-log traces; overridable via the module params below.
 */
#if defined(CUSTOMER_HW4_DEBUG)
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
static char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#elif defined(CUSTOMER_HW2)
static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
static char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
static char *map_file_path = "/data/misc/wifi/rtecdc.map";
static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin";
static char *rom_map_file_path = "/data/misc/wifi/roml.map";
#else
static char *logstrs_path = "/installmedia/logstrs.bin";
static char *st_str_file_path = "/installmedia/rtecdc.bin";
static char *map_file_path = "/installmedia/rtecdc.map";
static char *rom_st_str_file_path = "/installmedia/roml.bin";
static char *rom_map_file_path = "/installmedia/roml.map";
#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */
1177
1178 #if defined(DHD_LB)
1179
1180 static void
1181 dhd_lb_set_default_cpus(dhd_info_t *dhd)
1182 {
1183 /* Default CPU allocation for the jobs */
1184 atomic_set(&dhd->rx_napi_cpu, 1);
1185 atomic_set(&dhd->rx_compl_cpu, 2);
1186 atomic_set(&dhd->tx_compl_cpu, 2);
1187 atomic_set(&dhd->tx_cpu, 2);
1188 atomic_set(&dhd->net_tx_cpu, 0);
1189 }
1190
1191 static void
1192 dhd_cpumasks_deinit(dhd_info_t *dhd)
1193 {
1194 free_cpumask_var(dhd->cpumask_curr_avail);
1195 free_cpumask_var(dhd->cpumask_primary);
1196 free_cpumask_var(dhd->cpumask_primary_new);
1197 free_cpumask_var(dhd->cpumask_secondary);
1198 free_cpumask_var(dhd->cpumask_secondary_new);
1199 }
1200
1201 static int
1202 dhd_cpumasks_init(dhd_info_t *dhd)
1203 {
1204 int id;
1205 uint32 cpus, num_cpus = num_possible_cpus();
1206 int ret = 0;
1207
1208 DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
1209 DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
1210
1211 if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
1212 !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
1213 !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
1214 !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
1215 !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
1216 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
1217 ret = -ENOMEM;
1218 goto fail;
1219 }
1220
1221 cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
1222 cpumask_clear(dhd->cpumask_primary);
1223 cpumask_clear(dhd->cpumask_secondary);
1224
1225 if (num_cpus > 32) {
1226 DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
1227 ASSERT(0);
1228 }
1229
1230 cpus = DHD_LB_PRIMARY_CPUS;
1231 for (id = 0; id < num_cpus; id++) {
1232 if (isset(&cpus, id))
1233 cpumask_set_cpu(id, dhd->cpumask_primary);
1234 }
1235
1236 cpus = DHD_LB_SECONDARY_CPUS;
1237 for (id = 0; id < num_cpus; id++) {
1238 if (isset(&cpus, id))
1239 cpumask_set_cpu(id, dhd->cpumask_secondary);
1240 }
1241
1242 return ret;
1243 fail:
1244 dhd_cpumasks_deinit(dhd);
1245 return ret;
1246 }
1247
1248 /*
1249 * The CPU Candidacy Algorithm
1250 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
1251 * The available CPUs for selection are divided into two groups
1252 * Primary Set - A CPU mask that carries the First Choice CPUs
1253 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
1254 *
1255 * There are two types of Job, that needs to be assigned to
1256 * the CPUs, from one of the above mentioned CPU group. The Jobs are
1257 * 1) Rx Packet Processing - napi_cpu
1258 * 2) Completion Processiong (Tx, RX) - compl_cpu
1259 *
1260 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
1261 * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy
1262 * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
1263 * If there are more processors free, it assigns one to compl_cpu.
1264 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
1265 * CPU, as much as possible.
1266 *
1267 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
1268 * would allow Tx completion skb's to be released into a local free pool from
1269 * which the rx buffer posts could have been serviced. it is important to note
1270 * that a Tx packet may not have a large enough buffer for rx posting.
1271 */
/* Re-evaluate which CPUs should run the NAPI rx, tx and completion jobs,
 * based on which CPUs from the primary/secondary candidate masks are
 * currently available. Results are published atomically into
 * dhd->rx_napi_cpu, dhd->tx_cpu, dhd->tx_compl_cpu and dhd->rx_compl_cpu.
 * Called at init and from the CPU hotplug notifier.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
	uint32 tx_cpu = 0; /* cpu selected for tx processing job */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	/* Intersect candidate masks with the currently-available CPU set */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* In case there are no more CPUs, do completions & Tx in same CPU */
		compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = tx_cpu;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		/* Fill only the jobs the primary pass left at 0; a job kept
		 * on CPU 0 is treated as "unassigned" here.
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (tx_cpu == 0) {
			tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for tx processing, choose CPU 0 */
		if (tx_cpu >= nr_cpu_ids)
			tx_cpu = 0;

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		/* NOTE(review): these hardcoded fallbacks (1 and 2) would trip
		 * the ASSERTs below on systems with fewer than 3 CPU ids —
		 * confirm minimum supported CPU count for DHD_LB builds.
		 */
		napi_cpu = 1;
		compl_cpu = 0;
		tx_cpu = 2;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu, tx_cpu));

	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);
	ASSERT(tx_cpu < nr_cpu_ids);

	/* Publish the selections; readers pick these up via atomic_read() */
	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	atomic_set(&dhd->tx_cpu, tx_cpu);

	return;
}
1366
1367 /*
1368 * Function to handle CPU Hotplug notifications.
1369 * One of the task it does is to trigger the CPU Candidacy algorithm
1370 * for load balancing.
1371 */
1372 int
1373 dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1374 {
1375 unsigned long int cpu = (unsigned long int)hcpu;
1376
1377 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1378 #pragma GCC diagnostic push
1379 #pragma GCC diagnostic ignored "-Wcast-qual"
1380 #endif
1381 dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
1382 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1383 #pragma GCC diagnostic pop
1384 #endif
1385
1386 if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
1387 DHD_INFO(("%s(): LB data is not initialized yet.\n",
1388 __FUNCTION__));
1389 return NOTIFY_BAD;
1390 }
1391
1392 switch (action)
1393 {
1394 case CPU_ONLINE:
1395 case CPU_ONLINE_FROZEN:
1396 DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
1397 cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
1398 dhd_select_cpu_candidacy(dhd);
1399 break;
1400
1401 case CPU_DOWN_PREPARE:
1402 case CPU_DOWN_PREPARE_FROZEN:
1403 DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
1404 cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
1405 dhd_select_cpu_candidacy(dhd);
1406 break;
1407 default:
1408 break;
1409 }
1410
1411 return NOTIFY_OK;
1412 }
1413
1414 #if defined(DHD_LB_STATS)
1415 void dhd_lb_stats_init(dhd_pub_t *dhdp)
1416 {
1417 dhd_info_t *dhd;
1418 int i, j, num_cpus = num_possible_cpus();
1419 int alloc_size = sizeof(uint32) * num_cpus;
1420
1421 if (dhdp == NULL) {
1422 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
1423 __FUNCTION__));
1424 return;
1425 }
1426
1427 dhd = dhdp->info;
1428 if (dhd == NULL) {
1429 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1430 return;
1431 }
1432
1433 DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1434 DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1435
1436 dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1437 if (!dhd->napi_percpu_run_cnt) {
1438 DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
1439 __FUNCTION__));
1440 return;
1441 }
1442 for (i = 0; i < num_cpus; i++)
1443 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1444
1445 DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1446
1447 dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1448 if (!dhd->rxc_percpu_run_cnt) {
1449 DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
1450 __FUNCTION__));
1451 return;
1452 }
1453 for (i = 0; i < num_cpus; i++)
1454 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1455
1456 DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1457
1458 dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1459 if (!dhd->txc_percpu_run_cnt) {
1460 DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
1461 __FUNCTION__));
1462 return;
1463 }
1464 for (i = 0; i < num_cpus; i++)
1465 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1466
1467 dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1468 if (!dhd->cpu_online_cnt) {
1469 DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
1470 __FUNCTION__));
1471 return;
1472 }
1473 for (i = 0; i < num_cpus; i++)
1474 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1475
1476 dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1477 if (!dhd->cpu_offline_cnt) {
1478 DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
1479 __FUNCTION__));
1480 return;
1481 }
1482 for (i = 0; i < num_cpus; i++)
1483 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1484
1485 dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1486 if (!dhd->txp_percpu_run_cnt) {
1487 DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
1488 __FUNCTION__));
1489 return;
1490 }
1491 for (i = 0; i < num_cpus; i++)
1492 DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
1493
1494 dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1495 if (!dhd->tx_start_percpu_run_cnt) {
1496 DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
1497 __FUNCTION__));
1498 return;
1499 }
1500 for (i = 0; i < num_cpus; i++)
1501 DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
1502
1503 for (j = 0; j < HIST_BIN_SIZE; j++) {
1504 dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1505 if (!dhd->napi_rx_hist[j]) {
1506 DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
1507 __FUNCTION__, j));
1508 return;
1509 }
1510 for (i = 0; i < num_cpus; i++) {
1511 DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
1512 }
1513 }
1514 #ifdef DHD_LB_TXC
1515 for (j = 0; j < HIST_BIN_SIZE; j++) {
1516 dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1517 if (!dhd->txc_hist[j]) {
1518 DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
1519 __FUNCTION__, j));
1520 return;
1521 }
1522 for (i = 0; i < num_cpus; i++) {
1523 DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
1524 }
1525 }
1526 #endif /* DHD_LB_TXC */
1527 #ifdef DHD_LB_RXC
1528 for (j = 0; j < HIST_BIN_SIZE; j++) {
1529 dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
1530 if (!dhd->rxc_hist[j]) {
1531 DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
1532 __FUNCTION__, j));
1533 return;
1534 }
1535 for (i = 0; i < num_cpus; i++) {
1536 DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
1537 }
1538 }
1539 #endif /* DHD_LB_RXC */
1540 return;
1541 }
1542
1543 void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
1544 {
1545 dhd_info_t *dhd;
1546 int j, num_cpus = num_possible_cpus();
1547 int alloc_size = sizeof(uint32) * num_cpus;
1548
1549 if (dhdp == NULL) {
1550 DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
1551 __FUNCTION__));
1552 return;
1553 }
1554
1555 dhd = dhdp->info;
1556 if (dhd == NULL) {
1557 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1558 return;
1559 }
1560
1561 if (dhd->napi_percpu_run_cnt) {
1562 MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
1563 dhd->napi_percpu_run_cnt = NULL;
1564 }
1565 if (dhd->rxc_percpu_run_cnt) {
1566 MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
1567 dhd->rxc_percpu_run_cnt = NULL;
1568 }
1569 if (dhd->txc_percpu_run_cnt) {
1570 MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
1571 dhd->txc_percpu_run_cnt = NULL;
1572 }
1573 if (dhd->cpu_online_cnt) {
1574 MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
1575 dhd->cpu_online_cnt = NULL;
1576 }
1577 if (dhd->cpu_offline_cnt) {
1578 MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
1579 dhd->cpu_offline_cnt = NULL;
1580 }
1581
1582 if (dhd->txp_percpu_run_cnt) {
1583 MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
1584 dhd->txp_percpu_run_cnt = NULL;
1585 }
1586 if (dhd->tx_start_percpu_run_cnt) {
1587 MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
1588 dhd->tx_start_percpu_run_cnt = NULL;
1589 }
1590
1591 for (j = 0; j < HIST_BIN_SIZE; j++) {
1592 if (dhd->napi_rx_hist[j]) {
1593 MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
1594 dhd->napi_rx_hist[j] = NULL;
1595 }
1596 #ifdef DHD_LB_TXC
1597 if (dhd->txc_hist[j]) {
1598 MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
1599 dhd->txc_hist[j] = NULL;
1600 }
1601 #endif /* DHD_LB_TXC */
1602 #ifdef DHD_LB_RXC
1603 if (dhd->rxc_hist[j]) {
1604 MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
1605 dhd->rxc_hist[j] = NULL;
1606 }
1607 #endif /* DHD_LB_RXC */
1608 }
1609
1610 return;
1611 }
1612
1613 static void dhd_lb_stats_dump_histo(
1614 struct bcmstrbuf *strbuf, uint32 **hist)
1615 {
1616 int i, j;
1617 uint32 *per_cpu_total;
1618 uint32 total = 0;
1619 uint32 num_cpus = num_possible_cpus();
1620
1621 per_cpu_total = (uint32 *)kmalloc(sizeof(uint32) * num_cpus, GFP_ATOMIC);
1622 if (!per_cpu_total) {
1623 DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
1624 return;
1625 }
1626 bzero(per_cpu_total, sizeof(uint32) * num_cpus);
1627
1628 bcm_bprintf(strbuf, "CPU: \t\t");
1629 for (i = 0; i < num_cpus; i++)
1630 bcm_bprintf(strbuf, "%d\t", i);
1631 bcm_bprintf(strbuf, "\nBin\n");
1632
1633 for (i = 0; i < HIST_BIN_SIZE; i++) {
1634 bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
1635 for (j = 0; j < num_cpus; j++) {
1636 bcm_bprintf(strbuf, "%d\t", hist[i][j]);
1637 }
1638 bcm_bprintf(strbuf, "\n");
1639 }
1640 bcm_bprintf(strbuf, "Per CPU Total \t");
1641 total = 0;
1642 for (i = 0; i < num_cpus; i++) {
1643 for (j = 0; j < HIST_BIN_SIZE; j++) {
1644 per_cpu_total[i] += (hist[j][i] * (1<<j));
1645 }
1646 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1647 total += per_cpu_total[i];
1648 }
1649 bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1650
1651 kfree(per_cpu_total);
1652 return;
1653 }
1654
1655 static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1656 {
1657 int i, num_cpus = num_possible_cpus();
1658
1659 bcm_bprintf(strbuf, "CPU: \t");
1660 for (i = 0; i < num_cpus; i++)
1661 bcm_bprintf(strbuf, "%d\t", i);
1662 bcm_bprintf(strbuf, "\n");
1663
1664 bcm_bprintf(strbuf, "Val: \t");
1665 for (i = 0; i < num_cpus; i++)
1666 bcm_bprintf(strbuf, "%u\t", *(p+i));
1667 bcm_bprintf(strbuf, "\n");
1668 return;
1669 }
1670
/* Dump all load-balancer statistics into 'strbuf': hotplug counters,
 * scheduling counts, and (per enabled feature) the per-CPU run counters
 * and batch-size histograms, followed by the configured CPU masks and the
 * currently selected napi/tx CPUs.
 */
void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

	bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);

#ifdef DHD_LB_RXP
	bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */

#ifdef DHD_LB_TXC
	bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */

#ifdef DHD_LB_TXP
	bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);

	bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
#endif /* DHD_LB_TXP */

	bcm_bprintf(strbuf, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n",
		DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS);

	bcm_bprintf(strbuf, "napi_cpu %x tx_cpu %x\n",
		atomic_read(&dhd->rx_napi_cpu), atomic_read(&dhd->tx_cpu));

}
1733
1734 /* Given a number 'n' returns 'm' that is next larger power of 2 after n */
1735 static inline uint32 next_larger_power2(uint32 num)
1736 {
1737 num--;
1738 num |= (num >> 1);
1739 num |= (num >> 2);
1740 num |= (num >> 4);
1741 num |= (num >> 8);
1742 num |= (num >> 16);
1743
1744 return (num + 1);
1745 }
1746
1747 static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
1748 {
1749 uint32 bin_power;
1750 uint32 *p;
1751 bin_power = next_larger_power2(count);
1752
1753 switch (bin_power) {
1754 case 1: p = bin[0] + cpu; break;
1755 case 2: p = bin[1] + cpu; break;
1756 case 4: p = bin[2] + cpu; break;
1757 case 8: p = bin[3] + cpu; break;
1758 case 16: p = bin[4] + cpu; break;
1759 case 32: p = bin[5] + cpu; break;
1760 case 64: p = bin[6] + cpu; break;
1761 case 128: p = bin[7] + cpu; break;
1762 default : p = bin[8] + cpu; break;
1763 }
1764
1765 *p = *p + 1;
1766 return;
1767 }
1768
1769 extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1770 {
1771 int cpu;
1772 dhd_info_t *dhd = dhdp->info;
1773
1774 cpu = get_cpu();
1775 put_cpu();
1776 dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
1777
1778 return;
1779 }
1780
1781 extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1782 {
1783 int cpu;
1784 dhd_info_t *dhd = dhdp->info;
1785
1786 cpu = get_cpu();
1787 put_cpu();
1788 dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
1789
1790 return;
1791 }
1792
1793 extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1794 {
1795 int cpu;
1796 dhd_info_t *dhd = dhdp->info;
1797
1798 cpu = get_cpu();
1799 put_cpu();
1800 dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
1801
1802 return;
1803 }
1804
1805 extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
1806 {
1807 dhd_info_t *dhd = dhdp->info;
1808 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
1809 }
1810
1811 extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
1812 {
1813 dhd_info_t *dhd = dhdp->info;
1814 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
1815 }
1816 #endif /* DHD_LB_STATS */
1817
1818 #endif /* DHD_LB */
1819
#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding;
 * no-ops in this build.
 */
#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
/* GMAC3 builds: per-interface STA list needs no locking here */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else /* ! BCM_GMAC3 */
/* Non-GMAC3: guard the per-interface STA list with an irq-safe spinlock */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
/* unicast-forwarding walks operate on a snapshot of the STA list */
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
1859
/* Control fw roaming */
uint dhd_roam_disable = 0;

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif


/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */


#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */



#ifndef BCMDBUS
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);
#endif /* !BCMDBUS */
1930 #endif /* !BCMDBUS */
1931
1932 extern char dhd_version[];
1933 extern char fw_version[];
1934 extern char clm_version[];
1935
1936 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
1937 static void dhd_net_if_lock_local(dhd_info_t *dhd);
1938 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
1939 static void dhd_suspend_lock(dhd_pub_t *dhdp);
1940 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
1941
1942 #ifdef WLMEDIA_HTSF
1943 void htsf_update(dhd_info_t *dhd, void *data);
1944 tsf_t prev_tsf, cur_tsf;
1945
1946 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
1947 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
1948 static void dhd_dump_latency(void);
1949 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
1950 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
1951 static void dhd_dump_htsfhisto(histo_t *his, char *s);
1952 #endif /* WLMEDIA_HTSF */
1953
1954 /* Monitor interface */
1955 int dhd_monitor_init(void *dhd_pub);
1956 int dhd_monitor_uninit(void);
1957
1958
1959 #if defined(WL_WIRELESS_EXT)
1960 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
1961 #endif /* defined(WL_WIRELESS_EXT) */
1962
1963 #ifndef BCMDBUS
1964 static void dhd_dpc(ulong data);
1965 #endif /* !BCMDBUS */
1966 /* forward decl */
1967 extern int dhd_wait_pend8021x(struct net_device *dev);
1968 void dhd_os_wd_timer_extend(void *bus, bool extend);
1969
1970 #ifdef TOE
1971 #ifndef BDC
1972 #error TOE requires BDC
1973 #endif /* !BDC */
1974 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
1975 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
1976 #endif /* TOE */
1977
1978 static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
1979 wl_event_msg_t *event_ptr, void **data_ptr);
1980
1981 #if defined(CONFIG_PM_SLEEP)
1982 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
1983 {
1984 int ret = NOTIFY_DONE;
1985 bool suspend = FALSE;
1986
1987 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1988 #pragma GCC diagnostic push
1989 #pragma GCC diagnostic ignored "-Wcast-qual"
1990 #endif
1991 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
1992 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
1993 #pragma GCC diagnostic pop
1994 #endif
1995
1996 BCM_REFERENCE(dhdinfo);
1997 BCM_REFERENCE(suspend);
1998
1999 switch (action) {
2000 case PM_HIBERNATION_PREPARE:
2001 case PM_SUSPEND_PREPARE:
2002 suspend = TRUE;
2003 break;
2004
2005 case PM_POST_HIBERNATION:
2006 case PM_POST_SUSPEND:
2007 suspend = FALSE;
2008 break;
2009 }
2010
2011 #if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
2012 if (suspend) {
2013 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
2014 dhd_wlfc_suspend(&dhdinfo->pub);
2015 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
2016 } else {
2017 dhd_wlfc_resume(&dhdinfo->pub);
2018 }
2019 #endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
2020
2021 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
2022 KERNEL_VERSION(2, 6, 39))
2023 dhd_mmc_suspend = suspend;
2024 smp_mb();
2025 #endif
2026
2027 return ret;
2028 }
2029
2030 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
2031 * created in kernel notifier link list (with 'next' pointing to itself)
2032 */
2033 static bool dhd_pm_notifier_registered = FALSE;
2034
2035 extern int register_pm_notifier(struct notifier_block *nb);
2036 extern int unregister_pm_notifier(struct notifier_block *nb);
2037 #endif /* CONFIG_PM_SLEEP */
2038
2039 /* Request scheduling of the bus rx frame */
2040 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
2041 static void dhd_os_rxflock(dhd_pub_t *pub);
2042 static void dhd_os_rxfunlock(dhd_pub_t *pub);
2043
2044 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
2045 typedef struct dhd_dev_priv {
2046 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
2047 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
2048 int ifidx; /* interface index */
2049 void * lkup;
2050 } dhd_dev_priv_t;
2051
2052 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
2053 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
2054 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
2055 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
2056 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
2057 #define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
2058
2059 #if defined(DHD_OF_SUPPORT)
2060 extern int dhd_wlan_init(void);
2061 #endif /* defined(DHD_OF_SUPPORT) */
2062 /** Clear the dhd net_device's private structure. */
2063 static inline void
2064 dhd_dev_priv_clear(struct net_device * dev)
2065 {
2066 dhd_dev_priv_t * dev_priv;
2067 ASSERT(dev != (struct net_device *)NULL);
2068 dev_priv = DHD_DEV_PRIV(dev);
2069 dev_priv->dhd = (dhd_info_t *)NULL;
2070 dev_priv->ifp = (dhd_if_t *)NULL;
2071 dev_priv->ifidx = DHD_BAD_IF;
2072 dev_priv->lkup = (void *)NULL;
2073 }
2074
2075 /** Setup the dhd net_device's private structure. */
2076 static inline void
2077 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
2078 int ifidx)
2079 {
2080 dhd_dev_priv_t * dev_priv;
2081 ASSERT(dev != (struct net_device *)NULL);
2082 dev_priv = DHD_DEV_PRIV(dev);
2083 dev_priv->dhd = dhd;
2084 dev_priv->ifp = ifp;
2085 dev_priv->ifidx = ifidx;
2086 }
2087
2088 #ifdef PCIE_FULL_DONGLE
2089
/** Dummy objects are defined with state representing bad|down.
 * Performance gains from reducing branch conditionals, instruction parallelism,
 * dual issue, reducing load shadows, avail of larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object: pub is marked not-up with bus down. */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
	.pub = {
	         .info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
	         .tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
#if defined(TRAFFIC_MGMT_DWM)
	         .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE },
#endif
	         .up = FALSE,
	         .busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object: permanently marked unregistered. */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object: points at the dummy info/netdev with a bad ifidx. */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

/* Unlike the objects above, a free/invalid sta really is a NULL pointer. */
#define DHD_STA_NULL ((dhd_sta_t *)NULL)
2138
2139 /** Interface STA list management. */
2140
2141 /** Fetch the dhd_if object, given the interface index in the dhd. */
2142 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
2143
2144 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
2145 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
2146 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
2147
2148 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
2149 static void dhd_if_del_sta_list(dhd_if_t * ifp);
2150 static void dhd_if_flush_sta(dhd_if_t * ifp);
2151
2152 /* Construct/Destruct a sta pool. */
2153 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
2154 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
2155 /* Clear the pool of dhd_sta_t objects for built-in type driver */
2156 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
2157
2158
2159 /* Return interface pointer */
2160 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
2161 {
2162 ASSERT(ifidx < DHD_MAX_IFS);
2163
2164 if (ifidx >= DHD_MAX_IFS)
2165 return NULL;
2166
2167 return dhdp->info->iflist[ifidx];
2168 }
2169
/** Reset a dhd_sta object and free into the dhd pool.
 * Drains every flow queue owned by the sta, releases its staid, and resets
 * the object to the "free" state (idx == ID16_INVALID, zeroed MAC).
 * Callers that still need sta->ea must capture it BEFORE calling this.
 */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			/* Mark the ring as freeing under its lock so producers see it. */
			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
				void * pkt;
				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
					PKTFREE(dhdp->osh, pkt, TRUE);
				}
			}

			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	/* Return the staid and reset the object to its pool "free" state. */
	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}
2225
2226 /** Allocate a dhd_sta object from the dhd pool. */
2227 static dhd_sta_t *
2228 dhd_sta_alloc(dhd_pub_t * dhdp)
2229 {
2230 uint16 idx;
2231 dhd_sta_t * sta;
2232 dhd_sta_pool_t * sta_pool;
2233
2234 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
2235
2236 idx = id16_map_alloc(dhdp->staid_allocator);
2237 if (idx == ID16_INVALID) {
2238 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
2239 return DHD_STA_NULL;
2240 }
2241
2242 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
2243 sta = &sta_pool[idx];
2244
2245 ASSERT((sta->idx == ID16_INVALID) &&
2246 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
2247
2248 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
2249
2250 sta->idx = idx; /* implying allocated */
2251
2252 return sta;
2253 }
2254
2255 /** Delete all STAs in an interface's STA list. */
2256 static void
2257 dhd_if_del_sta_list(dhd_if_t *ifp)
2258 {
2259 dhd_sta_t *sta, *next;
2260 unsigned long flags;
2261
2262 DHD_IF_STA_LIST_LOCK(ifp, flags);
2263 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2264 #pragma GCC diagnostic push
2265 #pragma GCC diagnostic ignored "-Wcast-qual"
2266 #endif
2267 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
2268 #if defined(BCM_GMAC3)
2269 if (ifp->fwdh) {
2270 /* Remove sta from WOFA forwarder. */
2271 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (uintptr_t)sta);
2272 }
2273 #endif /* BCM_GMAC3 */
2274 list_del(&sta->list);
2275 dhd_sta_free(&ifp->info->pub, sta);
2276 }
2277 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2278 #pragma GCC diagnostic pop
2279 #endif
2280 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2281
2282 return;
2283 }
2284
/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database.
 * Compiles to a no-op unless BCM_GMAC3 is defined. Only the forwarder
 * entries are flushed; the stations themselves remain on ifp->sta_list.
 */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
#if defined(BCM_GMAC3)

	if (ifp && (ifp->fwdh != FWDER_NULL)) {
		dhd_sta_t *sta, *next;
		unsigned long flags;

		DHD_IF_STA_LIST_LOCK(ifp, flags);

		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
			/* Remove any sta entry from WOFA forwarder. */
			fwder_flush(ifp->fwdh, (uintptr_t)sta);
		}

		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	}
#endif /* BCM_GMAC3 */
}
2306
2307 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
2308 static int
2309 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
2310 {
2311 int idx, prio, sta_pool_memsz;
2312 dhd_sta_t * sta;
2313 dhd_sta_pool_t * sta_pool;
2314 void * staid_allocator;
2315
2316 ASSERT(dhdp != (dhd_pub_t *)NULL);
2317 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
2318
2319 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
2320 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
2321 if (staid_allocator == NULL) {
2322 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
2323 return BCME_ERROR;
2324 }
2325
2326 /* Pre allocate a pool of dhd_sta objects (one extra). */
2327 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
2328 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
2329 if (sta_pool == NULL) {
2330 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
2331 id16_map_fini(dhdp->osh, staid_allocator);
2332 return BCME_ERROR;
2333 }
2334
2335 dhdp->sta_pool = sta_pool;
2336 dhdp->staid_allocator = staid_allocator;
2337
2338 /* Initialize all sta(s) for the pre-allocated free pool. */
2339 bzero((uchar *)sta_pool, sta_pool_memsz);
2340 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
2341 sta = &sta_pool[idx];
2342 sta->idx = id16_map_alloc(staid_allocator);
2343 ASSERT(sta->idx <= max_sta);
2344 }
2345 /* Now place them into the pre-allocated free pool. */
2346 for (idx = 1; idx <= max_sta; idx++) {
2347 sta = &sta_pool[idx];
2348 for (prio = 0; prio < (int)NUMPRIO; prio++) {
2349 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
2350 }
2351 dhd_sta_free(dhdp, sta);
2352 }
2353
2354 return BCME_OK;
2355 }
2356
2357 /** Destruct the pool of dhd_sta_t objects.
2358 * Caller must ensure that no STA objects are currently associated with an if.
2359 */
2360 static void
2361 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
2362 {
2363 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
2364
2365 if (sta_pool) {
2366 int idx;
2367 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
2368 for (idx = 1; idx <= max_sta; idx++) {
2369 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
2370 ASSERT(sta_pool[idx].idx == ID16_INVALID);
2371 }
2372 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
2373 dhdp->sta_pool = NULL;
2374 }
2375
2376 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
2377 dhdp->staid_allocator = NULL;
2378 }
2379
/* Clear the pool of dhd_sta_t objects for built-in type driver.
 * Mirrors dhd_sta_pool_init() but reuses the existing pool and allocator
 * allocations instead of creating new ones.
 */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}
}
2429
2430 /** Find STA with MAC address ea in an interface's STA list. */
2431 dhd_sta_t *
2432 dhd_find_sta(void *pub, int ifidx, void *ea)
2433 {
2434 dhd_sta_t *sta;
2435 dhd_if_t *ifp;
2436 unsigned long flags;
2437
2438 ASSERT(ea != NULL);
2439 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2440 if (ifp == NULL)
2441 return DHD_STA_NULL;
2442
2443 DHD_IF_STA_LIST_LOCK(ifp, flags);
2444 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2445 #pragma GCC diagnostic push
2446 #pragma GCC diagnostic ignored "-Wcast-qual"
2447 #endif
2448 list_for_each_entry(sta, &ifp->sta_list, list) {
2449 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
2450 DHD_INFO(("%s: found STA " MACDBG "\n",
2451 __FUNCTION__, MAC2STRDBG((char *)ea)));
2452 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2453 return sta;
2454 }
2455 }
2456 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2457 #pragma GCC diagnostic pop
2458 #endif
2459 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2460
2461 return DHD_STA_NULL;
2462 }
2463
2464 /** Add STA into the interface's STA list. */
2465 dhd_sta_t *
2466 dhd_add_sta(void *pub, int ifidx, void *ea)
2467 {
2468 dhd_sta_t *sta;
2469 dhd_if_t *ifp;
2470 unsigned long flags;
2471
2472 ASSERT(ea != NULL);
2473 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2474 if (ifp == NULL)
2475 return DHD_STA_NULL;
2476
2477 sta = dhd_sta_alloc((dhd_pub_t *)pub);
2478 if (sta == DHD_STA_NULL) {
2479 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
2480 return DHD_STA_NULL;
2481 }
2482
2483 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
2484
2485 /* link the sta and the dhd interface */
2486 sta->ifp = ifp;
2487 sta->ifidx = ifidx;
2488 #ifdef DHD_WMF
2489 sta->psta_prim = NULL;
2490 #endif
2491 INIT_LIST_HEAD(&sta->list);
2492
2493 DHD_IF_STA_LIST_LOCK(ifp, flags);
2494
2495 list_add_tail(&sta->list, &ifp->sta_list);
2496
2497 #if defined(BCM_GMAC3)
2498 if (ifp->fwdh) {
2499 ASSERT(ISALIGNED(ea, 2));
2500 /* Add sta to WOFA forwarder. */
2501 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
2502 }
2503 #endif /* BCM_GMAC3 */
2504
2505 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2506
2507 return sta;
2508 }
2509
2510 /** Delete all STAs from the interface's STA list. */
2511 void
2512 dhd_del_all_sta(void *pub, int ifidx)
2513 {
2514 dhd_sta_t *sta, *next;
2515 dhd_if_t *ifp;
2516 unsigned long flags;
2517
2518 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
2519 if (ifp == NULL)
2520 return;
2521
2522 DHD_IF_STA_LIST_LOCK(ifp, flags);
2523 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2524 #pragma GCC diagnostic push
2525 #pragma GCC diagnostic ignored "-Wcast-qual"
2526 #endif
2527 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
2528 #if defined(BCM_GMAC3)
2529 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
2530 ASSERT(ISALIGNED(sta->ea.octet, 2));
2531 fwder_deassoc(ifp->fwdh, (uint16 *)sta->ea.octet, (uintptr_t)sta);
2532 }
2533 #endif /* BCM_GMAC3 */
2534
2535 list_del(&sta->list);
2536 dhd_sta_free(&ifp->info->pub, sta);
2537 #ifdef DHD_L2_FILTER
2538 if (ifp->parp_enable) {
2539 /* clear Proxy ARP cache of specific Ethernet Address */
2540 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
2541 ifp->phnd_arp_table, FALSE,
2542 sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
2543 }
2544 #endif /* DHD_L2_FILTER */
2545 }
2546 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2547 #pragma GCC diagnostic pop
2548 #endif
2549 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2550
2551 return;
2552 }
2553
/** Delete STA from the interface's STA list.
 * Removes every entry whose MAC matches ea (safe iteration, since matching
 * entries are unlinked and freed), then clears the proxy-ARP cache entry
 * using the caller-supplied ea (valid after the free, unlike sta->ea).
 */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;
	char macstr[ETHER_ADDR_STR_LEN];

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
#if defined(BCM_GMAC3)
			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
				ASSERT(ISALIGNED(ea, 2));
				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
			}
#endif /* BCM_GMAC3 */
			DHD_MAC_TO_STR(((char *)ea), macstr);
			DHD_ERROR(("%s: Deleting STA %s\n", __FUNCTION__, macstr));
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear Proxy ARP cache of specific Ethernet Address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}
2600
2601 /** Add STA if it doesn't exist. Not reentrant. */
2602 dhd_sta_t*
2603 dhd_findadd_sta(void *pub, int ifidx, void *ea)
2604 {
2605 dhd_sta_t *sta;
2606
2607 sta = dhd_find_sta(pub, ifidx, ea);
2608
2609 if (!sta) {
2610 /* Add entry */
2611 sta = dhd_add_sta(pub, ifidx, ea);
2612 }
2613
2614 return sta;
2615 }
2616
2617 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2618 #if !defined(BCM_GMAC3)
/* Build a heap-allocated copy (MAC addresses only) of ifp->sta_list so the
 * caller can walk it without holding the sta_list lock. Entries that fail to
 * allocate are silently skipped. Free with dhd_sta_list_snapshot_free().
 */
static struct list_head *
dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
{
	unsigned long flags;
	dhd_sta_t *sta, *snapshot;

	INIT_LIST_HEAD(snapshot_list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry(sta, &ifp->sta_list, list) {
		/* allocate one and add to snapshot */
		/* NOTE(review): MALLOC() is invoked while holding the sta_list
		 * spinlock with IRQs disabled; if the OSL allocator can sleep
		 * (GFP_KERNEL) this is unsafe in atomic context — confirm the
		 * OSL uses an atomic allocation here.
		 */
		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
		if (snapshot == NULL) {
			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
			continue;
		}

		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);

		INIT_LIST_HEAD(&snapshot->list);
		list_add_tail(&snapshot->list, snapshot_list);
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return snapshot_list;
}
2647
2648 static void
2649 dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
2650 {
2651 dhd_sta_t *sta, *next;
2652
2653 list_for_each_entry_safe(sta, next, snapshot_list, list) {
2654 list_del(&sta->list);
2655 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
2656 }
2657 }
2658 #endif /* !BCM_GMAC3 */
2659 #endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2660
2661 #else
/* PCIE_FULL_DONGLE not defined: STA list management is a no-op. */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2670 #endif /* PCIE_FULL_DONGLE */
2671
2672
2673
2674 #if defined(DHD_LB)
2675
2676 #if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP)
2677 /**
2678 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2679 * CPU and schedules a tasklet.
2680 * @tasklet: opaque pointer to the tasklet
2681 */
2682 INLINE void
2683 dhd_tasklet_schedule(void *tasklet)
2684 {
2685 tasklet_schedule((struct tasklet_struct *)tasklet);
2686 }
2687 /**
2688 * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
2689 * @tasklet: tasklet to be scheduled
2690 * @on_cpu: cpu core id
2691 *
2692 * If the requested cpu is online, then an IPI is sent to this cpu via the
2693 * smp_call_function_single with no wait and the tasklet_schedule function
2694 * will be invoked to schedule the specified tasklet on the requested CPU.
2695 */
2696 INLINE void
2697 dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2698 {
2699 const int wait = 0;
2700 smp_call_function_single(on_cpu,
2701 dhd_tasklet_schedule, (void *)tasklet, wait);
2702 }
2703
2704 /**
2705 * dhd_work_schedule_on - Executes the passed work in a given CPU
2706 * @work: work to be scheduled
2707 * @on_cpu: cpu core id
2708 *
2709 * If the requested cpu is online, then an IPI is sent to this cpu via the
2710 * schedule_work_on and the work function
2711 * will be invoked to schedule the specified work on the requested CPU.
2712 */
2713
2714 INLINE void
2715 dhd_work_schedule_on(struct work_struct *work, int on_cpu)
2716 {
2717 schedule_work_on(on_cpu, work);
2718 }
2719 #endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */
2720
2721 #if defined(DHD_LB_TXC)
2722 /**
2723 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2724 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2725 * freeing the packets placed in the tx_compl workq
2726 */
2727 void
2728 dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2729 {
2730 dhd_info_t *dhd = dhdp->info;
2731 int curr_cpu, on_cpu;
2732
2733 if (dhd->rx_napi_netdev == NULL) {
2734 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2735 return;
2736 }
2737
2738 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2739 /*
2740 * If the destination CPU is NOT online or is same as current CPU
2741 * no need to schedule the work
2742 */
2743 curr_cpu = get_cpu();
2744 put_cpu();
2745
2746 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2747
2748 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2749 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2750 } else {
2751 schedule_work(&dhd->tx_compl_dispatcher_work);
2752 }
2753 }
2754
2755 static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2756 {
2757 struct dhd_info *dhd =
2758 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2759 int cpu;
2760
2761 get_online_cpus();
2762 cpu = atomic_read(&dhd->tx_compl_cpu);
2763 if (!cpu_online(cpu))
2764 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2765 else
2766 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2767 put_online_cpus();
2768 }
2769 #endif /* DHD_LB_TXC */
2770
2771 #if defined(DHD_LB_RXC)
2772 /**
2773 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2774 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2775 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2776 * placed in the rx_compl workq.
2777 *
2778 * @dhdp: pointer to dhd_pub object
2779 */
2780 void
2781 dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2782 {
2783 dhd_info_t *dhd = dhdp->info;
2784 int curr_cpu, on_cpu;
2785
2786 if (dhd->rx_napi_netdev == NULL) {
2787 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2788 return;
2789 }
2790
2791 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2792 /*
2793 * If the destination CPU is NOT online or is same as current CPU
2794 * no need to schedule the work
2795 */
2796 curr_cpu = get_cpu();
2797 put_cpu();
2798 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2799
2800 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2801 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2802 } else {
2803 dhd_rx_compl_dispatcher_fn(dhdp);
2804 }
2805 }
2806
2807 static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp)
2808 {
2809 struct dhd_info *dhd = dhdp->info;
2810 int cpu;
2811
2812 preempt_disable();
2813 cpu = atomic_read(&dhd->rx_compl_cpu);
2814 if (!cpu_online(cpu))
2815 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2816 else {
2817 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2818 }
2819 preempt_enable();
2820 }
2821 #endif /* DHD_LB_RXC */
2822
2823 #if defined(DHD_LB_TXP)
/* Work callback: runs on the CPU it was queued to and kicks the tx_tasklet
 * there (see dhd_tx_dispatcher_fn()).
 */
static void dhd_tx_dispatcher_work(struct work_struct * work)
{
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, tx_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	dhd_tasklet_schedule(&dhd->tx_tasklet);
}
2837
/* Pick the CPU that should run the tx_tasklet and dispatch to it: use
 * dhd->tx_cpu unless the network stack submitted from that same CPU, in
 * which case fall back to tx_compl_cpu; run locally if the choice is offline.
 */
static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
{
	int cpu;
	int net_tx_cpu;
	dhd_info_t *dhd = dhdp->info;

	preempt_disable();
	cpu = atomic_read(&dhd->tx_cpu);
	net_tx_cpu = atomic_read(&dhd->net_tx_cpu);

	/*
	 * Now if the NET_TX has pushed the packet in the same
	 * CPU that is chosen for Tx processing, separate it out
	 * i.e run the TX processing tasklet in compl_cpu
	 */
	if (net_tx_cpu == cpu)
		cpu = atomic_read(&dhd->tx_compl_cpu);

	if (!cpu_online(cpu)) {
		/*
		 * Ooohh... but the Chosen CPU is not online,
		 * Do the job in the current CPU itself.
		 */
		dhd_tasklet_schedule(&dhd->tx_tasklet);
	} else {
		/*
		 * Schedule tx_dispatcher_work to on the cpu which
		 * in turn will schedule tx_tasklet.
		 */
		dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
	}
	preempt_enable();
}
2871
2872 /**
2873 * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
2874 * on another cpu. The tx_tasklet will take care of actually putting
2875 * the skbs into appropriate flow ring and ringing H2D interrupt
2876 *
2877 * @dhdp: pointer to dhd_pub object
2878 */
2879 static void
2880 dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
2881 {
2882 dhd_info_t *dhd = dhdp->info;
2883 int curr_cpu;
2884
2885 curr_cpu = get_cpu();
2886 put_cpu();
2887
2888 /* Record the CPU in which the TX request from Network stack came */
2889 atomic_set(&dhd->net_tx_cpu, curr_cpu);
2890
2891 /* Schedule the work to dispatch ... */
2892 dhd_tx_dispatcher_fn(dhdp);
2893
2894 }
2895 #endif /* DHD_LB_TXP */
2896
2897 #if defined(DHD_LB_RXP)
2898 /**
2899 * dhd_napi_poll - Load balance napi poll function to process received
2900 * packets and send up the network stack using netif_receive_skb()
2901 *
2902 * @napi: napi object in which context this poll function is invoked
2903 * @budget: number of packets to be processed.
2904 *
2905 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2906 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
2907 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2908 * packet tag and sendup.
2909 */
static int
dhd_napi_poll(struct napi_struct *napi, int budget)
{
	int ifid;
	const int pkt_count = 1;	/* dhd_rx_frame() is fed one skb per call */
	const int chan = 0;		/* channel argument is fixed to 0 in this path */
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	int processed = 0;
	struct sk_buff_head rx_process_queue;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/* NOTE(review): this loop drains everything that was queued, so more
	 * than 'budget' packets may be processed in one poll — confirm this
	 * is intentional.
	 */
	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);

		/* interface index was stashed in the packet tag by the producer */
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));

		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));

		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
		processed++;
	}

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	napi_complete(napi);

	/* NOTE(review): a value below 'budget' is returned so the NAPI core
	 * treats the poll as complete, but 'processed' itself is not
	 * reported — verify against the NAPI contract (return actual work
	 * done when it is less than budget).
	 */
	return budget - 1;
}
2959
2960 /**
2961 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2962 * poll list. This function may be invoked via the smp_call_function_single
2963 * from a remote CPU.
2964 *
2965 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2966 * after the napi_struct is added to the softnet data's poll_list
2967 *
2968 * @info: pointer to a dhd_info struct
2969 */
2970 static void
2971 dhd_napi_schedule(void *info)
2972 {
2973 dhd_info_t *dhd = (dhd_info_t *)info;
2974
2975 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2976 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2977
2978 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2979 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2980 __napi_schedule(&dhd->rx_napi_struct);
2981 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2982 }
2983
2984 /*
2985 * If the rx_napi_struct was already running, then we let it complete
2986 * processing all its packets. The rx_napi_struct may only run on one
2987 * core at a time, to avoid out-of-order handling.
2988 */
2989 }
2990
/**
 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
 * action after placing the dhd's rx_process napi object in the remote CPU's
 * softnet data's poll_list.
 *
 * @dhd: dhd_info which has the rx_process napi object
 * @on_cpu: desired remote CPU id
 */
2999 static INLINE int
3000 dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
3001 {
3002 int wait = 0; /* asynchronous IPI */
3003 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
3004 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
3005
3006 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
3007 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
3008 __FUNCTION__, on_cpu));
3009 }
3010
3011 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
3012
3013 return 0;
3014 }
3015
3016 /*
3017 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
3018 * Why should we do this?
3019 * The candidacy algorithm is run from the call back function
3020 * registered to CPU hotplug notifier. This call back happens from Worker
3021 * context. The dhd_napi_schedule_on is also from worker context.
3022 * Note that both of this can run on two different CPUs at the same time.
3023 * So we can possibly have a window where a given CPUn is being brought
3024 * down from CPUm while we try to run a function on CPUn.
3025 * To prevent this its better have the whole code to execute an SMP
3026 * function under get_online_cpus.
3027 * This function call ensures that hotplug mechanism does not kick-in
3028 * until we are done dealing with online CPUs
3029 * If the hotplug worker is already running, no worries because the
3030 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
3031 *
3032 * The below mentioned code structure is proposed in
3033 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
3034 * for the question
3035 * Q: I need to ensure that a particular cpu is not removed when there is some
3036 * work specific to this cpu is in progress
3037 *
3038 * According to the documentation calling get_online_cpus is NOT required, if
3039 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
3040 * run from Work Queue context we have to call these functions
3041 */
3042 static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
3043 {
3044 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3045 #pragma GCC diagnostic push
3046 #pragma GCC diagnostic ignored "-Wcast-qual"
3047 #endif
3048 struct dhd_info *dhd =
3049 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
3050 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3051 #pragma GCC diagnostic pop
3052 #endif
3053 int cpu;
3054
3055 get_online_cpus();
3056 cpu = atomic_read(&dhd->rx_napi_cpu);
3057
3058 if (!cpu_online(cpu))
3059 dhd_napi_schedule(dhd);
3060 else
3061 dhd_napi_schedule_on(dhd, cpu);
3062
3063 put_online_cpus();
3064 }
3065
3066 /**
3067 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
3068 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
3069 * the packets enqueued into the rx_napi_queue and sendup.
3070 * The producer's rx packet queue is appended to the rx_napi_queue before
3071 * dispatching the rx_napi_struct.
3072 */
3073 void
3074 dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
3075 {
3076 unsigned long flags;
3077 dhd_info_t *dhd = dhdp->info;
3078 int curr_cpu;
3079 int on_cpu;
3080
3081 if (dhd->rx_napi_netdev == NULL) {
3082 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
3083 return;
3084 }
3085
3086 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
3087 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
3088
3089 /* append the producer's queue of packets to the napi's rx process queue */
3090 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
3091 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
3092 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
3093
3094 /*
3095 * If the destination CPU is NOT online or is same as current CPU
3096 * no need to schedule the work
3097 */
3098 curr_cpu = get_cpu();
3099 put_cpu();
3100
3101 on_cpu = atomic_read(&dhd->rx_napi_cpu);
3102 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
3103 dhd_napi_schedule(dhd);
3104 } else {
3105 schedule_work(&dhd->rx_napi_dispatcher_work);
3106 }
3107 }
3108
3109 /**
3110 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
3111 */
3112 void
3113 dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
3114 {
3115 dhd_info_t *dhd = dhdp->info;
3116
3117 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
3118 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
3119 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
3120 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
3121 }
3122 #endif /* DHD_LB_RXP */
3123
3124 #endif /* DHD_LB */
3125
3126
/** Returns dhd iflist index corresponding to the bssidx provided by apps */
3128 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
3129 {
3130 dhd_if_t *ifp;
3131 dhd_info_t *dhd = dhdp->info;
3132 int i;
3133
3134 ASSERT(bssidx < DHD_MAX_IFS);
3135 ASSERT(dhdp);
3136
3137 for (i = 0; i < DHD_MAX_IFS; i++) {
3138 ifp = dhd->iflist[i];
3139 if (ifp && (ifp->bssidx == bssidx)) {
3140 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
3141 ifp->name, bssidx, i));
3142 break;
3143 }
3144 }
3145 return i;
3146 }
3147
/*
 * dhd_rxf_enqueue - push an rx packet into the dhdp->skbbuf ring for the
 * rx-frame thread to consume. The ring holds MAXSKBPEND entries and the
 * write index wraps with a power-of-2 mask; a non-NULL slot at the write
 * index means the ring is full.
 *
 * Returns BCME_OK on success, BCME_ERROR on a NULL skb or a full ring
 * (BCME_BUSY instead when RXF_DEQUEUE_ON_BUSY is defined).
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* Publish the skb, then advance the write index with wrap */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
3188
3189 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
3190 {
3191 uint32 store_idx;
3192 uint32 sent_idx;
3193 void *skb;
3194
3195 dhd_os_rxflock(dhdp);
3196
3197 store_idx = dhdp->store_idx;
3198 sent_idx = dhdp->sent_idx;
3199 skb = dhdp->skbbuf[sent_idx];
3200
3201 if (skb == NULL) {
3202 dhd_os_rxfunlock(dhdp);
3203 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
3204 store_idx, sent_idx));
3205 return NULL;
3206 }
3207
3208 dhdp->skbbuf[sent_idx] = NULL;
3209 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
3210
3211 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
3212 skb, sent_idx));
3213
3214 dhd_os_rxfunlock(dhdp);
3215
3216 return skb;
3217 }
3218
3219 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
3220 {
3221 if (prepost) { /* pre process */
3222 dhd_read_cis(dhdp);
3223 dhd_check_module_cid(dhdp);
3224 dhd_check_module_mac(dhdp);
3225 dhd_set_macaddr_from_file(dhdp);
3226 } else { /* post process */
3227 dhd_write_macaddr(&dhdp->mac);
3228 dhd_clear_cis(dhdp);
3229 }
3230
3231 return 0;
3232 }
3233
3234 // terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
3235 #if defined(PKT_FILTER_SUPPORT)
3236 #if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
3237 static bool
3238 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
3239 {
3240 bool _apply = FALSE;
3241 /* In case of IBSS mode, apply arp pkt filter */
3242 if (op_mode_param & DHD_FLAG_IBSS_MODE) {
3243 _apply = TRUE;
3244 goto exit;
3245 }
3246 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
3247 if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
3248 _apply = TRUE;
3249 goto exit;
3250 }
3251
3252 exit:
3253 return _apply;
3254 }
3255 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
3256
3257 void
3258 dhd_set_packet_filter(dhd_pub_t *dhd)
3259 {
3260 int i;
3261
3262 DHD_TRACE(("%s: enter\n", __FUNCTION__));
3263 if (dhd_pkt_filter_enable) {
3264 for (i = 0; i < dhd->pktfilter_count; i++) {
3265 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
3266 }
3267 }
3268 }
3269
/*
 * dhd_enable_packet_filter - enable (value = 1) or disable (value = 0) all
 * packet filters previously programmed in dhd->pktfilter[]. Enabling is
 * refused in HOSTAP mode, and skipped while DHCP is in progress in STA mode.
 */
void
dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
	/* Never enable filtering while operating as HOSTAP */
	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
		return;
	}
	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
			/* Skip enabling the ARP filter when the current op
			 * mode (not IBSS / P2P) does not require it.
			 */
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
}
3301
/*
 * dhd_packet_filter_add_remove - install (add_remove != 0) or delete
 * (add_remove == 0) one of the predefined packet filters selected by @num
 * (a DHD_*_FILTER_NUM constant). Filter strings follow the wl pkt_filter
 * format "<id> <polarity> <type> <offset> <mask> <pattern>".
 *
 * Returns 0 on success or -EINVAL for an unknown @num.
 */
int
dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
{
	char *filterp = NULL;
	int filter_id = 0;

	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filter_id = 102;
			/* With pf6-capable firmware a "remove" request is
			 * converted into installing a discard-IPv4-multicast
			 * filter instead (add_remove is flipped to 1).
			 */
			if (FW_SUPPORTED((dhdp), pf6)) {
				if (dhdp->pktfilter[num] != NULL) {
					dhd_pktfilter_offload_delete(dhdp, filter_id);
					dhdp->pktfilter[num] = NULL;
				}
				if (!add_remove) {
					filterp = DISCARD_IPV4_MCAST;
					add_remove = 1;
					break;
				}
			}
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filter_id = 103;
			/* Same pf6 handling as the IPv4 multicast case above */
			if (FW_SUPPORTED((dhdp), pf6)) {
				if (dhdp->pktfilter[num] != NULL) {
					dhd_pktfilter_offload_delete(dhdp, filter_id);
					dhdp->pktfilter[num] = NULL;
				}
				if (!add_remove) {
					filterp = DISCARD_IPV6_MCAST;
					add_remove = 1;
					break;
				}
			}
			filterp = "103 0 0 0 0xFFFF 0x3333";
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		case DHD_ARP_FILTER_NUM:
			filterp = "105 0 0 12 0xFFFF 0x0806";
			filter_id = 105;
			break;
		case DHD_BROADCAST_ARP_FILTER_NUM:
			filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
				" 0xFFFFFFFFFFFF0000000000000806";
			filter_id = 106;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhdp->pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
	} else { /* Delete filter */
		if (dhdp->pktfilter[num]) {
			dhd_pktfilter_offload_delete(dhdp, filter_id);
			dhdp->pktfilter[num] = NULL;
		}
	}

	return 0;
}
3373 #endif /* PKT_FILTER_SUPPORT */
3374
/*
 * dhd_set_suspend - apply (value != 0) or remove (value == 0) the extra
 * power-save configuration used while the host is suspended: PM mode,
 * packet filters, multicast handling, DTIM skipping, roam tuning, ND
 * offload / IPv6 RA filtering and related iovars. Most sections are
 * compile-time optional; each #ifdef island is independent.
 *
 * Returns 0, or -ENODEV when dhd is NULL. Called with value = in_suspend
 * from dhd_suspend_resume_helper() under the suspend lock taken here.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
	int power_mode = PM_MAX;
#ifdef SUPPORT_SENSORHUB
	shub_control_t shub_ctl;
#endif /* SUPPORT_SENSORHUB */
	/* wl_pkt_filter_enable_t enable_parm; */
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
	int ret = 0;
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
	int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int roam_time_thresh = 0;	/* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
	uint roamvar = dhd->conf->roam_off_suspend;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
	int bcn_li_bcn;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint nd_ra_filter = 0;
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
	struct dhd_info *dhdinfo;
	uint32 allmulti;
	uint i;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef ENABLE_IPMCAST_FILTER
	int ipmcast_l2filter;
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */

#if defined(BCMPCIE)
	int lpas = 0;
	int dtim_period = 0;
	int bcn_interval = 0;
	int bcn_to_dly = 0;
#ifndef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
#else
	bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#endif /* OEM_ANDROID && BCMPCIE */

	if (!dhd)
		return -ENODEV;

#ifdef PASS_ALL_MCAST_PKTS
	dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */

	/* Config override: a non-negative conf->pm wins, else PM_FAST */
	if (dhd->conf->pm >= 0)
		power_mode = dhd->conf->pm;
	else
		power_mode = PM_FAST;

	if (dhd->up) {
		/* ---- Suspend path: value set AND early-suspend flagged ---- */
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra suspend setting\n", __FUNCTION__));

			if (dhd->conf->pm_in_suspend >= 0)
				power_mode = dhd->conf->pm_in_suspend;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);

#ifdef PKT_FILTER_SUPPORT
			/* Enable packet filter,
			 * only allow unicast packet to send up
			 */
			dhd_enable_packet_filter(1, dhd);
#ifdef APF
			dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */

#ifdef SUPPORT_SENSORHUB
			/* Start SensorHub motion sensing while suspended */
			shub_ctl.enable = 1;
			shub_ctl.cmd = 0x000;
			shub_ctl.op_mode = 1;
			shub_ctl.interval = 0;
			if (dhd->info->shub_enable == 1) {
				ret = dhd_iovar(dhd, 0, "shub_msreq",
					(char *)&shub_ctl, sizeof(shub_ctl), NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s SensorHub MS start: failed %d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */


#ifdef PASS_ALL_MCAST_PKTS
			/* Stop passing all multicast while suspended */
			allmulti = 0;
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
						sizeof(allmulti), NULL, 0, TRUE);

			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_ditm on WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
#if defined(BCMPCIE)
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
				&bcn_interval);
			dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);

			if ((bcn_li_dtim * dtim_period * bcn_interval) >=
				MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
				/*
				 * Increase max roaming threshold from 2 secs to 8 secs
				 * the real roam threshold is MIN(max_roam_threshold,
				 * bcn_timeout/2)
				 */
				lpas = 1;
				dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
					0, TRUE);

				bcn_to_dly = 1;
				/*
				 * if bcn_to_dly is 1, the real roam threshold is
				 * MIN(max_roam_threshold, bcn_timeout -1);
				 * notify link down event after roaming procedure complete
				 * if we hit bcn_timeout while we are in roaming progress.
				 */
				dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
					sizeof(bcn_to_dly), NULL, 0, TRUE);
				/* Increase beacon timeout to 6 secs or use bigger one */
				bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
				dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
					sizeof(bcn_timeout), NULL, 0, TRUE);
			}
#else
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
#endif /* OEM_ANDROID && BCMPCIE */

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
			dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
			dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
				sizeof(roam_time_thresh), NULL, 0, TRUE);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			/* Disable firmware roaming during suspend */
			dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
				NULL, 0, TRUE);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 0;
			dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
				sizeof(bcn_li_bcn), NULL, 0, TRUE);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef NDO_CONFIG_SUPPORT
			if (dhd->ndo_enable) {
				if (!dhd->ndo_host_ip_overflow) {
					/* enable ND offload on suspend */
					ret = dhd_ndo_enable(dhd, 1);
					if (ret < 0) {
						DHD_ERROR(("%s: failed to enable NDO\n",
							__FUNCTION__));
					}
				} else {
					DHD_INFO(("%s: NDO disabled on suspend due to"
						"HW capacity\n", __FUNCTION__));
				}
			}
#endif /* NDO_CONFIG_SUPPORT */
#ifndef APF
			if (FW_SUPPORTED(dhd, ndoe))
#else
			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
#endif /* APF */
			{
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
					(char *)&nd_ra_filter, sizeof(nd_ra_filter),
					NULL, 0, TRUE);
				if (ret < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
			dhd_os_suppress_logging(dhd, TRUE);
#ifdef ENABLE_IPMCAST_FILTER
			ipmcast_l2filter = 1;
			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
				(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
				NULL, 0, TRUE);
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = CUSTOM_INTR_WIDTH;
			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
				sizeof(intr_width), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#endif /* DHD_USE_EARLYSUSPEND */
			dhd_conf_set_ap_in_suspend(dhd, value);
		} else {
			/* ---- Resume path: undo everything set above ---- */
			dhd_conf_set_ap_in_suspend(dhd, value);
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			/* Stop SensorHub motion sensing (op_mode = 0) */
			shub_ctl.enable = 1;
			shub_ctl.cmd = 0x000;
			shub_ctl.op_mode = 0;
			shub_ctl.interval = 0;
			if (dhd->info->shub_enable == 1) {
				ret = dhd_iovar(dhd, 0, "shub_msreq",
					(char *)&shub_ctl, sizeof(shub_ctl),
					NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s SensorHub MS stop: failed %d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */

#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = 0;
			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
				sizeof(intr_width), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
			power_mode = PM_FAST;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#ifdef APF
			dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 1;
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
						sizeof(allmulti), NULL, 0, TRUE);
			}
#endif /* PASS_ALL_MCAST_PKTS */
#if defined(BCMPCIE)
			/* restore pre-suspend setting */
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
			}

			dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, 0,
				TRUE);

			dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
				sizeof(bcn_to_dly), NULL, 0, TRUE);

			dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
#else
			/* restore pre-suspend setting for dtim_skip */
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
			}
#endif /* OEM_ANDROID && BCMPCIE */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT;
			dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = 2000;
			dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
				sizeof(roam_time_thresh), NULL, 0, TRUE);

#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			roamvar = dhd_roam_disable;
			dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
				NULL, 0, TRUE);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 1;
			dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
				sizeof(bcn_li_bcn), NULL, 0, TRUE);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef NDO_CONFIG_SUPPORT
			if (dhd->ndo_enable) {
				/* Disable ND offload on resume */
				ret = dhd_ndo_enable(dhd, 0);
				if (ret < 0) {
					DHD_ERROR(("%s: failed to disable NDO\n",
						__FUNCTION__));
				}
			}
#endif /* NDO_CONFIG_SUPPORT */
#ifndef APF
			if (FW_SUPPORTED(dhd, ndoe))
#else
			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
#endif /* APF */
			{
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
					(char *)&nd_ra_filter, sizeof(nd_ra_filter),
					NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
				}
			}
			dhd_os_suppress_logging(dhd, FALSE);
#ifdef ENABLE_IPMCAST_FILTER
			ipmcast_l2filter = 0;
			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
				(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
				NULL, 0, TRUE);
#endif /* ENABLE_IPMCAST_FILTER */
#endif /* DHD_USE_EARLYSUSPEND */

			/* terence 2017029: Reject in early suspend */
			if (!dhd->conf->xmit_in_suspend) {
				dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
			}
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
3757
3758 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
3759 {
3760 dhd_pub_t *dhdp = &dhd->pub;
3761 int ret = 0;
3762
3763 DHD_OS_WAKE_LOCK(dhdp);
3764 DHD_PERIM_LOCK(dhdp);
3765
3766 /* Set flag when early suspend was called */
3767 dhdp->in_suspend = val;
3768 if ((force || !dhdp->suspend_disable_flag) &&
3769 (dhd_support_sta_mode(dhdp) || dhd_conf_get_ap_mode_in_suspend(dhdp)))
3770 {
3771 ret = dhd_set_suspend(val, dhdp);
3772 }
3773
3774 DHD_PERIM_UNLOCK(dhdp);
3775 DHD_OS_WAKE_UNLOCK(dhdp);
3776 return ret;
3777 }
3778
3779 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
3780 static void dhd_early_suspend(struct early_suspend *h)
3781 {
3782 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3783 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3784
3785 if (dhd)
3786 dhd_suspend_resume_helper(dhd, 1, 0);
3787 }
3788
3789 static void dhd_late_resume(struct early_suspend *h)
3790 {
3791 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
3792 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
3793
3794 if (dhd)
3795 dhd_suspend_resume_helper(dhd, 0, 0);
3796 }
3797 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
3798
3799 /*
3800 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
3801 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
3802 *
3803 * dhd_timeout_start(&tmo, usec);
3804 * while (!dhd_timeout_expired(&tmo))
3805 * if (poll_something())
3806 * break;
3807 * if (dhd_timeout_expired(&tmo))
3808 * fatal();
3809 */
3810
3811 void
3812 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
3813 {
3814 tmo->limit = usec;
3815 tmo->increment = 0;
3816 tmo->elapsed = 0;
3817 tmo->tick = jiffies_to_usecs(1);
3818 }
3819
/*
 * dhd_timeout_expired - advance the timeout armed by dhd_timeout_start().
 * Returns 1 once the accumulated delay reaches the limit; otherwise delays
 * (spinning with exponential back-off below one jiffy, then sleeping one
 * tick at a time) and returns 0.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	/* Spin while atomic, or while the step is still under one jiffy */
	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		/* Exponential back-off, capped at one jiffy's worth */
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* Sleep for one jiffy on a private (never-woken) wait queue */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
3853
3854 int
3855 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
3856 {
3857 int i = 0;
3858
3859 if (!dhd) {
3860 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
3861 return DHD_BAD_IF;
3862 }
3863
3864 while (i < DHD_MAX_IFS) {
3865 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
3866 return i;
3867 i++;
3868 }
3869
3870 return DHD_BAD_IF;
3871 }
3872
3873 struct net_device * dhd_idx2net(void *pub, int ifidx)
3874 {
3875 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
3876 struct dhd_info *dhd_info;
3877
3878 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
3879 return NULL;
3880 dhd_info = dhd_pub->info;
3881 if (dhd_info && dhd_info->iflist[ifidx])
3882 return dhd_info->iflist[ifidx]->net;
3883 return NULL;
3884 }
3885
3886 int
3887 dhd_ifname2idx(dhd_info_t *dhd, char *name)
3888 {
3889 int i = DHD_MAX_IFS;
3890
3891 ASSERT(dhd);
3892
3893 if (name == NULL || *name == '\0')
3894 return 0;
3895
3896 while (--i > 0)
3897 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
3898 break;
3899
3900 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
3901
3902 return i; /* default - the primary interface */
3903 }
3904
3905 char *
3906 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
3907 {
3908 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3909
3910 ASSERT(dhd);
3911
3912 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
3913 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
3914 return "<if_bad>";
3915 }
3916
3917 if (dhd->iflist[ifidx] == NULL) {
3918 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
3919 return "<if_null>";
3920 }
3921
3922 if (dhd->iflist[ifidx]->net)
3923 return dhd->iflist[ifidx]->net->name;
3924
3925 return "<if_none>";
3926 }
3927
3928 uint8 *
3929 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
3930 {
3931 int i;
3932 dhd_info_t *dhd = (dhd_info_t *)dhdp;
3933
3934 ASSERT(dhd);
3935 for (i = 0; i < DHD_MAX_IFS; i++)
3936 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
3937 return dhd->iflist[i]->mac_addr;
3938
3939 return NULL;
3940 }
3941
3942
/*
 * Sync the dongle's receive filters with the Linux net_device state for
 * interface @ifidx: push the current multicast address list ("mcast_list"
 * iovar), then the all-multicast flag ("allmulti" iovar), and finally the
 * promiscuous flag (WLC_SET_PROMISC ioctl).
 *
 * Called from the deferred-work path (dhd_set_mcast_list_handler), not
 * directly from ndo_set_rx_mode, so it may allocate/block.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
		return;
	}
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	/* Snapshot the multicast count under the address lock.  NOTE(review):
	 * the lock is dropped before the list walk below, so the list can
	 * change in between; the walk copies at most 'cnt' entries either way.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	cnt = netdev_mc_count(dev);
#else
	cnt = dev->mc_count;
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
	if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
		allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

	/* Send down the multicast list first. */

	/* Buffer layout: "mcast_list\0" + 32-bit LE count + cnt MAC addresses */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	/* Count travels in dongle (little-endian) byte order.  NOTE(review):
	 * cnt is reused as the loop counter below after htol32() — correct on
	 * little-endian hosts; confirm no big-endian target uses this path.
	 */
	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#else /* LINUX < 2.6.35 */
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
		cnt--, mclist = mclist->next) {
		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
	}
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* Dongle rejected a non-empty list: fall back to allmulti so
		 * no group traffic is silently lost.
		 */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	allmulti = htol32(allmulti);
	ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
		sizeof(allmulti), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
4081
4082 int
4083 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
4084 {
4085 int ret;
4086
4087 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
4088 ETHER_ADDR_LEN, NULL, 0, TRUE);
4089 if (ret < 0) {
4090 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
4091 } else {
4092 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
4093 if (ifidx == 0)
4094 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
4095 }
4096
4097 return ret;
4098 }
4099
4100 #ifdef SOFTAP
4101 extern struct net_device *ap_net_dev;
extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
4103 #endif
4104
4105 #ifdef DHD_WMF
4106 void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea,
4107 void* event_data)
4108 {
4109 struct wl_psta_primary_intf_event *psta_prim_event =
4110 (struct wl_psta_primary_intf_event*)event_data;
4111 dhd_sta_t *psta_interface = NULL;
4112 dhd_sta_t *sta = NULL;
4113 uint8 ifindex;
4114 ASSERT(ifname);
4115 ASSERT(psta_prim_event);
4116 ASSERT(ea);
4117
4118 ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
4119 sta = dhd_find_sta(dhdp, ifindex, ea);
4120 if (sta != NULL) {
4121 psta_interface = dhd_find_sta(dhdp, ifindex,
4122 (void *)(psta_prim_event->prim_ea.octet));
4123 if (psta_interface != NULL) {
4124 sta->psta_prim = psta_interface;
4125 }
4126 }
4127 }
4128
4129 /* Get wmf_psta_disable configuration configuration */
4130 int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
4131 {
4132 dhd_info_t *dhd = dhdp->info;
4133 dhd_if_t *ifp;
4134 ASSERT(idx < DHD_MAX_IFS);
4135 ifp = dhd->iflist[idx];
4136 return ifp->wmf_psta_disable;
4137 }
4138
4139 /* Set wmf_psta_disable configuration configuration */
4140 int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
4141 {
4142 dhd_info_t *dhd = dhdp->info;
4143 dhd_if_t *ifp;
4144 ASSERT(idx < DHD_MAX_IFS);
4145 ifp = dhd->iflist[idx];
4146 ifp->wmf_psta_disable = val;
4147 return 0;
4148 }
4149 #endif /* DHD_WMF */
4150
4151 #ifdef DHD_PSTA
4152 /* Get psta/psr configuration configuration */
4153 int dhd_get_psta_mode(dhd_pub_t *dhdp)
4154 {
4155 dhd_info_t *dhd = dhdp->info;
4156 return (int)dhd->psta_mode;
4157 }
4158 /* Set psta/psr configuration configuration */
4159 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
4160 {
4161 dhd_info_t *dhd = dhdp->info;
4162 dhd->psta_mode = val;
4163 return 0;
4164 }
4165 #endif /* DHD_PSTA */
4166
4167 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/*
 * Disable RX packet chaining on interface @idx whenever any feature that
 * must inspect frames individually is active (L2-filter ping blocking,
 * WET mode, or multicast regeneration).  Note this only ever clears
 * rx_pkt_chainable — it never re-enables chaining.
 */
static void
dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	/* The OR-chain is assembled from optional features; the trailing
	 * FALSE keeps the expression well-formed when none are compiled in.
	 */
	if (
#ifdef DHD_L2_FILTER
		(ifp->block_ping) ||
#endif
#ifdef DHD_WET
		(dhd->wet_mode) ||
#endif
#ifdef DHD_MCAST_REGEN
		(ifp->mcast_regen_bss_enable) ||
#endif
		FALSE) {
		ifp->rx_pkt_chainable = FALSE;
	}
}
4192 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
4193
4194 #ifdef DHD_WET
4195 /* Get wet configuration configuration */
4196 int dhd_get_wet_mode(dhd_pub_t *dhdp)
4197 {
4198 dhd_info_t *dhd = dhdp->info;
4199 return (int)dhd->wet_mode;
4200 }
4201
4202 /* Set wet configuration configuration */
4203 int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
4204 {
4205 dhd_info_t *dhd = dhdp->info;
4206 dhd->wet_mode = val;
4207 dhd_update_rx_pkt_chainable_state(dhdp, 0);
4208 return 0;
4209 }
4210 #endif /* DHD_WET */
4211
4212 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
4213 int32 dhd_role_to_nl80211_iftype(int32 role)
4214 {
4215 switch (role) {
4216 case WLC_E_IF_ROLE_STA:
4217 return NL80211_IFTYPE_STATION;
4218 case WLC_E_IF_ROLE_AP:
4219 return NL80211_IFTYPE_AP;
4220 case WLC_E_IF_ROLE_WDS:
4221 return NL80211_IFTYPE_WDS;
4222 case WLC_E_IF_ROLE_P2P_GO:
4223 return NL80211_IFTYPE_P2P_GO;
4224 case WLC_E_IF_ROLE_P2P_CLIENT:
4225 return NL80211_IFTYPE_P2P_CLIENT;
4226 case WLC_E_IF_ROLE_IBSS:
4227 case WLC_E_IF_ROLE_NAN:
4228 return NL80211_IFTYPE_ADHOC;
4229 default:
4230 return NL80211_IFTYPE_UNSPECIFIED;
4231 }
4232 }
4233 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
4234
/*
 * Deferred-work handler for DHD_WQ_WORK_IF_ADD: create and register the
 * host net_device for a virtual interface announced by the firmware.
 * @event_info is a heap-allocated dhd_if_event_t that this handler owns
 * and frees on every path.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	struct net_device *ndev;
	int ifidx, bssidx;
	int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	struct wl_if_event_info info;
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));


#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* cfg80211 builds: let wl_cfg80211 create the interface; skip the
	 * legacy allocation path below when that succeeds.
	 */
	if (if_event->event.ifidx > 0) {
		bzero(&info, sizeof(info));
		info.ifidx = if_event->event.ifidx;
		info.bssidx = if_event->event.bssidx;
		info.role = if_event->event.role;
		strncpy(info.name, if_event->name, IFNAMSIZ);
		if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
			&info, if_event->mac, NULL, true) != NULL) {
			/* Do the post interface create ops */
			DHD_ERROR(("Post ifcreate ops done. Returning \n"));
			goto done;
		}
	}
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	/* This path is for non-android case */
	/* The interface name in host and in event msg are same */
	/* if name in event msg is used to create dongle if list on host */
	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE, if_event->name);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

	/* Perimeter lock is released around registration — presumably because
	 * dhd_register_if may block; confirm before changing the lock scope.
	 */
	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#ifndef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
		uint32 var_int = 1;
		ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
			NULL, 0, TRUE);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	/* The event struct is owned by this handler; release it on all paths */
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
4325
/*
 * Deferred-work handler for DHD_WQ_WORK_IF_DEL: tear down the host-side
 * interface for a firmware interface-delete event.  @event_info is a
 * heap-allocated dhd_if_event_t that this handler owns and frees on
 * every path.
 */
static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;


	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));

	/* Perimeter lock is dropped across the removal ops and re-taken on
	 * every exit path below.
	 */
	DHD_PERIM_UNLOCK(&dhd->pub);
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (if_event->event.ifidx > 0) {
		/* Do the post interface del ops */
		/* NOTE(review): iflist[ifidx] is dereferenced without a NULL
		 * check — presumably the slot is guaranteed populated for a
		 * firmware IF_DEL event; confirm against the event producer.
		 */
		if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, true) == 0) {
			DHD_TRACE(("Post ifdel ops done. Returning \n"));
			DHD_PERIM_LOCK(&dhd->pub);
			goto done;
		}
	}
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	dhd_remove_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
done:
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
4380
4381 #ifdef DHD_UPDATE_INTF_MAC
/*
 * Deferred-work handler for DHD_WQ_WORK_IF_UPDATE: refresh the MAC
 * address of an existing interface (see dhd_op_if_update).  @event_info
 * is a heap-allocated dhd_if_event_t freed here on the success path.
 */
static void
dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_UPDATE) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));

	dhd_op_if_update(&dhd->pub, ifidx);

	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
4417
/*
 * Re-read "cur_etheraddr" for interface @ifidx from the dongle and mirror
 * it into the host interface record and net_device.  When the iovar
 * fails, a locally-administered fallback address is derived from the
 * existing one (last octet bumped, local bit set) to avoid collisions.
 * Returns the ioctl status, or -1/-2 for missing handles.
 */
int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
{
	dhd_info_t * dhdinfo = NULL;
	dhd_if_t * ifp = NULL;
	int ret = 0;
	char buf[128];

	if ((NULL==dhdpub)||(NULL==dhdpub->info)) {
		DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
		return -1;
	} else {
		dhdinfo = (dhd_info_t *)dhdpub->info;
		ifp = dhdinfo->iflist[ifidx];
		if (NULL==ifp) {
			DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
			return -2;
		}
	}

	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
	// Get MAC address
	// buf carries the iovar name on input and the 6-byte MAC on output
	strcpy(buf, "cur_etheraddr");
	ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
	if (0>ret) {
		DHD_ERROR(("Failed to upudate the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
		// avoid collision
		dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
		// force locally administrate address
		ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
	} else {
		DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
			ifp->name, ifp->idx,
			(unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
			(unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
		memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
		if (dhdinfo->iflist[ifp->idx]->net) {
			memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
		}
	}

	return ret;
}
4460 #endif /* DHD_UPDATE_INTF_MAC */
4461
4462 static void
4463 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
4464 {
4465 dhd_info_t *dhd = handle;
4466 dhd_if_t *ifp = event_info;
4467
4468 if (event != DHD_WQ_WORK_SET_MAC) {
4469 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4470 }
4471
4472 if (!dhd) {
4473 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4474 return;
4475 }
4476
4477 dhd_net_if_lock_local(dhd);
4478 DHD_OS_WAKE_LOCK(&dhd->pub);
4479 DHD_PERIM_LOCK(&dhd->pub);
4480
4481 #ifdef SOFTAP
4482 {
4483 unsigned long flags;
4484 bool in_ap = FALSE;
4485 DHD_GENERAL_LOCK(&dhd->pub, flags);
4486 in_ap = (ap_net_dev != NULL);
4487 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4488
4489 if (in_ap) {
4490 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
4491 ifp->net->name));
4492 goto done;
4493 }
4494 }
4495 #endif /* SOFTAP */
4496
4497 // terence 20160907: fix for not able to set mac when wlan0 is down
4498 if (ifp == NULL || !ifp->set_macaddress) {
4499 goto done;
4500 }
4501 if (ifp == NULL || !dhd->pub.up) {
4502 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4503 goto done;
4504 }
4505
4506 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
4507 ifp->set_macaddress = FALSE;
4508 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
4509 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
4510 else
4511 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
4512
4513 done:
4514 DHD_PERIM_UNLOCK(&dhd->pub);
4515 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4516 dhd_net_if_unlock_local(dhd);
4517 }
4518
4519 static void
4520 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
4521 {
4522 dhd_info_t *dhd = handle;
4523 int ifidx = (int)((long int)event_info);
4524 dhd_if_t *ifp = NULL;
4525
4526 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
4527 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
4528 return;
4529 }
4530
4531 if (!dhd) {
4532 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4533 return;
4534 }
4535
4536 dhd_net_if_lock_local(dhd);
4537 DHD_OS_WAKE_LOCK(&dhd->pub);
4538 DHD_PERIM_LOCK(&dhd->pub);
4539
4540 ifp = dhd->iflist[ifidx];
4541
4542 if (ifp == NULL || !dhd->pub.up) {
4543 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4544 goto done;
4545 }
4546
4547 #ifdef SOFTAP
4548 {
4549 bool in_ap = FALSE;
4550 unsigned long flags;
4551 DHD_GENERAL_LOCK(&dhd->pub, flags);
4552 in_ap = (ap_net_dev != NULL);
4553 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4554
4555 if (in_ap) {
4556 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
4557 ifp->net->name));
4558 ifp->set_multicast = FALSE;
4559 goto done;
4560 }
4561 }
4562 #endif /* SOFTAP */
4563
4564 if (ifp == NULL || !dhd->pub.up) {
4565 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
4566 goto done;
4567 }
4568
4569 ifidx = ifp->idx;
4570
4571
4572 _dhd_set_multicast_list(dhd, ifidx);
4573 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
4574
4575 done:
4576 DHD_PERIM_UNLOCK(&dhd->pub);
4577 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4578 dhd_net_if_unlock_local(dhd);
4579 }
4580
4581 static int
4582 dhd_set_mac_address(struct net_device *dev, void *addr)
4583 {
4584 int ret = 0;
4585
4586 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4587 struct sockaddr *sa = (struct sockaddr *)addr;
4588 int ifidx;
4589 dhd_if_t *dhdif;
4590
4591 ifidx = dhd_net2idx(dhd, dev);
4592 if (ifidx == DHD_BAD_IF)
4593 return -1;
4594
4595 dhdif = dhd->iflist[ifidx];
4596
4597 dhd_net_if_lock_local(dhd);
4598 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
4599 dhdif->set_macaddress = TRUE;
4600 dhd_net_if_unlock_local(dhd);
4601 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
4602 dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
4603 return ret;
4604 }
4605
4606 static void
4607 dhd_set_multicast_list(struct net_device *dev)
4608 {
4609 dhd_info_t *dhd = DHD_DEV_INFO(dev);
4610 int ifidx;
4611
4612 ifidx = dhd_net2idx(dhd, dev);
4613 if (ifidx == DHD_BAD_IF)
4614 return;
4615
4616 dhd->iflist[ifidx]->set_multicast = TRUE;
4617 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
4618 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
4619
4620 // terence 20160907: fix for not able to set mac when wlan0 is down
4621 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
4622 DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
4623 }
4624
4625 #ifdef DHD_UCODE_DOWNLOAD
4626 /* Get ucode path */
4627 char *
4628 dhd_get_ucode_path(dhd_pub_t *dhdp)
4629 {
4630 dhd_info_t *dhd = dhdp->info;
4631 return dhd->uc_path;
4632 }
4633 #endif /* DHD_UCODE_DOWNLOAD */
4634
4635 #ifdef PROP_TXSTATUS
4636 int
4637 dhd_os_wlfc_block(dhd_pub_t *pub)
4638 {
4639 dhd_info_t *di = (dhd_info_t *)(pub->info);
4640 ASSERT(di != NULL);
4641 /* terence 20161229: don't do spin lock if proptx not enabled */
4642 if (disable_proptx)
4643 return 1;
4644 #ifdef BCMDBUS
4645 spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
4646 #else
4647 spin_lock_bh(&di->wlfc_spinlock);
4648 #endif /* BCMDBUS */
4649 return 1;
4650 }
4651
4652 int
4653 dhd_os_wlfc_unblock(dhd_pub_t *pub)
4654 {
4655 dhd_info_t *di = (dhd_info_t *)(pub->info);
4656
4657 ASSERT(di != NULL);
4658 /* terence 20161229: don't do spin lock if proptx not enabled */
4659 if (disable_proptx)
4660 return 1;
4661 #ifdef BCMDBUS
4662 spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
4663 #else
4664 spin_unlock_bh(&di->wlfc_spinlock);
4665 #endif /* BCMDBUS */
4666 return 1;
4667 }
4668
4669 #endif /* PROP_TXSTATUS */
4670
4671 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
4672 typedef struct {
4673 uint16 type;
4674 const char *str;
4675 } PKTTYPE_INFO;
4676
4677 static const PKTTYPE_INFO packet_type_info[] =
4678 {
4679 { ETHER_TYPE_IP, "IP" },
4680 { ETHER_TYPE_ARP, "ARP" },
4681 { ETHER_TYPE_BRCM, "BRCM" },
4682 { ETHER_TYPE_802_1X, "802.1X" },
4683 { ETHER_TYPE_WAI, "WAPI" },
4684 { 0, ""}
4685 };
4686
4687 static const char *_get_packet_type_str(uint16 type)
4688 {
4689 int i;
4690 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
4691
4692 for (i = 0; i < n; i++) {
4693 if (packet_type_info[i].type == type)
4694 return packet_type_info[i].str;
4695 }
4696
4697 return packet_type_info[n].str;
4698 }
4699
4700 void
4701 dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx)
4702 {
4703 uint16 protocol;
4704 char *ifname;
4705
4706 protocol = (dump_data[12] << 8) | dump_data[13];
4707 ifname = ndev ? ndev->name : "N/A";
4708
4709 if (protocol != ETHER_TYPE_BRCM) {
4710 DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname,
4711 _get_packet_type_str(protocol)));
4712 #if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP)
4713 prhex("Data", dump_data, datalen);
4714 #endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */
4715 }
4716 }
4717 #endif /* DHD_TX_DUMP || DHD_RX_DUMP */
4718
/* This routine does not support the packet-chain feature; currently
 * tested only for the proxy-ARP path.  Delivers a single packet up the
 * Linux stack on interface @ifidx, choosing between direct netif_rx (in
 * ISR context), the DHD rx thread, or netif_rx_ni.  Always returns
 * BCME_OK.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
	struct sk_buff *skb;
	void *skbhead = NULL;
	void *skbprev = NULL;
	dhd_if_t *ifp;
	ASSERT(!PKTISCHAINED(p));
	skb = PKTTONATIVE(dhdp->osh, p);

	ifp = dhdp->info->iflist[ifidx];
	skb->dev = ifp->net;
#if defined(BCM_GMAC3)
	/* Forwarder capable interfaces use WOFA based forwarding */
	if (ifp->fwdh) {
		struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
		uint16 * da = (uint16 *)(eh->ether_dhost);
		uintptr_t wofa_data;
		ASSERT(ISALIGNED(da, 2));

		/* Packets destined to MACs known by the forwarder stay inside
		 * it; only unknown destinations are handed to the peer.
		 */
		wofa_data = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
		if (wofa_data == WOFA_DATA_INVALID) { /* Unknown MAC address */
			if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
				return BCME_OK;
			}
		}
		PKTFRMNATIVE(dhdp->osh, p);
		PKTFREE(dhdp->osh, p, FALSE);
		return BCME_OK;
	}
#endif /* BCM_GMAC3 */

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (in_interrupt()) {
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		netif_rx(skb);
	} else {
		if (dhdp->info->rxthread_enabled) {
			/* NOTE(review): with a single unchained packet,
			 * skbhead is always NULL here, so this simply builds
			 * a one-element chain for the rx thread.
			 */
			if (!skbhead) {
				skbhead = skb;
			} else {
				PKTSETNEXT(dhdp->osh, skbprev, skb);
			}
			skbprev = skb;
		} else {
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
			netif_rx_ni(skb);
#else
			ulong flags;
			netif_rx(skb);
			local_irq_save(flags);
			RAISE_RX_SOFTIRQ();
			local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
		}
	}

	if (dhdp->info->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	return BCME_OK;
}
4793
/*
 * Core transmit path: validate bus state, apply optional L2 filtering,
 * update statistics, assign a priority (and, for PCIe, a flow ring),
 * then hand the packet to proptxstatus or directly to the bus.
 *
 * Ownership: @pktbuf is ALWAYS consumed — freed here on every error
 * path, or passed to the bus/wlfc layer otherwise.  Callers must not
 * touch it after this returns.
 */
int BCMFASTPATH
__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;
#if defined(DHD_L2_FILTER)
	dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
		return -ENODEV;
#else
		return NETDEV_TX_BUSY;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
	}
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_L2_FILTER
	/* if dhcp_unicast is enabled, we need to convert the */
	/* broadcast DHCP ACK/REPLY packets to Unicast. */
	if (ifp->dhcp_unicast) {
		uint8* mac_addr;
		uint8* ehptr = NULL;
		int ret;
		ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
		if (ret == BCME_OK) {
			/* if given mac address having valid entry in sta list
			 * copy the given mac address, and return with BCME_OK
			 */
			if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
				ehptr = PKTDATA(dhdp->osh, pktbuf);
				bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
			}
		}
	}

	/* Suppress gratuitous ARP on AP roles when configured */
	if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}

	/* Proxy-ARP handling on AP roles */
	if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);

		/* Drop the packets if l2 filter has processed it already
		 * otherwise continue with the normal path
		 */
		if (ret == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}
#endif /* DHD_L2_FILTER */
	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
#ifdef DHD_LOSSLESS_ROAMING
			uint8 prio = (uint8)PKTPRIO(pktbuf);

			/* back up 802.1x's priority */
			dhdp->prio_8021x = prio;
#endif /* DHD_LOSSLESS_ROAMING */
			DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
			/* Track outstanding EAPOL frames; decremented on tx
			 * completion elsewhere in the driver.
			 */
			atomic_inc(&dhd->pend_8021x_cnt);
#if defined(DHD_8021X_DUMP)
			dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
#endif /* DHD_8021X_DUMP */
			dhd_conf_set_eapol_status(dhdp, dhd_ifname(dhdp, ifidx), pktdata);
		}

		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
#ifdef DHD_DHCP_DUMP
			dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
#endif /* DHD_DHCP_DUMP */
#ifdef DHD_ICMP_DUMP
			dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
#endif /* DHD_ICMP_DUMP */
		}
	} else {
		/* Runt frame (shorter than an Ethernet header): drop */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

	{
		/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
		if (PKTPRIO(pktbuf) == 0)
#endif /* !PKTPRIO_OVERRIDE */
		{
#if defined(QOS_MAP_SET)
			pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
#else
			pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */
		}
	}


#if defined(TRAFFIC_MGMT_DWM)
	traffic_mgmt_pkt_set_prio(dhdp, pktbuf);

#ifdef BCM_GMAC3
	DHD_PKT_SET_DATAOFF(pktbuf, 0);
#endif /* BCM_GMAC3 */
#endif

#ifdef PCIE_FULL_DONGLE
	/*
	 * Lkup the per interface hash table, for a matching flowring. If one is not
	 * available, allocate a unique flowid and add a flowring entry.
	 * The found or newly created flowid is placed into the pktbuf's tag.
	 */
	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
	if (ret != BCME_OK) {
		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
		return ret;
	}
#endif

#if defined(DHD_TX_DUMP)
	dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf),
		PKTLEN(dhdp->osh, pktbuf), TRUE);
#endif
	/* terence 20150901: Micky add to ajust the 802.1X priority */
	/* Set the 802.1X packet with the highest priority 7 */
	if (dhdp->conf->pktprio8021x >= 0)
		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
	if (dhd_wlfc_is_supported(dhdp)) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	{
		/* If the protocol uses a data header, apply it */
		dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
	}

	/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
	dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#ifdef PROP_TXSTATUS
	{
		/* wlfc takes ownership when supported; otherwise fall back to
		 * the plain bus transmit below.
		 */
		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
			/* non-proptxstatus way */
#ifdef BCMPCIE
			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
		}
	}
#else
#ifdef BCMPCIE
	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */
#ifdef BCMDBUS
	/* DBUS builds: the bus does not free on failure, so free here */
	if (ret)
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
#endif /* BCMDBUS */

	return ret;
}
4992
/*
 * Public transmit entry: wraps __dhd_sendpkt() with bus-state checks and
 * IN_SEND_PKT busy accounting so suspend/down transitions can wait for
 * in-flight senders.  Frees @pktbuf itself on every early-error path.
 */
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = 0;
	unsigned long flags;

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: returning as busstate=%d\n",
			__FUNCTION__, dhdp->busstate));
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	/* Mark a sender in flight before dropping the lock */
	DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		ret = -EBUSY;
		goto exit;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
		DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
		dhd_os_busbusy_wake(dhdp);
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	DHD_GENERAL_UNLOCK(dhdp, flags);

	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);

#ifdef DHD_PCIE_RUNTIMEPM
exit:
#endif
	/* Clear the busy bit and wake anyone waiting for senders to drain */
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return ret;
}
5042
5043 #if defined(DHD_LB_TXP)
5044
/*
 * Load-balanced TX entry point: transmits inline via __dhd_sendpkt() when
 * load-balanced TX is disabled at run time, otherwise tags the skb with
 * its net device and interface index and queues it for the TX tasklet.
 *
 * Returns the __dhd_sendpkt() result on the inline path, NETDEV_TX_OK
 * after a successful enqueue.
 */
int BCMFASTPATH
dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
	int ifidx, void *skb)
{
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);

	/* If the feature is disabled run-time do TX from here */
	if (atomic_read(&dhd->lb_txp_active) == 0) {
		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
		return __dhd_sendpkt(&dhd->pub, ifidx, skb);
	}

	/* Store the address of net device and interface index in the Packet tag */
	DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
	DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);

	/* Enqueue the skb into tx_pend_queue */
	skb_queue_tail(&dhd->tx_pend_queue, skb);

	DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));

	/* Dispatch the Tx job to be processed by the tx_tasklet */
	dhd_lb_tx_dispatch(&dhd->pub);

	return NETDEV_TX_OK;
}
5071 #endif /* DHD_LB_TXP */
5072
/*
 * Network stack transmit entry point (ndo_start_xmit).
 *
 * Validates bus/suspend state, normalizes the skb (odd-address re-align,
 * headroom for the protocol header), applies optional WET/WMF/PSTA/
 * TCP-ACK-suppression processing, then hands the packet to
 * __dhd_sendpkt() (or the load-balance TX queue when DHD_LB_TXP is set).
 * On the success path the packet is always consumed: on error it is
 * dropped and counted, never handed back to the stack.
 */
int BCMFASTPATH
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	int ret;
	uint datalen;
	void *pktbuf;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_if_t *ifp = NULL;
	int ifidx;
	unsigned long flags;
#ifdef WLMEDIA_HTSF
	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
#else
	uint8 htsfdlystat_sz = 0;
#endif
#ifdef DHD_WMF
	struct ether_header *eh;
	uint8 *iph;
#endif /* DHD_WMF */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhd_query_bus_erros(&dhd->pub)) {
		return -ENODEV;
	}

	/* terence 2017029: Reject in early suspend */
	if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) {
		dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
		return -ENODEV;
#else
		return NETDEV_TX_BUSY;
#endif
	}

	/* Mark a TX as in progress so suspend/detach paths wait for it */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
		/* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
		/* stop the network queue temporarily until resume done */
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		if (!dhdpcie_is_resume_done(&dhd->pub)) {
			dhd_bus_stop_queue(dhd->pub.bus);
		}
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
		return -ENODEV;
#else
		return NETDEV_TX_BUSY;
#endif
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	/* NOTE: on the success path this lock stays held until after
	 * ifp/ifidx are fetched below (released at the unlock following
	 * the DHD_BAD_IF check).
	 */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
#ifdef BCMPCIE
	/* On PCIe a suspended/suspending bus rejects the TX outright */
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
			__FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
#ifdef PCIE_FULL_DONGLE
		/* Stop tx queues if suspend is in progress */
		if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
			dhd_bus_stop_queue(dhd->pub.bus);
		}
#endif /* PCIE_FULL_DONGLE */
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
		return -ENODEV;
#else
		return NETDEV_TX_BUSY;
#endif
	}
#else
	/* Non-PCIe buses only log the suspend state and carry on */
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
			__FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
	}
#endif

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);


#if defined(DHD_HANG_SEND_UP_TEST)
	if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
		dhd->pub.busstate = DHD_BUS_DOWN;
	}
#endif /* DHD_HANG_SEND_UP_TEST */

	/* Reject if down */
	if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
		netif_stop_queue(net);
		/* Send Event when bus down detected during data session */
		if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
			dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
			net_os_send_hang_message(net);
		}
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
		return -ENODEV;
#else
		return NETDEV_TX_BUSY;
#endif
	}

	ifp = DHD_DEV_IFP(net);
	ifidx = DHD_DEV_IFIDX(net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
		netif_stop_queue(net);
		DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
		dhd_os_busbusy_wake(&dhd->pub);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
		return -ENODEV;
#else
		return NETDEV_TX_BUSY;
#endif
	}

	DHD_GENERAL_UNLOCK(&dhd->pub, flags);

	ASSERT(ifidx == dhd_net2idx(dhd, net));
	ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));

	bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);

	/* re-align socket buffer if "skb->data" is odd address */
	if (((unsigned long)(skb->data)) & 0x1) {
		unsigned char *data = skb->data;
		uint32 length = skb->len;
		PKTPUSH(dhd->pub.osh, skb, 1);
		memmove(skb->data, data, length);
		PKTSETLEN(dhd->pub.osh, skb, length);
	}

	/* Captured before any header push; used for tx_bytes accounting */
	datalen = PKTLEN(dhd->pub.osh, skb);

	/* Make sure there's enough room for any header */
	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
		struct sk_buff *skb2;

		DHD_INFO(("%s: insufficient headroom\n",
			dhd_ifname(&dhd->pub, ifidx)));
		dhd->pub.tx_realloc++;

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);

		dev_kfree_skb(skb);
		if ((skb = skb2) == NULL) {
			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
				dhd_ifname(&dhd->pub, ifidx)));
			ret = -ENOMEM;
			goto done;
		}
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
	}

	/* Convert to packet */
	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
			dhd_ifname(&dhd->pub, ifidx)));
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
		dev_kfree_skb_any(skb);
		ret = -ENOMEM;
		goto done;
	}

#if defined(WLMEDIA_HTSF)
	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
		struct ether_header *eh = (struct ether_header *)pktdata;

		if (!ETHER_ISMULTI(eh->ether_dhost) &&
			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
		}
	}
#endif
#ifdef DHD_WET
	/* wet related packet proto manipulation should be done in DHD
	   since dongle doesn't have complete payload
	 */
	if (WET_ENABLED(&dhd->pub) &&
		(dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
		DHD_INFO(("%s:%s: wet send proc failed\n",
			__FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
		PKTFREE(dhd->pub.osh, pktbuf, FALSE);
		ret = -EFAULT;
		goto done;
	}
#endif /* DHD_WET */

#ifdef DHD_WMF
	eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
	iph = (uint8 *)eh + ETHER_HDR_LEN;

	/* WMF processing for multicast packets
	 * Only IPv4 packets are handled
	 */
	if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
		(IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
		((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
		void *sdu_clone;
		bool ucast_convert = FALSE;
#ifdef DHD_UCAST_UPNP
		uint32 dest_ip;

		dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
		ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
#endif /* DHD_UCAST_UPNP */
#ifdef DHD_IGMP_UCQUERY
		ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
			(IPV4_PROT(iph) == IP_PROT_IGMP) &&
			(*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
#endif /* DHD_IGMP_UCQUERY */
		if (ucast_convert) {
			dhd_sta_t *sta;
			unsigned long flags;
			struct list_head snapshot_list;
			struct list_head *wmf_ucforward_list;

			ret = NETDEV_TX_OK;

			/* For non BCM_GMAC3 platform we need a snapshot sta_list to
			 * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
			 */
			wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);

			/* Convert upnp/igmp query to unicast for each assoc STA */
			list_for_each_entry(sta, wmf_ucforward_list, list) {
				/* Skip sending to proxy interfaces of proxySTA */
				if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) {
					continue;
				}
				if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
					ret = WMF_NOP;
					break;
				}
				dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
			}
			DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);

			DHD_GENERAL_LOCK(&dhd->pub, flags);
			DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
			dhd_os_busbusy_wake(&dhd->pub);
			DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
			DHD_OS_WAKE_UNLOCK(&dhd->pub);

			/* Original pktbuf is only a template for the clones;
			 * free it once every STA got its copy.
			 */
			if (ret == NETDEV_TX_OK)
				PKTFREE(dhd->pub.osh, pktbuf, TRUE);

			return ret;
		} else
#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
		{
			/* There will be no STA info if the packet is coming from LAN host
			 * Pass as NULL
			 */
			ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
			switch (ret) {
			case WMF_TAKEN:
			case WMF_DROP:
				/* Either taken by WMF or we should drop it.
				 * Exiting send path
				 */

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
				dhd_os_busbusy_wake(&dhd->pub);
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
				DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
				return NETDEV_TX_OK;
			default:
				/* Continue the transmit path */
				break;
			}
		}
	}
#endif /* DHD_WMF */
#ifdef DHD_PSTA
	/* PSR related packet proto manipulation should be done in DHD
	 * since dongle doesn't have complete payload
	 */
	if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
		ifidx, &pktbuf, TRUE) < 0)) {
		DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
			dhd_ifname(&dhd->pub, ifidx)));
	}
#endif /* DHD_PSTA */

#ifdef DHDTCPACK_SUPPRESS
	if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
		/* If this packet has been hold or got freed, just return */
		if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
			ret = 0;
			goto done;
		}
	} else {
		/* If this packet has replaced another packet and got freed, just return */
		if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
			ret = 0;
			goto done;
		}
	}
#endif /* DHDTCPACK_SUPPRESS */

	/*
	 * If Load Balance is enabled queue the packet
	 * else send directly from here.
	 */
#if defined(DHD_LB_TXP)
	ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
#else
	ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
#endif

done:
	if (ret) {
		ifp->stats.tx_dropped++;
		dhd->pub.tx_dropped++;
	} else {
#ifdef PROP_TXSTATUS
		/* tx_packets counter can counted only when wlfc is disabled */
		if (!dhd_wlfc_is_supported(&dhd->pub))
#endif
		{
			dhd->pub.tx_packets++;
			ifp->stats.tx_packets++;
			ifp->stats.tx_bytes += datalen;
		}
	}


	DHD_GENERAL_LOCK(&dhd->pub, flags);
	DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
	dhd_os_busbusy_wake(&dhd->pub);
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	/* Return ok: we always eat the packet */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
	return 0;
#else
	return NETDEV_TX_OK;
#endif
}
5440
5441
5442 void
5443 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
5444 {
5445 struct net_device *net;
5446 dhd_info_t *dhd = dhdp->info;
5447 int i;
5448
5449 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5450
5451 ASSERT(dhd);
5452
5453 #ifdef DHD_LOSSLESS_ROAMING
5454 /* block flowcontrol during roaming */
5455 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
5456 return;
5457 }
5458 #endif
5459
5460 if (ifidx == ALL_INTERFACES) {
5461 /* Flow control on all active interfaces */
5462 dhdp->txoff = state;
5463 for (i = 0; i < DHD_MAX_IFS; i++) {
5464 if (dhd->iflist[i]) {
5465 net = dhd->iflist[i]->net;
5466 if (state == ON)
5467 netif_stop_queue(net);
5468 else
5469 netif_wake_queue(net);
5470 }
5471 }
5472 } else {
5473 if (dhd->iflist[ifidx]) {
5474 net = dhd->iflist[ifidx]->net;
5475 if (state == ON)
5476 netif_stop_queue(net);
5477 else
5478 netif_wake_queue(net);
5479 }
5480 }
5481 }
5482
5483
5484 #ifdef DHD_WMF
5485 bool
5486 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
5487 {
5488 dhd_info_t *dhd = dhdp->info;
5489
5490 return dhd->rxthread_enabled;
5491 }
5492 #endif /* DHD_WMF */
5493
5494 #ifdef DHD_MCAST_REGEN
5495 /*
5496 * Description: This function is called to do the reverse translation
5497 *
5498 * Input eh - pointer to the ethernet header
5499 */
5500 int32
5501 dhd_mcast_reverse_translation(struct ether_header *eh)
5502 {
5503 uint8 *iph;
5504 uint32 dest_ip;
5505
5506 iph = (uint8 *)eh + ETHER_HDR_LEN;
5507 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
5508
5509 /* Only IP packets are handled */
5510 if (eh->ether_type != hton16(ETHER_TYPE_IP))
5511 return BCME_ERROR;
5512
5513 /* Non-IPv4 multicast packets are not handled */
5514 if (IP_VER(iph) != IP_VER_4)
5515 return BCME_ERROR;
5516
5517 /*
5518 * The packet has a multicast IP and unicast MAC. That means
5519 * we have to do the reverse translation
5520 */
5521 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
5522 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
5523 return BCME_OK;
5524 }
5525
5526 return BCME_ERROR;
5527 }
#endif /* DHD_MCAST_REGEN */
5529
5530 #ifdef SHOW_LOGTRACE
5531 static int
5532 dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
5533 {
5534 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5535 int ret = BCME_OK;
5536 uint datalen;
5537 bcm_event_msg_u_t evu;
5538 void *data = NULL;
5539 void *pktdata = NULL;
5540 bcm_event_t *pvt_data;
5541 uint pktlen;
5542
5543 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
5544
5545 /* In dhd_rx_frame, header is stripped using skb_pull
5546 * of size ETH_HLEN, so adjust pktlen accordingly
5547 */
5548 pktlen = skb->len + ETH_HLEN;
5549
5550 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
5551 pktdata = (void *)skb_mac_header(skb);
5552 #else
5553 pktdata = (void *)skb->mac.raw;
5554 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
5555
5556 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5557
5558 if (ret != BCME_OK) {
5559 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
5560 __FUNCTION__, ret));
5561 goto exit;
5562 }
5563
5564 datalen = ntoh32(evu.event.datalen);
5565
5566 pvt_data = (bcm_event_t *)pktdata;
5567 data = &pvt_data[1];
5568
5569 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
5570
5571 exit:
5572 return ret;
5573 }
5574
/*
 * Workqueue handler for dhd->event_log_dispatcher_work.
 *
 * Drains every skb queued in dhd->evt_trace_queue by
 * dhd_event_logtrace_enqueue(), decodes it, and then either frees it
 * (info-ring buffers, or all events when
 * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined) or forwards it
 * to the network stack.
 */
static void
dhd_event_logtrace_process(struct work_struct * work)
{
/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, event_log_dispatcher_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

	dhd_pub_t *dhdp;
	struct sk_buff *skb;

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhdp = &dhd->pub;

	/* NOTE(review): dhdp is the address of an embedded member of dhd
	 * (which was just checked), so it can never be NULL here; this
	 * check is purely defensive.
	 */
	if (!dhdp) {
		DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
		return;
	}

	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	/* Run while(1) loop till all skbs are dequeued */
	while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
#ifdef PCIE_FULL_DONGLE
		/* Info-ring buffers are tagged with the pseudo interface
		 * DHD_EVENT_IF by the enqueue path; they are processed and
		 * freed separately from regular event packets.
		 */
		int ifid;
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
		if (ifid == DHD_EVENT_IF) {
			dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
			/* For sending skb to network layer, convert it to Native PKT
			 * after that assign skb->dev with Primary interface n/w device
			 * as for infobuf events, we are sending special DHD_EVENT_IF
			 */
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhdp->osh, skb, FALSE);
#else
			PKTFREE(dhdp->osh, skb, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			continue;
		}
		else {
			dhd_event_logtrace_pkt_process(dhdp, skb);
		}
#else
		dhd_event_logtrace_pkt_process(dhdp, skb);
#endif /* PCIE_FULL_DONGLE */

		/* Free skb buffer here if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
		 * macro is defined the Info Ring event and WLC_E_TRACE event is freed in DHD
		 * else it is always sent up to network layers.
		 */
#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
#ifdef DHD_USE_STATIC_CTRLBUF
		PKTFREE_STATIC(dhdp->osh, skb, FALSE);
#else
		PKTFREE(dhdp->osh, skb, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
#else /* !DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
		/* Do not call netif_recieve_skb as this workqueue scheduler is not from NAPI
		 * Also as we are not in INTR context, do not call netif_rx, instead call
		 * netif_rx_ni (for kerenl >= 2.6) which does netif_rx, disables irq, raise
		 * NET_IF_RX softirq and enables interrupts back
		 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
		netif_rx_ni(skb);
#else
		{
			ulong flags;
			netif_rx(skb);
			local_irq_save(flags);
			RAISE_RX_SOFTIRQ();
			local_irq_restore(flags);
		}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
	}
}
5661
/*
 * Queue an event/trace packet for deferred processing.
 * Called from the RX path (see dhd_rx_frame) so that verbose event/log
 * decoding runs in dhd_event_logtrace_process() on the system workqueue
 * instead of in the data path.
 */
void
dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

#ifdef PCIE_FULL_DONGLE
	/* Add ifidx in the PKTTAG so the worker can distinguish info-ring
	 * buffers (DHD_EVENT_IF) from regular event packets.
	 */
	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
#endif /* PCIE_FULL_DONGLE */
	skb_queue_tail(&dhd->evt_trace_queue, pktbuf);

	schedule_work(&dhd->event_log_dispatcher_work);
}
5675
5676 void
5677 dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
5678 {
5679 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5680 struct sk_buff *skb;
5681
5682 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
5683 #ifdef DHD_USE_STATIC_CTRLBUF
5684 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
5685 #else
5686 PKTFREE(dhdp->osh, skb, FALSE);
5687 #endif /* DHD_USE_STATIC_CTRLBUF */
5688 }
5689 }
5690 #endif /* SHOW_LOGTRACE */
5691
5692 /** Called when a frame is received by the dongle on interface 'ifidx' */
5693 void
5694 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
5695 {
5696 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5697 struct sk_buff *skb;
5698 uchar *eth;
5699 uint len;
5700 void *data, *pnext = NULL;
5701 int i;
5702 dhd_if_t *ifp;
5703 wl_event_msg_t event;
5704 int tout_rx = 0;
5705 int tout_ctrl = 0;
5706 void *skbhead = NULL;
5707 void *skbprev = NULL;
5708 uint16 protocol;
5709 unsigned char *dump_data;
5710 #ifdef DHD_MCAST_REGEN
5711 uint8 interface_role;
5712 if_flow_lkup_t *if_flow_lkup;
5713 unsigned long flags;
5714 #endif
5715 #ifdef DHD_WAKE_STATUS
5716 int pkt_wake = 0;
5717 wake_counts_t *wcp = NULL;
5718 #endif /* DHD_WAKE_STATUS */
5719
5720 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5721
5722 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
5723 struct ether_header *eh;
5724
5725 pnext = PKTNEXT(dhdp->osh, pktbuf);
5726 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
5727
5728 /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
5729 * special ifidx of DHD_EVENT_IF. This is just internal to dhd to get the data from
5730 * dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
5731 */
5732 if (ifidx == DHD_EVENT_IF) {
5733 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
5734 * context in case of PCIe FD, in case of other bus this will be from
5735 * DPC context. If we get bunch of events from Dongle then printing all
5736 * of them from Tasklet/DPC context that too in data path is costly.
5737 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
5738 * events with type WLC_E_TRACE.
5739 * We'll print this console logs from the WorkQueue context by enqueing SKB
5740 * here and Dequeuing will be done in WorkQueue and will be freed only if
5741 * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
5742 */
5743 #ifdef SHOW_LOGTRACE
5744 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
5745 #else /* !SHOW_LOGTRACE */
5746 /* If SHOW_LOGTRACE not defined and ifidx is DHD_EVENT_IF,
5747 * free the PKT here itself
5748 */
5749 #ifdef DHD_USE_STATIC_CTRLBUF
5750 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
5751 #else
5752 PKTFREE(dhdp->osh, pktbuf, FALSE);
5753 #endif /* DHD_USE_STATIC_CTRLBUF */
5754 #endif /* SHOW_LOGTRACE */
5755 continue;
5756 }
5757 #ifdef DHD_WAKE_STATUS
5758 #ifdef BCMDBUS
5759 wcp = NULL;
5760 #else
5761 pkt_wake = dhd_bus_get_bus_wake(dhdp);
5762 wcp = dhd_bus_get_wakecount(dhdp);
5763 #endif /* BCMDBUS */
5764 if (wcp == NULL) {
5765 /* If wakeinfo count buffer is null do not update wake count values */
5766 pkt_wake = 0;
5767 }
5768 #endif /* DHD_WAKE_STATUS */
5769
5770 ifp = dhd->iflist[ifidx];
5771 if (ifp == NULL) {
5772 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
5773 __FUNCTION__));
5774 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5775 continue;
5776 }
5777
5778 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5779
5780 /* Dropping only data packets before registering net device to avoid kernel panic */
5781 #ifndef PROP_TXSTATUS_VSDB
5782 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
5783 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5784 #else
5785 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
5786 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
5787 #endif /* PROP_TXSTATUS_VSDB */
5788 {
5789 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
5790 __FUNCTION__));
5791 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5792 continue;
5793 }
5794
5795 #ifdef PROP_TXSTATUS
5796 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
5797 /* WLFC may send header only packet when
5798 there is an urgent message but no packet to
5799 piggy-back on
5800 */
5801 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5802 continue;
5803 }
5804 #endif
5805 #ifdef DHD_L2_FILTER
5806 /* If block_ping is enabled drop the ping packet */
5807 if (ifp->block_ping) {
5808 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
5809 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5810 continue;
5811 }
5812 }
5813 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
5814 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
5815 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5816 continue;
5817 }
5818 }
5819 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
5820 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
5821
5822 /* Drop the packets if l2 filter has processed it already
5823 * otherwise continue with the normal path
5824 */
5825 if (ret == BCME_OK) {
5826 PKTCFREE(dhdp->osh, pktbuf, TRUE);
5827 continue;
5828 }
5829 }
5830 #endif /* DHD_L2_FILTER */
5831
5832 #ifdef DHD_MCAST_REGEN
5833 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
5834 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
5835 ASSERT(if_flow_lkup);
5836
5837 interface_role = if_flow_lkup[ifidx].role;
5838 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
5839
5840 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
5841 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
5842 ETHER_ISUCAST(eh->ether_dhost)) {
5843 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
5844 #ifdef DHD_PSTA
5845 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
5846 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
5847 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
5848 if (ifidx != 0) {
5849 /* Let the primary in PSTA interface handle this
5850 * frame after unicast to Multicast conversion
5851 */
5852 ifp = dhd_get_ifp(dhdp, 0);
5853 ASSERT(ifp);
5854 }
5855 }
5856 }
5857 #endif /* PSTA */
5858 }
5859 #endif /* MCAST_REGEN */
5860
5861 #ifdef DHD_WMF
5862 /* WMF processing for multicast packets */
5863 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
5864 dhd_sta_t *sta;
5865 int ret;
5866
5867 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
5868 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
5869 switch (ret) {
5870 case WMF_TAKEN:
5871 /* The packet is taken by WMF. Continue to next iteration */
5872 continue;
5873 case WMF_DROP:
5874 /* Packet DROP decision by WMF. Toss it */
5875 DHD_ERROR(("%s: WMF decides to drop packet\n",
5876 __FUNCTION__));
5877 PKTCFREE(dhdp->osh, pktbuf, FALSE);
5878 continue;
5879 default:
5880 /* Continue the transmit path */
5881 break;
5882 }
5883 }
5884 #endif /* DHD_WMF */
5885
5886 #ifdef DHDTCPACK_SUPPRESS
5887 dhd_tcpdata_info_get(dhdp, pktbuf);
5888 #endif
5889 skb = PKTTONATIVE(dhdp->osh, pktbuf);
5890
5891 ASSERT(ifp);
5892 skb->dev = ifp->net;
5893 #ifdef DHD_WET
5894 /* wet related packet proto manipulation should be done in DHD
5895 * since dongle doesn't have complete payload
5896 */
5897 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
5898 pktbuf) < 0)) {
5899 DHD_INFO(("%s:%s: wet recv proc failed\n",
5900 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
5901 }
5902 #endif /* DHD_WET */
5903
5904 #ifdef DHD_PSTA
5905 if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
5906 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
5907 dhd_ifname(dhdp, ifidx)));
5908 }
5909 #endif /* DHD_PSTA */
5910
5911 #ifdef PCIE_FULL_DONGLE
5912 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
5913 (!ifp->ap_isolate)) {
5914 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
5915 if (ETHER_ISUCAST(eh->ether_dhost)) {
5916 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
5917 dhd_sendpkt(dhdp, ifidx, pktbuf);
5918 continue;
5919 }
5920 } else {
5921 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
5922 if (npktbuf)
5923 dhd_sendpkt(dhdp, ifidx, npktbuf);
5924 }
5925 }
5926 #endif /* PCIE_FULL_DONGLE */
5927
5928 /* Get the protocol, maintain skb around eth_type_trans()
5929 * The main reason for this hack is for the limitation of
5930 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
5931 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
5932 * coping of the packet coming from the network stack to add
5933 * BDC, Hardware header etc, during network interface registration
5934 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
5935 * for BDC, Hardware header etc. and not just the ETH_HLEN
5936 */
5937 eth = skb->data;
5938 len = skb->len;
5939
5940 dump_data = skb->data;
5941
5942 protocol = (skb->data[12] << 8) | skb->data[13];
5943 if (protocol == ETHER_TYPE_802_1X) {
5944 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
5945 #ifdef DHD_8021X_DUMP
5946 dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5947 #endif /* DHD_8021X_DUMP */
5948 dhd_conf_set_eapol_status(dhdp, dhd_ifname(dhdp, ifidx), dump_data);
5949 }
5950
5951 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
5952 #ifdef DHD_DHCP_DUMP
5953 dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5954 #endif /* DHD_DHCP_DUMP */
5955 #ifdef DHD_ICMP_DUMP
5956 dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
5957 #endif /* DHD_ICMP_DUMP */
5958 }
5959 #ifdef DHD_RX_DUMP
5960 dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE);
5961 #endif /* DHD_RX_DUMP */
5962 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
5963 if (pkt_wake) {
5964 prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
5965 }
5966 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
5967
5968 skb->protocol = eth_type_trans(skb, skb->dev);
5969
5970 if (skb->pkt_type == PACKET_MULTICAST) {
5971 dhd->pub.rx_multicast++;
5972 ifp->stats.multicast++;
5973 }
5974
5975 skb->data = eth;
5976 skb->len = len;
5977
5978 #ifdef WLMEDIA_HTSF
5979 dhd_htsf_addrxts(dhdp, pktbuf);
5980 #endif
5981 #ifdef DBG_PKT_MON
5982 DHD_DBG_PKT_MON_RX(dhdp, skb);
5983 #endif /* DBG_PKT_MON */
5984 #ifdef DHD_PKT_LOGGING
5985 DHD_PKTLOG_RX(dhdp, skb);
5986 #endif /* DHD_PKT_LOGGING */
5987 /* Strip header, count, deliver upward */
5988 skb_pull(skb, ETH_HLEN);
5989
5990 /* Process special event packets and then discard them */
5991 memset(&event, 0, sizeof(event));
5992
5993 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
5994 bcm_event_msg_u_t evu;
5995 int ret_event;
5996 int event_type;
5997
5998 ret_event = wl_host_event_get_data(
5999 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
6000 skb_mac_header(skb),
6001 #else
6002 skb->mac.raw,
6003 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
6004 len, &evu);
6005
6006 if (ret_event != BCME_OK) {
6007 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
6008 __FUNCTION__, ret_event));
6009 #ifdef DHD_USE_STATIC_CTRLBUF
6010 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6011 #else
6012 PKTFREE(dhdp->osh, pktbuf, FALSE);
6013 #endif
6014 continue;
6015 }
6016
6017 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
6018 event_type = ntoh32_ua((void *)&event.event_type);
6019 #ifdef SHOW_LOGTRACE
6020 /* Event msg printing is called from dhd_rx_frame which is in Tasklet
6021 * context in case of PCIe FD, in case of other bus this will be from
6022 * DPC context. If we get bunch of events from Dongle then printing all
6023 * of them from Tasklet/DPC context that too in data path is costly.
6024 * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
6025 * events with type WLC_E_TRACE.
6026 * We'll print this console logs from the WorkQueue context by enqueing SKB
6027 * here and Dequeuing will be done in WorkQueue and will be freed only if
6028 * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
6029 */
6030 if (event_type == WLC_E_TRACE) {
6031 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
6032 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
6033 continue;
6034 }
6035 #endif /* SHOW_LOGTRACE */
6036
6037 ret_event = dhd_wl_host_event(dhd, ifidx,
6038 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
6039 skb_mac_header(skb),
6040 #else
6041 skb->mac.raw,
6042 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
6043 len, &event, &data);
6044
6045 wl_event_to_host_order(&event);
6046 if (!tout_ctrl)
6047 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
6048
6049 #if defined(PNO_SUPPORT)
6050 if (event_type == WLC_E_PFN_NET_FOUND) {
6051 /* enforce custom wake lock to garantee that Kernel not suspended */
6052 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
6053 }
6054 #endif /* PNO_SUPPORT */
6055 if (numpkt != 1) {
6056 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
6057 __FUNCTION__));
6058 }
6059
6060 #ifdef DHD_WAKE_STATUS
6061 if (unlikely(pkt_wake)) {
6062 #ifdef DHD_WAKE_EVENT_STATUS
6063 if (event.event_type < WLC_E_LAST) {
6064 wcp->rc_event[event.event_type]++;
6065 wcp->rcwake++;
6066 pkt_wake = 0;
6067 }
6068 #endif /* DHD_WAKE_EVENT_STATUS */
6069 }
6070 #endif /* DHD_WAKE_STATUS */
6071
6072 /* For delete virtual interface event, wl_host_event returns positive
6073 * i/f index, do not proceed. just free the pkt.
6074 */
6075 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
6076 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
6077 __FUNCTION__));
6078 #ifdef DHD_USE_STATIC_CTRLBUF
6079 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6080 #else
6081 PKTFREE(dhdp->osh, pktbuf, FALSE);
6082 #endif
6083 continue;
6084 }
6085
6086 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
6087 #ifdef DHD_USE_STATIC_CTRLBUF
6088 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6089 #else
6090 PKTFREE(dhdp->osh, pktbuf, FALSE);
6091 #endif /* DHD_USE_STATIC_CTRLBUF */
6092 continue;
6093 #else
6094 /*
6095 * For the event packets, there is a possibility
6096 * of ifidx getting modifed.Thus update the ifp
6097 * once again.
6098 */
6099 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
6100 ifp = dhd->iflist[ifidx];
6101 #ifndef PROP_TXSTATUS_VSDB
6102 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
6103 #else
6104 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
6105 dhd->pub.up))
6106 #endif /* PROP_TXSTATUS_VSDB */
6107 {
6108 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
6109 __FUNCTION__));
6110 #ifdef DHD_USE_STATIC_CTRLBUF
6111 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
6112 #else
6113 PKTFREE(dhdp->osh, pktbuf, FALSE);
6114 #endif
6115 continue;
6116 }
6117 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
6118 } else {
6119 tout_rx = DHD_PACKET_TIMEOUT_MS;
6120
6121 #ifdef PROP_TXSTATUS
6122 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
6123 #endif /* PROP_TXSTATUS */
6124
6125 #ifdef DHD_WAKE_STATUS
6126 if (unlikely(pkt_wake)) {
6127 wcp->rxwake++;
6128 #ifdef DHD_WAKE_RX_STATUS
6129 #define ETHER_ICMP6_HEADER 20
6130 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
6131 #define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
6132 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
6133
6134 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
6135 wcp->rx_arp++;
6136 if (dump_data[0] == 0xFF) { /* Broadcast */
6137 wcp->rx_bcast++;
6138 } else if (dump_data[0] & 0x01) { /* Multicast */
6139 wcp->rx_mcast++;
6140 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
6141 wcp->rx_multi_ipv6++;
6142 if ((skb->len > ETHER_ICMP6_HEADER) &&
6143 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
6144 wcp->rx_icmpv6++;
6145 if (skb->len > ETHER_ICMPV6_TYPE) {
6146 switch (dump_data[ETHER_ICMPV6_TYPE]) {
6147 case NDISC_ROUTER_ADVERTISEMENT:
6148 wcp->rx_icmpv6_ra++;
6149 break;
6150 case NDISC_NEIGHBOUR_ADVERTISEMENT:
6151 wcp->rx_icmpv6_na++;
6152 break;
6153 case NDISC_NEIGHBOUR_SOLICITATION:
6154 wcp->rx_icmpv6_ns++;
6155 break;
6156 }
6157 }
6158 }
6159 } else if (dump_data[2] == 0x5E) {
6160 wcp->rx_multi_ipv4++;
6161 } else {
6162 wcp->rx_multi_other++;
6163 }
6164 } else { /* Unicast */
6165 wcp->rx_ucast++;
6166 }
6167 #undef ETHER_ICMP6_HEADER
6168 #undef ETHER_IPV6_SADDR
6169 #undef ETHER_IPV6_DAADR
6170 #undef ETHER_ICMPV6_TYPE
6171 #endif /* DHD_WAKE_RX_STATUS */
6172 pkt_wake = 0;
6173 }
6174 #endif /* DHD_WAKE_STATUS */
6175 }
6176
6177 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
6178 if (ifp->net)
6179 ifp->net->last_rx = jiffies;
6180 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
6181
6182 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
6183 dhdp->dstats.rx_bytes += skb->len;
6184 dhdp->rx_packets++; /* Local count */
6185 ifp->stats.rx_bytes += skb->len;
6186 ifp->stats.rx_packets++;
6187 }
6188
6189 if (in_interrupt()) {
6190 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6191 __FUNCTION__, __LINE__);
6192 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6193 #if defined(DHD_LB_RXP)
6194 netif_receive_skb(skb);
6195 #else /* !defined(DHD_LB_RXP) */
6196 netif_rx(skb);
6197 #endif /* !defined(DHD_LB_RXP) */
6198 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6199 } else {
6200 if (dhd->rxthread_enabled) {
6201 if (!skbhead)
6202 skbhead = skb;
6203 else
6204 PKTSETNEXT(dhdp->osh, skbprev, skb);
6205 skbprev = skb;
6206 } else {
6207
6208 /* If the receive is not processed inside an ISR,
6209 * the softirqd must be woken explicitly to service
6210 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
6211 * by netif_rx_ni(), but in earlier kernels, we need
6212 * to do it manually.
6213 */
6214 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6215 __FUNCTION__, __LINE__);
6216
6217 #if defined(DHD_LB_RXP)
6218 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6219 netif_receive_skb(skb);
6220 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6221 #else /* !defined(DHD_LB_RXP) */
6222 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
6223 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6224 netif_rx_ni(skb);
6225 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6226 #else
6227 ulong flags;
6228 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6229 netif_rx(skb);
6230 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
6231 local_irq_save(flags);
6232 RAISE_RX_SOFTIRQ();
6233 local_irq_restore(flags);
6234 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
6235 #endif /* !defined(DHD_LB_RXP) */
6236 }
6237 }
6238 }
6239
6240 if (dhd->rxthread_enabled && skbhead)
6241 dhd_sched_rxf(dhdp, skbhead);
6242
6243 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
6244 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
6245 }
6246
/* Event post-processing hook. The Linux port dispatches events from
 * dhd_rx_frame()/wl_host_event instead, so this is intentionally a no-op.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
}
6253
6254 void
6255 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
6256 {
6257 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
6258 struct ether_header *eh;
6259 uint16 type;
6260
6261 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
6262
6263
6264 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
6265 type = ntoh16(eh->ether_type);
6266
6267 if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) {
6268 atomic_dec(&dhd->pend_8021x_cnt);
6269 }
6270
6271 #ifdef PROP_TXSTATUS
6272 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
6273 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
6274 uint datalen = PKTLEN(dhd->pub.osh, txp);
6275 if (ifp != NULL) {
6276 if (success) {
6277 dhd->pub.tx_packets++;
6278 ifp->stats.tx_packets++;
6279 ifp->stats.tx_bytes += datalen;
6280 } else {
6281 ifp->stats.tx_dropped++;
6282 }
6283 }
6284 }
6285 #endif
6286 }
6287
6288 static struct net_device_stats *
6289 dhd_get_stats(struct net_device *net)
6290 {
6291 dhd_info_t *dhd = DHD_DEV_INFO(net);
6292 dhd_if_t *ifp;
6293 int ifidx;
6294
6295 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6296
6297 if (!dhd) {
6298 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
6299 goto error;
6300 }
6301
6302 ifidx = dhd_net2idx(dhd, net);
6303 if (ifidx == DHD_BAD_IF) {
6304 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
6305 goto error;
6306 }
6307
6308 ifp = dhd->iflist[ifidx];
6309
6310 if (!ifp) {
6311 ASSERT(ifp);
6312 DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__));
6313 goto error;
6314 }
6315
6316 if (dhd->pub.up) {
6317 /* Use the protocol to get dongle stats */
6318 dhd_prot_dstats(&dhd->pub);
6319 }
6320 return &ifp->stats;
6321
6322 error:
6323 memset(&net->stats, 0, sizeof(net->stats));
6324 return &net->stats;
6325 }
6326
6327 #ifndef BCMDBUS
6328 static int
6329 dhd_watchdog_thread(void *data)
6330 {
6331 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
6332 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
6333 /* This thread doesn't need any user-level access,
6334 * so get rid of all our resources
6335 */
6336 if (dhd_watchdog_prio > 0) {
6337 struct sched_param param;
6338 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
6339 dhd_watchdog_prio:(MAX_RT_PRIO-1);
6340 setScheduler(current, SCHED_FIFO, &param);
6341 }
6342
6343 while (1) {
6344 if (down_interruptible (&tsk->sema) == 0) {
6345 unsigned long flags;
6346 unsigned long jiffies_at_start = jiffies;
6347 unsigned long time_lapse;
6348 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
6349
6350 SMP_RD_BARRIER_DEPENDS();
6351 if (tsk->terminated) {
6352 break;
6353 }
6354
6355 if (dhd->pub.dongle_reset == FALSE) {
6356 DHD_TIMER(("%s:\n", __FUNCTION__));
6357 dhd_bus_watchdog(&dhd->pub);
6358
6359 #ifdef DHD_TIMESYNC
6360 /* Call the timesync module watchdog */
6361 dhd_timesync_watchdog(&dhd->pub);
6362 #endif /* DHD_TIMESYNC */
6363
6364 DHD_GENERAL_LOCK(&dhd->pub, flags);
6365 /* Count the tick for reference */
6366 dhd->pub.tickcnt++;
6367 #ifdef DHD_L2_FILTER
6368 dhd_l2_filter_watchdog(&dhd->pub);
6369 #endif /* DHD_L2_FILTER */
6370 time_lapse = jiffies - jiffies_at_start;
6371
6372 /* Reschedule the watchdog */
6373 if (dhd->wd_timer_valid) {
6374 mod_timer(&dhd->timer,
6375 jiffies +
6376 msecs_to_jiffies(dhd_watchdog_ms) -
6377 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
6378 }
6379 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
6380 }
6381 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
6382 } else {
6383 break;
6384 }
6385 }
6386
6387 complete_and_exit(&tsk->completed, 0);
6388 }
6389
/*
 * Watchdog timer callback. If a dedicated watchdog thread exists
 * (thr_wdt_ctl.thr_pid >= 0), just wake it and return; otherwise perform
 * the watchdog work inline in timer (softirq) context and re-arm the timer.
 */
static void dhd_watchdog(
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	struct timer_list *t
#else
	ulong data
#endif
)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	dhd_info_t *dhd = from_timer(dhd, t, timer);
#else
	dhd_info_t *dhd = (dhd_info_t *)data;
#endif
	unsigned long flags;

	/* Nothing to do while the dongle is held in reset */
	if (dhd->pub.dongle_reset) {
		return;
	}

	/* Defer to the watchdog thread when one is running */
	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
		up(&dhd->thr_wdt_ctl.sema);
		return;
	}

	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
	/* Call the bus module watchdog */
	dhd_bus_watchdog(&dhd->pub);

#ifdef DHD_TIMESYNC
	/* Call the timesync module watchdog */
	dhd_timesync_watchdog(&dhd->pub);
#endif /* DHD_TIMESYNC */

	DHD_GENERAL_LOCK(&dhd->pub, flags);
	/* Count the tick for reference */
	dhd->pub.tickcnt++;

#ifdef DHD_L2_FILTER
	dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
	/* Reschedule the watchdog */
	if (dhd->wd_timer_valid)
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
}
6436
6437 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * Runtime-PM state thread: woken by the rpm timer (dhd_runtimepm), drives
 * dhd_runtimepm_state() while the interface is up and re-arms rpm_timer,
 * compensating for the time this pass took. Exits on interrupted wait or
 * tsk->terminated.
 */
static int
dhd_rpm_state_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				if (dhd->pub.up) {
					/* Advance the runtime-PM state machine */
					dhd_runtimepm_state(&dhd->pub);
				}

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog */
				if (dhd->rpm_timer_valid) {
					mod_timer(&dhd->rpm_timer,
						jiffies +
						msecs_to_jiffies(dhd_runtimepm_ms) -
						min(msecs_to_jiffies(dhd_runtimepm_ms),
							time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
6481
6482 static void dhd_runtimepm(
6483 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6484 struct timer_list *t
6485 #else
6486 ulong data
6487 #endif
6488 )
6489 {
6490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
6491 dhd_info_t *dhd = from_timer(dhd, t, rpm_timer);
6492 #else
6493 dhd_info_t *dhd = (dhd_info_t *)data;
6494 #endif
6495
6496 if (dhd->pub.dongle_reset) {
6497 return;
6498 }
6499
6500 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
6501 up(&dhd->thr_rpm_ctl.sema);
6502 return;
6503 }
6504 }
6505
/* Disable runtime PM: cancel the rpm timer and force the PCIe bus awake. */
void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
{
	/* A period of 0 cancels the runtime-PM timer */
	dhd_os_runtimepm_timer(dhdp, 0);
	/* Wake the bus so it does not stay suspended with RPM now off */
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
	DHD_ERROR(("DHD Runtime PM Disabled \n"));
}
6512
6513 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
6514 {
6515 if (dhd_get_idletime(dhdp)) {
6516 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
6517 DHD_ERROR(("DHD Runtime PM Enabled \n"));
6518 }
6519 }
6520
6521 #endif /* DHD_PCIE_RUNTIMEPM */
6522
6523
6524 #ifdef ENABLE_ADAPTIVE_SCHED
6525 static void
6526 dhd_sched_policy(int prio)
6527 {
6528 struct sched_param param;
6529 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
6530 param.sched_priority = 0;
6531 setScheduler(current, SCHED_NORMAL, &param);
6532 } else {
6533 if (get_scheduler_policy(current) != SCHED_FIFO) {
6534 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
6535 setScheduler(current, SCHED_FIFO, &param);
6536 }
6537 }
6538 }
6539 #endif /* ENABLE_ADAPTIVE_SCHED */
6540 #ifdef DEBUG_CPU_FREQ
6541 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
6542 {
6543 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
6544 struct cpufreq_freqs *freq = data;
6545 if (dhd) {
6546 if (!dhd->new_freq)
6547 goto exit;
6548 if (val == CPUFREQ_POSTCHANGE) {
6549 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
6550 freq->new, freq->cpu));
6551 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
6552 }
6553 }
6554 exit:
6555 return 0;
6556 }
6557 #endif /* DEBUG_CPU_FREQ */
6558
/*
 * Dedicated DPC thread (used instead of the dhd_dpc tasklet when
 * dhd_dpc_prio >= 0). Each wakeup drains dhd_bus_dpc() until the bus has
 * no more work, then releases the wake lock handed over by dhd_sched_dpc().
 * Exits on interrupted semaphore wait or tsk->terminated.
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_DPC_CPUCORE
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#endif
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		/* Honour a runtime re-pinning request from the config module */
		if (dhd->pub.conf->dpc_cpucore >= 0) {
			printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
			dhd->pub.conf->dpc_cpucore = -1;
		}
		if (!binary_sema_down(tsk)) {
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_dpc_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
#ifdef DEBUG_DPC_THREAD_WATCHDOG
				int resched_cnt = 0;
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				/* Hold off the watchdog while we drain the bus */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
#ifdef DEBUG_DPC_THREAD_WATCHDOG
					resched_cnt++;
					if (resched_cnt > MAX_RESCHED_CNT) {
						DHD_INFO(("%s Calling msleep to"
							"let other processes run. \n",
							__FUNCTION__));
						dhd->pub.dhd_bug_on = true;
						resched_cnt = 0;
						OSL_SLEEP(1);
					}
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Release the wake lock taken by dhd_sched_dpc() */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			} else {
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
6630
/*
 * RX forwarding thread: dequeues skb chains posted by dhd_sched_rxf(),
 * unlinks each packet and hands it to the kernel network stack, then
 * releases the wake lock taken by the poster. Exits on interrupted
 * semaphore wait or tsk->terminated.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		/* Honour a runtime re-pinning request from the config module */
		if (dhd->pub.conf->rxf_cpucore >= 0) {
			printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
			dhd->pub.conf->rxf_cpucore = -1;
		}
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Walk the chain, delivering one packet at a time */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Periodically yield so producers are not starved */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			/* Release the wake lock taken by dhd_sched_rxf() */
			DHD_OS_WAKE_UNLOCK(pub);
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
6711
6712 #ifdef BCMPCIE
/*
 * Re-initialize the load-balancing pending-packet queues when DPC
 * processing is (re)enabled. A no-op unless DHD_LB_RXP/DHD_LB_TXP are
 * compiled in.
 */
void dhd_dpc_enable(dhd_pub_t *dhdp)
{
#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
	dhd_info_t *dhd;

	if (!dhdp || !dhdp->info)
		return;
	dhd = dhdp->info;
#endif /* DHD_LB_RXP || DHD_LB_TXP */

#ifdef DHD_LB_RXP
	/* Lockless init: callers are expected to provide exclusion */
	__skb_queue_head_init(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_TXP
	skb_queue_head_init(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
}
6731 #endif /* BCMPCIE */
6732
6733 #ifdef BCMPCIE
/*
 * Tear down DPC processing: kill the DPC tasklet (when no DPC thread is
 * used), then cancel the load-balancing work items, purge their pending
 * queues and kill the LB tasklets.
 */
void
dhd_dpc_kill(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		return;
	}

	dhd = dhdp->info;

	if (!dhd) {
		return;
	}

	/* A negative pid means the tasklet path is in use, not a thread */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		tasklet_kill(&dhd->tasklet);
		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
	}

#ifdef DHD_LB
#ifdef DHD_LB_RXP
	cancel_work_sync(&dhd->rx_napi_dispatcher_work);
	__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#ifdef DHD_LB_TXP
	cancel_work_sync(&dhd->tx_dispatcher_work);
	skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */

	/* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
	tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#if defined(DHD_LB_TXP)
	tasklet_kill(&dhd->tx_tasklet);
#endif /* DHD_LB_TXP */
#endif /* DHD_LB */
}
6776
6777 void
6778 dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
6779 {
6780 dhd_info_t *dhd;
6781
6782 if (!dhdp) {
6783 return;
6784 }
6785
6786 dhd = dhdp->info;
6787
6788 if (!dhd) {
6789 return;
6790 }
6791
6792 if (dhd->thr_dpc_ctl.thr_pid < 0) {
6793 tasklet_kill(&dhd->tasklet);
6794 }
6795 }
6796 #endif /* BCMPCIE */
6797
6798 static void
6799 dhd_dpc(ulong data)
6800 {
6801 dhd_info_t *dhd;
6802
6803 dhd = (dhd_info_t *)data;
6804
6805 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
6806 * down below , wake lock is set,
6807 * the tasklet is initialized in dhd_attach()
6808 */
6809 /* Call bus dpc unless it indicated down (then clean stop) */
6810 if (dhd->pub.busstate != DHD_BUS_DOWN) {
6811 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
6812 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
6813 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
6814 if (dhd_bus_dpc(dhd->pub.bus)) {
6815 tasklet_schedule(&dhd->tasklet);
6816 }
6817 } else {
6818 dhd_bus_stop(dhd->pub.bus, TRUE);
6819 }
6820 }
6821
6822 void
6823 dhd_sched_dpc(dhd_pub_t *dhdp)
6824 {
6825 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6826
6827 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
6828 DHD_OS_WAKE_LOCK(dhdp);
6829 /* If the semaphore does not get up,
6830 * wake unlock should be done here
6831 */
6832 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
6833 DHD_OS_WAKE_UNLOCK(dhdp);
6834 }
6835 return;
6836 } else {
6837 tasklet_schedule(&dhd->tasklet);
6838 }
6839 }
6840 #endif /* BCMDBUS */
6841
6842 static void
6843 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
6844 {
6845 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6846 #ifdef RXF_DEQUEUE_ON_BUSY
6847 int ret = BCME_OK;
6848 int retry = 2;
6849 #endif /* RXF_DEQUEUE_ON_BUSY */
6850
6851 DHD_OS_WAKE_LOCK(dhdp);
6852
6853 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
6854 #ifdef RXF_DEQUEUE_ON_BUSY
6855 do {
6856 ret = dhd_rxf_enqueue(dhdp, skb);
6857 if (ret == BCME_OK || ret == BCME_ERROR)
6858 break;
6859 else
6860 OSL_SLEEP(50); /* waiting for dequeueing */
6861 } while (retry-- > 0);
6862
6863 if (retry <= 0 && ret == BCME_BUSY) {
6864 void *skbp = skb;
6865
6866 while (skbp) {
6867 void *skbnext = PKTNEXT(dhdp->osh, skbp);
6868 PKTSETNEXT(dhdp->osh, skbp, NULL);
6869 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
6870 __FUNCTION__, __LINE__);
6871 netif_rx_ni(skbp);
6872 skbp = skbnext;
6873 }
6874 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
6875 } else {
6876 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6877 up(&dhd->thr_rxf_ctl.sema);
6878 }
6879 }
6880 #else /* RXF_DEQUEUE_ON_BUSY */
6881 do {
6882 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
6883 break;
6884 } while (1);
6885 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
6886 up(&dhd->thr_rxf_ctl.sema);
6887 }
6888 return;
6889 #endif /* RXF_DEQUEUE_ON_BUSY */
6890 }
6891
6892 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
6893 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
6894
6895 #ifdef TOE
6896 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
6897 static int
6898 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
6899 {
6900 char buf[32];
6901 int ret;
6902
6903 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6904
6905 if (ret < 0) {
6906 if (ret == -EIO) {
6907 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
6908 ifidx)));
6909 return -EOPNOTSUPP;
6910 }
6911
6912 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6913 return ret;
6914 }
6915
6916 memcpy(toe_ol, buf, sizeof(uint32));
6917 return 0;
6918 }
6919
6920 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
6921 static int
6922 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
6923 {
6924 int toe, ret;
6925
6926 /* Set toe_ol as requested */
6927 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
6928 if (ret < 0) {
6929 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
6930 dhd_ifname(&dhd->pub, ifidx), ret));
6931 return ret;
6932 }
6933
6934 /* Enable toe globally only if any components are enabled. */
6935 toe = (toe_ol != 0);
6936 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
6937 if (ret < 0) {
6938 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
6939 return ret;
6940 }
6941
6942 return 0;
6943 }
6944 #endif /* TOE */
6945
6946 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
6947 void dhd_set_scb_probe(dhd_pub_t *dhd)
6948 {
6949 wl_scb_probe_t scb_probe;
6950 int ret;
6951
6952 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6953 return;
6954 }
6955
6956 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0,
6957 (char *)&scb_probe, sizeof(scb_probe), FALSE);
6958 if (ret < 0) {
6959 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
6960 }
6961
6962 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
6963
6964 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(scb_probe),
6965 NULL, 0, TRUE);
6966 if (ret < 0) {
6967 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
6968 return;
6969 }
6970 }
6971 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
6972
6973 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
6974 static void
6975 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
6976 {
6977 dhd_info_t *dhd = DHD_DEV_INFO(net);
6978
6979 snprintf(info->driver, sizeof(info->driver), "wl");
6980 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
6981 }
6982
/* Minimal ethtool vtable: only driver-info queries are supported. */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
6986 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
6987
6988
6989 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * Legacy SIOCETHTOOL handler. 'uaddr' is a user-space pointer to an
 * ethtool request whose first word is the command. Supports GDRVINFO and,
 * when TOE is compiled in, get/set of rx/tx checksum offload components.
 * Returns 0 on success or a negative errno.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		/* drvname is sized to info.driver, then explicitly terminated */
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
			(int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
7099 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
7100
/*
 * Decide whether an I/O error indicates a hung dongle and, if so, record
 * a hang reason and send the HANG event to user space. Returns TRUE when
 * a hang was reported, FALSE otherwise (including when dhdp is NULL or
 * the interface is down).
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	if (!dhdp->up)
		return FALSE;

#if !defined(BCMPCIE) && !defined(BCMDBUS)
	/* A negative DPC pid suggests the driver is unloading; don't report */
	if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif /* !BCMPCIE && !BCMDBUS */

	/* Hang criteria: bus timeout/remote-IO error, or the bus went down
	 * without an explicit dongle reset request.
	 */
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
#ifdef BCMPCIE
		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
#else
		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
#endif /* BCMPCIE */
		/* Only set a reason if none was recorded yet */
		if (dhdp->hang_reason == 0) {
			if (dhdp->dongle_trap_occured) {
				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
			} else if (dhdp->d3ackcnt_timeout) {
				dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
			} else {
				dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
			}
		}
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
7144
7145 #ifdef WL_MONITOR
7146 bool
7147 dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
7148 {
7149 return (dhd->info->monitor_type != 0);
7150 }
7151
7152 void
7153 dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
7154 {
7155 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7156 #ifdef HOST_RADIOTAP_CONV
7157 uint16 len = 0, offset = 0;
7158 monitor_pkt_info_t pkt_info;
7159 memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker));
7160 memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t));
7161
7162 if (!dhd->monitor_skb) {
7163 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
7164 return;
7165 }
7166
7167 len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt),
7168 PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset);
7169
7170 if (dhd->monitor_type && dhd->monitor_dev)
7171 dhd->monitor_skb->dev = dhd->monitor_dev;
7172 else {
7173 PKTFREE(dhdp->osh, pkt, FALSE);
7174 dev_kfree_skb(dhd->monitor_skb);
7175 return;
7176 }
7177
7178 PKTFREE(dhdp->osh, pkt, FALSE);
7179
7180 if (!len) {
7181 return;
7182 }
7183
7184 skb_put(dhd->monitor_skb, len);
7185 skb_pull(dhd->monitor_skb, offset);
7186
7187 dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7188 #else
7189 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
7190 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
7191 switch (amsdu_flag) {
7192 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
7193 default:
7194 if (!dhd->monitor_skb) {
7195 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
7196 return;
7197 }
7198
7199 if (dhd->monitor_type && dhd->monitor_dev)
7200 dhd->monitor_skb->dev = dhd->monitor_dev;
7201 else {
7202 PKTFREE(dhdp->osh, pkt, FALSE);
7203 dhd->monitor_skb = NULL;
7204 return;
7205 }
7206
7207 dhd->monitor_skb->protocol =
7208 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7209 dhd->monitor_len = 0;
7210 break;
7211 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
7212 if (!dhd->monitor_skb) {
7213 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
7214 return;
7215 dhd->monitor_len = 0;
7216 }
7217 if (dhd->monitor_type && dhd->monitor_dev)
7218 dhd->monitor_skb->dev = dhd->monitor_dev;
7219 else {
7220 PKTFREE(dhdp->osh, pkt, FALSE);
7221 dev_kfree_skb(dhd->monitor_skb);
7222 return;
7223 }
7224 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
7225 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7226
7227 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
7228 PKTFREE(dhdp->osh, pkt, FALSE);
7229 return;
7230 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
7231 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
7232 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7233 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
7234
7235 PKTFREE(dhdp->osh, pkt, FALSE);
7236 return;
7237 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
7238 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
7239 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
7240 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
7241
7242 PKTFREE(dhdp->osh, pkt, FALSE);
7243 skb_put(dhd->monitor_skb, dhd->monitor_len);
7244 dhd->monitor_skb->protocol =
7245 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
7246 dhd->monitor_len = 0;
7247 break;
7248 }
7249
7250 #endif /* HOST_RADIOTAP_CONV */
7251 if (in_interrupt()) {
7252 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
7253 __FUNCTION__, __LINE__);
7254 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7255 netif_rx(dhd->monitor_skb);
7256 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7257 } else {
7258 /* If the receive is not processed inside an ISR,
7259 * the softirqd must be woken explicitly to service
7260 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
7261 * by netif_rx_ni(), but in earlier kernels, we need
7262 * to do it manually.
7263 */
7264 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
7265 __FUNCTION__, __LINE__);
7266
7267 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7268 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7269 netif_rx_ni(dhd->monitor_skb);
7270 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7271 #else
7272 ulong flags;
7273 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7274 netif_rx(dhd->monitor_skb);
7275 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
7276 local_irq_save(flags);
7277 RAISE_RX_SOFTIRQ();
7278 local_irq_restore(flags);
7279 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7280 }
7281
7282 dhd->monitor_skb = NULL;
7283 }
7284
/* Private area hung off the monitor net_device (allocated by
 * alloc_etherdev(DHD_MON_DEV_PRIV_SIZE)); currently holds only the
 * per-device statistics returned by dhd_monitor_get_stats().
 */
typedef struct dhd_mon_dev_priv {
	struct net_device_stats stats;
} dhd_mon_dev_priv_t;

#define DHD_MON_DEV_PRIV_SIZE		(sizeof(dhd_mon_dev_priv_t))
#define DHD_MON_DEV_PRIV(dev)		((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
#define DHD_MON_DEV_STATS(dev)		(((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
7292
7293 static int
7294 dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
7295 {
7296 PKTFREE(NULL, skb, FALSE);
7297 return 0;
7298 }
7299
/* ioctl entry point for the monitor interface: there is nothing to
 * configure, so every request is accepted as a no-op.
 */
static int
dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return 0;
}
7305
7306 static struct net_device_stats*
7307 dhd_monitor_get_stats(struct net_device *dev)
7308 {
7309 return &DHD_MON_DEV_STATS(dev);
7310 }
7311
7312 static const struct net_device_ops netdev_monitor_ops =
7313 {
7314 .ndo_start_xmit = dhd_monitor_start,
7315 .ndo_get_stats = dhd_monitor_get_stats,
7316 .ndo_do_ioctl = dhd_monitor_ioctl
7317 };
7318
7319 static void
7320 dhd_add_monitor_if(void *handle, void *event_info, u8 event)
7321 {
7322 dhd_info_t *dhd = handle;
7323 struct net_device *dev;
7324 char *devname;
7325
7326 if (event != DHD_WQ_WORK_IF_ADD) {
7327 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7328 return;
7329 }
7330
7331 if (!dhd) {
7332 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7333 return;
7334 }
7335
7336 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
7337 if (!dev) {
7338 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
7339 return;
7340 }
7341
7342 devname = "radiotap";
7343
7344 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
7345
7346 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
7347 #define ARPHRD_IEEE80211_PRISM 802
7348 #endif
7349
7350 #ifndef ARPHRD_IEEE80211_RADIOTAP
7351 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
7352 #endif /* ARPHRD_IEEE80211_RADIOTAP */
7353
7354 dev->type = ARPHRD_IEEE80211_RADIOTAP;
7355
7356 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7357 dev->hard_start_xmit = dhd_monitor_start;
7358 dev->do_ioctl = dhd_monitor_ioctl;
7359 dev->get_stats = dhd_monitor_get_stats;
7360 #else
7361 dev->netdev_ops = &netdev_monitor_ops;
7362 #endif
7363
7364 if (register_netdev(dev)) {
7365 DHD_ERROR(("%s, register_netdev failed for %s\n",
7366 __FUNCTION__, dev->name));
7367 free_netdev(dev);
7368 }
7369
7370 bcmwifi_monitor_create(&dhd->monitor_info);
7371 dhd->monitor_dev = dev;
7372 }
7373
7374 static void
7375 dhd_del_monitor_if(void *handle, void *event_info, u8 event)
7376 {
7377 dhd_info_t *dhd = handle;
7378
7379 if (event != DHD_WQ_WORK_IF_DEL) {
7380 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7381 return;
7382 }
7383
7384 if (!dhd) {
7385 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7386 return;
7387 }
7388
7389 if (dhd->monitor_dev) {
7390 unregister_netdev(dhd->monitor_dev);
7391
7392 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
7393 MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
7394 MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
7395 #else
7396 free_netdev(dhd->monitor_dev);
7397 #endif /* 2.6.24 */
7398
7399 dhd->monitor_dev = NULL;
7400 }
7401
7402 if (dhd->monitor_info) {
7403 bcmwifi_monitor_delete(dhd->monitor_info);
7404 dhd->monitor_info = NULL;
7405 }
7406 }
7407
7408 static void
7409 dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val)
7410 {
7411 dhd_info_t *info = dhd->info;
7412
7413 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
7414 if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) {
7415 DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__));
7416 return;
7417 }
7418
7419 /* Delete monitor */
7420 if (!val) {
7421 info->monitor_type = val;
7422 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL,
7423 dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
7424 return;
7425 }
7426
7427 /* Add monitor */
7428 info->monitor_type = val;
7429 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD,
7430 dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
7431 }
7432 #endif /* WL_MONITOR */
7433
/* Central ioctl dispatcher for a DHD interface. Routes the request either
 * to the local DHD iovar handler (ioc->driver == DHD_IOCTL_MAGIC) or down
 * to the dongle via dhd_wl_ioctl(), after enforcing bus-state checks and
 * serializing security-sensitive commands against pending 802.1x frames.
 * Returns a BCME_xxx error code; also feeds the result to dhd_check_hang().
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

#ifdef REPORT_FATAL_TIMEOUTS
	/* Track the configured security mode so join-failure detection knows
	 * whether a WLC_E_PSK_SUP event is expected after association.
	 */
	if (ioc->cmd == WLC_SET_WPA_AUTH) {
		int wpa_auth;

		wpa_auth = *((int *)ioc->buf);
		DHD_INFO(("wpa_auth:%d\n", wpa_auth));
		if (wpa_auth != WPA_AUTH_DISABLED) {
			/* If AP is with security then enable WLC_E_PSK_SUP event checking */
			dhd_set_join_error(pub, WLC_WPA_MASK);
		} else {
			/* If AP is with open then disable WLC_E_PSK_SUP event checking */
			dhd_clear_join_error(pub, WLC_WPA_MASK);
		}
	}

	if (ioc->cmd == WLC_SET_AUTH) {
		int auth;
		auth = *((int *)ioc->buf);
		DHD_INFO(("Auth:%d\n", auth));

		if (auth != WL_AUTH_OPEN_SYSTEM) {
			/* If AP is with security then enable WLC_E_PSK_SUP event checking */
			dhd_set_join_error(pub, WLC_WPA_MASK);
		} else {
			/* If AP is with open then disable WLC_E_PSK_SUP event checking */
			dhd_clear_join_error(pub, WLC_WPA_MASK);
		}
	}
#endif /* REPORT_FATAL_TIMEOUTS */
	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		/* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
		if (data_buf)
			buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
	if (data_buf)
		buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);

#ifndef BCMDBUS
	/* send to dongle (must be up, and wl). */
	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
		/* Deferred firmware download: try to bring the bus up on
		 * demand unless the module is unloading or the dongle trapped.
		 */
		if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
			int ret;
			if (atomic_read(&exit_in_progress)) {
				DHD_ERROR(("%s module exit in progress\n", __func__));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
			ret = dhd_bus_start(pub);
			if (ret != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
		} else {
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}
#endif /* !BCMDBUS */

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
		(ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
		strncmp("wsec_key", data_buf, 9) == 0) ||
		(ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
		strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
		ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/* short cut wl ioctl calls here */
		if (strcmp("htsf", data_buf) == 0) {
			/* NOTE(review): 'dhd' is not declared in this function;
			 * this WLMEDIA_HTSF path likely does not compile - verify.
			 */
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* "rpc_*" iovars are only meaningful with full-dongle aggregation */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}
	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

#ifdef WL_MONITOR
	/* Intercept monitor ioctl here, add/del monitor if */
	if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
		dhd_set_monitor(pub, ifidx, *(int32*)data_buf);
	}
#endif

#ifdef REPORT_FATAL_TIMEOUTS
	/* Arm watchdog timers so stalled scan/join operations get reported */
	if (ioc->cmd == WLC_SCAN && bcmerror == 0) {
		dhd_start_scan_timer(pub);
	}
	if (ioc->cmd == WLC_SET_SSID && bcmerror == 0) {
		dhd_start_join_timer(pub);
	}
#endif /* REPORT_FATAL_TIMEOUTS */

done:
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
7615
/* net_device ioctl entry point. Dispatches wireless-extensions, ethtool
 * and Android private commands directly; for SIOCDEVPRIVATE it copies the
 * wl_ioctl_t header (with 32-bit compat handling) and the user payload
 * into a bounce buffer, runs dhd_ioctl_process(), and copies the result
 * back. Returns an OSL_ERROR()-converted errno.
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int bcmerror = 0;
	int ifidx;
	int ret;
	void *local_buf = NULL;		/* kernel bounce buffer for ioc.buf */
	void __user *ioc_buf_user = NULL;	/* saved userspace ioc.buf */
	u16 buflen = 0;

	if (atomic_read(&exit_in_progress)) {
		DHD_ERROR(("%s module exit in progress\n", __func__));
		bcmerror = BCME_DONGLE_DOWN;
		return OSL_ERROR(bcmerror);
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return OSL_ERROR(BCME_NOTUP);
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -1;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	/* Android private commands arrive as SIOCDEVPRIVATE+1 */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}

	if (cmd != SIOCDEVPRIVATE) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -EOPNOTSUPP;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
	if (in_compat_syscall())
#else
	if (is_compat_task())
#endif
	{
		/* 32-bit userspace on 64-bit kernel: the on-wire layout is
		 * compat_wl_ioctl_t unless WLC_SPEC_FLAG marks a native-layout
		 * request.
		 */
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
		ioc.cmd = compat_ioc.cmd;
		if (ioc.cmd & WLC_SPEC_FLAG) {
			memset(&ioc, 0, sizeof(ioc));
			/* Copy the ioc control structure part of ioctl request */
			if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
				bcmerror = BCME_BADADDR;
				goto done;
			}
			ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */

			/* To differentiate between wl and dhd read 4 more byes */
			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
				sizeof(uint)) != 0)) {
				bcmerror = BCME_BADADDR;
				goto done;
			}

		} else { /* ioc.cmd & WLC_SPEC_FLAG */
			/* Widen the compat structure field by field */
			ioc.buf = compat_ptr(compat_ioc.buf);
			ioc.len = compat_ioc.len;
			ioc.set = compat_ioc.set;
			ioc.used = compat_ioc.used;
			ioc.needed = compat_ioc.needed;
			/* To differentiate between wl and dhd read 4 more byes */
			if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
				sizeof(uint)) != 0)) {
				bcmerror = BCME_BADADDR;
				goto done;
			}
		} /* ioc.cmd & WLC_SPEC_FLAG */
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
#ifdef CONFIG_COMPAT
		ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/
#endif

		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
	}
	/*
	if (!capable(CAP_NET_ADMIN)) {
		bcmerror = BCME_EPERM;
		goto done;
	}
	*/
	/* Take backup of ioc.buf and restore later */
	ioc_buf_user = ioc.buf;

	if (ioc.len > 0) {
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			bcmerror = BCME_NOMEM;
			goto done;
		}

		/* copy_from_user may sleep/fault: drop the perimeter lock */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			bcmerror = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		/* Extra byte reserved above guarantees NUL termination for
		 * iovar-name string parsing downstream.
		 */
		*((char *)local_buf + buflen) = '\0';

		/* For some platforms accessing userspace memory
		 * of ioc.buf is causing kernel panic, so to avoid that
		 * make ioc.buf pointing to kernel space memory local_buf
		 */
		ioc.buf = local_buf;
	}

	/* Skip all the non DHD iovars (wl iovars) after f/w hang */
	if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
		DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	/* Restore back userspace pointer to ioc.buf */
	ioc.buf = ioc_buf_user;

	if (!bcmerror && buflen && local_buf && ioc.buf) {
		/* copy_to_user may sleep/fault: drop the perimeter lock */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			bcmerror = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(bcmerror);
}
7814
7815
7816 #ifdef FIX_CPU_MIN_CLOCK
7817 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
7818 {
7819 if (dhd) {
7820 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7821 mutex_init(&dhd->cpufreq_fix);
7822 #endif
7823 dhd->cpufreq_fix_status = FALSE;
7824 }
7825 return 0;
7826 }
7827
7828 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
7829 {
7830 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7831 mutex_lock(&dhd->cpufreq_fix);
7832 #endif
7833 if (dhd && !dhd->cpufreq_fix_status) {
7834 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
7835 #ifdef FIX_BUS_MIN_CLOCK
7836 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
7837 #endif /* FIX_BUS_MIN_CLOCK */
7838 DHD_ERROR(("pm_qos_add_requests called\n"));
7839
7840 dhd->cpufreq_fix_status = TRUE;
7841 }
7842 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7843 mutex_unlock(&dhd->cpufreq_fix);
7844 #endif
7845 }
7846
7847 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
7848 {
7849 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7850 mutex_lock(&dhd ->cpufreq_fix);
7851 #endif
7852 if (dhd && dhd->cpufreq_fix_status != TRUE) {
7853 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7854 mutex_unlock(&dhd->cpufreq_fix);
7855 #endif
7856 return;
7857 }
7858
7859 pm_qos_remove_request(&dhd->dhd_cpu_qos);
7860 #ifdef FIX_BUS_MIN_CLOCK
7861 pm_qos_remove_request(&dhd->dhd_bus_qos);
7862 #endif /* FIX_BUS_MIN_CLOCK */
7863 DHD_ERROR(("pm_qos_add_requests called\n"));
7864
7865 dhd->cpufreq_fix_status = FALSE;
7866 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7867 mutex_unlock(&dhd->cpufreq_fix);
7868 #endif
7869 }
7870 #endif /* FIX_CPU_MIN_CLOCK */
7871
7872 #if defined(BT_OVER_SDIO)
7873
7874 void
7875 dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
7876 {
7877 dhdp->info->bus_user_count++;
7878 }
7879
7880 void
7881 dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
7882 {
7883 dhdp->info->bus_user_count--;
7884 }
7885
7886 /* Return values:
7887 * Success: Returns 0
7888 * Failure: Returns -1 or errono code
7889 */
7890 int
7891 dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
7892 {
7893 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
7894 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
7895 int ret = 0;
7896
7897 mutex_lock(&dhd->bus_user_lock);
7898 ++dhd->bus_user_count;
7899 if (dhd->bus_user_count < 0) {
7900 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
7901 ret = -1;
7902 goto exit;
7903 }
7904
7905 if (dhd->bus_user_count == 1) {
7906
7907 dhd->pub.hang_was_sent = 0;
7908
7909 /* First user, turn on WL_REG, start the bus */
7910 DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__));
7911
7912 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
7913 /* Enable F1 */
7914 ret = dhd_bus_resume(dhdp, 0);
7915 if (ret) {
7916 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
7917 __FUNCTION__, ret));
7918 goto exit;
7919 }
7920 }
7921
7922 dhd_update_fw_nv_path(dhd);
7923 /* update firmware and nvram path to sdio bus */
7924 dhd_bus_update_fw_nv_path(dhd->pub.bus,
7925 dhd->fw_path, dhd->nv_path);
7926 /* download the firmware, Enable F2 */
7927 /* TODO: Should be done only in case of FW switch */
7928 ret = dhd_bus_devreset(dhdp, FALSE);
7929 dhd_bus_resume(dhdp, 1);
7930 if (!ret) {
7931 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
7932 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
7933 ret = -EFAULT;
7934 }
7935 } else {
7936 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
7937 }
7938 } else {
7939 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
7940 __FUNCTION__, dhd->bus_user_count));
7941 }
7942 exit:
7943 mutex_unlock(&dhd->bus_user_lock);
7944 return ret;
7945 }
7946 EXPORT_SYMBOL(dhd_bus_get);
7947
/* Release a reference on the shared WLAN/BT SDIO bus. The last user tears
 * down WLFC/PNO/RTT state, resets the dongle and powers the chip off.
 *
 * Return values:
 * Success: Returns 0
 * Failure: Returns -1 or errno code
 */
int
dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	int ret = 0;
	BCM_REFERENCE(owner);

	mutex_lock(&dhd->bus_user_lock);
	--dhd->bus_user_count;
	/* Underflow means unbalanced get/put; clamp and report */
	if (dhd->bus_user_count < 0) {
		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
		dhd->bus_user_count = 0;
		ret = -1;
		goto exit;
	}

	if (dhd->bus_user_count == 0) {
		/* Last user, stop the bus and turn Off WL_REG */
		DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
			__FUNCTION__));
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled) {
			dhd_wlfc_deinit(&dhd->pub);
		}
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
		if (dhd->pub.pno_state) {
			dhd_pno_deinit(&dhd->pub);
		}
#endif /* PNO_SUPPORT */
#ifdef RTT_SUPPORT
		if (dhd->pub.rtt_state) {
			dhd_rtt_deinit(&dhd->pub);
		}
#endif /* RTT_SUPPORT */
		/* Put the dongle in reset, then suspend the bus and cut power */
		ret = dhd_bus_devreset(dhdp, TRUE);
		if (!ret) {
			dhd_bus_suspend(dhdp);
			wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	} else {
		DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
			__FUNCTION__, dhd->bus_user_count));
	}
exit:
	mutex_unlock(&dhd->bus_user_lock);
	return ret;
}
EXPORT_SYMBOL(dhd_bus_put);
8002
8003 int
8004 dhd_net_bus_get(struct net_device *dev)
8005 {
8006 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8007 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
8008 }
8009
8010 int
8011 dhd_net_bus_put(struct net_device *dev)
8012 {
8013 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8014 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
8015 }
8016
8017 /*
8018 * Function to enable the Bus Clock
8019 * Returns BCME_OK on success and BCME_xxx on failure
8020 *
8021 * This function is not callable from non-sleepable context
8022 */
8023 int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
8024 {
8025 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
8026
8027 int ret;
8028
8029 dhd_os_sdlock(dhdp);
8030 /*
8031 * The second argument is TRUE, that means, we expect
8032 * the function to "wait" until the clocks are really
8033 * available
8034 */
8035 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
8036 dhd_os_sdunlock(dhdp);
8037
8038 return ret;
8039 }
8040 EXPORT_SYMBOL(dhd_bus_clk_enable);
8041
8042 /*
8043 * Function to disable the Bus Clock
8044 * Returns BCME_OK on success and BCME_xxx on failure
8045 *
8046 * This function is not callable from non-sleepable context
8047 */
8048 int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
8049 {
8050 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
8051
8052 int ret;
8053
8054 dhd_os_sdlock(dhdp);
8055 /*
8056 * The second argument is TRUE, that means, we expect
8057 * the function to "wait" until the clocks are really
8058 * disabled
8059 */
8060 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
8061 dhd_os_sdunlock(dhdp);
8062
8063 return ret;
8064 }
8065 EXPORT_SYMBOL(dhd_bus_clk_disable);
8066
8067 /*
8068 * Function to reset bt_use_count counter to zero.
8069 *
8070 * This function is not callable from non-sleepable context
8071 */
8072 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
8073 {
8074 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
8075
8076 /* take the lock and reset bt use count */
8077 dhd_os_sdlock(dhdp);
8078 dhdsdio_reset_bt_use_count(dhdp->bus);
8079 dhd_os_sdunlock(dhdp);
8080 }
8081 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
8082
8083 #endif /* BT_OVER_SDIO */
8084
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/* Toggle firmware deepsleep. flag==1: disable MPC and enter deepsleep;
 * flag==0: retry up to MAX_TRY_CNT times to leave deepsleep (verifying
 * via a "deepsleep" GET each round), then re-enable MPC. Always returns 0.
 */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhdp = &dhd->pub;

	switch (flag) {
		case 1 : /* Deepsleep on */
			DHD_ERROR(("dhd_deepsleep: ON\n"));
			/* give some time to sysioc_work before deepsleep */
			OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
			/* Disable MPC (MPC would fight deepsleep control) */
			powervar = 0;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);

			/* Enable Deepsleep */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;

		case 0: /* Deepsleep Off */
			DHD_ERROR(("dhd_deepsleep: OFF\n"));

			/* Disable Deepsleep; retry until the readback confirms
			 * it is off or MAX_TRY_CNT attempts are exhausted.
			 */
			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
				powervar = 0;
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0);

				/* Rebuild the iovar buffer (the SET above
				 * consumed it), then read the state back.
				 */
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0)) < 0) {
					DHD_ERROR(("the error of dhd deepsleep status"
						" ret value :%d\n", ret));
				} else {
					if (!(*(int *)iovbuf)) {
						DHD_ERROR(("deepsleep mode is 0,"
							" count: %d\n", cnt));
						break;
					}
				}
			}

			/* Enable MPC */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;
	}

	return 0;
}
8156
static int
dhd_stop(struct net_device *net)
{
	/*
	 * ndo_stop handler: bring the interface down.
	 * Stops the TX queue, tears down cfg80211 state and (for the primary
	 * interface) removes all virtual interfaces and notifiers, releases
	 * SHOW_LOGTRACE string buffers, stops the protocol layer and, for
	 * built-in type drivers, powers WiFi off.  Always returns 0.
	 */
	int ifidx = 0;
#ifdef WL_CFG80211
	unsigned long flags = 0;
#endif /* WL_CFG80211 */
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	/* Reset ioctl timeout counters so the next bring-up starts clean */
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	/* Nothing to tear down if the interface never came up */
	if (dhd->pub.up == 0) {
		goto exit;
	}
#if defined(DHD_HANG_SEND_UP_TEST)
	if (dhd->pub.req_hang_type) {
		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
			__FUNCTION__, dhd->pub.req_hang_type));
		dhd->pub.req_hang_type = 0;
	}
#endif /* DHD_HANG_SEND_UP_TEST */

	dhd_if_flush_sta(DHD_DEV_IFP(net));

	/* Disable Runtime PM before interface down */
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	/* up_lock serializes the up flag against cfg80211 readers */
#ifdef WL_CFG80211
	spin_lock_irqsave(&dhd->pub.up_lock, flags);
	dhd->pub.up = 0;
	spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
#else
	dhd->pub.up = 0;
#endif /* WL_CFG80211 */

#ifdef WL_CFG80211
	/* Primary-interface-only teardown */
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(net);

		ifp = dhd->iflist[0];
		ASSERT(ifp && ifp->net);
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;
#ifdef WL_CFG80211_P2P_DEV_IF
				wl_cfg80211_del_p2p_wdev(net);
#endif /* WL_CFG80211_P2P_DEV_IF */

				/* Remove every virtual interface (slot 0 is the primary) */
				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}
#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
#if 0
			// terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
			cancel_work_sync(dhd->dhd_deferred_wq);
#endif

#ifdef SHOW_LOGTRACE
			/* Wait till event_log_dispatcher_work finishes */
			cancel_work_sync(&dhd->event_log_dispatcher_work);
#endif /* SHOW_LOGTRACE */

			/* Drop any packets still parked in the load-balancing queues */
#if defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
			skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
		}

		argos_register_notifier_deinit();
#ifdef DHDTCPACK_SUPPRESS
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
#if defined(DHD_LB_RXP)
		/* Disable and delete NAPI only if it was attached to this netdev */
		if (ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB_RXP */
	}
#endif /* WL_CFG80211 */

	DHD_SSSR_DUMP_DEINIT(&dhd->pub);

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
#ifdef SHOW_LOGTRACE
	/* For built-in type drivers, free the event-log format string buffers
	 * that dhd_open() loaded from the filesystem.
	 */
	if (!dhd_download_fw_on_driverload) {
		/* Release the skbs from queue for WLC_E_TRACE event */
		dhd_event_logtrace_flush_queue(&dhd->pub);
		if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
			if (dhd->event_data.fmts) {
				MFREE(dhd->pub.osh, dhd->event_data.fmts,
					dhd->event_data.fmts_size);
				dhd->event_data.fmts = NULL;
			}
			if (dhd->event_data.raw_fmts) {
				MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
					dhd->event_data.raw_fmts_size);
				dhd->event_data.raw_fmts = NULL;
			}
			if (dhd->event_data.raw_sstr) {
				MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
					dhd->event_data.raw_sstr_size);
				dhd->event_data.raw_sstr = NULL;
			}
			if (dhd->event_data.rom_raw_sstr) {
				MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
					dhd->event_data.rom_raw_sstr_size);
				dhd->event_data.rom_raw_sstr = NULL;
			}
			dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
		}
	}
#endif /* SHOW_LOGTRACE */
#ifdef APF
	dhd_dev_apf_delete_filter(net);
#endif /* APF */

	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	/* NOTE: the if/else below straddles the BT_OVER_SDIO preprocessor
	 * branches: with BT_OVER_SDIO defined there is no else clause at all;
	 * without it, the else handles the deepsleep case.  Keep the brace
	 * placement exactly as-is when editing.
	 */
	if (ifidx == 0 && !dhd_download_fw_on_driverload) {
#if defined(BT_OVER_SDIO)
		dhd_bus_put(&dhd->pub, WLAN_MODULE);
		wl_android_set_wifi_on_flag(FALSE);
#else
		wl_android_wifi_off(net, TRUE);
#ifdef WL_EXT_IAPSTA
		wl_ext_iapsta_dettach_netdev();
#endif
	} else {
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
#endif /* BT_OVER_SDIO */
	}
	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

#ifdef BCMDBGFS
	dhd_dbgfs_remove();
#endif

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}
	printf("%s: Exit\n", __FUNCTION__);

	return 0;
}
8368
8369 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
8370 extern bool g_first_broadcast_scan;
8371 #endif
8372
#ifdef WL11U
/*
 * Enable 802.11u interworking (hotspot) support in the dongle via the
 * "interworking" iovar on the primary interface.
 *
 * @param dhd  dongle host driver public context
 * @return BCME_OK on success, negative BCME_* error code on failure
 */
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	uint32 enable = true;
	int ret = BCME_OK;

	ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
	if (ret < 0) {
		/* fixed typo in log message ("enableing") */
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	return ret;
}
#endif /* WL11U */
8387
static int
dhd_open(struct net_device *net)
{
	/*
	 * ndo_open handler: bring the interface up.
	 * For the primary interface (ifidx 0) this powers WiFi on (built-in
	 * type drivers), brings up the bus, syncs with the dongle, sets up
	 * NAPI/load-balancing queues, brings up cfg80211 and registers the
	 * inet/inet6 notifiers.  Returns 0 on success, negative on failure
	 * (on failure dhd_stop() is invoked to unwind).
	 */
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
#ifdef BCM_FD_AGGR
	char iovbuf[WLC_IOCTL_SMLEN];
	dbus_config_t config;
	uint32 agglimit = 0;
	uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
#endif /* BCM_FD_AGGR */
	int ifidx;
	int32 ret = 0;
#if defined(OOB_INTR_ONLY)
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
	wifi_adapter_info_t *adapter = NULL;
#endif
#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
	int bytes_written = 0;
	struct dhd_conf *conf;
#endif

	/* Built-in type driver: refuse open until module init has completed */
	if (!dhd_download_fw_on_driverload) {
		if (!dhd_driver_init_done) {
			DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
			return -1;
		}
	}

	printf("%s: Enter %p\n", __FUNCTION__, net);
	DHD_MUTEX_LOCK();
	/* Init wakelock */
	if (!dhd_download_fw_on_driverload) {
		if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
			DHD_OS_WAKE_LOCK_INIT(dhd);
			dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
		}
#ifdef SHOW_LOGTRACE
		skb_queue_head_init(&dhd->evt_trace_queue);

		/* Load event-log format strings from the filesystem once;
		 * freed again in dhd_stop() for built-in type drivers.
		 */
		if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
			ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
			if (ret == BCME_OK) {
				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
					st_str_file_path, map_file_path);
				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
					rom_st_str_file_path, rom_map_file_path);
				dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
			}
		}
#endif /* SHOW_LOGTRACE */
	}

#if defined(PREVENT_REOPEN_DURING_HANG)
	/* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
	if (dhd->pub.hang_was_sent == 1) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		/* Force to bring down WLAN interface in case dhd_stop() is not called
		 * from the upper layer when HANG event is triggered.
		 */
		if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
			DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
			dhd_stop(net);
		} else {
			return -1;
		}
	}
#endif /* PREVENT_REOPEN_DURING_HANG */


	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	/* Clear stale failure state from a previous session */
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;
	dhd->pub.hang_reason = 0;
	dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
	dhd->pub.d3ack_timeout_occured = 0;
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_LOSSLESS_ROAMING
	dhd->pub.dequeue_prec_map = ALLPRIO;
#endif
#if 0
	/*
	 * Force start if ifconfig_up gets called before START command
	 * We keep WEXT's wl_control_wl_start to provide backward compatibility
	 * This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	/* The heavy bring-up work below applies only to the primary interface */
	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
#ifdef WL_EXT_IAPSTA
			wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
#endif
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif
			/* Power the WLAN core on */
#if defined(BT_OVER_SDIO)
			ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
			wl_android_set_wifi_on_flag(TRUE);
#else
			ret = wl_android_wifi_on(net);
#endif /* BT_OVER_SDIO */
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
			/* Run ISAM pre-init commands from module config, if any */
			conf = dhd_get_conf(net);
			if (conf) {
				wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written);
				wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written);
				wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written);
			}
#endif
		}
#ifdef FIX_CPU_MIN_CLOCK
		if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
			dhd_init_cpufreq_fix(dhd);
			dhd_fix_cpu_freq(dhd);
		}
#endif /* FIX_CPU_MIN_CLOCK */
#if defined(OOB_INTR_ONLY)
		/* Optionally pin the OOB interrupt to a configured CPU core */
		if (dhd->pub.conf->dpc_cpucore >= 0) {
			dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
			adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
			if (adapter) {
				printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
				irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
			}
		}
#endif

		/* Bring the bus up if it is not carrying data yet */
		if (dhd->pub.busstate != DHD_BUS_DATA) {
#ifdef BCMDBUS
			dhd_set_path(&dhd->pub);
			/* Drop the mutex while waiting for firmware-ready */
			DHD_MUTEX_UNLOCK();
			wait_event_interruptible_timeout(dhd->adapter->status_event,
				wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
				msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
			DHD_MUTEX_LOCK();
			if ((ret = dbus_up(dhd->pub.bus)) != 0) {
				DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
				goto exit;
			} else {
				dhd->pub.busstate = DHD_BUS_DATA;
			}
			if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				goto exit;
			}
#else
			/* try to bring up bus */
			DHD_PERIM_UNLOCK(&dhd->pub);
			ret = dhd_bus_start(&dhd->pub);
			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
#endif /* !BCMDBUS */

		}
#ifdef WL_EXT_IAPSTA
		wl_ext_iapsta_attach_name(net, ifidx);
#endif
		if (dhd_download_fw_on_driverload) {
			if (dhd->pub.conf->deepsleep)
				dhd_deepsleep(dhd, 0);
		}

#ifdef BCM_FD_AGGR
		/* Configure full-dongle RPC aggregation limits for TX and RX */
		config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;


		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
			iovbuf, sizeof(iovbuf));

		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
			agglimit = *(uint32 *)iovbuf;
			config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
			config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
			DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
				agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
			if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
				DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
			}
		} else {
			DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
			rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
		}

		/* Set aggregation for TX */
		bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
			rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);

		/* Set aggregation for RX */
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
			dhd->pub.info->fdaggr = 0;
			if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
			if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
		} else {
			DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
		}
#endif /* BCM_FD_AGGR */

#ifdef BT_OVER_SDIO
		if (dhd->pub.is_bt_recovery_required) {
			DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
		}
		dhd->pub.is_bt_recovery_required = FALSE;
#endif

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		} else {
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
		}
#endif /* TOE */

#if defined(DHD_LB_RXP)
		/* Attach NAPI to this netdev unless another netdev owns it already */
		__skb_queue_head_init(&dhd->rx_pend_queue);
		if (dhd->rx_napi_netdev == NULL) {
			dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
			memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
			netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
				dhd_napi_poll, dhd_napi_weight);
			DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			napi_enable(&dhd->rx_napi_struct);
			DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
			skb_queue_head_init(&dhd->rx_napi_queue);
		} /* rx_napi_netdev == NULL */
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
		/* Use the variant that uses locks */
		skb_queue_head_init(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(net))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		if (!dhd_download_fw_on_driverload) {
#ifdef ARP_OFFLOAD_SUPPORT
			dhd->pend_ipaddr = 0;
			if (!dhd_inetaddr_notifier_registered) {
				dhd_inetaddr_notifier_registered = TRUE;
				register_inetaddr_notifier(&dhd_inetaddr_notifier);
			}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
			if (!dhd_inet6addr_notifier_registered) {
				dhd_inet6addr_notifier_registered = TRUE;
				register_inet6addr_notifier(&dhd_inet6addr_notifier);
			}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
		}

		argos_register_notifier_init(net);
#if defined(NUM_SCB_MAX_PROBE)
		dhd_set_scb_probe(&dhd->pub);
#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

	OLD_MOD_INC_USE_COUNT;

#ifdef BCMDBGFS
	dhd_dbgfs_init(&dhd->pub);
#endif

exit:
	/* Any failure above unwinds via dhd_stop() */
	if (ret) {
		dhd_stop(net);
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	DHD_MUTEX_UNLOCK();

	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
	return ret;
}
8722
8723 int dhd_do_driver_init(struct net_device *net)
8724 {
8725 dhd_info_t *dhd = NULL;
8726
8727 if (!net) {
8728 DHD_ERROR(("Primary Interface not initialized \n"));
8729 return -EINVAL;
8730 }
8731
8732 DHD_MUTEX_IS_LOCK_RETURN();
8733
8734 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
8735 dhd = DHD_DEV_INFO(net);
8736
8737 /* If driver is already initialized, do nothing
8738 */
8739 if (dhd->pub.busstate == DHD_BUS_DATA) {
8740 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
8741 return 0;
8742 }
8743
8744 if (dhd_open(net) < 0) {
8745 DHD_ERROR(("Driver Init Failed \n"));
8746 return -1;
8747 }
8748
8749 return 0;
8750 }
8751
8752 int
8753 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8754 {
8755
8756 #ifdef WL_CFG80211
8757 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8758 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8759 return BCME_OK;
8760 #endif
8761
8762 /* handle IF event caused by wl commands, SoftAP, WEXT and
8763 * anything else. This has to be done asynchronously otherwise
8764 * DPC will be blocked (and iovars will timeout as DPC has no chance
8765 * to read the response back)
8766 */
8767 if (ifevent->ifidx > 0) {
8768 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8769 if (if_event == NULL) {
8770 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
8771 MALLOCED(dhdinfo->pub.osh)));
8772 return BCME_NOMEM;
8773 }
8774
8775 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8776 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8777 strncpy(if_event->name, name, IFNAMSIZ);
8778 if_event->name[IFNAMSIZ - 1] = '\0';
8779 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
8780 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8781 }
8782
8783 return BCME_OK;
8784 }
8785
8786 int
8787 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8788 {
8789 dhd_if_event_t *if_event;
8790
8791 #ifdef WL_CFG80211
8792 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8793 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
8794 return BCME_OK;
8795 #endif /* WL_CFG80211 */
8796
8797 /* handle IF event caused by wl commands, SoftAP, WEXT and
8798 * anything else
8799 */
8800 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8801 if (if_event == NULL) {
8802 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8803 MALLOCED(dhdinfo->pub.osh)));
8804 return BCME_NOMEM;
8805 }
8806 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8807 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8808 strncpy(if_event->name, name, IFNAMSIZ);
8809 if_event->name[IFNAMSIZ - 1] = '\0';
8810 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
8811 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8812
8813 return BCME_OK;
8814 }
8815
8816 int
8817 dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
8818 {
8819 #ifdef DHD_UPDATE_INTF_MAC
8820 dhd_if_event_t *if_event;
8821 #endif /* DHD_UPDATE_INTF_MAC */
8822
8823 #ifdef WL_CFG80211
8824 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
8825 ifevent->ifidx, name, mac, ifevent->bssidx);
8826 #endif /* WL_CFG80211 */
8827
8828 #ifdef DHD_UPDATE_INTF_MAC
8829 /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
8830 * anything else
8831 */
8832 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
8833 if (if_event == NULL) {
8834 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
8835 MALLOCED(dhdinfo->pub.osh)));
8836 return BCME_NOMEM;
8837 }
8838 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
8839 // construct a change event
8840 if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
8841 if_event->event.opcode = WLC_E_IF_CHANGE;
8842 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
8843 strncpy(if_event->name, name, IFNAMSIZ);
8844 if_event->name[IFNAMSIZ - 1] = '\0';
8845 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
8846 dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
8847 #endif /* DHD_UPDATE_INTF_MAC */
8848
8849 return BCME_OK;
8850 }
8851
8852 /* unregister and free the existing net_device interface (if any) in iflist and
8853 * allocate a new one. the slot is reused. this function does NOT register the
8854 * new interface to linux kernel. dhd_register_if does the job
8855 */
8856 struct net_device*
8857 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
8858 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
8859 {
8860 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
8861 dhd_if_t *ifp;
8862
8863 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
8864 ifp = dhdinfo->iflist[ifidx];
8865
8866 if (ifp != NULL) {
8867 if (ifp->net != NULL) {
8868 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
8869 __FUNCTION__, ifp->net->name, ifidx));
8870
8871 if (ifidx == 0) {
8872 /* For primary ifidx (0), there shouldn't be
8873 * any netdev present already.
8874 */
8875 DHD_ERROR(("Primary ifidx populated already\n"));
8876 ASSERT(0);
8877 return NULL;
8878 }
8879
8880 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
8881
8882 /* in unregister_netdev case, the interface gets freed by net->destructor
8883 * (which is set to free_netdev)
8884 */
8885 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8886 free_netdev(ifp->net);
8887 } else {
8888 netif_stop_queue(ifp->net);
8889 if (need_rtnl_lock)
8890 unregister_netdev(ifp->net);
8891 else
8892 unregister_netdevice(ifp->net);
8893 }
8894 ifp->net = NULL;
8895 }
8896 } else {
8897 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
8898 if (ifp == NULL) {
8899 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
8900 return NULL;
8901 }
8902 }
8903
8904 memset(ifp, 0, sizeof(dhd_if_t));
8905 ifp->info = dhdinfo;
8906 ifp->idx = ifidx;
8907 ifp->bssidx = bssidx;
8908 #ifdef DHD_MCAST_REGEN
8909 ifp->mcast_regen_bss_enable = FALSE;
8910 #endif
8911 /* set to TRUE rx_pkt_chainable at alloc time */
8912 ifp->rx_pkt_chainable = TRUE;
8913
8914 if (mac != NULL)
8915 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
8916
8917 /* Allocate etherdev, including space for private structure */
8918 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
8919 if (ifp->net == NULL) {
8920 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
8921 goto fail;
8922 }
8923
8924 /* Setup the dhd interface's netdevice private structure. */
8925 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
8926
8927 if (name && name[0]) {
8928 strncpy(ifp->net->name, name, IFNAMSIZ);
8929 ifp->net->name[IFNAMSIZ - 1] = '\0';
8930 }
8931
8932 #ifdef WL_CFG80211
8933 if (ifidx == 0) {
8934 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8935 ifp->net->destructor = free_netdev;
8936 #else
8937 ifp->net->needs_free_netdev = true;
8938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
8939 } else {
8940 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8941 ifp->net->destructor = dhd_netdev_free;
8942 #else
8943 ifp->net->needs_free_netdev = true;
8944 ifp->net->priv_destructor = dhd_netdev_free;
8945 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
8946 }
8947 #else
8948 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
8949 ifp->net->destructor = free_netdev;
8950 #else
8951 ifp->net->needs_free_netdev = true;
8952 #endif
8953 #endif /* WL_CFG80211 */
8954 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
8955 ifp->name[IFNAMSIZ - 1] = '\0';
8956 dhdinfo->iflist[ifidx] = ifp;
8957
8958 /* initialize the dongle provided if name */
8959 if (dngl_name)
8960 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
8961 else if (name)
8962 strncpy(ifp->dngl_name, name, IFNAMSIZ);
8963
8964 #ifdef PCIE_FULL_DONGLE
8965 /* Initialize STA info list */
8966 INIT_LIST_HEAD(&ifp->sta_list);
8967 DHD_IF_STA_LIST_LOCK_INIT(ifp);
8968 #endif /* PCIE_FULL_DONGLE */
8969
8970 #ifdef DHD_L2_FILTER
8971 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
8972 ifp->parp_allnode = TRUE;
8973 #endif /* DHD_L2_FILTER */
8974
8975
8976 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8977
8978 return ifp->net;
8979
8980 fail:
8981 if (ifp != NULL) {
8982 if (ifp->net != NULL) {
8983 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
8984 if (ifp->net == dhdinfo->rx_napi_netdev) {
8985 napi_disable(&dhdinfo->rx_napi_struct);
8986 netif_napi_del(&dhdinfo->rx_napi_struct);
8987 skb_queue_purge(&dhdinfo->rx_napi_queue);
8988 dhdinfo->rx_napi_netdev = NULL;
8989 }
8990 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
8991 dhd_dev_priv_clear(ifp->net);
8992 free_netdev(ifp->net);
8993 ifp->net = NULL;
8994 }
8995 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8996 ifp = NULL;
8997 }
8998 dhdinfo->iflist[ifidx] = NULL;
8999 return NULL;
9000 }
9001
9002 /* unregister and free the the net_device interface associated with the indexed
9003 * slot, also free the slot memory and set the slot pointer to NULL
9004 */
int
dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
{
	/*
	 * Unregister and free the net_device in iflist[ifidx], release the
	 * per-interface resources (WMF, L2 filter, STA list, flowrings) and
	 * free the slot memory.  No-op if the slot is already empty.
	 * Always returns BCME_OK.
	 */
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;
#ifdef PCIE_FULL_DONGLE
	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdpub->if_flow_lkup;
#endif /* PCIE_FULL_DONGLE */

	ifp = dhdinfo->iflist[ifidx];

	if (ifp != NULL) {
		if (ifp->net != NULL) {
			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));

			/* Clear the slot first so no new traffic is mapped to it
			 * while the netdev is being unregistered.
			 */
			dhdinfo->iflist[ifidx] = NULL;
			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_tx_disable(ifp->net);



#if defined(SET_RPS_CPUS)
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				/* caller tells us whether it already holds rtnl_lock */
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
		}
#ifdef DHD_WMF
		dhd_wmf_cleanup(dhdpub, ifidx);
#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
		/* Flush and free the per-interface ARP table */
		bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
			NULL, FALSE, dhdpub->tickcnt);
		deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
		ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */


		dhd_if_del_sta_list(ifp);
#ifdef PCIE_FULL_DONGLE
		/* Delete flowrings of WDS interface */
		if (if_flow_lkup[ifidx].role == WLC_E_IF_ROLE_WDS) {
			dhd_flow_rings_delete(dhdpub, ifidx);
		}
#endif /* PCIE_FULL_DONGLE */
		DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);

		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
		ifp = NULL;
	}

	return BCME_OK;
}
9067
9068
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface (wlan0): includes open/stop.
 * Not const: fields may be adjusted at runtime elsewhere in this driver.
 */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
	/* ndo_set_multicast_list was renamed ndo_set_rx_mode in 3.2 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

/* net_device_ops for virtual interfaces: same handlers as the primary
 * but without open/stop (their lifecycle is driven by the primary).
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
9096
9097 #ifdef DEBUGGER
9098 extern void debugger_init(void *bus_handle);
9099 #endif
9100
9101
9102 #ifdef SHOW_LOGTRACE
9103 int
9104 dhd_os_read_file(void *file, char *buf, uint32 size)
9105 {
9106 struct file *filep = (struct file *)file;
9107
9108 if (!file || !buf)
9109 return -1;
9110
9111 return vfs_read(filep, buf, size, &filep->f_pos);
9112 }
9113
9114 int
9115 dhd_os_seek_file(void *file, int64 offset)
9116 {
9117 struct file *filep = (struct file *)file;
9118 if (!file)
9119 return -1;
9120
9121 /* offset can be -ve */
9122 filep->f_pos = filep->f_pos + offset;
9123
9124 return 0;
9125 }
9126
static int
dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
{
	/*
	 * Load the event-log format strings file (logstrs_path) into a heap
	 * buffer and hand it to dhd_parse_logstrs_file() to populate temp.
	 * On success the buffer ownership passes to temp; on any failure the
	 * buffer is freed and temp->fmts is cleared.
	 * Returns BCME_OK or BCME_ERROR.
	 */
	struct file *filep = NULL;
	struct kstat stat;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	int logstrs_size = 0;
	int error = 0;

	/* Switch to KERNEL_DS so vfs_* accepts kernel-space buffers;
	 * restored on every exit path below.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	filep = filp_open(logstrs_path, O_RDONLY, 0);

	if (IS_ERR(filep)) {
		DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	error = vfs_stat(logstrs_path, &stat);
	if (error) {
		DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	logstrs_size = (int) stat.size;

	if (logstrs_size == 0) {
		/* fail1 skips the MFREE since nothing was allocated yet */
		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
		goto fail1;
	}

	raw_fmts = MALLOC(osh, logstrs_size);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
		goto fail;
	}
	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
		DHD_ERROR(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
		goto fail;
	}

	/* On success, raw_fmts is now owned by temp — do not free it here */
	if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
			== BCME_OK) {
		filp_close(filep, NULL);
		set_fs(fs);
		return BCME_OK;
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, logstrs_size);
		raw_fmts = NULL;
	}

fail1:
	/* filep may still be an ERR_PTR from filp_open — guard the close */
	if (!IS_ERR(filep))
		filp_close(filep, NULL);

	set_fs(fs);
	temp->fmts = NULL;
	return BCME_ERROR;
}
9189
9190 static int
9191 dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
9192 uint32 *rodata_end)
9193 {
9194 struct file *filep = NULL;
9195 mm_segment_t fs;
9196 int err = BCME_ERROR;
9197
9198 if (fname == NULL) {
9199 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
9200 return BCME_ERROR;
9201 }
9202
9203 fs = get_fs();
9204 set_fs(KERNEL_DS);
9205
9206 filep = filp_open(fname, O_RDONLY, 0);
9207 if (IS_ERR(filep)) {
9208 DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
9209 goto fail;
9210 }
9211
9212 if ((err = dhd_parse_map_file(osh, filep, ramstart,
9213 rodata_start, rodata_end)) < 0)
9214 goto fail;
9215
9216 fail:
9217 if (!IS_ERR(filep))
9218 filp_close(filep, NULL);
9219
9220 set_fs(fs);
9221
9222 return err;
9223 }
9224
9225 static int
9226 dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
9227 {
9228 struct file *filep = NULL;
9229 mm_segment_t fs;
9230 char *raw_fmts = NULL;
9231 uint32 logstrs_size = 0;
9232
9233 int error = 0;
9234 uint32 ramstart = 0;
9235 uint32 rodata_start = 0;
9236 uint32 rodata_end = 0;
9237 uint32 logfilebase = 0;
9238
9239 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
9240 if (error != BCME_OK) {
9241 DHD_ERROR(("readmap Error!! \n"));
9242 /* don't do event log parsing in actual case */
9243 if (strstr(str_file, ram_file_str) != NULL) {
9244 temp->raw_sstr = NULL;
9245 } else if (strstr(str_file, rom_file_str) != NULL) {
9246 temp->rom_raw_sstr = NULL;
9247 }
9248 return error;
9249 }
9250 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
9251 ramstart, rodata_start, rodata_end));
9252
9253 fs = get_fs();
9254 set_fs(KERNEL_DS);
9255
9256 filep = filp_open(str_file, O_RDONLY, 0);
9257 if (IS_ERR(filep)) {
9258 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
9259 goto fail;
9260 }
9261
9262 /* Full file size is huge. Just read required part */
9263 logstrs_size = rodata_end - rodata_start;
9264
9265 if (logstrs_size == 0) {
9266 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
9267 goto fail1;
9268 }
9269
9270 raw_fmts = MALLOC(osh, logstrs_size);
9271 if (raw_fmts == NULL) {
9272 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
9273 goto fail;
9274 }
9275
9276 logfilebase = rodata_start - ramstart;
9277
9278 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
9279 if (error < 0) {
9280 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
9281 goto fail;
9282 }
9283
9284 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
9285 if (error != logstrs_size) {
9286 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
9287 goto fail;
9288 }
9289
9290 if (strstr(str_file, ram_file_str) != NULL) {
9291 temp->raw_sstr = raw_fmts;
9292 temp->raw_sstr_size = logstrs_size;
9293 temp->ramstart = ramstart;
9294 temp->rodata_start = rodata_start;
9295 temp->rodata_end = rodata_end;
9296 } else if (strstr(str_file, rom_file_str) != NULL) {
9297 temp->rom_raw_sstr = raw_fmts;
9298 temp->rom_raw_sstr_size = logstrs_size;
9299 temp->rom_ramstart = ramstart;
9300 temp->rom_rodata_start = rodata_start;
9301 temp->rom_rodata_end = rodata_end;
9302 }
9303
9304 filp_close(filep, NULL);
9305 set_fs(fs);
9306
9307 return BCME_OK;
9308
9309 fail:
9310 if (raw_fmts) {
9311 MFREE(osh, raw_fmts, logstrs_size);
9312 raw_fmts = NULL;
9313 }
9314
9315 fail1:
9316 if (!IS_ERR(filep))
9317 filp_close(filep, NULL);
9318
9319 set_fs(fs);
9320
9321 if (strstr(str_file, ram_file_str) != NULL) {
9322 temp->raw_sstr = NULL;
9323 } else if (strstr(str_file, rom_file_str) != NULL) {
9324 temp->rom_raw_sstr = NULL;
9325 }
9326
9327 return error;
9328 }
9329
9330 #endif /* SHOW_LOGTRACE */
9331
9332 #ifdef BCMDBUS
9333 uint
9334 dhd_get_rxsz(dhd_pub_t *pub)
9335 {
9336 struct net_device *net = NULL;
9337 dhd_info_t *dhd = NULL;
9338 uint rxsz;
9339
9340 /* Assign rxsz for dbus_attach */
9341 dhd = pub->info;
9342 net = dhd->iflist[0]->net;
9343 net->hard_header_len = ETH_HLEN + pub->hdrlen;
9344 rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
9345
9346 return rxsz;
9347 }
9348
9349 void
9350 dhd_set_path(dhd_pub_t *pub)
9351 {
9352 dhd_info_t *dhd = NULL;
9353
9354 dhd = pub->info;
9355
9356 /* try to download image and nvram to the dongle */
9357 if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
9358 DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
9359 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
9360 dhd_bus_update_fw_nv_path(dhd->pub.bus,
9361 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
9362 }
9363 }
9364 #endif
9365
/*
 * dhd_attach() - allocate and initialize the per-device DHD context.
 *
 * Allocates the dhd_info structure (preferring platform-preallocated
 * memory), links it to the bus, attaches the config/protocol and
 * cfg80211/wext layers, creates the primary network interface, sets up
 * watchdog/DPC/RXF threads and load-balancing tasklets, and registers
 * PM / inetaddr / cpufreq notifiers as configured at build time.
 *
 * Progress is accumulated in dhd_state; on any failure everything done
 * so far is unwound via dhd_detach()/dhd_free() and NULL is returned.
 * On success the embedded dhd_pub_t is returned.
 */
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
#ifdef BCMDBUS
	, void *data
#endif
)
{
	dhd_info_t *dhd = NULL;
	struct net_device *net = NULL;
	char if_name[IFNAMSIZ] = {'\0'};
#ifdef SHOW_LOGTRACE
	int ret;
#endif /* SHOW_LOGTRACE */
#if defined(BCMSDIO) || defined(BCMPCIE)
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
	wifi_adapter_info_t *adapter = NULL;
#elif defined(BCMDBUS)
	/* For DBUS the adapter info is handed in directly by the caller */
	wifi_adapter_info_t *adapter = data;
#endif
#ifdef GET_CUSTOM_MAC_ENABLE
	char hw_ether[62];
#endif /* GET_CUSTOM_MAC_ENABLE */

	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#ifdef STBLINUX
	DHD_ERROR(("%s\n", driver_target));
#endif /* STBLINUX */
	/* will implement get_ids for DBUS later */
#if defined(BCMSDIO)
	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
#endif
	/* Look up the platform adapter matching this bus so preallocated
	 * memory and platform data (MAC, country code, paths) can be used.
	 */
#if defined(BCMSDIO) || defined(BCMPCIE)
	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
#endif

	/* Allocate primary dhd_info */
	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
	if (dhd == NULL) {
		/* no platform prealloc available: fall back to heap */
		dhd = MALLOC(osh, sizeof(dhd_info_t));
		if (dhd == NULL) {
			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
			goto dhd_null_flag;
		}
	}
	memset(dhd, 0, sizeof(dhd_info_t));
	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;

	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */

	dhd->pub.osh = osh;
#ifdef DUMP_IOCTL_IOV_LIST
	dll_init(&(dhd->pub.dump_iovlist_head));
#endif /* DUMP_IOCTL_IOV_LIST */
	dhd->adapter = adapter;
	dhd->pub.adapter = (void *)adapter;
#ifdef DHD_DEBUG
	dll_init(&(dhd->pub.mw_list_head));
#endif /* DHD_DEBUG */
#ifdef BT_OVER_SDIO
	dhd->pub.is_bt_recovery_required = FALSE;
	mutex_init(&dhd->bus_user_lock);
#endif /* BT_OVER_SDIO */

#ifdef GET_CUSTOM_MAC_ENABLE
	/* seed the MAC address from platform data before firmware comes up */
	wifi_platform_get_mac_addr(dhd->adapter, hw_ether);
	bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
#endif /* GET_CUSTOM_MAC_ENABLE */
#ifdef CUSTOM_FORCE_NODFS_FLAG
	dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
	dhd->pub.force_country_change = TRUE;
#endif /* CUSTOM_FORCE_NODFS_FLAG */
#ifdef CUSTOM_COUNTRY_CODE
	get_customized_country_code(dhd->adapter,
		dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
		dhd->pub.dhd_cflags);
#endif /* CUSTOM_COUNTRY_CODE */
#ifndef BCMDBUS
	/* mark watchdog/DPC threads as not-yet-created */
	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
#ifdef DHD_WET
	dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
#endif /* DHD_WET */
	/* Initialize thread based operation and lock */
	sema_init(&dhd->sdsem, 1);
#endif /* !BCMDBUS */

	/* Link to info module */
	dhd->pub.info = dhd;


	/* Link to bus module */
	dhd->pub.bus = bus;
	dhd->pub.hdrlen = bus_hdrlen;

	/* dhd_conf must be attached after linking dhd to dhd->pub.info,
	 * because dhd_detech will check .info is NULL or not.
	 */
	if (dhd_conf_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_conf_attach failed\n"));
		goto fail;
	}
#ifndef BCMDBUS
	dhd_conf_reset(&dhd->pub);
	dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
	dhd_conf_preinit(&dhd->pub);
#endif /* !BCMDBUS */

	/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
	 * This is indeed a hack but we have to make it work properly before we have a better
	 * solution
	 */
	dhd_update_fw_nv_path(dhd);

	/* Set network interface name if it was provided as module parameter */
	if (iface_name[0]) {
		int len;
		char ch;
		strncpy(if_name, iface_name, IFNAMSIZ);
		if_name[IFNAMSIZ - 1] = 0;
		len = strlen(if_name);
		ch = if_name[len - 1];
		/* if the name does not already end in a digit, append "%d" so the
		 * kernel assigns a unit number (room permitting)
		 */
		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
			strncat(if_name, "%d", 2);
	}

	/* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
	if (net == NULL) {
		goto fail;
	}


	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
#ifdef DHD_L2_FILTER
	/* initialize the l2_filter_cnt */
	dhd->pub.l2_filter_cnt = 0;
#endif
	/* clear the ops pointer until dhd_register_if wires up the real ops */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif

	mutex_init(&dhd->dhd_iovar_mutex);
	sema_init(&dhd->proto_sem, 1);
#ifdef DHD_ULP
	if (!(dhd_ulp_init(osh, &dhd->pub)))
		goto fail;
#endif /* DHD_ULP */

#if defined(DHD_HANG_SEND_UP_TEST)
	dhd->pub.req_hang_type = 0;
#endif /* DHD_HANG_SEND_UP_TEST */

#ifdef PROP_TXSTATUS
	spin_lock_init(&dhd->wlfc_spinlock);

	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
	dhd->pub.plat_init = dhd_wlfc_plat_init;
	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;

#ifdef DHD_WLFC_THREAD
	init_waitqueue_head(&dhd->pub.wlfc_wqhead);
	dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
	if (IS_ERR(dhd->pub.wlfc_thread)) {
		DHD_ERROR(("create wlfc thread failed\n"));
		goto fail;
	} else {
		wake_up_process(dhd->pub.wlfc_thread);
	}
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	/* Initialize other structure content */
	init_waitqueue_head(&dhd->ioctl_resp_wait);
	init_waitqueue_head(&dhd->d3ack_wait);
#ifdef PCIE_INB_DW
	init_waitqueue_head(&dhd->ds_exit_wait);
#endif /* PCIE_INB_DW */
	init_waitqueue_head(&dhd->ctrl_wait);
	init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
	dhd->pub.dhd_bus_busy_state = 0;

	/* Initialize the spinlocks */
	spin_lock_init(&dhd->sdlock);
	spin_lock_init(&dhd->txqlock);
	spin_lock_init(&dhd->rxqlock);
	spin_lock_init(&dhd->dhd_lock);
	spin_lock_init(&dhd->rxf_lock);
#ifdef WLTDLS
	spin_lock_init(&dhd->pub.tdls_lock);
#endif /* WLTDLS */
#if defined(RXFRAME_THREAD)
	dhd->rxthread_enabled = TRUE;
#endif /* defined(RXFRAME_THREAD) */

#ifdef DHDTCPACK_SUPPRESS
	spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */

	/* Initialize Wakelock stuff */
	spin_lock_init(&dhd->wakelock_spinlock);
	spin_lock_init(&dhd->wakelock_evt_spinlock);
	DHD_OS_WAKE_LOCK_INIT(dhd);
	dhd->wakelock_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
	// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#endif /* CONFIG_HAS_WAKELOCK */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	mutex_init(&dhd->dhd_net_if_mutex);
	mutex_init(&dhd->dhd_suspend_mutex);
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	mutex_init(&dhd->dhd_apf_mutex);
#endif /* PKT_FILTER_SUPPORT && APF */
#endif
	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;

	/* Attach and link in the protocol */
	if (dhd_prot_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_prot_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;

#ifdef DHD_TIMESYNC
	/* attach the timesync module */
	if (dhd_timesync_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_timesync_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE;
#endif /* DHD_TIMESYNC */

#ifdef WL_CFG80211
	spin_lock_init(&dhd->pub.up_lock);
	/* Attach and link in the cfg80211 */
	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
		DHD_ERROR(("wl_cfg80211_attach failed\n"));
		goto fail;
	}

	dhd_monitor_init(&dhd->pub);
	dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif
#ifdef DHD_LOG_DUMP
	dhd_log_dump_init(&dhd->pub);
#endif /* DHD_LOG_DUMP */
#if defined(WL_WIRELESS_EXT)
	/* Attach and link in the iw (only when cfg80211 did not attach) */
	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
			DHD_ERROR(("wl_iw_attach failed\n"));
			goto fail;
		}
		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
	}
#ifdef WL_ESCAN
	wl_escan_attach(net, &dhd->pub);
#endif /* WL_ESCAN */
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef SHOW_LOGTRACE
	/* static strings are only useful once the logstrs array loaded */
	ret = dhd_init_logstrs_array(osh, &dhd->event_data);
	if (ret == BCME_OK) {
		dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
		dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
			rom_map_file_path);
		dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
	}
#endif /* SHOW_LOGTRACE */

#ifdef DEBUGABILITY
	/* attach debug if support */
	if (dhd_os_dbg_attach(&dhd->pub)) {
		DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
		goto fail;
	}

#ifdef DBG_PKT_MON
	dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
#ifdef DBG_PKT_MON_INIT_DEFAULT
	dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
#endif /* DBG_PKT_MON_INIT_DEFAULT */
#endif /* DBG_PKT_MON */
#endif /* DEBUGABILITY */
#ifdef DHD_PKT_LOGGING
	dhd_os_attach_pktlog(&dhd->pub);
#endif /* DHD_PKT_LOGGING */

	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
		DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
		goto fail;
	}



#ifndef BCMDBUS
	/* Set up the watchdog timer */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	timer_setup(&dhd->timer, dhd_watchdog, 0);
#else
	init_timer(&dhd->timer);
	dhd->timer.data = (ulong)dhd;
	dhd->timer.function = dhd_watchdog;
#endif
	dhd->default_wd_interval = dhd_watchdog_ms;

	if (dhd_watchdog_prio >= 0) {
		/* Initialize watchdog thread */
		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
		if (dhd->thr_wdt_ctl.thr_pid < 0) {
			goto fail;
		}

	} else {
		dhd->thr_wdt_ctl.thr_pid = -1;
	}

#ifdef DHD_PCIE_RUNTIMEPM
	/* Setup up the runtime PM Idlecount timer */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	timer_setup(&dhd->rpm_timer, dhd_runtimepm, 0);
#else
	init_timer(&dhd->rpm_timer);
	dhd->rpm_timer.data = (ulong)dhd;
	dhd->rpm_timer.function = dhd_runtimepm;
#endif
	dhd->rpm_timer_valid = FALSE;

	dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
	PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
	if (dhd->thr_rpm_ctl.thr_pid < 0) {
		goto fail;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

#ifdef DEBUGGER
	debugger_init((void *) bus);
#endif

	/* Set up the bottom half handler */
	if (dhd_dpc_prio >= 0) {
		/* Initialize DPC thread */
		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
		if (dhd->thr_dpc_ctl.thr_pid < 0) {
			goto fail;
		}
	} else {
		/* use tasklet for dpc */
		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
		dhd->thr_dpc_ctl.thr_pid = -1;
	}

	if (dhd->rxthread_enabled) {
		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
		/* Initialize RXF thread */
		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
		if (dhd->thr_rxf_ctl.thr_pid < 0) {
			goto fail;
		}
	}
#endif /* !BCMDBUS */
#ifdef SHOW_LOGTRACE
	skb_queue_head_init(&dhd->evt_trace_queue);
#endif /* SHOW_LOGTRACE */

	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;

	/* Notifier registrations are global; the *_registered flags keep a
	 * second adapter from registering the same notifier twice.
	 */
#if defined(CONFIG_PM_SLEEP)
	if (!dhd_pm_notifier_registered) {
		dhd_pm_notifier_registered = TRUE;
		dhd->pm_notifier.notifier_call = dhd_pm_callback;
		dhd->pm_notifier.priority = 10;
		register_pm_notifier(&dhd->pm_notifier);
	}

#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	dhd->pend_ipaddr = 0;
	if (!dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = TRUE;
		register_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
	if (!dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = TRUE;
		register_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
#ifdef DEBUG_CPU_FREQ
	dhd->new_freq = alloc_percpu(int);
	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
#ifdef DHDTCPACK_SUPPRESS
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DEFAULT);
#endif /* DHDTCPACK_SUPPRESS */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */


#ifdef DHD_DEBUG_PAGEALLOC
	register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
#endif /* DHD_DEBUG_PAGEALLOC */

#if defined(DHD_LB)

	dhd_lb_set_default_cpus(dhd);

	/* Initialize the CPU Masks */
	if (dhd_cpumasks_init(dhd) == 0) {
		/* Now we have the current CPU maps, run through candidacy */
		dhd_select_cpu_candidacy(dhd);
		/*
		 * If we are able to initialize CPU masks, lets register to the
		 * CPU Hotplug framework to change the CPU for each job dynamically
		 * using candidacy algorithm.
		 */
		dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
		register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
	} else {
		/*
		 * We are unable to initialize CPU masks, so candidacy algorithm
		 * won't run, but still Load Balancing will be honoured based
		 * on the CPUs allocated for a given job statically during init
		 */
		dhd->cpu_notifier.notifier_call = NULL;
		DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
			__FUNCTION__));
	}

#ifdef DHD_LB_TXP
#ifdef DHD_LB_TXP_DEFAULT_ENAB
	/* Trun ON the feature by default */
	atomic_set(&dhd->lb_txp_active, 1);
#else
	/* Trun OFF the feature by default */
	atomic_set(&dhd->lb_txp_active, 0);
#endif /* DHD_LB_TXP_DEFAULT_ENAB */
#endif /* DHD_LB_TXP */

	DHD_LB_STATS_INIT(&dhd->pub);

	/* Initialize the Load Balancing Tasklets and Napi object */
#if defined(DHD_LB_TXC)
	tasklet_init(&dhd->tx_compl_tasklet,
		dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
	INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
	DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
	tasklet_init(&dhd->rx_compl_tasklet,
		dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
	DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
#endif /* DHD_LB_RXC */

#if defined(DHD_LB_RXP)
	__skb_queue_head_init(&dhd->rx_pend_queue);
	skb_queue_head_init(&dhd->rx_napi_queue);
	/* Initialize the work that dispatches NAPI job to a given core */
	INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
	DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
	INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
	skb_queue_head_init(&dhd->tx_pend_queue);
	/* Initialize the work that dispatches TX job to a given core */
	tasklet_init(&dhd->tx_tasklet,
		dhd_lb_tx_handler, (ulong)(dhd));
	DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
#endif /* DHD_LB_TXP */

	dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
#endif /* DHD_LB */

#ifdef SHOW_LOGTRACE
	INIT_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
#endif /* SHOW_LOGTRACE */

	DHD_SSSR_MEMPOOL_INIT(&dhd->pub);

#ifdef REPORT_FATAL_TIMEOUTS
	init_dhd_timeouts(&dhd->pub);
#endif /* REPORT_FATAL_TIMEOUTS */
#ifdef BCMPCIE
	/* extended trap data is optional: attach continues if this fails */
	dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	if (dhd->pub.extended_trap_data == NULL) {
		DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
	}
#endif /* BCMPCIE */

	(void)dhd_sysfs_init(dhd);

	dhd_state |= DHD_ATTACH_STATE_DONE;
	dhd->dhd_state = dhd_state;

	dhd_found++;

	return &dhd->pub;

fail:
	/* unwind: dhd_detach() uses dhd_state to tear down only what attached */
	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
			__FUNCTION__, dhd_state, &dhd->pub));
		dhd->dhd_state = dhd_state;
		dhd_detach(&dhd->pub);
		dhd_free(&dhd->pub);
	}
dhd_null_flag:
	return NULL;
}
9899
9900 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9901 {
9902 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9903 return DHD_FLAG_HOSTAP_MODE;
9904 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9905 return DHD_FLAG_P2P_MODE;
9906 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9907 return DHD_FLAG_IBSS_MODE;
9908 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9909 return DHD_FLAG_MFG_MODE;
9910
9911 return DHD_FLAG_STA_MODE;
9912 }
9913
9914 int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9915 {
9916 return dhd_get_fw_mode(dhdp->info);
9917 }
9918
/*
 * dhd_update_fw_nv_path() - (re)compute the firmware/nvram/clm/config file
 * paths held in @dhdinfo.
 *
 * Path sources, in increasing priority:
 *   1. build-time defaults (CONFIG_BCMDHD_FW_PATH / CONFIG_BCMDHD_NVRAM_PATH)
 *   2. per-adapter platform data (consulted only while the dhdinfo path is
 *      still empty)
 *   3. module parameters (firmware_path / nvram_path / clm_path /
 *      config_path), which override everything whenever non-empty
 *
 * A trailing newline in any path (e.g. when the module parameter was set
 * via echo) is stripped.
 *
 * Returns TRUE when usable paths are in place; FALSE when a path would be
 * truncated or, for non-BCMEMBEDIMAGE builds, when fw or nvram path is
 * still empty.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
	int fw_len;
	int nv_len;
	int clm_len;
	int conf_len;
	const char *fw = NULL;
	const char *nv = NULL;
	const char *clm = NULL;
	const char *conf = NULL;
#ifdef DHD_UCODE_DOWNLOAD
	int uc_len;
	const char *uc = NULL;
#endif /* DHD_UCODE_DOWNLOAD */
	wifi_adapter_info_t *adapter = dhdinfo->adapter;
	int fw_path_len = sizeof(dhdinfo->fw_path);
	int nv_path_len = sizeof(dhdinfo->nv_path);


	/* Update firmware and nvram path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
//	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_FW_PATH
	fw = CONFIG_BCMDHD_FW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
	nv = CONFIG_BCMDHD_NVRAM_PATH;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
//	}

	/* check if we need to initialize the path */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;

	}
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	}
	if (dhdinfo->clm_path[0] == '\0') {
		if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
			clm = adapter->clm_path;
	}
	if (dhdinfo->conf_path[0] == '\0') {
		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
			conf = adapter->conf_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	if (firmware_path[0] != '\0')
		fw = firmware_path;
	if (nvram_path[0] != '\0')
		nv = nvram_path;
	if (clm_path[0] != '\0')
		clm = clm_path;
	if (config_path[0] != '\0')
		conf = config_path;
#ifdef DHD_UCODE_DOWNLOAD
	if (ucode_path[0] != '\0')
		uc = ucode_path;
#endif /* DHD_UCODE_DOWNLOAD */

	/* fw_len < fw_path_len is guaranteed below, so strncpy NUL-terminates */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= fw_path_len) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->fw_path, fw, fw_path_len);
		/* strip a trailing newline left by sysfs-style writes */
		if (dhdinfo->fw_path[fw_len-1] == '\n')
			dhdinfo->fw_path[fw_len-1] = '\0';
	}
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= nv_path_len) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
			return FALSE;
		}
		memset(dhdinfo->nv_path, 0, nv_path_len);
		strncpy(dhdinfo->nv_path, nv, nv_path_len);
#ifdef DHD_USE_SINGLE_NVRAM_FILE
		/* Remove "_net" or "_mfg" tag from current nvram path */
		{
			char *nvram_tag = "nvram_";
			char *ext_tag = ".txt";
			char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
			/* enough room after "nvram" to append ".txt" in place? */
			bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
				strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
			if (valid_buf) {
				/* truncate right after "nvram" and re-append ".txt" */
				char *sp = sp_nvram + strlen(nvram_tag) - 1;
				uint32 padding_size = (uint32)(dhdinfo->nv_path +
					nv_path_len - sp);
				memset(sp, 0, padding_size);
				strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
				nv_len = strlen(dhdinfo->nv_path);
				DHD_INFO(("%s: new nvram path = %s\n",
					__FUNCTION__, dhdinfo->nv_path));
			} else if (sp_nvram) {
				DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
					__FUNCTION__));
				return FALSE;
			} else {
				DHD_ERROR(("%s: Couldn't find the nvram tag. current"
					" nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
			}
		}
#endif /* DHD_USE_SINGLE_NVRAM_FILE */
		if (dhdinfo->nv_path[nv_len-1] == '\n')
			dhdinfo->nv_path[nv_len-1] = '\0';
	}
	if (clm && clm[0] != '\0') {
		clm_len = strlen(clm);
		if (clm_len >= sizeof(dhdinfo->clm_path)) {
			DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
		if (dhdinfo->clm_path[clm_len-1] == '\n')
			dhdinfo->clm_path[clm_len-1] = '\0';
	}
	if (conf && conf[0] != '\0') {
		conf_len = strlen(conf);
		if (conf_len >= sizeof(dhdinfo->conf_path)) {
			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
		if (dhdinfo->conf_path[conf_len-1] == '\n')
			dhdinfo->conf_path[conf_len-1] = '\0';
	}
#ifdef DHD_UCODE_DOWNLOAD
	if (uc && uc[0] != '\0') {
		uc_len = strlen(uc);
		if (uc_len >= sizeof(dhdinfo->uc_path)) {
			DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
		if (dhdinfo->uc_path[uc_len-1] == '\n')
			dhdinfo->uc_path[uc_len-1] = '\0';
	}
#endif /* DHD_UCODE_DOWNLOAD */

	/* NOTE(review): the "clear module parameter" step described in the
	 * header comment is disabled here — fw/nv/clm/conf module parameters
	 * are deliberately left intact across calls; only ucode_path below is
	 * cleared. Confirm this asymmetry is intended.
	 */
#if 0
	/* clear the path in module parameter */
	if (dhd_download_fw_on_driverload) {
		firmware_path[0] = '\0';
		nvram_path[0] = '\0';
		clm_path[0] = '\0';
		config_path[0] = '\0';
	}
#endif
#ifdef DHD_UCODE_DOWNLOAD
	ucode_path[0] = '\0';
	DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
#endif /* DHD_UCODE_DOWNLOAD */

#ifndef BCMEMBEDIMAGE
	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
		return FALSE;
	}
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
		return FALSE;
	}
#endif /* BCMEMBEDIMAGE */

	return TRUE;
}
10103
10104 #if defined(BT_OVER_SDIO)
/*
 * dhd_update_btfw_path() - resolve the BT firmware image path into
 * dhdinfo->btfw_path.
 *
 * Sources, in increasing priority: build-time default
 * (CONFIG_BCMDHD_BTFW_PATH, only when fw is not downloaded on driver
 * load), per-adapter platform data (only while btfw_path is still empty),
 * and the @btfw_path argument when non-empty.
 *
 * NOTE(review): the parameter name shadows the file-scope `btfw_path`
 * module parameter referenced by the comments below, so the "clear the
 * path in module parameter" write actually clears the CALLER's buffer.
 * This matches intent only if callers always pass the module parameter —
 * confirm against call sites.
 *
 * Returns TRUE when a usable path is in place, FALSE on truncation or
 * when no path could be found.
 */
extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
{
	int fw_len;
	const char *fw = NULL;
	wifi_adapter_info_t *adapter = dhdinfo->adapter;


	/* Update bt firmware path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The btfw_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
	 * command may change dhdinfo->btfw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_BTFW_PATH
		fw = CONFIG_BCMDHD_BTFW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
	}

	/* check if we need to initialize the path */
	if (dhdinfo->btfw_path[0] == '\0') {
		if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
			fw = adapter->btfw_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 */
	if (btfw_path[0] != '\0')
		fw = btfw_path;

	/* fw_len < sizeof(btfw_path) is guaranteed, so strncpy NUL-terminates */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->btfw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
		/* strip a trailing newline left by sysfs-style writes */
		if (dhdinfo->btfw_path[fw_len-1] == '\n')
			dhdinfo->btfw_path[fw_len-1] = '\0';
	}

	/* clear the path in module parameter */
	btfw_path[0] = '\0';

	if (dhdinfo->btfw_path[0] == '\0') {
		DHD_ERROR(("bt firmware path not found\n"));
		return FALSE;
	}

	return TRUE;
}
10161 #endif /* defined (BT_OVER_SDIO) */
10162
10163
10164 #ifdef CUSTOMER_HW4_DEBUG
/*
 * dhd_validate_chipid() - verify the probed chip matches the chip this
 * driver build was configured for.
 *
 * config_chipid is chosen at compile time from the BCMxxxx_CHIP macro;
 * builds with no recognized chip macro always return FALSE. A few
 * families accept more than one silicon ID for the same build (see the
 * explicit exceptions below).
 */
bool dhd_validate_chipid(dhd_pub_t *dhdp)
{
	uint chipid = dhd_bus_chip_id(dhdp);
	uint config_chipid;

#ifdef BCM4361_CHIP
	config_chipid = BCM4361_CHIP_ID;
#elif defined(BCM4359_CHIP)
	config_chipid = BCM4359_CHIP_ID;
#elif defined(BCM4358_CHIP)
	config_chipid = BCM4358_CHIP_ID;
#elif defined(BCM4354_CHIP)
	config_chipid = BCM4354_CHIP_ID;
#elif defined(BCM4339_CHIP)
	config_chipid = BCM4339_CHIP_ID;
#elif defined(BCM43349_CHIP)
	config_chipid = BCM43349_CHIP_ID;
#elif defined(BCM4335_CHIP)
	config_chipid = BCM4335_CHIP_ID;
#elif defined(BCM43241_CHIP)
	config_chipid = BCM4324_CHIP_ID;
#elif defined(BCM4330_CHIP)
	config_chipid = BCM4330_CHIP_ID;
#elif defined(BCM43430_CHIP)
	config_chipid = BCM43430_CHIP_ID;
#elif defined(BCM43018_CHIP)
	config_chipid = BCM43018_CHIP_ID;
#elif defined(BCM43455_CHIP)
	config_chipid = BCM4345_CHIP_ID;
#elif defined(BCM4334W_CHIP)
	config_chipid = BCM43342_CHIP_ID;
#elif defined(BCM43454_CHIP)
	config_chipid = BCM43454_CHIP_ID;
#elif defined(BCM43012_CHIP_)
	config_chipid = BCM43012_CHIP_ID;
#else
	DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
		" please add CONFIG_BCMXXXX into the Kernel and"
		" BCMXXXX_CHIP definition into the DHD driver\n",
		__FUNCTION__));
	config_chipid = 0;

	return FALSE;
#endif /* BCM4354_CHIP */

	/* 4345x family: either 43454 or 4345 silicon is acceptable */
#ifdef SUPPORT_MULTIPLE_CHIP_4345X
	if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) {
		return TRUE;
	}
#endif /* SUPPORT_MULTIPLE_CHIP_4345X */
	/* 4355 silicon is accepted by a 4359-configured build */
#if defined(BCM4359_CHIP)
	if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4359_CHIP */
	/* 4347 silicon is accepted by a 4361-configured build */
#if defined(BCM4361_CHIP)
	if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4361_CHIP */

	return config_chipid == chipid;
}
10228 #endif /* CUSTOMER_HW4_DEBUG */
10229
10230 #if defined(BT_OVER_SDIO)
/*
 * Return the global DHD public context as an opaque handle for the
 * BT-over-SDIO stack.  May return NULL if the driver has not attached yet;
 * callers must check before use.
 */
wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
{
	DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
	/* assuming that dhd_pub_t type pointer is available from a global variable */
	return (wlan_bt_handle_t) g_dhd_pub;
} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
10237
10238 int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
10239 {
10240 int ret = -1;
10241 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
10242 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10243
10244
10245 /* Download BT firmware image to the dongle */
10246 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
10247 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
10248 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
10249 if (ret < 0) {
10250 DHD_ERROR(("%s: failed to download btfw from: %s\n",
10251 __FUNCTION__, dhd->btfw_path));
10252 return ret;
10253 }
10254 }
10255 return ret;
10256 } EXPORT_SYMBOL(dhd_download_btfw);
10257 #endif /* defined (BT_OVER_SDIO) */
10258
10259 #ifndef BCMDBUS
10260 int
10261 dhd_bus_start(dhd_pub_t *dhdp)
10262 {
10263 int ret = -1;
10264 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
10265 unsigned long flags;
10266
10267 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10268 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
10269 #endif /* DHD_DEBUG && BCMSDIO */
10270 ASSERT(dhd);
10271
10272 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
10273
10274 DHD_PERIM_LOCK(dhdp);
10275 #ifdef HOFFLOAD_MODULES
10276 dhd_linux_get_modfw_address(dhdp);
10277 #endif
10278 /* try to download image and nvram to the dongle */
10279 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
10280 /* Indicate FW Download has not yet done */
10281 dhd->pub.fw_download_done = FALSE;
10282 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
10283 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
10284 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10285 fw_download_start = OSL_SYSUPTIME();
10286 #endif /* DHD_DEBUG && BCMSDIO */
10287 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
10288 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
10289 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10290 fw_download_end = OSL_SYSUPTIME();
10291 #endif /* DHD_DEBUG && BCMSDIO */
10292 if (ret < 0) {
10293 DHD_ERROR(("%s: failed to download firmware %s\n",
10294 __FUNCTION__, dhd->fw_path));
10295 DHD_PERIM_UNLOCK(dhdp);
10296 return ret;
10297 }
10298 /* Indicate FW Download has succeeded */
10299 dhd->pub.fw_download_done = TRUE;
10300 }
10301 if (dhd->pub.busstate != DHD_BUS_LOAD) {
10302 DHD_PERIM_UNLOCK(dhdp);
10303 return -ENETDOWN;
10304 }
10305
10306 #ifdef BCMSDIO
10307 dhd_os_sdlock(dhdp);
10308 #endif /* BCMSDIO */
10309
10310 /* Start the watchdog timer */
10311 dhd->pub.tickcnt = 0;
10312 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
10313
10314 /* Bring up the bus */
10315 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
10316
10317 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
10318 #ifdef BCMSDIO
10319 dhd_os_sdunlock(dhdp);
10320 #endif /* BCMSDIO */
10321 DHD_PERIM_UNLOCK(dhdp);
10322 return ret;
10323 }
10324
10325 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
10326
10327 #ifdef DHD_ULP
10328 dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
10329 #endif /* DHD_ULP */
10330 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
10331 /* Host registration for OOB interrupt */
10332 if (dhd_bus_oob_intr_register(dhdp)) {
10333 /* deactivate timer and wait for the handler to finish */
10334 #if !defined(BCMPCIE_OOB_HOST_WAKE)
10335 DHD_GENERAL_LOCK(&dhd->pub, flags);
10336 dhd->wd_timer_valid = FALSE;
10337 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10338 del_timer_sync(&dhd->timer);
10339
10340 #endif /* !BCMPCIE_OOB_HOST_WAKE */
10341 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10342 DHD_PERIM_UNLOCK(dhdp);
10343 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
10344 return -ENODEV;
10345 }
10346
10347 #if defined(BCMPCIE_OOB_HOST_WAKE)
10348 dhd_bus_oob_intr_set(dhdp, TRUE);
10349 #else
10350 /* Enable oob at firmware */
10351 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10352 #endif /* BCMPCIE_OOB_HOST_WAKE */
10353 #elif defined(FORCE_WOWLAN)
10354 /* Enable oob at firmware */
10355 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
10356 #endif
10357 #ifdef PCIE_FULL_DONGLE
10358 {
10359 /* max_h2d_rings includes H2D common rings */
10360 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
10361
10362 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
10363 max_h2d_rings));
10364 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
10365 #ifdef BCMSDIO
10366 dhd_os_sdunlock(dhdp);
10367 #endif /* BCMSDIO */
10368 DHD_PERIM_UNLOCK(dhdp);
10369 return ret;
10370 }
10371 }
10372 #endif /* PCIE_FULL_DONGLE */
10373
10374 /* Do protocol initialization necessary for IOCTL/IOVAR */
10375 ret = dhd_prot_init(&dhd->pub);
10376 if (unlikely(ret) != BCME_OK) {
10377 DHD_PERIM_UNLOCK(dhdp);
10378 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10379 return ret;
10380 }
10381
10382 /* If bus is not ready, can't come up */
10383 if (dhd->pub.busstate != DHD_BUS_DATA) {
10384 DHD_GENERAL_LOCK(&dhd->pub, flags);
10385 dhd->wd_timer_valid = FALSE;
10386 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10387 del_timer_sync(&dhd->timer);
10388 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
10389 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
10390 #ifdef BCMSDIO
10391 dhd_os_sdunlock(dhdp);
10392 #endif /* BCMSDIO */
10393 DHD_PERIM_UNLOCK(dhdp);
10394 return -ENODEV;
10395 }
10396
10397 #ifdef BCMSDIO
10398 dhd_os_sdunlock(dhdp);
10399 #endif /* BCMSDIO */
10400
10401 /* Bus is ready, query any dongle information */
10402 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10403 f2_sync_start = OSL_SYSUPTIME();
10404 #endif /* DHD_DEBUG && BCMSDIO */
10405 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
10406 DHD_GENERAL_LOCK(&dhd->pub, flags);
10407 dhd->wd_timer_valid = FALSE;
10408 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
10409 del_timer_sync(&dhd->timer);
10410 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
10411 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
10412 DHD_PERIM_UNLOCK(dhdp);
10413 return ret;
10414 }
10415 #if defined(CONFIG_SOC_EXYNOS8895)
10416 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
10417 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
10418 #endif /* CONFIG_SOC_EXYNOS8895 */
10419
10420 #if defined(DHD_DEBUG) && defined(BCMSDIO)
10421 f2_sync_end = OSL_SYSUPTIME();
10422 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
10423 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
10424 #endif /* DHD_DEBUG && BCMSDIO */
10425
10426 #ifdef ARP_OFFLOAD_SUPPORT
10427 if (dhd->pend_ipaddr) {
10428 #ifdef AOE_IP_ALIAS_SUPPORT
10429 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
10430 #endif /* AOE_IP_ALIAS_SUPPORT */
10431 dhd->pend_ipaddr = 0;
10432 }
10433 #endif /* ARP_OFFLOAD_SUPPORT */
10434
10435 #if defined(TRAFFIC_MGMT_DWM)
10436 bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
10437 #endif
10438 DHD_PERIM_UNLOCK(dhdp);
10439 return 0;
10440 }
10441 #endif /* !BCMDBUS */
10442
10443 #ifdef WLTDLS
10444 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
10445 {
10446 uint32 tdls = tdls_on;
10447 int ret = 0;
10448 uint32 tdls_auto_op = 0;
10449 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
10450 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
10451 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
10452 BCM_REFERENCE(mac);
10453 if (!FW_SUPPORTED(dhd, tdls))
10454 return BCME_ERROR;
10455
10456 if (dhd->tdls_enable == tdls_on)
10457 goto auto_mode;
10458 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
10459 if (ret < 0) {
10460 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
10461 goto exit;
10462 }
10463 dhd->tdls_enable = tdls_on;
10464 auto_mode:
10465
10466 tdls_auto_op = auto_on;
10467 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
10468 0, TRUE);
10469 if (ret < 0) {
10470 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
10471 goto exit;
10472 }
10473
10474 if (tdls_auto_op) {
10475 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
10476 sizeof(tdls_idle_time), NULL, 0, TRUE);
10477 if (ret < 0) {
10478 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
10479 goto exit;
10480 }
10481 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
10482 sizeof(tdls_rssi_high), NULL, 0, TRUE);
10483 if (ret < 0) {
10484 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
10485 goto exit;
10486 }
10487 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
10488 sizeof(tdls_rssi_low), NULL, 0, TRUE);
10489 if (ret < 0) {
10490 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
10491 goto exit;
10492 }
10493 }
10494
10495 exit:
10496 return ret;
10497 }
10498
10499 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
10500 {
10501 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10502 int ret = 0;
10503 if (dhd)
10504 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
10505 else
10506 ret = BCME_ERROR;
10507 return ret;
10508 }
10509
10510 int
10511 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
10512 {
10513 int ret = 0;
10514 bool auto_on = false;
10515 uint32 mode = wfd_mode;
10516
10517 #ifdef ENABLE_TDLS_AUTO_MODE
10518 if (wfd_mode) {
10519 auto_on = false;
10520 } else {
10521 auto_on = true;
10522 }
10523 #else
10524 auto_on = false;
10525 #endif /* ENABLE_TDLS_AUTO_MODE */
10526 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
10527 if (ret < 0) {
10528 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
10529 return ret;
10530 }
10531
10532 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
10533 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
10534 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
10535 return ret;
10536 }
10537
10538 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
10539 if (ret < 0) {
10540 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
10541 return ret;
10542 }
10543
10544 dhd->tdls_mode = mode;
10545 return ret;
10546 }
10547 #ifdef PCIE_FULL_DONGLE
/*
 * Maintain the TDLS peer table from firmware events: add a peer node on
 * WLC_E_TDLS_PEER_CONNECTED, remove it (and its flow rings) on
 * WLC_E_TDLS_PEER_DISCONNECTED.  Returns BCME_OK, or BCME_ERROR on an
 * unknown event reason, bad interface index, duplicate peer, or allocation
 * failure.
 *
 * NOTE(review): the list traversals below read peer_tbl.node without
 * holding tdls_lock; only the insert/remove mutations are locked.  This
 * looks racy if another context can mutate the list concurrently — confirm
 * the caller serializes these events.
 */
int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
{
	dhd_pub_t *dhd_pub = dhdp;
	tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
	tdls_peer_node_t *new = NULL, *prev = NULL;
	int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
	uint8 *da = (uint8 *)&event->addr.octet[0];	/* peer MAC from the event */
	bool connect = FALSE;
	uint32 reason = ntoh32(event->reason);
	unsigned long flags;

	if (reason == WLC_E_TDLS_PEER_CONNECTED)
		connect = TRUE;
	else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
		connect = FALSE;
	else
	{
		DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (ifindex == DHD_BAD_IF)
		return BCME_ERROR;

	if (connect) {
		/* Reject duplicates before allocating a new node */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
					__FUNCTION__, __LINE__));
				return BCME_ERROR;
			}
			cur = cur->next;
		}

		new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
		if (new == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
			return BCME_ERROR;
		}
		memcpy(new->addr, da, ETHER_ADDR_LEN);
		/* Prepend under the TDLS lock and bump the peer count */
		DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
		new->next = dhd_pub->peer_tbl.node;
		dhd_pub->peer_tbl.node = new;
		dhd_pub->peer_tbl.tdls_peer_count++;
		DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);

	} else {
		/* Disconnect: find the node, tear down its flow rings, unlink it */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
				DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
				if (prev)
					prev->next = cur->next;
				else
					dhd_pub->peer_tbl.node = cur->next;
				MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
				dhd_pub->peer_tbl.tdls_peer_count--;
				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
				return BCME_OK;
			}
			prev = cur;
			cur = cur->next;
		}
		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
	}
	return BCME_OK;
}
10614 #endif /* PCIE_FULL_DONGLE */
10615 #endif
10616
10617 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
10618 {
10619 if (!dhd)
10620 return FALSE;
10621
10622 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
10623 return TRUE;
10624 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
10625 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
10626 return TRUE;
10627 else
10628 return FALSE;
10629 }
10630 #if !defined(AP) && defined(WLP2P)
10631 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
10632 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
10633 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
10634 * would still be named as fw_bcmdhd_apsta.
10635 */
10636 uint32
10637 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
10638 {
10639 int32 ret = 0;
10640 char buf[WLC_IOCTL_SMLEN];
10641 bool mchan_supported = FALSE;
10642 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
10643 * test mode, that means we only will use the mode as it is
10644 */
10645 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
10646 return 0;
10647 if (FW_SUPPORTED(dhd, vsdb)) {
10648 mchan_supported = TRUE;
10649 }
10650 if (!FW_SUPPORTED(dhd, p2p)) {
10651 DHD_TRACE(("Chip does not support p2p\n"));
10652 return 0;
10653 } else {
10654 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
10655 memset(buf, 0, sizeof(buf));
10656 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
10657 sizeof(buf), FALSE);
10658 if (ret < 0) {
10659 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
10660 return 0;
10661 } else {
10662 if (buf[0] == 1) {
10663 /* By default, chip supports single chan concurrency,
10664 * now lets check for mchan
10665 */
10666 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
10667 if (mchan_supported)
10668 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
10669 if (FW_SUPPORTED(dhd, rsdb)) {
10670 ret |= DHD_FLAG_RSDB_MODE;
10671 }
10672 #ifdef WL_SUPPORT_MULTIP2P
10673 if (FW_SUPPORTED(dhd, mp2p)) {
10674 ret |= DHD_FLAG_MP2P_MODE;
10675 }
10676 #endif /* WL_SUPPORT_MULTIP2P */
10677 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
10678 return ret;
10679 #else
10680 return 0;
10681 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
10682 }
10683 }
10684 }
10685 return 0;
10686 }
10687 #endif
10688
10689 #ifdef SUPPORT_AP_POWERSAVE
10690 #define RXCHAIN_PWRSAVE_PPS 10
10691 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
10692 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
10693 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
10694 {
10695 int32 pps = RXCHAIN_PWRSAVE_PPS;
10696 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
10697 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
10698 int ret;
10699
10700 if (enable) {
10701 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
10702 NULL, 0, TRUE);
10703 if (ret != BCME_OK) {
10704 DHD_ERROR(("Failed to enable AP power save\n"));
10705 }
10706 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0,
10707 TRUE);
10708 if (ret != BCME_OK) {
10709 DHD_ERROR(("Failed to set pps\n"));
10710 }
10711 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time,
10712 sizeof(quiet_time), NULL, 0, TRUE);
10713 if (ret != BCME_OK) {
10714 DHD_ERROR(("Failed to set quiet time\n"));
10715 }
10716 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check",
10717 (char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE);
10718 if (ret != BCME_OK) {
10719 DHD_ERROR(("Failed to set stas assoc check\n"));
10720 }
10721 } else {
10722 ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
10723 NULL, 0, TRUE);
10724 if (ret != BCME_OK) {
10725 DHD_ERROR(("Failed to disable AP power save\n"));
10726 }
10727 }
10728
10729 return 0;
10730 }
10731 #endif /* SUPPORT_AP_POWERSAVE */
10732
10733
10734
10735
10736 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
10737 int
10738 dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
10739 {
10740 int i;
10741 int len;
10742 int ret = BCME_OK;
10743
10744 bcm_iov_buf_t *iov_buf = NULL;
10745 wl_adps_params_v1_t *data = NULL;
10746 char buf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10747
10748 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
10749 iov_buf = kmalloc(len, GFP_KERNEL);
10750 if (iov_buf == NULL) {
10751 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
10752 ret = BCME_NOMEM;
10753 goto exit;
10754 }
10755
10756 iov_buf->version = WL_ADPS_IOV_VER;
10757 iov_buf->len = sizeof(*data);
10758 iov_buf->id = WL_ADPS_IOV_MODE;
10759
10760 data = (wl_adps_params_v1_t *)iov_buf->data;
10761 data->version = ADPS_SUB_IOV_VERSION_1;
10762 data->length = sizeof(*data);
10763 data->mode = on;
10764
10765 for (i = 1; i <= MAX_BANDS; i++) {
10766 data->band = i;
10767 bcm_mkiovar("adps", (char *)iov_buf, len, buf, sizeof(buf));
10768 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0)) < 0) {
10769 if (ret == BCME_UNSUPPORTED) {
10770 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
10771 ret = BCME_OK;
10772 goto exit;
10773 }
10774 else {
10775 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10776 __FUNCTION__, on ? "On" : "Off", i, ret));
10777 goto exit;
10778 }
10779 }
10780 }
10781
10782 exit:
10783 if (iov_buf) {
10784 kfree(iov_buf);
10785 }
10786 return ret;
10787 }
10788 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
10789
10790 int
10791 dhd_preinit_ioctls(dhd_pub_t *dhd)
10792 {
10793 int ret = 0;
10794 char eventmask[WL_EVENTING_MASK_LEN];
10795 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10796 uint32 buf_key_b4_m4 = 1;
10797 uint8 msglen;
10798 eventmsgs_ext_t *eventmask_msg = NULL;
10799 char* iov_buf = NULL;
10800 int ret2 = 0;
10801 uint32 wnm_cap = 0;
10802 #if defined(CUSTOM_AMPDU_BA_WSIZE)
10803 uint32 ampdu_ba_wsize = 0;
10804 #endif
10805 #if defined(CUSTOM_AMPDU_MPDU)
10806 int32 ampdu_mpdu = 0;
10807 #endif
10808 #if defined(CUSTOM_AMPDU_RELEASE)
10809 int32 ampdu_release = 0;
10810 #endif
10811 #if defined(CUSTOM_AMSDU_AGGSF)
10812 int32 amsdu_aggsf = 0;
10813 #endif
10814 shub_control_t shub_ctl;
10815
10816 #if defined(BCMSDIO) || defined(BCMDBUS)
10817 #ifdef PROP_TXSTATUS
10818 int wlfc_enable = TRUE;
10819 #ifndef DISABLE_11N
10820 uint32 hostreorder = 1;
10821 uint wl_down = 1;
10822 #endif /* DISABLE_11N */
10823 #endif /* PROP_TXSTATUS */
10824 #endif /* BCMSDIO || BCMDBUS */
10825 #ifndef PCIE_FULL_DONGLE
10826 uint32 wl_ap_isolate;
10827 #endif /* PCIE_FULL_DONGLE */
10828 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
10829 uint wnm_bsstrans_resp = 0;
10830 #ifdef SUPPORT_SET_CAC
10831 uint32 cac = 1;
10832 #endif /* SUPPORT_SET_CAC */
10833 #ifdef DHD_ENABLE_LPC
10834 uint32 lpc = 1;
10835 #endif /* DHD_ENABLE_LPC */
10836 uint power_mode = PM_FAST;
10837 #if defined(BCMSDIO)
10838 uint32 dongle_align = DHD_SDALIGN;
10839 uint32 glom = CUSTOM_GLOM_SETTING;
10840 #endif /* defined(BCMSDIO) */
10841 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
10842 uint32 credall = 1;
10843 #endif
10844 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
10845 uint scancache_enab = TRUE;
10846 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10847 uint32 bcn_li_bcn = 1;
10848 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10849 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
10850 #if defined(ARP_OFFLOAD_SUPPORT)
10851 int arpoe = 1;
10852 #endif
10853 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
10854 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
10855 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
10856 char buf[WLC_IOCTL_SMLEN];
10857 char *ptr;
10858 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
10859 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10860 wl_el_tag_params_t *el_tag = NULL;
10861 #endif /* DHD_8021X_DUMP */
10862 #ifdef ROAM_ENABLE
10863 uint roamvar = 0;
10864 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
10865 int roam_scan_period[2] = {10, WLC_BAND_ALL};
10866 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
10867 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10868 int roam_fullscan_period = 60;
10869 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10870 int roam_fullscan_period = 120;
10871 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10872 #ifdef DISABLE_BCNLOSS_ROAM
10873 uint roam_bcnloss_off = 1;
10874 #endif /* DISABLE_BCNLOSS_ROAM */
10875 #else
10876 #ifdef DISABLE_BUILTIN_ROAM
10877 uint roamvar = 1;
10878 #endif /* DISABLE_BUILTIN_ROAM */
10879 #endif /* ROAM_ENABLE */
10880
10881 #if defined(SOFTAP)
10882 uint dtim = 1;
10883 #endif
10884 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10885 struct ether_addr p2p_ea;
10886 #endif
10887 #ifdef SOFTAP_UAPSD_OFF
10888 uint32 wme_apsd = 0;
10889 #endif /* SOFTAP_UAPSD_OFF */
10890 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10891 uint32 apsta = 1; /* Enable APSTA mode */
10892 #elif defined(SOFTAP_AND_GC)
10893 uint32 apsta = 0;
10894 int ap_mode = 1;
10895 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10896 #ifdef GET_CUSTOM_MAC_ENABLE
10897 struct ether_addr ea_addr;
10898 char hw_ether[62];
10899 #endif /* GET_CUSTOM_MAC_ENABLE */
10900
10901 #ifdef DISABLE_11N
10902 uint32 nmode = 0;
10903 #endif /* DISABLE_11N */
10904
10905 #ifdef USE_WL_TXBF
10906 uint32 txbf = 1;
10907 #endif /* USE_WL_TXBF */
10908 #ifdef DISABLE_TXBFR
10909 uint32 txbf_bfr_cap = 0;
10910 #endif /* DISABLE_TXBFR */
10911 #if defined(PROP_TXSTATUS)
10912 #ifdef USE_WFA_CERT_CONF
10913 uint32 proptx = 0;
10914 #endif /* USE_WFA_CERT_CONF */
10915 #endif /* PROP_TXSTATUS */
10916 #if defined(SUPPORT_5G_1024QAM_VHT)
10917 uint32 vht_features = 0; /* init to 0, will be set based on each support */
10918 #endif
10919 #ifdef DISABLE_11N_PROPRIETARY_RATES
10920 uint32 ht_features = 0;
10921 #endif /* DISABLE_11N_PROPRIETARY_RATES */
10922 #ifdef CUSTOM_PSPRETEND_THR
10923 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
10924 #endif
10925 #ifdef CUSTOM_EVENT_PM_WAKE
10926 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
10927 #endif /* CUSTOM_EVENT_PM_WAKE */
10928 uint32 rsdb_mode = 0;
10929 #ifdef ENABLE_TEMP_THROTTLING
10930 wl_temp_control_t temp_control;
10931 #endif /* ENABLE_TEMP_THROTTLING */
10932 #ifdef DISABLE_PRUNED_SCAN
10933 uint32 scan_features = 0;
10934 #endif /* DISABLE_PRUNED_SCAN */
10935 #ifdef PKT_FILTER_SUPPORT
10936 dhd_pkt_filter_enable = TRUE;
10937 #ifdef APF
10938 dhd->apf_set = FALSE;
10939 #endif /* APF */
10940 #endif /* PKT_FILTER_SUPPORT */
10941 #ifdef WLTDLS
10942 dhd->tdls_enable = FALSE;
10943 dhd_tdls_set_mode(dhd, false);
10944 #endif /* WLTDLS */
10945 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
10946 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
10947 dhd->max_dtim_enable = TRUE;
10948 #else
10949 dhd->max_dtim_enable = FALSE;
10950 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
10951 #ifdef CUSTOM_SET_OCLOFF
10952 dhd->ocl_off = FALSE;
10953 #endif /* CUSTOM_SET_OCLOFF */
10954 DHD_TRACE(("Enter %s\n", __FUNCTION__));
10955
10956 #ifdef DHDTCPACK_SUPPRESS
10957 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
10958 #endif
10959 dhd->op_mode = 0;
10960
10961 #if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
10962 /* clear AP flags */
10963 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
10964 #endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
10965
10966 #ifdef CUSTOMER_HW4_DEBUG
10967 if (!dhd_validate_chipid(dhd)) {
10968 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
10969 __FUNCTION__, dhd_bus_chip_id(dhd)));
10970 #ifndef SUPPORT_MULTIPLE_CHIPS
10971 ret = BCME_BADARG;
10972 goto done;
10973 #endif /* !SUPPORT_MULTIPLE_CHIPS */
10974 }
10975 #endif /* CUSTOMER_HW4_DEBUG */
10976 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10977 (op_mode == DHD_FLAG_MFG_MODE)) {
10978 dhd->op_mode = DHD_FLAG_MFG_MODE;
10979 #ifdef DHD_PCIE_RUNTIMEPM
10980 /* Disable RuntimePM in mfg mode */
10981 DHD_DISABLE_RUNTIME_PM(dhd);
10982 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
10983 #endif /* DHD_PCIE_RUNTIME_PM */
10984 /* Check and adjust IOCTL response timeout for Manufactring firmware */
10985 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
10986 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
10987 __FUNCTION__));
10988 } else {
10989 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
10990 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
10991 }
10992 #ifdef GET_CUSTOM_MAC_ENABLE
10993 ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether);
10994 if (!ret) {
10995 memset(buf, 0, sizeof(buf));
10996 bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
10997 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
10998 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
10999 if (ret < 0) {
11000 memset(buf, 0, sizeof(buf));
11001 bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
11002 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
11003 if (ret) {
11004 int i;
11005 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
11006 __FUNCTION__, MAC2STRDBG(hw_ether), ret));
11007 for (i=0; i<sizeof(hw_ether)-ETHER_ADDR_LEN; i++) {
11008 printf("0x%02x,", hw_ether[i+ETHER_ADDR_LEN]);
11009 if ((i+1)%8 == 0)
11010 printf("\n");
11011 }
11012 ret = BCME_NOTUP;
11013 goto done;
11014 }
11015 }
11016 } else {
11017 DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
11018 ret = BCME_NOTUP;
11019 goto done;
11020 }
11021 #endif /* GET_CUSTOM_MAC_ENABLE */
11022 /* Get the default device MAC address directly from firmware */
11023 memset(buf, 0, sizeof(buf));
11024 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
11025 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
11026 FALSE, 0)) < 0) {
11027 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
11028 ret = BCME_NOTUP;
11029 goto done;
11030 }
11031 /* Update public MAC address after reading from Firmware */
11032 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
11033
11034 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
11035 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
11036 goto done;
11037 }
11038
11039 /* get a capabilities from firmware */
11040 {
11041 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
11042 memset(dhd->fw_capabilities, 0, cap_buf_size);
11043 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
11044 FALSE);
11045 if (ret < 0) {
11046 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
11047 __FUNCTION__, ret));
11048 return 0;
11049 }
11050
11051 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
11052 dhd->fw_capabilities[0] = ' ';
11053 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
11054 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
11055 }
11056
11057 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
11058 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
11059 #ifdef SET_RANDOM_MAC_SOFTAP
11060 uint rand_mac;
11061 #endif /* SET_RANDOM_MAC_SOFTAP */
11062 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
11063 #if defined(ARP_OFFLOAD_SUPPORT)
11064 arpoe = 0;
11065 #endif
11066 #ifdef PKT_FILTER_SUPPORT
11067 dhd_pkt_filter_enable = FALSE;
11068 #endif
11069 #ifdef SET_RANDOM_MAC_SOFTAP
11070 SRANDOM32((uint)jiffies);
11071 rand_mac = RANDOM32();
11072 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
11073 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
11074 iovbuf[2] = (unsigned char)vendor_oui;
11075 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
11076 iovbuf[4] = (unsigned char)(rand_mac >> 8);
11077 iovbuf[5] = (unsigned char)(rand_mac >> 16);
11078
11079 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
11080 TRUE);
11081 if (ret < 0) {
11082 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
11083 } else
11084 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
11085 #endif /* SET_RANDOM_MAC_SOFTAP */
11086 #ifdef USE_DYNAMIC_F2_BLKSIZE
11087 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11088 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11089 #ifdef SUPPORT_AP_POWERSAVE
11090 dhd_set_ap_powersave(dhd, 0, TRUE);
11091 #endif /* SUPPORT_AP_POWERSAVE */
11092 #ifdef SOFTAP_UAPSD_OFF
11093 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
11094 TRUE);
11095 if (ret < 0) {
11096 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
11097 __FUNCTION__, ret));
11098 }
11099 #endif /* SOFTAP_UAPSD_OFF */
11100 #if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
11101 /* set AP flag for specific country code of SOFTAP */
11102 dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
11103 #endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
11104 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
11105 (op_mode == DHD_FLAG_MFG_MODE)) {
11106 #if defined(ARP_OFFLOAD_SUPPORT)
11107 arpoe = 0;
11108 #endif /* ARP_OFFLOAD_SUPPORT */
11109 #ifdef PKT_FILTER_SUPPORT
11110 dhd_pkt_filter_enable = FALSE;
11111 #endif /* PKT_FILTER_SUPPORT */
11112 dhd->op_mode = DHD_FLAG_MFG_MODE;
11113 #ifdef USE_DYNAMIC_F2_BLKSIZE
11114 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
11115 #endif /* USE_DYNAMIC_F2_BLKSIZE */
11116 if (FW_SUPPORTED(dhd, rsdb)) {
11117 rsdb_mode = 0;
11118 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
11119 NULL, 0, TRUE);
11120 if (ret < 0) {
11121 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
11122 __FUNCTION__, ret));
11123 }
11124 }
11125 } else {
11126 uint32 concurrent_mode = 0;
11127 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
11128 (op_mode == DHD_FLAG_P2P_MODE)) {
11129 #if defined(ARP_OFFLOAD_SUPPORT)
11130 arpoe = 0;
11131 #endif
11132 #ifdef PKT_FILTER_SUPPORT
11133 dhd_pkt_filter_enable = FALSE;
11134 #endif
11135 dhd->op_mode = DHD_FLAG_P2P_MODE;
11136 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
11137 (op_mode == DHD_FLAG_IBSS_MODE)) {
11138 dhd->op_mode = DHD_FLAG_IBSS_MODE;
11139 } else
11140 dhd->op_mode = DHD_FLAG_STA_MODE;
11141 #if !defined(AP) && defined(WLP2P)
11142 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
11143 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
11144 #if defined(ARP_OFFLOAD_SUPPORT)
11145 arpoe = 1;
11146 #endif
11147 dhd->op_mode |= concurrent_mode;
11148 }
11149
11150 /* Check if we are enabling p2p */
11151 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11152 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
11153 TRUE);
11154 if (ret < 0)
11155 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
11156
11157 #if defined(SOFTAP_AND_GC)
11158 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
11159 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
11160 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
11161 }
11162 #endif
11163 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
11164 ETHER_SET_LOCALADDR(&p2p_ea);
11165 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
11166 NULL, 0, TRUE);
11167 if (ret < 0)
11168 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
11169 else
11170 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
11171 }
11172 #else
11173 (void)concurrent_mode;
11174 #endif
11175 }
11176 #ifdef BCMSDIO
11177 if (dhd->conf->sd_f2_blocksize)
11178 dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
11179 #endif
11180
11181 #if defined(RSDB_MODE_FROM_FILE)
11182 (void)dhd_rsdb_mode_from_file(dhd);
11183 #endif
11184
11185 #ifdef DISABLE_PRUNED_SCAN
11186 if (FW_SUPPORTED(dhd, rsdb)) {
11187 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11188 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
11189 if (ret < 0) {
11190 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
11191 __FUNCTION__, ret));
11192 } else {
11193 memcpy(&scan_features, iovbuf, 4);
11194 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
11195 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
11196 sizeof(scan_features), NULL, 0, TRUE);
11197 if (ret < 0) {
11198 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
11199 __FUNCTION__, ret));
11200 }
11201 }
11202 }
11203 #endif /* DISABLE_PRUNED_SCAN */
11204
11205 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
11206 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
11207 #ifdef CUSTOMER_HW2
11208 #if defined(DHD_BLOB_EXISTENCE_CHECK)
11209 if (!dhd->pub.is_blob)
11210 #endif /* DHD_BLOB_EXISTENCE_CHECK */
11211 {
11212 /* get a ccode and revision for the country code */
11213 #if defined(CUSTOM_COUNTRY_CODE)
11214 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
11215 &dhd->dhd_cspec, dhd->dhd_cflags);
11216 #else
11217 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
11218 &dhd->dhd_cspec);
11219 #endif /* CUSTOM_COUNTRY_CODE */
11220 }
11221 #endif /* CUSTOMER_HW2 */
11222
11223 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
11224 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
11225 dhd->info->rxthread_enabled = FALSE;
11226 else
11227 dhd->info->rxthread_enabled = TRUE;
11228 #endif
11229 /* Set Country code */
11230 if (dhd->dhd_cspec.ccode[0] != 0) {
11231 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
11232 NULL, 0, TRUE);
11233 if (ret < 0)
11234 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
11235 }
11236
11237
11238 /* Set Listen Interval */
11239 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
11240 NULL, 0, TRUE);
11241 if (ret < 0)
11242 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
11243
11244 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
11245 #ifdef USE_WFA_CERT_CONF
11246 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
11247 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
11248 }
11249 #endif /* USE_WFA_CERT_CONF */
11250 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
11251 dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
11252 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
11253 #if defined(ROAM_ENABLE)
11254 #ifdef DISABLE_BCNLOSS_ROAM
11255 dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off, sizeof(roam_bcnloss_off),
11256 NULL, 0, TRUE);
11257 #endif /* DISABLE_BCNLOSS_ROAM */
11258 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
11259 sizeof(roam_trigger), TRUE, 0)) < 0)
11260 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
11261 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
11262 sizeof(roam_scan_period), TRUE, 0)) < 0)
11263 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
11264 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
11265 sizeof(roam_delta), TRUE, 0)) < 0)
11266 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
11267 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
11268 sizeof(roam_fullscan_period), NULL, 0, TRUE);
11269 if (ret < 0)
11270 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
11271 #endif /* ROAM_ENABLE */
11272
11273 #ifdef CUSTOM_EVENT_PM_WAKE
11274 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
11275 sizeof(pm_awake_thresh), NULL, 0, TRUE);
11276 if (ret < 0) {
11277 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
11278 }
11279 #endif /* CUSTOM_EVENT_PM_WAKE */
11280 #ifdef WLTDLS
11281 #ifdef ENABLE_TDLS_AUTO_MODE
11282 /* by default TDLS on and auto mode on */
11283 _dhd_tdls_enable(dhd, true, true, NULL);
11284 #else
11285 /* by default TDLS on and auto mode off */
11286 _dhd_tdls_enable(dhd, true, false, NULL);
11287 #endif /* ENABLE_TDLS_AUTO_MODE */
11288 #endif /* WLTDLS */
11289
11290 #ifdef DHD_ENABLE_LPC
11291 /* Set lpc 1 */
11292 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11293 if (ret < 0) {
11294 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
11295
11296 if (ret == BCME_NOTDOWN) {
11297 uint wl_down = 1;
11298 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11299 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11300 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
11301
11302 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
11303 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
11304 }
11305 }
11306 #endif /* DHD_ENABLE_LPC */
11307
11308 #ifdef WLADPS
11309 #ifdef WLADPS_SEAK_AP_WAR
11310 dhd->disabled_adps = FALSE;
11311 #endif /* WLADPS_SEAK_AP_WAR */
11312 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
11313 #ifdef ADPS_MODE_FROM_FILE
11314 dhd_adps_mode_from_file(dhd);
11315 #else
11316 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
11317 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
11318 __FUNCTION__, ret));
11319 }
11320 #endif /* ADPS_MODE_FROM_FILE */
11321 }
11322 #endif /* WLADPS */
11323
11324 /* Set PowerSave mode */
11325 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
11326
11327 #if defined(BCMSDIO)
11328 /* Match Host and Dongle rx alignment */
11329 dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
11330 NULL, 0, TRUE);
11331
11332 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
11333 /* enable credall to reduce the chance of no bus credit happened. */
11334 dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
11335 #endif
11336
11337 #ifdef USE_WFA_CERT_CONF
11338 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
11339 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
11340 }
11341 #endif /* USE_WFA_CERT_CONF */
11342 if (glom != DEFAULT_GLOM_VALUE) {
11343 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
11344 dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
11345 }
11346 #endif /* defined(BCMSDIO) */
11347
11348 /* Setup timeout if Beacons are lost and roam is off to report link down */
11349 dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0, TRUE);
11350
11351 /* Setup assoc_retry_max count to reconnect target AP in dongle */
11352 dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0, TRUE);
11353
11354 #if defined(AP) && !defined(WLP2P)
11355 dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
11356
11357 #endif /* defined(AP) && !defined(WLP2P) */
11358
11359 #ifdef MIMO_ANT_SETTING
11360 dhd_sel_ant_from_file(dhd);
11361 #endif /* MIMO_ANT_SETTING */
11362
11363 #if defined(SOFTAP)
11364 if (ap_fw_loaded == TRUE) {
11365 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
11366 }
11367 #endif
11368
11369 #if defined(KEEP_ALIVE)
11370 {
11371 /* Set Keep Alive : be sure to use FW with -keepalive */
11372 int res;
11373
11374 #if defined(SOFTAP)
11375 if (ap_fw_loaded == FALSE)
11376 #endif
11377 if (!(dhd->op_mode &
11378 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
11379 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
11380 DHD_ERROR(("%s set keeplive failed %d\n",
11381 __FUNCTION__, res));
11382 }
11383 }
11384 #endif /* defined(KEEP_ALIVE) */
11385
11386 #ifdef USE_WL_TXBF
11387 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
11388 if (ret < 0)
11389 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
11390
11391 #endif /* USE_WL_TXBF */
11392
11393 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
11394 0, TRUE);
11395 if (ret < 0) {
11396 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
11397 }
11398
11399 #ifdef DISABLE_TXBFR
11400 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
11401 0, TRUE);
11402 if (ret < 0) {
11403 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
11404 }
11405 #endif /* DISABLE_TXBFR */
11406
11407 #ifdef USE_WFA_CERT_CONF
11408 #ifdef USE_WL_FRAMEBURST
11409 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
11410 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
11411 }
11412 #endif /* USE_WL_FRAMEBURST */
11413 #ifdef DISABLE_FRAMEBURST_VSDB
11414 g_frameburst = frameburst;
11415 #endif /* DISABLE_FRAMEBURST_VSDB */
11416 #endif /* USE_WFA_CERT_CONF */
11417 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
11418 /* Disable Framebursting for SofAP */
11419 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
11420 frameburst = 0;
11421 }
11422 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
11423 /* Set frameburst to value */
11424 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
11425 sizeof(frameburst), TRUE, 0)) < 0) {
11426 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
11427 }
11428
11429 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
11430 if (iov_buf == NULL) {
11431 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
11432 ret = BCME_NOMEM;
11433 goto done;
11434 }
11435
11436
11437 #if defined(CUSTOM_AMPDU_BA_WSIZE)
11438 /* Set ampdu ba wsize to 64 or 16 */
11439 #ifdef CUSTOM_AMPDU_BA_WSIZE
11440 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
11441 #endif
11442 if (ampdu_ba_wsize != 0) {
11443 ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
11444 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
11445 if (ret < 0) {
11446 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
11447 __FUNCTION__, ampdu_ba_wsize, ret));
11448 }
11449 }
11450 #endif
11451
11452 #ifdef ENABLE_TEMP_THROTTLING
11453 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
11454 memset(&temp_control, 0, sizeof(temp_control));
11455 temp_control.enable = 1;
11456 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
11457 ret = dhd_iovar(dhd, 0, "temp_throttle_control", (char *)&temp_control,
11458 sizeof(temp_control), NULL, 0, TRUE);
11459 if (ret < 0) {
11460 DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
11461 __FUNCTION__, ret));
11462 }
11463 }
11464 #endif /* ENABLE_TEMP_THROTTLING */
11465
11466 #if defined(CUSTOM_AMPDU_MPDU)
11467 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
11468 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
11469 ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
11470 NULL, 0, TRUE);
11471 if (ret < 0) {
11472 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
11473 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
11474 }
11475 }
11476 #endif /* CUSTOM_AMPDU_MPDU */
11477
11478 #if defined(CUSTOM_AMPDU_RELEASE)
11479 ampdu_release = CUSTOM_AMPDU_RELEASE;
11480 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
11481 ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
11482 sizeof(ampdu_release), NULL, 0, TRUE);
11483 if (ret < 0) {
11484 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
11485 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
11486 }
11487 }
11488 #endif /* CUSTOM_AMPDU_RELEASE */
11489
11490 #if defined(CUSTOM_AMSDU_AGGSF)
11491 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
11492 if (amsdu_aggsf != 0) {
11493 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
11494 NULL, 0, TRUE);
11495 if (ret < 0) {
11496 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
11497 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
11498 }
11499 }
11500 #endif /* CUSTOM_AMSDU_AGGSF */
11501
11502 #if defined(SUPPORT_5G_1024QAM_VHT)
11503 #ifdef SUPPORT_5G_1024QAM_VHT
11504 if (dhd_get_chipid(dhd) == BCM4361_CHIP_ID) {
11505 vht_features |= 0x6; /* 5G 1024 QAM support */
11506 }
11507 #endif /* SUPPORT_5G_1024QAM_VHT */
11508 if (vht_features) {
11509 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
11510 NULL, 0, TRUE);
11511 if (ret < 0) {
11512 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
11513
11514 if (ret == BCME_NOTDOWN) {
11515 uint wl_down = 1;
11516 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
11517 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11518 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
11519 " vht_features = 0x%x\n",
11520 __FUNCTION__, ret, vht_features));
11521
11522 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
11523 sizeof(vht_features), NULL, 0, TRUE);
11524 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
11525 }
11526 }
11527 }
11528 #endif
11529 #ifdef DISABLE_11N_PROPRIETARY_RATES
11530 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
11531 TRUE);
11532 if (ret < 0) {
11533 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
11534 }
11535 #endif /* DISABLE_11N_PROPRIETARY_RATES */
11536 #ifdef CUSTOM_PSPRETEND_THR
11537 /* Turn off MPC in AP mode */
11538 ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
11539 sizeof(pspretend_thr), NULL, 0, TRUE);
11540 if (ret < 0) {
11541 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
11542 __FUNCTION__, ret));
11543 }
11544 #endif
11545
11546 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
11547 NULL, 0, TRUE);
11548 if (ret < 0) {
11549 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
11550 }
11551 #ifdef SUPPORT_SET_CAC
11552 bcm_mkiovar("cac", (char *)&cac, sizeof(cac), iovbuf, sizeof(iovbuf));
11553 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
11554 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
11555 }
11556 #endif /* SUPPORT_SET_CAC */
11557 #ifdef DHD_ULP
11558 /* Get the required details from dongle during preinit ioctl */
11559 dhd_ulp_preinit(dhd);
11560 #endif /* DHD_ULP */
11561
11562 /* Read event_msgs mask */
11563 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
11564 sizeof(iovbuf), FALSE);
11565 if (ret < 0) {
11566 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
11567 goto done;
11568 }
11569 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
11570
11571 /* Setup event_msgs */
11572 setbit(eventmask, WLC_E_SET_SSID);
11573 setbit(eventmask, WLC_E_PRUNE);
11574 setbit(eventmask, WLC_E_AUTH);
11575 setbit(eventmask, WLC_E_AUTH_IND);
11576 setbit(eventmask, WLC_E_ASSOC);
11577 setbit(eventmask, WLC_E_REASSOC);
11578 setbit(eventmask, WLC_E_REASSOC_IND);
11579 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
11580 setbit(eventmask, WLC_E_DEAUTH);
11581 setbit(eventmask, WLC_E_DEAUTH_IND);
11582 setbit(eventmask, WLC_E_DISASSOC_IND);
11583 setbit(eventmask, WLC_E_DISASSOC);
11584 setbit(eventmask, WLC_E_JOIN);
11585 setbit(eventmask, WLC_E_BSSID);
11586 setbit(eventmask, WLC_E_START);
11587 setbit(eventmask, WLC_E_ASSOC_IND);
11588 setbit(eventmask, WLC_E_PSK_SUP);
11589 setbit(eventmask, WLC_E_LINK);
11590 setbit(eventmask, WLC_E_MIC_ERROR);
11591 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
11592 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
11593 #ifdef LIMIT_BORROW
11594 setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
11595 #endif
11596 #ifndef WL_CFG80211
11597 setbit(eventmask, WLC_E_PMKID_CACHE);
11598 setbit(eventmask, WLC_E_TXFAIL);
11599 #endif
11600 setbit(eventmask, WLC_E_JOIN_START);
11601 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
11602 #ifdef DHD_DEBUG
11603 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
11604 #endif
11605 #ifdef WLMEDIA_HTSF
11606 setbit(eventmask, WLC_E_HTSFSYNC);
11607 #endif /* WLMEDIA_HTSF */
11608 #ifdef PNO_SUPPORT
11609 setbit(eventmask, WLC_E_PFN_NET_FOUND);
11610 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
11611 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
11612 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
11613 #endif /* PNO_SUPPORT */
11614 /* enable dongle roaming event */
11615 setbit(eventmask, WLC_E_ROAM);
11616 #ifdef WLTDLS
11617 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
11618 #endif /* WLTDLS */
11619 #ifdef WL_ESCAN
11620 setbit(eventmask, WLC_E_ESCAN_RESULT);
11621 #endif /* WL_ESCAN */
11622 #ifdef RTT_SUPPORT
11623 setbit(eventmask, WLC_E_PROXD);
11624 #endif /* RTT_SUPPORT */
11625 #ifdef WL_CFG80211
11626 setbit(eventmask, WLC_E_ESCAN_RESULT);
11627 setbit(eventmask, WLC_E_AP_STARTED);
11628 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
11629 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11630 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
11631 }
11632 #endif /* WL_CFG80211 */
11633
11634 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11635 if (dhd_logtrace_from_file(dhd)) {
11636 setbit(eventmask, WLC_E_TRACE);
11637 } else {
11638 clrbit(eventmask, WLC_E_TRACE);
11639 }
11640 #elif defined(SHOW_LOGTRACE)
11641 setbit(eventmask, WLC_E_TRACE);
11642 #else
11643 clrbit(eventmask, WLC_E_TRACE);
11644 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11645
11646 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
11647 #ifdef DHD_WMF
11648 setbit(eventmask, WLC_E_PSTA_PRIMARY_INTF_IND);
11649 #endif
11650 #ifdef CUSTOM_EVENT_PM_WAKE
11651 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
11652 #endif /* CUSTOM_EVENT_PM_WAKE */
11653 #ifdef DHD_LOSSLESS_ROAMING
11654 setbit(eventmask, WLC_E_ROAM_PREP);
11655 #endif
11656 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11657 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11658 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11659
11660 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11661 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11662 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11663
11664 #ifdef SUSPEND_EVENT
11665 bcopy(eventmask, dhd->conf->resume_eventmask, WL_EVENTING_MASK_LEN);
11666 #endif
11667 /* Write updated Event mask */
11668 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
11669 if (ret < 0) {
11670 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
11671 goto done;
11672 }
11673
11674 /* make up event mask ext message iovar for event larger than 128 */
11675 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
11676 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
11677 if (eventmask_msg == NULL) {
11678 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
11679 ret = BCME_NOMEM;
11680 goto done;
11681 }
11682 bzero(eventmask_msg, msglen);
11683 eventmask_msg->ver = EVENTMSGS_VER;
11684 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11685
11686 /* Read event_msgs_ext mask */
11687 ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
11688 WLC_IOCTL_SMLEN, FALSE);
11689
11690 if (ret2 == 0) { /* event_msgs_ext must be supported */
11691 bcopy(iov_buf, eventmask_msg, msglen);
11692 #ifdef RSSI_MONITOR_SUPPORT
11693 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11694 #endif /* RSSI_MONITOR_SUPPORT */
11695 #ifdef GSCAN_SUPPORT
11696 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
11697 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
11698 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
11699 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
11700 #endif /* GSCAN_SUPPORT */
11701 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11702 #ifdef BT_WIFI_HANDOVER
11703 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
11704 #endif /* BT_WIFI_HANDOVER */
11705 #ifdef DBG_PKT_MON
11706 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
11707 #endif /* DBG_PKT_MON */
11708 #ifdef DHD_ULP
11709 setbit(eventmask_msg->mask, WLC_E_ULP);
11710 #endif
11711 #ifdef ENABLE_TEMP_THROTTLING
11712 setbit(eventmask_msg->mask, WLC_E_TEMP_THROTTLE);
11713 #endif /* ENABLE_TEMP_THROTTLING */
11714
11715 /* Write updated Event mask */
11716 eventmask_msg->ver = EVENTMSGS_VER;
11717 eventmask_msg->command = EVENTMSGS_SET_MASK;
11718 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11719 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
11720 TRUE);
11721 if (ret < 0) {
11722 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
11723 goto done;
11724 }
11725 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
11726 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
11727 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
11728 __FUNCTION__, ret2));
11729 } else {
11730 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
11731 ret = ret2;
11732 goto done;
11733 }
11734
11735 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11736 /* Enabling event log trace for EAP events */
11737 el_tag = (wl_el_tag_params_t *)kmalloc(sizeof(wl_el_tag_params_t), GFP_KERNEL);
11738 if (el_tag == NULL) {
11739 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
11740 (int)sizeof(wl_el_tag_params_t)));
11741 ret = BCME_NOMEM;
11742 goto done;
11743 }
11744 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
11745 el_tag->set = 1;
11746 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
11747 bcm_mkiovar("event_log_tag_control", (char *)el_tag,
11748 sizeof(*el_tag), iovbuf, sizeof(iovbuf));
11749 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
11750 #endif /* DHD_8021X_DUMP */
11751
11752 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
11753 sizeof(scan_assoc_time), TRUE, 0);
11754 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
11755 sizeof(scan_unassoc_time), TRUE, 0);
11756 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
11757 sizeof(scan_passive_time), TRUE, 0);
11758
11759 #ifdef ARP_OFFLOAD_SUPPORT
11760 /* Set and enable ARP offload feature for STA only */
11761 #if defined(SOFTAP)
11762 if (arpoe && !ap_fw_loaded)
11763 #else
11764 if (arpoe)
11765 #endif
11766 {
11767 dhd_arp_offload_enable(dhd, TRUE);
11768 dhd_arp_offload_set(dhd, dhd_arp_mode);
11769 } else {
11770 dhd_arp_offload_enable(dhd, FALSE);
11771 dhd_arp_offload_set(dhd, 0);
11772 }
11773 dhd_arp_enable = arpoe;
11774 #endif /* ARP_OFFLOAD_SUPPORT */
11775
11776 #ifdef PKT_FILTER_SUPPORT
11777 /* Setup default defintions for pktfilter , enable in suspend */
11778 if (dhd_master_mode) {
11779 dhd->pktfilter_count = 6;
11780 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11781 if (!FW_SUPPORTED(dhd, pf6)) {
11782 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11783 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11784 } else {
11785 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11786 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11787 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11788 }
11789 /* apply APP pktfilter */
11790 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
11791
11792 /* Setup filter to allow only unicast */
11793 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11794
11795 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
11796 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11797
11798 dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM] = NULL;
11799 if (FW_SUPPORTED(dhd, pf6)) {
11800 /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */
11801 dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] =
11802 "107 1 6 IP4_H:16 0xf0 !0xe0 IP4_H:19 0xff 0xff";
11803 dhd->pktfilter_count = 8;
11804 }
11805
11806 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11807 dhd->pktfilter_count = 4;
11808 /* Setup filter to block broadcast and NAT Keepalive packets */
11809 /* discard all broadcast packets */
11810 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11811 /* discard NAT Keepalive packets */
11812 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11813 /* discard NAT Keepalive packets */
11814 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11815 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11816 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11817 } else
11818 dhd_conf_discard_pkt_filter(dhd);
11819 dhd_conf_add_pkt_filter(dhd);
11820
11821 #if defined(SOFTAP)
11822 if (ap_fw_loaded) {
11823 dhd_enable_packet_filter(0, dhd);
11824 }
11825 #endif /* defined(SOFTAP) */
11826 dhd_set_packet_filter(dhd);
11827 #endif /* PKT_FILTER_SUPPORT */
11828 #ifdef DISABLE_11N
11829 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
11830 if (ret < 0)
11831 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
11832 #endif /* DISABLE_11N */
11833
11834 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11835 dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE);
11836 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11837 /* query for 'clmver' to get clm version info from firmware */
11838 memset(buf, 0, sizeof(buf));
11839 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11840 if (ret < 0)
11841 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11842 else {
11843 char *clmver_temp_buf = NULL;
11844
11845 if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11846 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11847 } else {
11848 ptr = (clmver_temp_buf + strlen("Data:"));
11849 if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
11850 DHD_ERROR(("Couldn't find New line character\n"));
11851 } else {
11852 memset(clm_version, 0, CLM_VER_STR_LEN);
11853 strncpy(clm_version, clmver_temp_buf,
11854 MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
11855 }
11856 }
11857 }
11858
11859 /* query for 'ver' to get version info from firmware */
11860 memset(buf, 0, sizeof(buf));
11861 ptr = buf;
11862 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
11863 if (ret < 0)
11864 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11865 else {
11866 bcmstrtok(&ptr, "\n", 0);
11867 strncpy(fw_version, buf, FW_VER_STR_LEN);
11868 fw_version[FW_VER_STR_LEN-1] = '\0';
11869 dhd_set_version_info(dhd, buf);
11870 #ifdef WRITE_WLANINFO
11871 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
11872 #endif /* WRITE_WLANINFO */
11873 }
11874 #ifdef GEN_SOFTAP_INFO_FILE
11875 sec_save_softap_info();
11876 #endif /* GEN_SOFTAP_INFO_FILE */
11877
11878 #if defined(BCMSDIO)
11879 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
11880 #endif /* defined(BCMSDIO) */
11881
11882 #if defined(BCMSDIO) || defined(BCMDBUS)
11883 #ifdef PROP_TXSTATUS
11884 if (disable_proptx ||
11885 #ifdef PROP_TXSTATUS_VSDB
11886 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
11887 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
11888 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
11889 #endif /* PROP_TXSTATUS_VSDB */
11890 FALSE) {
11891 wlfc_enable = FALSE;
11892 }
11893 ret = dhd_conf_get_disable_proptx(dhd);
11894 if (ret == 0){
11895 disable_proptx = 0;
11896 wlfc_enable = TRUE;
11897 } else if (ret >= 1) {
11898 disable_proptx = 1;
11899 wlfc_enable = FALSE;
11900 /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
11901 hostreorder = 0;
11902 }
11903
11904 #if defined(PROP_TXSTATUS)
11905 #ifdef USE_WFA_CERT_CONF
11906 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
11907 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
11908 wlfc_enable = proptx;
11909 }
11910 #endif /* USE_WFA_CERT_CONF */
11911 #endif /* PROP_TXSTATUS */
11912
11913 #ifndef DISABLE_11N
11914 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
11915 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
11916 NULL, 0, TRUE);
11917 if (ret2 < 0) {
11918 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
11919 if (ret2 != BCME_UNSUPPORTED)
11920 ret = ret2;
11921
11922 if (ret == BCME_NOTDOWN) {
11923 uint wl_down = 1;
11924 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
11925 sizeof(wl_down), TRUE, 0);
11926 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
11927 __FUNCTION__, ret2, hostreorder));
11928
11929 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
11930 sizeof(hostreorder), NULL, 0, TRUE);
11931 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
11932 if (ret2 != BCME_UNSUPPORTED)
11933 ret = ret2;
11934 }
11935 if (ret2 != BCME_OK)
11936 hostreorder = 0;
11937 }
11938 #endif /* DISABLE_11N */
11939
11940
11941 if (wlfc_enable) {
11942 dhd_wlfc_init(dhd);
11943 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
11944 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
11945 }
11946 #ifndef DISABLE_11N
11947 else if (hostreorder)
11948 dhd_wlfc_hostreorder_init(dhd);
11949 #endif /* DISABLE_11N */
11950 #else
11951 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
11952 printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
11953 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
11954 #endif /* PROP_TXSTATUS */
11955 #endif /* BCMSDIO || BCMDBUS */
11956 #ifndef PCIE_FULL_DONGLE
11957 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
11958 if (FW_SUPPORTED(dhd, ap)) {
11959 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
11960 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
11961 NULL, 0, TRUE);
11962 if (ret < 0)
11963 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11964 }
11965 #endif /* PCIE_FULL_DONGLE */
11966 #ifdef PNO_SUPPORT
11967 if (!dhd->pno_state) {
11968 dhd_pno_init(dhd);
11969 }
11970 #endif
11971 #ifdef RTT_SUPPORT
11972 if (!dhd->rtt_state) {
11973 ret = dhd_rtt_init(dhd);
11974 if (ret < 0) {
11975 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
11976 }
11977 }
11978 #endif
11979 #ifdef WL11U
11980 dhd_interworking_enable(dhd);
11981 #endif /* WL11U */
11982
11983 #ifdef SUPPORT_SENSORHUB
11984 DHD_ERROR(("%s: SensorHub enabled %d\n",
11985 __FUNCTION__, dhd->info->shub_enable));
11986 ret2 = dhd_iovar(dhd, 0, "shub", NULL, 0,
11987 (char *)&shub_ctl, sizeof(shub_ctl), FALSE);
11988 if (ret2 < 0) {
11989 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
11990 __FUNCTION__, ret2));
11991 dhd->info->shub_enable = 0;
11992 } else {
11993 dhd->info->shub_enable = shub_ctl.enable;
11994 DHD_ERROR(("%s: checking sensorhub enable %d\n",
11995 __FUNCTION__, dhd->info->shub_enable));
11996 }
11997 #else
11998 DHD_ERROR(("%s: SensorHub diabled %d\n",
11999 __FUNCTION__, dhd->info->shub_enable));
12000 dhd->info->shub_enable = FALSE;
12001 shub_ctl.enable = FALSE;
12002 ret2 = dhd_iovar(dhd, 0, "shub", (char *)&shub_ctl, sizeof(shub_ctl),
12003 NULL, 0, TRUE);
12004 if (ret2 < 0) {
12005 DHD_ERROR(("%s failed to set ShubHub disable\n",
12006 __FUNCTION__));
12007 }
12008 #endif /* SUPPORT_SENSORHUB */
12009
12010
12011 #ifdef NDO_CONFIG_SUPPORT
12012 dhd->ndo_enable = FALSE;
12013 dhd->ndo_host_ip_overflow = FALSE;
12014 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
12015 #endif /* NDO_CONFIG_SUPPORT */
12016
12017 /* ND offload version supported */
12018 dhd->ndo_version = dhd_ndo_get_version(dhd);
12019 if (dhd->ndo_version > 0) {
12020 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
12021
12022 #ifdef NDO_CONFIG_SUPPORT
12023 /* enable Unsolicited NA filter */
12024 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
12025 if (ret < 0) {
12026 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
12027 }
12028 #endif /* NDO_CONFIG_SUPPORT */
12029 }
12030
12031 /* check dongle supports wbtext or not */
12032 dhd->wbtext_support = FALSE;
12033 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
12034 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
12035 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
12036 }
12037 if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
12038 dhd->wbtext_support = TRUE;
12039 }
12040 #ifndef WBTEXT
12041 /* driver can turn off wbtext feature through makefile */
12042 if (dhd->wbtext_support) {
12043 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
12044 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
12045 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
12046 DHD_ERROR(("failed to disable WBTEXT\n"));
12047 }
12048 }
12049 #endif /* !WBTEXT */
12050
12051 /* WNM capabilities */
12052 wnm_cap = 0
12053 #ifdef WL11U
12054 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
12055 #endif
12056 #ifdef WBTEXT
12057 | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
12058 #endif
12059 ;
12060 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
12061 DHD_ERROR(("failed to set WNM capabilities\n"));
12062 }
12063
12064 dhd_conf_postinit_ioctls(dhd);
12065 done:
12066
12067 if (eventmask_msg)
12068 kfree(eventmask_msg);
12069 if (iov_buf)
12070 kfree(iov_buf);
12071 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
12072 if (el_tag)
12073 kfree(el_tag);
12074 #endif /* DHD_8021X_DUMP */
12075 return ret;
12076 }
12077
12078
12079 int
12080 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
12081 uint res_len, int set)
12082 {
12083 char *buf = NULL;
12084 int input_len;
12085 wl_ioctl_t ioc;
12086 int ret;
12087
12088 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
12089 return BCME_BADARG;
12090
12091 input_len = strlen(name) + 1 + param_len;
12092 if (input_len > WLC_IOCTL_MAXLEN)
12093 return BCME_BADARG;
12094
12095 buf = NULL;
12096 if (set) {
12097 if (res_buf || res_len != 0) {
12098 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
12099 ret = BCME_BADARG;
12100 goto exit;
12101 }
12102 buf = kzalloc(input_len, GFP_KERNEL);
12103 if (!buf) {
12104 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12105 ret = BCME_NOMEM;
12106 goto exit;
12107 }
12108 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12109 if (!ret) {
12110 ret = BCME_NOMEM;
12111 goto exit;
12112 }
12113
12114 ioc.cmd = WLC_SET_VAR;
12115 ioc.buf = buf;
12116 ioc.len = input_len;
12117 ioc.set = set;
12118
12119 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12120 } else {
12121 if (!res_buf || !res_len) {
12122 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
12123 ret = BCME_BADARG;
12124 goto exit;
12125 }
12126
12127 if (res_len < input_len) {
12128 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
12129 res_len, input_len));
12130 buf = kzalloc(input_len, GFP_KERNEL);
12131 if (!buf) {
12132 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
12133 ret = BCME_NOMEM;
12134 goto exit;
12135 }
12136 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
12137 if (!ret) {
12138 ret = BCME_NOMEM;
12139 goto exit;
12140 }
12141
12142 ioc.cmd = WLC_GET_VAR;
12143 ioc.buf = buf;
12144 ioc.len = input_len;
12145 ioc.set = set;
12146
12147 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12148
12149 if (ret == BCME_OK) {
12150 memcpy(res_buf, buf, res_len);
12151 }
12152 } else {
12153 memset(res_buf, 0, res_len);
12154 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
12155 if (!ret) {
12156 ret = BCME_NOMEM;
12157 goto exit;
12158 }
12159
12160 ioc.cmd = WLC_GET_VAR;
12161 ioc.buf = res_buf;
12162 ioc.len = res_len;
12163 ioc.set = set;
12164
12165 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12166 }
12167 }
12168 exit:
12169 kfree(buf);
12170 return ret;
12171 }
12172
12173 int
12174 dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
12175 uint cmd_len, char **resptr, uint resp_len)
12176 {
12177 int len = resp_len;
12178 int ret;
12179 char *buf = *resptr;
12180 wl_ioctl_t ioc;
12181 if (resp_len > WLC_IOCTL_MAXLEN)
12182 return BCME_BADARG;
12183
12184 memset(buf, 0, resp_len);
12185
12186 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
12187 if (ret == 0) {
12188 return BCME_BUFTOOSHORT;
12189 }
12190
12191 memset(&ioc, 0, sizeof(ioc));
12192
12193 ioc.cmd = WLC_GET_VAR;
12194 ioc.buf = buf;
12195 ioc.len = len;
12196 ioc.set = 0;
12197
12198 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
12199
12200 return ret;
12201 }
12202
12203
12204 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
12205 {
12206 struct dhd_info *dhd = dhdp->info;
12207 struct net_device *dev = NULL;
12208
12209 ASSERT(dhd && dhd->iflist[ifidx]);
12210 dev = dhd->iflist[ifidx]->net;
12211 ASSERT(dev);
12212
12213 if (netif_running(dev)) {
12214 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
12215 return BCME_NOTDOWN;
12216 }
12217
12218 #define DHD_MIN_MTU 1500
12219 #define DHD_MAX_MTU 1752
12220
12221 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
12222 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
12223 return BCME_BADARG;
12224 }
12225
12226 dev->mtu = new_mtu;
12227 return 0;
12228 }
12229
12230 #ifdef ARP_OFFLOAD_SUPPORT
12231 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
12232 void
12233 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
12234 {
12235 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
12236 int i;
12237 int ret;
12238
12239 bzero(ipv4_buf, sizeof(ipv4_buf));
12240
12241 /* display what we've got */
12242 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12243 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
12244 #ifdef AOE_DBG
12245 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12246 #endif
12247 /* now we saved hoste_ip table, clr it in the dongle AOE */
12248 dhd_aoe_hostip_clr(dhd_pub, idx);
12249
12250 if (ret) {
12251 DHD_ERROR(("%s failed\n", __FUNCTION__));
12252 return;
12253 }
12254
12255 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
12256 if (add && (ipv4_buf[i] == 0)) {
12257 ipv4_buf[i] = ipa;
12258 add = FALSE; /* added ipa to local table */
12259 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
12260 __FUNCTION__, i));
12261 } else if (ipv4_buf[i] == ipa) {
12262 ipv4_buf[i] = 0;
12263 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
12264 __FUNCTION__, ipa, i));
12265 }
12266
12267 if (ipv4_buf[i] != 0) {
12268 /* add back host_ip entries from our local cache */
12269 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
12270 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
12271 __FUNCTION__, ipv4_buf[i], i));
12272 }
12273 }
12274 #ifdef AOE_DBG
12275 /* see the resulting hostip table */
12276 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
12277 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
12278 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
12279 #endif
12280 }
12281
12282 /*
12283 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
12284 * whenever there is an event related to an IP address.
12285 * ptr : kernel provided pointer to IP address that has changed
12286 */
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

	dhd_info_t *dhd;
	dhd_pub_t *dhd_pub;
	int idx;

	/* ARP offload globally disabled: nothing to mirror to the dongle. */
	if (!dhd_arp_enable)
		return NOTIFY_DONE;
	if (!ifa || !(ifa->ifa_dev->dev))
		return NOTIFY_DONE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	/* Filter notifications meant for non Broadcom devices */
	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
#if defined(WL_ENABLE_P2P_IF)
		/* P2P interfaces use their own ops table; only those pass. */
		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
#endif /* WL_ENABLE_P2P_IF */
			return NOTIFY_DONE;
	}
#endif /* LINUX_VERSION_CODE */

	dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
	if (!dhd)
		return NOTIFY_DONE;

	dhd_pub = &dhd->pub;

	/* arp_version 1 firmware keeps a single host_ip table; otherwise
	 * map the notifying netdev to its DHD interface index.
	 */
	if (dhd_pub->arp_version == 1) {
		idx = 0;
	} else {
		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
				break;
		}
		if (idx < DHD_MAX_IFS)
			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
		else {
			/* No match: fall back to the primary interface. */
			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
			idx = 0;
		}
	}

	switch (event) {
		case NETDEV_UP:
			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));

			if (dhd->pub.busstate != DHD_BUS_DATA) {
				/* Bus not up yet: stash the address in
				 * pend_ipaddr so it can be applied once the
				 * bus reaches DHD_BUS_DATA.  Only one pending
				 * address is kept (last writer wins).
				 */
				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
				if (dhd->pend_ipaddr) {
					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
						__FUNCTION__, dhd->pend_ipaddr));
				}
				dhd->pend_ipaddr = ifa->ifa_address;
				break;
			}

#ifdef AOE_IP_ALIAS_SUPPORT
			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
				__FUNCTION__));
			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
			break;

		case NETDEV_DOWN:
			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
			dhd->pend_ipaddr = 0;
#ifdef AOE_IP_ALIAS_SUPPORT
			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
				__FUNCTION__));
			/* In AP mode, or for non-primary netdevs, remove only
			 * this address; otherwise clear the whole table below.
			 */
			if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
				(ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
			} else
#endif /* AOE_IP_ALIAS_SUPPORT */
			{
				dhd_aoe_hostip_clr(&dhd->pub, idx);
				dhd_aoe_arp_clr(&dhd->pub, idx);
			}
			break;

		default:
			DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
				__func__, ifa->ifa_label, event));
			break;
	}
	return NOTIFY_DONE;
}
12382 #endif /* ARP_OFFLOAD_SUPPORT */
12383
12384 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
/* Neighbor Discovery Offload: deferred handler */
/* Deferred-work callback: applies an IPv6 address add/remove to the dongle's
 * ND offload tables.  event_data is a kzalloc'd ipv6_work_info_t that this
 * handler owns and frees before returning.
 */
static void
dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
{
	struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
	dhd_info_t *dhd = (dhd_info_t *)dhd_info;
	dhd_pub_t *dhdp;
	int ret;

	if (!dhd) {
		DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
		goto done;
	}
	dhdp = &dhd->pub;

	if (event != DHD_WQ_WORK_IPV6_NDO) {
		DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
		goto done;
	}

	/* NULL work item: nothing to free, so plain return (not goto done). */
	if (!ndo_work) {
		DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
		return;
	}

	switch (ndo_work->event) {
		case NETDEV_UP:
#ifndef NDO_CONFIG_SUPPORT
			DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
			}
#endif /* !NDO_CONFIG_SUPPORT */
			DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
			/* Newer firmware (ndo_version > 0) takes a typed add. */
			if (dhdp->ndo_version > 0) {
				/* inet6 addr notifier called only for unicast address */
				ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
					WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
			} else {
				ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
					ndo_work->if_idx);
			}
			if (ret < 0) {
				DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
					__FUNCTION__, ret));
			}
			break;
		case NETDEV_DOWN:
			if (dhdp->ndo_version > 0) {
				DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
				ret = dhd_ndo_remove_ip_by_addr(dhdp,
					&ndo_work->ipv6_addr[0], ndo_work->if_idx);
			} else {
				/* Legacy firmware can only clear the whole table. */
				DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
				ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
			}
			if (ret < 0) {
				DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
					__FUNCTION__, ret));
				goto done;
			}
#ifdef NDO_CONFIG_SUPPORT
			/* A slot just freed up: retry addresses that previously
			 * overflowed the host-ip table.
			 */
			if (dhdp->ndo_host_ip_overflow) {
				ret = dhd_dev_ndo_update_inet6addr(
					dhd_idx2net(dhdp, ndo_work->if_idx));
				if ((ret < 0) && (ret != BCME_NORESOURCE)) {
					DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
						__FUNCTION__, ret));
					goto done;
				}
			}
#else /* !NDO_CONFIG_SUPPORT */
			DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, FALSE);
			if (ret < 0) {
				DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
				goto done;
			}
#endif /* NDO_CONFIG_SUPPORT */
			break;

		default:
			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
			break;
	}
done:
	/* free ndo_work. alloced while scheduling the work */
	if (ndo_work) {
		kfree(ndo_work);
	}

	return;
}
12479
12480 /*
12481 * Neighbor Discovery Offload: Called when an interface
12482 * is assigned with ipv6 address.
12483 * Handles only primary interface
12484 */
12485 int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
12486 {
12487 dhd_info_t *dhd;
12488 dhd_pub_t *dhdp;
12489 struct inet6_ifaddr *inet6_ifa = ptr;
12490 struct ipv6_work_info_t *ndo_info;
12491 int idx;
12492
12493 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
12494 /* Filter notifications meant for non Broadcom devices */
12495 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
12496 return NOTIFY_DONE;
12497 }
12498 #endif /* LINUX_VERSION_CODE */
12499
12500 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
12501 if (!dhd) {
12502 return NOTIFY_DONE;
12503 }
12504 dhdp = &dhd->pub;
12505
12506 /* Supports only primary interface */
12507 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
12508 if (idx != 0) {
12509 return NOTIFY_DONE;
12510 }
12511
12512 /* FW capability */
12513 if (!FW_SUPPORTED(dhdp, ndoe)) {
12514 return NOTIFY_DONE;
12515 }
12516
12517 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
12518 if (!ndo_info) {
12519 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
12520 return NOTIFY_DONE;
12521 }
12522
12523 /* fill up ndo_info */
12524 ndo_info->event = event;
12525 ndo_info->if_idx = idx;
12526 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
12527
12528 /* defer the work to thread as it may block kernel */
12529 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
12530 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
12531 return NOTIFY_DONE;
12532 }
12533 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12534
/* Register the net_device for DHD interface 'ifidx' with the kernel.
 * Wires up netdev ops (primary vs. virtual), picks the MAC address,
 * then calls register_netdev/register_netdevice depending on whether
 * the caller already holds the rtnl lock (need_rtnl_lock).
 * Returns 0 on success, BCME_ERROR on bad interface, or the negative
 * errno from the register call.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp;
	struct net_device *net = NULL;
	int err = 0;
	/* Fallback MAC (Broadcom OUI 00:90:4c) used if no real MAC is known. */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
		DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
		return BCME_ERROR;
	}

	ASSERT(dhd && dhd->iflist[ifidx]);
	ifp = dhd->iflist[ifidx];
	net = ifp->net;
	ASSERT(net && (ifp->idx == ifidx));

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	/* Pre-2.6.31 kernels: individual function pointers on net_device. */
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot. This will not work in simultaneous AP/STA mode,
		 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
				__func__, net->name));
			temp_addr[0] |= 0x02;
		}
	}

	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = &wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

#ifdef WLMESH
	/* Mesh firmware: derive distinct MACs for secondary interfaces. */
	if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
		temp_addr[4] ^= 0x80;
		temp_addr[4] += ifidx;
		temp_addr[5] += ifidx;
	}
#endif
	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	if (ifidx == 0)
		printf("%s\n", dhd_version);
#ifdef WL_EXT_IAPSTA
	else
		wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
#endif
	if (ifidx != 0) {
		/* Push the (possibly adjusted) MAC down to the dongle. */
		if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
			DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
		else
			DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
	}

	/* register_netdev takes rtnl itself; register_netdevice assumes held. */
	if (need_rtnl_lock)
		err = register_netdev(net);
	else
		err = register_netdevice(net);

	if (err != 0) {
		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
		goto fail;
	}
#ifdef WL_EXT_IAPSTA
	if (ifidx == 0)
		wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
	wl_ext_iapsta_attach_name(net, ifidx);
#endif



	printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
#if defined(CUSTOMER_HW4_DEBUG)
		MAC2STRDBG(dhd->pub.mac.octet));
#else
		MAC2STRDBG(net->dev_addr));
#endif /* CUSTOMER_HW4_DEBUG */

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
//		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
	KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS))
	if (ifidx == 0) {
#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
		up(&dhd_registration_sem);
#endif /* BCMLXSDMMC */
		/* If firmware is loaded on ifup rather than at driver load,
		 * power the chip back down now that registration is done.
		 */
		if (!dhd_download_fw_on_driverload) {
#ifdef WL_CFG80211
			wl_terminate_event_handler(net);
#endif /* WL_CFG80211 */
#if defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
			skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */

#ifdef SHOW_LOGTRACE
			/* Release the skbs from queue for WLC_E_TRACE event */
			dhd_event_logtrace_flush_queue(dhdp);
#endif /* SHOW_LOGTRACE */

#ifdef DHDTCPACK_SUPPRESS
			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
			dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
			dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
#if defined(BT_OVER_SDIO)
			dhd->bus_user_count--;
#endif /* BT_OVER_SDIO */
		}
	}
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
	return 0;

fail:
	/* Undo the ops assignment so a retry sees a clean net_device. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
12713
12714 void
12715 dhd_bus_detach(dhd_pub_t *dhdp)
12716 {
12717 dhd_info_t *dhd;
12718
12719 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12720
12721 if (dhdp) {
12722 dhd = (dhd_info_t *)dhdp->info;
12723 if (dhd) {
12724
12725 /*
12726 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
12727 * calling stop again will cuase SD read/write errors.
12728 */
12729 if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
12730 /* Stop the protocol module */
12731 dhd_prot_stop(&dhd->pub);
12732
12733 /* Stop the bus module */
12734 #ifdef BCMDBUS
12735 /* Force Dongle terminated */
12736 if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
12737 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
12738 __FUNCTION__));
12739 dbus_stop(dhd->pub.bus);
12740 dhd->pub.busstate = DHD_BUS_DOWN;
12741 #else
12742 dhd_bus_stop(dhd->pub.bus, TRUE);
12743 #endif /* BCMDBUS */
12744 }
12745
12746 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
12747 dhd_bus_oob_intr_unregister(dhdp);
12748 #endif
12749 }
12750 }
12751 }
12752
12753
12754 void dhd_detach(dhd_pub_t *dhdp)
12755 {
12756 dhd_info_t *dhd;
12757 unsigned long flags;
12758 int timer_valid = FALSE;
12759 struct net_device *dev;
12760 #ifdef WL_CFG80211
12761 struct bcm_cfg80211 *cfg = NULL;
12762 #endif
12763 #ifdef HOFFLOAD_MODULES
12764 struct module_metadata *hmem = NULL;
12765 #endif
12766 if (!dhdp)
12767 return;
12768
12769 dhd = (dhd_info_t *)dhdp->info;
12770 if (!dhd)
12771 return;
12772
12773 dev = dhd->iflist[0]->net;
12774
12775 if (dev) {
12776 rtnl_lock();
12777 if (dev->flags & IFF_UP) {
12778 /* If IFF_UP is still up, it indicates that
12779 * "ifconfig wlan0 down" hasn't been called.
12780 * So invoke dev_close explicitly here to
12781 * bring down the interface.
12782 */
12783 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12784 dev_close(dev);
12785 }
12786 rtnl_unlock();
12787 }
12788
12789 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
12790
12791 dhd->pub.up = 0;
12792 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
12793 /* Give sufficient time for threads to start running in case
12794 * dhd_attach() has failed
12795 */
12796 OSL_SLEEP(100);
12797 }
12798 #ifdef DHD_WET
12799 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
12800 #endif /* DHD_WET */
12801 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
12802 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
12803
12804 #ifdef PROP_TXSTATUS
12805 #ifdef DHD_WLFC_THREAD
12806 if (dhd->pub.wlfc_thread) {
12807 kthread_stop(dhd->pub.wlfc_thread);
12808 dhdp->wlfc_thread_go = TRUE;
12809 wake_up_interruptible(&dhdp->wlfc_wqhead);
12810 }
12811 dhd->pub.wlfc_thread = NULL;
12812 #endif /* DHD_WLFC_THREAD */
12813 #endif /* PROP_TXSTATUS */
12814
12815 #ifdef DHD_TIMESYNC
12816 if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
12817 dhd_timesync_detach(dhdp);
12818 }
12819 #endif /* DHD_TIMESYNC */
12820 #ifdef WL_CFG80211
12821 if (dev) {
12822 wl_cfg80211_down(dev);
12823 }
12824 #endif /* WL_CFG80211 */
12825
12826 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
12827 dhd_bus_detach(dhdp);
12828 #ifdef BCMPCIE
12829 if (is_reboot == SYS_RESTART) {
12830 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
12831 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
12832 dhdpcie_bus_clock_stop(dhdp->bus);
12833 wifi_platform_set_power(dhd_wifi_platdata->adapters,
12834 FALSE, WIFI_TURNOFF_DELAY);
12835 }
12836 }
12837 #endif /* BCMPCIE */
12838 #ifndef PCIE_FULL_DONGLE
12839 if (dhdp->prot)
12840 dhd_prot_detach(dhdp);
12841 #endif /* !PCIE_FULL_DONGLE */
12842 }
12843
12844 #ifdef ARP_OFFLOAD_SUPPORT
12845 if (dhd_inetaddr_notifier_registered) {
12846 dhd_inetaddr_notifier_registered = FALSE;
12847 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
12848 }
12849 #endif /* ARP_OFFLOAD_SUPPORT */
12850 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12851 if (dhd_inet6addr_notifier_registered) {
12852 dhd_inet6addr_notifier_registered = FALSE;
12853 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
12854 }
12855 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12856 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
12857 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
12858 if (dhd->early_suspend.suspend)
12859 unregister_early_suspend(&dhd->early_suspend);
12860 }
12861 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
12862
12863 #if defined(WL_WIRELESS_EXT)
12864 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
12865 /* Detatch and unlink in the iw */
12866 wl_iw_detach();
12867 }
12868 #ifdef WL_ESCAN
12869 wl_escan_detach(dhdp);
12870 #endif /* WL_ESCAN */
12871 #endif /* defined(WL_WIRELESS_EXT) */
12872
12873 #ifdef DHD_ULP
12874 dhd_ulp_deinit(dhd->pub.osh, dhdp);
12875 #endif /* DHD_ULP */
12876
12877 /* delete all interfaces, start with virtual */
12878 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
12879 int i = 1;
12880 dhd_if_t *ifp;
12881
12882 /* Cleanup virtual interfaces */
12883 dhd_net_if_lock_local(dhd);
12884 for (i = 1; i < DHD_MAX_IFS; i++) {
12885 if (dhd->iflist[i]) {
12886 dhd_remove_if(&dhd->pub, i, TRUE);
12887 }
12888 }
12889 dhd_net_if_unlock_local(dhd);
12890
12891 /* delete primary interface 0 */
12892 ifp = dhd->iflist[0];
12893 ASSERT(ifp);
12894 ASSERT(ifp->net);
12895 if (ifp && ifp->net) {
12896 #ifdef WL_CFG80211
12897 cfg = wl_get_cfg(ifp->net);
12898 #endif
12899 /* in unregister_netdev case, the interface gets freed by net->destructor
12900 * (which is set to free_netdev)
12901 */
12902 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
12903 free_netdev(ifp->net);
12904 } else {
12905 argos_register_notifier_deinit();
12906 #ifdef SET_RPS_CPUS
12907 custom_rps_map_clear(ifp->net->_rx);
12908 #endif /* SET_RPS_CPUS */
12909 netif_tx_disable(ifp->net);
12910 unregister_netdev(ifp->net);
12911 }
12912 #ifdef PCIE_FULL_DONGLE
12913 ifp->net = DHD_NET_DEV_NULL;
12914 #else
12915 ifp->net = NULL;
12916 #endif /* PCIE_FULL_DONGLE */
12917
12918 #ifdef DHD_WMF
12919 dhd_wmf_cleanup(dhdp, 0);
12920 #endif /* DHD_WMF */
12921 #ifdef DHD_L2_FILTER
12922 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
12923 NULL, FALSE, dhdp->tickcnt);
12924 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
12925 ifp->phnd_arp_table = NULL;
12926 #endif /* DHD_L2_FILTER */
12927
12928
12929 dhd_if_del_sta_list(ifp);
12930
12931 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
12932 dhd->iflist[0] = NULL;
12933 }
12934 }
12935
12936 /* Clear the watchdog timer */
12937 DHD_GENERAL_LOCK(&dhd->pub, flags);
12938 timer_valid = dhd->wd_timer_valid;
12939 dhd->wd_timer_valid = FALSE;
12940 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12941 if (timer_valid)
12942 del_timer_sync(&dhd->timer);
12943 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
12944
12945 #ifdef BCMDBUS
12946 tasklet_kill(&dhd->tasklet);
12947 #else
12948 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
12949 #ifdef DHD_PCIE_RUNTIMEPM
12950 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
12951 PROC_STOP(&dhd->thr_rpm_ctl);
12952 }
12953 #endif /* DHD_PCIE_RUNTIMEPM */
12954 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
12955 PROC_STOP(&dhd->thr_wdt_ctl);
12956 }
12957
12958 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
12959 PROC_STOP(&dhd->thr_rxf_ctl);
12960 }
12961
12962 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
12963 PROC_STOP(&dhd->thr_dpc_ctl);
12964 } else
12965 {
12966 tasklet_kill(&dhd->tasklet);
12967 }
12968 }
12969 #endif /* BCMDBUS */
12970
12971 #ifdef DHD_LB
12972 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
12973 /* Clear the flag first to avoid calling the cpu notifier */
12974 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
12975
12976 /* Kill the Load Balancing Tasklets */
12977 #ifdef DHD_LB_RXP
12978 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
12979 __skb_queue_purge(&dhd->rx_pend_queue);
12980 #endif /* DHD_LB_RXP */
12981 #ifdef DHD_LB_TXP
12982 cancel_work_sync(&dhd->tx_dispatcher_work);
12983 tasklet_kill(&dhd->tx_tasklet);
12984 __skb_queue_purge(&dhd->tx_pend_queue);
12985 #endif /* DHD_LB_TXP */
12986 #ifdef DHD_LB_TXC
12987 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
12988 tasklet_kill(&dhd->tx_compl_tasklet);
12989 #endif /* DHD_LB_TXC */
12990 #ifdef DHD_LB_RXC
12991 tasklet_kill(&dhd->rx_compl_tasklet);
12992 #endif /* DHD_LB_RXC */
12993
12994 if (dhd->cpu_notifier.notifier_call != NULL) {
12995 unregister_cpu_notifier(&dhd->cpu_notifier);
12996 }
12997 dhd_cpumasks_deinit(dhd);
12998 DHD_LB_STATS_DEINIT(&dhd->pub);
12999 }
13000 #endif /* DHD_LB */
13001
13002 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
13003
13004 #ifdef DHD_LOG_DUMP
13005 dhd_log_dump_deinit(&dhd->pub);
13006 #endif /* DHD_LOG_DUMP */
13007 #ifdef WL_CFG80211
13008 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
13009 if (!cfg) {
13010 DHD_ERROR(("cfg NULL!\n"));
13011 ASSERT(0);
13012 } else {
13013 wl_cfg80211_detach(cfg);
13014 dhd_monitor_uninit();
13015 }
13016 }
13017 #endif
13018
13019 #ifdef DEBUGABILITY
13020 if (dhdp->dbg) {
13021 #ifdef DBG_PKT_MON
13022 dhd_os_dbg_detach_pkt_monitor(dhdp);
13023 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
13024 #endif /* DBG_PKT_MON */
13025 dhd_os_dbg_detach(dhdp);
13026 }
13027 #endif /* DEBUGABILITY */
13028 #ifdef SHOW_LOGTRACE
13029 #ifdef DHD_PKT_LOGGING
13030 dhd_os_detach_pktlog(dhdp);
13031 #endif /* DHD_PKT_LOGGING */
13032 /* Release the skbs from queue for WLC_E_TRACE event */
13033 dhd_event_logtrace_flush_queue(dhdp);
13034
13035 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
13036 if (dhd->event_data.fmts) {
13037 MFREE(dhd->pub.osh, dhd->event_data.fmts,
13038 dhd->event_data.fmts_size);
13039 dhd->event_data.fmts = NULL;
13040 }
13041 if (dhd->event_data.raw_fmts) {
13042 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
13043 dhd->event_data.raw_fmts_size);
13044 dhd->event_data.raw_fmts = NULL;
13045 }
13046 if (dhd->event_data.raw_sstr) {
13047 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
13048 dhd->event_data.raw_sstr_size);
13049 dhd->event_data.raw_sstr = NULL;
13050 }
13051 if (dhd->event_data.rom_raw_sstr) {
13052 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
13053 dhd->event_data.rom_raw_sstr_size);
13054 dhd->event_data.rom_raw_sstr = NULL;
13055 }
13056 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
13057 }
13058 #endif /* SHOW_LOGTRACE */
13059 #ifdef BCMPCIE
13060 if (dhdp->extended_trap_data)
13061 {
13062 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
13063 dhdp->extended_trap_data = NULL;
13064 }
13065 #endif /* BCMPCIE */
13066 #ifdef PNO_SUPPORT
13067 if (dhdp->pno_state)
13068 dhd_pno_deinit(dhdp);
13069 #endif
13070 #ifdef RTT_SUPPORT
13071 if (dhdp->rtt_state) {
13072 dhd_rtt_deinit(dhdp);
13073 }
13074 #endif
13075 #if defined(CONFIG_PM_SLEEP)
13076 if (dhd_pm_notifier_registered) {
13077 unregister_pm_notifier(&dhd->pm_notifier);
13078 dhd_pm_notifier_registered = FALSE;
13079 }
13080 #endif /* CONFIG_PM_SLEEP */
13081
13082 #ifdef DEBUG_CPU_FREQ
13083 if (dhd->new_freq)
13084 free_percpu(dhd->new_freq);
13085 dhd->new_freq = NULL;
13086 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
13087 #endif
13088 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
13089 #ifdef CONFIG_HAS_WAKELOCK
13090 dhd->wakelock_wd_counter = 0;
13091 wake_lock_destroy(&dhd->wl_wdwake);
13092 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
13093 wake_lock_destroy(&dhd->wl_wifi);
13094 #endif /* CONFIG_HAS_WAKELOCK */
13095 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
13096 DHD_OS_WAKE_LOCK_DESTROY(dhd);
13097 }
13098
13099
13100
13101 #ifdef DHDTCPACK_SUPPRESS
13102 /* This will free all MEM allocated for TCPACK SUPPRESS */
13103 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
13104 #endif /* DHDTCPACK_SUPPRESS */
13105
13106 #ifdef PCIE_FULL_DONGLE
13107 dhd_flow_rings_deinit(dhdp);
13108 if (dhdp->prot)
13109 dhd_prot_detach(dhdp);
13110 #endif
13111
13112 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
13113 dhd_free_tdls_peer_list(dhdp);
13114 #endif
13115
13116 #ifdef HOFFLOAD_MODULES
13117 hmem = &dhdp->hmem;
13118 dhd_free_module_memory(dhdp->bus, hmem);
13119 #endif /* HOFFLOAD_MODULES */
13120 #if defined(BT_OVER_SDIO)
13121 mutex_destroy(&dhd->bus_user_lock);
13122 #endif /* BT_OVER_SDIO */
13123 #ifdef DUMP_IOCTL_IOV_LIST
13124 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
13125 #endif /* DUMP_IOCTL_IOV_LIST */
13126 #ifdef DHD_DEBUG
13127 /* memory waste feature list initilization */
13128 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
13129 #endif /* DHD_DEBUG */
13130 #ifdef WL_MONITOR
13131 dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL);
13132 #endif /* WL_MONITOR */
13133
13134 /* Prefer adding de-init code above this comment unless necessary.
13135 * The idea is to cancel work queue, sysfs and flags at the end.
13136 */
13137 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
13138 dhd->dhd_deferred_wq = NULL;
13139
13140 #ifdef SHOW_LOGTRACE
13141 /* Wait till event_log_dispatcher_work finishes */
13142 cancel_work_sync(&dhd->event_log_dispatcher_work);
13143 #endif /* SHOW_LOGTRACE */
13144
13145 dhd_sysfs_exit(dhd);
13146 dhd->pub.fw_download_done = FALSE;
13147 dhd_conf_detach(dhdp);
13148 }
13149
13150
/*
 * Free host-side driver state hanging off @dhdp: per-flow rx reorder
 * buffers, the STA pool, a captured SOC RAM dump (if any), cached FW/NVRAM
 * images, and finally the dhd_info_t itself — unless that came from the
 * platform prealloc pool, in which case it is left for the pool owner.
 */
void
dhd_free(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
		/* Each reorder buf = header + (max_idx + 1) packet pointers;
		 * recompute that size so MFREE accounting matches the alloc.
		 */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);

		dhd = (dhd_info_t *)dhdp->info;
		if (dhdp->soc_ram) {
			/* Memdump buffer may come from the static pool; release accordingly */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
#ifdef CACHE_FW_IMAGES
		if (dhdp->cached_fw) {
			MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
			dhdp->cached_fw = NULL;
		}

		if (dhdp->cached_nvram) {
			MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
			dhdp->cached_nvram = NULL;
		}
#endif
		if (dhd) {
#ifdef REPORT_FATAL_TIMEOUTS
			deinit_dhd_timeouts(&dhd->pub);
#endif /* REPORT_FATAL_TIMEOUTS */

			/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
			if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
					DHD_PREALLOC_DHD_INFO, 0, FALSE))
				MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
			dhd = NULL;
		}
	}
}
13210
/*
 * Clear reusable driver state without freeing the dhd_info_t itself:
 * drops pending TCP-ACK suppression state, rx reorder buffers, clears
 * the STA pool and releases any captured SOC RAM dump. Used when the
 * instance is being recycled rather than torn down (cf. dhd_free()).
 */
void
dhd_clear(dhd_pub_t *dhdp)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
#ifdef DHDTCPACK_SUPPRESS
		/* Clean up timer/data structure for any remaining/pending packet or timer. */
		dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */
		/* Same sizing math as the alloc: header + per-index pointer table */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);

		if (dhdp->soc_ram) {
			/* Memdump buffer may come from the static pool; release accordingly */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
	}
}
13250
/*
 * Module-level teardown, shared by dhd_module_exit() and init-failure
 * paths: unregister the bus, shut down the Android glue, then drop the
 * platform driver. Order matters — the bus must go before the platform
 * driver that hosts it.
 */
static void
dhd_module_cleanup(void)
{
	printf("%s: Enter\n", __FUNCTION__);

	dhd_bus_unregister();

	wl_android_exit();

	dhd_wifi_platform_unregister_drv();
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	/* Amlogic platforms on old kernels need explicit device-tree teardown */
	wifi_teardown_dt();
#endif
#endif
	printf("%s: Exit\n", __FUNCTION__);
}
13268
/*
 * Kernel module exit hook. The exit_in_progress flag is raised first so
 * concurrent paths can bail out early while cleanup proceeds.
 */
static void __exit
dhd_module_exit(void)
{
	atomic_set(&exit_in_progress, 1);
	dhd_module_cleanup();
	unregister_reboot_notifier(&dhd_reboot_notifier);
	dhd_destroy_to_notifier_skt();
}
13277
/*
 * Kernel module init hook. Backs up the firmware/NVRAM module-parameter
 * paths, then tries to register the platform driver up to
 * POWERUP_MAX_RETRY+1 times, restoring the backed-up paths between
 * attempts (a failed attempt may have modified them).
 * Returns 0 on success or the last registration error.
 */
static int __init
dhd_module_init(void)
{
	int err;
	int retry = POWERUP_MAX_RETRY;

	printf("%s: in %s\n", __FUNCTION__, dhd_version);
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	if (wifi_setup_dt()) {
		printf("wifi_dt : fail to setup dt\n");
	}
#endif
#endif

	DHD_PERIM_RADIO_INIT();


	/* Snapshot the user-supplied paths so retries start from a clean copy */
	if (firmware_path[0] != '\0') {
		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	if (nvram_path[0] != '\0') {
		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	do {
		err = dhd_wifi_platform_register_drv();
		if (!err) {
			register_reboot_notifier(&dhd_reboot_notifier);
			break;
		} else {
			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
				__FUNCTION__, retry));
			/* Restore paths the failed attempt may have clobbered */
			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
		}
	} while (retry--);

	/* Netlink socket is created regardless of registration outcome */
	dhd_create_to_notifier_skt();

	if (err) {
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
		wifi_teardown_dt();
#endif
#endif
		DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
	} else {
		if (!dhd_download_fw_on_driverload) {
			dhd_driver_init_done = TRUE;
		}
	}

	printf("%s: Exit err=%d\n", __FUNCTION__, err);
	return err;
}
13339
13340 static int
13341 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
13342 {
13343 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
13344 if (code == SYS_RESTART) {
13345 #ifdef BCMPCIE
13346 is_reboot = code;
13347 #endif /* BCMPCIE */
13348 }
13349 return NOTIFY_DONE;
13350 }
13351
13352
/*
 * Module entry-point selection: some platforms defer driver init to
 * shorten boot time (deferred_module_init*), others push it to the
 * late-initcall stage; everything else uses plain module_init().
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
	defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8895) || \
	defined(CONFIG_ARCH_MSM8998)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
	* CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8895 || CONFIG_ARCH_MSM8998
	*/
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);
13374
13375 /*
13376 * OS specific functions required to implement DHD driver in OS independent way
13377 */
13378 int
13379 dhd_os_proto_block(dhd_pub_t *pub)
13380 {
13381 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13382
13383 if (dhd) {
13384 DHD_PERIM_UNLOCK(pub);
13385
13386 down(&dhd->proto_sem);
13387
13388 DHD_PERIM_LOCK(pub);
13389 return 1;
13390 }
13391
13392 return 0;
13393 }
13394
13395 int
13396 dhd_os_proto_unblock(dhd_pub_t *pub)
13397 {
13398 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13399
13400 if (dhd) {
13401 up(&dhd->proto_sem);
13402 return 1;
13403 }
13404
13405 return 0;
13406 }
13407
13408 void
13409 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
13410 {
13411 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13412
13413 if (dhd) {
13414 mutex_lock(&dhd->dhd_iovar_mutex);
13415 }
13416 }
13417
13418 void
13419 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
13420 {
13421 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13422
13423 if (dhd) {
13424 mutex_unlock(&dhd->dhd_iovar_mutex);
13425 }
13426 }
13427
13428 unsigned int
13429 dhd_os_get_ioctl_resp_timeout(void)
13430 {
13431 return ((unsigned int)dhd_ioctl_timeout_msec);
13432 }
13433
13434 void
13435 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
13436 {
13437 dhd_ioctl_timeout_msec = (int)timeout_msec;
13438 }
13439
/*
 * Block until *condition becomes true or the IOCTL response timeout
 * expires. Returns the wait_event_timeout() result: 0 on timeout,
 * non-zero (remaining jiffies) if the condition was met.
 *
 * NOTE(review): when config overrides are active and this is not a
 * rescheduled wait, the *global* dhd_ioctl_timeout_msec is temporarily
 * swapped and restored afterwards — concurrent waiters would observe
 * the override; presumably IOCTL waits are serialized. Confirm.
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout, timeout_tmp = dhd_ioctl_timeout_msec;

	/* First (non-rescheduled) attempt may use a config-supplied timeout */
	if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
		timeout_tmp = dhd_ioctl_timeout_msec;
		dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec;
	}

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	/* Drop the perimeter lock while sleeping */
	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);

	/* Restore the global timeout if it was overridden above */
	if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
		dhd_ioctl_timeout_msec = timeout_tmp;
	}

	DHD_PERIM_LOCK(pub);

	return timeout;
}
13470
13471 int
13472 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
13473 {
13474 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13475
13476 wake_up(&dhd->ioctl_resp_wait);
13477 return 0;
13478 }
13479
/*
 * Block until *condition (D3 ACK received flag) becomes true or the
 * IOCTL response timeout expires. The perimeter lock is dropped for the
 * duration of the sleep. Returns 0 on timeout, non-zero otherwise.
 */
int
dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
13501
13502 #ifdef PCIE_INB_DW
/*
 * Block until *condition (deep-sleep exit flag) becomes true or
 * ds_exit_timeout_msec elapses; perimeter lock is released while
 * sleeping. Returns 0 on timeout, non-zero otherwise.
 */
int
dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(ds_exit_timeout_msec);
#else
	timeout = ds_exit_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
13524
13525 int
13526 dhd_os_ds_exit_wake(dhd_pub_t *pub)
13527 {
13528 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13529
13530 wake_up(&dhd->ds_exit_wait);
13531 return 0;
13532 }
13533
13534 #endif /* PCIE_INB_DW */
13535
13536 int
13537 dhd_os_d3ack_wake(dhd_pub_t *pub)
13538 {
13539 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13540
13541 wake_up(&dhd->d3ack_wait);
13542 return 0;
13543 }
13544
/*
 * Block until *condition becomes false (bus-busy bits cleared) or
 * DHD_BUS_BUSY_TIMEOUT elapses. Returns 0 on timeout, non-zero otherwise.
 */
int
dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Wait for bus usage contexts to gracefully exit within some timeout value
	 * Set time out to little higher than dhd_ioctl_timeout_msec,
	 * so that IOCTL timeout should not get affected.
	 */
	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
#else
	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);

	return timeout;
}
13566
13567 /*
13568 * Wait until the condition *var == condition is met.
13569 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13570 * Returns 1 if the @condition evaluated to true
13571 */
/*
 * Block until *var equals @condition or DHD_BUS_BUSY_TIMEOUT elapses.
 * Returns 0 on timeout, non-zero (remaining jiffies) otherwise.
 */
int
dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
#else
	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);

	return timeout;
}
13589
13590
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
/* Fix compilation error for FC11 */
INLINE
#endif
/*
 * Wake any waiter on the bus-busy state waitqueue. The write barrier
 * guarantees the state change made by the caller is visible before the
 * waiter runs. Always returns 0.
 */
int
dhd_os_busbusy_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	/* Call wmb() to make sure before waking up the other event value gets updated */
	OSL_SMP_WMB();
	wake_up(&dhd->dhd_bus_busy_state_wait);
	return 0;
}
13604
13605 void
13606 dhd_os_wd_timer_extend(void *bus, bool extend)
13607 {
13608 #ifndef BCMDBUS
13609 dhd_pub_t *pub = bus;
13610 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13611
13612 if (extend)
13613 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
13614 else
13615 dhd_os_wd_timer(bus, dhd->default_wd_interval);
13616 #endif /* !BCMDBUS */
13617 }
13618
13619
/*
 * Arm or stop the DHD watchdog timer.
 * @wdtick == 0 stops the timer (synchronously, outside the lock);
 * non-zero re-arms it with that period in ms and updates the global
 * dhd_watchdog_ms. No-op while the bus is down.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
#ifndef BCMDBUS
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		/* del_timer_sync may sleep/spin: must drop the spinlock first */
		DHD_GENERAL_UNLOCK(pub, flags);
		del_timer_sync(&dhd->timer);
		return;
	}

	if (wdtick) {
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
#endif /* !BCMDBUS */
}
13660
13661 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * Start or stop the PCIe runtime-PM timer.
 * @tick non-zero: arm the timer (only if not already running).
 * @tick == 0: stop it synchronously; the lock is dropped before
 * del_timer_sync() since the timer callback may take the same lock.
 * No-op while the bus is down or going down.
 */
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if its not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
		}
	} else {
		/* tick is zero, we have to stop the timer */
		/* Stop the timer only if its running, otherwise we don't have to do anything */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			/* we have already released the lock, so just go to exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;

}
13708
13709 #endif /* DHD_PCIE_RUNTIMEPM */
13710
13711 void *
13712 dhd_os_open_image(char *filename)
13713 {
13714 struct file *fp;
13715 int size;
13716
13717 fp = filp_open(filename, O_RDONLY, 0);
13718 /*
13719 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13720 * Alternative:
13721 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13722 * ???
13723 */
13724 if (IS_ERR(fp)) {
13725 fp = NULL;
13726 goto err;
13727 }
13728
13729 if (!S_ISREG(file_inode(fp)->i_mode)) {
13730 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
13731 fp = NULL;
13732 goto err;
13733 }
13734
13735 size = i_size_read(file_inode(fp));
13736 if (size <= 0) {
13737 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
13738 fp = NULL;
13739 goto err;
13740 }
13741
13742 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
13743
13744 err:
13745 return fp;
13746 }
13747
/*
 * Read the next block (up to @len bytes, capped at the remaining file
 * size) from an image opened by dhd_os_open_image().
 * Returns bytes read, 0 for a NULL handle, or -EIO if a read that
 * should have covered the whole file came up short.
 */
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;
	int size;

	if (!image) {
		return 0;
	}

	size = i_size_read(file_inode(fp));
	/* 4.14 changed kernel_read()'s signature: position is passed by pointer
	 * and advanced by the call; older kernels take it by value.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	rdlen = kernel_read(fp, buf, MIN(len, size), &fp->f_pos);
#else
	rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
#endif

	if (len >= size && size != rdlen) {
		return -EIO;
	}

	/* Pre-4.14: advance the file position manually */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
	if (rdlen > 0) {
		fp->f_pos += rdlen;
	}
#endif

	return rdlen;
}
13778
/*
 * Report the byte size of an image opened by dhd_os_open_image();
 * 0 for a NULL handle.
 */
int
dhd_os_get_image_size(void *image)
{
	struct file *fp = (struct file *)image;

	if (!fp) {
		return 0;
	}

	return i_size_read(file_inode(fp));
}
13792
13793 #if defined(BT_OVER_SDIO)
13794 int
13795 dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
13796 {
13797 struct file *fp = (struct file *)image;
13798 int rd_len;
13799 uint str_len = 0;
13800 char *str_end = NULL;
13801
13802 if (!image)
13803 return 0;
13804
13805 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
13806 rd_len = kernel_read(fp, str, len, &fp->f_pos);
13807 #else
13808 rd_len = kernel_read(fp, fp->f_pos, str, len);
13809 #endif
13810 str_end = strnchr(str, len, '\n');
13811 if (str_end == NULL) {
13812 goto err;
13813 }
13814 str_len = (uint)(str_end - str);
13815
13816 /* Advance file pointer past the string length */
13817 fp->f_pos += str_len + 1;
13818 bzero(str_end, rd_len - str_len);
13819
13820 err:
13821 return str_len;
13822 }
13823 #endif /* defined (BT_OVER_SDIO) */
13824
13825
13826 void
13827 dhd_os_close_image(void *image)
13828 {
13829 if (image)
13830 filp_close((struct file *)image, NULL);
13831 }
13832
/*
 * Acquire the SDIO bus lock. When the DPC runs as a real-time thread
 * (dhd_dpc_prio >= 0) a semaphore is used so the holder may sleep;
 * otherwise (tasklet context) a BH spinlock is used. BCMDBUS builds
 * always use the spinlock.
 */
void
dhd_os_sdlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

#ifdef BCMDBUS
	spin_lock_bh(&dhd->sdlock);
#else
	if (dhd_dpc_prio >= 0)
		down(&dhd->sdsem);
	else
		spin_lock_bh(&dhd->sdlock);
#endif /* !BCMDBUS */
}
13849
/*
 * Release the SDIO bus lock; must mirror dhd_os_sdlock()'s choice of
 * semaphore vs. spinlock (keyed off dhd_dpc_prio / BCMDBUS).
 */
void
dhd_os_sdunlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

#ifdef BCMDBUS
	spin_unlock_bh(&dhd->sdlock);
#else
	if (dhd_dpc_prio >= 0)
		up(&dhd->sdsem);
	else
		spin_unlock_bh(&dhd->sdlock);
#endif /* !BCMDBUS */
}
13866
/*
 * Acquire the tx-queue lock. BCMDBUS builds use the irqsave variant
 * (flags stashed in the dhd_info) since the queue may be touched from
 * hard-irq context there; otherwise BH disabling suffices.
 */
void
dhd_os_sdlock_txq(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
#ifdef BCMDBUS
	spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
#else
	spin_lock_bh(&dhd->txqlock);
#endif /* BCMDBUS */
}
13879
/* Release the tx-queue lock taken by dhd_os_sdlock_txq(). */
void
dhd_os_sdunlock_txq(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
#ifdef BCMDBUS
	spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
#else
	spin_unlock_bh(&dhd->txqlock);
#endif /* BCMDBUS */
}
13892
/*
 * rx-queue lock — intentionally compiled out (#if 0): rx-queue access
 * is currently single-context so no locking is needed. Kept as a stub
 * so callers need not change if that ever changes.
 */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
#if 0
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_lock_bh(&dhd->rxqlock);
#endif
}
13903
/* rx-queue unlock — stub matching dhd_os_sdlock_rxq() (#if 0, see there). */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
#if 0
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_unlock_bh(&dhd->rxqlock);
#endif
}
13914
13915 static void
13916 dhd_os_rxflock(dhd_pub_t *pub)
13917 {
13918 dhd_info_t *dhd;
13919
13920 dhd = (dhd_info_t *)(pub->info);
13921 spin_lock_bh(&dhd->rxf_lock);
13922
13923 }
13924
13925 static void
13926 dhd_os_rxfunlock(dhd_pub_t *pub)
13927 {
13928 dhd_info_t *dhd;
13929
13930 dhd = (dhd_info_t *)(pub->info);
13931 spin_unlock_bh(&dhd->rxf_lock);
13932 }
13933
13934 #ifdef DHDTCPACK_SUPPRESS
/*
 * Acquire the TCP-ACK suppression table lock. SDIO builds use the BH
 * variant (returned flags are meaningless, always 0); other buses use
 * irqsave and return the saved flags, which must be handed back to
 * dhd_os_tcpackunlock().
 */
unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_lock_bh(&dhd->tcpack_lock);
#else
		spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}

	return flags;
}
13953
/*
 * Release the TCP-ACK suppression table lock.
 * @flags must be the value returned by the matching dhd_os_tcpacklock()
 * (ignored on SDIO builds, which use the BH variant).
 */
void
dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd;

#ifdef BCMSDIO
	BCM_REFERENCE(flags);
#endif /* BCMSDIO */

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_unlock_bh(&dhd->tcpack_lock);
#else
		spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}
}
13973 #endif /* DHDTCPACK_SUPPRESS */
13974
13975 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
13976 {
13977 uint8* buf;
13978 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
13979
13980 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
13981 if (buf == NULL && kmalloc_if_fail)
13982 buf = kmalloc(size, flags);
13983
13984 return buf;
13985 }
13986
/*
 * Counterpart of dhd_os_prealloc(): intentionally a no-op — prealloc
 * buffers are owned by the platform pool and are never returned here.
 */
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
13990
13991 #if defined(WL_WIRELESS_EXT)
13992 struct iw_statistics *
13993 dhd_get_wireless_stats(struct net_device *dev)
13994 {
13995 int res = 0;
13996 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13997
13998 if (!dhd->pub.up) {
13999 return NULL;
14000 }
14001
14002 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
14003
14004 if (res == 0)
14005 return &dhd->iw.wstats;
14006 else
14007 return NULL;
14008 }
14009 #endif /* defined(WL_WIRELESS_EXT) */
14010
/*
 * Parse a host event from the dongle and fan it out to the interested
 * subsystems: IAPSTA, wireless extensions (primary bsscfg only) and
 * cfg80211 (only while the interface is up, under up_lock).
 * Returns BCME_OK or the wl_process_host_event() error.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
#ifdef WL_CFG80211
	unsigned long flags = 0;
#endif /* WL_CFG80211 */
	ASSERT(dhd != NULL);

	/* SHOW_LOGTRACE builds pass event_data so trace strings can be decoded */
#ifdef SHOW_LOGTRACE
	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
		&dhd->event_data);
#else
	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
		NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_EXT_IAPSTA)
	wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data);
#endif /* defined(WL_EXT_IAPSTA) */
#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

		ASSERT(dhd->iflist[ifidx] != NULL);
		ASSERT(dhd->iflist[ifidx]->net != NULL);

		if (dhd->iflist[ifidx]->net) {
			wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[ifidx] != NULL);
	ASSERT(dhd->iflist[ifidx]->net != NULL);
	if (dhd->iflist[ifidx]->net) {
		/* up_lock guards against the interface going down mid-delivery */
		spin_lock_irqsave(&dhd->pub.up_lock, flags);
		if (dhd->pub.up) {
			wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
		}
		spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
	}
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
14064
/* send up locally generated event */
/* Intentionally a stub on Linux: locally generated events are not
 * injected back up the stack in this port.
 */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	/* Just return from here */
	return;
}
14072
14073 #ifdef LOG_INTO_TCPDUMP
/*
 * Wrap @data in a BRCM-ethertype frame addressed to ourselves and inject
 * it into the "wlan0" (or first) interface's rx path, so driver logs can
 * be captured with tcpdump. eth_type_trans() is only used to set
 * skb->protocol/dev; data/len are restored afterwards, then the ethernet
 * header is stripped before delivery.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* src = our MAC with the local bit flipped, dst = our MAC */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		/* eth_type_trans() pulls the header; remember data/len to undo it */
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	} else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
14130 #endif /* LOG_INTO_TCPDUMP */
14131
/*
 * SDIO only: sleep (up to IOCTL_RESP_TIMEOUT) until *lockvar is cleared
 * by dhd_wait_event_wakeup(). The sdlock is released around the wait so
 * the clearing context can run. No-op on non-SDIO builds.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo = dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
	return;
}
14149
14150 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
14151 {
14152 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
14153 struct dhd_info *dhdinfo = dhd->info;
14154 if (waitqueue_active(&dhdinfo->ctrl_wait))
14155 wake_up(&dhdinfo->ctrl_wait);
14156 #endif
14157 return;
14158 }
14159
14160 #if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
/*
 * Reset the dongle via the bus layer.
 * @flag TRUE = bring the chip down: issue "wl down" first, then tear
 * down wlfc/PNO/RTT/pkt-monitor state that would dangle across a reset.
 * @flag FALSE = bring it back up: refresh FW/NVRAM paths first (SDIO).
 * Returns the dhd_bus_devreset() result.
 */
int
dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
	int ret;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (flag == TRUE) {
		/* Issue wl down command before resetting the chip */
		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
		}
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled) {
			dhd_wlfc_deinit(&dhd->pub);
		}
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
		if (dhd->pub.pno_state) {
			dhd_pno_deinit(&dhd->pub);
		}
#endif
#ifdef RTT_SUPPORT
		if (dhd->pub.rtt_state) {
			dhd_rtt_deinit(&dhd->pub);
		}
#endif /* RTT_SUPPORT */

#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
		dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
#endif /* DBG_PKT_MON */
	}

#ifdef BCMSDIO
	if (!flag) {
		dhd_update_fw_nv_path(dhd);
		/* update firmware and nvram path to sdio bus */
		dhd_bus_update_fw_nv_path(dhd->pub.bus,
			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
	}
#endif /* BCMSDIO */

	ret = dhd_bus_devreset(&dhd->pub, flag);
	if (ret) {
		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
		return ret;
	}

	return ret;
}
14211
14212 #ifdef BCMSDIO
14213 int
14214 dhd_net_bus_suspend(struct net_device *dev)
14215 {
14216 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14217 return dhd_bus_suspend(&dhd->pub);
14218 }
14219
14220 int
14221 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
14222 {
14223 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14224 return dhd_bus_resume(&dhd->pub, stage);
14225 }
14226
14227 #endif /* BCMSDIO */
14228 #endif /* BCMSDIO || BCMPCIE || BCMDBUS */
14229
14230 int net_os_set_suspend_disable(struct net_device *dev, int val)
14231 {
14232 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14233 int ret = 0;
14234
14235 if (dhd) {
14236 ret = dhd->pub.suspend_disable_flag;
14237 dhd->pub.suspend_disable_flag = val;
14238 }
14239 return ret;
14240 }
14241
14242 int net_os_set_suspend(struct net_device *dev, int val, int force)
14243 {
14244 int ret = 0;
14245 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14246
14247 if (dhd) {
14248 #ifdef CONFIG_MACH_UNIVERSAL7420
14249 #endif /* CONFIG_MACH_UNIVERSAL7420 */
14250 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
14251 ret = dhd_set_suspend(val, &dhd->pub);
14252 #else
14253 ret = dhd_suspend_resume_helper(dhd, val, force);
14254 #endif
14255 #ifdef WL_CFG80211
14256 wl_cfg80211_update_power_mode(dev);
14257 #endif
14258 }
14259 return ret;
14260 }
14261
14262 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
14263 {
14264 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14265
14266 if (dhd)
14267 dhd->pub.suspend_bcn_li_dtim = val;
14268
14269 return 0;
14270 }
14271
14272 int net_os_set_max_dtim_enable(struct net_device *dev, int val)
14273 {
14274 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14275
14276 if (dhd) {
14277 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
14278 __FUNCTION__, (val ? "Enable" : "Disable")));
14279 if (val) {
14280 dhd->pub.max_dtim_enable = TRUE;
14281 } else {
14282 dhd->pub.max_dtim_enable = FALSE;
14283 }
14284 } else {
14285 return -1;
14286 }
14287
14288 return 0;
14289 }
14290
14291 #ifdef PKT_FILTER_SUPPORT
14292 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
14293 {
14294 int ret = 0;
14295
14296 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
14297 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14298
14299 if (!dhd_master_mode)
14300 add_remove = !add_remove;
14301 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
14302 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
14303 return 0;
14304 }
14305
14306
14307 if (num >= dhd->pub.pktfilter_count) {
14308 return -EINVAL;
14309 }
14310
14311 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
14312 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
14313
14314 return ret;
14315 }
14316
14317 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
14318
14319 {
14320 int ret = 0;
14321
14322 /* Packet filtering is set only if we still in early-suspend and
14323 * we need either to turn it ON or turn it OFF
14324 * We can always turn it OFF in case of early-suspend, but we turn it
14325 * back ON only if suspend_disable_flag was not set
14326 */
14327 if (dhdp && dhdp->up) {
14328 if (dhdp->in_suspend) {
14329 if (!val || (val && !dhdp->suspend_disable_flag))
14330 dhd_enable_packet_filter(val, dhdp);
14331 }
14332 }
14333 return ret;
14334 }
14335
14336 /* function to enable/disable packet for Network device */
14337 int net_os_enable_packet_filter(struct net_device *dev, int val)
14338 {
14339 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14340
14341 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
14342 return dhd_os_enable_packet_filter(&dhd->pub, val);
14343 }
14344 #endif /* PKT_FILTER_SUPPORT */
14345
14346 int
14347 dhd_dev_init_ioctl(struct net_device *dev)
14348 {
14349 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14350 int ret;
14351
14352 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
14353 goto done;
14354
14355 done:
14356 return ret;
14357 }
14358
/*
 * Build the WIFI_FEATURE_* capability bitmap reported to the Android HAL.
 * Bits are derived from firmware capability flags (FW_SUPPORTED), the
 * current operating mode, and compile-time feature options.
 */
int
dhd_dev_get_feature_set(struct net_device *dev)
{
	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhd = (&ptr->pub);
	int feature_set = 0;

	/* Capabilities advertised by the dongle firmware */
	if (FW_SUPPORTED(dhd, sta))
		feature_set |= WIFI_FEATURE_INFRA;
	if (FW_SUPPORTED(dhd, dualband))
		feature_set |= WIFI_FEATURE_INFRA_5G;
	if (FW_SUPPORTED(dhd, p2p))
		feature_set |= WIFI_FEATURE_P2P;
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		feature_set |= WIFI_FEATURE_SOFT_AP;
	if (FW_SUPPORTED(dhd, tdls))
		feature_set |= WIFI_FEATURE_TDLS;
	if (FW_SUPPORTED(dhd, vsdb))
		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
	if (FW_SUPPORTED(dhd, nan)) {
		feature_set |= WIFI_FEATURE_NAN;
		/* NAN is essential for device-to-device RTT */
		if (FW_SUPPORTED(dhd, rttd2d))
			feature_set |= WIFI_FEATURE_D2D_RTT;
	}
#ifdef RTT_SUPPORT
	if (dhd->rtt_supported) {
		feature_set |= WIFI_FEATURE_D2D_RTT;
		feature_set |= WIFI_FEATURE_D2AP_RTT;
	}
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */

#ifdef PNO_SUPPORT
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
#ifdef GSCAN_SUPPORT
		/* terence 20171115: remove to get GTS PASS
		 * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
		 */
		// feature_set |= WIFI_FEATURE_GSCAN;
		// feature_set |= WIFI_FEATURE_HAL_EPNO;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT */
#ifdef RSSI_MONITOR_SUPPORT
	if (FW_SUPPORTED(dhd, rssi_mon)) {
		feature_set |= WIFI_FEATURE_RSSI_MONITOR;
	}
#endif /* RSSI_MONITOR_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
#ifdef NDO_CONFIG_SUPPORT
	feature_set |= WIFI_FEATURE_CONFIG_NDO;
#endif /* NDO_CONFIG_SUPPORT */
#ifdef KEEP_ALIVE
	feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
#endif /* KEEP_ALIVE */

	return feature_set;
}
14423
14424 int
14425 dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
14426 {
14427 int feature_set_full;
14428 int ret = 0;
14429
14430 feature_set_full = dhd_dev_get_feature_set(dev);
14431
14432 /* Common feature set for all interface */
14433 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
14434 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
14435 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
14436 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
14437 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
14438 (feature_set_full & WIFI_FEATURE_EPR);
14439
14440 /* Specific feature group for each interface */
14441 switch (num) {
14442 case 0:
14443 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
14444 /* Not supported yet */
14445 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14446 (feature_set_full & WIFI_FEATURE_TDLS) |
14447 (feature_set_full & WIFI_FEATURE_PNO) |
14448 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
14449 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
14450 (feature_set_full & WIFI_FEATURE_GSCAN) |
14451 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
14452 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
14453 break;
14454
14455 case 1:
14456 ret |= (feature_set_full & WIFI_FEATURE_P2P);
14457 /* Not yet verified NAN with P2P */
14458 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14459 break;
14460
14461 case 2:
14462 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
14463 (feature_set_full & WIFI_FEATURE_TDLS) |
14464 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
14465 break;
14466
14467 default:
14468 ret = WIFI_FEATURE_INVALID;
14469 DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
14470 break;
14471 }
14472
14473 return ret;
14474 }
14475
14476 #ifdef CUSTOM_FORCE_NODFS_FLAG
14477 int
14478 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
14479 {
14480 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14481
14482 if (nodfs)
14483 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
14484 else
14485 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
14486 dhd->pub.force_country_change = TRUE;
14487 return 0;
14488 }
14489 #endif /* CUSTOM_FORCE_NODFS_FLAG */
14490
14491 #ifdef NDO_CONFIG_SUPPORT
14492 int
14493 dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
14494 {
14495 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14496 dhd_pub_t *dhdp = &dhd->pub;
14497 int ret = 0;
14498
14499 if (enable) {
14500 /* enable ND offload feature (will be enabled in FW on suspend) */
14501 dhdp->ndo_enable = TRUE;
14502
14503 /* Update changes of anycast address & DAD failed address */
14504 ret = dhd_dev_ndo_update_inet6addr(dev);
14505 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
14506 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
14507 return ret;
14508 }
14509 } else {
14510 /* disable ND offload feature */
14511 dhdp->ndo_enable = FALSE;
14512
14513 /* disable ND offload in FW */
14514 ret = dhd_ndo_enable(dhdp, 0);
14515 if (ret < 0) {
14516 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
14517 }
14518 }
14519 return ret;
14520 }
14521
14522 /* #pragma used as a WAR to fix build failure,
14523 * ignore dropping of 'const' qualifier in 'list_entry' macro
14524 * this pragma disables the warning only for the following function
14525 */
14526 #pragma GCC diagnostic push
14527 #pragma GCC diagnostic ignored "-Wcast-qual"
14528
14529 static int
14530 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
14531 {
14532 struct inet6_ifaddr *ifa;
14533 struct ifacaddr6 *acaddr = NULL;
14534 int addr_count = 0;
14535
14536 /* lock */
14537 read_lock_bh(&inet6->lock);
14538
14539 /* Count valid unicast address */
14540 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14541 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
14542 addr_count++;
14543 }
14544 }
14545
14546 /* Count anycast address */
14547 acaddr = inet6->ac_list;
14548 while (acaddr) {
14549 addr_count++;
14550 acaddr = acaddr->aca_next;
14551 }
14552
14553 /* unlock */
14554 read_unlock_bh(&inet6->lock);
14555
14556 return addr_count;
14557 }
14558
/*
 * Synchronize the firmware's ND-offload host IP table with the kernel's
 * IPv6 address state for the primary interface:
 *   - removes DAD-failed unicast addresses and all anycast addresses,
 *   - re-adds valid unicast addresses if a previous overflow disabled NDO,
 *   - re-adds current anycast addresses,
 *   - disables NDO entirely when the address count exceeds ndo_max_host_ip,
 *     and re-enables it (while suspended) once the count fits again.
 * Returns BCME_OK, or a BCME_* error (BCME_ERROR for bad context,
 * BCME_NOMEM on allocation failure, or the iovar status).
 */
int
dhd_dev_ndo_update_inet6addr(struct net_device *dev)
{
	dhd_info_t *dhd;
	dhd_pub_t *dhdp;
	struct inet6_dev *inet6;
	struct inet6_ifaddr *ifa;
	struct ifacaddr6 *acaddr = NULL;
	struct in6_addr *ipv6_addr = NULL;
	int cnt, i;
	int ret = BCME_OK;

	/*
	 * this function evaluates host ip address in struct inet6_dev
	 * unicast addr in inet6_dev->addr_list
	 * anycast addr in inet6_dev->ac_list
	 * while evaluating inet6_dev, read_lock_bh() is required to prevent
	 * access on null(freed) pointer.
	 */

	if (dev) {
		inet6 = dev->ip6_ptr;
		if (!inet6) {
			DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
			return BCME_ERROR;
		}

		dhd = DHD_DEV_INFO(dev);
		if (!dhd) {
			DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
			return BCME_ERROR;
		}
		dhdp = &dhd->pub;

		/* ND offload is programmed only for the primary interface */
		if (dhd_net2idx(dhd, dev) != 0) {
			DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
			return BCME_ERROR;
		}
	} else {
		DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Check host IP overflow */
	cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
	if (cnt > dhdp->ndo_max_host_ip) {
		if (!dhdp->ndo_host_ip_overflow) {
			dhdp->ndo_host_ip_overflow = TRUE;
			/* Disable ND offload in FW */
			DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, 0);
		}

		return ret;
	}

	/*
	 * Allocate ipv6 addr buffer to store addresses to be added/removed.
	 * driver need to lock inet6_dev while accessing structure. but, driver
	 * cannot use ioctl while inet6_dev locked since it requires scheduling
	 * hence, copy addresses to the buffer and do ioctl after unlock.
	 */
	ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
		sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
	if (!ipv6_addr) {
		DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Find DAD failed unicast address to be removed */
	cnt = 0;
	read_lock_bh(&inet6->lock);
	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
		/* DAD failed unicast address */
		if ((ifa->flags & IFA_F_DADFAILED) &&
			(cnt < dhdp->ndo_max_host_ip)) {
			memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
			cnt++;
		}
	}
	read_unlock_bh(&inet6->lock);

	/* Remove DAD failed unicast address */
	for (i = 0; i < cnt; i++) {
		DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
		ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
		if (ret < 0) {
			goto done;
		}
	}

	/* Remove all anycast address */
	ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
	if (ret < 0) {
		goto done;
	}

	/*
	 * if ND offload was disabled due to host ip overflow,
	 * attempt to add valid unicast address.
	 */
	if (dhdp->ndo_host_ip_overflow) {
		/* Find valid unicast address */
		cnt = 0;
		read_lock_bh(&inet6->lock);
		list_for_each_entry(ifa, &inet6->addr_list, if_list) {
			/* valid unicast address */
			if (!(ifa->flags & IFA_F_DADFAILED) &&
				(cnt < dhdp->ndo_max_host_ip)) {
				memcpy(&ipv6_addr[cnt], &ifa->addr,
					sizeof(struct in6_addr));
				cnt++;
			}
		}
		read_unlock_bh(&inet6->lock);

		/* Add valid unicast address */
		for (i = 0; i < cnt; i++) {
			ret = dhd_ndo_add_ip_with_type(dhdp,
				(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
			if (ret < 0) {
				goto done;
			}
		}
	}

	/* Find anycast address */
	cnt = 0;
	read_lock_bh(&inet6->lock);
	acaddr = inet6->ac_list;
	while (acaddr) {
		if (cnt < dhdp->ndo_max_host_ip) {
			memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
			cnt++;
		}
		acaddr = acaddr->aca_next;
	}
	read_unlock_bh(&inet6->lock);

	/* Add anycast address */
	for (i = 0; i < cnt; i++) {
		ret = dhd_ndo_add_ip_with_type(dhdp,
			(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
		if (ret < 0) {
			goto done;
		}
	}

	/* Now All host IP addr were added successfully */
	if (dhdp->ndo_host_ip_overflow) {
		dhdp->ndo_host_ip_overflow = FALSE;
		if (dhdp->in_suspend) {
			/* driver is in (early) suspend state, need to enable ND offload in FW */
			DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, 1);
		}
	}

done:
	if (ipv6_addr) {
		MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
	}

	return ret;
}
14724 #pragma GCC diagnostic pop
14725
14726 #endif /* NDO_CONFIG_SUPPORT */
14727
14728 #ifdef PNO_SUPPORT
14729 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
14730 int
14731 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
14732 {
14733 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14734
14735 return (dhd_pno_stop_for_ssid(&dhd->pub));
14736 }
14737
14738 /* Linux wrapper to call common dhd_pno_set_for_ssid */
14739 int
14740 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
14741 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
14742 {
14743 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14744
14745 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
14746 pno_repeat, pno_freq_expo_max, channel_list, nchan));
14747 }
14748
14749 /* Linux wrapper to call common dhd_pno_enable */
14750 int
14751 dhd_dev_pno_enable(struct net_device *dev, int enable)
14752 {
14753 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14754
14755 return (dhd_pno_enable(&dhd->pub, enable));
14756 }
14757
14758 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
14759 int
14760 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
14761 struct dhd_pno_hotlist_params *hotlist_params)
14762 {
14763 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14764 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
14765 }
14766 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
14767 int
14768 dhd_dev_pno_stop_for_batch(struct net_device *dev)
14769 {
14770 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14771 return (dhd_pno_stop_for_batch(&dhd->pub));
14772 }
14773
14774 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
14775 int
14776 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
14777 {
14778 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14779 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
14780 }
14781
14782 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
14783 int
14784 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
14785 {
14786 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14787 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
14788 }
14789 #endif /* PNO_SUPPORT */
14790
14791 #if defined(PNO_SUPPORT)
14792 #ifdef GSCAN_SUPPORT
14793 bool
14794 dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
14795 {
14796 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14797
14798 return (dhd_is_legacy_pno_enabled(&dhd->pub));
14799 }
14800
14801 int
14802 dhd_dev_set_epno(struct net_device *dev)
14803 {
14804 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14805 if (!dhd) {
14806 return BCME_ERROR;
14807 }
14808 return dhd_pno_set_epno(&dhd->pub);
14809 }
14810 int
14811 dhd_dev_flush_fw_epno(struct net_device *dev)
14812 {
14813 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14814 if (!dhd) {
14815 return BCME_ERROR;
14816 }
14817 return dhd_pno_flush_fw_epno(&dhd->pub);
14818 }
14819
14820 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
14821 int
14822 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
14823 void *buf, bool flush)
14824 {
14825 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14826
14827 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
14828 }
14829
14830 /* Linux wrapper to call common dhd_wait_batch_results_complete */
14831 int
14832 dhd_dev_wait_batch_results_complete(struct net_device *dev)
14833 {
14834 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14835
14836 return (dhd_wait_batch_results_complete(&dhd->pub));
14837 }
14838
14839 /* Linux wrapper to call common dhd_pno_lock_batch_results */
14840 int
14841 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
14842 {
14843 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14844
14845 return (dhd_pno_lock_batch_results(&dhd->pub));
14846 }
14847 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
14848 void
14849 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
14850 {
14851 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14852
14853 return (dhd_pno_unlock_batch_results(&dhd->pub));
14854 }
14855
14856 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
14857 int
14858 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
14859 {
14860 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14861
14862 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
14863 }
14864
14865 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
14866 int
14867 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
14868 {
14869 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14870
14871 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
14872 }
14873
14874 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
14875 void *
14876 dhd_dev_hotlist_scan_event(struct net_device *dev,
14877 const void *data, int *send_evt_bytes, hotlist_type_t type)
14878 {
14879 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14880
14881 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
14882 }
14883
14884 /* Linux wrapper to call common dhd_process_full_gscan_result */
14885 void *
14886 dhd_dev_process_full_gscan_result(struct net_device *dev,
14887 const void *data, uint32 len, int *send_evt_bytes)
14888 {
14889 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14890
14891 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
14892 }
14893
14894 void
14895 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
14896 {
14897 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14898
14899 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
14900
14901 return;
14902 }
14903
14904 int
14905 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
14906 {
14907 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14908
14909 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
14910 }
14911
14912 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
14913 int
14914 dhd_dev_retrieve_batch_scan(struct net_device *dev)
14915 {
14916 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14917
14918 return (dhd_retreive_batch_scan_results(&dhd->pub));
14919 }
14920
14921 /* Linux wrapper to call common dhd_pno_process_epno_result */
14922 void * dhd_dev_process_epno_result(struct net_device *dev,
14923 const void *data, uint32 event, int *send_evt_bytes)
14924 {
14925 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14926
14927 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
14928 }
14929
/*
 * Push lazy-roam tuning parameters to firmware via the "roam_exp_params"
 * iovar, preserving the current enable state in the config flags.
 * Returns BCME_BADARG for a NULL parameter block, otherwise the iovar
 * status (negative on failure).
 */
int
dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
             wlc_roam_exp_params_t *roam_param)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;
	int err;

	if (!roam_param) {
		return BCME_BADARG;
	}

	DHD_ERROR(("a_band_boost_thr %d a_band_penalty_thr %d\n",
		roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
	DHD_ERROR(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
		roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
		roam_param->cur_bssid_boost));
	DHD_ERROR(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
		roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));

	memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
	/* keep the feature enabled if it was already on */
	if (dhd->pub.lazy_roam_enable) {
		roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
	}
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	}
	return err;
}
14964
14965 int
14966 dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
14967 {
14968 int err;
14969 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14970 wl_roam_exp_cfg_t roam_exp_cfg;
14971
14972 memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
14973 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
14974 if (enable) {
14975 roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
14976 }
14977
14978 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14979 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
14980 TRUE);
14981 if (err < 0) {
14982 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
14983 } else {
14984 dhd->pub.lazy_roam_enable = (enable != 0);
14985 }
14986 return err;
14987 }
14988
14989 int
14990 dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
14991 wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
14992 {
14993 int err;
14994 int len;
14995 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14996
14997 bssid_pref->version = BSSID_PREF_LIST_VERSION;
14998 /* By default programming bssid pref flushes out old values */
14999 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
15000 len = sizeof(wl_bssid_pref_cfg_t);
15001 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
15002 err = dhd_iovar(&(dhd->pub), 0, "roam_exp_bssid_pref", (char *)bssid_pref,
15003 len, NULL, 0, TRUE);
15004 if (err != BCME_OK) {
15005 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
15006 }
15007 return err;
15008 }
15009
15010 int
15011 dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
15012 uint32 len, uint32 flush)
15013 {
15014 int err;
15015 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15016 int macmode;
15017
15018 if (blacklist) {
15019 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
15020 len, TRUE, 0);
15021 if (err != BCME_OK) {
15022 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
15023 return err;
15024 }
15025 }
15026 /* By default programming blacklist flushes out old values */
15027 macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
15028 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
15029 sizeof(macmode), TRUE, 0);
15030 if (err != BCME_OK) {
15031 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
15032 }
15033 return err;
15034 }
15035
15036 int
15037 dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
15038 uint32 len, uint32 flush)
15039 {
15040 int err;
15041 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15042 wl_ssid_whitelist_t whitelist_ssid_flush;
15043
15044 if (!ssid_whitelist) {
15045 if (flush) {
15046 ssid_whitelist = &whitelist_ssid_flush;
15047 ssid_whitelist->ssid_count = 0;
15048 } else {
15049 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
15050 return BCME_BADARG;
15051 }
15052 }
15053 ssid_whitelist->version = SSID_WHITELIST_VERSION;
15054 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
15055 err = dhd_iovar(&(dhd->pub), 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist,
15056 len, NULL, 0, TRUE);
15057 if (err != BCME_OK) {
15058 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
15059 }
15060 return err;
15061 }
15062 #endif /* GSCAN_SUPPORT */
15063
15064 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
15065 /* Linux wrapper to call common dhd_pno_get_gscan */
15066 void *
15067 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
15068 void *info, uint32 *len)
15069 {
15070 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15071
15072 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
15073 }
15074 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
15075 #endif
15076
15077 #ifdef RSSI_MONITOR_SUPPORT
15078 int
15079 dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
15080 int8 max_rssi, int8 min_rssi)
15081 {
15082 int err;
15083 wl_rssi_monitor_cfg_t rssi_monitor;
15084 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15085
15086 rssi_monitor.version = RSSI_MONITOR_VERSION;
15087 rssi_monitor.max_rssi = max_rssi;
15088 rssi_monitor.min_rssi = min_rssi;
15089 rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
15090 err = dhd_iovar(&(dhd->pub), 0, "rssi_monitor", (char *)&rssi_monitor,
15091 sizeof(rssi_monitor), NULL, 0, TRUE);
15092 if (err < 0 && err != BCME_UNSUPPORTED) {
15093 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
15094 }
15095 return err;
15096 }
15097 #endif /* RSSI_MONITOR_SUPPORT */
15098
15099 #ifdef DHDTCPACK_SUPPRESS
15100 int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
15101 {
15102 int err;
15103 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15104
15105 err = dhd_tcpack_suppress_set(&(dhd->pub), enable);
15106 if (err != BCME_OK) {
15107 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
15108 }
15109 return err;
15110 }
15111 #endif /* DHDTCPACK_SUPPRESS */
15112
15113 int
15114 dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
15115 {
15116 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15117 dhd_pub_t *dhdp = &dhd->pub;
15118
15119 if (!dhdp || !oui) {
15120 DHD_ERROR(("NULL POINTER : %s\n",
15121 __FUNCTION__));
15122 return BCME_ERROR;
15123 }
15124 if (ETHER_ISMULTI(oui)) {
15125 DHD_ERROR(("Expected unicast OUI\n"));
15126 return BCME_ERROR;
15127 } else {
15128 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
15129 memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
15130 DHD_ERROR(("Random MAC OUI to be used - %02x:%02x:%02x\n", rand_mac_oui[0],
15131 rand_mac_oui[1], rand_mac_oui[2]));
15132 }
15133 return BCME_OK;
15134 }
15135
15136 int
15137 dhd_set_rand_mac_oui(dhd_pub_t *dhd)
15138 {
15139 int err;
15140 wl_pfn_macaddr_cfg_t wl_cfg;
15141 uint8 *rand_mac_oui = dhd->rand_mac_oui;
15142
15143 memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
15144 memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
15145 wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
15146 if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
15147 wl_cfg.flags = 0;
15148 } else {
15149 wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
15150 }
15151
15152 DHD_ERROR(("Setting rand mac oui to FW - %02x:%02x:%02x\n", rand_mac_oui[0],
15153 rand_mac_oui[1], rand_mac_oui[2]));
15154
15155 err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
15156 if (err < 0) {
15157 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
15158 }
15159 return err;
15160 }
15161
15162 #ifdef RTT_SUPPORT
15163 #ifdef WL_CFG80211
15164 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
15165 int
15166 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
15167 {
15168 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15169
15170 return (dhd_rtt_set_cfg(&dhd->pub, buf));
15171 }
15172
15173 int
15174 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
15175 {
15176 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15177
15178 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
15179 }
15180
15181 int
15182 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
15183 {
15184 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15185
15186 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
15187 }
15188
15189 int
15190 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
15191 {
15192 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15193
15194 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
15195 }
15196
15197 int
15198 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
15199 {
15200 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15201
15202 return (dhd_rtt_capability(&dhd->pub, capa));
15203 }
15204
15205 int
15206 dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
15207 {
15208 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15209 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
15210 }
15211
15212 int
15213 dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
15214 {
15215 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15216 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
15217 }
15218
15219 int dhd_dev_rtt_cancel_responder(struct net_device *dev)
15220 {
15221 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
15222 return (dhd_rtt_cancel_responder(&dhd->pub));
15223 }
15224 #endif /* WL_CFG80211 */
15225 #endif /* RTT_SUPPORT */
15226
15227 #ifdef KEEP_ALIVE
15228 #define KA_TEMP_BUF_SIZE 512
15229 #define KA_FRAME_SIZE 300
15230
15231 int
15232 dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
15233 uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
15234 {
15235 const int ETHERTYPE_LEN = 2;
15236 char *pbuf = NULL;
15237 const char *str;
15238 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15239 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15240 int buf_len = 0;
15241 int str_len = 0;
15242 int res = BCME_ERROR;
15243 int len_bytes = 0;
15244 int i = 0;
15245
15246 /* ether frame to have both max IP pkt (256 bytes) and ether header */
15247 char *pmac_frame = NULL;
15248 char *pmac_frame_begin = NULL;
15249
15250 /*
15251 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15252 * dongle shall reject a mkeep_alive request.
15253 */
15254 if (!dhd_support_sta_mode(dhd_pub))
15255 return res;
15256
15257 DHD_TRACE(("%s execution\n", __FUNCTION__));
15258
15259 if ((pbuf = kzalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
15260 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15261 res = BCME_NOMEM;
15262 return res;
15263 }
15264
15265 if ((pmac_frame = kzalloc(KA_FRAME_SIZE, GFP_KERNEL)) == NULL) {
15266 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
15267 res = BCME_NOMEM;
15268 goto exit;
15269 }
15270 pmac_frame_begin = pmac_frame;
15271
15272 /*
15273 * Get current mkeep-alive status.
15274 */
15275 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
15276 KA_TEMP_BUF_SIZE, FALSE);
15277 if (res < 0) {
15278 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15279 goto exit;
15280 } else {
15281 /* Check available ID whether it is occupied */
15282 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15283 if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
15284 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
15285 __FUNCTION__, mkeep_alive_id));
15286
15287 /* Current occupied ID info */
15288 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
15289 DHD_ERROR((" Id : %d\n"
15290 " Period: %d msec\n"
15291 " Length: %d\n"
15292 " Packet: 0x",
15293 mkeep_alive_pktp->keep_alive_id,
15294 dtoh32(mkeep_alive_pktp->period_msec),
15295 dtoh16(mkeep_alive_pktp->len_bytes)));
15296
15297 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15298 DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
15299 }
15300 DHD_ERROR(("\n"));
15301
15302 res = BCME_NOTFOUND;
15303 goto exit;
15304 }
15305 }
15306
15307 /* Request the specified ID */
15308 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15309 memset(pbuf, 0, KA_TEMP_BUF_SIZE);
15310 str = "mkeep_alive";
15311 str_len = strlen(str);
15312 strncpy(pbuf, str, str_len);
15313 pbuf[str_len] = '\0';
15314
15315 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
15316 mkeep_alive_pkt.period_msec = htod32(period_msec);
15317 buf_len = str_len + 1;
15318 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15319 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15320
15321 /* ID assigned */
15322 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15323
15324 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
15325
15326 /*
15327 * Build up Ethernet Frame
15328 */
15329
15330 /* Mapping dest mac addr */
15331 memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
15332 pmac_frame += ETHER_ADDR_LEN;
15333
15334 /* Mapping src mac addr */
15335 memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
15336 pmac_frame += ETHER_ADDR_LEN;
15337
15338 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
15339 *(pmac_frame++) = 0x08;
15340 *(pmac_frame++) = 0x00;
15341
15342 /* Mapping IP pkt */
15343 memcpy(pmac_frame, ip_pkt, ip_pkt_len);
15344 pmac_frame += ip_pkt_len;
15345
15346 /*
15347 * Length of ether frame (assume to be all hexa bytes)
15348 * = src mac + dst mac + ether type + ip pkt len
15349 */
15350 len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
15351 memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
15352 buf_len += len_bytes;
15353 mkeep_alive_pkt.len_bytes = htod16(len_bytes);
15354
15355 /*
15356 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15357 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15358 * guarantee that the buffer is properly aligned.
15359 */
15360 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
15361
15362 res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
15363 exit:
15364 kfree(pmac_frame_begin);
15365 kfree(pbuf);
15366 return res;
15367 }
15368
15369 int
15370 dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
15371 {
15372 char *pbuf;
15373 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15374 wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
15375 int res = BCME_ERROR;
15376 int i;
15377
15378 /*
15379 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15380 * dongle shall reject a mkeep_alive request.
15381 */
15382 if (!dhd_support_sta_mode(dhd_pub))
15383 return res;
15384
15385 DHD_TRACE(("%s execution\n", __FUNCTION__));
15386
15387 /*
15388 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15389 */
15390 if ((pbuf = kmalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
15391 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15392 return res;
15393 }
15394
15395 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
15396 sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
15397 if (res < 0) {
15398 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15399 goto exit;
15400 } else {
15401 /* Check occupied ID */
15402 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15403 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
15404 DHD_INFO((" Id : %d\n"
15405 " Period: %d msec\n"
15406 " Length: %d\n"
15407 " Packet: 0x",
15408 mkeep_alive_pktp->keep_alive_id,
15409 dtoh32(mkeep_alive_pktp->period_msec),
15410 dtoh16(mkeep_alive_pktp->len_bytes)));
15411
15412 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15413 DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
15414 }
15415 DHD_INFO(("\n"));
15416 }
15417
15418 /* Make it stop if available */
15419 if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
15420 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
15421 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15422
15423 mkeep_alive_pkt.period_msec = 0;
15424 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15425 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15426 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15427
15428 res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
15429 (char *)&mkeep_alive_pkt,
15430 WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
15431 } else {
15432 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
15433 res = BCME_NOTFOUND;
15434 }
15435 exit:
15436 kfree(pbuf);
15437 return res;
15438 }
15439 #endif /* KEEP_ALIVE */
15440
15441 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15442 static void _dhd_apf_lock_local(dhd_info_t *dhd)
15443 {
15444 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15445 if (dhd) {
15446 mutex_lock(&dhd->dhd_apf_mutex);
15447 }
15448 #endif
15449 }
15450
15451 static void _dhd_apf_unlock_local(dhd_info_t *dhd)
15452 {
15453 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
15454 if (dhd) {
15455 mutex_unlock(&dhd->dhd_apf_mutex);
15456 }
15457 #endif
15458 }
15459
15460 static int
15461 __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
15462 u8* program, uint32 program_len)
15463 {
15464 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15465 dhd_pub_t *dhdp = &dhd->pub;
15466 wl_pkt_filter_t * pkt_filterp;
15467 wl_apf_program_t *apf_program;
15468 char *buf;
15469 u32 cmd_len, buf_len;
15470 int ifidx, ret;
15471 gfp_t kflags;
15472 char cmd[] = "pkt_filter_add";
15473
15474 ifidx = dhd_net2idx(dhd, ndev);
15475 if (ifidx == DHD_BAD_IF) {
15476 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15477 return -ENODEV;
15478 }
15479
15480 cmd_len = sizeof(cmd);
15481
15482 /* Check if the program_len is more than the expected len
15483 * and if the program is NULL return from here.
15484 */
15485 if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
15486 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15487 __FUNCTION__, program_len, program));
15488 return -EINVAL;
15489 }
15490 buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
15491 WL_APF_PROGRAM_FIXED_LEN + program_len;
15492
15493 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
15494 buf = kzalloc(buf_len, kflags);
15495 if (unlikely(!buf)) {
15496 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15497 return -ENOMEM;
15498 }
15499
15500 memcpy(buf, cmd, cmd_len);
15501
15502 pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
15503 pkt_filterp->id = htod32(filter_id);
15504 pkt_filterp->negate_match = htod32(FALSE);
15505 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
15506
15507 apf_program = &pkt_filterp->u.apf_program;
15508 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
15509 apf_program->instr_len = htod16(program_len);
15510 memcpy(apf_program->instrs, program, program_len);
15511
15512 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15513 if (unlikely(ret)) {
15514 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15515 __FUNCTION__, filter_id, ret));
15516 }
15517
15518 if (buf) {
15519 kfree(buf);
15520 }
15521 return ret;
15522 }
15523
15524 static int
15525 __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
15526 uint32 mode, uint32 enable)
15527 {
15528 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15529 dhd_pub_t *dhdp = &dhd->pub;
15530 wl_pkt_filter_enable_t * pkt_filterp;
15531 char *buf;
15532 u32 cmd_len, buf_len;
15533 int ifidx, ret;
15534 gfp_t kflags;
15535 char cmd[] = "pkt_filter_enable";
15536
15537 ifidx = dhd_net2idx(dhd, ndev);
15538 if (ifidx == DHD_BAD_IF) {
15539 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15540 return -ENODEV;
15541 }
15542
15543 cmd_len = sizeof(cmd);
15544 buf_len = cmd_len + sizeof(*pkt_filterp);
15545
15546 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
15547 buf = kzalloc(buf_len, kflags);
15548 if (unlikely(!buf)) {
15549 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15550 return -ENOMEM;
15551 }
15552
15553 memcpy(buf, cmd, cmd_len);
15554
15555 pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
15556 pkt_filterp->id = htod32(filter_id);
15557 pkt_filterp->enable = htod32(enable);
15558
15559 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15560 if (unlikely(ret)) {
15561 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15562 __FUNCTION__, filter_id, ret));
15563 goto exit;
15564 }
15565
15566 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
15567 WLC_SET_VAR, TRUE, ifidx);
15568 if (unlikely(ret)) {
15569 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15570 __FUNCTION__, filter_id, ret));
15571 }
15572
15573 exit:
15574 if (buf) {
15575 kfree(buf);
15576 }
15577 return ret;
15578 }
15579
15580 static int
15581 __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
15582 {
15583 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15584 dhd_pub_t *dhdp = &dhd->pub;
15585 int ifidx, ret;
15586
15587 ifidx = dhd_net2idx(dhd, ndev);
15588 if (ifidx == DHD_BAD_IF) {
15589 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15590 return -ENODEV;
15591 }
15592
15593 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
15594 htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
15595 if (unlikely(ret)) {
15596 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15597 __FUNCTION__, filter_id, ret));
15598 }
15599
15600 return ret;
15601 }
15602
void dhd_apf_lock(struct net_device *dev)
{
	/* Serialize APF filter operations for this interface. */
	_dhd_apf_lock_local(DHD_DEV_INFO(dev));
}
15608
void dhd_apf_unlock(struct net_device *dev)
{
	/* Release the APF serialization lock taken by dhd_apf_lock(). */
	_dhd_apf_unlock_local(DHD_DEV_INFO(dev));
}
15614
15615 int
15616 dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
15617 {
15618 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15619 dhd_pub_t *dhdp = &dhd->pub;
15620 int ifidx, ret;
15621
15622 if (!FW_SUPPORTED(dhdp, apf)) {
15623 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15624
15625 /*
15626 * Notify Android framework that APF is not supported by setting
15627 * version as zero.
15628 */
15629 *version = 0;
15630 return BCME_OK;
15631 }
15632
15633 ifidx = dhd_net2idx(dhd, ndev);
15634 if (ifidx == DHD_BAD_IF) {
15635 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15636 return -ENODEV;
15637 }
15638
15639 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
15640 WLC_GET_VAR, FALSE, ifidx);
15641 if (unlikely(ret)) {
15642 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15643 __FUNCTION__, ret));
15644 }
15645
15646 return ret;
15647 }
15648
15649 int
15650 dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
15651 {
15652 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15653 dhd_pub_t *dhdp = &dhd->pub;
15654 int ifidx, ret;
15655
15656 if (!FW_SUPPORTED(dhdp, apf)) {
15657 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15658 *max_len = 0;
15659 return BCME_OK;
15660 }
15661
15662 ifidx = dhd_net2idx(dhd, ndev);
15663 if (ifidx == DHD_BAD_IF) {
15664 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
15665 return -ENODEV;
15666 }
15667
15668 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
15669 WLC_GET_VAR, FALSE, ifidx);
15670 if (unlikely(ret)) {
15671 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
15672 __FUNCTION__, ret));
15673 }
15674
15675 return ret;
15676 }
15677
/*
 * Install (or replace) the single APF program slot (PKT_FILTER_APF_ID).
 * An existing program is deleted first; if the driver is in early suspend
 * the new filter is immediately re-enabled. Serialized via DHD_APF_LOCK.
 */
int
dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
	uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret;

	DHD_APF_LOCK(ndev);

	/* delete, if filter already exists */
	if (dhdp->apf_set) {
		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
		if (unlikely(ret)) {
			goto exit;
		}
		dhdp->apf_set = FALSE;
	}

	ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
	if (ret) {
		goto exit;
	}
	dhdp->apf_set = TRUE;

	/* APF is only enabled in STA mode (never when acting as an AP). */
	if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
		/* Driver is still in (early) suspend state, enable APF filter back */
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
	}
exit:
	DHD_APF_UNLOCK(ndev);

	return ret;
}
15713
15714 int
15715 dhd_dev_apf_enable_filter(struct net_device *ndev)
15716 {
15717 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15718 dhd_pub_t *dhdp = &dhd->pub;
15719 int ret = 0;
15720
15721 DHD_APF_LOCK(ndev);
15722
15723 if (dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
15724 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15725 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
15726 }
15727
15728 DHD_APF_UNLOCK(ndev);
15729
15730 return ret;
15731 }
15732
15733 int
15734 dhd_dev_apf_disable_filter(struct net_device *ndev)
15735 {
15736 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15737 dhd_pub_t *dhdp = &dhd->pub;
15738 int ret = 0;
15739
15740 DHD_APF_LOCK(ndev);
15741
15742 if (dhdp->apf_set) {
15743 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15744 PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
15745 }
15746
15747 DHD_APF_UNLOCK(ndev);
15748
15749 return ret;
15750 }
15751
15752 int
15753 dhd_dev_apf_delete_filter(struct net_device *ndev)
15754 {
15755 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15756 dhd_pub_t *dhdp = &dhd->pub;
15757 int ret = 0;
15758
15759 DHD_APF_LOCK(ndev);
15760
15761 if (dhdp->apf_set) {
15762 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
15763 if (!ret) {
15764 dhdp->apf_set = FALSE;
15765 }
15766 }
15767
15768 DHD_APF_UNLOCK(ndev);
15769
15770 return ret;
15771 }
15772 #endif /* PKT_FILTER_SUPPORT && APF */
15773
15774 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
/*
 * Deferred-work handler that propagates a firmware/bus HANG to upper layers:
 * closes the primary net_device (build-dependent) and notifies wext/cfg80211.
 * Scheduled from dhd_os_send_hang_message() via the deferred work queue.
 */
static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
{
	dhd_info_t *dhd;
	struct net_device *dev;

	dhd = (dhd_info_t *)dhd_info;
	/* Primary interface (index 0) carries the hang notification. */
	dev = dhd->iflist[0]->net;

	if (dev) {
		/*
		 * For HW2, dev_close need to be done to recover
		 * from upper layer after hang. For Interposer skip
		 * dev_close so that dhd iovars can be used to take
		 * socramdump after crash, also skip for HW4 as
		 * handling of hang event is different
		 */
#if !defined(CUSTOMER_HW2_INTERPOSER)
		rtnl_lock();
		dev_close(dev);
		rtnl_unlock();
#endif
#if defined(WL_WIRELESS_EXT)
		wl_iw_send_priv_event(dev, "HANG");
#endif
#if defined(WL_CFG80211)
		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
	}
}
15804
15805 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
15806 extern dhd_pub_t *link_recovery;
15807 void dhd_host_recover_link(void)
15808 {
15809 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
15810 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
15811 dhd_bus_set_linkdown(link_recovery, TRUE);
15812 dhd_os_send_hang_message(link_recovery);
15813 }
15814 EXPORT_SYMBOL(dhd_host_recover_link);
15815 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
15816
/*
 * Queue a (one-shot) HANG notification toward userspace/cfg80211.
 * Only the first call after a hang takes effect (hang_was_sent latch);
 * the actual notification runs later in dhd_hang_process() on the
 * deferred work queue. Always returns 0.
 */
int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
	int ret = 0;
	if (dhdp) {
#if defined(DHD_HANG_SEND_UP_TEST)
		/* Clear any pending test-injected hang request. */
		if (dhdp->req_hang_type) {
			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
				__FUNCTION__, dhdp->req_hang_type));
			dhdp->req_hang_type = 0;
		}
#endif /* DHD_HANG_SEND_UP_TEST */

		if (!dhdp->hang_was_sent) {
#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
			/* Deliberately crash the kernel after too many hangs in a row. */
			dhdp->hang_counts++;
			if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
				DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
					__func__, dhdp->hang_counts));
				BUG_ON(1);
			}
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
#ifdef DHD_DEBUG_UART
			/* If PCIe lane has broken, execute the debug uart application
			 * to gether a ramdump data from dongle via uart
			 */
			if (!dhdp->info->duart_execute) {
				dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
						(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
						dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
			}
#endif /* DHD_DEBUG_UART */
			dhdp->hang_was_sent = 1;
#ifdef BT_OVER_SDIO
			dhdp->is_bt_recovery_required = TRUE;
#endif
			/* Hand the actual HANG propagation to dhd_hang_process(). */
			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
				DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH);
			DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d s=%d\n", __FUNCTION__,
				dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
		}
	}
	return ret;
}
15860
/*
 * net_device-level entry point for raising a HANG. Honors the hang_report
 * toggle (testing can suppress delivery) and, for BT-over-SDIO builds,
 * also kicks BT recovery. NOTE: the braces below are interleaved with
 * #ifdef BT_OVER_SDIO — the if (netif_running()) only exists in that build.
 */
int net_os_send_hang_message(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd) {
		/* Report FW problem when enabled */
		if (dhd->pub.hang_report) {
#ifdef BT_OVER_SDIO
			if (netif_running(dev)) {
#endif /* BT_OVER_SDIO */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
				ret = dhd_os_send_hang_message(&dhd->pub);
#else
				ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
#ifdef BT_OVER_SDIO
			}
			DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
#endif /* BT_OVER_SDIO */
		} else {
			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
				__FUNCTION__));
		}
	}
	return ret;
}
15889
15890 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
15891 {
15892 dhd_info_t *dhd = NULL;
15893 dhd_pub_t *dhdp = NULL;
15894 int reason;
15895
15896 dhd = DHD_DEV_INFO(dev);
15897 if (dhd) {
15898 dhdp = &dhd->pub;
15899 }
15900
15901 if (!dhd || !dhdp) {
15902 return 0;
15903 }
15904
15905 reason = bcm_strtoul(string_num, NULL, 0);
15906 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
15907
15908 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
15909 reason = 0;
15910 }
15911
15912 dhdp->hang_reason = reason;
15913
15914 return net_os_send_hang_message(dev);
15915 }
15916 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
15917
15918
15919 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
15920 {
15921 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15922 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
15923 }
15924
15925 bool dhd_force_country_change(struct net_device *dev)
15926 {
15927 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15928
15929 if (dhd && dhd->pub.up)
15930 return dhd->pub.force_country_change;
15931 return FALSE;
15932 }
15933
/*
 * Translate an ISO country code into the platform-customized wl_country_t
 * (ccode/regrev) via the platform adapter. Skipped when the regulatory
 * data comes from a blob (DHD_BLOB_EXISTENCE_CHECK builds).
 */
void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
	if (!dhd->pub.is_blob)
#endif /* DHD_BLOB_EXISTENCE_CHECK */
	{
#if defined(CUSTOM_COUNTRY_CODE)
		get_customized_country_code(dhd->adapter, country_iso_code, cspec,
			dhd->pub.dhd_cflags);
#else
		get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
	}

	/* Keep 'dhd' referenced even when both ifdef branches compile out. */
	BCM_REFERENCE(dhd);
}
15952
/*
 * Record the new country spec in the driver context and, on cfg80211
 * builds, refresh the wiphy band/channel info. No-op unless the
 * interface is up.
 */
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif

	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, notify);
#endif
	}
}
15967
/*
 * React to a band change by refreshing cfg80211 wiphy bands.
 * NOTE(review): the 'band' argument is currently unused — the refresh is
 * unconditional on cfg80211 builds and the function is a no-op otherwise.
 */
void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, true);
#endif
	}
}
15980
15981 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
15982 {
15983 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15984
15985 if (!fw || fw[0] == '\0')
15986 return -EINVAL;
15987
15988 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
15989 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
15990
15991 #if defined(SOFTAP)
15992 if (strstr(fw, "apsta") != NULL) {
15993 DHD_INFO(("GOT APSTA FIRMWARE\n"));
15994 ap_fw_loaded = TRUE;
15995 } else {
15996 DHD_INFO(("GOT STA FIRMWARE\n"));
15997 ap_fw_loaded = FALSE;
15998 }
15999 #endif
16000 return 0;
16001 }
16002
void dhd_net_if_lock(struct net_device *dev)
{
	/* Take the per-driver network-interface mutex. */
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}
16008
void dhd_net_if_unlock(struct net_device *dev)
{
	/* Release the per-driver network-interface mutex. */
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
16014
16015 static void dhd_net_if_lock_local(dhd_info_t *dhd)
16016 {
16017 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16018 if (dhd)
16019 mutex_lock(&dhd->dhd_net_if_mutex);
16020 #endif
16021 }
16022
16023 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
16024 {
16025 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16026 if (dhd)
16027 mutex_unlock(&dhd->dhd_net_if_mutex);
16028 #endif
16029 }
16030
16031 static void dhd_suspend_lock(dhd_pub_t *pub)
16032 {
16033 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16034 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16035 if (dhd)
16036 mutex_lock(&dhd->dhd_suspend_mutex);
16037 #endif
16038 }
16039
16040 static void dhd_suspend_unlock(dhd_pub_t *pub)
16041 {
16042 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
16043 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16044 if (dhd)
16045 mutex_unlock(&dhd->dhd_suspend_mutex);
16046 #endif
16047 }
16048
16049 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
16050 {
16051 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16052 unsigned long flags = 0;
16053
16054 if (dhd)
16055 spin_lock_irqsave(&dhd->dhd_lock, flags);
16056
16057 return flags;
16058 }
16059
16060 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
16061 {
16062 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16063
16064 if (dhd)
16065 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
16066 }
16067
16068 /* Linux specific multipurpose spinlock API */
16069 void *
16070 dhd_os_spin_lock_init(osl_t *osh)
16071 {
16072 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
16073 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
16074 /* and this results in kernel asserts in internal builds */
16075 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
16076 if (lock)
16077 spin_lock_init(lock);
16078 return ((void *)lock);
16079 }
16080 void
16081 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
16082 {
16083 if (lock)
16084 MFREE(osh, lock, sizeof(spinlock_t) + 4);
16085 }
16086 unsigned long
16087 dhd_os_spin_lock(void *lock)
16088 {
16089 unsigned long flags = 0;
16090
16091 if (lock)
16092 spin_lock_irqsave((spinlock_t *)lock, flags);
16093
16094 return flags;
16095 }
16096 void
16097 dhd_os_spin_unlock(void *lock, unsigned long flags)
16098 {
16099 if (lock)
16100 spin_unlock_irqrestore((spinlock_t *)lock, flags);
16101 }
16102
16103 static int
16104 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
16105 {
16106 return (atomic_read(&dhd->pend_8021x_cnt));
16107 }
16108
16109 #define MAX_WAIT_FOR_8021X_TX 100
16110
16111 int
16112 dhd_wait_pend8021x(struct net_device *dev)
16113 {
16114 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16115 int timeout = msecs_to_jiffies(10);
16116 int ntimes = MAX_WAIT_FOR_8021X_TX;
16117 int pend = dhd_get_pend_8021x_cnt(dhd);
16118
16119 while (ntimes && pend) {
16120 if (pend) {
16121 set_current_state(TASK_INTERRUPTIBLE);
16122 DHD_PERIM_UNLOCK(&dhd->pub);
16123 schedule_timeout(timeout);
16124 DHD_PERIM_LOCK(&dhd->pub);
16125 set_current_state(TASK_RUNNING);
16126 ntimes--;
16127 }
16128 pend = dhd_get_pend_8021x_cnt(dhd);
16129 }
16130 if (ntimes == 0)
16131 {
16132 atomic_set(&dhd->pend_8021x_cnt, 0);
16133 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
16134 }
16135 return pend;
16136 }
16137
16138 #if defined(DHD_DEBUG)
/*
 * Write 'size' bytes from 'buf' to 'file_name' (opened with 'flags',
 * mode 0664) and fsync it. Runs under KERNEL_DS so kernel buffers are
 * accepted by the VFS write path on pre-4.14 kernels.
 * Returns BCME_OK on success, -1 on open failure, or a negative VFS error.
 */
int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* open file to write */
	fp = filp_open(file_name, flags, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
		ret = -1;
		goto exit;
	}

	/* Write buf to file */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	/* vfs_write() was removed for kernel callers in 4.14. */
	ret = kernel_write(fp, buf, size, &pos);
#else
	ret = vfs_write(fp, buf, size, &pos);
#endif
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
		goto exit;
	}

	/* Sync file from filesystem to physical media */
	ret = vfs_fsync(fp, 0);
	if (ret < 0) {
		DHD_ERROR(("sync file error, error = %d\n", ret));
		goto exit;
	}
	ret = BCME_OK;

exit:
	/* close file before return */
	if (!IS_ERR(fp))
		filp_close(fp, current->files);

	/* restore previous address limit */
	set_fs(old_fs);

	return ret;
}
16186 #endif
16187
16188 #ifdef DHD_DEBUG
/*
 * Map a DUMP_TYPE_* memdump trigger code to a short string used in the
 * socram dump file name. 'buf' must be large enough for the longest
 * label plus NUL (callers pass a 32-byte buffer); unknown codes yield
 * "Unknown_type".
 */
static void
dhd_convert_memdump_type_to_str(uint32 type, char *buf)
{
	char *type_str = NULL;

	switch (type) {
		case DUMP_TYPE_RESUMED_ON_TIMEOUT:
			type_str = "resumed_on_timeout";
			break;
		case DUMP_TYPE_D3_ACK_TIMEOUT:
			type_str = "D3_ACK_timeout";
			break;
		case DUMP_TYPE_DONGLE_TRAP:
			type_str = "Dongle_Trap";
			break;
		case DUMP_TYPE_MEMORY_CORRUPTION:
			type_str = "Memory_Corruption";
			break;
		case DUMP_TYPE_PKTID_AUDIT_FAILURE:
			type_str = "PKTID_AUDIT_Fail";
			break;
		case DUMP_TYPE_PKTID_INVALID:
			type_str = "PKTID_INVALID";
			break;
		case DUMP_TYPE_SCAN_TIMEOUT:
			type_str = "SCAN_timeout";
			break;
		case DUMP_TYPE_JOIN_TIMEOUT:
			type_str = "JOIN_timeout";
			break;
		case DUMP_TYPE_SCAN_BUSY:
			type_str = "SCAN_Busy";
			break;
		case DUMP_TYPE_BY_SYSDUMP:
			type_str = "BY_SYSDUMP";
			break;
		case DUMP_TYPE_BY_LIVELOCK:
			type_str = "BY_LIVELOCK";
			break;
		case DUMP_TYPE_AP_LINKUP_FAILURE:
			type_str = "BY_AP_LINK_FAILURE";
			break;
		case DUMP_TYPE_AP_ABNORMAL_ACCESS:
			type_str = "INVALID_ACCESS";
			break;
		case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
			type_str = "CFG_VENDOR_TRIGGERED";
			break;
		case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
			type_str = "ERROR_RX_TIMED_OUT";
			break;
		case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
			type_str = "ERROR_TX_TIMED_OUT";
			break;
		case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
			type_str = "BY_INVALID_RING_RDWR";
			break;
		case DUMP_TYPE_DONGLE_HOST_EVENT:
			type_str = "BY_DONGLE_HOST_EVENT";
			break;
		case DUMP_TYPE_TRANS_ID_MISMATCH:
			type_str = "BY_TRANS_ID_MISMATCH";
			break;
		case DUMP_TYPE_HANG_ON_IFACE_OP_FAIL:
			type_str = "HANG_IFACE_OP_FAIL";
			break;
#ifdef SUPPORT_LINKDOWN_RECOVERY
		case DUMP_TYPE_READ_SHM_FAIL:
			type_str = "READ_SHM_FAIL";
			break;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		default:
			type_str = "Unknown_type";
			break;
	}

	/* Copy exactly strlen() bytes and terminate manually. */
	strncpy(buf, type_str, strlen(type_str));
	buf[strlen(type_str)] = 0;
}
16268
/*
 * Write a socram/memory dump to a timestamped file whose directory and
 * open flags are build-variant specific (HW4 / HW2 / Panda / Brix).
 * The file name embeds the dump-trigger string from
 * dhd_convert_memdump_type_to_str(). Returns write_file()'s result.
 */
int
write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
	int ret = 0;
	char memdump_path[128];
	char memdump_type[32];
	struct timeval curtime;
	uint32 file_mode;

	/* Init file name */
	memset(memdump_path, 0, sizeof(memdump_path));
	memset(memdump_type, 0, sizeof(memdump_type));
	do_gettimeofday(&curtime);
	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
#ifdef CUSTOMER_HW4_DEBUG
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH, fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2)
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		"/data/misc/wifi/", fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		"/data/misc/wifi/", fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY;
#else
	snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
		"/installmedia/", fname, memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
	 * calling BUG_ON immediately after collecting the socram dump.
	 * So the file write operation should directly write the contents into the
	 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
	 * instead of appending.
	 */
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
	{
		/* Probe-open to pick a writable directory before the real write. */
		struct file *fp = filp_open(memdump_path, file_mode, 0664);
		/* Check if it is live Brix image having /installmedia, else use /data */
		if (IS_ERR(fp)) {
			DHD_ERROR(("open file %s, try /data/\n", memdump_path));
			snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
				"/data/", fname, memdump_type,
				(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
		} else {
			filp_close(fp, NULL);
		}
	}
#endif /* CUSTOMER_HW4_DEBUG */

	/* print SOCRAM dump file path */
	DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));

	/* Write file */
	ret = write_file(memdump_path, file_mode, buf, size);

	return ret;
}
16331 #endif /* DHD_DEBUG */
16332
/*
 * Arm any pending RX/ctrl timeout wakelocks that were requested earlier via
 * dhd_os_wake_lock_{rx,ctrl}_timeout_enable(), then clear the pending
 * request values (they are one-shot).
 * Returns the larger of the two pending timeout values (ms, per the
 * msecs_to_jiffies() use below), or 0 if nothing was pending or the
 * wakelocks are not initialized.
 */
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		/* report the longer of the two pending timeouts */
		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#ifdef CONFIG_HAS_WAKELOCK
		if (dhd->wakelock_rx_timeout_enable)
			wake_lock_timeout(&dhd->wl_rxwake,
				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
		if (dhd->wakelock_ctrl_timeout_enable)
			wake_lock_timeout(&dhd->wl_ctrlwake,
				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif
		/* requests are one-shot: clear them once armed */
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
16357
16358 int net_os_wake_lock_timeout(struct net_device *dev)
16359 {
16360 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16361 int ret = 0;
16362
16363 if (dhd)
16364 ret = dhd_os_wake_lock_timeout(&dhd->pub);
16365 return ret;
16366 }
16367
16368 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
16369 {
16370 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16371 unsigned long flags;
16372
16373 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16374 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16375 if (val > dhd->wakelock_rx_timeout_enable)
16376 dhd->wakelock_rx_timeout_enable = val;
16377 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16378 }
16379 return 0;
16380 }
16381
16382 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
16383 {
16384 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16385 unsigned long flags;
16386
16387 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16388 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16389 if (val > dhd->wakelock_ctrl_timeout_enable)
16390 dhd->wakelock_ctrl_timeout_enable = val;
16391 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16392 }
16393 return 0;
16394 }
16395
/*
 * Cancel any pending ctrl timeout request and release the ctrl wakelock if
 * it is currently held.  Always returns 0.
 */
int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
		/* drop the lock itself if it was already armed */
		if (wake_lock_active(&dhd->wl_ctrlwake))
			wake_unlock(&dhd->wl_ctrlwake);
#endif
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}
16412
16413 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
16414 {
16415 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16416 int ret = 0;
16417
16418 if (dhd)
16419 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
16420 return ret;
16421 }
16422
16423 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
16424 {
16425 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16426 int ret = 0;
16427
16428 if (dhd)
16429 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
16430 return ret;
16431 }
16432
16433
16434 #if defined(DHD_TRACE_WAKE_LOCK)
16435 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16436 #include <linux/hashtable.h>
16437 #else
16438 #include <linux/hash.h>
16439 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16440
16441
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

/* Runtime switch for wakelock call-site tracing (non-zero = record events). */
int trace_wklock_onoff = 1;
/* Kind of wakelock event recorded against a call site. */
typedef enum dhd_wklock_type {
	DHD_WAKE_LOCK,
	DHD_WAKE_UNLOCK,
	DHD_WAIVE_LOCK,
	DHD_RESTORE_LOCK
} dhd_wklock_t;

/* One entry per distinct caller address, kept in wklock_history. */
struct wk_trace_record {
	unsigned long addr;	        /* Address of the instruction */
	dhd_wklock_t lock_type;         /* lock_type */
	unsigned long long counter;		/* counter information */
	struct hlist_node wklock_node;  /* hash node */
};
16464
/*
 * Look up the trace record for caller address @addr, or NULL if that call
 * site has not been recorded yet.  Callers hold wakelock_spinlock (see
 * STORE_WKLOCK_RECORD usage).  Two loop headers are selected by kernel
 * version; both share the brace-delimited body below.
 */
static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
	struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
	struct hlist_node *entry;
	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		if (wklock_info->addr == addr) {
			return wklock_info;
		}
	}
	return NULL;
}
16482
16483
16484 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16485 #define HASH_ADD(hashtable, node, key) \
16486 do { \
16487 hash_add(hashtable, node, key); \
16488 } while (0);
16489 #else
16490 #define HASH_ADD(hashtable, node, key) \
16491 do { \
16492 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
16493 hlist_add_head(node, &hashtable[index]); \
16494 } while (0);
16495 #endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
16496
/*
 * STORE_WKLOCK_RECORD - record a wakelock event against the calling
 * function's return address.  WAIVE/RESTORE events snapshot the current
 * dhd->wakelock_counter; LOCK/UNLOCK events bump the per-call-site counter.
 * Allocates with GFP_ATOMIC because callers hold wakelock_spinlock.
 * No trailing semicolon after while (0): callers supply it, keeping the
 * macro safe as a single statement in unbraced if/else bodies.
 */
#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) {\
				printk("Can't allocate wk_trace_record \n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0)
16525
/*
 * Print every recorded wakelock call site with its event type and counter.
 * Called under wakelock_spinlock by dhd_wk_lock_stats_dump().
 */
static inline void dhd_wk_lock_rec_dump(void)
{
	int bkt;
	struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
	/* open-coded whole-table walk for pre-3.7 kernels */
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		switch (wklock_info->lock_type) {
			case DHD_WAKE_LOCK:
				printk("wakelock lock : %pS  lock_counter : %llu \n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
			case DHD_WAKE_UNLOCK:
				printk("wakelock unlock : %pS, unlock_counter : %llu \n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
			case DHD_WAIVE_LOCK:
				printk("wakelock waive : %pS  before_waive : %llu \n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
			case DHD_RESTORE_LOCK:
				printk("wakelock restore : %pS, after_waive : %llu \n",
					(void *)wklock_info->addr, wklock_info->counter);
				break;
		}
	}
}
16560
/* Initialize the (empty) wakelock trace hash table under the wakelock lock. */
static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	int i;
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_init(wklock_history);
#else
	/* no hash_init() on old kernels: clear each bucket head by hand */
	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
		INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
16577
/*
 * Free every wakelock trace record and empty the hash table.  Uses the
 * *_safe iteration variants because entries are deleted while walking.
 */
static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
	int bkt;
	struct wk_trace_record *wklock_info;
	struct hlist_node *tmp;
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry_safe(wklock_info, entry, tmp,
			&wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
		hash_del(&wklock_info->wklock_node);
#else
		hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
		kfree(wklock_info);
	}
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
16607
/* Dump the wakelock call-site statistics to the kernel log. */
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	unsigned long flags;

	printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n");
	/* hold the lock so records cannot be freed mid-dump */
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	dhd_wk_lock_rec_dump();
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);

}
16619 #else
16620 #define STORE_WKLOCK_RECORD(wklock_type)
16621 #endif /* ! DHD_TRACE_WAKE_LOCK */
16622
/*
 * Take (or nest) the main DHD wakelock.  The OS lock / PM stay-awake is
 * only acquired on the 0 -> 1 transition and only when not waived.
 * Returns the new nesting count, or 0 if wakelocks are not initialized.
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
#ifdef DHD_TRACE_WAKE_LOCK
		if (trace_wklock_onoff) {
			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
		}
#endif /* DHD_TRACE_WAKE_LOCK */
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}

	return ret;
}
16650
/* Hold the event-processing wakelock (not reference counted here). */
void dhd_event_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(pub);
#endif
	}
}
16663
/* Hold the PM wakelock for @val ms; no-op without CONFIG_HAS_WAKELOCK. */
void
dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16675
/* Hold the tx-flowcontrol wakelock for @val ms; no-op without CONFIG_HAS_WAKELOCK. */
void
dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16687
16688 int net_os_wake_lock(struct net_device *dev)
16689 {
16690 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16691 int ret = 0;
16692
16693 if (dhd)
16694 ret = dhd_os_wake_lock(&dhd->pub);
16695 return ret;
16696 }
16697
/*
 * Release one reference on the main DHD wakelock (after first arming any
 * pending timeout wakelocks).  The OS lock / PM relax happens only on the
 * 1 -> 0 transition and only when not waived.
 * Returns the remaining nesting count (0 when fully released).
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
16728
/* Release the event-processing wakelock taken by dhd_event_wake_lock(). */
void dhd_event_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(pub);
#endif
	}
}
16741
/* Release the PM wakelock early if it is still active (it may also expire). */
void dhd_pm_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_pmwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_pmwake)) {
			wake_unlock(&dhd->wl_pmwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16755
/* Release the tx-flowcontrol wakelock early if it is still active. */
void dhd_txfl_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_txflwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_txflwake)) {
			wake_unlock(&dhd->wl_txflwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16769
/*
 * Report whether a DHD-internal wakelock would veto suspend: 1 if the main
 * or watchdog wakelock is active (or, on SDIO without CONFIG_HAS_WAKELOCK,
 * if the counter is up and bus runtime PM is enabled), else 0.
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
16792
/*
 * Like dhd_os_check_wakelock() but checks every DHD wakelock; logs which
 * lock(s) are active before vetoing suspend.  Returns 1 if any lock is
 * held, else 0.
 */
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
#if defined(CONFIG_HAS_WAKELOCK)
	int l1, l2, l3, l4, l7, l8, l9;
	/* l5/l6 stay 0 when the optional OOB-irq / scan wakelocks are absent */
	int l5 = 0, l6 = 0;
	int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
	dhd_info_t *dhd;

	if (!pub) {
		return 0;
	}
	dhd = (dhd_info_t *)(pub->info);
	if (!dhd) {
		return 0;
	}
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	c = dhd->wakelock_counter;
	l1 = wake_lock_active(&dhd->wl_wifi);
	l2 = wake_lock_active(&dhd->wl_wdwake);
	l3 = wake_lock_active(&dhd->wl_rxwake);
	l4 = wake_lock_active(&dhd->wl_ctrlwake);
	l7 = wake_lock_active(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	l8 = wake_lock_active(&dhd->wl_pmwake);
	l9 = wake_lock_active(&dhd->wl_txflwake);
	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);

	/* Indicate to the Host to avoid going to suspend if internal locks are up */
	if (lock_active) {
		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
			"ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
		return 1;
	}
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
	return 0;
}
16845
16846 int net_os_wake_unlock(struct net_device *dev)
16847 {
16848 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16849 int ret = 0;
16850
16851 if (dhd)
16852 ret = dhd_os_wake_unlock(&dhd->pub);
16853 return ret;
16854 }
16855
/*
 * Take (or nest) the watchdog wakelock.  The OS lock is acquired only on
 * the first reference.  Returns the new nesting count, or 0 if no dhd.
 */
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used : lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
16875
/*
 * Release the watchdog wakelock.  Note: this zeroes the whole nesting
 * count rather than decrementing it.  Always returns 0.
 */
int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
16894
16895 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the PCIe OOB host-wake interrupt wakelock for @val ms. */
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16907
/* Release the PCIe OOB host-wake interrupt wakelock early if still active. */
void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_intrwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16922 #endif /* BCMPCIE_OOB_HOST_WAKE */
16923
16924 #ifdef DHD_USE_SCAN_WAKELOCK
/* Hold the scan wakelock for @val ms. */
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16936
/* Release the scan wakelock early if it is still active. */
void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_scanwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_scanwake)) {
			wake_unlock(&dhd->wl_scanwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
16951 #endif /* DHD_USE_SCAN_WAKELOCK */
16952
16953 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
16954 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
16955 */
/*
 * Temporarily waive the main wakelock (e.g. around suspend-path IOVARs);
 * must be balanced by dhd_os_wake_lock_restore().
 * NOTE(review): returns wakelock_wd_counter, not wakelock_counter, despite
 * the comment above this pair of functions — confirm intent.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
		if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			/* record current lock status */
			dhd->wakelock_before_waive = dhd->wakelock_counter;
			dhd->waive_wakelock = TRUE;
		}
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
16981
/*
 * End a waive started by dhd_os_wake_lock_waive() and reconcile the OS
 * wakelock with any lock/unlock activity that happened while waived.
 * Returns wakelock_wd_counter (see NOTE on the waive function).
 */
int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (!dhd)
		return 0;
	if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
		return 0;

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
	if (!dhd->waive_wakelock)
		goto exit;

	dhd->waive_wakelock = FALSE;
	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
	 * the lock in between, do the same by calling wake_unlock or pm_relax
	 */
#ifdef DHD_TRACE_WAKE_LOCK
	if (trace_wklock_onoff) {
		STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
	}
#endif /* DHD_TRACE_WAKE_LOCK */

	/* went 0 -> >0 while waived: take the OS lock we skipped */
	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif
	/* went >0 -> 0 while waived: drop the OS lock we kept */
	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(&dhd->pub);
#endif
	}
	dhd->wakelock_before_waive = 0;
exit:
	ret = dhd->wakelock_wd_counter;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	return ret;
}
17029
/*
 * Create all per-driver wakelocks and zero the timeout/nesting counters.
 * Paired with dhd_os_wake_lock_destroy().  wl_wifi / wl_wdwake are managed
 * elsewhere (see the note below about wl_wifi lifetime).
 */
void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	#ifdef CONFIG_HAS_WAKELOCK
	// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
	wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
	wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
	/* trace table is set up regardless of CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
17054
/*
 * Tear down the wakelocks created by dhd_os_wake_lock_init() and reset the
 * pending-timeout counters.
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
	wake_lock_destroy(&dhd->wl_pmwake);
	wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	/* Mirror dhd_os_wake_lock_init(): the trace table is initialized
	 * unconditionally there, so free it unconditionally here too.  It
	 * previously sat inside the CONFIG_HAS_WAKELOCK block and leaked
	 * when DHD_TRACE_WAKE_LOCK was set without CONFIG_HAS_WAKELOCK.
	 */
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
17079
17080 bool dhd_os_check_if_up(dhd_pub_t *pub)
17081 {
17082 if (!pub)
17083 return FALSE;
17084 return pub->up;
17085 }
17086
17087 /* function to collect firmware, chip id and chip version info */
17088 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
17089 {
17090 int i;
17091
17092 i = snprintf(info_string, sizeof(info_string),
17093 " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
17094 printf("%s\n", info_string);
17095
17096 if (!dhdp)
17097 return;
17098
17099 i = snprintf(&info_string[i], sizeof(info_string) - i,
17100 "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
17101 dhd_conf_get_chiprev(dhdp));
17102 }
17103
/*
 * Issue a wl ioctl on behalf of an in-driver caller: resolve the interface
 * index from @net, forward to dhd_wl_ioctl() under wake-lock and perimeter
 * lock, and run hang detection on the result.
 * Returns the ioctl status, -EINVAL on bad arguments, -ENODEV on bad ifidx.
 */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	/* inspect the result for firmware-hang signatures */
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}
17136
/*
 * Run hang detection for the net_device at @ifidx given ioctl status @ret.
 * NOTE(review): declared bool but returns -EINVAL on a bad index, which a
 * bool-consuming caller sees as "hang" (non-zero) — confirm intent.
 */
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
17149
/* Return instance */
/* Return this adapter's unit number (driver instance index). */
int dhd_get_instance(dhd_pub_t *dhdp)
{
	return dhdp->info->unit;
}
17155
17156
17157 #ifdef PROP_TXSTATUS
17158
/* WLFC platform hook: switch SDIO F2 block size for non-legacy flow control. */
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
17166
/* WLFC platform hook: restore the default SDIO F2 block size. */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
17174
/*
 * Decide whether firmware flow control should be skipped for interface
 * @idx.  With SKIP_WLFC_ON_CONCURRENT: skip unless cfg80211 reports
 * concurrent (VSDB) mode; without it: never skip.
 */
bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
{
#ifdef SKIP_WLFC_ON_CONCURRENT

#ifdef WL_CFG80211
	struct net_device * net =  dhd_idx2net((dhd_pub_t *)dhdp, idx);
	if (net)
	/* enable flow control in vsdb mode */
	return !(wl_cfg80211_is_concurrent_mode(net));
	/* net == NULL falls through to the final return FALSE below */
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
	return FALSE;
}
17193 #endif /* PROP_TXSTATUS */
17194
17195 #ifdef BCMDBGFS
17196 #include <linux/debugfs.h>
17197
/* State for the "dhd" debugfs directory and its register-access "mem" file. */
typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;	/* "dhd" directory */
	struct dentry	*debugfs_mem;	/* "mem" file under the directory */
	dhd_pub_t	*dhdp;		/* driver instance backing the files */
	uint32		size;		/* size of the accessible register window */
} dhd_dbgfs_t;

/* single global instance: only one debugfs view is supported */
dhd_dbgfs_t g_dbgfs;
17206
17207 extern uint32 dhd_readregl(void *bp, uint32 addr);
17208 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
17209
/* debugfs open: stash the inode's private data on the file handle. */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
17216
17217 static ssize_t
17218 dhd_dbg_state_read(struct file *file, char __user *ubuf,
17219 size_t count, loff_t *ppos)
17220 {
17221 ssize_t rval;
17222 uint32 tmp;
17223 loff_t pos = *ppos;
17224 size_t ret;
17225
17226 if (pos < 0)
17227 return -EINVAL;
17228 if (pos >= g_dbgfs.size || !count)
17229 return 0;
17230 if (count > g_dbgfs.size - pos)
17231 count = g_dbgfs.size - pos;
17232
17233 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
17234 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
17235
17236 ret = copy_to_user(ubuf, &tmp, 4);
17237 if (ret == count)
17238 return -EFAULT;
17239
17240 count -= ret;
17241 *ppos = pos + count;
17242 rval = count;
17243
17244 return rval;
17245 }
17246
17247
17248 static ssize_t
17249 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
17250 {
17251 loff_t pos = *ppos;
17252 size_t ret;
17253 uint32 buf;
17254
17255 if (pos < 0)
17256 return -EINVAL;
17257 if (pos >= g_dbgfs.size || !count)
17258 return 0;
17259 if (count > g_dbgfs.size - pos)
17260 count = g_dbgfs.size - pos;
17261
17262 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
17263 if (ret == count)
17264 return -EFAULT;
17265
17266 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
17267 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
17268
17269 return count;
17270 }
17271
17272
/*
 * debugfs llseek over the register window [0, g_dbgfs.size].
 * NOTE(review): whence==2 (SEEK_END) computes size - off rather than the
 * conventional size + off — confirm intent before relying on it.
 */
loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case 0: /* SEEK_SET */
			pos = off;
			break;
		case 1: /* SEEK_CUR */
			pos = file->f_pos + off;
			break;
		case 2: /* SEEK_END (see NOTE above) */
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
17290
/* File operations for the "mem" debugfs register-access file. */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write	= dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek	= dhd_debugfs_lseek
};
17297
/* Create the "mem" file inside the dhd debugfs directory (if the dir exists). */
static void dhd_dbgfs_create(void)
{
	if (g_dbgfs.debugfs_dir) {
		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
			NULL, &dhd_dbg_state_ops);
	}
}
17305
/*
 * Set up the "dhd" debugfs directory and register-access file for @dhdp.
 * NOTE(review): debugfs_create_dir() may return NULL (e.g. debugfs not
 * mounted/enabled on some kernels); only IS_ERR() is checked here — confirm.
 */
void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
	g_dbgfs.dhdp = dhdp;
	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */

	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
	if (IS_ERR(g_dbgfs.debugfs_dir)) {
		g_dbgfs.debugfs_dir = NULL;
		return;
	}

	dhd_dbgfs_create();

	return;
}
17321
/* Remove the debugfs entries and clear all cached debugfs state. */
void dhd_dbgfs_remove(void)
{
	debugfs_remove(g_dbgfs.debugfs_mem);
	debugfs_remove(g_dbgfs.debugfs_dir);

	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
17329 #endif /* BCMDBGFS */
17330
17331 #ifdef WLMEDIA_HTSF
17332
/*
 * Stamp an outgoing packet with an HTSF timestamp record when its dest
 * port falls in [tsport, tsport+20].
 * NOTE(review): the fixed byte offsets (40 for dport, 44 checksum, 82/84
 * magic/htsf, HTSF_HOSTOFFSET for the record) assume a specific Ethernet/
 * IP/UDP layout — confirm against the test harness before changing.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/*  timestamp packet  */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
/*		memcpy(&proto, p1+26, 4);  	*/
		memcpy(&dport, p1+40, 2);
/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
		dport = ntoh16(dport);
	}

	/* timestamp only if  icmp or udb iperf with port 5555 */
/*	if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic  = HTSFMAGIC;
		ts.prio   = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10    = get_cycles();
		ts.t10    = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
17376
17377 static void dhd_dump_htsfhisto(histo_t *his, char *s)
17378 {
17379 int pktcnt = 0, curval = 0, i;
17380 for (i = 0; i < (NUMBIN-2); i++) {
17381 curval += 500;
17382 printf("%d ", his->bin[i]);
17383 pktcnt += his->bin[i];
17384 }
17385 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
17386 his->bin[NUMBIN-1], s);
17387 }
17388
17389 static
17390 void sorttobin(int value, histo_t *histo)
17391 {
17392 int i, binval = 0;
17393
17394 if (value < 0) {
17395 histo->bin[NUMBIN-1]++;
17396 return;
17397 }
17398 if (value > histo->bin[NUMBIN-2]) /* store the max value */
17399 histo->bin[NUMBIN-2] = value;
17400
17401 for (i = 0; i < (NUMBIN-2); i++) {
17402 binval += 500; /* 500m s bins */
17403 if (value <= binval) {
17404 histo->bin[i]++;
17405 return;
17406 }
17407 }
17408 histo->bin[NUMBIN-3]++;
17409 }
17410
17411 static
17412 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
17413 {
17414 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
17415 struct sk_buff *skb;
17416 char *p1;
17417 uint16 old_magic;
17418 int d1, d2, d3, end2end;
17419 htsfts_t *htsf_ts;
17420 uint32 htsf;
17421
17422 skb = PKTTONATIVE(dhdp->osh, pktbuf);
17423 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
17424
17425 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
17426 memcpy(&old_magic, p1+78, 2);
17427 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
17428 } else {
17429 return;
17430 }
17431
17432 if (htsf_ts->magic == HTSFMAGIC) {
17433 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
17434 htsf_ts->cE0 = get_cycles();
17435 }
17436
17437 if (old_magic == 0xACAC) {
17438
17439 tspktcnt++;
17440 htsf = dhd_get_htsf(dhd, 0);
17441 memcpy(skb->data+92, &htsf, sizeof(uint32));
17442
17443 memcpy(&ts[tsidx].t1, skb->data+80, 16);
17444
17445 d1 = ts[tsidx].t2 - ts[tsidx].t1;
17446 d2 = ts[tsidx].t3 - ts[tsidx].t2;
17447 d3 = ts[tsidx].t4 - ts[tsidx].t3;
17448 end2end = ts[tsidx].t4 - ts[tsidx].t1;
17449
17450 sorttobin(d1, &vi_d1);
17451 sorttobin(d2, &vi_d2);
17452 sorttobin(d3, &vi_d3);
17453 sorttobin(end2end, &vi_d4);
17454
17455 if (end2end > 0 && end2end > maxdelay) {
17456 maxdelay = end2end;
17457 maxdelaypktno = tspktcnt;
17458 memcpy(&maxdelayts, &ts[tsidx], 16);
17459 }
17460 if (++tsidx >= TSMAX)
17461 tsidx = 0;
17462 }
17463 }
17464
/*
 * Extrapolate the current firmware TSF value from the host cycle
 * counter, using the coefficient last computed by htsf_update().
 *
 * @dhd:   driver instance holding the htsf calibration state
 * @ifidx: unused (kept for signature compatibility with callers)
 *
 * Returns the estimated TSF, or 0 when no coefficient is available yet.
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32 factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	if (cur_cycle > dhd->htsf.last_cycle)
		delta = cur_cycle - dhd->htsf.last_cycle;
	else {
		/* 32-bit cycle counter wrapped since the last calibration */
		delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
		/* coefficient is fixed-point: coef.coefdec1 coefdec2;
		 * interpolate between 1/factor and 1/(factor+1) using the
		 * second decimal digit.
		 */
		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		delta_us = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	} else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
17495
/*
 * Dump the ts[] ring buffer of per-packet timestamps (t1..t4), the
 * per-leg deltas, and finally the worst-case latency record captured
 * by dhd_htsf_addrxts().
 */
static void dhd_dump_latency(void)
{
	int i, max = 0;
	int d1, d2, d3, d4, d5;

	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
	for (i = 0; i < TSMAX; i++) {
		d1 = ts[i].t2 - ts[i].t1;
		d2 = ts[i].t3 - ts[i].t2;
		d3 = ts[i].t4 - ts[i].t3;
		d4 = ts[i].t4 - ts[i].t1;
		/* d5: best end-to-end so far, used only for the comparison */
		d5 = ts[max].t4-ts[max].t1;
		if (d4 > d5 && d4 > 0) {
			max = i;
		}
		printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
			d1, d2, d3, d4, i);
	}

	printf("current idx = %d \n", tsidx);

	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
	printf("%08X %08X %08X %08X \t%d %d %d %d\n",
	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
	maxdelayts.t2 - maxdelayts.t1,
	maxdelayts.t3 - maxdelayts.t2,
	maxdelayts.t4 - maxdelayts.t3,
	maxdelayts.t4 - maxdelayts.t1);
}
17526
17527
/*
 * Query the firmware "tsf" iovar and print it next to the host-side
 * extrapolated HTSF value, for calibration debugging.
 *
 * Returns 0 on success, -EOPNOTSUPP when the dongle does not support
 * the iovar, or the negative iovar error otherwise.
 */
static int
dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
{
	char buf[32];
	int ret;
	uint32 s1, s2;

	/* firmware returns the TSF as two 32-bit words */
	struct tsf {
		uint32 low;
		uint32 high;
	} tsf_buf;

	memset(&tsf_buf, 0, sizeof(tsf_buf));

	/* sample the host estimate before and after the iovar round-trip */
	s1 = dhd_get_htsf(dhd, 0);
	ret = dhd_iovar(&dhd->pub, ifidx, "tsf", NULL, 0, buf, sizeof(buf), FALSE);
	if (ret < 0) {
		if (ret == -EIO) {
			DHD_ERROR(("%s: tsf is not supported by device\n",
				dhd_ifname(&dhd->pub, ifidx)));
			return -EOPNOTSUPP;
		}
		return ret;
	}
	s2 = dhd_get_htsf(dhd, 0);

	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
		dhd->htsf.coefdec2, s2-tsf_buf.low);
	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
	return 0;
}
17561
17562 void htsf_update(dhd_info_t *dhd, void *data)
17563 {
17564 static ulong cur_cycle = 0, prev_cycle = 0;
17565 uint32 htsf, tsf_delta = 0;
17566 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
17567 ulong b, a;
17568 cycles_t t;
17569
17570 /* cycles_t in inlcude/mips/timex.h */
17571
17572 t = get_cycles();
17573
17574 prev_cycle = cur_cycle;
17575 cur_cycle = t;
17576
17577 if (cur_cycle > prev_cycle)
17578 cyc_delta = cur_cycle - prev_cycle;
17579 else {
17580 b = cur_cycle;
17581 a = prev_cycle;
17582 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
17583 }
17584
17585 if (data == NULL)
17586 printf(" tsf update ata point er is null \n");
17587
17588 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
17589 memcpy(&cur_tsf, data, sizeof(tsf_t));
17590
17591 if (cur_tsf.low == 0) {
17592 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
17593 return;
17594 }
17595
17596 if (cur_tsf.low > prev_tsf.low)
17597 tsf_delta = (cur_tsf.low - prev_tsf.low);
17598 else {
17599 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
17600 cur_tsf.low, prev_tsf.low));
17601 if (cur_tsf.high > prev_tsf.high) {
17602 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
17603 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
17604 } else {
17605 return; /* do not update */
17606 }
17607 }
17608
17609 if (tsf_delta) {
17610 hfactor = cyc_delta / tsf_delta;
17611 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
17612 dec1 = tmp/tsf_delta;
17613 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
17614 tmp = (tmp - (dec1*tsf_delta))*10;
17615 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
17616
17617 if (dec3 > 4) {
17618 if (dec2 == 9) {
17619 dec2 = 0;
17620 if (dec1 == 9) {
17621 dec1 = 0;
17622 hfactor++;
17623 } else {
17624 dec1++;
17625 }
17626 } else {
17627 dec2++;
17628 }
17629 }
17630 }
17631
17632 if (hfactor) {
17633 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
17634 dhd->htsf.coef = hfactor;
17635 dhd->htsf.last_cycle = cur_cycle;
17636 dhd->htsf.last_tsf = cur_tsf.low;
17637 dhd->htsf.coefdec1 = dec1;
17638 dhd->htsf.coefdec2 = dec2;
17639 } else {
17640 htsf = prev_tsf.low;
17641 }
17642 }
17643
17644 #endif /* WLMEDIA_HTSF */
17645
17646 #ifdef CUSTOM_SET_CPUCORE
/*
 * Pin the DPC and RXF kernel threads to dedicated CPU cores while a
 * VHT80 channel is active (set == TRUE), or back to the primary core
 * (set == FALSE).  Retries each affinity change up to
 * MAX_RETRY_SET_CPUCORE times, sleeping 1 ms between attempts.
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	/* only worth pinning for high-throughput (VHT80) operation */
	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	/* NOTE: retry_set is shared with the loop above, so the retry
	 * budget is cumulative across both threads.
	 */
	if (RXF_CPUCORE) {
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
#ifdef DHD_OF_SUPPORT
	/* steer the WLAN interrupt to the same core as the DPC */
	interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
#endif /* DHD_OF_SUPPORT */
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
17697 #endif /* CUSTOM_SET_CPUCORE */
17698
17699 #ifdef DHD_MCAST_REGEN
17700 /* Get interface specific ap_isolate configuration */
17701 int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
17702 {
17703 dhd_info_t *dhd = dhdp->info;
17704 dhd_if_t *ifp;
17705
17706 ASSERT(idx < DHD_MAX_IFS);
17707
17708 ifp = dhd->iflist[idx];
17709
17710 return ifp->mcast_regen_bss_enable;
17711 }
17712
17713 /* Set interface specific mcast_regen configuration */
17714 int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
17715 {
17716 dhd_info_t *dhd = dhdp->info;
17717 dhd_if_t *ifp;
17718
17719 ASSERT(idx < DHD_MAX_IFS);
17720
17721 ifp = dhd->iflist[idx];
17722
17723 ifp->mcast_regen_bss_enable = val;
17724
17725 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
17726 * is enabled
17727 */
17728 dhd_update_rx_pkt_chainable_state(dhdp, idx);
17729 return BCME_OK;
17730 }
17731 #endif /* DHD_MCAST_REGEN */
17732
17733 /* Get interface specific ap_isolate configuration */
17734 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
17735 {
17736 dhd_info_t *dhd = dhdp->info;
17737 dhd_if_t *ifp;
17738
17739 ASSERT(idx < DHD_MAX_IFS);
17740
17741 ifp = dhd->iflist[idx];
17742
17743 return ifp->ap_isolate;
17744 }
17745
17746 /* Set interface specific ap_isolate configuration */
17747 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
17748 {
17749 dhd_info_t *dhd = dhdp->info;
17750 dhd_if_t *ifp;
17751
17752 ASSERT(idx < DHD_MAX_IFS);
17753
17754 ifp = dhd->iflist[idx];
17755
17756 if (ifp)
17757 ifp->ap_isolate = val;
17758
17759 return 0;
17760 }
17761
17762 #ifdef DHD_FW_COREDUMP
17763 #if defined(CONFIG_X86)
17764 #define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
17765 #define MEMDUMPINFO_INST "/data/.memdump.info"
#endif /* CONFIG_X86 */
17767
17768 #ifdef CUSTOMER_HW4_DEBUG
17769 #define MEMDUMPINFO PLATFORM_PATH".memdump.info"
17770 #elif defined(CUSTOMER_HW2)
17771 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17772 #elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
17773 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17774 #else
17775 #define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
17776 #endif /* CUSTOMER_HW4_DEBUG */
17777
/*
 * Read the memdump policy from the MEMDUMPINFO file (an ASCII integer)
 * and store it in dhd->memdump_enabled.  When the file is missing or
 * unreadable, a platform-specific default is applied instead.
 */
void dhd_get_memdump_info(dhd_pub_t *dhd)
{
	struct file *fp = NULL;
	uint32 mem_val = DUMP_MEMFILE_MAX;
	int ret = 0;
	char *filepath = MEMDUMPINFO;

	/* Read memdump info from the file */
	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
#if defined(CONFIG_X86)
		/* Check if it is Live Brix Image */
		if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
			goto done;
		}
		/* Try if it is Installed Brix Image */
		filepath = MEMDUMPINFO_INST;
		DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
		fp = filp_open(filepath, O_RDONLY, 0);
		if (IS_ERR(fp)) {
			DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
			goto done;
		}
#else /* Non Brix Android platform */
		goto done;
#endif /* CONFIG_X86 */
	}

	/* Handle success case */
	/* kernel_read() dropped its positional 'offset' argument in 4.14 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
#else
	ret = kernel_read(fp, 0, (char *)&mem_val, 4);
#endif
	if (ret < 0) {
		DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
		filp_close(fp, NULL);
		goto done;
	}

	/* file content is ASCII; reinterpret the raw bytes as a string */
	mem_val = bcm_atoi((char *)&mem_val);

	filp_close(fp, NULL);

#ifdef DHD_INIT_DEFAULT_MEMDUMP
	if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
		mem_val = DUMP_MEMFILE_BUGON;
#endif /* DHD_INIT_DEFAULT_MEMDUMP */

done:
	/* out-of-range (or unread) values fall back to the default */
#ifdef CUSTOMER_HW4_DEBUG
	dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
#else
	dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE;
#endif /* CUSTOMER_HW4_DEBUG */

	DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
}
17837
/*
 * Queue a SoC RAM dump (@buf/@size) onto the deferred work queue for
 * dhd_mem_dump() to write out.  The dhd_dump_t descriptor is freed by
 * the work handler.  When memdump_enabled is DUMP_MEMONLY the system
 * is crashed immediately instead of writing a file.
 */
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
	dhd_dump_t *dump = NULL;
	dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
	if (dump == NULL) {
		DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
		return;
	}
	dump->buf = buf;
	dump->bufsize = size;

	/* log the dump buffer's addresses for post-mortem correlation */
#if defined(CONFIG_ARM64)
	DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
		(uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
	DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
		(uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	if (dhdp->memdump_enabled == DUMP_MEMONLY) {
		/* deliberate crash: the RAM dump is retrieved from the
		 * resulting kernel crash dump rather than from a file
		 */
		BUG_ON(1);
	}

#ifdef DHD_LOG_DUMP
	if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
		dhd_schedule_log_dump(dhdp);
	}
#endif /* DHD_LOG_DUMP */
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
		DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
17868
/*
 * Deferred-work handler: write the SoC RAM dump described by
 * @event_info (a dhd_dump_t from dhd_schedule_memdump()) to the
 * "mem_dump" file, then optionally BUG() depending on the configured
 * memdump policy and dump type.  Frees the descriptor on exit.
 */
static void
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_dump_t *dump = event_info;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	if (!dump) {
		DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
		return;
	}

	if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
		dhd->pub.memdump_success = FALSE;
	}

	/* Crash the host (policy DUMP_MEMFILE_BUGON) unless this dump was
	 * requested by sysdump or a vendor trigger, or (with
	 * DHD_DEBUG_UART) the file write failed.
	 */
	if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
		dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif /* DHD_LOG_DUMP */
#ifdef DHD_DEBUG_UART
		dhd->pub.memdump_success == TRUE &&
#endif	/* DHD_DEBUG_UART */
		dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {

#ifdef SHOW_LOGTRACE
		/* Wait till event_log_dispatcher_work finishes */
		cancel_work_sync(&dhd->event_log_dispatcher_work);
#endif /* SHOW_LOGTRACE */

		BUG_ON(1);
	}
	MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
}
17908 #endif /* DHD_FW_COREDUMP */
17909
17910 #ifdef DHD_SSSR_DUMP
17911
/*
 * Deferred-work handler: write the SSSR (silicon save/restore) capture
 * buffers to files -- per-D11-core before/after-SR register dumps plus
 * the VASIP core dumps when present.  Buffers that were not captured
 * (NULL pointers) are skipped.
 */
static void
dhd_sssr_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_pub_t *dhdp;
	int i;
	char before_sr_dump[128];
	char after_sr_dump[128];

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	dhdp = &dhd->pub;

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		/* Init file name */
		memset(before_sr_dump, 0, sizeof(before_sr_dump));
		memset(after_sr_dump, 0, sizeof(after_sr_dump));

		/* file names: sssr_core_<i>_before_SR / _after_SR */
		snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
			"sssr_core", i, "before_SR");
		snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
			"sssr_core", i, "after_SR");

		/* only dump cores that were actually taken out of reset */
		if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
				dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
				DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
					__FUNCTION__));
			}
		}
		if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
				dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
				DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
					__FUNCTION__));
			}
		}
	}

	if (dhdp->sssr_vasip_buf_before) {
		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_before,
			dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_before_SR")) {
			DHD_ERROR(("%s: writing SSSR VASIP dump before to the file failed\n",
				__FUNCTION__));
		}
	}

	if (dhdp->sssr_vasip_buf_after) {
		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_after,
			dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_after_SR")) {
			DHD_ERROR(("%s: writing SSSR VASIP dump after to the file failed\n",
				__FUNCTION__));
		}
	}

}
17971
/* Queue an SSSR dump onto the deferred work queue (runs dhd_sssr_dump). */
void
dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
{
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
		DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
17978 #endif /* DHD_SSSR_DUMP */
17979
17980 #ifdef DHD_LOG_DUMP
17981 static void
17982 dhd_log_dump(void *handle, void *event_info, u8 event)
17983 {
17984 dhd_info_t *dhd = handle;
17985
17986 if (!dhd) {
17987 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17988 return;
17989 }
17990
17991 if (do_dhd_log_dump(&dhd->pub)) {
17992 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
17993 return;
17994 }
17995 }
17996
/* Queue a debug-log dump onto the deferred work queue (runs dhd_log_dump). */
void dhd_schedule_log_dump(dhd_pub_t *dhdp)
{
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		(void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
		dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
18003
/*
 * Write every g_dld_buf[] debug-log ring buffer to a timestamped
 * "debug_dump" file, resetting each buffer after it is flushed.
 *
 * Returns BCME_OK on success, a negative errno on open/write failure,
 * or -1 when @dhdp is NULL.  Runs in process context (deferred work):
 * uses set_fs(KERNEL_DS) so vfs_write() accepts kernel buffers on
 * pre-4.14 kernels.
 */
static int
do_dhd_log_dump(dhd_pub_t *dhdp)
{
	int ret = 0, i = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	unsigned int wr_size = 0;
	char dump_path[128];
	struct timeval curtime;
	uint32 file_mode;
	unsigned long flags = 0;
	struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];

	/* header written before the first buffer */
	const char *pre_strs =
		"-------------------- General log ---------------------------\n";

	/* separator written before each subsequent buffer */
	const char *post_strs =
		"-------------------- Specific log --------------------------\n";

	if (!dhdp) {
		return -1;
	}

	DHD_ERROR(("DHD version: %s\n", dhd_version));
	DHD_ERROR(("F/W version: %s\n", fw_version));

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Init file name */
	memset(dump_path, 0, sizeof(dump_path));
	do_gettimeofday(&curtime);
	snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH "debug_dump",
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;

	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
	fp = filp_open(dump_path, file_mode, 0664);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		DHD_ERROR(("open file error, err = %d\n", ret));
		goto exit;
	}

	/* vfs_write() was superseded by kernel_write() in 4.14 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	ret = kernel_write(fp, pre_strs, strlen(pre_strs), &pos);
#else
	ret = vfs_write(fp, pre_strs, strlen(pre_strs), &pos);
#endif
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
		goto exit;
	}

	do {
		unsigned int buf_size = (unsigned int)(dld_buf->max -
			(unsigned long)dld_buf->buffer);
		/* a wrapped ring is flushed in full; otherwise only the
		 * portion between front and present is valid
		 */
		if (dld_buf->wraparound) {
			wr_size = buf_size;
		} else {
			if (!dld_buf->buffer[0]) { /* print log if buf is empty. */
				DHD_ERROR_EX(("Buffer is empty. No event/log.\n"));
			}
			wr_size = (unsigned int)(dld_buf->present - dld_buf->front);
		}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
		ret = kernel_write(fp, dld_buf->buffer, wr_size, &pos);
#else
		ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
#endif
		if (ret < 0) {
			DHD_ERROR(("write file error, err = %d\n", ret));
			goto exit;
		}

		/* re-init dhd_log_dump_buf structure */
		spin_lock_irqsave(&dld_buf->lock, flags);
		dld_buf->wraparound = 0;
		dld_buf->present = dld_buf->front;
		dld_buf->remain = buf_size;
		bzero(dld_buf->buffer, buf_size);
		spin_unlock_irqrestore(&dld_buf->lock, flags);
		ret = BCME_OK;

		if (++i < DLD_BUFFER_NUM) {
			dld_buf = &g_dld_buf[i];
		} else {
			break;
		}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
		ret = kernel_write(fp, post_strs, strlen(post_strs), &pos);
#else
		ret = vfs_write(fp, post_strs, strlen(post_strs), &pos);
#endif
		if (ret < 0) {
			DHD_ERROR(("write file error, err = %d\n", ret));
			goto exit;
		}
	} while (1);

exit:
#if defined(STAT_REPORT)
	/* append the stat report only when everything above succeeded */
	if (!IS_ERR(fp) && ret >= 0) {
		wl_stat_report_file_save(dhdp, fp);
	}
#endif /* STAT_REPORT */

	if (!IS_ERR(fp)) {
		filp_close(fp, NULL);
	}
	set_fs(old_fs);

	return ret;
}
18123 #endif /* DHD_LOG_DUMP */
18124
18125
18126 #ifdef BCMASSERT_LOG
18127 #ifdef CUSTOMER_HW4_DEBUG
18128 #define ASSERTINFO PLATFORM_PATH".assert.info"
18129 #elif defined(CUSTOMER_HW2)
18130 #define ASSERTINFO "/data/misc/wifi/.assert.info"
18131 #else
18132 #define ASSERTINFO "/installmedia/.assert.info"
18133 #endif /* CUSTOMER_HW4_DEBUG */
/*
 * Read the assert policy from the ASSERTINFO file (an ASCII integer)
 * into the global g_assert_type; when the file is missing, keep the
 * platform default.
 */
void dhd_get_assert_info(dhd_pub_t *dhd)
{
	struct file *fp = NULL;
	char *filepath = ASSERTINFO;
	int mem_val = -1;

	/*
	 * Read assert info from the file
	 * 0: Trigger Kernel crash by panic()
	 * 1: Print out the logs and don't trigger Kernel panic. (default)
	 * 2: Trigger Kernel crash by BUG()
	 * File doesn't exist: Keep default value (1).
	 */
	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
	} else {
		/* kernel_read() dropped its positional 'offset' arg in 4.14 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
		ssize_t ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
#else
		int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
#endif
		if (ret < 0) {
			DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
		} else {
			/* file content is ASCII text, not a binary value */
			mem_val = bcm_atoi((char *)&mem_val);
			DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
		}
		filp_close(fp, NULL);
	}
#ifdef CUSTOMER_HW4_DEBUG
	/* By default. set to 1, No Kernel Panic */
	g_assert_type = (mem_val >= 0) ? mem_val : 1;
#else
	/* By default. set to 0, Kernel Panic */
	g_assert_type = (mem_val >= 0) ? mem_val : 0;
#endif
}
18172 #endif /* BCMASSERT_LOG */
18173
18174 /*
18175 * This call is to get the memdump size so that,
18176 * halutil can alloc that much buffer in user space.
18177 */
18178 int
18179 dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
18180 {
18181 int ret = BCME_OK;
18182 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18183 dhd_pub_t *dhdp = &dhd->pub;
18184
18185 if (dhdp->busstate == DHD_BUS_DOWN) {
18186 DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
18187 return BCME_ERROR;
18188 }
18189
18190 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
18191 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
18192 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
18193 return BCME_ERROR;
18194 }
18195
18196 ret = dhd_common_socram_dump(dhdp);
18197 if (ret == BCME_OK) {
18198 *dump_size = dhdp->soc_ram_length;
18199 }
18200 return ret;
18201 }
18202
18203 /*
18204 * This is to get the actual memdup after getting the memdump size
18205 */
18206 int
18207 dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
18208 {
18209 int ret = BCME_OK;
18210 int orig_len = 0;
18211 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
18212 dhd_pub_t *dhdp = &dhd->pub;
18213 if (buf == NULL)
18214 return BCME_ERROR;
18215 orig_len = *size;
18216 if (dhdp->soc_ram) {
18217 if (orig_len >= dhdp->soc_ram_length) {
18218 memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
18219 /* reset the storage of dump */
18220 memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
18221 *size = dhdp->soc_ram_length;
18222 } else {
18223 ret = BCME_BUFTOOSHORT;
18224 DHD_ERROR(("The length of the buffer is too short"
18225 " to save the memory dump with %d\n", dhdp->soc_ram_length));
18226 }
18227 } else {
18228 DHD_ERROR(("socram_dump is not ready to get\n"));
18229 ret = BCME_NOTREADY;
18230 }
18231 return ret;
18232 }
18233
18234 int
18235 dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
18236 {
18237 char *fw_str;
18238
18239 if (size == 0)
18240 return BCME_BADARG;
18241
18242 fw_str = strstr(info_string, "Firmware: ");
18243 if (fw_str == NULL) {
18244 return BCME_ERROR;
18245 }
18246
18247 memset(*buf, 0, size);
18248 if (dhd_ver) {
18249 strncpy(*buf, dhd_version, size - 1);
18250 } else {
18251 strncpy(*buf, fw_str, size - 1);
18252 }
18253 return BCME_OK;
18254 }
18255
18256 #ifdef DHD_WMF
18257 /* Returns interface specific WMF configuration */
18258 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
18259 {
18260 dhd_info_t *dhd = dhdp->info;
18261 dhd_if_t *ifp;
18262
18263 ASSERT(idx < DHD_MAX_IFS);
18264
18265 ifp = dhd->iflist[idx];
18266 return &ifp->wmf;
18267 }
18268 #endif /* DHD_WMF */
18269
18270 #if defined(TRAFFIC_MGMT_DWM)
/*
 * Set the packet priority of an outgoing frame from the DSCP-to-WMM
 * mapping table (dhd_dwm_tbl), when traffic management DWM is enabled.
 * Handles plain Ethernet and single 802.1Q-tagged IPv4/IPv6 frames.
 */
void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf)
{
	struct ether_header *eh;
	struct ethervlan_header *evh;
	uint8 *pktdata, *ip_body;
	uint8  dwm_filter;
	uint8 tos_tc = 0;
	uint8 dscp = 0;
	pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
	eh = (struct ether_header *) pktdata;
	ip_body = NULL;

	if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
		/* locate the IP header behind an optional VLAN tag */
		if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
			evh = (struct ethervlan_header *)eh;
			if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
				(evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
				ip_body = pktdata + sizeof(struct ethervlan_header);
			}
		} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
			(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
			ip_body = pktdata + sizeof(struct ether_header);
		}
		if (ip_body) {
			/* TOS (v4) / traffic-class (v6) byte; DSCP is its top 6 bits */
			tos_tc = IP_TOS46(ip_body);
			dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
		}

		/* non-IP frames leave dscp at 0, which indexes the default entry */
		if (dscp < DHD_DWM_TBL_SIZE) {
			dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp];
			if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) {
				PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter));
			}
		}
	}
}
18307 #endif
18308
18309 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
18310 {
18311 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
18312 }
18313
18314 #ifdef DHD_L2_FILTER
18315 arp_table_t*
18316 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
18317 {
18318 dhd_info_t *dhd = dhdp->info;
18319 dhd_if_t *ifp;
18320
18321 ASSERT(bssidx < DHD_MAX_IFS);
18322
18323 ifp = dhd->iflist[bssidx];
18324 return ifp->phnd_arp_table;
18325 }
18326
18327 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
18328 {
18329 dhd_info_t *dhd = dhdp->info;
18330 dhd_if_t *ifp;
18331
18332 ASSERT(idx < DHD_MAX_IFS);
18333
18334 ifp = dhd->iflist[idx];
18335
18336 if (ifp)
18337 return ifp->parp_enable;
18338 else
18339 return FALSE;
18340 }
18341
18342 /* Set interface specific proxy arp configuration */
18343 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
18344 {
18345 dhd_info_t *dhd = dhdp->info;
18346 dhd_if_t *ifp;
18347 ASSERT(idx < DHD_MAX_IFS);
18348 ifp = dhd->iflist[idx];
18349
18350 if (!ifp)
18351 return BCME_ERROR;
18352
18353 /* At present all 3 variables are being
18354 * handled at once
18355 */
18356 ifp->parp_enable = val;
18357 ifp->parp_discard = val;
18358 ifp->parp_allnode = val;
18359
18360 /* Flush ARP entries when disabled */
18361 if (val == FALSE) {
18362 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
18363 FALSE, dhdp->tickcnt);
18364 }
18365 return BCME_OK;
18366 }
18367
18368 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
18369 {
18370 dhd_info_t *dhd = dhdp->info;
18371 dhd_if_t *ifp;
18372
18373 ASSERT(idx < DHD_MAX_IFS);
18374
18375 ifp = dhd->iflist[idx];
18376
18377 ASSERT(ifp);
18378 return ifp->parp_discard;
18379 }
18380
18381 bool
18382 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
18383 {
18384 dhd_info_t *dhd = dhdp->info;
18385 dhd_if_t *ifp;
18386
18387 ASSERT(idx < DHD_MAX_IFS);
18388
18389 ifp = dhd->iflist[idx];
18390
18391 ASSERT(ifp);
18392
18393 return ifp->parp_allnode;
18394 }
18395
18396 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
18397 {
18398 dhd_info_t *dhd = dhdp->info;
18399 dhd_if_t *ifp;
18400
18401 ASSERT(idx < DHD_MAX_IFS);
18402
18403 ifp = dhd->iflist[idx];
18404
18405 ASSERT(ifp);
18406
18407 return ifp->dhcp_unicast;
18408 }
18409
18410 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
18411 {
18412 dhd_info_t *dhd = dhdp->info;
18413 dhd_if_t *ifp;
18414 ASSERT(idx < DHD_MAX_IFS);
18415 ifp = dhd->iflist[idx];
18416
18417 ASSERT(ifp);
18418
18419 ifp->dhcp_unicast = val;
18420 return BCME_OK;
18421 }
18422
18423 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
18424 {
18425 dhd_info_t *dhd = dhdp->info;
18426 dhd_if_t *ifp;
18427
18428 ASSERT(idx < DHD_MAX_IFS);
18429
18430 ifp = dhd->iflist[idx];
18431
18432 ASSERT(ifp);
18433
18434 return ifp->block_ping;
18435 }
18436
18437 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
18438 {
18439 dhd_info_t *dhd = dhdp->info;
18440 dhd_if_t *ifp;
18441 ASSERT(idx < DHD_MAX_IFS);
18442 ifp = dhd->iflist[idx];
18443
18444 ASSERT(ifp);
18445
18446 ifp->block_ping = val;
18447 /* Disable rx_pkt_chain feature for interface if block_ping option is
18448 * enabled
18449 */
18450 dhd_update_rx_pkt_chainable_state(dhdp, idx);
18451 return BCME_OK;
18452 }
18453
18454 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
18455 {
18456 dhd_info_t *dhd = dhdp->info;
18457 dhd_if_t *ifp;
18458
18459 ASSERT(idx < DHD_MAX_IFS);
18460
18461 ifp = dhd->iflist[idx];
18462
18463 ASSERT(ifp);
18464
18465 return ifp->grat_arp;
18466 }
18467
18468 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
18469 {
18470 dhd_info_t *dhd = dhdp->info;
18471 dhd_if_t *ifp;
18472 ASSERT(idx < DHD_MAX_IFS);
18473 ifp = dhd->iflist[idx];
18474
18475 ASSERT(ifp);
18476
18477 ifp->grat_arp = val;
18478
18479 return BCME_OK;
18480 }
18481 #endif /* DHD_L2_FILTER */
18482
18483
18484 #if defined(SET_RPS_CPUS)
18485 int dhd_rps_cpus_enable(struct net_device *net, int enable)
18486 {
18487 dhd_info_t *dhd = DHD_DEV_INFO(net);
18488 dhd_if_t *ifp;
18489 int ifidx;
18490 char * RPS_CPU_SETBUF;
18491
18492 ifidx = dhd_net2idx(dhd, net);
18493 if (ifidx == DHD_BAD_IF) {
18494 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
18495 return -ENODEV;
18496 }
18497
18498 if (ifidx == PRIMARY_INF) {
18499 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
18500 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
18501 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
18502 } else {
18503 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
18504 RPS_CPU_SETBUF = RPS_CPUS_MASK;
18505 }
18506 } else if (ifidx == VIRTUAL_INF) {
18507 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
18508 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
18509 } else {
18510 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
18511 return -EINVAL;
18512 }
18513
18514 ifp = dhd->iflist[ifidx];
18515 if (ifp) {
18516 if (enable) {
18517 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
18518 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
18519 } else {
18520 custom_rps_map_clear(ifp->net->_rx);
18521 }
18522 } else {
18523 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
18524 return -ENODEV;
18525 }
18526 return BCME_OK;
18527 }
18528
/*
 * Parse a CPU-mask string (@buf, @len) and install it as the RPS map of
 * @queue, replacing (and RCU-freeing) any previous map.
 * Returns the number of CPUs mapped, -ENOMEM/-EINVAL style errors on
 * failure, or -1 when the parsed mask contains no CPUs.
 *
 * NOTE(review): this mirrors the kernel's store_rps_map() logic but uses a
 * file-local spinlock instead of the core's rps_map_lock mutex.
 */
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	/* serializes replacement of queue->rps_map across callers */
	static DEFINE_SPINLOCK(rps_map_lock);

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* turn the textual mask into a cpumask bitmap */
	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	/* allocate at least a cache line to avoid false sharing */
	map = kzalloc(max_t(unsigned int,
		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* flatten the mask into the map's CPU array */
	i = 0;
	for_each_cpu(cpu, mask) {
		map->cpus[i++] = cpu;
	}

	if (i) {
		map->len = i;
	} else {
		/* empty mask: nothing to steer to, bail out */
		kfree(map);
		map = NULL;
		free_cpumask_var(mask);
		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
		return -1;
	}

	/* publish the new map; readers see it via RCU */
	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
		lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	/* keep the global rps_needed static key balanced: +1 for the new
	 * map (map is always non-NULL here), -1 when an old map is retired
	 */
	if (map) {
		static_key_slow_inc(&rps_needed);
	}
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);

	DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
	return map->len;
}
18592
18593 void custom_rps_map_clear(struct netdev_rx_queue *queue)
18594 {
18595 struct rps_map *map;
18596
18597 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
18598
18599 map = rcu_dereference_protected(queue->rps_map, 1);
18600 if (map) {
18601 RCU_INIT_POINTER(queue->rps_map, NULL);
18602 kfree_rcu(map, rcu);
18603 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
18604 }
18605 }
18606 #endif
18607
18608
18609
18610 #ifdef DHD_DEBUG_PAGEALLOC
18611
/*
 * Callback invoked by the page-alloc debug machinery when a corrupted page
 * owned by this driver is detected. Dumps the corrupt region and the
 * driver log to the kernel log; on PCIe builds also pulls a firmware
 * core dump (DUMP_MEMONLY path ends in BUG_ON inside dhd_bus_mem_dump).
 */
void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;

	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
		__FUNCTION__, addr_corrupt, (uint32)len));

	/* hold a wake lock so the dump completes before suspend */
	DHD_OS_WAKE_LOCK(dhdp);
	prhex("Page Corruption:", addr_corrupt, len);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_page_corrupt_cb);
18633
18634 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
/*
 * Handle a packet-ID audit failure on PCIe: dump driver state (and a
 * firmware core dump if enabled), then raise a hang event so the upper
 * layer can recover the dongle.
 */
void
dhd_pktid_error_handler(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
	DHD_OS_WAKE_LOCK(dhdp);
	dhd_dump_to_kernelog(dhdp);
#ifdef DHD_FW_COREDUMP
	/* Load the dongle side dump to host memory */
	if (dhdp->memdump_enabled == DUMP_DISABLED) {
		dhdp->memdump_enabled = DUMP_MEMFILE;
	}
	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
	dhd_bus_mem_dump(dhdp);
#endif /* DHD_FW_COREDUMP */
	/* escalate to the framework as a hang with a PCIe-specific reason */
	dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
	dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
	DHD_OS_WAKE_UNLOCK(dhdp);
}
18654
18655 struct net_device *
18656 dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
18657 {
18658 dhd_info_t *dhd = dhdp->info;
18659
18660 if (dhd->iflist[0] && dhd->iflist[0]->net)
18661 return dhd->iflist[0]->net;
18662 else
18663 return NULL;
18664 }
18665
18666 #ifdef DHD_DHCP_DUMP
/*
 * Log DHCP traffic seen on @ifname. @pktdata points at an Ethernet frame;
 * @tx tells the direction. Parses IPv4/UDP bootp and walks the DHCP
 * options to find option 53 (message type), then prints it.
 *
 * NOTE(review): assumes the caller guarantees enough of the frame is
 * present for the bootp header read — TODO confirm against call sites.
 */
static void
dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
{
	struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
	struct iphdr *h = &b->ip_header;
	uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
	int dhcp_type = 0, len, opt_len;

	/* check IP header */
	if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
		return;
	}

	/* check UDP port for bootp (67, 68) */
	if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
			b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
		return;
	}

	/* check header length */
	if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
		return;
	}

	/* payload length past the fixed bootp fields = options length */
	len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
	opt_len = len
		- (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));

	/* parse bootp options */
	if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
		ptr = &b->options[4];
		/* options are TLV: code byte, length byte, value; 0xff ends */
		while (ptr < end && *ptr != 0xff) {
			opt = ptr++;
			if (*opt == 0) {
				/* pad option: single byte, no length */
				continue;
			}
			ptr += *ptr + 1;	/* skip length byte + value */
			if (ptr >= end) {
				break;
			}
			/* 53 is dhcp type */
			if (*opt == 53) {
				if (opt[1]) {
					/* NOTE(review): dhcp_type indexes
					 * dhcp_types[] without a bound check;
					 * assumes firmware/peer sends a sane
					 * value — TODO confirm table size.
					 */
					dhcp_type = opt[2];
					DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
						ifname, dhcp_types[dhcp_type],
						tx ? "TX" : "RX", dhcp_ops[b->op]));
					break;
				}
			}
		}
	}
}
18721
18722 #ifdef DHD_ICMP_DUMP
18723 static void
18724 dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
18725 {
18726 uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
18727 struct iphdr *iph = (struct iphdr *)pkt;
18728 struct icmphdr *icmph;
18729
18730 /* check IP header */
18731 if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
18732 return;
18733 }
18734
18735 icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
18736 if (icmph->type == ICMP_ECHO) {
18737 DHD_ERROR(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
18738 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
18739 } else if (icmph->type == ICMP_ECHOREPLY) {
18740 DHD_ERROR(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
18741 ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
18742 } else {
18743 DHD_ERROR(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
18744 ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
18745 }
18746 }
18747 #endif /* DHD_ICMP_DUMP */
18748
18749 #ifdef SHOW_LOGTRACE
18750 void
18751 dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
18752 {
18753 dhd_dbg_ring_status_t ring_status;
18754 uint32 rlen;
18755
18756 rlen = dhd_dbg_ring_pull_single(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
18757 TRACE_LOG_BUF_MAX_SIZE, TRUE);
18758 trace_buf_info->size = rlen;
18759 trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
18760 if (rlen == 0) {
18761 trace_buf_info->availability = BUF_NOT_AVAILABLE;
18762 return;
18763 }
18764 dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
18765 if (ring_status.written_bytes != ring_status.read_bytes) {
18766 trace_buf_info->availability = NEXT_BUF_AVAIL;
18767 }
18768 }
18769 #endif /* SHOW_LOGTRACE */
18770
/* Report whether firmware download to the dongle has completed. */
bool
dhd_fw_download_status(dhd_pub_t * dhd_pub)
{
	return dhd_pub->fw_download_done;
}
18776
/*
 * Create the BCM_NL_USER netlink socket used to talk to the DHD user-space
 * daemon. The netlink_kernel_create() signature changed across kernel
 * versions, hence the three variants below.
 * Returns 0 on success, -1 on failure.
 */
int
dhd_create_to_notifier_skt(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	/* Kernel 3.7 onwards this API accepts only 3 arguments. */
	/* Kernel version 3.6 is a special case which accepts 4 arguments */
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &g_cfg);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	/* Kernel version 3.5 and below use this old API format */
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
		dhd_process_daemon_msg, NULL, THIS_MODULE);
#else
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE, &g_cfg);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
	if (!nl_to_event_sk)
	{
		printf("Error creating socket.\n");
		return -1;
	}
	DHD_INFO(("nl_to socket created successfully...\n"));
	return 0;
}
18799
18800 void
18801 dhd_destroy_to_notifier_skt(void)
18802 {
18803 DHD_INFO(("Destroying nl_to socket\n"));
18804 if (nl_to_event_sk) {
18805 netlink_kernel_release(nl_to_event_sk);
18806 }
18807 }
18808
18809 static void
18810 dhd_recv_msg_from_daemon(struct sk_buff *skb)
18811 {
18812 struct nlmsghdr *nlh;
18813 bcm_to_info_t *cmd;
18814
18815 nlh = (struct nlmsghdr *)skb->data;
18816 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
18817 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
18818 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
18819 DHD_INFO(("DHD Daemon Started\n"));
18820 }
18821 }
18822
18823 int
18824 dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
18825 {
18826 struct nlmsghdr *nlh;
18827 struct sk_buff *skb_out;
18828
18829 if (!nl_to_event_sk) {
18830 DHD_INFO(("No socket available\n"));
18831 return -1;
18832 }
18833
18834 BCM_REFERENCE(skb);
18835 if (sender_pid == 0) {
18836 DHD_INFO(("Invalid PID 0\n"));
18837 return -1;
18838 }
18839
18840 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
18841 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
18842 return -1;
18843 }
18844 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
18845 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
18846 memcpy(nlmsg_data(nlh), (char *)data, size);
18847
18848 if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
18849 DHD_INFO(("Error sending message\n"));
18850 }
18851 return 0;
18852 }
18853
18854
/*
 * Netlink input callback: process a message from the daemon and reply
 * with a "daemon started" acknowledgement carrying no trap request.
 */
static void
dhd_process_daemon_msg(struct sk_buff *skb)
{
	bcm_to_info_t to_info;

	to_info.magic = BCM_TO_MAGIC;
	to_info.reason = REASON_DAEMON_STARTED;
	to_info.trap = NO_TRAP;

	/* records the sender PID if this is the startup announcement */
	dhd_recv_msg_from_daemon(skb);
	dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
}
18867
18868 #ifdef REPORT_FATAL_TIMEOUTS
/*
 * Ask the user-space daemon to record a fatal-timeout event with @reason,
 * optionally requesting a firmware trap (@trap). @pub is currently unused.
 */
static void
dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap)
{
	bcm_to_info_t to_info;

	to_info.magic = BCM_TO_MAGIC;
	to_info.reason = reason;
	to_info.trap = trap;

	DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap));
	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
}
18881
/*
 * Map a driver timeout reason onto a daemon event code and forward it.
 * Command and out-of-queue-slot timeouts additionally request a firmware
 * trap; join/scan timeouts are report-only.
 */
void
dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason)
{
	int to_reason;
	int trap = NO_TRAP;
	switch (reason) {
		case DHD_REASON_COMMAND_TO:
			to_reason = REASON_COMMAND_TO;
			trap = DO_TRAP;
			break;
		case DHD_REASON_JOIN_TO:
			to_reason = REASON_JOIN_TO;
			break;
		case DHD_REASON_SCAN_TO:
			to_reason = REASON_SCAN_TO;
			break;
		case DHD_REASON_OQS_TO:
			to_reason = REASON_OQS_TO;
			trap = DO_TRAP;
			break;
		default:
			/* REASON_UNKOWN: spelling matches the project-wide
			 * constant name, intentionally left as-is
			 */
			to_reason = REASON_UNKOWN;
	}
	dhd_send_trap_to_fw(pub, to_reason, trap);
}
18908
18909 #ifdef DHD_LOG_DUMP
18910 void
18911 dhd_log_dump_init(dhd_pub_t *dhd)
18912 {
18913 struct dhd_log_dump_buf *dld_buf;
18914 int i = 0;
18915 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18916 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
18917 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18918
18919 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18920 dld_buf = &g_dld_buf[i];
18921 spin_lock_init(&dld_buf->lock);
18922 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
18923 dld_buf->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++, dld_buf_size[i]);
18924 #else
18925 dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
18926 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
18927
18928 if (!dld_buf->buffer) {
18929 dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
18930 DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
18931
18932 if (!dld_buf->buffer) {
18933 DHD_ERROR(("Failed to allocate memory for dld_buf[%d].\n", i));
18934 goto fail;
18935 }
18936 }
18937
18938 dld_buf->wraparound = 0;
18939 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
18940 dld_buf->present = dld_buf->front = dld_buf->buffer;
18941 dld_buf->remain = dld_buf_size[i];
18942 dld_buf->enable = 1;
18943 }
18944 return;
18945
18946 fail:
18947 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18948 if (dld_buf[i].buffer) {
18949 kfree(dld_buf[i].buffer);
18950 }
18951 }
18952 }
18953
/*
 * Disable and free all global log-dump buffers, returning prealloc'd
 * memory to the static pool (or kfree'ing on non-static builds).
 */
void
dhd_log_dump_deinit(dhd_pub_t *dhd)
{
	struct dhd_log_dump_buf *dld_buf;
	int i = 0;

	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		/* stop writers before releasing the backing storage */
		dld_buf->enable = 0;
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
		DHD_OS_PREFREE(dhd, dld_buf->buffer, dld_buf_size[i]);
#else
		kfree(dld_buf->buffer);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	}
}
18970
18971 void
18972 dhd_log_dump_write(int type, const char *fmt, ...)
18973 {
18974 int len = 0;
18975 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
18976 va_list args;
18977 unsigned long flags = 0;
18978 struct dhd_log_dump_buf *dld_buf = NULL;
18979
18980 switch (type)
18981 {
18982 case DLD_BUF_TYPE_GENERAL:
18983 dld_buf = &g_dld_buf[type];
18984 break;
18985 case DLD_BUF_TYPE_SPECIAL:
18986 dld_buf = &g_dld_buf[type];
18987 break;
18988 default:
18989 DHD_ERROR(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
18990 __FUNCTION__, type));
18991 return;
18992 }
18993
18994 if (dld_buf->enable != 1) {
18995 return;
18996 }
18997
18998 va_start(args, fmt);
18999
19000 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
19001 /* Non ANSI C99 compliant returns -1,
19002 * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
19003 */
19004 if (len < 0) {
19005 return;
19006 }
19007
19008 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
19009 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
19010 tmp_buf[len] = '\0';
19011 }
19012
19013 /* make a critical section to eliminate race conditions */
19014 spin_lock_irqsave(&dld_buf->lock, flags);
19015 if (dld_buf->remain < len) {
19016 dld_buf->wraparound = 1;
19017 dld_buf->present = dld_buf->front;
19018 dld_buf->remain = dld_buf_size[type];
19019 }
19020
19021 strncpy(dld_buf->present, tmp_buf, len);
19022 dld_buf->remain -= len;
19023 dld_buf->present += len;
19024 spin_unlock_irqrestore(&dld_buf->lock, flags);
19025
19026 /* double check invalid memory operation */
19027 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
19028 va_end(args);
19029 }
19030
19031 char*
19032 dhd_log_dump_get_timestamp(void)
19033 {
19034 static char buf[16];
19035 u64 ts_nsec;
19036 unsigned long rem_nsec;
19037
19038 ts_nsec = local_clock();
19039 rem_nsec = do_div(ts_nsec, 1000000000);
19040 snprintf(buf, sizeof(buf), "%5lu.%06lu",
19041 (unsigned long)ts_nsec, rem_nsec / 1000);
19042
19043 return buf;
19044 }
19045 #endif /* DHD_LOG_DUMP */
19046
/*
 * Write @buf_len bytes from @buf to @filepath from kernel context,
 * creating the file (mode 0664) if needed. Uses the set_fs(KERNEL_DS)
 * pattern required for vfs_write() on pre-4.14 kernels.
 * Returns BCME_OK on success, BCME_ERROR on open/write failure.
 */
int
dhd_write_file(const char *filepath, char *buf, int buf_len)
{
	struct file *fp = NULL;
	mm_segment_t old_fs;
	int ret = 0;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* File is always created. */
	fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
			__FUNCTION__, filepath, PTR_ERR(fp)));
		ret = BCME_ERROR;
	} else {
		if (fp->f_mode & FMODE_WRITE) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
			ret = kernel_write(fp, buf, buf_len, &fp->f_pos);
#else
			ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
#endif
			if (ret < 0) {
				DHD_ERROR(("%s: Couldn't write file '%s'\n",
					__FUNCTION__, filepath));
				ret = BCME_ERROR;
			} else {
				ret = BCME_OK;
			}
		}
		filp_close(fp, NULL);
	}

	/* restore previous address limit */
	set_fs(old_fs);

	return ret;
}
19087
/*
 * Read up to @buf_len bytes of @filepath into @buf from kernel context.
 * Returns BCME_OK (0) when at least one byte was read, BCME_ERROR when
 * the file is missing or empty. The byte count itself is not returned.
 */
int
dhd_read_file(const char *filepath, char *buf, int buf_len)
{
	struct file *fp = NULL;
	mm_segment_t old_fs;
	int ret;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		set_fs(old_fs);
		DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
		return BCME_ERROR;
	}

	/* kernel_read()'s argument order changed in 4.14 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	ret = kernel_read(fp, buf, buf_len, NULL);
#else
	ret = kernel_read(fp, 0, buf, buf_len);
#endif
	filp_close(fp, NULL);

	/* restore previous address limit */
	set_fs(old_fs);

	/* Return the number of bytes read */
	if (ret > 0) {
		/* Success to read */
		ret = 0;
	} else {
		DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
			__FUNCTION__, filepath, ret));
		ret = BCME_ERROR;
	}

	return ret;
}
19128
/*
 * Write @buf to @filepath, then read it back to verify the write landed.
 * Note: @buf is clobbered by the verification read.
 * Returns the write error, or the result of the verification read.
 */
int
dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
{
	int ret = dhd_write_file(filepath, buf, buf_len);

	if (ret < 0)
		return ret;

	/* Read the file again and check if the file size is not zero */
	memset(buf, 0, buf_len);
	return dhd_read_file(filepath, buf, buf_len);
}
19145
19146 #ifdef DHD_LB_TXP
19147 #define DHD_LB_TXBOUND 64
19148 /*
19149 * Function that performs the TX processing on a given CPU
19150 */
/*
 * Drain up to DHD_LB_TXBOUND packets from the load-balanced TX pending
 * queue and hand each to __dhd_sendpkt(). Returns TRUE when the bound
 * was hit with work still queued (caller should reschedule the tasklet),
 * FALSE when the queue was emptied.
 */
bool
dhd_lb_tx_process(dhd_info_t *dhd)
{
	struct sk_buff *skb;
	int cnt = 0;
	struct net_device *net;
	int ifidx;
	bool resched = FALSE;

	DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
	if (dhd == NULL) {
		DHD_ERROR((" Null pointer DHD \r\n"));
		return resched;
	}

	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);

	/* Base Loop to perform the actual Tx */
	do {
		skb = skb_dequeue(&dhd->tx_pend_queue);
		if (skb == NULL) {
			DHD_TRACE(("Dequeued a Null Packet \r\n"));
			break;
		}
		cnt++;

		/* interface routing info was stashed in the pkttag at enqueue */
		net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
		ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));

		BCM_REFERENCE(net);
		DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
			net, ifidx));

		__dhd_sendpkt(&dhd->pub, ifidx, skb);

		/* bound the work per tasklet run to keep latency fair */
		if (cnt >= DHD_LB_TXBOUND) {
			resched = TRUE;
			break;
		}

	} while (1);

	DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));

	return resched;
}
19197
19198 void
19199 dhd_lb_tx_handler(unsigned long data)
19200 {
19201 dhd_info_t *dhd = (dhd_info_t *)data;
19202
19203 if (dhd_lb_tx_process(dhd)) {
19204 dhd_tasklet_schedule(&dhd->tx_tasklet);
19205 }
19206 }
19207
19208 #endif /* DHD_LB_TXP */
19209
19210 /* ----------------------------------------------------------------------------
19211 * Infrastructure code for sysfs interface support for DHD
19212 *
19213 * What is sysfs interface?
19214 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
19215 *
19216 * Why sysfs interface?
19217 * This is the Linux standard way of changing/configuring Run Time parameters
19218 * for a driver. We can use this interface to control "linux" specific driver
19219 * parameters.
19220 *
19221 * -----------------------------------------------------------------------------
19222 */
19223
19224 #include <linux/sysfs.h>
19225 #include <linux/kobject.h>
19226
19227 #if defined(DHD_TRACE_WAKE_LOCK)
19228
19229 /* Function to show the history buffer */
/*
 * sysfs 'show' for wklock_trace: dumps the wake-lock statistics to the
 * kernel log and returns a bare newline to user space (length 1); the
 * real output goes to dmesg, not the sysfs read buffer.
 */
static ssize_t
show_wklock_trace(struct dhd_info *dev, char *buf)
{
	ssize_t ret = 0;
	dhd_info_t *dhd = (dhd_info_t *)dev;

	buf[ret] = '\n';
	buf[ret+1] = 0;

	dhd_wk_lock_stats_dump(&dhd->pub);
	return ret+1;
}
19242
19243 /* Function to enable/disable wakelock trace */
/*
 * sysfs 'store' for wklock_trace: accepts "0" or "1" to toggle wake-lock
 * tracing; anything else is -EINVAL.
 * NOTE(review): returns onoff+1 (1 or 2) instead of the conventional
 * `count`; callers writing more bytes will see a short-write retry loop
 * from the VFS — preserved as existing behavior.
 */
static ssize_t
wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
{
	unsigned long onoff;
	unsigned long flags;
	dhd_info_t *dhd = (dhd_info_t *)dev;

	onoff = bcm_strtoul(buf, NULL, 10);
	if (onoff != 0 && onoff != 1) {
		return -EINVAL;
	}

	/* flip the flag under the wakelock spinlock to serialize with tracers */
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	trace_wklock_onoff = onoff;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	if (trace_wklock_onoff) {
		printk("ENABLE WAKLOCK TRACE\n");
	} else {
		printk("DISABLE WAKELOCK TRACE\n");
	}

	return (ssize_t)(onoff+1);
}
19268
19269 #if defined(DHD_LB_TXP)
19270 static ssize_t
19271 show_lbtxp(struct dhd_info *dev, char *buf)
19272 {
19273 ssize_t ret = 0;
19274 unsigned long onoff;
19275 dhd_info_t *dhd = (dhd_info_t *)dev;
19276
19277 onoff = atomic_read(&dhd->lb_txp_active);
19278 ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
19279 onoff);
19280 return ret;
19281 }
19282
19283 static ssize_t
19284 lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
19285 {
19286 unsigned long onoff;
19287 dhd_info_t *dhd = (dhd_info_t *)dev;
19288 int i;
19289
19290 onoff = bcm_strtoul(buf, NULL, 10);
19291
19292 sscanf(buf, "%lu", &onoff);
19293 if (onoff != 0 && onoff != 1) {
19294 return -EINVAL;
19295 }
19296 atomic_set(&dhd->lb_txp_active, onoff);
19297
19298 /* Since the scheme is changed clear the counters */
19299 for (i = 0; i < NR_CPUS; i++) {
19300 DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
19301 DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
19302 }
19303
19304 return count;
19305 }
19306
19307 #endif /* DHD_LB_TXP */
19308 /*
19309 * Generic Attribute Structure for DHD.
19310 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
19311 * to instantiate an object of type dhd_attr, populate it with
19312 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
19313 * and add the object to default_attrs[] array, that gets registered
19314 * to the kobject of dhd (named bcm-dhd).
19315 */
19316
/* Per-node sysfs attribute: kobject attribute plus its show/store handlers. */
struct dhd_attr {
	struct attribute attr;
	ssize_t(*show)(struct dhd_info *, char *);		/* read handler */
	ssize_t(*store)(struct dhd_info *, const char *, size_t count);	/* write handler */
};
19322
#if defined(DHD_TRACE_WAKE_LOCK)
/* /sys/bcm-dhd/wklock_trace: dump/toggle wake-lock statistics */
static struct dhd_attr dhd_attr_wklock =
	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
#endif /* defined(DHD_TRACE_WAKE_LOCK) */

#if defined(DHD_LB_TXP)
/* /sys/bcm-dhd/lbtxp: toggle load-balanced TX processing */
static struct dhd_attr dhd_attr_lbtxp =
	__ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
#endif /* DHD_LB_TXP */

/* Attribute object that gets registered with "bcm-dhd" kobject tree */
static struct attribute *default_attrs[] = {
#if defined(DHD_TRACE_WAKE_LOCK)
	&dhd_attr_wklock.attr,
#endif /* DHD_TRACE_WAKE_LOCK */
#if defined(DHD_LB_TXP)
	&dhd_attr_lbtxp.attr,
#endif /* DHD_LB_TXP */
	NULL
};
19343
19344 #define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
19345 #define to_attr(a) container_of(a, struct dhd_attr, attr)
19346
19347 /*
19348 * bcm-dhd kobject show function, the "attr" attribute specifices to which
19349 * node under "bcm-dhd" the show function is called.
19350 */
19351 static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
19352 {
19353 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19354 #pragma GCC diagnostic push
19355 #pragma GCC diagnostic ignored "-Wcast-qual"
19356 #endif
19357 dhd_info_t *dhd = to_dhd(kobj);
19358 struct dhd_attr *d_attr = to_attr(attr);
19359 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19360 #pragma GCC diagnostic pop
19361 #endif
19362 int ret;
19363
19364 if (d_attr->show)
19365 ret = d_attr->show(dhd, buf);
19366 else
19367 ret = -EIO;
19368
19369 return ret;
19370 }
19371
19372 /*
19373 * bcm-dhd kobject show function, the "attr" attribute specifices to which
19374 * node under "bcm-dhd" the store function is called.
19375 */
19376 static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
19377 const char *buf, size_t count)
19378 {
19379 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19380 #pragma GCC diagnostic push
19381 #pragma GCC diagnostic ignored "-Wcast-qual"
19382 #endif
19383 dhd_info_t *dhd = to_dhd(kobj);
19384 struct dhd_attr *d_attr = to_attr(attr);
19385 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
19386 #pragma GCC diagnostic pop
19387 #endif
19388 int ret;
19389
19390 if (d_attr->store)
19391 ret = d_attr->store(dhd, buf, count);
19392 else
19393 ret = -EIO;
19394
19395 return ret;
19396
19397 }
19398
/* sysfs read/write entry points for all bcm-dhd attributes */
static struct sysfs_ops dhd_sysfs_ops = {
	.show = dhd_show,
	.store = dhd_store,
};

/* kobject type for the bcm-dhd node; attributes created at registration */
static struct kobj_type dhd_ktype = {
	.sysfs_ops = &dhd_sysfs_ops,
	.default_attrs = default_attrs,
};
19408
19409 /* Create a kobject and attach to sysfs interface */
/* Create a kobject and attach to sysfs interface */
static int dhd_sysfs_init(dhd_info_t *dhd)
{
	int ret = -1;

	if (dhd == NULL) {
		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
		return ret;
	}

	/* Initialize the kobject */
	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
	if (ret) {
		/* kobject_init_and_add() requires a put even on failure to
		 * release the reference it took during init
		 */
		kobject_put(&dhd->dhd_kobj);
		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
		return ret;
	}

	/*
	 * We are always responsible for sending the uevent that the kobject
	 * was added to the system.
	 */
	kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);

	return ret;
}
19435
19436 /* Done with the kobject and detach the sysfs interface */
19437 static void dhd_sysfs_exit(dhd_info_t *dhd)
19438 {
19439 if (dhd == NULL) {
19440 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
19441 return;
19442 }
19443
19444 /* Releae the kobject */
19445 if (dhd->dhd_kobj.state_initialized)
19446 kobject_put(&dhd->dhd_kobj);
19447 }
19448
19449 #ifdef DHD_DEBUG_UART
19450 bool
19451 dhd_debug_uart_is_running(struct net_device *dev)
19452 {
19453 dhd_info_t *dhd = DHD_DEV_INFO(dev);
19454
19455 if (dhd->duart_execute) {
19456 return TRUE;
19457 }
19458
19459 return FALSE;
19460 }
19461
/* Deferred-work wrapper: run the debug-UART helper with the "rd" command. */
static void
dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
{
	dhd_pub_t *dhdp = handle;
	dhd_debug_uart_exec(dhdp, "rd");
}
19468
/*
 * Launch the user-space debug-UART helper (DHD_DEBUG_UART_EXEC_PATH) with
 * @cmd when the dongle is in an unrecoverable state (PCIe link down or a
 * failed memdump). On the memdump-bugon policy path this ends in BUG_ON
 * unless the dump was user-initiated (DUMP_TYPE_BY_SYSDUMP).
 * duart_execute is set around the helper run so dhd_debug_uart_is_running()
 * can report it.
 */
static void
dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
{
	int ret;

	char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
	char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};

#ifdef DHD_FW_COREDUMP
	/* only fire on the bug-on memdump policy when coredump is built in */
	if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
#endif
	{
		if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
#ifdef DHD_FW_COREDUMP
			dhdp->memdump_success == FALSE ||
#endif
			FALSE) {
			dhdp->info->duart_execute = TRUE;
			DHD_ERROR(("DHD: %s - execute %s %s\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
			/* block until the helper finishes */
			ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
			DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
			dhdp->info->duart_execute = FALSE;

#ifdef DHD_LOG_DUMP
			if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
#endif
			{
				BUG_ON(1);
			}
		}
	}
}
19503 #endif /* DHD_DEBUG_UART */
19504
19505 #if defined(DHD_BLOB_EXISTENCE_CHECK)
/*
 * Probe for the CLM blob file and record its presence in dhdp->is_blob.
 * With CONCATE_BLOB, also appends "_blob" to @fw_path so the blob-aware
 * firmware image is selected.
 */
void
dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
{
	struct file *fp;
	char *filepath = CONFIG_BCMDHD_CLM_PATH;

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: ----- blob file dosen't exist -----\n", __FUNCTION__));
		dhdp->is_blob = FALSE;
	} else {
		DHD_ERROR(("%s: ----- blob file exist -----\n", __FUNCTION__));
		dhdp->is_blob = TRUE;
#if defined(CONCATE_BLOB)
		/* NOTE(review): strncat bounded by strlen of the SOURCE is
		 * equivalent to strcat — it does not guard fw_path's size;
		 * assumes callers pass a buffer with >= 5 spare bytes — TODO
		 * confirm against fw_path allocation.
		 */
		strncat(fw_path, "_blob", strlen("_blob"));
#else
		BCM_REFERENCE(fw_path);
#endif /* CONCATE_BLOB */
		filp_close(fp, NULL);
	}
}
19527 #endif /* DHD_BLOB_EXISTENCE_CHECK */
19528
19529 #if defined(PCIE_FULL_DONGLE)
19530 /** test / loopback */
19531 void
19532 dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
19533 {
19534 dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
19535 dhd_info_t *dhd_info = (dhd_info_t *)handle;
19536 dhd_pub_t *dhdp = &dhd_info->pub;
19537
19538 if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
19539 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
19540 return;
19541 }
19542
19543 if ((dhd_info == NULL) || (dhdp == NULL)) {
19544 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
19545 return;
19546 }
19547
19548 if (dmmap == NULL) {
19549 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
19550 return;
19551 }
19552 dmaxfer_free_prev_dmaaddr(dhdp, dmmap);
19553 }
19554
19555
/*
 * Queue low-priority deferred work to free the DMA loopback mapping
 * @dmmap via dmaxfer_free_dmaaddr_handler().
 */
void
dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
{
	dhd_info_t *dhd_info = dhdp->info;

	dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
		DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
19564 #endif /* PCIE_FULL_DONGLE */
19565 /* ---------------------------- End of sysfs implementation ------------------------------------- */
#ifdef HOFFLOAD_MODULES
/*
 * Load the host-offload module image (named by the dhd_hmem_module_string
 * module parameter) via the kernel firmware loader and copy it into the
 * host memory region described by dhd->hmem. Logs and returns silently on
 * any failure; the firmware handle is always released before returning.
 */
void
dhd_linux_get_modfw_address(dhd_pub_t *dhd)
{
	const struct firmware *module_fw;
	struct module_metadata *hmem = &dhd->hmem;
	const char *module_name;

	/* An image name must have been supplied via the module parameter. */
	if (dhd_hmem_module_string[0] == '\0') {
		DHD_ERROR(("%s No module image name specified\n", __FUNCTION__));
		return;
	}
	module_name = dhd_hmem_module_string;

	if (request_firmware(&module_fw, module_name, dhd_bus_to_dev(dhd->bus))) {
		DHD_ERROR(("modules.img not available\n"));
		return;
	}

	/* NOTE(review): a non-zero return from dhd_alloc_module_memory is
	 * treated as success here (mirrors the original's !ret early-out) —
	 * confirm against its definition. */
	if (dhd_alloc_module_memory(dhd->bus, module_fw->size, hmem)) {
		memcpy(hmem->data, module_fw->data, module_fw->size);
	}
	release_firmware(module_fw);
}
#endif /* HOFFLOAD_MODULES */
19592
#ifdef SET_PCIE_IRQ_CPU_CORE
/*
 * Pin (or unpin, per 'set') the PCIe interrupt to a CPU core.
 * Validates the dhd handle and bus, resolves the IRQ number from the PCIe
 * layer, then delegates to set_irq_cpucore().
 */
void
dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set)
{
	unsigned int irq;

	if (dhdp == NULL) {
		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
		return;
	}
	if (dhdp->bus == NULL) {
		DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
		return;
	}

	/* Bail out quietly if the PCIe IRQ number cannot be resolved. */
	if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) {
		return;
	}

	set_irq_cpucore(irq, set);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
19615
#if defined(DHD_HANG_SEND_UP_TEST)
/*
 * Debug-only helper: parse 'string_num' into a HANG reason code and, for
 * supported reasons, arm a simulated HANG by recording it in
 * dhdp->req_hang_type (some reasons additionally kick the firmware via the
 * "bus:disconnect" iovar). A reason of 0 clears any pending request.
 *
 * Fixes vs. original: removed the redundant NULL test on dhdp (it is the
 * address of a member of a non-NULL dhd, so it can never be NULL), the dead
 * initializer of 'reason', and typos ("timeour" comment, "inrerface" log).
 */
void
dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_pub_t *dhdp;
	uint reason;
	char buf[WLC_IOCTL_SMLEN] = {0, };
	uint32 fw_test_code = 0;

	if (!dhd) {
		return;
	}
	dhdp = &dhd->pub;

	reason = (uint) bcm_strtoul(string_num, NULL, 0);
	DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));

	if (reason == 0) {
		/* 0 means "cancel": clear any previously requested HANG test. */
		if (dhdp->req_hang_type) {
			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
				__FUNCTION__, dhdp->req_hang_type));
			dhdp->req_hang_type = 0;
		} else {
			DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
		}
		return;
	} else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
		DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
		return;
	}

	if (dhdp->req_hang_type != 0) {
		DHD_ERROR(("Already HANG requested for test\n"));
		return;
	}

	switch (reason) {
	case HANG_REASON_IOCTL_RESP_TIMEOUT:
		DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
		dhdp->req_hang_type = reason;
		fw_test_code = 102; /* resumed on timeout */
		bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
		dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
		break;
	case HANG_REASON_DONGLE_TRAP:
		DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
		dhdp->req_hang_type = reason;
		fw_test_code = 99; /* dongle trap */
		bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
		dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
		break;
	case HANG_REASON_D3_ACK_TIMEOUT:
		DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
		dhdp->req_hang_type = reason;
		break;
	case HANG_REASON_BUS_DOWN:
		DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
		dhdp->req_hang_type = reason;
		break;
	case HANG_REASON_PCIE_LINK_DOWN:
	case HANG_REASON_MSGBUF_LIVELOCK:
		dhdp->req_hang_type = 0;
		DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
		break;
	case HANG_REASON_IFACE_OP_FAILURE:
		DHD_ERROR(("Make HANG!!!: P2P interface delete failure(0x%x)\n", reason));
		dhdp->req_hang_type = reason;
		break;
	case HANG_REASON_HT_AVAIL_ERROR:
		dhdp->req_hang_type = 0;
		DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
		break;
	case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
		DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
		dhdp->req_hang_type = reason;
		break;
	default:
		dhdp->req_hang_type = 0;
		DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
		break;
	}
}
#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef DHD_WAKE_STATUS
/*
 * Return the bus-layer wake-count statistics for this dongle, or NULL on
 * BCMDBUS builds where the bus layer does not track wake counts.
 */
wake_counts_t*
dhd_get_wakecount(dhd_pub_t *dhdp)
{
#ifdef BCMDBUS
	return NULL;
#else
	return dhd_bus_get_wakecount(dhdp);
#endif /* BCMDBUS */
}
#endif /* DHD_WAKE_STATUS */
19716
#ifdef BCM_ASLR_HEAP
/*
 * Fetch a 32-bit random value from the kernel RNG, used to randomize the
 * dongle heap base (ASLR). Local renamed from 'rand' to avoid shadowing
 * the libc rand() symbol; behavior is unchanged.
 */
uint32
dhd_get_random_number(void)
{
	uint32 rnd = 0;

	get_random_bytes_arch(&rnd, sizeof(rnd));
	return rnd;
}
#endif /* BCM_ASLR_HEAP */
19726
19727 #ifdef DHD_PKT_LOGGING
19728 void
19729 dhd_pktlog_dump(void *handle, void *event_info, u8 event)
19730 {
19731 dhd_info_t *dhd = handle;
19732
19733 if (!dhd) {
19734 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
19735 return;
19736 }
19737
19738 if (dhd_pktlog_write_file(&dhd->pub)) {
19739 DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
19740 return;
19741 }
19742 }
19743
19744 void
19745 dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
19746 {
19747 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
19748 (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
19749 dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
19750 }
19751 #endif /* DHD_PKT_LOGGING */
19752
19753 void *dhd_get_pub(struct net_device *dev)
19754 {
19755 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
19756 if (dhdinfo)
19757 return (void *)&dhdinfo->pub;
19758 else {
19759 printf("%s: null dhdinfo\n", __FUNCTION__);
19760 return NULL;
19761 }
19762 }
19763
19764 void *dhd_get_conf(struct net_device *dev)
19765 {
19766 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
19767 if (dhdinfo)
19768 return (void *)dhdinfo->pub.conf;
19769 else {
19770 printf("%s: null dhdinfo\n", __FUNCTION__);
19771 return NULL;
19772 }
19773 }
19774
19775 bool dhd_os_wd_timer_enabled(void *bus)
19776 {
19777 dhd_pub_t *pub = bus;
19778 dhd_info_t *dhd = (dhd_info_t *)pub->info;
19779
19780 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
19781 if (!dhd) {
19782 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
19783 return FALSE;
19784 }
19785 return dhd->wd_timer_valid;
19786 }