dhd: fix wifi driver caused selinux denied issue [1/1]
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.1.363.59.144.x.cn / dhd_linux.c
CommitLineData
ef6a5fee
RC
1/*
2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
4 *
5 * Copyright (C) 1999-2016, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
29 */
30
31#include <typedefs.h>
32#include <linuxver.h>
33#include <osl.h>
34#ifdef SHOW_LOGTRACE
35#include <linux/syscalls.h>
36#include <event_log.h>
37#endif /* SHOW_LOGTRACE */
38
39
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/slab.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/inetdevice.h>
46#include <linux/rtnetlink.h>
47#include <linux/etherdevice.h>
48#include <linux/random.h>
49#include <linux/spinlock.h>
50#include <linux/ethtool.h>
51#include <linux/fcntl.h>
52#include <linux/fs.h>
53#include <linux/ip.h>
54#include <linux/reboot.h>
55#include <linux/notifier.h>
56#include <net/addrconf.h>
57#ifdef ENABLE_ADAPTIVE_SCHED
58#include <linux/cpufreq.h>
59#endif /* ENABLE_ADAPTIVE_SCHED */
60
61#include <asm/uaccess.h>
62#include <asm/unaligned.h>
63
64#include <epivers.h>
65#include <bcmutils.h>
66#include <bcmendian.h>
67#include <bcmdevs.h>
68
69#include <proto/ethernet.h>
70#include <proto/bcmevent.h>
71#include <proto/vlan.h>
72#include <proto/802.3.h>
73
74#include <dngl_stats.h>
75#include <dhd_linux_wq.h>
76#include <dhd.h>
77#include <dhd_linux.h>
78#ifdef PCIE_FULL_DONGLE
79#include <dhd_flowring.h>
80#endif
81#include <dhd_bus.h>
82#include <dhd_proto.h>
83#include <dhd_config.h>
08dfb6c4
RC
84#ifdef WL_ESCAN
85#include <wl_escan.h>
86#endif
ef6a5fee
RC
87#include <dhd_dbg.h>
88#ifdef CONFIG_HAS_WAKELOCK
89#include <linux/wakelock.h>
90#endif
91#ifdef WL_CFG80211
92#include <wl_cfg80211.h>
93#endif
94#ifdef PNO_SUPPORT
95#include <dhd_pno.h>
96#endif
97#ifdef RTT_SUPPORT
98#include <dhd_rtt.h>
99#endif
100
101#ifdef CONFIG_COMPAT
102#include <linux/compat.h>
103#endif
104
105#ifdef DHD_WMF
106#include <dhd_wmf_linux.h>
107#endif /* DHD_WMF */
108
109#ifdef DHD_L2_FILTER
110#include <proto/bcmicmp.h>
111#include <bcm_l2_filter.h>
112#include <dhd_l2_filter.h>
113#endif /* DHD_L2_FILTER */
114
115#ifdef DHD_PSTA
116#include <dhd_psta.h>
117#endif /* DHD_PSTA */
118
119
120#ifdef DHDTCPACK_SUPPRESS
121#include <dhd_ip.h>
122#endif /* DHDTCPACK_SUPPRESS */
123
124#ifdef DHD_DEBUG_PAGEALLOC
125typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
126void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
127extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
128#endif /* DHD_DEBUG_PAGEALLOC */
129
130
131#if defined(DHD_LB)
132/* Dynamic CPU selection for load balancing */
133#include <linux/cpu.h>
134#include <linux/cpumask.h>
135#include <linux/notifier.h>
136#include <linux/workqueue.h>
137#include <asm/atomic.h>
138
139#if !defined(DHD_LB_PRIMARY_CPUS)
140#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
141#endif
142
143#if !defined(DHD_LB_SECONDARY_CPUS)
144#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
145#endif
146
147#define HIST_BIN_SIZE 8
148
149#if defined(DHD_LB_RXP)
150static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
151#endif /* DHD_LB_RXP */
152
153#endif /* DHD_LB */
154
155#ifdef WLMEDIA_HTSF
156#include <linux/time.h>
157#include <htsf.h>
158
159#define HTSF_MINLEN 200 /* min. packet length to timestamp */
160#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
161#define TSMAX 1000 /* max no. of timing record kept */
162#define NUMBIN 34
163
164static uint32 tsidx = 0;
165static uint32 htsf_seqnum = 0;
166uint32 tsfsync;
167struct timeval tsync;
168static uint32 tsport = 5010;
169
170typedef struct histo_ {
171 uint32 bin[NUMBIN];
172} histo_t;
173
174#if !ISPOWEROF2(DHD_SDALIGN)
175#error DHD_SDALIGN is not a power of 2!
176#endif
177
178static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
179#endif /* WLMEDIA_HTSF */
180
181#ifdef STBLINUX
182#ifdef quote_str
183#undef quote_str
184#endif /* quote_str */
185#ifdef to_str
186#undef to_str
187#endif /* quote_str */
188#define to_str(s) #s
189#define quote_str(s) to_str(s)
190
191static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
192#endif /* STBLINUX */
193
194
195#if defined(SOFTAP)
196extern bool ap_cfg_running;
197extern bool ap_fw_loaded;
198#endif
199extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
200
201#ifdef FIX_CPU_MIN_CLOCK
202#include <linux/pm_qos.h>
203#endif /* FIX_CPU_MIN_CLOCK */
204#ifdef SET_RANDOM_MAC_SOFTAP
205#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
206#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
207#endif
208static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
209#endif /* SET_RANDOM_MAC_SOFTAP */
210#ifdef ENABLE_ADAPTIVE_SCHED
211#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
212#ifndef CUSTOM_CPUFREQ_THRESH
213#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
214#endif /* CUSTOM_CPUFREQ_THRESH */
215#endif /* ENABLE_ADAPTIVE_SCHED */
216
217/* enable HOSTIP cache update from the host side when an eth0:N is up */
218#define AOE_IP_ALIAS_SUPPORT 1
219
220#ifdef BCM_FD_AGGR
221#include <bcm_rpc.h>
222#include <bcm_rpc_tp.h>
223#endif
224#ifdef PROP_TXSTATUS
225#include <wlfc_proto.h>
226#include <dhd_wlfc.h>
227#endif
228
229#include <wl_android.h>
230
ef6a5fee
RC
231/* Maximum STA per radio */
232#define DHD_MAX_STA 32
233
08dfb6c4
RC
234#ifdef CUSTOMER_HW_AMLOGIC
235#include <linux/amlogic/wifi_dt.h>
236#endif
ef6a5fee
RC
237
238
239const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
240const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
241#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
242
243#ifdef ARP_OFFLOAD_SUPPORT
244void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
245static int dhd_inetaddr_notifier_call(struct notifier_block *this,
246 unsigned long event, void *ptr);
247static struct notifier_block dhd_inetaddr_notifier = {
248 .notifier_call = dhd_inetaddr_notifier_call
249};
250/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
251 * created in kernel notifier link list (with 'next' pointing to itself)
252 */
253static bool dhd_inetaddr_notifier_registered = FALSE;
254#endif /* ARP_OFFLOAD_SUPPORT */
255
256#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
257static int dhd_inet6addr_notifier_call(struct notifier_block *this,
258 unsigned long event, void *ptr);
259static struct notifier_block dhd_inet6addr_notifier = {
260 .notifier_call = dhd_inet6addr_notifier_call
261};
262/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
263 * created in kernel notifier link list (with 'next' pointing to itself)
264 */
265static bool dhd_inet6addr_notifier_registered = FALSE;
266#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
267
268#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
269#include <linux/suspend.h>
270volatile bool dhd_mmc_suspend = FALSE;
271DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
272#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
273
274#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
275extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
276#endif
277#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
278static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
279#endif
280#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
281MODULE_LICENSE("GPL and additional rights");
282#endif /* LinuxVer */
283
284#include <dhd_bus.h>
285
286#ifdef BCM_FD_AGGR
287#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
288#else
289#ifndef PROP_TXSTATUS
290#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
291#else
292#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
293#endif
294#endif /* BCM_FD_AGGR */
295
296#ifdef PROP_TXSTATUS
297extern bool dhd_wlfc_skip_fc(void);
298extern void dhd_wlfc_plat_init(void *dhd);
299extern void dhd_wlfc_plat_deinit(void *dhd);
300#endif /* PROP_TXSTATUS */
ef6a5fee
RC
301extern uint sd_f2_blocksize;
302extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
ef6a5fee
RC
303
304#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/* Compat stub for kernels that do not export print_tainted(): report an
 * empty taint string so log formatting that embeds it still works.
 * Note: declared with (void) — an empty parameter list () is an
 * obsolescent non-prototype declaration in C.
 */
const char *
print_tainted(void)
{
	return "";
}
310#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
311
312/* Linux wireless extension support */
313#if defined(WL_WIRELESS_EXT)
314#include <wl_iw.h>
315extern wl_iw_extra_params_t g_wl_iw_params;
316#endif /* defined(WL_WIRELESS_EXT) */
317
318#ifdef CONFIG_PARTIALSUSPEND_SLP
319#include <linux/partialsuspend_slp.h>
320#define CONFIG_HAS_EARLYSUSPEND
321#define DHD_USE_EARLYSUSPEND
322#define register_early_suspend register_pre_suspend
323#define unregister_early_suspend unregister_pre_suspend
324#define early_suspend pre_suspend
325#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
326#else
327#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
328#include <linux/earlysuspend.h>
329#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
330#endif /* CONFIG_PARTIALSUSPEND_SLP */
331
332extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
333
334#ifdef PKT_FILTER_SUPPORT
335extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
336extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
337extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
338#endif
339
340
341#ifdef READ_MACADDR
342extern int dhd_read_macaddr(struct dhd_info *dhd);
343#else
/* READ_MACADDR not compiled in: succeed without touching the MAC. */
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
345#endif
346#ifdef WRITE_MACADDR
347extern int dhd_write_macaddr(struct ether_addr *mac);
348#else
/* WRITE_MACADDR not compiled in: succeed without persisting anything. */
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
350#endif
351
352
353
354
355
356#ifdef DHD_FW_COREDUMP
357static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
358#endif /* DHD_FW_COREDUMP */
359#ifdef DHD_LOG_DUMP
360static void dhd_log_dump_init(dhd_pub_t *dhd);
361static void dhd_log_dump_deinit(dhd_pub_t *dhd);
362static void dhd_log_dump(void *handle, void *event_info, u8 event);
363void dhd_schedule_log_dump(dhd_pub_t *dhdp);
364static int do_dhd_log_dump(dhd_pub_t *dhdp);
365#endif /* DHD_LOG_DUMP */
366
367static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
368static struct notifier_block dhd_reboot_notifier = {
369 .notifier_call = dhd_reboot_callback,
370 .priority = 1,
371};
372
373#ifdef BCMPCIE
374static int is_reboot = 0;
375#endif /* BCMPCIE */
376
377typedef struct dhd_if_event {
378 struct list_head list;
379 wl_event_data_if_t event;
380 char name[IFNAMSIZ+1];
381 uint8 mac[ETHER_ADDR_LEN];
382} dhd_if_event_t;
383
384/* Interface control information */
385typedef struct dhd_if {
386 struct dhd_info *info; /* back pointer to dhd_info */
387 /* OS/stack specifics */
388 struct net_device *net;
389 int idx; /* iface idx in dongle */
390 uint subunit; /* subunit */
391 uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
392 bool set_macaddress;
393 bool set_multicast;
394 uint8 bssidx; /* bsscfg index for the interface */
395 bool attached; /* Delayed attachment when unset */
396 bool txflowcontrol; /* Per interface flow control indicator */
397 char name[IFNAMSIZ+1]; /* linux interface name */
398 char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
399 struct net_device_stats stats;
400#ifdef DHD_WMF
401 dhd_wmf_t wmf; /* per bsscfg wmf setting */
402#endif /* DHD_WMF */
403#ifdef PCIE_FULL_DONGLE
404 struct list_head sta_list; /* sll of associated stations */
405#if !defined(BCM_GMAC3)
406 spinlock_t sta_list_lock; /* lock for manipulating sll */
407#endif /* ! BCM_GMAC3 */
408#endif /* PCIE_FULL_DONGLE */
409 uint32 ap_isolate; /* ap-isolation settings */
410#ifdef DHD_L2_FILTER
411 bool parp_enable;
412 bool parp_discard;
413 bool parp_allnode;
414 arp_table_t *phnd_arp_table;
415/* for Per BSS modification */
416 bool dhcp_unicast;
417 bool block_ping;
418 bool grat_arp;
419#endif /* DHD_L2_FILTER */
420} dhd_if_t;
421
422#ifdef WLMEDIA_HTSF
423typedef struct {
424 uint32 low;
425 uint32 high;
426} tsf_t;
427
428typedef struct {
429 uint32 last_cycle;
430 uint32 last_sec;
431 uint32 last_tsf;
432 uint32 coef; /* scaling factor */
433 uint32 coefdec1; /* first decimal */
434 uint32 coefdec2; /* second decimal */
435} htsf_t;
436
437typedef struct {
438 uint32 t1;
439 uint32 t2;
440 uint32 t3;
441 uint32 t4;
442} tstamp_t;
443
444static tstamp_t ts[TSMAX];
445static tstamp_t maxdelayts;
446static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
447
448#endif /* WLMEDIA_HTSF */
449
450struct ipv6_work_info_t {
451 uint8 if_idx;
452 char ipv6_addr[16];
453 unsigned long event;
454};
455
#ifdef DHD_DEBUG
/* Buffer descriptor used when scheduling a debug memory dump */
typedef struct dhd_dump {
	uint8 *buf;	/* dump data */
	int bufsize;	/* size of buf in bytes */
} dhd_dump_t;
#endif /* DHD_DEBUG */
462
463/* When Perimeter locks are deployed, any blocking calls must be preceded
464 * by a PERIM UNLOCK and followed by a PERIM LOCK.
465 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
466 * wait_event_timeout().
467 */
468
469/* Local private structure (extension of pub) */
470typedef struct dhd_info {
471#if defined(WL_WIRELESS_EXT)
472 wl_iw_t iw; /* wireless extensions state (must be first) */
473#endif /* defined(WL_WIRELESS_EXT) */
474 dhd_pub_t pub;
475 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
476
477 void *adapter; /* adapter information, interrupt, fw path etc. */
478 char fw_path[PATH_MAX]; /* path to firmware image */
479 char nv_path[PATH_MAX]; /* path to nvram vars file */
08dfb6c4 480 char clm_path[PATH_MAX]; /* path to clm vars file */
ef6a5fee
RC
481 char conf_path[PATH_MAX]; /* path to config vars file */
482
483 /* serialize dhd iovars */
484 struct mutex dhd_iovar_mutex;
485
486 struct semaphore proto_sem;
487#ifdef PROP_TXSTATUS
488 spinlock_t wlfc_spinlock;
489
490#endif /* PROP_TXSTATUS */
491#ifdef WLMEDIA_HTSF
492 htsf_t htsf;
493#endif
494 wait_queue_head_t ioctl_resp_wait;
495 wait_queue_head_t d3ack_wait;
496 wait_queue_head_t dhd_bus_busy_state_wait;
497 uint32 default_wd_interval;
498
499 struct timer_list timer;
500 bool wd_timer_valid;
501#ifdef DHD_PCIE_RUNTIMEPM
502 struct timer_list rpm_timer;
503 bool rpm_timer_valid;
504 tsk_ctl_t thr_rpm_ctl;
505#endif /* DHD_PCIE_RUNTIMEPM */
506 struct tasklet_struct tasklet;
507 spinlock_t sdlock;
508 spinlock_t txqlock;
509 spinlock_t dhd_lock;
510
511 struct semaphore sdsem;
512 tsk_ctl_t thr_dpc_ctl;
513 tsk_ctl_t thr_wdt_ctl;
514
515 tsk_ctl_t thr_rxf_ctl;
516 spinlock_t rxf_lock;
517 bool rxthread_enabled;
518
519 /* Wakelocks */
520#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
521 struct wake_lock wl_wifi; /* Wifi wakelock */
522 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
523 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
524 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
525 struct wake_lock wl_evtwake; /* Wifi event wakelock */
526#ifdef BCMPCIE_OOB_HOST_WAKE
527 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
528#endif /* BCMPCIE_OOB_HOST_WAKE */
529#ifdef DHD_USE_SCAN_WAKELOCK
530 struct wake_lock wl_scanwake; /* Wifi scan wakelock */
531#endif /* DHD_USE_SCAN_WAKELOCK */
532#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
533
534#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
535 /* net_device interface lock, prevent race conditions among net_dev interface
536 * calls and wifi_on or wifi_off
537 */
538 struct mutex dhd_net_if_mutex;
539 struct mutex dhd_suspend_mutex;
540#endif
541 spinlock_t wakelock_spinlock;
542 spinlock_t wakelock_evt_spinlock;
543 uint32 wakelock_event_counter;
544 uint32 wakelock_counter;
545 int wakelock_wd_counter;
546 int wakelock_rx_timeout_enable;
547 int wakelock_ctrl_timeout_enable;
548 bool waive_wakelock;
549 uint32 wakelock_before_waive;
550
551 /* Thread to issue ioctl for multicast */
552 wait_queue_head_t ctrl_wait;
553 atomic_t pend_8021x_cnt;
554 dhd_attach_states_t dhd_state;
555#ifdef SHOW_LOGTRACE
556 dhd_event_log_t event_data;
557#endif /* SHOW_LOGTRACE */
558
559#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
560 struct early_suspend early_suspend;
561#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
562
563#ifdef ARP_OFFLOAD_SUPPORT
564 u32 pend_ipaddr;
565#endif /* ARP_OFFLOAD_SUPPORT */
566#ifdef BCM_FD_AGGR
567 void *rpc_th;
568 void *rpc_osh;
569 struct timer_list rpcth_timer;
570 bool rpcth_timer_active;
571 uint8 fdaggr;
572#endif
573#ifdef DHDTCPACK_SUPPRESS
574 spinlock_t tcpack_lock;
575#endif /* DHDTCPACK_SUPPRESS */
576#ifdef FIX_CPU_MIN_CLOCK
577 bool cpufreq_fix_status;
578 struct mutex cpufreq_fix;
579 struct pm_qos_request dhd_cpu_qos;
580#ifdef FIX_BUS_MIN_CLOCK
581 struct pm_qos_request dhd_bus_qos;
582#endif /* FIX_BUS_MIN_CLOCK */
583#endif /* FIX_CPU_MIN_CLOCK */
584 void *dhd_deferred_wq;
585#ifdef DEBUG_CPU_FREQ
586 struct notifier_block freq_trans;
587 int __percpu *new_freq;
588#endif
589 unsigned int unit;
590 struct notifier_block pm_notifier;
591#ifdef DHD_PSTA
592 uint32 psta_mode; /* PSTA or PSR */
593#endif /* DHD_PSTA */
594#ifdef DHD_DEBUG
595 dhd_dump_t *dump;
596 struct timer_list join_timer;
597 u32 join_timeout_val;
598 bool join_timer_active;
599 uint scan_time_count;
600 struct timer_list scan_timer;
601 bool scan_timer_active;
602#endif
603#if defined(DHD_LB)
604 /* CPU Load Balance dynamic CPU selection */
605
606 /* Variable that tracks the currect CPUs available for candidacy */
607 cpumask_var_t cpumask_curr_avail;
608
609 /* Primary and secondary CPU mask */
610 cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
611 cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
612
613 struct notifier_block cpu_notifier;
614
615 /* Tasklet to handle Tx Completion packet freeing */
616 struct tasklet_struct tx_compl_tasklet;
617 atomic_t tx_compl_cpu;
618
619
620 /* Tasklet to handle RxBuf Post during Rx completion */
621 struct tasklet_struct rx_compl_tasklet;
622 atomic_t rx_compl_cpu;
623
624 /* Napi struct for handling rx packet sendup. Packets are removed from
625 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
626 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
627 * to run to rx_napi_cpu.
628 */
629 struct sk_buff_head rx_pend_queue ____cacheline_aligned;
630 struct sk_buff_head rx_napi_queue ____cacheline_aligned;
631 struct napi_struct rx_napi_struct ____cacheline_aligned;
632 atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
633 struct net_device *rx_napi_netdev; /* netdev of primary interface */
634
635 struct work_struct rx_napi_dispatcher_work;
636 struct work_struct tx_compl_dispatcher_work;
637 struct work_struct rx_compl_dispatcher_work;
638 /* Number of times DPC Tasklet ran */
639 uint32 dhd_dpc_cnt;
640
641 /* Number of times NAPI processing got scheduled */
642 uint32 napi_sched_cnt;
643
644 /* Number of times NAPI processing ran on each available core */
645 uint32 napi_percpu_run_cnt[NR_CPUS];
646
647 /* Number of times RX Completions got scheduled */
648 uint32 rxc_sched_cnt;
649 /* Number of times RX Completion ran on each available core */
650 uint32 rxc_percpu_run_cnt[NR_CPUS];
651
652 /* Number of times TX Completions got scheduled */
653 uint32 txc_sched_cnt;
654 /* Number of times TX Completions ran on each available core */
655 uint32 txc_percpu_run_cnt[NR_CPUS];
656
657 /* CPU status */
658 /* Number of times each CPU came online */
659 uint32 cpu_online_cnt[NR_CPUS];
660
661 /* Number of times each CPU went offline */
662 uint32 cpu_offline_cnt[NR_CPUS];
663
664 /*
665 * Consumer Histogram - NAPI RX Packet processing
666 * -----------------------------------------------
667 * On Each CPU, when the NAPI RX Packet processing call back was invoked
668 * how many packets were processed is captured in this data structure.
669 * Now its difficult to capture the "exact" number of packets processed.
670 * So considering the packet counter to be a 32 bit one, we have a
671 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
672 * processed is rounded off to the next power of 2 and put in the
673 * approriate "bin" the value in the bin gets incremented.
674 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
675 * and the packet count processed is as follows (assume the bin counters are 0)
676 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
677 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
678 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
679 */
680 uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
681 uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
682 uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
683#endif /* DHD_LB */
684
685#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
686#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
687
688 struct kobject dhd_kobj;
689#ifdef SUPPORT_SENSORHUB
690 uint32 shub_enable;
691#endif /* SUPPORT_SENSORHUB */
692
693 struct delayed_work dhd_memdump_work;
694} dhd_info_t;
695
696#define DHDIF_FWDER(dhdif) FALSE
697
698/* Flag to indicate if we should download firmware on driver load */
699uint dhd_download_fw_on_driverload = TRUE;
700
701/* Flag to indicate if driver is initialized */
702uint dhd_driver_init_done = FALSE;
703
704/* Definitions to provide path to the firmware and nvram
705 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
706 */
707char firmware_path[MOD_PARAM_PATHLEN];
708char nvram_path[MOD_PARAM_PATHLEN];
08dfb6c4 709char clm_path[MOD_PARAM_PATHLEN];
ef6a5fee
RC
710char config_path[MOD_PARAM_PATHLEN];
711
712/* backup buffer for firmware and nvram path */
713char fw_bak_path[MOD_PARAM_PATHLEN];
714char nv_bak_path[MOD_PARAM_PATHLEN];
715
716/* information string to keep firmware, chip, and chip revision info visible in the log */
717char info_string[MOD_PARAM_INFOLEN];
718module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
719int op_mode = 0;
720int disable_proptx = 0;
721module_param(op_mode, int, 0644);
722
723#if defined(DHD_LB_RXP)
724static int dhd_napi_weight = 32;
725module_param(dhd_napi_weight, int, 0644);
726#endif /* DHD_LB_RXP */
727
728extern int wl_control_wl_start(struct net_device *dev);
729#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
730struct semaphore dhd_registration_sem;
731#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
732
733/* deferred handlers */
734static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
735static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
736static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
737static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
738#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
739static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
740#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
741#ifdef WL_CFG80211
742extern void dhd_netdev_free(struct net_device *ndev);
743#endif /* WL_CFG80211 */
744
745/* Error bits */
746module_param(dhd_msg_level, int, 0);
747#if defined(WL_WIRELESS_EXT)
748module_param(iw_msg_level, int, 0);
749#endif
750#ifdef WL_CFG80211
751module_param(wl_dbg_level, int, 0);
752#endif
753module_param(android_msg_level, int, 0);
754module_param(config_msg_level, int, 0);
755
756#ifdef ARP_OFFLOAD_SUPPORT
757/* ARP offload enable */
758uint dhd_arp_enable = TRUE;
759module_param(dhd_arp_enable, uint, 0);
760
761/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
762
763#ifdef ENABLE_ARP_SNOOP_MODE
764uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
765#else
766uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
767#endif /* ENABLE_ARP_SNOOP_MODE */
768
769module_param(dhd_arp_mode, uint, 0);
770#endif /* ARP_OFFLOAD_SUPPORT */
771
772/* Disable Prop tx */
773module_param(disable_proptx, int, 0644);
774/* load firmware and/or nvram values from the filesystem */
775module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
776module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
08dfb6c4 777module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
ef6a5fee
RC
778module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
779
780/* Watchdog interval */
781
782/* extend watchdog expiration to 2 seconds when DPC is running */
783#define WATCHDOG_EXTEND_INTERVAL (2000)
784
785uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
786module_param(dhd_watchdog_ms, uint, 0);
787
788#ifdef DHD_PCIE_RUNTIMEPM
789uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
790#endif /* DHD_PCIE_RUNTIMEPMT */
791#if defined(DHD_DEBUG)
792/* Console poll interval */
793uint dhd_console_ms = 0;
794module_param(dhd_console_ms, uint, 0644);
795#endif /* defined(DHD_DEBUG) */
796
797
798uint dhd_slpauto = TRUE;
799module_param(dhd_slpauto, uint, 0);
800
801#ifdef PKT_FILTER_SUPPORT
802/* Global Pkt filter enable control */
803uint dhd_pkt_filter_enable = TRUE;
804module_param(dhd_pkt_filter_enable, uint, 0);
805#endif
806
807/* Pkt filter init setup */
808uint dhd_pkt_filter_init = 0;
809module_param(dhd_pkt_filter_init, uint, 0);
810
811/* Pkt filter mode control */
812#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
813uint dhd_master_mode = FALSE;
814#else
815uint dhd_master_mode = FALSE;
816#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
817module_param(dhd_master_mode, uint, 0);
818
819int dhd_watchdog_prio = 0;
820module_param(dhd_watchdog_prio, int, 0);
821
822/* DPC thread priority */
823int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
424b00bf 824module_param(dhd_dpc_prio, int, 0);
ef6a5fee
RC
825
826/* RX frame thread priority */
827int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
424b00bf 828module_param(dhd_rxf_prio, int, 0);
ef6a5fee
RC
829
830int passive_channel_skip = 0;
831module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
832
833#if !defined(BCMDHDUSB)
834extern int dhd_dongle_ramsize;
835module_param(dhd_dongle_ramsize, int, 0);
836#endif /* BCMDHDUSB */
837
838/* Keep track of number of instances */
839static int dhd_found = 0;
840static int instance_base = 0; /* Starting instance number */
841module_param(instance_base, int, 0644);
842
843/* Functions to manage sysfs interface for dhd */
844static int dhd_sysfs_init(dhd_info_t *dhd);
845static void dhd_sysfs_exit(dhd_info_t *dhd);
846
847#if defined(DHD_LB)
848
849static void
850dhd_lb_set_default_cpus(dhd_info_t *dhd)
851{
852 /* Default CPU allocation for the jobs */
853 atomic_set(&dhd->rx_napi_cpu, 1);
854 atomic_set(&dhd->rx_compl_cpu, 2);
855 atomic_set(&dhd->tx_compl_cpu, 2);
856}
857
858static void
859dhd_cpumasks_deinit(dhd_info_t *dhd)
860{
861 free_cpumask_var(dhd->cpumask_curr_avail);
862 free_cpumask_var(dhd->cpumask_primary);
863 free_cpumask_var(dhd->cpumask_primary_new);
864 free_cpumask_var(dhd->cpumask_secondary);
865 free_cpumask_var(dhd->cpumask_secondary_new);
866}
867
868static int
869dhd_cpumasks_init(dhd_info_t *dhd)
870{
871 int id;
872 uint32 cpus;
873 int ret = 0;
874
875 if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
876 !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
877 !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
878 !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
879 !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
880 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
881 ret = -ENOMEM;
882 goto fail;
883 }
884
885 cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
886 cpumask_clear(dhd->cpumask_primary);
887 cpumask_clear(dhd->cpumask_secondary);
888
889 cpus = DHD_LB_PRIMARY_CPUS;
890 for (id = 0; id < NR_CPUS; id++) {
891 if (isset(&cpus, id))
892 cpumask_set_cpu(id, dhd->cpumask_primary);
893 }
894
895 cpus = DHD_LB_SECONDARY_CPUS;
896 for (id = 0; id < NR_CPUS; id++) {
897 if (isset(&cpus, id))
898 cpumask_set_cpu(id, dhd->cpumask_secondary);
899 }
900
901 return ret;
902fail:
903 dhd_cpumasks_deinit(dhd);
904 return ret;
905}
906
907/*
908 * The CPU Candidacy Algorithm
909 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
910 * The available CPUs for selection are divided into two groups
911 * Primary Set - A CPU mask that carries the First Choice CPUs
912 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
913 *
914 * There are two types of Job, that needs to be assigned to
915 * the CPUs, from one of the above mentioned CPU group. The Jobs are
916 * 1) Rx Packet Processing - napi_cpu
917 * 2) Completion Processiong (Tx, RX) - compl_cpu
918 *
919 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
920 * on-line/off-line the CPU candidacy algorithm is triggerd. The candidacy
921 * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
922 * If there are more processors free, it assigns one to compl_cpu.
923 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
924 * CPU, as much as possible.
925 *
926 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
927 * would allow Tx completion skb's to be released into a local free pool from
928 * which the rx buffer posts could have been serviced. it is important to note
929 * that a Tx packet may not have a large enough buffer for rx posting.
930 */
/*
 * dhd_select_cpu_candidacy() - elect the CPUs that run the NAPI rx job
 * (napi_cpu) and the Tx/Rx completion job (compl_cpu).
 *
 * Preference order: first online CPU from the primary mask, then from the
 * secondary mask, finally CPU0 (value 0 means "fall back to CPU0"). The two
 * jobs are kept on distinct CPUs when enough processors are online. The
 * results are published via atomic_set so concurrent readers see a
 * consistent value for each slot.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		napi_cpu = 0;
		compl_cpu = 0;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu));
	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);

	/* Publish the election results for the dispatch paths to pick up. */
	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	return;
}
1006
1007/*
1008 * Function to handle CPU Hotplug notifications.
1009 * One of the task it does is to trigger the CPU Candidacy algorithm
1010 * for load balancing.
1011 */
1012int
1013dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1014{
1015 unsigned int cpu = (unsigned int)(long)hcpu;
1016
1017 dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
1018
1019 switch (action)
1020 {
1021 case CPU_ONLINE:
1022 DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
1023 cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
1024 dhd_select_cpu_candidacy(dhd);
1025 break;
1026
1027 case CPU_DOWN_PREPARE:
1028 case CPU_DOWN_PREPARE_FROZEN:
1029 DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
1030 cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
1031 dhd_select_cpu_candidacy(dhd);
1032 break;
1033 default:
1034 break;
1035 }
1036
1037 return NOTIFY_OK;
1038}
1039
1040#if defined(DHD_LB_STATS)
1041void dhd_lb_stats_init(dhd_pub_t *dhdp)
1042{
1043 dhd_info_t *dhd;
1044 int i, j;
1045
1046 if (dhdp == NULL) {
1047 DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
1048 __FUNCTION__));
1049 return;
1050 }
1051
1052 dhd = dhdp->info;
1053 if (dhd == NULL) {
1054 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1055 return;
1056 }
1057
1058 DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1059 DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1060 DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1061 DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1062
1063 for (i = 0; i < NR_CPUS; i++) {
1064 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1065 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1066 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1067
1068 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1069 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1070 }
1071
1072 for (i = 0; i < NR_CPUS; i++) {
1073 for (j = 0; j < HIST_BIN_SIZE; j++) {
1074 DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
1075 DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
1076 DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
1077 }
1078 }
1079
1080 return;
1081}
1082
1083static void dhd_lb_stats_dump_histo(
1084 struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
1085{
1086 int i, j;
1087 uint32 per_cpu_total[NR_CPUS] = {0};
1088 uint32 total = 0;
1089
1090 bcm_bprintf(strbuf, "CPU: \t\t");
1091 for (i = 0; i < num_possible_cpus(); i++)
1092 bcm_bprintf(strbuf, "%d\t", i);
1093 bcm_bprintf(strbuf, "\nBin\n");
1094
1095 for (i = 0; i < HIST_BIN_SIZE; i++) {
1096 bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
1097 for (j = 0; j < num_possible_cpus(); j++) {
1098 bcm_bprintf(strbuf, "%d\t", hist[j][i]);
1099 }
1100 bcm_bprintf(strbuf, "\n");
1101 }
1102 bcm_bprintf(strbuf, "Per CPU Total \t");
1103 total = 0;
1104 for (i = 0; i < num_possible_cpus(); i++) {
1105 for (j = 0; j < HIST_BIN_SIZE; j++) {
1106 per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
1107 }
1108 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1109 total += per_cpu_total[i];
1110 }
1111 bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1112
1113 return;
1114}
1115
1116static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1117{
1118 int i;
1119
1120 bcm_bprintf(strbuf, "CPU: \t");
1121 for (i = 0; i < num_possible_cpus(); i++)
1122 bcm_bprintf(strbuf, "%d\t", i);
1123 bcm_bprintf(strbuf, "\n");
1124
1125 bcm_bprintf(strbuf, "Val: \t");
1126 for (i = 0; i < num_possible_cpus(); i++)
1127 bcm_bprintf(strbuf, "%u\t", *(p+i));
1128 bcm_bprintf(strbuf, "\n");
1129 return;
1130}
1131
/*
 * dhd_lb_stats_dump() - format all load-balance statistics into strbuf.
 * Dumps the CPU hotplug counters, the scheduling counters, and -- per
 * enabled feature (DHD_LB_RXP/RXC/TXC) -- the matching per-CPU run
 * counters and histograms. Silently returns on NULL arguments.
 */
void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	/* CPU hotplug event counters (see dhd_cpu_callback). */
	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

	bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
	bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */


#ifdef DHD_LB_TXC
	bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}
1179
1180static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
1181{
1182 uint32 bin_power;
1183 uint32 *p = NULL;
1184
1185 bin_power = next_larger_power2(count);
1186
1187 switch (bin_power) {
1188 case 0: break;
1189 case 1: /* Fall through intentionally */
1190 case 2: p = bin + 0; break;
1191 case 4: p = bin + 1; break;
1192 case 8: p = bin + 2; break;
1193 case 16: p = bin + 3; break;
1194 case 32: p = bin + 4; break;
1195 case 64: p = bin + 5; break;
1196 case 128: p = bin + 6; break;
1197 default : p = bin + 7; break;
1198 }
1199 if (p)
1200 *p = *p + 1;
1201 return;
1202}
1203
1204extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1205{
1206 int cpu;
1207 dhd_info_t *dhd = dhdp->info;
1208
1209 cpu = get_cpu();
1210 put_cpu();
1211 dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
1212
1213 return;
1214}
1215
1216extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1217{
1218 int cpu;
1219 dhd_info_t *dhd = dhdp->info;
1220
1221 cpu = get_cpu();
1222 put_cpu();
1223 dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
1224
1225 return;
1226}
1227
1228extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1229{
1230 int cpu;
1231 dhd_info_t *dhd = dhdp->info;
1232
1233 cpu = get_cpu();
1234 put_cpu();
1235 dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
1236
1237 return;
1238}
1239
1240extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
1241{
1242 dhd_info_t *dhd = dhdp->info;
1243 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
1244}
1245
1246extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
1247{
1248 dhd_info_t *dhd = dhdp->info;
1249 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
1250}
1251
1252#endif /* DHD_LB_STATS */
1253#endif /* DHD_LB */
1254
1255
/* WFA certification: frameburst defaults on; may be toggled off for VSDB
 * testing through the WFA cert configuration.
 */
#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */

/* Count of 802.1x (EAPOL) frames still pending transmit completion. */
static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding;
 * compiled out (no-ops) in this build.
 */
#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
1266
/* Per-interface STA list locking:
 * - GMAC3 builds serialize elsewhere, so the list lock is a no-op.
 * - Otherwise an IRQ-safe per-ifp spinlock protects sta_list, and unicast
 *   forwarding operates on a MALLOC'd snapshot of the list so the lock is
 *   not held across forwarding.
 */
#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else /* ! BCM_GMAC3 */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
1295
/* Control fw roaming (0 = firmware roaming enabled) */
uint dhd_roam_disable = 0;

#ifdef BCMDBGFS
extern int dhd_dbg_init(dhd_pub_t *dhdp);
extern void dhd_dbg_remove(void);
#endif

/* Control radio state (1 = radio up at init) */
uint dhd_radio_up = 1;

/* Network interface name (module param; empty string uses the default) */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */


#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */



/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);

/* Version strings defined in the bus/common layers. */
extern char dhd_version[];
extern char fw_version[];
08dfb6c4 1364extern char clm_version[];
ef6a5fee
RC
1365
/* Forward declarations for the Linux glue layer. */
int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

#ifdef WLMEDIA_HTSF
/* HTSF: hardware timestamp based latency instrumentation. */
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;

uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);


#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
                             wl_event_msg_t *event_ptr, void **data_ptr);
1408
1409#if defined(CONFIG_PM_SLEEP)
/*
 * dhd_pm_callback() - kernel PM notifier hook.
 * Tracks system suspend/hibernation entry and exit; when P2P GO power-save
 * and PROP_TXSTATUS are compiled in, it suspends/resumes the wl flow
 * control (wlfc) machinery accordingly. Always returns NOTIFY_DONE so the
 * PM transition is never vetoed.
 */
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);

	BCM_REFERENCE(dhdinfo);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}
	/* NOTE(review): any other PM event leaves suspend == FALSE and is
	 * handled as a resume below -- confirm this is intended.
	 */

#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
	if (suspend) {
		/* Waive the wake lock so wlfc suspend is not blocked by it. */
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else
		dhd_wlfc_resume(&dhdinfo->pub);
#endif /* PROP_TXSTATUS */
#endif /* defined(SUPPORT_P2P_GO_PS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
	KERNEL_VERSION(2, 6, 39))
	/* Legacy MMC stacks poll this flag to gate SDIO traffic in suspend. */
	dhd_mmc_suspend = suspend;
	smp_mb();
#endif

	return ret;
}
1449
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in kernel notifier link list (with 'next' pointing to itself)
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);
1463
/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
	int ifidx; /* interface index */
} dhd_dev_priv_t;

/* Accessors for the per-netdev private area (populated by dhd_dev_priv_save). */
#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1476
1477/** Clear the dhd net_device's private structure. */
1478static inline void
1479dhd_dev_priv_clear(struct net_device * dev)
1480{
1481 dhd_dev_priv_t * dev_priv;
1482 ASSERT(dev != (struct net_device *)NULL);
1483 dev_priv = DHD_DEV_PRIV(dev);
1484 dev_priv->dhd = (dhd_info_t *)NULL;
1485 dev_priv->ifp = (dhd_if_t *)NULL;
1486 dev_priv->ifidx = DHD_BAD_IF;
1487}
1488
1489/** Setup the dhd net_device's private structure. */
1490static inline void
1491dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1492 int ifidx)
1493{
1494 dhd_dev_priv_t * dev_priv;
1495 ASSERT(dev != (struct net_device *)NULL);
1496 dev_priv = DHD_DEV_PRIV(dev);
1497 dev_priv->dhd = dhd;
1498 dev_priv->ifp = ifp;
1499 dev_priv->ifidx = ifidx;
1500}
1501
#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains from reducing branch conditionals, instruction parallelism,
 * dual issue, reducing load shadows, avail of larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object: permanently "bus down" */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object: permanently unregistered */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object: points at the dummy info/netdev above */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)
/** Interface STA list management. */

/** Fetch the dhd_if object, given the interface index in the dhd. */
static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1569
1570/* Return interface pointer */
1571static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1572{
1573 ASSERT(ifidx < DHD_MAX_IFS);
1574
1575 if (ifidx >= DHD_MAX_IFS)
1576 return NULL;
1577
1578 return dhdp->info->iflist[ifidx];
1579}
1580
/** Reset a dhd_sta object and free into the dhd pool.
 * Flushes any packets queued on the STA's per-priority flow rings (the
 * rings themselves are torn down later), releases the staid back to the
 * allocator, and resets the entry to the "free" state (idx == ID16_INVALID,
 * ifp pointing at the dummy dhd_if object).
 */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			/* Mark the ring so concurrent users know the STA is going away. */
			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
				void * pkt;
				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
					PKTFREE(dhdp->osh, pkt, TRUE);
				}
			}

			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	/* Return the staid and reset the entry to its free-pool state. */
	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}
1636
1637/** Allocate a dhd_sta object from the dhd pool. */
1638static dhd_sta_t *
1639dhd_sta_alloc(dhd_pub_t * dhdp)
1640{
1641 uint16 idx;
1642 dhd_sta_t * sta;
1643 dhd_sta_pool_t * sta_pool;
1644
1645 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1646
1647 idx = id16_map_alloc(dhdp->staid_allocator);
1648 if (idx == ID16_INVALID) {
1649 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1650 return DHD_STA_NULL;
1651 }
1652
1653 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1654 sta = &sta_pool[idx];
1655
1656 ASSERT((sta->idx == ID16_INVALID) &&
1657 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1658
1659 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1660
1661 sta->idx = idx; /* implying allocated */
1662
1663 return sta;
1664}
1665
1666/** Delete all STAs in an interface's STA list. */
1667static void
1668dhd_if_del_sta_list(dhd_if_t *ifp)
1669{
1670 dhd_sta_t *sta, *next;
1671 unsigned long flags;
1672
1673 DHD_IF_STA_LIST_LOCK(ifp, flags);
1674
1675 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1676#if defined(BCM_GMAC3)
1677 if (ifp->fwdh) {
1678 /* Remove sta from WOFA forwarder. */
1679 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1680 }
1681#endif /* BCM_GMAC3 */
1682 list_del(&sta->list);
1683 dhd_sta_free(&ifp->info->pub, sta);
1684 }
1685
1686 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1687
1688 return;
1689}
1690
1691/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1692static void
1693dhd_if_flush_sta(dhd_if_t * ifp)
1694{
1695#if defined(BCM_GMAC3)
1696
1697 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1698 dhd_sta_t *sta, *next;
1699 unsigned long flags;
1700
1701 DHD_IF_STA_LIST_LOCK(ifp, flags);
1702
1703 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1704 /* Remove any sta entry from WOFA forwarder. */
1705 fwder_flush(ifp->fwdh, (wofa_t)sta);
1706 }
1707
1708 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1709 }
1710#endif /* BCM_GMAC3 */
1711}
1712
/** Construct a pool of dhd_sta_t objects to be used by interfaces.
 * Allocates the id16 staid allocator (id#0 reserved) and a pool of
 * max_sta + 1 entries (index 0 unused so staid == pool index), then
 * initializes every entry into the free state via dhd_sta_free().
 * Returns BCME_OK, or BCME_ERROR with everything released on failure.
 */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	/* Draining ids in descending order assigns staid == pool index. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}
1762
1763/** Destruct the pool of dhd_sta_t objects.
1764 * Caller must ensure that no STA objects are currently associated with an if.
1765 */
1766static void
1767dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1768{
1769 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1770
1771 if (sta_pool) {
1772 int idx;
1773 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1774 for (idx = 1; idx <= max_sta; idx++) {
1775 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1776 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1777 }
1778 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1779 dhdp->sta_pool = NULL;
1780 }
1781
1782 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1783 dhdp->staid_allocator = NULL;
1784}
1785
/* Clear the pool of dhd_sta_t objects for built-in type driver.
 * Re-initializes an already-allocated pool (no alloc/free): zeroes the
 * table, resets the id16 allocator, and rebuilds the free state exactly
 * as dhd_sta_pool_init() does.
 */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool. */
	/* Draining ids in descending order assigns staid == pool index. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}
}
1835
1836/** Find STA with MAC address ea in an interface's STA list. */
1837dhd_sta_t *
1838dhd_find_sta(void *pub, int ifidx, void *ea)
1839{
1840 dhd_sta_t *sta;
1841 dhd_if_t *ifp;
1842 unsigned long flags;
1843
1844 ASSERT(ea != NULL);
1845 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1846 if (ifp == NULL)
1847 return DHD_STA_NULL;
1848
1849 DHD_IF_STA_LIST_LOCK(ifp, flags);
1850
1851 list_for_each_entry(sta, &ifp->sta_list, list) {
1852 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1853 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1854 return sta;
1855 }
1856 }
1857
1858 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1859
1860 return DHD_STA_NULL;
1861}
1862
1863/** Add STA into the interface's STA list. */
1864dhd_sta_t *
1865dhd_add_sta(void *pub, int ifidx, void *ea)
1866{
1867 dhd_sta_t *sta;
1868 dhd_if_t *ifp;
1869 unsigned long flags;
1870
1871 ASSERT(ea != NULL);
1872 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1873 if (ifp == NULL)
1874 return DHD_STA_NULL;
1875
1876 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1877 if (sta == DHD_STA_NULL) {
1878 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1879 return DHD_STA_NULL;
1880 }
1881
1882 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1883
1884 /* link the sta and the dhd interface */
1885 sta->ifp = ifp;
1886 sta->ifidx = ifidx;
1887 INIT_LIST_HEAD(&sta->list);
1888
1889 DHD_IF_STA_LIST_LOCK(ifp, flags);
1890
1891 list_add_tail(&sta->list, &ifp->sta_list);
1892
1893#if defined(BCM_GMAC3)
1894 if (ifp->fwdh) {
1895 ASSERT(ISALIGNED(ea, 2));
1896 /* Add sta to WOFA forwarder. */
1897 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1898 }
1899#endif /* BCM_GMAC3 */
1900
1901 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1902
1903 return sta;
1904}
1905
/** Delete STA from the interface's STA list.
 * Removes and frees every list entry matching ea (the safe iterator allows
 * deleting multiple matches in one pass), then, when proxy ARP is enabled,
 * purges that address from the interface's proxy-ARP cache.
 */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
#if defined(BCM_GMAC3)
			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
				ASSERT(ISALIGNED(ea, 2));
				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
			}
#endif /* BCM_GMAC3 */
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear Proxy ARP cache of specific Ethernet Address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}
1944
1945/** Add STA if it doesn't exist. Not reentrant. */
1946dhd_sta_t*
1947dhd_findadd_sta(void *pub, int ifidx, void *ea)
1948{
1949 dhd_sta_t *sta;
1950
1951 sta = dhd_find_sta(pub, ifidx, ea);
1952
1953 if (!sta) {
1954 /* Add entry */
1955 sta = dhd_add_sta(pub, ifidx, ea);
1956 }
1957
1958 return sta;
1959}
1960
1961#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1962#if !defined(BCM_GMAC3)
1963static struct list_head *
1964dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1965{
1966 unsigned long flags;
1967 dhd_sta_t *sta, *snapshot;
1968
1969 INIT_LIST_HEAD(snapshot_list);
1970
1971 DHD_IF_STA_LIST_LOCK(ifp, flags);
1972
1973 list_for_each_entry(sta, &ifp->sta_list, list) {
1974 /* allocate one and add to snapshot */
1975 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1976 if (snapshot == NULL) {
1977 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1978 continue;
1979 }
1980
1981 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1982
1983 INIT_LIST_HEAD(&snapshot->list);
1984 list_add_tail(&snapshot->list, snapshot_list);
1985 }
1986
1987 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1988
1989 return snapshot_list;
1990}
1991
1992static void
1993dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1994{
1995 dhd_sta_t *sta, *next;
1996
1997 list_for_each_entry_safe(sta, next, snapshot_list, list) {
1998 list_del(&sta->list);
1999 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
2000 }
2001}
2002#endif /* !BCM_GMAC3 */
2003#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2004
2005#else
/* PCIE_FULL_DONGLE not defined: per-STA state is managed inside the dongle,
 * so the host-side STA list/pool operations collapse to no-ops and
 * dhd_findadd_sta() reports "no entry".
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2013#endif /* PCIE_FULL_DONGLE */
2014
2015
2016#if defined(DHD_LB)
2017
2018#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2019/**
2020 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2021 * CPU and schedules a tasklet.
2022 * @tasklet: opaque pointer to the tasklet
2023 */
2024static INLINE void
2025dhd_tasklet_schedule(void *tasklet)
2026{
2027 tasklet_schedule((struct tasklet_struct *)tasklet);
2028}
2029
2030/**
2031 * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
2032 * @tasklet: tasklet to be scheduled
2033 * @on_cpu: cpu core id
2034 *
2035 * If the requested cpu is online, then an IPI is sent to this cpu via the
2036 * smp_call_function_single with no wait and the tasklet_schedule function
2037 * will be invoked to schedule the specified tasklet on the requested CPU.
2038 */
2039static void
2040dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2041{
2042 const int wait = 0;
2043 smp_call_function_single(on_cpu,
2044 dhd_tasklet_schedule, (void *)tasklet, wait);
2045}
2046#endif /* DHD_LB_TXC || DHD_LB_RXC */
2047
2048
2049#if defined(DHD_LB_TXC)
2050/**
2051 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2052 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2053 * freeing the packets placed in the tx_compl workq
2054 */
2055void
2056dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2057{
2058 dhd_info_t *dhd = dhdp->info;
2059 int curr_cpu, on_cpu;
2060
2061 if (dhd->rx_napi_netdev == NULL) {
2062 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2063 return;
2064 }
2065
2066 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2067 /*
2068 * If the destination CPU is NOT online or is same as current CPU
2069 * no need to schedule the work
2070 */
2071 curr_cpu = get_cpu();
2072 put_cpu();
2073
2074 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2075
2076 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2077 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2078 } else {
2079 schedule_work(&dhd->tx_compl_dispatcher_work);
2080 }
2081}
2082
2083static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2084{
2085 struct dhd_info *dhd =
2086 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2087 int cpu;
2088
2089 get_online_cpus();
2090 cpu = atomic_read(&dhd->tx_compl_cpu);
2091 if (!cpu_online(cpu))
2092 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2093 else
2094 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2095 put_online_cpus();
2096}
2097
2098#endif /* DHD_LB_TXC */
2099
2100
2101#if defined(DHD_LB_RXC)
2102/**
2103 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2104 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2105 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2106 * placed in the rx_compl workq.
2107 *
2108 * @dhdp: pointer to dhd_pub object
2109 */
2110void
2111dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2112{
2113 dhd_info_t *dhd = dhdp->info;
2114 int curr_cpu, on_cpu;
2115
2116 if (dhd->rx_napi_netdev == NULL) {
2117 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2118 return;
2119 }
2120
2121 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2122 /*
2123 * If the destination CPU is NOT online or is same as current CPU
2124 * no need to schedule the work
2125 */
2126 curr_cpu = get_cpu();
2127 put_cpu();
2128
2129 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2130
2131 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2132 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2133 } else {
2134 schedule_work(&dhd->rx_compl_dispatcher_work);
2135 }
2136}
2137
2138static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2139{
2140 struct dhd_info *dhd =
2141 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2142 int cpu;
2143
2144 get_online_cpus();
2145 cpu = atomic_read(&dhd->tx_compl_cpu);
2146 if (!cpu_online(cpu))
2147 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2148 else
2149 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2150 put_online_cpus();
2151}
2152
2153#endif /* DHD_LB_RXC */
2154
2155
2156#if defined(DHD_LB_RXP)
/**
 * dhd_napi_poll - Load balance napi poll function to process received
 * packets and send up the network stack using netif_receive_skb()
 *
 * @napi: napi object in which context this poll function is invoked
 * @budget: number of packets to be processed.
 *
 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
 * packet tag and sendup.
 */
static int
dhd_napi_poll(struct napi_struct *napi, int budget)
{
	int ifid;
	const int pkt_count = 1;	/* dhd_rx_frame() is fed one skb at a time */
	const int chan = 0;
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	int processed = 0;
	struct sk_buff_head rx_process_queue;

	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));

	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/* NOTE(review): the loop drains the whole snapshot regardless of
	 * @budget; the budget does not bound the number of packets here.
	 */
	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);

		/* interface index was stashed in the pkttag by the producer */
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));

		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));

		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
		processed++;
	}

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	/* Returning a value < budget together with napi_complete() tells the
	 * NAPI core this poll cycle is finished.
	 */
	napi_complete(napi);

	return budget - 1;
}
2211
2212/**
2213 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
2214 * poll list. This function may be invoked via the smp_call_function_single
2215 * from a remote CPU.
2216 *
2217 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2218 * after the napi_struct is added to the softnet data's poll_list
2219 *
2220 * @info: pointer to a dhd_info struct
2221 */
2222static void
2223dhd_napi_schedule(void *info)
2224{
2225 dhd_info_t *dhd = (dhd_info_t *)info;
2226
2227 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2228 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2229
2230 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2231 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2232 __napi_schedule(&dhd->rx_napi_struct);
2233 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2234 }
2235
2236 /*
2237 * If the rx_napi_struct was already running, then we let it complete
2238 * processing all its packets. The rx_napi_struct may only run on one
2239 * core at a time, to avoid out-of-order handling.
2240 */
2241}
2242
2243/**
2244 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2245 * action after placing the dhd's rx_process napi object in the the remote CPU's
2246 * softnet data's poll_list.
2247 *
2248 * @dhd: dhd_info which has the rx_process napi object
2249 * @on_cpu: desired remote CPU id
2250 */
2251static INLINE int
2252dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2253{
2254 int wait = 0; /* asynchronous IPI */
2255
2256 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2257 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2258
2259 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2260 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2261 __FUNCTION__, on_cpu));
2262 }
2263
2264 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2265
2266 return 0;
2267}
2268
2269/*
2270 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2271 * Why should we do this?
2272 * The candidacy algorithm is run from the call back function
2273 * registered to CPU hotplug notifier. This call back happens from Worker
2274 * context. The dhd_napi_schedule_on is also from worker context.
2275 * Note that both of this can run on two different CPUs at the same time.
2276 * So we can possibly have a window where a given CPUn is being brought
2277 * down from CPUm while we try to run a function on CPUn.
2278 * To prevent this its better have the whole code to execute an SMP
2279 * function under get_online_cpus.
2280 * This function call ensures that hotplug mechanism does not kick-in
2281 * until we are done dealing with online CPUs
2282 * If the hotplug worker is already running, no worries because the
2283 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2284 *
2285 * The below mentioned code structure is proposed in
2286 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2287 * for the question
2288 * Q: I need to ensure that a particular cpu is not removed when there is some
2289 * work specific to this cpu is in progress
2290 *
2291 * According to the documentation calling get_online_cpus is NOT required, if
2292 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2293 * run from Work Queue context we have to call these functions
2294 */
2295static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2296{
2297 struct dhd_info *dhd =
2298 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2299 int cpu;
2300
2301 get_online_cpus();
2302 cpu = atomic_read(&dhd->rx_napi_cpu);
2303 if (!cpu_online(cpu))
2304 dhd_napi_schedule(dhd);
2305 else
2306 dhd_napi_schedule_on(dhd, cpu);
2307 put_online_cpus();
2308}
2309
2310/**
2311 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2312 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2313 * the packets enqueued into the rx_napi_queue and sendup.
2314 * The producer's rx packet queue is appended to the rx_napi_queue before
2315 * dispatching the rx_napi_struct.
2316 */
2317void
2318dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2319{
2320 unsigned long flags;
2321 dhd_info_t *dhd = dhdp->info;
2322 int curr_cpu;
2323 int on_cpu;
2324
2325 if (dhd->rx_napi_netdev == NULL) {
2326 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2327 return;
2328 }
2329
2330 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2331 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2332
2333 /* append the producer's queue of packets to the napi's rx process queue */
2334 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2335 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2336 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2337
2338 /*
2339 * If the destination CPU is NOT online or is same as current CPU
2340 * no need to schedule the work
2341 */
2342 curr_cpu = get_cpu();
2343 put_cpu();
2344
2345 on_cpu = atomic_read(&dhd->rx_napi_cpu);
2346
2347 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2348 dhd_napi_schedule(dhd);
2349 } else {
2350 schedule_work(&dhd->rx_napi_dispatcher_work);
2351 }
2352}
2353
2354/**
2355 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2356 */
2357void
2358dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2359{
2360 dhd_info_t *dhd = dhdp->info;
2361
2362 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2363 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2364 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2365 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2366}
2367#endif /* DHD_LB_RXP */
2368
2369#endif /* DHD_LB */
2370
/* Deferred-work handler that collects a dongle memory dump (PCIe only).
 * BCM_REFERENCE(dhd) suppresses the unused-variable warning when BCMPCIE
 * is not defined and the call below is compiled out.
 */
static void dhd_memdump_work_handler(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, dhd_memdump_work.work);

	BCM_REFERENCE(dhd);
#ifdef BCMPCIE
	dhd_prot_collect_memdump(&dhd->pub);
#endif
}
2381
2382
2383/** Returns dhd iflist index corresponding the the bssidx provided by apps */
2384int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2385{
2386 dhd_if_t *ifp;
2387 dhd_info_t *dhd = dhdp->info;
2388 int i;
2389
2390 ASSERT(bssidx < DHD_MAX_IFS);
2391 ASSERT(dhdp);
2392
2393 for (i = 0; i < DHD_MAX_IFS; i++) {
2394 ifp = dhd->iflist[i];
2395 if (ifp && (ifp->bssidx == bssidx)) {
2396 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2397 ifp->name, bssidx, i));
2398 break;
2399 }
2400 }
2401 return i;
2402}
2403
/* Enqueue @skb on the rx-frame thread's circular buffer (dhdp->skbbuf[],
 * MAXSKBPEND power-of-2 slots, store_idx producer / sent_idx consumer).
 * Returns BCME_OK on success. When the producer slot is still occupied
 * (consumer has not caught up): returns BCME_BUSY if RXF_DEQUEUE_ON_BUSY
 * is defined, otherwise BCME_ERROR (optionally sleeping 1 ms first when
 * WAIT_DEQUEUE is defined).
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* Publish the skb, then advance store_idx with power-of-2 wrap */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
2444
/* Dequeue the next pending skb from the rx-frame circular buffer (the
 * counterpart of dhd_rxf_enqueue). Returns the skb, or NULL when the
 * consumer slot is empty. Clearing the slot before advancing sent_idx is
 * what signals the producer that the slot is free again.
 */
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;

	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	if (skb == NULL) {
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}

	/* Free the slot and advance the consumer index (power-of-2 wrap) */
	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));

	dhd_os_rxfunlock(dhdp);

	return skb;
}
2474
2475int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2476{
2477 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2478
2479 if (prepost) { /* pre process */
2480 dhd_read_macaddr(dhd);
2481 } else { /* post process */
2482 dhd_write_macaddr(&dhd->pub.mac);
2483 }
2484
2485 return 0;
2486}
2487
2488// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
2489#if defined(PKT_FILTER_SUPPORT) &&defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2490static bool
2491_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2492{
2493 bool _apply = FALSE;
2494 /* In case of IBSS mode, apply arp pkt filter */
2495 if (op_mode & DHD_FLAG_IBSS_MODE) {
2496 _apply = TRUE;
2497 goto exit;
2498 }
2499 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2500 if ((dhd->arp_version == 1) &&
2501 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2502 _apply = TRUE;
2503 goto exit;
2504 }
2505
2506exit:
2507 return _apply;
2508}
2509#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2510
2511void dhd_set_packet_filter(dhd_pub_t *dhd)
2512{
2513#ifdef PKT_FILTER_SUPPORT
2514 int i;
2515
2516 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2517 if (dhd_pkt_filter_enable) {
2518 for (i = 0; i < dhd->pktfilter_count; i++) {
2519 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2520 }
2521 }
2522#endif /* PKT_FILTER_SUPPORT */
2523}
2524
/* Enable (@value == 1) or disable (@value == 0) the firmware packet
 * filters; with filters enabled only unicast traffic is passed up.
 * Skipped entirely when enabling in HOSTAP mode; enabling additionally
 * requires STA mode with no DHCP exchange in progress. Disabling is
 * unconditional (subject to dhd_pkt_filter_enable).
 */
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));

	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
		return;
	}
	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
		(dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
			/* Leave the ARP allow-list filter off unless IBSS or
			 * P2P (arp offload v1) needs ARP to reach the host.
			 */
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
#endif /* PKT_FILTER_SUPPORT */
}
2558
/* Apply (@value != 0, with dhd->in_suspend set) or remove (@value == 0)
 * the early-suspend power configuration: dongle PM mode, packet filters,
 * multicast pass-through, DTIM skipping, and - under DHD_USE_EARLYSUSPEND -
 * beacon timeout, roam thresholds, firmware roaming, IPv6 RA filtering and
 * OOB interrupt width, all programmed via iovars.
 * Returns 0, or -ENODEV when @dhd is NULL.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
	int power_mode = PM_MAX;
#ifdef SUPPORT_SENSORHUB
	uint32 shub_msreq;
#endif /* SUPPORT_SENSORHUB */
	/* wl_pkt_filter_enable_t enable_parm; */
	char iovbuf[32];
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
	int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int roam_time_thresh = 0;	/* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
	uint roamvar = dhd->conf->roam_off_suspend;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
	int bcn_li_bcn;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint nd_ra_filter = 0;
	int ret = 0;
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
	struct dhd_info *dhdinfo;
	uint32 allmulti;
	uint i;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */

	if (!dhd)
		return -ENODEV;

#ifdef PASS_ALL_MCAST_PKTS
	dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */

	/* Resume-time PM mode: per-config override, else PM_FAST */
	if (dhd->conf->pm >= 0)
		power_mode = dhd->conf->pm;
	else
		power_mode = PM_FAST;
	if (dhd->up) {
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			shub_msreq = 1;
			if (dhd->info->shub_enable == 1) {
				bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
					iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
					DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */

			/* Suspend-time PM mode may be overridden separately */
			if (dhd->conf->pm_in_suspend >= 0)
				power_mode = dhd->conf->pm_in_suspend;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);

#ifdef PKT_FILTER_SUPPORT
			/* Enable packet filter,
			 * only allow unicast packet to send up
			 */
			dhd_enable_packet_filter(1, dhd);
#endif /* PKT_FILTER_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
			/* Stop passing all multicast while suspended */
			allmulti = 0;
			bcm_mkiovar("allmulti", (char *)&allmulti, 4,
				iovbuf, sizeof(iovbuf));
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
						sizeof(iovbuf), TRUE, i);
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_ditm on WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));
			if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
				TRUE, 0) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
			bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
			bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			/* Disable firmware roaming during suspend */
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 0;
			bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = CUSTOM_INTR_WIDTH;
			bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#endif /* DHD_USE_EARLYSUSPEND */
			dhd_conf_set_ap_in_suspend(dhd, value);
		} else {
			dhd_conf_set_ap_in_suspend(dhd, value);
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			shub_msreq = 0;
			if (dhd->info->shub_enable == 1) {
				bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
					4, iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
					iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
					DHD_ERROR(("%s Sensor Hub move/stop stop:"
						"failed %d\n", __FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */


#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = 0;
			bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
			/* Restore the resume-time PM mode computed above */
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 1;
			bcm_mkiovar("allmulti", (char *)&allmulti, 4,
				iovbuf, sizeof(iovbuf));
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
						sizeof(iovbuf), TRUE, i);
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* restore pre-suspend setting for dtim_skip */
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));

			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT;
			bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = 2000;
			bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			roamvar = dhd_roam_disable;
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 1;
			bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#endif /* DHD_USE_EARLYSUSPEND */

			/* terence 2017029: Reject in early suspend */
			if (!dhd->conf->xmit_in_suspend) {
				dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
			}
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
2823
/* Common helper for the early-suspend/late-resume callbacks.
 * @val: 1 = entering suspend, 0 = resuming.
 * @force: apply settings even when dhdp->suspend_disable_flag is set.
 * Applies dhd_set_suspend() only in STA mode or when AP-in-suspend is
 * configured, under the wake lock and perimeter lock. Returns the
 * dhd_set_suspend() result (0 when skipped).
 */
static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
{
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_OS_WAKE_LOCK(dhdp);
	DHD_PERIM_LOCK(dhdp);

	/* Set flag when early suspend was called */
	dhdp->in_suspend = val;
	if ((force || !dhdp->suspend_disable_flag) &&
		(dhd_support_sta_mode(dhdp) || dhd_conf_get_ap_mode_in_suspend(dhdp)))
	{
		ret = dhd_set_suspend(val, dhdp);
	}

	DHD_PERIM_UNLOCK(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);
	return ret;
}
2844
2845#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2846static void dhd_early_suspend(struct early_suspend *h)
2847{
2848 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2849 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2850
2851 if (dhd)
2852 dhd_suspend_resume_helper(dhd, 1, 0);
2853}
2854
2855static void dhd_late_resume(struct early_suspend *h)
2856{
2857 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2858 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2859
2860 if (dhd)
2861 dhd_suspend_resume_helper(dhd, 0, 0);
2862}
2863#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2864
2865/*
2866 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2867 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2868 *
2869 * dhd_timeout_start(&tmo, usec);
2870 * while (!dhd_timeout_expired(&tmo))
2871 * if (poll_something())
2872 * break;
2873 * if (dhd_timeout_expired(&tmo))
2874 * fatal();
2875 */
2876
2877void
2878dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2879{
2880 tmo->limit = usec;
2881 tmo->increment = 0;
2882 tmo->elapsed = 0;
2883 tmo->tick = jiffies_to_usecs(1);
2884}
2885
/* Poll-style timeout check (see usage comment above dhd_timeout_start).
 * The first call only arms the back-off and returns 0. Later calls account
 * the delay about to be taken, then either spin-delay with an exponential
 * back-off capped at one jiffy, or - once the increment reaches a jiffy
 * and sleeping is allowed - yield for one jiffy via schedule_timeout(1).
 * Returns 1 once the accumulated delay reaches the configured limit.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		/* busy-wait path: double the spin each round, cap at a jiffy */
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* sleep path: private waitqueue so no external wakeup needed */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
2919
2920int
2921dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2922{
2923 int i = 0;
2924
2925 if (!dhd) {
2926 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2927 return DHD_BAD_IF;
2928 }
2929
2930 while (i < DHD_MAX_IFS) {
2931 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2932 return i;
2933 i++;
2934 }
2935
2936 return DHD_BAD_IF;
2937}
2938
2939struct net_device * dhd_idx2net(void *pub, int ifidx)
2940{
2941 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2942 struct dhd_info *dhd_info;
2943
2944 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2945 return NULL;
2946 dhd_info = dhd_pub->info;
2947 if (dhd_info && dhd_info->iflist[ifidx])
2948 return dhd_info->iflist[ifidx]->net;
2949 return NULL;
2950}
2951
2952int
2953dhd_ifname2idx(dhd_info_t *dhd, char *name)
2954{
2955 int i = DHD_MAX_IFS;
2956
2957 ASSERT(dhd);
2958
2959 if (name == NULL || *name == '\0')
2960 return 0;
2961
2962 while (--i > 0)
2963 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2964 break;
2965
2966 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2967
2968 return i; /* default - the primary interface */
2969}
2970
2971char *
2972dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2973{
2974 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2975
2976 ASSERT(dhd);
2977
2978 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2979 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2980 return "<if_bad>";
2981 }
2982
2983 if (dhd->iflist[ifidx] == NULL) {
2984 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2985 return "<if_null>";
2986 }
2987
2988 if (dhd->iflist[ifidx]->net)
2989 return dhd->iflist[ifidx]->net->name;
2990
2991 return "<if_none>";
2992}
2993
2994uint8 *
2995dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2996{
2997 int i;
2998 dhd_info_t *dhd = (dhd_info_t *)dhdp;
2999
3000 ASSERT(dhd);
3001 for (i = 0; i < DHD_MAX_IFS; i++)
3002 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
3003 return dhd->iflist[i]->mac_addr;
3004
3005 return NULL;
3006}
3007
3008
/*
 * Push the interface's multicast configuration down to the dongle:
 *   1) the "mcast_list" iovar carrying the net_device's mc address list,
 *   2) the "allmulti" iovar (forced on if the list download failed with
 *      a non-empty list, or unconditionally under PASS_ALL_MCAST_PKTS),
 *   3) WLC_SET_PROMISC mirroring the IFF_PROMISC flag.
 * Runs from deferred-work context; failures are logged and ignored.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
		return;
	}
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	/* Snapshot the mc count under the address lock; the list can still
	 * change before it is walked below, hence the cnt re-check in the
	 * copy loop.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	cnt = netdev_mc_count(dev);
#else
	cnt = dev->mc_count;
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
	if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
		allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

	/* Send down the multicast list first. */

	/* iovar layout: "mcast_list\0" + uint32 count + count MAC addresses */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	/* NOTE(review): cnt is reused as the loop bound below after being
	 * byte-swapped by htol32(); on a big-endian host the swapped value
	 * would bound the walk -- verify on such targets.
	 */
	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	netdev_for_each_mc_addr(ha, dev) {
		/* stop once the snapshot count is exhausted, even if the
		 * list grew since it was taken (avoids overrunning buf)
		 */
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#else /* LINUX < 2.6.35 */
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
		cnt--, mclist = mclist->next) {
		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
	}
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* list download failed: fall back to allmulti if there were
		 * addresses we could not program
		 */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
			dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}


	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
3161
3162int
3163_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3164{
3165 char buf[32];
3166 wl_ioctl_t ioc;
3167 int ret;
3168
3169 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3170 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3171 return -1;
3172 }
3173 memset(&ioc, 0, sizeof(ioc));
3174 ioc.cmd = WLC_SET_VAR;
3175 ioc.buf = buf;
3176 ioc.len = 32;
3177 ioc.set = TRUE;
3178
3179 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3180 if (ret < 0) {
3181 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3182 } else {
3183 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3184 if (ifidx == 0)
3185 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3186 }
3187
3188 return ret;
3189}
3190
3191#ifdef SOFTAP
3192extern struct net_device *ap_net_dev;
3193extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
3194#endif
3195
#ifdef DHD_PSTA
/* Get the current psta/psr (proxy STA / proxy repeater) configuration. */
int dhd_get_psta_mode(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return (int)dhd->psta_mode;
}
/* Set the psta/psr (proxy STA / proxy repeater) configuration.
 * Always returns 0.
 */
int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd->psta_mode = val;
	return 0;
}
#endif /* DHD_PSTA */
3211
/*
 * Deferred-work handler for DHD_WQ_WORK_IF_ADD: create and register a new
 * virtual interface described by @event_info (a dhd_if_event_t, owned and
 * freed here).  On cfg80211 kernels (>= 3.11) a wireless_dev is attached
 * to the new net_device, sharing the primary interface's wiphy.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	struct net_device *ndev;
	int ifidx, bssidx;
	int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	struct wireless_dev *vwdev, *primary_wdev;
	struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

	/* This path is for non-android case */
	/* The interface name in host and in event msg are same */
	/* if name in event msg is used to create dongle if list on host */
	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE, if_event->name);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	/* Attach a wireless_dev that borrows the primary interface's wiphy. */
	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
	if (unlikely(!vwdev)) {
		DHD_ERROR(("Could not allocate wireless device\n"));
		goto done;
	}
	primary_ndev = dhd->pub.info->iflist[0]->net;
	primary_wdev = ndev_to_wdev(primary_ndev);
	vwdev->wiphy = primary_wdev->wiphy;
	vwdev->iftype = if_event->event.role;
	vwdev->netdev = ndev;
	ndev->ieee80211_ptr = vwdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	/* dhd_register_if() must run without the perim lock held. */
	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#ifdef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
		char iovbuf[WLC_IOCTL_SMLEN];
		uint32 var_int = 1;

		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);

		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	/* The event payload is owned by this handler on all paths. */
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
3306
/*
 * Deferred-work handler for DHD_WQ_WORK_IF_DEL: tear down the interface
 * named in @event_info (a dhd_if_event_t, owned and freed here).
 */
static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;


	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));

	/* dhd_remove_if() must run without the perim lock held. */
	DHD_PERIM_UNLOCK(&dhd->pub);
	dhd_remove_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);

	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
3347
3348static void
3349dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3350{
3351 dhd_info_t *dhd = handle;
3352 dhd_if_t *ifp = event_info;
3353
3354 if (event != DHD_WQ_WORK_SET_MAC) {
3355 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3356 }
3357
3358 if (!dhd) {
3359 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3360 return;
3361 }
3362
3363 dhd_net_if_lock_local(dhd);
3364 DHD_OS_WAKE_LOCK(&dhd->pub);
3365 DHD_PERIM_LOCK(&dhd->pub);
3366
3367#ifdef SOFTAP
3368 {
3369 unsigned long flags;
3370 bool in_ap = FALSE;
3371 DHD_GENERAL_LOCK(&dhd->pub, flags);
3372 in_ap = (ap_net_dev != NULL);
3373 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3374
3375 if (in_ap) {
3376 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3377 ifp->net->name));
3378 goto done;
3379 }
3380 }
3381#endif /* SOFTAP */
3382
3383 // terence 20160907: fix for not able to set mac when wlan0 is down
3384 if (ifp == NULL || !ifp->set_macaddress) {
3385 goto done;
3386 }
3387 if (ifp == NULL || !dhd->pub.up) {
3388 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3389 goto done;
3390 }
3391
3392 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3393 ifp->set_macaddress = FALSE;
3394 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3395 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3396 else
3397 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3398
3399done:
3400 DHD_PERIM_UNLOCK(&dhd->pub);
3401 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3402 dhd_net_if_unlock_local(dhd);
3403}
3404
3405static void
3406dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3407{
3408 dhd_info_t *dhd = handle;
3409 dhd_if_t *ifp = event_info;
3410 int ifidx;
3411
3412 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3413 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3414 return;
3415 }
3416
3417 if (!dhd) {
3418 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3419 return;
3420 }
3421
3422 dhd_net_if_lock_local(dhd);
3423 DHD_OS_WAKE_LOCK(&dhd->pub);
3424 DHD_PERIM_LOCK(&dhd->pub);
3425
3426#ifdef SOFTAP
3427 {
3428 bool in_ap = FALSE;
3429 unsigned long flags;
3430 DHD_GENERAL_LOCK(&dhd->pub, flags);
3431 in_ap = (ap_net_dev != NULL);
3432 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3433
3434 if (in_ap) {
3435 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3436 ifp->net->name));
3437 ifp->set_multicast = FALSE;
3438 goto done;
3439 }
3440 }
3441#endif /* SOFTAP */
3442
3443 if (ifp == NULL || !dhd->pub.up) {
3444 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3445 goto done;
3446 }
3447
3448 ifidx = ifp->idx;
3449
3450
3451 _dhd_set_multicast_list(dhd, ifidx);
3452 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3453
3454done:
3455 DHD_PERIM_UNLOCK(&dhd->pub);
3456 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3457 dhd_net_if_unlock_local(dhd);
3458}
3459
3460static int
3461dhd_set_mac_address(struct net_device *dev, void *addr)
3462{
3463 int ret = 0;
3464
3465 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3466 struct sockaddr *sa = (struct sockaddr *)addr;
3467 int ifidx;
3468 dhd_if_t *dhdif;
3469
3470 ifidx = dhd_net2idx(dhd, dev);
3471 if (ifidx == DHD_BAD_IF)
3472 return -1;
3473
3474 dhdif = dhd->iflist[ifidx];
3475
3476 dhd_net_if_lock_local(dhd);
3477 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3478 dhdif->set_macaddress = TRUE;
3479 dhd_net_if_unlock_local(dhd);
3480 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3481 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3482 return ret;
3483}
3484
3485static void
3486dhd_set_multicast_list(struct net_device *dev)
3487{
3488 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3489 int ifidx;
3490
3491 ifidx = dhd_net2idx(dhd, dev);
3492 if (ifidx == DHD_BAD_IF)
3493 return;
3494
3495 dhd->iflist[ifidx]->set_multicast = TRUE;
3496 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3497 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
3498
3499 // terence 20160907: fix for not able to set mac when wlan0 is down
3500 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3501 DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3502}
3503
#ifdef PROP_TXSTATUS
/*
 * Enter the wlfc (proptxstatus) critical section.  No-op when proptx is
 * disabled at runtime.  Always returns 1.
 */
int
dhd_os_wlfc_block(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);
	ASSERT(di != NULL);

	/* terence 20161229: don't do spin lock if proptx not enabled */
	if (disable_proptx)
		return 1;

	spin_lock_bh(&di->wlfc_spinlock);
	return 1;
}

/*
 * Leave the wlfc (proptxstatus) critical section.  Must pair with
 * dhd_os_wlfc_block(); same disable_proptx short-circuit so lock and
 * unlock stay balanced.  Always returns 1.
 */
int
dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);

	ASSERT(di != NULL);

	/* terence 20161229: don't do spin lock if proptx not enabled */
	if (disable_proptx)
		return 1;

	spin_unlock_bh(&di->wlfc_spinlock);
	return 1;
}

#endif /* PROP_TXSTATUS */
3531
#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Ether-type to printable-name mapping used by the TX/RX dump helpers. */
typedef struct {
	uint16 type;
	const char *str;
} PKTTYPE_INFO;

static const PKTTYPE_INFO packet_type_info[] =
{
	{ ETHER_TYPE_IP, "IP" },
	{ ETHER_TYPE_ARP, "ARP" },
	{ ETHER_TYPE_BRCM, "BRCM" },
	{ ETHER_TYPE_802_1X, "802.1X" },
	{ ETHER_TYPE_WAI, "WAPI" },
	{ 0, ""}	/* sentinel: unknown types map to the empty string */
};

/*
 * Translate an ether type into a short name for dump logging.
 * Unknown types return the sentinel's empty string.
 */
static const char *_get_packet_type_str(uint16 type)
{
	int i;
	/* element count minus the sentinel; fix: index the conventional [0]
	 * element in the sizeof idiom ([1] only worked because all elements
	 * share one size)
	 */
	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;

	for (i = 0; i < n; i++) {
		if (packet_type_info[i].type == type)
			return packet_type_info[i].str;
	}

	return packet_type_info[n].str;
}
#endif /* DHD_RX_DUMP || DHD_TX_DUMP */
3561
#if defined(DHD_TX_DUMP)
/*
 * Log a one-line summary (interface + ether type) for an outgoing packet.
 * EAPOL frames are additionally decoded by dhd_dump_eapol_4way_message();
 * with DHD_TX_FULL_DUMP the whole payload is hex-dumped as well.
 */
void
dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
{
	uint8 *dump_data;
	uint16 protocol;
	char *ifname;

	dump_data = PKTDATA(osh, pkt);
	/* bytes 12-13 of the ethernet header carry the ether type */
	protocol = (dump_data[12] << 8) | dump_data[13];
	ifname = ndev ? ndev->name : "N/A";

	DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));

	if (protocol == ETHER_TYPE_802_1X) {
		dhd_dump_eapol_4way_message(ifname, dump_data, TRUE);
	}

#if defined(DHD_TX_FULL_DUMP)
	{
		int i;
		uint datalen;
		datalen = PKTLEN(osh, pkt);

		/* 16 bytes per printed line */
		for (i = 0; i < datalen; i++) {
			printk("%02X ", dump_data[i]);
			if ((i & 15) == 15)
				printk("\n");
		}
		printk("\n");
	}
#endif /* DHD_TX_FULL_DUMP */
}
#endif /* DHD_TX_DUMP */
3596
/* This routine do not support Packet chain feature, Currently tested for
 * proxy arp feature
 */
/*
 * Hand a single (unchained) packet up to the Linux network stack on the
 * net_device of interface @ifidx, choosing netif_rx()/netif_rx_ni() or the
 * rxf thread depending on context.  Returns BCME_OK.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
	struct sk_buff *skb;
	void *skbhead = NULL;
	void *skbprev = NULL;
	dhd_if_t *ifp;
	ASSERT(!PKTISCHAINED(p));
	skb = PKTTONATIVE(dhdp->osh, p);

	ifp = dhdp->info->iflist[ifidx];
	skb->dev = ifp->net;
#if defined(BCM_GMAC3)
	/* Forwarder capable interfaces use WOFA based forwarding */
	if (ifp->fwdh) {
		struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
		uint16 * da = (uint16 *)(eh->ether_dhost);
		wofa_t wofa;
		ASSERT(ISALIGNED(da, 2));

		wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
		if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
			if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
				return BCME_OK;
			}
		}
		/* Known destination (or forward failed): drop the packet here. */
		PKTFRMNATIVE(dhdp->osh, p);
		PKTFREE(dhdp->osh, p, FALSE);
		return BCME_OK;
	}
#endif /* BCM_GMAC3 */

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (in_interrupt()) {
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		netif_rx(skb);
	} else {
		if (dhdp->info->rxthread_enabled) {
			/* Single-packet routine: skbhead starts NULL, so this
			 * branch always takes the assignment; the PKTSETNEXT
			 * chaining else-arm is unreachable here.
			 */
			if (!skbhead) {
				skbhead = skb;
			} else {
				PKTSETNEXT(dhdp->osh, skbprev, skb);
			}
			skbprev = skb;
		} else {
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
			netif_rx_ni(skb);
#else
			ulong flags;
			netif_rx(skb);
			local_irq_save(flags);
			RAISE_RX_SOFTIRQ();
			local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
		}
	}

	if (dhdp->info->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	return BCME_OK;
}
3671
/*
 * Core transmit path: validate bus state, run optional L2 filtering,
 * update TX statistics, assign packet priority (and a PCIe flow ring),
 * then hand the packet to proptxstatus or directly to the bus.
 * Consumes @pktbuf on every path (frees it itself on failure).
 * Returns BCME_OK/negative error.
 */
int BCMFASTPATH
__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;
#ifdef DHD_L2_FILTER
	dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif
#ifdef DHD_8021X_DUMP
	/* NOTE(review): ndev is declared under DHD_8021X_DUMP but used below
	 * under DHD_TX_DUMP -- a build with DHD_TX_DUMP and without
	 * DHD_8021X_DUMP would not compile; verify the intended guard.
	 */
	struct net_device *ndev;
#endif /* DHD_8021X_DUMP */

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTFREE(dhdp->osh, pktbuf, TRUE);
		return -EBUSY;
	}
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_L2_FILTER
	/* if dhcp_unicast is enabled, we need to convert the */
	/* broadcast DHCP ACK/REPLY packets to Unicast. */
	if (ifp->dhcp_unicast) {
		uint8* mac_addr;
		uint8* ehptr = NULL;
		int ret;
		ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
		if (ret == BCME_OK) {
			/* if given mac address having valid entry in sta list
			 * copy the given mac address, and return with BCME_OK
			 */
			if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
				ehptr = PKTDATA(dhdp->osh, pktbuf);
				bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
			}
		}
	}

	if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}

	if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);

		/* Drop the packets if l2 filter has processed it already
		 * otherwise continue with the normal path
		 */
		if (ret == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}
#endif /* DHD_L2_FILTER */
	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
			atomic_inc(&dhd->pend_8021x_cnt);
#ifdef DHD_DHCP_DUMP
		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
			struct net_device *net;
			char *ifname;

			net = dhd_idx2net(dhdp, ifidx);
			ifname = net ? net->name : "N/A";
			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
			dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
			/* UDP port 68 (0x44): DHCP client; decode the message
			 * type from the options area for logging
			 */
			if (source_port == 0x0044 || dest_port == 0x0044) {
				dump_hex = (pktdata[udp_port_pos+249] << 8) |
					pktdata[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
				}
#ifdef DHD_LOSSLESS_ROAMING
				if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
					DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
				}
#endif /* DHD_LOSSLESS_ROAMING */
				DHD_ERROR(("\n"));
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				/* UDP port 67 (0x43): DHCP server side */
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
	} else {
		/* Runt frame: too short to carry an ethernet header. */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

	/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
	if (PKTPRIO(pktbuf) == 0)
#endif /* !PKTPRIO_OVERRIDE */
	{
#ifdef QOS_MAP_SET
		pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
#else
		pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */
	}


#ifdef PCIE_FULL_DONGLE
	/*
	 * Lkup the per interface hash table, for a matching flowring. If one is not
	 * available, allocate a unique flowid and add a flowring entry.
	 * The found or newly created flowid is placed into the pktbuf's tag.
	 */
	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
	if (ret != BCME_OK) {
		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
		return ret;
	}
#endif

#if defined(DHD_TX_DUMP)
	ndev = dhd_idx2net(dhdp, ifidx);
	dhd_tx_dump(ndev, dhdp->osh, pktbuf);
#endif
	/* terence 20150901: Micky add to ajust the 802.1X priority */
	/* Set the 802.1X packet with the highest priority 7 */
	if (dhdp->conf->pktprio8021x >= 0)
		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
	if (dhd_wlfc_is_supported(dhdp)) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	{
		/* If the protocol uses a data header, apply it */
		dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
	}

	/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
	dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#ifdef PROP_TXSTATUS
	{
		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
			/* non-proptxstatus way */
#ifdef BCMPCIE
			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
		}
	}
#else
#ifdef BCMPCIE
	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */

	return ret;
}
3875
/*
 * Locked wrapper around __dhd_sendpkt(): rejects the send when the bus is
 * down (or going down), marks DHD_BUS_BUSY_IN_SEND_PKT for the duration,
 * and (with DHD_PCIE_RUNTIMEPM) wakes the bus first.  Frees @pktbuf on
 * the early-error paths; otherwise __dhd_sendpkt() owns it.
 */
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = 0;
	unsigned long flags;

	DHD_GENERAL_LOCK(dhdp, flags);
	if (dhdp->busstate == DHD_BUS_DOWN ||
			dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_ERROR(("%s: returning as busstate=%d\n",
			__FUNCTION__, dhdp->busstate));
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	/* Flag the in-flight send so bus teardown can wait for it. */
	dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		ret = -EBUSY;
		goto exit;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);

#ifdef DHD_PCIE_RUNTIMEPM
exit:
#endif
	DHD_GENERAL_LOCK(dhdp, flags);
	dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return ret;
}
3913
3914int BCMFASTPATH
3915dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3916{
3917 int ret;
3918 uint datalen;
3919 void *pktbuf;
3920 dhd_info_t *dhd = DHD_DEV_INFO(net);
3921 dhd_if_t *ifp = NULL;
3922 int ifidx;
3923 unsigned long flags;
3924#ifdef WLMEDIA_HTSF
3925 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3926#else
3927 uint8 htsfdlystat_sz = 0;
3928#endif
3929#ifdef DHD_WMF
3930 struct ether_header *eh;
3931 uint8 *iph;
3932#endif /* DHD_WMF */
3933
3934 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3935
08dfb6c4
RC
3936 /* terence 2017029: Reject in early suspend */
3937 if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) {
3938 dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
3939#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3940 return -ENODEV;
3941#else
3942 return NETDEV_TX_BUSY;
3943#endif
3944 }
3945
ef6a5fee
RC
3946
3947#ifdef PCIE_FULL_DONGLE
3948 DHD_GENERAL_LOCK(&dhd->pub, flags);
3949 dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
3950 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3951#endif /* PCIE_FULL_DONGLE */
3952
3953#ifdef DHD_PCIE_RUNTIMEPM
3954 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3955 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
3956 /* stop the network queue temporarily until resume done */
3957 DHD_GENERAL_LOCK(&dhd->pub, flags);
3958 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3959 dhd_bus_stop_queue(dhd->pub.bus);
3960 }
3961 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3962 dhd_os_busbusy_wake(&dhd->pub);
3963 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3964#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3965 return -ENODEV;
3966#else
3967 return NETDEV_TX_BUSY;
3968#endif
3969 }
3970#endif /* DHD_PCIE_RUNTIMEPM */
3971
3972 DHD_GENERAL_LOCK(&dhd->pub, flags);
3973#ifdef PCIE_FULL_DONGLE
3974 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3975 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3976 dhd_os_busbusy_wake(&dhd->pub);
3977 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3978#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3979 return -ENODEV;
3980#else
3981 return NETDEV_TX_BUSY;
3982#endif
3983 }
3984#endif /* PCIE_FULL_DONGLE */
3985
3986 DHD_OS_WAKE_LOCK(&dhd->pub);
3987 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3988
3989 /* Reject if down */
3990 if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
3991 dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3992 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3993 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3994 netif_stop_queue(net);
3995 /* Send Event when bus down detected during data session */
3996 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3997 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3998 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3999 net_os_send_hang_message(net);
4000 }
4001#ifdef PCIE_FULL_DONGLE
4002 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4003 dhd_os_busbusy_wake(&dhd->pub);
ef6a5fee
RC
4004#endif /* PCIE_FULL_DONGLE */
4005 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4006 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5680b389 4007 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
ef6a5fee
RC
4008#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4009 return -ENODEV;
4010#else
4011 return NETDEV_TX_BUSY;
4012#endif
4013 }
4014
4015 ifp = DHD_DEV_IFP(net);
4016 ifidx = DHD_DEV_IFIDX(net);
4017 BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
4018
4019 if (ifidx == DHD_BAD_IF) {
4020 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
4021 netif_stop_queue(net);
4022#ifdef PCIE_FULL_DONGLE
4023 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4024 dhd_os_busbusy_wake(&dhd->pub);
ef6a5fee
RC
4025#endif /* PCIE_FULL_DONGLE */
4026 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4027 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5680b389 4028 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
ef6a5fee
RC
4029#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4030 return -ENODEV;
4031#else
4032 return NETDEV_TX_BUSY;
4033#endif
4034 }
4035 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4036
4037 ASSERT(ifidx == dhd_net2idx(dhd, net));
4038 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4039
4040 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4041
4042 /* re-align socket buffer if "skb->data" is odd address */
4043 if (((unsigned long)(skb->data)) & 0x1) {
4044 unsigned char *data = skb->data;
4045 uint32 length = skb->len;
4046 PKTPUSH(dhd->pub.osh, skb, 1);
4047 memmove(skb->data, data, length);
4048 PKTSETLEN(dhd->pub.osh, skb, length);
4049 }
4050
4051 datalen = PKTLEN(dhd->pub.osh, skb);
4052
4053 /* Make sure there's enough room for any header */
4054 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4055 struct sk_buff *skb2;
4056
4057 DHD_INFO(("%s: insufficient headroom\n",
4058 dhd_ifname(&dhd->pub, ifidx)));
4059 dhd->pub.tx_realloc++;
4060
4061 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4062 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4063
4064 dev_kfree_skb(skb);
4065 if ((skb = skb2) == NULL) {
4066 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4067 dhd_ifname(&dhd->pub, ifidx)));
4068 ret = -ENOMEM;
4069 goto done;
4070 }
4071 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4072 }
4073
4074 /* Convert to packet */
4075 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4076 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4077 dhd_ifname(&dhd->pub, ifidx)));
4078 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4079 dev_kfree_skb_any(skb);
4080 ret = -ENOMEM;
4081 goto done;
4082 }
4083
4084#if defined(WLMEDIA_HTSF)
4085 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
4086 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
4087 struct ether_header *eh = (struct ether_header *)pktdata;
4088
4089 if (!ETHER_ISMULTI(eh->ether_dhost) &&
4090 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
4091 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
4092 }
4093 }
4094#endif
4095
4096#ifdef DHD_WMF
4097 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
4098 iph = (uint8 *)eh + ETHER_HDR_LEN;
4099
4100 /* WMF processing for multicast packets
4101 * Only IPv4 packets are handled
4102 */
4103 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
4104 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
4105 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
4106#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
4107 void *sdu_clone;
4108 bool ucast_convert = FALSE;
4109#ifdef DHD_UCAST_UPNP
4110 uint32 dest_ip;
4111
4112 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4113 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
4114#endif /* DHD_UCAST_UPNP */
4115#ifdef DHD_IGMP_UCQUERY
4116 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
4117 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
4118 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
4119#endif /* DHD_IGMP_UCQUERY */
4120 if (ucast_convert) {
4121 dhd_sta_t *sta;
4122#ifdef PCIE_FULL_DONGLE
4123 unsigned long flags;
4124#endif
4125 struct list_head snapshot_list;
4126 struct list_head *wmf_ucforward_list;
4127
4128 ret = NETDEV_TX_OK;
4129
4130 /* For non BCM_GMAC3 platform we need a snapshot sta_list to
4131 * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
4132 */
4133 wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
4134
4135 /* Convert upnp/igmp query to unicast for each assoc STA */
4136 list_for_each_entry(sta, wmf_ucforward_list, list) {
4137 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
4138 ret = WMF_NOP;
4139 break;
4140 }
4141 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
4142 }
4143 DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
4144
4145#ifdef PCIE_FULL_DONGLE
4146 DHD_GENERAL_LOCK(&dhd->pub, flags);
4147 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4148 dhd_os_busbusy_wake(&dhd->pub);
4149 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4150#endif /* PCIE_FULL_DONGLE */
4151 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4152 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4153
4154 if (ret == NETDEV_TX_OK)
4155 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
4156
4157 return ret;
4158 } else
4159#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
4160 {
4161 /* There will be no STA info if the packet is coming from LAN host
4162 * Pass as NULL
4163 */
4164 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
4165 switch (ret) {
4166 case WMF_TAKEN:
4167 case WMF_DROP:
4168 /* Either taken by WMF or we should drop it.
4169 * Exiting send path
4170 */
4171#ifdef PCIE_FULL_DONGLE
4172 DHD_GENERAL_LOCK(&dhd->pub, flags);
4173 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4174 dhd_os_busbusy_wake(&dhd->pub);
4175 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4176#endif /* PCIE_FULL_DONGLE */
4177 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4178 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4179 return NETDEV_TX_OK;
4180 default:
4181 /* Continue the transmit path */
4182 break;
4183 }
4184 }
4185 }
4186#endif /* DHD_WMF */
4187#ifdef DHD_PSTA
4188 /* PSR related packet proto manipulation should be done in DHD
4189 * since dongle doesn't have complete payload
4190 */
4191 if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
4192 ifidx, &pktbuf, TRUE) < 0)) {
4193 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4194 dhd_ifname(&dhd->pub, ifidx)));
4195 }
4196#endif /* DHD_PSTA */
4197
4198#ifdef DHDTCPACK_SUPPRESS
4199 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4200 /* If this packet has been hold or got freed, just return */
4201 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4202 ret = 0;
4203 goto done;
4204 }
4205 } else {
4206 /* If this packet has replaced another packet and got freed, just return */
4207 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4208 ret = 0;
4209 goto done;
4210 }
4211 }
4212#endif /* DHDTCPACK_SUPPRESS */
4213
4214 /* no segmented SKB support (Kernel-3.18.y) */
4215 if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
4216 PKTSETLINK(skb, NULL);
4217 }
4218
4219 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4220
4221done:
4222 if (ret) {
4223 ifp->stats.tx_dropped++;
4224 dhd->pub.tx_dropped++;
4225 } else {
4226
4227#ifdef PROP_TXSTATUS
4228 /* tx_packets counter can counted only when wlfc is disabled */
4229 if (!dhd_wlfc_is_supported(&dhd->pub))
4230#endif
4231 {
4232 dhd->pub.tx_packets++;
4233 ifp->stats.tx_packets++;
4234 ifp->stats.tx_bytes += datalen;
4235 }
4236 }
4237
4238#ifdef PCIE_FULL_DONGLE
4239 DHD_GENERAL_LOCK(&dhd->pub, flags);
4240 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4241 dhd_os_busbusy_wake(&dhd->pub);
4242 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4243#endif /* PCIE_FULL_DONGLE */
4244
4245 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4246 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4247 BUZZZ_LOG(START_XMIT_END, 0);
4248
4249 /* Return ok: we always eat the packet */
4250#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4251 return 0;
4252#else
4253 return NETDEV_TX_OK;
4254#endif
4255}
4256
4257
4258void
4259dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4260{
4261 struct net_device *net;
4262 dhd_info_t *dhd = dhdp->info;
4263 int i;
4264
4265 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4266
4267 ASSERT(dhd);
4268
4269#ifdef DHD_LOSSLESS_ROAMING
4270 /* block flowcontrol during roaming */
4271 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4272 return;
4273 }
4274#endif
4275
4276 if (ifidx == ALL_INTERFACES) {
4277 /* Flow control on all active interfaces */
4278 dhdp->txoff = state;
4279 for (i = 0; i < DHD_MAX_IFS; i++) {
4280 if (dhd->iflist[i]) {
4281 net = dhd->iflist[i]->net;
4282 if (state == ON)
4283 netif_stop_queue(net);
4284 else
4285 netif_wake_queue(net);
4286 }
4287 }
4288 } else {
4289 if (dhd->iflist[ifidx]) {
4290 net = dhd->iflist[ifidx]->net;
4291 if (state == ON)
4292 netif_stop_queue(net);
4293 else
4294 netif_wake_queue(net);
4295 }
4296 }
4297}
4298
4299
4300#ifdef DHD_WMF
4301bool
4302dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
4303{
4304 dhd_info_t *dhd = dhdp->info;
4305
4306 return dhd->rxthread_enabled;
4307}
4308#endif /* DHD_WMF */
4309
/**
 * dhd_rx_frame - RX-path entry: deliver dongle frames to the Linux stack.
 *
 * Walks the chain of up to @numpkt packets starting at @pktbuf (linked via
 * PKTNEXT), applying per-interface filtering (L2 filter, WMF, PSTA, intra-BSS
 * forwarding), then hands data frames to the network stack and routes
 * ETHER_TYPE_BRCM event frames to dhd_wl_host_event(). Packets that fail a
 * check are freed here and the loop continues with the next one.
 *
 * @dhdp:   public DHD context
 * @ifidx:  interface index the frames arrived on (may be rewritten by the
 *          event handler for event frames)
 * @pktbuf: head of the packet chain (OSL packet handles)
 * @numpkt: number of packets in the chain
 * @chan:   channel the frames were received on (unused here; passed through
 *          by the bus layer)
 */
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct sk_buff *skb;
	uchar *eth;
	uint len;
	void *data, *pnext = NULL;
	int i;
	dhd_if_t *ifp;
	wl_event_msg_t event;
	int tout_rx = 0;	/* wake-lock timeout to arm for data traffic */
	int tout_ctrl = 0;	/* wake-lock timeout to arm for event traffic */
	void *skbhead = NULL;	/* chain accumulated for the rxf thread */
	void *skbprev = NULL;
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
	char *dump_data;
	uint16 protocol;
	char *ifname;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
		struct ether_header *eh;

		/* Detach the current packet from the chain before processing */
		pnext = PKTNEXT(dhdp->osh, pktbuf);
		PKTSETNEXT(dhdp->osh, pktbuf, NULL);

		ifp = dhd->iflist[ifidx];
		if (ifp == NULL) {
			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
				__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}

		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);

		/* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#else
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#endif /* PROP_TXSTATUS_VSDB */
		{
			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
			__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}


#ifdef PROP_TXSTATUS
		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
			/* WLFC may send header only packet when
			there is an urgent message but no packet to
			piggy-back on
			*/
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}
#endif
#ifdef DHD_L2_FILTER
		/* If block_ping is enabled drop the ping packet */
		if (ifp->block_ping) {
			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		/* Gratuitous ARP filtering only applies in STA role */
		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
			if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		/* Proxy-ARP handling only applies in AP role */
		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);

			/* Drop the packets if l2 filter has processed it already
			 * otherwise continue with the normal path
			 */
			if (ret == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, TRUE);
				continue;
			}
		}
#endif /* DHD_L2_FILTER */
#ifdef DHD_WMF
		/* WMF processing for multicast packets */
		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
			dhd_sta_t *sta;
			int ret;

			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
			switch (ret) {
				case WMF_TAKEN:
					/* The packet is taken by WMF. Continue to next iteration */
					continue;
				case WMF_DROP:
					/* Packet DROP decision by WMF. Toss it */
					DHD_ERROR(("%s: WMF decides to drop packet\n",
						__FUNCTION__));
					PKTCFREE(dhdp->osh, pktbuf, FALSE);
					continue;
				default:
					/* Continue the transmit path */
					break;
			}
		}
#endif /* DHD_WMF */

#ifdef DHDTCPACK_SUPPRESS
		dhd_tcpdata_info_get(dhdp, pktbuf);
#endif
		skb = PKTTONATIVE(dhdp->osh, pktbuf);

		ASSERT(ifp);
		skb->dev = ifp->net;

#ifdef DHD_PSTA
		if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
			DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
				dhd_ifname(dhdp, ifidx)));
		}
#endif /* DHD_PSTA */

#ifdef PCIE_FULL_DONGLE
		/* Intra-BSS forwarding (unless ap_isolate): unicast frames for
		 * an associated STA are sent back out over the air instead of
		 * going up the stack; multicast is duplicated so it goes both
		 * over the air and up the stack.
		 */
		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
			(!ifp->ap_isolate)) {
			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
			if (ETHER_ISUCAST(eh->ether_dhost)) {
				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
					dhd_sendpkt(dhdp, ifidx, pktbuf);
					continue;
				}
			} else {
				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
				if (npktbuf)
					dhd_sendpkt(dhdp, ifidx, npktbuf);
			}
		}
#endif /* PCIE_FULL_DONGLE */

		/* Get the protocol, maintain skb around eth_type_trans()
		 * The main reason for this hack is for the limitation of
		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
		 * coping of the packet coming from the network stack to add
		 * BDC, Hardware header etc, during network interface registration
		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
		 * for BDC, Hardware header etc. and not just the ETH_HLEN
		 */
		eth = skb->data;
		len = skb->len;

#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
		dump_data = skb->data;
		protocol = (dump_data[12] << 8) | dump_data[13];
		ifname = skb->dev ? skb->dev->name : "N/A";
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
#ifdef DHD_8021X_DUMP
		if (protocol == ETHER_TYPE_802_1X) {
			dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
		}
#endif /* DHD_8021X_DUMP */
#ifdef DHD_DHCP_DUMP
		/* Best-effort DHCP message-type logging; assumes the option
		 * carrying the message type sits at a fixed offset (249) past
		 * the UDP header — true for typical client packets, but not
		 * guaranteed by the protocol. NOTE(review): no bounds check
		 * against skb->len before reading these offsets — confirm the
		 * bus layer guarantees minimum frame length.
		 */
		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;

			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
			dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
			if (source_port == 0x0044 || dest_port == 0x0044) {
				dump_hex = (dump_data[udp_port_pos+249] << 8) |
					dump_data[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
				}
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
#if defined(DHD_RX_DUMP)
		DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
		if (protocol != ETHER_TYPE_BRCM) {
			if (dump_data[0] == 0xFF) {
				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));

				if ((dump_data[12] == 8) &&
					(dump_data[13] == 6)) {
					DHD_ERROR(("%s: ARP %d\n",
						__FUNCTION__, dump_data[0x15]));
				}
			} else if (dump_data[0] & 1) {
				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
					__FUNCTION__, MAC2STRDBG(dump_data)));
			}
#ifdef DHD_RX_FULL_DUMP
			{
				int k;
				for (k = 0; k < skb->len; k++) {
					printk("%02X ", dump_data[k]);
					if ((k & 15) == 15)
						printk("\n");
				}
				printk("\n");
			}
#endif /* DHD_RX_FULL_DUMP */
		}
#endif /* DHD_RX_DUMP */

		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST) {
			dhd->pub.rx_multicast++;
			ifp->stats.multicast++;
		}

		/* Undo eth_type_trans()'s pull so the data/len bookkeeping
		 * below starts from the full Ethernet frame again.
		 */
		skb->data = eth;
		skb->len = len;

#ifdef WLMEDIA_HTSF
		dhd_htsf_addrxts(dhdp, pktbuf);
#endif
		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Process special event packets and then discard them */
		memset(&event, 0, sizeof(event));
		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
			/* May rewrite ifidx to the event's target interface */
			dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
			skb_mac_header(skb),
#else
			skb->mac.raw,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
			&event,
			&data);

			wl_event_to_host_order(&event);
			if (!tout_ctrl)
				tout_ctrl = DHD_PACKET_TIMEOUT_MS;

#if defined(PNO_SUPPORT)
			if (event.event_type == WLC_E_PFN_NET_FOUND) {
				/* enforce custom wake lock to garantee that Kernel not suspended */
				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
			}
#endif /* PNO_SUPPORT */

#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
#else
			PKTFREE(dhdp->osh, pktbuf, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
		} else {
			tout_rx = DHD_PACKET_TIMEOUT_MS;

#ifdef PROP_TXSTATUS
			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
#endif /* PROP_TXSTATUS */
		}

		/* Re-fetch ifp: ifidx may have been changed by the event handler */
		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
		ifp = dhd->iflist[ifidx];

		if (ifp->net)
			ifp->net->last_rx = jiffies;

		/* Event frames are not counted as network traffic */
		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
			dhdp->dstats.rx_bytes += skb->len;
			dhdp->rx_packets++; /* Local count */
			ifp->stats.rx_bytes += skb->len;
			ifp->stats.rx_packets++;
		}

		if (in_interrupt()) {
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
			DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */
			DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
		} else {
			if (dhd->rxthread_enabled) {
				/* Defer to the rxf thread: accumulate a chain
				 * and hand it off after the loop.
				 */
				if (!skbhead)
					skbhead = skb;
				else
					PKTSETNEXT(dhdp->osh, skbprev, skb);
				skbprev = skb;
			} else {

				/* If the receive is not processed inside an ISR,
				 * the softirqd must be woken explicitly to service
				 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
				 * by netif_rx_ni(), but in earlier kernels, we need
				 * to do it manually.
				 */
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);

#if defined(DHD_LB) && defined(DHD_LB_RXP)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_receive_skb(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx_ni(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
				ulong flags;
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */
			}
		}
	}

	if (dhd->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	/* Arm wake-lock timeouts so the host stays awake long enough to
	 * process what was just delivered.
	 */
	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
	DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
}
4667
/*
 * Per-OS event hook. On Linux the dongle events are dispatched from the
 * RX path (dhd_wl_host_event), so this is intentionally a no-op.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Nothing to do on Linux */
}
4674
4675void
4676dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4677{
4678 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4679 struct ether_header *eh;
4680 uint16 type;
4681
4682 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4683
4684 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4685 type = ntoh16(eh->ether_type);
4686
4687 if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
4688 atomic_dec(&dhd->pend_8021x_cnt);
4689
4690#ifdef PROP_TXSTATUS
4691 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4692 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
4693 uint datalen = PKTLEN(dhd->pub.osh, txp);
4694 if (ifp != NULL) {
4695 if (success) {
4696 dhd->pub.tx_packets++;
4697 ifp->stats.tx_packets++;
4698 ifp->stats.tx_bytes += datalen;
4699 } else {
4700 ifp->stats.tx_dropped++;
4701 }
4702 }
4703 }
4704#endif
4705}
4706
4707static struct net_device_stats *
4708dhd_get_stats(struct net_device *net)
4709{
4710 dhd_info_t *dhd = DHD_DEV_INFO(net);
4711 dhd_if_t *ifp;
4712 int ifidx;
4713
4714 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4715
4716 ifidx = dhd_net2idx(dhd, net);
4717 if (ifidx == DHD_BAD_IF) {
4718 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4719
4720 memset(&net->stats, 0, sizeof(net->stats));
4721 return &net->stats;
4722 }
4723
4724 ifp = dhd->iflist[ifidx];
4725 ASSERT(dhd && ifp);
4726
4727 if (dhd->pub.up) {
4728 /* Use the protocol to get dongle stats */
4729 dhd_prot_dstats(&dhd->pub);
4730 }
4731 return &ifp->stats;
4732}
4733
/*
 * Dedicated watchdog thread (used when dhd_watchdog_prio >= 0).
 *
 * Waits on tsk->sema (kicked from dhd_watchdog()), runs the bus watchdog
 * under a wake lock, bumps the tick counter and re-arms the watchdog timer
 * compensating for the time the tick itself took. Exits when the task is
 * terminated or the semaphore wait is interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
			SMP_RD_BARRIER_DEPENDS();
			/* NOTE(review): breaking here leaves the WD wake lock
			 * held; presumably released during teardown — confirm.
			 */
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				dhd_bus_watchdog(&dhd->pub);

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
#ifdef DHD_L2_FILTER
				dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog, shortening the next
				 * period by however long this tick took.
				 */
				if (dhd->wd_timer_valid) {
					mod_timer(&dhd->timer,
					    jiffies +
					    msecs_to_jiffies(dhd_watchdog_ms) -
					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		} else {
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4790
/*
 * Watchdog timer callback.
 *
 * Skipped entirely during dongle reset or while a bus suspend is in
 * progress. If a dedicated watchdog thread exists, it is kicked via its
 * semaphore and all the work happens there; otherwise the bus watchdog
 * runs inline here and the timer is re-armed.
 */
static void dhd_watchdog(ulong data)
{
	dhd_info_t *dhd = (dhd_info_t *)data;
	unsigned long flags;

	if (dhd->pub.dongle_reset) {
		return;
	}

	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
		return;
	}

	/* Delegate to the watchdog thread when one is running */
	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
		up(&dhd->thr_wdt_ctl.sema);
		return;
	}

	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
	/* Call the bus module watchdog */
	dhd_bus_watchdog(&dhd->pub);
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	/* Count the tick for reference */
	dhd->pub.tickcnt++;

#ifdef DHD_L2_FILTER
	dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
	/* Reschedule the watchdog */
	if (dhd->wd_timer_valid)
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
}
4826
4827#ifdef DHD_PCIE_RUNTIMEPM
/*
 * Runtime-PM state machine thread (PCIe).
 *
 * Woken via tsk->sema from the dhd_runtimepm timer; drives
 * dhd_runtimepm_state() while the interface is up, then re-arms the
 * runtime-PM timer, compensating for the time this iteration took.
 * Exits on task termination or interrupted semaphore wait.
 */
static int
dhd_rpm_state_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				if (dhd->pub.up) {
					dhd_runtimepm_state(&dhd->pub);
				}

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the runtime-PM timer, shortening
				 * the next period by the time just spent.
				 */
				if (dhd->rpm_timer_valid) {
					mod_timer(&dhd->rpm_timer,
						jiffies +
						msecs_to_jiffies(dhd_runtimepm_ms) -
						min(msecs_to_jiffies(dhd_runtimepm_ms),
							time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4871
4872static void dhd_runtimepm(ulong data)
4873{
4874 dhd_info_t *dhd = (dhd_info_t *)data;
4875
4876 if (dhd->pub.dongle_reset) {
4877 return;
4878 }
4879
4880 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
4881 up(&dhd->thr_rpm_ctl.sema);
4882 return;
4883 }
4884}
4885
4886void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
4887{
4888 dhd_os_runtimepm_timer(dhdp, 0);
4889 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
4890 DHD_ERROR(("DHD Runtime PM Disabled \n"));
4891}
4892
4893void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
4894{
4895 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
4896 DHD_ERROR(("DHD Runtime PM Enabled \n"));
4897}
4898
4899#endif /* DHD_PCIE_RUNTIMEPM */
4900
4901
4902#ifdef ENABLE_ADAPTIVE_SCHED
4903static void
4904dhd_sched_policy(int prio)
4905{
4906 struct sched_param param;
4907 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
4908 param.sched_priority = 0;
4909 setScheduler(current, SCHED_NORMAL, &param);
4910 } else {
4911 if (get_scheduler_policy(current) != SCHED_FIFO) {
4912 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
4913 setScheduler(current, SCHED_FIFO, &param);
4914 }
4915 }
4916}
4917#endif /* ENABLE_ADAPTIVE_SCHED */
4918#ifdef DEBUG_CPU_FREQ
4919static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
4920{
4921 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
4922 struct cpufreq_freqs *freq = data;
4923 if (dhd) {
4924 if (!dhd->new_freq)
4925 goto exit;
4926 if (val == CPUFREQ_POSTCHANGE) {
4927 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
4928 freq->new, freq->cpu));
4929 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
4930 }
4931 }
4932exit:
4933 return 0;
4934}
4935#endif /* DEBUG_CPU_FREQ */
/*
 * Dedicated DPC thread (used when dhd_dpc_prio >= 0 instead of the tasklet).
 *
 * Waits on a binary semaphore kicked from dhd_sched_dpc(); while the bus is
 * up it drains dhd_bus_dpc() to completion with the watchdog timer extended,
 * then drops the wake lock taken by the scheduler. If the bus is down it
 * performs a clean bus stop instead. Honors a one-shot CPU-affinity request
 * from the per-device config (conf->dpc_cpucore).
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_DPC_CPUCORE
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#endif
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		/* One-shot CPU pinning requested via module config; the value
		 * is reset to -1 so it is applied only once.
		 */
		if (dhd->pub.conf->dpc_cpucore >= 0) {
			printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
			dhd->pub.conf->dpc_cpucore = -1;
		}
		if (!binary_sema_down(tsk)) {
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_dpc_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
#ifdef DEBUG_DPC_THREAD_WATCHDOG
				int resched_cnt = 0;
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
#ifdef DEBUG_DPC_THREAD_WATCHDOG
					resched_cnt++;
					if (resched_cnt > MAX_RESCHED_CNT) {
						DHD_INFO(("%s Calling msleep to"
							"let other processes run. \n",
							__FUNCTION__));
						dhd->pub.dhd_bug_on = true;
						resched_cnt = 0;
						OSL_SLEEP(1);
					}
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Pairs with the wake lock taken in dhd_sched_dpc() */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			} else {
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
5007
/*
 * RX-frame delivery thread (used when rxthread_enabled).
 *
 * Woken by dhd_sched_rxf() after packets are queued; dequeues chains from
 * the rxf queue and pushes each skb into the network stack via
 * netif_rx_ni()/netif_rx(). Drops the wake lock taken by the scheduler
 * after each batch. Honors a one-shot CPU-affinity request from the
 * per-device config (conf->rxf_cpucore).
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */

#ifdef CUSTOM_RXF_CPUCORE
	/* change rxf thread to other cpu core */
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_RXF_CPUCORE));
#endif

	/* signal: thread has started */
	complete(&tsk->completed);
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		/* One-shot CPU pinning requested via module config; reset to
		 * -1 so it is applied only once.
		 */
		if (dhd->pub.conf->rxf_cpucore >= 0) {
			printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
			set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
			dhd->pub.conf->rxf_cpucore = -1;
		}
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Walk the chain, detaching each skb before handing
			 * it to the stack.
			 */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Periodically yield so other threads are not starved */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			/* Pairs with the wake lock taken in dhd_sched_rxf() */
			DHD_OS_WAKE_UNLOCK(pub);
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
5098
5099#ifdef BCMPCIE
5100void dhd_dpc_enable(dhd_pub_t *dhdp)
5101{
5102 dhd_info_t *dhd;
5103
5104 if (!dhdp || !dhdp->info)
5105 return;
5106 dhd = dhdp->info;
5107
5108#ifdef DHD_LB
5109#ifdef DHD_LB_RXP
5110 __skb_queue_head_init(&dhd->rx_pend_queue);
5111#endif /* DHD_LB_RXP */
5112#ifdef DHD_LB_TXC
5113 if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
5114 tasklet_enable(&dhd->tx_compl_tasklet);
5115#endif /* DHD_LB_TXC */
5116#ifdef DHD_LB_RXC
5117 if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
5118 tasklet_enable(&dhd->rx_compl_tasklet);
5119#endif /* DHD_LB_RXC */
5120#endif /* DHD_LB */
5121 if (atomic_read(&dhd->tasklet.count) == 1)
5122 tasklet_enable(&dhd->tasklet);
5123}
5124#endif /* BCMPCIE */
5125
5126
#ifdef BCMPCIE
/*
 * Disable and kill the DPC tasklet (when no DPC thread is in use) and
 * all load-balancing tasklets.  No-op on NULL dhdp/info.
 */
void
dhd_dpc_kill(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (dhdp == NULL)
		return;

	dhd = dhdp->info;
	if (dhd == NULL)
		return;

	/* A negative pid means the tasklet path is in use; kill it */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		tasklet_disable(&dhd->tasklet);
		tasklet_kill(&dhd->tasklet);
		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
	}
#if defined(DHD_LB)
#ifdef DHD_LB_RXP
	__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
	/* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
	tasklet_disable(&dhd->tx_compl_tasklet);
	tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_disable(&dhd->rx_compl_tasklet);
	tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
}
#endif /* BCMPCIE */
5164
5165static void
5166dhd_dpc(ulong data)
5167{
5168 dhd_info_t *dhd;
5169
5170 dhd = (dhd_info_t *)data;
5171
5172 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
5173 * down below , wake lock is set,
5174 * the tasklet is initialized in dhd_attach()
5175 */
5176 /* Call bus dpc unless it indicated down (then clean stop) */
5177 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5178 if (dhd_bus_dpc(dhd->pub.bus)) {
5179 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5180 tasklet_schedule(&dhd->tasklet);
5181 }
5182 } else {
5183 dhd_bus_stop(dhd->pub.bus, TRUE);
5184 }
5185}
5186
5187void
5188dhd_sched_dpc(dhd_pub_t *dhdp)
5189{
5190 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5191
5192 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5193 DHD_OS_WAKE_LOCK(dhdp);
5194 /* If the semaphore does not get up,
5195 * wake unlock should be done here
5196 */
5197 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5198 DHD_OS_WAKE_UNLOCK(dhdp);
5199 }
5200 return;
5201 } else {
5202 tasklet_schedule(&dhd->tasklet);
5203 }
5204}
5205
5206static void
5207dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5208{
5209 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5210#ifdef RXF_DEQUEUE_ON_BUSY
5211 int ret = BCME_OK;
5212 int retry = 2;
5213#endif /* RXF_DEQUEUE_ON_BUSY */
5214
5215 DHD_OS_WAKE_LOCK(dhdp);
5216
5217 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5218#ifdef RXF_DEQUEUE_ON_BUSY
5219 do {
5220 ret = dhd_rxf_enqueue(dhdp, skb);
5221 if (ret == BCME_OK || ret == BCME_ERROR)
5222 break;
5223 else
5224 OSL_SLEEP(50); /* waiting for dequeueing */
5225 } while (retry-- > 0);
5226
5227 if (retry <= 0 && ret == BCME_BUSY) {
5228 void *skbp = skb;
5229
5230 while (skbp) {
5231 void *skbnext = PKTNEXT(dhdp->osh, skbp);
5232 PKTSETNEXT(dhdp->osh, skbp, NULL);
5233 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5234 __FUNCTION__, __LINE__);
5235 netif_rx_ni(skbp);
5236 skbp = skbnext;
5237 }
5238 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
5239 } else {
5240 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5241 up(&dhd->thr_rxf_ctl.sema);
5242 }
5243 }
5244#else /* RXF_DEQUEUE_ON_BUSY */
5245 do {
5246 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5247 break;
5248 } while (1);
5249 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5250 up(&dhd->thr_rxf_ctl.sema);
5251 }
5252 return;
5253#endif /* RXF_DEQUEUE_ON_BUSY */
5254}
5255
5256#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5257#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5258
#ifdef TOE
/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
static int
dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
{
	wl_ioctl_t ioc;
	char buf[32];
	int ret;

	memset(&ioc, 0, sizeof(ioc));

	ioc.cmd = WLC_GET_VAR;
	ioc.buf = buf;
	ioc.len = (uint)sizeof(buf);
	ioc.set = FALSE;

	/* GET_VAR convention: buf carries the iovar name in, the value out */
	strncpy(buf, "toe_ol", sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
		/* Check for older dongle image that doesn't support toe_ol */
		if (ret == -EIO) {
			DHD_ERROR(("%s: toe not supported by device\n",
				dhd_ifname(&dhd->pub, ifidx)));
			return -EOPNOTSUPP;
		}

		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
		return ret;
	}

	/* First 4 bytes of the response hold the enable bitmap */
	memcpy(toe_ol, buf, sizeof(uint32));
	return 0;
}
5292
/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
static int
dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
{
	wl_ioctl_t ioc;
	char buf[32];
	int toe, ret;

	memset(&ioc, 0, sizeof(ioc));

	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = (uint)sizeof(buf);
	ioc.set = TRUE;

	/* Set toe_ol as requested */

	/* SET_VAR layout: NUL-terminated name followed by the 32-bit value */
	strncpy(buf, "toe_ol", sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));

	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
			dhd_ifname(&dhd->pub, ifidx), ret));
		return ret;
	}

	/* Enable toe globally only if any components are enabled. */

	toe = (toe_ol != 0);

	strcpy(buf, "toe");
	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));

	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
		return ret;
	}

	return 0;
}
#endif /* TOE */
5335
#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
/*
 * Raise the firmware's per-SCB probe limit to NUM_SCB_MAX_PROBE.
 * Skipped entirely in HOSTAP mode.  Reads the current scb_probe
 * settings first so only scb_max_probe is changed.
 */
void dhd_set_scb_probe(dhd_pub_t *dhd)
{
	int ret = 0;
	wl_scb_probe_t scb_probe;
	char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];

	memset(&scb_probe, 0, sizeof(wl_scb_probe_t));

	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
		return;
	}

	bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));

	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
		/* Fix: bail out rather than program the firmware from a
		 * buffer the failed GET never filled in.
		 */
		return;
	}

	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));

	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;

	bcm_mkiovar("scb_probe", (char *)&scb_probe,
		sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
		return;
	}
}
#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5367
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/* ethtool -i handler: report driver name "wl" and the DHD driver version */
static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	snprintf(info->driver, sizeof(info->driver), "wl");
	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
}

/* Minimal ethtool ops table registered on the net_device */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
5382
5383
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * Legacy SIOCETHTOOL handler (called from dhd_ioctl_entry).
 * uaddr is the userspace ethtool request; the first word selects the
 * sub-command.  Supports GDRVINFO and, with TOE, the RX/TX checksum
 * offload get/set commands.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5495
/*
 * Decide whether an ioctl error indicates a hung dongle and, if so,
 * record a hang reason and send the HANG event up to the framework.
 * Returns TRUE when a HANG was sent, FALSE otherwise (including when
 * the interface is down or the driver is unloading).
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if !defined(BCMPCIE)
	/* Negative DPC pid means the driver is being torn down */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif

	/* Timeouts, remote-IO errors, or an unexpected bus-down all count as hang */
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
#ifdef BCMPCIE
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d d3acke=%d e=%d s=%d\n",
			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
#else
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
#endif /* BCMPCIE */
		/* Record a reason only if none was set earlier */
		if (dhdp->hang_reason == 0) {
			if (dhdp->dongle_trap_occured) {
				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
			} else if (dhdp->d3ackcnt_timeout) {
				dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
			} else {
				dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
			}
		}
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
5542
/*
 * Core ioctl dispatcher shared by the wext/cfg80211/private-cmd paths.
 * Routes DHD-local ioctls (DHD_IOCTL_MAGIC) to dhd_ioctl(), everything
 * else to the dongle via dhd_wl_ioctl().  May trigger a deferred
 * firmware download when the bus is not yet up (allow_delay_fwdl).
 * Always runs dhd_check_hang() on the result before returning.
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
		if (allow_delay_fwdl) {
			/* Lazy firmware download: bring the bus up on demand */
			int ret = dhd_bus_start(pub);
			if (ret != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
		} else {
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/* short cut wl ioctl calls here */
		if (strcmp("htsf", data_buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				/* Reset all latency-tracking state */
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* rpc_* iovars are only meaningful with full-dongle aggregation */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}

#ifdef DHD_DEBUG
	if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
		if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
			/* Print IOVAR Information */
			DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
				__FUNCTION__, (char *)data_buf, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				prhex(NULL, data_buf + strlen(data_buf) + 1,
					buflen - strlen(data_buf) - 1);
			}
		} else {
			/* Print IOCTL Information */
			DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
				__FUNCTION__, ioc->cmd, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				prhex(NULL, data_buf, buflen);
			}
		}
	}
#endif /* DHD_DEBUG */

	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
	/* Classify timeouts/bus-down as hang and notify the framework */
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
5689
/*
 * net_device ioctl entry point.  Dispatches wireless extensions,
 * SIOCETHTOOL and Android private commands, then handles
 * SIOCDEVPRIVATE (wl/dhd ioctls): copies the wl_ioctl_t header (with a
 * 32-bit-compat variant), enforces CAP_NET_ADMIN, bounces the payload
 * through a kernel buffer and forwards to dhd_ioctl_process().
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int ifidx;
	int ret;
	void *local_buf = NULL;
	u16 buflen = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		ret = BCME_NOTUP;
		goto exit;
	}

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		ret = BCME_DONGLE_DOWN;
		goto exit;
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		goto exit;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		goto exit;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	/* Android private commands arrive on SIOCDEVPRIVATE+1 */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		goto exit;
	}

	if (cmd != SIOCDEVPRIVATE) {
		ret = -EOPNOTSUPP;
		goto exit;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
	if (in_compat_syscall())
#else
	if (is_compat_task())
#endif
	{
		/* 32-bit userspace on 64-bit kernel: translate the compat layout */
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}
		ioc.cmd = compat_ioc.cmd;
		ioc.buf = (uint64 *)compat_ioc.buf;
		ioc.len = compat_ioc.len;
		ioc.set = compat_ioc.set;
		ioc.used = compat_ioc.used;
		ioc.needed = compat_ioc.needed;
		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}

		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	}

	if (!capable(CAP_NET_ADMIN)) {
		ret = BCME_EPERM;
		goto done;
	}

	if (ioc.len > 0) {
		/* +1 so the payload can always be NUL-terminated below */
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			ret = BCME_NOMEM;
			goto done;
		}

		/* Drop the perimeter lock around the (possibly faulting) copy */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			ret = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		*(char *)(local_buf + buflen) = '\0';
	}

	ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	if (!ret && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			ret = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

exit:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(ret);
}
5839
5840
5841#ifdef FIX_CPU_MIN_CLOCK
5842static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
5843{
5844 if (dhd) {
5845#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5846 mutex_init(&dhd->cpufreq_fix);
5847#endif
5848 dhd->cpufreq_fix_status = FALSE;
5849 }
5850 return 0;
5851}
5852
5853static void dhd_fix_cpu_freq(dhd_info_t *dhd)
5854{
5855#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5856 mutex_lock(&dhd->cpufreq_fix);
5857#endif
5858 if (dhd && !dhd->cpufreq_fix_status) {
5859 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
5860#ifdef FIX_BUS_MIN_CLOCK
5861 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
5862#endif /* FIX_BUS_MIN_CLOCK */
5863 DHD_ERROR(("pm_qos_add_requests called\n"));
5864
5865 dhd->cpufreq_fix_status = TRUE;
5866 }
5867#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5868 mutex_unlock(&dhd->cpufreq_fix);
5869#endif
5870}
5871
5872static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
5873{
5874#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5875 mutex_lock(&dhd ->cpufreq_fix);
5876#endif
5877 if (dhd && dhd->cpufreq_fix_status != TRUE) {
5878#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5879 mutex_unlock(&dhd->cpufreq_fix);
5880#endif
5881 return;
5882 }
5883
5884 pm_qos_remove_request(&dhd->dhd_cpu_qos);
5885#ifdef FIX_BUS_MIN_CLOCK
5886 pm_qos_remove_request(&dhd->dhd_bus_qos);
5887#endif /* FIX_BUS_MIN_CLOCK */
5888 DHD_ERROR(("pm_qos_add_requests called\n"));
5889
5890 dhd->cpufreq_fix_status = FALSE;
5891#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5892 mutex_unlock(&dhd->cpufreq_fix);
5893#endif
5894}
5895#endif /* FIX_CPU_MIN_CLOCK */
5896
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/*
 * Toggle firmware deepsleep.
 * flag == 1: disable MPC and enable the "deepsleep" iovar.
 * flag == 0: disable deepsleep (retrying up to MAX_TRY_CNT times,
 *            reading the iovar back until it reports 0) and re-enable MPC.
 * Always returns 0; ioctl failures are only logged.
 */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhdp = &dhd->pub;

	switch (flag) {
		case 1 : /* Deepsleep on */
			DHD_ERROR(("dhd_deepsleep: ON\n"));
			/* give some time to sysioc_work before deepsleep */
			OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
			/* Disable MPC */
			powervar = 0;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);

			/* Enable Deepsleep */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;

		case 0: /* Deepsleep Off */
			DHD_ERROR(("dhd_deepsleep: OFF\n"));

			/* Disable Deepsleep */
			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
				/* SET deepsleep = 0, then GET it back to verify */
				powervar = 0;
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0);

				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0)) < 0) {
					DHD_ERROR(("the error of dhd deepsleep status"
						" ret value :%d\n", ret));
				} else {
					/* GET response: first int of iovbuf is the value */
					if (!(*(int *)iovbuf)) {
						DHD_ERROR(("deepsleep mode is 0,"
							" count: %d\n", cnt));
						break;
					}
				}
			}

			/* Enable MPC */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;
	}

	return 0;
}
5968
/*
 * net_device .ndo_stop handler (ifconfig down).  Stops TX, tears down
 * cfg80211 virtual interfaces and notifiers for the primary interface,
 * stops the protocol module, and for non-built-in (Android) loads
 * powers the chip off and destroys the wake lock.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	/* Already down: skip straight to the power-off/cleanup tail */
	if (dhd->pub.up == 0) {
		goto exit;
	}

	dhd_if_flush_sta(DHD_DEV_IFP(net));

	/* Disable Runtime PM before interface down */
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(NULL);

		ifp = dhd->iflist[0];
		ASSERT(ifp && ifp->net);
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

#ifdef WL_CFG80211_P2P_DEV_IF
				wl_cfg80211_del_p2p_wdev();
#endif /* WL_CFG80211_P2P_DEV_IF */

				dhd_net_if_lock_local(dhd);
				/* ifidx 0 is the primary interface; remove the rest */
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}

#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
#if 0
			// terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
			cancel_work_sync(dhd->dhd_deferred_wq);
#endif
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
		}

#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
		/* Tear down NAPI if it was attached to this netdev */
		if (ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB && DHD_LB_RXP */

	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	/* Non-built-in load: power the chip off; otherwise allow deepsleep */
	if (ifidx == 0 && !dhd_download_fw_on_driverload) {
		wl_android_wifi_off(net, TRUE);
#ifdef WL_EXT_IAPSTA
		wl_android_ext_dettach_netdev();
#endif
	} else {
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}
	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

#ifdef BCMDBGFS
	dhd_dbg_remove();
#endif

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}
	printf("%s: Exit\n", __FUNCTION__);

	return 0;
}
6116
6117#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
6118extern bool g_first_broadcast_scan;
6119#endif
6120
#ifdef WL11U
/*
 * Enable 802.11u interworking in the firmware and, on success, set the
 * basic WNM capabilities needed for Hotspot 2.0 Release 2.
 * Returns the last iovar status (BCME_OK on full success).
 */
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 enable = true;
	int ret = BCME_OK;

	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (ret < 0) {
		/* Fix: corrected "enableing" typo in the log message */
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	if (ret == BCME_OK) {
		/* basic capabilities for HS20 REL2 */
		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
		if (ret < 0) {
			DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
		}
	}

	return ret;
}
#endif /* WL11u */
6147
6148static int
6149dhd_open(struct net_device *net)
6150{
6151 dhd_info_t *dhd = DHD_DEV_INFO(net);
6152#ifdef TOE
6153 uint32 toe_ol;
6154#endif
6155#ifdef BCM_FD_AGGR
6156 char iovbuf[WLC_IOCTL_SMLEN];
6157 dbus_config_t config;
6158 uint32 agglimit = 0;
6159 uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
6160#endif /* BCM_FD_AGGR */
6161 int ifidx;
6162 int32 ret = 0;
08dfb6c4
RC
6163#if defined(OOB_INTR_ONLY)
6164 uint32 bus_type = -1;
6165 uint32 bus_num = -1;
6166 uint32 slot_num = -1;
6167 wifi_adapter_info_t *adapter = NULL;
6168#endif
ef6a5fee
RC
6169
6170 if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
6171 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
6172 return -1;
6173 }
6174
6175 printf("%s: Enter %p\n", __FUNCTION__, net);
6176#if defined(MULTIPLE_SUPPLICANT)
6177#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6178 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6179 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
6180 }
6181 mutex_lock(&_dhd_sdio_mutex_lock_);
6182#endif
6183#endif /* MULTIPLE_SUPPLICANT */
6184 /* Init wakelock */
6185 if (!dhd_download_fw_on_driverload &&
6186 !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6187 DHD_OS_WAKE_LOCK_INIT(dhd);
6188 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
6189 }
6190
6191#ifdef PREVENT_REOPEN_DURING_HANG
6192 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
6193 if (dhd->pub.hang_was_sent == 1) {
6194 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
6195 /* Force to bring down WLAN interface in case dhd_stop() is not called
6196 * from the upper layer when HANG event is triggered.
6197 */
6198 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
6199 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
6200 dhd_stop(net);
6201 } else {
6202 return -1;
6203 }
6204 }
6205#endif /* PREVENT_REOPEN_DURING_HANG */
6206
6207
6208 DHD_OS_WAKE_LOCK(&dhd->pub);
6209 DHD_PERIM_LOCK(&dhd->pub);
6210 dhd->pub.dongle_trap_occured = 0;
6211 dhd->pub.hang_was_sent = 0;
6212 dhd->pub.hang_reason = 0;
6213#ifdef DHD_LOSSLESS_ROAMING
6214 dhd->pub.dequeue_prec_map = ALLPRIO;
6215#endif
6216#if 0
6217 /*
6218 * Force start if ifconfig_up gets called before START command
6219 * We keep WEXT's wl_control_wl_start to provide backward compatibility
6220 * This should be removed in the future
6221 */
6222 ret = wl_control_wl_start(net);
6223 if (ret != 0) {
6224 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6225 ret = -1;
6226 goto exit;
6227 }
6228#endif
6229
6230 ifidx = dhd_net2idx(dhd, net);
6231 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6232
6233 if (ifidx < 0) {
6234 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
6235 ret = -1;
6236 goto exit;
6237 }
6238
6239 if (!dhd->iflist[ifidx]) {
6240 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
6241 ret = -1;
6242 goto exit;
6243 }
6244
6245 if (ifidx == 0) {
6246 atomic_set(&dhd->pend_8021x_cnt, 0);
6247 if (!dhd_download_fw_on_driverload) {
6248 DHD_ERROR(("\n%s\n", dhd_version));
6249#if defined(USE_INITIAL_SHORT_DWELL_TIME)
6250 g_first_broadcast_scan = TRUE;
6251#endif
6252 ret = wl_android_wifi_on(net);
6253 if (ret != 0) {
6254 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
6255 __FUNCTION__, ret));
6256 ret = -1;
6257 goto exit;
6258 }
6259 }
6260#ifdef FIX_CPU_MIN_CLOCK
6261 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
6262 dhd_init_cpufreq_fix(dhd);
6263 dhd_fix_cpu_freq(dhd);
6264 }
6265#endif /* FIX_CPU_MIN_CLOCK */
08dfb6c4
RC
6266#if defined(OOB_INTR_ONLY)
6267 if (dhd->pub.conf->dpc_cpucore >= 0) {
6268 dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
6269 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
6270 if (adapter) {
6271 printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
6272 irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
6273 }
6274 }
6275#endif
ef6a5fee
RC
6276
6277 if (dhd->pub.busstate != DHD_BUS_DATA) {
6278
6279 /* try to bring up bus */
6280 DHD_PERIM_UNLOCK(&dhd->pub);
6281 ret = dhd_bus_start(&dhd->pub);
6282 DHD_PERIM_LOCK(&dhd->pub);
6283 if (ret) {
6284 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6285 ret = -1;
6286 goto exit;
6287 }
6288
6289 }
6290 if (dhd_download_fw_on_driverload) {
6291 if (dhd->pub.conf->deepsleep)
6292 dhd_deepsleep(dhd, 0);
6293 }
6294
6295#ifdef BCM_FD_AGGR
6296 config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
6297
6298
6299 memset(iovbuf, 0, sizeof(iovbuf));
6300 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
6301 iovbuf, sizeof(iovbuf));
6302
6303 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
6304 agglimit = *(uint32 *)iovbuf;
6305 config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
6306 config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
6307 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
6308 agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
6309 if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
6310 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
6311 }
6312 } else {
6313 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
6314 rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
6315 }
6316
6317 /* Set aggregation for TX */
6318 bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
6319 rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
6320
6321 /* Set aggregation for RX */
6322 memset(iovbuf, 0, sizeof(iovbuf));
6323 bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
6324 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
6325 dhd->pub.info->fdaggr = 0;
6326 if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
6327 dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
6328 if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
6329 dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
6330 } else {
6331 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
6332 }
6333#endif /* BCM_FD_AGGR */
6334
6335 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
6336 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
6337
6338#ifdef TOE
6339 /* Get current TOE mode from dongle */
6340 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
6341 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
6342 } else {
6343 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
6344 }
6345#endif /* TOE */
6346
6347#if defined(WL_CFG80211)
6348 if (unlikely(wl_cfg80211_up(NULL))) {
6349 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
6350 ret = -1;
6351 goto exit;
6352 }
6353 if (!dhd_download_fw_on_driverload) {
6354#ifdef ARP_OFFLOAD_SUPPORT
6355 dhd->pend_ipaddr = 0;
6356 if (!dhd_inetaddr_notifier_registered) {
6357 dhd_inetaddr_notifier_registered = TRUE;
6358 register_inetaddr_notifier(&dhd_inetaddr_notifier);
6359 }
6360#endif /* ARP_OFFLOAD_SUPPORT */
6361#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6362 if (!dhd_inet6addr_notifier_registered) {
6363 dhd_inet6addr_notifier_registered = TRUE;
6364 register_inet6addr_notifier(&dhd_inet6addr_notifier);
6365 }
6366#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6367#ifdef DHD_LB
6368 DHD_LB_STATS_INIT(&dhd->pub);
6369#ifdef DHD_LB_RXP
6370 __skb_queue_head_init(&dhd->rx_pend_queue);
6371#endif /* DHD_LB_RXP */
6372#endif /* DHD_LB */
6373 }
6374
6375#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6376#if defined(SET_RPS_CPUS)
6377 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6378#else
6379 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
6380#endif
6381#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6382#if defined(DHD_LB) && defined(DHD_LB_RXP)
6383 if (dhd->rx_napi_netdev == NULL) {
6384 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
6385 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
6386 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
6387 dhd_napi_poll, dhd_napi_weight);
6388 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
6389 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6390 napi_enable(&dhd->rx_napi_struct);
6391 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
6392 skb_queue_head_init(&dhd->rx_napi_queue);
6393 }
6394#endif /* DHD_LB && DHD_LB_RXP */
6395#if defined(NUM_SCB_MAX_PROBE)
6396 dhd_set_scb_probe(&dhd->pub);
6397#endif /* NUM_SCB_MAX_PROBE */
6398#endif /* WL_CFG80211 */
6399 }
6400
6401 /* Allow transmit calls */
6402 netif_start_queue(net);
6403 dhd->pub.up = 1;
6404
6405 OLD_MOD_INC_USE_COUNT;
6406
6407#ifdef BCMDBGFS
6408 dhd_dbg_init(&dhd->pub);
6409#endif
6410
6411exit:
6412 if (ret) {
6413 dhd_stop(net);
6414 }
6415
6416 DHD_PERIM_UNLOCK(&dhd->pub);
6417 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6418
6419#if defined(MULTIPLE_SUPPLICANT)
6420#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6421 mutex_unlock(&_dhd_sdio_mutex_lock_);
6422#endif
6423#endif /* MULTIPLE_SUPPLICANT */
6424
6425 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
6426 return ret;
6427}
6428
6429int dhd_do_driver_init(struct net_device *net)
6430{
6431 dhd_info_t *dhd = NULL;
6432
6433 if (!net) {
6434 DHD_ERROR(("Primary Interface not initialized \n"));
6435 return -EINVAL;
6436 }
6437
6438#ifdef MULTIPLE_SUPPLICANT
6439#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
6440 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6441 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
6442 return 0;
6443 }
6444#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
6445#endif /* MULTIPLE_SUPPLICANT */
6446
6447 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
6448 dhd = DHD_DEV_INFO(net);
6449
6450 /* If driver is already initialized, do nothing
6451 */
6452 if (dhd->pub.busstate == DHD_BUS_DATA) {
6453 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
6454 return 0;
6455 }
6456
6457 if (dhd_open(net) < 0) {
6458 DHD_ERROR(("Driver Init Failed \n"));
6459 return -1;
6460 }
6461
6462 return 0;
6463}
6464
6465int
6466dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6467{
6468
6469#ifdef WL_CFG80211
6470 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6471 return BCME_OK;
6472#endif
6473
6474 /* handle IF event caused by wl commands, SoftAP, WEXT and
6475 * anything else. This has to be done asynchronously otherwise
6476 * DPC will be blocked (and iovars will timeout as DPC has no chance
6477 * to read the response back)
6478 */
6479 if (ifevent->ifidx > 0) {
6480 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6481 if (if_event == NULL) {
6482 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
6483 MALLOCED(dhdinfo->pub.osh)));
6484 return BCME_NOMEM;
6485 }
6486
6487 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6488 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6489 strncpy(if_event->name, name, IFNAMSIZ);
6490 if_event->name[IFNAMSIZ - 1] = '\0';
6491 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
6492 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
6493 }
6494
6495 return BCME_OK;
6496}
6497
6498int
6499dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6500{
6501 dhd_if_event_t *if_event;
6502
6503#ifdef WL_CFG80211
6504 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6505 return BCME_OK;
6506#endif /* WL_CFG80211 */
6507
6508 /* handle IF event caused by wl commands, SoftAP, WEXT and
6509 * anything else
6510 */
6511 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6512 if (if_event == NULL) {
6513 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
6514 MALLOCED(dhdinfo->pub.osh)));
6515 return BCME_NOMEM;
6516 }
6517 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6518 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6519 strncpy(if_event->name, name, IFNAMSIZ);
6520 if_event->name[IFNAMSIZ - 1] = '\0';
6521 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
6522 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
6523
6524 return BCME_OK;
6525}
6526
6527/* unregister and free the existing net_device interface (if any) in iflist and
6528 * allocate a new one. the slot is reused. this function does NOT register the
6529 * new interface to linux kernel. dhd_register_if does the job
6530 */
6531struct net_device*
6532dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
6533 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
6534{
6535 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6536 dhd_if_t *ifp;
6537
6538 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
6539 ifp = dhdinfo->iflist[ifidx];
6540
6541 if (ifp != NULL) {
6542 if (ifp->net != NULL) {
6543 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
6544
6545 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
6546
6547 /* in unregister_netdev case, the interface gets freed by net->destructor
6548 * (which is set to free_netdev)
6549 */
6550 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6551 free_netdev(ifp->net);
6552 } else {
6553 netif_stop_queue(ifp->net);
6554 if (need_rtnl_lock)
6555 unregister_netdev(ifp->net);
6556 else
6557 unregister_netdevice(ifp->net);
6558 }
6559 ifp->net = NULL;
6560 }
6561 } else {
6562 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
6563 if (ifp == NULL) {
6564 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
6565 return NULL;
6566 }
6567 }
6568
6569 memset(ifp, 0, sizeof(dhd_if_t));
6570 ifp->info = dhdinfo;
6571 ifp->idx = ifidx;
6572 ifp->bssidx = bssidx;
6573 if (mac != NULL)
6574 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
6575
6576 /* Allocate etherdev, including space for private structure */
6577 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
6578 if (ifp->net == NULL) {
6579 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
6580 goto fail;
6581 }
6582
6583 /* Setup the dhd interface's netdevice private structure. */
6584 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
6585
6586 if (name && name[0]) {
6587 strncpy(ifp->net->name, name, IFNAMSIZ);
6588 ifp->net->name[IFNAMSIZ - 1] = '\0';
6589 }
6590
6591#ifdef WL_CFG80211
6592 if (ifidx == 0)
6593 ifp->net->destructor = free_netdev;
6594 else
6595 ifp->net->destructor = dhd_netdev_free;
6596#else
6597 ifp->net->destructor = free_netdev;
6598#endif /* WL_CFG80211 */
6599 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
6600 ifp->name[IFNAMSIZ - 1] = '\0';
6601 dhdinfo->iflist[ifidx] = ifp;
6602
6603/* initialize the dongle provided if name */
6604 if (dngl_name)
6605 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
6606 else
6607 strncpy(ifp->dngl_name, name, IFNAMSIZ);
6608
6609#ifdef PCIE_FULL_DONGLE
6610 /* Initialize STA info list */
6611 INIT_LIST_HEAD(&ifp->sta_list);
6612 DHD_IF_STA_LIST_LOCK_INIT(ifp);
6613#endif /* PCIE_FULL_DONGLE */
6614
6615#ifdef DHD_L2_FILTER
6616 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
6617 ifp->parp_allnode = TRUE;
6618#endif
6619 return ifp->net;
6620
6621fail:
6622
6623 if (ifp != NULL) {
6624 if (ifp->net != NULL) {
6625 dhd_dev_priv_clear(ifp->net);
6626 free_netdev(ifp->net);
6627 ifp->net = NULL;
6628 }
6629 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6630 ifp = NULL;
6631 }
6632
6633 dhdinfo->iflist[ifidx] = NULL;
6634 return NULL;
6635}
6636
6637/* unregister and free the the net_device interface associated with the indexed
6638 * slot, also free the slot memory and set the slot pointer to NULL
6639 */
6640int
6641dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
6642{
6643 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6644 dhd_if_t *ifp;
6645
6646 ifp = dhdinfo->iflist[ifidx];
6647
6648 if (ifp != NULL) {
6649 if (ifp->net != NULL) {
6650 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
6651
6652 /* in unregister_netdev case, the interface gets freed by net->destructor
6653 * (which is set to free_netdev)
6654 */
6655 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6656 free_netdev(ifp->net);
6657 } else {
6658 netif_tx_disable(ifp->net);
6659
6660
6661
6662#if defined(SET_RPS_CPUS)
6663 custom_rps_map_clear(ifp->net->_rx);
6664#endif /* SET_RPS_CPUS */
6665#if defined(SET_RPS_CPUS)
6666#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
6667 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
6668#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
6669#endif
6670 if (need_rtnl_lock)
6671 unregister_netdev(ifp->net);
6672 else
6673 unregister_netdevice(ifp->net);
6674 }
6675 ifp->net = NULL;
6676 dhdinfo->iflist[ifidx] = NULL;
6677 }
6678#ifdef DHD_WMF
6679 dhd_wmf_cleanup(dhdpub, ifidx);
6680#endif /* DHD_WMF */
6681#ifdef DHD_L2_FILTER
6682 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
6683 NULL, FALSE, dhdpub->tickcnt);
6684 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
6685 ifp->phnd_arp_table = NULL;
6686#endif /* DHD_L2_FILTER */
6687
6688 dhd_if_del_sta_list(ifp);
6689
6690 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
08dfb6c4 6691 ifp = NULL;
ef6a5fee
RC
6692 }
6693
6694 return BCME_OK;
6695}
6696
6697#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* netdev ops for the primary interface: routes open/stop through
 * dhd_open/dhd_stop so bringing the interface up/down powers the
 * dongle on/off.
 */
6698static struct net_device_ops dhd_ops_pri = {
6699 .ndo_open = dhd_open,
6700 .ndo_stop = dhd_stop,
6701 .ndo_get_stats = dhd_get_stats,
6702 .ndo_do_ioctl = dhd_ioctl_entry,
6703 .ndo_start_xmit = dhd_start_xmit,
6704 .ndo_set_mac_address = dhd_set_mac_address,
6705#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
6706 .ndo_set_rx_mode = dhd_set_multicast_list,
6707#else
6708 .ndo_set_multicast_list = dhd_set_multicast_list,
6709#endif
6710};
6711
/* netdev ops for virtual (secondary) interfaces: identical to the
 * primary ops except that no open/stop handlers are provided, so
 * bringing a virtual interface up/down does not touch the dongle
 * power state.
 */
6712static struct net_device_ops dhd_ops_virt = {
6713 .ndo_get_stats = dhd_get_stats,
6714 .ndo_do_ioctl = dhd_ioctl_entry,
6715 .ndo_start_xmit = dhd_start_xmit,
6716 .ndo_set_mac_address = dhd_set_mac_address,
6717#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
6718 .ndo_set_rx_mode = dhd_set_multicast_list,
6719#else
6720 .ndo_set_multicast_list = dhd_set_multicast_list,
6721#endif
6722};
6723#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
6724
6725#ifdef DEBUGGER
6726extern void debugger_init(void *bus_handle);
6727#endif
6728
6729
6730#ifdef SHOW_LOGTRACE
/* Default file-system locations of the firmware event-log artifacts;
 * each is overridable at insmod time via the module_param entries below.
 */
6731static char *logstrs_path = "/root/logstrs.bin";
6732static char *st_str_file_path = "/root/rtecdc.bin";
6733static char *map_file_path = "/root/rtecdc.map";
6734static char *rom_st_str_file_path = "/root/roml.bin";
6735static char *rom_map_file_path = "/root/roml.map";
6736
6737#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */
6738#define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
6739#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
6740static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */
6741static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */
6742static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */
6743static char *ram_file_str = "rtecdc"; /* substring identifying a RAM image path */
6744static char *rom_file_str = "roml"; /* substring identifying a ROM image path */
/* Bit flags accumulated by dhd_read_map() as each map symbol is found. */
6745#define RAMSTART_BIT 0x01
6746#define RDSTART_BIT 0x02
6747#define RDEND_BIT 0x04
6748#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
6749
/* Expose the five file paths as read-only module parameters. */
6750module_param(logstrs_path, charp, S_IRUGO);
6751module_param(st_str_file_path, charp, S_IRUGO);
6752module_param(map_file_path, charp, S_IRUGO);
6753module_param(rom_st_str_file_path, charp, S_IRUGO);
6754module_param(rom_map_file_path, charp, S_IRUGO);
6755
/*
 * dhd_init_logstrs_array - load firmware event-log format strings.
 *
 * Reads the whole logstrs.bin file (path from the logstrs_path module
 * parameter) into memory and builds temp->fmts, an array of char*
 * pointers into that raw image, one per log format string.  Three file
 * layouts are supported: the header-based layout (trailer stamped with
 * LOGSTRS_MAGIC), a legacy combined ROM/RAM layout (4324b5 only) and a
 * legacy RAM-only layout.
 *
 * On success temp->fmts/raw_fmts/num_fmts are populated; raw_fmts stays
 * allocated for the driver lifetime since fmts points into it.  On any
 * failure all intermediate allocations are released and temp->fmts is
 * set to NULL so callers can detect that parsing is unavailable.
 *
 * NOTE(review): kernel-space file I/O via get_fs/set_fs(KERNEL_DS)
 * matches the driver's existing pattern for this kernel generation.
 */
6756static void
6757dhd_init_logstrs_array(dhd_event_log_t *temp)
6758{
6759 struct file *filep = NULL;
6760 struct kstat stat;
6761 mm_segment_t fs;
6762 char *raw_fmts = NULL;
6763 int logstrs_size = 0;
6764
6765 logstr_header_t *hdr = NULL;
6766 uint32 *lognums = NULL;
6767 char *logstrs = NULL;
6768 int ram_index = 0;
6769 char **fmts;
6770 int num_fmts = 0;
6771 uint32 i = 0;
6772 int error = 0;
6773
6774 fs = get_fs();
6775 set_fs(KERNEL_DS);
6776
6777 filep = filp_open(logstrs_path, O_RDONLY, 0);
6778
6779 if (IS_ERR(filep)) {
6780 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
6781 goto fail;
6782 }
6783 error = vfs_stat(logstrs_path, &stat);
6784 if (error) {
6785 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
6786 goto fail;
6787 }
6788 logstrs_size = (int) stat.size;
6789
	/* read the whole file in one shot; fmts[] entries will point into it */
6790 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6791 if (raw_fmts == NULL) {
6792 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
6793 goto fail;
6794 }
6795 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
6796 DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
6797 goto fail;
6798 }
6799
6800 /* Remember header from the logstrs.bin file */
	/* NOTE(review): assumes logstrs_size >= sizeof(logstr_header_t);
	 * a truncated file would make hdr point before raw_fmts — confirm
	 * the tool chain never emits such files.
	 */
6801 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
6802 sizeof(logstr_header_t));
6803
6804 if (hdr->log_magic == LOGSTRS_MAGIC) {
6805 /*
6806 * logstrs.bin start with header.
6807 */
6808 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
6809 ram_index = (hdr->ram_lognums_offset -
6810 hdr->rom_lognums_offset) / sizeof(uint32);
6811 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
6812 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
6813 } else {
6814 /*
6815 * Legacy logstrs.bin format without header.
6816 */
6817 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
6818 if (num_fmts == 0) {
6819 /* Legacy ROM/RAM logstrs.bin format:
6820 * - ROM 'lognums' section
6821 * - RAM 'lognums' section
6822 * - ROM 'logstrs' section.
6823 * - RAM 'logstrs' section.
6824 *
6825 * 'lognums' is an array of indexes for the strings in the
6826 * 'logstrs' section. The first uint32 is 0 (index of first
6827 * string in ROM 'logstrs' section).
6828 *
6829 * The 4324b5 is the only ROM that uses this legacy format. Use the
6830 * fixed number of ROM fmtnums to find the start of the RAM
6831 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
6832 * find the ROM 'logstrs' section.
6833 */
6834 #define NUM_4324B5_ROM_FMTS 186
6835 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
6836 ram_index = NUM_4324B5_ROM_FMTS;
6837 lognums = (uint32 *) raw_fmts;
6838 num_fmts = ram_index;
6839 logstrs = (char *) &raw_fmts[num_fmts << 2];
6840 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
6841 num_fmts++;
6842 logstrs = (char *) &raw_fmts[num_fmts << 2];
6843 }
6844 } else {
6845 /* Legacy RAM-only logstrs.bin format:
6846 * - RAM 'lognums' section
6847 * - RAM 'logstrs' section.
6848 *
6849 * 'lognums' is an array of indexes for the strings in the
6850 * 'logstrs' section. The first uint32 is an index to the
6851 * start of 'logstrs'. Therefore, if this index is divided
6852 * by 'sizeof(uint32)' it provides the number of logstr
6853 * entries.
6854 */
6855 ram_index = 0;
6856 lognums = (uint32 *) raw_fmts;
6857 logstrs = (char *) &raw_fmts[num_fmts << 2];
6858 }
6859 }
6860 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
6861 if (fmts == NULL) {
6862 DHD_ERROR(("Failed to allocate fmts memory\n"));
6863 goto fail;
6864 }
6865
6866 for (i = 0; i < num_fmts; i++) {
6867 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6868 * (they are 0-indexed relative to 'rom_logstrs_offset').
6869 *
6870 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6871 * are 0-indexed relative to the start of the logstrs.bin file).
6872 */
6873 if (i == ram_index) {
6874 logstrs = raw_fmts;
6875 }
6876 fmts[i] = &logstrs[lognums[i]];
6877 }
6878 temp->fmts = fmts;
6879 temp->raw_fmts = raw_fmts;
6880 temp->num_fmts = num_fmts;
6881 filp_close(filep, NULL);
6882 set_fs(fs);
6883 return;
	/* error path: free the raw image, close the file if it was opened,
	 * restore the address limit, and flag the failure via temp->fmts.
	 */
6884fail:
6885 if (raw_fmts) {
6886 kfree(raw_fmts);
6887 raw_fmts = NULL;
6888 }
6889 if (!IS_ERR(filep))
6890 filp_close(filep, NULL);
6891 set_fs(fs);
6892 temp->fmts = NULL;
6893 return;
6894}
6895
/*
 * dhd_read_map - extract link addresses from a firmware .map file.
 *
 * Scans @fname for the "text_start", "rodata_start" and "rodata_end"
 * symbols and stores their hex addresses into @ramstart, @rodata_start
 * and @rodata_end (all zeroed first).  The file is read in READ_NUM_BYTES
 * windows; after each window the file position is rewound by
 * GO_BACK_FILE_POS_NUM_BYTES so a symbol split across a window boundary
 * is still seen whole in the next read.
 *
 * Returns BCME_OK only when all three addresses were found, BCME_ERROR
 * otherwise (NULL fname, open/read failure, or EOF before all symbols
 * were located).
 */
6896static int
6897dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
6898 uint32 *rodata_end)
6899{
6900 struct file *filep = NULL;
6901 mm_segment_t fs;
6902 char *raw_fmts = NULL;
6903 uint32 read_size = READ_NUM_BYTES;
6904 int error = 0;
6905 char * cptr = NULL;
6906 char c;
6907 uint8 count = 0;
6908
6909 *ramstart = 0;
6910 *rodata_start = 0;
6911 *rodata_end = 0;
6912
6913 if (fname == NULL) {
6914 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
6915 return BCME_ERROR;
6916 }
6917
6918 fs = get_fs();
6919 set_fs(KERNEL_DS);
6920
6921 filep = filp_open(fname, O_RDONLY, 0);
6922 if (IS_ERR(filep)) {
6923 DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
6924 goto fail;
6925 }
6926
6927 /* Allocate 1 byte more than read_size to terminate it with NULL */
6928 raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
6929 if (raw_fmts == NULL) {
6930 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6931 goto fail;
6932 }
6933
6934 /* read ram start, rodata_start and rodata_end values from map file */
6935
	/* loop until every address bit is set in 'count' or EOF is hit */
6936 while (count != ALL_MAP_VAL)
6937 {
6938 error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
6939 if (error < 0) {
6940 DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
6941 map_file_path, error));
6942 goto fail;
6943 }
6944
6945 if (error < read_size) {
6946 /*
6947 * since we reset file pos back to earlier pos by
6948 * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
6949 * So if ret value is less than read_size, reached EOF don't read further
6950 */
6951 break;
6952 }
6953 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
6954 raw_fmts[read_size] = '\0';
6955
6956 /* Get ramstart address */
	/* each map line looks like "<hexaddr> <type> <symbol>"; back up
	 * BYTES_AHEAD_NUM chars from the symbol to land on the address
	 */
6957 if ((cptr = strstr(raw_fmts, ramstart_str))) {
6958 cptr = cptr - BYTES_AHEAD_NUM;
6959 sscanf(cptr, "%x %c text_start", ramstart, &c);
6960 count |= RAMSTART_BIT;
6961 }
6962
6963 /* Get ram rodata start address */
6964 if ((cptr = strstr(raw_fmts, rodata_start_str))) {
6965 cptr = cptr - BYTES_AHEAD_NUM;
6966 sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6967 count |= RDSTART_BIT;
6968 }
6969
6970 /* Get ram rodata end address */
6971 if ((cptr = strstr(raw_fmts, rodata_end_str))) {
6972 cptr = cptr - BYTES_AHEAD_NUM;
6973 sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6974 count |= RDEND_BIT;
6975 }
6976 memset(raw_fmts, 0, read_size);
6977 /*
6978 * go back to predefined NUM of bytes so that we won't miss
6979 * the string and addr even if it comes as splited in next read.
6980 */
6981 filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
6982 }
6983
6984 DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6985 *ramstart, *rodata_start, *rodata_end));
6986
6987 DHD_ERROR(("readmap over \n"));
6988
	/* shared exit path: success also falls through here; 'count'
	 * decides the return value below
	 */
6989fail:
6990 if (raw_fmts) {
6991 kfree(raw_fmts);
6992 raw_fmts = NULL;
6993 }
6994 if (!IS_ERR(filep))
6995 filp_close(filep, NULL);
6996
6997 set_fs(fs);
6998 if (count == ALL_MAP_VAL) {
6999 return BCME_OK;
7000 }
7001 DHD_ERROR(("readmap error 0X%x \n", count));
7002 return BCME_ERROR;
7003}
7004
/*
 * dhd_init_static_strs_array - load the firmware's rodata string section.
 *
 * Uses dhd_read_map() on @map_file to learn where the rodata section
 * lives inside the firmware image @str_file, then reads just that slice
 * (rodata_end - rodata_start bytes, at file offset rodata_start -
 * ramstart) into a fresh buffer.  Depending on whether @str_file names a
 * RAM image ("rtecdc") or a ROM image ("roml"), the buffer and addresses
 * are stored in temp->raw_sstr/... or temp->rom_raw_sstr/...
 *
 * On failure the corresponding raw_sstr pointer is set to NULL and all
 * intermediate resources are released.  The successfully loaded buffer
 * is owned by @temp for the driver lifetime.
 */
7005static void
7006dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
7007{
7008 struct file *filep = NULL;
7009 mm_segment_t fs;
7010 char *raw_fmts = NULL;
7011 uint32 logstrs_size = 0;
7012
7013 int error = 0;
7014 uint32 ramstart = 0;
7015 uint32 rodata_start = 0;
7016 uint32 rodata_end = 0;
7017 uint32 logfilebase = 0;
7018
7019 error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
7020 if (error == BCME_ERROR) {
7021 DHD_ERROR(("readmap Error!! \n"));
7022 /* don't do event log parsing in actual case */
7023 temp->raw_sstr = NULL;
7024 return;
7025 }
7026 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
7027 ramstart, rodata_start, rodata_end));
7028
7029 fs = get_fs();
7030 set_fs(KERNEL_DS);
7031
7032 filep = filp_open(str_file, O_RDONLY, 0);
7033 if (IS_ERR(filep)) {
7034 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
7035 goto fail;
7036 }
7037
7038 /* Full file size is huge. Just read required part */
	/* NOTE(review): assumes the map yields rodata_end > rodata_start
	 * and rodata_start >= ramstart; a malformed map would request a
	 * zero/huge kmalloc or a bad seek — confirm upstream tooling.
	 */
7039 logstrs_size = rodata_end - rodata_start;
7040
7041 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
7042 if (raw_fmts == NULL) {
7043 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
7044 goto fail;
7045 }
7046
7047 logfilebase = rodata_start - ramstart;
7048
7049 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
7050 if (error < 0) {
7051 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
7052 goto fail;
7053 }
7054
7055 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
7056 if (error != logstrs_size) {
7057 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
7058 goto fail;
7059 }
7060
	/* stash the slice in the RAM or ROM slots based on the file name */
7061 if (strstr(str_file, ram_file_str) != NULL) {
7062 temp->raw_sstr = raw_fmts;
7063 temp->ramstart = ramstart;
7064 temp->rodata_start = rodata_start;
7065 temp->rodata_end = rodata_end;
7066 } else if (strstr(str_file, rom_file_str) != NULL) {
7067 temp->rom_raw_sstr = raw_fmts;
7068 temp->rom_ramstart = ramstart;
7069 temp->rom_rodata_start = rodata_start;
7070 temp->rom_rodata_end = rodata_end;
7071 }
7072
7073 filp_close(filep, NULL);
7074 set_fs(fs);
7075
7076 return;
	/* error path: release the buffer/file, restore the address limit,
	 * and NULL whichever slot (RAM or ROM) this call was loading
	 */
7077fail:
7078 if (raw_fmts) {
7079 kfree(raw_fmts);
7080 raw_fmts = NULL;
7081 }
7082 if (!IS_ERR(filep))
7083 filp_close(filep, NULL);
7084 set_fs(fs);
7085 if (strstr(str_file, ram_file_str) != NULL) {
7086 temp->raw_sstr = NULL;
7087 } else if (strstr(str_file, rom_file_str) != NULL) {
7088 temp->rom_raw_sstr = NULL;
7089 }
7090 return;
7091}
7092
7093#endif /* SHOW_LOGTRACE */
7094
7095
7096dhd_pub_t *
7097dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
7098{
7099 dhd_info_t *dhd = NULL;
7100 struct net_device *net = NULL;
7101 char if_name[IFNAMSIZ] = {'\0'};
7102 uint32 bus_type = -1;
7103 uint32 bus_num = -1;
7104 uint32 slot_num = -1;
7105 wifi_adapter_info_t *adapter = NULL;
7106
7107 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
7108 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7109
7110#ifdef STBLINUX
7111 DHD_ERROR(("%s\n", driver_target));
7112#endif /* STBLINUX */
7113 /* will implement get_ids for DBUS later */
7114#if defined(BCMSDIO)
7115 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
7116#endif
7117 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7118
7119 /* Allocate primary dhd_info */
7120 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
7121 if (dhd == NULL) {
7122 dhd = MALLOC(osh, sizeof(dhd_info_t));
7123 if (dhd == NULL) {
7124 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
7125 goto fail;
7126 }
7127 }
7128 memset(dhd, 0, sizeof(dhd_info_t));
7129 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
7130
7131 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
7132
7133 dhd->pub.osh = osh;
7134 dhd->adapter = adapter;
7135
7136#ifdef GET_CUSTOM_MAC_ENABLE
7137 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
7138#endif /* GET_CUSTOM_MAC_ENABLE */
7139#ifdef CUSTOM_FORCE_NODFS_FLAG
7140 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
7141 dhd->pub.force_country_change = TRUE;
7142#endif /* CUSTOM_FORCE_NODFS_FLAG */
7143#ifdef CUSTOM_COUNTRY_CODE
7144 get_customized_country_code(dhd->adapter,
7145 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
7146 dhd->pub.dhd_cflags);
7147#endif /* CUSTOM_COUNTRY_CODE */
7148 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
7149 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
7150
7151 /* Initialize thread based operation and lock */
7152 sema_init(&dhd->sdsem, 1);
7153
7154 /* Link to info module */
7155 dhd->pub.info = dhd;
7156
7157
7158 /* Link to bus module */
7159 dhd->pub.bus = bus;
7160 dhd->pub.hdrlen = bus_hdrlen;
7161
7162 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
7163 * because dhd_detech will check .info is NULL or not.
7164 */
7165 if (dhd_conf_attach(&dhd->pub) != 0) {
7166 DHD_ERROR(("dhd_conf_attach failed\n"));
7167 goto fail;
7168 }
7169 dhd_conf_reset(&dhd->pub);
7170 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
7171 dhd_conf_preinit(&dhd->pub);
7172
7173 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
7174 * This is indeed a hack but we have to make it work properly before we have a better
7175 * solution
7176 */
7177 dhd_update_fw_nv_path(dhd);
ef6a5fee
RC
7178
7179 /* Set network interface name if it was provided as module parameter */
7180 if (iface_name[0]) {
7181 int len;
7182 char ch;
7183 strncpy(if_name, iface_name, IFNAMSIZ);
7184 if_name[IFNAMSIZ - 1] = 0;
7185 len = strlen(if_name);
7186 ch = if_name[len - 1];
7187 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
7188 strcat(if_name, "%d");
7189 }
7190
7191 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
7192 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
7193 if (net == NULL) {
7194 goto fail;
7195 }
7196
7197
7198 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
7199#ifdef DHD_L2_FILTER
7200 /* initialize the l2_filter_cnt */
7201 dhd->pub.l2_filter_cnt = 0;
7202#endif
7203#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7204 net->open = NULL;
7205#else
7206 net->netdev_ops = NULL;
7207#endif
7208
7209 mutex_init(&dhd->dhd_iovar_mutex);
7210 sema_init(&dhd->proto_sem, 1);
7211
7212#ifdef PROP_TXSTATUS
7213 spin_lock_init(&dhd->wlfc_spinlock);
7214
7215 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
7216 dhd->pub.plat_init = dhd_wlfc_plat_init;
7217 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
7218
7219#ifdef DHD_WLFC_THREAD
7220 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
7221 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
7222 if (IS_ERR(dhd->pub.wlfc_thread)) {
7223 DHD_ERROR(("create wlfc thread failed\n"));
7224 goto fail;
7225 } else {
7226 wake_up_process(dhd->pub.wlfc_thread);
7227 }
7228#endif /* DHD_WLFC_THREAD */
7229#endif /* PROP_TXSTATUS */
7230
7231 /* Initialize other structure content */
7232 init_waitqueue_head(&dhd->ioctl_resp_wait);
7233 init_waitqueue_head(&dhd->d3ack_wait);
7234 init_waitqueue_head(&dhd->ctrl_wait);
7235 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
7236 dhd->pub.dhd_bus_busy_state = 0;
7237
7238 /* Initialize the spinlocks */
7239 spin_lock_init(&dhd->sdlock);
7240 spin_lock_init(&dhd->txqlock);
7241 spin_lock_init(&dhd->dhd_lock);
7242 spin_lock_init(&dhd->rxf_lock);
7243#if defined(RXFRAME_THREAD)
7244 dhd->rxthread_enabled = TRUE;
7245#endif /* defined(RXFRAME_THREAD) */
7246
7247#ifdef DHDTCPACK_SUPPRESS
7248 spin_lock_init(&dhd->tcpack_lock);
7249#endif /* DHDTCPACK_SUPPRESS */
7250
7251 /* Initialize Wakelock stuff */
7252 spin_lock_init(&dhd->wakelock_spinlock);
7253 spin_lock_init(&dhd->wakelock_evt_spinlock);
7254 DHD_OS_WAKE_LOCK_INIT(dhd);
7255 dhd->wakelock_wd_counter = 0;
7256#ifdef CONFIG_HAS_WAKELOCK
08dfb6c4
RC
7257 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
7258 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
ef6a5fee
RC
7259 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
7260#endif /* CONFIG_HAS_WAKELOCK */
7261
7262#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7263 mutex_init(&dhd->dhd_net_if_mutex);
7264 mutex_init(&dhd->dhd_suspend_mutex);
7265#endif
7266 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7267
7268 /* Attach and link in the protocol */
7269 if (dhd_prot_attach(&dhd->pub) != 0) {
7270 DHD_ERROR(("dhd_prot_attach failed\n"));
7271 goto fail;
7272 }
7273 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
7274
7275#ifdef WL_CFG80211
7276 /* Attach and link in the cfg80211 */
7277 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
7278 DHD_ERROR(("wl_cfg80211_attach failed\n"));
7279 goto fail;
7280 }
7281
7282 dhd_monitor_init(&dhd->pub);
7283 dhd_state |= DHD_ATTACH_STATE_CFG80211;
7284#endif
7285#ifdef DHD_LOG_DUMP
7286 dhd_log_dump_init(&dhd->pub);
7287#endif /* DHD_LOG_DUMP */
7288#if defined(WL_WIRELESS_EXT)
7289 /* Attach and link in the iw */
7290 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7291 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
7292 DHD_ERROR(("wl_iw_attach failed\n"));
7293 goto fail;
7294 }
7295 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
7296 }
08dfb6c4
RC
7297#ifdef WL_ESCAN
7298 wl_escan_attach(net, (void *)&dhd->pub);
424b00bf 7299#endif /* WL_ESCAN */
ef6a5fee
RC
7300#endif /* defined(WL_WIRELESS_EXT) */
7301
7302#ifdef SHOW_LOGTRACE
7303 dhd_init_logstrs_array(&dhd->event_data);
7304 dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
7305 dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
7306#endif /* SHOW_LOGTRACE */
7307
7308 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
7309 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
7310 goto fail;
7311 }
7312
7313
7314
7315 /* Set up the watchdog timer */
7316 init_timer(&dhd->timer);
7317 dhd->timer.data = (ulong)dhd;
7318 dhd->timer.function = dhd_watchdog;
7319 dhd->default_wd_interval = dhd_watchdog_ms;
7320
7321 if (dhd_watchdog_prio >= 0) {
7322 /* Initialize watchdog thread */
7323 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
7324 if (dhd->thr_wdt_ctl.thr_pid < 0) {
7325 goto fail;
7326 }
7327
7328 } else {
7329 dhd->thr_wdt_ctl.thr_pid = -1;
7330 }
7331
7332#ifdef DHD_PCIE_RUNTIMEPM
7333 /* Setup up the runtime PM Idlecount timer */
7334 init_timer(&dhd->rpm_timer);
7335 dhd->rpm_timer.data = (ulong)dhd;
7336 dhd->rpm_timer.function = dhd_runtimepm;
7337 dhd->rpm_timer_valid = FALSE;
7338
7339 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
7340 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
7341 if (dhd->thr_rpm_ctl.thr_pid < 0) {
7342 goto fail;
7343 }
7344#endif /* DHD_PCIE_RUNTIMEPM */
7345
7346#ifdef DEBUGGER
7347 debugger_init((void *) bus);
7348#endif
7349
7350 /* Set up the bottom half handler */
7351 if (dhd_dpc_prio >= 0) {
7352 /* Initialize DPC thread */
7353 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
7354 if (dhd->thr_dpc_ctl.thr_pid < 0) {
7355 goto fail;
7356 }
7357 } else {
7358 /* use tasklet for dpc */
7359 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
7360 dhd->thr_dpc_ctl.thr_pid = -1;
7361 }
7362
7363 if (dhd->rxthread_enabled) {
7364 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
7365 /* Initialize RXF thread */
7366 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
7367 if (dhd->thr_rxf_ctl.thr_pid < 0) {
7368 goto fail;
7369 }
7370 }
7371
7372 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
7373
7374#if defined(CONFIG_PM_SLEEP)
7375 if (!dhd_pm_notifier_registered) {
7376 dhd_pm_notifier_registered = TRUE;
7377 dhd->pm_notifier.notifier_call = dhd_pm_callback;
7378 dhd->pm_notifier.priority = 10;
7379 register_pm_notifier(&dhd->pm_notifier);
7380 }
7381
7382#endif /* CONFIG_PM_SLEEP */
7383
7384#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7385 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
7386 dhd->early_suspend.suspend = dhd_early_suspend;
7387 dhd->early_suspend.resume = dhd_late_resume;
7388 register_early_suspend(&dhd->early_suspend);
7389 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
7390#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7391
7392#ifdef ARP_OFFLOAD_SUPPORT
7393 dhd->pend_ipaddr = 0;
7394 if (!dhd_inetaddr_notifier_registered) {
7395 dhd_inetaddr_notifier_registered = TRUE;
7396 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7397 }
7398#endif /* ARP_OFFLOAD_SUPPORT */
7399
7400#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7401 if (!dhd_inet6addr_notifier_registered) {
7402 dhd_inet6addr_notifier_registered = TRUE;
7403 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7404 }
7405#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7406 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
7407#ifdef DEBUG_CPU_FREQ
7408 dhd->new_freq = alloc_percpu(int);
7409 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
7410 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7411#endif
7412#ifdef DHDTCPACK_SUPPRESS
7413#ifdef BCMSDIO
7414 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
7415#elif defined(BCMPCIE)
7416 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
7417#else
7418 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7419#endif /* BCMSDIO */
7420#endif /* DHDTCPACK_SUPPRESS */
7421
7422#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7423#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7424
7425 dhd_state |= DHD_ATTACH_STATE_DONE;
7426 dhd->dhd_state = dhd_state;
7427
7428 dhd_found++;
7429#ifdef DHD_DEBUG_PAGEALLOC
7430 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
7431#endif /* DHD_DEBUG_PAGEALLOC */
7432
7433#if defined(DHD_LB)
7434 DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
7435
7436 dhd_lb_set_default_cpus(dhd);
7437
7438 /* Initialize the CPU Masks */
7439 if (dhd_cpumasks_init(dhd) == 0) {
7440
7441 /* Now we have the current CPU maps, run through candidacy */
7442 dhd_select_cpu_candidacy(dhd);
7443
7444 /*
7445 * If we are able to initialize CPU masks, lets register to the
7446 * CPU Hotplug framework to change the CPU for each job dynamically
7447 * using candidacy algorithm.
7448 */
7449 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
7450 register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
7451 } else {
7452 /*
7453 * We are unable to initialize CPU masks, so candidacy algorithm
7454 * won't run, but still Load Balancing will be honoured based
7455 * on the CPUs allocated for a given job statically during init
7456 */
7457 dhd->cpu_notifier.notifier_call = NULL;
7458 DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n",
7459 __FUNCTION__));
7460 }
7461
7462
7463 DHD_LB_STATS_INIT(&dhd->pub);
7464
7465 /* Initialize the Load Balancing Tasklets and Napi object */
7466#if defined(DHD_LB_TXC)
7467 tasklet_init(&dhd->tx_compl_tasklet,
7468 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
7469 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
7470 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
7471#endif /* DHD_LB_TXC */
7472
7473#if defined(DHD_LB_RXC)
7474 tasklet_init(&dhd->rx_compl_tasklet,
7475 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
7476 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
7477 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
7478#endif /* DHD_LB_RXC */
7479
7480#if defined(DHD_LB_RXP)
7481 __skb_queue_head_init(&dhd->rx_pend_queue);
7482 skb_queue_head_init(&dhd->rx_napi_queue);
7483
7484 /* Initialize the work that dispatches NAPI job to a given core */
7485 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
7486 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
7487#endif /* DHD_LB_RXP */
7488
7489#endif /* DHD_LB */
7490
7491 INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
7492
7493 (void)dhd_sysfs_init(dhd);
7494
7495 return &dhd->pub;
7496
7497fail:
7498 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
7499 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
7500 __FUNCTION__, dhd_state, &dhd->pub));
7501 dhd->dhd_state = dhd_state;
7502 dhd_detach(&dhd->pub);
7503 dhd_free(&dhd->pub);
7504 }
7505
7506 return NULL;
7507}
7508
7509#include <linux/delay.h>
7510
7511void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
7512{
7513 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7514
7515 schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
7516}
7517
7518int dhd_get_fw_mode(dhd_info_t *dhdinfo)
7519{
7520 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
7521 return DHD_FLAG_HOSTAP_MODE;
7522 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
7523 return DHD_FLAG_P2P_MODE;
7524 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
7525 return DHD_FLAG_IBSS_MODE;
7526 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
7527 return DHD_FLAG_MFG_MODE;
7528
7529 return DHD_FLAG_STA_MODE;
7530}
7531
7532bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
7533{
7534 int fw_len;
7535 int nv_len;
08dfb6c4 7536 int clm_len;
ef6a5fee
RC
7537 int conf_len;
7538 const char *fw = NULL;
7539 const char *nv = NULL;
08dfb6c4 7540 const char *clm = NULL;
ef6a5fee
RC
7541 const char *conf = NULL;
7542 wifi_adapter_info_t *adapter = dhdinfo->adapter;
7543
7544
7545 /* Update firmware and nvram path. The path may be from adapter info or module parameter
7546 * The path from adapter info is used for initialization only (as it won't change).
7547 *
7548 * The firmware_path/nvram_path module parameter may be changed by the system at run
7549 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
7550 * command may change dhdinfo->fw_path. As such we need to clear the path info in
7551 * module parameter after it is copied. We won't update the path until the module parameter
7552 * is changed again (first character is not '\0')
7553 */
7554
7555 /* set default firmware and nvram path for built-in type driver */
7556// if (!dhd_download_fw_on_driverload) {
7557#ifdef CONFIG_BCMDHD_FW_PATH
7558 fw = CONFIG_BCMDHD_FW_PATH;
7559#endif /* CONFIG_BCMDHD_FW_PATH */
7560#ifdef CONFIG_BCMDHD_NVRAM_PATH
7561 nv = CONFIG_BCMDHD_NVRAM_PATH;
7562#endif /* CONFIG_BCMDHD_NVRAM_PATH */
7563// }
7564
7565 /* check if we need to initialize the path */
7566 if (dhdinfo->fw_path[0] == '\0') {
7567 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
7568 fw = adapter->fw_path;
7569
7570 }
7571 if (dhdinfo->nv_path[0] == '\0') {
7572 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
7573 nv = adapter->nv_path;
7574 }
08dfb6c4
RC
7575 if (dhdinfo->clm_path[0] == '\0') {
7576 if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
7577 clm = adapter->clm_path;
7578 }
ef6a5fee
RC
7579 if (dhdinfo->conf_path[0] == '\0') {
7580 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
7581 conf = adapter->conf_path;
7582 }
7583
7584 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
7585 *
7586 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
7587 */
7588 if (firmware_path[0] != '\0')
7589 fw = firmware_path;
7590 if (nvram_path[0] != '\0')
7591 nv = nvram_path;
08dfb6c4
RC
7592 if (clm_path[0] != '\0')
7593 clm = clm_path;
ef6a5fee
RC
7594 if (config_path[0] != '\0')
7595 conf = config_path;
7596
7597 if (fw && fw[0] != '\0') {
7598 fw_len = strlen(fw);
7599 if (fw_len >= sizeof(dhdinfo->fw_path)) {
7600 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
7601 return FALSE;
7602 }
7603 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
7604 if (dhdinfo->fw_path[fw_len-1] == '\n')
7605 dhdinfo->fw_path[fw_len-1] = '\0';
7606 }
7607 if (nv && nv[0] != '\0') {
7608 nv_len = strlen(nv);
7609 if (nv_len >= sizeof(dhdinfo->nv_path)) {
7610 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
7611 return FALSE;
7612 }
7613 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
7614 if (dhdinfo->nv_path[nv_len-1] == '\n')
7615 dhdinfo->nv_path[nv_len-1] = '\0';
7616 }
08dfb6c4
RC
7617 if (clm && clm[0] != '\0') {
7618 clm_len = strlen(clm);
7619 if (clm_len >= sizeof(dhdinfo->clm_path)) {
7620 DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
7621 return FALSE;
7622 }
7623 strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
7624 if (dhdinfo->clm_path[clm_len-1] == '\n')
7625 dhdinfo->clm_path[clm_len-1] = '\0';
7626 }
ef6a5fee
RC
7627 if (conf && conf[0] != '\0') {
7628 conf_len = strlen(conf);
7629 if (conf_len >= sizeof(dhdinfo->conf_path)) {
7630 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
7631 return FALSE;
7632 }
7633 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
7634 if (dhdinfo->conf_path[conf_len-1] == '\n')
7635 dhdinfo->conf_path[conf_len-1] = '\0';
7636 }
7637
7638#if 0
7639 /* clear the path in module parameter */
7640 if (dhd_download_fw_on_driverload) {
7641 firmware_path[0] = '\0';
7642 nvram_path[0] = '\0';
08dfb6c4 7643 clm_path[0] = '\0';
ef6a5fee
RC
7644 config_path[0] = '\0';
7645 }
7646#endif
7647
7648#ifndef BCMEMBEDIMAGE
7649 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
7650 if (dhdinfo->fw_path[0] == '\0') {
7651 DHD_ERROR(("firmware path not found\n"));
7652 return FALSE;
7653 }
7654 if (dhdinfo->nv_path[0] == '\0') {
7655 DHD_ERROR(("nvram path not found\n"));
7656 return FALSE;
7657 }
ef6a5fee
RC
7658#endif /* BCMEMBEDIMAGE */
7659
7660 return TRUE;
7661}
7662
7663#ifdef CUSTOMER_HW4_DEBUG
7664bool dhd_validate_chipid(dhd_pub_t *dhdp)
7665{
7666 uint chipid = dhd_bus_chip_id(dhdp);
7667 uint config_chipid;
7668
7669#ifdef BCM4359_CHIP
7670 config_chipid = BCM4359_CHIP_ID;
7671#elif defined(BCM4358_CHIP)
7672 config_chipid = BCM4358_CHIP_ID;
7673#elif defined(BCM4354_CHIP)
7674 config_chipid = BCM4354_CHIP_ID;
7675#elif defined(BCM4356_CHIP)
7676 config_chipid = BCM4356_CHIP_ID;
7677#elif defined(BCM4339_CHIP)
7678 config_chipid = BCM4339_CHIP_ID;
7679#elif defined(BCM43349_CHIP)
7680 config_chipid = BCM43349_CHIP_ID;
7681#elif defined(BCM4335_CHIP)
7682 config_chipid = BCM4335_CHIP_ID;
7683#elif defined(BCM43241_CHIP)
7684 config_chipid = BCM4324_CHIP_ID;
7685#elif defined(BCM4330_CHIP)
7686 config_chipid = BCM4330_CHIP_ID;
7687#elif defined(BCM43430_CHIP)
7688 config_chipid = BCM43430_CHIP_ID;
7689#elif defined(BCM4334W_CHIP)
7690 config_chipid = BCM43342_CHIP_ID;
7691#elif defined(BCM43455_CHIP)
7692 config_chipid = BCM4345_CHIP_ID;
08dfb6c4
RC
7693#elif defined(BCM43012_CHIP_)
7694 config_chipid = BCM43012_CHIP_ID;
ef6a5fee
RC
7695#else
7696 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
7697 " please add CONFIG_BCMXXXX into the Kernel and"
7698 " BCMXXXX_CHIP definition into the DHD driver\n",
7699 __FUNCTION__));
7700 config_chipid = 0;
7701
7702 return FALSE;
7703#endif /* BCM4354_CHIP */
7704
7705#if defined(BCM4359_CHIP)
7706 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
7707 return TRUE;
7708 }
7709#endif /* BCM4359_CHIP */
7710
7711 return config_chipid == chipid;
7712}
7713#endif /* CUSTOMER_HW4_DEBUG */
7714
7715int
7716dhd_bus_start(dhd_pub_t *dhdp)
7717{
7718 int ret = -1;
7719 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7720 unsigned long flags;
7721
7722 ASSERT(dhd);
7723
7724 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
7725
7726 DHD_PERIM_LOCK(dhdp);
7727
7728 /* try to download image and nvram to the dongle */
7729 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
7730 /* Indicate FW Download has not yet done */
7731 dhd->pub.is_fw_download_done = FALSE;
7732 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
7733 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
7734 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
08dfb6c4 7735 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
ef6a5fee
RC
7736 if (ret < 0) {
7737 DHD_ERROR(("%s: failed to download firmware %s\n",
7738 __FUNCTION__, dhd->fw_path));
7739 DHD_PERIM_UNLOCK(dhdp);
7740 return ret;
7741 }
7742 /* Indicate FW Download has succeeded */
7743 dhd->pub.is_fw_download_done = TRUE;
7744 }
7745 if (dhd->pub.busstate != DHD_BUS_LOAD) {
7746 DHD_PERIM_UNLOCK(dhdp);
7747 return -ENETDOWN;
7748 }
7749
7750 dhd_os_sdlock(dhdp);
7751
7752 /* Start the watchdog timer */
7753 dhd->pub.tickcnt = 0;
7754 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
7755 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
7756
7757 /* Bring up the bus */
7758 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
7759
7760 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
7761 dhd_os_sdunlock(dhdp);
7762 DHD_PERIM_UNLOCK(dhdp);
7763 return ret;
7764 }
7765#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7766#if defined(BCMPCIE_OOB_HOST_WAKE)
7767 dhd_os_sdunlock(dhdp);
7768#endif /* BCMPCIE_OOB_HOST_WAKE */
7769 /* Host registration for OOB interrupt */
7770 if (dhd_bus_oob_intr_register(dhdp)) {
7771 /* deactivate timer and wait for the handler to finish */
7772#if !defined(BCMPCIE_OOB_HOST_WAKE)
7773 DHD_GENERAL_LOCK(&dhd->pub, flags);
7774 dhd->wd_timer_valid = FALSE;
7775 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7776 del_timer_sync(&dhd->timer);
7777
7778 dhd_os_sdunlock(dhdp);
7779#endif /* !BCMPCIE_OOB_HOST_WAKE */
7780 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7781 DHD_PERIM_UNLOCK(dhdp);
7782 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7783 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
7784 return -ENODEV;
7785 }
7786
7787#if defined(BCMPCIE_OOB_HOST_WAKE)
7788 dhd_os_sdlock(dhdp);
7789 dhd_bus_oob_intr_set(dhdp, TRUE);
7790#else
7791 /* Enable oob at firmware */
7792 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7793#endif /* BCMPCIE_OOB_HOST_WAKE */
7794#elif defined(FORCE_WOWLAN)
7795 /* Enable oob at firmware */
7796 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7797#endif
7798#ifdef PCIE_FULL_DONGLE
7799 {
7800 /* max_h2d_rings includes H2D common rings */
7801 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
7802
7803 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
7804 max_h2d_rings));
7805 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
7806 dhd_os_sdunlock(dhdp);
7807 DHD_PERIM_UNLOCK(dhdp);
7808 return ret;
7809 }
7810 }
7811#endif /* PCIE_FULL_DONGLE */
7812
7813 /* Do protocol initialization necessary for IOCTL/IOVAR */
7814#ifdef PCIE_FULL_DONGLE
7815 dhd_os_sdunlock(dhdp);
7816#endif /* PCIE_FULL_DONGLE */
7817 ret = dhd_prot_init(&dhd->pub);
7818 if (unlikely(ret) != BCME_OK) {
7819 DHD_PERIM_UNLOCK(dhdp);
7820 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7821 return ret;
7822 }
7823#ifdef PCIE_FULL_DONGLE
7824 dhd_os_sdlock(dhdp);
7825#endif /* PCIE_FULL_DONGLE */
7826
7827 /* If bus is not ready, can't come up */
7828 if (dhd->pub.busstate != DHD_BUS_DATA) {
7829 DHD_GENERAL_LOCK(&dhd->pub, flags);
7830 dhd->wd_timer_valid = FALSE;
7831 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7832 del_timer_sync(&dhd->timer);
7833 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
7834 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7835 dhd_os_sdunlock(dhdp);
7836 DHD_PERIM_UNLOCK(dhdp);
7837 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7838 return -ENODEV;
7839 }
7840
7841 dhd_os_sdunlock(dhdp);
7842
7843 /* Bus is ready, query any dongle information */
7844 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7845 DHD_GENERAL_LOCK(&dhd->pub, flags);
7846 dhd->wd_timer_valid = FALSE;
7847 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7848 del_timer_sync(&dhd->timer);
7849 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
7850 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7851 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7852 DHD_PERIM_UNLOCK(dhdp);
7853 return ret;
7854 }
7855
7856#ifdef ARP_OFFLOAD_SUPPORT
7857 if (dhd->pend_ipaddr) {
7858#ifdef AOE_IP_ALIAS_SUPPORT
7859 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
7860#endif /* AOE_IP_ALIAS_SUPPORT */
7861 dhd->pend_ipaddr = 0;
7862 }
7863#endif /* ARP_OFFLOAD_SUPPORT */
7864
7865 DHD_PERIM_UNLOCK(dhdp);
7866 return 0;
7867}
7868
7869#ifdef WLTDLS
7870int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
7871{
7872 char iovbuf[WLC_IOCTL_SMLEN];
7873 uint32 tdls = tdls_on;
7874 int ret = 0;
7875 uint32 tdls_auto_op = 0;
7876 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
7877 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
7878 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
7879 BCM_REFERENCE(mac);
7880 if (!FW_SUPPORTED(dhd, tdls))
7881 return BCME_ERROR;
7882
7883 if (dhd->tdls_enable == tdls_on)
7884 goto auto_mode;
7885 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
7886 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7887 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
7888 goto exit;
7889 }
7890 dhd->tdls_enable = tdls_on;
7891auto_mode:
7892
7893 tdls_auto_op = auto_on;
7894 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
7895 iovbuf, sizeof(iovbuf));
7896 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7897 sizeof(iovbuf), TRUE, 0)) < 0) {
7898 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
7899 goto exit;
7900 }
7901
7902 if (tdls_auto_op) {
7903 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
7904 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
7905 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7906 sizeof(iovbuf), TRUE, 0)) < 0) {
7907 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
7908 goto exit;
7909 }
7910 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
7911 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7912 sizeof(iovbuf), TRUE, 0)) < 0) {
7913 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
7914 goto exit;
7915 }
7916 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
7917 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7918 sizeof(iovbuf), TRUE, 0)) < 0) {
7919 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
7920 goto exit;
7921 }
7922 }
7923
7924exit:
7925 return ret;
7926}
7927
7928int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
7929{
7930 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7931 int ret = 0;
7932 if (dhd)
7933 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
7934 else
7935 ret = BCME_ERROR;
7936 return ret;
7937}
7938int
7939dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
7940{
7941 char iovbuf[WLC_IOCTL_SMLEN];
7942 int ret = 0;
7943 bool auto_on = false;
7944 uint32 mode = wfd_mode;
7945
7946#ifdef ENABLE_TDLS_AUTO_MODE
7947 if (wfd_mode) {
7948 auto_on = false;
7949 } else {
7950 auto_on = true;
7951 }
7952#else
7953 auto_on = false;
7954#endif /* ENABLE_TDLS_AUTO_MODE */
7955 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
7956 if (ret < 0) {
7957 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
7958 return ret;
7959 }
7960
7961
7962 bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
7963 iovbuf, sizeof(iovbuf));
7964 if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7965 sizeof(iovbuf), TRUE, 0)) < 0) &&
7966 (ret != BCME_UNSUPPORTED)) {
7967 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
7968 return ret;
7969 }
7970
7971 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
7972 if (ret < 0) {
7973 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
7974 return ret;
7975 }
7976
7977 dhd->tdls_mode = mode;
7978 return ret;
7979}
7980#ifdef PCIE_FULL_DONGLE
7981void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
7982{
7983 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7984 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
7985 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
7986 tdls_peer_node_t *new = NULL, *prev = NULL;
7987 dhd_if_t *dhdif;
7988 uint8 sa[ETHER_ADDR_LEN];
7989 int ifidx = dhd_net2idx(dhd, dev);
7990
7991 if (ifidx == DHD_BAD_IF)
7992 return;
7993
7994 dhdif = dhd->iflist[ifidx];
7995 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
7996
7997 if (connect) {
7998 while (cur != NULL) {
7999 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
8000 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
8001 __FUNCTION__, __LINE__));
8002 return;
8003 }
8004 cur = cur->next;
8005 }
8006
8007 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
8008 if (new == NULL) {
8009 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
8010 return;
8011 }
8012 memcpy(new->addr, da, ETHER_ADDR_LEN);
8013 new->next = dhdp->peer_tbl.node;
8014 dhdp->peer_tbl.node = new;
8015 dhdp->peer_tbl.tdls_peer_count++;
8016
8017 } else {
8018 while (cur != NULL) {
8019 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
8020 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
8021 if (prev)
8022 prev->next = cur->next;
8023 else
8024 dhdp->peer_tbl.node = cur->next;
8025 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
8026 dhdp->peer_tbl.tdls_peer_count--;
8027 return;
8028 }
8029 prev = cur;
8030 cur = cur->next;
8031 }
8032 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
8033 }
8034}
8035#endif /* PCIE_FULL_DONGLE */
8036#endif
8037
8038bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
8039{
8040 if (!dhd)
8041 return FALSE;
8042
8043 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
8044 return TRUE;
8045 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
8046 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
8047 return TRUE;
8048 else
8049 return FALSE;
8050}
8051#if !defined(AP) && defined(WLP2P)
8052/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
8053 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
8054 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
8055 * would still be named as fw_bcmdhd_apsta.
8056 */
8057uint32
8058dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
8059{
8060 int32 ret = 0;
8061 char buf[WLC_IOCTL_SMLEN];
8062 bool mchan_supported = FALSE;
8063 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
8064 * test mode, that means we only will use the mode as it is
8065 */
8066 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
8067 return 0;
8068 if (FW_SUPPORTED(dhd, vsdb)) {
8069 mchan_supported = TRUE;
8070 }
8071 if (!FW_SUPPORTED(dhd, p2p)) {
8072 DHD_TRACE(("Chip does not support p2p\n"));
8073 return 0;
8074 } else {
8075 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
8076 memset(buf, 0, sizeof(buf));
8077 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
8078 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8079 FALSE, 0)) < 0) {
8080 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
8081 return 0;
8082 } else {
8083 if (buf[0] == 1) {
8084 /* By default, chip supports single chan concurrency,
8085 * now lets check for mchan
8086 */
8087 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
8088 if (mchan_supported)
8089 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
8090 if (FW_SUPPORTED(dhd, rsdb)) {
8091 ret |= DHD_FLAG_RSDB_MODE;
8092 }
8093 if (FW_SUPPORTED(dhd, mp2p)) {
8094 ret |= DHD_FLAG_MP2P_MODE;
8095 }
8096#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
8097 return ret;
8098#else
8099 return 0;
8100#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
8101 }
8102 }
8103 }
8104 return 0;
8105}
8106#endif
8107
8108#ifdef SUPPORT_AP_POWERSAVE
8109#define RXCHAIN_PWRSAVE_PPS 10
8110#define RXCHAIN_PWRSAVE_QUIET_TIME 10
8111#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
8112int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
8113{
8114 char iovbuf[128];
8115 int32 pps = RXCHAIN_PWRSAVE_PPS;
8116 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
8117 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
8118
8119 if (enable) {
8120 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8121 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8122 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8123 DHD_ERROR(("Failed to enable AP power save\n"));
8124 }
8125 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
8126 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8127 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8128 DHD_ERROR(("Failed to set pps\n"));
8129 }
8130 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
8131 4, iovbuf, sizeof(iovbuf));
8132 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8133 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8134 DHD_ERROR(("Failed to set quiet time\n"));
8135 }
8136 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
8137 4, iovbuf, sizeof(iovbuf));
8138 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8139 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8140 DHD_ERROR(("Failed to set stas assoc check\n"));
8141 }
8142 } else {
8143 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8144 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8145 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8146 DHD_ERROR(("Failed to disable AP power save\n"));
8147 }
8148 }
8149
8150 return 0;
8151}
8152#endif /* SUPPORT_AP_POWERSAVE */
8153
8154
8155int
8156dhd_preinit_ioctls(dhd_pub_t *dhd)
8157{
8158 int ret = 0;
8159 char eventmask[WL_EVENTING_MASK_LEN];
8160 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
8161 uint32 buf_key_b4_m4 = 1;
8162#ifndef WL_CFG80211
8163 u32 up = 0;
8164#endif
8165 uint8 msglen;
8166 eventmsgs_ext_t *eventmask_msg = NULL;
8167 char* iov_buf = NULL;
8168 int ret2 = 0;
8169#if defined(CUSTOM_AMPDU_BA_WSIZE)
8170 uint32 ampdu_ba_wsize = 0;
8171#endif
8172#if defined(CUSTOM_AMPDU_MPDU)
8173 int32 ampdu_mpdu = 0;
8174#endif
8175#if defined(CUSTOM_AMPDU_RELEASE)
8176 int32 ampdu_release = 0;
8177#endif
8178#if defined(CUSTOM_AMSDU_AGGSF)
8179 int32 amsdu_aggsf = 0;
8180#endif
8181#ifdef SUPPORT_SENSORHUB
8182 int32 shub_enable = 0;
8183#endif /* SUPPORT_SENSORHUB */
8184#if defined(BCMSDIO)
8185#ifdef PROP_TXSTATUS
8186 int wlfc_enable = TRUE;
8187#ifndef DISABLE_11N
8188 uint32 hostreorder = 1;
8189 uint wl_down = 1;
8190#endif /* DISABLE_11N */
8191#endif /* PROP_TXSTATUS */
8192#endif
8193#ifdef PCIE_FULL_DONGLE
8194 uint32 wl_ap_isolate;
8195#endif /* PCIE_FULL_DONGLE */
8196
8197#if defined(BCMSDIO)
8198 /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
8199 uint32 frameburst = 0;
8200#else
8201 uint32 frameburst = 1;
8202#endif /* BCMSDIO */
5967f664 8203
ef6a5fee
RC
8204#ifdef DHD_ENABLE_LPC
8205 uint32 lpc = 1;
8206#endif /* DHD_ENABLE_LPC */
8207 uint power_mode = PM_FAST;
8208#if defined(BCMSDIO)
8209 uint32 dongle_align = DHD_SDALIGN;
8210 uint32 glom = CUSTOM_GLOM_SETTING;
8211#endif /* defined(BCMSDIO) */
8212#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8213 uint32 credall = 1;
8214#endif
8215 uint bcn_timeout = dhd->conf->bcn_timeout;
8216#ifdef ENABLE_BCN_LI_BCN_WAKEUP
8217 uint32 bcn_li_bcn = 1;
8218#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8219 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
8220#if defined(ARP_OFFLOAD_SUPPORT)
8221 int arpoe = 1;
8222#endif
8223 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
8224 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
8225 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
8226 char buf[WLC_IOCTL_SMLEN];
8227 char *ptr;
8228 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
8229#ifdef ROAM_ENABLE
8230 uint roamvar = 0;
8231 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
8232 int roam_scan_period[2] = {10, WLC_BAND_ALL};
8233 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
8234#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
8235 int roam_fullscan_period = 60;
8236#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8237 int roam_fullscan_period = 120;
8238#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8239#else
8240#ifdef DISABLE_BUILTIN_ROAM
8241 uint roamvar = 1;
8242#endif /* DISABLE_BUILTIN_ROAM */
8243#endif /* ROAM_ENABLE */
8244
8245#if defined(SOFTAP)
8246 uint dtim = 1;
8247#endif
8248#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
8249 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
8250 struct ether_addr p2p_ea;
8251#endif
8252#ifdef SOFTAP_UAPSD_OFF
8253 uint32 wme_apsd = 0;
8254#endif /* SOFTAP_UAPSD_OFF */
8255#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
8256 uint32 apsta = 1; /* Enable APSTA mode */
8257#elif defined(SOFTAP_AND_GC)
8258 uint32 apsta = 0;
8259 int ap_mode = 1;
8260#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
8261#ifdef GET_CUSTOM_MAC_ENABLE
8262 struct ether_addr ea_addr;
424b00bf 8263 char hw_ether[62];
ef6a5fee
RC
8264#endif /* GET_CUSTOM_MAC_ENABLE */
8265
8266#ifdef DISABLE_11N
8267 uint32 nmode = 0;
8268#endif /* DISABLE_11N */
8269
8270#ifdef USE_WL_TXBF
8271 uint32 txbf = 1;
8272#endif /* USE_WL_TXBF */
8273#if defined(PROP_TXSTATUS)
8274#ifdef USE_WFA_CERT_CONF
8275 uint32 proptx = 0;
8276#endif /* USE_WFA_CERT_CONF */
8277#endif /* PROP_TXSTATUS */
8278#ifdef CUSTOM_PSPRETEND_THR
8279 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
8280#endif
8281 uint32 rsdb_mode = 0;
8282#ifdef ENABLE_TEMP_THROTTLING
8283 wl_temp_control_t temp_control;
8284#endif /* ENABLE_TEMP_THROTTLING */
8285#ifdef DISABLE_PRUNED_SCAN
8286 uint32 scan_features = 0;
8287#endif /* DISABLE_PRUNED_SCAN */
8288#ifdef CUSTOM_EVENT_PM_WAKE
8289 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
8290#endif /* CUSTOM_EVENT_PM_WAKE */
8291#ifdef PKT_FILTER_SUPPORT
8292 dhd_pkt_filter_enable = TRUE;
8293#endif /* PKT_FILTER_SUPPORT */
8294#ifdef WLTDLS
8295 dhd->tdls_enable = FALSE;
8296 dhd_tdls_set_mode(dhd, false);
8297#endif /* WLTDLS */
8298 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
8299 DHD_TRACE(("Enter %s\n", __FUNCTION__));
8300
08dfb6c4 8301 dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", dhd->conf->band, 0, FALSE);
ef6a5fee
RC
8302#ifdef DHDTCPACK_SUPPRESS
8303 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
8304 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
8305#endif
8306
8307 dhd->op_mode = 0;
8308#ifdef CUSTOMER_HW4_DEBUG
8309 if (!dhd_validate_chipid(dhd)) {
8310 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
8311 __FUNCTION__, dhd_bus_chip_id(dhd)));
8312#ifndef SUPPORT_MULTIPLE_CHIPS
8313 ret = BCME_BADARG;
8314 goto done;
8315#endif /* !SUPPORT_MULTIPLE_CHIPS */
8316 }
8317#endif /* CUSTOMER_HW4_DEBUG */
8318 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8319 (op_mode == DHD_FLAG_MFG_MODE)) {
8320#ifdef DHD_PCIE_RUNTIMEPM
8321 /* Disable RuntimePM in mfg mode */
8322 DHD_DISABLE_RUNTIME_PM(dhd);
8323 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
8324#endif /* DHD_PCIE_RUNTIME_PM */
8325 /* Check and adjust IOCTL response timeout for Manufactring firmware */
8326 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
8327 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
8328 __FUNCTION__));
8329 } else {
8330 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
8331 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
8332 }
8333#ifdef GET_CUSTOM_MAC_ENABLE
424b00bf 8334 ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether);
ef6a5fee
RC
8335 if (!ret) {
8336 memset(buf, 0, sizeof(buf));
424b00bf 8337 bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
ef6a5fee
RC
8338 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
8339 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8340 if (ret < 0) {
424b00bf
WR
8341 memset(buf, 0, sizeof(buf));
8342 bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
8343 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8344 if (ret) {
8345 int i;
8346 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
8347 __FUNCTION__, MAC2STRDBG(hw_ether), ret));
8348 for (i=0; i<sizeof(hw_ether)-ETHER_ADDR_LEN; i++) {
8349 printf("0x%02x,", hw_ether[i+ETHER_ADDR_LEN]);
8350 if ((i+1)%8 == 0)
8351 printf("\n");
8352 }
8353 ret = BCME_NOTUP;
8354 goto done;
8355 }
ef6a5fee 8356 }
424b00bf
WR
8357 } else {
8358 DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
8359 ret = BCME_NOTUP;
8360 goto done;
08dfb6c4 8361 }
ef6a5fee 8362#endif /* GET_CUSTOM_MAC_ENABLE */
08dfb6c4
RC
8363 /* Get the default device MAC address directly from firmware */
8364 memset(buf, 0, sizeof(buf));
8365 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
8366 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8367 FALSE, 0)) < 0) {
8368 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
8369 ret = BCME_NOTUP;
8370 goto done;
8371 }
8372 /* Update public MAC address after reading from Firmware */
8373 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
ef6a5fee 8374
08dfb6c4
RC
8375 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
8376 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
8377 goto done;
ef6a5fee 8378 }
ef6a5fee
RC
8379
8380 /* get a capabilities from firmware */
8381 {
8382 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
8383 memset(dhd->fw_capabilities, 0, cap_buf_size);
8384 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
8385 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
8386 (cap_buf_size - 1), FALSE, 0)) < 0)
8387 {
8388 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
8389 __FUNCTION__, ret));
8390 return 0;
8391 }
8392
8393 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
8394 dhd->fw_capabilities[0] = ' ';
8395 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
8396 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
8397 }
8398
8399 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
8400 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
8401#ifdef SET_RANDOM_MAC_SOFTAP
8402 uint rand_mac;
8403#endif /* SET_RANDOM_MAC_SOFTAP */
8404 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
8405#if defined(ARP_OFFLOAD_SUPPORT)
8406 arpoe = 0;
8407#endif
8408#ifdef PKT_FILTER_SUPPORT
8409 dhd_pkt_filter_enable = FALSE;
8410#endif
8411#ifdef SET_RANDOM_MAC_SOFTAP
8412 SRANDOM32((uint)jiffies);
8413 rand_mac = RANDOM32();
8414 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
8415 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
8416 iovbuf[2] = (unsigned char)vendor_oui;
8417 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
8418 iovbuf[4] = (unsigned char)(rand_mac >> 8);
8419 iovbuf[5] = (unsigned char)(rand_mac >> 16);
8420
8421 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
8422 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8423 if (ret < 0) {
8424 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
8425 } else
8426 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
8427#endif /* SET_RANDOM_MAC_SOFTAP */
8428#if !defined(AP) && defined(WL_CFG80211)
8429 /* Turn off MPC in AP mode */
8430 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8431 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8432 sizeof(iovbuf), TRUE, 0)) < 0) {
8433 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
8434 }
8435#endif
8436#ifdef USE_DYNAMIC_F2_BLKSIZE
8437 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8438#endif /* USE_DYNAMIC_F2_BLKSIZE */
8439#ifdef SUPPORT_AP_POWERSAVE
8440 dhd_set_ap_powersave(dhd, 0, TRUE);
8441#endif /* SUPPORT_AP_POWERSAVE */
8442#ifdef SOFTAP_UAPSD_OFF
8443 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
8444 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8445 sizeof(iovbuf), TRUE, 0)) < 0) {
8446 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
8447 __FUNCTION__, ret));
8448 }
8449#endif /* SOFTAP_UAPSD_OFF */
8450 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8451 (op_mode == DHD_FLAG_MFG_MODE)) {
8452#if defined(ARP_OFFLOAD_SUPPORT)
8453 arpoe = 0;
8454#endif /* ARP_OFFLOAD_SUPPORT */
8455#ifdef PKT_FILTER_SUPPORT
8456 dhd_pkt_filter_enable = FALSE;
8457#endif /* PKT_FILTER_SUPPORT */
8458 dhd->op_mode = DHD_FLAG_MFG_MODE;
8459#ifdef USE_DYNAMIC_F2_BLKSIZE
8460 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8461#endif /* USE_DYNAMIC_F2_BLKSIZE */
8462 if (FW_SUPPORTED(dhd, rsdb)) {
8463 rsdb_mode = 0;
8464 bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
8465 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8466 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8467 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
8468 __FUNCTION__, ret));
8469 }
8470 }
8471 } else {
8472 uint32 concurrent_mode = 0;
8473 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
8474 (op_mode == DHD_FLAG_P2P_MODE)) {
8475#if defined(ARP_OFFLOAD_SUPPORT)
8476 arpoe = 0;
8477#endif
8478#ifdef PKT_FILTER_SUPPORT
8479 dhd_pkt_filter_enable = FALSE;
8480#endif
8481 dhd->op_mode = DHD_FLAG_P2P_MODE;
8482 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
8483 (op_mode == DHD_FLAG_IBSS_MODE)) {
8484 dhd->op_mode = DHD_FLAG_IBSS_MODE;
8485 } else
8486 dhd->op_mode = DHD_FLAG_STA_MODE;
8487#if !defined(AP) && defined(WLP2P)
8488 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
8489 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
8490#if defined(ARP_OFFLOAD_SUPPORT)
8491 arpoe = 1;
8492#endif
8493 dhd->op_mode |= concurrent_mode;
8494 }
8495
8496 /* Check if we are enabling p2p */
8497 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8498 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8499 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8500 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8501 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
8502 }
8503
8504#if defined(SOFTAP_AND_GC)
8505 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
8506 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
8507 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
8508 }
8509#endif
8510 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
8511 ETHER_SET_LOCALADDR(&p2p_ea);
8512 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
8513 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
8514 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8515 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8516 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
8517 } else {
8518 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
8519 }
8520 }
8521#else
8522 (void)concurrent_mode;
8523#endif
8524 }
08dfb6c4
RC
8525#ifdef BCMSDIO
8526 if (dhd->conf->sd_f2_blocksize)
8527 dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
8528#endif
ef6a5fee
RC
8529
8530#ifdef RSDB_MODE_FROM_FILE
8531 (void)dhd_rsdb_mode_from_file(dhd);
8532#endif /* RSDB_MODE_FROM_FILE */
8533
8534#ifdef DISABLE_PRUNED_SCAN
8535 if (FW_SUPPORTED(dhd, rsdb)) {
8536 memset(iovbuf, 0, sizeof(iovbuf));
8537 bcm_mkiovar("scan_features", (char *)&scan_features,
8538 4, iovbuf, sizeof(iovbuf));
8539 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
8540 iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8541 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
8542 __FUNCTION__, ret));
8543 } else {
8544 memcpy(&scan_features, iovbuf, 4);
8545 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
8546 memset(iovbuf, 0, sizeof(iovbuf));
8547 bcm_mkiovar("scan_features", (char *)&scan_features,
8548 4, iovbuf, sizeof(iovbuf));
8549 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8550 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8551 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
8552 __FUNCTION__, ret));
8553 }
8554 }
8555 }
8556#endif /* DISABLE_PRUNED_SCAN */
8557
8558 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
8559 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
8560 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
8561 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
8562 dhd->info->rxthread_enabled = FALSE;
8563 else
8564 dhd->info->rxthread_enabled = TRUE;
8565 #endif
8566 /* Set Country code */
8567 if (dhd->dhd_cspec.ccode[0] != 0) {
8568 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
8569 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
8570 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
8571 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8572 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
8573 } else {
8574 dhd_conf_set_country(dhd);
8575 dhd_conf_fix_country(dhd);
8576 }
08dfb6c4 8577 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "autocountry", dhd->conf->autocountry, 0, FALSE);
ef6a5fee
RC
8578 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
8579
8580
8581 /* Set Listen Interval */
8582 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
8583 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8584 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
8585
8586#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
8587#ifdef USE_WFA_CERT_CONF
8588 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
8589 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
8590 }
8591#endif /* USE_WFA_CERT_CONF */
8592 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
8593 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
8594 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8595#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
8596#if defined(ROAM_ENABLE)
8597 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
8598 sizeof(roam_trigger), TRUE, 0)) < 0)
8599 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
8600 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
8601 sizeof(roam_scan_period), TRUE, 0)) < 0)
8602 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
8603 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
8604 sizeof(roam_delta), TRUE, 0)) < 0)
8605 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
8606 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
8607 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8608 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
8609#endif /* ROAM_ENABLE */
8610 dhd_conf_set_roam(dhd);
8611
8612#ifdef CUSTOM_EVENT_PM_WAKE
8613 bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
8614 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8615 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
8616 }
8617#endif /* CUSTOM_EVENT_PM_WAKE */
8618#ifdef WLTDLS
8619#ifdef ENABLE_TDLS_AUTO_MODE
8620 /* by default TDLS on and auto mode on */
8621 _dhd_tdls_enable(dhd, true, true, NULL);
8622#else
8623 /* by default TDLS on and auto mode off */
8624 _dhd_tdls_enable(dhd, true, false, NULL);
8625#endif /* ENABLE_TDLS_AUTO_MODE */
8626#endif /* WLTDLS */
8627
8628#ifdef DHD_ENABLE_LPC
8629 /* Set lpc 1 */
8630 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8631 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8632 sizeof(iovbuf), TRUE, 0)) < 0) {
8633 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
8634
8635 if (ret == BCME_NOTDOWN) {
8636 uint wl_down = 1;
8637 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
8638 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8639 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
8640
8641 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8642 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8643 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
8644 }
8645 }
8646#endif /* DHD_ENABLE_LPC */
424b00bf 8647 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "lpc", dhd->conf->lpc, 0, TRUE);
ef6a5fee
RC
8648
8649 /* Set PowerSave mode */
8650 if (dhd->conf->pm >= 0)
8651 power_mode = dhd->conf->pm;
8652 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
08dfb6c4 8653 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "pm2_sleep_ret", dhd->conf->pm2_sleep_ret, 0, FALSE);
ef6a5fee
RC
8654
8655#if defined(BCMSDIO)
8656 /* Match Host and Dongle rx alignment */
8657 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
8658 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8659
8660#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8661 /* enable credall to reduce the chance of no bus credit happened. */
8662 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
8663 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8664#endif
8665
8666#ifdef USE_WFA_CERT_CONF
8667 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
8668 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
8669 }
8670#endif /* USE_WFA_CERT_CONF */
8671 if (glom != DEFAULT_GLOM_VALUE) {
8672 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
8673 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
8674 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8675 }
8676#endif /* defined(BCMSDIO) */
8677
8678 /* Setup timeout if Beacons are lost and roam is off to report link down */
8679 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
8680 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8681 /* Setup assoc_retry_max count to reconnect target AP in dongle */
8682 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
8683 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8684#if defined(AP) && !defined(WLP2P)
8685 /* Turn off MPC in AP mode */
8686 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8687 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8688 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8689 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8690#endif /* defined(AP) && !defined(WLP2P) */
8691 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
424b00bf 8692 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 0, TRUE);
08dfb6c4
RC
8693 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
8694 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "stbc_tx", dhd->conf->stbc, 0, FALSE);
8695 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "stbc_rx", dhd->conf->stbc, 0, FALSE);
8696 dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", dhd->conf->srl, 0, TRUE);
8697 dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", dhd->conf->lrl, 0, FALSE);
8698 dhd_conf_set_intiovar(dhd, WLC_SET_SPECT_MANAGMENT, "WLC_SET_SPECT_MANAGMENT", dhd->conf->spect, 0, FALSE);
8699 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
8700 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "vhtmode", dhd->conf->vhtmode, 0, TRUE);
424b00bf 8701#ifdef IDHCP
08dfb6c4 8702 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", dhd->conf->dhcpc_enable, 0, FALSE);
424b00bf
WR
8703 if (dhd->conf->dhcpd_enable >= 0) {
8704 dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_addr", (char *)&dhd->conf->dhcpd_ip_addr, sizeof(dhd->conf->dhcpd_ip_addr), FALSE);
8705 dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_mask", (char *)&dhd->conf->dhcpd_ip_mask, sizeof(dhd->conf->dhcpd_ip_mask), FALSE);
8706 dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_start", (char *)&dhd->conf->dhcpd_ip_start, sizeof(dhd->conf->dhcpd_ip_start), FALSE);
8707 dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_end", (char *)&dhd->conf->dhcpd_ip_end, sizeof(dhd->conf->dhcpd_ip_end), FALSE);
8708 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable", dhd->conf->dhcpd_enable, 0, FALSE);
8709 }
08dfb6c4 8710#endif
ef6a5fee
RC
8711 dhd_conf_set_bw_cap(dhd);
8712
8713#ifdef MIMO_ANT_SETTING
8714 dhd_sel_ant_from_file(dhd);
8715#endif /* MIMO_ANT_SETTING */
8716
8717#if defined(SOFTAP)
8718 if (ap_fw_loaded == TRUE) {
8719 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
8720 }
8721#endif
8722
8723#if defined(KEEP_ALIVE)
8724 {
8725 /* Set Keep Alive : be sure to use FW with -keepalive */
8726 int res;
8727
8728#if defined(SOFTAP)
8729 if (ap_fw_loaded == FALSE)
8730#endif
8731 if (!(dhd->op_mode &
8732 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
8733 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
8734 DHD_ERROR(("%s set keeplive failed %d\n",
8735 __FUNCTION__, res));
8736 }
8737 }
8738#endif /* defined(KEEP_ALIVE) */
8739
8740#ifdef USE_WL_TXBF
8741 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
8742 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8743 sizeof(iovbuf), TRUE, 0)) < 0) {
8744 DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
8745 }
8746#endif /* USE_WL_TXBF */
08dfb6c4 8747 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", dhd->conf->txbf, 0, FALSE);
ef6a5fee
RC
8748
8749#ifdef USE_WFA_CERT_CONF
8750#ifdef USE_WL_FRAMEBURST
8751 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
8752 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
8753 }
8754#endif /* USE_WL_FRAMEBURST */
8755#ifdef DISABLE_FRAMEBURST_VSDB
8756 g_frameburst = frameburst;
8757#endif /* DISABLE_FRAMEBURST_VSDB */
8758#endif /* USE_WFA_CERT_CONF */
8759#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
8760 /* Disable Framebursting for SofAP */
8761 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
8762 frameburst = 0;
8763 }
8764#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
8765 /* Set frameburst to value */
8766 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
8767 sizeof(frameburst), TRUE, 0)) < 0) {
8768 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
8769 }
08dfb6c4 8770 dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", dhd->conf->frameburst, 0, FALSE);
ef6a5fee
RC
8771#if defined(CUSTOM_AMPDU_BA_WSIZE)
8772 /* Set ampdu ba wsize to 64 or 16 */
8773#ifdef CUSTOM_AMPDU_BA_WSIZE
8774 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
8775#endif
8776 if (ampdu_ba_wsize != 0) {
8777 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
8778 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8779 sizeof(iovbuf), TRUE, 0)) < 0) {
8780 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
8781 __FUNCTION__, ampdu_ba_wsize, ret));
8782 }
8783 }
8784#endif
08dfb6c4 8785 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
ef6a5fee
RC
8786
8787 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
8788 if (iov_buf == NULL) {
8789 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
8790 ret = BCME_NOMEM;
8791 goto done;
8792 }
8793#ifdef ENABLE_TEMP_THROTTLING
8794 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
8795 memset(&temp_control, 0, sizeof(temp_control));
8796 temp_control.enable = 1;
8797 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
8798 bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
8799 sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
8800 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
8801 if (ret < 0) {
8802 DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
8803 __FUNCTION__, ret));
8804 }
8805 }
8806#endif /* ENABLE_TEMP_THROTTLING */
8807#if defined(CUSTOM_AMPDU_MPDU)
8808 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
8809 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
8810 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
8811 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8812 sizeof(iovbuf), TRUE, 0)) < 0) {
8813 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
8814 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
8815 }
8816 }
8817#endif /* CUSTOM_AMPDU_MPDU */
8818
8819#if defined(CUSTOM_AMPDU_RELEASE)
8820 ampdu_release = CUSTOM_AMPDU_RELEASE;
8821 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
8822 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
8823 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8824 sizeof(iovbuf), TRUE, 0)) < 0) {
8825 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
8826 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
8827 }
8828 }
8829#endif /* CUSTOM_AMPDU_RELEASE */
8830
8831#if defined(CUSTOM_AMSDU_AGGSF)
8832 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
8833 if (amsdu_aggsf != 0) {
8834 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
8835 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8836 if (ret < 0) {
8837 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
8838 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
8839 }
8840 }
8841#endif /* CUSTOM_AMSDU_AGGSF */
8842
8843#ifdef CUSTOM_PSPRETEND_THR
8844 /* Turn off MPC in AP mode */
8845 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
8846 iovbuf, sizeof(iovbuf));
8847 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8848 sizeof(iovbuf), TRUE, 0)) < 0) {
8849 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
8850 __FUNCTION__, ret));
8851 }
8852#endif
5967f664 8853
ef6a5fee
RC
8854 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
8855 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8856 sizeof(iovbuf), TRUE, 0)) < 0) {
8857 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
8858 }
8859
8860 /* Read event_msgs mask */
8861 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8862 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8863 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
8864 goto done;
8865 }
8866 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
8867
8868 /* Setup event_msgs */
8869 setbit(eventmask, WLC_E_SET_SSID);
8870 setbit(eventmask, WLC_E_PRUNE);
8871 setbit(eventmask, WLC_E_AUTH);
8872 setbit(eventmask, WLC_E_AUTH_IND);
8873 setbit(eventmask, WLC_E_ASSOC);
8874 setbit(eventmask, WLC_E_REASSOC);
8875 setbit(eventmask, WLC_E_REASSOC_IND);
8876 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
8877 setbit(eventmask, WLC_E_DEAUTH);
8878 setbit(eventmask, WLC_E_DEAUTH_IND);
8879 setbit(eventmask, WLC_E_DISASSOC_IND);
8880 setbit(eventmask, WLC_E_DISASSOC);
8881 setbit(eventmask, WLC_E_JOIN);
8882 setbit(eventmask, WLC_E_START);
8883 setbit(eventmask, WLC_E_ASSOC_IND);
8884 setbit(eventmask, WLC_E_PSK_SUP);
8885 setbit(eventmask, WLC_E_LINK);
8886 setbit(eventmask, WLC_E_MIC_ERROR);
8887 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
8888 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
8889#ifndef WL_CFG80211
8890 setbit(eventmask, WLC_E_PMKID_CACHE);
8891 setbit(eventmask, WLC_E_TXFAIL);
8892#endif
8893 setbit(eventmask, WLC_E_JOIN_START);
8894// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
8895#ifdef DHD_DEBUG
8896 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
8897#endif
8898#ifdef WLMEDIA_HTSF
8899 setbit(eventmask, WLC_E_HTSFSYNC);
8900#endif /* WLMEDIA_HTSF */
8901#ifdef PNO_SUPPORT
8902 setbit(eventmask, WLC_E_PFN_NET_FOUND);
8903 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
8904 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
8905 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
8906#endif /* PNO_SUPPORT */
8907 /* enable dongle roaming event */
8908 setbit(eventmask, WLC_E_ROAM);
8909 setbit(eventmask, WLC_E_BSSID);
8910#ifdef WLTDLS
8911 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
8912#endif /* WLTDLS */
08dfb6c4
RC
8913#ifdef WL_ESCAN
8914 setbit(eventmask, WLC_E_ESCAN_RESULT);
424b00bf 8915#endif /* WL_ESCAN */
ef6a5fee
RC
8916#ifdef WL_CFG80211
8917 setbit(eventmask, WLC_E_ESCAN_RESULT);
8918 setbit(eventmask, WLC_E_AP_STARTED);
8919 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8920 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
8921 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
8922 }
8923#endif /* WL_CFG80211 */
8924
8925#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
8926 if (dhd_logtrace_from_file(dhd)) {
8927 setbit(eventmask, WLC_E_TRACE);
8928 } else {
8929 clrbit(eventmask, WLC_E_TRACE);
8930 }
8931#elif defined(SHOW_LOGTRACE)
8932 setbit(eventmask, WLC_E_TRACE);
8933#else
8934 clrbit(eventmask, WLC_E_TRACE);
8935#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
8936
8937 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
8938#ifdef DHD_LOSSLESS_ROAMING
8939 setbit(eventmask, WLC_E_ROAM_PREP);
8940#endif
8941#ifdef CUSTOM_EVENT_PM_WAKE
8942 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
8943#endif /* CUSTOM_EVENT_PM_WAKE */
8944#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
8945 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
8946#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
8947
424b00bf
WR
8948#ifdef SUSPEND_EVENT
8949 bcopy(eventmask, dhd->conf->resume_eventmask, WL_EVENTING_MASK_LEN);
8950#endif
ef6a5fee
RC
8951 /* Write updated Event mask */
8952 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8953 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8954 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
8955 goto done;
8956 }
8957
8958 /* make up event mask ext message iovar for event larger than 128 */
8959 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
8960 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
8961 if (eventmask_msg == NULL) {
8962 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
8963 ret = BCME_NOMEM;
8964 goto done;
8965 }
8966 bzero(eventmask_msg, msglen);
8967 eventmask_msg->ver = EVENTMSGS_VER;
8968 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8969
8970 /* Read event_msgs_ext mask */
8971 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
8972 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
8973 if (ret2 == 0) { /* event_msgs_ext must be supported */
8974 bcopy(iov_buf, eventmask_msg, msglen);
8975#ifdef GSCAN_SUPPORT
8976 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
8977 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
8978 setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
8979#endif /* GSCAN_SUPPORT */
8980#ifdef BT_WIFI_HANDOVER
8981 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
8982#endif /* BT_WIFI_HANDOVER */
8983
8984 /* Write updated Event mask */
8985 eventmask_msg->ver = EVENTMSGS_VER;
8986 eventmask_msg->command = EVENTMSGS_SET_MASK;
8987 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8988 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
8989 msglen, iov_buf, WLC_IOCTL_SMLEN);
8990 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8991 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
8992 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
8993 goto done;
8994 }
8995 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
8996 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
8997 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
8998 __FUNCTION__, ret2));
8999 } else {
9000 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
9001 ret = ret2;
9002 goto done;
9003 }
9004
9005 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
9006 sizeof(scan_assoc_time), TRUE, 0);
9007 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
9008 sizeof(scan_unassoc_time), TRUE, 0);
9009 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
9010 sizeof(scan_passive_time), TRUE, 0);
9011
9012#ifdef ARP_OFFLOAD_SUPPORT
9013 /* Set and enable ARP offload feature for STA only */
9014#if defined(SOFTAP)
9015 if (arpoe && !ap_fw_loaded)
9016#else
9017 if (arpoe)
9018#endif
9019 {
9020 dhd_arp_offload_enable(dhd, TRUE);
9021 dhd_arp_offload_set(dhd, dhd_arp_mode);
9022 } else {
9023 dhd_arp_offload_enable(dhd, FALSE);
9024 dhd_arp_offload_set(dhd, 0);
9025 }
9026 dhd_arp_enable = arpoe;
9027#endif /* ARP_OFFLOAD_SUPPORT */
9028
9029#ifdef PKT_FILTER_SUPPORT
9030 /* Setup default defintions for pktfilter , enable in suspend */
9031 if (dhd_master_mode) {
9032 dhd->pktfilter_count = 6;
9033 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
9034 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
9035 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
9036 /* apply APP pktfilter */
9037 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
9038
9039 /* Setup filter to allow only unicast */
9040 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
9041
9042 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
9043 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
9044
9045#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
9046 dhd->pktfilter_count = 4;
9047 /* Setup filter to block broadcast and NAT Keepalive packets */
9048 /* discard all broadcast packets */
9049 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
9050 /* discard NAT Keepalive packets */
9051 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
9052 /* discard NAT Keepalive packets */
9053 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
9054 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
9055#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
9056 } else
9057 dhd_conf_discard_pkt_filter(dhd);
9058 dhd_conf_add_pkt_filter(dhd);
9059
9060#if defined(SOFTAP)
9061 if (ap_fw_loaded) {
9062 dhd_enable_packet_filter(0, dhd);
9063 }
9064#endif /* defined(SOFTAP) */
9065 dhd_set_packet_filter(dhd);
9066#endif /* PKT_FILTER_SUPPORT */
9067#ifdef DISABLE_11N
9068 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
9069 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9070 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
9071#endif /* DISABLE_11N */
9072
9073#ifdef ENABLE_BCN_LI_BCN_WAKEUP
9074 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
9075 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9076#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
9077 /* query for 'ver' to get version info from firmware */
9078 memset(buf, 0, sizeof(buf));
9079 ptr = buf;
9080 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
9081 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
9082 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9083 else {
9084 bcmstrtok(&ptr, "\n", 0);
9085 /* Print fw version info */
9086 DHD_ERROR(("Firmware version = %s\n", buf));
9087 strncpy(fw_version, buf, FW_VER_STR_LEN);
9088 dhd_set_version_info(dhd, buf);
9089#ifdef WRITE_WLANINFO
9090 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
9091#endif /* WRITE_WLANINFO */
9092 }
08dfb6c4
RC
9093 /* query for 'clmver' to get clm version info from firmware */
9094 memset(buf, 0, sizeof(buf));
9095 bcm_mkiovar("clmver", (char *)&buf, 4, buf, sizeof(buf));
9096 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
9097 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9098 else {
9099 char *clmver_temp_buf = NULL;
9100
9101 if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
9102 DHD_ERROR(("Couldn't find \"Data:\"\n"));
9103 } else {
9104 ptr = (clmver_temp_buf + strlen("Data:"));
9105 if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
9106 DHD_ERROR(("Couldn't find New line character\n"));
9107 } else {
9108 memset(clm_version, 0, CLM_VER_STR_LEN);
9109 strncpy(clm_version, clmver_temp_buf,
9110 MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
9111 DHD_ERROR((" clm = %s\n", clm_version));
9112 }
9113 }
9114 }
ef6a5fee
RC
9115
9116#if defined(BCMSDIO)
9117 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
9118 // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
08dfb6c4 9119 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:txglom", dhd->conf->bus_txglom, 0, FALSE);
ef6a5fee
RC
9120#endif /* defined(BCMSDIO) */
9121
ef6a5fee
RC
9122#if defined(BCMSDIO)
9123#ifdef PROP_TXSTATUS
9124 if (disable_proptx ||
9125#ifdef PROP_TXSTATUS_VSDB
9126 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
9127 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
9128 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
9129#endif /* PROP_TXSTATUS_VSDB */
9130 FALSE) {
9131 wlfc_enable = FALSE;
9132 }
08dfb6c4
RC
9133 ret = dhd_conf_get_disable_proptx(dhd);
9134 if (ret == 0){
9135 disable_proptx = 0;
9136 wlfc_enable = TRUE;
9137 } else if (ret >= 1) {
9138 disable_proptx = 1;
9139 wlfc_enable = FALSE;
9140 /* terence 20161229: we should set ampdu_hostreorder=0 when disalbe_proptx=1 */
9141 hostreorder = 0;
9142 }
ef6a5fee
RC
9143
9144#ifdef USE_WFA_CERT_CONF
9145 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
9146 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
9147 wlfc_enable = proptx;
9148 }
9149#endif /* USE_WFA_CERT_CONF */
9150
9151#ifndef DISABLE_11N
9152 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
9153 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
9154 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
9155 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
9156 if (ret2 != BCME_UNSUPPORTED)
9157 ret = ret2;
9158
9159 if (ret == BCME_NOTDOWN) {
9160 uint wl_down = 1;
9161 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
9162 sizeof(wl_down), TRUE, 0);
9163 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
9164 __FUNCTION__, ret2, hostreorder));
9165
9166 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
9167 iovbuf, sizeof(iovbuf));
9168 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9169 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
9170 if (ret2 != BCME_UNSUPPORTED)
9171 ret = ret2;
9172 }
9173 if (ret2 != BCME_OK)
9174 hostreorder = 0;
9175 }
9176#endif /* DISABLE_11N */
9177
9178
08dfb6c4 9179 if (wlfc_enable) {
ef6a5fee 9180 dhd_wlfc_init(dhd);
08dfb6c4
RC
9181 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
9182 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
9183 }
ef6a5fee
RC
9184#ifndef DISABLE_11N
9185 else if (hostreorder)
9186 dhd_wlfc_hostreorder_init(dhd);
9187#endif /* DISABLE_11N */
08dfb6c4
RC
9188#else
9189 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
9190 printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
9191 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
ef6a5fee 9192#endif /* PROP_TXSTATUS */
08dfb6c4 9193 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", dhd->conf->ampdu_hostreorder, 0, TRUE);
ef6a5fee
RC
9194#endif /* BCMSDIO || BCMBUS */
9195#ifdef PCIE_FULL_DONGLE
9196 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
9197 if (FW_SUPPORTED(dhd, ap)) {
9198 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
9199 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
9200 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9201 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9202 }
9203#endif /* PCIE_FULL_DONGLE */
9204#ifdef PNO_SUPPORT
9205 if (!dhd->pno_state) {
9206 dhd_pno_init(dhd);
9207 }
9208#endif
9209#ifdef WL11U
9210 dhd_interworking_enable(dhd);
9211#endif /* WL11U */
9212#ifndef WL_CFG80211
9213 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
9214#endif
9215
9216#ifdef SUPPORT_SENSORHUB
9217 bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
9218 if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
9219 FALSE, 0)) < 0) {
9220 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
9221 __FUNCTION__, ret));
9222 dhd->info->shub_enable = 0;
9223 } else {
9224 memcpy(&shub_enable, iovbuf, sizeof(uint32));
9225 dhd->info->shub_enable = shub_enable;
9226 DHD_ERROR(("%s: checking sensorhub enable %d\n",
9227 __FUNCTION__, dhd->info->shub_enable));
9228 }
9229#endif /* SUPPORT_SENSORHUB */
9230done:
9231
9232 if (eventmask_msg)
9233 kfree(eventmask_msg);
9234 if (iov_buf)
9235 kfree(iov_buf);
9236
9237 return ret;
9238}
9239
9240
9241int
9242dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
9243{
9244 char buf[strlen(name) + 1 + cmd_len];
9245 int len = sizeof(buf);
9246 wl_ioctl_t ioc;
9247 int ret;
9248
9249 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
9250
9251 memset(&ioc, 0, sizeof(ioc));
9252
9253 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
9254 ioc.buf = buf;
9255 ioc.len = len;
9256 ioc.set = set;
9257
9258 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
9259 if (!set && ret >= 0)
9260 memcpy(cmd_buf, buf, cmd_len);
9261
9262 return ret;
9263}
9264
9265int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
9266{
9267 struct dhd_info *dhd = dhdp->info;
9268 struct net_device *dev = NULL;
9269
9270 ASSERT(dhd && dhd->iflist[ifidx]);
9271 dev = dhd->iflist[ifidx]->net;
9272 ASSERT(dev);
9273
9274 if (netif_running(dev)) {
9275 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
9276 return BCME_NOTDOWN;
9277 }
9278
9279#define DHD_MIN_MTU 1500
9280#define DHD_MAX_MTU 1752
9281
9282 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
9283 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
9284 return BCME_BADARG;
9285 }
9286
9287 dev->mtu = new_mtu;
9288 return 0;
9289}
9290
9291#ifdef ARP_OFFLOAD_SUPPORT
9292/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
9293void
9294aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
9295{
9296 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
9297 int i;
9298 int ret;
9299
9300 bzero(ipv4_buf, sizeof(ipv4_buf));
9301
9302 /* display what we've got */
9303 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9304 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
9305#ifdef AOE_DBG
9306 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9307#endif
9308 /* now we saved hoste_ip table, clr it in the dongle AOE */
9309 dhd_aoe_hostip_clr(dhd_pub, idx);
9310
9311 if (ret) {
9312 DHD_ERROR(("%s failed\n", __FUNCTION__));
9313 return;
9314 }
9315
9316 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
9317 if (add && (ipv4_buf[i] == 0)) {
9318 ipv4_buf[i] = ipa;
9319 add = FALSE; /* added ipa to local table */
9320 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
9321 __FUNCTION__, i));
9322 } else if (ipv4_buf[i] == ipa) {
9323 ipv4_buf[i] = 0;
9324 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
9325 __FUNCTION__, ipa, i));
9326 }
9327
9328 if (ipv4_buf[i] != 0) {
9329 /* add back host_ip entries from our local cache */
9330 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
9331 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
9332 __FUNCTION__, ipv4_buf[i], i));
9333 }
9334 }
9335#ifdef AOE_DBG
9336 /* see the resulting hostip table */
9337 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9338 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
9339 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9340#endif
9341}
9342
/*
 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
 * whenever there is an event related to an IP address.
 * ptr : kernel provided pointer to IP address that has changed
 *
 * Mirrors IPv4 address changes into the dongle's ARP-offload host-IP table
 * (or caches the address until the bus is up). Always returns NOTIFY_DONE
 * so other notifier-chain subscribers still run.
 */
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

	dhd_info_t *dhd;
	dhd_pub_t *dhd_pub;
	int idx;

	/* Ignore everything unless ARP offload is active and the event carries a device. */
	if (!dhd_arp_enable)
		return NOTIFY_DONE;
	if (!ifa || !(ifa->ifa_dev->dev))
		return NOTIFY_DONE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	/* Filter notifications meant for non Broadcom devices */
	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
#if defined(WL_ENABLE_P2P_IF)
		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
#endif /* WL_ENABLE_P2P_IF */
			return NOTIFY_DONE;
	}
#endif /* LINUX_VERSION_CODE */

	dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
	if (!dhd)
		return NOTIFY_DONE;

	dhd_pub = &dhd->pub;

	/* Map the notifying net_device back to a DHD interface index.
	 * arp_version 1 firmware only supports the primary interface (idx 0);
	 * otherwise search iflist and fall back to 0 when no match is found.
	 */
	if (dhd_pub->arp_version == 1) {
		idx = 0;
	} else {
		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
				break;
		}
		if (idx < DHD_MAX_IFS) {
			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
		} else {
			DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
			idx = 0;
		}
	}

	switch (event) {
		case NETDEV_UP:
			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));

			if (dhd->pub.busstate != DHD_BUS_DATA) {
				/* Bus not ready yet: remember the address; it is pushed
				 * to the dongle later (only one pending address is kept,
				 * a newer one overwrites the old).
				 */
				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
				if (dhd->pend_ipaddr) {
					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
						__FUNCTION__, dhd->pend_ipaddr));
				}
				dhd->pend_ipaddr = ifa->ifa_address;
				break;
			}

#ifdef AOE_IP_ALIAS_SUPPORT
			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
				__FUNCTION__));
			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
			break;

		case NETDEV_DOWN:
			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
			dhd->pend_ipaddr = 0;
#ifdef AOE_IP_ALIAS_SUPPORT
			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
				__FUNCTION__));
			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
#else
			/* Without alias support, just wipe the dongle's host-IP and ARP tables. */
			dhd_aoe_hostip_clr(&dhd->pub, idx);
			dhd_aoe_arp_clr(&dhd->pub, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
			break;

		default:
			DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
				__func__, ifa->ifa_label, event));
			break;
	}
	return NOTIFY_DONE;
}
9439#endif /* ARP_OFFLOAD_SUPPORT */
9440
9441#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9442/* Neighbor Discovery Offload: defered handler */
9443static void
9444dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
9445{
9446 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
9447 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
9448 int ret;
9449
9450 if (event != DHD_WQ_WORK_IPV6_NDO) {
9451 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
9452 return;
9453 }
9454
9455 if (!ndo_work) {
9456 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
9457 return;
9458 }
9459
9460 if (!pub) {
9461 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
9462 return;
9463 }
9464
9465 if (ndo_work->if_idx) {
9466 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
9467 return;
9468 }
9469
9470 switch (ndo_work->event) {
9471 case NETDEV_UP:
9472 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
9473 ret = dhd_ndo_enable(pub, TRUE);
9474 if (ret < 0) {
9475 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
9476 }
9477
9478 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
9479 if (ret < 0) {
9480 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
9481 __FUNCTION__, ret));
9482 }
9483 break;
9484 case NETDEV_DOWN:
9485 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
9486 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
9487 if (ret < 0) {
9488 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
9489 __FUNCTION__, ret));
9490 goto done;
9491 }
9492
9493 ret = dhd_ndo_enable(pub, FALSE);
9494 if (ret < 0) {
9495 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
9496 goto done;
9497 }
9498 break;
9499 default:
9500 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
9501 break;
9502 }
9503done:
9504 /* free ndo_work. alloced while scheduling the work */
9505 kfree(ndo_work);
9506
9507 return;
9508}
9509
9510/*
9511 * Neighbor Discovery Offload: Called when an interface
9512 * is assigned with ipv6 address.
9513 * Handles only primary interface
9514 */
9515static int dhd_inet6addr_notifier_call(struct notifier_block *this,
9516 unsigned long event,
9517 void *ptr)
9518{
9519 dhd_info_t *dhd;
9520 dhd_pub_t *dhd_pub;
9521 struct inet6_ifaddr *inet6_ifa = ptr;
9522 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
9523 struct ipv6_work_info_t *ndo_info;
9524 int idx = 0; /* REVISIT */
9525
9526#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9527 /* Filter notifications meant for non Broadcom devices */
9528 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
9529 return NOTIFY_DONE;
9530 }
9531#endif /* LINUX_VERSION_CODE */
9532
9533 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
9534 if (!dhd)
9535 return NOTIFY_DONE;
9536
9537 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
9538 return NOTIFY_DONE;
9539 dhd_pub = &dhd->pub;
9540
9541 if (!FW_SUPPORTED(dhd_pub, ndoe))
9542 return NOTIFY_DONE;
9543
9544 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
9545 if (!ndo_info) {
9546 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
9547 return NOTIFY_DONE;
9548 }
9549
9550 ndo_info->event = event;
9551 ndo_info->if_idx = idx;
9552 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
9553
9554 /* defer the work to thread as it may block kernel */
9555 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
9556 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
9557 return NOTIFY_DONE;
9558}
9559#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9560
/* Register the net_device for interface 'ifidx' with the network stack.
 *
 * Installs the netdev/ethtool/wireless-ext ops, picks the MAC address
 * (dongle MAC for ifidx 0, primary MAC with the locally-administered bit
 * for virtual interfaces that clash with it), then calls
 * register_netdev()/register_netdevice() depending on need_rtnl_lock.
 * Returns 0 on success or the registration error; on failure the installed
 * ops are cleared again.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp;
	struct net_device *net = NULL;
	int err = 0;
	/* Fallback MAC used only if the dongle MAC is all-zero (00:90:4c is a Broadcom OUI). */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	ASSERT(dhd && dhd->iflist[ifidx]);
	ifp = dhd->iflist[ifidx];
	net = ifp->net;
	ASSERT(net && (ifp->idx == ifidx));

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	/* Pre-2.6.31 kernels take individual function pointers instead of netdev_ops. */
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot. This will not work in simultaneous AP/STA mode,
		 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
			__func__, net->name));
			temp_addr[0] |= 0x02;
		}
	}

	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	if (ifidx == 0)
		printf("%s\n", dhd_version);
#ifdef WL_EXT_IAPSTA
	/* virtual "wl0.x" interfaces get attached to the Android extension layer */
	else if (!strncmp(net->name, "wl0.", strlen("wl0."))) {
		wl_android_ext_attach_netdev(net, ifidx);
	}
#endif

	/* register_netdev takes rtnl_lock itself; register_netdevice assumes
	 * the caller already holds it.
	 */
	if (need_rtnl_lock)
		err = register_netdev(net);
	else
		err = register_netdevice(net);

	if (err != 0) {
		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
		goto fail;
	}



	printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
#if defined(CUSTOMER_HW4_DEBUG)
		MAC2STRDBG(dhd->pub.mac.octet));
#else
		MAC2STRDBG(net->dev_addr));
#endif /* CUSTOMER_HW4_DEBUG */

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
//		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
	KERNEL_VERSION(2, 6, 27))))
	if (ifidx == 0) {
#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
		up(&dhd_registration_sem);
#endif /* BCMLXSDMMC */
		/* When firmware is loaded on demand (not at driver load), park the
		 * dongle again: reset the bus, suspend it, and power the chip off.
		 */
		if (!dhd_download_fw_on_driverload) {
#ifdef WL_CFG80211
			wl_terminate_event_handler();
#endif /* WL_CFG80211 */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
			dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
			dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	}
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
	return 0;

fail:
	/* Undo the ops installation so a retry starts from a clean net_device. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
9704
9705void
9706dhd_bus_detach(dhd_pub_t *dhdp)
9707{
9708 dhd_info_t *dhd;
9709
9710 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9711
9712 if (dhdp) {
9713 dhd = (dhd_info_t *)dhdp->info;
9714 if (dhd) {
9715
9716 /*
9717 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
9718 * calling stop again will cuase SD read/write errors.
9719 */
9720 if (dhd->pub.busstate != DHD_BUS_DOWN) {
9721 /* Stop the protocol module */
9722 dhd_prot_stop(&dhd->pub);
9723
9724 /* Stop the bus module */
9725 dhd_bus_stop(dhd->pub.bus, TRUE);
9726 }
9727
9728#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
9729 dhd_bus_oob_intr_unregister(dhdp);
9730#endif
9731 }
9732 }
9733}
9734
9735
9736void dhd_detach(dhd_pub_t *dhdp)
9737{
9738 dhd_info_t *dhd;
9739 unsigned long flags;
9740 int timer_valid = FALSE;
9741 struct net_device *dev;
9742
9743 if (!dhdp)
9744 return;
9745
9746 dhd = (dhd_info_t *)dhdp->info;
9747 if (!dhd)
9748 return;
9749
9750 dev = dhd->iflist[0]->net;
9751
9752 if (dev) {
9753 rtnl_lock();
9754 if (dev->flags & IFF_UP) {
9755 /* If IFF_UP is still up, it indicates that
9756 * "ifconfig wlan0 down" hasn't been called.
9757 * So invoke dev_close explicitly here to
9758 * bring down the interface.
9759 */
9760 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
9761 dev_close(dev);
9762 }
9763 rtnl_unlock();
9764 }
9765
9766 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
9767
9768 dhd->pub.up = 0;
9769 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
9770 /* Give sufficient time for threads to start running in case
9771 * dhd_attach() has failed
9772 */
9773 OSL_SLEEP(100);
9774 }
9775
9776#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9777#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9778
9779#ifdef PROP_TXSTATUS
9780#ifdef DHD_WLFC_THREAD
9781 if (dhd->pub.wlfc_thread) {
9782 kthread_stop(dhd->pub.wlfc_thread);
9783 dhdp->wlfc_thread_go = TRUE;
9784 wake_up_interruptible(&dhdp->wlfc_wqhead);
9785 }
9786 dhd->pub.wlfc_thread = NULL;
9787#endif /* DHD_WLFC_THREAD */
9788#endif /* PROP_TXSTATUS */
9789
9790 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
9791
9792 dhd_bus_detach(dhdp);
9793#ifdef BCMPCIE
9794 if (is_reboot == SYS_RESTART) {
9795 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
9796 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
9797 dhdpcie_bus_clock_stop(dhdp->bus);
9798 wifi_platform_set_power(dhd_wifi_platdata->adapters,
9799 FALSE, WIFI_TURNOFF_DELAY);
9800 }
9801 }
9802#endif /* BCMPCIE */
9803#ifndef PCIE_FULL_DONGLE
9804 if (dhdp->prot)
9805 dhd_prot_detach(dhdp);
9806#endif
9807 }
9808
9809#ifdef ARP_OFFLOAD_SUPPORT
9810 if (dhd_inetaddr_notifier_registered) {
9811 dhd_inetaddr_notifier_registered = FALSE;
9812 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
9813 }
9814#endif /* ARP_OFFLOAD_SUPPORT */
9815#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9816 if (dhd_inet6addr_notifier_registered) {
9817 dhd_inet6addr_notifier_registered = FALSE;
9818 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
9819 }
9820#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9821#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9822 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
9823 if (dhd->early_suspend.suspend)
9824 unregister_early_suspend(&dhd->early_suspend);
9825 }
9826#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9827
9828#if defined(WL_WIRELESS_EXT)
9829 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
9830 /* Detatch and unlink in the iw */
9831 wl_iw_detach();
9832 }
08dfb6c4
RC
9833#ifdef WL_ESCAN
9834 wl_escan_detach();
424b00bf 9835#endif /* WL_ESCAN */
ef6a5fee
RC
9836#endif /* defined(WL_WIRELESS_EXT) */
9837
9838 /* delete all interfaces, start with virtual */
9839 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
9840 int i = 1;
9841 dhd_if_t *ifp;
9842
9843 /* Cleanup virtual interfaces */
9844 dhd_net_if_lock_local(dhd);
9845 for (i = 1; i < DHD_MAX_IFS; i++) {
9846 if (dhd->iflist[i])
9847 dhd_remove_if(&dhd->pub, i, TRUE);
9848 }
9849 dhd_net_if_unlock_local(dhd);
9850
9851 /* delete primary interface 0 */
9852 ifp = dhd->iflist[0];
9853 ASSERT(ifp);
9854 ASSERT(ifp->net);
9855 if (ifp && ifp->net) {
9856
9857
9858
9859 /* in unregister_netdev case, the interface gets freed by net->destructor
9860 * (which is set to free_netdev)
9861 */
9862 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
9863 free_netdev(ifp->net);
9864 } else {
9865#ifdef SET_RPS_CPUS
9866 custom_rps_map_clear(ifp->net->_rx);
9867#endif /* SET_RPS_CPUS */
9868 netif_tx_disable(ifp->net);
9869 unregister_netdev(ifp->net);
9870 }
9871 ifp->net = NULL;
9872#ifdef DHD_WMF
9873 dhd_wmf_cleanup(dhdp, 0);
9874#endif /* DHD_WMF */
9875#ifdef DHD_L2_FILTER
9876 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
9877 NULL, FALSE, dhdp->tickcnt);
9878 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
9879 ifp->phnd_arp_table = NULL;
9880#endif /* DHD_L2_FILTER */
9881
9882 dhd_if_del_sta_list(ifp);
9883
9884 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
9885 dhd->iflist[0] = NULL;
9886 }
9887 }
9888
9889 /* Clear the watchdog timer */
9890 DHD_GENERAL_LOCK(&dhd->pub, flags);
9891 timer_valid = dhd->wd_timer_valid;
9892 dhd->wd_timer_valid = FALSE;
9893 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9894 if (timer_valid)
9895 del_timer_sync(&dhd->timer);
9896 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9897
9898 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
9899#ifdef DHD_PCIE_RUNTIMEPM
9900 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
9901 PROC_STOP(&dhd->thr_rpm_ctl);
9902 }
9903#endif /* DHD_PCIE_RUNTIMEPM */
9904 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
9905 PROC_STOP(&dhd->thr_wdt_ctl);
9906 }
9907
9908 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
9909 PROC_STOP(&dhd->thr_rxf_ctl);
9910 }
9911
9912 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
9913 PROC_STOP(&dhd->thr_dpc_ctl);
9914 } else {
9915 tasklet_kill(&dhd->tasklet);
9916#ifdef DHD_LB_RXP
9917 __skb_queue_purge(&dhd->rx_pend_queue);
9918#endif /* DHD_LB_RXP */
9919 }
9920 }
9921
9922#if defined(DHD_LB)
9923 /* Kill the Load Balancing Tasklets */
9924#if defined(DHD_LB_TXC)
9925 tasklet_disable(&dhd->tx_compl_tasklet);
9926 tasklet_kill(&dhd->tx_compl_tasklet);
9927#endif /* DHD_LB_TXC */
9928#if defined(DHD_LB_RXC)
9929 tasklet_disable(&dhd->rx_compl_tasklet);
9930 tasklet_kill(&dhd->rx_compl_tasklet);
9931#endif /* DHD_LB_RXC */
9932 if (dhd->cpu_notifier.notifier_call != NULL)
9933 unregister_cpu_notifier(&dhd->cpu_notifier);
9934 dhd_cpumasks_deinit(dhd);
9935#endif /* DHD_LB */
9936
9937#ifdef DHD_LOG_DUMP
9938 dhd_log_dump_deinit(&dhd->pub);
9939#endif /* DHD_LOG_DUMP */
9940#ifdef WL_CFG80211
9941 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
9942 wl_cfg80211_detach(NULL);
9943 dhd_monitor_uninit();
9944 }
9945#endif
9946 /* free deferred work queue */
9947 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
9948 dhd->dhd_deferred_wq = NULL;
9949
9950#ifdef SHOW_LOGTRACE
9951 if (dhd->event_data.fmts)
9952 kfree(dhd->event_data.fmts);
9953 if (dhd->event_data.raw_fmts)
9954 kfree(dhd->event_data.raw_fmts);
9955 if (dhd->event_data.raw_sstr)
9956 kfree(dhd->event_data.raw_sstr);
9957#endif /* SHOW_LOGTRACE */
9958
9959#ifdef PNO_SUPPORT
9960 if (dhdp->pno_state)
9961 dhd_pno_deinit(dhdp);
9962#endif
9963#if defined(CONFIG_PM_SLEEP)
9964 if (dhd_pm_notifier_registered) {
9965 unregister_pm_notifier(&dhd->pm_notifier);
9966 dhd_pm_notifier_registered = FALSE;
9967 }
9968#endif /* CONFIG_PM_SLEEP */
9969
9970#ifdef DEBUG_CPU_FREQ
9971 if (dhd->new_freq)
9972 free_percpu(dhd->new_freq);
9973 dhd->new_freq = NULL;
9974 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9975#endif
9976 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
9977 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
9978#ifdef CONFIG_HAS_WAKELOCK
9979 dhd->wakelock_wd_counter = 0;
9980 wake_lock_destroy(&dhd->wl_wdwake);
08dfb6c4
RC
9981 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
9982 wake_lock_destroy(&dhd->wl_wifi);
ef6a5fee
RC
9983#endif /* CONFIG_HAS_WAKELOCK */
9984 DHD_OS_WAKE_LOCK_DESTROY(dhd);
9985 }
9986
9987
9988
9989#ifdef DHDTCPACK_SUPPRESS
9990 /* This will free all MEM allocated for TCPACK SUPPRESS */
9991 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
9992#endif /* DHDTCPACK_SUPPRESS */
9993
9994#ifdef PCIE_FULL_DONGLE
9995 dhd_flow_rings_deinit(dhdp);
9996 if (dhdp->prot)
9997 dhd_prot_detach(dhdp);
9998#endif
9999
10000
10001 dhd_sysfs_exit(dhd);
10002 dhd->pub.is_fw_download_done = FALSE;
10003 dhd_conf_detach(dhdp);
10004}
10005
10006
/*
 * Release all host-side memory owned by this DHD instance: flow-reorder
 * buffers, the STA pool, any saved SoC RAM dump, cached firmware/NVRAM
 * images, and finally the dhd_info_t itself — unless that structure came
 * from the platform preallocation pool, in which case MFREE must be
 * skipped (the pool owns it).
 */
void
dhd_free(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				/* The buffer was sized with (max_idx + 1) trailing
				 * packet pointers after the header; free the same amount.
				 */
				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);

		dhd = (dhd_info_t *)dhdp->info;
		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
#ifdef CACHE_FW_IMAGES
		if (dhdp->cached_fw) {
			MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
			dhdp->cached_fw = NULL;
		}

		if (dhdp->cached_nvram) {
			MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
			dhdp->cached_nvram = NULL;
		}
#endif
		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
		if (dhd &&
			dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
		dhd = NULL;
	}
}
10060
/*
 * Reset host-side state in place (no free of dhd_info_t): flush TCP-ACK
 * suppression bookkeeping, release flow-reorder buffers, clear the STA
 * pool, and drop any saved SoC RAM dump.
 */
void
dhd_clear(dhd_pub_t *dhdp)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
#ifdef DHDTCPACK_SUPPRESS
		/* Clean up timer/data structure for any remaining/pending packet or timer. */
		dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				/* Allocation included (max_idx + 1) trailing packet pointers. */
				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);

		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
	}
}
10100
/*
 * Module-wide teardown: unregister the dongle bus, shut down the Android
 * glue layer, unregister the platform driver, and (on older Amlogic
 * kernels) release the device-tree WiFi resources.
 */
static void
dhd_module_cleanup(void)
{
	printf("%s: Enter\n", __FUNCTION__);

	dhd_bus_unregister();

	wl_android_exit();

	dhd_wifi_platform_unregister_drv();

#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	wifi_teardown_dt();
#endif
#endif
	printf("%s: Exit\n", __FUNCTION__);
}
10119
/* Module unload hook: stop buzzz tracing, run common cleanup, and drop
 * the reboot notifier registered during dhd_module_init().
 */
static void __exit
dhd_module_exit(void)
{
	dhd_buzzz_detach();
	dhd_module_cleanup();
	unregister_reboot_notifier(&dhd_reboot_notifier);
}
10127
/*
 * Module load hook.  Optionally sets up device-tree resources (older
 * Amlogic kernels), backs up the firmware/NVRAM module-parameter paths,
 * and registers the platform driver, retrying up to POWERUP_MAX_RETRY
 * additional times on failure.  Returns 0 on success or the last
 * registration error.
 */
static int __init
dhd_module_init(void)
{
	int err;
	int retry = POWERUP_MAX_RETRY;

	printf("%s: in %s\n", __FUNCTION__, dhd_version);
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	if (wifi_setup_dt()) {
		printf("wifi_dt : fail to setup dt\n");
	}
#endif
#endif

	dhd_buzzz_attach();

	DHD_PERIM_RADIO_INIT();

	/* Snapshot the caller-supplied paths; the retry loop restores them
	 * in case a failed attempt modified the originals.
	 */
	if (firmware_path[0] != '\0') {
		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	if (nvram_path[0] != '\0') {
		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	do {
		err = dhd_wifi_platform_register_drv();
		if (!err) {
			/* Only a successfully loaded driver listens for reboot. */
			register_reboot_notifier(&dhd_reboot_notifier);
			break;
		}
		else {
			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
				__FUNCTION__, retry));
			/* Restore paths before retrying. */
			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
		}
	} while (retry--);

	if (err) {
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
		wifi_teardown_dt();
#endif
#endif
		DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
	} else {
		if (!dhd_download_fw_on_driverload) {
			dhd_driver_init_done = TRUE;
		}
	}

	printf("%s: Exit err=%d\n", __FUNCTION__, err);
	return err;
}
10190
10191static int
10192dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
10193{
10194 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
10195 if (code == SYS_RESTART) {
10196#ifdef BCMPCIE
10197 is_reboot = code;
10198#endif /* BCMPCIE */
10199 }
10200 return NOTIFY_DONE;
10201}
10202
10203
/* Driver entry-point registration.  Depending on kernel configuration
 * the init call is a deferred initcall (with a synchronous variant for
 * specific SoCs), a late initcall, or a plain module_init.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
	defined(CONFIG_ARCH_MSM8996)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
	* CONFIG_ARCH_MSM8996
	*/
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);
10224
10225/*
10226 * OS specific functions required to implement DHD driver in OS independent way
10227 */
10228int
10229dhd_os_proto_block(dhd_pub_t *pub)
10230{
10231 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10232
10233 if (dhd) {
10234 DHD_PERIM_UNLOCK(pub);
10235
10236 down(&dhd->proto_sem);
10237
10238 DHD_PERIM_LOCK(pub);
10239 return 1;
10240 }
10241
10242 return 0;
10243}
10244
10245int
10246dhd_os_proto_unblock(dhd_pub_t *pub)
10247{
10248 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10249
10250 if (dhd) {
10251 up(&dhd->proto_sem);
10252 return 1;
10253 }
10254
10255 return 0;
10256}
10257
10258void
10259dhd_os_dhdiovar_lock(dhd_pub_t *pub)
10260{
10261 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10262
10263 if (dhd) {
10264 mutex_lock(&dhd->dhd_iovar_mutex);
10265 }
10266}
10267
10268void
10269dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
10270{
10271 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10272
10273 if (dhd) {
10274 mutex_unlock(&dhd->dhd_iovar_mutex);
10275 }
10276}
10277
10278unsigned int
10279dhd_os_get_ioctl_resp_timeout(void)
10280{
10281 return ((unsigned int)dhd_ioctl_timeout_msec);
10282}
10283
10284void
10285dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
10286{
10287 dhd_ioctl_timeout_msec = (int)timeout_msec;
10288}
10289
/*
 * Sleep until *condition becomes true or the IOCTL timeout expires.
 * The perimeter lock is released while waiting so the response path can
 * run.  Returns 0 on timeout, otherwise the remaining jiffies.
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
10311
10312int
10313dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
10314{
10315 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10316
10317 wake_up(&dhd->ioctl_resp_wait);
10318 return 0;
10319}
10320
10321int
10322dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
10323{
10324 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10325 int timeout;
10326
10327 /* Convert timeout in millsecond to jiffies */
10328#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10329 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
10330#else
10331 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
10332#endif
10333
10334 DHD_PERIM_UNLOCK(pub);
10335
10336 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
10337
10338 DHD_PERIM_LOCK(pub);
10339
10340 return timeout;
10341}
10342
10343int
10344dhd_os_d3ack_wake(dhd_pub_t *pub)
10345{
10346 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10347
10348 wake_up(&dhd->d3ack_wait);
10349 return 0;
10350}
10351
10352int
10353dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
10354{
10355 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10356 int timeout;
10357
10358 /* Wait for bus usage contexts to gracefully exit within some timeout value
10359 * Set time out to little higher than dhd_ioctl_timeout_msec,
10360 * so that IOCTL timeout should not get affected.
10361 */
10362 /* Convert timeout in millsecond to jiffies */
10363#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10364 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
10365#else
10366 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
10367#endif
10368
10369 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
10370
10371 return timeout;
10372}
10373
10374int INLINE
10375dhd_os_busbusy_wake(dhd_pub_t *pub)
10376{
10377 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10378 /* Call wmb() to make sure before waking up the other event value gets updated */
10379 OSL_SMP_WMB();
10380 wake_up(&dhd->dhd_bus_busy_state_wait);
10381 return 0;
10382}
10383
10384void
10385dhd_os_wd_timer_extend(void *bus, bool extend)
10386{
10387 dhd_pub_t *pub = bus;
10388 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10389
10390 if (extend)
10391 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
10392 else
10393 dhd_os_wd_timer(bus, dhd->default_wd_interval);
10394}
10395
10396
/*
 * Arm, re-arm, or stop the DHD watchdog timer.
 * wdtick == 0 stops a running timer; non-zero (re)arms it with that
 * period in milliseconds.  The watchdog is never started while the bus
 * is down.
 *
 * NOTE(review): wake-lock accounting looks deliberately asymmetric —
 * the arming path takes DHD_OS_WD_WAKE_LOCK twice but releases once,
 * apparently to keep a wake lock held while the watchdog is armed.
 * Confirm against the DHD_OS_WD_WAKE_* implementation before changing.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_OS_WD_WAKE_LOCK(pub);
	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		/* Drop the spinlock before del_timer_sync(), which waits for a
		 * possibly running timer handler to finish.
		 */
		DHD_GENERAL_UNLOCK(pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(pub);
}
10441
#ifdef DHD_PCIE_RUNTIMEPM
/*
 * Start or stop the PCIe runtime-PM timer.
 * tick != 0 starts the timer (only if not already running) with period
 * dhd_runtimepm_ms; tick == 0 stops a running timer.  Nothing is done
 * while the bus is down or going down.
 */
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN ||
		pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if its not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
		}
	} else {
		/* tick is zero, we have to stop the timer */
		/* Stop the timer only if its running, otherwise we don't have to do anything */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			/* Release the spinlock before del_timer_sync(), which waits
			 * for a possibly running handler.
			 */
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			/* we have already released the lock, so just go to exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;

}

#endif /* DHD_PCIE_RUNTIMEPM */
10492
10493void *
10494dhd_os_open_image(char *filename)
10495{
10496 struct file *fp;
10497 int size;
10498
10499 fp = filp_open(filename, O_RDONLY, 0);
10500 /*
10501 * 2.6.11 (FC4) supports filp_open() but later revs don't?
10502 * Alternative:
10503 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
10504 * ???
10505 */
10506 if (IS_ERR(fp)) {
10507 fp = NULL;
10508 goto err;
10509 }
10510
10511 if (!S_ISREG(file_inode(fp)->i_mode)) {
10512 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
10513 fp = NULL;
10514 goto err;
10515 }
10516
10517 size = i_size_read(file_inode(fp));
10518 if (size <= 0) {
10519 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
10520 fp = NULL;
10521 goto err;
10522 }
10523
10524 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
10525
10526err:
10527 return fp;
10528}
10529
/*
 * Read up to 'len' bytes of the image into 'buf', advancing the file
 * position on success.  Returns the byte count read, 0 for a NULL image
 * handle, or -EIO when a request covering the whole file comes up short.
 */
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;
	int size;

	if (!image)
		return 0;

	/* Clamp the request to the total file size. */
	size = i_size_read(file_inode(fp));
	rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));

	/* A buffer large enough for the whole file must receive all of it. */
	if (len >= size && size != rdlen) {
		return -EIO;
	}

	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}
10552
08dfb6c4
RC
/* Return the size in bytes of an opened image, or 0 for a NULL handle. */
int
dhd_os_get_image_size(void *image)
{
	struct file *fp = (struct file *)image;

	if (!fp)
		return 0;

	return i_size_read(file_inode(fp));
}
10566
ef6a5fee
RC
10567void
10568dhd_os_close_image(void *image)
10569{
10570 if (image)
10571 filp_close((struct file *)image, NULL);
10572}
10573
10574void
10575dhd_os_sdlock(dhd_pub_t *pub)
10576{
10577 dhd_info_t *dhd;
10578
10579 dhd = (dhd_info_t *)(pub->info);
10580
10581 if (dhd_dpc_prio >= 0)
10582 down(&dhd->sdsem);
10583 else
10584 spin_lock_bh(&dhd->sdlock);
10585}
10586
10587void
10588dhd_os_sdunlock(dhd_pub_t *pub)
10589{
10590 dhd_info_t *dhd;
10591
10592 dhd = (dhd_info_t *)(pub->info);
10593
10594 if (dhd_dpc_prio >= 0)
10595 up(&dhd->sdsem);
10596 else
10597 spin_unlock_bh(&dhd->sdlock);
10598}
10599
10600void
10601dhd_os_sdlock_txq(dhd_pub_t *pub)
10602{
10603 dhd_info_t *dhd;
10604
10605 dhd = (dhd_info_t *)(pub->info);
10606 spin_lock_bh(&dhd->txqlock);
10607}
10608
10609void
10610dhd_os_sdunlock_txq(dhd_pub_t *pub)
10611{
10612 dhd_info_t *dhd;
10613
10614 dhd = (dhd_info_t *)(pub->info);
10615 spin_unlock_bh(&dhd->txqlock);
10616}
10617
/* Intentionally a no-op: no extra locking is taken for the RX queue here. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
10622
/* Intentionally a no-op: pairs with the empty dhd_os_sdlock_rxq(). */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
10627
10628static void
10629dhd_os_rxflock(dhd_pub_t *pub)
10630{
10631 dhd_info_t *dhd;
10632
10633 dhd = (dhd_info_t *)(pub->info);
10634 spin_lock_bh(&dhd->rxf_lock);
10635
10636}
10637
10638static void
10639dhd_os_rxfunlock(dhd_pub_t *pub)
10640{
10641 dhd_info_t *dhd;
10642
10643 dhd = (dhd_info_t *)(pub->info);
10644 spin_unlock_bh(&dhd->rxf_lock);
10645}
10646
#ifdef DHDTCPACK_SUPPRESS
/*
 * Acquire the TCP-ACK suppression lock.  SDIO builds use a BH spinlock
 * (the returned flags are always 0 and unused); other builds use an
 * IRQ-saving spinlock and return the saved flags for the unlock call.
 */
unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_lock_bh(&dhd->tcpack_lock);
#else
		spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}

	return flags;
}

/*
 * Release the TCP-ACK suppression lock; 'flags' must be the value
 * returned by dhd_os_tcpacklock() (ignored on SDIO builds).
 */
void
dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd;

#ifdef BCMSDIO
	BCM_REFERENCE(flags);
#endif /* BCMSDIO */

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_unlock_bh(&dhd->tcpack_lock); // terence 20160519
#else
		spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}
}
#endif /* DHDTCPACK_SUPPRESS */
10687
10688uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
10689{
10690 uint8* buf;
10691 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
10692
10693 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
10694 if (buf == NULL && kmalloc_if_fail)
10695 buf = kmalloc(size, flags);
10696
10697 return buf;
10698}
10699
/* Intentionally empty: preallocated buffers are owned by the platform
 * pool and are never returned to the kernel allocator here.
 */
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
10703
#if defined(WL_WIRELESS_EXT)
/*
 * Wireless-extensions statistics hook.  Returns the cached iw stats
 * after refreshing them, or NULL when the interface is down or the
 * refresh fails.
 */
struct iw_statistics *
dhd_get_wireless_stats(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (!dhd->pub.up)
		return NULL;

	if (wl_iw_get_wireless_stats(dev, &dhd->iw.wstats) != 0)
		return NULL;

	return &dhd->iw.wstats;
}
#endif /* defined(WL_WIRELESS_EXT) */
10723
/*
 * Process a dongle event: run the common wl_host_event() parser, then
 * forward the parsed event to wireless-extensions (primary bsscfg only)
 * and/or cfg80211 listeners on the receiving interface.
 * Returns BCME_OK or the parser's error code.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
	ASSERT(dhd != NULL);

#ifdef SHOW_LOGTRACE
	/* event_data supplies the format strings used to decode log traces. */
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

		ASSERT(dhd->iflist[*ifidx] != NULL);
		ASSERT(dhd->iflist[*ifidx]->net != NULL);

		if (dhd->iflist[*ifidx]->net) {
			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->net)
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
10764
/* send up locally generated event */
/* Extension point for host-generated events; no event types are
 * currently handled, so this is effectively a no-op.
 */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	switch (ntoh32(event->event_type)) {

	default:
		break;
	}
}
10775
#ifdef LOG_INTO_TCPDUMP
/*
 * Wrap a raw log buffer in a BRCM-ethertype Ethernet frame and inject
 * it into the wlan0 (or first) interface's RX path so it can be
 * captured with tcpdump.  On packet allocation failure the log entry is
 * dropped with an error print.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* Our own MAC is used for both ends; the locally-administered
		 * bit is flipped on the source so the frame is distinguishable.
		 */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		/* Ethernet header first, then the log payload. */
		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		/* Restore data/len that eth_type_trans() may have adjusted so
		 * the skb_pull() below strips exactly the Ethernet header.
		 */
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	}
	else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
#endif /* LOG_INTO_TCPDUMP */
10835
/*
 * Block (SDIO builds only) until *lockvar becomes FALSE or the IOCTL
 * response timeout expires.  The SD lock is dropped while sleeping so
 * the path that clears *lockvar can make progress.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo = dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
	return;
}
10853
10854void dhd_wait_event_wakeup(dhd_pub_t *dhd)
10855{
10856#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10857 struct dhd_info *dhdinfo = dhd->info;
10858 if (waitqueue_active(&dhdinfo->ctrl_wait))
10859 wake_up(&dhdinfo->ctrl_wait);
10860#endif
10861 return;
10862}
10863
#if defined(BCMSDIO) || defined(BCMPCIE)
/*
 * Reset the dongle bus.  flag == TRUE takes the device down, first doing
 * a best-effort "wl down" and tearing down wlfc/PNO state; flag == FALSE
 * brings it back up, refreshing firmware/NVRAM paths first on SDIO.
 * Returns 0 on success or the bus-layer error code.
 */
int
dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
	int ret;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (flag == TRUE) {
		/* Issue wl down command before resetting the chip */
		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
		}
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled)
			dhd_wlfc_deinit(&dhd->pub);
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
		if (dhd->pub.pno_state)
			dhd_pno_deinit(&dhd->pub);
#endif
	}

#ifdef BCMSDIO
	if (!flag) {
		dhd_update_fw_nv_path(dhd);
		/* update firmware and nvram path to sdio bus */
		dhd_bus_update_fw_nv_path(dhd->pub.bus,
			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
	}
#endif /* BCMSDIO */

	ret = dhd_bus_devreset(&dhd->pub, flag);
	if (ret) {
		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
		return ret;
	}

	return ret;
}
10904
10905#ifdef BCMSDIO
10906int
10907dhd_net_bus_suspend(struct net_device *dev)
10908{
10909 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10910 return dhd_bus_suspend(&dhd->pub);
10911}
10912
10913int
10914dhd_net_bus_resume(struct net_device *dev, uint8 stage)
10915{
10916 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10917 return dhd_bus_resume(&dhd->pub, stage);
10918}
10919
10920#endif /* BCMSDIO */
10921#endif /* BCMSDIO || BCMPCIE */
10922
10923int net_os_set_suspend_disable(struct net_device *dev, int val)
10924{
10925 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10926 int ret = 0;
10927
10928 if (dhd) {
10929 ret = dhd->pub.suspend_disable_flag;
10930 dhd->pub.suspend_disable_flag = val;
10931 }
10932 return ret;
10933}
10934
/*
 * Propagate a suspend/resume transition to the driver, via the
 * early-suspend helper when that facility is compiled in, otherwise the
 * generic suspend/resume helper; cfg80211 then refreshes the power mode.
 * Returns the helper's result, or 0 when dev has no DHD context.
 */
int net_os_set_suspend(struct net_device *dev, int val, int force)
{
	int ret = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
#ifdef CONFIG_MACH_UNIVERSAL7420
#endif /* CONFIG_MACH_UNIVERSAL7420 */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
		ret = dhd_set_suspend(val, &dhd->pub);
#else
		ret = dhd_suspend_resume_helper(dhd, val, force);
#endif
#ifdef WL_CFG80211
		wl_cfg80211_update_power_mode(dev);
#endif
	}
	return ret;
}
10954
10955int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
10956{
10957 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10958
10959 if (dhd)
10960 dhd->pub.suspend_bcn_li_dtim = val;
10961
10962 return 0;
10963}
10964
#ifdef PKT_FILTER_SUPPORT
/*
 * Install or remove one of the predefined RX packet filters
 * (broadcast, IPv4/IPv6 multicast, mDNS).  In non-master mode the
 * add/remove sense is inverted.  The unicast slot is ignored.
 * Returns 0 on success or -EINVAL for an unknown/out-of-range slot.
 */
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
	return 0;
#else
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	char *filterp = NULL;
	int filter_id = 0;
	int ret = 0;

	if (!dhd_master_mode)
		add_remove = !add_remove;
	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
	if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
		return ret;
	if (num >= dhd->pub.pktfilter_count)
		return -EINVAL;
	/* Each case selects the filter expression and its firmware id. */
	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			filter_id = 102;
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filterp = "103 0 0 0 0xFFFF 0x3333";
			filter_id = 103;
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhd->pub.pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
	} else { /* Delete filter */
		if (dhd->pub.pktfilter[num] != NULL) {
			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
			dhd->pub.pktfilter[num] = NULL;
		}
	}
	return ret;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
}
11017
11018int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
11019
11020{
11021 int ret = 0;
11022
11023 /* Packet filtering is set only if we still in early-suspend and
11024 * we need either to turn it ON or turn it OFF
11025 * We can always turn it OFF in case of early-suspend, but we turn it
11026 * back ON only if suspend_disable_flag was not set
11027 */
11028 if (dhdp && dhdp->up) {
11029 if (dhdp->in_suspend) {
11030 if (!val || (val && !dhdp->suspend_disable_flag))
11031 dhd_enable_packet_filter(val, dhdp);
11032 }
11033 }
11034 return ret;
11035}
11036
11037/* function to enable/disable packet for Network device */
11038int net_os_enable_packet_filter(struct net_device *dev, int val)
11039{
11040 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11041
11042 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
11043 return dhd_os_enable_packet_filter(&dhd->pub, val);
11044}
11045#endif /* PKT_FILTER_SUPPORT */
11046
11047int
11048dhd_dev_init_ioctl(struct net_device *dev)
11049{
11050 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11051 int ret;
11052
11053 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
11054 goto done;
11055
11056done:
11057 return ret;
11058}
11059
/*
 * Build the WIFI_FEATURE_* capability bitmask advertised to the Android
 * framework, from firmware capability flags and compile-time options.
 */
int
dhd_dev_get_feature_set(struct net_device *dev)
{
	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhd = (&ptr->pub);
	int feature_set = 0;

#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */
	if (!dhd)
		return feature_set;

	if (FW_SUPPORTED(dhd, sta))
		feature_set |= WIFI_FEATURE_INFRA;
	if (FW_SUPPORTED(dhd, dualband))
		feature_set |= WIFI_FEATURE_INFRA_5G;
	if (FW_SUPPORTED(dhd, p2p))
		feature_set |= WIFI_FEATURE_P2P;
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		feature_set |= WIFI_FEATURE_SOFT_AP;
	if (FW_SUPPORTED(dhd, tdls))
		feature_set |= WIFI_FEATURE_TDLS;
	if (FW_SUPPORTED(dhd, vsdb))
		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
	if (FW_SUPPORTED(dhd, nan)) {
		feature_set |= WIFI_FEATURE_NAN;
		/* NAN is essentail for d2d rtt */
		if (FW_SUPPORTED(dhd, rttd2d))
			feature_set |= WIFI_FEATURE_D2D_RTT;
	}
#ifdef RTT_SUPPORT
	feature_set |= WIFI_FEATURE_D2AP_RTT;
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */
	/* Supports STA + STA always */
	feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
#ifdef PNO_SUPPORT
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
		feature_set |= WIFI_FEATURE_BATCH_SCAN;
#ifdef GSCAN_SUPPORT
		feature_set |= WIFI_FEATURE_GSCAN;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
	return feature_set;
}
11116
11117
/*
 * Return a kmalloc'd array of MAX_FEATURE_SET_CONCURRRENT_GROUPS entries
 * describing which feature combinations may run concurrently; each entry
 * is the full feature mask filtered down to one concurrency group.
 * *num is set to the entry count, or left 0 on allocation failure.
 * Caller owns (and must kfree) the returned buffer.
 */
int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
{
	int feature_set_full, mem_needed;
	int *ret;

	*num = 0;
	mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
	ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
	if (!ret) {
		DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
			mem_needed));
		return ret;
	}

	feature_set_full = dhd_dev_get_feature_set(dev);

	/* Group 0: infrastructure + NAN/RTT + scan offloads. */
	ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		(feature_set_full & WIFI_FEATURE_NAN) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_PNO) |
		(feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
		(feature_set_full & WIFI_FEATURE_GSCAN) |
		(feature_set_full & WIFI_FEATURE_HOTSPOT) |
		(feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
		(feature_set_full & WIFI_FEATURE_EPR);

	/* Group 1: infrastructure + P2P + RTT. */
	ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		/* Not yet verified NAN with P2P */
		/* (feature_set_full & WIFI_FEATURE_NAN) | */
		(feature_set_full & WIFI_FEATURE_P2P) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_EPR);

	/* Group 2: infrastructure + NAN/RTT + TDLS. */
	ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		(feature_set_full & WIFI_FEATURE_NAN) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_TDLS) |
		(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
		(feature_set_full & WIFI_FEATURE_EPR);
	*num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;

	return ret;
}
#ifdef CUSTOM_FORCE_NODFS_FLAG
/*
 * Set or clear the "no DFS channels" country flag and mark the country
 * code for re-application on the next opportunity. Always returns 0.
 */
int
dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	if (nodfs) {
		dhd_inst->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
	} else {
		dhd_inst->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
	}
	dhd_inst->pub.force_country_change = TRUE;
	return 0;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */
#ifdef PNO_SUPPORT
/* Linux wrapper to call common dhd_pno_stop_for_ssid */
int
dhd_dev_pno_stop_for_ssid(struct net_device *dev)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_stop_for_ssid(&dhd_inst->pub);
}

/* Linux wrapper to call common dhd_pno_set_for_ssid */
int
dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_set_for_ssid(&dhd_inst->pub, ssids_local, nssid, scan_fr,
		pno_repeat, pno_freq_expo_max, channel_list, nchan);
}

/* Linux wrapper to call common dhd_pno_enable */
int
dhd_dev_pno_enable(struct net_device *dev, int enable)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_enable(&dhd_inst->pub, enable);
}

/* Linux wrapper to call common dhd_pno_set_for_hotlist */
int
dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
	struct dhd_pno_hotlist_params *hotlist_params)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_set_for_hotlist(&dhd_inst->pub, p_pfn_bssid, hotlist_params);
}

/* Linux wrapper to call common dhd_pno_stop_for_batch */
int
dhd_dev_pno_stop_for_batch(struct net_device *dev)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_stop_for_batch(&dhd_inst->pub);
}

/* Linux wrapper to call common dhd_pno_set_for_batch */
int
dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_set_for_batch(&dhd_inst->pub, batch_params);
}

/* Linux wrapper to call common dhd_pno_get_for_batch */
int
dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_get_for_batch(&dhd_inst->pub, buf, bufsize, PNO_STATUS_NORMAL);
}

/* Linux wrapper to call common dhd_pno_set_mac_oui */
int
dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
{
	dhd_info_t *dhd_inst = DHD_DEV_INFO(dev);

	return dhd_pno_set_mac_oui(&dhd_inst->pub, oui);
}
#endif /* PNO_SUPPORT */
11247
#if defined(PNO_SUPPORT)
#ifdef GSCAN_SUPPORT
/*
 * GSCAN net_device-to-dhd_pub wrappers.
 * Consistency fix: the wrappers below used the raw
 * `*(dhd_info_t **)netdev_priv(dev)` expression while every other wrapper
 * in this file retrieves the driver instance via DHD_DEV_INFO(); use the
 * accessor uniformly so a future change to private-data layout has one
 * place to land.
 */

/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *buf, uint8 flush)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
}

/* Linux wrapper to call common dhd_pno_get_gscan */
void *
dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *info, uint32 *len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
}

/* Linux wrapper to call common dhd_wait_batch_results_complete */
void
dhd_dev_wait_batch_results_complete(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	dhd_wait_batch_results_complete(&dhd->pub);
}

/* Linux wrapper to call common dhd_pno_lock_batch_results */
void
dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	dhd_pno_lock_batch_results(&dhd->pub);
}

/* Linux wrapper to call common dhd_pno_unlock_batch_results */
void
dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	dhd_pno_unlock_batch_results(&dhd->pub);
}

/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
int
dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
}

/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
int
dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
}

/* Linux wrapper to call common dhd_handle_swc_evt */
void *
dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
}

/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
void *
dhd_dev_hotlist_scan_event(struct net_device *dev,
	const void *data, int *send_evt_bytes, hotlist_type_t type)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
}

/* Linux wrapper to call common dhd_process_full_gscan_result */
void *
dhd_dev_process_full_gscan_result(struct net_device *dev,
const void *data, int *send_evt_bytes)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
}

/* Drop cached hotlist results of the given type */
void
dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
}

/* Drop cached batch-scan results */
int
dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
}

/* Linux wrapper to call common dhd_retreive_batch_scan_results */
int
dhd_dev_retrieve_batch_scan(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_retreive_batch_scan_results(&dhd->pub));
}
#endif /* GSCAN_SUPPORT */
#endif
#ifdef RTT_SUPPORT
/*
 * RTT (round-trip-time / ranging) net_device wrappers.
 * Consistency fix: use DHD_DEV_INFO() like the rest of the file instead of
 * the raw `*(dhd_info_t **)netdev_priv(dev)` expression.
 */

/* Linux wrapper to call common dhd_rtt_set_cfg */
int
dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_rtt_set_cfg(&dhd->pub, buf));
}

/* Cancel RTT measurement for the given peer MAC list */
int
dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
}

/* Register a completion-notification callback with the RTT core */
int
dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
}

/* Unregister a previously registered RTT completion callback */
int
dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
}

/* Query RTT capabilities from the RTT core */
int
dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_rtt_capability(&dhd->pub, capa));
}

#endif /* RTT_SUPPORT */
11411
11412#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11413static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
11414{
11415 dhd_info_t *dhd;
11416 struct net_device *dev;
11417
11418 dhd = (dhd_info_t *)dhd_info;
11419 dev = dhd->iflist[0]->net;
11420
11421 if (dev) {
08dfb6c4
RC
11422 // terence 20161024: let wlan0 down when hang happened
11423 rtnl_lock();
11424 dev_close(dev);
11425 rtnl_unlock();
ef6a5fee
RC
11426#if defined(WL_WIRELESS_EXT)
11427 wl_iw_send_priv_event(dev, "HANG");
11428#endif
11429#if defined(WL_CFG80211)
11430 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11431#endif
11432 }
11433}
11434
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
extern dhd_pub_t *link_recovery;
/*
 * PCIe link-down recovery hook (exported for the Exynos PCIe host driver).
 * Records the link-down reason, marks the bus down, and raises a HANG so
 * the framework restarts the driver.
 *
 * Fix: guard against link_recovery being NULL — this symbol is set up
 * elsewhere during attach and the hook can in principle fire before that.
 */
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	if (!link_recovery) {
		return;
	}
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
11446
11447int dhd_os_send_hang_message(dhd_pub_t *dhdp)
11448{
11449 int ret = 0;
11450 if (dhdp) {
11451 if (!dhdp->hang_was_sent) {
11452 dhdp->hang_was_sent = 1;
11453 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
11454 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
11455 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
11456 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
11457 }
11458 }
11459 return ret;
11460}
11461
11462int net_os_send_hang_message(struct net_device *dev)
11463{
11464 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11465 int ret = 0;
11466
11467 if (dhd) {
11468 /* Report FW problem when enabled */
11469 if (dhd->pub.hang_report) {
11470#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11471 ret = dhd_os_send_hang_message(&dhd->pub);
11472#else
11473 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11474#endif
11475 } else {
11476 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
11477 __FUNCTION__));
11478 /* Enforce bus down to stop any future traffic */
11479 dhd->pub.busstate = DHD_BUS_DOWN;
11480 }
11481 }
11482 return ret;
11483}
11484
11485int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
11486{
11487 dhd_info_t *dhd = NULL;
11488 dhd_pub_t *dhdp = NULL;
11489 int reason;
11490
11491 dhd = DHD_DEV_INFO(dev);
11492 if (dhd) {
11493 dhdp = &dhd->pub;
11494 }
11495
11496 if (!dhd || !dhdp) {
11497 return 0;
11498 }
11499
11500 reason = bcm_strtoul(string_num, NULL, 0);
11501 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
11502
11503 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
11504 reason = 0;
11505 }
11506
11507 dhdp->hang_reason = reason;
11508
11509 return net_os_send_hang_message(dev);
11510}
11511#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
11512
11513
11514int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
11515{
11516 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11517 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
11518}
11519
11520bool dhd_force_country_change(struct net_device *dev)
11521{
11522 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11523
11524 if (dhd && dhd->pub.up)
11525 return dhd->pub.force_country_change;
11526 return FALSE;
11527}
/*
 * Translate an ISO country code into the platform-customized wl_country_t
 * (revision / locale overrides) via the platform adapter. When
 * CUSTOM_COUNTRY_CODE is enabled the current dhd_cflags (e.g. the no-DFS
 * flag) are passed along so the translation can honor them.
 */
void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef CUSTOM_COUNTRY_CODE
	get_customized_country_code(dhd->adapter, country_iso_code, cspec,
		dhd->pub.dhd_cflags);
#else
	get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
}
11539void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
11540{
11541 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11542 if (dhd && dhd->pub.up) {
11543 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
11544#ifdef WL_CFG80211
11545 wl_update_wiphybands(NULL, notify);
11546#endif
11547 }
11548}
11549
11550void dhd_bus_band_set(struct net_device *dev, uint band)
11551{
11552 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11553 if (dhd && dhd->pub.up) {
11554#ifdef WL_CFG80211
11555 wl_update_wiphybands(NULL, true);
11556#endif
11557 }
11558}
11559
11560int dhd_net_set_fw_path(struct net_device *dev, char *fw)
11561{
11562 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11563
11564 if (!fw || fw[0] == '\0')
11565 return -EINVAL;
11566
11567 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
11568 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
11569
11570#if defined(SOFTAP)
11571 if (strstr(fw, "apsta") != NULL) {
11572 DHD_INFO(("GOT APSTA FIRMWARE\n"));
11573 ap_fw_loaded = TRUE;
11574 } else {
11575 DHD_INFO(("GOT STA FIRMWARE\n"));
11576 ap_fw_loaded = FALSE;
11577 }
11578#endif
11579 return 0;
11580}
11581
/* Acquire the per-driver network-interface mutex for this net_device. */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}

/* Release the per-driver network-interface mutex for this net_device. */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
11593
11594static void dhd_net_if_lock_local(dhd_info_t *dhd)
11595{
11596#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11597 if (dhd)
11598 mutex_lock(&dhd->dhd_net_if_mutex);
11599#endif
11600}
11601
11602static void dhd_net_if_unlock_local(dhd_info_t *dhd)
11603{
11604#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11605 if (dhd)
11606 mutex_unlock(&dhd->dhd_net_if_mutex);
11607#endif
11608}
11609
11610static void dhd_suspend_lock(dhd_pub_t *pub)
11611{
11612#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11613 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11614 if (dhd)
11615 mutex_lock(&dhd->dhd_suspend_mutex);
11616#endif
11617}
11618
11619static void dhd_suspend_unlock(dhd_pub_t *pub)
11620{
11621#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11622 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11623 if (dhd)
11624 mutex_unlock(&dhd->dhd_suspend_mutex);
11625#endif
11626}
11627
11628unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
11629{
11630 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11631 unsigned long flags = 0;
11632
11633 if (dhd)
11634 spin_lock_irqsave(&dhd->dhd_lock, flags);
11635
11636 return flags;
11637}
11638
11639void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
11640{
11641 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11642
11643 if (dhd)
11644 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
11645}
11646
11647/* Linux specific multipurpose spinlock API */
11648void *
11649dhd_os_spin_lock_init(osl_t *osh)
11650{
11651 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
11652 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
11653 /* and this results in kernel asserts in internal builds */
11654 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
11655 if (lock)
11656 spin_lock_init(lock);
11657 return ((void *)lock);
11658}
11659void
11660dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
11661{
11662 if (lock)
11663 MFREE(osh, lock, sizeof(spinlock_t) + 4);
11664}
11665unsigned long
11666dhd_os_spin_lock(void *lock)
11667{
11668 unsigned long flags = 0;
11669
11670 if (lock)
11671 spin_lock_irqsave((spinlock_t *)lock, flags);
11672
11673 return flags;
11674}
11675void
11676dhd_os_spin_unlock(void *lock, unsigned long flags)
11677{
11678 if (lock)
11679 spin_unlock_irqrestore((spinlock_t *)lock, flags);
11680}
11681
11682static int
11683dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
11684{
11685 return (atomic_read(&dhd->pend_8021x_cnt));
11686}
11687
11688#define MAX_WAIT_FOR_8021X_TX 100
11689
11690int
11691dhd_wait_pend8021x(struct net_device *dev)
11692{
11693 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11694 int timeout = msecs_to_jiffies(10);
11695 int ntimes = MAX_WAIT_FOR_8021X_TX;
11696 int pend = dhd_get_pend_8021x_cnt(dhd);
11697
11698 while (ntimes && pend) {
11699 if (pend) {
11700 set_current_state(TASK_INTERRUPTIBLE);
11701 DHD_PERIM_UNLOCK(&dhd->pub);
11702 schedule_timeout(timeout);
11703 DHD_PERIM_LOCK(&dhd->pub);
11704 set_current_state(TASK_RUNNING);
11705 ntimes--;
11706 }
11707 pend = dhd_get_pend_8021x_cnt(dhd);
11708 }
11709 if (ntimes == 0)
11710 {
11711 atomic_set(&dhd->pend_8021x_cnt, 0);
11712 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
11713 }
11714 return pend;
11715}
11716
11717#ifdef DHD_DEBUG
11718static void
11719dhd_convert_memdump_type_to_str(uint32 type, char *buf)
11720{
11721 char *type_str = NULL;
11722
11723 switch (type) {
11724 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
11725 type_str = "resumed_on_timeout";
11726 break;
11727 case DUMP_TYPE_D3_ACK_TIMEOUT:
11728 type_str = "D3_ACK_timeout";
11729 break;
11730 case DUMP_TYPE_DONGLE_TRAP:
11731 type_str = "Dongle_Trap";
11732 break;
11733 case DUMP_TYPE_MEMORY_CORRUPTION:
11734 type_str = "Memory_Corruption";
11735 break;
11736 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
11737 type_str = "PKTID_AUDIT_Fail";
11738 break;
11739 case DUMP_TYPE_SCAN_TIMEOUT:
11740 type_str = "SCAN_timeout";
11741 break;
11742 case DUMP_TYPE_SCAN_BUSY:
11743 type_str = "SCAN_Busy";
11744 break;
11745 case DUMP_TYPE_BY_SYSDUMP:
11746 type_str = "BY_SYSDUMP";
11747 break;
11748 case DUMP_TYPE_BY_LIVELOCK:
11749 type_str = "BY_LIVELOCK";
11750 break;
11751 case DUMP_TYPE_AP_LINKUP_FAILURE:
11752 type_str = "BY_AP_LINK_FAILURE";
11753 break;
11754 default:
11755 type_str = "Unknown_type";
11756 break;
11757 }
11758
11759 strncpy(buf, type_str, strlen(type_str));
11760 buf[strlen(type_str)] = 0;
11761}
11762
/*
 * Write a SOCRAM memory dump to a file and free the dump buffer.
 *
 * The destination path and open flags are platform-specific:
 * CUSTOMER_HW4_DEBUG uses DHD_COMMON_DUMP_PATH, CUSTOMER_HW2 uses the
 * SELinux-permitted /data/vendor/misc/wifi location, and the default
 * (Brix Android) writes to /installmedia with O_DIRECT|O_SYNC|O_TRUNC.
 * The file name embeds the trigger type and a timestamp.
 *
 * @param dhd   public driver context (memdump_type selects the label)
 * @param buf   dump buffer; ALWAYS freed before return, even on error
 *              (ownership transfers to this function)
 * @param size  dump buffer size in bytes
 * @return 0 on success, negative errno from filp_open on failure
 *
 * NOTE(review): the return value of fp->f_op->write() is ignored, so a
 * short or failed write still reports success. Also runs under
 * set_fs(KERNEL_DS), which is required for writing a kernel buffer via the
 * file ops on these kernel versions.
 */
int
write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char memdump_path[128];
	char memdump_type[32];
	struct timeval curtime;
	uint32 file_mode;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Init file name */
	memset(memdump_path, 0, sizeof(memdump_path));
	memset(memdump_type, 0, sizeof(memdump_type));
	do_gettimeofday(&curtime);
	dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
#ifdef CUSTOMER_HW4_DEBUG
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2)
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		"/data/vendor/misc/wifi/mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;
#else
	snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
		"/installmedia/mem_dump", memdump_type,
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	/* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
	 * calling BUG_ON immediately after collecting the socram dump.
	 * So the file write operation should directly write the contents into the
	 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
	 * instead of appending.
	 */
	file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
#endif /* CUSTOMER_HW4_DEBUG */

	/* print SOCRAM dump file path */
	DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));

	/* open file to write */
	fp = filp_open(memdump_path, file_mode, 0644);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
		goto exit;
	}

	/* Write buf to file */
	fp->f_op->write(fp, buf, size, &pos);

exit:
	/* close file before return */
	if (!ret)
		filp_close(fp, current->files);

	/* restore previous address limit */
	set_fs(old_fs);

	/* free buf before return */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	DHD_OS_PREFREE(dhd, buf, size);
#else
	MFREE(dhd->osh, buf, size);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	return ret;
}
#endif /* DHD_DEBUG */
11839
11840int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
11841{
11842 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11843 unsigned long flags;
11844 int ret = 0;
11845
11846 if (dhd) {
11847 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11848 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
11849 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
11850#ifdef CONFIG_HAS_WAKELOCK
11851 if (dhd->wakelock_rx_timeout_enable)
11852 wake_lock_timeout(&dhd->wl_rxwake,
11853 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
11854 if (dhd->wakelock_ctrl_timeout_enable)
11855 wake_lock_timeout(&dhd->wl_ctrlwake,
11856 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
11857#endif
11858 dhd->wakelock_rx_timeout_enable = 0;
11859 dhd->wakelock_ctrl_timeout_enable = 0;
11860 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11861 }
11862 return ret;
11863}
11864
11865int net_os_wake_lock_timeout(struct net_device *dev)
11866{
11867 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11868 int ret = 0;
11869
11870 if (dhd)
11871 ret = dhd_os_wake_lock_timeout(&dhd->pub);
11872 return ret;
11873}
11874
11875int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
11876{
11877 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11878 unsigned long flags;
11879
11880 if (dhd) {
11881 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11882 if (val > dhd->wakelock_rx_timeout_enable)
11883 dhd->wakelock_rx_timeout_enable = val;
11884 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11885 }
11886 return 0;
11887}
11888
11889int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
11890{
11891 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11892 unsigned long flags;
11893
11894 if (dhd) {
11895 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11896 if (val > dhd->wakelock_ctrl_timeout_enable)
11897 dhd->wakelock_ctrl_timeout_enable = val;
11898 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11899 }
11900 return 0;
11901}
11902
11903int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
11904{
11905 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11906 unsigned long flags;
11907
11908 if (dhd) {
11909 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11910 dhd->wakelock_ctrl_timeout_enable = 0;
11911#ifdef CONFIG_HAS_WAKELOCK
11912 if (wake_lock_active(&dhd->wl_ctrlwake))
11913 wake_unlock(&dhd->wl_ctrlwake);
11914#endif
11915 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11916 }
11917 return 0;
11918}
11919
11920int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
11921{
11922 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11923 int ret = 0;
11924
11925 if (dhd)
11926 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
11927 return ret;
11928}
11929
11930int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
11931{
11932 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11933 int ret = 0;
11934
11935 if (dhd)
11936 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
11937 return ret;
11938}
11939
11940
#if defined(DHD_TRACE_WAKE_LOCK)
/* Wakelock call-site tracing: records, per caller return address, how many
 * times each lock/unlock/waive/restore operation was performed. Records
 * live in a 32-bucket hash table keyed by caller address.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <linux/hashtable.h>
#else
#include <linux/hash.h>
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */


#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

/* Runtime switch: non-zero enables recording in dhd_os_wake_lock/unlock */
int trace_wklock_onoff = 1;

/* Kind of wakelock operation a trace record counts */
typedef enum dhd_wklock_type {
	DHD_WAKE_LOCK,
	DHD_WAKE_UNLOCK,
	DHD_WAIVE_LOCK,
	DHD_RESTORE_LOCK
} dhd_wklock_t;

/* One trace record per distinct caller return address */
struct wk_trace_record {
	unsigned long addr;	        /* Address of the instruction */
	dhd_wklock_t lock_type;         /* lock_type */
	unsigned long long counter;		/* counter information */
	struct hlist_node wklock_node;  /* hash node */
};
11972
11973
/*
 * Look up the trace record for a given caller return address, or NULL if
 * none exists yet. Callers in this file invoke this under
 * wakelock_spinlock (via STORE_WKLOCK_RECORD), which serializes access to
 * the shared wklock_history table.
 */
static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
	struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
	struct hlist_node *entry;
	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		if (wklock_info->addr == addr) {
			return wklock_info;
		}
	}
	return NULL;
}
11991
11992#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11993#define HASH_ADD(hashtable, node, key) \
11994 do { \
11995 hash_add(hashtable, node, key); \
11996 } while (0);
11997#else
11998#define HASH_ADD(hashtable, node, key) \
11999 do { \
12000 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
12001 hlist_add_head(node, &hashtable[index]); \
12002 } while (0);
12003#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
12004
12005#define STORE_WKLOCK_RECORD(wklock_type) \
12006 do { \
12007 struct wk_trace_record *wklock_info = NULL; \
12008 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
12009 wklock_info = find_wklock_entry(func_addr); \
12010 if (wklock_info) { \
12011 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
12012 wklock_info->counter = dhd->wakelock_counter; \
12013 } else { \
12014 wklock_info->counter++; \
12015 } \
12016 } else { \
12017 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
12018 if (!wklock_info) {\
12019 printk("Can't allocate wk_trace_record \n"); \
12020 } else { \
12021 wklock_info->addr = func_addr; \
12022 wklock_info->lock_type = wklock_type; \
12023 if (wklock_type == DHD_WAIVE_LOCK || \
12024 wklock_type == DHD_RESTORE_LOCK) { \
12025 wklock_info->counter = dhd->wakelock_counter; \
12026 } else { \
12027 wklock_info->counter++; \
12028 } \
12029 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
12030 } \
12031 } \
12032 } while (0);
12033
/*
 * Print every recorded wakelock trace entry (call-site address, operation
 * kind, counter) to the error log. Caller must hold wakelock_spinlock —
 * see dhd_wk_lock_stats_dump, the only caller in this file.
 */
static inline void dhd_wk_lock_rec_dump(void)
{
	int bkt;
	struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		switch (wklock_info->lock_type) {
			case DHD_WAKE_LOCK:
				DHD_ERROR(("wakelock lock : %pS  lock_counter : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
			case DHD_WAKE_UNLOCK:
				DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
			case DHD_WAIVE_LOCK:
				DHD_ERROR(("wakelock waive : %pS  before_waive : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
			case DHD_RESTORE_LOCK:
				DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
		}
	}
}
12068
/*
 * Initialize the (global) wakelock trace hash table. Taken under
 * wakelock_spinlock so initialization cannot race with recording.
 */
static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	int i;
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_init(wklock_history);
#else
	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
		INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
12085
/*
 * Tear down the wakelock trace table: unlink and free every record, under
 * wakelock_spinlock. Uses the *_safe iteration variants because entries
 * are deleted while walking.
 */
static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
	int bkt;
	struct wk_trace_record *wklock_info;
	struct hlist_node *tmp;
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry_safe(wklock_info, entry, tmp,
			&wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
		hash_del(&wklock_info->wklock_node);
#else
		hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
		kfree(wklock_info);
	}
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
12115
12116void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
12117{
12118 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
12119 unsigned long flags;
12120
12121 DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"));
12122 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12123 dhd_wk_lock_rec_dump();
12124 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12125 DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
12126}
12127#else
12128#define STORE_WKLOCK_RECORD(wklock_type)
12129#endif /* ! DHD_TRACE_WAKE_LOCK */
12130
/*
 * Take the main driver wakelock (reference counted). The OS wakelock is
 * acquired only on the 0 -> 1 transition and only when locks are not
 * waived. Returns the new reference count, or 0 if no driver instance.
 *
 * NOTE: the local MUST be named `dhd` — STORE_WKLOCK_RECORD expands a
 * reference to it.
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		/* First reference and not waived: grab the real OS lock */
		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
#ifdef DHD_TRACE_WAKE_LOCK
		if (trace_wklock_onoff) {
			/* Record this call site in the trace hash table */
			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
		}
#endif /* DHD_TRACE_WAKE_LOCK */
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}

	return ret;
}
12158
/*
 * Take the event-processing wakelock (reference counted, separate spinlock
 * and counter from the primary wakelock). First holder acquires wl_evtwake
 * or the bus PM reference. Returns the new count, or 0 when pub->info is NULL.
 */
int dhd_event_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
		if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
		dhd->wakelock_event_counter++;
		ret = dhd->wakelock_event_counter;
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}

	return ret;
}
12181
12182int net_os_wake_lock(struct net_device *dev)
12183{
12184 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12185 int ret = 0;
12186
12187 if (dhd)
12188 ret = dhd_os_wake_lock(&dhd->pub);
12189 return ret;
12190}
12191
/*
 * Drop one reference on the primary driver wakelock.
 * Also refreshes any pending rx/ctrl wakelock timeouts first
 * (dhd_os_wake_lock_timeout). The OS wakelock is released only when the
 * count reaches zero and wakelocks are not waived. Returns the remaining
 * count (unchanged if it was already zero), or 0 when pub->info is NULL.
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				/* record the caller address for wakelock debugging */
				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12222
/*
 * Drop one reference on the event-processing wakelock; releases wl_evtwake
 * (or the bus PM reference) when the count reaches zero. Returns the
 * remaining count, or 0 when pub->info is NULL.
 */
int dhd_event_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);

		if (dhd->wakelock_event_counter > 0) {
			dhd->wakelock_event_counter--;
			if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_event_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}
	return ret;
}
12247
/*
 * Query whether the driver currently holds any wakelock that should block
 * system suspend. Checks only the primary (wl_wifi) and watchdog (wl_wdwake)
 * locks; see dhd_os_check_wakelock_all() for the full set.
 * Returns 1 if suspend should be blocked, 0 otherwise.
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
12270
/*
 * Query whether ANY DHD wakelock is active (wifi, watchdog, rx, ctrl,
 * OOB interrupt, scan, event). Logs which locks are held when at least one
 * is active. Returns 1 if suspend should be blocked, 0 otherwise.
 */
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	/* l5/l6 default to 0 because their locks are compile-time optional */
	int l1, l2, l3, l4, l7;
	int l5 = 0, l6 = 0;
	int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub) {
		return 0;
	}
	dhd = (dhd_info_t *)(pub->info);
	if (!dhd) {
		return 0;
	}
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	c = dhd->wakelock_counter;
	l1 = wake_lock_active(&dhd->wl_wifi);
	l2 = wake_lock_active(&dhd->wl_wdwake);
	l3 = wake_lock_active(&dhd->wl_rxwake);
	l4 = wake_lock_active(&dhd->wl_ctrlwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	l7 = wake_lock_active(&dhd->wl_evtwake);
	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);

	/* Indicate to the Host to avoid going to suspend if internal locks are up */
	if (dhd && lock_active) {
		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
			"ctl-%d intr-%d scan-%d evt-%d\n",
			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
		return 1;
	}
#endif /* CONFIG_HAS_WAKELOCK */
	return 0;
}
12321
12322int net_os_wake_unlock(struct net_device *dev)
12323{
12324 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12325 int ret = 0;
12326
12327 if (dhd)
12328 ret = dhd_os_wake_unlock(&dhd->pub);
12329 return ret;
12330}
12331
/*
 * Take the watchdog wakelock (reference counted); the OS lock is acquired
 * only on the 0 -> 1 transition. Shares wakelock_spinlock with the primary
 * lock but uses its own counter. Returns the new count, or 0 when
 * pub->info is NULL.
 */
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used : lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12351
/*
 * Release the watchdog wakelock unconditionally: unlike the other unlock
 * paths this zeroes the counter rather than decrementing it, so one call
 * undoes any number of dhd_os_wd_wake_lock() calls. Always returns 0.
 */
int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12370
12371#ifdef BCMPCIE_OOB_HOST_WAKE
/*
 * Hold the PCIe out-of-band host-wake wakelock for @val milliseconds,
 * keeping the host awake long enough to service a dongle interrupt.
 * No-op unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12383
/*
 * Release the PCIe out-of-band host-wake wakelock early, before its
 * timeout expires. No-op unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_intrwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12398#endif /* BCMPCIE_OOB_HOST_WAKE */
12399
12400#ifdef DHD_USE_SCAN_WAKELOCK
/*
 * Hold the scan wakelock for @val milliseconds so the host stays awake
 * while scan results are processed. No-op unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12412
/*
 * Release the scan wakelock early, before its timeout expires.
 * No-op unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_scanwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_scanwake)) {
			wake_unlock(&dhd->wl_scanwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12427#endif /* DHD_USE_SCAN_WAKELOCK */
12428
/* Waive wakelocks for operations such as IOVARs in suspend function; must be closed
 * by a paired call to dhd_os_wake_lock_restore(). While waived, dhd_os_wake_lock()
 * still counts references but does not take the OS wakelock.
 * NOTE(review): despite the original comment saying "returns current wakelock
 * counter", the code returns wakelock_wd_counter (the watchdog counter) —
 * confirm against callers before changing.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
		if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			/* record current lock status */
			dhd->wakelock_before_waive = dhd->wakelock_counter;
			dhd->waive_wakelock = TRUE;
		}
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12457
/*
 * End a waive period started by dhd_os_wake_lock_waive() and reconcile the
 * OS wakelock state with any lock/unlock activity that happened while
 * waived. Returns the watchdog wakelock counter (matching
 * dhd_os_wake_lock_waive), or 0 when pub->info is NULL.
 */
int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (!dhd)
		return 0;

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
	if (!dhd->waive_wakelock)
		goto exit;

	dhd->waive_wakelock = FALSE;
	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
	 * the lock in between, do the same by calling wake_unlock or pm_relax
	 */
#ifdef DHD_TRACE_WAKE_LOCK
	if (trace_wklock_onoff) {
		STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
	}
#endif /* DHD_TRACE_WAKE_LOCK */

	/* count went 0 -> >0 while waived: the OS lock was never taken; take it now */
	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif
	/* count went >0 -> 0 while waived: the OS lock was never dropped; drop it now */
	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(&dhd->pub);
#endif
	}
	dhd->wakelock_before_waive = 0;
exit:
	ret = dhd->wakelock_wd_counter;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	return ret;
}
12503
/*
 * Initialize all wakelock counters and register the OS wakelock objects.
 * Note: wl_wifi and wl_wdwake are intentionally NOT (re)initialized here —
 * they must survive a wlan down/up cycle (see matching note in
 * dhd_os_wake_lock_destroy).
 */
void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	/* terence 20161023: can not destroy wl_wifi when wlan down, it will happen
	 * null pointer in dhd_ioctl_entry
	 */
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
12527
/*
 * Tear down the wakelocks registered by dhd_os_wake_lock_init() and reset
 * the counters. wl_wifi and wl_wdwake are deliberately left alive — they
 * are referenced after wlan down (see note below).
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	/* terence 20161023: can not destroy wl_wifi when wlan down, it will happen
	 * null pointer in dhd_ioctl_entry
	 */
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}
12551
12552bool dhd_os_check_if_up(dhd_pub_t *pub)
12553{
12554 if (!pub)
12555 return FALSE;
12556 return pub->up;
12557}
12558
12559/* function to collect firmware, chip id and chip version info */
12560void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
12561{
12562 int i;
12563
12564 i = snprintf(info_string, sizeof(info_string),
12565 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
12566 printf("%s\n", info_string);
12567
12568 if (!dhdp)
12569 return;
12570
12571 i = snprintf(&info_string[i], sizeof(info_string) - i,
12572 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
12573 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
12574}
12575
/*
 * Issue a wl ioctl on behalf of a local (in-driver) caller: validates the
 * net_device and interface index, holds the DHD wakelock and perimeter lock
 * around the ioctl, and feeds the result to hang detection.
 * Returns the ioctl status, -EINVAL on bad arguments, -ENODEV on bad ifidx.
 */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	/* a hang-indicating status here schedules hang recovery */
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}
12608
/*
 * Check a firmware/ioctl status code for hang conditions on interface @ifidx.
 * NOTE(review): the function is declared bool but returns -EINVAL for an
 * invalid index, which truthiness-testing callers will read as "hang" —
 * confirm this is the intended fail-safe before changing.
 */
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
12621
12622/* Return instance */
12623int dhd_get_instance(dhd_pub_t *dhdp)
12624{
12625 return dhdp->info->unit;
12626}
12627
12628
12629#ifdef PROP_TXSTATUS
12630
/* Platform hook at wlfc (proprietary tx-status flow control) init:
 * switch the SDIO function-2 block size for non-legacy operation.
 */
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
}
12638
/* Platform hook at wlfc deinit: restore the default SDIO function-2 block size. */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
}
12646
/*
 * Decide whether wlfc flow control should be skipped.
 * With SKIP_WLFC_ON_CONCURRENT: flow control is used only in concurrent
 * (VSDB) mode when cfg80211 is available; otherwise always skipped.
 * Without it: flow control is never skipped.
 */
bool dhd_wlfc_skip_fc(void)
{
#ifdef SKIP_WLFC_ON_CONCURRENT
#ifdef WL_CFG80211

	/* enable flow control in vsdb mode */
	return !(wl_cfg80211_is_concurrent_mode());
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
}
12662#endif /* PROP_TXSTATUS */
12663
12664#ifdef BCMDBGFS
12665#include <linux/debugfs.h>
12666
/* State for the debugfs "dhd/mem" node that exposes dongle core registers. */
typedef struct dhd_dbgfs {
	struct dentry *debugfs_dir;	/* the "dhd" directory */
	struct dentry *debugfs_mem;	/* the "mem" register-access file */
	dhd_pub_t *dhdp;		/* owning DHD instance */
	uint32 size;			/* size of the addressable window, bytes */
} dhd_dbgfs_t;
12673
12674dhd_dbgfs_t g_dbgfs;
12675
12676extern uint32 dhd_readregl(void *bp, uint32 addr);
12677extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
12678
/* debugfs open handler: stash the inode's private data for read/write. */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
12685
12686static ssize_t
12687dhd_dbg_state_read(struct file *file, char __user *ubuf,
12688 size_t count, loff_t *ppos)
12689{
12690 ssize_t rval;
12691 uint32 tmp;
12692 loff_t pos = *ppos;
12693 size_t ret;
12694
12695 if (pos < 0)
12696 return -EINVAL;
12697 if (pos >= g_dbgfs.size || !count)
12698 return 0;
12699 if (count > g_dbgfs.size - pos)
12700 count = g_dbgfs.size - pos;
12701
12702 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
12703 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
12704
12705 ret = copy_to_user(ubuf, &tmp, 4);
12706 if (ret == count)
12707 return -EFAULT;
12708
12709 count -= ret;
12710 *ppos = pos + count;
12711 rval = count;
12712
12713 return rval;
12714}
12715
12716
12717static ssize_t
12718dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
12719{
12720 loff_t pos = *ppos;
12721 size_t ret;
12722 uint32 buf;
12723
12724 if (pos < 0)
12725 return -EINVAL;
12726 if (pos >= g_dbgfs.size || !count)
12727 return 0;
12728 if (count > g_dbgfs.size - pos)
12729 count = g_dbgfs.size - pos;
12730
12731 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
12732 if (ret == count)
12733 return -EFAULT;
12734
12735 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
12736 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
12737
12738 return count;
12739}
12740
12741
/*
 * debugfs llseek handler for the register window.
 * whence: 0 = absolute, 1 = relative to current, 2 = relative to end.
 * NOTE(review): the whence==2 case computes size - off, whereas standard
 * SEEK_END semantics are size + off (off normally negative) — confirm
 * whether callers rely on the inverted sign before changing.
 */
loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case 0:
			pos = off;
			break;
		case 1:
			pos = file->f_pos + off;
			break;
		case 2:
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
12759
/* File operations for the debugfs "dhd/mem" register-access node. */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write	= dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek	= dhd_debugfs_lseek
};
12766
12767static void dhd_dbg_create(void)
12768{
12769 if (g_dbgfs.debugfs_dir) {
12770 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
12771 NULL, &dhd_dbg_state_ops);
12772 }
12773}
12774
12775void dhd_dbg_init(dhd_pub_t *dhdp)
12776{
12777 g_dbgfs.dhdp = dhdp;
12778 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
12779
12780 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
12781 if (IS_ERR(g_dbgfs.debugfs_dir)) {
12782 g_dbgfs.debugfs_dir = NULL;
12783 return;
12784 }
12785
12786 dhd_dbg_create();
12787
12788 return;
12789}
12790
/*
 * Remove the debugfs nodes (file before directory — a directory must be
 * empty to be removed) and clear the global state for the next init.
 */
void dhd_dbg_remove(void)
{
	debugfs_remove(g_dbgfs.debugfs_mem);
	debugfs_remove(g_dbgfs.debugfs_dir);

	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
12798#endif /* BCMDBGFS */
12799
12800#ifdef WLMEDIA_HTSF
12801
/*
 * Stamp an outgoing packet with the high-resolution TSF timestamp for
 * latency measurement. Only packets whose destination port falls in
 * [tsport, tsport+20] are stamped. The byte offsets (40, 44, 82, 84,
 * HTSF_HOSTOFFSET) address fixed positions in the expected iperf/ICMP
 * frame layout — assumes standard Ethernet+IP+UDP headers; TODO confirm.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/* timestamp packet */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
/*		memcpy(&proto, p1+26, 4); */
		memcpy(&dport, p1+40, 2);
/*	proto = ((ntoh32(proto))>> 16) & 0xFF; */
		dport = ntoh16(dport);
	}

	/* timestamp only if icmp or udb iperf with port 5555 */
/*	if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic = HTSFMAGIC;
		ts.prio = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10 = get_cycles();
		ts.t10 = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
12845
12846static void dhd_dump_htsfhisto(histo_t *his, char *s)
12847{
12848 int pktcnt = 0, curval = 0, i;
12849 for (i = 0; i < (NUMBIN-2); i++) {
12850 curval += 500;
12851 printf("%d ", his->bin[i]);
12852 pktcnt += his->bin[i];
12853 }
12854 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
12855 his->bin[NUMBIN-1], s);
12856}
12857
/*
 * Bucket one latency sample into 500-unit-wide histogram bins.
 * Negative samples are counted in bin[NUMBIN-1]; bin[NUMBIN-2] tracks the
 * maximum sample seen; values beyond the last threshold fall through into
 * the last regular bin (bin[NUMBIN-3]).
 */
static
void sorttobin(int value, histo_t *histo)
{
	int i, binval = 0;

	if (value < 0) {
		histo->bin[NUMBIN-1]++;
		return;
	}
	if (value > histo->bin[NUMBIN-2]) /* store the max value */
		histo->bin[NUMBIN-2] = value;

	for (i = 0; i < (NUMBIN-2); i++) {
		binval += 500; /* 500m s bins */
		if (value <= binval) {
			histo->bin[i]++;
			return;
		}
	}
	/* overflow: out-of-range samples land in the last regular bin */
	histo->bin[NUMBIN-3]++;
}
12879
12880static
12881void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
12882{
12883 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12884 struct sk_buff *skb;
12885 char *p1;
12886 uint16 old_magic;
12887 int d1, d2, d3, end2end;
12888 htsfts_t *htsf_ts;
12889 uint32 htsf;
12890
12891 skb = PKTTONATIVE(dhdp->osh, pktbuf);
12892 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
12893
12894 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
12895 memcpy(&old_magic, p1+78, 2);
12896 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
12897 } else {
12898 return;
12899 }
12900 if (htsf_ts->magic == HTSFMAGIC) {
12901 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
12902 htsf_ts->cE0 = get_cycles();
12903 }
12904
12905 if (old_magic == 0xACAC) {
12906
12907 tspktcnt++;
12908 htsf = dhd_get_htsf(dhd, 0);
12909 memcpy(skb->data+92, &htsf, sizeof(uint32));
12910
12911 memcpy(&ts[tsidx].t1, skb->data+80, 16);
12912
12913 d1 = ts[tsidx].t2 - ts[tsidx].t1;
12914 d2 = ts[tsidx].t3 - ts[tsidx].t2;
12915 d3 = ts[tsidx].t4 - ts[tsidx].t3;
12916 end2end = ts[tsidx].t4 - ts[tsidx].t1;
12917
12918 sorttobin(d1, &vi_d1);
12919 sorttobin(d2, &vi_d2);
12920 sorttobin(d3, &vi_d3);
12921 sorttobin(end2end, &vi_d4);
12922
12923 if (end2end > 0 && end2end > maxdelay) {
12924 maxdelay = end2end;
12925 maxdelaypktno = tspktcnt;
12926 memcpy(&maxdelayts, &ts[tsidx], 16);
12927 }
12928 if (++tsidx >= TSMAX)
12929 tsidx = 0;
12930 }
12931}
12932
/*
 * Extrapolate the dongle TSF to "now" from the host cycle counter:
 * takes the cycle delta since the last TSF sync, converts cycles to
 * microseconds using the calibrated coefficient (coef.coefdec1coefdec2,
 * fixed-point with one integer and two decimal digits), and adds it to
 * the last known TSF plus the fixed bus latency. Returns 0 if the
 * coefficient has not been calibrated yet (htsf_update not run).
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32  factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	/* handle 32-bit cycle-counter wraparound */
	if (cur_cycle >  dhd->htsf.last_cycle) {
		delta = cur_cycle -  dhd->htsf.last_cycle;
	} else {
		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval  = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	} else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
12963
/*
 * Print the full four-point timestamp table (t1..t4 plus per-hop deltas)
 * and the worst end-to-end latency recorded so far. Reads the module-level
 * ts[] ring, tsidx, and the maxdelay* globals filled by dhd_htsf_addrxts().
 */
static void dhd_dump_latency(void)
{
	int i, max = 0;
	int d1, d2, d3, d4, d5;

	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
	for (i = 0; i < TSMAX; i++) {
		d1 = ts[i].t2 - ts[i].t1;
		d2 = ts[i].t3 - ts[i].t2;
		d3 = ts[i].t4 - ts[i].t3;
		d4 = ts[i].t4 - ts[i].t1;
		d5 = ts[max].t4-ts[max].t1;
		if (d4 > d5 && d4 > 0)  {
			max = i;
		}
		printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
			d1, d2, d3, d4, i);
	}

	printf("current idx = %d \n", tsidx);

	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
	printf("%08X %08X %08X %08X \t%d %d %d %d\n",
	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
	maxdelayts.t2 - maxdelayts.t1,
	maxdelayts.t3 - maxdelayts.t2,
	maxdelayts.t4 - maxdelayts.t3,
	maxdelayts.t4 - maxdelayts.t1);
}
12994
12995
/*
 * Fetch the current dongle TSF via the "tsf" iovar and print it alongside
 * the host-extrapolated value (sampled just before and after the ioctl)
 * and the calibration coefficient, for cross-checking the htsf clock.
 * Returns 0 on success, -EOPNOTSUPP when the firmware lacks the iovar,
 * or the raw ioctl error.
 */
static int
dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
{
	wl_ioctl_t ioc;
	char buf[32];
	int ret;
	uint32 s1, s2;

	struct tsf {
		uint32 low;
		uint32 high;
	} tsf_buf;

	memset(&ioc, 0, sizeof(ioc));
	memset(&tsf_buf, 0, sizeof(tsf_buf));

	ioc.cmd = WLC_GET_VAR;
	ioc.buf = buf;
	ioc.len = (uint)sizeof(buf);
	ioc.set = FALSE;

	strncpy(buf, "tsf", sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	s1 = dhd_get_htsf(dhd, 0);
	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
		if (ret == -EIO) {
			DHD_ERROR(("%s: tsf is not supported by device\n",
				dhd_ifname(&dhd->pub, ifidx)));
			return -EOPNOTSUPP;
		}
		return ret;
	}
	s2 = dhd_get_htsf(dhd, 0);

	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
		dhd->htsf.coefdec2, s2-tsf_buf.low);
	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
	return 0;
}
13037
/*
 * Recalibrate the host-cycles-to-TSF conversion coefficient from a fresh
 * TSF sample (@data points at a tsf_t). Computes the cycle delta and TSF
 * delta since the previous sample and derives coef plus two rounded
 * decimal digits (coefdec1/coefdec2) stored in dhd->htsf for use by
 * dhd_get_htsf(). Skips the update on a zero TSF or a non-wraparound
 * backwards TSF step.
 */
void htsf_update(dhd_info_t *dhd, void *data)
{
	static ulong  cur_cycle = 0, prev_cycle = 0;
	uint32 htsf, tsf_delta = 0;
	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
	ulong b, a;
	cycles_t t;

	/* cycles_t in include/mips/timex.h */

	t = get_cycles();

	prev_cycle = cur_cycle;
	cur_cycle = t;

	/* handle 32-bit cycle-counter wraparound */
	if (cur_cycle > prev_cycle)
		cyc_delta = cur_cycle - prev_cycle;
	else {
		/* NOTE(review): b and a are dead stores — never read afterwards */
		b = cur_cycle;
		a = prev_cycle;
		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
	}

	if (data == NULL)
		printf(" tsf update ata point er is null \n");

	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
	memcpy(&cur_tsf, data, sizeof(tsf_t));

	if (cur_tsf.low == 0) {
		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
		return;
	}

	if (cur_tsf.low > prev_tsf.low)
		tsf_delta = (cur_tsf.low - prev_tsf.low);
	else {
		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
			cur_tsf.low, prev_tsf.low));
		if (cur_tsf.high > prev_tsf.high) {
			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
		} else {
			return; /* do not update */
		}
	}

	if (tsf_delta)  {
		/* integer part, then two decimal digits by long division;
		 * dec3 is a third digit used only for round-half-up
		 */
		hfactor = cyc_delta / tsf_delta;
		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
		dec1 =  tmp/tsf_delta;
		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;

		if (dec3 > 4) {
			if (dec2 == 9) {
				dec2 = 0;
				if (dec1 == 9) {
					dec1 = 0;
					hfactor++;
				} else {
					dec1++;
				}
			} else {
				dec2++;
			}
		}
	}

	if (hfactor) {
		/* NOTE(review): htsf is computed but never used or returned */
		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
		dhd->htsf.coef = hfactor;
		dhd->htsf.last_cycle = cur_cycle;
		dhd->htsf.last_tsf = cur_tsf.low;
		dhd->htsf.coefdec1 = dec1;
		dhd->htsf.coefdec2 = dec2;
	} else {
		htsf = prev_tsf.low;
	}
}
13119
13120#endif /* WLMEDIA_HTSF */
13121
13122#ifdef CUSTOM_SET_CPUCORE
/*
 * Pin (set==TRUE) or unpin (set==FALSE) the DPC and RXF kernel threads to
 * their dedicated CPU cores, retrying up to MAX_RETRY_SET_CPUCORE times
 * with a 1 ms sleep while the scheduler rejects the affinity change.
 * Only applied when the channel is VHT80 (the high-throughput case).
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				/* fall back to the primary core when unpinning */
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	if (RXF_CPUCORE) {
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
#ifdef DHD_OF_SUPPORT
	interrupt_set_cpucore(set);
#endif /* DHD_OF_SUPPORT */
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
13173#endif /* CUSTOM_SET_CPUCORE */
13174
13175/* Get interface specific ap_isolate configuration */
13176int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
13177{
13178 dhd_info_t *dhd = dhdp->info;
13179 dhd_if_t *ifp;
13180
13181 ASSERT(idx < DHD_MAX_IFS);
13182
13183 ifp = dhd->iflist[idx];
13184
13185 return ifp->ap_isolate;
13186}
13187
13188/* Set interface specific ap_isolate configuration */
13189int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
13190{
13191 dhd_info_t *dhd = dhdp->info;
13192 dhd_if_t *ifp;
13193
13194 ASSERT(idx < DHD_MAX_IFS);
13195
13196 ifp = dhd->iflist[idx];
13197
13198 ifp->ap_isolate = val;
13199
13200 return 0;
13201}
13202
13203#ifdef DHD_FW_COREDUMP
13204
13205
13206#ifdef CUSTOMER_HW4_DEBUG
13207#ifdef PLATFORM_SLP
13208#define MEMDUMPINFO "/opt/etc/.memdump.info"
13209#else
13210#define MEMDUMPINFO "/data/.memdump.info"
13211#endif /* PLATFORM_SLP */
13212#elif defined(CUSTOMER_HW2)
12cae11e 13213#define MEMDUMPINFO "/data/vendor/misc/wifi/.memdump.info"
ef6a5fee
RC
13214#else
13215#define MEMDUMPINFO "/installmedia/.memdump.info"
13216#endif /* CUSTOMER_HW4_DEBUG */
13217
/*
 * Read the memdump policy from the MEMDUMPINFO file (a single ASCII digit)
 * into dhd->memdump_enabled. When the file is missing or unreadable, the
 * default is DUMP_DISABLED for CUSTOMER_HW4_DEBUG builds and
 * DUMP_MEMFILE_BUGON otherwise.
 */
void dhd_get_memdump_info(dhd_pub_t *dhd)
{
	struct file *fp = NULL;
	uint32 mem_val = DUMP_MEMFILE_MAX;
	int ret = 0;
	char *filepath = MEMDUMPINFO;

	/* Read memdump info from the file */
	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
		goto done;
	} else {
		ret = kernel_read(fp, 0, (char *)&mem_val, 4);
		if (ret < 0) {
			DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
			filp_close(fp, NULL);
			goto done;
		}

		/* the file stores the value as ASCII text, not binary */
		mem_val = bcm_atoi((char *)&mem_val);

		DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
		filp_close(fp, NULL);
	}

done:
#ifdef CUSTOMER_HW4_DEBUG
	dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
#else
	dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
#endif /* CUSTOMER_HW4_DEBUG */
}
13251
13252
/*
 * Queue an asynchronous SoC RAM dump of @buf/@size on the deferred work
 * queue (handled by dhd_mem_dump). If the policy is DUMP_MEMONLY the
 * kernel is crashed immediately instead so the RAM image is captured in
 * the host ramdump. The dhd_dump_t descriptor is freed by the handler.
 */
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
	dhd_dump_t *dump = NULL;
	dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
	if (dump == NULL) {
		DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
		return;
	}
	dump->buf = buf;
	dump->bufsize = size;

#if defined(CONFIG_ARM64)
	DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
		(uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
	DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
		(uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	if (dhdp->memdump_enabled == DUMP_MEMONLY) {
		BUG_ON(1);
	}

#ifdef DHD_LOG_DUMP
	if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
		dhd_schedule_log_dump(dhdp);
	}
#endif /* DHD_LOG_DUMP */
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
		DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
}
/*
 * Deferred-work handler for dhd_schedule_memdump(): writes the captured
 * SoC RAM image to a file, then (policy DUMP_MEMFILE_BUGON) crashes the
 * kernel so the host ramdump is also collected. Frees the dhd_dump_t
 * descriptor allocated by the scheduler; the dump buffer itself is owned
 * by the caller.
 */
static void
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_dump_t *dump = event_info;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	if (!dump) {
		DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
		return;
	}

	if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
	}

	if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
		dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif
		TRUE) {
		BUG_ON(1);
	}
	MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
}
13312#endif /* DHD_FW_COREDUMP */
13313
13314#ifdef DHD_LOG_DUMP
13315static void
13316dhd_log_dump(void *handle, void *event_info, u8 event)
13317{
13318 dhd_info_t *dhd = handle;
13319
13320 if (!dhd) {
13321 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13322 return;
13323 }
13324
13325 if (do_dhd_log_dump(&dhd->pub)) {
13326 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
13327 return;
13328 }
13329}
13330
13331void dhd_schedule_log_dump(dhd_pub_t *dhdp)
13332{
13333 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
13334 (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
13335 dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
13336}
13337
/*
 * Write the contents of the debug log ring (dld_buf) to a timestamped file
 * under DHD_COMMON_DUMP_PATH, prefixed with DHD/firmware version info, then
 * reset the ring.  Returns 0 on success, -1 on failure.
 * Runs in process context; temporarily widens the address limit with
 * set_fs(KERNEL_DS) so kernel buffers can be passed to the VFS write op.
 */
static int
do_dhd_log_dump(dhd_pub_t *dhdp)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char dump_path[128];
	char common_info[1024];
	struct timeval curtime;
	uint32 file_mode;
	unsigned long flags = 0;

	if (!dhdp) {
		return -1;
	}

	/* Building the additional information like DHD, F/W version */
	memset(common_info, 0, sizeof(common_info));
	snprintf(common_info, sizeof(common_info),
		"---------- Common information ----------\n"
		"DHD version: %s\n"
		"F/W version: %s\n"
		"----------------------------------------\n",
		dhd_version, fw_version);

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Init file name: <path>/debug_dump_<sec>.<usec> */
	memset(dump_path, 0, sizeof(dump_path));
	do_gettimeofday(&curtime);
	snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH "debug_dump",
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;

	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
	fp = filp_open(dump_path, file_mode, 0644);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		DHD_ERROR(("open file error, err = %d\n", ret));
		ret = -1;
		goto exit;
	}

	/* Header first, then either the full wrapped ring or just the
	 * portion written so far.  NOTE(review): write return values are
	 * not checked — a short write goes undetected.
	 */
	fp->f_op->write(fp, common_info, strlen(common_info), &pos);
	if (dhdp->dld_buf.wraparound) {
		fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
	} else {
		fp->f_op->write(fp, dhdp->dld_buf.buffer,
			(int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
	}

	/* re-init dhd_log_dump_buf structure (under the ring's spinlock so a
	 * concurrent dhd_log_dump_print() cannot interleave)
	 */
	spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
	dhdp->dld_buf.wraparound = 0;
	dhdp->dld_buf.present = dhdp->dld_buf.front;
	dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
	bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
	spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
exit:
	/* ret == 0 implies fp is a valid open file */
	if (!ret) {
		filp_close(fp, NULL);
	}
	set_fs(old_fs);

	return ret;
}
13408#endif /* DHD_LOG_DUMP */
13409
13410#ifdef BCMASSERT_LOG
13411#ifdef CUSTOMER_HW4_DEBUG
13412#ifdef PLATFORM_SLP
13413#define ASSERTINFO "/opt/etc/.assert.info"
13414#else
13415#define ASSERTINFO "/data/.assert.info"
13416#endif /* PLATFORM_SLP */
13417#elif defined(CUSTOMER_HW2)
12cae11e 13418#define ASSERTINFO "/data/vendor/misc/wifi/.assert.info"
ef6a5fee
RC
13419#else
13420#define ASSERTINFO "/installmedia/.assert.info"
13421#endif /* CUSTOMER_HW4_DEBUG */
13422void dhd_get_assert_info(dhd_pub_t *dhd)
13423{
13424 struct file *fp = NULL;
13425 char *filepath = ASSERTINFO;
13426
13427 /*
13428 * Read assert info from the file
13429 * 0: Trigger Kernel crash by panic()
13430 * 1: Print out the logs and don't trigger Kernel panic. (default)
13431 * 2: Trigger Kernel crash by BUG()
13432 * File doesn't exist: Keep default value (1).
13433 */
13434 fp = filp_open(filepath, O_RDONLY, 0);
13435 if (IS_ERR(fp)) {
13436 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13437 } else {
13438 int mem_val = 0;
13439 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13440 if (ret < 0) {
13441 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13442 } else {
13443 mem_val = bcm_atoi((char *)&mem_val);
13444 DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
13445 g_assert_type = mem_val;
13446 }
13447 filp_close(fp, NULL);
13448 }
13449}
13450#endif /* BCMASSERT_LOG */
13451
13452
13453#ifdef DHD_WMF
13454/* Returns interface specific WMF configuration */
13455dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
13456{
13457 dhd_info_t *dhd = dhdp->info;
13458 dhd_if_t *ifp;
13459
13460 ASSERT(idx < DHD_MAX_IFS);
13461
13462 ifp = dhd->iflist[idx];
13463 return &ifp->wmf;
13464}
13465#endif /* DHD_WMF */
13466
13467
13468#if defined(DHD_L2_FILTER)
13469bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
13470{
13471 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
13472}
13473#endif
13474
13475#ifdef DHD_L2_FILTER
13476arp_table_t*
13477dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
13478{
13479 dhd_info_t *dhd = dhdp->info;
13480 dhd_if_t *ifp;
13481
13482 ASSERT(bssidx < DHD_MAX_IFS);
13483
13484 ifp = dhd->iflist[bssidx];
13485 return ifp->phnd_arp_table;
13486}
13487
13488int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
13489{
13490 dhd_info_t *dhd = dhdp->info;
13491 dhd_if_t *ifp;
13492
13493 ASSERT(idx < DHD_MAX_IFS);
13494
13495 ifp = dhd->iflist[idx];
13496
13497 if (ifp)
13498 return ifp->parp_enable;
13499 else
13500 return FALSE;
13501}
13502
13503/* Set interface specific proxy arp configuration */
13504int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13505{
13506 dhd_info_t *dhd = dhdp->info;
13507 dhd_if_t *ifp;
13508 ASSERT(idx < DHD_MAX_IFS);
13509 ifp = dhd->iflist[idx];
13510
13511 if (!ifp)
13512 return BCME_ERROR;
13513
13514 /* At present all 3 variables are being
13515 * handled at once
13516 */
13517 ifp->parp_enable = val;
13518 ifp->parp_discard = val;
13519 ifp->parp_allnode = !val;
13520
13521 /* Flush ARP entries when disabled */
13522 if (val == FALSE) {
13523 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
13524 FALSE, dhdp->tickcnt);
13525 }
13526 return BCME_OK;
13527}
13528
13529bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13530{
13531 dhd_info_t *dhd = dhdp->info;
13532 dhd_if_t *ifp;
13533
13534 ASSERT(idx < DHD_MAX_IFS);
13535
13536 ifp = dhd->iflist[idx];
13537
13538 ASSERT(ifp);
13539 return ifp->parp_discard;
13540}
13541
13542bool
13543dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13544{
13545 dhd_info_t *dhd = dhdp->info;
13546 dhd_if_t *ifp;
13547
13548 ASSERT(idx < DHD_MAX_IFS);
13549
13550 ifp = dhd->iflist[idx];
13551
13552 ASSERT(ifp);
13553
13554 return ifp->parp_allnode;
13555}
13556
13557int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
13558{
13559 dhd_info_t *dhd = dhdp->info;
13560 dhd_if_t *ifp;
13561
13562 ASSERT(idx < DHD_MAX_IFS);
13563
13564 ifp = dhd->iflist[idx];
13565
13566 ASSERT(ifp);
13567
13568 return ifp->dhcp_unicast;
13569}
13570
13571int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
13572{
13573 dhd_info_t *dhd = dhdp->info;
13574 dhd_if_t *ifp;
13575 ASSERT(idx < DHD_MAX_IFS);
13576 ifp = dhd->iflist[idx];
13577
13578 ASSERT(ifp);
13579
13580 ifp->dhcp_unicast = val;
13581 return BCME_OK;
13582}
13583
13584int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
13585{
13586 dhd_info_t *dhd = dhdp->info;
13587 dhd_if_t *ifp;
13588
13589 ASSERT(idx < DHD_MAX_IFS);
13590
13591 ifp = dhd->iflist[idx];
13592
13593 ASSERT(ifp);
13594
13595 return ifp->block_ping;
13596}
13597
13598int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
13599{
13600 dhd_info_t *dhd = dhdp->info;
13601 dhd_if_t *ifp;
13602 ASSERT(idx < DHD_MAX_IFS);
13603 ifp = dhd->iflist[idx];
13604
13605 ASSERT(ifp);
13606
13607 ifp->block_ping = val;
13608
13609 return BCME_OK;
13610}
13611
13612int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
13613{
13614 dhd_info_t *dhd = dhdp->info;
13615 dhd_if_t *ifp;
13616
13617 ASSERT(idx < DHD_MAX_IFS);
13618
13619 ifp = dhd->iflist[idx];
13620
13621 ASSERT(ifp);
13622
13623 return ifp->grat_arp;
13624}
13625
13626int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13627{
13628 dhd_info_t *dhd = dhdp->info;
13629 dhd_if_t *ifp;
13630 ASSERT(idx < DHD_MAX_IFS);
13631 ifp = dhd->iflist[idx];
13632
13633 ASSERT(ifp);
13634
13635 ifp->grat_arp = val;
13636
13637 return BCME_OK;
13638}
13639#endif /* DHD_L2_FILTER */
13640
13641
13642#if defined(SET_RPS_CPUS)
13643int dhd_rps_cpus_enable(struct net_device *net, int enable)
13644{
13645 dhd_info_t *dhd = DHD_DEV_INFO(net);
13646 dhd_if_t *ifp;
13647 int ifidx;
13648 char * RPS_CPU_SETBUF;
13649
13650 ifidx = dhd_net2idx(dhd, net);
13651 if (ifidx == DHD_BAD_IF) {
13652 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
13653 return -ENODEV;
13654 }
13655
13656 if (ifidx == PRIMARY_INF) {
13657 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
13658 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
13659 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
13660 } else {
13661 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
13662 RPS_CPU_SETBUF = RPS_CPUS_MASK;
13663 }
13664 } else if (ifidx == VIRTUAL_INF) {
13665 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
13666 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
13667 } else {
13668 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
13669 return -EINVAL;
13670 }
13671
13672 ifp = dhd->iflist[ifidx];
13673 if (ifp) {
13674 if (enable) {
13675 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
13676 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
13677 } else {
13678 custom_rps_map_clear(ifp->net->_rx);
13679 }
13680 } else {
13681 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
13682 return -ENODEV;
13683 }
13684 return BCME_OK;
13685}
13686
/*
 * Install an RPS CPU map on @queue from the textual bitmap in @buf
 * (same format as /sys/class/net/<if>/queues/rx-0/rps_cpus).
 * Mirrors store_rps_map() in net/core/net-sysfs.c: the new map is
 * published with rcu_assign_pointer() and the old one reclaimed via
 * kfree_rcu(), with the rps_needed static key adjusted accordingly.
 * Returns the number of CPUs mapped, or a negative errno / -1 on failure.
 */
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	/* Serializes concurrent updates to queue->rps_map */
	static DEFINE_SPINLOCK(rps_map_lock);

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* Parse the hex CPU bitmap string into @mask */
	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	map = kzalloc(max_t(unsigned int,
		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* Fill the map with every CPU set in the parsed mask */
	i = 0;
	for_each_cpu(cpu, mask) {
		map->cpus[i++] = cpu;
	}

	if (i) {
		map->len = i;
	} else {
		/* Empty mask: nothing to install */
		kfree(map);
		map = NULL;
		free_cpumask_var(mask);
		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
		return -1;
	}

	/* Publish the new map; readers use RCU, writers this spinlock */
	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
		lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	/* map is always non-NULL here (the empty case returned above) */
	if (map) {
		static_key_slow_inc(&rps_needed);
	}
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);

	DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
	return map->len;
}
13750
13751void custom_rps_map_clear(struct netdev_rx_queue *queue)
13752{
13753 struct rps_map *map;
13754
13755 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
13756
13757 map = rcu_dereference_protected(queue->rps_map, 1);
13758 if (map) {
13759 RCU_INIT_POINTER(queue->rps_map, NULL);
13760 kfree_rcu(map, rcu);
13761 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
13762 }
13763}
13764#endif
13765
13766
13767
13768#ifdef DHD_DEBUG_PAGEALLOC
13769
/*
 * DHD_DEBUG_PAGEALLOC hook: called when a corrupted page owned by the
 * driver is detected.  Dumps the corrupt region to the kernel log and,
 * on PCIe builds with FW core dump, pulls the dongle RAM image which
 * then triggers BUG_ON() via the DUMP_MEMONLY path.
 */
void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;

	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
		__FUNCTION__, addr_corrupt, (uint32)len));

	/* Hold a wake lock so the host does not suspend mid-dump */
	DHD_OS_WAKE_LOCK(dhdp);
	prhex("Page Corruption:", addr_corrupt, len);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_page_corrupt_cb);
13790#endif /* DHD_DEBUG_PAGEALLOC */
13791
13792#ifdef DHD_PKTID_AUDIT_ENABLED
/*
 * Packet-ID audit failure hook: log the event and, on PCIe builds with
 * FW core dump, capture the dongle RAM image; DUMP_MEMFILE_BUGON makes
 * the dump path BUG_ON() after the file is written.
 */
void
dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
	/* Keep the host awake for the duration of the dump */
	DHD_OS_WAKE_LOCK(dhdp);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
13807#endif /* DHD_PKTID_AUDIT_ENABLED */
13808
13809/* ----------------------------------------------------------------------------
13810 * Infrastructure code for sysfs interface support for DHD
13811 *
13812 * What is sysfs interface?
13813 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
13814 *
13815 * Why sysfs interface?
13816 * This is the Linux standard way of changing/configuring Run Time parameters
13817 * for a driver. We can use this interface to control "linux" specific driver
13818 * parameters.
13819 *
13820 * -----------------------------------------------------------------------------
13821 */
13822
13823#include <linux/sysfs.h>
13824#include <linux/kobject.h>
13825
13826#if defined(DHD_TRACE_WAKE_LOCK)
13827
13828/* Function to show the history buffer */
13829static ssize_t
13830show_wklock_trace(struct dhd_info *dev, char *buf)
13831{
13832 ssize_t ret = 0;
13833 dhd_info_t *dhd = (dhd_info_t *)dev;
13834
13835 buf[ret] = '\n';
13836 buf[ret+1] = 0;
13837
13838 dhd_wk_lock_stats_dump(&dhd->pub);
13839 return ret+1;
13840}
13841
13842/* Function to enable/disable wakelock trace */
13843static ssize_t
13844wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
13845{
13846 unsigned long onoff;
13847 unsigned long flags;
13848 dhd_info_t *dhd = (dhd_info_t *)dev;
13849
13850 onoff = bcm_strtoul(buf, NULL, 10);
13851 if (onoff != 0 && onoff != 1) {
13852 return -EINVAL;
13853 }
13854
13855 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
13856 trace_wklock_onoff = onoff;
13857 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
13858 if (trace_wklock_onoff) {
13859 printk("ENABLE WAKLOCK TRACE\n");
13860 } else {
13861 printk("DISABLE WAKELOCK TRACE\n");
13862 }
13863
13864 return (ssize_t)(onoff+1);
13865}
13866#endif /* DHD_TRACE_WAKE_LOCK */
13867
13868/*
13869 * Generic Attribute Structure for DHD.
13870 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
13871 * to instantiate an object of type dhd_attr, populate it with
13872 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
13873 * and add the object to default_attrs[] array, that gets registered
13874 * to the kobject of dhd (named bcm-dhd).
13875 */
13876
/* One sysfs node under /sys/bcm-dhd/: attribute metadata plus the
 * typed show/store callbacks dispatched by dhd_show()/dhd_store().
 */
struct dhd_attr {
	struct attribute attr;      /* name and mode of the sysfs file */
	ssize_t(*show)(struct dhd_info *, char *);                  /* read handler */
	ssize_t(*store)(struct dhd_info *, const char *, size_t count); /* write handler */
};
13882
#if defined(DHD_TRACE_WAKE_LOCK)
/* /sys/bcm-dhd/wklock_trace: rw for owner/group (0660) */
static struct dhd_attr dhd_attr_wklock =
	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
#endif /* defined(DHD_TRACE_WAKE_LOCK) */

/* Attribute object that gets registered with "bcm-dhd" kobject tree */
static struct attribute *default_attrs[] = {
#if defined(DHD_TRACE_WAKE_LOCK)
	&dhd_attr_wklock.attr,
#endif
	NULL	/* sysfs requires a NULL terminator */
};
13895
13896#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
13897#define to_attr(a) container_of(a, struct dhd_attr, attr)
13898
13899/*
13900 * bcm-dhd kobject show function, the "attr" attribute specifices to which
13901 * node under "bcm-dhd" the show function is called.
13902 */
13903static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
13904{
13905 dhd_info_t *dhd = to_dhd(kobj);
13906 struct dhd_attr *d_attr = to_attr(attr);
13907 int ret;
13908
13909 if (d_attr->show)
13910 ret = d_attr->show(dhd, buf);
13911 else
13912 ret = -EIO;
13913
13914 return ret;
13915}
13916
13917
13918/*
13919 * bcm-dhd kobject show function, the "attr" attribute specifices to which
13920 * node under "bcm-dhd" the store function is called.
13921 */
13922static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
13923 const char *buf, size_t count)
13924{
13925 dhd_info_t *dhd = to_dhd(kobj);
13926 struct dhd_attr *d_attr = to_attr(attr);
13927 int ret;
13928
13929 if (d_attr->store)
13930 ret = d_attr->store(dhd, buf, count);
13931 else
13932 ret = -EIO;
13933
13934 return ret;
13935
13936}
13937
/* sysfs read/write dispatch table for the bcm-dhd kobject */
static struct sysfs_ops dhd_sysfs_ops = {
	.show = dhd_show,
	.store = dhd_store,
};

/* kobject type: ties the ops and default attribute list together */
static struct kobj_type dhd_ktype = {
	.sysfs_ops = &dhd_sysfs_ops,
	.default_attrs = default_attrs,
};
13947
13948/* Create a kobject and attach to sysfs interface */
13949static int dhd_sysfs_init(dhd_info_t *dhd)
13950{
13951 int ret = -1;
13952
13953 if (dhd == NULL) {
13954 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13955 return ret;
13956 }
13957
13958 /* Initialize the kobject */
13959 ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
13960 if (ret) {
13961 kobject_put(&dhd->dhd_kobj);
13962 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
13963 return ret;
13964 }
13965
13966 /*
13967 * We are always responsible for sending the uevent that the kobject
13968 * was added to the system.
13969 */
13970 kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
13971
13972 return ret;
13973}
13974
13975/* Done with the kobject and detach the sysfs interface */
13976static void dhd_sysfs_exit(dhd_info_t *dhd)
13977{
13978 if (dhd == NULL) {
13979 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13980 return;
13981 }
13982
13983 /* Releae the kobject */
13984 kobject_put(&dhd->dhd_kobj);
13985}
13986
13987#ifdef DHD_LOG_DUMP
/*
 * Allocate and initialize the debug log ring (dld_buf).
 * Prefers the preallocated static buffer when configured, falling back to
 * kmalloc if that fails; if no buffer can be obtained the ring stays
 * disabled (dld_enable is never set).
 */
void
dhd_log_dump_init(dhd_pub_t *dhd)
{
	spin_lock_init(&dhd->dld_buf.lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
		DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
#else
	dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	if (!dhd->dld_buf.buffer) {
		/* Static/prealloc path failed; retry with plain kmalloc */
		dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
		DHD_ERROR(("Try to allocate memory using kmalloc().\n"));

		if (!dhd->dld_buf.buffer) {
			DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
			return;
		}
	}

	/* Empty ring: write cursor at the front, full capacity remaining */
	dhd->dld_buf.wraparound = 0;
	dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_buf.present = dhd->dld_buf.buffer;
	dhd->dld_buf.front = dhd->dld_buf.buffer;
	dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_enable = 1;
}
14016
/*
 * Disable the debug log ring and release its buffer, using the same
 * (prealloc vs kmalloc) path that dhd_log_dump_init() allocated with.
 */
void
dhd_log_dump_deinit(dhd_pub_t *dhd)
{
	/* Stop writers before the buffer goes away */
	dhd->dld_enable = 0;
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	DHD_OS_PREFREE(dhd,
		dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
#else
	kfree(dhd->dld_buf.buffer);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
}
14028
14029void
14030dhd_log_dump_print(const char *fmt, ...)
14031{
14032 int len = 0;
14033 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
14034 va_list args;
14035 dhd_pub_t *dhd = NULL;
14036 unsigned long flags = 0;
14037
14038 if (wl_get_bcm_cfg80211_ptr()) {
14039 dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
14040 }
14041
14042 if (!dhd || dhd->dld_enable != 1) {
14043 return;
14044 }
14045
14046 va_start(args, fmt);
14047
14048 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
14049 if (len < 0) {
14050 return;
14051 }
14052
14053 /* make a critical section to eliminate race conditions */
14054 spin_lock_irqsave(&dhd->dld_buf.lock, flags);
14055 if (dhd->dld_buf.remain < len) {
14056 dhd->dld_buf.wraparound = 1;
14057 dhd->dld_buf.present = dhd->dld_buf.front;
14058 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
14059 }
14060
14061 strncpy(dhd->dld_buf.present, tmp_buf, len);
14062 dhd->dld_buf.remain -= len;
14063 dhd->dld_buf.present += len;
14064 spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
14065
14066 /* double check invalid memory operation */
14067 ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
14068 va_end(args);
14069}
14070
14071char*
14072dhd_log_dump_get_timestamp(void)
14073{
14074 static char buf[16];
14075 u64 ts_nsec;
14076 unsigned long rem_nsec;
14077
14078 ts_nsec = local_clock();
14079 rem_nsec = do_div(ts_nsec, 1000000000);
14080 snprintf(buf, sizeof(buf), "%5lu.%06lu",
14081 (unsigned long)ts_nsec, rem_nsec / 1000);
14082
14083 return buf;
14084}
14085
14086#endif /* DHD_LOG_DUMP */
14087
14088/* ---------------------------- End of sysfs implementation ------------------------------------- */
14089
14090void *dhd_get_pub(struct net_device *dev)
14091{
14092 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
14093 if (dhdinfo)
14094 return (void *)&dhdinfo->pub;
08dfb6c4
RC
14095 else {
14096 printf("%s: null dhdinfo\n", __FUNCTION__);
14097 return NULL;
14098 }
14099}
14100
14101void *dhd_get_conf(struct net_device *dev)
14102{
14103 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
14104 if (dhdinfo)
14105 return (void *)dhdinfo->pub.conf;
14106 else {
14107 printf("%s: null dhdinfo\n", __FUNCTION__);
ef6a5fee 14108 return NULL;
08dfb6c4 14109 }
ef6a5fee
RC
14110}
14111
14112bool dhd_os_wd_timer_enabled(void *bus)
14113{
14114 dhd_pub_t *pub = bus;
14115 dhd_info_t *dhd = (dhd_info_t *)pub->info;
14116
14117 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
14118 if (!dhd) {
14119 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
14120 return FALSE;
14121 }
14122 return dhd->wd_timer_valid;
14123}