wifi: update bcm4358 wifi driver
GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git: bcmdhd.1.363.59.144.x.cn/dhd_linux.c
/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Copyright (C) 1999-2016, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */


#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */

#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <proto/ethernet.h>
#include <proto/bcmevent.h>
#include <proto/vlan.h>
#include <proto/802.3.h>

#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#include <dhd_dbg.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

#ifdef DHD_WMF
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */

#ifdef DHD_L2_FILTER
#include <proto/bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */


#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */

#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */


#if defined(DHD_LB)
/* Dynamic CPU selection for load balancing */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#if !defined(DHD_LB_PRIMARY_CPUS)
#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
#endif

#if !defined(DHD_LB_SECONDARY_CPUS)
#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
#endif
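
/* Illustrative note (not part of the original source): these masks encode CPU
 * core ids as bits, bit N standing for core N. E.g. the default
 * DHD_LB_SECONDARY_CPUS of 0xFE selects cores 1-7 and leaves the boot core
 * (CPU0, bit 0) out; dhd_cpumasks_init() below walks these bits with isset()
 * to build the corresponding cpumasks.
 */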

#define HIST_BIN_SIZE 8

#if defined(DHD_LB_RXP)
static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
#endif /* DHD_LB_RXP */

#endif /* DHD_LB */

#ifdef WLMEDIA_HTSF
#include <linux/time.h>
#include <htsf.h>

#define HTSF_MINLEN 200 /* min. packet length to timestamp */
#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
#define TSMAX 1000 /* max no. of timing records kept */
#define NUMBIN 34

static uint32 tsidx = 0;
static uint32 htsf_seqnum = 0;
uint32 tsfsync;
struct timeval tsync;
static uint32 tsport = 5010;

typedef struct histo_ {
    uint32 bin[NUMBIN];
} histo_t;

#if !ISPOWEROF2(DHD_SDALIGN)
#error DHD_SDALIGN is not a power of 2!
#endif

static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */

#ifdef STBLINUX
#ifdef quote_str
#undef quote_str
#endif /* quote_str */
#ifdef to_str
#undef to_str
#endif /* to_str */
#define to_str(s) #s
#define quote_str(s) to_str(s)

static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
#endif /* STBLINUX */


#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif
extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */
#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */
#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef BCM_FD_AGGR
#include <bcm_rpc.h>
#include <bcm_rpc_tp.h>
#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif

#include <wl_android.h>

#ifdef CUSTOMER_HW_AMLOGIC
#include <linux/amlogic/wifi_dt.h>
#endif

/* Maximum STA per radio */
#define DHD_MAX_STA 32



const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
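
/* Worked example (illustrative, not part of the original source): for 802.1D
 * priority 5 (video), prio2fifo[5] == 2 and wme_fifo2ac[2] == 2, so
 * WME_PRIO2AC(5) selects AC index 2 (AC_VI in the usual 802.11e numbering,
 * an assumption here); priority 0 maps through prio2fifo[0] == 1 to
 * wme_fifo2ac[1] == 1, the best-effort AC.
 */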

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
    unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
    .notifier_call = dhd_inetaddr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static int dhd_inet6addr_notifier_call(struct notifier_block *this,
    unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
    .notifier_call = dhd_inet6addr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */

#include <dhd_bus.h>

#ifdef BCM_FD_AGGR
#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif
#endif /* BCM_FD_AGGR */
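
/* Sizing sketch (illustrative; the concrete numbers are assumptions, not from
 * the original source): with a 1500-byte MTU, a 14-byte hard_header_len and a
 * bus header length of, say, 12, the non-aggregating macro yields a 1526-byte
 * rx buffer; the PROP_TXSTATUS variant reserves 128 extra bytes for wlfc
 * signalling data that may ride along with rx frames.
 */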

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
    return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend register_pre_suspend
#define unregister_early_suspend unregister_pre_suspend
#define early_suspend pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);

#ifdef PKT_FILTER_SUPPORT
extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
#endif


#ifdef READ_MACADDR
extern int dhd_read_macaddr(struct dhd_info *dhd);
#else
static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
#endif
#ifdef WRITE_MACADDR
extern int dhd_write_macaddr(struct ether_addr *mac);
#else
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif





#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_LOG_DUMP
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
void dhd_schedule_log_dump(dhd_pub_t *dhdp);
static int do_dhd_log_dump(dhd_pub_t *dhdp);
#endif /* DHD_LOG_DUMP */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
    .notifier_call = dhd_reboot_callback,
    .priority = 1,
};

#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */

typedef struct dhd_if_event {
    struct list_head list;
    wl_event_data_if_t event;
    char name[IFNAMSIZ+1];
    uint8 mac[ETHER_ADDR_LEN];
} dhd_if_event_t;

/* Interface control information */
typedef struct dhd_if {
    struct dhd_info *info; /* back pointer to dhd_info */
    /* OS/stack specifics */
    struct net_device *net;
    int idx; /* iface idx in dongle */
    uint subunit; /* subunit */
    uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
    bool set_macaddress;
    bool set_multicast;
    uint8 bssidx; /* bsscfg index for the interface */
    bool attached; /* Delayed attachment when unset */
    bool txflowcontrol; /* Per interface flow control indicator */
    char name[IFNAMSIZ+1]; /* linux interface name */
    char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
    struct net_device_stats stats;
#ifdef DHD_WMF
    dhd_wmf_t wmf; /* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
    struct list_head sta_list; /* sll of associated stations */
#if !defined(BCM_GMAC3)
    spinlock_t sta_list_lock; /* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
    uint32 ap_isolate; /* ap-isolation settings */
#ifdef DHD_L2_FILTER
    bool parp_enable;
    bool parp_discard;
    bool parp_allnode;
    arp_table_t *phnd_arp_table;
    /* for Per BSS modification */
    bool dhcp_unicast;
    bool block_ping;
    bool grat_arp;
#endif /* DHD_L2_FILTER */
} dhd_if_t;

#ifdef WLMEDIA_HTSF
typedef struct {
    uint32 low;
    uint32 high;
} tsf_t;

typedef struct {
    uint32 last_cycle;
    uint32 last_sec;
    uint32 last_tsf;
    uint32 coef; /* scaling factor */
    uint32 coefdec1; /* first decimal */
    uint32 coefdec2; /* second decimal */
} htsf_t;

typedef struct {
    uint32 t1;
    uint32 t2;
    uint32 t3;
    uint32 t4;
} tstamp_t;

static tstamp_t ts[TSMAX];
static tstamp_t maxdelayts;
static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;

#endif /* WLMEDIA_HTSF */

struct ipv6_work_info_t {
    uint8 if_idx;
    char ipv6_addr[16];
    unsigned long event;
};

#ifdef DHD_DEBUG
typedef struct dhd_dump {
    uint8 *buf;
    int bufsize;
} dhd_dump_t;
#endif /* DHD_DEBUG */

/* When Perimeter locks are deployed, any blocking calls must be preceded
 * with a PERIM UNLOCK and followed by a PERIM LOCK.
 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
 * wait_event_timeout().
 */

/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
    wl_iw_t iw; /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
    dhd_pub_t pub;
    dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */

    void *adapter; /* adapter information, interrupt, fw path etc. */
    char fw_path[PATH_MAX]; /* path to firmware image */
    char nv_path[PATH_MAX]; /* path to nvram vars file */
    char conf_path[PATH_MAX]; /* path to config vars file */

    /* serialize dhd iovars */
    struct mutex dhd_iovar_mutex;

    struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
    spinlock_t wlfc_spinlock;

#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
    htsf_t htsf;
#endif
    wait_queue_head_t ioctl_resp_wait;
    wait_queue_head_t d3ack_wait;
    wait_queue_head_t dhd_bus_busy_state_wait;
    uint32 default_wd_interval;

    struct timer_list timer;
    bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
    struct timer_list rpm_timer;
    bool rpm_timer_valid;
    tsk_ctl_t thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
    struct tasklet_struct tasklet;
    spinlock_t sdlock;
    spinlock_t txqlock;
    spinlock_t dhd_lock;

    struct semaphore sdsem;
    tsk_ctl_t thr_dpc_ctl;
    tsk_ctl_t thr_wdt_ctl;

    tsk_ctl_t thr_rxf_ctl;
    spinlock_t rxf_lock;
    bool rxthread_enabled;

    /* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
    struct wake_lock wl_wifi; /* Wifi wakelock */
    struct wake_lock wl_rxwake; /* Wifi rx wakelock */
    struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
    struct wake_lock wl_wdwake; /* Wifi wd wakelock */
    struct wake_lock wl_evtwake; /* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
    struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    struct wake_lock wl_scanwake; /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
    /* net_device interface lock, prevent race conditions among net_dev interface
     * calls and wifi_on or wifi_off
     */
    struct mutex dhd_net_if_mutex;
    struct mutex dhd_suspend_mutex;
#endif
    spinlock_t wakelock_spinlock;
    spinlock_t wakelock_evt_spinlock;
    uint32 wakelock_event_counter;
    uint32 wakelock_counter;
    int wakelock_wd_counter;
    int wakelock_rx_timeout_enable;
    int wakelock_ctrl_timeout_enable;
    bool waive_wakelock;
    uint32 wakelock_before_waive;

    /* Thread to issue ioctl for multicast */
    wait_queue_head_t ctrl_wait;
    atomic_t pend_8021x_cnt;
    dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
    dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
    struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
    u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
    void *rpc_th;
    void *rpc_osh;
    struct timer_list rpcth_timer;
    bool rpcth_timer_active;
    uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
    spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
    bool cpufreq_fix_status;
    struct mutex cpufreq_fix;
    struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
    struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
    void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
    struct notifier_block freq_trans;
    int __percpu *new_freq;
#endif
    unsigned int unit;
    struct notifier_block pm_notifier;
#ifdef DHD_PSTA
    uint32 psta_mode; /* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_DEBUG
    dhd_dump_t *dump;
    struct timer_list join_timer;
    u32 join_timeout_val;
    bool join_timer_active;
    uint scan_time_count;
    struct timer_list scan_timer;
    bool scan_timer_active;
#endif
#if defined(DHD_LB)
    /* CPU Load Balance dynamic CPU selection */

    /* Variable that tracks the current CPUs available for candidacy */
    cpumask_var_t cpumask_curr_avail;

    /* Primary and secondary CPU mask */
    cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
    cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

    struct notifier_block cpu_notifier;

    /* Tasklet to handle Tx Completion packet freeing */
    struct tasklet_struct tx_compl_tasklet;
    atomic_t tx_compl_cpu;


    /* Tasklet to handle RxBuf Post during Rx completion */
    struct tasklet_struct rx_compl_tasklet;
    atomic_t rx_compl_cpu;

    /* Napi struct for handling rx packet sendup. Packets are removed from
     * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
     * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
     * to run on rx_napi_cpu.
     */
    struct sk_buff_head rx_pend_queue ____cacheline_aligned;
    struct sk_buff_head rx_napi_queue ____cacheline_aligned;
    struct napi_struct rx_napi_struct ____cacheline_aligned;
    atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
    struct net_device *rx_napi_netdev; /* netdev of primary interface */

    struct work_struct rx_napi_dispatcher_work;
    struct work_struct tx_compl_dispatcher_work;
    struct work_struct rx_compl_dispatcher_work;
    /* Number of times DPC Tasklet ran */
    uint32 dhd_dpc_cnt;

    /* Number of times NAPI processing got scheduled */
    uint32 napi_sched_cnt;

    /* Number of times NAPI processing ran on each available core */
    uint32 napi_percpu_run_cnt[NR_CPUS];

    /* Number of times RX Completions got scheduled */
    uint32 rxc_sched_cnt;
    /* Number of times RX Completion ran on each available core */
    uint32 rxc_percpu_run_cnt[NR_CPUS];

    /* Number of times TX Completions got scheduled */
    uint32 txc_sched_cnt;
    /* Number of times TX Completions ran on each available core */
    uint32 txc_percpu_run_cnt[NR_CPUS];

    /* CPU status */
    /* Number of times each CPU came online */
    uint32 cpu_online_cnt[NR_CPUS];

    /* Number of times each CPU went offline */
    uint32 cpu_offline_cnt[NR_CPUS];

    /*
     * Consumer Histogram - NAPI RX Packet processing
     * -----------------------------------------------
     * On each CPU, when the NAPI RX packet processing callback is invoked,
     * the number of packets processed is captured in this data structure.
     * It is difficult to capture the "exact" number of packets processed,
     * so considering the packet counter to be a 32 bit one, we have a
     * bucket with 8 bins (2^1, 2^2 ... 2^8). The number of packets
     * processed is rounded up to the next power of 2 and the value in the
     * appropriate bin gets incremented.
     * For example, assume that on CPU 1 NAPI Rx runs 3 times and the packet
     * counts processed are as follows (assume the bin counters start at 0):
     * iteration 1 - 10 (the bin counter 2^4 increments to 1)
     * iteration 2 - 30 (the bin counter 2^5 increments to 1)
     * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
     */
    uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
    uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
    uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
#endif /* DHD_LB */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

    struct kobject dhd_kobj;
#ifdef SUPPORT_SENSORHUB
    uint32 shub_enable;
#endif /* SUPPORT_SENSORHUB */

    struct delayed_work dhd_memdump_work;
} dhd_info_t;

#define DHDIF_FWDER(dhdif) FALSE

/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware, chip, chip revision version info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);

#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP */

extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */

/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
module_param(iw_msg_level, int, 0);
#endif
#ifdef WL_CFG80211
module_param(wl_dbg_level, int, 0);
#endif
module_param(android_msg_level, int, 0);
module_param(config_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);

/* Watchdog interval */

/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0644);
#endif /* defined(DHD_DEBUG) */


uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = FALSE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));

#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

/* Functions to manage sysfs interface for dhd */
static int dhd_sysfs_init(dhd_info_t *dhd);
static void dhd_sysfs_exit(dhd_info_t *dhd);

#if defined(DHD_LB)

static void
dhd_lb_set_default_cpus(dhd_info_t *dhd)
{
    /* Default CPU allocation for the jobs */
    atomic_set(&dhd->rx_napi_cpu, 1);
    atomic_set(&dhd->rx_compl_cpu, 2);
    atomic_set(&dhd->tx_compl_cpu, 2);
}

static void
dhd_cpumasks_deinit(dhd_info_t *dhd)
{
    free_cpumask_var(dhd->cpumask_curr_avail);
    free_cpumask_var(dhd->cpumask_primary);
    free_cpumask_var(dhd->cpumask_primary_new);
    free_cpumask_var(dhd->cpumask_secondary);
    free_cpumask_var(dhd->cpumask_secondary_new);
}

static int
dhd_cpumasks_init(dhd_info_t *dhd)
{
    int id;
    uint32 cpus;
    int ret = 0;

    if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
        !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
        !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
        !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
        !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
        DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
        ret = -ENOMEM;
        goto fail;
    }

    cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
    cpumask_clear(dhd->cpumask_primary);
    cpumask_clear(dhd->cpumask_secondary);

    cpus = DHD_LB_PRIMARY_CPUS;
    for (id = 0; id < NR_CPUS; id++) {
        if (isset(&cpus, id))
            cpumask_set_cpu(id, dhd->cpumask_primary);
    }

    cpus = DHD_LB_SECONDARY_CPUS;
    for (id = 0; id < NR_CPUS; id++) {
        if (isset(&cpus, id))
            cpumask_set_cpu(id, dhd->cpumask_secondary);
    }

    return ret;
fail:
    dhd_cpumasks_deinit(dhd);
    return ret;
}

/*
 * The CPU Candidacy Algorithm
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The available CPUs for selection are divided into two groups
 * Primary Set - A CPU mask that carries the First Choice CPUs
 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
 *
 * There are two types of Jobs that need to be assigned to
 * the CPUs, from one of the above mentioned CPU groups. The Jobs are
 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, Rx) - compl_cpu
 *
 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
 * algorithm tries to pick up the first available CPU other than the boot
 * CPU (CPU0) for napi_cpu.
 * If there are more processors free, it assigns one to compl_cpu.
 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
 * CPU, as much as possible.
 *
 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
 * would allow Tx completion skb's to be released into a local free pool from
 * which the rx buffer posts could have been serviced. It is important to note
 * that a Tx packet may not have a large enough buffer for rx posting.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
    uint32 primary_available_cpus; /* count of primary available cpus */
    uint32 secondary_available_cpus; /* count of secondary available cpus */
    uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
    uint32 compl_cpu = 0; /* cpu selected for completion jobs */

    cpumask_clear(dhd->cpumask_primary_new);
    cpumask_clear(dhd->cpumask_secondary_new);

    /*
     * Now select from the primary mask. Even if a Job is
     * already running on a CPU in secondary group, we still move
     * to primary CPU. So no conditional checks.
     */
    cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
        dhd->cpumask_curr_avail);

    cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
        dhd->cpumask_curr_avail);

    primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

    if (primary_available_cpus > 0) {
        napi_cpu = cpumask_first(dhd->cpumask_primary_new);

        /* If no further CPU is available,
         * cpumask_next returns >= nr_cpu_ids
         */
        compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
        if (compl_cpu >= nr_cpu_ids)
            compl_cpu = 0;
    }

    DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
        __FUNCTION__, napi_cpu, compl_cpu));

    /* -- Now check for the CPUs from the secondary mask -- */
    secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

    DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
        __FUNCTION__, secondary_available_cpus, nr_cpu_ids));

    if (secondary_available_cpus > 0) {
        /* At this point if napi_cpu is unassigned it means no CPU
         * is online from Primary Group
         */
        if (napi_cpu == 0) {
            napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
            compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
        } else if (compl_cpu == 0) {
            compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
        }

        /* If no CPU was available for completion, choose CPU 0 */
        if (compl_cpu >= nr_cpu_ids)
            compl_cpu = 0;
    }
    if ((primary_available_cpus == 0) &&
        (secondary_available_cpus == 0)) {
        /* No CPUs available from primary or secondary mask */
        napi_cpu = 0;
        compl_cpu = 0;
    }

    DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
        __FUNCTION__, napi_cpu, compl_cpu));
    ASSERT(napi_cpu < nr_cpu_ids);
    ASSERT(compl_cpu < nr_cpu_ids);

    atomic_set(&dhd->rx_napi_cpu, napi_cpu);
    atomic_set(&dhd->tx_compl_cpu, compl_cpu);
    atomic_set(&dhd->rx_compl_cpu, compl_cpu);
    return;
}
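
/* Worked example (illustrative, not part of the original source): with the
 * default masks above (primary 0x0, secondary 0xFE) and CPUs 0-3 online, the
 * primary intersection is empty, so the secondary pass picks
 * napi_cpu = 1 (first set bit of 0xFE & 0x0F) and
 * compl_cpu = cpumask_next(1, ...) = 2. If only CPU0 is online, both
 * selections fall back to CPU0.
 */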

/*
 * Function to handle CPU Hotplug notifications.
 * One of the tasks it does is to trigger the CPU Candidacy algorithm
 * for load balancing.
 */
int
dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned int)(long)hcpu;

    dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);

    switch (action)
    {
        case CPU_ONLINE:
            DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
            cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
            dhd_select_cpu_candidacy(dhd);
            break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
            DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
            cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
            dhd_select_cpu_candidacy(dhd);
            break;
        default:
            break;
    }

    return NOTIFY_OK;
}

#if defined(DHD_LB_STATS)
void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
    dhd_info_t *dhd;
    int i, j;

    if (dhdp == NULL) {
        DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
            __FUNCTION__));
        return;
    }

    dhd = dhdp->info;
    if (dhd == NULL) {
        DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
        return;
    }

    DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
    DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
    DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
    DHD_LB_STATS_CLR(dhd->txc_sched_cnt);

    for (i = 0; i < NR_CPUS; i++) {
        DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
        DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
        DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);

        DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
        DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
    }

    for (i = 0; i < NR_CPUS; i++) {
        for (j = 0; j < HIST_BIN_SIZE; j++) {
            DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
            DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
            DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
        }
    }

    return;
}

static void dhd_lb_stats_dump_histo(
    struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
{
    int i, j;
    uint32 per_cpu_total[NR_CPUS] = {0};
    uint32 total = 0;

    bcm_bprintf(strbuf, "CPU: \t\t");
    for (i = 0; i < num_possible_cpus(); i++)
        bcm_bprintf(strbuf, "%d\t", i);
    bcm_bprintf(strbuf, "\nBin\n");

    for (i = 0; i < HIST_BIN_SIZE; i++) {
        bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
        for (j = 0; j < num_possible_cpus(); j++) {
            bcm_bprintf(strbuf, "%d\t", hist[j][i]);
        }
        bcm_bprintf(strbuf, "\n");
    }
    bcm_bprintf(strbuf, "Per CPU Total \t");
    total = 0;
    for (i = 0; i < num_possible_cpus(); i++) {
        for (j = 0; j < HIST_BIN_SIZE; j++) {
            per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
        }
        bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
        total += per_cpu_total[i];
    }
    bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);

    return;
}

static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
{
    int i;

    bcm_bprintf(strbuf, "CPU: \t");
    for (i = 0; i < num_possible_cpus(); i++)
        bcm_bprintf(strbuf, "%d\t", i);
    bcm_bprintf(strbuf, "\n");

    bcm_bprintf(strbuf, "Val: \t");
    for (i = 0; i < num_possible_cpus(); i++)
        bcm_bprintf(strbuf, "%u\t", *(p+i));
    bcm_bprintf(strbuf, "\n");
    return;
}

void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
    dhd_info_t *dhd;

    if (dhdp == NULL || strbuf == NULL) {
        DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
            __FUNCTION__, dhdp, strbuf));
        return;
    }

    dhd = dhdp->info;
    if (dhd == NULL) {
        DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
        return;
    }

    bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
    dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

    bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
    dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

    bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
        dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
        dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
    bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
    dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
    bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
    dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
    bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
    dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
    bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
    dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */


#ifdef DHD_LB_TXC
    bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
    dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
    bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
    dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}

static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
{
    uint32 bin_power;
    uint32 *p = NULL;

    bin_power = next_larger_power2(count);

    switch (bin_power) {
        case   0: break;
        case   1: /* Fall through intentionally */
        case   2: p = bin + 0; break;
        case   4: p = bin + 1; break;
        case   8: p = bin + 2; break;
        case  16: p = bin + 3; break;
        case  32: p = bin + 4; break;
        case  64: p = bin + 5; break;
        case 128: p = bin + 6; break;
        default : p = bin + 7; break;
    }
    if (p)
        *p = *p + 1;
    return;
}
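
/* Worked example (illustrative, not part of the original source): a run that
 * processed count = 10 packets has next_larger_power2(10) == 16, so the 2^4
 * bin (bin + 3) is incremented; counts of 1 or 2 both land in the first bin,
 * and anything above 128 falls into the last bin (bin + 7), matching the
 * 8-bin histogram described for napi_rx_hist above.
 */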

extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
{
    int cpu;
    dhd_info_t *dhd = dhdp->info;

    cpu = get_cpu();
    put_cpu();
    dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);

    return;
}

extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
    int cpu;
    dhd_info_t *dhd = dhdp->info;

    cpu = get_cpu();
    put_cpu();
    dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);

    return;
}

extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
    int cpu;
    dhd_info_t *dhd = dhdp->info;

    cpu = get_cpu();
    put_cpu();
    dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);

    return;
}

extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
    dhd_info_t *dhd = dhdp->info;
    DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}

extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
    dhd_info_t *dhd = dhdp->info;
    DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}

#endif /* DHD_LB_STATS */
#endif /* DHD_LB */


#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
int g_frameburst = 1;
#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT()              do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag)      do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag)    do { /* noop */ } while (0)

#ifdef PCIE_FULL_DONGLE
#if defined(BCM_GMAC3)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp)      do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK(ifp, flags)    ({ BCM_REFERENCE(flags); })
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags)  ({ BCM_REFERENCE(flags); })

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#else /* ! BCM_GMAC3 */
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
    spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
    spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
    struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */

/* Control fw roaming */
uint dhd_roam_disable = 0;

#ifdef BCMDBGFS
extern int dhd_dbg_init(dhd_pub_t *dhdp);
extern void dhd_dbg_remove(void);
#endif

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */


#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */



/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
module_param(allow_delay_fwdl, int, 0);

extern char dhd_version[];
extern char fw_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

#ifdef WLMEDIA_HTSF
void htsf_update(dhd_info_t *dhd, void *data);
tsf_t prev_tsf, cur_tsf;

uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
static void dhd_dump_latency(void);
static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
static void dhd_dump_htsfhisto(histo_t *his, char *s);
#endif /* WLMEDIA_HTSF */

/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);


#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
    wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
    int ret = NOTIFY_DONE;
    bool suspend = FALSE;
    dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);

    BCM_REFERENCE(dhdinfo);

    switch (action) {
    case PM_HIBERNATION_PREPARE:
    case PM_SUSPEND_PREPARE:
        suspend = TRUE;
        break;

    case PM_POST_HIBERNATION:
    case PM_POST_SUSPEND:
        suspend = FALSE;
        break;
    }

#if defined(SUPPORT_P2P_GO_PS)
#ifdef PROP_TXSTATUS
    if (suspend) {
        DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
        dhd_wlfc_suspend(&dhdinfo->pub);
        DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
    } else
        dhd_wlfc_resume(&dhdinfo->pub);
#endif /* PROP_TXSTATUS */
#endif /* defined(SUPPORT_P2P_GO_PS) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
    KERNEL_VERSION(2, 6, 39))
    dhd_mmc_suspend = suspend;
    smp_mb();
#endif

    return ret;
}

/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
    dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
    dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
    int          ifidx; /* interface index */
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE   (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)   ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)   (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)    (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev)  (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)

/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
    dhd_dev_priv_t * dev_priv;
    ASSERT(dev != (struct net_device *)NULL);
    dev_priv = DHD_DEV_PRIV(dev);
    dev_priv->dhd = (dhd_info_t *)NULL;
    dev_priv->ifp = (dhd_if_t *)NULL;
    dev_priv->ifidx = DHD_BAD_IF;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
    int ifidx)
{
    dhd_dev_priv_t * dev_priv;
    ASSERT(dev != (struct net_device *)NULL);
    dev_priv = DHD_DEV_PRIV(dev);
    dev_priv->dhd = dhd;
    dev_priv->ifp = ifp;
    dev_priv->ifidx = ifidx;
}

#ifdef PCIE_FULL_DONGLE

/** Dummy objects are defined with state representing bad|down.
 * Performance gains come from reducing branch conditionals, better instruction
 * parallelism and dual issue, fewer load shadows, and availability of larger
 * pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */
1506
1507/* Dummy dhd_info object */
1508dhd_info_t dhd_info_null = {
1509#if defined(BCM_GMAC3)
1510 .fwdh = FWDER_NULL,
1511#endif
1512 .pub = {
1513 .info = &dhd_info_null,
1514#ifdef DHDTCPACK_SUPPRESS
1515 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
1516#endif /* DHDTCPACK_SUPPRESS */
1517 .up = FALSE,
1518 .busstate = DHD_BUS_DOWN
1519 }
1520};
1521#define DHD_INFO_NULL (&dhd_info_null)
1522#define DHD_PUB_NULL (&dhd_info_null.pub)
1523
1524/* Dummy netdevice object */
1525struct net_device dhd_net_dev_null = {
1526 .reg_state = NETREG_UNREGISTERED
1527};
1528#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1529
1530/* Dummy dhd_if object */
1531dhd_if_t dhd_if_null = {
1532#if defined(BCM_GMAC3)
1533 .fwdh = FWDER_NULL,
1534#endif
1535#ifdef WMF
1536 .wmf = { .wmf_enable = TRUE },
1537#endif
1538 .info = DHD_INFO_NULL,
1539 .net = DHD_NET_DEV_NULL,
1540 .idx = DHD_BAD_IF
1541};
1542#define DHD_IF_NULL (&dhd_if_null)
1543
1544#define DHD_STA_NULL ((dhd_sta_t *)NULL)
1545
1546/** Interface STA list management. */
1547
1548/** Fetch the dhd_if object, given the interface index in the dhd. */
1549static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
1550
1551/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1552static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1553static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1554
1555/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1556static void dhd_if_del_sta_list(dhd_if_t * ifp);
1557static void dhd_if_flush_sta(dhd_if_t * ifp);
1558
1559/* Construct/Destruct a sta pool. */
1560static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1561static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1562/* Clear the pool of dhd_sta_t objects for built-in type driver */
1563static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1564
1565
1566/* Return interface pointer */
1567static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1568{
1569 ASSERT(ifidx < DHD_MAX_IFS);
1570
1571 if (ifidx >= DHD_MAX_IFS)
1572 return NULL;
1573
1574 return dhdp->info->iflist[ifidx];
1575}
1576
1577/** Reset a dhd_sta object and free into the dhd pool. */
1578static void
1579dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
1580{
1581 int prio;
1582
1583 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
1584
1585 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1586
1587 /*
1588 * Flush and free all packets in all flowring's queues belonging to sta.
1589 * Packets in flow ring will be flushed later.
1590 */
1591 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1592 uint16 flowid = sta->flowid[prio];
1593
1594 if (flowid != FLOWID_INVALID) {
1595 unsigned long flags;
1596 flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
1597 flow_ring_node_t * flow_ring_node;
1598
1599#ifdef DHDTCPACK_SUPPRESS
1600 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
1601 * when there is a newly coming packet from network stack.
1602 */
1603 dhd_tcpack_info_tbl_clean(dhdp);
1604#endif /* DHDTCPACK_SUPPRESS */
1605
1606 flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
1607 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
1608 flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
1609
1610 if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
1611 void * pkt;
1612 while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
1613 PKTFREE(dhdp->osh, pkt, TRUE);
1614 }
1615 }
1616
1617 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
1618 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
1619 }
1620
1621 sta->flowid[prio] = FLOWID_INVALID;
1622 }
1623
1624 id16_map_free(dhdp->staid_allocator, sta->idx);
1625 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1626 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
1627 sta->ifidx = DHD_BAD_IF;
1628 bzero(sta->ea.octet, ETHER_ADDR_LEN);
1629 INIT_LIST_HEAD(&sta->list);
1630 sta->idx = ID16_INVALID; /* implying free */
1631}
1632
1633/** Allocate a dhd_sta object from the dhd pool. */
1634static dhd_sta_t *
1635dhd_sta_alloc(dhd_pub_t * dhdp)
1636{
1637 uint16 idx;
1638 dhd_sta_t * sta;
1639 dhd_sta_pool_t * sta_pool;
1640
1641 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1642
1643 idx = id16_map_alloc(dhdp->staid_allocator);
1644 if (idx == ID16_INVALID) {
1645 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1646 return DHD_STA_NULL;
1647 }
1648
1649 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1650 sta = &sta_pool[idx];
1651
1652 ASSERT((sta->idx == ID16_INVALID) &&
1653 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1654
1655 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1656
1657 sta->idx = idx; /* implying allocated */
1658
1659 return sta;
1660}
1661
1662/** Delete all STAs in an interface's STA list. */
1663static void
1664dhd_if_del_sta_list(dhd_if_t *ifp)
1665{
1666 dhd_sta_t *sta, *next;
1667 unsigned long flags;
1668
1669 DHD_IF_STA_LIST_LOCK(ifp, flags);
1670
1671 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1672#if defined(BCM_GMAC3)
1673 if (ifp->fwdh) {
1674 /* Remove sta from WOFA forwarder. */
1675 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1676 }
1677#endif /* BCM_GMAC3 */
1678 list_del(&sta->list);
1679 dhd_sta_free(&ifp->info->pub, sta);
1680 }
1681
1682 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1683
1684 return;
1685}
1686
1687/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1688static void
1689dhd_if_flush_sta(dhd_if_t * ifp)
1690{
1691#if defined(BCM_GMAC3)
1692
1693 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1694 dhd_sta_t *sta, *next;
1695 unsigned long flags;
1696
1697 DHD_IF_STA_LIST_LOCK(ifp, flags);
1698
1699 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1700 /* Remove any sta entry from WOFA forwarder. */
1701 fwder_flush(ifp->fwdh, (wofa_t)sta);
1702 }
1703
1704 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1705 }
1706#endif /* BCM_GMAC3 */
1707}
1708
1709/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1710static int
1711dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1712{
1713 int idx, prio, sta_pool_memsz;
1714 dhd_sta_t * sta;
1715 dhd_sta_pool_t * sta_pool;
1716 void * staid_allocator;
1717
1718 ASSERT(dhdp != (dhd_pub_t *)NULL);
1719 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1720
1721 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1722 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1723 if (staid_allocator == NULL) {
1724 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1725 return BCME_ERROR;
1726 }
1727
1728 /* Pre allocate a pool of dhd_sta objects (one extra). */
1729 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1730 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1731 if (sta_pool == NULL) {
1732 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1733 id16_map_fini(dhdp->osh, staid_allocator);
1734 return BCME_ERROR;
1735 }
1736
1737 dhdp->sta_pool = sta_pool;
1738 dhdp->staid_allocator = staid_allocator;
1739
1740 /* Initialize all sta(s) for the pre-allocated free pool. */
1741 bzero((uchar *)sta_pool, sta_pool_memsz);
1742 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1743 sta = &sta_pool[idx];
1744 sta->idx = id16_map_alloc(staid_allocator);
1745 ASSERT(sta->idx <= max_sta);
1746 }
1747 /* Now place them into the pre-allocated free pool. */
1748 for (idx = 1; idx <= max_sta; idx++) {
1749 sta = &sta_pool[idx];
1750 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1751 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1752 }
1753 dhd_sta_free(dhdp, sta);
1754 }
1755
1756 return BCME_OK;
1757}
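/*
 * Note on the two passes above: the first pass drains the id16 allocator so
 * that each sta_pool[idx] is stamped with its own id; the second pass then
 * hands every sta back through dhd_sta_free(), returning its id to the
 * allocator and marking the entry free, so the whole pool ends up on the
 * free list in a known order.
 */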
1758
1759/** Destruct the pool of dhd_sta_t objects.
1760 * Caller must ensure that no STA objects are currently associated with an if.
1761 */
1762static void
1763dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1764{
1765 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1766
1767 if (sta_pool) {
1768 int idx;
1769 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1770 for (idx = 1; idx <= max_sta; idx++) {
1771 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1772 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1773 }
1774 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1775 dhdp->sta_pool = NULL;
1776 }
1777
1778 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1779 dhdp->staid_allocator = NULL;
1780}
1781
1782/* Clear the pool of dhd_sta_t objects for a built-in (non-module) driver */
1783static void
1784dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1785{
1786 int idx, prio, sta_pool_memsz;
1787 dhd_sta_t * sta;
1788 dhd_sta_pool_t * sta_pool;
1789 void *staid_allocator;
1790
1791 if (!dhdp) {
1792 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1793 return;
1794 }
1795
1796 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1797 staid_allocator = dhdp->staid_allocator;
1798
1799 if (!sta_pool) {
1800 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1801 return;
1802 }
1803
1804 if (!staid_allocator) {
1805 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1806 return;
1807 }
1808
1809 /* clear free pool */
1810 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1811 bzero((uchar *)sta_pool, sta_pool_memsz);
1812
1813 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1814 id16_map_clear(staid_allocator, max_sta, 1);
1815
1816 /* Initialize all sta(s) for the pre-allocated free pool. */
1817 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1818 sta = &sta_pool[idx];
1819 sta->idx = id16_map_alloc(staid_allocator);
1820 ASSERT(sta->idx <= max_sta);
1821 }
1822 /* Now place them into the pre-allocated free pool. */
1823 for (idx = 1; idx <= max_sta; idx++) {
1824 sta = &sta_pool[idx];
1825 for (prio = 0; prio < (int)NUMPRIO; prio++) {
1826 sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
1827 }
1828 dhd_sta_free(dhdp, sta);
1829 }
1830}
1831
1832/** Find STA with MAC address ea in an interface's STA list. */
1833dhd_sta_t *
1834dhd_find_sta(void *pub, int ifidx, void *ea)
1835{
1836 dhd_sta_t *sta;
1837 dhd_if_t *ifp;
1838 unsigned long flags;
1839
1840 ASSERT(ea != NULL);
1841 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1842 if (ifp == NULL)
1843 return DHD_STA_NULL;
1844
1845 DHD_IF_STA_LIST_LOCK(ifp, flags);
1846
1847 list_for_each_entry(sta, &ifp->sta_list, list) {
1848 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1849 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1850 return sta;
1851 }
1852 }
1853
1854 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1855
1856 return DHD_STA_NULL;
1857}
1858
1859/** Add STA into the interface's STA list. */
1860dhd_sta_t *
1861dhd_add_sta(void *pub, int ifidx, void *ea)
1862{
1863 dhd_sta_t *sta;
1864 dhd_if_t *ifp;
1865 unsigned long flags;
1866
1867 ASSERT(ea != NULL);
1868 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1869 if (ifp == NULL)
1870 return DHD_STA_NULL;
1871
1872 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1873 if (sta == DHD_STA_NULL) {
1874 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1875 return DHD_STA_NULL;
1876 }
1877
1878 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1879
1880 /* link the sta and the dhd interface */
1881 sta->ifp = ifp;
1882 sta->ifidx = ifidx;
1883 INIT_LIST_HEAD(&sta->list);
1884
1885 DHD_IF_STA_LIST_LOCK(ifp, flags);
1886
1887 list_add_tail(&sta->list, &ifp->sta_list);
1888
1889#if defined(BCM_GMAC3)
1890 if (ifp->fwdh) {
1891 ASSERT(ISALIGNED(ea, 2));
1892 /* Add sta to WOFA forwarder. */
1893 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1894 }
1895#endif /* BCM_GMAC3 */
1896
1897 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1898
1899 return sta;
1900}
1901
1902/** Delete STA from the interface's STA list. */
1903void
1904dhd_del_sta(void *pub, int ifidx, void *ea)
1905{
1906 dhd_sta_t *sta, *next;
1907 dhd_if_t *ifp;
1908 unsigned long flags;
1909
1910 ASSERT(ea != NULL);
1911 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1912 if (ifp == NULL)
1913 return;
1914
1915 DHD_IF_STA_LIST_LOCK(ifp, flags);
1916
1917 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1918 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1919#if defined(BCM_GMAC3)
1920 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1921 ASSERT(ISALIGNED(ea, 2));
1922 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1923 }
1924#endif /* BCM_GMAC3 */
1925 list_del(&sta->list);
1926 dhd_sta_free(&ifp->info->pub, sta);
1927 }
1928 }
1929
1930 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1931#ifdef DHD_L2_FILTER
1932 if (ifp->parp_enable) {
1933 /* clear Proxy ARP cache of specific Ethernet Address */
1934 bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
1935 ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
1936 }
1937#endif /* DHD_L2_FILTER */
1938 return;
1939}
1940
1941/** Add STA if it doesn't exist. Not reentrant. */
1942dhd_sta_t*
1943dhd_findadd_sta(void *pub, int ifidx, void *ea)
1944{
1945 dhd_sta_t *sta;
1946
1947 sta = dhd_find_sta(pub, ifidx, ea);
1948
1949 if (!sta) {
1950 /* Add entry */
1951 sta = dhd_add_sta(pub, ifidx, ea);
1952 }
1953
1954 return sta;
1955}
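/*
 * Usage sketch (hypothetical caller, names illustrative): resolve-or-create
 * on the receive path when a frame arrives from an associated MAC:
 *
 *	dhd_sta_t *sta = dhd_findadd_sta(dhdp, ifidx, eh->ether_shost);
 *	if (sta == DHD_STA_NULL)
 *		return BCME_ERROR;	// pool exhausted or bad ifidx
 *
 * As noted above, this is not reentrant: two concurrent callers can both
 * miss in dhd_find_sta() and insert duplicate entries for the same MAC.
 */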
1956
1957#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1958#if !defined(BCM_GMAC3)
1959static struct list_head *
1960dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1961{
1962 unsigned long flags;
1963 dhd_sta_t *sta, *snapshot;
1964
1965 INIT_LIST_HEAD(snapshot_list);
1966
1967 DHD_IF_STA_LIST_LOCK(ifp, flags);
1968
1969 list_for_each_entry(sta, &ifp->sta_list, list) {
1970 /* allocate one and add to snapshot */
1971 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1972 if (snapshot == NULL) {
1973 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1974 continue;
1975 }
1976
1977 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1978
1979 INIT_LIST_HEAD(&snapshot->list);
1980 list_add_tail(&snapshot->list, snapshot_list);
1981 }
1982
1983 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1984
1985 return snapshot_list;
1986}
1987
1988static void
1989dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1990{
1991 dhd_sta_t *sta, *next;
1992
1993 list_for_each_entry_safe(sta, next, snapshot_list, list) {
1994 list_del(&sta->list);
1995 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
1996 }
1997}
1998#endif /* !BCM_GMAC3 */
1999#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2000
2001#else
2002static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
2003static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
2004static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
2005static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
2006static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
2007dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
2008void dhd_del_sta(void *pub, int ifidx, void *ea) {}
2009#endif /* PCIE_FULL_DONGLE */
2010
2011
2012#if defined(DHD_LB)
2013
2014#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2015/**
2016 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2017 * CPU and schedules a tasklet.
2018 * @tasklet: opaque pointer to the tasklet
2019 */
2020static INLINE void
2021dhd_tasklet_schedule(void *tasklet)
2022{
2023 tasklet_schedule((struct tasklet_struct *)tasklet);
2024}
2025
2026/**
2027 * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
2028 * @tasklet: tasklet to be scheduled
2029 * @on_cpu: cpu core id
2030 *
2031 * If the requested cpu is online, then an IPI is sent to this cpu via the
2032 * smp_call_function_single with no wait and the tasklet_schedule function
2033 * will be invoked to schedule the specified tasklet on the requested CPU.
2034 */
2035static void
2036dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2037{
2038 const int wait = 0;
2039 smp_call_function_single(on_cpu,
2040 dhd_tasklet_schedule, (void *)tasklet, wait);
2041}
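/*
 * Usage sketch (hypothetical, not part of the driver): dispatch a tasklet
 * to CPU 2 when that CPU is online, else run it locally:
 *
 *	struct tasklet_struct *t = &dhd->tx_compl_tasklet;
 *	if (cpu_online(2))
 *		dhd_tasklet_schedule_on(t, 2);
 *	else
 *		dhd_tasklet_schedule(t);
 *
 * cpu_online() alone does not pin the CPU; the dispatcher functions below
 * bracket this pattern with get_online_cpus()/put_online_cpus().
 */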
2042#endif /* DHD_LB_TXC || DHD_LB_RXC */
2043
2044
2045#if defined(DHD_LB_TXC)
2046/**
2047 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
2048 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
2049 * freeing the packets placed in the tx_compl workq
2050 */
2051void
2052dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
2053{
2054 dhd_info_t *dhd = dhdp->info;
2055 int curr_cpu, on_cpu;
2056
2057 if (dhd->rx_napi_netdev == NULL) {
2058 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2059 return;
2060 }
2061
2062 DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
2063	/*
2064	 * If the destination CPU is NOT online, or is the same as the current
2065	 * CPU, there is no need to schedule work; run the tasklet locally.
2066	 */
2067 curr_cpu = get_cpu();
2068 put_cpu();
2069
2070 on_cpu = atomic_read(&dhd->tx_compl_cpu);
2071
2072 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2073 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2074 } else {
2075 schedule_work(&dhd->tx_compl_dispatcher_work);
2076 }
2077}
2078
2079static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2080{
2081 struct dhd_info *dhd =
2082 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2083 int cpu;
2084
2085 get_online_cpus();
2086 cpu = atomic_read(&dhd->tx_compl_cpu);
2087 if (!cpu_online(cpu))
2088 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2089 else
2090 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2091 put_online_cpus();
2092}
2093
2094#endif /* DHD_LB_TXC */
2095
2096
2097#if defined(DHD_LB_RXC)
2098/**
2099 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
2100 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
2101 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
2102 * placed in the rx_compl workq.
2103 *
2104 * @dhdp: pointer to dhd_pub object
2105 */
2106void
2107dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
2108{
2109 dhd_info_t *dhd = dhdp->info;
2110 int curr_cpu, on_cpu;
2111
2112 if (dhd->rx_napi_netdev == NULL) {
2113 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2114 return;
2115 }
2116
2117 DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
2118	/*
2119	 * If the destination CPU is NOT online, or is the same as the current
2120	 * CPU, there is no need to schedule work; run the tasklet locally.
2121	 */
2122 curr_cpu = get_cpu();
2123 put_cpu();
2124
2125 on_cpu = atomic_read(&dhd->rx_compl_cpu);
2126
2127 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2128 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2129 } else {
2130 schedule_work(&dhd->rx_compl_dispatcher_work);
2131 }
2132}
2133
2134static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2135{
2136 struct dhd_info *dhd =
2137 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2138 int cpu;
2139
2140 get_online_cpus();
2141	cpu = atomic_read(&dhd->rx_compl_cpu);
2142 if (!cpu_online(cpu))
2143 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2144 else
2145 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2146 put_online_cpus();
2147}
2148
2149#endif /* DHD_LB_RXC */
2150
2151
2152#if defined(DHD_LB_RXP)
2153/**
2154 * dhd_napi_poll - Load balance napi poll function to process received
2155 * packets and send up the network stack using netif_receive_skb()
2156 *
2157 * @napi: napi object in which context this poll function is invoked
2158 * @budget: number of packets to be processed.
2159 *
2160 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
2161 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
2162 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
2163 * packet tag and sendup.
2164 */
2165static int
2166dhd_napi_poll(struct napi_struct *napi, int budget)
2167{
2168 int ifid;
2169 const int pkt_count = 1;
2170 const int chan = 0;
2171 struct sk_buff * skb;
2172 unsigned long flags;
2173 struct dhd_info *dhd;
2174 int processed = 0;
2175 struct sk_buff_head rx_process_queue;
2176
2177 dhd = container_of(napi, struct dhd_info, rx_napi_struct);
2178 DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
2179 __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
2180
2181 __skb_queue_head_init(&rx_process_queue);
2182
2183 /* extract the entire rx_napi_queue into local rx_process_queue */
2184 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2185 skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
2186 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2187
2188 while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
2189 OSL_PREFETCH(skb->data);
2190
2191 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
2192
2193 DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
2194 __FUNCTION__, skb, ifid));
2195
2196 dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
2197 processed++;
2198 }
2199
2200 DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
2201
2202 DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
2203 napi_complete(napi);
2204
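	/* Returning a value smaller than the budget tells the NAPI core that
	 * this poll is done; napi_complete() above has already removed the
	 * napi_struct from the poll list.
	 */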
2205 return budget - 1;
2206}
2207
2208/**
2209 * dhd_napi_schedule - Place the napi struct into the current cpu's softnet napi
2210 * poll list. This function may be invoked via the smp_call_function_single
2211 * from a remote CPU.
2212 *
2213 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
2214 * after the napi_struct is added to the softnet data's poll_list
2215 *
2216 * @info: pointer to a dhd_info struct
2217 */
2218static void
2219dhd_napi_schedule(void *info)
2220{
2221 dhd_info_t *dhd = (dhd_info_t *)info;
2222
2223 DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
2224 __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
2225
2226 /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
2227 if (napi_schedule_prep(&dhd->rx_napi_struct)) {
2228 __napi_schedule(&dhd->rx_napi_struct);
2229 DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
2230 }
2231
2232 /*
2233 * If the rx_napi_struct was already running, then we let it complete
2234 * processing all its packets. The rx_napi_struct may only run on one
2235 * core at a time, to avoid out-of-order handling.
2236 */
2237}
2238
2239/**
2240 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2241 * action after placing the dhd's rx_process napi object in the remote CPU's
2242 * softnet data's poll_list.
2243 *
2244 * @dhd: dhd_info which has the rx_process napi object
2245 * @on_cpu: desired remote CPU id
2246 */
2247static INLINE int
2248dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2249{
2250 int wait = 0; /* asynchronous IPI */
2251
2252 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2253 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2254
2255 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2256 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2257 __FUNCTION__, on_cpu));
2258 }
2259
2260 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2261
2262 return 0;
2263}
2264
2265/*
2266 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
2267 * Why should we do this?
2268 * The candidacy algorithm is run from the callback function registered
2269 * with the CPU hotplug notifier, and that callback runs from worker
2270 * context; dhd_napi_schedule_on also runs from worker context.
2271 * Both can therefore run on two different CPUs at the same time, so
2272 * there is a window in which a given CPUn is being brought down from
2273 * CPUm while we try to run a function on CPUn.
2274 * To prevent this, it is better to execute the whole SMP-function call
2275 * under get_online_cpus; that call ensures the hotplug mechanism does
2276 * not kick in until we are done dealing with online CPUs.
2277 * If the hotplug worker is already running there is no problem, because
2278 * the candidacy algorithm will then reflect the result in dhd->rx_napi_cpu.
2279 *
2280 * The code structure below is the one proposed in
2281 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2282 * for the question
2283 * Q: I need to ensure that a particular cpu is not removed when there is
2284 * some work specific to this cpu in progress.
2285 *
2286 * According to that documentation, calling get_online_cpus is NOT
2287 * required when running from tasklet context; since
2288 * dhd_rx_napi_dispatcher_fn can run from workqueue context, we do have
2289 * to call these functions.
2290 */
2291static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2292{
2293 struct dhd_info *dhd =
2294 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2295 int cpu;
2296
2297 get_online_cpus();
2298 cpu = atomic_read(&dhd->rx_napi_cpu);
2299 if (!cpu_online(cpu))
2300 dhd_napi_schedule(dhd);
2301 else
2302 dhd_napi_schedule_on(dhd, cpu);
2303 put_online_cpus();
2304}
2305
2306/**
2307 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
2308 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
2309 * the packets enqueued into the rx_napi_queue and sendup.
2310 * The producer's rx packet queue is appended to the rx_napi_queue before
2311 * dispatching the rx_napi_struct.
2312 */
2313void
2314dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
2315{
2316 unsigned long flags;
2317 dhd_info_t *dhd = dhdp->info;
2318 int curr_cpu;
2319 int on_cpu;
2320
2321 if (dhd->rx_napi_netdev == NULL) {
2322 DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
2323 return;
2324 }
2325
2326 DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
2327 skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
2328
2329 /* append the producer's queue of packets to the napi's rx process queue */
2330 spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
2331 skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
2332 spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
2333
2334	/*
2335	 * If the destination CPU is NOT online, or is the same as the current
2336	 * CPU, there is no need to schedule work; run the NAPI poll locally.
2337	 */
2338 curr_cpu = get_cpu();
2339 put_cpu();
2340
2341 on_cpu = atomic_read(&dhd->rx_napi_cpu);
2342
2343 if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
2344 dhd_napi_schedule(dhd);
2345 } else {
2346 schedule_work(&dhd->rx_napi_dispatcher_work);
2347 }
2348}
2349
2350/**
2351 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
2352 */
2353void
2354dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
2355{
2356 dhd_info_t *dhd = dhdp->info;
2357
2358 DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
2359 pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
2360 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
2361 __skb_queue_tail(&dhd->rx_pend_queue, pkt);
2362}
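/*
 * This is the producer side of the RXP load balancer: each packet is tagged
 * with its ifidx and parked on rx_pend_queue (__skb_queue_tail is the
 * unlocked variant, so a single producer context is assumed).
 * dhd_lb_rx_napi_dispatch() above then splices rx_pend_queue into
 * rx_napi_queue under the queue lock and kicks the NAPI poll on the
 * selected CPU.
 */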
2363#endif /* DHD_LB_RXP */
2364
2365#endif /* DHD_LB */
2366
2367static void dhd_memdump_work_handler(struct work_struct * work)
2368{
2369 struct dhd_info *dhd =
2370 container_of(work, struct dhd_info, dhd_memdump_work.work);
2371
2372 BCM_REFERENCE(dhd);
2373#ifdef BCMPCIE
2374 dhd_prot_collect_memdump(&dhd->pub);
2375#endif
2376}
2377
2378
2379/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
2380int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2381{
2382 dhd_if_t *ifp;
2383 dhd_info_t *dhd = dhdp->info;
2384 int i;
2385
2386 ASSERT(bssidx < DHD_MAX_IFS);
2387 ASSERT(dhdp);
2388
2389 for (i = 0; i < DHD_MAX_IFS; i++) {
2390 ifp = dhd->iflist[i];
2391 if (ifp && (ifp->bssidx == bssidx)) {
2392 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2393 ifp->name, bssidx, i));
2394 break;
2395 }
2396 }
2397 return i;
2398}
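/*
 * Note: if no interface matches the given bssidx, the loop above falls
 * through and DHD_MAX_IFS is returned; callers are expected to range-check
 * the returned index.
 */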
2399
2400static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
2401{
2402 uint32 store_idx;
2403 uint32 sent_idx;
2404
2405 if (!skb) {
2406 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
2407 return BCME_ERROR;
2408 }
2409
2410 dhd_os_rxflock(dhdp);
2411 store_idx = dhdp->store_idx;
2412 sent_idx = dhdp->sent_idx;
2413 if (dhdp->skbbuf[store_idx] != NULL) {
2414 /* Make sure the previous packets are processed */
2415 dhd_os_rxfunlock(dhdp);
2416#ifdef RXF_DEQUEUE_ON_BUSY
2417 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2418 skb, store_idx, sent_idx));
2419 return BCME_BUSY;
2420#else /* RXF_DEQUEUE_ON_BUSY */
2421 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
2422 skb, store_idx, sent_idx));
2423 /* removed msleep here, should use wait_event_timeout if we
2424 * want to give rx frame thread a chance to run
2425 */
2426#if defined(WAIT_DEQUEUE)
2427 OSL_SLEEP(1);
2428#endif
2429 return BCME_ERROR;
2430#endif /* RXF_DEQUEUE_ON_BUSY */
2431 }
2432 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
2433 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
2434 dhdp->skbbuf[store_idx] = skb;
2435 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
2436 dhd_os_rxfunlock(dhdp);
2437
2438 return BCME_OK;
2439}
2440
2441static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
2442{
2443 uint32 store_idx;
2444 uint32 sent_idx;
2445 void *skb;
2446
2447 dhd_os_rxflock(dhdp);
2448
2449 store_idx = dhdp->store_idx;
2450 sent_idx = dhdp->sent_idx;
2451 skb = dhdp->skbbuf[sent_idx];
2452
2453 if (skb == NULL) {
2454 dhd_os_rxfunlock(dhdp);
2455 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
2456 store_idx, sent_idx));
2457 return NULL;
2458 }
2459
2460 dhdp->skbbuf[sent_idx] = NULL;
2461 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
2462
2463 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
2464 skb, sent_idx));
2465
2466 dhd_os_rxfunlock(dhdp);
2467
2468 return skb;
2469}
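/*
 * store_idx/sent_idx above form a single-producer/single-consumer ring over
 * dhdp->skbbuf[]. Indices advance with "& (MAXSKBPEND - 1)", which is valid
 * only while MAXSKBPEND is a power of two, e.g.:
 *
 *	idx = MAXSKBPEND - 1;
 *	idx = (idx + 1) & (MAXSKBPEND - 1);	// wraps to 0
 *
 * A NULL slot distinguishes empty from full: enqueue refuses to overwrite a
 * slot that the rxf thread has not yet consumed.
 */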
2470
2471int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2472{
2473 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2474
2475 if (prepost) { /* pre process */
2476 dhd_read_macaddr(dhd);
2477 } else { /* post process */
2478 dhd_write_macaddr(&dhd->pub.mac);
2479 }
2480
2481 return 0;
2482}
2483
2484// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
2485#if defined(PKT_FILTER_SUPPORT) && defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2486static bool
2487_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2488{
2489 bool _apply = FALSE;
2490 /* In case of IBSS mode, apply arp pkt filter */
2491 if (op_mode & DHD_FLAG_IBSS_MODE) {
2492 _apply = TRUE;
2493 goto exit;
2494 }
2495 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2496 if ((dhd->arp_version == 1) &&
2497 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2498 _apply = TRUE;
2499 goto exit;
2500 }
2501
2502exit:
2503 return _apply;
2504}
2505#endif /* PKT_FILTER_SUPPORT && ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2506
2507void dhd_set_packet_filter(dhd_pub_t *dhd)
2508{
2509#ifdef PKT_FILTER_SUPPORT
2510 int i;
2511
2512 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2513 if (dhd_pkt_filter_enable) {
2514 for (i = 0; i < dhd->pktfilter_count; i++) {
2515 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2516 }
2517 }
2518#endif /* PKT_FILTER_SUPPORT */
2519}
2520
2521void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
2522{
2523#ifdef PKT_FILTER_SUPPORT
2524 int i;
2525
2526 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
2527
2528 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
2529 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
2530 return;
2531 }
2532 /* 1 - Enable packet filter, only allow unicast packet to send up */
2533 /* 0 - Disable packet filter */
2534 if (dhd_pkt_filter_enable && (!value ||
2535 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
2536 {
2537 for (i = 0; i < dhd->pktfilter_count; i++) {
2538// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
2539#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2540 if (value && (i == DHD_ARP_FILTER_NUM) &&
2541 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
2542				DHD_TRACE(("Do not turn on ARP white list pkt filter: "
2543 "val %d, cnt %d, op_mode 0x%x\n",
2544 value, i, dhd->op_mode));
2545 continue;
2546 }
2547#endif /* ARP_OFFLOAD_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2548 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
2549 value, dhd_master_mode);
2550 }
2551 }
2552#endif /* PKT_FILTER_SUPPORT */
2553}
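/*
 * Usage sketch: the suspend/resume path below drives this as
 *
 *	dhd_enable_packet_filter(1, dhd);	// entering suspend: unicast only
 *	dhd_enable_packet_filter(0, dhd);	// resuming: filters disabled
 *
 * see dhd_set_suspend().
 */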
2554
2555static int dhd_set_suspend(int value, dhd_pub_t *dhd)
2556{
2557#ifndef SUPPORT_PM2_ONLY
2558 int power_mode = PM_MAX;
2559#endif /* SUPPORT_PM2_ONLY */
2560#ifdef SUPPORT_SENSORHUB
2561 uint32 shub_msreq;
2562#endif /* SUPPORT_SENSORHUB */
2563 /* wl_pkt_filter_enable_t enable_parm; */
2564 char iovbuf[32];
2565 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
2566#ifdef DHD_USE_EARLYSUSPEND
2567#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2568 int bcn_timeout = 0;
2569#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2570#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2571 int roam_time_thresh = 0; /* (ms) */
2572#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2573#ifndef ENABLE_FW_ROAM_SUSPEND
2574 uint roamvar = dhd->conf->roam_off_suspend;
2575#endif /* ENABLE_FW_ROAM_SUSPEND */
2576#ifdef ENABLE_BCN_LI_BCN_WAKEUP
2577 int bcn_li_bcn;
2578#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2579 uint nd_ra_filter = 0;
2580 int ret = 0;
2581#endif /* DHD_USE_EARLYSUSPEND */
2582#ifdef PASS_ALL_MCAST_PKTS
2583 struct dhd_info *dhdinfo;
2584 uint32 allmulti;
2585 uint i;
2586#endif /* PASS_ALL_MCAST_PKTS */
2587#ifdef DYNAMIC_SWOOB_DURATION
2588#ifndef CUSTOM_INTR_WIDTH
2589#define CUSTOM_INTR_WIDTH 100
2590 int intr_width = 0;
2591#endif /* CUSTOM_INTR_WIDTH */
2592#endif /* DYNAMIC_SWOOB_DURATION */
2593
2594 if (!dhd)
2595 return -ENODEV;
2596
2597#ifdef PASS_ALL_MCAST_PKTS
2598 dhdinfo = dhd->info;
2599#endif /* PASS_ALL_MCAST_PKTS */
2600
2601 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
2602 __FUNCTION__, value, dhd->in_suspend));
2603
2604 dhd_suspend_lock(dhd);
2605
2606#ifdef CUSTOM_SET_CPUCORE
2607 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
2608 /* set specific cpucore */
2609 dhd_set_cpucore(dhd, TRUE);
2610#endif /* CUSTOM_SET_CPUCORE */
2611#ifndef SUPPORT_PM2_ONLY
2612 if (dhd->conf->pm >= 0)
2613 power_mode = dhd->conf->pm;
2614#endif /* SUPPORT_PM2_ONLY */
2615 if (dhd->up) {
2616 if (value && dhd->in_suspend) {
2617#ifdef PKT_FILTER_SUPPORT
2618 dhd->early_suspended = 1;
2619#endif
2620 /* Kernel suspended */
2621 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
2622
2623#ifdef SUPPORT_SENSORHUB
2624 shub_msreq = 1;
2625 if (dhd->info->shub_enable == 1) {
2626 bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
2627 iovbuf, sizeof(iovbuf));
2628 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2629 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2630 DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
2631 __FUNCTION__, ret));
2632 }
2633 }
2634#endif /* SUPPORT_SENSORHUB */
2635
2636#ifndef SUPPORT_PM2_ONLY
2637 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2638 sizeof(power_mode), TRUE, 0);
2639#endif /* SUPPORT_PM2_ONLY */
2640
2641#ifdef PKT_FILTER_SUPPORT
2642 /* Enable packet filter,
2643 * only allow unicast packet to send up
2644 */
2645 dhd_enable_packet_filter(1, dhd);
2646#endif /* PKT_FILTER_SUPPORT */
2647
2648#ifdef PASS_ALL_MCAST_PKTS
2649 allmulti = 0;
2650 bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2651 iovbuf, sizeof(iovbuf));
2652 for (i = 0; i < DHD_MAX_IFS; i++) {
2653 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2654 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2655 sizeof(iovbuf), TRUE, i);
2656 }
2657#endif /* PASS_ALL_MCAST_PKTS */
2658
2659 /* If DTIM skip is set up as default, force it to wake
2660 * each third DTIM for better power savings. Note that
2661			 * one side effect is a chance to miss BC/MC packets.
2662 */
2663#ifdef WLTDLS
2664			/* Do not set bcn_li_dtim in WFD mode */
2665 if (dhd->tdls_mode) {
2666 bcn_li_dtim = 0;
2667 } else
2668#endif /* WLTDLS */
2669 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2670 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2671 4, iovbuf, sizeof(iovbuf));
2672 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
2673 TRUE, 0) < 0)
2674 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2675
2676#ifdef DHD_USE_EARLYSUSPEND
2677#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2678 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2679 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2680 4, iovbuf, sizeof(iovbuf));
2681 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2682#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2683#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2684 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2685 bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2686 4, iovbuf, sizeof(iovbuf));
2687 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2688#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2689#ifndef ENABLE_FW_ROAM_SUSPEND
2690 /* Disable firmware roaming during suspend */
2691 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2692 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2693#endif /* ENABLE_FW_ROAM_SUSPEND */
2694#ifdef ENABLE_BCN_LI_BCN_WAKEUP
2695 bcn_li_bcn = 0;
2696 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2697 4, iovbuf, sizeof(iovbuf));
2698 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2699#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2700 if (FW_SUPPORTED(dhd, ndoe)) {
2701 /* enable IPv6 RA filter in firmware during suspend */
2702 nd_ra_filter = 1;
2703 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2704 iovbuf, sizeof(iovbuf));
2705 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2706 sizeof(iovbuf), TRUE, 0)) < 0)
2707 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2708 ret));
2709 }
2710#ifdef DYNAMIC_SWOOB_DURATION
2711 intr_width = CUSTOM_INTR_WIDTH;
2712 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2713 iovbuf, sizeof(iovbuf));
2714 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2715 sizeof(iovbuf), TRUE, 0)) < 0) {
2716 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2717 }
2718#endif /* DYNAMIC_SWOOB_DURATION */
2719#endif /* DHD_USE_EARLYSUSPEND */
2720 } else {
2721#ifdef PKT_FILTER_SUPPORT
2722 dhd->early_suspended = 0;
2723#endif
2724 /* Kernel resumed */
2725 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
2726
2727#ifdef SUPPORT_SENSORHUB
2728 shub_msreq = 0;
2729 if (dhd->info->shub_enable == 1) {
2730 bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
2731 4, iovbuf, sizeof(iovbuf));
2732 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
2733 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
2734					DHD_ERROR(("%s Sensor Hub move/stop stop: "
2735 "failed %d\n", __FUNCTION__, ret));
2736 }
2737 }
2738#endif /* SUPPORT_SENSORHUB */
2739
2740
2741#ifdef DYNAMIC_SWOOB_DURATION
2742 intr_width = 0;
2743 bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
2744 iovbuf, sizeof(iovbuf));
2745 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2746 sizeof(iovbuf), TRUE, 0)) < 0) {
2747 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2748 }
2749#endif /* DYNAMIC_SWOOB_DURATION */
2750#ifndef SUPPORT_PM2_ONLY
2751 power_mode = PM_FAST;
2752 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2753 sizeof(power_mode), TRUE, 0);
2754#endif /* SUPPORT_PM2_ONLY */
2755#ifdef PKT_FILTER_SUPPORT
2756 /* disable pkt filter */
2757 dhd_enable_packet_filter(0, dhd);
2758#endif /* PKT_FILTER_SUPPORT */
2759#ifdef PASS_ALL_MCAST_PKTS
2760 allmulti = 1;
2761 bcm_mkiovar("allmulti", (char *)&allmulti, 4,
2762 iovbuf, sizeof(iovbuf));
2763 for (i = 0; i < DHD_MAX_IFS; i++) {
2764 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
2765 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2766 sizeof(iovbuf), TRUE, i);
2767 }
2768#endif /* PASS_ALL_MCAST_PKTS */
2769
2770 /* restore pre-suspend setting for dtim_skip */
2771 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
2772 4, iovbuf, sizeof(iovbuf));
2773
2774 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2775#ifdef DHD_USE_EARLYSUSPEND
2776#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2777 bcn_timeout = CUSTOM_BCN_TIMEOUT;
2778 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
2779 4, iovbuf, sizeof(iovbuf));
2780 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2781#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2782#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2783 roam_time_thresh = 2000;
2784 bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
2785 4, iovbuf, sizeof(iovbuf));
2786 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2787#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2788#ifndef ENABLE_FW_ROAM_SUSPEND
2789 roamvar = dhd_roam_disable;
2790 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
2791 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2792#endif /* ENABLE_FW_ROAM_SUSPEND */
2793#ifdef ENABLE_BCN_LI_BCN_WAKEUP
2794 bcn_li_bcn = 1;
2795 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
2796 4, iovbuf, sizeof(iovbuf));
2797 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
2798#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2799 if (FW_SUPPORTED(dhd, ndoe)) {
2800 /* disable IPv6 RA filter in firmware during suspend */
2801 nd_ra_filter = 0;
2802 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
2803 iovbuf, sizeof(iovbuf));
2804 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
2805 sizeof(iovbuf), TRUE, 0)) < 0)
2806 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2807 ret));
2808 }
2809#endif /* DHD_USE_EARLYSUSPEND */
2810 }
2811 }
2812 dhd_suspend_unlock(dhd);
2813
2814 return 0;
2815}
2816
2817static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2818{
2819 dhd_pub_t *dhdp = &dhd->pub;
2820 int ret = 0;
2821
2822 DHD_OS_WAKE_LOCK(dhdp);
2823 DHD_PERIM_LOCK(dhdp);
2824
2825 /* Set flag when early suspend was called */
2826 dhdp->in_suspend = val;
2827 if ((force || !dhdp->suspend_disable_flag) &&
2828 dhd_support_sta_mode(dhdp))
2829 {
2830 ret = dhd_set_suspend(val, dhdp);
2831 }
2832
2833 DHD_PERIM_UNLOCK(dhdp);
2834 DHD_OS_WAKE_UNLOCK(dhdp);
2835 return ret;
2836}
2837
2838#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2839static void dhd_early_suspend(struct early_suspend *h)
2840{
2841 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2842 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2843
2844 if (dhd)
2845 dhd_suspend_resume_helper(dhd, 1, 0);
2846}
2847
2848static void dhd_late_resume(struct early_suspend *h)
2849{
2850 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2851 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2852
2853 if (dhd)
2854 dhd_suspend_resume_helper(dhd, 0, 0);
2855}
2856#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2857
2858/*
2859 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2860 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2861 *
2862 * dhd_timeout_start(&tmo, usec);
2863 * while (!dhd_timeout_expired(&tmo))
2864 * if (poll_something())
2865 * break;
2866 * if (dhd_timeout_expired(&tmo))
2867 * fatal();
2868 */
2869
2870void
2871dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2872{
2873 tmo->limit = usec;
2874 tmo->increment = 0;
2875 tmo->elapsed = 0;
2876 tmo->tick = jiffies_to_usecs(1);
2877}
2878
2879int
2880dhd_timeout_expired(dhd_timeout_t *tmo)
2881{
2882	/* Do nothing on the first call */
2883 if (tmo->increment == 0) {
2884 tmo->increment = 1;
2885 return 0;
2886 }
2887
2888 if (tmo->elapsed >= tmo->limit)
2889 return 1;
2890
2891 /* Add the delay that's about to take place */
2892 tmo->elapsed += tmo->increment;
2893
2894 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
2895 OSL_DELAY(tmo->increment);
2896 tmo->increment *= 2;
2897 if (tmo->increment > tmo->tick)
2898 tmo->increment = tmo->tick;
2899 } else {
2900 wait_queue_head_t delay_wait;
2901 DECLARE_WAITQUEUE(wait, current);
2902 init_waitqueue_head(&delay_wait);
2903 add_wait_queue(&delay_wait, &wait);
2904 set_current_state(TASK_INTERRUPTIBLE);
2905 (void)schedule_timeout(1);
2906 remove_wait_queue(&delay_wait, &wait);
2907 set_current_state(TASK_RUNNING);
2908 }
2909
2910 return 0;
2911}
2912
2913int
2914dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2915{
2916 int i = 0;
2917
2918 if (!dhd) {
2919 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2920 return DHD_BAD_IF;
2921 }
2922
2923 while (i < DHD_MAX_IFS) {
2924 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2925 return i;
2926 i++;
2927 }
2928
2929 return DHD_BAD_IF;
2930}
2931
2932struct net_device * dhd_idx2net(void *pub, int ifidx)
2933{
2934 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2935 struct dhd_info *dhd_info;
2936
2937 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2938 return NULL;
2939 dhd_info = dhd_pub->info;
2940 if (dhd_info && dhd_info->iflist[ifidx])
2941 return dhd_info->iflist[ifidx]->net;
2942 return NULL;
2943}
2944
2945int
2946dhd_ifname2idx(dhd_info_t *dhd, char *name)
2947{
2948 int i = DHD_MAX_IFS;
2949
2950 ASSERT(dhd);
2951
2952 if (name == NULL || *name == '\0')
2953 return 0;
2954
2955 while (--i > 0)
2956 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2957 break;
2958
2959 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2960
2961 return i; /* default - the primary interface */
2962}
2963
2964char *
2965dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2966{
2967 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2968
2969 ASSERT(dhd);
2970
2971 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2972 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2973 return "<if_bad>";
2974 }
2975
2976 if (dhd->iflist[ifidx] == NULL) {
2977 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2978 return "<if_null>";
2979 }
2980
2981 if (dhd->iflist[ifidx]->net)
2982 return dhd->iflist[ifidx]->net->name;
2983
2984 return "<if_none>";
2985}
2986
2987uint8 *
2988dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2989{
2990 int i;
2991	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2992
2993 ASSERT(dhd);
2994 for (i = 0; i < DHD_MAX_IFS; i++)
2995 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2996 return dhd->iflist[i]->mac_addr;
2997
2998 return NULL;
2999}
3000
3001
3002static void
3003_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
3004{
3005 struct net_device *dev;
3006#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3007 struct netdev_hw_addr *ha;
3008#else
3009 struct dev_mc_list *mclist;
3010#endif
3011 uint32 allmulti, cnt;
3012
3013 wl_ioctl_t ioc;
3014 char *buf, *bufp;
3015 uint buflen;
3016 int ret;
3017
3018 if (!dhd->iflist[ifidx]) {
3019 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
3020 return;
3021 }
3022 dev = dhd->iflist[ifidx]->net;
3023 if (!dev)
3024 return;
3025#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3026 netif_addr_lock_bh(dev);
3027#endif /* LINUX >= 2.6.27 */
3028#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3029 cnt = netdev_mc_count(dev);
3030#else
3031 cnt = dev->mc_count;
3032#endif /* LINUX >= 2.6.35 */
3033#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3034 netif_addr_unlock_bh(dev);
3035#endif /* LINUX >= 2.6.27 */
3036
3037 /* Determine initial value of allmulti flag */
3038 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
3039
3040#ifdef PASS_ALL_MCAST_PKTS
3041#ifdef PKT_FILTER_SUPPORT
3042 if (!dhd->pub.early_suspended)
3043#endif /* PKT_FILTER_SUPPORT */
3044 allmulti = TRUE;
3045#endif /* PASS_ALL_MCAST_PKTS */
3046
3047 /* Send down the multicast list first. */
3048
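	/* iovar buffer layout: "mcast_list\0" | uint32 count (LE) | count MAC
	 * addresses of ETHER_ADDR_LEN bytes each, matching buflen below.
	 */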
3049
3050 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
3051 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
3052 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
3053 dhd_ifname(&dhd->pub, ifidx), cnt));
3054 return;
3055 }
3056
3057 strncpy(bufp, "mcast_list", buflen - 1);
3058 bufp[buflen - 1] = '\0';
3059 bufp += strlen("mcast_list") + 1;
3060
3061 cnt = htol32(cnt);
3062 memcpy(bufp, &cnt, sizeof(cnt));
3063 bufp += sizeof(cnt);
3064
3065#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3066 netif_addr_lock_bh(dev);
3067#endif /* LINUX >= 2.6.27 */
3068#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
3069 netdev_for_each_mc_addr(ha, dev) {
3070 if (!cnt)
3071 break;
3072 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
3073 bufp += ETHER_ADDR_LEN;
3074 cnt--;
3075 }
3076#else /* LINUX < 2.6.35 */
3077 for (mclist = dev->mc_list; (mclist && (cnt > 0));
3078 cnt--, mclist = mclist->next) {
3079 memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
3080 bufp += ETHER_ADDR_LEN;
3081 }
3082#endif /* LINUX >= 2.6.35 */
3083#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
3084 netif_addr_unlock_bh(dev);
3085#endif /* LINUX >= 2.6.27 */
3086
3087 memset(&ioc, 0, sizeof(ioc));
3088 ioc.cmd = WLC_SET_VAR;
3089 ioc.buf = buf;
3090 ioc.len = buflen;
3091 ioc.set = TRUE;
3092
3093 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3094 if (ret < 0) {
3095 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
3096 dhd_ifname(&dhd->pub, ifidx), cnt));
3097 allmulti = cnt ? TRUE : allmulti;
3098 }
3099
3100 MFREE(dhd->pub.osh, buf, buflen);
3101
3102	/* Now send the allmulti setting. This is based on the setting in the
3103	 * net_device flags, but may have been forced on above if we were
3104	 * trying to set some addresses and the dongle rejected them.
3105	 */
3106
3107 buflen = sizeof("allmulti") + sizeof(allmulti);
3108 if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
3109 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
3110 return;
3111 }
3112 allmulti = htol32(allmulti);
3113
3114 if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
3115 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
3116 dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
3117 MFREE(dhd->pub.osh, buf, buflen);
3118 return;
3119 }
3120
3121
3122 memset(&ioc, 0, sizeof(ioc));
3123 ioc.cmd = WLC_SET_VAR;
3124 ioc.buf = buf;
3125 ioc.len = buflen;
3126 ioc.set = TRUE;
3127
3128 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3129 if (ret < 0) {
3130 DHD_ERROR(("%s: set allmulti %d failed\n",
3131 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3132 }
3133
3134 MFREE(dhd->pub.osh, buf, buflen);
3135
3136 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
3137
3138 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
3139
3140 allmulti = htol32(allmulti);
3141
3142 memset(&ioc, 0, sizeof(ioc));
3143 ioc.cmd = WLC_SET_PROMISC;
3144 ioc.buf = &allmulti;
3145 ioc.len = sizeof(allmulti);
3146 ioc.set = TRUE;
3147
3148 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3149 if (ret < 0) {
3150 DHD_ERROR(("%s: set promisc %d failed\n",
3151 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
3152 }
3153}
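/*
 * The sequence above mirrors a NIC driver's rx-mode handling: (1) program
 * the exact multicast list via the "mcast_list" iovar, (2) program the
 * allmulti flag, forcing it on if the dongle rejected the list, and
 * (3) mirror IFF_PROMISC via WLC_SET_PROMISC.
 */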
3154
3155int
3156_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3157{
3158 char buf[32];
3159 wl_ioctl_t ioc;
3160 int ret;
3161
3162 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3163 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3164 return -1;
3165 }
3166 memset(&ioc, 0, sizeof(ioc));
3167 ioc.cmd = WLC_SET_VAR;
3168 ioc.buf = buf;
3169 ioc.len = 32;
3170 ioc.set = TRUE;
3171
3172 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3173 if (ret < 0) {
3174 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3175 } else {
3176 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3177 if (ifidx == 0)
3178 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3179 }
3180
3181 return ret;
3182}
3183
3184#ifdef SOFTAP
3185extern struct net_device *ap_net_dev;
3186extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
3187#endif
3188
3189#ifdef DHD_PSTA
3190/* Get psta/psr configuration */
3191int dhd_get_psta_mode(dhd_pub_t *dhdp)
3192{
3193 dhd_info_t *dhd = dhdp->info;
3194 return (int)dhd->psta_mode;
3195}
3196/* Set psta/psr configuration */
3197int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
3198{
3199 dhd_info_t *dhd = dhdp->info;
3200 dhd->psta_mode = val;
3201 return 0;
3202}
3203#endif /* DHD_PSTA */
3204
3205static void
3206dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
3207{
3208 dhd_info_t *dhd = handle;
3209 dhd_if_event_t *if_event = event_info;
3210 struct net_device *ndev;
3211 int ifidx, bssidx;
3212 int ret;
3213#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3214 struct wireless_dev *vwdev, *primary_wdev;
3215 struct net_device *primary_ndev;
3216#endif /* WL_CFG80211 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3217
3218 if (event != DHD_WQ_WORK_IF_ADD) {
3219 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3220 return;
3221 }
3222
3223 if (!dhd) {
3224 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3225 return;
3226 }
3227
3228 if (!if_event) {
3229 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3230 return;
3231 }
3232
3233 dhd_net_if_lock_local(dhd);
3234 DHD_OS_WAKE_LOCK(&dhd->pub);
3235 DHD_PERIM_LOCK(&dhd->pub);
3236
3237 ifidx = if_event->event.ifidx;
3238 bssidx = if_event->event.bssidx;
3239 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
3240
3241	/* This path is for the non-Android case: the interface name on the
3242	 * host and in the event message are the same, and the name in the
3243	 * event message is used to create the dongle i/f list on the host. */
3244 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
3245 if_event->mac, bssidx, TRUE, if_event->name);
3246 if (!ndev) {
3247 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
3248 goto done;
3249 }
3250
3251#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
3252 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
3253 if (unlikely(!vwdev)) {
3254 DHD_ERROR(("Could not allocate wireless device\n"));
3255 goto done;
3256 }
3257 primary_ndev = dhd->pub.info->iflist[0]->net;
3258 primary_wdev = ndev_to_wdev(primary_ndev);
3259 vwdev->wiphy = primary_wdev->wiphy;
3260 vwdev->iftype = if_event->event.role;
3261 vwdev->netdev = ndev;
3262 ndev->ieee80211_ptr = vwdev;
3263 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
3264 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
3265#endif /* WL_CFG80211 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
3266
3267 DHD_PERIM_UNLOCK(&dhd->pub);
3268 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
3269 DHD_PERIM_LOCK(&dhd->pub);
3270 if (ret != BCME_OK) {
3271 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
3272 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3273 goto done;
3274 }
3275#ifdef PCIE_FULL_DONGLE
3276 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
3277 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
3278 char iovbuf[WLC_IOCTL_SMLEN];
3279 uint32 var_int = 1;
3280
3281 memset(iovbuf, 0, sizeof(iovbuf));
3282 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
3283 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
3284
3285 if (ret != BCME_OK) {
3286 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
3287 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3288 }
3289 }
3290#endif /* PCIE_FULL_DONGLE */
3291
3292done:
3293 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3294
3295 DHD_PERIM_UNLOCK(&dhd->pub);
3296 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3297 dhd_net_if_unlock_local(dhd);
3298}
3299
3300static void
3301dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
3302{
3303 dhd_info_t *dhd = handle;
3304 int ifidx;
3305 dhd_if_event_t *if_event = event_info;
3306
3307
3308 if (event != DHD_WQ_WORK_IF_DEL) {
3309 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3310 return;
3311 }
3312
3313 if (!dhd) {
3314 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3315 return;
3316 }
3317
3318 if (!if_event) {
3319 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
3320 return;
3321 }
3322
3323 dhd_net_if_lock_local(dhd);
3324 DHD_OS_WAKE_LOCK(&dhd->pub);
3325 DHD_PERIM_LOCK(&dhd->pub);
3326
3327 ifidx = if_event->event.ifidx;
3328 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
3329
3330 DHD_PERIM_UNLOCK(&dhd->pub);
3331 dhd_remove_if(&dhd->pub, ifidx, TRUE);
3332 DHD_PERIM_LOCK(&dhd->pub);
3333
3334 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
3335
3336 DHD_PERIM_UNLOCK(&dhd->pub);
3337 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3338 dhd_net_if_unlock_local(dhd);
3339}
3340
3341static void
3342dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3343{
3344 dhd_info_t *dhd = handle;
3345 dhd_if_t *ifp = event_info;
3346
3347	if (event != DHD_WQ_WORK_SET_MAC) {
3348		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
3349	}
3350
3351 if (!dhd) {
3352 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3353 return;
3354 }
3355
3356 dhd_net_if_lock_local(dhd);
3357 DHD_OS_WAKE_LOCK(&dhd->pub);
3358 DHD_PERIM_LOCK(&dhd->pub);
3359
3360#ifdef SOFTAP
3361 {
3362 unsigned long flags;
3363 bool in_ap = FALSE;
3364 DHD_GENERAL_LOCK(&dhd->pub, flags);
3365 in_ap = (ap_net_dev != NULL);
3366 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3367
3368 if (in_ap) {
3369 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3370 ifp->net->name));
3371 goto done;
3372 }
3373 }
3374#endif /* SOFTAP */
3375
3376 // terence 20160907: fix for not able to set mac when wlan0 is down
3377 if (ifp == NULL || !ifp->set_macaddress) {
3378 goto done;
3379 }
3380 if (ifp == NULL || !dhd->pub.up) {
3381 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3382 goto done;
3383 }
3384
3385 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3386 ifp->set_macaddress = FALSE;
3387 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3388 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3389 else
3390 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3391
3392done:
3393 DHD_PERIM_UNLOCK(&dhd->pub);
3394 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3395 dhd_net_if_unlock_local(dhd);
3396}
3397
3398static void
3399dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3400{
3401 dhd_info_t *dhd = handle;
3402 dhd_if_t *ifp = event_info;
3403 int ifidx;
3404
3405 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3406 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3407 return;
3408 }
3409
3410 if (!dhd) {
3411 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3412 return;
3413 }
3414
3415 dhd_net_if_lock_local(dhd);
3416 DHD_OS_WAKE_LOCK(&dhd->pub);
3417 DHD_PERIM_LOCK(&dhd->pub);
3418
3419#ifdef SOFTAP
3420 {
3421 bool in_ap = FALSE;
3422 unsigned long flags;
3423 DHD_GENERAL_LOCK(&dhd->pub, flags);
3424 in_ap = (ap_net_dev != NULL);
3425 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3426
3427 if (in_ap) {
3428 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3429 ifp->net->name));
3430 ifp->set_multicast = FALSE;
3431 goto done;
3432 }
3433 }
3434#endif /* SOFTAP */
3435
3436 if (ifp == NULL || !dhd->pub.up) {
3437 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3438 goto done;
3439 }
3440
3441 ifidx = ifp->idx;
3442
3443
3444 _dhd_set_multicast_list(dhd, ifidx);
3445 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3446
3447done:
3448 DHD_PERIM_UNLOCK(&dhd->pub);
3449 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3450 dhd_net_if_unlock_local(dhd);
3451}
3452
3453static int
3454dhd_set_mac_address(struct net_device *dev, void *addr)
3455{
3456 int ret = 0;
3457
3458 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3459 struct sockaddr *sa = (struct sockaddr *)addr;
3460 int ifidx;
3461 dhd_if_t *dhdif;
3462
3463 ifidx = dhd_net2idx(dhd, dev);
3464 if (ifidx == DHD_BAD_IF)
3465 return -1;
3466
3467 dhdif = dhd->iflist[ifidx];
3468
3469 dhd_net_if_lock_local(dhd);
3470 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3471 dhdif->set_macaddress = TRUE;
3472 dhd_net_if_unlock_local(dhd);
3473 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3474 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3475 return ret;
3476}
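/*
 * Note: the MAC is not written to the dongle in this ndo handler; the
 * address is staged in ifp->mac_addr and applied asynchronously by
 * dhd_set_mac_addr_handler() on the deferred workqueue, where the firmware
 * ioctl can safely sleep.
 */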
3477
3478static void
3479dhd_set_multicast_list(struct net_device *dev)
3480{
3481 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3482 int ifidx;
3483
3484 ifidx = dhd_net2idx(dhd, dev);
3485 if (ifidx == DHD_BAD_IF)
3486 return;
3487
3488 dhd->iflist[ifidx]->set_multicast = TRUE;
3489 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3490 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
3491
3492 // terence 20160907: fix for not able to set mac when wlan0 is down
3493 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3494 DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3495}
3496
3497#ifdef PROP_TXSTATUS
3498int
3499dhd_os_wlfc_block(dhd_pub_t *pub)
3500{
3501 dhd_info_t *di = (dhd_info_t *)(pub->info);
3502 ASSERT(di != NULL);
3503 spin_lock_bh(&di->wlfc_spinlock);
3504 return 1;
3505}
3506
3507int
3508dhd_os_wlfc_unblock(dhd_pub_t *pub)
3509{
3510 dhd_info_t *di = (dhd_info_t *)(pub->info);
3511
3512 ASSERT(di != NULL);
3513 spin_unlock_bh(&di->wlfc_spinlock);
3514 return 1;
3515}
3516
3517#endif /* PROP_TXSTATUS */
3518
3519#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
3520typedef struct {
3521 uint16 type;
3522 const char *str;
3523} PKTTYPE_INFO;
3524
3525static const PKTTYPE_INFO packet_type_info[] =
3526{
3527 { ETHER_TYPE_IP, "IP" },
3528 { ETHER_TYPE_ARP, "ARP" },
3529 { ETHER_TYPE_BRCM, "BRCM" },
3530 { ETHER_TYPE_802_1X, "802.1X" },
3531 { ETHER_TYPE_WAI, "WAPI" },
3532 { 0, ""}
3533};
3534
3535static const char *_get_packet_type_str(uint16 type)
3536{
3537 int i;
3538	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
3539
3540 for (i = 0; i < n; i++) {
3541 if (packet_type_info[i].type == type)
3542 return packet_type_info[i].str;
3543 }
3544
3545 return packet_type_info[n].str;
3546}
3547#endif /* DHD_RX_DUMP || DHD_TX_DUMP */
3548
3549#if defined(DHD_TX_DUMP)
3550void
3551dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
3552{
3553 uint8 *dump_data;
3554 uint16 protocol;
3555 char *ifname;
3556
3557 dump_data = PKTDATA(osh, pkt);
3558 protocol = (dump_data[12] << 8) | dump_data[13];
3559 ifname = ndev ? ndev->name : "N/A";
3560
3561 DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
3562
3563 if (protocol == ETHER_TYPE_802_1X) {
3564 dhd_dump_eapol_4way_message(ifname, dump_data, TRUE);
3565 }
3566
3567#if defined(DHD_TX_FULL_DUMP)
3568 {
3569 int i;
3570 uint datalen;
3571 datalen = PKTLEN(osh, pkt);
3572
3573 for (i = 0; i < datalen; i++) {
3574 printk("%02X ", dump_data[i]);
3575 if ((i & 15) == 15)
3576 printk("\n");
3577 }
3578 printk("\n");
3579 }
3580#endif /* DHD_TX_FULL_DUMP */
3581}
3582#endif /* DHD_TX_DUMP */
3583
3584/* This routine does not support the packet chain feature; currently it has
3585 * been tested only with the proxy ARP feature.
3586 */
3587int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
3588{
3589 struct sk_buff *skb;
3590 void *skbhead = NULL;
3591 void *skbprev = NULL;
3592 dhd_if_t *ifp;
3593 ASSERT(!PKTISCHAINED(p));
3594 skb = PKTTONATIVE(dhdp->osh, p);
3595
3596 ifp = dhdp->info->iflist[ifidx];
3597 skb->dev = ifp->net;
3598#if defined(BCM_GMAC3)
3599 /* Forwarder capable interfaces use WOFA based forwarding */
3600 if (ifp->fwdh) {
3601 struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
3602 uint16 * da = (uint16 *)(eh->ether_dhost);
3603 wofa_t wofa;
3604 ASSERT(ISALIGNED(da, 2));
3605
3606 wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
3607 if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
3608 if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
3609 return BCME_OK;
3610 }
3611 }
3612 PKTFRMNATIVE(dhdp->osh, p);
3613 PKTFREE(dhdp->osh, p, FALSE);
3614 return BCME_OK;
3615 }
3616#endif /* BCM_GMAC3 */
3617
3618 skb->protocol = eth_type_trans(skb, skb->dev);
3619
3620 if (in_interrupt()) {
3621 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3622 __FUNCTION__, __LINE__);
3623 netif_rx(skb);
3624 } else {
3625 if (dhdp->info->rxthread_enabled) {
3626 if (!skbhead) {
3627 skbhead = skb;
3628 } else {
3629 PKTSETNEXT(dhdp->osh, skbprev, skb);
3630 }
3631 skbprev = skb;
3632 } else {
3633 /* If the receive is not processed inside an ISR,
3634 * the softirqd must be woken explicitly to service
3635 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3636 * by netif_rx_ni(), but in earlier kernels, we need
3637 * to do it manually.
3638 */
3639 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3640 __FUNCTION__, __LINE__);
3641#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3642 netif_rx_ni(skb);
3643#else
3644 ulong flags;
3645 netif_rx(skb);
3646 local_irq_save(flags);
3647 RAISE_RX_SOFTIRQ();
3648 local_irq_restore(flags);
3649#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3650 }
3651 }
3652
3653 if (dhdp->info->rxthread_enabled && skbhead)
3654 dhd_sched_rxf(dhdp, skbhead);
3655
3656 return BCME_OK;
3657}
3658
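/* Core transmit path: validates bus state, runs the optional L2 filters,
 * updates multicast/802.1X counters, assigns a packet priority and (for
 * PCIE_FULL_DONGLE) a flowring, then hands the packet to WLFC or directly
 * to the bus. Frees the packet itself on every error path, so callers
 * must not free it again.
 */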
3659int BCMFASTPATH
3660__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3661{
3662 int ret = BCME_OK;
3663 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3664 struct ether_header *eh = NULL;
3665#ifdef DHD_L2_FILTER
3666 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
3667#endif
3668#ifdef DHD_8021X_DUMP
3669 struct net_device *ndev;
3670#endif /* DHD_8021X_DUMP */
3671
3672 /* Reject if down */
3673 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
3674 /* free the packet here since the caller won't */
3675 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3676 return -ENODEV;
3677 }
3678
3679#ifdef PCIE_FULL_DONGLE
3680 if (dhdp->busstate == DHD_BUS_SUSPEND) {
3681 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3682 PKTFREE(dhdp->osh, pktbuf, TRUE);
3683 return -EBUSY;
3684 }
3685#endif /* PCIE_FULL_DONGLE */
3686
3687#ifdef DHD_L2_FILTER
3688 /* if dhcp_unicast is enabled, we need to convert the */
3689 /* broadcast DHCP ACK/REPLY packets to Unicast. */
3690 if (ifp->dhcp_unicast) {
3691 uint8* mac_addr;
3692 uint8* ehptr = NULL;
3693 int ret;
3694 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
3695 if (ret == BCME_OK) {
3696			/* If the given MAC address has a valid entry in the STA
3697			 * list, rewrite the packet's destination MAC to that unicast address.
3698			 */
3699 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
3700 ehptr = PKTDATA(dhdp->osh, pktbuf);
3701 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
3702 }
3703 }
3704 }
3705
3706 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3707 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
3708 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3709 return BCME_ERROR;
3710 }
3711 }
3712
3713 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3714 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
3715
3716 /* Drop the packets if l2 filter has processed it already
3717 * otherwise continue with the normal path
3718 */
3719 if (ret == BCME_OK) {
3720 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3721 return BCME_ERROR;
3722 }
3723 }
3724#endif /* DHD_L2_FILTER */
3725 /* Update multicast statistic */
3726 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
3727 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
3728 eh = (struct ether_header *)pktdata;
3729
3730 if (ETHER_ISMULTI(eh->ether_dhost))
3731 dhdp->tx_multicast++;
3732 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
3733 atomic_inc(&dhd->pend_8021x_cnt);
3734#ifdef DHD_DHCP_DUMP
3735 if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
3736 uint16 dump_hex;
3737 uint16 source_port;
3738 uint16 dest_port;
3739 uint16 udp_port_pos;
3740 uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
3741 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
3742 struct net_device *net;
3743 char *ifname;
3744
3745 net = dhd_idx2net(dhdp, ifidx);
3746 ifname = net ? net->name : "N/A";
3747 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
3748 source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
3749 dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
3750 if (source_port == 0x0044 || dest_port == 0x0044) {
3751 dump_hex = (pktdata[udp_port_pos+249] << 8) |
3752 pktdata[udp_port_pos+250];
3753 if (dump_hex == 0x0101) {
3754 DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
3755 } else if (dump_hex == 0x0102) {
3756 DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
3757 } else if (dump_hex == 0x0103) {
3758 DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
3759 } else if (dump_hex == 0x0105) {
3760 DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
3761 } else {
3762 DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
3763 }
3764#ifdef DHD_LOSSLESS_ROAMING
3765 if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
3766 DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
3767 }
3768#endif /* DHD_LOSSLESS_ROAMING */
3769 DHD_ERROR(("\n"));
3770 } else if (source_port == 0x0043 || dest_port == 0x0043) {
3771				DHD_ERROR(("DHCP[%s] - BOOTP [TX]\n", ifname));
3772 }
3773 }
3774#endif /* DHD_DHCP_DUMP */
3775 } else {
3776 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3777 return BCME_ERROR;
3778 }
3779
3780 /* Look into the packet and update the packet priority */
3781#ifndef PKTPRIO_OVERRIDE
3782 if (PKTPRIO(pktbuf) == 0)
3783#endif /* !PKTPRIO_OVERRIDE */
3784 {
3785#ifdef QOS_MAP_SET
3786 pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
3787#else
3788 pktsetprio(pktbuf, FALSE);
3789#endif /* QOS_MAP_SET */
3790 }
3791
3792
3793#ifdef PCIE_FULL_DONGLE
3794	/*
3795	 * Look up the per-interface hash table for a matching flowring. If none is
3796	 * available, allocate a unique flowid and add a flowring entry.
3797	 * The found or newly created flowid is placed into the pktbuf's tag.
3798	 */
3799 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
3800 if (ret != BCME_OK) {
3801 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
3802 return ret;
3803 }
3804#endif
3805
3806#if defined(DHD_TX_DUMP)
3807 ndev = dhd_idx2net(dhdp, ifidx);
3808 dhd_tx_dump(ndev, dhdp->osh, pktbuf);
3809#endif
3810	/* terence 20150901: Micky's change to adjust the 802.1X priority */
3811	/* Set 802.1X packets to the highest priority, 7 */
3812 if (dhdp->conf->pktprio8021x >= 0)
3813 pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
3814
3815#ifdef PROP_TXSTATUS
3816 if (dhd_wlfc_is_supported(dhdp)) {
3817 /* store the interface ID */
3818 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
3819
3820 /* store destination MAC in the tag as well */
3821 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
3822
3823 /* decide which FIFO this packet belongs to */
3824 if (ETHER_ISMULTI(eh->ether_dhost))
3825 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
3826 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
3827 else
3828 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
3829 } else
3830#endif /* PROP_TXSTATUS */
3831 {
3832 /* If the protocol uses a data header, apply it */
3833 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
3834 }
3835
3836 /* Use bus module to send data frame */
3837#ifdef WLMEDIA_HTSF
3838 dhd_htsf_addtxts(dhdp, pktbuf);
3839#endif
3840#ifdef PROP_TXSTATUS
3841 {
3842 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
3843 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
3844 /* non-proptxstatus way */
3845#ifdef BCMPCIE
3846 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3847#else
3848 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3849#endif /* BCMPCIE */
3850 }
3851 }
3852#else
3853#ifdef BCMPCIE
3854 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3855#else
3856 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3857#endif /* BCMPCIE */
3858#endif /* PROP_TXSTATUS */
3859
3860 return ret;
3861}
3862
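/* Locked wrapper around __dhd_sendpkt(): rejects the packet when the bus
 * is down, marks DHD_BUS_BUSY_IN_SEND_PKT around the send and, with
 * DHD_PCIE_RUNTIMEPM, wakes the PCIe bus first.
 */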
3863int BCMFASTPATH
3864dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3865{
3866 int ret = 0;
3867 unsigned long flags;
3868
3869 DHD_GENERAL_LOCK(dhdp, flags);
3870 if (dhdp->busstate == DHD_BUS_DOWN ||
3871 dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3872 DHD_ERROR(("%s: returning as busstate=%d\n",
3873 __FUNCTION__, dhdp->busstate));
3874 DHD_GENERAL_UNLOCK(dhdp, flags);
3875 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3876 return -ENODEV;
3877 }
3878 dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
3879 DHD_GENERAL_UNLOCK(dhdp, flags);
3880
3881#ifdef DHD_PCIE_RUNTIMEPM
3882 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
3883 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3884 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3885 ret = -EBUSY;
3886 goto exit;
3887 }
3888#endif /* DHD_PCIE_RUNTIMEPM */
3889
3890 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
3891
3892#ifdef DHD_PCIE_RUNTIMEPM
3893exit:
3894#endif
3895 DHD_GENERAL_LOCK(dhdp, flags);
3896 dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
3897 DHD_GENERAL_UNLOCK(dhdp, flags);
3898 return ret;
3899}
3900
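/* net_device start_xmit hook. Takes the wake and perimeter locks, validates
 * the interface, re-aligns the skb and reallocates headroom for the protocol
 * headers if needed, runs optional WMF/PSTA/TCP-ACK-suppression processing,
 * and finally queues the packet via __dhd_sendpkt(). Except for the
 * NETDEV_TX_BUSY early returns (bus suspended, down, or bad ifidx), the skb
 * is always consumed.
 */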
3901int BCMFASTPATH
3902dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3903{
3904 int ret;
3905 uint datalen;
3906 void *pktbuf;
3907 dhd_info_t *dhd = DHD_DEV_INFO(net);
3908 dhd_if_t *ifp = NULL;
3909 int ifidx;
3910 unsigned long flags;
3911#ifdef WLMEDIA_HTSF
3912 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3913#else
3914 uint8 htsfdlystat_sz = 0;
3915#endif
3916#ifdef DHD_WMF
3917 struct ether_header *eh;
3918 uint8 *iph;
3919#endif /* DHD_WMF */
3920
3921 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3922
3923
3924#ifdef PCIE_FULL_DONGLE
3925 DHD_GENERAL_LOCK(&dhd->pub, flags);
3926 dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
3927 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3928#endif /* PCIE_FULL_DONGLE */
3929
3930#ifdef DHD_PCIE_RUNTIMEPM
3931 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3932		/* To avoid packet loss, return NETDEV_TX_BUSY until runtime resume is done, */
3933		/* and stop the network queue temporarily until then. */
3934 DHD_GENERAL_LOCK(&dhd->pub, flags);
3935 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3936 dhd_bus_stop_queue(dhd->pub.bus);
3937 }
3938 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3939 dhd_os_busbusy_wake(&dhd->pub);
3940 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3941#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3942 return -ENODEV;
3943#else
3944 return NETDEV_TX_BUSY;
3945#endif
3946 }
3947#endif /* DHD_PCIE_RUNTIMEPM */
3948
3949 DHD_GENERAL_LOCK(&dhd->pub, flags);
3950#ifdef PCIE_FULL_DONGLE
3951 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3952 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3953 dhd_os_busbusy_wake(&dhd->pub);
3954 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3955#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3956 return -ENODEV;
3957#else
3958 return NETDEV_TX_BUSY;
3959#endif
3960 }
3961#endif /* PCIE_FULL_DONGLE */
3962
3963 DHD_OS_WAKE_LOCK(&dhd->pub);
3964 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3965
3966 /* Reject if down */
3967 if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
3968 dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3969 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3970 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3971 netif_stop_queue(net);
3972 /* Send Event when bus down detected during data session */
3973 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3974 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3975 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3976 net_os_send_hang_message(net);
3977 }
3978#ifdef PCIE_FULL_DONGLE
3979 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3980 dhd_os_busbusy_wake(&dhd->pub);
3981 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3982#endif /* PCIE_FULL_DONGLE */
3983 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3984 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3985#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3986 return -ENODEV;
3987#else
3988 return NETDEV_TX_BUSY;
3989#endif
3990 }
3991
3992 ifp = DHD_DEV_IFP(net);
3993 ifidx = DHD_DEV_IFIDX(net);
3994 BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
3995
3996 if (ifidx == DHD_BAD_IF) {
3997 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
3998 netif_stop_queue(net);
3999#ifdef PCIE_FULL_DONGLE
4000 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4001 dhd_os_busbusy_wake(&dhd->pub);
4002 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4003#endif /* PCIE_FULL_DONGLE */
4004 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4005 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4006#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4007 return -ENODEV;
4008#else
4009 return NETDEV_TX_BUSY;
4010#endif
4011 }
4012 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4013
4014 ASSERT(ifidx == dhd_net2idx(dhd, net));
4015 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4016
4017 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4018
4019 /* re-align socket buffer if "skb->data" is odd address */
4020 if (((unsigned long)(skb->data)) & 0x1) {
4021 unsigned char *data = skb->data;
4022 uint32 length = skb->len;
4023 PKTPUSH(dhd->pub.osh, skb, 1);
4024 memmove(skb->data, data, length);
4025 PKTSETLEN(dhd->pub.osh, skb, length);
4026 }
4027
4028 datalen = PKTLEN(dhd->pub.osh, skb);
4029
4030 /* Make sure there's enough room for any header */
4031 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4032 struct sk_buff *skb2;
4033
4034 DHD_INFO(("%s: insufficient headroom\n",
4035 dhd_ifname(&dhd->pub, ifidx)));
4036 dhd->pub.tx_realloc++;
4037
4038 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4039 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4040
4041 dev_kfree_skb(skb);
4042 if ((skb = skb2) == NULL) {
4043 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4044 dhd_ifname(&dhd->pub, ifidx)));
4045 ret = -ENOMEM;
4046 goto done;
4047 }
4048 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4049 }
4050
4051 /* Convert to packet */
4052 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4053 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4054 dhd_ifname(&dhd->pub, ifidx)));
4055 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4056 dev_kfree_skb_any(skb);
4057 ret = -ENOMEM;
4058 goto done;
4059 }
4060
4061#if defined(WLMEDIA_HTSF)
4062 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
4063 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
4064 struct ether_header *eh = (struct ether_header *)pktdata;
4065
4066 if (!ETHER_ISMULTI(eh->ether_dhost) &&
4067 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
4068 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
4069 }
4070 }
4071#endif
4072
4073#ifdef DHD_WMF
4074 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
4075 iph = (uint8 *)eh + ETHER_HDR_LEN;
4076
4077 /* WMF processing for multicast packets
4078 * Only IPv4 packets are handled
4079 */
4080 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
4081 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
4082 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
4083#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
4084 void *sdu_clone;
4085 bool ucast_convert = FALSE;
4086#ifdef DHD_UCAST_UPNP
4087 uint32 dest_ip;
4088
4089 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4090 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
4091#endif /* DHD_UCAST_UPNP */
4092#ifdef DHD_IGMP_UCQUERY
4093 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
4094 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
4095 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
4096#endif /* DHD_IGMP_UCQUERY */
4097 if (ucast_convert) {
4098 dhd_sta_t *sta;
4099#ifdef PCIE_FULL_DONGLE
4100 unsigned long flags;
4101#endif
4102 struct list_head snapshot_list;
4103 struct list_head *wmf_ucforward_list;
4104
4105 ret = NETDEV_TX_OK;
4106
4107			/* On non-BCM_GMAC3 platforms we need a snapshot of the STA list
4108			 * to avoid a deadlock from nested DHD_IF_STA_LIST_LOCK calls.
4109			 */
4110 wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
4111
4112 /* Convert upnp/igmp query to unicast for each assoc STA */
4113 list_for_each_entry(sta, wmf_ucforward_list, list) {
4114 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
4115 ret = WMF_NOP;
4116 break;
4117 }
4118 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
4119 }
4120 DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
4121
4122#ifdef PCIE_FULL_DONGLE
4123 DHD_GENERAL_LOCK(&dhd->pub, flags);
4124 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4125 dhd_os_busbusy_wake(&dhd->pub);
4126 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4127#endif /* PCIE_FULL_DONGLE */
4128 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4129 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4130
4131 if (ret == NETDEV_TX_OK)
4132 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
4133
4134 return ret;
4135 } else
4136#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
4137 {
4138			/* There is no STA info if the packet comes from a LAN host,
4139			 * so pass NULL.
4140			 */
4141 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
4142 switch (ret) {
4143 case WMF_TAKEN:
4144 case WMF_DROP:
4145 /* Either taken by WMF or we should drop it.
4146 * Exiting send path
4147 */
4148#ifdef PCIE_FULL_DONGLE
4149 DHD_GENERAL_LOCK(&dhd->pub, flags);
4150 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4151 dhd_os_busbusy_wake(&dhd->pub);
4152 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4153#endif /* PCIE_FULL_DONGLE */
4154 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4155 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4156 return NETDEV_TX_OK;
4157 default:
4158 /* Continue the transmit path */
4159 break;
4160 }
4161 }
4162 }
4163#endif /* DHD_WMF */
4164#ifdef DHD_PSTA
4165 /* PSR related packet proto manipulation should be done in DHD
4166 * since dongle doesn't have complete payload
4167 */
4168 if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
4169 ifidx, &pktbuf, TRUE) < 0)) {
4170 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4171 dhd_ifname(&dhd->pub, ifidx)));
4172 }
4173#endif /* DHD_PSTA */
4174
4175#ifdef DHDTCPACK_SUPPRESS
4176 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4177		/* If this packet has been held or freed, just return */
4178 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4179 ret = 0;
4180 goto done;
4181 }
4182 } else {
4183 /* If this packet has replaced another packet and got freed, just return */
4184 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4185 ret = 0;
4186 goto done;
4187 }
4188 }
4189#endif /* DHDTCPACK_SUPPRESS */
4190
4191 /* no segmented SKB support (Kernel-3.18.y) */
4192 if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
4193 PKTSETLINK(skb, NULL);
4194 }
4195
4196 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4197
4198done:
4199 if (ret) {
4200 ifp->stats.tx_dropped++;
4201 dhd->pub.tx_dropped++;
4202 } else {
4203
4204#ifdef PROP_TXSTATUS
4205		/* tx_packets counters are updated here only when wlfc is disabled */
4206 if (!dhd_wlfc_is_supported(&dhd->pub))
4207#endif
4208 {
4209 dhd->pub.tx_packets++;
4210 ifp->stats.tx_packets++;
4211 ifp->stats.tx_bytes += datalen;
4212 }
4213 }
4214
4215#ifdef PCIE_FULL_DONGLE
4216 DHD_GENERAL_LOCK(&dhd->pub, flags);
4217 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4218 dhd_os_busbusy_wake(&dhd->pub);
4219 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4220#endif /* PCIE_FULL_DONGLE */
4221
4222 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4223 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4224 BUZZZ_LOG(START_XMIT_END, 0);
4225
4226 /* Return ok: we always eat the packet */
4227#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4228 return 0;
4229#else
4230 return NETDEV_TX_OK;
4231#endif
4232}
4233
4234
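/* Start or stop the netif queue(s) for one interface, or for all active
 * interfaces when ifidx is ALL_INTERFACES, in response to bus flow-control
 * events. With DHD_LOSSLESS_ROAMING, flow control is suppressed while only
 * the NC precedence is allowed to dequeue (i.e. during roaming).
 */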
4235void
4236dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4237{
4238 struct net_device *net;
4239 dhd_info_t *dhd = dhdp->info;
4240 int i;
4241
4242 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4243
4244 ASSERT(dhd);
4245
4246#ifdef DHD_LOSSLESS_ROAMING
4247 /* block flowcontrol during roaming */
4248 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4249 return;
4250 }
4251#endif
4252
4253 if (ifidx == ALL_INTERFACES) {
4254 /* Flow control on all active interfaces */
4255 dhdp->txoff = state;
4256 for (i = 0; i < DHD_MAX_IFS; i++) {
4257 if (dhd->iflist[i]) {
4258 net = dhd->iflist[i]->net;
4259 if (state == ON)
4260 netif_stop_queue(net);
4261 else
4262 netif_wake_queue(net);
4263 }
4264 }
4265 } else {
4266 if (dhd->iflist[ifidx]) {
4267 net = dhd->iflist[ifidx]->net;
4268 if (state == ON)
4269 netif_stop_queue(net);
4270 else
4271 netif_wake_queue(net);
4272 }
4273 }
4274}
4275
4276
4277#ifdef DHD_WMF
4278bool
4279dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
4280{
4281 dhd_info_t *dhd = dhdp->info;
4282
4283 return dhd->rxthread_enabled;
4284}
4285#endif /* DHD_WMF */
4286
4287/** Called when a frame is received by the dongle on interface 'ifidx' */
4288void
4289dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
4290{
4291 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4292 struct sk_buff *skb;
4293 uchar *eth;
4294 uint len;
4295 void *data, *pnext = NULL;
4296 int i;
4297 dhd_if_t *ifp;
4298 wl_event_msg_t event;
4299 int tout_rx = 0;
4300 int tout_ctrl = 0;
4301 void *skbhead = NULL;
4302 void *skbprev = NULL;
4303#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
4304 char *dump_data;
4305 uint16 protocol;
4306 char *ifname;
4307#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
4308
4309 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4310
4311 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
4312 struct ether_header *eh;
4313
4314 pnext = PKTNEXT(dhdp->osh, pktbuf);
4315 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
4316
4317 ifp = dhd->iflist[ifidx];
4318 if (ifp == NULL) {
4319 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
4320 __FUNCTION__));
4321 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4322 continue;
4323 }
4324
4325 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4326
4327		/* Drop only data packets before the net device is registered, to avoid a kernel panic */
4328#ifndef PROP_TXSTATUS_VSDB
4329 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
4330 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4331#else
4332 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
4333 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4334#endif /* PROP_TXSTATUS_VSDB */
4335 {
4336 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
4337 __FUNCTION__));
4338 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4339 continue;
4340 }
4341
4342
4343#ifdef PROP_TXSTATUS
4344 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
4345			/* WLFC may send a header-only packet when
4346			 * there is an urgent message but no packet to
4347			 * piggy-back on.
4348			 */
4349 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4350 continue;
4351 }
4352#endif
4353#ifdef DHD_L2_FILTER
4354 /* If block_ping is enabled drop the ping packet */
4355 if (ifp->block_ping) {
4356 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
4357 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4358 continue;
4359 }
4360 }
4361 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
4362 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4363 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4364 continue;
4365 }
4366 }
4367 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4368 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
4369
4370 /* Drop the packets if l2 filter has processed it already
4371 * otherwise continue with the normal path
4372 */
4373 if (ret == BCME_OK) {
4374 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4375 continue;
4376 }
4377 }
4378#endif /* DHD_L2_FILTER */
4379#ifdef DHD_WMF
4380 /* WMF processing for multicast packets */
4381 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
4382 dhd_sta_t *sta;
4383 int ret;
4384
4385 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
4386 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
4387 switch (ret) {
4388 case WMF_TAKEN:
4389 /* The packet is taken by WMF. Continue to next iteration */
4390 continue;
4391 case WMF_DROP:
4392 /* Packet DROP decision by WMF. Toss it */
4393 DHD_ERROR(("%s: WMF decides to drop packet\n",
4394 __FUNCTION__));
4395 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4396 continue;
4397 default:
4398 /* Continue the transmit path */
4399 break;
4400 }
4401 }
4402#endif /* DHD_WMF */
4403
4404#ifdef DHDTCPACK_SUPPRESS
4405 dhd_tcpdata_info_get(dhdp, pktbuf);
4406#endif
4407 skb = PKTTONATIVE(dhdp->osh, pktbuf);
4408
4409 ASSERT(ifp);
4410 skb->dev = ifp->net;
4411
4412#ifdef DHD_PSTA
4413 if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
4414 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
4415 dhd_ifname(dhdp, ifidx)));
4416 }
4417#endif /* DHD_PSTA */
4418
4419#ifdef PCIE_FULL_DONGLE
4420 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
4421 (!ifp->ap_isolate)) {
4422 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4423 if (ETHER_ISUCAST(eh->ether_dhost)) {
4424 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
4425 dhd_sendpkt(dhdp, ifidx, pktbuf);
4426 continue;
4427 }
4428 } else {
4429 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
4430 if (npktbuf)
4431 dhd_sendpkt(dhdp, ifidx, npktbuf);
4432 }
4433 }
4434#endif /* PCIE_FULL_DONGLE */
4435
4436		/* Get the protocol, maintaining the skb around eth_type_trans().
4437		 * The main reason for this hack is a limitation of Linux 2.4,
4438		 * where 'eth_type_trans' performs its internal skb_pull using
4439		 * 'net->hard_header_len' rather than ETH_HLEN. To avoid copying
4440		 * packets coming from the network stack when adding the BDC and
4441		 * hardware headers, during network interface registration we set
4442		 * 'net->hard_header_len' to ETH_HLEN plus the extra space required
4443		 * for the BDC and hardware headers, not just ETH_HLEN.
4444		 */
4445 eth = skb->data;
4446 len = skb->len;
4447
4448#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
4449 dump_data = skb->data;
4450 protocol = (dump_data[12] << 8) | dump_data[13];
4451 ifname = skb->dev ? skb->dev->name : "N/A";
4452#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
4453#ifdef DHD_8021X_DUMP
4454 if (protocol == ETHER_TYPE_802_1X) {
4455 dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
4456 }
4457#endif /* DHD_8021X_DUMP */
4458#ifdef DHD_DHCP_DUMP
4459 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
4460 uint16 dump_hex;
4461 uint16 source_port;
4462 uint16 dest_port;
4463 uint16 udp_port_pos;
4464 uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
4465 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
4466
4467 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
4468 source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
4469 dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
4470 if (source_port == 0x0044 || dest_port == 0x0044) {
4471 dump_hex = (dump_data[udp_port_pos+249] << 8) |
4472 dump_data[udp_port_pos+250];
4473 if (dump_hex == 0x0101) {
4474 DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
4475 } else if (dump_hex == 0x0102) {
4476 DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
4477 } else if (dump_hex == 0x0103) {
4478 DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
4479 } else if (dump_hex == 0x0105) {
4480 DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
4481 } else {
4482 DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
4483 }
4484 } else if (source_port == 0x0043 || dest_port == 0x0043) {
4485 DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
4486 }
4487 }
4488#endif /* DHD_DHCP_DUMP */
4489#if defined(DHD_RX_DUMP)
4490 DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
4491 if (protocol != ETHER_TYPE_BRCM) {
4492 if (dump_data[0] == 0xFF) {
4493 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
4494
4495 if ((dump_data[12] == 8) &&
4496 (dump_data[13] == 6)) {
4497 DHD_ERROR(("%s: ARP %d\n",
4498 __FUNCTION__, dump_data[0x15]));
4499 }
4500 } else if (dump_data[0] & 1) {
4501 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
4502 __FUNCTION__, MAC2STRDBG(dump_data)));
4503 }
4504#ifdef DHD_RX_FULL_DUMP
4505 {
4506 int k;
4507 for (k = 0; k < skb->len; k++) {
4508 printk("%02X ", dump_data[k]);
4509 if ((k & 15) == 15)
4510 printk("\n");
4511 }
4512 printk("\n");
4513 }
4514#endif /* DHD_RX_FULL_DUMP */
4515 }
4516#endif /* DHD_RX_DUMP */
4517
4518 skb->protocol = eth_type_trans(skb, skb->dev);
4519
4520 if (skb->pkt_type == PACKET_MULTICAST) {
4521 dhd->pub.rx_multicast++;
4522 ifp->stats.multicast++;
4523 }
4524
4525 skb->data = eth;
4526 skb->len = len;
4527
4528#ifdef WLMEDIA_HTSF
4529 dhd_htsf_addrxts(dhdp, pktbuf);
4530#endif
4531 /* Strip header, count, deliver upward */
4532 skb_pull(skb, ETH_HLEN);
4533
4534 /* Process special event packets and then discard them */
4535 memset(&event, 0, sizeof(event));
4536 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
4537 dhd_wl_host_event(dhd, &ifidx,
4538#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
4539 skb_mac_header(skb),
4540#else
4541 skb->mac.raw,
4542#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
4543 &event,
4544 &data);
4545
4546 wl_event_to_host_order(&event);
4547 if (!tout_ctrl)
4548 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
4549
4550#if defined(PNO_SUPPORT)
4551 if (event.event_type == WLC_E_PFN_NET_FOUND) {
4552				/* enforce a custom wake lock to guarantee the kernel is not suspended */
4553 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
4554 }
4555#endif /* PNO_SUPPORT */
4556
4557#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
4558#ifdef DHD_USE_STATIC_CTRLBUF
4559 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4560#else
4561 PKTFREE(dhdp->osh, pktbuf, FALSE);
4562#endif /* DHD_USE_STATIC_CTRLBUF */
4563 continue;
4564#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
4565 } else {
4566 tout_rx = DHD_PACKET_TIMEOUT_MS;
4567
4568#ifdef PROP_TXSTATUS
4569 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
4570#endif /* PROP_TXSTATUS */
4571 }
4572
4573 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
4574 ifp = dhd->iflist[ifidx];
4575
4576 if (ifp->net)
4577 ifp->net->last_rx = jiffies;
4578
4579 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
4580 dhdp->dstats.rx_bytes += skb->len;
4581 dhdp->rx_packets++; /* Local count */
4582 ifp->stats.rx_bytes += skb->len;
4583 ifp->stats.rx_packets++;
4584 }
4585
4586 if (in_interrupt()) {
4587 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4588 __FUNCTION__, __LINE__);
4589 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4590#if defined(DHD_LB) && defined(DHD_LB_RXP)
4591 netif_receive_skb(skb);
4592#else
4593 netif_rx(skb);
4594#endif /* defined(DHD_LB) && defined(DHD_LB_RXP) */
4595 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4596 } else {
4597 if (dhd->rxthread_enabled) {
4598 if (!skbhead)
4599 skbhead = skb;
4600 else
4601 PKTSETNEXT(dhdp->osh, skbprev, skb);
4602 skbprev = skb;
4603 } else {
4604
4605 /* If the receive is not processed inside an ISR,
4606 * the softirqd must be woken explicitly to service
4607 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4608 * by netif_rx_ni(), but in earlier kernels, we need
4609 * to do it manually.
4610 */
4611 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4612 __FUNCTION__, __LINE__);
4613
4614#if defined(DHD_LB) && defined(DHD_LB_RXP)
4615 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4616 netif_receive_skb(skb);
4617 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4618#else
4619#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4620 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4621 netif_rx_ni(skb);
4622 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4623#else
4624 ulong flags;
4625 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4626 netif_rx(skb);
4627 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4628 local_irq_save(flags);
4629 RAISE_RX_SOFTIRQ();
4630 local_irq_restore(flags);
4631#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
4632#endif /* defined(DHD_LB) && defined(DHD_LB_RXP) */
4633 }
4634 }
4635 }
4636
4637 if (dhd->rxthread_enabled && skbhead)
4638 dhd_sched_rxf(dhdp, skbhead);
4639
4640 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
4641 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
4642 DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
4643}
4644
4645void
4646dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
4647{
4648 /* Linux version has nothing to do */
4649 return;
4650}
4651
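/* Transmit-completion callback: strips the protocol header, decrements the
 * pending-802.1X count for EAPOL frames, and, when proptxstatus is active,
 * updates the per-interface tx statistics based on the reported status.
 */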
4652void
4653dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4654{
4655 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4656 struct ether_header *eh;
4657 uint16 type;
4658
4659 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4660
4661 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4662 type = ntoh16(eh->ether_type);
4663
4664 if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
4665 atomic_dec(&dhd->pend_8021x_cnt);
4666
4667#ifdef PROP_TXSTATUS
4668 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4669 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
4670 uint datalen = PKTLEN(dhd->pub.osh, txp);
4671 if (ifp != NULL) {
4672 if (success) {
4673 dhd->pub.tx_packets++;
4674 ifp->stats.tx_packets++;
4675 ifp->stats.tx_bytes += datalen;
4676 } else {
4677 ifp->stats.tx_dropped++;
4678 }
4679 }
4680 }
4681#endif
4682}
4683
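/* net_device get_stats hook: returns the per-interface counters, refreshing
 * the dongle statistics via the protocol layer when the device is up.
 */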
4684static struct net_device_stats *
4685dhd_get_stats(struct net_device *net)
4686{
4687 dhd_info_t *dhd = DHD_DEV_INFO(net);
4688 dhd_if_t *ifp;
4689 int ifidx;
4690
4691 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4692
4693 ifidx = dhd_net2idx(dhd, net);
4694 if (ifidx == DHD_BAD_IF) {
4695 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4696
4697 memset(&net->stats, 0, sizeof(net->stats));
4698 return &net->stats;
4699 }
4700
4701 ifp = dhd->iflist[ifidx];
4702 ASSERT(dhd && ifp);
4703
4704 if (dhd->pub.up) {
4705 /* Use the protocol to get dongle stats */
4706 dhd_prot_dstats(&dhd->pub);
4707 }
4708 return &ifp->stats;
4709}
4710
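/* Watchdog thread: optionally promoted to SCHED_FIFO, it waits on a
 * semaphore kicked by the watchdog timer, runs dhd_bus_watchdog(), and
 * re-arms the timer while compensating for the time already spent.
 */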
4711static int
4712dhd_watchdog_thread(void *data)
4713{
4714 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4715 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4716 /* This thread doesn't need any user-level access,
4717 * so get rid of all our resources
4718 */
4719 if (dhd_watchdog_prio > 0) {
4720 struct sched_param param;
4721 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
4722 dhd_watchdog_prio:(MAX_RT_PRIO-1);
4723 setScheduler(current, SCHED_FIFO, &param);
4724 }
4725
4726 while (1) {
4727 if (down_interruptible (&tsk->sema) == 0) {
4728 unsigned long flags;
4729 unsigned long jiffies_at_start = jiffies;
4730 unsigned long time_lapse;
4731
4732 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
4733 SMP_RD_BARRIER_DEPENDS();
4734 if (tsk->terminated) {
4735 break;
4736 }
4737
4738 if (dhd->pub.dongle_reset == FALSE) {
4739 DHD_TIMER(("%s:\n", __FUNCTION__));
4740 dhd_bus_watchdog(&dhd->pub);
4741
4742 DHD_GENERAL_LOCK(&dhd->pub, flags);
4743 /* Count the tick for reference */
4744 dhd->pub.tickcnt++;
4745#ifdef DHD_L2_FILTER
4746 dhd_l2_filter_watchdog(&dhd->pub);
4747#endif /* DHD_L2_FILTER */
4748 time_lapse = jiffies - jiffies_at_start;
4749
4750 /* Reschedule the watchdog */
4751 if (dhd->wd_timer_valid) {
4752 mod_timer(&dhd->timer,
4753 jiffies +
4754 msecs_to_jiffies(dhd_watchdog_ms) -
4755 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
4756 }
4757 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4758 }
4759 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
4760 } else {
4761 break;
4762 }
4763 }
4764
4765 complete_and_exit(&tsk->completed, 0);
4766}
4767
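/* Watchdog timer callback: defers to the watchdog thread when one is
 * running, otherwise performs the bus watchdog work inline and re-arms
 * the timer.
 */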
4768static void dhd_watchdog(ulong data)
4769{
4770 dhd_info_t *dhd = (dhd_info_t *)data;
4771 unsigned long flags;
4772
4773 if (dhd->pub.dongle_reset) {
4774 return;
4775 }
4776
4777 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
4778 DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
4779 return;
4780 }
4781
4782 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
4783 up(&dhd->thr_wdt_ctl.sema);
4784 return;
4785 }
4786
4787 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
4788 /* Call the bus module watchdog */
4789 dhd_bus_watchdog(&dhd->pub);
4790 DHD_GENERAL_LOCK(&dhd->pub, flags);
4791 /* Count the tick for reference */
4792 dhd->pub.tickcnt++;
4793
4794#ifdef DHD_L2_FILTER
4795 dhd_l2_filter_watchdog(&dhd->pub);
4796#endif /* DHD_L2_FILTER */
4797 /* Reschedule the watchdog */
4798 if (dhd->wd_timer_valid)
4799 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
4800 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4801 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
4802}
4803
4804#ifdef DHD_PCIE_RUNTIMEPM
4805static int
4806dhd_rpm_state_thread(void *data)
4807{
4808 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4809 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4810
4811 while (1) {
4812 if (down_interruptible (&tsk->sema) == 0) {
4813 unsigned long flags;
4814 unsigned long jiffies_at_start = jiffies;
4815 unsigned long time_lapse;
4816
4817 SMP_RD_BARRIER_DEPENDS();
4818 if (tsk->terminated) {
4819 break;
4820 }
4821
4822 if (dhd->pub.dongle_reset == FALSE) {
4823 DHD_TIMER(("%s:\n", __FUNCTION__));
4824 if (dhd->pub.up) {
4825 dhd_runtimepm_state(&dhd->pub);
4826 }
4827
4828 DHD_GENERAL_LOCK(&dhd->pub, flags);
4829 time_lapse = jiffies - jiffies_at_start;
4830
4831 /* Reschedule the watchdog */
4832 if (dhd->rpm_timer_valid) {
4833 mod_timer(&dhd->rpm_timer,
4834 jiffies +
4835 msecs_to_jiffies(dhd_runtimepm_ms) -
4836 min(msecs_to_jiffies(dhd_runtimepm_ms),
4837 time_lapse));
4838 }
4839 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4840 }
4841 } else {
4842 break;
4843 }
4844 }
4845
4846 complete_and_exit(&tsk->completed, 0);
4847}
4848
4849static void dhd_runtimepm(ulong data)
4850{
4851 dhd_info_t *dhd = (dhd_info_t *)data;
4852
4853 if (dhd->pub.dongle_reset) {
4854 return;
4855 }
4856
4857 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
4858 up(&dhd->thr_rpm_ctl.sema);
4859 return;
4860 }
4861}
4862
4863void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
4864{
4865 dhd_os_runtimepm_timer(dhdp, 0);
4866 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
4867 DHD_ERROR(("DHD Runtime PM Disabled \n"));
4868}
4869
4870void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
4871{
4872 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
4873 DHD_ERROR(("DHD Runtime PM Enabled \n"));
4874}
4875
4876#endif /* DHD_PCIE_RUNTIMEPM */
4877
4878
4879#ifdef ENABLE_ADAPTIVE_SCHED
4880static void
4881dhd_sched_policy(int prio)
4882{
4883 struct sched_param param;
4884 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
4885 param.sched_priority = 0;
4886 setScheduler(current, SCHED_NORMAL, &param);
4887 } else {
4888 if (get_scheduler_policy(current) != SCHED_FIFO) {
4889 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
4890 setScheduler(current, SCHED_FIFO, &param);
4891 }
4892 }
4893}
4894#endif /* ENABLE_ADAPTIVE_SCHED */
4895#ifdef DEBUG_CPU_FREQ
4896static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
4897{
4898 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
4899 struct cpufreq_freqs *freq = data;
4900 if (dhd) {
4901 if (!dhd->new_freq)
4902 goto exit;
4903 if (val == CPUFREQ_POSTCHANGE) {
4904 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
4905 freq->new, freq->cpu));
4906 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
4907 }
4908 }
4909exit:
4910 return 0;
4911}
4912#endif /* DEBUG_CPU_FREQ */
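/* DPC thread: repeatedly drains dhd_bus_dpc() until no work remains,
 * extending the watchdog interval while busy; on a clean bus-down it stops
 * the bus instead. CPU affinity can be pinned via CUSTOM_DPC_CPUCORE or
 * the dpc_cpucore entry in config.txt.
 */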
4913static int
4914dhd_dpc_thread(void *data)
4915{
4916 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4917 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4918
4919 /* This thread doesn't need any user-level access,
4920 * so get rid of all our resources
4921 */
4922 if (dhd_dpc_prio > 0)
4923 {
4924 struct sched_param param;
4925 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
4926 setScheduler(current, SCHED_FIFO, &param);
4927 }
4928
4929#ifdef CUSTOM_DPC_CPUCORE
4930 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
4931#else
4932 if (dhd->pub.conf->dpc_cpucore >= 0) {
4933 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
4934 set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
4935 }
4936#endif
4937#ifdef CUSTOM_SET_CPUCORE
4938 dhd->pub.current_dpc = current;
4939#endif /* CUSTOM_SET_CPUCORE */
4940 /* Run until signal received */
4941 while (1) {
4942 if (!binary_sema_down(tsk)) {
4943#ifdef ENABLE_ADAPTIVE_SCHED
4944 dhd_sched_policy(dhd_dpc_prio);
4945#endif /* ENABLE_ADAPTIVE_SCHED */
4946 SMP_RD_BARRIER_DEPENDS();
4947 if (tsk->terminated) {
4948 break;
4949 }
4950
4951 /* Call bus dpc unless it indicated down (then clean stop) */
4952 if (dhd->pub.busstate != DHD_BUS_DOWN) {
4953#ifdef DEBUG_DPC_THREAD_WATCHDOG
4954 int resched_cnt = 0;
4955#endif /* DEBUG_DPC_THREAD_WATCHDOG */
4956 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
4957 while (dhd_bus_dpc(dhd->pub.bus)) {
4958 /* process all data */
4959#ifdef DEBUG_DPC_THREAD_WATCHDOG
4960 resched_cnt++;
4961 if (resched_cnt > MAX_RESCHED_CNT) {
4962						DHD_INFO(("%s Calling OSL_SLEEP to "
4963							"let other processes run.\n",
4964							__FUNCTION__));
4965 dhd->pub.dhd_bug_on = true;
4966 resched_cnt = 0;
4967 OSL_SLEEP(1);
4968 }
4969#endif /* DEBUG_DPC_THREAD_WATCHDOG */
4970 }
4971 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
4972 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4973 } else {
4974 if (dhd->pub.up)
4975 dhd_bus_stop(dhd->pub.bus, TRUE);
4976 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4977 }
4978 } else {
4979 break;
4980 }
4981 }
4982 complete_and_exit(&tsk->completed, 0);
4983}
4984
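/* RX-frame thread: dequeues chained skbs from the rxf queue and passes
 * them to the network stack with netif_rx_ni() (or netif_rx() plus an
 * explicit NET_RX_SOFTIRQ kick on pre-2.6 kernels).
 */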
4985static int
4986dhd_rxf_thread(void *data)
4987{
4988 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4989 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4990#if defined(WAIT_DEQUEUE)
4991#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
4992 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
4993#endif
4994 dhd_pub_t *pub = &dhd->pub;
4995
4996 /* This thread doesn't need any user-level access,
4997 * so get rid of all our resources
4998 */
4999 if (dhd_rxf_prio > 0)
5000 {
5001 struct sched_param param;
5002 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
5003 setScheduler(current, SCHED_FIFO, &param);
5004 }
5005
5006 DAEMONIZE("dhd_rxf");
5007	/* The matching DHD_OS_WAKE_LOCK is taken in dhd_sched_rxf() [dhd_linux.c] below */
5008
5009 /* signal: thread has started */
5010 complete(&tsk->completed);
5011#ifdef CUSTOM_SET_CPUCORE
5012 dhd->pub.current_rxf = current;
5013#endif /* CUSTOM_SET_CPUCORE */
5014 /* Run until signal received */
5015 while (1) {
5016 if (down_interruptible(&tsk->sema) == 0) {
5017 void *skb;
5018#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
5019 ulong flags;
5020#endif
5021#ifdef ENABLE_ADAPTIVE_SCHED
5022 dhd_sched_policy(dhd_rxf_prio);
5023#endif /* ENABLE_ADAPTIVE_SCHED */
5024
5025 SMP_RD_BARRIER_DEPENDS();
5026
5027 if (tsk->terminated) {
5028 break;
5029 }
5030 skb = dhd_rxf_dequeue(pub);
5031
5032 if (skb == NULL) {
5033 continue;
5034 }
5035 while (skb) {
5036 void *skbnext = PKTNEXT(pub->osh, skb);
5037 PKTSETNEXT(pub->osh, skb, NULL);
5038 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5039 __FUNCTION__, __LINE__);
5040#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
5041 netif_rx_ni(skb);
5042#else
5043 netif_rx(skb);
5044 local_irq_save(flags);
5045 RAISE_RX_SOFTIRQ();
5046 local_irq_restore(flags);
5047
5048#endif
5049 skb = skbnext;
5050 }
5051#if defined(WAIT_DEQUEUE)
5052 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
5053 OSL_SLEEP(1);
5054 watchdogTime = OSL_SYSUPTIME();
5055 }
5056#endif
5057
5058 DHD_OS_WAKE_UNLOCK(pub);
5059 } else {
5060 break;
5061 }
5062 }
5063 complete_and_exit(&tsk->completed, 0);
5064}
5065
5066#ifdef BCMPCIE
5067void dhd_dpc_enable(dhd_pub_t *dhdp)
5068{
5069 dhd_info_t *dhd;
5070
5071 if (!dhdp || !dhdp->info)
5072 return;
5073 dhd = dhdp->info;
5074
5075#ifdef DHD_LB
5076#ifdef DHD_LB_RXP
5077 __skb_queue_head_init(&dhd->rx_pend_queue);
5078#endif /* DHD_LB_RXP */
5079#ifdef DHD_LB_TXC
5080 if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
5081 tasklet_enable(&dhd->tx_compl_tasklet);
5082#endif /* DHD_LB_TXC */
5083#ifdef DHD_LB_RXC
5084 if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
5085 tasklet_enable(&dhd->rx_compl_tasklet);
5086#endif /* DHD_LB_RXC */
5087#endif /* DHD_LB */
5088 if (atomic_read(&dhd->tasklet.count) == 1)
5089 tasklet_enable(&dhd->tasklet);
5090}
5091#endif /* BCMPCIE */
5092
5093
5094#ifdef BCMPCIE
5095void
5096dhd_dpc_kill(dhd_pub_t *dhdp)
5097{
5098 dhd_info_t *dhd;
5099
5100 if (!dhdp) {
5101 return;
5102 }
5103
5104 dhd = dhdp->info;
5105
5106 if (!dhd) {
5107 return;
5108 }
5109
5110 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5111 tasklet_disable(&dhd->tasklet);
5112 tasklet_kill(&dhd->tasklet);
5113 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
5114 }
5115#if defined(DHD_LB)
5116#ifdef DHD_LB_RXP
5117 __skb_queue_purge(&dhd->rx_pend_queue);
5118#endif /* DHD_LB_RXP */
5119 /* Kill the Load Balancing Tasklets */
5120#if defined(DHD_LB_TXC)
5121 tasklet_disable(&dhd->tx_compl_tasklet);
5122 tasklet_kill(&dhd->tx_compl_tasklet);
5123#endif /* DHD_LB_TXC */
5124#if defined(DHD_LB_RXC)
5125 tasklet_disable(&dhd->rx_compl_tasklet);
5126 tasklet_kill(&dhd->rx_compl_tasklet);
5127#endif /* DHD_LB_RXC */
5128#endif /* DHD_LB */
5129}
5130#endif /* BCMPCIE */
5131
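/* DPC tasklet body: runs one pass of dhd_bus_dpc() and reschedules itself
 * while the bus reports pending work; stops the bus if it has gone down.
 */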
5132static void
5133dhd_dpc(ulong data)
5134{
5135 dhd_info_t *dhd;
5136
5137 dhd = (dhd_info_t *)data;
5138
5139	/* This tasklet can be scheduled from dhd_sched_dpc() [dhd_linux.c]
5140	 * below, with the wake lock set;
5141	 * the tasklet is initialized in dhd_attach().
5142	 */
5143 /* Call bus dpc unless it indicated down (then clean stop) */
5144 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5145 if (dhd_bus_dpc(dhd->pub.bus)) {
5146 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5147 tasklet_schedule(&dhd->tasklet);
5148 }
5149 } else {
5150 dhd_bus_stop(dhd->pub.bus, TRUE);
5151 }
5152}
5153
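/* Schedule deferred bus processing: wake the DPC thread (holding a wake
 * lock that the thread releases) when one exists, otherwise schedule the
 * DPC tasklet.
 */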
5154void
5155dhd_sched_dpc(dhd_pub_t *dhdp)
5156{
5157 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5158
5159 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5160 DHD_OS_WAKE_LOCK(dhdp);
5161		/* If the semaphore cannot be raised,
5162		 * release the wake lock here.
5163		 */
5164 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5165 DHD_OS_WAKE_UNLOCK(dhdp);
5166 }
5167 return;
5168 } else {
5169 tasklet_schedule(&dhd->tasklet);
5170 }
5171}
5172
5173static void
5174dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5175{
5176 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5177#ifdef RXF_DEQUEUE_ON_BUSY
5178 int ret = BCME_OK;
5179 int retry = 2;
5180#endif /* RXF_DEQUEUE_ON_BUSY */
5181
5182 DHD_OS_WAKE_LOCK(dhdp);
5183
5184 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5185#ifdef RXF_DEQUEUE_ON_BUSY
5186 do {
5187 ret = dhd_rxf_enqueue(dhdp, skb);
5188 if (ret == BCME_OK || ret == BCME_ERROR)
5189 break;
5190 else
5191 OSL_SLEEP(50); /* waiting for dequeueing */
5192 } while (retry-- > 0);
5193
5194 if (retry <= 0 && ret == BCME_BUSY) {
5195 void *skbp = skb;
5196
5197 while (skbp) {
5198 void *skbnext = PKTNEXT(dhdp->osh, skbp);
5199 PKTSETNEXT(dhdp->osh, skbp, NULL);
5200			bcm_object_trace_opr(skbp, BCM_OBJDBG_REMOVE,
5201 __FUNCTION__, __LINE__);
5202 netif_rx_ni(skbp);
5203 skbp = skbnext;
5204 }
5205 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
5206 } else {
5207 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5208 up(&dhd->thr_rxf_ctl.sema);
5209 }
5210 }
5211#else /* RXF_DEQUEUE_ON_BUSY */
5212 do {
5213 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5214 break;
5215 } while (1);
5216 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5217 up(&dhd->thr_rxf_ctl.sema);
5218 }
5219 return;
5220#endif /* RXF_DEQUEUE_ON_BUSY */
5221}
5222
5223#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5224#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5225
5226#ifdef TOE
5227/* Retrieve the current TOE component enables, kept as a bitmap in the toe_ol iovar */
5228static int
5229dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5230{
5231 wl_ioctl_t ioc;
5232 char buf[32];
5233 int ret;
5234
5235 memset(&ioc, 0, sizeof(ioc));
5236
5237 ioc.cmd = WLC_GET_VAR;
5238 ioc.buf = buf;
5239 ioc.len = (uint)sizeof(buf);
5240 ioc.set = FALSE;
5241
5242 strncpy(buf, "toe_ol", sizeof(buf) - 1);
5243 buf[sizeof(buf) - 1] = '\0';
5244 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5245 /* Check for older dongle image that doesn't support toe_ol */
5246 if (ret == -EIO) {
5247 DHD_ERROR(("%s: toe not supported by device\n",
5248 dhd_ifname(&dhd->pub, ifidx)));
5249 return -EOPNOTSUPP;
5250 }
5251
5252 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5253 return ret;
5254 }
5255
5256 memcpy(toe_ol, buf, sizeof(uint32));
5257 return 0;
5258}
5259
5260/* Set the current TOE component enables in the toe_ol iovar, and set the global toe enable iovar */
5261static int
5262dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5263{
5264 wl_ioctl_t ioc;
5265 char buf[32];
5266 int toe, ret;
5267
5268 memset(&ioc, 0, sizeof(ioc));
5269
5270 ioc.cmd = WLC_SET_VAR;
5271 ioc.buf = buf;
5272 ioc.len = (uint)sizeof(buf);
5273 ioc.set = TRUE;
5274
5275 /* Set toe_ol as requested */
5276
5277 strncpy(buf, "toe_ol", sizeof(buf) - 1);
5278 buf[sizeof(buf) - 1] = '\0';
5279 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
5280
5281 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5282 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5283 dhd_ifname(&dhd->pub, ifidx), ret));
5284 return ret;
5285 }
5286
5287 /* Enable toe globally only if any components are enabled. */
5288
5289 toe = (toe_ol != 0);
5290
5291 strcpy(buf, "toe");
5292 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
5293
5294 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5295 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5296 return ret;
5297 }
5298
5299 return 0;
5300}
5301#endif /* TOE */
5302
5303#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
5304void dhd_set_scb_probe(dhd_pub_t *dhd)
5305{
5306 int ret = 0;
5307 wl_scb_probe_t scb_probe;
5308 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
5309
5310 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
5311
5312 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5313 return;
5314 }
5315
5316 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
5317
5318 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
5319 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
5320 }
5321
5322 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
5323
5324 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
5325
5326 bcm_mkiovar("scb_probe", (char *)&scb_probe,
5327 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
5328 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5329 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
5330 return;
5331 }
5332}
5333#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5334
5335#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
5336static void
5337dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
5338{
5339 dhd_info_t *dhd = DHD_DEV_INFO(net);
5340
5341 snprintf(info->driver, sizeof(info->driver), "wl");
5342 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
5343}
5344
5345struct ethtool_ops dhd_ethtool_ops = {
5346 .get_drvinfo = dhd_ethtool_get_drvinfo
5347};
5348#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
5349
5350
5351#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
5352static int
5353dhd_ethtool(dhd_info_t *dhd, void *uaddr)
5354{
5355 struct ethtool_drvinfo info;
5356 char drvname[sizeof(info.driver)];
5357 uint32 cmd;
5358#ifdef TOE
5359 struct ethtool_value edata;
5360 uint32 toe_cmpnt, csum_dir;
5361 int ret;
5362#endif
5363
5364 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5365
5366 /* all ethtool calls start with a cmd word */
5367 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
5368 return -EFAULT;
5369
5370 switch (cmd) {
5371 case ETHTOOL_GDRVINFO:
5372		/* Copy out any requested driver name */
5373 if (copy_from_user(&info, uaddr, sizeof(info)))
5374 return -EFAULT;
5375 strncpy(drvname, info.driver, sizeof(info.driver));
5376 drvname[sizeof(info.driver)-1] = '\0';
5377
5378 /* clear struct for return */
5379 memset(&info, 0, sizeof(info));
5380 info.cmd = cmd;
5381
5382 /* if dhd requested, identify ourselves */
5383 if (strcmp(drvname, "?dhd") == 0) {
5384 snprintf(info.driver, sizeof(info.driver), "dhd");
5385 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
5386 info.version[sizeof(info.version) - 1] = '\0';
5387 }
5388
5389 /* otherwise, require dongle to be up */
5390 else if (!dhd->pub.up) {
5391 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
5392 return -ENODEV;
5393 }
5394
5395 /* finally, report dongle driver type */
5396 else if (dhd->pub.iswl)
5397 snprintf(info.driver, sizeof(info.driver), "wl");
5398 else
5399 snprintf(info.driver, sizeof(info.driver), "xx");
5400
5401 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
5402 if (copy_to_user(uaddr, &info, sizeof(info)))
5403 return -EFAULT;
5404 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
5405 (int)sizeof(drvname), drvname, info.driver));
5406 break;
5407
5408#ifdef TOE
5409 /* Get toe offload components from dongle */
5410 case ETHTOOL_GRXCSUM:
5411 case ETHTOOL_GTXCSUM:
5412 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5413 return ret;
5414
5415 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5416
5417 edata.cmd = cmd;
5418 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
5419
5420 if (copy_to_user(uaddr, &edata, sizeof(edata)))
5421 return -EFAULT;
5422 break;
5423
5424 /* Set toe offload components in dongle */
5425 case ETHTOOL_SRXCSUM:
5426 case ETHTOOL_STXCSUM:
5427 if (copy_from_user(&edata, uaddr, sizeof(edata)))
5428 return -EFAULT;
5429
5430 /* Read the current settings, update and write back */
5431 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5432 return ret;
5433
5434 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5435
5436 if (edata.data != 0)
5437 toe_cmpnt |= csum_dir;
5438 else
5439 toe_cmpnt &= ~csum_dir;
5440
5441 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
5442 return ret;
5443
5444 /* If setting TX checksum mode, tell Linux the new mode */
5445 if (cmd == ETHTOOL_STXCSUM) {
5446 if (edata.data)
5447 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
5448 else
5449 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
5450 }
5451
5452 break;
5453#endif /* TOE */
5454
5455 default:
5456 return -EOPNOTSUPP;
5457 }
5458
5459 return 0;
5460}
5461#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5462
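/* Decide whether an ioctl error indicates a hung dongle: on -ETIMEDOUT,
 * -EREMOTEIO, or an unexpected bus-down without a deliberate dongle reset,
 * record a hang reason and send the HANG event to user space. Returns TRUE
 * when a hang was reported.
 */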
5463static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
5464{
5465 dhd_info_t *dhd;
5466
5467 if (!dhdp) {
5468 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
5469 return FALSE;
5470 }
5471
5472 if (!dhdp->up)
5473 return FALSE;
5474
5475 dhd = (dhd_info_t *)dhdp->info;
5476#if !defined(BCMPCIE)
5477 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5478 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
5479 return FALSE;
5480 }
5481#endif
5482
5483 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
5484 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
5485#ifdef BCMPCIE
5486 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
5487 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
5488 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
5489#else
5490 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
5491 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
5492#endif /* BCMPCIE */
5493 if (dhdp->hang_reason == 0) {
5494 if (dhdp->dongle_trap_occured) {
5495 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
5496#ifdef BCMPCIE
5497 } else if (dhdp->d3ackcnt_timeout) {
5498 dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
5499#endif /* BCMPCIE */
5500 } else {
5501 dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
5502 }
5503 }
5504 net_os_send_hang_message(net);
5505 return TRUE;
5506 }
5507 return FALSE;
5508}
5509
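/* Common ioctl dispatcher: handles DHD_IOCTL_MAGIC ioctls locally, starts
 * the bus on demand when allow_delay_fwdl is set, serializes key-set and
 * disassoc ioctls against pending 802.1X frames, and forwards everything
 * else to the dongle via dhd_wl_ioctl(). A hang check runs on completion.
 */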
5510int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
5511{
5512 int bcmerror = BCME_OK;
5513 int buflen = 0;
5514 struct net_device *net;
5515
5516 net = dhd_idx2net(pub, ifidx);
5517 if (!net) {
5518 bcmerror = BCME_BADARG;
5519 goto done;
5520 }
5521
5522 if (data_buf)
5523 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
5524
5525 /* check for local dhd ioctl and handle it */
5526 if (ioc->driver == DHD_IOCTL_MAGIC) {
5527 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
5528 if (bcmerror)
5529 pub->bcmerror = bcmerror;
5530 goto done;
5531 }
5532
5533 /* send to dongle (must be up, and wl). */
5534 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
5535 if (allow_delay_fwdl) {
5536 int ret = dhd_bus_start(pub);
5537 if (ret != 0) {
5538 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
5539 bcmerror = BCME_DONGLE_DOWN;
5540 goto done;
5541 }
5542 } else {
5543 bcmerror = BCME_DONGLE_DOWN;
5544 goto done;
5545 }
5546 }
5547
5548 if (!pub->iswl) {
5549 bcmerror = BCME_DONGLE_DOWN;
5550 goto done;
5551 }
5552
5553 /*
5554 * Flush the TX queue if required for proper message serialization:
5555 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
5556 * prevent M4 encryption and
5557 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
5558 * prevent disassoc frame being sent before WPS-DONE frame.
5559 */
5560 if (ioc->cmd == WLC_SET_KEY ||
5561 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
5562 strncmp("wsec_key", data_buf, 9) == 0) ||
5563 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
5564 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
5565 ioc->cmd == WLC_DISASSOC)
5566 dhd_wait_pend8021x(net);
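/*
 * Concretely, the ordering hazard being serialized (a sketch of the
 * 4-way handshake tail):
 *
 *	supplicant -> driver: M4 (EAPOL-Key frame, 802.1X, still pending TX)
 *	supplicant -> driver: WLC_SET_VAR "wsec_key" (plumb the pairwise key)
 *
 * Without the dhd_wait_pend8021x() above, the key could be installed
 * while M4 is still queued, so M4 would go out encrypted and be dropped
 * by the AP; likewise WLC_DISASSOC must not overtake a pending WPS-DONE.
 */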
5567
5568#ifdef WLMEDIA_HTSF
5569 if (data_buf) {
5570 /* short cut wl ioctl calls here */
5571 if (strcmp("htsf", data_buf) == 0) {
5572 dhd_ioctl_htsf_get(dhd, 0);
5573 return BCME_OK;
5574 }
5575
5576 if (strcmp("htsflate", data_buf) == 0) {
5577 if (ioc->set) {
5578 memset(ts, 0, sizeof(tstamp_t)*TSMAX);
5579 memset(&maxdelayts, 0, sizeof(tstamp_t));
5580 maxdelay = 0;
5581 tspktcnt = 0;
5582 maxdelaypktno = 0;
5583 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
5584 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
5585 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
5586 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
5587 } else {
5588 dhd_dump_latency();
5589 }
5590 return BCME_OK;
5591 }
5592 if (strcmp("htsfclear", data_buf) == 0) {
5593 memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
5594 memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
5595 memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
5596 memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
5597 htsf_seqnum = 0;
5598 return BCME_OK;
5599 }
5600 if (strcmp("htsfhis", data_buf) == 0) {
5601 dhd_dump_htsfhisto(&vi_d1, "H to D");
5602 dhd_dump_htsfhisto(&vi_d2, "D to D");
5603 dhd_dump_htsfhisto(&vi_d3, "D to H");
5604 dhd_dump_htsfhisto(&vi_d4, "H to H");
5605 return BCME_OK;
5606 }
5607 if (strcmp("tsport", data_buf) == 0) {
5608 if (ioc->set) {
5609 memcpy(&tsport, data_buf + 7, 4);
5610 } else {
5611 DHD_ERROR(("current timestamp port: %d \n", tsport));
5612 }
5613 return BCME_OK;
5614 }
5615 }
5616#endif /* WLMEDIA_HTSF */
5617
5618 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
5619 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
5620#ifdef BCM_FD_AGGR
5621 bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
5622#else
5623 bcmerror = BCME_UNSUPPORTED;
5624#endif
5625 goto done;
5626 }
5627
5628#ifdef DHD_DEBUG
5629 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
5630 if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
5631 /* Print IOVAR Information */
5632 DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
5633 __FUNCTION__, (char *)data_buf, ioc->set));
5634 if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
5635 prhex(NULL, data_buf + strlen(data_buf) + 1,
5636 buflen - strlen(data_buf) - 1);
5637 }
5638 } else {
5639 /* Print IOCTL Information */
5640 DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
5641 __FUNCTION__, ioc->cmd, ioc->set));
5642 if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
5643 prhex(NULL, data_buf, buflen);
5644 }
5645 }
5646 }
5647#endif /* DHD_DEBUG */
5648
5649 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
5650
5651done:
5652 dhd_check_hang(net, pub, bcmerror);
5653
5654 return bcmerror;
5655}
5656
5657static int
5658dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
5659{
5660 dhd_info_t *dhd = DHD_DEV_INFO(net);
5661 dhd_ioctl_t ioc;
5662 int ifidx;
5663 int ret;
5664 void *local_buf = NULL;
5665 u16 buflen = 0;
5666
5667 DHD_OS_WAKE_LOCK(&dhd->pub);
5668 DHD_PERIM_LOCK(&dhd->pub);
5669
5670 /* Interface up check for built-in type */
5671 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
5672 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
5673 ret = BCME_NOTUP;
5674 goto exit;
5675 }
5676
5677 /* send to dongle only if we are not waiting for reload already */
5678 if (dhd->pub.hang_was_sent) {
5679 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
5680 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
5681 ret = BCME_DONGLE_DOWN;
5682 goto exit;
5683 }
5684
5685 ifidx = dhd_net2idx(dhd, net);
5686 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
5687
5688 if (ifidx == DHD_BAD_IF) {
5689 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
5690 ret = -1;
5691 goto exit;
5692 }
5693
5694#if defined(WL_WIRELESS_EXT)
5695 /* linux wireless extensions */
5696 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
5697 /* may recurse, do NOT lock */
5698 ret = wl_iw_ioctl(net, ifr, cmd);
5699 goto exit;
5700 }
5701#endif /* defined(WL_WIRELESS_EXT) */
5702
5703#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
5704 if (cmd == SIOCETHTOOL) {
5705 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
5706 goto exit;
5707 }
5708#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5709
5710 if (cmd == SIOCDEVPRIVATE+1) {
5711 ret = wl_android_priv_cmd(net, ifr, cmd);
5712 dhd_check_hang(net, &dhd->pub, ret);
5713 goto exit;
5714 }
5715
5716 if (cmd != SIOCDEVPRIVATE) {
5717 ret = -EOPNOTSUPP;
5718 goto exit;
5719 }
5720
5721 memset(&ioc, 0, sizeof(ioc));
5722
5723#ifdef CONFIG_COMPAT
5724#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
5725 if (in_compat_syscall())
5726#else
5727 if (is_compat_task())
5728#endif
5729 {
5730 compat_wl_ioctl_t compat_ioc;
5731 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
5732 ret = BCME_BADADDR;
5733 goto done;
5734 }
5735 ioc.cmd = compat_ioc.cmd;
5736 ioc.buf = compat_ptr(compat_ioc.buf);
5737 ioc.len = compat_ioc.len;
5738 ioc.set = compat_ioc.set;
5739 ioc.used = compat_ioc.used;
5740 ioc.needed = compat_ioc.needed;
5741 /* To differentiate between wl and dhd, read 4 more bytes */
5742 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
5743 sizeof(uint)) != 0)) {
5744 ret = BCME_BADADDR;
5745 goto done;
5746 }
5747 } else
5748#endif /* CONFIG_COMPAT */
5749 {
5750 /* Copy the ioc control structure part of ioctl request */
5751 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
5752 ret = BCME_BADADDR;
5753 goto done;
5754 }
5755
5756 /* To differentiate between wl and dhd, read 4 more bytes */
5757 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
5758 sizeof(uint)) != 0)) {
5759 ret = BCME_BADADDR;
5760 goto done;
5761 }
5762 }
5763
5764 if (!capable(CAP_NET_ADMIN)) {
5765 ret = BCME_EPERM;
5766 goto done;
5767 }
5768
5769 if (ioc.len > 0) {
5770 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
5771 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
5772 ret = BCME_NOMEM;
5773 goto done;
5774 }
5775
5776 DHD_PERIM_UNLOCK(&dhd->pub);
5777 if (copy_from_user(local_buf, ioc.buf, buflen)) {
5778 DHD_PERIM_LOCK(&dhd->pub);
5779 ret = BCME_BADADDR;
5780 goto done;
5781 }
5782 DHD_PERIM_LOCK(&dhd->pub);
5783
5784 *(char *)(local_buf + buflen) = '\0';
5785 }
5786
5787 ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
5788
5789 if (!ret && buflen && local_buf && ioc.buf) {
5790 DHD_PERIM_UNLOCK(&dhd->pub);
5791 if (copy_to_user(ioc.buf, local_buf, buflen))
5792 ret = -EFAULT;
5793 DHD_PERIM_LOCK(&dhd->pub);
5794 }
5795
5796done:
5797 if (local_buf)
5798 MFREE(dhd->pub.osh, local_buf, buflen+1);
5799
5800exit:
5801 DHD_PERIM_UNLOCK(&dhd->pub);
5802 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5803
5804 return OSL_ERROR(ret);
5805}
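/*
 * For reference, the user-space contract implied by the copies above: a
 * dhd_ioctl_t travels through ifr_data on SIOCDEVPRIVATE, and the handler
 * reads the wl_ioctl_t part first, then four more bytes to pick up the
 * 'driver' field whose DHD_IOCTL_MAGIC value routes the request to
 * dhd_ioctl() instead of the dongle. A hedged sketch (field layout per
 * the copy_from_user calls above; "wlan0" and the socket setup are
 * assumptions):
 *
 *	dhd_ioctl_t ioc;
 *	struct ifreq ifr;
 *	char buf[256];
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ioc, 0, sizeof(ioc));
 *	ioc.cmd = WLC_GET_VERSION;	(a wl command: ioc.driver stays 0)
 *	ioc.buf = buf;
 *	ioc.len = sizeof(buf);
 *	ioc.set = 0;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&ioc;
 *	ioctl(s, SIOCDEVPRIVATE, &ifr);
 */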
5806
5807
5808#ifdef FIX_CPU_MIN_CLOCK
5809static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
5810{
5811 if (dhd) {
5812#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5813 mutex_init(&dhd->cpufreq_fix);
5814#endif
5815 dhd->cpufreq_fix_status = FALSE;
5816 }
5817 return 0;
5818}
5819
5820static void dhd_fix_cpu_freq(dhd_info_t *dhd)
5821{
5822#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5823 mutex_lock(&dhd->cpufreq_fix);
5824#endif
5825 if (dhd && !dhd->cpufreq_fix_status) {
5826 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
5827#ifdef FIX_BUS_MIN_CLOCK
5828 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
5829#endif /* FIX_BUS_MIN_CLOCK */
5830 DHD_ERROR(("pm_qos_add_requests called\n"));
5831
5832 dhd->cpufreq_fix_status = TRUE;
5833 }
5834#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5835 mutex_unlock(&dhd->cpufreq_fix);
5836#endif
5837}
5838
5839static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
5840{
5841#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5842 mutex_lock(&dhd->cpufreq_fix);
5843#endif
5844 if (dhd && dhd->cpufreq_fix_status != TRUE) {
5845#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5846 mutex_unlock(&dhd->cpufreq_fix);
5847#endif
5848 return;
5849 }
5850
5851 pm_qos_remove_request(&dhd->dhd_cpu_qos);
5852#ifdef FIX_BUS_MIN_CLOCK
5853 pm_qos_remove_request(&dhd->dhd_bus_qos);
5854#endif /* FIX_BUS_MIN_CLOCK */
5855 DHD_ERROR(("pm_qos_add_requests called\n"));
5856
5857 dhd->cpufreq_fix_status = FALSE;
5858#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5859 mutex_unlock(&dhd->cpufreq_fix);
5860#endif
5861}
5862#endif /* FIX_CPU_MIN_CLOCK */
5863
5864#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
5865int dhd_deepsleep(dhd_info_t *dhd, int flag)
5866{
5867 char iovbuf[20];
5868 uint powervar = 0;
5869 dhd_pub_t *dhdp;
5870 int cnt = 0;
5871 int ret = 0;
5872
5873 dhdp = &dhd->pub;
5874
5875 switch (flag) {
5876 case 1 : /* Deepsleep on */
5877 DHD_ERROR(("dhd_deepsleep: ON\n"));
5878 /* give some time to sysioc_work before deepsleep */
5879 OSL_SLEEP(200);
5880#ifdef PKT_FILTER_SUPPORT
5881 /* disable pkt filter */
5882 dhd_enable_packet_filter(0, dhdp);
5883#endif /* PKT_FILTER_SUPPORT */
5884 /* Disable MPC */
5885 powervar = 0;
5886 memset(iovbuf, 0, sizeof(iovbuf));
5887 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5888 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5889
5890 /* Enable Deepsleep */
5891 powervar = 1;
5892 memset(iovbuf, 0, sizeof(iovbuf));
5893 bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5894 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5895 break;
5896
5897 case 0: /* Deepsleep Off */
5898 DHD_ERROR(("dhd_deepsleep: OFF\n"));
5899
5900 /* Disable Deepsleep */
5901 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
5902 powervar = 0;
5903 memset(iovbuf, 0, sizeof(iovbuf));
5904 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
5905 iovbuf, sizeof(iovbuf));
5906 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
5907 sizeof(iovbuf), TRUE, 0);
5908
5909 memset(iovbuf, 0, sizeof(iovbuf));
5910 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
5911 iovbuf, sizeof(iovbuf));
5912 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
5913 sizeof(iovbuf), FALSE, 0)) < 0) {
5914 DHD_ERROR(("the error of dhd deepsleep status"
5915 " ret value :%d\n", ret));
5916 } else {
5917 if (!(*(int *)iovbuf)) {
5918 DHD_ERROR(("deepsleep mode is 0,"
5919 " count: %d\n", cnt));
5920 break;
5921 }
5922 }
5923 }
5924
5925 /* Enable MPC */
5926 powervar = 1;
5927 memset(iovbuf, 0, sizeof(iovbuf));
5928 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
5929 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5930 break;
5931 }
5932
5933 return 0;
5934}
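/*
 * The bcm_mkiovar() calls above pack an iovar as its NUL-terminated name
 * followed by the value. A worked example for "mpc" with powervar == 0
 * (a 4-byte little-endian value assumed), as sent via WLC_SET_VAR:
 *
 *	iovbuf[] = { 'm', 'p', 'c', '\0', 0x00, 0x00, 0x00, 0x00 }
 *
 * For WLC_GET_VAR the same name is sent and the dongle overwrites the
 * buffer with the current value, which is why the loop above re-reads
 * iovbuf as an int to poll the deepsleep state.
 */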
5935
5936static int
5937dhd_stop(struct net_device *net)
5938{
5939 int ifidx = 0;
5940 dhd_info_t *dhd = DHD_DEV_INFO(net);
5941 DHD_OS_WAKE_LOCK(&dhd->pub);
5942 DHD_PERIM_LOCK(&dhd->pub);
5943 printf("%s: Enter %p\n", __FUNCTION__, net);
5944 dhd->pub.rxcnt_timeout = 0;
5945 dhd->pub.txcnt_timeout = 0;
5946
5947#ifdef BCMPCIE
5948 dhd->pub.d3ackcnt_timeout = 0;
5949#endif /* BCMPCIE */
5950
5951 if (dhd->pub.up == 0) {
5952 goto exit;
5953 }
5954
5955 dhd_if_flush_sta(DHD_DEV_IFP(net));
5956
5957 /* Disable Runtime PM before interface down */
5958 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
5959
5960#ifdef FIX_CPU_MIN_CLOCK
5961 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
5962 dhd_rollback_cpu_freq(dhd);
5963#endif /* FIX_CPU_MIN_CLOCK */
5964
5965 ifidx = dhd_net2idx(dhd, net);
5966 BCM_REFERENCE(ifidx);
5967
5968 /* Set state and stop OS transmissions */
5969 netif_stop_queue(net);
5970 dhd->pub.up = 0;
5971
5972#ifdef WL_CFG80211
5973 if (ifidx == 0) {
5974 dhd_if_t *ifp;
5975 wl_cfg80211_down(NULL);
5976
5977 ifp = dhd->iflist[0];
5978 ASSERT(ifp && ifp->net);
5979 /*
5980 * For CFG80211: Clean up all the left over virtual interfaces
5981 * when the primary Interface is brought down. [ifconfig wlan0 down]
5982 */
5983 if (!dhd_download_fw_on_driverload) {
5984 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
5985 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
5986 int i;
5987
5988#ifdef WL_CFG80211_P2P_DEV_IF
5989 wl_cfg80211_del_p2p_wdev();
5990#endif /* WL_CFG80211_P2P_DEV_IF */
5991
5992 dhd_net_if_lock_local(dhd);
5993 for (i = 1; i < DHD_MAX_IFS; i++)
5994 dhd_remove_if(&dhd->pub, i, FALSE);
5995
5996 if (ifp && ifp->net) {
5997 dhd_if_del_sta_list(ifp);
5998 }
5999
6000#ifdef ARP_OFFLOAD_SUPPORT
6001 if (dhd_inetaddr_notifier_registered) {
6002 dhd_inetaddr_notifier_registered = FALSE;
6003 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
6004 }
6005#endif /* ARP_OFFLOAD_SUPPORT */
6006#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6007 if (dhd_inet6addr_notifier_registered) {
6008 dhd_inet6addr_notifier_registered = FALSE;
6009 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
6010 }
6011#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6012 dhd_net_if_unlock_local(dhd);
6013 }
6014 cancel_work_sync(dhd->dhd_deferred_wq);
6015#if defined(DHD_LB) && defined(DHD_LB_RXP)
6016 __skb_queue_purge(&dhd->rx_pend_queue);
6017#endif /* DHD_LB && DHD_LB_RXP */
6018 }
6019
6020#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6021 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6022#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6023#if defined(DHD_LB) && defined(DHD_LB_RXP)
6024 if (ifp->net == dhd->rx_napi_netdev) {
6025 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
6026 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6027 skb_queue_purge(&dhd->rx_napi_queue);
6028 napi_disable(&dhd->rx_napi_struct);
6029 netif_napi_del(&dhd->rx_napi_struct);
6030 dhd->rx_napi_netdev = NULL;
6031 }
6032#endif /* DHD_LB && DHD_LB_RXP */
6033
6034 }
6035#endif /* WL_CFG80211 */
6036
6037#ifdef PROP_TXSTATUS
6038 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
6039#endif
6040 /* Stop the protocol module */
6041 dhd_prot_stop(&dhd->pub);
6042
6043 OLD_MOD_DEC_USE_COUNT;
6044exit:
6045 if (ifidx == 0 && !dhd_download_fw_on_driverload)
6046 wl_android_wifi_off(net, TRUE);
6047 else {
6048 if (dhd->pub.conf->deepsleep)
6049 dhd_deepsleep(dhd, 1);
6050 }
6051 dhd->pub.hang_was_sent = 0;
6052
6053 /* Clear country spec for built-in type driver */
6054 if (!dhd_download_fw_on_driverload) {
6055 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
6056 dhd->pub.dhd_cspec.rev = 0;
6057 dhd->pub.dhd_cspec.ccode[0] = 0x00;
6058 }
6059
6060#ifdef BCMDBGFS
6061 dhd_dbg_remove();
6062#endif
6063
6064 DHD_PERIM_UNLOCK(&dhd->pub);
6065 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6066
6067 /* Destroy wakelock */
6068 if (!dhd_download_fw_on_driverload &&
6069 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6070 DHD_OS_WAKE_LOCK_DESTROY(dhd);
6071 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
6072 }
6073 printf("%s: Exit\n", __FUNCTION__);
6074
6075 return 0;
6076}
6077
6078#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
6079extern bool g_first_broadcast_scan;
6080#endif
6081
6082#ifdef WL11U
6083static int dhd_interworking_enable(dhd_pub_t *dhd)
6084{
6085 char iovbuf[WLC_IOCTL_SMLEN];
6086 uint32 enable = true;
6087 int ret = BCME_OK;
6088
6089 bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
6090 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6091 if (ret < 0) {
6092 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
6093 }
6094
6095 if (ret == BCME_OK) {
6096 /* basic capabilities for HS20 REL2 */
6097 uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
6098 bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
6099 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6100 if (ret < 0) {
6101 DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
6102 }
6103 }
6104
6105 return ret;
6106}
6107#endif /* WL11U */
6108
6109static int
6110dhd_open(struct net_device *net)
6111{
6112 dhd_info_t *dhd = DHD_DEV_INFO(net);
6113#ifdef TOE
6114 uint32 toe_ol;
6115#endif
6116#ifdef BCM_FD_AGGR
6117 char iovbuf[WLC_IOCTL_SMLEN];
6118 dbus_config_t config;
6119 uint32 agglimit = 0;
6120 uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
6121#endif /* BCM_FD_AGGR */
6122 int ifidx;
6123 int32 ret = 0;
6124
6125 if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
6126 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
6127 return -1;
6128 }
6129
6130 printf("%s: Enter %p\n", __FUNCTION__, net);
6131#if defined(MULTIPLE_SUPPLICANT)
6132#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6133 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6134 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
6135 }
6136 mutex_lock(&_dhd_sdio_mutex_lock_);
6137#endif
6138#endif /* MULTIPLE_SUPPLICANT */
6139 /* Init wakelock */
6140 if (!dhd_download_fw_on_driverload &&
6141 !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6142 DHD_OS_WAKE_LOCK_INIT(dhd);
6143 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
6144 }
6145
6146#ifdef PREVENT_REOPEN_DURING_HANG
6147 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
6148 if (dhd->pub.hang_was_sent == 1) {
6149 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
6150 /* Force to bring down WLAN interface in case dhd_stop() is not called
6151 * from the upper layer when HANG event is triggered.
6152 */
6153 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
6154 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
6155 dhd_stop(net);
6156 } else {
6157 return -1;
6158 }
6159 }
6160#endif /* PREVENT_REOPEN_DURING_HANG */
6161
6162
6163 DHD_OS_WAKE_LOCK(&dhd->pub);
6164 DHD_PERIM_LOCK(&dhd->pub);
6165 dhd->pub.dongle_trap_occured = 0;
6166 dhd->pub.hang_was_sent = 0;
6167 dhd->pub.hang_reason = 0;
6168#ifdef DHD_LOSSLESS_ROAMING
6169 dhd->pub.dequeue_prec_map = ALLPRIO;
6170#endif
6171#if 0
6172 /*
6173 * Force start if ifconfig_up gets called before START command
6174 * We keep WEXT's wl_control_wl_start to provide backward compatibility
6175 * This should be removed in the future
6176 */
6177 ret = wl_control_wl_start(net);
6178 if (ret != 0) {
6179 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6180 ret = -1;
6181 goto exit;
6182 }
6183#endif
6184
6185 ifidx = dhd_net2idx(dhd, net);
6186 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6187
6188 if (ifidx < 0) {
6189 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
6190 ret = -1;
6191 goto exit;
6192 }
6193
6194 if (!dhd->iflist[ifidx]) {
6195 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
6196 ret = -1;
6197 goto exit;
6198 }
6199
6200 if (ifidx == 0) {
6201 atomic_set(&dhd->pend_8021x_cnt, 0);
6202 if (!dhd_download_fw_on_driverload) {
6203 DHD_ERROR(("\n%s\n", dhd_version));
6204#if defined(USE_INITIAL_SHORT_DWELL_TIME)
6205 g_first_broadcast_scan = TRUE;
6206#endif
6207 ret = wl_android_wifi_on(net);
6208 if (ret != 0) {
6209 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
6210 __FUNCTION__, ret));
6211 ret = -1;
6212 goto exit;
6213 }
6214 }
6215#ifdef FIX_CPU_MIN_CLOCK
6216 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
6217 dhd_init_cpufreq_fix(dhd);
6218 dhd_fix_cpu_freq(dhd);
6219 }
6220#endif /* FIX_CPU_MIN_CLOCK */
6221
6222 if (dhd->pub.busstate != DHD_BUS_DATA) {
6223
6224 /* try to bring up bus */
6225 DHD_PERIM_UNLOCK(&dhd->pub);
6226 ret = dhd_bus_start(&dhd->pub);
6227 DHD_PERIM_LOCK(&dhd->pub);
6228 if (ret) {
6229 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6230 ret = -1;
6231 goto exit;
6232 }
6233
6234 }
6235 if (dhd_download_fw_on_driverload) {
6236 if (dhd->pub.conf->deepsleep)
6237 dhd_deepsleep(dhd, 0);
6238 }
6239
6240#ifdef BCM_FD_AGGR
6241 config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
6242
6243
6244 memset(iovbuf, 0, sizeof(iovbuf));
6245 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
6246 iovbuf, sizeof(iovbuf));
6247
6248 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
6249 agglimit = *(uint32 *)iovbuf;
6250 config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
6251 config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
6252 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
6253 agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
6254 if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
6255 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
6256 }
6257 } else {
6258 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
6259 rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
6260 }
6261
6262 /* Set aggregation for TX */
6263 bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
6264 rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
6265
6266 /* Set aggregation for RX */
6267 memset(iovbuf, 0, sizeof(iovbuf));
6268 bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
6269 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
6270 dhd->pub.info->fdaggr = 0;
6271 if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
6272 dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
6273 if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
6274 dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
6275 } else {
6276 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
6277 }
6278#endif /* BCM_FD_AGGR */
6279
6280 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
6281 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
6282
6283#ifdef TOE
6284 /* Get current TOE mode from dongle */
6285 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
6286 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
6287 } else {
6288 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
6289 }
6290#endif /* TOE */
6291
6292#if defined(WL_CFG80211)
6293 if (unlikely(wl_cfg80211_up(NULL))) {
6294 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
6295 ret = -1;
6296 goto exit;
6297 }
6298 if (!dhd_download_fw_on_driverload) {
6299#ifdef ARP_OFFLOAD_SUPPORT
6300 dhd->pend_ipaddr = 0;
6301 if (!dhd_inetaddr_notifier_registered) {
6302 dhd_inetaddr_notifier_registered = TRUE;
6303 register_inetaddr_notifier(&dhd_inetaddr_notifier);
6304 }
6305#endif /* ARP_OFFLOAD_SUPPORT */
6306#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6307 if (!dhd_inet6addr_notifier_registered) {
6308 dhd_inet6addr_notifier_registered = TRUE;
6309 register_inet6addr_notifier(&dhd_inet6addr_notifier);
6310 }
6311#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6312#ifdef DHD_LB
6313 DHD_LB_STATS_INIT(&dhd->pub);
6314#ifdef DHD_LB_RXP
6315 __skb_queue_head_init(&dhd->rx_pend_queue);
6316#endif /* DHD_LB_RXP */
6317#endif /* DHD_LB */
6318 }
6319
6320#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6321#if defined(SET_RPS_CPUS)
6322 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6323#else
6324 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
6325#endif
6326#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6327#if defined(DHD_LB) && defined(DHD_LB_RXP)
6328 if (dhd->rx_napi_netdev == NULL) {
6329 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
6330 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
6331 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
6332 dhd_napi_poll, dhd_napi_weight);
6333 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
6334 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6335 napi_enable(&dhd->rx_napi_struct);
6336 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
6337 skb_queue_head_init(&dhd->rx_napi_queue);
6338 }
6339#endif /* DHD_LB && DHD_LB_RXP */
6340#if defined(NUM_SCB_MAX_PROBE)
6341 dhd_set_scb_probe(&dhd->pub);
6342#endif /* NUM_SCB_MAX_PROBE */
6343#endif /* WL_CFG80211 */
6344 }
6345
6346 /* Allow transmit calls */
6347 netif_start_queue(net);
6348 dhd->pub.up = 1;
6349
6350 OLD_MOD_INC_USE_COUNT;
6351
6352#ifdef BCMDBGFS
6353 dhd_dbg_init(&dhd->pub);
6354#endif
6355
6356exit:
6357 if (ret) {
6358 dhd_stop(net);
6359 }
6360
6361 DHD_PERIM_UNLOCK(&dhd->pub);
6362 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6363
6364#if defined(MULTIPLE_SUPPLICANT)
6365#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6366 mutex_unlock(&_dhd_sdio_mutex_lock_);
6367#endif
6368#endif /* MULTIPLE_SUPPLICANT */
6369
6370 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
6371 return ret;
6372}
6373
6374int dhd_do_driver_init(struct net_device *net)
6375{
6376 dhd_info_t *dhd = NULL;
6377
6378 if (!net) {
6379 DHD_ERROR(("Primary Interface not initialized \n"));
6380 return -EINVAL;
6381 }
6382
6383#ifdef MULTIPLE_SUPPLICANT
6384#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
6385 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6386 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
6387 return 0;
6388 }
6389#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
6390#endif /* MULTIPLE_SUPPLICANT */
6391
6392 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
6393 dhd = DHD_DEV_INFO(net);
6394
6395 /* If driver is already initialized, do nothing
6396 */
6397 if (dhd->pub.busstate == DHD_BUS_DATA) {
6398 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
6399 return 0;
6400 }
6401
6402 if (dhd_open(net) < 0) {
6403 DHD_ERROR(("Driver Init Failed \n"));
6404 return -1;
6405 }
6406
6407 return 0;
6408}
6409
6410int
6411dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6412{
6413
6414#ifdef WL_CFG80211
6415 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6416 return BCME_OK;
6417#endif
6418
6419 /* handle IF event caused by wl commands, SoftAP, WEXT and
6420 * anything else. This has to be done asynchronously otherwise
6421 * DPC will be blocked (and iovars will timeout as DPC has no chance
6422 * to read the response back)
6423 */
6424 if (ifevent->ifidx > 0) {
6425 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6426 if (if_event == NULL) {
6427 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
6428 MALLOCED(dhdinfo->pub.osh)));
6429 return BCME_NOMEM;
6430 }
6431
6432 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6433 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6434 strncpy(if_event->name, name, IFNAMSIZ);
6435 if_event->name[IFNAMSIZ - 1] = '\0';
6436 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
6437 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
6438 }
6439
6440 return BCME_OK;
6441}
6442
6443int
6444dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6445{
6446 dhd_if_event_t *if_event;
6447
6448#ifdef WL_CFG80211
6449 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6450 return BCME_OK;
6451#endif /* WL_CFG80211 */
6452
6453 /* handle IF event caused by wl commands, SoftAP, WEXT and
6454 * anything else
6455 */
6456 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6457 if (if_event == NULL) {
6458 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
6459 MALLOCED(dhdinfo->pub.osh)));
6460 return BCME_NOMEM;
6461 }
6462 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6463 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6464 strncpy(if_event->name, name, IFNAMSIZ);
6465 if_event->name[IFNAMSIZ - 1] = '\0';
6466 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
6467 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
6468
6469 return BCME_OK;
6470}
6471
6472 /* unregister and free the existing net_device interface (if any) in iflist and
6473  * allocate a new one. The slot is reused. This function does NOT register the
6474  * new interface with the Linux kernel; dhd_register_if does that.
6475  */
6476struct net_device*
6477dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
6478 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
6479{
6480 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6481 dhd_if_t *ifp;
6482
6483 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
6484 ifp = dhdinfo->iflist[ifidx];
6485
6486 if (ifp != NULL) {
6487 if (ifp->net != NULL) {
6488 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
6489
6490 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
6491
6492 /* in unregister_netdev case, the interface gets freed by net->destructor
6493 * (which is set to free_netdev)
6494 */
6495 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6496 free_netdev(ifp->net);
6497 } else {
6498 netif_stop_queue(ifp->net);
6499 if (need_rtnl_lock)
6500 unregister_netdev(ifp->net);
6501 else
6502 unregister_netdevice(ifp->net);
6503 }
6504 ifp->net = NULL;
6505 }
6506 } else {
6507 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
6508 if (ifp == NULL) {
6509 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
6510 return NULL;
6511 }
6512 }
6513
6514 memset(ifp, 0, sizeof(dhd_if_t));
6515 ifp->info = dhdinfo;
6516 ifp->idx = ifidx;
6517 ifp->bssidx = bssidx;
6518 if (mac != NULL)
6519 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
6520
6521 /* Allocate etherdev, including space for private structure */
6522 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
6523 if (ifp->net == NULL) {
6524 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
6525 goto fail;
6526 }
6527
6528 /* Setup the dhd interface's netdevice private structure. */
6529 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
6530
6531 if (name && name[0]) {
6532 strncpy(ifp->net->name, name, IFNAMSIZ);
6533 ifp->net->name[IFNAMSIZ - 1] = '\0';
6534 }
6535
6536#ifdef WL_CFG80211
6537 if (ifidx == 0)
6538 ifp->net->destructor = free_netdev;
6539 else
6540 ifp->net->destructor = dhd_netdev_free;
6541#else
6542 ifp->net->destructor = free_netdev;
6543#endif /* WL_CFG80211 */
6544 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
6545 ifp->name[IFNAMSIZ - 1] = '\0';
6546 dhdinfo->iflist[ifidx] = ifp;
6547
6548/* initialize the dongle-provided interface name */
6549 if (dngl_name)
6550 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
6551 else
6552 strncpy(ifp->dngl_name, name, IFNAMSIZ);
6553
6554#ifdef PCIE_FULL_DONGLE
6555 /* Initialize STA info list */
6556 INIT_LIST_HEAD(&ifp->sta_list);
6557 DHD_IF_STA_LIST_LOCK_INIT(ifp);
6558#endif /* PCIE_FULL_DONGLE */
6559
6560#ifdef DHD_L2_FILTER
6561 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
6562 ifp->parp_allnode = TRUE;
6563#endif
6564 return ifp->net;
6565
6566fail:
6567
6568 if (ifp != NULL) {
6569 if (ifp->net != NULL) {
6570 dhd_dev_priv_clear(ifp->net);
6571 free_netdev(ifp->net);
6572 ifp->net = NULL;
6573 }
6574 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6575 ifp = NULL;
6576 }
6577
6578 dhdinfo->iflist[ifidx] = NULL;
6579 return NULL;
6580}
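/*
 * Typical usage (a sketch based on the dhd_attach() path later in this
 * file; dhd_register_if is the registration step named in the comment
 * above, its exact signature assumed):
 *
 *	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
 *	... bus and protocol setup ...
 *	dhd_register_if(...);	(only now does the netdev reach the kernel)
 */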
6581
6582 /* unregister and free the net_device interface associated with the indexed
6583 * slot, also free the slot memory and set the slot pointer to NULL
6584 */
6585int
6586dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
6587{
6588 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6589 dhd_if_t *ifp;
6590
6591 ifp = dhdinfo->iflist[ifidx];
6592
6593 if (ifp != NULL) {
6594 if (ifp->net != NULL) {
6595 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
6596
6597 /* in unregister_netdev case, the interface gets freed by net->destructor
6598 * (which is set to free_netdev)
6599 */
6600 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6601 free_netdev(ifp->net);
6602 } else {
6603 netif_tx_disable(ifp->net);
6604
6605
6606
6607#if defined(SET_RPS_CPUS)
6608 custom_rps_map_clear(ifp->net->_rx);
6609#endif /* SET_RPS_CPUS */
6610#if defined(SET_RPS_CPUS)
6611#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
6612 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
6613#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
6614#endif
6615 if (need_rtnl_lock)
6616 unregister_netdev(ifp->net);
6617 else
6618 unregister_netdevice(ifp->net);
6619 }
6620 ifp->net = NULL;
6621 dhdinfo->iflist[ifidx] = NULL;
6622 }
6623#ifdef DHD_WMF
6624 dhd_wmf_cleanup(dhdpub, ifidx);
6625#endif /* DHD_WMF */
6626#ifdef DHD_L2_FILTER
6627 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
6628 NULL, FALSE, dhdpub->tickcnt);
6629 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
6630 ifp->phnd_arp_table = NULL;
6631#endif /* DHD_L2_FILTER */
6632
6633 dhd_if_del_sta_list(ifp);
6634
6635 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6636
6637 }
6638
6639 return BCME_OK;
6640}
6641
6642#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6643static struct net_device_ops dhd_ops_pri = {
6644 .ndo_open = dhd_open,
6645 .ndo_stop = dhd_stop,
6646 .ndo_get_stats = dhd_get_stats,
6647 .ndo_do_ioctl = dhd_ioctl_entry,
6648 .ndo_start_xmit = dhd_start_xmit,
6649 .ndo_set_mac_address = dhd_set_mac_address,
6650#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
6651 .ndo_set_rx_mode = dhd_set_multicast_list,
6652#else
6653 .ndo_set_multicast_list = dhd_set_multicast_list,
6654#endif
6655};
6656
6657static struct net_device_ops dhd_ops_virt = {
6658 .ndo_get_stats = dhd_get_stats,
6659 .ndo_do_ioctl = dhd_ioctl_entry,
6660 .ndo_start_xmit = dhd_start_xmit,
6661 .ndo_set_mac_address = dhd_set_mac_address,
6662#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
6663 .ndo_set_rx_mode = dhd_set_multicast_list,
6664#else
6665 .ndo_set_multicast_list = dhd_set_multicast_list,
6666#endif
6667};
6668#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
6669
6670#ifdef DEBUGGER
6671extern void debugger_init(void *bus_handle);
6672#endif
6673
6674
6675#ifdef SHOW_LOGTRACE
6676static char *logstrs_path = "/root/logstrs.bin";
6677static char *st_str_file_path = "/root/rtecdc.bin";
6678static char *map_file_path = "/root/rtecdc.map";
6679static char *rom_st_str_file_path = "/root/roml.bin";
6680static char *rom_map_file_path = "/root/roml.map";
6681
6682 #define BYTES_AHEAD_NUM 11 /* the address in a map file line starts this many bytes before the symbol name */
6683 #define READ_NUM_BYTES 1000 /* number of bytes to read from the map file per iteration */
6684 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* rewind the file position by this many bytes between reads */
6685 static char *ramstart_str = "text_start"; /* map file symbol marking the ramstart address */
6686 static char *rodata_start_str = "rodata_start"; /* map file symbol marking the rodata start address */
6687 static char *rodata_end_str = "rodata_end"; /* map file symbol marking the rodata end address */
6688static char *ram_file_str = "rtecdc";
6689static char *rom_file_str = "roml";
6690#define RAMSTART_BIT 0x01
6691#define RDSTART_BIT 0x02
6692#define RDEND_BIT 0x04
6693#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
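/*
 * The parser below assumes map-file lines where an 8-digit hex address
 * sits BYTES_AHEAD_NUM bytes (8 digits + space + type char + space = 11)
 * before the symbol name, e.g. (addresses illustrative):
 *
 *	001814a8 T text_start
 *	0024c100 D rodata_start
 *	00251f3c D rodata_end
 *
 * which matches sscanf(cptr, "%x %c text_start", ...) in dhd_read_map().
 */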
6694
6695module_param(logstrs_path, charp, S_IRUGO);
6696module_param(st_str_file_path, charp, S_IRUGO);
6697module_param(map_file_path, charp, S_IRUGO);
6698module_param(rom_st_str_file_path, charp, S_IRUGO);
6699module_param(rom_map_file_path, charp, S_IRUGO);
6700
6701static void
6702dhd_init_logstrs_array(dhd_event_log_t *temp)
6703{
6704 struct file *filep = NULL;
6705 struct kstat stat;
6706 mm_segment_t fs;
6707 char *raw_fmts = NULL;
6708 int logstrs_size = 0;
6709
6710 logstr_header_t *hdr = NULL;
6711 uint32 *lognums = NULL;
6712 char *logstrs = NULL;
6713 int ram_index = 0;
6714 char **fmts;
6715 int num_fmts = 0;
6716 uint32 i = 0;
6717 int error = 0;
6718
6719 fs = get_fs();
6720 set_fs(KERNEL_DS);
6721
6722 filep = filp_open(logstrs_path, O_RDONLY, 0);
6723
6724 if (IS_ERR(filep)) {
6725 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
6726 goto fail;
6727 }
6728 error = vfs_stat(logstrs_path, &stat);
6729 if (error) {
6730 DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
6731 goto fail;
6732 }
6733 logstrs_size = (int) stat.size;
6734
6735 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6736 if (raw_fmts == NULL) {
6737 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
6738 goto fail;
6739 }
6740 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
6741 DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
6742 goto fail;
6743 }
6744
6745 /* Remember header from the logstrs.bin file */
6746 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
6747 sizeof(logstr_header_t));
6748
6749 if (hdr->log_magic == LOGSTRS_MAGIC) {
6750 /*
6751 * logstrs.bin starts with a header.
6752 */
6753 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
6754 ram_index = (hdr->ram_lognums_offset -
6755 hdr->rom_lognums_offset) / sizeof(uint32);
6756 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
6757 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
6758 } else {
6759 /*
6760 * Legacy logstrs.bin format without header.
6761 */
6762 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
6763 if (num_fmts == 0) {
6764 /* Legacy ROM/RAM logstrs.bin format:
6765 * - ROM 'lognums' section
6766 * - RAM 'lognums' section
6767 * - ROM 'logstrs' section.
6768 * - RAM 'logstrs' section.
6769 *
6770 * 'lognums' is an array of indexes for the strings in the
6771 * 'logstrs' section. The first uint32 is 0 (index of first
6772 * string in ROM 'logstrs' section).
6773 *
6774 * The 4324b5 is the only ROM that uses this legacy format. Use the
6775 * fixed number of ROM fmtnums to find the start of the RAM
6776 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
6777 * find the ROM 'logstrs' section.
6778 */
6779 #define NUM_4324B5_ROM_FMTS 186
6780 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
6781 ram_index = NUM_4324B5_ROM_FMTS;
6782 lognums = (uint32 *) raw_fmts;
6783 num_fmts = ram_index;
6784 logstrs = (char *) &raw_fmts[num_fmts << 2];
6785 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
6786 num_fmts++;
6787 logstrs = (char *) &raw_fmts[num_fmts << 2];
6788 }
6789 } else {
6790 /* Legacy RAM-only logstrs.bin format:
6791 * - RAM 'lognums' section
6792 * - RAM 'logstrs' section.
6793 *
6794 * 'lognums' is an array of indexes for the strings in the
6795 * 'logstrs' section. The first uint32 is an index to the
6796 * start of 'logstrs'. Therefore, if this index is divided
6797 * by 'sizeof(uint32)' it provides the number of logstr
6798 * entries.
6799 */
6800 ram_index = 0;
6801 lognums = (uint32 *) raw_fmts;
6802 logstrs = (char *) &raw_fmts[num_fmts << 2];
6803 }
6804 }
6805 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
6806 if (fmts == NULL) {
6807 DHD_ERROR(("Failed to allocate fmts memory\n"));
6808 goto fail;
6809 }
6810
6811 for (i = 0; i < num_fmts; i++) {
6812 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
6813 * (they are 0-indexed relative to 'rom_logstrs_offset').
6814 *
6815 * RAM lognums are already indexed to point to the correct RAM logstrs (they
6816 * are 0-indexed relative to the start of the logstrs.bin file).
6817 */
6818 if (i == ram_index) {
6819 logstrs = raw_fmts;
6820 }
6821 fmts[i] = &logstrs[lognums[i]];
6822 }
6823 temp->fmts = fmts;
6824 temp->raw_fmts = raw_fmts;
6825 temp->num_fmts = num_fmts;
6826 filp_close(filep, NULL);
6827 set_fs(fs);
6828 return;
6829fail:
6830 if (raw_fmts) {
6831 kfree(raw_fmts);
6832 raw_fmts = NULL;
6833 }
6834 if (!IS_ERR(filep))
6835 filp_close(filep, NULL);
6836 set_fs(fs);
6837 temp->fmts = NULL;
6838 return;
6839}
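/*
 * For reference, the headered logstrs.bin layout parsed above (a sketch
 * reconstructed from the offset arithmetic; field names per
 * logstr_header_t):
 *
 *	offset 0
 *	+---------------------+
 *	| ROM lognums         |  hdr->rom_lognums_offset
 *	| RAM lognums         |  hdr->ram_lognums_offset
 *	| ROM logstrs         |  hdr->rom_logstrs_offset
 *	| RAM logstrs         |
 *	+---------------------+
 *	| logstr_header_t     |  at end of file, log_magic == LOGSTRS_MAGIC
 *	+---------------------+
 */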
6840
6841static int
6842dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
6843 uint32 *rodata_end)
6844{
6845 struct file *filep = NULL;
6846 mm_segment_t fs;
6847 char *raw_fmts = NULL;
6848 uint32 read_size = READ_NUM_BYTES;
6849 int error = 0;
6850 char * cptr = NULL;
6851 char c;
6852 uint8 count = 0;
6853
6854 *ramstart = 0;
6855 *rodata_start = 0;
6856 *rodata_end = 0;
6857
6858 if (fname == NULL) {
6859 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
6860 return BCME_ERROR;
6861 }
6862
6863 fs = get_fs();
6864 set_fs(KERNEL_DS);
6865
6866 filep = filp_open(fname, O_RDONLY, 0);
6867 if (IS_ERR(filep)) {
6868 DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
6869 goto fail;
6870 }
6871
6872 /* Allocate 1 byte more than read_size to NUL-terminate the buffer */
6873 raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
6874 if (raw_fmts == NULL) {
6875 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6876 goto fail;
6877 }
6878
6879 /* read ram start, rodata_start and rodata_end values from map file */
6880
6881 while (count != ALL_MAP_VAL)
6882 {
6883 error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
6884 if (error < 0) {
6885 DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
6886 map_file_path, error));
6887 goto fail;
6888 }
6889
6890 if (error < read_size) {
6891 /*
6892 * Because the file position is rewound by GO_BACK_FILE_POS_NUM_BYTES
6893 * bytes after each full read, EOF is never hit exactly; a short read
6894 * therefore means EOF was reached, so stop reading.
6895 */
6896 break;
6897 }
6898 /* NUL-terminate raw_fmts, as strstr() expects NUL-terminated strings */
6899 raw_fmts[read_size] = '\0';
6900
6901 /* Get ramstart address */
6902 if ((cptr = strstr(raw_fmts, ramstart_str))) {
6903 cptr = cptr - BYTES_AHEAD_NUM;
6904 sscanf(cptr, "%x %c text_start", ramstart, &c);
6905 count |= RAMSTART_BIT;
6906 }
6907
6908 /* Get ram rodata start address */
6909 if ((cptr = strstr(raw_fmts, rodata_start_str))) {
6910 cptr = cptr - BYTES_AHEAD_NUM;
6911 sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6912 count |= RDSTART_BIT;
6913 }
6914
6915 /* Get ram rodata end address */
6916 if ((cptr = strstr(raw_fmts, rodata_end_str))) {
6917 cptr = cptr - BYTES_AHEAD_NUM;
6918 sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6919 count |= RDEND_BIT;
6920 }
6921 memset(raw_fmts, 0, read_size);
6922 /*
6923 * Rewind by a fixed number of bytes so that a symbol string and its
6924 * address are not missed if they are split across two reads.
6925 */
6926 filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
6927 }
6928
6929 DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6930 *ramstart, *rodata_start, *rodata_end));
6931
6932 DHD_ERROR(("readmap over \n"));
6933
6934fail:
6935 if (raw_fmts) {
6936 kfree(raw_fmts);
6937 raw_fmts = NULL;
6938 }
6939 if (!IS_ERR(filep))
6940 filp_close(filep, NULL);
6941
6942 set_fs(fs);
6943 if (count == ALL_MAP_VAL) {
6944 return BCME_OK;
6945 }
6946 DHD_ERROR(("readmap error 0X%x \n", count));
6947 return BCME_ERROR;
6948}
6949
6950static void
6951dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
6952{
6953 struct file *filep = NULL;
6954 mm_segment_t fs;
6955 char *raw_fmts = NULL;
6956 uint32 logstrs_size = 0;
6957
6958 int error = 0;
6959 uint32 ramstart = 0;
6960 uint32 rodata_start = 0;
6961 uint32 rodata_end = 0;
6962 uint32 logfilebase = 0;
6963
6964 error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
6965 if (error == BCME_ERROR) {
6966 DHD_ERROR(("readmap Error!! \n"));
6967 /* don't do event log parsing in actual case */
6968 temp->raw_sstr = NULL;
6969 return;
6970 }
6971 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
6972 ramstart, rodata_start, rodata_end));
6973
6974 fs = get_fs();
6975 set_fs(KERNEL_DS);
6976
6977 filep = filp_open(str_file, O_RDONLY, 0);
6978 if (IS_ERR(filep)) {
6979 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
6980 goto fail;
6981 }
6982
6983 /* Full file size is huge. Just read required part */
6984 logstrs_size = rodata_end - rodata_start;
6985
6986 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
6987 if (raw_fmts == NULL) {
6988 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6989 goto fail;
6990 }
6991
6992 logfilebase = rodata_start - ramstart;
6993
6994 error = generic_file_llseek(filep, logfilebase, SEEK_SET);
6995 if (error < 0) {
6996 DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
6997 goto fail;
6998 }
6999
7000 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
7001 if (error != logstrs_size) {
7002 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
7003 goto fail;
7004 }
7005
7006 if (strstr(str_file, ram_file_str) != NULL) {
7007 temp->raw_sstr = raw_fmts;
7008 temp->ramstart = ramstart;
7009 temp->rodata_start = rodata_start;
7010 temp->rodata_end = rodata_end;
7011 } else if (strstr(str_file, rom_file_str) != NULL) {
7012 temp->rom_raw_sstr = raw_fmts;
7013 temp->rom_ramstart = ramstart;
7014 temp->rom_rodata_start = rodata_start;
7015 temp->rom_rodata_end = rodata_end;
7016 }
7017
7018 filp_close(filep, NULL);
7019 set_fs(fs);
7020
7021 return;
7022fail:
7023 if (raw_fmts) {
7024 kfree(raw_fmts);
7025 raw_fmts = NULL;
7026 }
7027 if (!IS_ERR(filep))
7028 filp_close(filep, NULL);
7029 set_fs(fs);
7030 if (strstr(str_file, ram_file_str) != NULL) {
7031 temp->raw_sstr = NULL;
7032 } else if (strstr(str_file, rom_file_str) != NULL) {
7033 temp->rom_raw_sstr = NULL;
7034 }
7035 return;
7036}
7037
7038#endif /* SHOW_LOGTRACE */
7039
7040
7041dhd_pub_t *
7042dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
7043{
7044 dhd_info_t *dhd = NULL;
7045 struct net_device *net = NULL;
7046 char if_name[IFNAMSIZ] = {'\0'};
7047 uint32 bus_type = -1;
7048 uint32 bus_num = -1;
7049 uint32 slot_num = -1;
7050 wifi_adapter_info_t *adapter = NULL;
7051
7052 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
7053 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7054
7055#ifdef STBLINUX
7056 DHD_ERROR(("%s\n", driver_target));
7057#endif /* STBLINUX */
7058 /* will implement get_ids for DBUS later */
7059#if defined(BCMSDIO)
7060 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
7061#endif
7062 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7063
7064 /* Allocate primary dhd_info */
7065 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
7066 if (dhd == NULL) {
7067 dhd = MALLOC(osh, sizeof(dhd_info_t));
7068 if (dhd == NULL) {
7069 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
7070 goto fail;
7071 }
7072 }
7073 memset(dhd, 0, sizeof(dhd_info_t));
7074 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
7075
7076 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
7077
7078 dhd->pub.osh = osh;
7079 dhd->adapter = adapter;
7080
7081#ifdef GET_CUSTOM_MAC_ENABLE
7082 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
7083#endif /* GET_CUSTOM_MAC_ENABLE */
7084#ifdef CUSTOM_FORCE_NODFS_FLAG
7085 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
7086 dhd->pub.force_country_change = TRUE;
7087#endif /* CUSTOM_FORCE_NODFS_FLAG */
7088#ifdef CUSTOM_COUNTRY_CODE
7089 get_customized_country_code(dhd->adapter,
7090 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
7091 dhd->pub.dhd_cflags);
7092#endif /* CUSTOM_COUNTRY_CODE */
7093 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
7094 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
7095
7096 /* Initialize thread based operation and lock */
7097 sema_init(&dhd->sdsem, 1);
7098
7099 /* Link to info module */
7100 dhd->pub.info = dhd;
7101
7102
7103 /* Link to bus module */
7104 dhd->pub.bus = bus;
7105 dhd->pub.hdrlen = bus_hdrlen;
7106
7107 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
7108 * because dhd_detach checks whether .info is NULL.
7109 */
7110 if (dhd_conf_attach(&dhd->pub) != 0) {
7111 DHD_ERROR(("dhd_conf_attach failed\n"));
7112 goto fail;
7113 }
7114 dhd_conf_reset(&dhd->pub);
7115 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
7116 dhd_conf_preinit(&dhd->pub);
7117
7118 /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware name.
7119 * This is indeed a hack, but we have to make it work properly until we have a better
7120 * solution.
7121 */
7122 dhd_update_fw_nv_path(dhd);
7123#ifndef BUILD_IN_KERNEL
7124 dhd_conf_read_config(&dhd->pub, dhd->conf_path);
7125#endif
7126
7127 /* Set network interface name if it was provided as module parameter */
7128 if (iface_name[0]) {
7129 int len;
7130 char ch;
7131 strncpy(if_name, iface_name, IFNAMSIZ);
7132 if_name[IFNAMSIZ - 1] = 0;
7133 len = strlen(if_name);
7134 ch = if_name[len - 1];
7135 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
7136 strcat(if_name, "%d");
7137 }
7138
7139 /* Pass NULL for dngl_name so the host keeps if_name in the dngl_name member */
7140 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
7141 if (net == NULL) {
7142 goto fail;
7143 }
7144
7145
7146 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
7147#ifdef DHD_L2_FILTER
7148 /* initialize the l2_filter_cnt */
7149 dhd->pub.l2_filter_cnt = 0;
7150#endif
7151#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7152 net->open = NULL;
7153#else
7154 net->netdev_ops = NULL;
7155#endif
7156
7157 mutex_init(&dhd->dhd_iovar_mutex);
7158 sema_init(&dhd->proto_sem, 1);
7159
7160#ifdef PROP_TXSTATUS
7161 spin_lock_init(&dhd->wlfc_spinlock);
7162
7163 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
7164 dhd->pub.plat_init = dhd_wlfc_plat_init;
7165 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
7166
7167#ifdef DHD_WLFC_THREAD
7168 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
7169 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
7170 if (IS_ERR(dhd->pub.wlfc_thread)) {
7171 DHD_ERROR(("create wlfc thread failed\n"));
7172 goto fail;
7173 } else {
7174 wake_up_process(dhd->pub.wlfc_thread);
7175 }
7176#endif /* DHD_WLFC_THREAD */
7177#endif /* PROP_TXSTATUS */
7178
7179 /* Initialize other structure content */
7180 init_waitqueue_head(&dhd->ioctl_resp_wait);
7181 init_waitqueue_head(&dhd->d3ack_wait);
7182 init_waitqueue_head(&dhd->ctrl_wait);
7183 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
7184 dhd->pub.dhd_bus_busy_state = 0;
7185
7186 /* Initialize the spinlocks */
7187 spin_lock_init(&dhd->sdlock);
7188 spin_lock_init(&dhd->txqlock);
7189 spin_lock_init(&dhd->dhd_lock);
7190 spin_lock_init(&dhd->rxf_lock);
7191#if defined(RXFRAME_THREAD)
7192 dhd->rxthread_enabled = TRUE;
7193#endif /* defined(RXFRAME_THREAD) */
7194
7195#ifdef DHDTCPACK_SUPPRESS
7196 spin_lock_init(&dhd->tcpack_lock);
7197#endif /* DHDTCPACK_SUPPRESS */
7198
7199 /* Initialize Wakelock stuff */
7200 spin_lock_init(&dhd->wakelock_spinlock);
7201 spin_lock_init(&dhd->wakelock_evt_spinlock);
7202 DHD_OS_WAKE_LOCK_INIT(dhd);
7203 dhd->wakelock_wd_counter = 0;
7204#ifdef CONFIG_HAS_WAKELOCK
7205 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
7206#endif /* CONFIG_HAS_WAKELOCK */
7207
7208#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7209 mutex_init(&dhd->dhd_net_if_mutex);
7210 mutex_init(&dhd->dhd_suspend_mutex);
7211#endif
7212 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7213
7214 /* Attach and link in the protocol */
7215 if (dhd_prot_attach(&dhd->pub) != 0) {
7216 DHD_ERROR(("dhd_prot_attach failed\n"));
7217 goto fail;
7218 }
7219 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
7220
7221#ifdef WL_CFG80211
7222 /* Attach and link in the cfg80211 */
7223 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
7224 DHD_ERROR(("wl_cfg80211_attach failed\n"));
7225 goto fail;
7226 }
7227
7228 dhd_monitor_init(&dhd->pub);
7229 dhd_state |= DHD_ATTACH_STATE_CFG80211;
7230#endif
7231#ifdef DHD_LOG_DUMP
7232 dhd_log_dump_init(&dhd->pub);
7233#endif /* DHD_LOG_DUMP */
7234#if defined(WL_WIRELESS_EXT)
7235 /* Attach and link in the iw */
7236 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7237 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
7238 DHD_ERROR(("wl_iw_attach failed\n"));
7239 goto fail;
7240 }
7241 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
7242 }
7243#endif /* defined(WL_WIRELESS_EXT) */
7244
7245#ifdef SHOW_LOGTRACE
7246 dhd_init_logstrs_array(&dhd->event_data);
7247 dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
7248 dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
7249#endif /* SHOW_LOGTRACE */
7250
7251 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
7252 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
7253 goto fail;
7254 }
7255
7256
7257
7258 /* Set up the watchdog timer */
7259 init_timer(&dhd->timer);
7260 dhd->timer.data = (ulong)dhd;
7261 dhd->timer.function = dhd_watchdog;
7262 dhd->default_wd_interval = dhd_watchdog_ms;
7263
7264 if (dhd_watchdog_prio >= 0) {
7265 /* Initialize watchdog thread */
7266 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
7267 if (dhd->thr_wdt_ctl.thr_pid < 0) {
7268 goto fail;
7269 }
7270
7271 } else {
7272 dhd->thr_wdt_ctl.thr_pid = -1;
7273 }
7274
7275#ifdef DHD_PCIE_RUNTIMEPM
7276 /* Set up the runtime PM idle-count timer */
7277 init_timer(&dhd->rpm_timer);
7278 dhd->rpm_timer.data = (ulong)dhd;
7279 dhd->rpm_timer.function = dhd_runtimepm;
7280 dhd->rpm_timer_valid = FALSE;
7281
7282 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
7283 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
7284 if (dhd->thr_rpm_ctl.thr_pid < 0) {
7285 goto fail;
7286 }
7287#endif /* DHD_PCIE_RUNTIMEPM */
7288
7289#ifdef DEBUGGER
7290 debugger_init((void *) bus);
7291#endif
7292
7293 /* Set up the bottom half handler */
7294 if (dhd_dpc_prio >= 0) {
7295 /* Initialize DPC thread */
7296 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
7297 if (dhd->thr_dpc_ctl.thr_pid < 0) {
7298 goto fail;
7299 }
7300 } else {
7301 /* use tasklet for dpc */
7302 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
7303 dhd->thr_dpc_ctl.thr_pid = -1;
7304 }
7305
7306 if (dhd->rxthread_enabled) {
7307 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
7308 /* Initialize RXF thread */
7309 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
7310 if (dhd->thr_rxf_ctl.thr_pid < 0) {
7311 goto fail;
7312 }
7313 }
7314
7315 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
7316
7317#if defined(CONFIG_PM_SLEEP)
7318 if (!dhd_pm_notifier_registered) {
7319 dhd_pm_notifier_registered = TRUE;
7320 dhd->pm_notifier.notifier_call = dhd_pm_callback;
7321 dhd->pm_notifier.priority = 10;
7322 register_pm_notifier(&dhd->pm_notifier);
7323 }
7324
7325#endif /* CONFIG_PM_SLEEP */
7326
7327#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7328 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
7329 dhd->early_suspend.suspend = dhd_early_suspend;
7330 dhd->early_suspend.resume = dhd_late_resume;
7331 register_early_suspend(&dhd->early_suspend);
7332 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
7333#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7334
7335#ifdef ARP_OFFLOAD_SUPPORT
7336 dhd->pend_ipaddr = 0;
7337 if (!dhd_inetaddr_notifier_registered) {
7338 dhd_inetaddr_notifier_registered = TRUE;
7339 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7340 }
7341#endif /* ARP_OFFLOAD_SUPPORT */
7342
7343#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7344 if (!dhd_inet6addr_notifier_registered) {
7345 dhd_inet6addr_notifier_registered = TRUE;
7346 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7347 }
7348#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7349 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
7350#ifdef DEBUG_CPU_FREQ
7351 dhd->new_freq = alloc_percpu(int);
7352 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
7353 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7354#endif
7355#ifdef DHDTCPACK_SUPPRESS
7356#ifdef BCMSDIO
7357 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
7358#elif defined(BCMPCIE)
7359 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
7360#else
7361 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7362#endif /* BCMSDIO */
7363#endif /* DHDTCPACK_SUPPRESS */
7364
7365#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7366#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7367
7368 dhd_state |= DHD_ATTACH_STATE_DONE;
7369 dhd->dhd_state = dhd_state;
7370
7371 dhd_found++;
7372#ifdef DHD_DEBUG_PAGEALLOC
7373 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
7374#endif /* DHD_DEBUG_PAGEALLOC */
7375
7376#if defined(DHD_LB)
7377 DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
7378
7379 dhd_lb_set_default_cpus(dhd);
7380
7381 /* Initialize the CPU Masks */
7382 if (dhd_cpumasks_init(dhd) == 0) {
7383
7384 /* Now we have the current CPU maps, run through candidacy */
7385 dhd_select_cpu_candidacy(dhd);
7386
 7387		/*
 7388		 * Now that the CPU masks are initialized, register with the
 7389		 * CPU hotplug framework so the CPU running each job can be
 7390		 * changed dynamically by the candidacy algorithm.
 7391		 */
7392 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
7393 register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
7394 } else {
 7395		/*
 7396		 * CPU mask initialization failed, so the candidacy algorithm
 7397		 * will not run; load balancing is still honoured, but each job
 7398		 * keeps the CPUs statically assigned to it during init.
 7399		 */
7400 dhd->cpu_notifier.notifier_call = NULL;
 7401		DHD_ERROR(("%s(): dhd_cpumasks_init failed; CPUs for jobs will be static\n",
 7402			__FUNCTION__));
7403 }
7404
7405
7406 DHD_LB_STATS_INIT(&dhd->pub);
7407
7408 /* Initialize the Load Balancing Tasklets and Napi object */
7409#if defined(DHD_LB_TXC)
7410 tasklet_init(&dhd->tx_compl_tasklet,
7411 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
7412 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
7413 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
7414#endif /* DHD_LB_TXC */
7415
7416#if defined(DHD_LB_RXC)
7417 tasklet_init(&dhd->rx_compl_tasklet,
7418 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
7419 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
7420 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
7421#endif /* DHD_LB_RXC */
7422
7423#if defined(DHD_LB_RXP)
7424 __skb_queue_head_init(&dhd->rx_pend_queue);
7425 skb_queue_head_init(&dhd->rx_napi_queue);
7426
7427 /* Initialize the work that dispatches NAPI job to a given core */
7428 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
7429 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
7430#endif /* DHD_LB_RXP */
7431
7432#endif /* DHD_LB */
7433
7434 INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
7435
7436 (void)dhd_sysfs_init(dhd);
7437
7438 return &dhd->pub;
7439
7440fail:
7441 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
7442 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
7443 __FUNCTION__, dhd_state, &dhd->pub));
7444 dhd->dhd_state = dhd_state;
7445 dhd_detach(&dhd->pub);
7446 dhd_free(&dhd->pub);
7447 }
7448
7449 return NULL;
7450}
7451
7452#include <linux/delay.h>
7453
7454void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
7455{
7456 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7457
7458 schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
7459}
7460
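/*
 * The DHD operating mode is inferred from the firmware file name: an
 * "_apsta", "_p2p", "_ibss" or "_mfg" suffix selects the matching mode,
 * and anything else falls back to plain STA mode.
 */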
7461int dhd_get_fw_mode(dhd_info_t *dhdinfo)
7462{
7463 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
7464 return DHD_FLAG_HOSTAP_MODE;
7465 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
7466 return DHD_FLAG_P2P_MODE;
7467 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
7468 return DHD_FLAG_IBSS_MODE;
7469 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
7470 return DHD_FLAG_MFG_MODE;
7471
7472 return DHD_FLAG_STA_MODE;
7473}
7474
7475bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
7476{
7477 int fw_len;
7478 int nv_len;
7479 int conf_len;
7480 const char *fw = NULL;
7481 const char *nv = NULL;
7482 const char *conf = NULL;
7483 wifi_adapter_info_t *adapter = dhdinfo->adapter;
7484
7485
 7486	/* Update the firmware and nvram paths. A path may come from adapter info or from a
 7487	 * module parameter; the adapter-info path is used for initialization only (it won't
 7488	 * change).
 7489	 *
 7490	 * The firmware_path/nvram_path module parameters may be changed by the system at run
 7491	 * time; when one changes it must be copied to dhdinfo->fw_path. An Android private
 7492	 * command may also change dhdinfo->fw_path, so the module parameter is cleared once
 7493	 * copied; the path is not updated again until the parameter changes (first char != '\0').
 7494	 */
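	/*
	 * Effective precedence, lowest to highest: kernel-config defaults
	 * (CONFIG_BCMDHD_FW_PATH / CONFIG_BCMDHD_NVRAM_PATH), adapter info
	 * (consulted only while the path is still empty), then the module
	 * parameters, which override everything when non-empty.
	 */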
7495
7496 /* set default firmware and nvram path for built-in type driver */
7497// if (!dhd_download_fw_on_driverload) {
7498#ifdef CONFIG_BCMDHD_FW_PATH
7499 fw = CONFIG_BCMDHD_FW_PATH;
7500#endif /* CONFIG_BCMDHD_FW_PATH */
7501#ifdef CONFIG_BCMDHD_NVRAM_PATH
7502 nv = CONFIG_BCMDHD_NVRAM_PATH;
7503#endif /* CONFIG_BCMDHD_NVRAM_PATH */
7504// }
7505
7506 /* check if we need to initialize the path */
7507 if (dhdinfo->fw_path[0] == '\0') {
7508 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
7509 fw = adapter->fw_path;
7510
7511 }
7512 if (dhdinfo->nv_path[0] == '\0') {
7513 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
7514 nv = adapter->nv_path;
7515 }
7516 if (dhdinfo->conf_path[0] == '\0') {
7517 if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
7518 conf = adapter->conf_path;
7519 }
7520
7521 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
7522 *
7523 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
7524 */
7525 if (firmware_path[0] != '\0')
7526 fw = firmware_path;
7527 if (nvram_path[0] != '\0')
7528 nv = nvram_path;
7529 if (config_path[0] != '\0')
7530 conf = config_path;
7531
7532 if (fw && fw[0] != '\0') {
7533 fw_len = strlen(fw);
7534 if (fw_len >= sizeof(dhdinfo->fw_path)) {
7535 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
7536 return FALSE;
7537 }
7538 strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
7539 if (dhdinfo->fw_path[fw_len-1] == '\n')
7540 dhdinfo->fw_path[fw_len-1] = '\0';
7541 }
7542 if (nv && nv[0] != '\0') {
7543 nv_len = strlen(nv);
7544 if (nv_len >= sizeof(dhdinfo->nv_path)) {
7545 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
7546 return FALSE;
7547 }
7548 strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
7549 if (dhdinfo->nv_path[nv_len-1] == '\n')
7550 dhdinfo->nv_path[nv_len-1] = '\0';
7551 }
7552 if (conf && conf[0] != '\0') {
7553 conf_len = strlen(conf);
7554 if (conf_len >= sizeof(dhdinfo->conf_path)) {
7555 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
7556 return FALSE;
7557 }
7558 strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
7559 if (dhdinfo->conf_path[conf_len-1] == '\n')
7560 dhdinfo->conf_path[conf_len-1] = '\0';
7561 }
7562
7563#if 0
7564 /* clear the path in module parameter */
7565 if (dhd_download_fw_on_driverload) {
7566 firmware_path[0] = '\0';
7567 nvram_path[0] = '\0';
7568 config_path[0] = '\0';
7569 }
7570#endif
7571
7572#ifndef BCMEMBEDIMAGE
7573 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
7574 if (dhdinfo->fw_path[0] == '\0') {
7575 DHD_ERROR(("firmware path not found\n"));
7576 return FALSE;
7577 }
7578 if (dhdinfo->nv_path[0] == '\0') {
7579 DHD_ERROR(("nvram path not found\n"));
7580 return FALSE;
7581 }
7582 if (dhdinfo->conf_path[0] == '\0') {
7583 dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
7584 }
7585#ifdef CONFIG_PATH_AUTO_SELECT
7586 dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path);
7587#endif
7588#endif /* BCMEMBEDIMAGE */
7589
7590 return TRUE;
7591}
7592
7593#ifdef CUSTOMER_HW4_DEBUG
7594bool dhd_validate_chipid(dhd_pub_t *dhdp)
7595{
7596 uint chipid = dhd_bus_chip_id(dhdp);
7597 uint config_chipid;
7598
7599#ifdef BCM4359_CHIP
7600 config_chipid = BCM4359_CHIP_ID;
7601#elif defined(BCM4358_CHIP)
7602 config_chipid = BCM4358_CHIP_ID;
7603#elif defined(BCM4354_CHIP)
7604 config_chipid = BCM4354_CHIP_ID;
7605#elif defined(BCM4356_CHIP)
7606 config_chipid = BCM4356_CHIP_ID;
7607#elif defined(BCM4339_CHIP)
7608 config_chipid = BCM4339_CHIP_ID;
7609#elif defined(BCM43349_CHIP)
7610 config_chipid = BCM43349_CHIP_ID;
7611#elif defined(BCM4335_CHIP)
7612 config_chipid = BCM4335_CHIP_ID;
7613#elif defined(BCM43241_CHIP)
7614 config_chipid = BCM4324_CHIP_ID;
7615#elif defined(BCM4330_CHIP)
7616 config_chipid = BCM4330_CHIP_ID;
7617#elif defined(BCM43430_CHIP)
7618 config_chipid = BCM43430_CHIP_ID;
7619#elif defined(BCM4334W_CHIP)
7620 config_chipid = BCM43342_CHIP_ID;
7621#elif defined(BCM43455_CHIP)
7622 config_chipid = BCM4345_CHIP_ID;
7623#else
 7624	DHD_ERROR(("%s: Unknown chip id; for a new chipset,"
 7625		" please add CONFIG_BCMXXXX to the kernel config and a"
 7626		" BCMXXXX_CHIP definition to the DHD driver\n",
 7627		__FUNCTION__));
7628 config_chipid = 0;
7629
7630 return FALSE;
 7631#endif /* BCM4359_CHIP */
7632
7633#if defined(BCM4359_CHIP)
7634 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
7635 return TRUE;
7636 }
7637#endif /* BCM4359_CHIP */
7638
7639 return config_chipid == chipid;
7640}
7641#endif /* CUSTOMER_HW4_DEBUG */
7642
7643int
7644dhd_bus_start(dhd_pub_t *dhdp)
7645{
7646 int ret = -1;
7647 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7648 unsigned long flags;
7649
7650 ASSERT(dhd);
7651
7652 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
7653
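	/*
	 * Bring-up sequence: download firmware/nvram if the bus is down,
	 * start the watchdog, initialize the bus, register the OOB interrupt
	 * (when configured), create the H2D flow rings (PCIe full dongle),
	 * run protocol init, and finally sync state with the dongle.
	 */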
7654 DHD_PERIM_LOCK(dhdp);
7655
7656 /* try to download image and nvram to the dongle */
7657 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
 7658		/* Indicate FW download is not yet done */
7659 dhd->pub.is_fw_download_done = FALSE;
7660 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
7661 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
7662 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
7663 dhd->fw_path, dhd->nv_path, dhd->conf_path);
7664 if (ret < 0) {
7665 DHD_ERROR(("%s: failed to download firmware %s\n",
7666 __FUNCTION__, dhd->fw_path));
7667 DHD_PERIM_UNLOCK(dhdp);
7668 return ret;
7669 }
7670 /* Indicate FW Download has succeeded */
7671 dhd->pub.is_fw_download_done = TRUE;
7672 }
7673 if (dhd->pub.busstate != DHD_BUS_LOAD) {
7674 DHD_PERIM_UNLOCK(dhdp);
7675 return -ENETDOWN;
7676 }
7677
7678 dhd_os_sdlock(dhdp);
7679
7680 /* Start the watchdog timer */
7681 dhd->pub.tickcnt = 0;
7682 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
7683 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
7684
7685 /* Bring up the bus */
7686 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
7687
7688 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
7689 dhd_os_sdunlock(dhdp);
7690 DHD_PERIM_UNLOCK(dhdp);
7691 return ret;
7692 }
7693#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7694#if defined(BCMPCIE_OOB_HOST_WAKE)
7695 dhd_os_sdunlock(dhdp);
7696#endif /* BCMPCIE_OOB_HOST_WAKE */
7697 /* Host registration for OOB interrupt */
7698 if (dhd_bus_oob_intr_register(dhdp)) {
7699 /* deactivate timer and wait for the handler to finish */
7700#if !defined(BCMPCIE_OOB_HOST_WAKE)
7701 DHD_GENERAL_LOCK(&dhd->pub, flags);
7702 dhd->wd_timer_valid = FALSE;
7703 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7704 del_timer_sync(&dhd->timer);
7705
7706 dhd_os_sdunlock(dhdp);
7707#endif /* !BCMPCIE_OOB_HOST_WAKE */
7708 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7709 DHD_PERIM_UNLOCK(dhdp);
7710 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7711 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
7712 return -ENODEV;
7713 }
7714
7715#if defined(BCMPCIE_OOB_HOST_WAKE)
7716 dhd_os_sdlock(dhdp);
7717 dhd_bus_oob_intr_set(dhdp, TRUE);
7718#else
7719 /* Enable oob at firmware */
7720 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7721#endif /* BCMPCIE_OOB_HOST_WAKE */
7722#elif defined(FORCE_WOWLAN)
7723 /* Enable oob at firmware */
7724 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7725#endif
7726#ifdef PCIE_FULL_DONGLE
7727 {
7728 /* max_h2d_rings includes H2D common rings */
7729 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
7730
7731 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
7732 max_h2d_rings));
7733 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
7734 dhd_os_sdunlock(dhdp);
7735 DHD_PERIM_UNLOCK(dhdp);
7736 return ret;
7737 }
7738 }
7739#endif /* PCIE_FULL_DONGLE */
7740
7741 /* Do protocol initialization necessary for IOCTL/IOVAR */
7742#ifdef PCIE_FULL_DONGLE
7743 dhd_os_sdunlock(dhdp);
7744#endif /* PCIE_FULL_DONGLE */
7745 ret = dhd_prot_init(&dhd->pub);
 7746	if (unlikely(ret != BCME_OK)) {
7747 DHD_PERIM_UNLOCK(dhdp);
7748 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7749 return ret;
7750 }
7751#ifdef PCIE_FULL_DONGLE
7752 dhd_os_sdlock(dhdp);
7753#endif /* PCIE_FULL_DONGLE */
7754
7755 /* If bus is not ready, can't come up */
7756 if (dhd->pub.busstate != DHD_BUS_DATA) {
7757 DHD_GENERAL_LOCK(&dhd->pub, flags);
7758 dhd->wd_timer_valid = FALSE;
7759 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7760 del_timer_sync(&dhd->timer);
7761 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
7762 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7763 dhd_os_sdunlock(dhdp);
7764 DHD_PERIM_UNLOCK(dhdp);
7765 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7766 return -ENODEV;
7767 }
7768
7769 dhd_os_sdunlock(dhdp);
7770
7771 /* Bus is ready, query any dongle information */
7772 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7773 DHD_GENERAL_LOCK(&dhd->pub, flags);
7774 dhd->wd_timer_valid = FALSE;
7775 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7776 del_timer_sync(&dhd->timer);
7777 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
7778 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7779 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7780 DHD_PERIM_UNLOCK(dhdp);
7781 return ret;
7782 }
7783
7784#ifdef ARP_OFFLOAD_SUPPORT
7785 if (dhd->pend_ipaddr) {
7786#ifdef AOE_IP_ALIAS_SUPPORT
7787 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
7788#endif /* AOE_IP_ALIAS_SUPPORT */
7789 dhd->pend_ipaddr = 0;
7790 }
7791#endif /* ARP_OFFLOAD_SUPPORT */
7792
7793 DHD_PERIM_UNLOCK(dhdp);
7794 return 0;
7795}
7796
7797#ifdef WLTDLS
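/*
 * Toggles TDLS via the "tdls_enable" iovar and then, when auto mode is
 * requested, programs the auto-op parameters: the idle time plus the
 * high/low RSSI thresholds (presumably the points at which links are set
 * up and torn down automatically).
 */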
7798int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
7799{
7800 char iovbuf[WLC_IOCTL_SMLEN];
7801 uint32 tdls = tdls_on;
7802 int ret = 0;
7803 uint32 tdls_auto_op = 0;
7804 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
7805 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
7806 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
7807 BCM_REFERENCE(mac);
7808 if (!FW_SUPPORTED(dhd, tdls))
7809 return BCME_ERROR;
7810
7811 if (dhd->tdls_enable == tdls_on)
7812 goto auto_mode;
7813 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
7814 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7815 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
7816 goto exit;
7817 }
7818 dhd->tdls_enable = tdls_on;
7819auto_mode:
7820
7821 tdls_auto_op = auto_on;
7822 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
7823 iovbuf, sizeof(iovbuf));
7824 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7825 sizeof(iovbuf), TRUE, 0)) < 0) {
7826 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
7827 goto exit;
7828 }
7829
7830 if (tdls_auto_op) {
7831 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
7832 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
7833 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7834 sizeof(iovbuf), TRUE, 0)) < 0) {
7835 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
7836 goto exit;
7837 }
7838 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
7839 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7840 sizeof(iovbuf), TRUE, 0)) < 0) {
7841 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
7842 goto exit;
7843 }
7844 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
7845 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7846 sizeof(iovbuf), TRUE, 0)) < 0) {
7847 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
7848 goto exit;
7849 }
7850 }
7851
7852exit:
7853 return ret;
7854}
7855
7856int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
7857{
7858 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7859 int ret = 0;
7860 if (dhd)
7861 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
7862 else
7863 ret = BCME_ERROR;
7864 return ret;
7865}
7866int
7867dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
7868{
7869 char iovbuf[WLC_IOCTL_SMLEN];
7870 int ret = 0;
7871 bool auto_on = false;
7872 uint32 mode = wfd_mode;
7873
7874#ifdef ENABLE_TDLS_AUTO_MODE
7875 if (wfd_mode) {
7876 auto_on = false;
7877 } else {
7878 auto_on = true;
7879 }
7880#else
7881 auto_on = false;
7882#endif /* ENABLE_TDLS_AUTO_MODE */
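	/* Apply the WFD mode with TDLS toggled off, then re-enable it with
	 * the chosen auto mode (the iovar is set between a disable/enable
	 * pair).
	 */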
7883 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
7884 if (ret < 0) {
7885 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
7886 return ret;
7887 }
7888
7889
7890 bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
7891 iovbuf, sizeof(iovbuf));
7892 if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7893 sizeof(iovbuf), TRUE, 0)) < 0) &&
7894 (ret != BCME_UNSUPPORTED)) {
 7895		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
7896 return ret;
7897 }
7898
7899 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
7900 if (ret < 0) {
7901 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
7902 return ret;
7903 }
7904
7905 dhd->tdls_mode = mode;
7906 return ret;
7907}
7908#ifdef PCIE_FULL_DONGLE
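/*
 * Maintains the driver's singly-linked TDLS peer table: on connect a new
 * node is pushed at the head of the list; on disconnect the peer's flow
 * rings are torn down before the node is unlinked and freed.
 */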
7909void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
7910{
7911 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7912 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
7913 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
7914 tdls_peer_node_t *new = NULL, *prev = NULL;
7915 dhd_if_t *dhdif;
7916 uint8 sa[ETHER_ADDR_LEN];
7917 int ifidx = dhd_net2idx(dhd, dev);
7918
7919 if (ifidx == DHD_BAD_IF)
7920 return;
7921
7922 dhdif = dhd->iflist[ifidx];
7923 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
7924
7925 if (connect) {
7926 while (cur != NULL) {
7927 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
 7928				DHD_ERROR(("%s: TDLS peer already exists %d\n",
7929 __FUNCTION__, __LINE__));
7930 return;
7931 }
7932 cur = cur->next;
7933 }
7934
7935 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
7936 if (new == NULL) {
7937 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
7938 return;
7939 }
7940 memcpy(new->addr, da, ETHER_ADDR_LEN);
7941 new->next = dhdp->peer_tbl.node;
7942 dhdp->peer_tbl.node = new;
7943 dhdp->peer_tbl.tdls_peer_count++;
7944
7945 } else {
7946 while (cur != NULL) {
7947 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
7948 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
7949 if (prev)
7950 prev->next = cur->next;
7951 else
7952 dhdp->peer_tbl.node = cur->next;
7953 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
7954 dhdp->peer_tbl.tdls_peer_count--;
7955 return;
7956 }
7957 prev = cur;
7958 cur = cur->next;
7959 }
7960 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
7961 }
7962}
7963#endif /* PCIE_FULL_DONGLE */
7964#endif
7965
7966bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
7967{
7968 if (!dhd)
7969 return FALSE;
7970
7971 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
7972 return TRUE;
7973 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
7974 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
7975 return TRUE;
7976 else
7977 return FALSE;
7978}
7979#if !defined(AP) && defined(WLP2P)
 7980/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the
 7981 * firmware is named fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the
 7982 * STA firmware and, if so, enable concurrent mode (apply the P2P settings). SoftAP
 7983 * firmware is still named fw_bcmdhd_apsta.
 7984 */
7985uint32
7986dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
7987{
7988 int32 ret = 0;
7989 char buf[WLC_IOCTL_SMLEN];
7990 bool mchan_supported = FALSE;
 7991	/* If dhd->op_mode is already set to HOSTAP or manufacturing
 7992	 * test mode, use that mode as it is.
 7993	 */
7994 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
7995 return 0;
7996 if (FW_SUPPORTED(dhd, vsdb)) {
7997 mchan_supported = TRUE;
7998 }
7999 if (!FW_SUPPORTED(dhd, p2p)) {
8000 DHD_TRACE(("Chip does not support p2p\n"));
8001 return 0;
8002 } else {
 8003		/* Chip supports p2p, but verify that p2p is really implemented in the firmware */
8004 memset(buf, 0, sizeof(buf));
8005 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
8006 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8007 FALSE, 0)) < 0) {
8008 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
8009 return 0;
8010 } else {
8011 if (buf[0] == 1) {
8012 /* By default, chip supports single chan concurrency,
8013 * now lets check for mchan
8014 */
8015 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
8016 if (mchan_supported)
8017 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
8018 if (FW_SUPPORTED(dhd, rsdb)) {
8019 ret |= DHD_FLAG_RSDB_MODE;
8020 }
8021 if (FW_SUPPORTED(dhd, mp2p)) {
8022 ret |= DHD_FLAG_MP2P_MODE;
8023 }
8024#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
8025 return ret;
8026#else
8027 return 0;
8028#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
8029 }
8030 }
8031 }
8032 return 0;
8033}
8034#endif
8035
8036#ifdef SUPPORT_AP_POWERSAVE
8037#define RXCHAIN_PWRSAVE_PPS 10
8038#define RXCHAIN_PWRSAVE_QUIET_TIME 10
8039#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
8040int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
8041{
8042 char iovbuf[128];
8043 int32 pps = RXCHAIN_PWRSAVE_PPS;
8044 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
8045 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
8046
8047 if (enable) {
8048 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8049 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8050 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8051 DHD_ERROR(("Failed to enable AP power save\n"));
8052 }
8053 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
8054 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8055 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8056 DHD_ERROR(("Failed to set pps\n"));
8057 }
8058 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
8059 4, iovbuf, sizeof(iovbuf));
8060 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8061 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8062 DHD_ERROR(("Failed to set quiet time\n"));
8063 }
8064 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
8065 4, iovbuf, sizeof(iovbuf));
8066 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8067 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8068 DHD_ERROR(("Failed to set stas assoc check\n"));
8069 }
8070 } else {
8071 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8072 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8073 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8074 DHD_ERROR(("Failed to disable AP power save\n"));
8075 }
8076 }
8077
8078 return 0;
8079}
8080#endif /* SUPPORT_AP_POWERSAVE */
8081
8082
8083int
8084dhd_preinit_ioctls(dhd_pub_t *dhd)
8085{
8086 int ret = 0;
8087 char eventmask[WL_EVENTING_MASK_LEN];
8088 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
8089 uint32 buf_key_b4_m4 = 1;
8090#ifndef WL_CFG80211
8091 u32 up = 0;
8092#endif
8093 uint8 msglen;
8094 eventmsgs_ext_t *eventmask_msg = NULL;
8095 char* iov_buf = NULL;
8096 int ret2 = 0;
8097#if defined(CUSTOM_AMPDU_BA_WSIZE)
8098 uint32 ampdu_ba_wsize = 0;
8099#endif
8100#if defined(CUSTOM_AMPDU_MPDU)
8101 int32 ampdu_mpdu = 0;
8102#endif
8103#if defined(CUSTOM_AMPDU_RELEASE)
8104 int32 ampdu_release = 0;
8105#endif
8106#if defined(CUSTOM_AMSDU_AGGSF)
8107 int32 amsdu_aggsf = 0;
8108#endif
8109#ifdef SUPPORT_SENSORHUB
8110 int32 shub_enable = 0;
8111#endif /* SUPPORT_SENSORHUB */
8112#if defined(BCMSDIO)
8113#ifdef PROP_TXSTATUS
8114 int wlfc_enable = TRUE;
8115#ifndef DISABLE_11N
8116 uint32 hostreorder = 1;
8117 uint wl_down = 1;
8118#endif /* DISABLE_11N */
8119#endif /* PROP_TXSTATUS */
8120#endif
8121#ifdef PCIE_FULL_DONGLE
8122 uint32 wl_ap_isolate;
8123#endif /* PCIE_FULL_DONGLE */
8124
8125#if defined(BCMSDIO)
8126 /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
8127 uint32 frameburst = 0;
8128#else
8129 uint32 frameburst = 1;
8130#endif /* BCMSDIO */
 8131	int maxtxpktglom = 0;
8132#ifdef DHD_ENABLE_LPC
8133 uint32 lpc = 1;
8134#endif /* DHD_ENABLE_LPC */
8135 uint power_mode = PM_FAST;
8136#if defined(BCMSDIO)
8137 uint32 dongle_align = DHD_SDALIGN;
8138 uint32 glom = CUSTOM_GLOM_SETTING;
8139#endif /* defined(BCMSDIO) */
8140#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8141 uint32 credall = 1;
8142#endif
8143 uint bcn_timeout = dhd->conf->bcn_timeout;
8144#ifdef ENABLE_BCN_LI_BCN_WAKEUP
8145 uint32 bcn_li_bcn = 1;
8146#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8147 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
8148#if defined(ARP_OFFLOAD_SUPPORT)
8149 int arpoe = 1;
8150#endif
8151 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
8152 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
8153 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
8154 char buf[WLC_IOCTL_SMLEN];
8155 char *ptr;
8156 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
8157#ifdef ROAM_ENABLE
8158 uint roamvar = 0;
8159 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
8160 int roam_scan_period[2] = {10, WLC_BAND_ALL};
8161 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
8162#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
8163 int roam_fullscan_period = 60;
8164#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8165 int roam_fullscan_period = 120;
8166#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8167#else
8168#ifdef DISABLE_BUILTIN_ROAM
8169 uint roamvar = 1;
8170#endif /* DISABLE_BUILTIN_ROAM */
8171#endif /* ROAM_ENABLE */
8172
8173#if defined(SOFTAP)
8174 uint dtim = 1;
8175#endif
8176#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
8177 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
8178 struct ether_addr p2p_ea;
8179#endif
8180#ifdef SOFTAP_UAPSD_OFF
8181 uint32 wme_apsd = 0;
8182#endif /* SOFTAP_UAPSD_OFF */
8183#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
8184 uint32 apsta = 1; /* Enable APSTA mode */
8185#elif defined(SOFTAP_AND_GC)
8186 uint32 apsta = 0;
8187 int ap_mode = 1;
8188#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
8189#ifdef GET_CUSTOM_MAC_ENABLE
8190 struct ether_addr ea_addr;
8191#endif /* GET_CUSTOM_MAC_ENABLE */
8192
8193#ifdef DISABLE_11N
8194 uint32 nmode = 0;
8195#endif /* DISABLE_11N */
8196
8197#ifdef USE_WL_TXBF
8198 uint32 txbf = 1;
8199#endif /* USE_WL_TXBF */
8200#if defined(PROP_TXSTATUS)
8201#ifdef USE_WFA_CERT_CONF
8202 uint32 proptx = 0;
8203#endif /* USE_WFA_CERT_CONF */
8204#endif /* PROP_TXSTATUS */
8205#ifdef CUSTOM_PSPRETEND_THR
8206 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
8207#endif
8208 uint32 rsdb_mode = 0;
8209#ifdef ENABLE_TEMP_THROTTLING
8210 wl_temp_control_t temp_control;
8211#endif /* ENABLE_TEMP_THROTTLING */
8212#ifdef DISABLE_PRUNED_SCAN
8213 uint32 scan_features = 0;
8214#endif /* DISABLE_PRUNED_SCAN */
8215#ifdef CUSTOM_EVENT_PM_WAKE
8216 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
8217#endif /* CUSTOM_EVENT_PM_WAKE */
8218#ifdef PKT_FILTER_SUPPORT
8219 dhd_pkt_filter_enable = TRUE;
8220#endif /* PKT_FILTER_SUPPORT */
8221#ifdef WLTDLS
8222 dhd->tdls_enable = FALSE;
8223 dhd_tdls_set_mode(dhd, false);
8224#endif /* WLTDLS */
8225 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
8226 DHD_TRACE(("Enter %s\n", __FUNCTION__));
8227
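	/*
	 * Note on the pattern used throughout this function: bcm_mkiovar()
	 * packs the iovar name (NUL-terminated) followed by its value into a
	 * scratch buffer, which is then sent to the dongle with WLC_SET_VAR
	 * (or read back with WLC_GET_VAR).
	 */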
8228 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
8229#ifdef DHDTCPACK_SUPPRESS
8230 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
8231 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
8232#endif
8233
8234 dhd->op_mode = 0;
8235#ifdef CUSTOMER_HW4_DEBUG
8236 if (!dhd_validate_chipid(dhd)) {
 8237		DHD_ERROR(("%s: CONFIG_BCMXXXX and chip ID(%x) are mismatched\n",
 8238			__FUNCTION__, dhd_bus_chip_id(dhd)));
8239#ifndef SUPPORT_MULTIPLE_CHIPS
8240 ret = BCME_BADARG;
8241 goto done;
8242#endif /* !SUPPORT_MULTIPLE_CHIPS */
8243 }
8244#endif /* CUSTOMER_HW4_DEBUG */
8245 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8246 (op_mode == DHD_FLAG_MFG_MODE)) {
8247#ifdef DHD_PCIE_RUNTIMEPM
8248 /* Disable RuntimePM in mfg mode */
8249 DHD_DISABLE_RUNTIME_PM(dhd);
 8250		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
 8251#endif /* DHD_PCIE_RUNTIMEPM */
 8252		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
 8253		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
 8254		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
 8255			__FUNCTION__));
8256 } else {
8257 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
8258 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
8259 }
8260#ifdef GET_CUSTOM_MAC_ENABLE
8261 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
8262 if (!ret) {
8263 memset(buf, 0, sizeof(buf));
8264 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
8265 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8266 if (ret < 0) {
8267 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
8268 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
8269 ret = BCME_NOTUP;
8270 goto done;
8271 }
8272 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
8273 } else {
8274#endif /* GET_CUSTOM_MAC_ENABLE */
8275 /* Get the default device MAC address directly from firmware */
8276 memset(buf, 0, sizeof(buf));
8277 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
8278 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8279 FALSE, 0)) < 0) {
 8280			DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
8281 ret = BCME_NOTUP;
8282 goto done;
8283 }
8284 /* Update public MAC address after reading from Firmware */
8285 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
8286
8287#ifdef GET_CUSTOM_MAC_ENABLE
8288 }
8289#endif /* GET_CUSTOM_MAC_ENABLE */
8290
 8291	/* get capabilities from firmware */
8292 {
8293 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
8294 memset(dhd->fw_capabilities, 0, cap_buf_size);
8295 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
8296 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
8297 (cap_buf_size - 1), FALSE, 0)) < 0)
8298 {
8299 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
8300 __FUNCTION__, ret));
8301 return 0;
8302 }
8303
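		/* Wrap the capability string in spaces; presumably this lets
		 * feature checks such as FW_SUPPORTED() match " <cap> " as a
		 * whole word without false substring hits.
		 */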
8304 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
8305 dhd->fw_capabilities[0] = ' ';
8306 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
8307 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
8308 }
8309
8310 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
8311 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
8312#ifdef SET_RANDOM_MAC_SOFTAP
8313 uint rand_mac;
8314#endif /* SET_RANDOM_MAC_SOFTAP */
8315 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
8316#if defined(ARP_OFFLOAD_SUPPORT)
8317 arpoe = 0;
8318#endif
8319#ifdef PKT_FILTER_SUPPORT
8320 dhd_pkt_filter_enable = FALSE;
8321#endif
8322#ifdef SET_RANDOM_MAC_SOFTAP
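		/* Compose a locally administered MAC for SoftAP: keep the
		 * vendor OUI (with the local-admin bit set in the first
		 * octet) and randomize the lower three octets; the upper
		 * nibble of octet 3 is forced to 0xF.
		 */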
8323 SRANDOM32((uint)jiffies);
8324 rand_mac = RANDOM32();
8325 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
8326 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
8327 iovbuf[2] = (unsigned char)vendor_oui;
8328 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
8329 iovbuf[4] = (unsigned char)(rand_mac >> 8);
8330 iovbuf[5] = (unsigned char)(rand_mac >> 16);
8331
8332 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
8333 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8334 if (ret < 0) {
 8335			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
8336 } else
8337 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
8338#endif /* SET_RANDOM_MAC_SOFTAP */
8339#if !defined(AP) && defined(WL_CFG80211)
8340 /* Turn off MPC in AP mode */
8341 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8342 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8343 sizeof(iovbuf), TRUE, 0)) < 0) {
8344 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
8345 }
8346#endif
8347#ifdef USE_DYNAMIC_F2_BLKSIZE
8348 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8349#endif /* USE_DYNAMIC_F2_BLKSIZE */
8350#ifdef SUPPORT_AP_POWERSAVE
8351 dhd_set_ap_powersave(dhd, 0, TRUE);
8352#endif /* SUPPORT_AP_POWERSAVE */
8353#ifdef SOFTAP_UAPSD_OFF
8354 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
8355 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8356 sizeof(iovbuf), TRUE, 0)) < 0) {
8357 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
8358 __FUNCTION__, ret));
8359 }
8360#endif /* SOFTAP_UAPSD_OFF */
8361 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8362 (op_mode == DHD_FLAG_MFG_MODE)) {
8363#if defined(ARP_OFFLOAD_SUPPORT)
8364 arpoe = 0;
8365#endif /* ARP_OFFLOAD_SUPPORT */
8366#ifdef PKT_FILTER_SUPPORT
8367 dhd_pkt_filter_enable = FALSE;
8368#endif /* PKT_FILTER_SUPPORT */
8369 dhd->op_mode = DHD_FLAG_MFG_MODE;
8370#ifdef USE_DYNAMIC_F2_BLKSIZE
8371 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8372#endif /* USE_DYNAMIC_F2_BLKSIZE */
8373 if (FW_SUPPORTED(dhd, rsdb)) {
8374 rsdb_mode = 0;
8375 bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
8376 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8377 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
 8378				DHD_ERROR(("%s Disabling rsdb_mode failed ret= %d\n",
 8379					__FUNCTION__, ret));
8380 }
8381 }
8382 } else {
8383 uint32 concurrent_mode = 0;
8384 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
8385 (op_mode == DHD_FLAG_P2P_MODE)) {
8386#if defined(ARP_OFFLOAD_SUPPORT)
8387 arpoe = 0;
8388#endif
8389#ifdef PKT_FILTER_SUPPORT
8390 dhd_pkt_filter_enable = FALSE;
8391#endif
8392 dhd->op_mode = DHD_FLAG_P2P_MODE;
8393 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
8394 (op_mode == DHD_FLAG_IBSS_MODE)) {
8395 dhd->op_mode = DHD_FLAG_IBSS_MODE;
8396 } else
8397 dhd->op_mode = DHD_FLAG_STA_MODE;
8398#if !defined(AP) && defined(WLP2P)
8399 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
8400 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
8401#if defined(ARP_OFFLOAD_SUPPORT)
8402 arpoe = 1;
8403#endif
8404 dhd->op_mode |= concurrent_mode;
8405 }
8406
8407 /* Check if we are enabling p2p */
8408 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8409 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8410 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8411 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8412 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
8413 }
8414
8415#if defined(SOFTAP_AND_GC)
8416 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
8417 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
8418 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
8419 }
8420#endif
8421 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
8422 ETHER_SET_LOCALADDR(&p2p_ea);
8423 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
8424 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
8425 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8426 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8427 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
8428 } else {
8429 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
8430 }
8431 }
8432#else
8433 (void)concurrent_mode;
8434#endif
8435 }
8436
8437#ifdef RSDB_MODE_FROM_FILE
8438 (void)dhd_rsdb_mode_from_file(dhd);
8439#endif /* RSDB_MODE_FROM_FILE */
8440
8441#ifdef DISABLE_PRUNED_SCAN
8442 if (FW_SUPPORTED(dhd, rsdb)) {
8443 memset(iovbuf, 0, sizeof(iovbuf));
8444 bcm_mkiovar("scan_features", (char *)&scan_features,
8445 4, iovbuf, sizeof(iovbuf));
8446 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
8447 iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
 8448			DHD_ERROR(("%s get scan_features failed ret=%d\n",
 8449				__FUNCTION__, ret));
8450 } else {
8451 memcpy(&scan_features, iovbuf, 4);
8452 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
8453 memset(iovbuf, 0, sizeof(iovbuf));
8454 bcm_mkiovar("scan_features", (char *)&scan_features,
8455 4, iovbuf, sizeof(iovbuf));
8456 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8457 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
 8458				DHD_ERROR(("%s set scan_features failed ret=%d\n",
 8459					__FUNCTION__, ret));
8460 }
8461 }
8462 }
8463#endif /* DISABLE_PRUNED_SCAN */
8464
8465 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
8466 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
8467 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
8468 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
8469 dhd->info->rxthread_enabled = FALSE;
8470 else
8471 dhd->info->rxthread_enabled = TRUE;
8472 #endif
8473 /* Set Country code */
8474 if (dhd->dhd_cspec.ccode[0] != 0) {
8475 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
8476 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
8477 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
8478 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8479 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
8480 } else {
8481 dhd_conf_set_country(dhd);
8482 dhd_conf_fix_country(dhd);
8483 }
8484 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
8485
8486
8487 /* Set Listen Interval */
8488 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
8489 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8490 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
8491
8492#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
8493#ifdef USE_WFA_CERT_CONF
8494 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
8495 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
8496 }
8497#endif /* USE_WFA_CERT_CONF */
 8498	/* Disable built-in roaming to allow the external supplicant to take care of roaming */
8499 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
8500 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8501#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
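	/* Each roam tunable below is a {value, band} pair; WLC_BAND_ALL
	 * applies the setting to every band.
	 */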
8502#if defined(ROAM_ENABLE)
8503 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
8504 sizeof(roam_trigger), TRUE, 0)) < 0)
8505 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
8506 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
8507 sizeof(roam_scan_period), TRUE, 0)) < 0)
8508 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
 8509	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
 8510		sizeof(roam_delta), TRUE, 0)) < 0)
 8511		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
8512 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
8513 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8514 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
8515#endif /* ROAM_ENABLE */
8516 dhd_conf_set_roam(dhd);
8517
8518#ifdef CUSTOM_EVENT_PM_WAKE
8519 bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
8520 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8521 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
8522 }
8523#endif /* CUSTOM_EVENT_PM_WAKE */
8524#ifdef WLTDLS
8525#ifdef ENABLE_TDLS_AUTO_MODE
8526 /* by default TDLS on and auto mode on */
8527 _dhd_tdls_enable(dhd, true, true, NULL);
8528#else
8529 /* by default TDLS on and auto mode off */
8530 _dhd_tdls_enable(dhd, true, false, NULL);
8531#endif /* ENABLE_TDLS_AUTO_MODE */
8532#endif /* WLTDLS */
8533
8534#ifdef DHD_ENABLE_LPC
8535 /* Set lpc 1 */
8536 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8537 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8538 sizeof(iovbuf), TRUE, 0)) < 0) {
8539 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
8540
8541 if (ret == BCME_NOTDOWN) {
8542 uint wl_down = 1;
8543 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
8544 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8545 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
8546
8547 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8548 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8549 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
8550 }
8551 }
8552#endif /* DHD_ENABLE_LPC */
8553 dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
8554
8555 /* Set PowerSave mode */
8556 if (dhd->conf->pm >= 0)
8557 power_mode = dhd->conf->pm;
8558 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
8559 dhd_conf_set_fw_string_cmd(dhd, "pm2_sleep_ret", dhd->conf->pm2_sleep_ret, 0, FALSE);
8560
8561#if defined(BCMSDIO)
8562 /* Match Host and Dongle rx alignment */
8563 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
8564 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8565
8566#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
 8567	/* enable credall to reduce the chance of running out of bus credits. */
8568 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
8569 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8570#endif
8571
8572#ifdef USE_WFA_CERT_CONF
8573 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
8574 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
8575 }
8576#endif /* USE_WFA_CERT_CONF */
8577 if (glom != DEFAULT_GLOM_VALUE) {
8578 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
8579 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
8580 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8581 }
8582#endif /* defined(BCMSDIO) */
8583
 8584	/* Set a timeout to report link down when beacons are lost and roaming is off */
8585 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
8586 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
 8587	/* Set assoc_retry_max, the dongle's reconnect attempt count for the target AP */
8588 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
8589 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8590#if defined(AP) && !defined(WLP2P)
8591 /* Turn off MPC in AP mode */
8592 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8593 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8594 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8595 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8596#endif /* defined(AP) && !defined(WLP2P) */
8597 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
8598 dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
8599 dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
8600 dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
8601 dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
8602 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
8603 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
8604 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
8605 dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
8606 dhd_conf_set_fw_string_cmd(dhd, "vhtmode", dhd->conf->vhtmode, 0, TRUE);
8607 dhd_conf_set_bw_cap(dhd);
8608
8609#ifdef MIMO_ANT_SETTING
8610 dhd_sel_ant_from_file(dhd);
8611#endif /* MIMO_ANT_SETTING */
8612
8613#if defined(SOFTAP)
8614 if (ap_fw_loaded == TRUE) {
8615 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
8616 }
8617#endif
8618
8619#if defined(KEEP_ALIVE)
8620 {
8621 /* Set Keep Alive : be sure to use FW with -keepalive */
8622 int res;
8623
8624#if defined(SOFTAP)
8625 if (ap_fw_loaded == FALSE)
8626#endif
8627 if (!(dhd->op_mode &
8628 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
8629 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
 8630				DHD_ERROR(("%s set keepalive failed %d\n",
8631 __FUNCTION__, res));
8632 }
8633 }
8634#endif /* defined(KEEP_ALIVE) */
8635
8636#ifdef USE_WL_TXBF
8637 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
8638 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8639 sizeof(iovbuf), TRUE, 0)) < 0) {
8640 DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
8641 }
8642#endif /* USE_WL_TXBF */
8643 dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
8644
8645#ifdef USE_WFA_CERT_CONF
8646#ifdef USE_WL_FRAMEBURST
8647 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
8648 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
8649 }
8650#endif /* USE_WL_FRAMEBURST */
8651#ifdef DISABLE_FRAMEBURST_VSDB
8652 g_frameburst = frameburst;
8653#endif /* DISABLE_FRAMEBURST_VSDB */
8654#endif /* USE_WFA_CERT_CONF */
8655#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
 8656	/* Disable Framebursting for SoftAP */
8657 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
8658 frameburst = 0;
8659 }
8660#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
8661 /* Set frameburst to value */
8662 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
8663 sizeof(frameburst), TRUE, 0)) < 0) {
8664 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
8665 }
8666 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_FAKEFRAG", WLC_SET_FAKEFRAG, dhd->conf->frameburst, 0, FALSE);
8667#if defined(CUSTOM_AMPDU_BA_WSIZE)
8668 /* Set ampdu ba wsize to 64 or 16 */
8669#ifdef CUSTOM_AMPDU_BA_WSIZE
8670 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
8671#endif
8672 if (ampdu_ba_wsize != 0) {
8673 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
8674 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8675 sizeof(iovbuf), TRUE, 0)) < 0) {
8676 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
8677 __FUNCTION__, ampdu_ba_wsize, ret));
8678 }
8679 }
8680#endif
8681 dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
8682
8683 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
8684 if (iov_buf == NULL) {
8685 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
8686 ret = BCME_NOMEM;
8687 goto done;
8688 }
8689#ifdef ENABLE_TEMP_THROTTLING
8690 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
8691 memset(&temp_control, 0, sizeof(temp_control));
8692 temp_control.enable = 1;
8693 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
8694 bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
8695 sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
8696 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
8697 if (ret < 0) {
 8698			DHD_ERROR(("%s Set temp_throttle_control failed %d\n",
 8699				__FUNCTION__, ret));
8700 }
8701 }
8702#endif /* ENABLE_TEMP_THROTTLING */
8703#if defined(CUSTOM_AMPDU_MPDU)
8704 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
8705 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
8706 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
8707 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8708 sizeof(iovbuf), TRUE, 0)) < 0) {
8709 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
8710 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
8711 }
8712 }
8713#endif /* CUSTOM_AMPDU_MPDU */
8714
8715#if defined(CUSTOM_AMPDU_RELEASE)
8716 ampdu_release = CUSTOM_AMPDU_RELEASE;
8717 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
8718 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
8719 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8720 sizeof(iovbuf), TRUE, 0)) < 0) {
8721 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
8722 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
8723 }
8724 }
8725#endif /* CUSTOM_AMPDU_RELEASE */
8726
8727#if defined(CUSTOM_AMSDU_AGGSF)
8728 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
8729 if (amsdu_aggsf != 0) {
8730 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
8731 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8732 if (ret < 0) {
8733 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
8734 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
8735 }
8736 }
8737#endif /* CUSTOM_AMSDU_AGGSF */
8738
8739#ifdef CUSTOM_PSPRETEND_THR
 8740	/* Set the ps-pretend threshold */
8741 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
8742 iovbuf, sizeof(iovbuf));
8743 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8744 sizeof(iovbuf), TRUE, 0)) < 0) {
8745 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
8746 __FUNCTION__, ret));
8747 }
8748#endif
 8749/* Tune txpkt glom */
8750 maxtxpktglom = 32;
8751 bcm_mkiovar("bus:maxtxpktglom", (char *)&maxtxpktglom, 4,
8752 iovbuf, sizeof(iovbuf));
8753 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8754 sizeof(iovbuf), TRUE, 0)) < 0) {
8755 DHD_ERROR(("failed to set maxtxpktglom (%d)\n", ret));
8756 }
 8757/* End of Tune txpkt glom */
8758 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
8759 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8760 sizeof(iovbuf), TRUE, 0)) < 0) {
8761 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
8762 }
8763
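	/*
	 * The event mask is a bit vector indexed by WLC_E_* event number.
	 * Read the firmware's current mask, set or clear the bits this host
	 * cares about, then write the updated mask back.
	 */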
8764 /* Read event_msgs mask */
8765 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8766 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8767 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
8768 goto done;
8769 }
8770 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
8771
8772 /* Setup event_msgs */
8773 setbit(eventmask, WLC_E_SET_SSID);
8774 setbit(eventmask, WLC_E_PRUNE);
8775 setbit(eventmask, WLC_E_AUTH);
8776 setbit(eventmask, WLC_E_AUTH_IND);
8777 setbit(eventmask, WLC_E_ASSOC);
8778 setbit(eventmask, WLC_E_REASSOC);
8779 setbit(eventmask, WLC_E_REASSOC_IND);
8780 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
8781 setbit(eventmask, WLC_E_DEAUTH);
8782 setbit(eventmask, WLC_E_DEAUTH_IND);
8783 setbit(eventmask, WLC_E_DISASSOC_IND);
8784 setbit(eventmask, WLC_E_DISASSOC);
8785 setbit(eventmask, WLC_E_JOIN);
8786 setbit(eventmask, WLC_E_START);
8787 setbit(eventmask, WLC_E_ASSOC_IND);
8788 setbit(eventmask, WLC_E_PSK_SUP);
8789 setbit(eventmask, WLC_E_LINK);
8790 setbit(eventmask, WLC_E_MIC_ERROR);
8791 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
8792 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
8793#ifndef WL_CFG80211
8794 setbit(eventmask, WLC_E_PMKID_CACHE);
8795 setbit(eventmask, WLC_E_TXFAIL);
8796#endif
8797 setbit(eventmask, WLC_E_JOIN_START);
8798// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
8799#ifdef DHD_DEBUG
8800 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
8801#endif
8802#ifdef WLMEDIA_HTSF
8803 setbit(eventmask, WLC_E_HTSFSYNC);
8804#endif /* WLMEDIA_HTSF */
8805#ifdef PNO_SUPPORT
8806 setbit(eventmask, WLC_E_PFN_NET_FOUND);
8807 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
8808 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
8809 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
8810#endif /* PNO_SUPPORT */
8811 /* enable dongle roaming event */
8812 setbit(eventmask, WLC_E_ROAM);
8813 setbit(eventmask, WLC_E_BSSID);
8814#ifdef WLTDLS
8815 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
8816#endif /* WLTDLS */
8817#ifdef WL_CFG80211
8818 setbit(eventmask, WLC_E_ESCAN_RESULT);
8819 setbit(eventmask, WLC_E_AP_STARTED);
8820 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8821 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
8822 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
8823 }
8824#endif /* WL_CFG80211 */
8825
8826#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
8827 if (dhd_logtrace_from_file(dhd)) {
8828 setbit(eventmask, WLC_E_TRACE);
8829 } else {
8830 clrbit(eventmask, WLC_E_TRACE);
8831 }
8832#elif defined(SHOW_LOGTRACE)
8833 setbit(eventmask, WLC_E_TRACE);
8834#else
8835 clrbit(eventmask, WLC_E_TRACE);
8836#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
8837
8838 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
8839#ifdef DHD_LOSSLESS_ROAMING
8840 setbit(eventmask, WLC_E_ROAM_PREP);
8841#endif
8842#ifdef CUSTOM_EVENT_PM_WAKE
8843 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
8844#endif /* CUSTOM_EVENT_PM_WAKE */
8845#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
8846 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
8847#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
8848
8849 /* Write updated Event mask */
8850 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8851 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8852 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
8853 goto done;
8854 }
8855
 8856	/* Build the event-mask-ext iovar for events numbered beyond 128 */
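	/*
	 * The classic "event_msgs" mask covers only 128 events; the extended
	 * iovar carries ROUNDUP(WLC_E_LAST, NBBY)/NBBY mask bytes plus the
	 * eventmsgs_ext_t header, enough for every WLC_E_* value.
	 */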
8857 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
8858 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
8859 if (eventmask_msg == NULL) {
8860 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
8861 ret = BCME_NOMEM;
8862 goto done;
8863 }
8864 bzero(eventmask_msg, msglen);
8865 eventmask_msg->ver = EVENTMSGS_VER;
8866 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8867
8868 /* Read event_msgs_ext mask */
8869 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
8870 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
8871 if (ret2 == 0) { /* event_msgs_ext must be supported */
8872 bcopy(iov_buf, eventmask_msg, msglen);
8873#ifdef GSCAN_SUPPORT
8874 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
8875 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
8876 setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
8877#endif /* GSCAN_SUPPORT */
8878#ifdef BT_WIFI_HANDOVER
8879 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
8880#endif /* BT_WIFI_HANDOVER */
8881
8882 /* Write updated Event mask */
8883 eventmask_msg->ver = EVENTMSGS_VER;
8884 eventmask_msg->command = EVENTMSGS_SET_MASK;
8885 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8886 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
8887 msglen, iov_buf, WLC_IOCTL_SMLEN);
8888 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8889 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
8890 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
8891 goto done;
8892 }
8893 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
8894 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
 8895		DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n",
8896 __FUNCTION__, ret2));
8897 } else {
8898 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
8899 ret = ret2;
8900 goto done;
8901 }
8902
8903 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
8904 sizeof(scan_assoc_time), TRUE, 0);
8905 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
8906 sizeof(scan_unassoc_time), TRUE, 0);
8907 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
8908 sizeof(scan_passive_time), TRUE, 0);
8909
8910#ifdef ARP_OFFLOAD_SUPPORT
8911 /* Set and enable ARP offload feature for STA only */
8912#if defined(SOFTAP)
8913 if (arpoe && !ap_fw_loaded)
8914#else
8915 if (arpoe)
8916#endif
8917 {
8918 dhd_arp_offload_enable(dhd, TRUE);
8919 dhd_arp_offload_set(dhd, dhd_arp_mode);
8920 } else {
8921 dhd_arp_offload_enable(dhd, FALSE);
8922 dhd_arp_offload_set(dhd, 0);
8923 }
8924 dhd_arp_enable = arpoe;
8925#endif /* ARP_OFFLOAD_SUPPORT */
8926
8927#ifdef PKT_FILTER_SUPPORT
 8928	/* Set up default definitions for pktfilter, enabled in suspend */
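	/* Filter strings are consumed by the DHD packet-filter engine. Based
	 * on the values used here, the fields appear to be:
	 *   <id> <negate> <type> <offset> <bitmask> <pattern>
	 * e.g. "105 0 0 12 0xFFFF 0x0806" matches EtherType 0x0806 (ARP) at
	 * byte offset 12 of the frame.
	 */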
8929 if (dhd_master_mode) {
8930 dhd->pktfilter_count = 6;
8931 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
8932 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
8933 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8934 /* apply APP pktfilter */
8935 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
8936
8937 /* Setup filter to allow only unicast */
8938 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
8939
8940 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
8941 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
8942
8943#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
8944 dhd->pktfilter_count = 4;
8945 /* Setup filter to block broadcast and NAT Keepalive packets */
8946 /* discard all broadcast packets */
8947 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
8948 /* discard NAT Keepalive packets */
8949 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
8950 /* discard NAT Keepalive packets */
8951 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
8952 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
8953#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
8954 } else
8955 dhd_conf_discard_pkt_filter(dhd);
8956 dhd_conf_add_pkt_filter(dhd);
8957
8958#if defined(SOFTAP)
8959 if (ap_fw_loaded) {
8960 dhd_enable_packet_filter(0, dhd);
8961 }
8962#endif /* defined(SOFTAP) */
8963 dhd_set_packet_filter(dhd);
8964#endif /* PKT_FILTER_SUPPORT */
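/*
 * Note on the filter strings above (an informal reading of how the pktfilter
 * iovar consumes them; the field names here are descriptive, not from the
 * source): each string is
 *   "<id> <polarity> <type> <offset> <bitmask> <pattern>"
 * e.g. "105 0 0 12 0xFFFF 0x0806" matches frames whose two bytes at offset 12
 * (the ethertype), masked with 0xFFFF, equal 0x0806 (ARP).
 */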
8965#ifdef DISABLE_11N
8966 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
8967 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8968 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
8969#endif /* DISABLE_11N */
8970
8971#ifdef ENABLE_BCN_LI_BCN_WAKEUP
8972 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
8973 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8974#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8975 /* query for 'ver' to get version info from firmware */
8976 memset(buf, 0, sizeof(buf));
8977 ptr = buf;
8978 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
8979 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
8980 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
8981 else {
8982 bcmstrtok(&ptr, "\n", 0);
8983 /* Print fw version info */
8984 DHD_ERROR(("Firmware version = %s\n", buf));
8985 strncpy(fw_version, buf, FW_VER_STR_LEN);
8986 dhd_set_version_info(dhd, buf);
8987#ifdef WRITE_WLANINFO
8988 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
8989#endif /* WRITE_WLANINFO */
8990 }
8991
8992#if defined(BCMSDIO)
8993 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
8994	// terence 20151210: set bus:txglom after dhd_txglom_enable since it may be changed in dhd_conf_set_txglom_params
8995 dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 0, FALSE);
8996#endif /* defined(BCMSDIO) */
8997
8998 dhd_conf_set_disable_proptx(dhd);
8999#if defined(BCMSDIO)
9000#ifdef PROP_TXSTATUS
9001 if (disable_proptx ||
9002#ifdef PROP_TXSTATUS_VSDB
9003 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
9004 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
9005 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
9006#endif /* PROP_TXSTATUS_VSDB */
9007 FALSE) {
9008 wlfc_enable = FALSE;
9009 }
9010
9011#ifdef USE_WFA_CERT_CONF
9012 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
9013 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
9014 wlfc_enable = proptx;
9015 }
9016#endif /* USE_WFA_CERT_CONF */
9017
9018#ifndef DISABLE_11N
9019 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
9020 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
9021 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
9022 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
9023 if (ret2 != BCME_UNSUPPORTED)
9024 ret = ret2;
9025
9026 if (ret == BCME_NOTDOWN) {
9027 uint wl_down = 1;
9028 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
9029 sizeof(wl_down), TRUE, 0);
9030 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
9031 __FUNCTION__, ret2, hostreorder));
9032
9033 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
9034 iovbuf, sizeof(iovbuf));
9035 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9036 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
9037 if (ret2 != BCME_UNSUPPORTED)
9038 ret = ret2;
9039 }
9040 if (ret2 != BCME_OK)
9041 hostreorder = 0;
9042 }
9043#endif /* DISABLE_11N */
9044
9045
9046 if (wlfc_enable)
9047 dhd_wlfc_init(dhd);
9048#ifndef DISABLE_11N
9049 else if (hostreorder)
9050 dhd_wlfc_hostreorder_init(dhd);
9051#endif /* DISABLE_11N */
9052
9053#endif /* PROP_TXSTATUS */
9054#endif /* BCMSDIO */
9055#ifdef PCIE_FULL_DONGLE
9056 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
9057 if (FW_SUPPORTED(dhd, ap)) {
9058 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
9059 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
9060 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9061 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9062 }
9063#endif /* PCIE_FULL_DONGLE */
9064#ifdef PNO_SUPPORT
9065 if (!dhd->pno_state) {
9066 dhd_pno_init(dhd);
9067 }
9068#endif
9069#ifdef WL11U
9070 dhd_interworking_enable(dhd);
9071#endif /* WL11U */
9072#ifndef WL_CFG80211
9073 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
9074#endif
9075
9076#ifdef SUPPORT_SENSORHUB
9077 bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
9078	if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
9079		FALSE, 0)) < 0) {
9080		DHD_ERROR(("%s failed to get sensorhub enable information %d\n",
9081			__FUNCTION__, ret2));
9082 dhd->info->shub_enable = 0;
9083 } else {
9084 memcpy(&shub_enable, iovbuf, sizeof(uint32));
9085 dhd->info->shub_enable = shub_enable;
9086 DHD_ERROR(("%s: checking sensorhub enable %d\n",
9087 __FUNCTION__, dhd->info->shub_enable));
9088 }
9089#endif /* SUPPORT_SENSORHUB */
9090done:
9091
9092 if (eventmask_msg)
9093 kfree(eventmask_msg);
9094 if (iov_buf)
9095 kfree(iov_buf);
9096
9097 return ret;
9098}
9099
9100
9101int
9102dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
9103{
9104 char buf[strlen(name) + 1 + cmd_len];
9105 int len = sizeof(buf);
9106 wl_ioctl_t ioc;
9107 int ret;
9108
9109 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
9110
9111 memset(&ioc, 0, sizeof(ioc));
9112
9113 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
9114 ioc.buf = buf;
9115 ioc.len = len;
9116 ioc.set = set;
9117
9118 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
9119 if (!set && ret >= 0)
9120 memcpy(cmd_buf, buf, cmd_len);
9121
9122 return ret;
9123}
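/*
 * Illustrative use of dhd_iovar() above (a sketch, not driver code): a GET
 * copies the result back into cmd_buf, a SET pushes cmd_buf to the dongle.
 * "mpc" is just an assumed iovar name for the example.
 */
#if 0
static int example_toggle_mpc(dhd_pub_t *pub)
{
	uint32 mpc = 0;
	int err;

	/* GET: on success the current value lands in mpc */
	err = dhd_iovar(pub, 0, "mpc", (char *)&mpc, sizeof(mpc), FALSE);
	if (err < 0)
		return err;

	/* SET: write the toggled value back */
	mpc = !mpc;
	return dhd_iovar(pub, 0, "mpc", (char *)&mpc, sizeof(mpc), TRUE);
}
#endif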
9124
9125int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
9126{
9127 struct dhd_info *dhd = dhdp->info;
9128 struct net_device *dev = NULL;
9129
9130 ASSERT(dhd && dhd->iflist[ifidx]);
9131 dev = dhd->iflist[ifidx]->net;
9132 ASSERT(dev);
9133
9134 if (netif_running(dev)) {
9135 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
9136 return BCME_NOTDOWN;
9137 }
9138
9139#define DHD_MIN_MTU 1500
9140#define DHD_MAX_MTU 1752
9141
9142 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
9143 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
9144 return BCME_BADARG;
9145 }
9146
9147 dev->mtu = new_mtu;
9148 return 0;
9149}
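/*
 * Illustrative note on the bounds above: the interface must first be brought
 * down ("ifconfig wlan0 down"), after which any MTU in the inclusive range
 * 1500..1752 is accepted; everything else returns BCME_BADARG.
 */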
9150
9151#ifdef ARP_OFFLOAD_SUPPORT
9152/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
9153void
9154aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
9155{
9156 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
9157 int i;
9158 int ret;
9159
9160 bzero(ipv4_buf, sizeof(ipv4_buf));
9161
9162 /* display what we've got */
9163 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9164 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
9165#ifdef AOE_DBG
9166 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9167#endif
9168	/* now that we've saved the host_ip table, clear it in the dongle AOE */
9169 dhd_aoe_hostip_clr(dhd_pub, idx);
9170
9171 if (ret) {
9172 DHD_ERROR(("%s failed\n", __FUNCTION__));
9173 return;
9174 }
9175
9176 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
9177 if (add && (ipv4_buf[i] == 0)) {
9178 ipv4_buf[i] = ipa;
9179 add = FALSE; /* added ipa to local table */
9180 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
9181 __FUNCTION__, i));
9182 } else if (ipv4_buf[i] == ipa) {
9183 ipv4_buf[i] = 0;
9184 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
9185 __FUNCTION__, ipa, i));
9186 }
9187
9188 if (ipv4_buf[i] != 0) {
9189 /* add back host_ip entries from our local cache */
9190 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
9191 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
9192 __FUNCTION__, ipv4_buf[i], i));
9193 }
9194 }
9195#ifdef AOE_DBG
9196 /* see the resulting hostip table */
9197 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9198 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
9199 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9200#endif
9201}
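/*
 * Illustrative trace of the update above (addresses are hypothetical u32s):
 *   read back from dongle:  { 0xc0a80101, 0xc0a80102, 0, ... }, then cleared
 *   add 0xc0a80103:    re-added entries are 0xc0a80101, 0xc0a80102, 0xc0a80103
 *   remove 0xc0a80102: re-added entry is 0xc0a80101 only
 * i.e. the dongle table is always cleared first and every surviving entry is
 * written back one by one via dhd_arp_offload_add_ip().
 */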
9202
9203/*
9204 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
9205 * whenever there is an event related to an IP address.
9206 * ptr : kernel provided pointer to IP address that has changed
9207 */
9208static int dhd_inetaddr_notifier_call(struct notifier_block *this,
9209 unsigned long event,
9210 void *ptr)
9211{
9212 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
9213
9214 dhd_info_t *dhd;
9215 dhd_pub_t *dhd_pub;
9216 int idx;
9217
9218 if (!dhd_arp_enable)
9219 return NOTIFY_DONE;
9220 if (!ifa || !(ifa->ifa_dev->dev))
9221 return NOTIFY_DONE;
9222
9223#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9224	/* Filter notifications meant for non-Broadcom devices */
9225 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
9226 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
9227#if defined(WL_ENABLE_P2P_IF)
9228 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
9229#endif /* WL_ENABLE_P2P_IF */
9230 return NOTIFY_DONE;
9231 }
9232#endif /* LINUX_VERSION_CODE */
9233
9234 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
9235 if (!dhd)
9236 return NOTIFY_DONE;
9237
9238 dhd_pub = &dhd->pub;
9239
9240 if (dhd_pub->arp_version == 1) {
9241 idx = 0;
9242 } else {
9243 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
9244 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
9245 break;
9246 }
9247 if (idx < DHD_MAX_IFS) {
9248 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
9249 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
9250 } else {
9251 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
9252 idx = 0;
9253 }
9254 }
9255
9256 switch (event) {
9257 case NETDEV_UP:
9258 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
9259 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9260
9261 if (dhd->pub.busstate != DHD_BUS_DATA) {
9262 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
9263 if (dhd->pend_ipaddr) {
9264 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
9265 __FUNCTION__, dhd->pend_ipaddr));
9266 }
9267 dhd->pend_ipaddr = ifa->ifa_address;
9268 break;
9269 }
9270
9271#ifdef AOE_IP_ALIAS_SUPPORT
9272 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
9273 __FUNCTION__));
9274 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
9275#endif /* AOE_IP_ALIAS_SUPPORT */
9276 break;
9277
9278 case NETDEV_DOWN:
9279 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
9280 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9281 dhd->pend_ipaddr = 0;
9282#ifdef AOE_IP_ALIAS_SUPPORT
9283 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
9284 __FUNCTION__));
9285 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
9286#else
9287 dhd_aoe_hostip_clr(&dhd->pub, idx);
9288 dhd_aoe_arp_clr(&dhd->pub, idx);
9289#endif /* AOE_IP_ALIAS_SUPPORT */
9290 break;
9291
9292 default:
9293 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
9294 __func__, ifa->ifa_label, event));
9295 break;
9296 }
9297 return NOTIFY_DONE;
9298}
9299#endif /* ARP_OFFLOAD_SUPPORT */
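/*
 * Illustrative sketch of how the callback above gets hooked up (the actual
 * registration lives elsewhere in this file via dhd_inetaddr_notifier, as the
 * unregister path in dhd_detach() shows):
 */
#if 0
static struct notifier_block example_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call,
};

static void example_hookup(void)
{
	/* standard kernel API: subscribe to IPv4 address add/del events */
	register_inetaddr_notifier(&example_inetaddr_notifier);
	/* ... later, on teardown ... */
	unregister_inetaddr_notifier(&example_inetaddr_notifier);
}
#endif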
9300
9301#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9302/* Neighbor Discovery Offload: deferred handler */
9303static void
9304dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
9305{
9306 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
9307 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
9308 int ret;
9309
9310 if (event != DHD_WQ_WORK_IPV6_NDO) {
9311 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
9312 return;
9313 }
9314
9315 if (!ndo_work) {
9316 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
9317 return;
9318 }
9319
9320	if (!pub) {
9321		DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
9322		goto done; /* still free ndo_work */
9323	}
9324
9325	if (ndo_work->if_idx) {
9326		DHD_ERROR(("%s: NDO is handled on the primary interface only, idx %d \n",
9327			__FUNCTION__, ndo_work->if_idx));
9328		goto done; /* still free ndo_work */
9329	}
9329
9330 switch (ndo_work->event) {
9331 case NETDEV_UP:
9332 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
9333 ret = dhd_ndo_enable(pub, TRUE);
9334 if (ret < 0) {
9335 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
9336 }
9337
9338 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
9339 if (ret < 0) {
9340 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
9341 __FUNCTION__, ret));
9342 }
9343 break;
9344 case NETDEV_DOWN:
9345 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
9346 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
9347 if (ret < 0) {
9348 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
9349 __FUNCTION__, ret));
9350 goto done;
9351 }
9352
9353 ret = dhd_ndo_enable(pub, FALSE);
9354 if (ret < 0) {
9355 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
9356 goto done;
9357 }
9358 break;
9359 default:
9360 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
9361 break;
9362 }
9363done:
9364 /* free ndo_work. alloced while scheduling the work */
9365 kfree(ndo_work);
9366
9367 return;
9368}
9369
9370/*
9371 * Neighbor Discovery Offload: Called when an interface
9372 * is assigned with ipv6 address.
9373 * Handles only primary interface
9374 */
9375static int dhd_inet6addr_notifier_call(struct notifier_block *this,
9376 unsigned long event,
9377 void *ptr)
9378{
9379 dhd_info_t *dhd;
9380 dhd_pub_t *dhd_pub;
9381 struct inet6_ifaddr *inet6_ifa = ptr;
9382 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
9383 struct ipv6_work_info_t *ndo_info;
9384 int idx = 0; /* REVISIT */
9385
9386#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9387	/* Filter notifications meant for non-Broadcom devices */
9388 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
9389 return NOTIFY_DONE;
9390 }
9391#endif /* LINUX_VERSION_CODE */
9392
9393 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
9394 if (!dhd)
9395 return NOTIFY_DONE;
9396
9397 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
9398 return NOTIFY_DONE;
9399 dhd_pub = &dhd->pub;
9400
9401 if (!FW_SUPPORTED(dhd_pub, ndoe))
9402 return NOTIFY_DONE;
9403
9404 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
9405 if (!ndo_info) {
9406 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
9407 return NOTIFY_DONE;
9408 }
9409
9410 ndo_info->event = event;
9411 ndo_info->if_idx = idx;
9412 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
9413
9414	/* defer the work to a thread as it may block the kernel */
9415 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
9416 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
9417 return NOTIFY_DONE;
9418}
9419#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9420
9421int
9422dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
9423{
9424 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
9425 dhd_if_t *ifp;
9426 struct net_device *net = NULL;
9427 int err = 0;
9428 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
9429
9430 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
9431
9432 ASSERT(dhd && dhd->iflist[ifidx]);
9433 ifp = dhd->iflist[ifidx];
9434 net = ifp->net;
9435 ASSERT(net && (ifp->idx == ifidx));
9436
9437#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9438 ASSERT(!net->open);
9439 net->get_stats = dhd_get_stats;
9440 net->do_ioctl = dhd_ioctl_entry;
9441 net->hard_start_xmit = dhd_start_xmit;
9442 net->set_mac_address = dhd_set_mac_address;
9443 net->set_multicast_list = dhd_set_multicast_list;
9444 net->open = net->stop = NULL;
9445#else
9446 ASSERT(!net->netdev_ops);
9447 net->netdev_ops = &dhd_ops_virt;
9448#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
9449
9450 /* Ok, link into the network layer... */
9451 if (ifidx == 0) {
9452 /*
9453 * device functions for the primary interface only
9454 */
9455#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
9456 net->open = dhd_open;
9457 net->stop = dhd_stop;
9458#else
9459 net->netdev_ops = &dhd_ops_pri;
9460#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
9461 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
9462 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
9463 } else {
9464 /*
9465 * We have to use the primary MAC for virtual interfaces
9466 */
9467 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
9468 /*
9469 * Android sets the locally administered bit to indicate that this is a
9470 * portable hotspot. This will not work in simultaneous AP/STA mode,
9471		 * nor with P2P. Need to set the dongle's MAC address, and then use that.
9472 */
9473 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
9474 ETHER_ADDR_LEN)) {
9475 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
9476 __func__, net->name));
9477 temp_addr[0] |= 0x02;
9478 }
9479 }
9480
9481 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
9482#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
9483 net->ethtool_ops = &dhd_ethtool_ops;
9484#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
9485
9486#if defined(WL_WIRELESS_EXT)
9487#if WIRELESS_EXT < 19
9488 net->get_wireless_stats = dhd_get_wireless_stats;
9489#endif /* WIRELESS_EXT < 19 */
9490#if WIRELESS_EXT > 12
9491 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
9492#endif /* WIRELESS_EXT > 12 */
9493#endif /* defined(WL_WIRELESS_EXT) */
9494
9495 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
9496
9497 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
9498
9499 if (ifidx == 0)
9500 printf("%s\n", dhd_version);
9501
9502 if (need_rtnl_lock)
9503 err = register_netdev(net);
9504 else
9505 err = register_netdevice(net);
9506
9507 if (err != 0) {
9508 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
9509 goto fail;
9510 }
9511
9512
9513
9514 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
9515#if defined(CUSTOMER_HW4_DEBUG)
9516 MAC2STRDBG(dhd->pub.mac.octet));
9517#else
9518 MAC2STRDBG(net->dev_addr));
9519#endif /* CUSTOMER_HW4_DEBUG */
9520
9521#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
9522// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
9523#endif
9524
9525#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
9526 KERNEL_VERSION(2, 6, 27))))
9527 if (ifidx == 0) {
9528#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
9529 up(&dhd_registration_sem);
9530#endif /* BCMLXSDMMC */
9531 if (!dhd_download_fw_on_driverload) {
9532#ifdef WL_CFG80211
9533 wl_terminate_event_handler();
9534#endif /* WL_CFG80211 */
9535#if defined(DHD_LB) && defined(DHD_LB_RXP)
9536 __skb_queue_purge(&dhd->rx_pend_queue);
9537#endif /* DHD_LB && DHD_LB_RXP */
9538#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
9539 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
9540#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
9541 dhd_net_bus_devreset(net, TRUE);
9542#ifdef BCMLXSDMMC
9543 dhd_net_bus_suspend(net);
9544#endif /* BCMLXSDMMC */
9545 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
9546 }
9547 }
9548#endif /* BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27) */
9549 return 0;
9550
9551fail:
9552#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
9553 net->open = NULL;
9554#else
9555 net->netdev_ops = NULL;
9556#endif
9557 return err;
9558}
9559
9560void
9561dhd_bus_detach(dhd_pub_t *dhdp)
9562{
9563 dhd_info_t *dhd;
9564
9565 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9566
9567 if (dhdp) {
9568 dhd = (dhd_info_t *)dhdp->info;
9569 if (dhd) {
9570
9571 /*
9572 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
9573			 * calling stop again will cause SD read/write errors.
9574 */
9575 if (dhd->pub.busstate != DHD_BUS_DOWN) {
9576 /* Stop the protocol module */
9577 dhd_prot_stop(&dhd->pub);
9578
9579 /* Stop the bus module */
9580 dhd_bus_stop(dhd->pub.bus, TRUE);
9581 }
9582
9583#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
9584 dhd_bus_oob_intr_unregister(dhdp);
9585#endif
9586 }
9587 }
9588}
9589
9590
9591void dhd_detach(dhd_pub_t *dhdp)
9592{
9593 dhd_info_t *dhd;
9594 unsigned long flags;
9595 int timer_valid = FALSE;
9596 struct net_device *dev;
9597
9598 if (!dhdp)
9599 return;
9600
9601 dhd = (dhd_info_t *)dhdp->info;
9602 if (!dhd)
9603 return;
9604
9605 dev = dhd->iflist[0]->net;
9606
9607 if (dev) {
9608 rtnl_lock();
9609 if (dev->flags & IFF_UP) {
9610 /* If IFF_UP is still up, it indicates that
9611 * "ifconfig wlan0 down" hasn't been called.
9612 * So invoke dev_close explicitly here to
9613 * bring down the interface.
9614 */
9615 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
9616 dev_close(dev);
9617 }
9618 rtnl_unlock();
9619 }
9620
9621 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
9622
9623 dhd->pub.up = 0;
9624 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
9625 /* Give sufficient time for threads to start running in case
9626 * dhd_attach() has failed
9627 */
9628 OSL_SLEEP(100);
9629 }
9630
9631#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
9632#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
9633
9634#ifdef PROP_TXSTATUS
9635#ifdef DHD_WLFC_THREAD
9636 if (dhd->pub.wlfc_thread) {
9637 kthread_stop(dhd->pub.wlfc_thread);
9638 dhdp->wlfc_thread_go = TRUE;
9639 wake_up_interruptible(&dhdp->wlfc_wqhead);
9640 }
9641 dhd->pub.wlfc_thread = NULL;
9642#endif /* DHD_WLFC_THREAD */
9643#endif /* PROP_TXSTATUS */
9644
9645 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
9646
9647 dhd_bus_detach(dhdp);
9648#ifdef BCMPCIE
9649 if (is_reboot == SYS_RESTART) {
9650 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
9651 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
9652 dhdpcie_bus_clock_stop(dhdp->bus);
9653 wifi_platform_set_power(dhd_wifi_platdata->adapters,
9654 FALSE, WIFI_TURNOFF_DELAY);
9655 }
9656 }
9657#endif /* BCMPCIE */
9658#ifndef PCIE_FULL_DONGLE
9659 if (dhdp->prot)
9660 dhd_prot_detach(dhdp);
9661#endif
9662 }
9663
9664#ifdef ARP_OFFLOAD_SUPPORT
9665 if (dhd_inetaddr_notifier_registered) {
9666 dhd_inetaddr_notifier_registered = FALSE;
9667 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
9668 }
9669#endif /* ARP_OFFLOAD_SUPPORT */
9670#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9671 if (dhd_inet6addr_notifier_registered) {
9672 dhd_inet6addr_notifier_registered = FALSE;
9673 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
9674 }
9675#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9676#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9677 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
9678 if (dhd->early_suspend.suspend)
9679 unregister_early_suspend(&dhd->early_suspend);
9680 }
9681#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
9682
9683#if defined(WL_WIRELESS_EXT)
9684 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
9685		/* Detach and unlink in the iw */
9686 wl_iw_detach();
9687 }
9688#endif /* defined(WL_WIRELESS_EXT) */
9689
9690 /* delete all interfaces, start with virtual */
9691 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
9692 int i = 1;
9693 dhd_if_t *ifp;
9694
9695 /* Cleanup virtual interfaces */
9696 dhd_net_if_lock_local(dhd);
9697 for (i = 1; i < DHD_MAX_IFS; i++) {
9698 if (dhd->iflist[i])
9699 dhd_remove_if(&dhd->pub, i, TRUE);
9700 }
9701 dhd_net_if_unlock_local(dhd);
9702
9703 /* delete primary interface 0 */
9704 ifp = dhd->iflist[0];
9705 ASSERT(ifp);
9706 ASSERT(ifp->net);
9707 if (ifp && ifp->net) {
9708
9709
9710
9711 /* in unregister_netdev case, the interface gets freed by net->destructor
9712 * (which is set to free_netdev)
9713 */
9714 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
9715 free_netdev(ifp->net);
9716 } else {
9717#ifdef SET_RPS_CPUS
9718 custom_rps_map_clear(ifp->net->_rx);
9719#endif /* SET_RPS_CPUS */
9720 netif_tx_disable(ifp->net);
9721 unregister_netdev(ifp->net);
9722 }
9723 ifp->net = NULL;
9724#ifdef DHD_WMF
9725 dhd_wmf_cleanup(dhdp, 0);
9726#endif /* DHD_WMF */
9727#ifdef DHD_L2_FILTER
9728 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
9729 NULL, FALSE, dhdp->tickcnt);
9730 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
9731 ifp->phnd_arp_table = NULL;
9732#endif /* DHD_L2_FILTER */
9733
9734 dhd_if_del_sta_list(ifp);
9735
9736 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
9737 dhd->iflist[0] = NULL;
9738 }
9739 }
9740
9741 /* Clear the watchdog timer */
9742 DHD_GENERAL_LOCK(&dhd->pub, flags);
9743 timer_valid = dhd->wd_timer_valid;
9744 dhd->wd_timer_valid = FALSE;
9745 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9746 if (timer_valid)
9747 del_timer_sync(&dhd->timer);
9748 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9749
9750 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
9751#ifdef DHD_PCIE_RUNTIMEPM
9752 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
9753 PROC_STOP(&dhd->thr_rpm_ctl);
9754 }
9755#endif /* DHD_PCIE_RUNTIMEPM */
9756 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
9757 PROC_STOP(&dhd->thr_wdt_ctl);
9758 }
9759
9760 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
9761 PROC_STOP(&dhd->thr_rxf_ctl);
9762 }
9763
9764 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
9765 PROC_STOP(&dhd->thr_dpc_ctl);
9766 } else {
9767 tasklet_kill(&dhd->tasklet);
9768#ifdef DHD_LB_RXP
9769 __skb_queue_purge(&dhd->rx_pend_queue);
9770#endif /* DHD_LB_RXP */
9771 }
9772 }
9773
9774#if defined(DHD_LB)
9775 /* Kill the Load Balancing Tasklets */
9776#if defined(DHD_LB_TXC)
9777 tasklet_disable(&dhd->tx_compl_tasklet);
9778 tasklet_kill(&dhd->tx_compl_tasklet);
9779#endif /* DHD_LB_TXC */
9780#if defined(DHD_LB_RXC)
9781 tasklet_disable(&dhd->rx_compl_tasklet);
9782 tasklet_kill(&dhd->rx_compl_tasklet);
9783#endif /* DHD_LB_RXC */
9784 if (dhd->cpu_notifier.notifier_call != NULL)
9785 unregister_cpu_notifier(&dhd->cpu_notifier);
9786 dhd_cpumasks_deinit(dhd);
9787#endif /* DHD_LB */
9788
9789#ifdef DHD_LOG_DUMP
9790 dhd_log_dump_deinit(&dhd->pub);
9791#endif /* DHD_LOG_DUMP */
9792#ifdef WL_CFG80211
9793 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
9794 wl_cfg80211_detach(NULL);
9795 dhd_monitor_uninit();
9796 }
9797#endif
9798 /* free deferred work queue */
9799 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
9800 dhd->dhd_deferred_wq = NULL;
9801
9802#ifdef SHOW_LOGTRACE
9803 if (dhd->event_data.fmts)
9804 kfree(dhd->event_data.fmts);
9805 if (dhd->event_data.raw_fmts)
9806 kfree(dhd->event_data.raw_fmts);
9807 if (dhd->event_data.raw_sstr)
9808 kfree(dhd->event_data.raw_sstr);
9809#endif /* SHOW_LOGTRACE */
9810
9811#ifdef PNO_SUPPORT
9812 if (dhdp->pno_state)
9813 dhd_pno_deinit(dhdp);
9814#endif
9815#if defined(CONFIG_PM_SLEEP)
9816 if (dhd_pm_notifier_registered) {
9817 unregister_pm_notifier(&dhd->pm_notifier);
9818 dhd_pm_notifier_registered = FALSE;
9819 }
9820#endif /* CONFIG_PM_SLEEP */
9821
9822#ifdef DEBUG_CPU_FREQ
9823 if (dhd->new_freq)
9824 free_percpu(dhd->new_freq);
9825 dhd->new_freq = NULL;
9826 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
9827#endif
9828 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
9829 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
9830#ifdef CONFIG_HAS_WAKELOCK
9831 dhd->wakelock_wd_counter = 0;
9832 wake_lock_destroy(&dhd->wl_wdwake);
9833#endif /* CONFIG_HAS_WAKELOCK */
9834 DHD_OS_WAKE_LOCK_DESTROY(dhd);
9835 }
9836
9837
9838
9839#ifdef DHDTCPACK_SUPPRESS
9840 /* This will free all MEM allocated for TCPACK SUPPRESS */
9841 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
9842#endif /* DHDTCPACK_SUPPRESS */
9843
9844#ifdef PCIE_FULL_DONGLE
9845 dhd_flow_rings_deinit(dhdp);
9846 if (dhdp->prot)
9847 dhd_prot_detach(dhdp);
9848#endif
9849
9850
9851 dhd_sysfs_exit(dhd);
9852 dhd->pub.is_fw_download_done = FALSE;
9853 dhd_conf_detach(dhdp);
9854}
9855
9856
9857void
9858dhd_free(dhd_pub_t *dhdp)
9859{
9860 dhd_info_t *dhd;
9861 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9862
9863 if (dhdp) {
9864 int i;
9865 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
9866 if (dhdp->reorder_bufs[i]) {
9867 reorder_info_t *ptr;
9868 uint32 buf_size = sizeof(struct reorder_info);
9869
9870 ptr = dhdp->reorder_bufs[i];
9871
9872 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
9873 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
9874 i, ptr->max_idx, buf_size));
9875
9876 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
9877 dhdp->reorder_bufs[i] = NULL;
9878 }
9879 }
9880
9881 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
9882
9883 dhd = (dhd_info_t *)dhdp->info;
9884 if (dhdp->soc_ram) {
9885#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
9886 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
9887#else
9888 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
9889#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
9890 dhdp->soc_ram = NULL;
9891 }
9892#ifdef CACHE_FW_IMAGES
9893 if (dhdp->cached_fw) {
9894 MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
9895 dhdp->cached_fw = NULL;
9896 }
9897
9898 if (dhdp->cached_nvram) {
9899 MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
9900 dhdp->cached_nvram = NULL;
9901 }
9902#endif
9903 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
9904 if (dhd &&
9905 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
9906 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
9907 dhd = NULL;
9908 }
9909}
9910
9911void
9912dhd_clear(dhd_pub_t *dhdp)
9913{
9914 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9915
9916 if (dhdp) {
9917 int i;
9918#ifdef DHDTCPACK_SUPPRESS
9919 /* Clean up timer/data structure for any remaining/pending packet or timer. */
9920 dhd_tcpack_info_tbl_clean(dhdp);
9921#endif /* DHDTCPACK_SUPPRESS */
9922 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
9923 if (dhdp->reorder_bufs[i]) {
9924 reorder_info_t *ptr;
9925 uint32 buf_size = sizeof(struct reorder_info);
9926
9927 ptr = dhdp->reorder_bufs[i];
9928
9929 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
9930 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
9931 i, ptr->max_idx, buf_size));
9932
9933 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
9934 dhdp->reorder_bufs[i] = NULL;
9935 }
9936 }
9937
9938 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
9939
9940 if (dhdp->soc_ram) {
9941#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
9942 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
9943#else
9944 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
9945#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
9946 dhdp->soc_ram = NULL;
9947 }
9948 }
9949}
9950
9951static void
9952dhd_module_cleanup(void)
9953{
9954 printf("%s: Enter\n", __FUNCTION__);
9955
9956 dhd_bus_unregister();
9957
9958 wl_android_exit();
9959
9960 dhd_wifi_platform_unregister_drv();
9961#ifdef CUSTOMER_HW_AMLOGIC
9962#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
9963 wifi_teardown_dt();
9964#endif
9965#endif
9966 printf("%s: Exit\n", __FUNCTION__);
9967}
9968
9969static void __exit
9970dhd_module_exit(void)
9971{
9972 dhd_buzzz_detach();
9973 dhd_module_cleanup();
9974 unregister_reboot_notifier(&dhd_reboot_notifier);
9975}
9976
9977static int __init
9978dhd_module_init(void)
9979{
9980 int err;
9981 int retry = POWERUP_MAX_RETRY;
9982
9983 printf("%s: in\n", __FUNCTION__);
9984#ifdef CUSTOMER_HW_AMLOGIC
9985#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
9986 if (wifi_setup_dt()) {
9987 printf("wifi_dt : fail to setup dt\n");
9988 }
9989#endif
9990#endif
9991
9992 dhd_buzzz_attach();
9993
9994 DHD_PERIM_RADIO_INIT();
9995
9996
9997 if (firmware_path[0] != '\0') {
9998 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
9999 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
10000 }
10001
10002 if (nvram_path[0] != '\0') {
10003 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
10004 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
10005 }
10006
10007 do {
10008 err = dhd_wifi_platform_register_drv();
10009 if (!err) {
10010 register_reboot_notifier(&dhd_reboot_notifier);
10011 break;
10012 }
10013 else {
10014 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
10015 __FUNCTION__, retry));
10016 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
10017 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
10018 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
10019 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
10020 }
10021 } while (retry--);
10022
10023 if (err) {
10024#ifdef CUSTOMER_HW_AMLOGIC
10025#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
10026 wifi_teardown_dt();
10027#endif
10028#endif
10029 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
10030 } else {
10031 if (!dhd_download_fw_on_driverload) {
10032 dhd_driver_init_done = TRUE;
10033 }
10034 }
10035
10036 printf("%s: Exit err=%d\n", __FUNCTION__, err);
10037 return err;
10038}
10039
10040static int
10041dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
10042{
10043 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
10044 if (code == SYS_RESTART) {
10045#ifdef BCMPCIE
10046 is_reboot = code;
10047#endif /* BCMPCIE */
10048 }
10049 return NOTIFY_DONE;
10050}
10051
10052
10053#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
10054#if defined(CONFIG_DEFERRED_INITCALLS)
10055#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
10056 defined(CONFIG_ARCH_MSM8996)
10057deferred_module_init_sync(dhd_module_init);
10058#else
10059deferred_module_init(dhd_module_init);
10060#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
10061 * CONFIG_ARCH_MSM8996
10062 */
10063#elif defined(USE_LATE_INITCALL_SYNC)
10064late_initcall_sync(dhd_module_init);
10065#else
10066late_initcall(dhd_module_init);
10067#endif /* USE_LATE_INITCALL_SYNC */
10068#else
10069module_init(dhd_module_init);
10070#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
10071
10072module_exit(dhd_module_exit);
10073
10074/*
10075 * OS specific functions required to implement DHD driver in OS independent way
10076 */
10077int
10078dhd_os_proto_block(dhd_pub_t *pub)
10079{
10080 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10081
10082 if (dhd) {
10083 DHD_PERIM_UNLOCK(pub);
10084
10085 down(&dhd->proto_sem);
10086
10087 DHD_PERIM_LOCK(pub);
10088 return 1;
10089 }
10090
10091 return 0;
10092}
10093
10094int
10095dhd_os_proto_unblock(dhd_pub_t *pub)
10096{
10097 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10098
10099 if (dhd) {
10100 up(&dhd->proto_sem);
10101 return 1;
10102 }
10103
10104 return 0;
10105}
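/*
 * Illustrative use of the block/unblock pair above (a sketch): the protocol
 * layer serializes control transactions with the dongle, dropping the
 * perimeter lock while it sleeps on proto_sem.
 */
#if 0
static int example_proto_transaction(dhd_pub_t *pub)
{
	int err = BCME_ERROR;

	if (dhd_os_proto_block(pub)) {
		/* ... exactly one control message in flight here ... */
		err = BCME_OK;
		dhd_os_proto_unblock(pub);
	}
	return err;
}
#endif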
10106
10107void
10108dhd_os_dhdiovar_lock(dhd_pub_t *pub)
10109{
10110 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10111
10112 if (dhd) {
10113 mutex_lock(&dhd->dhd_iovar_mutex);
10114 }
10115}
10116
10117void
10118dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
10119{
10120 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10121
10122 if (dhd) {
10123 mutex_unlock(&dhd->dhd_iovar_mutex);
10124 }
10125}
10126
10127unsigned int
10128dhd_os_get_ioctl_resp_timeout(void)
10129{
10130 return ((unsigned int)dhd_ioctl_timeout_msec);
10131}
10132
10133void
10134dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
10135{
10136 dhd_ioctl_timeout_msec = (int)timeout_msec;
10137}
10138
10139int
10140dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
10141{
10142 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10143 int timeout;
10144
10145	/* Convert timeout in milliseconds to jiffies */
10146#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10147 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
10148#else
10149 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
10150#endif
10151
10152 DHD_PERIM_UNLOCK(pub);
10153
10154 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
10155
10156 DHD_PERIM_LOCK(pub);
10157
10158 return timeout;
10159}
10160
10161int
10162dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
10163{
10164 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10165
10166 wake_up(&dhd->ioctl_resp_wait);
10167 return 0;
10168}
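/*
 * Illustrative handshake for the wait/wake pair above (a sketch;
 * "example_resp_pending" is a hypothetical condition flag): the submitter
 * sleeps until the flag becomes non-zero or the ioctl timeout elapses, and
 * the bus completion path publishes the flag before waking the waiter.
 */
#if 0
static uint example_resp_pending;

static int example_submit_and_wait(dhd_pub_t *pub)
{
	example_resp_pending = 0;
	/* ... hand the ioctl to the bus layer ... */
	if (dhd_os_ioctl_resp_wait(pub, &example_resp_pending) == 0)
		return BCME_ERROR;	/* wait_event_timeout() returned 0: timed out */
	return BCME_OK;
}

static void example_on_completion(dhd_pub_t *pub)
{
	example_resp_pending = 1;	/* publish the condition first */
	dhd_os_ioctl_resp_wake(pub);	/* then wake any waiter */
}
#endif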
10169
10170int
10171dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
10172{
10173 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10174 int timeout;
10175
10176	/* Convert timeout in milliseconds to jiffies */
10177#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10178 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
10179#else
10180 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
10181#endif
10182
10183 DHD_PERIM_UNLOCK(pub);
10184
10185 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
10186
10187 DHD_PERIM_LOCK(pub);
10188
10189 return timeout;
10190}
10191
10192int
10193dhd_os_d3ack_wake(dhd_pub_t *pub)
10194{
10195 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10196
10197 wake_up(&dhd->d3ack_wait);
10198 return 0;
10199}
10200
10201int
10202dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
10203{
10204 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10205 int timeout;
10206
10207	/* Wait for bus usage contexts to gracefully exit within some timeout value.
10208	 * Set the timeout slightly higher than dhd_ioctl_timeout_msec,
10209	 * so that the IOCTL timeout is not affected.
10210	 */
10211	/* Convert timeout in milliseconds to jiffies */
10212#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10213 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
10214#else
10215 timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
10216#endif
10217
10218 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
10219
10220 return timeout;
10221}
10222
10223int INLINE
10224dhd_os_busbusy_wake(dhd_pub_t *pub)
10225{
10226 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10227	/* Call wmb() to make sure the event value is updated before waking up the waiter */
10228 OSL_SMP_WMB();
10229 wake_up(&dhd->dhd_bus_busy_state_wait);
10230 return 0;
10231}
10232
10233void
10234dhd_os_wd_timer_extend(void *bus, bool extend)
10235{
10236 dhd_pub_t *pub = bus;
10237 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10238
10239 if (extend)
10240 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
10241 else
10242 dhd_os_wd_timer(bus, dhd->default_wd_interval);
10243}
10244
10245
10246void
10247dhd_os_wd_timer(void *bus, uint wdtick)
10248{
10249 dhd_pub_t *pub = bus;
10250 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10251 unsigned long flags;
10252
10253 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10254
10255 if (!dhd) {
10256 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
10257 return;
10258 }
10259
10260 DHD_OS_WD_WAKE_LOCK(pub);
10261 DHD_GENERAL_LOCK(pub, flags);
10262
10263 /* don't start the wd until fw is loaded */
10264 if (pub->busstate == DHD_BUS_DOWN) {
10265 DHD_GENERAL_UNLOCK(pub, flags);
10266 if (!wdtick)
10267 DHD_OS_WD_WAKE_UNLOCK(pub);
10268 return;
10269 }
10270
10271 /* Totally stop the timer */
10272 if (!wdtick && dhd->wd_timer_valid == TRUE) {
10273 dhd->wd_timer_valid = FALSE;
10274 DHD_GENERAL_UNLOCK(pub, flags);
10275 del_timer_sync(&dhd->timer);
10276 DHD_OS_WD_WAKE_UNLOCK(pub);
10277 return;
10278 }
10279
10280 if (wdtick) {
10281 DHD_OS_WD_WAKE_LOCK(pub);
10282 dhd_watchdog_ms = (uint)wdtick;
10283		/* Re-arm the timer with the last watchdog period */
10284 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
10285 dhd->wd_timer_valid = TRUE;
10286 }
10287 DHD_GENERAL_UNLOCK(pub, flags);
10288 DHD_OS_WD_WAKE_UNLOCK(pub);
10289}
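/*
 * Illustrative calls for the helper above: a non-zero tick (milliseconds)
 * (re)arms the watchdog, zero tears it down synchronously.
 */
#if 0
static void example_wd_usage(void *bus)
{
	dhd_os_wd_timer(bus, 10);	/* arm: fire ~every 10 ms */
	dhd_os_wd_timer(bus, 0);	/* stop: del_timer_sync() and drop the wake lock */
}
#endif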
10290
10291#ifdef DHD_PCIE_RUNTIMEPM
10292void
10293dhd_os_runtimepm_timer(void *bus, uint tick)
10294{
10295 dhd_pub_t *pub = bus;
10296 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10297 unsigned long flags;
10298
10299 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10300
10301 if (!dhd) {
10302 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
10303 return;
10304 }
10305
10306 DHD_GENERAL_LOCK(pub, flags);
10307
10308 /* don't start the RPM until fw is loaded */
10309 if (pub->busstate == DHD_BUS_DOWN ||
10310 pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
10311 DHD_GENERAL_UNLOCK(pub, flags);
10312 return;
10313 }
10314
10315 /* If tick is non-zero, the request is to start the timer */
10316 if (tick) {
10317		/* Start the timer only if it's not already running */
10318 if (dhd->rpm_timer_valid == FALSE) {
10319 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
10320 dhd->rpm_timer_valid = TRUE;
10321 }
10322 } else {
10323 /* tick is zero, we have to stop the timer */
10324		/* Stop the timer only if it's running, otherwise we don't have to do anything */
10325 if (dhd->rpm_timer_valid == TRUE) {
10326 dhd->rpm_timer_valid = FALSE;
10327 DHD_GENERAL_UNLOCK(pub, flags);
10328 del_timer_sync(&dhd->rpm_timer);
10329 /* we have already released the lock, so just go to exit */
10330 goto exit;
10331 }
10332 }
10333
10334 DHD_GENERAL_UNLOCK(pub, flags);
10335exit:
10336 return;
10337
10338}
10339
10340#endif /* DHD_PCIE_RUNTIMEPM */
10341
10342void *
10343dhd_os_open_image(char *filename)
10344{
10345 struct file *fp;
10346 int size;
10347
10348 fp = filp_open(filename, O_RDONLY, 0);
10349 /*
10350 * 2.6.11 (FC4) supports filp_open() but later revs don't?
10351 * Alternative:
10352 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
10353 * ???
10354 */
10355 if (IS_ERR(fp)) {
10356 fp = NULL;
10357 goto err;
10358 }
10359
10360 if (!S_ISREG(file_inode(fp)->i_mode)) {
10361 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
10362 fp = NULL;
10363 goto err;
10364 }
10365
10366 size = i_size_read(file_inode(fp));
10367 if (size <= 0) {
10368 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
10369 fp = NULL;
10370 goto err;
10371 }
10372
10373 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
10374
10375err:
10376 return fp;
10377}
10378
10379int
10380dhd_os_get_image_block(char *buf, int len, void *image)
10381{
10382 struct file *fp = (struct file *)image;
10383 int rdlen;
10384 int size;
10385
10386 if (!image)
10387 return 0;
10388
10389 size = i_size_read(file_inode(fp));
10390 rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
10391
10392 if (len >= size && size != rdlen) {
10393 return -EIO;
10394 }
10395
10396 if (rdlen > 0)
10397 fp->f_pos += rdlen;
10398
10399 return rdlen;
10400}
10401
10402void
10403dhd_os_close_image(void *image)
10404{
10405 if (image)
10406 filp_close((struct file *)image, NULL);
10407}
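/*
 * Illustrative read loop over the three image helpers above (a sketch;
 * error handling trimmed, and the consumer of each block is elided):
 */
#if 0
static int example_stream_image(char *path)
{
	char block[2048];
	int len;
	void *img = dhd_os_open_image(path);

	if (img == NULL)
		return BCME_ERROR;
	while ((len = dhd_os_get_image_block(block, sizeof(block), img)) > 0) {
		/* ... push len bytes from block to the dongle ... */
	}
	dhd_os_close_image(img);
	return (len < 0) ? BCME_ERROR : BCME_OK;
}
#endif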
10408
10409void
10410dhd_os_sdlock(dhd_pub_t *pub)
10411{
10412 dhd_info_t *dhd;
10413
10414 dhd = (dhd_info_t *)(pub->info);
10415
10416 if (dhd_dpc_prio >= 0)
10417 down(&dhd->sdsem);
10418 else
10419 spin_lock_bh(&dhd->sdlock);
10420}
10421
10422void
10423dhd_os_sdunlock(dhd_pub_t *pub)
10424{
10425 dhd_info_t *dhd;
10426
10427 dhd = (dhd_info_t *)(pub->info);
10428
10429 if (dhd_dpc_prio >= 0)
10430 up(&dhd->sdsem);
10431 else
10432 spin_unlock_bh(&dhd->sdlock);
10433}
10434
10435void
10436dhd_os_sdlock_txq(dhd_pub_t *pub)
10437{
10438 dhd_info_t *dhd;
10439
10440 dhd = (dhd_info_t *)(pub->info);
10441 spin_lock_bh(&dhd->txqlock);
10442}
10443
10444void
10445dhd_os_sdunlock_txq(dhd_pub_t *pub)
10446{
10447 dhd_info_t *dhd;
10448
10449 dhd = (dhd_info_t *)(pub->info);
10450 spin_unlock_bh(&dhd->txqlock);
10451}
10452
10453void
10454dhd_os_sdlock_rxq(dhd_pub_t *pub)
10455{
10456}
10457
10458void
10459dhd_os_sdunlock_rxq(dhd_pub_t *pub)
10460{
10461}
10462
10463static void
10464dhd_os_rxflock(dhd_pub_t *pub)
10465{
10466 dhd_info_t *dhd;
10467
10468 dhd = (dhd_info_t *)(pub->info);
10469 spin_lock_bh(&dhd->rxf_lock);
10470
10471}
10472
10473static void
10474dhd_os_rxfunlock(dhd_pub_t *pub)
10475{
10476 dhd_info_t *dhd;
10477
10478 dhd = (dhd_info_t *)(pub->info);
10479 spin_unlock_bh(&dhd->rxf_lock);
10480}
10481
10482#ifdef DHDTCPACK_SUPPRESS
10483unsigned long
10484dhd_os_tcpacklock(dhd_pub_t *pub)
10485{
10486 dhd_info_t *dhd;
10487 unsigned long flags = 0;
10488
10489 dhd = (dhd_info_t *)(pub->info);
10490
10491 if (dhd) {
10492#ifdef BCMSDIO
10493 spin_lock_bh(&dhd->tcpack_lock);
10494#else
10495 spin_lock_irqsave(&dhd->tcpack_lock, flags);
10496#endif /* BCMSDIO */
10497 }
10498
10499 return flags;
10500}
10501
10502void
10503dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
10504{
10505 dhd_info_t *dhd;
10506
10507#ifdef BCMSDIO
10508 BCM_REFERENCE(flags);
10509#endif /* BCMSDIO */
10510
10511 dhd = (dhd_info_t *)(pub->info);
10512
10513 if (dhd) {
10514#ifdef BCMSDIO
10515 spin_unlock_bh(&dhd->tcpack_lock); // terence 20160519
10516#else
10517 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
10518#endif /* BCMSDIO */
10519 }
10520}
10521#endif /* DHDTCPACK_SUPPRESS */
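/*
 * Note on the lock flavor above (an inference, not stated in the source):
 * on SDIO the TCP-ACK table appears to be touched only from process and
 * softirq context, so spin_lock_bh() suffices and no IRQ flags are carried;
 * other buses may take it from hard-irq context, hence the
 * irqsave/irqrestore variant that threads the flags through the caller.
 */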
10522
10523uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
10524{
10525 uint8* buf;
10526 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
10527
10528 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
10529 if (buf == NULL && kmalloc_if_fail)
10530 buf = kmalloc(size, flags);
10531
10532 return buf;
10533}
10534
10535void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
10536{
10537}
10538
10539#if defined(WL_WIRELESS_EXT)
10540struct iw_statistics *
10541dhd_get_wireless_stats(struct net_device *dev)
10542{
10543 int res = 0;
10544 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10545
10546 if (!dhd->pub.up) {
10547 return NULL;
10548 }
10549
10550 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
10551
10552 if (res == 0)
10553 return &dhd->iw.wstats;
10554 else
10555 return NULL;
10556}
10557#endif /* defined(WL_WIRELESS_EXT) */
10558
10559static int
10560dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
10561 wl_event_msg_t *event, void **data)
10562{
10563 int bcmerror = 0;
10564 ASSERT(dhd != NULL);
10565
10566#ifdef SHOW_LOGTRACE
10567 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
10568#else
10569 bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
10570#endif /* SHOW_LOGTRACE */
10571
10572 if (bcmerror != BCME_OK)
10573 return (bcmerror);
10574
10575#if defined(WL_WIRELESS_EXT)
10576 if (event->bsscfgidx == 0) {
10577 /*
10578 * Wireless ext is on primary interface only
10579 */
10580
10581 ASSERT(dhd->iflist[*ifidx] != NULL);
10582 ASSERT(dhd->iflist[*ifidx]->net != NULL);
10583
10584 if (dhd->iflist[*ifidx]->net) {
10585 wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
10586 }
10587 }
10588#endif /* defined(WL_WIRELESS_EXT) */
10589
10590#ifdef WL_CFG80211
10591 ASSERT(dhd->iflist[*ifidx] != NULL);
10592 ASSERT(dhd->iflist[*ifidx]->net != NULL);
10593 if (dhd->iflist[*ifidx]->net)
10594 wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
10595#endif /* defined(WL_CFG80211) */
10596
10597 return (bcmerror);
10598}
10599
10600/* send up locally generated event */
10601void
10602dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
10603{
10604 switch (ntoh32(event->event_type)) {
10605
10606 default:
10607 break;
10608 }
10609}
10610
10611#ifdef LOG_INTO_TCPDUMP
10612void
10613dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
10614{
10615 struct sk_buff *p, *skb;
10616 uint32 pktlen;
10617 int len;
10618 dhd_if_t *ifp;
10619 dhd_info_t *dhd;
10620 uchar *skb_data;
10621 int ifidx = 0;
10622 struct ether_header eth;
10623
10624 pktlen = sizeof(eth) + data_len;
10625 dhd = dhdp->info;
10626
10627 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
10628 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
10629
10630 bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
10631 bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
10632 ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
10633 eth.ether_type = hton16(ETHER_TYPE_BRCM);
10634
10635 bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
10636 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
10637 skb = PKTTONATIVE(dhdp->osh, p);
10638 skb_data = skb->data;
10639 len = skb->len;
10640
10641 ifidx = dhd_ifname2idx(dhd, "wlan0");
10642 ifp = dhd->iflist[ifidx];
10643 if (ifp == NULL)
10644 ifp = dhd->iflist[0];
10645
10646 ASSERT(ifp);
10647 skb->dev = ifp->net;
10648 skb->protocol = eth_type_trans(skb, skb->dev);
10649 skb->data = skb_data;
10650 skb->len = len;
10651
10652 /* Strip header, count, deliver upward */
10653 skb_pull(skb, ETH_HLEN);
10654
10655 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
10656 __FUNCTION__, __LINE__);
10657 /* Send the packet */
10658 if (in_interrupt()) {
10659 netif_rx(skb);
10660 } else {
10661 netif_rx_ni(skb);
10662 }
10663 }
10664 else {
10665		/* Could not allocate an sk_buff */
10666		DHD_ERROR(("%s: unable to alloc sk_buff\n", __FUNCTION__));
10667 }
10668}
10669#endif /* LOG_INTO_TCPDUMP */
10670
10671void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
10672{
10673#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10674 struct dhd_info *dhdinfo = dhd->info;
10675
10676#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
10677 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
10678#else
10679 int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
10680#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
10681
10682 dhd_os_sdunlock(dhd);
10683 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
10684 dhd_os_sdlock(dhd);
10685#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
10686 return;
10687}
10688
10689void dhd_wait_event_wakeup(dhd_pub_t *dhd)
10690{
10691#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
10692 struct dhd_info *dhdinfo = dhd->info;
10693 if (waitqueue_active(&dhdinfo->ctrl_wait))
10694 wake_up(&dhdinfo->ctrl_wait);
10695#endif
10696 return;
10697}
10698
10699#if defined(BCMSDIO) || defined(BCMPCIE)
10700int
10701dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
10702{
10703 int ret;
10704
10705 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10706
10707 if (flag == TRUE) {
10708 /* Issue wl down command before resetting the chip */
10709 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
10710 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
10711 }
10712#ifdef PROP_TXSTATUS
10713 if (dhd->pub.wlfc_enabled)
10714 dhd_wlfc_deinit(&dhd->pub);
10715#endif /* PROP_TXSTATUS */
10716#ifdef PNO_SUPPORT
10717 if (dhd->pub.pno_state)
10718 dhd_pno_deinit(&dhd->pub);
10719#endif
10720 }
10721
10722#ifdef BCMSDIO
10723 if (!flag) {
10724 dhd_update_fw_nv_path(dhd);
10725 /* update firmware and nvram path to sdio bus */
10726 dhd_bus_update_fw_nv_path(dhd->pub.bus,
10727 dhd->fw_path, dhd->nv_path, dhd->conf_path);
10728 }
10729#endif /* BCMSDIO */
10730
10731 ret = dhd_bus_devreset(&dhd->pub, flag);
10732 if (ret) {
10733 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
10734 return ret;
10735 }
10736
10737 return ret;
10738}
10739
10740#ifdef BCMSDIO
10741int
10742dhd_net_bus_suspend(struct net_device *dev)
10743{
10744 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10745 return dhd_bus_suspend(&dhd->pub);
10746}
10747
10748int
10749dhd_net_bus_resume(struct net_device *dev, uint8 stage)
10750{
10751 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10752 return dhd_bus_resume(&dhd->pub, stage);
10753}
10754
10755#endif /* BCMSDIO */
10756#endif /* BCMSDIO || BCMPCIE */
10757
10758int net_os_set_suspend_disable(struct net_device *dev, int val)
10759{
10760 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10761 int ret = 0;
10762
10763 if (dhd) {
10764 ret = dhd->pub.suspend_disable_flag;
10765 dhd->pub.suspend_disable_flag = val;
10766 }
10767 return ret;
10768}
10769
10770int net_os_set_suspend(struct net_device *dev, int val, int force)
10771{
10772 int ret = 0;
10773 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10774
10775 if (dhd) {
10776#ifdef CONFIG_MACH_UNIVERSAL7420
10777#endif /* CONFIG_MACH_UNIVERSAL7420 */
10778#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
10779 ret = dhd_set_suspend(val, &dhd->pub);
10780#else
10781 ret = dhd_suspend_resume_helper(dhd, val, force);
10782#endif
10783#ifdef WL_CFG80211
10784 wl_cfg80211_update_power_mode(dev);
10785#endif
10786 }
10787 return ret;
10788}
10789
10790int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
10791{
10792 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10793
10794 if (dhd)
10795 dhd->pub.suspend_bcn_li_dtim = val;
10796
10797 return 0;
10798}
10799
10800#ifdef PKT_FILTER_SUPPORT
10801int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
10802{
10803#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
10804 return 0;
10805#else
10806 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10807 char *filterp = NULL;
10808 int filter_id = 0;
10809 int ret = 0;
10810
10811 if (!dhd_master_mode)
10812 add_remove = !add_remove;
10813 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
10814 if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
10815 return ret;
10816 if (num >= dhd->pub.pktfilter_count)
10817 return -EINVAL;
10818 switch (num) {
10819 case DHD_BROADCAST_FILTER_NUM:
10820 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
10821 filter_id = 101;
10822 break;
10823 case DHD_MULTICAST4_FILTER_NUM:
10824 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
10825 filter_id = 102;
10826 break;
10827 case DHD_MULTICAST6_FILTER_NUM:
10828 filterp = "103 0 0 0 0xFFFF 0x3333";
10829 filter_id = 103;
10830 break;
10831 case DHD_MDNS_FILTER_NUM:
10832 filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
10833 filter_id = 104;
10834 break;
10835 default:
10836 return -EINVAL;
10837 }
10838
10839 /* Add filter */
10840 if (add_remove) {
10841 dhd->pub.pktfilter[num] = filterp;
10842 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
10843 } else { /* Delete filter */
10844 if (dhd->pub.pktfilter[num] != NULL) {
10845 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
10846 dhd->pub.pktfilter[num] = NULL;
10847 }
10848 }
10849 return ret;
10850#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
10851}
10852
10853int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
10854
10855{
10856 int ret = 0;
10857
10858	/* Packet filtering is set only if we are still in early-suspend and
10859	 * we need either to turn it ON or turn it OFF.
10860	 * We can always turn it OFF in case of early-suspend, but we turn it
10861	 * back ON only if suspend_disable_flag was not set.
10862	 */
10863 if (dhdp && dhdp->up) {
10864 if (dhdp->in_suspend) {
10865 if (!val || (val && !dhdp->suspend_disable_flag))
10866 dhd_enable_packet_filter(val, dhdp);
10867 }
10868 }
10869 return ret;
10870}
10871
10872/* function to enable/disable packet filtering on the network device */
10873int net_os_enable_packet_filter(struct net_device *dev, int val)
10874{
10875 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10876
10877 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
10878 return dhd_os_enable_packet_filter(&dhd->pub, val);
10879}
10880#endif /* PKT_FILTER_SUPPORT */
10881
10882int
10883dhd_dev_init_ioctl(struct net_device *dev)
10884{
10885 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10886 int ret;
10887
10888 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
10889 goto done;
10890
10891done:
10892 return ret;
10893}
10894
10895int
10896dhd_dev_get_feature_set(struct net_device *dev)
10897{
10898 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
10899 dhd_pub_t *dhd = (&ptr->pub);
10900 int feature_set = 0;
10901
10902#ifdef DYNAMIC_SWOOB_DURATION
10903#ifndef CUSTOM_INTR_WIDTH
10904#define CUSTOM_INTR_WIDTH 100
10905 int intr_width = 0;
10906#endif /* CUSTOM_INTR_WIDTH */
10907#endif /* DYNAMIC_SWOOB_DURATION */
10908 if (!dhd)
10909 return feature_set;
10910
10911 if (FW_SUPPORTED(dhd, sta))
10912 feature_set |= WIFI_FEATURE_INFRA;
10913 if (FW_SUPPORTED(dhd, dualband))
10914 feature_set |= WIFI_FEATURE_INFRA_5G;
10915 if (FW_SUPPORTED(dhd, p2p))
10916 feature_set |= WIFI_FEATURE_P2P;
10917 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
10918 feature_set |= WIFI_FEATURE_SOFT_AP;
10919 if (FW_SUPPORTED(dhd, tdls))
10920 feature_set |= WIFI_FEATURE_TDLS;
10921 if (FW_SUPPORTED(dhd, vsdb))
10922 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
10923 if (FW_SUPPORTED(dhd, nan)) {
10924 feature_set |= WIFI_FEATURE_NAN;
10925		/* NAN is essential for d2d rtt */
10926 if (FW_SUPPORTED(dhd, rttd2d))
10927 feature_set |= WIFI_FEATURE_D2D_RTT;
10928 }
10929#ifdef RTT_SUPPORT
10930 feature_set |= WIFI_FEATURE_D2AP_RTT;
10931#endif /* RTT_SUPPORT */
10932#ifdef LINKSTAT_SUPPORT
10933 feature_set |= WIFI_FEATURE_LINKSTAT;
10934#endif /* LINKSTAT_SUPPORT */
10935 /* Supports STA + STA always */
10936 feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
10937#ifdef PNO_SUPPORT
10938 if (dhd_is_pno_supported(dhd)) {
10939 feature_set |= WIFI_FEATURE_PNO;
10940 feature_set |= WIFI_FEATURE_BATCH_SCAN;
10941#ifdef GSCAN_SUPPORT
10942 feature_set |= WIFI_FEATURE_GSCAN;
10943#endif /* GSCAN_SUPPORT */
10944 }
10945#endif /* PNO_SUPPORT */
10946#ifdef WL11U
10947 feature_set |= WIFI_FEATURE_HOTSPOT;
10948#endif /* WL11U */
10949 return feature_set;
10950}
10951
10952
10953int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
10954{
10955 int feature_set_full, mem_needed;
10956 int *ret;
10957
10958 *num = 0;
10959 mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10960 ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
10961 if (!ret) {
10962 DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
10963 mem_needed));
10964 return ret;
10965 }
10966
10967 feature_set_full = dhd_dev_get_feature_set(dev);
10968
10969 ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
10970 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10971 (feature_set_full & WIFI_FEATURE_NAN) |
10972 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10973 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10974 (feature_set_full & WIFI_FEATURE_PNO) |
10975 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
10976 (feature_set_full & WIFI_FEATURE_GSCAN) |
10977 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
10978 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
10979 (feature_set_full & WIFI_FEATURE_EPR);
10980
10981 ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
10982 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10983 /* Not yet verified NAN with P2P */
10984 /* (feature_set_full & WIFI_FEATURE_NAN) | */
10985 (feature_set_full & WIFI_FEATURE_P2P) |
10986 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10987 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10988 (feature_set_full & WIFI_FEATURE_EPR);
10989
10990 ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
10991 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
10992 (feature_set_full & WIFI_FEATURE_NAN) |
10993 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
10994 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
10995 (feature_set_full & WIFI_FEATURE_TDLS) |
10996 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
10997 (feature_set_full & WIFI_FEATURE_EPR);
10998 *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;
10999
11000 return ret;
11001}
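/*
 * Illustrative sketch (not driver code): the matrix returned above is
 * kmalloc'ed, so the caller owns it and must kfree() it. "num_groups" and
 * "matrix" are hypothetical local names.
 *
 *	int num_groups = 0;
 *	int *matrix = dhd_dev_get_feature_set_matrix(dev, &num_groups);
 *	if (matrix) {
 *		int i;
 *		for (i = 0; i < num_groups; i++)
 *			DHD_ERROR(("feature group %d: 0x%x\n", i, matrix[i]));
 *		kfree(matrix);
 *	}
 */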
11002#ifdef CUSTOM_FORCE_NODFS_FLAG
11003int
11004dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
11005{
11006 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11007
11008 if (nodfs)
11009 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
11010 else
11011 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
11012 dhd->pub.force_country_change = TRUE;
11013 return 0;
11014}
11015#endif /* CUSTOM_FORCE_NODFS_FLAG */
11016#ifdef PNO_SUPPORT
11017/* Linux wrapper to call common dhd_pno_stop_for_ssid */
11018int
11019dhd_dev_pno_stop_for_ssid(struct net_device *dev)
11020{
11021 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11022
11023 return (dhd_pno_stop_for_ssid(&dhd->pub));
11024}
11025/* Linux wrapper to call common dhd_pno_set_for_ssid */
11026int
11027dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
11028 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
11029{
11030 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11031
11032 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
11033 pno_repeat, pno_freq_expo_max, channel_list, nchan));
11034}
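/*
 * Illustrative sketch only: a minimal PNO request for a single SSID scanned
 * every 60 s. The wlc_ssid_ext_t field names (SSID/SSID_len) mirror
 * wlc_ssid_t and are an assumption here; passing channel_list == NULL with
 * nchan == 0 leaves channel selection to the firmware.
 *
 *	wlc_ssid_ext_t ssid;
 *	memset(&ssid, 0, sizeof(ssid));
 *	memcpy(ssid.SSID, "example-ap", 10);	// hypothetical SSID
 *	ssid.SSID_len = 10;
 *	dhd_dev_pno_set_for_ssid(dev, &ssid, 1, 60, 0, 0, NULL, 0);
 */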
11035
11036/* Linux wrapper to call common dhd_pno_enable */
11037int
11038dhd_dev_pno_enable(struct net_device *dev, int enable)
11039{
11040 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11041
11042 return (dhd_pno_enable(&dhd->pub, enable));
11043}
11044
11045/* Linux wrapper to call common dhd_pno_set_for_hotlist */
11046int
11047dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
11048 struct dhd_pno_hotlist_params *hotlist_params)
11049{
11050 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11051 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
11052}
11053/* Linux wrapper to call common dhd_pno_stop_for_batch */
11054int
11055dhd_dev_pno_stop_for_batch(struct net_device *dev)
11056{
11057 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11058 return (dhd_pno_stop_for_batch(&dhd->pub));
11059}
11060/* Linux wrapper to call common dhd_pno_set_for_batch */
11061int
11062dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
11063{
11064 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11065 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
11066}
11067/* Linux wrapper to call common dhd_pno_get_for_batch */
11068int
11069dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
11070{
11071 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11072 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
11073}
11074/* Linux wrapper to call common dhd_pno_set_mac_oui */
11075int
11076dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
11077{
11078 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11079 return (dhd_pno_set_mac_oui(&dhd->pub, oui));
11080}
11081#endif /* PNO_SUPPORT */
11082
11083#if defined(PNO_SUPPORT)
11084#ifdef GSCAN_SUPPORT
11085/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11086int
11087dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11088 void *buf, uint8 flush)
11089{
11090 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11091
11092 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
11093}
11094
11095/* Linux wrapper to call common dhd_pno_get_gscan */
11096void *
11097dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11098 void *info, uint32 *len)
11099{
11100 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11101
11102 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
11103}
11104
11105/* Linux wrapper to call common dhd_wait_batch_results_complete */
11106void
11107dhd_dev_wait_batch_results_complete(struct net_device *dev)
11108{
11109 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11110
11111 return (dhd_wait_batch_results_complete(&dhd->pub));
11112}
11113
11114/* Linux wrapper to call common dhd_pno_lock_batch_results */
11115void
11116dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
11117{
11118 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11119
11120 return (dhd_pno_lock_batch_results(&dhd->pub));
11121}
11122/* Linux wrapper to call common dhd_pno_unlock_batch_results */
11123void
11124dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
11125{
11126 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11127
11128 return (dhd_pno_unlock_batch_results(&dhd->pub));
11129}
11130
11131/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
11132int
11133dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
11134{
11135 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11136
11137 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
11138}
11139
11140/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
11141int
11142dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
11143{
11144 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11145
11146 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
11147}
11148
11149/* Linux wrapper to call common dhd_handle_swc_evt */
11150void *
11151dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes)
11152{
11153 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11154
11155 return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
11156}
11157
11158/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
11159void *
11160dhd_dev_hotlist_scan_event(struct net_device *dev,
11161 const void *data, int *send_evt_bytes, hotlist_type_t type)
11162{
11163 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11164
11165 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
11166}
11167
11168/* Linux wrapper to call common dhd_process_full_gscan_result */
11169void *
11170dhd_dev_process_full_gscan_result(struct net_device *dev,
11171const void *data, int *send_evt_bytes)
11172{
11173 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11174
11175 return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
11176}
11177
11178void
11179dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
11180{
11181 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11182
11183 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
11184
11185 return;
11186}
11187
11188int
11189dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
11190{
11191 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11192
11193 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
11194}
11195
11196/* Linux wrapper to call common dhd_retreive_batch_scan_results */
11197int
11198dhd_dev_retrieve_batch_scan(struct net_device *dev)
11199{
11200 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11201
11202 return (dhd_retreive_batch_scan_results(&dhd->pub));
11203}
11204#endif /* GSCAN_SUPPORT */
11205#endif /* PNO_SUPPORT */
11206#ifdef RTT_SUPPORT
11207/* Linux wrapper to call common dhd_rtt_set_cfg */
11208int
11209dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
11210{
11211 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11212
11213 return (dhd_rtt_set_cfg(&dhd->pub, buf));
11214}
11215int
11216dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
11217{
11218 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11219
11220 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
11221}
11222int
11223dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
11224{
11225 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11226
11227 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
11228}
11229int
11230dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
11231{
11232 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11233
11234 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
11235}
11236
11237int
11238dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
11239{
11240 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11241
11242 return (dhd_rtt_capability(&dhd->pub, capa));
11243}
11244
11245#endif /* RTT_SUPPORT */
11246
11247#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11248static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
11249{
11250 dhd_info_t *dhd;
11251 struct net_device *dev;
11252
11253 dhd = (dhd_info_t *)dhd_info;
11254 dev = dhd->iflist[0]->net;
11255
11256 if (dev) {
11257#if defined(WL_WIRELESS_EXT)
11258 wl_iw_send_priv_event(dev, "HANG");
11259#endif
11260#if defined(WL_CFG80211)
11261 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11262#endif
11263 }
11264}
11265
11266#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
11267extern dhd_pub_t *link_recovery;
11268void dhd_host_recover_link(void)
11269{
11270 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
11271 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
11272 dhd_bus_set_linkdown(link_recovery, TRUE);
11273 dhd_os_send_hang_message(link_recovery);
11274}
11275EXPORT_SYMBOL(dhd_host_recover_link);
11276#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
11277
11278int dhd_os_send_hang_message(dhd_pub_t *dhdp)
11279{
11280 int ret = 0;
11281 if (dhdp) {
11282 if (!dhdp->hang_was_sent) {
11283 dhdp->hang_was_sent = 1;
11284 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
11285 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
11286			DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d s=%d\n", __FUNCTION__,
11287 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
11288 }
11289 }
11290 return ret;
11291}
11292
11293int net_os_send_hang_message(struct net_device *dev)
11294{
11295 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11296 int ret = 0;
11297
11298 if (dhd) {
11299 /* Report FW problem when enabled */
11300 if (dhd->pub.hang_report) {
11301#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11302 ret = dhd_os_send_hang_message(&dhd->pub);
11303#else
11304 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11305#endif
11306 } else {
11307			DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
11308 __FUNCTION__));
11309 /* Enforce bus down to stop any future traffic */
11310 dhd->pub.busstate = DHD_BUS_DOWN;
11311 }
11312 }
11313 return ret;
11314}
11315
11316int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
11317{
11318 dhd_info_t *dhd = NULL;
11319 dhd_pub_t *dhdp = NULL;
11320 int reason;
11321
11322 dhd = DHD_DEV_INFO(dev);
11323 if (dhd) {
11324 dhdp = &dhd->pub;
11325 }
11326
11327 if (!dhd || !dhdp) {
11328 return 0;
11329 }
11330
11331 reason = bcm_strtoul(string_num, NULL, 0);
11332 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
11333
11334 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
11335 reason = 0;
11336 }
11337
11338 dhdp->hang_reason = reason;
11339
11340 return net_os_send_hang_message(dev);
11341}
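/*
 * Illustrative sketch: the reason string is parsed with base 0, so decimal
 * and "0x"-prefixed hex both work; values outside (HANG_REASON_MASK,
 * HANG_REASON_MAX) are mapped to 0 (unknown) before the HANG is sent up.
 *
 *	net_os_send_hang_message_reason(dev, "0x8002");	// hypothetical reason code
 */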
11342#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
11343
11344
11345int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
11346{
11347 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11348 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
11349}
11350
11351bool dhd_force_country_change(struct net_device *dev)
11352{
11353 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11354
11355 if (dhd && dhd->pub.up)
11356 return dhd->pub.force_country_change;
11357 return FALSE;
11358}
11359void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
11360 wl_country_t *cspec)
11361{
11362 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11363#ifdef CUSTOM_COUNTRY_CODE
11364 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
11365 dhd->pub.dhd_cflags);
11366#else
11367 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
11368#endif /* CUSTOM_COUNTRY_CODE */
11369}
11370void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
11371{
11372 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11373 if (dhd && dhd->pub.up) {
11374 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
11375#ifdef WL_CFG80211
11376 wl_update_wiphybands(NULL, notify);
11377#endif
11378 }
11379}
11380
11381void dhd_bus_band_set(struct net_device *dev, uint band)
11382{
11383 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11384 if (dhd && dhd->pub.up) {
11385#ifdef WL_CFG80211
11386 wl_update_wiphybands(NULL, true);
11387#endif
11388 }
11389}
11390
11391int dhd_net_set_fw_path(struct net_device *dev, char *fw)
11392{
11393 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11394
11395 if (!fw || fw[0] == '\0')
11396 return -EINVAL;
11397
11398 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
11399 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
11400
11401#if defined(SOFTAP)
11402 if (strstr(fw, "apsta") != NULL) {
11403 DHD_INFO(("GOT APSTA FIRMWARE\n"));
11404 ap_fw_loaded = TRUE;
11405 } else {
11406 DHD_INFO(("GOT STA FIRMWARE\n"));
11407 ap_fw_loaded = FALSE;
11408 }
11409#endif
11410 return 0;
11411}
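/*
 * Illustrative sketch: switching to an AP-capable image. The path below is
 * hypothetical; with SOFTAP compiled in, any firmware path containing
 * "apsta" sets ap_fw_loaded.
 *
 *	dhd_net_set_fw_path(dev, "/vendor/firmware/fw_bcmdhd_apsta.bin");
 */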
11412
11413void dhd_net_if_lock(struct net_device *dev)
11414{
11415 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11416 dhd_net_if_lock_local(dhd);
11417}
11418
11419void dhd_net_if_unlock(struct net_device *dev)
11420{
11421 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11422 dhd_net_if_unlock_local(dhd);
11423}
11424
11425static void dhd_net_if_lock_local(dhd_info_t *dhd)
11426{
11427#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11428 if (dhd)
11429 mutex_lock(&dhd->dhd_net_if_mutex);
11430#endif
11431}
11432
11433static void dhd_net_if_unlock_local(dhd_info_t *dhd)
11434{
11435#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11436 if (dhd)
11437 mutex_unlock(&dhd->dhd_net_if_mutex);
11438#endif
11439}
11440
11441static void dhd_suspend_lock(dhd_pub_t *pub)
11442{
11443#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11444 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11445 if (dhd)
11446 mutex_lock(&dhd->dhd_suspend_mutex);
11447#endif
11448}
11449
11450static void dhd_suspend_unlock(dhd_pub_t *pub)
11451{
11452#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
11453 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11454 if (dhd)
11455 mutex_unlock(&dhd->dhd_suspend_mutex);
11456#endif
11457}
11458
11459unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
11460{
11461 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11462 unsigned long flags = 0;
11463
11464 if (dhd)
11465 spin_lock_irqsave(&dhd->dhd_lock, flags);
11466
11467 return flags;
11468}
11469
11470void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
11471{
11472 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11473
11474 if (dhd)
11475 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
11476}
11477
11478/* Linux specific multipurpose spinlock API */
11479void *
11480dhd_os_spin_lock_init(osl_t *osh)
11481{
11482 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
11483 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
11484 /* and this results in kernel asserts in internal builds */
11485 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
11486 if (lock)
11487 spin_lock_init(lock);
11488 return ((void *)lock);
11489}
11490void
11491dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
11492{
11493 if (lock)
11494 MFREE(osh, lock, sizeof(spinlock_t) + 4);
11495}
11496unsigned long
11497dhd_os_spin_lock(void *lock)
11498{
11499 unsigned long flags = 0;
11500
11501 if (lock)
11502 spin_lock_irqsave((spinlock_t *)lock, flags);
11503
11504 return flags;
11505}
11506void
11507dhd_os_spin_unlock(void *lock, unsigned long flags)
11508{
11509 if (lock)
11510 spin_unlock_irqrestore((spinlock_t *)lock, flags);
11511}
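/*
 * Illustrative lifecycle sketch for the multipurpose spinlock API above
 * ("osh" and "lock" are placeholder names):
 *
 *	void *lock = dhd_os_spin_lock_init(osh);
 *	if (lock) {
 *		unsigned long flags = dhd_os_spin_lock(lock);
 *		// ... critical section ...
 *		dhd_os_spin_unlock(lock, flags);
 *		dhd_os_spin_lock_deinit(osh, lock);
 *	}
 */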
11512
11513static int
11514dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
11515{
11516 return (atomic_read(&dhd->pend_8021x_cnt));
11517}
11518
11519#define MAX_WAIT_FOR_8021X_TX 100
11520
11521int
11522dhd_wait_pend8021x(struct net_device *dev)
11523{
11524 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11525 int timeout = msecs_to_jiffies(10);
11526 int ntimes = MAX_WAIT_FOR_8021X_TX;
11527 int pend = dhd_get_pend_8021x_cnt(dhd);
11528
11529 while (ntimes && pend) {
11530 if (pend) {
11531 set_current_state(TASK_INTERRUPTIBLE);
11532 DHD_PERIM_UNLOCK(&dhd->pub);
11533 schedule_timeout(timeout);
11534 DHD_PERIM_LOCK(&dhd->pub);
11535 set_current_state(TASK_RUNNING);
11536 ntimes--;
11537 }
11538 pend = dhd_get_pend_8021x_cnt(dhd);
11539 }
11540 if (ntimes == 0)
11541 {
11542 atomic_set(&dhd->pend_8021x_cnt, 0);
11543 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
11544 }
11545 return pend;
11546}
11547
11548#ifdef DHD_DEBUG
11549static void
11550dhd_convert_memdump_type_to_str(uint32 type, char *buf)
11551{
11552 char *type_str = NULL;
11553
11554 switch (type) {
11555 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
11556 type_str = "resumed_on_timeout";
11557 break;
11558 case DUMP_TYPE_D3_ACK_TIMEOUT:
11559 type_str = "D3_ACK_timeout";
11560 break;
11561 case DUMP_TYPE_DONGLE_TRAP:
11562 type_str = "Dongle_Trap";
11563 break;
11564 case DUMP_TYPE_MEMORY_CORRUPTION:
11565 type_str = "Memory_Corruption";
11566 break;
11567 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
11568 type_str = "PKTID_AUDIT_Fail";
11569 break;
11570 case DUMP_TYPE_SCAN_TIMEOUT:
11571 type_str = "SCAN_timeout";
11572 break;
11573 case DUMP_TYPE_SCAN_BUSY:
11574 type_str = "SCAN_Busy";
11575 break;
11576 case DUMP_TYPE_BY_SYSDUMP:
11577 type_str = "BY_SYSDUMP";
11578 break;
11579 case DUMP_TYPE_BY_LIVELOCK:
11580 type_str = "BY_LIVELOCK";
11581 break;
11582 case DUMP_TYPE_AP_LINKUP_FAILURE:
11583 type_str = "BY_AP_LINK_FAILURE";
11584 break;
11585 default:
11586 type_str = "Unknown_type";
11587 break;
11588 }
11589
11590 strncpy(buf, type_str, strlen(type_str));
11591 buf[strlen(type_str)] = 0;
11592}
11593
11594int
11595write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
11596{
11597 int ret = 0;
11598 struct file *fp = NULL;
11599 mm_segment_t old_fs;
11600 loff_t pos = 0;
11601 char memdump_path[128];
11602 char memdump_type[32];
11603 struct timeval curtime;
11604 uint32 file_mode;
11605
11606 /* change to KERNEL_DS address limit */
11607 old_fs = get_fs();
11608 set_fs(KERNEL_DS);
11609
11610 /* Init file name */
11611 memset(memdump_path, 0, sizeof(memdump_path));
11612 memset(memdump_type, 0, sizeof(memdump_type));
11613 do_gettimeofday(&curtime);
11614 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
11615#ifdef CUSTOMER_HW4_DEBUG
11616 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11617 DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
11618 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11619 file_mode = O_CREAT | O_WRONLY | O_SYNC;
11620#elif defined(CUSTOMER_HW2)
11621 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11622 "/data/misc/wifi/mem_dump", memdump_type,
11623 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11624 file_mode = O_CREAT | O_WRONLY | O_SYNC;
11625#else
11626 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11627 "/installmedia/mem_dump", memdump_type,
11628 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11629 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
11630 * calling BUG_ON immediately after collecting the socram dump.
11631 * So the file write operation should directly write the contents into the
11632 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
11633 * instead of appending.
11634 */
11635 file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
11636#endif /* CUSTOMER_HW4_DEBUG */
11637
11638 /* print SOCRAM dump file path */
11639 DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));
11640
11641 /* open file to write */
11642 fp = filp_open(memdump_path, file_mode, 0644);
11643 if (IS_ERR(fp)) {
11644 ret = PTR_ERR(fp);
11645 printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
11646 goto exit;
11647 }
11648
11649 /* Write buf to file */
11650 fp->f_op->write(fp, buf, size, &pos);
11651
11652exit:
11653 /* close file before return */
11654 if (!ret)
11655 filp_close(fp, current->files);
11656
11657 /* restore previous address limit */
11658 set_fs(old_fs);
11659
11660 /* free buf before return */
11661#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
11662 DHD_OS_PREFREE(dhd, buf, size);
11663#else
11664 MFREE(dhd->osh, buf, size);
11665#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
11666
11667 return ret;
11668}
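/*
 * Ownership note: write_to_file() consumes "buf" - the buffer is freed here
 * (DHD_OS_PREFREE() or MFREE()) on every path, so callers must not touch it
 * after this call, even when an error is returned.
 */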
11669#endif /* DHD_DEBUG */
11670
11671int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
11672{
11673 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11674 unsigned long flags;
11675 int ret = 0;
11676
11677 if (dhd) {
11678 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11679 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
11680 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
11681#ifdef CONFIG_HAS_WAKELOCK
11682 if (dhd->wakelock_rx_timeout_enable)
11683 wake_lock_timeout(&dhd->wl_rxwake,
11684 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
11685 if (dhd->wakelock_ctrl_timeout_enable)
11686 wake_lock_timeout(&dhd->wl_ctrlwake,
11687 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
11688#endif
11689 dhd->wakelock_rx_timeout_enable = 0;
11690 dhd->wakelock_ctrl_timeout_enable = 0;
11691 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11692 }
11693 return ret;
11694}
11695
11696int net_os_wake_lock_timeout(struct net_device *dev)
11697{
11698 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11699 int ret = 0;
11700
11701 if (dhd)
11702 ret = dhd_os_wake_lock_timeout(&dhd->pub);
11703 return ret;
11704}
11705
11706int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
11707{
11708 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11709 unsigned long flags;
11710
11711 if (dhd) {
11712 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11713 if (val > dhd->wakelock_rx_timeout_enable)
11714 dhd->wakelock_rx_timeout_enable = val;
11715 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11716 }
11717 return 0;
11718}
11719
11720int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
11721{
11722 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11723 unsigned long flags;
11724
11725 if (dhd) {
11726 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11727 if (val > dhd->wakelock_ctrl_timeout_enable)
11728 dhd->wakelock_ctrl_timeout_enable = val;
11729 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11730 }
11731 return 0;
11732}
11733
11734int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
11735{
11736 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11737 unsigned long flags;
11738
11739 if (dhd) {
11740 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11741 dhd->wakelock_ctrl_timeout_enable = 0;
11742#ifdef CONFIG_HAS_WAKELOCK
11743 if (wake_lock_active(&dhd->wl_ctrlwake))
11744 wake_unlock(&dhd->wl_ctrlwake);
11745#endif
11746 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11747 }
11748 return 0;
11749}
11750
11751int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
11752{
11753 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11754 int ret = 0;
11755
11756 if (dhd)
11757 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
11758 return ret;
11759}
11760
11761int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
11762{
11763 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11764 int ret = 0;
11765
11766 if (dhd)
11767 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
11768 return ret;
11769}
11770
11771
11772#if defined(DHD_TRACE_WAKE_LOCK)
11773#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11774#include <linux/hashtable.h>
11775#else
11776#include <linux/hash.h>
11777#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11778
11779
11780#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11781/* Define 2^5 = 32 bucket size hash table */
11782DEFINE_HASHTABLE(wklock_history, 5);
11783#else
11784/* Define 2^5 = 32 bucket size hash table */
11785struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
11786#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11787
11788int trace_wklock_onoff = 1;
11789
11790typedef enum dhd_wklock_type {
11791 DHD_WAKE_LOCK,
11792 DHD_WAKE_UNLOCK,
11793 DHD_WAIVE_LOCK,
11794 DHD_RESTORE_LOCK
11795} dhd_wklock_t;
11796
11797struct wk_trace_record {
11798 unsigned long addr; /* Address of the instruction */
11799 dhd_wklock_t lock_type; /* lock_type */
11800 unsigned long long counter; /* counter information */
11801 struct hlist_node wklock_node; /* hash node */
11802};
11803
11804
11805static struct wk_trace_record *find_wklock_entry(unsigned long addr)
11806{
11807 struct wk_trace_record *wklock_info;
11808#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11809 hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
11810#else
11811 struct hlist_node *entry;
11812 int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
11813 hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
11814#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11815 {
11816 if (wklock_info->addr == addr) {
11817 return wklock_info;
11818 }
11819 }
11820 return NULL;
11821}
11822
11823#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11824#define HASH_ADD(hashtable, node, key) \
11825 do { \
11826 hash_add(hashtable, node, key); \
11827	} while (0)
11828#else
11829#define HASH_ADD(hashtable, node, key) \
11830 do { \
11831 int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
11832 hlist_add_head(node, &hashtable[index]); \
11833	} while (0)
11834#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
11835
11836#define STORE_WKLOCK_RECORD(wklock_type) \
11837 do { \
11838 struct wk_trace_record *wklock_info = NULL; \
11839 unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
11840 wklock_info = find_wklock_entry(func_addr); \
11841 if (wklock_info) { \
11842 if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
11843 wklock_info->counter = dhd->wakelock_counter; \
11844 } else { \
11845 wklock_info->counter++; \
11846 } \
11847 } else { \
11848 wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
11849 if (!wklock_info) {\
11850 printk("Can't allocate wk_trace_record \n"); \
11851 } else { \
11852 wklock_info->addr = func_addr; \
11853 wklock_info->lock_type = wklock_type; \
11854 if (wklock_type == DHD_WAIVE_LOCK || \
11855 wklock_type == DHD_RESTORE_LOCK) { \
11856 wklock_info->counter = dhd->wakelock_counter; \
11857 } else { \
11858 wklock_info->counter++; \
11859 } \
11860 HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
11861 } \
11862 } \
11863	} while (0)
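/*
 * Note: STORE_WKLOCK_RECORD() runs with wakelock_spinlock held (interrupts
 * disabled), which is why the record is allocated with GFP_ATOMIC; entries
 * are keyed by __builtin_return_address(0), i.e. one record per call site.
 */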
11864
11865static inline void dhd_wk_lock_rec_dump(void)
11866{
11867 int bkt;
11868 struct wk_trace_record *wklock_info;
11869
11870#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11871 hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
11872#else
11873 struct hlist_node *entry = NULL;
11874 int max_index = ARRAY_SIZE(wklock_history);
11875 for (bkt = 0; bkt < max_index; bkt++)
11876 hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
11877#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11878 {
11879 switch (wklock_info->lock_type) {
11880 case DHD_WAKE_LOCK:
11881 DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n",
11882 (void *)wklock_info->addr, wklock_info->counter));
11883 break;
11884 case DHD_WAKE_UNLOCK:
11885 DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
11886 (void *)wklock_info->addr, wklock_info->counter));
11887 break;
11888 case DHD_WAIVE_LOCK:
11889 DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n",
11890 (void *)wklock_info->addr, wklock_info->counter));
11891 break;
11892 case DHD_RESTORE_LOCK:
11893 DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
11894 (void *)wklock_info->addr, wklock_info->counter));
11895 break;
11896 }
11897 }
11898}
11899
11900static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
11901{
11902 unsigned long flags;
11903#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
11904 int i;
11905#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
11906
11907 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11908#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11909 hash_init(wklock_history);
11910#else
11911 for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
11912 INIT_HLIST_HEAD(&wklock_history[i]);
11913#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11914 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11915}
11916
11917static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
11918{
11919 int bkt;
11920 struct wk_trace_record *wklock_info;
11921 struct hlist_node *tmp;
11922 unsigned long flags;
11923#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
11924 struct hlist_node *entry = NULL;
11925 int max_index = ARRAY_SIZE(wklock_history);
11926#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
11927
11928 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11929#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11930 hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
11931#else
11932 for (bkt = 0; bkt < max_index; bkt++)
11933 hlist_for_each_entry_safe(wklock_info, entry, tmp,
11934 &wklock_history[bkt], wklock_node)
11935#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11936 {
11937#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11938 hash_del(&wklock_info->wklock_node);
11939#else
11940 hlist_del_init(&wklock_info->wklock_node);
11941#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11942 kfree(wklock_info);
11943 }
11944 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11945}
11946
11947void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
11948{
11949 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
11950 unsigned long flags;
11951
11952	DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record\n"));
11953 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11954 dhd_wk_lock_rec_dump();
11955 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11956 DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
11957}
11958#else
11959#define STORE_WKLOCK_RECORD(wklock_type)
11960#endif /* DHD_TRACE_WAKE_LOCK */
11961
11962int dhd_os_wake_lock(dhd_pub_t *pub)
11963{
11964 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11965 unsigned long flags;
11966 int ret = 0;
11967
11968 if (dhd) {
11969 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11970 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
11971#ifdef CONFIG_HAS_WAKELOCK
11972 wake_lock(&dhd->wl_wifi);
11973#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
11974 dhd_bus_dev_pm_stay_awake(pub);
11975#endif
11976 }
11977#ifdef DHD_TRACE_WAKE_LOCK
11978 if (trace_wklock_onoff) {
11979 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
11980 }
11981#endif /* DHD_TRACE_WAKE_LOCK */
11982 dhd->wakelock_counter++;
11983 ret = dhd->wakelock_counter;
11984 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11985 }
11986
11987 return ret;
11988}
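/*
 * Illustrative pairing sketch ("pub" is a placeholder dhd_pub_t *): every
 * dhd_os_wake_lock() must eventually be matched by a dhd_os_wake_unlock()
 * (below); the counter keeps the wakelock held until the last holder
 * releases it.
 *
 *	dhd_os_wake_lock(pub);
 *	// ... work that must not race system suspend ...
 *	dhd_os_wake_unlock(pub);
 */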
11989
11990int dhd_event_wake_lock(dhd_pub_t *pub)
11991{
11992 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11993 unsigned long flags;
11994 int ret = 0;
11995
11996 if (dhd) {
11997 spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
11998 if (dhd->wakelock_event_counter == 0) {
11999#ifdef CONFIG_HAS_WAKELOCK
12000 wake_lock(&dhd->wl_evtwake);
12001#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12002 dhd_bus_dev_pm_stay_awake(pub);
12003#endif
12004 }
12005 dhd->wakelock_event_counter++;
12006 ret = dhd->wakelock_event_counter;
12007 spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
12008 }
12009
12010 return ret;
12011}
12012
12013int net_os_wake_lock(struct net_device *dev)
12014{
12015 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12016 int ret = 0;
12017
12018 if (dhd)
12019 ret = dhd_os_wake_lock(&dhd->pub);
12020 return ret;
12021}
12022
12023int dhd_os_wake_unlock(dhd_pub_t *pub)
12024{
12025 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12026 unsigned long flags;
12027 int ret = 0;
12028
12029 dhd_os_wake_lock_timeout(pub);
12030 if (dhd) {
12031 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12032
12033 if (dhd->wakelock_counter > 0) {
12034 dhd->wakelock_counter--;
12035#ifdef DHD_TRACE_WAKE_LOCK
12036 if (trace_wklock_onoff) {
12037 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
12038 }
12039#endif /* DHD_TRACE_WAKE_LOCK */
12040 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
12041#ifdef CONFIG_HAS_WAKELOCK
12042 wake_unlock(&dhd->wl_wifi);
12043#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12044 dhd_bus_dev_pm_relax(pub);
12045#endif
12046 }
12047 ret = dhd->wakelock_counter;
12048 }
12049 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12050 }
12051 return ret;
12052}
12053
12054int dhd_event_wake_unlock(dhd_pub_t *pub)
12055{
12056 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12057 unsigned long flags;
12058 int ret = 0;
12059
12060 if (dhd) {
12061 spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
12062
12063 if (dhd->wakelock_event_counter > 0) {
12064 dhd->wakelock_event_counter--;
12065 if (dhd->wakelock_event_counter == 0) {
12066#ifdef CONFIG_HAS_WAKELOCK
12067 wake_unlock(&dhd->wl_evtwake);
12068#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12069 dhd_bus_dev_pm_relax(pub);
12070#endif
12071 }
12072 ret = dhd->wakelock_event_counter;
12073 }
12074 spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
12075 }
12076 return ret;
12077}
12078
12079int dhd_os_check_wakelock(dhd_pub_t *pub)
12080{
12081#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
12082 KERNEL_VERSION(2, 6, 36)))
12083 dhd_info_t *dhd;
12084
12085 if (!pub)
12086 return 0;
12087 dhd = (dhd_info_t *)(pub->info);
12088#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
12089
12090#ifdef CONFIG_HAS_WAKELOCK
12091 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
12092 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
12093 (wake_lock_active(&dhd->wl_wdwake))))
12094 return 1;
12095#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12096 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
12097 return 1;
12098#endif
12099 return 0;
12100}
12101
12102int
12103dhd_os_check_wakelock_all(dhd_pub_t *pub)
12104{
12105#ifdef CONFIG_HAS_WAKELOCK
12106 int l1, l2, l3, l4, l7;
12107 int l5 = 0, l6 = 0;
12108 int c, lock_active;
12109#endif /* CONFIG_HAS_WAKELOCK */
12110#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
12111 KERNEL_VERSION(2, 6, 36)))
12112 dhd_info_t *dhd;
12113
12114 if (!pub) {
12115 return 0;
12116 }
12117 dhd = (dhd_info_t *)(pub->info);
12118 if (!dhd) {
12119 return 0;
12120 }
12121#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
12122
12123#ifdef CONFIG_HAS_WAKELOCK
12124 c = dhd->wakelock_counter;
12125 l1 = wake_lock_active(&dhd->wl_wifi);
12126 l2 = wake_lock_active(&dhd->wl_wdwake);
12127 l3 = wake_lock_active(&dhd->wl_rxwake);
12128 l4 = wake_lock_active(&dhd->wl_ctrlwake);
12129#ifdef BCMPCIE_OOB_HOST_WAKE
12130 l5 = wake_lock_active(&dhd->wl_intrwake);
12131#endif /* BCMPCIE_OOB_HOST_WAKE */
12132#ifdef DHD_USE_SCAN_WAKELOCK
12133 l6 = wake_lock_active(&dhd->wl_scanwake);
12134#endif /* DHD_USE_SCAN_WAKELOCK */
12135 l7 = wake_lock_active(&dhd->wl_evtwake);
12136 lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);
12137
12138 /* Indicate to the Host to avoid going to suspend if internal locks are up */
12139 if (dhd && lock_active) {
12140 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
12141 "ctl-%d intr-%d scan-%d evt-%d\n",
12142 __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
12143 return 1;
12144 }
12145#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12146 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
12147 return 1;
12148 }
12149#endif /* CONFIG_HAS_WAKELOCK */
12150 return 0;
12151}
12152
12153int net_os_wake_unlock(struct net_device *dev)
12154{
12155 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12156 int ret = 0;
12157
12158 if (dhd)
12159 ret = dhd_os_wake_unlock(&dhd->pub);
12160 return ret;
12161}
12162
12163int dhd_os_wd_wake_lock(dhd_pub_t *pub)
12164{
12165 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12166 unsigned long flags;
12167 int ret = 0;
12168
12169 if (dhd) {
12170 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12171#ifdef CONFIG_HAS_WAKELOCK
12172 /* if wakelock_wd_counter was never used : lock it at once */
12173 if (!dhd->wakelock_wd_counter)
12174 wake_lock(&dhd->wl_wdwake);
12175#endif
12176 dhd->wakelock_wd_counter++;
12177 ret = dhd->wakelock_wd_counter;
12178 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12179 }
12180 return ret;
12181}
12182
12183int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
12184{
12185 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12186 unsigned long flags;
12187 int ret = 0;
12188
12189 if (dhd) {
12190 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12191 if (dhd->wakelock_wd_counter) {
12192 dhd->wakelock_wd_counter = 0;
12193#ifdef CONFIG_HAS_WAKELOCK
12194 wake_unlock(&dhd->wl_wdwake);
12195#endif
12196 }
12197 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12198 }
12199 return ret;
12200}
12201
12202#ifdef BCMPCIE_OOB_HOST_WAKE
12203void
12204dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
12205{
12206#ifdef CONFIG_HAS_WAKELOCK
12207 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12208
12209 if (dhd) {
12210 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
12211 }
12212#endif /* CONFIG_HAS_WAKELOCK */
12213}
12214
12215void
12216dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
12217{
12218#ifdef CONFIG_HAS_WAKELOCK
12219 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12220
12221 if (dhd) {
12222 /* if wl_intrwake is active, unlock it */
12223 if (wake_lock_active(&dhd->wl_intrwake)) {
12224 wake_unlock(&dhd->wl_intrwake);
12225 }
12226 }
12227#endif /* CONFIG_HAS_WAKELOCK */
12228}
12229#endif /* BCMPCIE_OOB_HOST_WAKE */
12230
12231#ifdef DHD_USE_SCAN_WAKELOCK
12232void
12233dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
12234{
12235#ifdef CONFIG_HAS_WAKELOCK
12236 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12237
12238 if (dhd) {
12239 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
12240 }
12241#endif /* CONFIG_HAS_WAKELOCK */
12242}
12243
12244void
12245dhd_os_scan_wake_unlock(dhd_pub_t *pub)
12246{
12247#ifdef CONFIG_HAS_WAKELOCK
12248 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12249
12250 if (dhd) {
12251 /* if wl_scanwake is active, unlock it */
12252 if (wake_lock_active(&dhd->wl_scanwake)) {
12253 wake_unlock(&dhd->wl_scanwake);
12254 }
12255 }
12256#endif /* CONFIG_HAS_WAKELOCK */
12257}
12258#endif /* DHD_USE_SCAN_WAKELOCK */
12259
12260/* Waive wakelocks for operations such as IOVARs in the suspend path; must be closed
12261 * by a paired call to dhd_os_wake_lock_restore(). Returns the watchdog wakelock counter.
12262 */
12263int dhd_os_wake_lock_waive(dhd_pub_t *pub)
12264{
12265 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12266 unsigned long flags;
12267 int ret = 0;
12268
12269 if (dhd) {
12270 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12271
12272 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12273 if (dhd->waive_wakelock == FALSE) {
12274#ifdef DHD_TRACE_WAKE_LOCK
12275 if (trace_wklock_onoff) {
12276 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
12277 }
12278#endif /* DHD_TRACE_WAKE_LOCK */
12279 /* record current lock status */
12280 dhd->wakelock_before_waive = dhd->wakelock_counter;
12281 dhd->waive_wakelock = TRUE;
12282 }
12283 ret = dhd->wakelock_wd_counter;
12284 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12285 }
12286 return ret;
12287}
12288
12289int dhd_os_wake_lock_restore(dhd_pub_t *pub)
12290{
12291 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12292 unsigned long flags;
12293 int ret = 0;
12294
12295 if (!dhd)
12296 return 0;
12297
12298 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12299
12300 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12301 if (!dhd->waive_wakelock)
12302 goto exit;
12303
12304 dhd->waive_wakelock = FALSE;
12305 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
12306 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
12307 * the lock in between, do the same by calling wake_unlock or pm_relax
12308 */
12309#ifdef DHD_TRACE_WAKE_LOCK
12310 if (trace_wklock_onoff) {
12311 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
12312 }
12313#endif /* DHD_TRACE_WAKE_LOCK */
12314
12315 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
12316#ifdef CONFIG_HAS_WAKELOCK
12317 wake_lock(&dhd->wl_wifi);
12318#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12319 dhd_bus_dev_pm_stay_awake(&dhd->pub);
12320#endif
12321 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
12322#ifdef CONFIG_HAS_WAKELOCK
12323 wake_unlock(&dhd->wl_wifi);
12324#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12325 dhd_bus_dev_pm_relax(&dhd->pub);
12326#endif
12327 }
12328 dhd->wakelock_before_waive = 0;
12329exit:
12330 ret = dhd->wakelock_wd_counter;
12331 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12332 return ret;
12333}
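/*
 * Illustrative pairing sketch for the waive/restore pair above, as used
 * around IOVARs issued from the suspend path ("pub" is a placeholder):
 *
 *	dhd_os_wake_lock_waive(pub);
 *	// ... issue IOVARs without bumping the main wakelock ...
 *	dhd_os_wake_lock_restore(pub);
 */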
12334
12335void dhd_os_wake_lock_init(struct dhd_info *dhd)
12336{
12337 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
12338 dhd->wakelock_event_counter = 0;
12339 dhd->wakelock_counter = 0;
12340 dhd->wakelock_rx_timeout_enable = 0;
12341 dhd->wakelock_ctrl_timeout_enable = 0;
12342#ifdef CONFIG_HAS_WAKELOCK
12343 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
12344 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
12345 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
12346 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
12347#ifdef BCMPCIE_OOB_HOST_WAKE
12348 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
12349#endif /* BCMPCIE_OOB_HOST_WAKE */
12350#ifdef DHD_USE_SCAN_WAKELOCK
12351 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
12352#endif /* DHD_USE_SCAN_WAKELOCK */
12353#endif /* CONFIG_HAS_WAKELOCK */
12354#ifdef DHD_TRACE_WAKE_LOCK
12355 dhd_wk_lock_trace_init(dhd);
12356#endif /* DHD_TRACE_WAKE_LOCK */
12357}
12358
12359void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
12360{
12361 DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
12362#ifdef CONFIG_HAS_WAKELOCK
12363 dhd->wakelock_event_counter = 0;
12364 dhd->wakelock_counter = 0;
12365 dhd->wakelock_rx_timeout_enable = 0;
12366 dhd->wakelock_ctrl_timeout_enable = 0;
12367 wake_lock_destroy(&dhd->wl_wifi);
12368 wake_lock_destroy(&dhd->wl_rxwake);
12369 wake_lock_destroy(&dhd->wl_ctrlwake);
12370 wake_lock_destroy(&dhd->wl_evtwake);
12371#ifdef BCMPCIE_OOB_HOST_WAKE
12372 wake_lock_destroy(&dhd->wl_intrwake);
12373#endif /* BCMPCIE_OOB_HOST_WAKE */
12374#ifdef DHD_USE_SCAN_WAKELOCK
12375 wake_lock_destroy(&dhd->wl_scanwake);
12376#endif /* DHD_USE_SCAN_WAKELOCK */
12377#ifdef DHD_TRACE_WAKE_LOCK
12378 dhd_wk_lock_trace_deinit(dhd);
12379#endif /* DHD_TRACE_WAKE_LOCK */
12380#endif /* CONFIG_HAS_WAKELOCK */
12381}
12382
12383bool dhd_os_check_if_up(dhd_pub_t *pub)
12384{
12385 if (!pub)
12386 return FALSE;
12387 return pub->up;
12388}
12389
12390/* function to collect firmware, chip id and chip version info */
12391void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
12392{
12393 int i;
12394
12395 i = snprintf(info_string, sizeof(info_string),
12396 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
12397 printf("%s\n", info_string);
12398
12399 if (!dhdp)
12400 return;
12401
12402 i = snprintf(&info_string[i], sizeof(info_string) - i,
12403 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
12404 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
12405}
12406
12407int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
12408{
12409 int ifidx;
12410 int ret = 0;
12411 dhd_info_t *dhd = NULL;
12412
12413 if (!net || !DEV_PRIV(net)) {
12414 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
12415 return -EINVAL;
12416 }
12417
12418 dhd = DHD_DEV_INFO(net);
12419 if (!dhd)
12420 return -EINVAL;
12421
12422 ifidx = dhd_net2idx(dhd, net);
12423 if (ifidx == DHD_BAD_IF) {
12424 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
12425 return -ENODEV;
12426 }
12427
12428 DHD_OS_WAKE_LOCK(&dhd->pub);
12429 DHD_PERIM_LOCK(&dhd->pub);
12430
12431 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
12432 dhd_check_hang(net, &dhd->pub, ret);
12433
12434 DHD_PERIM_UNLOCK(&dhd->pub);
12435 DHD_OS_WAKE_UNLOCK(&dhd->pub);
12436
12437 return ret;
12438}
12439
12440bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
12441{
12442 struct net_device *net;
12443
12444 net = dhd_idx2net(dhdp, ifidx);
12445 if (!net) {
12446 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
12447 return -EINVAL;
12448 }
12449
12450 return dhd_check_hang(net, dhdp, ret);
12451}
12452
12453/* Return instance */
12454int dhd_get_instance(dhd_pub_t *dhdp)
12455{
12456 return dhdp->info->unit;
12457}
12458
12459
12460#ifdef PROP_TXSTATUS
12461
12462void dhd_wlfc_plat_init(void *dhd)
12463{
12464#ifdef USE_DYNAMIC_F2_BLKSIZE
12465 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
12466#endif /* USE_DYNAMIC_F2_BLKSIZE */
12467 return;
12468}
12469
12470void dhd_wlfc_plat_deinit(void *dhd)
12471{
12472#ifdef USE_DYNAMIC_F2_BLKSIZE
12473 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
12474#endif /* USE_DYNAMIC_F2_BLKSIZE */
12475 return;
12476}
12477
12478bool dhd_wlfc_skip_fc(void)
12479{
12480#ifdef SKIP_WLFC_ON_CONCURRENT
12481#ifdef WL_CFG80211
12482
12483 /* enable flow control in vsdb mode */
12484 return !(wl_cfg80211_is_concurrent_mode());
12485#else
12486 return TRUE; /* skip flow control */
12487#endif /* WL_CFG80211 */
12488
12489#else
12490 return FALSE;
12491#endif /* SKIP_WLFC_ON_CONCURRENT */
12492}
12493#endif /* PROP_TXSTATUS */
12494
12495#ifdef BCMDBGFS
12496#include <linux/debugfs.h>
12497
12498typedef struct dhd_dbgfs {
12499 struct dentry *debugfs_dir;
12500 struct dentry *debugfs_mem;
12501 dhd_pub_t *dhdp;
12502 uint32 size;
12503} dhd_dbgfs_t;
12504
12505dhd_dbgfs_t g_dbgfs;
12506
12507extern uint32 dhd_readregl(void *bp, uint32 addr);
12508extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
12509
12510static int
12511dhd_dbg_state_open(struct inode *inode, struct file *file)
12512{
12513 file->private_data = inode->i_private;
12514 return 0;
12515}
12516
12517static ssize_t
12518dhd_dbg_state_read(struct file *file, char __user *ubuf,
12519 size_t count, loff_t *ppos)
12520{
12521 ssize_t rval;
12522 uint32 tmp;
12523 loff_t pos = *ppos;
12524 size_t ret;
12525
12526 if (pos < 0)
12527 return -EINVAL;
12528 if (pos >= g_dbgfs.size || !count)
12529 return 0;
12530 if (count > g_dbgfs.size - pos)
12531 count = g_dbgfs.size - pos;
12532
12533 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
12534 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
12535
12536 ret = copy_to_user(ubuf, &tmp, 4);
12537 if (ret == count)
12538 return -EFAULT;
12539
12540 count -= ret;
12541 *ppos = pos + count;
12542 rval = count;
12543
12544 return rval;
12545}
12546
12547
12548static ssize_t
12549dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
12550{
12551 loff_t pos = *ppos;
12552 size_t ret;
12553 uint32 buf;
12554
12555 if (pos < 0)
12556 return -EINVAL;
12557 if (pos >= g_dbgfs.size || !count)
12558 return 0;
12559 if (count > g_dbgfs.size - pos)
12560 count = g_dbgfs.size - pos;
12561
12562 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
12563 if (ret == count)
12564 return -EFAULT;
12565
12566 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
12567 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
12568
12569 return count;
12570}
12571
12572
12573loff_t
12574dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
12575{
12576 loff_t pos = -1;
12577
12578 switch (whence) {
12579 case 0:
12580 pos = off;
12581 break;
12582 case 1:
12583 pos = file->f_pos + off;
12584 break;
12585 case 2:
12586 pos = g_dbgfs.size - off;
12587 }
12588 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
12589}
12590
12591static const struct file_operations dhd_dbg_state_ops = {
12592 .read = dhd_dbg_state_read,
12593 .write = dhd_debugfs_write,
12594 .open = dhd_dbg_state_open,
12595 .llseek = dhd_debugfs_lseek
12596};
12597
12598static void dhd_dbg_create(void)
12599{
12600 if (g_dbgfs.debugfs_dir) {
12601 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
12602 NULL, &dhd_dbg_state_ops);
12603 }
12604}
12605
12606void dhd_dbg_init(dhd_pub_t *dhdp)
12607{
12608 g_dbgfs.dhdp = dhdp;
12609 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
12610
12611 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
12612 if (IS_ERR(g_dbgfs.debugfs_dir)) {
12613 g_dbgfs.debugfs_dir = NULL;
12614 return;
12615 }
12616
12617 dhd_dbg_create();
12618
12619 return;
12620}
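/*
 * Layout note: with debugfs mounted in the usual place this creates
 * <debugfs>/dhd/mem, a 4-byte-aligned window (size 0x20000000) onto dongle
 * register space, serviced by dhd_readregl()/dhd_writeregl().
 */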
12621
12622void dhd_dbg_remove(void)
12623{
12624 debugfs_remove(g_dbgfs.debugfs_mem);
12625 debugfs_remove(g_dbgfs.debugfs_dir);
12626
12627 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
12628}
12629#endif /* BCMDBGFS */
12630
12631#ifdef WLMEDIA_HTSF
12632
12633static
12634void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
12635{
12636 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
12637 struct sk_buff *skb;
12638 uint32 htsf = 0;
12639 uint16 dport = 0, oldmagic = 0xACAC;
12640 char *p1;
12641 htsfts_t ts;
12642
12643 /* timestamp packet */
12644
12645 p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
12646
12647 if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
12648/* memcpy(&proto, p1+26, 4); */
12649 memcpy(&dport, p1+40, 2);
12650/* proto = ((ntoh32(proto))>> 16) & 0xFF; */
12651 dport = ntoh16(dport);
12652 }
12653
12654	/* timestamp only if icmp or udp iperf with port 5555 */
12655/* if (proto == 17 && dport == tsport) { */
12656 if (dport >= tsport && dport <= tsport + 20) {
12657
12658 skb = (struct sk_buff *) pktbuf;
12659
12660 htsf = dhd_get_htsf(dhd, 0);
12661 memset(skb->data + 44, 0, 2); /* clear checksum */
12662 memcpy(skb->data+82, &oldmagic, 2);
12663 memcpy(skb->data+84, &htsf, 4);
12664
12665 memset(&ts, 0, sizeof(htsfts_t));
12666 ts.magic = HTSFMAGIC;
12667 ts.prio = PKTPRIO(pktbuf);
12668 ts.seqnum = htsf_seqnum++;
12669 ts.c10 = get_cycles();
12670 ts.t10 = htsf;
12671 ts.endmagic = HTSFENDMAGIC;
12672
12673 memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
12674 }
12675}
12676
12677static void dhd_dump_htsfhisto(histo_t *his, char *s)
12678{
12679 int pktcnt = 0, curval = 0, i;
12680 for (i = 0; i < (NUMBIN-2); i++) {
12681 curval += 500;
12682 printf("%d ", his->bin[i]);
12683 pktcnt += his->bin[i];
12684 }
12685 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
12686 his->bin[NUMBIN-1], s);
12687}
12688
12689static
12690void sorttobin(int value, histo_t *histo)
12691{
12692 int i, binval = 0;
12693
12694 if (value < 0) {
12695 histo->bin[NUMBIN-1]++;
12696 return;
12697 }
12698 if (value > histo->bin[NUMBIN-2]) /* store the max value */
12699 histo->bin[NUMBIN-2] = value;
12700
12701 for (i = 0; i < (NUMBIN-2); i++) {
12702		binval += 500; /* 500 ms bins */
12703 if (value <= binval) {
12704 histo->bin[i]++;
12705 return;
12706 }
12707 }
12708 histo->bin[NUMBIN-3]++;
12709}
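/*
 * Worked example (assuming NUMBIN >= 5): with bins 500 units wide, a delta
 * of 1200 lands in bin[2] (1000 < 1200 <= 1500); negative deltas are counted
 * in bin[NUMBIN-1], and the running maximum is kept in bin[NUMBIN-2].
 */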
12710
12711static
12712void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
12713{
12714 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12715 struct sk_buff *skb;
12716 char *p1;
12717 uint16 old_magic;
12718 int d1, d2, d3, end2end;
12719 htsfts_t *htsf_ts;
12720 uint32 htsf;
12721
12722 skb = PKTTONATIVE(dhdp->osh, pktbuf);
12723 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
12724
12725	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
12726 memcpy(&old_magic, p1+78, 2);
12727 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
12728 } else {
12729 return;
12730 }
12731 if (htsf_ts->magic == HTSFMAGIC) {
12732 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
12733 htsf_ts->cE0 = get_cycles();
12734 }
12735
12736 if (old_magic == 0xACAC) {
12737
12738 tspktcnt++;
12739 htsf = dhd_get_htsf(dhd, 0);
12740 memcpy(skb->data+92, &htsf, sizeof(uint32));
12741
12742 memcpy(&ts[tsidx].t1, skb->data+80, 16);
12743
12744 d1 = ts[tsidx].t2 - ts[tsidx].t1;
12745 d2 = ts[tsidx].t3 - ts[tsidx].t2;
12746 d3 = ts[tsidx].t4 - ts[tsidx].t3;
12747 end2end = ts[tsidx].t4 - ts[tsidx].t1;
12748
12749 sorttobin(d1, &vi_d1);
12750 sorttobin(d2, &vi_d2);
12751 sorttobin(d3, &vi_d3);
12752 sorttobin(end2end, &vi_d4);
12753
12754 if (end2end > 0 && end2end > maxdelay) {
12755 maxdelay = end2end;
12756 maxdelaypktno = tspktcnt;
12757 memcpy(&maxdelayts, &ts[tsidx], 16);
12758 }
12759 if (++tsidx >= TSMAX)
12760 tsidx = 0;
12761 }
12762}
12763
12764uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
12765{
12766 uint32 htsf = 0, cur_cycle, delta, delta_us;
12767 uint32 factor, baseval, baseval2;
12768 cycles_t t;
12769
12770 t = get_cycles();
12771 cur_cycle = t;
12772
12773 if (cur_cycle > dhd->htsf.last_cycle) {
12774 delta = cur_cycle - dhd->htsf.last_cycle;
12775 } else {
12776 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
12777 }
12778
12779 delta = delta >> 4;
12780
12781 if (dhd->htsf.coef) {
12782 /* times ten to get the first digit */
12783 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
12784 baseval = (delta*10)/factor;
12785 baseval2 = (delta*10)/(factor+1);
12786 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
12787 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
12788 } else {
12789 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
12790 }
12791
12792 return htsf;
12793}
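/*
 * Worked example of the fixed-point math above (values are hypothetical):
 * with coef = 3, coefdec1 = 2, coefdec2 = 5, factor = 32; for delta = 6400
 * (already >> 4), baseval = 64000/32 = 2000, baseval2 = 64000/33 = 1939, so
 * delta_us = 2000 - ((2000 - 1939) * 5) / 10 = 1970, i.e. roughly
 * delta / 3.25 (= 1969.2) with the second decimal interpolated.
 */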
12794
12795static void dhd_dump_latency(void)
12796{
12797 int i, max = 0;
12798 int d1, d2, d3, d4, d5;
12799
12800 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
12801 for (i = 0; i < TSMAX; i++) {
12802 d1 = ts[i].t2 - ts[i].t1;
12803 d2 = ts[i].t3 - ts[i].t2;
12804 d3 = ts[i].t4 - ts[i].t3;
12805 d4 = ts[i].t4 - ts[i].t1;
12806 d5 = ts[max].t4-ts[max].t1;
12807 if (d4 > d5 && d4 > 0) {
12808 max = i;
12809 }
12810 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
12811 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
12812 d1, d2, d3, d4, i);
12813 }
12814
12815 printf("current idx = %d \n", tsidx);
12816
12817 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
12818 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
12819 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
12820 maxdelayts.t2 - maxdelayts.t1,
12821 maxdelayts.t3 - maxdelayts.t2,
12822 maxdelayts.t4 - maxdelayts.t3,
12823 maxdelayts.t4 - maxdelayts.t1);
12824}
12825
12826
12827static int
12828dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
12829{
12830 wl_ioctl_t ioc;
12831 char buf[32];
12832 int ret;
12833 uint32 s1, s2;
12834
12835 struct tsf {
12836 uint32 low;
12837 uint32 high;
12838 } tsf_buf;
12839
12840 memset(&ioc, 0, sizeof(ioc));
12841 memset(&tsf_buf, 0, sizeof(tsf_buf));
12842
12843 ioc.cmd = WLC_GET_VAR;
12844 ioc.buf = buf;
12845 ioc.len = (uint)sizeof(buf);
12846 ioc.set = FALSE;
12847
12848 strncpy(buf, "tsf", sizeof(buf) - 1);
12849 buf[sizeof(buf) - 1] = '\0';
12850 s1 = dhd_get_htsf(dhd, 0);
12851 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
12852 if (ret == -EIO) {
12853 DHD_ERROR(("%s: tsf is not supported by device\n",
12854 dhd_ifname(&dhd->pub, ifidx)));
12855 return -EOPNOTSUPP;
12856 }
12857 return ret;
12858 }
12859 s2 = dhd_get_htsf(dhd, 0);
12860
12861 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
12862 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
12863 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
12864 dhd->htsf.coefdec2, s2-tsf_buf.low);
12865 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
12866 return 0;
12867}
12868
12869void htsf_update(dhd_info_t *dhd, void *data)
12870{
12871 static ulong cur_cycle = 0, prev_cycle = 0;
12872 uint32 htsf, tsf_delta = 0;
12873 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
12874 ulong b, a;
12875 cycles_t t;
12876
12877 	/* cycles_t in include/mips/timex.h */
12878
12879 t = get_cycles();
12880
12881 prev_cycle = cur_cycle;
12882 cur_cycle = t;
12883
12884 if (cur_cycle > prev_cycle)
12885 cyc_delta = cur_cycle - prev_cycle;
12886 else {
12887 b = cur_cycle;
12888 a = prev_cycle;
12889 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
12890 }
12891
12892 if (data == NULL)
12893 printf(" tsf update ata point er is null \n");
12894
12895 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
12896 memcpy(&cur_tsf, data, sizeof(tsf_t));
12897
12898 if (cur_tsf.low == 0) {
12899 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
12900 return;
12901 }
12902
12903 if (cur_tsf.low > prev_tsf.low)
12904 tsf_delta = (cur_tsf.low - prev_tsf.low);
12905 else {
12906 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
12907 cur_tsf.low, prev_tsf.low));
12908 if (cur_tsf.high > prev_tsf.high) {
12909 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
12910 			DHD_INFO((" ---- Wrap around tsf counter adjusted TSF=%08X\n", tsf_delta));
12911 } else {
12912 return; /* do not update */
12913 }
12914 }
12915
12916 if (tsf_delta) {
12917 hfactor = cyc_delta / tsf_delta;
12918 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
12919 dec1 = tmp/tsf_delta;
12920 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
12921 tmp = (tmp - (dec1*tsf_delta))*10;
12922 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
12923
12924 if (dec3 > 4) {
12925 if (dec2 == 9) {
12926 dec2 = 0;
12927 if (dec1 == 9) {
12928 dec1 = 0;
12929 hfactor++;
12930 } else {
12931 dec1++;
12932 }
12933 } else {
12934 dec2++;
12935 }
12936 }
12937 }
12938
12939 if (hfactor) {
12940 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
12941 dhd->htsf.coef = hfactor;
12942 dhd->htsf.last_cycle = cur_cycle;
12943 dhd->htsf.last_tsf = cur_tsf.low;
12944 dhd->htsf.coefdec1 = dec1;
12945 dhd->htsf.coefdec2 = dec2;
12946 } else {
12947 htsf = prev_tsf.low;
12948 }
12949}
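
/*
 * Digit-extraction sketch (hypothetical numbers): with cyc_delta=12345 and
 * tsf_delta=1000 the true ratio is 12.345 cycles per TSF tick:
 *	hfactor = 12345/1000			= 12
 *	tmp     = (12345 - 12*1000)*10		= 3450,  dec1 = 3450/1000 = 3
 *	dec2    = ((3450 - 3*1000)*10)/1000	= 4
 *	tmp     = (3450 - 3*1000)*10		= 4500
 *	dec3    = ((4500 - 4*1000)*10)/1000	= 5
 * dec3 > 4 triggers the round-half-up above, carrying into dec2 (and, on
 * overflow, into dec1 and hfactor), so the stored coefficient is 12.35.
 */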
12950
12951#endif /* WLMEDIA_HTSF */
12952
12953#ifdef CUSTOM_SET_CPUCORE
12954void dhd_set_cpucore(dhd_pub_t *dhd, int set)
12955{
12956 int e_dpc = 0, e_rxf = 0, retry_set = 0;
12957
12958 if (!(dhd->chan_isvht80)) {
12959 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
12960 return;
12961 }
12962
12963 if (DPC_CPUCORE) {
12964 do {
12965 if (set == TRUE) {
12966 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
12967 cpumask_of(DPC_CPUCORE));
12968 } else {
12969 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
12970 cpumask_of(PRIMARY_CPUCORE));
12971 }
12972 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
12973 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
12974 return;
12975 }
12976 if (e_dpc < 0)
12977 OSL_SLEEP(1);
12978 } while (e_dpc < 0);
12979 }
12980 if (RXF_CPUCORE) {
12981 do {
12982 if (set == TRUE) {
12983 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
12984 cpumask_of(RXF_CPUCORE));
12985 } else {
12986 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
12987 cpumask_of(PRIMARY_CPUCORE));
12988 }
12989 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
12990 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
12991 return;
12992 }
12993 if (e_rxf < 0)
12994 OSL_SLEEP(1);
12995 } while (e_rxf < 0);
12996 }
12997#ifdef DHD_OF_SUPPORT
12998 interrupt_set_cpucore(set);
12999#endif /* DHD_OF_SUPPORT */
13000 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
13001
13002 return;
13003}
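
/*
 * Note: set_cpus_allowed_ptr() can fail transiently (for instance -EINVAL
 * while the requested core is offline during CPU hotplug), which is why
 * both loops above sleep and retry, bounded by MAX_RETRY_SET_CPUCORE.
 */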
13004#endif /* CUSTOM_SET_CPUCORE */
13005
13006/* Get interface specific ap_isolate configuration */
13007int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
13008{
13009 dhd_info_t *dhd = dhdp->info;
13010 dhd_if_t *ifp;
13011
13012 ASSERT(idx < DHD_MAX_IFS);
13013
13014 ifp = dhd->iflist[idx];
13015
13016 return ifp->ap_isolate;
13017}
13018
13019/* Set interface specific ap_isolate configuration */
13020int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
13021{
13022 dhd_info_t *dhd = dhdp->info;
13023 dhd_if_t *ifp;
13024
13025 ASSERT(idx < DHD_MAX_IFS);
13026
13027 ifp = dhd->iflist[idx];
13028
13029 ifp->ap_isolate = val;
13030
13031 return 0;
13032}
13033
13034#ifdef DHD_FW_COREDUMP
13035
13036
13037#ifdef CUSTOMER_HW4_DEBUG
13038#ifdef PLATFORM_SLP
13039#define MEMDUMPINFO "/opt/etc/.memdump.info"
13040#else
13041#define MEMDUMPINFO "/data/.memdump.info"
13042#endif /* PLATFORM_SLP */
13043#elif defined(CUSTOMER_HW2)
13044#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
13045#else
13046#define MEMDUMPINFO "/installmedia/.memdump.info"
13047#endif /* CUSTOMER_HW4_DEBUG */
13048
13049void dhd_get_memdump_info(dhd_pub_t *dhd)
13050{
13051 struct file *fp = NULL;
13052 uint32 mem_val = DUMP_MEMFILE_MAX;
13053 int ret = 0;
13054 char *filepath = MEMDUMPINFO;
13055
13056 /* Read memdump info from the file */
13057 fp = filp_open(filepath, O_RDONLY, 0);
13058 if (IS_ERR(fp)) {
13059 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13060 goto done;
13061 } else {
13062 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13063 if (ret < 0) {
13064 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13065 filp_close(fp, NULL);
13066 goto done;
13067 }
13068
13069 mem_val = bcm_atoi((char *)&mem_val);
13070
13071 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
13072 filp_close(fp, NULL);
13073 }
13074
13075done:
13076#ifdef CUSTOMER_HW4_DEBUG
13077 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
13078#else
13079 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
13080#endif /* CUSTOMER_HW4_DEBUG */
13081}
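
/*
 * Note: the .memdump.info file is expected to hold the mode as ASCII text
 * (an "echo N > ..." style write from userspace): the 4 raw bytes read
 * above are reinterpreted as a decimal string by bcm_atoi(), not as a
 * binary integer, and out-of-range values fall back to the build-specific
 * default chosen under the done: label.
 */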
13082
13083
13084void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
13085{
13086 dhd_dump_t *dump = NULL;
13087 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
13088 if (dump == NULL) {
13089 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
13090 return;
13091 }
13092 dump->buf = buf;
13093 dump->bufsize = size;
13094
13095#if defined(CONFIG_ARM64)
13096 DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
13097 (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
13098#elif defined(__ARM_ARCH_7A__)
13099 DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
13100 (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
13101#endif /* __ARM_ARCH_7A__ */
13102 if (dhdp->memdump_enabled == DUMP_MEMONLY) {
13103 BUG_ON(1);
13104 }
13105
13106#ifdef DHD_LOG_DUMP
13107 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
13108 dhd_schedule_log_dump(dhdp);
13109 }
13110#endif /* DHD_LOG_DUMP */
13111 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
13112 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
13113}
13114static void
13115dhd_mem_dump(void *handle, void *event_info, u8 event)
13116{
13117 dhd_info_t *dhd = handle;
13118 dhd_dump_t *dump = event_info;
13119
13120 if (!dhd) {
13121 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13122 return;
13123 }
13124
13125 if (!dump) {
13126 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
13127 return;
13128 }
13129
13130 if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
13131 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
13132 }
13133
13134 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
13135#ifdef DHD_LOG_DUMP
13136 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
13137#endif
13138 TRUE) {
13139 BUG_ON(1);
13140 }
13141 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
13142}
13143#endif /* DHD_FW_COREDUMP */
13144
13145#ifdef DHD_LOG_DUMP
13146static void
13147dhd_log_dump(void *handle, void *event_info, u8 event)
13148{
13149 dhd_info_t *dhd = handle;
13150
13151 if (!dhd) {
13152 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13153 return;
13154 }
13155
13156 if (do_dhd_log_dump(&dhd->pub)) {
13157 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
13158 return;
13159 }
13160}
13161
13162void dhd_schedule_log_dump(dhd_pub_t *dhdp)
13163{
13164 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
13165 (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
13166 dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
13167}
13168
13169static int
13170do_dhd_log_dump(dhd_pub_t *dhdp)
13171{
13172 int ret = 0;
13173 struct file *fp = NULL;
13174 mm_segment_t old_fs;
13175 loff_t pos = 0;
13176 char dump_path[128];
13177 char common_info[1024];
13178 struct timeval curtime;
13179 uint32 file_mode;
13180 unsigned long flags = 0;
13181
13182 if (!dhdp) {
13183 return -1;
13184 }
13185
13186 /* Building the additional information like DHD, F/W version */
13187 memset(common_info, 0, sizeof(common_info));
13188 snprintf(common_info, sizeof(common_info),
13189 "---------- Common information ----------\n"
13190 "DHD version: %s\n"
13191 "F/W version: %s\n"
13192 "----------------------------------------\n",
13193 dhd_version, fw_version);
13194
13195 /* change to KERNEL_DS address limit */
13196 old_fs = get_fs();
13197 set_fs(KERNEL_DS);
13198
13199 /* Init file name */
13200 memset(dump_path, 0, sizeof(dump_path));
13201 do_gettimeofday(&curtime);
13202 	snprintf(dump_path, sizeof(dump_path), "%s_%lu.%lu",
13203 DHD_COMMON_DUMP_PATH "debug_dump",
13204 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
13205 file_mode = O_CREAT | O_WRONLY | O_SYNC;
13206
13207 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
13208 fp = filp_open(dump_path, file_mode, 0644);
13209 if (IS_ERR(fp)) {
13210 ret = PTR_ERR(fp);
13211 DHD_ERROR(("open file error, err = %d\n", ret));
13212 ret = -1;
13213 goto exit;
13214 }
13215
13216 fp->f_op->write(fp, common_info, strlen(common_info), &pos);
13217 if (dhdp->dld_buf.wraparound) {
13218 fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
13219 } else {
13220 fp->f_op->write(fp, dhdp->dld_buf.buffer,
13221 (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
13222 }
13223
13224 /* re-init dhd_log_dump_buf structure */
13225 spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
13226 dhdp->dld_buf.wraparound = 0;
13227 dhdp->dld_buf.present = dhdp->dld_buf.front;
13228 dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13229 bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
13230 spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
13231exit:
13232 if (!ret) {
13233 filp_close(fp, NULL);
13234 }
13235 set_fs(old_fs);
13236
13237 return ret;
13238}
13239#endif /* DHD_LOG_DUMP */
13240
13241#ifdef BCMASSERT_LOG
13242#ifdef CUSTOMER_HW4_DEBUG
13243#ifdef PLATFORM_SLP
13244#define ASSERTINFO "/opt/etc/.assert.info"
13245#else
13246#define ASSERTINFO "/data/.assert.info"
13247#endif /* PLATFORM_SLP */
13248#elif defined(CUSTOMER_HW2)
13249#define ASSERTINFO "/data/misc/wifi/.assert.info"
13250#else
13251#define ASSERTINFO "/installmedia/.assert.info"
13252#endif /* CUSTOMER_HW4_DEBUG */
13253void dhd_get_assert_info(dhd_pub_t *dhd)
13254{
13255 struct file *fp = NULL;
13256 char *filepath = ASSERTINFO;
13257
13258 /*
13259 * Read assert info from the file
13260 * 0: Trigger Kernel crash by panic()
13261 * 1: Print out the logs and don't trigger Kernel panic. (default)
13262 * 2: Trigger Kernel crash by BUG()
13263 * File doesn't exist: Keep default value (1).
13264 */
13265 fp = filp_open(filepath, O_RDONLY, 0);
13266 if (IS_ERR(fp)) {
13267 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13268 } else {
13269 int mem_val = 0;
13270 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13271 if (ret < 0) {
13272 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13273 } else {
13274 mem_val = bcm_atoi((char *)&mem_val);
13275 DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
13276 g_assert_type = mem_val;
13277 }
13278 filp_close(fp, NULL);
13279 }
13280}
13281#endif /* BCMASSERT_LOG */
13282
13283
13284#ifdef DHD_WMF
13285/* Returns interface specific WMF configuration */
13286dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
13287{
13288 dhd_info_t *dhd = dhdp->info;
13289 dhd_if_t *ifp;
13290
13291 ASSERT(idx < DHD_MAX_IFS);
13292
13293 ifp = dhd->iflist[idx];
13294 return &ifp->wmf;
13295}
13296#endif /* DHD_WMF */
13297
13298
13299#if defined(DHD_L2_FILTER)
13300bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
13301{
13302 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
13303}
13304#endif
13305
13306#ifdef DHD_L2_FILTER
13307arp_table_t*
13308dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
13309{
13310 dhd_info_t *dhd = dhdp->info;
13311 dhd_if_t *ifp;
13312
13313 ASSERT(bssidx < DHD_MAX_IFS);
13314
13315 ifp = dhd->iflist[bssidx];
13316 return ifp->phnd_arp_table;
13317}
13318
13319int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
13320{
13321 dhd_info_t *dhd = dhdp->info;
13322 dhd_if_t *ifp;
13323
13324 ASSERT(idx < DHD_MAX_IFS);
13325
13326 ifp = dhd->iflist[idx];
13327
13328 if (ifp)
13329 return ifp->parp_enable;
13330 else
13331 return FALSE;
13332}
13333
13334/* Set interface specific proxy arp configuration */
13335int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13336{
13337 dhd_info_t *dhd = dhdp->info;
13338 dhd_if_t *ifp;
13339 ASSERT(idx < DHD_MAX_IFS);
13340 ifp = dhd->iflist[idx];
13341
13342 if (!ifp)
13343 return BCME_ERROR;
13344
13345 /* At present all 3 variables are being
13346 * handled at once
13347 */
13348 ifp->parp_enable = val;
13349 ifp->parp_discard = val;
13350 ifp->parp_allnode = !val;
13351
13352 /* Flush ARP entries when disabled */
13353 if (val == FALSE) {
13354 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
13355 FALSE, dhdp->tickcnt);
13356 }
13357 return BCME_OK;
13358}
13359
13360bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13361{
13362 dhd_info_t *dhd = dhdp->info;
13363 dhd_if_t *ifp;
13364
13365 ASSERT(idx < DHD_MAX_IFS);
13366
13367 ifp = dhd->iflist[idx];
13368
13369 ASSERT(ifp);
13370 return ifp->parp_discard;
13371}
13372
13373bool
13374dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13375{
13376 dhd_info_t *dhd = dhdp->info;
13377 dhd_if_t *ifp;
13378
13379 ASSERT(idx < DHD_MAX_IFS);
13380
13381 ifp = dhd->iflist[idx];
13382
13383 ASSERT(ifp);
13384
13385 return ifp->parp_allnode;
13386}
13387
13388int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
13389{
13390 dhd_info_t *dhd = dhdp->info;
13391 dhd_if_t *ifp;
13392
13393 ASSERT(idx < DHD_MAX_IFS);
13394
13395 ifp = dhd->iflist[idx];
13396
13397 ASSERT(ifp);
13398
13399 return ifp->dhcp_unicast;
13400}
13401
13402int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
13403{
13404 dhd_info_t *dhd = dhdp->info;
13405 dhd_if_t *ifp;
13406 ASSERT(idx < DHD_MAX_IFS);
13407 ifp = dhd->iflist[idx];
13408
13409 ASSERT(ifp);
13410
13411 ifp->dhcp_unicast = val;
13412 return BCME_OK;
13413}
13414
13415int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
13416{
13417 dhd_info_t *dhd = dhdp->info;
13418 dhd_if_t *ifp;
13419
13420 ASSERT(idx < DHD_MAX_IFS);
13421
13422 ifp = dhd->iflist[idx];
13423
13424 ASSERT(ifp);
13425
13426 return ifp->block_ping;
13427}
13428
13429int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
13430{
13431 dhd_info_t *dhd = dhdp->info;
13432 dhd_if_t *ifp;
13433 ASSERT(idx < DHD_MAX_IFS);
13434 ifp = dhd->iflist[idx];
13435
13436 ASSERT(ifp);
13437
13438 ifp->block_ping = val;
13439
13440 return BCME_OK;
13441}
13442
13443int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
13444{
13445 dhd_info_t *dhd = dhdp->info;
13446 dhd_if_t *ifp;
13447
13448 ASSERT(idx < DHD_MAX_IFS);
13449
13450 ifp = dhd->iflist[idx];
13451
13452 ASSERT(ifp);
13453
13454 return ifp->grat_arp;
13455}
13456
13457int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13458{
13459 dhd_info_t *dhd = dhdp->info;
13460 dhd_if_t *ifp;
13461 ASSERT(idx < DHD_MAX_IFS);
13462 ifp = dhd->iflist[idx];
13463
13464 ASSERT(ifp);
13465
13466 ifp->grat_arp = val;
13467
13468 return BCME_OK;
13469}
13470#endif /* DHD_L2_FILTER */
13471
13472
13473#if defined(SET_RPS_CPUS)
13474int dhd_rps_cpus_enable(struct net_device *net, int enable)
13475{
13476 dhd_info_t *dhd = DHD_DEV_INFO(net);
13477 dhd_if_t *ifp;
13478 int ifidx;
13479 char * RPS_CPU_SETBUF;
13480
13481 ifidx = dhd_net2idx(dhd, net);
13482 if (ifidx == DHD_BAD_IF) {
13483 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
13484 return -ENODEV;
13485 }
13486
13487 if (ifidx == PRIMARY_INF) {
13488 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
13489 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
13490 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
13491 } else {
13492 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
13493 RPS_CPU_SETBUF = RPS_CPUS_MASK;
13494 }
13495 } else if (ifidx == VIRTUAL_INF) {
13496 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
13497 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
13498 } else {
13499 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
13500 return -EINVAL;
13501 }
13502
13503 ifp = dhd->iflist[ifidx];
13504 if (ifp) {
13505 if (enable) {
13506 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
13507 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
13508 } else {
13509 custom_rps_map_clear(ifp->net->_rx);
13510 }
13511 } else {
13512 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
13513 return -ENODEV;
13514 }
13515 return BCME_OK;
13516}
13517
13518int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
13519{
13520 struct rps_map *old_map, *map;
13521 cpumask_var_t mask;
13522 int err, cpu, i;
13523 static DEFINE_SPINLOCK(rps_map_lock);
13524
13525 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
13526
13527 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
13528 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
13529 return -ENOMEM;
13530 }
13531
13532 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
13533 if (err) {
13534 free_cpumask_var(mask);
13535 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
13536 return err;
13537 }
13538
13539 map = kzalloc(max_t(unsigned int,
13540 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
13541 GFP_KERNEL);
13542 if (!map) {
13543 free_cpumask_var(mask);
13544 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
13545 return -ENOMEM;
13546 }
13547
13548 i = 0;
13549 for_each_cpu(cpu, mask) {
13550 map->cpus[i++] = cpu;
13551 }
13552
13553 if (i) {
13554 map->len = i;
13555 } else {
13556 kfree(map);
13557 map = NULL;
13558 free_cpumask_var(mask);
13559 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
13560 return -1;
13561 }
13562
13563 spin_lock(&rps_map_lock);
13564 old_map = rcu_dereference_protected(queue->rps_map,
13565 lockdep_is_held(&rps_map_lock));
13566 rcu_assign_pointer(queue->rps_map, map);
13567 spin_unlock(&rps_map_lock);
13568
13569 if (map) {
13570 static_key_slow_inc(&rps_needed);
13571 }
13572 if (old_map) {
13573 kfree_rcu(old_map, rcu);
13574 static_key_slow_dec(&rps_needed);
13575 }
13576 free_cpumask_var(mask);
13577
13578 	DHD_INFO(("%s : Done. mapped cpu number : %d\n", __FUNCTION__, map->len));
13579 return map->len;
13580}
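
/*
 * This mirrors what the kernel's own store_rps_map() does when userspace
 * writes /sys/class/net/<if>/queues/rx-0/rps_cpus: e.g. a mask string of
 * "6" (binary 110) steers RX processing to CPUs 1 and 2. The RPS_CPUS_MASK*
 * strings used above are presumably fixed per platform at build time.
 */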
13581
13582void custom_rps_map_clear(struct netdev_rx_queue *queue)
13583{
13584 struct rps_map *map;
13585
13586 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
13587
13588 map = rcu_dereference_protected(queue->rps_map, 1);
13589 if (map) {
13590 RCU_INIT_POINTER(queue->rps_map, NULL);
13591 kfree_rcu(map, rcu);
13592 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
13593 }
13594}
13595#endif
13596
13597
13598
13599#ifdef DHD_DEBUG_PAGEALLOC
13600
13601void
13602dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
13603{
13604 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
13605
13606 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
13607 __FUNCTION__, addr_corrupt, (uint32)len));
13608
13609 DHD_OS_WAKE_LOCK(dhdp);
13610 prhex("Page Corruption:", addr_corrupt, len);
13611 dhd_dump_to_kernelog(dhdp);
13612#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
13613 /* Load the dongle side dump to host memory and then BUG_ON() */
13614 dhdp->memdump_enabled = DUMP_MEMONLY;
13615 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
13616 dhd_bus_mem_dump(dhdp);
13617#endif /* BCMPCIE && DHD_FW_COREDUMP */
13618 DHD_OS_WAKE_UNLOCK(dhdp);
13619}
13620EXPORT_SYMBOL(dhd_page_corrupt_cb);
13621#endif /* DHD_DEBUG_PAGEALLOC */
13622
13623#ifdef DHD_PKTID_AUDIT_ENABLED
13624void
13625dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
13626{
13627 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
13628 DHD_OS_WAKE_LOCK(dhdp);
13629 dhd_dump_to_kernelog(dhdp);
13630#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
13631 /* Load the dongle side dump to host memory and then BUG_ON() */
13632 dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
13633 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
13634 dhd_bus_mem_dump(dhdp);
13635#endif /* BCMPCIE && DHD_FW_COREDUMP */
13636 DHD_OS_WAKE_UNLOCK(dhdp);
13637}
13638#endif /* DHD_PKTID_AUDIT_ENABLED */
13639
13640/* ----------------------------------------------------------------------------
13641 * Infrastructure code for sysfs interface support for DHD
13642 *
13643 * What is sysfs interface?
13644 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
13645 *
13646 * Why sysfs interface?
13647 * This is the Linux standard way of changing/configuring Run Time parameters
13648 * for a driver. We can use this interface to control "linux" specific driver
13649 * parameters.
13650 *
13651 * -----------------------------------------------------------------------------
13652 */
13653
13654#include <linux/sysfs.h>
13655#include <linux/kobject.h>
13656
13657#if defined(DHD_TRACE_WAKE_LOCK)
13658
13659/* Function to show the history buffer */
13660static ssize_t
13661show_wklock_trace(struct dhd_info *dev, char *buf)
13662{
13663 ssize_t ret = 0;
13664 dhd_info_t *dhd = (dhd_info_t *)dev;
13665
13666 buf[ret] = '\n';
13667 buf[ret+1] = 0;
13668
13669 dhd_wk_lock_stats_dump(&dhd->pub);
13670 return ret+1;
13671}
13672
13673/* Function to enable/disable wakelock trace */
13674static ssize_t
13675wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
13676{
13677 unsigned long onoff;
13678 unsigned long flags;
13679 dhd_info_t *dhd = (dhd_info_t *)dev;
13680
13681 onoff = bcm_strtoul(buf, NULL, 10);
13682 if (onoff != 0 && onoff != 1) {
13683 return -EINVAL;
13684 }
13685
13686 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
13687 trace_wklock_onoff = onoff;
13688 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
13689 if (trace_wklock_onoff) {
13690 		printk("ENABLE WAKELOCK TRACE\n");
13691 } else {
13692 printk("DISABLE WAKELOCK TRACE\n");
13693 }
13694
13695 return (ssize_t)(onoff+1);
13696}
13697#endif /* DHD_TRACE_WAKE_LOCK */
13698
13699/*
13700 * Generic Attribute Structure for DHD.
13701 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
13702 * to instantiate an object of type dhd_attr, populate it with
13703 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
13704 * and add the object to default_attrs[] array, that gets registered
13705 * to the kobject of dhd (named bcm-dhd).
13706 */
13707
13708struct dhd_attr {
13709 struct attribute attr;
13710 ssize_t(*show)(struct dhd_info *, char *);
13711 ssize_t(*store)(struct dhd_info *, const char *, size_t count);
13712};
13713
13714#if defined(DHD_TRACE_WAKE_LOCK)
13715static struct dhd_attr dhd_attr_wklock =
13716 __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
13717 #endif /* defined(DHD_TRACE_WAKE_LOCK) */
13718
13719/* Attribute object that gets registered with "bcm-dhd" kobject tree */
13720static struct attribute *default_attrs[] = {
13721#if defined(DHD_TRACE_WAKE_LOCK)
13722 &dhd_attr_wklock.attr,
13723#endif
13724 NULL
13725};
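
/*
 * Hypothetical example of adding a node following the recipe above (not
 * part of this driver): a read-only /sys/bcm-dhd/memdump node, assuming
 * memdump_enabled is reachable as dhd->pub.memdump_enabled:
 *
 *	static ssize_t
 *	show_memdump(struct dhd_info *dhd, char *buf)
 *	{
 *		return scnprintf(buf, PAGE_SIZE, "%u\n",
 *			dhd->pub.memdump_enabled);
 *	}
 *	static struct dhd_attr dhd_attr_memdump =
 *		__ATTR(memdump, 0440, show_memdump, NULL);
 *
 * with &dhd_attr_memdump.attr then appended to default_attrs[] before the
 * NULL terminator.
 */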
13726
13727#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
13728#define to_attr(a) container_of(a, struct dhd_attr, attr)
13729
13730/*
13731  * bcm-dhd kobject show function, the "attr" attribute specifies to which
13732 * node under "bcm-dhd" the show function is called.
13733 */
13734static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
13735{
13736 dhd_info_t *dhd = to_dhd(kobj);
13737 struct dhd_attr *d_attr = to_attr(attr);
13738 int ret;
13739
13740 if (d_attr->show)
13741 ret = d_attr->show(dhd, buf);
13742 else
13743 ret = -EIO;
13744
13745 return ret;
13746}
13747
13748
13749/*
13750  * bcm-dhd kobject store function, the "attr" attribute specifies to which
13751 * node under "bcm-dhd" the store function is called.
13752 */
13753static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
13754 const char *buf, size_t count)
13755{
13756 dhd_info_t *dhd = to_dhd(kobj);
13757 struct dhd_attr *d_attr = to_attr(attr);
13758 int ret;
13759
13760 if (d_attr->store)
13761 ret = d_attr->store(dhd, buf, count);
13762 else
13763 ret = -EIO;
13764
13765 return ret;
13766
13767}
13768
13769static struct sysfs_ops dhd_sysfs_ops = {
13770 .show = dhd_show,
13771 .store = dhd_store,
13772};
13773
13774static struct kobj_type dhd_ktype = {
13775 .sysfs_ops = &dhd_sysfs_ops,
13776 .default_attrs = default_attrs,
13777};
13778
13779/* Create a kobject and attach to sysfs interface */
13780static int dhd_sysfs_init(dhd_info_t *dhd)
13781{
13782 int ret = -1;
13783
13784 if (dhd == NULL) {
13785 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13786 return ret;
13787 }
13788
13789 /* Initialize the kobject */
13790 ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
13791 if (ret) {
13792 kobject_put(&dhd->dhd_kobj);
13793 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
13794 return ret;
13795 }
13796
13797 /*
13798 * We are always responsible for sending the uevent that the kobject
13799 * was added to the system.
13800 */
13801 kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
13802
13803 return ret;
13804}
13805
13806/* Done with the kobject and detach the sysfs interface */
13807static void dhd_sysfs_exit(dhd_info_t *dhd)
13808{
13809 if (dhd == NULL) {
13810 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13811 return;
13812 }
13813
13814 	/* Release the kobject */
13815 kobject_put(&dhd->dhd_kobj);
13816}
13817
13818#ifdef DHD_LOG_DUMP
13819void
13820dhd_log_dump_init(dhd_pub_t *dhd)
13821{
13822 spin_lock_init(&dhd->dld_buf.lock);
13823#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13824 dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
13825 DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
13826#else
13827 dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
13828#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13829
13830 if (!dhd->dld_buf.buffer) {
13831 dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
13832 DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
13833
13834 if (!dhd->dld_buf.buffer) {
13835 DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
13836 return;
13837 }
13838 }
13839
13840 dhd->dld_buf.wraparound = 0;
13841 dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
13842 dhd->dld_buf.present = dhd->dld_buf.buffer;
13843 dhd->dld_buf.front = dhd->dld_buf.buffer;
13844 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13845 dhd->dld_enable = 1;
13846}
13847
13848void
13849dhd_log_dump_deinit(dhd_pub_t *dhd)
13850{
13851 dhd->dld_enable = 0;
13852#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
13853 DHD_OS_PREFREE(dhd,
13854 dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
13855#else
13856 kfree(dhd->dld_buf.buffer);
13857#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
13858}
13859
13860void
13861dhd_log_dump_print(const char *fmt, ...)
13862{
13863 int len = 0;
13864 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
13865 va_list args;
13866 dhd_pub_t *dhd = NULL;
13867 unsigned long flags = 0;
13868
13869 if (wl_get_bcm_cfg80211_ptr()) {
13870 dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
13871 }
13872
13873 if (!dhd || dhd->dld_enable != 1) {
13874 return;
13875 }
13876
13877 	va_start(args, fmt);
13878 
13879 	len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
13880 	if (len < 0) {
		va_end(args);
13881 		return;
13882 	}
	/* vsnprintf returns the untruncated length; clamp to the temp buffer */
	if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE)
		len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
13883
13884 /* make a critical section to eliminate race conditions */
13885 spin_lock_irqsave(&dhd->dld_buf.lock, flags);
13886 if (dhd->dld_buf.remain < len) {
13887 dhd->dld_buf.wraparound = 1;
13888 dhd->dld_buf.present = dhd->dld_buf.front;
13889 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
13890 }
13891
13892 strncpy(dhd->dld_buf.present, tmp_buf, len);
13893 dhd->dld_buf.remain -= len;
13894 dhd->dld_buf.present += len;
13895 spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
13896
13897 /* double check invalid memory operation */
13898 ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
13899 va_end(args);
13900}
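
/*
 * Buffer behaviour sketch (hypothetical sizes): with a 16-byte buffer that
 * already holds 12 bytes, a 6-byte message no longer fits (remain = 4), so
 * the write restarts at front with wraparound set; do_dhd_log_dump() then
 * flushes the full DHD_LOG_DUMP_BUFFER_SIZE rather than just
 * present - front.
 */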
13901
13902char*
13903dhd_log_dump_get_timestamp(void)
13904{
13905 static char buf[16];
13906 u64 ts_nsec;
13907 unsigned long rem_nsec;
13908
13909 ts_nsec = local_clock();
13910 rem_nsec = do_div(ts_nsec, 1000000000);
13911 snprintf(buf, sizeof(buf), "%5lu.%06lu",
13912 (unsigned long)ts_nsec, rem_nsec / 1000);
13913
13914 return buf;
13915}
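
/*
 * Note: do_div() divides ts_nsec in place and returns the remainder, so
 * afterwards ts_nsec holds whole seconds and rem_nsec the leftover
 * nanoseconds; e.g. 5123456789 ns formats as "    5.123456".
 */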
13916
13917#endif /* DHD_LOG_DUMP */
13918
13919/* ---------------------------- End of sysfs implementation ------------------------------------- */
13920
13921void *dhd_get_pub(struct net_device *dev)
13922{
13923 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
13924 if (dhdinfo)
13925 return (void *)&dhdinfo->pub;
13926 else
13927 return NULL;
13928}
13929
13930bool dhd_os_wd_timer_enabled(void *bus)
13931{
13932 dhd_pub_t *pub = bus;
13933 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13934
13935 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13936 if (!dhd) {
13937 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13938 return FALSE;
13939 }
13940 return dhd->wd_timer_valid;
13941}