wifi: update driver to 1.579.77.41.2 (r)
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.1.363.59.144.x.cn / dhd_linux.c
CommitLineData
ef6a5fee
RC
1/*
2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
4 *
5 * Copyright (C) 1999-2016, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
29 */
30
31#include <typedefs.h>
32#include <linuxver.h>
33#include <osl.h>
34#ifdef SHOW_LOGTRACE
35#include <linux/syscalls.h>
36#include <event_log.h>
37#endif /* SHOW_LOGTRACE */
38
39
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/slab.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/inetdevice.h>
46#include <linux/rtnetlink.h>
47#include <linux/etherdevice.h>
48#include <linux/random.h>
49#include <linux/spinlock.h>
50#include <linux/ethtool.h>
51#include <linux/fcntl.h>
52#include <linux/fs.h>
53#include <linux/ip.h>
54#include <linux/reboot.h>
55#include <linux/notifier.h>
56#include <net/addrconf.h>
57#ifdef ENABLE_ADAPTIVE_SCHED
58#include <linux/cpufreq.h>
59#endif /* ENABLE_ADAPTIVE_SCHED */
60
61#include <asm/uaccess.h>
62#include <asm/unaligned.h>
63
64#include <epivers.h>
65#include <bcmutils.h>
66#include <bcmendian.h>
67#include <bcmdevs.h>
68
69#include <proto/ethernet.h>
70#include <proto/bcmevent.h>
71#include <proto/vlan.h>
72#include <proto/802.3.h>
73
74#include <dngl_stats.h>
75#include <dhd_linux_wq.h>
76#include <dhd.h>
77#include <dhd_linux.h>
78#ifdef PCIE_FULL_DONGLE
79#include <dhd_flowring.h>
80#endif
81#include <dhd_bus.h>
82#include <dhd_proto.h>
83#include <dhd_config.h>
08dfb6c4
RC
84#ifdef WL_ESCAN
85#include <wl_escan.h>
86#endif
ef6a5fee
RC
87#include <dhd_dbg.h>
88#ifdef CONFIG_HAS_WAKELOCK
89#include <linux/wakelock.h>
90#endif
91#ifdef WL_CFG80211
92#include <wl_cfg80211.h>
93#endif
94#ifdef PNO_SUPPORT
95#include <dhd_pno.h>
96#endif
97#ifdef RTT_SUPPORT
98#include <dhd_rtt.h>
99#endif
100
101#ifdef CONFIG_COMPAT
102#include <linux/compat.h>
103#endif
104
105#ifdef DHD_WMF
106#include <dhd_wmf_linux.h>
107#endif /* DHD_WMF */
108
109#ifdef DHD_L2_FILTER
110#include <proto/bcmicmp.h>
111#include <bcm_l2_filter.h>
112#include <dhd_l2_filter.h>
113#endif /* DHD_L2_FILTER */
114
115#ifdef DHD_PSTA
116#include <dhd_psta.h>
117#endif /* DHD_PSTA */
118
119
120#ifdef DHDTCPACK_SUPPRESS
121#include <dhd_ip.h>
122#endif /* DHDTCPACK_SUPPRESS */
123
124#ifdef DHD_DEBUG_PAGEALLOC
125typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
126void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
127extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
128#endif /* DHD_DEBUG_PAGEALLOC */
129
130
131#if defined(DHD_LB)
132/* Dynamic CPU selection for load balancing */
133#include <linux/cpu.h>
134#include <linux/cpumask.h>
135#include <linux/notifier.h>
136#include <linux/workqueue.h>
137#include <asm/atomic.h>
138
139#if !defined(DHD_LB_PRIMARY_CPUS)
140#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
141#endif
142
143#if !defined(DHD_LB_SECONDARY_CPUS)
144#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
145#endif
146
147#define HIST_BIN_SIZE 8
148
149#if defined(DHD_LB_RXP)
150static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
151#endif /* DHD_LB_RXP */
152
153#endif /* DHD_LB */
154
155#ifdef WLMEDIA_HTSF
156#include <linux/time.h>
157#include <htsf.h>
158
159#define HTSF_MINLEN 200 /* min. packet length to timestamp */
160#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
161#define TSMAX 1000 /* max no. of timing record kept */
162#define NUMBIN 34
163
164static uint32 tsidx = 0;
165static uint32 htsf_seqnum = 0;
166uint32 tsfsync;
167struct timeval tsync;
168static uint32 tsport = 5010;
169
170typedef struct histo_ {
171 uint32 bin[NUMBIN];
172} histo_t;
173
174#if !ISPOWEROF2(DHD_SDALIGN)
175#error DHD_SDALIGN is not a power of 2!
176#endif
177
178static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
179#endif /* WLMEDIA_HTSF */
180
181#ifdef STBLINUX
182#ifdef quote_str
183#undef quote_str
184#endif /* quote_str */
185#ifdef to_str
186#undef to_str
#endif /* to_str */
188#define to_str(s) #s
189#define quote_str(s) to_str(s)
190
191static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
192#endif /* STBLINUX */
193
194
195#if defined(SOFTAP)
196extern bool ap_cfg_running;
197extern bool ap_fw_loaded;
198#endif
199extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
200
201#ifdef FIX_CPU_MIN_CLOCK
202#include <linux/pm_qos.h>
203#endif /* FIX_CPU_MIN_CLOCK */
204#ifdef SET_RANDOM_MAC_SOFTAP
205#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
206#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
207#endif
208static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
209#endif /* SET_RANDOM_MAC_SOFTAP */
210#ifdef ENABLE_ADAPTIVE_SCHED
211#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
212#ifndef CUSTOM_CPUFREQ_THRESH
213#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
214#endif /* CUSTOM_CPUFREQ_THRESH */
215#endif /* ENABLE_ADAPTIVE_SCHED */
216
217/* enable HOSTIP cache update from the host side when an eth0:N is up */
218#define AOE_IP_ALIAS_SUPPORT 1
219
220#ifdef BCM_FD_AGGR
221#include <bcm_rpc.h>
222#include <bcm_rpc_tp.h>
223#endif
224#ifdef PROP_TXSTATUS
225#include <wlfc_proto.h>
226#include <dhd_wlfc.h>
227#endif
228
229#include <wl_android.h>
230
ef6a5fee
RC
231/* Maximum STA per radio */
232#define DHD_MAX_STA 32
233
08dfb6c4
RC
234#ifdef CUSTOMER_HW_AMLOGIC
235#include <linux/amlogic/wifi_dt.h>
236#endif
ef6a5fee
RC
237
238
239const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
240const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
241#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
242
243#ifdef ARP_OFFLOAD_SUPPORT
244void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
245static int dhd_inetaddr_notifier_call(struct notifier_block *this,
246 unsigned long event, void *ptr);
247static struct notifier_block dhd_inetaddr_notifier = {
248 .notifier_call = dhd_inetaddr_notifier_call
249};
250/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
251 * created in kernel notifier link list (with 'next' pointing to itself)
252 */
253static bool dhd_inetaddr_notifier_registered = FALSE;
254#endif /* ARP_OFFLOAD_SUPPORT */
255
256#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
257static int dhd_inet6addr_notifier_call(struct notifier_block *this,
258 unsigned long event, void *ptr);
259static struct notifier_block dhd_inet6addr_notifier = {
260 .notifier_call = dhd_inet6addr_notifier_call
261};
262/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
263 * created in kernel notifier link list (with 'next' pointing to itself)
264 */
265static bool dhd_inet6addr_notifier_registered = FALSE;
266#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
267
268#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
269#include <linux/suspend.h>
270volatile bool dhd_mmc_suspend = FALSE;
271DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
272#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
273
274#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
275extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
276#endif
277#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
278static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
279#endif
280#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
281MODULE_LICENSE("GPL and additional rights");
282#endif /* LinuxVer */
283
284#include <dhd_bus.h>
285
286#ifdef BCM_FD_AGGR
287#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
288#else
289#ifndef PROP_TXSTATUS
290#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
291#else
292#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
293#endif
294#endif /* BCM_FD_AGGR */
295
296#ifdef PROP_TXSTATUS
297extern bool dhd_wlfc_skip_fc(void);
298extern void dhd_wlfc_plat_init(void *dhd);
299extern void dhd_wlfc_plat_deinit(void *dhd);
300#endif /* PROP_TXSTATUS */
ef6a5fee
RC
301extern uint sd_f2_blocksize;
302extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
ef6a5fee
RC
303
304#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * Compatibility stub for 2.6.15 kernels, where print_tainted() is not
 * exported to modules; logging paths that print the taint string get an
 * empty string instead.  Uses (void) rather than an unprototyped () list.
 */
const char *
print_tainted(void)
{
	return "";
}
310#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
311
312/* Linux wireless extension support */
313#if defined(WL_WIRELESS_EXT)
314#include <wl_iw.h>
315extern wl_iw_extra_params_t g_wl_iw_params;
316#endif /* defined(WL_WIRELESS_EXT) */
317
318#ifdef CONFIG_PARTIALSUSPEND_SLP
319#include <linux/partialsuspend_slp.h>
320#define CONFIG_HAS_EARLYSUSPEND
321#define DHD_USE_EARLYSUSPEND
322#define register_early_suspend register_pre_suspend
323#define unregister_early_suspend unregister_pre_suspend
324#define early_suspend pre_suspend
325#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
326#else
327#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
328#include <linux/earlysuspend.h>
329#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
330#endif /* CONFIG_PARTIALSUSPEND_SLP */
331
332extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
333
334#ifdef PKT_FILTER_SUPPORT
335extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
336extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
337extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
338#endif
339
340
341#ifdef READ_MACADDR
342extern int dhd_read_macaddr(struct dhd_info *dhd);
343#else
344static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
345#endif
346#ifdef WRITE_MACADDR
347extern int dhd_write_macaddr(struct ether_addr *mac);
348#else
349static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
350#endif
351
352
353
354
355
356#ifdef DHD_FW_COREDUMP
357static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
358#endif /* DHD_FW_COREDUMP */
359#ifdef DHD_LOG_DUMP
360static void dhd_log_dump_init(dhd_pub_t *dhd);
361static void dhd_log_dump_deinit(dhd_pub_t *dhd);
362static void dhd_log_dump(void *handle, void *event_info, u8 event);
363void dhd_schedule_log_dump(dhd_pub_t *dhdp);
364static int do_dhd_log_dump(dhd_pub_t *dhdp);
365#endif /* DHD_LOG_DUMP */
366
367static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
368static struct notifier_block dhd_reboot_notifier = {
369 .notifier_call = dhd_reboot_callback,
370 .priority = 1,
371};
372
373#ifdef BCMPCIE
374static int is_reboot = 0;
375#endif /* BCMPCIE */
376
/* Pending interface add/del event from the dongle, queued for deferred
 * handling (see dhd_ifadd_event_handler / dhd_ifdel_event_handler).
 */
typedef struct dhd_if_event {
	struct list_head	list;			/* linkage in the pending-event list */
	wl_event_data_if_t	event;			/* raw IF event payload from firmware */
	char			name[IFNAMSIZ+1];	/* linux interface name (NUL-terminated) */
	uint8			mac[ETHER_ADDR_LEN];	/* MAC address for the new interface */
} dhd_if_event_t;
383
/* Interface control information: one instance per virtual interface
 * (primary STA, AP, P2P, ...) in dhd_info::iflist.
 */
typedef struct dhd_if {
	struct dhd_info *info;			/* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net;			/* linux net device for this interface */
	int		idx;			/* iface idx in dongle */
	uint		subunit;		/* subunit */
	uint8		mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
	bool		set_macaddress;		/* MAC change pending (deferred handler) */
	bool		set_multicast;		/* mcast list update pending (deferred) */
	uint8		bssidx;			/* bsscfg index for the interface */
	bool		attached;		/* Delayed attachment when unset */
	bool		txflowcontrol;		/* Per interface flow control indicator */
	char		name[IFNAMSIZ+1];	/* linux interface name */
	char		dngl_name[IFNAMSIZ+1];	/* corresponding dongle interface name */
	struct net_device_stats stats;		/* per-interface tx/rx counters */
#ifdef DHD_WMF
	dhd_wmf_t	wmf;			/* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
	struct list_head sta_list;		/* sll of associated stations */
#if !defined(BCM_GMAC3)
	spinlock_t	sta_list_lock;		/* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
	uint32		ap_isolate;		/* ap-isolation settings */
#ifdef DHD_L2_FILTER
	bool		parp_enable;		/* proxy-ARP enabled */
	bool		parp_discard;		/* proxy-ARP: discard unresolvable frames */
	bool		parp_allnode;
	arp_table_t	*phnd_arp_table;	/* proxy-ARP cache for this BSS */
/* for Per BSS modification */
	bool		dhcp_unicast;		/* convert bcast DHCP replies to unicast */
	bool		block_ping;		/* drop ICMP echo requests */
	bool		grat_arp;		/* drop gratuitous ARP */
#endif /* DHD_L2_FILTER */
} dhd_if_t;
421
422#ifdef WLMEDIA_HTSF
423typedef struct {
424 uint32 low;
425 uint32 high;
426} tsf_t;
427
428typedef struct {
429 uint32 last_cycle;
430 uint32 last_sec;
431 uint32 last_tsf;
432 uint32 coef; /* scaling factor */
433 uint32 coefdec1; /* first decimal */
434 uint32 coefdec2; /* second decimal */
435} htsf_t;
436
437typedef struct {
438 uint32 t1;
439 uint32 t2;
440 uint32 t3;
441 uint32 t4;
442} tstamp_t;
443
444static tstamp_t ts[TSMAX];
445static tstamp_t maxdelayts;
446static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
447
448#endif /* WLMEDIA_HTSF */
449
450struct ipv6_work_info_t {
451 uint8 if_idx;
452 char ipv6_addr[16];
453 unsigned long event;
454};
455
456#ifdef DHD_DEBUG
457typedef struct dhd_dump {
458 uint8 *buf;
459 int bufsize;
460} dhd_dump_t;
461#endif /* DHD_DEBUG */
462
/* When Perimeter locks are deployed, any blocking calls must be preceded
464 * with a PERIM UNLOCK and followed by a PERIM LOCK.
465 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
466 * wait_event_timeout().
467 */
468
469/* Local private structure (extension of pub) */
/* Local private structure (extension of pub): the per-instance driver state
 * that is Linux-specific; dhd_pub_t 'pub' is the OS-independent core.
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t	pub;		/* OS-independent DHD state */
	dhd_if_t	*iflist[DHD_MAX_IFS];	/* for supporting multiple interfaces */

	void		*adapter;	/* adapter information, interrupt, fw path etc. */
	char		fw_path[PATH_MAX];	/* path to firmware image */
	char		nv_path[PATH_MAX];	/* path to nvram vars file */
	char		clm_path[PATH_MAX];	/* path to clm vars file */
	char		conf_path[PATH_MAX];	/* path to config vars file */

	/* serialize dhd iovars */
	struct mutex	dhd_iovar_mutex;

	struct semaphore proto_sem;	/* serialize proto-layer transactions */
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;	/* protects wlfc state */

#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
	htsf_t		htsf;		/* host timestamping state */
#endif
	wait_queue_head_t ioctl_resp_wait;	/* waiters for ioctl completion */
	wait_queue_head_t d3ack_wait;		/* waiters for PCIe D3 ack */
	wait_queue_head_t dhd_bus_busy_state_wait;
	uint32		default_wd_interval;	/* watchdog period (ms) */

	struct timer_list timer;	/* watchdog timer */
	bool		wd_timer_valid;	/* watchdog timer is armed */
#ifdef DHD_PCIE_RUNTIMEPM
	struct timer_list rpm_timer;	/* runtime-PM idle timer */
	bool		rpm_timer_valid;
	tsk_ctl_t	thr_rpm_ctl;	/* runtime-PM thread control */
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;	/* DPC tasklet */
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	dhd_lock;

	struct semaphore sdsem;
	tsk_ctl_t	thr_dpc_ctl;	/* DPC thread control */
	tsk_ctl_t	thr_wdt_ctl;	/* watchdog thread control */

	tsk_ctl_t	thr_rxf_ctl;	/* RX frame thread control */
	spinlock_t	rxf_lock;
	bool		rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct wake_lock wl_wifi;	/* Wifi wakelock */
	struct wake_lock wl_rxwake;	/* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake;	/* Wifi wd wakelock */
	struct wake_lock wl_evtwake;	/* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake;	/* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex	dhd_net_if_mutex;
	struct mutex	dhd_suspend_mutex;
#endif
	spinlock_t	wakelock_spinlock;	/* protects wakelock counters below */
	spinlock_t	wakelock_evt_spinlock;
	uint32		wakelock_event_counter;
	uint32		wakelock_counter;
	int		wakelock_wd_counter;
	int		wakelock_rx_timeout_enable;
	int		wakelock_ctrl_timeout_enable;
	bool		waive_wakelock;
	uint32		wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t	pend_8021x_cnt;	/* in-flight 802.1X frames */
	dhd_attach_states_t dhd_state;	/* bitmask of completed attach stages */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32		pend_ipaddr;	/* host IP pending push to ARP offload table */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
	void		*rpc_th;
	void		*rpc_osh;
	struct timer_list rpcth_timer;
	bool		rpcth_timer_active;
	uint8		fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool		cpufreq_fix_status;
	struct mutex	cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void		*dhd_deferred_wq;	/* deferred work queue (dhd_linux_wq) */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu	*new_freq;
#endif
	unsigned int	unit;		/* instance number (dhd_found + instance_base) */
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32		psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_DEBUG
	dhd_dump_t	*dump;
	struct timer_list join_timer;
	u32		join_timeout_val;
	bool		join_timer_active;
	uint		scan_time_count;
	struct timer_list scan_timer;
	bool		scan_timer_active;
#endif
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t	cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t	cpumask_primary, cpumask_secondary;	/* configuration */
	cpumask_var_t	cpumask_primary_new, cpumask_secondary_new;	/* temp */

	struct notifier_block cpu_notifier;	/* CPU hotplug notifications */

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t	tx_compl_cpu;


	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t	rx_compl_cpu;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run to rx_napi_cpu.
	 */
	struct sk_buff_head rx_pend_queue  ____cacheline_aligned;
	struct sk_buff_head rx_napi_queue  ____cacheline_aligned;
	struct napi_struct  rx_napi_struct ____cacheline_aligned;
	atomic_t	rx_napi_cpu;	/* cpu on which the napi is dispatched */
	struct net_device *rx_napi_netdev;	/* netdev of primary interface */

	struct work_struct rx_napi_dispatcher_work;
	struct work_struct tx_compl_dispatcher_work;
	struct work_struct rx_compl_dispatcher_work;
	/* Number of times DPC Tasklet ran */
	uint32		dhd_dpc_cnt;

	/* Number of times NAPI processing got scheduled */
	uint32		napi_sched_cnt;

	/* Number of times NAPI processing ran on each available core */
	uint32		napi_percpu_run_cnt[NR_CPUS];

	/* Number of times RX Completions got scheduled */
	uint32		rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32		rxc_percpu_run_cnt[NR_CPUS];

	/* Number of times TX Completions got scheduled */
	uint32		txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32		txc_percpu_run_cnt[NR_CPUS];

	/* CPU status */
	/* Number of times each CPU came online */
	uint32		cpu_online_cnt[NR_CPUS];

	/* Number of times each CPU went offline */
	uint32		cpu_offline_cnt[NR_CPUS];

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On Each CPU, when the NAPI RX Packet processing call back was invoked
	 * how many packets were processed is captured in this data structure.
	 * Now its difficult to capture the "exact" number of packets processed.
	 * So considering the packet counter to be a 32 bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
	 * processed is rounded off to the next power of 2 and put in the
	 * appropriate "bin" the value in the bin gets incremented.
	 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32		napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
	uint32		txc_hist[NR_CPUS][HIST_BIN_SIZE];
	uint32		rxc_hist[NR_CPUS][HIST_BIN_SIZE];
#endif /* DHD_LB */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

	struct kobject	dhd_kobj;	/* sysfs anchor for this instance */
#ifdef SUPPORT_SENSORHUB
	uint32		shub_enable;	/* sensor-hub support enabled flag */
#endif /* SUPPORT_SENSORHUB */

	struct delayed_work dhd_memdump_work;	/* deferred firmware memdump */
} dhd_info_t;
695
696#define DHDIF_FWDER(dhdif) FALSE
697
698/* Flag to indicate if we should download firmware on driver load */
699uint dhd_download_fw_on_driverload = TRUE;
700
701/* Flag to indicate if driver is initialized */
702uint dhd_driver_init_done = FALSE;
703
704/* Definitions to provide path to the firmware and nvram
705 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
706 */
707char firmware_path[MOD_PARAM_PATHLEN];
708char nvram_path[MOD_PARAM_PATHLEN];
08dfb6c4 709char clm_path[MOD_PARAM_PATHLEN];
ef6a5fee
RC
710char config_path[MOD_PARAM_PATHLEN];
711
712/* backup buffer for firmware and nvram path */
713char fw_bak_path[MOD_PARAM_PATHLEN];
714char nv_bak_path[MOD_PARAM_PATHLEN];
715
/* information string to keep firmware, chip, chiprev version info visible from log */
717char info_string[MOD_PARAM_INFOLEN];
718module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
719int op_mode = 0;
720int disable_proptx = 0;
721module_param(op_mode, int, 0644);
722
723#if defined(DHD_LB_RXP)
724static int dhd_napi_weight = 32;
725module_param(dhd_napi_weight, int, 0644);
726#endif /* DHD_LB_RXP */
727
728extern int wl_control_wl_start(struct net_device *dev);
729#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
730struct semaphore dhd_registration_sem;
731#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
732
733/* deferred handlers */
734static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
735static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
736static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
737static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
738#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
739static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
740#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
741#ifdef WL_CFG80211
742extern void dhd_netdev_free(struct net_device *ndev);
743#endif /* WL_CFG80211 */
744
745/* Error bits */
746module_param(dhd_msg_level, int, 0);
747#if defined(WL_WIRELESS_EXT)
748module_param(iw_msg_level, int, 0);
749#endif
750#ifdef WL_CFG80211
751module_param(wl_dbg_level, int, 0);
752#endif
753module_param(android_msg_level, int, 0);
754module_param(config_msg_level, int, 0);
755
756#ifdef ARP_OFFLOAD_SUPPORT
757/* ARP offload enable */
758uint dhd_arp_enable = TRUE;
759module_param(dhd_arp_enable, uint, 0);
760
761/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
762
763#ifdef ENABLE_ARP_SNOOP_MODE
764uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
765#else
766uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
767#endif /* ENABLE_ARP_SNOOP_MODE */
768
769module_param(dhd_arp_mode, uint, 0);
770#endif /* ARP_OFFLOAD_SUPPORT */
771
772/* Disable Prop tx */
773module_param(disable_proptx, int, 0644);
774/* load firmware and/or nvram values from the filesystem */
775module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
776module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
08dfb6c4 777module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
ef6a5fee
RC
778module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
779
780/* Watchdog interval */
781
782/* extend watchdog expiration to 2 seconds when DPC is running */
783#define WATCHDOG_EXTEND_INTERVAL (2000)
784
785uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
786module_param(dhd_watchdog_ms, uint, 0);
787
788#ifdef DHD_PCIE_RUNTIMEPM
789uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
790#endif /* DHD_PCIE_RUNTIMEPMT */
791#if defined(DHD_DEBUG)
792/* Console poll interval */
793uint dhd_console_ms = 0;
794module_param(dhd_console_ms, uint, 0644);
795#endif /* defined(DHD_DEBUG) */
796
797
798uint dhd_slpauto = TRUE;
799module_param(dhd_slpauto, uint, 0);
800
801#ifdef PKT_FILTER_SUPPORT
802/* Global Pkt filter enable control */
803uint dhd_pkt_filter_enable = TRUE;
804module_param(dhd_pkt_filter_enable, uint, 0);
805#endif
806
807/* Pkt filter init setup */
808uint dhd_pkt_filter_init = 0;
809module_param(dhd_pkt_filter_init, uint, 0);
810
811/* Pkt filter mode control */
812#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
813uint dhd_master_mode = FALSE;
814#else
815uint dhd_master_mode = FALSE;
816#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
817module_param(dhd_master_mode, uint, 0);
818
819int dhd_watchdog_prio = 0;
820module_param(dhd_watchdog_prio, int, 0);
821
822/* DPC thread priority */
823int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
5967f664 824module_param(dhd_dpc_prio, int, 0644);
ef6a5fee
RC
825
826/* RX frame thread priority */
827int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
5967f664 828module_param(dhd_rxf_prio, int, 0644);
ef6a5fee
RC
829
830int passive_channel_skip = 0;
831module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
832
833#if !defined(BCMDHDUSB)
834extern int dhd_dongle_ramsize;
835module_param(dhd_dongle_ramsize, int, 0);
836#endif /* BCMDHDUSB */
837
838/* Keep track of number of instances */
839static int dhd_found = 0;
840static int instance_base = 0; /* Starting instance number */
841module_param(instance_base, int, 0644);
842
843/* Functions to manage sysfs interface for dhd */
844static int dhd_sysfs_init(dhd_info_t *dhd);
845static void dhd_sysfs_exit(dhd_info_t *dhd);
846
847#if defined(DHD_LB)
848
849static void
850dhd_lb_set_default_cpus(dhd_info_t *dhd)
851{
852 /* Default CPU allocation for the jobs */
853 atomic_set(&dhd->rx_napi_cpu, 1);
854 atomic_set(&dhd->rx_compl_cpu, 2);
855 atomic_set(&dhd->tx_compl_cpu, 2);
856}
857
858static void
859dhd_cpumasks_deinit(dhd_info_t *dhd)
860{
861 free_cpumask_var(dhd->cpumask_curr_avail);
862 free_cpumask_var(dhd->cpumask_primary);
863 free_cpumask_var(dhd->cpumask_primary_new);
864 free_cpumask_var(dhd->cpumask_secondary);
865 free_cpumask_var(dhd->cpumask_secondary_new);
866}
867
868static int
869dhd_cpumasks_init(dhd_info_t *dhd)
870{
871 int id;
872 uint32 cpus;
873 int ret = 0;
874
875 if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
876 !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
877 !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
878 !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
879 !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
880 DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
881 ret = -ENOMEM;
882 goto fail;
883 }
884
885 cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
886 cpumask_clear(dhd->cpumask_primary);
887 cpumask_clear(dhd->cpumask_secondary);
888
889 cpus = DHD_LB_PRIMARY_CPUS;
890 for (id = 0; id < NR_CPUS; id++) {
891 if (isset(&cpus, id))
892 cpumask_set_cpu(id, dhd->cpumask_primary);
893 }
894
895 cpus = DHD_LB_SECONDARY_CPUS;
896 for (id = 0; id < NR_CPUS; id++) {
897 if (isset(&cpus, id))
898 cpumask_set_cpu(id, dhd->cpumask_secondary);
899 }
900
901 return ret;
902fail:
903 dhd_cpumasks_deinit(dhd);
904 return ret;
905}
906
907/*
908 * The CPU Candidacy Algorithm
909 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
910 * The available CPUs for selection are divided into two groups
911 * Primary Set - A CPU mask that carries the First Choice CPUs
912 * Secondary Set - A CPU mask that carries the Second Choice CPUs.
913 *
914 * There are two types of Job, that needs to be assigned to
915 * the CPUs, from one of the above mentioned CPU group. The Jobs are
916 * 1) Rx Packet Processing - napi_cpu
 * 2) Completion Processing (Tx, RX) - compl_cpu
918 *
919 * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
 * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
921 * algo tries to pickup the first available non boot CPU (CPU0) for napi_cpu.
922 * If there are more processors free, it assigns one to compl_cpu.
923 * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
924 * CPU, as much as possible.
925 *
926 * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
927 * would allow Tx completion skb's to be released into a local free pool from
928 * which the rx buffer posts could have been serviced. it is important to note
929 * that a Tx packet may not have a large enough buffer for rx posting.
930 */
/* Recompute which CPUs run the NAPI RX job and the Tx/Rx completion job,
 * preferring CPUs from the primary mask, then the secondary mask, and
 * falling back to CPU0 when nothing suitable is online.  Publishes the
 * result into dhd->rx_napi_cpu / tx_compl_cpu / rx_compl_cpu atomics.
 */
void dhd_select_cpu_candidacy(dhd_info_t *dhd)
{
	uint32 primary_available_cpus; /* count of primary available cpus */
	uint32 secondary_available_cpus; /* count of secondary available cpus */
	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
	uint32 compl_cpu = 0; /* cpu selected for completion jobs */

	cpumask_clear(dhd->cpumask_primary_new);
	cpumask_clear(dhd->cpumask_secondary_new);

	/*
	 * Now select from the primary mask. Even if a Job is
	 * already running on a CPU in secondary group, we still move
	 * to primary CPU. So no conditional checks.
	 */
	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
		dhd->cpumask_curr_avail);

	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
		dhd->cpumask_curr_avail);

	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);

	if (primary_available_cpus > 0) {
		napi_cpu = cpumask_first(dhd->cpumask_primary_new);

		/* If no further CPU is available,
		 * cpumask_next returns >= nr_cpu_ids
		 */
		compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0; /* 0 means "unassigned" here, not CPU0 */
	}

	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu));

	/* -- Now check for the CPUs from the secondary mask -- */
	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);

	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));

	if (secondary_available_cpus > 0) {
		/* At this point if napi_cpu is unassigned it means no CPU
		 * is online from Primary Group
		 */
		if (napi_cpu == 0) {
			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
			compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
		} else if (compl_cpu == 0) {
			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
		}

		/* If no CPU was available for completion, choose CPU 0 */
		if (compl_cpu >= nr_cpu_ids)
			compl_cpu = 0;
	}
	if ((primary_available_cpus == 0) &&
		(secondary_available_cpus == 0)) {
		/* No CPUs available from primary or secondary mask */
		napi_cpu = 0;
		compl_cpu = 0;
	}

	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
		__FUNCTION__, napi_cpu, compl_cpu));
	ASSERT(napi_cpu < nr_cpu_ids);
	ASSERT(compl_cpu < nr_cpu_ids);

	/* Publish the selection for the dispatcher work items to consume. */
	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
	return;
}
1006
1007/*
1008 * Function to handle CPU Hotplug notifications.
1009 * One of the task it does is to trigger the CPU Candidacy algorithm
1010 * for load balancing.
1011 */
1012int
1013dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1014{
1015 unsigned int cpu = (unsigned int)(long)hcpu;
1016
1017 dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
1018
1019 switch (action)
1020 {
1021 case CPU_ONLINE:
1022 DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
1023 cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
1024 dhd_select_cpu_candidacy(dhd);
1025 break;
1026
1027 case CPU_DOWN_PREPARE:
1028 case CPU_DOWN_PREPARE_FROZEN:
1029 DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
1030 cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
1031 dhd_select_cpu_candidacy(dhd);
1032 break;
1033 default:
1034 break;
1035 }
1036
1037 return NOTIFY_OK;
1038}
1039
1040#if defined(DHD_LB_STATS)
1041void dhd_lb_stats_init(dhd_pub_t *dhdp)
1042{
1043 dhd_info_t *dhd;
1044 int i, j;
1045
1046 if (dhdp == NULL) {
1047 DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
1048 __FUNCTION__));
1049 return;
1050 }
1051
1052 dhd = dhdp->info;
1053 if (dhd == NULL) {
1054 DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
1055 return;
1056 }
1057
1058 DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
1059 DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
1060 DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
1061 DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
1062
1063 for (i = 0; i < NR_CPUS; i++) {
1064 DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
1065 DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
1066 DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
1067
1068 DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
1069 DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
1070 }
1071
1072 for (i = 0; i < NR_CPUS; i++) {
1073 for (j = 0; j < HIST_BIN_SIZE; j++) {
1074 DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
1075 DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
1076 DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
1077 }
1078 }
1079
1080 return;
1081}
1082
1083static void dhd_lb_stats_dump_histo(
1084 struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
1085{
1086 int i, j;
1087 uint32 per_cpu_total[NR_CPUS] = {0};
1088 uint32 total = 0;
1089
1090 bcm_bprintf(strbuf, "CPU: \t\t");
1091 for (i = 0; i < num_possible_cpus(); i++)
1092 bcm_bprintf(strbuf, "%d\t", i);
1093 bcm_bprintf(strbuf, "\nBin\n");
1094
1095 for (i = 0; i < HIST_BIN_SIZE; i++) {
1096 bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
1097 for (j = 0; j < num_possible_cpus(); j++) {
1098 bcm_bprintf(strbuf, "%d\t", hist[j][i]);
1099 }
1100 bcm_bprintf(strbuf, "\n");
1101 }
1102 bcm_bprintf(strbuf, "Per CPU Total \t");
1103 total = 0;
1104 for (i = 0; i < num_possible_cpus(); i++) {
1105 for (j = 0; j < HIST_BIN_SIZE; j++) {
1106 per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
1107 }
1108 bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
1109 total += per_cpu_total[i];
1110 }
1111 bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
1112
1113 return;
1114}
1115
1116static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
1117{
1118 int i;
1119
1120 bcm_bprintf(strbuf, "CPU: \t");
1121 for (i = 0; i < num_possible_cpus(); i++)
1122 bcm_bprintf(strbuf, "%d\t", i);
1123 bcm_bprintf(strbuf, "\n");
1124
1125 bcm_bprintf(strbuf, "Val: \t");
1126 for (i = 0; i < num_possible_cpus(); i++)
1127 bcm_bprintf(strbuf, "%u\t", *(p+i));
1128 bcm_bprintf(strbuf, "\n");
1129 return;
1130}
1131
/* Dump all load-balance statistics into strbuf: hotplug counters, scheduler
 * counters, and (per enabled feature) per-CPU run counts and histograms.
 */
void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	dhd_info_t *dhd;

	if (dhdp == NULL || strbuf == NULL) {
		DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
			__FUNCTION__, dhdp, strbuf));
		return;
	}

	dhd = dhdp->info;
	if (dhd == NULL) {
		DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
		return;
	}

	/* CPU hotplug activity seen by dhd_cpu_callback() */
	bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);

	bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);

	bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
		dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
		dhd->txc_sched_cnt);
#ifdef DHD_LB_RXP
	/* NAPI rx processing load-balance stats */
	bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
#endif /* DHD_LB_RXP */

#ifdef DHD_LB_RXC
	/* Rx completion (buffer post) load-balance stats */
	bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
#endif /* DHD_LB_RXC */


#ifdef DHD_LB_TXC
	/* Tx completion (buffer free) load-balance stats */
	bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
	dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
	bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
	dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
#endif /* DHD_LB_TXC */
}
1179
1180static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
1181{
1182 uint32 bin_power;
1183 uint32 *p = NULL;
1184
1185 bin_power = next_larger_power2(count);
1186
1187 switch (bin_power) {
1188 case 0: break;
1189 case 1: /* Fall through intentionally */
1190 case 2: p = bin + 0; break;
1191 case 4: p = bin + 1; break;
1192 case 8: p = bin + 2; break;
1193 case 16: p = bin + 3; break;
1194 case 32: p = bin + 4; break;
1195 case 64: p = bin + 5; break;
1196 case 128: p = bin + 6; break;
1197 default : p = bin + 7; break;
1198 }
1199 if (p)
1200 *p = *p + 1;
1201 return;
1202}
1203
1204extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
1205{
1206 int cpu;
1207 dhd_info_t *dhd = dhdp->info;
1208
1209 cpu = get_cpu();
1210 put_cpu();
1211 dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
1212
1213 return;
1214}
1215
1216extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
1217{
1218 int cpu;
1219 dhd_info_t *dhd = dhdp->info;
1220
1221 cpu = get_cpu();
1222 put_cpu();
1223 dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
1224
1225 return;
1226}
1227
1228extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
1229{
1230 int cpu;
1231 dhd_info_t *dhd = dhdp->info;
1232
1233 cpu = get_cpu();
1234 put_cpu();
1235 dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
1236
1237 return;
1238}
1239
/* Count one Tx-completion job run on the current CPU. */
extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
}
1245
/* Count one Rx-completion job run on the current CPU. */
extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
}
1251
1252#endif /* DHD_LB_STATS */
1253#endif /* DHD_LB */
1254
1255
1256#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
1257int g_frameburst = 1;
1258#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
1259
1260static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
1261
/* DHD Perimeter lock only used in router with bypass forwarding. */
1263#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
1264#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
1265#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
1266
1267#ifdef PCIE_FULL_DONGLE
1268#if defined(BCM_GMAC3)
1269#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
1270#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1271#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
1272
1273#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1274#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
1275#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
1276#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1277
1278#else /* ! BCM_GMAC3 */
1279#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
1280#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
1281 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
1282#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
1283 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
1284
1285#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1286static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
1287 struct list_head *snapshot_list);
1288static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
1289#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
1290#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
1291#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
1292
1293#endif /* ! BCM_GMAC3 */
1294#endif /* PCIE_FULL_DONGLE */
1295
1296/* Control fw roaming */
1297uint dhd_roam_disable = 0;
1298
1299#ifdef BCMDBGFS
1300extern int dhd_dbg_init(dhd_pub_t *dhdp);
1301extern void dhd_dbg_remove(void);
1302#endif
1303
1304/* Control radio state */
1305uint dhd_radio_up = 1;
1306
/* Network interface name */
1308char iface_name[IFNAMSIZ] = {'\0'};
1309module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
1310
1311/* The following are specific to the SDIO dongle */
1312
1313/* IOCTL response timeout */
1314int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
1315
1316/* Idle timeout for backplane clock */
1317int dhd_idletime = DHD_IDLETIME_TICKS;
1318module_param(dhd_idletime, int, 0);
1319
1320/* Use polling */
1321uint dhd_poll = FALSE;
1322module_param(dhd_poll, uint, 0);
1323
1324/* Use interrupts */
1325uint dhd_intr = TRUE;
1326module_param(dhd_intr, uint, 0);
1327
1328/* SDIO Drive Strength (in milliamps) */
1329uint dhd_sdiod_drive_strength = 6;
1330module_param(dhd_sdiod_drive_strength, uint, 0);
1331
1332#ifdef BCMSDIO
1333/* Tx/Rx bounds */
1334extern uint dhd_txbound;
1335extern uint dhd_rxbound;
1336module_param(dhd_txbound, uint, 0);
1337module_param(dhd_rxbound, uint, 0);
1338
1339/* Deferred transmits */
1340extern uint dhd_deferred_tx;
1341module_param(dhd_deferred_tx, uint, 0);
1342
1343#endif /* BCMSDIO */
1344
1345
1346#ifdef SDTEST
1347/* Echo packet generator (pkts/s) */
1348uint dhd_pktgen = 0;
1349module_param(dhd_pktgen, uint, 0);
1350
1351/* Echo packet len (0 => sawtooth, max 2040) */
1352uint dhd_pktgen_len = 0;
1353module_param(dhd_pktgen_len, uint, 0);
1354#endif /* SDTEST */
1355
1356
1357
1358/* Allow delayed firmware download for debug purpose */
1359int allow_delay_fwdl = FALSE;
1360module_param(allow_delay_fwdl, int, 0);
1361
1362extern char dhd_version[];
1363extern char fw_version[];
08dfb6c4 1364extern char clm_version[];
ef6a5fee
RC
1365
1366int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
1367static void dhd_net_if_lock_local(dhd_info_t *dhd);
1368static void dhd_net_if_unlock_local(dhd_info_t *dhd);
1369static void dhd_suspend_lock(dhd_pub_t *dhdp);
1370static void dhd_suspend_unlock(dhd_pub_t *dhdp);
1371
1372#ifdef WLMEDIA_HTSF
1373void htsf_update(dhd_info_t *dhd, void *data);
1374tsf_t prev_tsf, cur_tsf;
1375
1376uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
1377static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
1378static void dhd_dump_latency(void);
1379static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
1380static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
1381static void dhd_dump_htsfhisto(histo_t *his, char *s);
1382#endif /* WLMEDIA_HTSF */
1383
1384/* Monitor interface */
1385int dhd_monitor_init(void *dhd_pub);
1386int dhd_monitor_uninit(void);
1387
1388
1389#if defined(WL_WIRELESS_EXT)
1390struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
1391#endif /* defined(WL_WIRELESS_EXT) */
1392
1393static void dhd_dpc(ulong data);
1394/* forward decl */
1395extern int dhd_wait_pend8021x(struct net_device *dev);
1396void dhd_os_wd_timer_extend(void *bus, bool extend);
1397
1398#ifdef TOE
1399#ifndef BDC
1400#error TOE requires BDC
1401#endif /* !BDC */
1402static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
1403static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
1404#endif /* TOE */
1405
1406static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
1407 wl_event_msg_t *event_ptr, void **data_ptr);
1408
1409#if defined(CONFIG_PM_SLEEP)
1410static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
1411{
1412 int ret = NOTIFY_DONE;
1413 bool suspend = FALSE;
1414 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
1415
1416 BCM_REFERENCE(dhdinfo);
1417
1418 switch (action) {
1419 case PM_HIBERNATION_PREPARE:
1420 case PM_SUSPEND_PREPARE:
1421 suspend = TRUE;
1422 break;
1423
1424 case PM_POST_HIBERNATION:
1425 case PM_POST_SUSPEND:
1426 suspend = FALSE;
1427 break;
1428 }
1429
1430#if defined(SUPPORT_P2P_GO_PS)
1431#ifdef PROP_TXSTATUS
1432 if (suspend) {
1433 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
1434 dhd_wlfc_suspend(&dhdinfo->pub);
1435 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
1436 } else
1437 dhd_wlfc_resume(&dhdinfo->pub);
1438#endif /* PROP_TXSTATUS */
1439#endif /* defined(SUPPORT_P2P_GO_PS) */
1440
1441#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
1442 KERNEL_VERSION(2, 6, 39))
1443 dhd_mmc_suspend = suspend;
1444 smp_mb();
1445#endif
1446
1447 return ret;
1448}
1449
1450/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
1451 * created in kernel notifier link list (with 'next' pointing to itself)
1452 */
1453static bool dhd_pm_notifier_registered = FALSE;
1454
1455extern int register_pm_notifier(struct notifier_block *nb);
1456extern int unregister_pm_notifier(struct notifier_block *nb);
1457#endif /* CONFIG_PM_SLEEP */
1458
1459/* Request scheduling of the bus rx frame */
1460static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
1461static void dhd_os_rxflock(dhd_pub_t *pub);
1462static void dhd_os_rxfunlock(dhd_pub_t *pub);
1463
1464/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
1465typedef struct dhd_dev_priv {
1466 dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
1467 dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
1468 int ifidx; /* interface index */
1469} dhd_dev_priv_t;
1470
1471#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
1472#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
1473#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
1474#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
1475#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
1476
1477/** Clear the dhd net_device's private structure. */
1478static inline void
1479dhd_dev_priv_clear(struct net_device * dev)
1480{
1481 dhd_dev_priv_t * dev_priv;
1482 ASSERT(dev != (struct net_device *)NULL);
1483 dev_priv = DHD_DEV_PRIV(dev);
1484 dev_priv->dhd = (dhd_info_t *)NULL;
1485 dev_priv->ifp = (dhd_if_t *)NULL;
1486 dev_priv->ifidx = DHD_BAD_IF;
1487}
1488
1489/** Setup the dhd net_device's private structure. */
1490static inline void
1491dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
1492 int ifidx)
1493{
1494 dhd_dev_priv_t * dev_priv;
1495 ASSERT(dev != (struct net_device *)NULL);
1496 dev_priv = DHD_DEV_PRIV(dev);
1497 dev_priv->dhd = dhd;
1498 dev_priv->ifp = ifp;
1499 dev_priv->ifidx = ifidx;
1500}
1501
1502#ifdef PCIE_FULL_DONGLE
1503
1504/** Dummy objects are defined with state representing bad|down.
1505 * Performance gains from reducing branch conditionals, instruction parallelism,
1506 * dual issue, reducing load shadows, avail of larger pipelines.
1507 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
1508 * is accessed via the dhd_sta_t.
1509 */
1510
1511/* Dummy dhd_info object */
1512dhd_info_t dhd_info_null = {
1513#if defined(BCM_GMAC3)
1514 .fwdh = FWDER_NULL,
1515#endif
1516 .pub = {
1517 .info = &dhd_info_null,
1518#ifdef DHDTCPACK_SUPPRESS
1519 .tcpack_sup_mode = TCPACK_SUP_REPLACE,
1520#endif /* DHDTCPACK_SUPPRESS */
1521 .up = FALSE,
1522 .busstate = DHD_BUS_DOWN
1523 }
1524};
1525#define DHD_INFO_NULL (&dhd_info_null)
1526#define DHD_PUB_NULL (&dhd_info_null.pub)
1527
1528/* Dummy netdevice object */
1529struct net_device dhd_net_dev_null = {
1530 .reg_state = NETREG_UNREGISTERED
1531};
1532#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
1533
1534/* Dummy dhd_if object */
1535dhd_if_t dhd_if_null = {
1536#if defined(BCM_GMAC3)
1537 .fwdh = FWDER_NULL,
1538#endif
1539#ifdef WMF
1540 .wmf = { .wmf_enable = TRUE },
1541#endif
1542 .info = DHD_INFO_NULL,
1543 .net = DHD_NET_DEV_NULL,
1544 .idx = DHD_BAD_IF
1545};
1546#define DHD_IF_NULL (&dhd_if_null)
1547
1548#define DHD_STA_NULL ((dhd_sta_t *)NULL)
1549
1550/** Interface STA list management. */
1551
1552/** Fetch the dhd_if object, given the interface index in the dhd. */
1553static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
1554
1555/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
1556static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
1557static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
1558
1559/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
1560static void dhd_if_del_sta_list(dhd_if_t * ifp);
1561static void dhd_if_flush_sta(dhd_if_t * ifp);
1562
1563/* Construct/Destruct a sta pool. */
1564static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
1565static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
1566/* Clear the pool of dhd_sta_t objects for built-in type driver */
1567static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
1568
1569
1570/* Return interface pointer */
1571static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
1572{
1573 ASSERT(ifidx < DHD_MAX_IFS);
1574
1575 if (ifidx >= DHD_MAX_IFS)
1576 return NULL;
1577
1578 return dhdp->info->iflist[ifidx];
1579}
1580
1581/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
	int prio;

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			/* Mark the ring as freeing and drain its queue under
			 * the flowring lock so producers see the new status.
			 */
			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

			if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
				void * pkt;
				while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
					PKTFREE(dhdp->osh, pkt, TRUE);
				}
			}

			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}

	/* Return the station id and reset the object to its "free" state;
	 * dhd_sta_alloc() asserts exactly these field values on reuse.
	 */
	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}
1636
1637/** Allocate a dhd_sta object from the dhd pool. */
1638static dhd_sta_t *
1639dhd_sta_alloc(dhd_pub_t * dhdp)
1640{
1641 uint16 idx;
1642 dhd_sta_t * sta;
1643 dhd_sta_pool_t * sta_pool;
1644
1645 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1646
1647 idx = id16_map_alloc(dhdp->staid_allocator);
1648 if (idx == ID16_INVALID) {
1649 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1650 return DHD_STA_NULL;
1651 }
1652
1653 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1654 sta = &sta_pool[idx];
1655
1656 ASSERT((sta->idx == ID16_INVALID) &&
1657 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1658
1659 DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
1660
1661 sta->idx = idx; /* implying allocated */
1662
1663 return sta;
1664}
1665
1666/** Delete all STAs in an interface's STA list. */
1667static void
1668dhd_if_del_sta_list(dhd_if_t *ifp)
1669{
1670 dhd_sta_t *sta, *next;
1671 unsigned long flags;
1672
1673 DHD_IF_STA_LIST_LOCK(ifp, flags);
1674
1675 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1676#if defined(BCM_GMAC3)
1677 if (ifp->fwdh) {
1678 /* Remove sta from WOFA forwarder. */
1679 fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
1680 }
1681#endif /* BCM_GMAC3 */
1682 list_del(&sta->list);
1683 dhd_sta_free(&ifp->info->pub, sta);
1684 }
1685
1686 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1687
1688 return;
1689}
1690
1691/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
#if defined(BCM_GMAC3)
	/* Only the forwarder's WOFA entries are flushed here; the sta_list
	 * itself is left intact (see dhd_if_del_sta_list for full teardown).
	 */
	if (ifp && (ifp->fwdh != FWDER_NULL)) {
		dhd_sta_t *sta, *next;
		unsigned long flags;

		DHD_IF_STA_LIST_LOCK(ifp, flags);

		list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
			/* Remove any sta entry from WOFA forwarder. */
			fwder_flush(ifp->fwdh, (wofa_t)sta);
		}

		DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	}
#endif /* BCM_GMAC3 */
}
1712
1713/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		/* Unwind the id allocator on partial failure */
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	/* Draw all ids in descending index order, then release them below in
	 * ascending order via dhd_sta_free(), which also resets each object
	 * to its "free" state (idx = ID16_INVALID). Presumably the ordering
	 * seeds the allocator to hand out low ids first -- confirm against
	 * the id16_map implementation.
	 */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}
1762
1763/** Destruct the pool of dhd_sta_t objects.
1764 * Caller must ensure that no STA objects are currently associated with an if.
1765 */
1766static void
1767dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1768{
1769 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1770
1771 if (sta_pool) {
1772 int idx;
1773 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1774 for (idx = 1; idx <= max_sta; idx++) {
1775 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1776 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1777 }
1778 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1779 dhdp->sta_pool = NULL;
1780 }
1781
1782 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1783 dhdp->staid_allocator = NULL;
1784}
1785
1786/* Clear the pool of dhd_sta_t objects for built-in type driver */
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, prio, sta_pool_memsz;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool.
	 * Same two-pass scheme as dhd_sta_pool_init(): draw ids in
	 * descending index order, then release each entry in ascending
	 * order via dhd_sta_free() to rebuild the free pool.
	 */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
		dhd_sta_free(dhdp, sta);
	}
}
1835
1836/** Find STA with MAC address ea in an interface's STA list. */
1837dhd_sta_t *
1838dhd_find_sta(void *pub, int ifidx, void *ea)
1839{
1840 dhd_sta_t *sta;
1841 dhd_if_t *ifp;
1842 unsigned long flags;
1843
1844 ASSERT(ea != NULL);
1845 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1846 if (ifp == NULL)
1847 return DHD_STA_NULL;
1848
1849 DHD_IF_STA_LIST_LOCK(ifp, flags);
1850
1851 list_for_each_entry(sta, &ifp->sta_list, list) {
1852 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1853 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1854 return sta;
1855 }
1856 }
1857
1858 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1859
1860 return DHD_STA_NULL;
1861}
1862
1863/** Add STA into the interface's STA list. */
1864dhd_sta_t *
1865dhd_add_sta(void *pub, int ifidx, void *ea)
1866{
1867 dhd_sta_t *sta;
1868 dhd_if_t *ifp;
1869 unsigned long flags;
1870
1871 ASSERT(ea != NULL);
1872 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1873 if (ifp == NULL)
1874 return DHD_STA_NULL;
1875
1876 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1877 if (sta == DHD_STA_NULL) {
1878 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1879 return DHD_STA_NULL;
1880 }
1881
1882 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1883
1884 /* link the sta and the dhd interface */
1885 sta->ifp = ifp;
1886 sta->ifidx = ifidx;
1887 INIT_LIST_HEAD(&sta->list);
1888
1889 DHD_IF_STA_LIST_LOCK(ifp, flags);
1890
1891 list_add_tail(&sta->list, &ifp->sta_list);
1892
1893#if defined(BCM_GMAC3)
1894 if (ifp->fwdh) {
1895 ASSERT(ISALIGNED(ea, 2));
1896 /* Add sta to WOFA forwarder. */
1897 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1898 }
1899#endif /* BCM_GMAC3 */
1900
1901 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1902
1903 return sta;
1904}
1905
1906/** Delete STA from the interface's STA list. */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	/* The loop deliberately scans the whole list (no break), so any
	 * duplicate entries with the same MAC are all removed.
	 */
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
#if defined(BCM_GMAC3)
			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
				ASSERT(ISALIGNED(ea, 2));
				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
			}
#endif /* BCM_GMAC3 */
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear Proxy ARP cache of specific Ethernet Address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}
1944
1945/** Add STA if it doesn't exist. Not reentrant. */
1946dhd_sta_t*
1947dhd_findadd_sta(void *pub, int ifidx, void *ea)
1948{
1949 dhd_sta_t *sta;
1950
1951 sta = dhd_find_sta(pub, ifidx, ea);
1952
1953 if (!sta) {
1954 /* Add entry */
1955 sta = dhd_add_sta(pub, ifidx, ea);
1956 }
1957
1958 return sta;
1959}
1960
1961#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
1962#if !defined(BCM_GMAC3)
1963static struct list_head *
1964dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
1965{
1966 unsigned long flags;
1967 dhd_sta_t *sta, *snapshot;
1968
1969 INIT_LIST_HEAD(snapshot_list);
1970
1971 DHD_IF_STA_LIST_LOCK(ifp, flags);
1972
1973 list_for_each_entry(sta, &ifp->sta_list, list) {
1974 /* allocate one and add to snapshot */
1975 snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
1976 if (snapshot == NULL) {
1977 DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
1978 continue;
1979 }
1980
1981 memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
1982
1983 INIT_LIST_HEAD(&snapshot->list);
1984 list_add_tail(&snapshot->list, snapshot_list);
1985 }
1986
1987 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1988
1989 return snapshot_list;
1990}
1991
1992static void
1993dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
1994{
1995 dhd_sta_t *sta, *next;
1996
1997 list_for_each_entry_safe(sta, next, snapshot_list, list) {
1998 list_del(&sta->list);
1999 MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
2000 }
2001}
2002#endif /* !BCM_GMAC3 */
2003#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
2004
#else
/* Non-PCIE_FULL_DONGLE builds: host-side STA bookkeeping is not needed,
 * so the whole STA API collapses to no-op stubs with identical signatures.
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */
2014
2015
2016#if defined(DHD_LB)
2017
2018#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
2019/**
2020 * dhd_tasklet_schedule - Function that runs in IPI context of the destination
2021 * CPU and schedules a tasklet.
2022 * @tasklet: opaque pointer to the tasklet
2023 */
2024static INLINE void
2025dhd_tasklet_schedule(void *tasklet)
2026{
2027 tasklet_schedule((struct tasklet_struct *)tasklet);
2028}
2029
2030/**
2031 * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU
2032 * @tasklet: tasklet to be scheduled
2033 * @on_cpu: cpu core id
2034 *
2035 * If the requested cpu is online, then an IPI is sent to this cpu via the
2036 * smp_call_function_single with no wait and the tasklet_schedule function
2037 * will be invoked to schedule the specified tasklet on the requested CPU.
2038 */
2039static void
2040dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
2041{
2042 const int wait = 0;
2043 smp_call_function_single(on_cpu,
2044 dhd_tasklet_schedule, (void *)tasklet, wait);
2045}
2046#endif /* DHD_LB_TXC || DHD_LB_RXC */
2047
2048
2049#if defined(DHD_LB_TXC)
/**
 * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
 * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
 * freeing the packets placed in the tx_compl workq
 */
void
dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	/* netdev gone: load-balancer infrastructure is torn down, bail out */
	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(&dhd->tx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		/* run locally right away; an IPI hop buys nothing here */
		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
	} else {
		/* defer to process context: the dispatcher work sends the
		 * IPI under get_online_cpus() protection
		 */
		schedule_work(&dhd->tx_compl_dispatcher_work);
	}
}
2082
2083static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
2084{
2085 struct dhd_info *dhd =
2086 container_of(work, struct dhd_info, tx_compl_dispatcher_work);
2087 int cpu;
2088
2089 get_online_cpus();
2090 cpu = atomic_read(&dhd->tx_compl_cpu);
2091 if (!cpu_online(cpu))
2092 dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
2093 else
2094 dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
2095 put_online_cpus();
2096}
2097
2098#endif /* DHD_LB_TXC */
2099
2100
2101#if defined(DHD_LB_RXC)
/**
 * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
 * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
 * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
 * placed in the rx_compl workq.
 *
 * @dhdp: pointer to dhd_pub object
 */
void
dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu, on_cpu;

	/* netdev gone: load-balancer infrastructure is torn down, bail out */
	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(&dhd->rx_compl_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		/* run locally right away; an IPI hop buys nothing here */
		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
	} else {
		/* defer to the workqueue dispatcher, which IPIs under
		 * get_online_cpus() protection
		 */
		schedule_work(&dhd->rx_compl_dispatcher_work);
	}
}
2137
2138static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
2139{
2140 struct dhd_info *dhd =
2141 container_of(work, struct dhd_info, rx_compl_dispatcher_work);
2142 int cpu;
2143
2144 get_online_cpus();
2145 cpu = atomic_read(&dhd->tx_compl_cpu);
2146 if (!cpu_online(cpu))
2147 dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
2148 else
2149 dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
2150 put_online_cpus();
2151}
2152
2153#endif /* DHD_LB_RXC */
2154
2155
2156#if defined(DHD_LB_RXP)
/**
 * dhd_napi_poll - Load balance napi poll function to process received
 * packets and send up the network stack using netif_receive_skb()
 *
 * @napi: napi object in which context this poll function is invoked
 * @budget: number of packets to be processed.
 *
 * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
 * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
 * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
 * packet tag and sendup.
 */
static int
dhd_napi_poll(struct napi_struct *napi, int budget)
{
	int ifid;
	const int pkt_count = 1;
	const int chan = 0;
	struct sk_buff * skb;
	unsigned long flags;
	struct dhd_info *dhd;
	int processed = 0;
	struct sk_buff_head rx_process_queue;

	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));

	__skb_queue_head_init(&rx_process_queue);

	/* extract the entire rx_napi_queue into local rx_process_queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
		OSL_PREFETCH(skb->data);

		/* interface index was stashed in the pkttag by the enqueuer
		 * (dhd_lb_rx_pkt_enqueue)
		 */
		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));

		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
			__FUNCTION__, skb, ifid));

		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
		processed++;
	}

	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);

	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
	/* NOTE(review): napi_complete() is called unconditionally and the
	 * return value is always budget - 1, i.e. the drained queue is fully
	 * processed in one poll regardless of @budget — confirm this is the
	 * intended NAPI contract for this driver (normally a poll returning
	 * < budget must have completed, which holds here, but @budget itself
	 * is never enforced).
	 */
	napi_complete(napi);

	return budget - 1;
}
2211
/**
 * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
 * poll list. This function may be invoked via the smp_call_function_single
 * from a remote CPU.
 *
 * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
 * after the napi_struct is added to the softnet data's poll_list
 *
 * @info: pointer to a dhd_info struct
 * (void* signature so it can be used directly as an IPI callback)
 */
static void
dhd_napi_schedule(void *info)
{
	dhd_info_t *dhd = (dhd_info_t *)info;

	DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
		__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));

	/* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
	if (napi_schedule_prep(&dhd->rx_napi_struct)) {
		__napi_schedule(&dhd->rx_napi_struct);
		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
	}

	/*
	 * If the rx_napi_struct was already running, then we let it complete
	 * processing all its packets. The rx_napi_struct may only run on one
	 * core at a time, to avoid out-of-order handling.
	 */
}
2242
2243/**
2244 * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
2245 * action after placing the dhd's rx_process napi object in the the remote CPU's
2246 * softnet data's poll_list.
2247 *
2248 * @dhd: dhd_info which has the rx_process napi object
2249 * @on_cpu: desired remote CPU id
2250 */
2251static INLINE int
2252dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
2253{
2254 int wait = 0; /* asynchronous IPI */
2255
2256 DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
2257 __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
2258
2259 if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
2260 DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
2261 __FUNCTION__, on_cpu));
2262 }
2263
2264 DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
2265
2266 return 0;
2267}
2268
2269/*
2270 * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
2271 * Why should we do this?
2272 * The candidacy algorithm is run from the call back function
2273 * registered to CPU hotplug notifier. This call back happens from Worker
2274 * context. The dhd_napi_schedule_on is also from worker context.
2275 * Note that both of this can run on two different CPUs at the same time.
2276 * So we can possibly have a window where a given CPUn is being brought
2277 * down from CPUm while we try to run a function on CPUn.
2278 * To prevent this its better have the whole code to execute an SMP
2279 * function under get_online_cpus.
2280 * This function call ensures that hotplug mechanism does not kick-in
2281 * until we are done dealing with online CPUs
2282 * If the hotplug worker is already running, no worries because the
2283 * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
2284 *
2285 * The below mentioned code structure is proposed in
2286 * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
2287 * for the question
2288 * Q: I need to ensure that a particular cpu is not removed when there is some
2289 * work specific to this cpu is in progress
2290 *
2291 * According to the documentation calling get_online_cpus is NOT required, if
2292 * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
2293 * run from Work Queue context we have to call these functions
2294 */
2295static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
2296{
2297 struct dhd_info *dhd =
2298 container_of(work, struct dhd_info, rx_napi_dispatcher_work);
2299 int cpu;
2300
2301 get_online_cpus();
2302 cpu = atomic_read(&dhd->rx_napi_cpu);
2303 if (!cpu_online(cpu))
2304 dhd_napi_schedule(dhd);
2305 else
2306 dhd_napi_schedule_on(dhd, cpu);
2307 put_online_cpus();
2308}
2309
/**
 * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
 * to run on another CPU. The rx_napi_struct's poll function will retrieve all
 * the packets enqueued into the rx_napi_queue and sendup.
 * The producer's rx packet queue is appended to the rx_napi_queue before
 * dispatching the rx_napi_struct.
 */
void
dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
{
	unsigned long flags;
	dhd_info_t *dhd = dhdp->info;
	int curr_cpu;
	int on_cpu;

	/* netdev gone: napi infrastructure is torn down, bail out */
	if (dhd->rx_napi_netdev == NULL) {
		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
		return;
	}

	DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
		skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));

	/* append the producer's queue of packets to the napi's rx process queue */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	/*
	 * If the destination CPU is NOT online or is same as current CPU
	 * no need to schedule the work
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(&dhd->rx_napi_cpu);

	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
		/* schedule napi on this core directly */
		dhd_napi_schedule(dhd);
	} else {
		/* defer to the workqueue dispatcher, which IPIs under
		 * get_online_cpus() protection
		 */
		schedule_work(&dhd->rx_napi_dispatcher_work);
	}
}
2353
/**
 * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
 */
void
dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
{
	dhd_info_t *dhd = dhdp->info;

	DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
		pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
	/* stash the interface index in the pkttag so dhd_napi_poll() can
	 * recover it after the queue splice
	 */
	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
	/* lockless __skb_queue_tail: assumes a single producer context owns
	 * rx_pend_queue until it is spliced in dhd_lb_rx_napi_dispatch —
	 * confirm against the callers
	 */
	__skb_queue_tail(&dhd->rx_pend_queue, pkt);
}
2367#endif /* DHD_LB_RXP */
2368
2369#endif /* DHD_LB */
2370
/** Delayed-work handler: collect a dongle memory dump (PCIe builds only). */
static void dhd_memdump_work_handler(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, dhd_memdump_work.work);

	/* silences the "set but unused" warning when BCMPCIE is not defined */
	BCM_REFERENCE(dhd);
#ifdef BCMPCIE
	dhd_prot_collect_memdump(&dhd->pub);
#endif
}
2381
2382
2383/** Returns dhd iflist index corresponding the the bssidx provided by apps */
2384int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
2385{
2386 dhd_if_t *ifp;
2387 dhd_info_t *dhd = dhdp->info;
2388 int i;
2389
2390 ASSERT(bssidx < DHD_MAX_IFS);
2391 ASSERT(dhdp);
2392
2393 for (i = 0; i < DHD_MAX_IFS; i++) {
2394 ifp = dhd->iflist[i];
2395 if (ifp && (ifp->bssidx == bssidx)) {
2396 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
2397 ifp->name, bssidx, i));
2398 break;
2399 }
2400 }
2401 return i;
2402}
2403
/**
 * Push one skb into the fixed-size rx-frame ring consumed by the rxf thread.
 *
 * The ring (dhdp->skbbuf, MAXSKBPEND entries) uses store_idx/sent_idx with a
 * power-of-two wrap mask; a non-NULL slot at store_idx means the consumer has
 * not caught up.  Returns BCME_OK on success, BCME_BUSY (RXF_DEQUEUE_ON_BUSY
 * builds) or BCME_ERROR when the ring is full or @skb is NULL.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* store, then advance with the power-of-two wrap mask */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
2444
/**
 * Pop the oldest skb from the rx-frame ring (counterpart of
 * dhd_rxf_enqueue).  Returns NULL when the ring is empty; otherwise the
 * slot is cleared and sent_idx advances with the power-of-two wrap mask.
 */
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;

	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	if (skb == NULL) {
		/* ring empty: nothing stored at the consumer index */
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}

	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));

	dhd_os_rxfunlock(dhdp);

	return skb;
}
2474
2475int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
2476{
2477 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2478
2479 if (prepost) { /* pre process */
2480 dhd_read_macaddr(dhd);
2481 } else { /* post process */
2482 dhd_write_macaddr(&dhd->pub.mac);
2483 }
2484
2485 return 0;
2486}
2487
2488// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
2489#if defined(PKT_FILTER_SUPPORT) &&defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
2490static bool
2491_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
2492{
2493 bool _apply = FALSE;
2494 /* In case of IBSS mode, apply arp pkt filter */
2495 if (op_mode & DHD_FLAG_IBSS_MODE) {
2496 _apply = TRUE;
2497 goto exit;
2498 }
2499 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
2500 if ((dhd->arp_version == 1) &&
2501 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
2502 _apply = TRUE;
2503 goto exit;
2504 }
2505
2506exit:
2507 return _apply;
2508}
2509#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
2510
2511void dhd_set_packet_filter(dhd_pub_t *dhd)
2512{
2513#ifdef PKT_FILTER_SUPPORT
2514 int i;
2515
2516 DHD_TRACE(("%s: enter\n", __FUNCTION__));
2517 if (dhd_pkt_filter_enable) {
2518 for (i = 0; i < dhd->pktfilter_count; i++) {
2519 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
2520 }
2521 }
2522#endif /* PKT_FILTER_SUPPORT */
2523}
2524
/**
 * Enable (@value=1) or disable (@value=0) the firmware packet filters.
 *
 * With value=1 only unicast traffic is passed up; filters are never enabled
 * in AP mode, and enabling is skipped while DHCP is in progress or when the
 * interface is not in STA mode.  The ARP white-list filter slot is skipped
 * when _turn_on_arp_filter() says it is not needed for the current op_mode.
 */
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));

	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
		return;
	}
	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	    {
		for (i = 0; i < dhd->pktfilter_count; i++) {
// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
#endif /* PKT_FILTER_SUPPORT */
}
2558
/**
 * Apply or remove the extra power-save configuration tied to kernel
 * early-suspend / late-resume.
 *
 * @value: nonzero enters the suspend configuration (only acted on when
 *         dhd->in_suspend is also set); zero restores the resume defaults.
 * @dhd:   DHD public context; returns -ENODEV when NULL, otherwise 0.
 *
 * All firmware tuning goes through iovar sets (bcm_mkiovar + WLC_SET_VAR)
 * and the whole body is bracketed by dhd_suspend_lock/unlock.  Most ioctl
 * results are deliberately ignored (best-effort tuning).
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
	int power_mode = PM_MAX;
#ifdef SUPPORT_SENSORHUB
	uint32 shub_msreq;
#endif /* SUPPORT_SENSORHUB */
	/* wl_pkt_filter_enable_t enable_parm; */
	char iovbuf[32];
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
	int bcn_timeout = 0;
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
	int roam_time_thresh = 0;	/* (ms) */
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
	uint roamvar = dhd->conf->roam_off_suspend;
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
	int bcn_li_bcn;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint nd_ra_filter = 0;
	int ret = 0;
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef PASS_ALL_MCAST_PKTS
	struct dhd_info *dhdinfo;
	uint32 allmulti;
	uint i;
#endif /* PASS_ALL_MCAST_PKTS */
#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */

	if (!dhd)
		return -ENODEV;

#ifdef PASS_ALL_MCAST_PKTS
	dhdinfo = dhd->info;
#endif /* PASS_ALL_MCAST_PKTS */

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */

	/* resume PM policy: explicit config override, else PM_FAST */
	if (dhd->conf->pm >= 0)
		power_mode = dhd->conf->pm;
	else
		power_mode = PM_FAST;
	if (dhd->up) {
		if (value && dhd->in_suspend) {
			/* ---- suspend path: kernel has early-suspended ---- */
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			shub_msreq = 1;
			if (dhd->info->shub_enable == 1) {
				bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
					iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
					DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */

			/* in-suspend PM override takes precedence if configured */
			if (dhd->conf->pm_in_suspend >= 0)
				power_mode = dhd->conf->pm_in_suspend;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);

#ifdef PKT_FILTER_SUPPORT
			/* Enable packet filter,
			 * only allow unicast packet to send up
			 */
			dhd_enable_packet_filter(1, dhd);
#endif /* PKT_FILTER_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
			/* stop passing all multicast while suspended */
			allmulti = 0;
			bcm_mkiovar("allmulti", (char *)&allmulti, 4,
				iovbuf, sizeof(iovbuf));
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
						sizeof(iovbuf), TRUE, i);
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_dtim on WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));
			if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
				TRUE, 0) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
			bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
			bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			/* Disable firmware roaming during suspend */
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 0;
			bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = CUSTOM_INTR_WIDTH;
			bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#endif /* DHD_USE_EARLYSUSPEND */
		} else {
			/* ---- resume path: undo the suspend-time settings ---- */
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));

#ifdef SUPPORT_SENSORHUB
			shub_msreq = 0;
			if (dhd->info->shub_enable == 1) {
				bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
					4, iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
					iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
					DHD_ERROR(("%s Sensor Hub move/stop stop:"
						"failed %d\n", __FUNCTION__, ret));
				}
			}
#endif /* SUPPORT_SENSORHUB */


#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = 0;
			bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0)) < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
			/* restore the configured (non-suspend) PM mode */
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 1;
			bcm_mkiovar("allmulti", (char *)&allmulti, 4,
				iovbuf, sizeof(iovbuf));
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
						sizeof(iovbuf), TRUE, i);
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* restore pre-suspend setting for dtim_skip
			 * (bcn_li_dtim is still 0, the resume default)
			 */
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));

			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT;
			bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = 2000;
			bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			roamvar = dhd_roam_disable;
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			bcn_li_bcn = 1;
			bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
				4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#endif /* DHD_USE_EARLYSUSPEND */

			/* terence 2017029: Reject in early suspend */
			if (!dhd->conf->xmit_in_suspend) {
				dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
			}
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
2821
2822static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2823{
2824 dhd_pub_t *dhdp = &dhd->pub;
2825 int ret = 0;
2826
2827 DHD_OS_WAKE_LOCK(dhdp);
2828 DHD_PERIM_LOCK(dhdp);
2829
2830 /* Set flag when early suspend was called */
2831 dhdp->in_suspend = val;
2832 if ((force || !dhdp->suspend_disable_flag) &&
2833 dhd_support_sta_mode(dhdp))
2834 {
2835 ret = dhd_set_suspend(val, dhdp);
2836 }
2837
2838 DHD_PERIM_UNLOCK(dhdp);
2839 DHD_OS_WAKE_UNLOCK(dhdp);
2840 return ret;
2841}
2842
2843#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2844static void dhd_early_suspend(struct early_suspend *h)
2845{
2846 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2847 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2848
2849 if (dhd)
2850 dhd_suspend_resume_helper(dhd, 1, 0);
2851}
2852
2853static void dhd_late_resume(struct early_suspend *h)
2854{
2855 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2856 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2857
2858 if (dhd)
2859 dhd_suspend_resume_helper(dhd, 0, 0);
2860}
2861#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2862
2863/*
2864 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2865 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2866 *
2867 * dhd_timeout_start(&tmo, usec);
2868 * while (!dhd_timeout_expired(&tmo))
2869 * if (poll_something())
2870 * break;
2871 * if (dhd_timeout_expired(&tmo))
2872 * fatal();
2873 */
2874
2875void
2876dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2877{
2878 tmo->limit = usec;
2879 tmo->increment = 0;
2880 tmo->elapsed = 0;
2881 tmo->tick = jiffies_to_usecs(1);
2882}
2883
/**
 * Advance the timeout state machine: returns 1 once the accumulated delay
 * reaches the limit, 0 otherwise.  Each non-expired call performs the next
 * delay step: spin with exponential back-off up to one jiffy, then switch
 * to a one-jiffy scheduled sleep when the context allows it.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	/* atomic context, or delay still shorter than a jiffy: busy-wait,
	 * doubling the step but capping it at one tick
	 */
	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* sleepable context: yield for one jiffy on a private waitqueue */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
2917
2918int
2919dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2920{
2921 int i = 0;
2922
2923 if (!dhd) {
2924 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2925 return DHD_BAD_IF;
2926 }
2927
2928 while (i < DHD_MAX_IFS) {
2929 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2930 return i;
2931 i++;
2932 }
2933
2934 return DHD_BAD_IF;
2935}
2936
2937struct net_device * dhd_idx2net(void *pub, int ifidx)
2938{
2939 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2940 struct dhd_info *dhd_info;
2941
2942 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2943 return NULL;
2944 dhd_info = dhd_pub->info;
2945 if (dhd_info && dhd_info->iflist[ifidx])
2946 return dhd_info->iflist[ifidx]->net;
2947 return NULL;
2948}
2949
2950int
2951dhd_ifname2idx(dhd_info_t *dhd, char *name)
2952{
2953 int i = DHD_MAX_IFS;
2954
2955 ASSERT(dhd);
2956
2957 if (name == NULL || *name == '\0')
2958 return 0;
2959
2960 while (--i > 0)
2961 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2962 break;
2963
2964 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2965
2966 return i; /* default - the primary interface */
2967}
2968
2969char *
2970dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2971{
2972 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2973
2974 ASSERT(dhd);
2975
2976 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2977 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2978 return "<if_bad>";
2979 }
2980
2981 if (dhd->iflist[ifidx] == NULL) {
2982 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2983 return "<if_null>";
2984 }
2985
2986 if (dhd->iflist[ifidx]->net)
2987 return dhd->iflist[ifidx]->net->name;
2988
2989 return "<if_none>";
2990}
2991
2992uint8 *
2993dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2994{
2995 int i;
2996 dhd_info_t *dhd = (dhd_info_t *)dhdp;
2997
2998 ASSERT(dhd);
2999 for (i = 0; i < DHD_MAX_IFS; i++)
3000 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
3001 return dhd->iflist[i]->mac_addr;
3002
3003 return NULL;
3004}
3005
3006
/* Push the interface's multicast filter state down to the dongle:
 * 1. the exact multicast address list ("mcast_list" iovar),
 * 2. the all-multicast flag ("allmulti" iovar),
 * 3. the promiscuous flag (WLC_SET_PROMISC).
 * Runs from deferred work in process context, so MALLOC/ioctl may sleep.
 * Errors are logged and the remaining steps still attempted.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
		return;
	}
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	/* Snapshot the multicast count under the address lock; the list can
	 * still change before the copy loop below, which is therefore
	 * bounded by this count.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	cnt = netdev_mc_count(dev);
#else
	cnt = dev->mc_count;
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
	if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
		allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

	/* Send down the multicast list first. */

	/* Buffer layout: "mcast_list\0" + 32-bit LE count + cnt MAC addresses */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	/* NOTE(review): cnt is byte-swapped here and then reused as the copy
	 * loop bound below — on a big-endian host the bound would be wrong;
	 * presumably targets are little-endian (htol32 is a no-op). Confirm.
	 */
	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#else /* LINUX < 2.6.35 */
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
		cnt--, mclist = mclist->next) {
		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
	}
#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif /* LINUX >= 2.6.27 */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* Dongle rejected the list: fall back to all-multicast. */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
			dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}


	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	/* WLC_SET_PROMISC takes the flag directly; no iovar buffer needed. */
	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
3159
3160int
3161_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
3162{
3163 char buf[32];
3164 wl_ioctl_t ioc;
3165 int ret;
3166
3167 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
3168 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
3169 return -1;
3170 }
3171 memset(&ioc, 0, sizeof(ioc));
3172 ioc.cmd = WLC_SET_VAR;
3173 ioc.buf = buf;
3174 ioc.len = 32;
3175 ioc.set = TRUE;
3176
3177 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
3178 if (ret < 0) {
3179 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
3180 } else {
3181 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
3182 if (ifidx == 0)
3183 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
3184 }
3185
3186 return ret;
3187}
3188
#ifdef SOFTAP
extern struct net_device *ap_net_dev;
extern tsk_ctl_t ap_eth_ctl; /* AP netdev helper thread control */
#endif
3193
#ifdef DHD_PSTA
/* Get the current proxy-STA (psta/psr) configuration mode. */
int dhd_get_psta_mode(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return (int)dhd->psta_mode;
}
/* Set the proxy-STA (psta/psr) configuration mode. Always returns 0. */
int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd->psta_mode = val;
	return 0;
}
#endif /* DHD_PSTA */
3209
/* Deferred-work handler for a dongle "interface add" event: allocates the
 * host-side net_device, wires up a cfg80211 wireless_dev on kernels >= 3.11,
 * registers the interface, and (PCIe full dongle) enables firmware AP
 * isolation for non-STA roles. Runs in process context under the net-if
 * lock, wake lock and perimeter lock; frees the event payload on all paths.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	struct net_device *ndev;
	int ifidx, bssidx;
	int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	struct wireless_dev *vwdev, *primary_wdev;
	struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

	/* This path is for non-android case */
	/* The interface name in host and in event msg are same */
	/* if name in event msg is used to create dongle if list on host */
	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE, if_event->name);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	/* Attach a wireless_dev sharing the primary interface's wiphy. */
	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
	if (unlikely(!vwdev)) {
		DHD_ERROR(("Could not allocate wireless device\n"));
		/* NOTE(review): ndev allocated above is not unwound on this
		 * path — presumably dhd_allocate_if keeps ownership in the
		 * iflist and it is torn down later; confirm.
		 */
		goto done;
	}
	primary_ndev = dhd->pub.info->iflist[0]->net;
	primary_wdev = ndev_to_wdev(primary_ndev);
	vwdev->wiphy = primary_wdev->wiphy;
	vwdev->iftype = if_event->event.role;
	vwdev->netdev = ndev;
	ndev->ieee80211_ptr = vwdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	/* Drop the perimeter lock across registration (it may block). */
	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#ifdef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
		char iovbuf[WLC_IOCTL_SMLEN];
		uint32 var_int = 1;

		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);

		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	/* Event payload was allocated by the scheduler; release it here. */
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
3304
/* Deferred-work handler for a dongle "interface delete" event: tears down
 * the host-side net_device for the reported ifidx. Runs in process context
 * under the net-if lock, wake lock and perimeter lock; frees the event
 * payload before returning.
 */
static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;


	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));

	/* Perimeter lock is dropped around dhd_remove_if() — presumably
	 * because unregistration can block; mirrors the ifadd handler.
	 */
	DHD_PERIM_UNLOCK(&dhd->pub);
	dhd_remove_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);

	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
3345
3346static void
3347dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
3348{
3349 dhd_info_t *dhd = handle;
3350 dhd_if_t *ifp = event_info;
3351
3352 if (event != DHD_WQ_WORK_SET_MAC) {
3353 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3354 }
3355
3356 if (!dhd) {
3357 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3358 return;
3359 }
3360
3361 dhd_net_if_lock_local(dhd);
3362 DHD_OS_WAKE_LOCK(&dhd->pub);
3363 DHD_PERIM_LOCK(&dhd->pub);
3364
3365#ifdef SOFTAP
3366 {
3367 unsigned long flags;
3368 bool in_ap = FALSE;
3369 DHD_GENERAL_LOCK(&dhd->pub, flags);
3370 in_ap = (ap_net_dev != NULL);
3371 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3372
3373 if (in_ap) {
3374 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3375 ifp->net->name));
3376 goto done;
3377 }
3378 }
3379#endif /* SOFTAP */
3380
3381 // terence 20160907: fix for not able to set mac when wlan0 is down
3382 if (ifp == NULL || !ifp->set_macaddress) {
3383 goto done;
3384 }
3385 if (ifp == NULL || !dhd->pub.up) {
3386 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3387 goto done;
3388 }
3389
3390 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3391 ifp->set_macaddress = FALSE;
3392 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3393 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3394 else
3395 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3396
3397done:
3398 DHD_PERIM_UNLOCK(&dhd->pub);
3399 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3400 dhd_net_if_unlock_local(dhd);
3401}
3402
3403static void
3404dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3405{
3406 dhd_info_t *dhd = handle;
3407 dhd_if_t *ifp = event_info;
3408 int ifidx;
3409
3410 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3411 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3412 return;
3413 }
3414
3415 if (!dhd) {
3416 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3417 return;
3418 }
3419
3420 dhd_net_if_lock_local(dhd);
3421 DHD_OS_WAKE_LOCK(&dhd->pub);
3422 DHD_PERIM_LOCK(&dhd->pub);
3423
3424#ifdef SOFTAP
3425 {
3426 bool in_ap = FALSE;
3427 unsigned long flags;
3428 DHD_GENERAL_LOCK(&dhd->pub, flags);
3429 in_ap = (ap_net_dev != NULL);
3430 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3431
3432 if (in_ap) {
3433 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3434 ifp->net->name));
3435 ifp->set_multicast = FALSE;
3436 goto done;
3437 }
3438 }
3439#endif /* SOFTAP */
3440
3441 if (ifp == NULL || !dhd->pub.up) {
3442 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3443 goto done;
3444 }
3445
3446 ifidx = ifp->idx;
3447
3448
3449 _dhd_set_multicast_list(dhd, ifidx);
3450 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3451
3452done:
3453 DHD_PERIM_UNLOCK(&dhd->pub);
3454 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3455 dhd_net_if_unlock_local(dhd);
3456}
3457
3458static int
3459dhd_set_mac_address(struct net_device *dev, void *addr)
3460{
3461 int ret = 0;
3462
3463 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3464 struct sockaddr *sa = (struct sockaddr *)addr;
3465 int ifidx;
3466 dhd_if_t *dhdif;
3467
3468 ifidx = dhd_net2idx(dhd, dev);
3469 if (ifidx == DHD_BAD_IF)
3470 return -1;
3471
3472 dhdif = dhd->iflist[ifidx];
3473
3474 dhd_net_if_lock_local(dhd);
3475 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3476 dhdif->set_macaddress = TRUE;
3477 dhd_net_if_unlock_local(dhd);
3478 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3479 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3480 return ret;
3481}
3482
3483static void
3484dhd_set_multicast_list(struct net_device *dev)
3485{
3486 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3487 int ifidx;
3488
3489 ifidx = dhd_net2idx(dhd, dev);
3490 if (ifidx == DHD_BAD_IF)
3491 return;
3492
3493 dhd->iflist[ifidx]->set_multicast = TRUE;
3494 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3495 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
3496
3497 // terence 20160907: fix for not able to set mac when wlan0 is down
3498 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
3499 DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
3500}
3501
3502#ifdef PROP_TXSTATUS
3503int
3504dhd_os_wlfc_block(dhd_pub_t *pub)
3505{
3506 dhd_info_t *di = (dhd_info_t *)(pub->info);
3507 ASSERT(di != NULL);
08dfb6c4
RC
3508 /* terence 20161229: don't do spin lock if proptx not enabled */
3509 if (disable_proptx)
3510 return 1;
ef6a5fee
RC
3511 spin_lock_bh(&di->wlfc_spinlock);
3512 return 1;
3513}
3514
3515int
3516dhd_os_wlfc_unblock(dhd_pub_t *pub)
3517{
3518 dhd_info_t *di = (dhd_info_t *)(pub->info);
3519
3520 ASSERT(di != NULL);
08dfb6c4
RC
3521 /* terence 20161229: don't do spin lock if proptx not enabled */
3522 if (disable_proptx)
3523 return 1;
ef6a5fee
RC
3524 spin_unlock_bh(&di->wlfc_spinlock);
3525 return 1;
3526}
3527
3528#endif /* PROP_TXSTATUS */
3529
#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Mapping of Ethernet type codes to printable names for debug dumps. */
typedef struct {
	uint16 type;		/* Ethernet type (host byte order) */
	const char *str;	/* printable name */
} PKTTYPE_INFO;

/* Terminated by a { 0, "" } sentinel; the sentinel's empty string doubles
 * as the fallback returned by _get_packet_type_str() for unknown types.
 */
static const PKTTYPE_INFO packet_type_info[] =
{
	{ ETHER_TYPE_IP, "IP" },
	{ ETHER_TYPE_ARP, "ARP" },
	{ ETHER_TYPE_BRCM, "BRCM" },
	{ ETHER_TYPE_802_1X, "802.1X" },
	{ ETHER_TYPE_WAI, "WAPI" },
	{ 0, ""}
};
3545
3546static const char *_get_packet_type_str(uint16 type)
3547{
3548 int i;
3549 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
3550
3551 for (i = 0; i < n; i++) {
3552 if (packet_type_info[i].type == type)
3553 return packet_type_info[i].str;
3554 }
3555
3556 return packet_type_info[n].str;
3557}
3558#endif /* DHD_RX_DUMP || DHD_TX_DUMP */
3559
#if defined(DHD_TX_DUMP)
/* Log a one-line summary of an outgoing packet (and, with DHD_TX_FULL_DUMP,
 * a full hex dump). EAPOL frames additionally get 4-way-handshake decoding.
 */
void
dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
{
	uint8 *data = PKTDATA(osh, pkt);
	/* Ethertype lives at offset 12/13 of the Ethernet header. */
	uint16 ethertype = (data[12] << 8) | data[13];
	char *name = ndev ? ndev->name : "N/A";

	DHD_ERROR(("TX DUMP[%s] - %s\n", name, _get_packet_type_str(ethertype)));

	if (ethertype == ETHER_TYPE_802_1X)
		dhd_dump_eapol_4way_message(name, data, TRUE);

#if defined(DHD_TX_FULL_DUMP)
	{
		uint len = PKTLEN(osh, pkt);
		uint i;

		/* 16 bytes per line. */
		for (i = 0; i < len; i++) {
			printk("%02X ", data[i]);
			if ((i & 15) == 15)
				printk("\n");
		}
		printk("\n");
	}
#endif /* DHD_TX_FULL_DUMP */
}
#endif /* DHD_TX_DUMP */
3594
/* Deliver a single received packet up the Linux network stack (netif_rx /
 * netif_rx_ni, or the rx thread when enabled). Packet chains are not
 * supported here (see the ASSERT); currently exercised by the proxy-ARP
 * feature. Always returns BCME_OK.
 */
int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
	struct sk_buff *skb;
	void *skbhead = NULL;
	void *skbprev = NULL;
	dhd_if_t *ifp;
	ASSERT(!PKTISCHAINED(p));
	skb = PKTTONATIVE(dhdp->osh, p);

	ifp = dhdp->info->iflist[ifidx];
	skb->dev = ifp->net;
#if defined(BCM_GMAC3)
	/* Forwarder capable interfaces use WOFA based forwarding */
	if (ifp->fwdh) {
		struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
		uint16 * da = (uint16 *)(eh->ether_dhost);
		wofa_t wofa;
		ASSERT(ISALIGNED(da, 2));

		wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
		if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
			if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
				return BCME_OK;
			}
		}
		/* Known destination, or forwarder transmit failed: drop. */
		PKTFRMNATIVE(dhdp->osh, p);
		PKTFREE(dhdp->osh, p, FALSE);
		return BCME_OK;
	}
#endif /* BCM_GMAC3 */

	skb->protocol = eth_type_trans(skb, skb->dev);

	if (in_interrupt()) {
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		netif_rx(skb);
	} else {
		if (dhdp->info->rxthread_enabled) {
			/* Chain-building bookkeeping; with a single packet
			 * this always takes the skbhead == NULL branch.
			 */
			if (!skbhead) {
				skbhead = skb;
			} else {
				PKTSETNEXT(dhdp->osh, skbprev, skb);
			}
			skbprev = skb;
		} else {
			/* If the receive is not processed inside an ISR,
			 * the softirqd must be woken explicitly to service
			 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
			 * by netif_rx_ni(), but in earlier kernels, we need
			 * to do it manually.
			 */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
			netif_rx_ni(skb);
#else
			ulong flags;
			netif_rx(skb);
			local_irq_save(flags);
			RAISE_RX_SOFTIRQ();
			local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
		}
	}

	/* Hand the (single-entry) chain to the rx thread when it is active. */
	if (dhdp->info->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	return BCME_OK;
}
3669
/* Core transmit path: validates bus state, runs optional L2 filtering,
 * updates statistics, assigns packet priority and flowring (PCIe), applies
 * proptxstatus tagging or the protocol header, and hands the packet to the
 * bus layer. Consumes pktbuf on every path (frees it on failure).
 */
int BCMFASTPATH
__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;
#ifdef DHD_L2_FILTER
	dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
#endif
#ifdef DHD_8021X_DUMP
	struct net_device *ndev;
#endif /* DHD_8021X_DUMP */

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTFREE(dhdp->osh, pktbuf, TRUE);
		return -EBUSY;
	}
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_L2_FILTER
	/* if dhcp_unicast is enabled, we need to convert the */
	/* broadcast DHCP ACK/REPLY packets to Unicast. */
	if (ifp->dhcp_unicast) {
		uint8* mac_addr;
		uint8* ehptr = NULL;
		int ret;
		ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
		if (ret == BCME_OK) {
			/* if given mac address having valid entry in sta list
			 * copy the given mac address, and return with BCME_OK
			 */
			if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
				ehptr = PKTDATA(dhdp->osh, pktbuf);
				bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
			}
		}
	}

	/* In AP role, optionally suppress gratuitous ARP from clients. */
	if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}

	if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
		ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);

		/* Drop the packets if l2 filter has processed it already
		 * otherwise continue with the normal path
		 */
		if (ret == BCME_OK) {
			PKTCFREE(dhdp->osh, pktbuf, TRUE);
			return BCME_ERROR;
		}
	}
#endif /* DHD_L2_FILTER */
	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		/* Pending-EAPOL count is decremented on tx completion. */
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
			atomic_inc(&dhd->pend_8021x_cnt);
#ifdef DHD_DHCP_DUMP
		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
			struct net_device *net;
			char *ifname;

			net = dhd_idx2net(dhdp, ifidx);
			ifname = net ? net->name : "N/A";
			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
			dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
			/* Port 68 (0x44): DHCP client; decode the message type
			 * from the options area.
			 * NOTE(review): reads at udp_port_pos+249/250 without a
			 * length check — presumably safe for real DHCP frames;
			 * confirm against minimum packet length.
			 */
			if (source_port == 0x0044 || dest_port == 0x0044) {
				dump_hex = (pktdata[udp_port_pos+249] << 8) |
					pktdata[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
				}
#ifdef DHD_LOSSLESS_ROAMING
				if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
					DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
				}
#endif /* DHD_LOSSLESS_ROAMING */
				DHD_ERROR(("\n"));
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
	} else {
		/* Runt frame: too short for an Ethernet header. */
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

	/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
	if (PKTPRIO(pktbuf) == 0)
#endif /* !PKTPRIO_OVERRIDE */
	{
#ifdef QOS_MAP_SET
		pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
#else
		pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */
	}


#ifdef PCIE_FULL_DONGLE
	/*
	 * Lkup the per interface hash table, for a matching flowring. If one is not
	 * available, allocate a unique flowid and add a flowring entry.
	 * The found or newly created flowid is placed into the pktbuf's tag.
	 */
	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
	if (ret != BCME_OK) {
		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
		return ret;
	}
#endif

	/* NOTE(review): ndev is declared under DHD_8021X_DUMP but used here
	 * under DHD_TX_DUMP — confirm the build defines both together.
	 */
#if defined(DHD_TX_DUMP)
	ndev = dhd_idx2net(dhdp, ifidx);
	dhd_tx_dump(ndev, dhdp->osh, pktbuf);
#endif
	/* terence 20150901: Micky add to adjust the 802.1X priority */
	/* Set the 802.1X packet with the highest priority 7 */
	if (dhdp->conf->pktprio8021x >= 0)
		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
	if (dhd_wlfc_is_supported(dhdp)) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	{
		/* If the protocol uses a data header, apply it */
		dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
	}

	/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
	dhd_htsf_addtxts(dhdp, pktbuf);
#endif
#ifdef PROP_TXSTATUS
	{
		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
			/* non-proptxstatus way */
#ifdef BCMPCIE
			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
		}
	}
#else
#ifdef BCMPCIE
	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */

	return ret;
}
3873
/* Public transmit entry: wraps __dhd_sendpkt() with bus-state validation and
 * IN_SEND_PKT busy-state bracketing so suspend/teardown can wait for
 * in-flight sends. Consumes pktbuf on every path (frees it on failure).
 */
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = 0;
	unsigned long flags;

	DHD_GENERAL_LOCK(dhdp, flags);
	if (dhdp->busstate == DHD_BUS_DOWN ||
			dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_ERROR(("%s: returning as busstate=%d\n",
			__FUNCTION__, dhdp->busstate));
		DHD_GENERAL_UNLOCK(dhdp, flags);
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}
	/* Mark the bus busy for the duration of the send. */
	dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTCFREE(dhdp->osh, pktbuf, TRUE);
		ret = -EBUSY;
		goto exit;
	}
#endif /* DHD_PCIE_RUNTIMEPM */

	ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);

#ifdef DHD_PCIE_RUNTIMEPM
exit:
#endif
	/* Always clear the busy flag, including on the error path. */
	DHD_GENERAL_LOCK(dhdp, flags);
	dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return ret;
}
3911
3912int BCMFASTPATH
3913dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3914{
3915 int ret;
3916 uint datalen;
3917 void *pktbuf;
3918 dhd_info_t *dhd = DHD_DEV_INFO(net);
3919 dhd_if_t *ifp = NULL;
3920 int ifidx;
3921 unsigned long flags;
3922#ifdef WLMEDIA_HTSF
3923 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3924#else
3925 uint8 htsfdlystat_sz = 0;
3926#endif
3927#ifdef DHD_WMF
3928 struct ether_header *eh;
3929 uint8 *iph;
3930#endif /* DHD_WMF */
3931
3932 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3933
08dfb6c4
RC
3934 /* terence 2017029: Reject in early suspend */
3935 if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) {
3936 dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
3937#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3938 return -ENODEV;
3939#else
3940 return NETDEV_TX_BUSY;
3941#endif
3942 }
3943
ef6a5fee
RC
3944
3945#ifdef PCIE_FULL_DONGLE
3946 DHD_GENERAL_LOCK(&dhd->pub, flags);
3947 dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
3948 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3949#endif /* PCIE_FULL_DONGLE */
3950
3951#ifdef DHD_PCIE_RUNTIMEPM
3952 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
3953 /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
3954 /* stop the network queue temporarily until resume done */
3955 DHD_GENERAL_LOCK(&dhd->pub, flags);
3956 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3957 dhd_bus_stop_queue(dhd->pub.bus);
3958 }
3959 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3960 dhd_os_busbusy_wake(&dhd->pub);
3961 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3962#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3963 return -ENODEV;
3964#else
3965 return NETDEV_TX_BUSY;
3966#endif
3967 }
3968#endif /* DHD_PCIE_RUNTIMEPM */
3969
3970 DHD_GENERAL_LOCK(&dhd->pub, flags);
3971#ifdef PCIE_FULL_DONGLE
3972 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3973 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
3974 dhd_os_busbusy_wake(&dhd->pub);
3975 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3976#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3977 return -ENODEV;
3978#else
3979 return NETDEV_TX_BUSY;
3980#endif
3981 }
3982#endif /* PCIE_FULL_DONGLE */
3983
3984 DHD_OS_WAKE_LOCK(&dhd->pub);
3985 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3986
3987 /* Reject if down */
3988 if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
3989 dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
3990 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3991 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3992 netif_stop_queue(net);
3993 /* Send Event when bus down detected during data session */
3994 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3995 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3996 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3997 net_os_send_hang_message(net);
3998 }
3999#ifdef PCIE_FULL_DONGLE
4000 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4001 dhd_os_busbusy_wake(&dhd->pub);
ef6a5fee
RC
4002#endif /* PCIE_FULL_DONGLE */
4003 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4004 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5680b389 4005 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
ef6a5fee
RC
4006#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4007 return -ENODEV;
4008#else
4009 return NETDEV_TX_BUSY;
4010#endif
4011 }
4012
4013 ifp = DHD_DEV_IFP(net);
4014 ifidx = DHD_DEV_IFIDX(net);
4015 BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
4016
4017 if (ifidx == DHD_BAD_IF) {
4018 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
4019 netif_stop_queue(net);
4020#ifdef PCIE_FULL_DONGLE
4021 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4022 dhd_os_busbusy_wake(&dhd->pub);
ef6a5fee
RC
4023#endif /* PCIE_FULL_DONGLE */
4024 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4025 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5680b389 4026 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
ef6a5fee
RC
4027#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4028 return -ENODEV;
4029#else
4030 return NETDEV_TX_BUSY;
4031#endif
4032 }
4033 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4034
4035 ASSERT(ifidx == dhd_net2idx(dhd, net));
4036 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
4037
4038 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4039
4040 /* re-align socket buffer if "skb->data" is odd address */
4041 if (((unsigned long)(skb->data)) & 0x1) {
4042 unsigned char *data = skb->data;
4043 uint32 length = skb->len;
4044 PKTPUSH(dhd->pub.osh, skb, 1);
4045 memmove(skb->data, data, length);
4046 PKTSETLEN(dhd->pub.osh, skb, length);
4047 }
4048
4049 datalen = PKTLEN(dhd->pub.osh, skb);
4050
4051 /* Make sure there's enough room for any header */
4052 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
4053 struct sk_buff *skb2;
4054
4055 DHD_INFO(("%s: insufficient headroom\n",
4056 dhd_ifname(&dhd->pub, ifidx)));
4057 dhd->pub.tx_realloc++;
4058
4059 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4060 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
4061
4062 dev_kfree_skb(skb);
4063 if ((skb = skb2) == NULL) {
4064 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
4065 dhd_ifname(&dhd->pub, ifidx)));
4066 ret = -ENOMEM;
4067 goto done;
4068 }
4069 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
4070 }
4071
4072 /* Convert to packet */
4073 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
4074 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
4075 dhd_ifname(&dhd->pub, ifidx)));
4076 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
4077 dev_kfree_skb_any(skb);
4078 ret = -ENOMEM;
4079 goto done;
4080 }
4081
4082#if defined(WLMEDIA_HTSF)
4083 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
4084 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
4085 struct ether_header *eh = (struct ether_header *)pktdata;
4086
4087 if (!ETHER_ISMULTI(eh->ether_dhost) &&
4088 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
4089 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
4090 }
4091 }
4092#endif
4093
4094#ifdef DHD_WMF
4095 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
4096 iph = (uint8 *)eh + ETHER_HDR_LEN;
4097
4098 /* WMF processing for multicast packets
4099 * Only IPv4 packets are handled
4100 */
4101 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
4102 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
4103 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
4104#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
4105 void *sdu_clone;
4106 bool ucast_convert = FALSE;
4107#ifdef DHD_UCAST_UPNP
4108 uint32 dest_ip;
4109
4110 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
4111 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
4112#endif /* DHD_UCAST_UPNP */
4113#ifdef DHD_IGMP_UCQUERY
4114 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
4115 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
4116 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
4117#endif /* DHD_IGMP_UCQUERY */
4118 if (ucast_convert) {
4119 dhd_sta_t *sta;
4120#ifdef PCIE_FULL_DONGLE
4121 unsigned long flags;
4122#endif
4123 struct list_head snapshot_list;
4124 struct list_head *wmf_ucforward_list;
4125
4126 ret = NETDEV_TX_OK;
4127
4128 /* For non BCM_GMAC3 platform we need a snapshot sta_list to
4129 * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue.
4130 */
4131 wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
4132
4133 /* Convert upnp/igmp query to unicast for each assoc STA */
4134 list_for_each_entry(sta, wmf_ucforward_list, list) {
4135 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
4136 ret = WMF_NOP;
4137 break;
4138 }
4139 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
4140 }
4141 DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
4142
4143#ifdef PCIE_FULL_DONGLE
4144 DHD_GENERAL_LOCK(&dhd->pub, flags);
4145 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4146 dhd_os_busbusy_wake(&dhd->pub);
4147 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4148#endif /* PCIE_FULL_DONGLE */
4149 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4150 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4151
4152 if (ret == NETDEV_TX_OK)
4153 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
4154
4155 return ret;
4156 } else
4157#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
4158 {
4159 /* There will be no STA info if the packet is coming from LAN host
4160 * Pass as NULL
4161 */
4162 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
4163 switch (ret) {
4164 case WMF_TAKEN:
4165 case WMF_DROP:
4166 /* Either taken by WMF or we should drop it.
4167 * Exiting send path
4168 */
4169#ifdef PCIE_FULL_DONGLE
4170 DHD_GENERAL_LOCK(&dhd->pub, flags);
4171 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4172 dhd_os_busbusy_wake(&dhd->pub);
4173 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4174#endif /* PCIE_FULL_DONGLE */
4175 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4176 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4177 return NETDEV_TX_OK;
4178 default:
4179 /* Continue the transmit path */
4180 break;
4181 }
4182 }
4183 }
4184#endif /* DHD_WMF */
4185#ifdef DHD_PSTA
4186 /* PSR related packet proto manipulation should be done in DHD
4187 * since dongle doesn't have complete payload
4188 */
4189 if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
4190 ifidx, &pktbuf, TRUE) < 0)) {
4191 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
4192 dhd_ifname(&dhd->pub, ifidx)));
4193 }
4194#endif /* DHD_PSTA */
4195
4196#ifdef DHDTCPACK_SUPPRESS
4197 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
4198 /* If this packet has been hold or got freed, just return */
4199 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
4200 ret = 0;
4201 goto done;
4202 }
4203 } else {
4204 /* If this packet has replaced another packet and got freed, just return */
4205 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
4206 ret = 0;
4207 goto done;
4208 }
4209 }
4210#endif /* DHDTCPACK_SUPPRESS */
4211
4212 /* no segmented SKB support (Kernel-3.18.y) */
4213 if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
4214 PKTSETLINK(skb, NULL);
4215 }
4216
4217 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
4218
4219done:
4220 if (ret) {
4221 ifp->stats.tx_dropped++;
4222 dhd->pub.tx_dropped++;
4223 } else {
4224
4225#ifdef PROP_TXSTATUS
4226 /* tx_packets counter can counted only when wlfc is disabled */
4227 if (!dhd_wlfc_is_supported(&dhd->pub))
4228#endif
4229 {
4230 dhd->pub.tx_packets++;
4231 ifp->stats.tx_packets++;
4232 ifp->stats.tx_bytes += datalen;
4233 }
4234 }
4235
4236#ifdef PCIE_FULL_DONGLE
4237 DHD_GENERAL_LOCK(&dhd->pub, flags);
4238 dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
4239 dhd_os_busbusy_wake(&dhd->pub);
4240 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
4241#endif /* PCIE_FULL_DONGLE */
4242
4243 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
4244 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4245 BUZZZ_LOG(START_XMIT_END, 0);
4246
4247 /* Return ok: we always eat the packet */
4248#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
4249 return 0;
4250#else
4251 return NETDEV_TX_OK;
4252#endif
4253}
4254
4255
4256void
4257dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
4258{
4259 struct net_device *net;
4260 dhd_info_t *dhd = dhdp->info;
4261 int i;
4262
4263 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4264
4265 ASSERT(dhd);
4266
4267#ifdef DHD_LOSSLESS_ROAMING
4268 /* block flowcontrol during roaming */
4269 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
4270 return;
4271 }
4272#endif
4273
4274 if (ifidx == ALL_INTERFACES) {
4275 /* Flow control on all active interfaces */
4276 dhdp->txoff = state;
4277 for (i = 0; i < DHD_MAX_IFS; i++) {
4278 if (dhd->iflist[i]) {
4279 net = dhd->iflist[i]->net;
4280 if (state == ON)
4281 netif_stop_queue(net);
4282 else
4283 netif_wake_queue(net);
4284 }
4285 }
4286 } else {
4287 if (dhd->iflist[ifidx]) {
4288 net = dhd->iflist[ifidx]->net;
4289 if (state == ON)
4290 netif_stop_queue(net);
4291 else
4292 netif_wake_queue(net);
4293 }
4294 }
4295}
4296
4297
#ifdef DHD_WMF
/* Report whether the dedicated rx-forwarding thread is in use. */
bool
dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
{
	dhd_info_t *info = dhdp->info;

	return info->rxthread_enabled;
}
#endif /* DHD_WMF */
4307
/**
 * Called when a frame is received by the dongle on interface 'ifidx'.
 *
 * Walks the chained packet list 'pktbuf' (up to 'numpkt' packets), applies
 * per-interface filtering (L2 filter, WMF, PSTA, intra-BSS forwarding),
 * separates firmware event packets from data, and delivers data frames to
 * the Linux network stack — directly, via netif_rx/netif_rx_ni, or queued
 * for the rxf thread. Finally arms rx/ctrl wake-lock timeouts so the host
 * is not suspended while traffic is pending.
 *
 * @param dhdp   public DHD context
 * @param ifidx  interface index the frames arrived on
 * @param pktbuf head of a PKTNEXT-chained packet list
 * @param numpkt maximum number of packets to process from the chain
 * @param chan   bus channel the packets came in on (unused here)
 */
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct sk_buff *skb;
	uchar *eth;
	uint len;
	void *data, *pnext = NULL;
	int i;
	dhd_if_t *ifp;
	wl_event_msg_t event;
	int tout_rx = 0;	/* rx wake-lock timeout to request (ms) */
	int tout_ctrl = 0;	/* ctrl/event wake-lock timeout to request (ms) */
	void *skbhead = NULL;	/* head of chain handed to the rxf thread */
	void *skbprev = NULL;	/* tail of that chain while building it */
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
	char *dump_data;
	uint16 protocol;
	char *ifname;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Each iteration unlinks one packet from the chain before handling it. */
	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
		struct ether_header *eh;

		pnext = PKTNEXT(dhdp->osh, pktbuf);
		PKTSETNEXT(dhdp->osh, pktbuf, NULL);

		ifp = dhd->iflist[ifidx];
		if (ifp == NULL) {
			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
				__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}

		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);

		/* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#else
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#endif /* PROP_TXSTATUS_VSDB */
		{
			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
			__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}


#ifdef PROP_TXSTATUS
		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
			/* WLFC may send header only packet when
			there is an urgent message but no packet to
			piggy-back on
			*/
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}
#endif
#ifdef DHD_L2_FILTER
		/* If block_ping is enabled drop the ping packet */
		if (ifp->block_ping) {
			if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		/* Drop gratuitous ARP on STA-role interfaces when configured. */
		if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
			if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
		/* Proxy-ARP handling on AP-role interfaces. */
		if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
			int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);

			/* Drop the packets if l2 filter has processed it already
			 * otherwise continue with the normal path
			 */
			if (ret == BCME_OK) {
				PKTCFREE(dhdp->osh, pktbuf, TRUE);
				continue;
			}
		}
#endif /* DHD_L2_FILTER */
#ifdef DHD_WMF
		/* WMF processing for multicast packets */
		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
			dhd_sta_t *sta;
			int ret;

			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
			switch (ret) {
				case WMF_TAKEN:
					/* The packet is taken by WMF. Continue to next iteration */
					continue;
				case WMF_DROP:
					/* Packet DROP decision by WMF. Toss it */
					DHD_ERROR(("%s: WMF decides to drop packet\n",
						__FUNCTION__));
					PKTCFREE(dhdp->osh, pktbuf, FALSE);
					continue;
				default:
					/* Continue the transmit path */
					break;
			}
		}
#endif /* DHD_WMF */

#ifdef DHDTCPACK_SUPPRESS
		/* Record TCP data info used later for ACK suppression on tx. */
		dhd_tcpdata_info_get(dhdp, pktbuf);
#endif
		skb = PKTTONATIVE(dhdp->osh, pktbuf);

		ASSERT(ifp);
		skb->dev = ifp->net;

#ifdef DHD_PSTA
		/* Undo proxy-STA address translation on the receive side. */
		if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
			DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
				dhd_ifname(dhdp, ifidx)));
		}
#endif /* DHD_PSTA */

#ifdef PCIE_FULL_DONGLE
		/* Intra-BSS forwarding on AP/P2P-GO interfaces (unless isolated):
		 * unicast to an associated STA is looped back instead of delivered;
		 * multicast is duplicated — one copy up, one copy back over the air.
		 */
		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
			(!ifp->ap_isolate)) {
			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
			if (ETHER_ISUCAST(eh->ether_dhost)) {
				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
					dhd_sendpkt(dhdp, ifidx, pktbuf);
					continue;
				}
			} else {
				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
				if (npktbuf)
					dhd_sendpkt(dhdp, ifidx, npktbuf);
			}
		}
#endif /* PCIE_FULL_DONGLE */

		/* Get the protocol, maintain skb around eth_type_trans()
		 * The main reason for this hack is for the limitation of
		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
		 * coping of the packet coming from the network stack to add
		 * BDC, Hardware header etc, during network interface registration
		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
		 * for BDC, Hardware header etc. and not just the ETH_HLEN
		 */
		eth = skb->data;
		len = skb->len;

#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
		dump_data = skb->data;
		protocol = (dump_data[12] << 8) | dump_data[13];
		ifname = skb->dev ? skb->dev->name : "N/A";
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
#ifdef DHD_8021X_DUMP
		if (protocol == ETHER_TYPE_802_1X) {
			dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
		}
#endif /* DHD_8021X_DUMP */
#ifdef DHD_DHCP_DUMP
		/* Debug logging of DHCP message types parsed from the UDP payload. */
		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;

			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
			dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
			if (source_port == 0x0044 || dest_port == 0x0044) {
				/* DHCP client port: inspect the message-type option. */
				dump_hex = (dump_data[udp_port_pos+249] << 8) |
					dump_data[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
				} else {
					DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
				}
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
			}
		}
#endif /* DHD_DHCP_DUMP */
#if defined(DHD_RX_DUMP)
		DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
		if (protocol != ETHER_TYPE_BRCM) {
			if (dump_data[0] == 0xFF) {
				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));

				if ((dump_data[12] == 8) &&
					(dump_data[13] == 6)) {
					DHD_ERROR(("%s: ARP %d\n",
						__FUNCTION__, dump_data[0x15]));
				}
			} else if (dump_data[0] & 1) {
				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
					__FUNCTION__, MAC2STRDBG(dump_data)));
			}
#ifdef DHD_RX_FULL_DUMP
			{
				int k;
				for (k = 0; k < skb->len; k++) {
					printk("%02X ", dump_data[k]);
					if ((k & 15) == 15)
						printk("\n");
				}
				printk("\n");
			}
#endif /* DHD_RX_FULL_DUMP */
		}
#endif /* DHD_RX_DUMP */

		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST) {
			dhd->pub.rx_multicast++;
			ifp->stats.multicast++;
		}

		/* Restore data/len clobbered by eth_type_trans() — see note above. */
		skb->data = eth;
		skb->len = len;

#ifdef WLMEDIA_HTSF
		dhd_htsf_addrxts(dhdp, pktbuf);
#endif
		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Process special event packets and then discard them */
		memset(&event, 0, sizeof(event));
		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
			dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
			skb_mac_header(skb),
#else
			skb->mac.raw,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
			&event,
			&data);

			wl_event_to_host_order(&event);
			if (!tout_ctrl)
				tout_ctrl = DHD_PACKET_TIMEOUT_MS;

#if defined(PNO_SUPPORT)
			if (event.event_type == WLC_E_PFN_NET_FOUND) {
				/* enforce custom wake lock to garantee that Kernel not suspended */
				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
			}
#endif /* PNO_SUPPORT */

#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
#else
			PKTFREE(dhdp->osh, pktbuf, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
		} else {
			tout_rx = DHD_PACKET_TIMEOUT_MS;

#ifdef PROP_TXSTATUS
			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
#endif /* PROP_TXSTATUS */
		}

		/* ifidx may have been remapped by dhd_wl_host_event(); refetch ifp. */
		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
		ifp = dhd->iflist[ifidx];

		if (ifp->net)
			ifp->net->last_rx = jiffies;

		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
			dhdp->dstats.rx_bytes += skb->len;
			dhdp->rx_packets++; /* Local count */
			ifp->stats.rx_bytes += skb->len;
			ifp->stats.rx_packets++;
		}

		if (in_interrupt()) {
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
			DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif /* DHD_LB && DHD_LB_RXP */
			DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
		} else {
			if (dhd->rxthread_enabled) {
				/* Chain the skb; batch is flushed to the rxf thread below. */
				if (!skbhead)
					skbhead = skb;
				else
					PKTSETNEXT(dhdp->osh, skbprev, skb);
				skbprev = skb;
			} else {

				/* If the receive is not processed inside an ISR,
				 * the softirqd must be woken explicitly to service
				 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
				 * by netif_rx_ni(), but in earlier kernels, we need
				 * to do it manually.
				 */
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);

#if defined(DHD_LB) && defined(DHD_LB_RXP)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_receive_skb(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx_ni(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
				ulong flags;
				DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				netif_rx(skb);
				DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
#endif /* DHD_LB && DHD_LB_RXP */
			}
		}
	}

	/* Flush any batch accumulated for the rxf thread. */
	if (dhd->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	/* Hold wake locks long enough for the stack to consume what we queued. */
	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
	DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
}
4665
/* Per-OS event hook; the Linux port handles events elsewhere, so no-op. */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Linux version has nothing to do */
}
4672
4673void
4674dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4675{
4676 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4677 struct ether_header *eh;
4678 uint16 type;
4679
4680 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4681
4682 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4683 type = ntoh16(eh->ether_type);
4684
4685 if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
4686 atomic_dec(&dhd->pend_8021x_cnt);
4687
4688#ifdef PROP_TXSTATUS
4689 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4690 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
4691 uint datalen = PKTLEN(dhd->pub.osh, txp);
4692 if (ifp != NULL) {
4693 if (success) {
4694 dhd->pub.tx_packets++;
4695 ifp->stats.tx_packets++;
4696 ifp->stats.tx_bytes += datalen;
4697 } else {
4698 ifp->stats.tx_dropped++;
4699 }
4700 }
4701 }
4702#endif
4703}
4704
4705static struct net_device_stats *
4706dhd_get_stats(struct net_device *net)
4707{
4708 dhd_info_t *dhd = DHD_DEV_INFO(net);
4709 dhd_if_t *ifp;
4710 int ifidx;
4711
4712 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4713
4714 ifidx = dhd_net2idx(dhd, net);
4715 if (ifidx == DHD_BAD_IF) {
4716 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
4717
4718 memset(&net->stats, 0, sizeof(net->stats));
4719 return &net->stats;
4720 }
4721
4722 ifp = dhd->iflist[ifidx];
4723 ASSERT(dhd && ifp);
4724
4725 if (dhd->pub.up) {
4726 /* Use the protocol to get dongle stats */
4727 dhd_prot_dstats(&dhd->pub);
4728 }
4729 return &ifp->stats;
4730}
4731
/*
 * Dedicated watchdog thread (used when dhd_watchdog_prio >= 0).
 *
 * Sleeps on tsk->sema; each wakeup runs one watchdog pass — bus-level
 * housekeeping via dhd_bus_watchdog() — then re-arms the watchdog timer,
 * compensating the next interval for however long this pass took.
 * Terminates when tsk->terminated is set or the semaphore wait is
 * interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range. */
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			/* Hold a wake lock for the duration of this pass. */
			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Skip the pass entirely while the dongle is held in reset. */
			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				dhd_bus_watchdog(&dhd->pub);

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
#ifdef DHD_L2_FILTER
				dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog, shortening the next
				 * interval by the time this pass consumed.
				 */
				if (dhd->wd_timer_valid) {
					mod_timer(&dhd->timer,
					    jiffies +
					    msecs_to_jiffies(dhd_watchdog_ms) -
					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		} else {
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4788
/*
 * Watchdog timer callback. If a dedicated watchdog thread exists, just
 * wake it (the thread does the work); otherwise run one watchdog pass
 * inline in timer/softirq context and re-arm the timer. Skips the pass
 * entirely during dongle reset or while a bus suspend is in progress.
 */
static void dhd_watchdog(ulong data)
{
	dhd_info_t *dhd = (dhd_info_t *)data;
	unsigned long flags;

	if (dhd->pub.dongle_reset) {
		return;
	}

	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
		return;
	}

	/* Delegate to the watchdog thread when one is running. */
	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
		up(&dhd->thr_wdt_ctl.sema);
		return;
	}

	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
	/* Call the bus module watchdog */
	dhd_bus_watchdog(&dhd->pub);
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	/* Count the tick for reference */
	dhd->pub.tickcnt++;

#ifdef DHD_L2_FILTER
	dhd_l2_filter_watchdog(&dhd->pub);
#endif /* DHD_L2_FILTER */
	/* Reschedule the watchdog */
	if (dhd->wd_timer_valid)
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
}
4824
4825#ifdef DHD_PCIE_RUNTIMEPM
/*
 * Runtime-PM state machine thread. Woken by the rpm timer (dhd_runtimepm);
 * each wakeup advances the runtime-PM state via dhd_runtimepm_state() when
 * the interface is up, then re-arms the rpm timer, compensating the next
 * interval for the time this pass took. Exits when tsk->terminated is set
 * or the semaphore wait is interrupted.
 */
static int
dhd_rpm_state_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	while (1) {
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));
				/* Only step the PM state machine while up. */
				if (dhd->pub.up) {
					dhd_runtimepm_state(&dhd->pub);
				}

				DHD_GENERAL_LOCK(&dhd->pub, flags);
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog, shortening the next
				 * interval by the time this pass consumed.
				 */
				if (dhd->rpm_timer_valid) {
					mod_timer(&dhd->rpm_timer,
						jiffies +
						msecs_to_jiffies(dhd_runtimepm_ms) -
						min(msecs_to_jiffies(dhd_runtimepm_ms),
							time_lapse));
				}
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			break;
		}
	}

	complete_and_exit(&tsk->completed, 0);
}
4869
4870static void dhd_runtimepm(ulong data)
4871{
4872 dhd_info_t *dhd = (dhd_info_t *)data;
4873
4874 if (dhd->pub.dongle_reset) {
4875 return;
4876 }
4877
4878 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
4879 up(&dhd->thr_rpm_ctl.sema);
4880 return;
4881 }
4882}
4883
/*
 * Disable runtime PM: stop the rpm timer and force the PCIe bus awake
 * so the dongle stays out of D3 until runtime PM is re-enabled.
 */
void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
{
	dhd_os_runtimepm_timer(dhdp, 0);
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
	DHD_ERROR(("DHD Runtime PM Disabled \n"));
}
4890
/* Re-enable runtime PM by restarting the rpm timer at its default period. */
void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
{
	dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
	DHD_ERROR(("DHD Runtime PM Enabled \n"));
}
4896
4897#endif /* DHD_PCIE_RUNTIMEPM */
4898
4899
#ifdef ENABLE_ADAPTIVE_SCHED
/*
 * Adapt the calling thread's scheduling class to CPU frequency: drop to
 * SCHED_NORMAL when CPU0 runs at or below the threshold, otherwise promote
 * to SCHED_FIFO at 'prio' (clamped to the valid RT range) if not already FIFO.
 */
static void
dhd_sched_policy(int prio)
{
	struct sched_param sp;

	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
		sp.sched_priority = 0;
		setScheduler(current, SCHED_NORMAL, &sp);
	} else if (get_scheduler_policy(current) != SCHED_FIFO) {
		sp.sched_priority = (prio < MAX_RT_PRIO) ? prio : (MAX_RT_PRIO - 1);
		setScheduler(current, SCHED_FIFO, &sp);
	}
}
#endif /* ENABLE_ADAPTIVE_SCHED */
#ifdef DEBUG_CPU_FREQ
/* cpufreq transition notifier: on POSTCHANGE, record the new frequency
 * for the affected CPU in the per-cpu debug array. Always returns 0.
 */
static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
	struct cpufreq_freqs *freq = data;

	if (dhd && dhd->new_freq && val == CPUFREQ_POSTCHANGE) {
		DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
			freq->new, freq->cpu));
		*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
	}
	return 0;
}
#endif /* DEBUG_CPU_FREQ */
/*
 * Dedicated DPC thread (used when dhd_dpc_prio >= 0 instead of the tasklet).
 *
 * Sleeps on the task-control binary semaphore; each wakeup drains the bus
 * DPC (dhd_bus_dpc) in a loop until no work remains, extending the watchdog
 * timer while busy, then releases the wake lock taken by dhd_sched_dpc().
 * If the bus is down, performs a clean bus stop instead. Exits on
 * tsk->terminated or an interrupted semaphore wait.
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range. */
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_DPC_CPUCORE
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#else
	/* Optionally pin the DPC thread to the core chosen in config.txt. */
	if (dhd->pub.conf->dpc_cpucore >= 0) {
		printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
		set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
	}
#endif
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (!binary_sema_down(tsk)) {
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_dpc_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
#ifdef DEBUG_DPC_THREAD_WATCHDOG
				int resched_cnt = 0;
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				/* Keep the watchdog off our back while draining. */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
#ifdef DEBUG_DPC_THREAD_WATCHDOG
					/* Yield periodically so a busy bus can't
					 * starve other threads indefinitely.
					 */
					resched_cnt++;
					if (resched_cnt > MAX_RESCHED_CNT) {
						DHD_INFO(("%s Calling msleep to"
							"let other processes run. \n",
							__FUNCTION__));
						dhd->pub.dhd_bug_on = true;
						resched_cnt = 0;
						OSL_SLEEP(1);
					}
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Release the wake lock taken by dhd_sched_dpc(). */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			} else {
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
5005
/*
 * Dedicated rx-forwarding thread. Dequeues batches of received skbs
 * (chained via PKTNEXT) from the rxf queue and delivers each to the
 * network stack with netif_rx_ni() (netif_rx + explicit softirq raise on
 * pre-2.6 kernels). The wake lock taken by dhd_sched_rxf() is released
 * after each batch. Exits on tsk->terminated or interrupted wait.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range. */
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */

#ifdef CUSTOM_RXF_CPUCORE
	/* change rxf thread to other cpu core */
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_RXF_CPUCORE));
#else
	/* Optionally pin the rxf thread to the core chosen in config.txt. */
	if (dhd->pub.conf->rxf_cpucore >= 0) {
		printf("%s: set rxf_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
		set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
	}
#endif

	/* signal: thread has started */
	complete(&tsk->completed);
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Unlink and deliver each skb of the dequeued chain. */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Periodically yield so long bursts don't hog the CPU. */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			DHD_OS_WAKE_UNLOCK(pub);
		} else {
			break;
		}
	}
	complete_and_exit(&tsk->completed, 0);
}
5096
#ifdef BCMPCIE
/*
 * Re-enable the DPC (and load-balancing) tasklets after dhd_dpc_kill().
 * tasklet_disable() increments the tasklet's count; a count of 1 here
 * means "disabled exactly once", so tasklet_enable() restores it to
 * runnable without underflowing the counter.
 */
void dhd_dpc_enable(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (!dhdp || !dhdp->info)
		return;
	dhd = dhdp->info;

#ifdef DHD_LB
#ifdef DHD_LB_RXP
	/* Reset the pending-rx queue discarded by dhd_dpc_kill(). */
	__skb_queue_head_init(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#ifdef DHD_LB_TXC
	if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
		tasklet_enable(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
	if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
		tasklet_enable(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
	if (atomic_read(&dhd->tasklet.count) == 1)
		tasklet_enable(&dhd->tasklet);
}
#endif /* BCMPCIE */
5123
5124
#ifdef BCMPCIE
/*
 * Disable and flush the DPC tasklet (when no DPC thread owns the work),
 * then purge/kill the load-balancing tasklets and their pending rx queue.
 * Counterpart of dhd_dpc_enable().
 */
void
dhd_dpc_kill(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp ? dhdp->info : NULL;

	if (dhd == NULL) {
		return;
	}

	/* Only the tasklet path needs killing; a DPC thread is stopped elsewhere. */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		tasklet_disable(&dhd->tasklet);
		tasklet_kill(&dhd->tasklet);
		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
	}
#if defined(DHD_LB)
#ifdef DHD_LB_RXP
	__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
	/* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
	tasklet_disable(&dhd->tx_compl_tasklet);
	tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_disable(&dhd->rx_compl_tasklet);
	tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
#endif /* DHD_LB */
}
#endif /* BCMPCIE */
5162
5163static void
5164dhd_dpc(ulong data)
5165{
5166 dhd_info_t *dhd;
5167
5168 dhd = (dhd_info_t *)data;
5169
5170 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
5171 * down below , wake lock is set,
5172 * the tasklet is initialized in dhd_attach()
5173 */
5174 /* Call bus dpc unless it indicated down (then clean stop) */
5175 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5176 if (dhd_bus_dpc(dhd->pub.bus)) {
5177 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5178 tasklet_schedule(&dhd->tasklet);
5179 }
5180 } else {
5181 dhd_bus_stop(dhd->pub.bus, TRUE);
5182 }
5183}
5184
5185void
5186dhd_sched_dpc(dhd_pub_t *dhdp)
5187{
5188 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5189
5190 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5191 DHD_OS_WAKE_LOCK(dhdp);
5192 /* If the semaphore does not get up,
5193 * wake unlock should be done here
5194 */
5195 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5196 DHD_OS_WAKE_UNLOCK(dhdp);
5197 }
5198 return;
5199 } else {
5200 tasklet_schedule(&dhd->tasklet);
5201 }
5202}
5203
5204static void
5205dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5206{
5207 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5208#ifdef RXF_DEQUEUE_ON_BUSY
5209 int ret = BCME_OK;
5210 int retry = 2;
5211#endif /* RXF_DEQUEUE_ON_BUSY */
5212
5213 DHD_OS_WAKE_LOCK(dhdp);
5214
5215 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5216#ifdef RXF_DEQUEUE_ON_BUSY
5217 do {
5218 ret = dhd_rxf_enqueue(dhdp, skb);
5219 if (ret == BCME_OK || ret == BCME_ERROR)
5220 break;
5221 else
5222 OSL_SLEEP(50); /* waiting for dequeueing */
5223 } while (retry-- > 0);
5224
5225 if (retry <= 0 && ret == BCME_BUSY) {
5226 void *skbp = skb;
5227
5228 while (skbp) {
5229 void *skbnext = PKTNEXT(dhdp->osh, skbp);
5230 PKTSETNEXT(dhdp->osh, skbp, NULL);
5231 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5232 __FUNCTION__, __LINE__);
5233 netif_rx_ni(skbp);
5234 skbp = skbnext;
5235 }
5236 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
5237 } else {
5238 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5239 up(&dhd->thr_rxf_ctl.sema);
5240 }
5241 }
5242#else /* RXF_DEQUEUE_ON_BUSY */
5243 do {
5244 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5245 break;
5246 } while (1);
5247 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5248 up(&dhd->thr_rxf_ctl.sema);
5249 }
5250 return;
5251#endif /* RXF_DEQUEUE_ON_BUSY */
5252}
5253
5254#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5255#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5256
5257#ifdef TOE
5258/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
5259static int
5260dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5261{
5262 wl_ioctl_t ioc;
5263 char buf[32];
5264 int ret;
5265
5266 memset(&ioc, 0, sizeof(ioc));
5267
5268 ioc.cmd = WLC_GET_VAR;
5269 ioc.buf = buf;
5270 ioc.len = (uint)sizeof(buf);
5271 ioc.set = FALSE;
5272
5273 strncpy(buf, "toe_ol", sizeof(buf) - 1);
5274 buf[sizeof(buf) - 1] = '\0';
5275 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5276 /* Check for older dongle image that doesn't support toe_ol */
5277 if (ret == -EIO) {
5278 DHD_ERROR(("%s: toe not supported by device\n",
5279 dhd_ifname(&dhd->pub, ifidx)));
5280 return -EOPNOTSUPP;
5281 }
5282
5283 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5284 return ret;
5285 }
5286
5287 memcpy(toe_ol, buf, sizeof(uint32));
5288 return 0;
5289}
5290
5291/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
5292static int
5293dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5294{
5295 wl_ioctl_t ioc;
5296 char buf[32];
5297 int toe, ret;
5298
5299 memset(&ioc, 0, sizeof(ioc));
5300
5301 ioc.cmd = WLC_SET_VAR;
5302 ioc.buf = buf;
5303 ioc.len = (uint)sizeof(buf);
5304 ioc.set = TRUE;
5305
5306 /* Set toe_ol as requested */
5307
5308 strncpy(buf, "toe_ol", sizeof(buf) - 1);
5309 buf[sizeof(buf) - 1] = '\0';
5310 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
5311
5312 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5313 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5314 dhd_ifname(&dhd->pub, ifidx), ret));
5315 return ret;
5316 }
5317
5318 /* Enable toe globally only if any components are enabled. */
5319
5320 toe = (toe_ol != 0);
5321
5322 strcpy(buf, "toe");
5323 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
5324
5325 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
5326 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5327 return ret;
5328 }
5329
5330 return 0;
5331}
5332#endif /* TOE */
5333
#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
/*
 * Read the firmware's current scb_probe settings, bump the max-probe
 * count to NUM_SCB_MAX_PROBE and write the structure back. Skipped
 * entirely in HostAP mode.
 */
void dhd_set_scb_probe(dhd_pub_t *dhd)
{
	int ret = 0;
	wl_scb_probe_t scb_probe;
	char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];

	memset(&scb_probe, 0, sizeof(wl_scb_probe_t));

	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
		return;
	}

	bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));

	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
		/* Fix: bail out instead of copying the failed (garbage) GET
		 * buffer into scb_probe and pushing it back to the firmware.
		 */
		return;
	}

	/* iovbuf now holds the current wl_scb_probe_t contents */
	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));

	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;

	bcm_mkiovar("scb_probe", (char *)&scb_probe,
		sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
		return;
	}
}
#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5365
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/*
 * ethtool .get_drvinfo callback: report driver name "wl" and the
 * numeric driver version held in pub.drv_version.
 */
static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	snprintf(info->driver, sizeof(info->driver), "wl");
	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
}

/* Non-static: registered on the net_device elsewhere in this file */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
5380
5381
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * Legacy SIOCETHTOOL handler (pre-ethtool_ops path). uaddr is the
 * userspace ethtool request; the first u32 is always the sub-command.
 * Supports GDRVINFO and, when TOE is compiled in, the get/set
 * checksum-offload sub-commands which map onto the dongle's toe_ol
 * component bitmap (see dhd_toe_get/dhd_toe_set).
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		/* Select the TX or RX bit of the toe_ol bitmap */
		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
5493
/*
 * Decide whether an ioctl error indicates a hung dongle and, if so,
 * classify the hang reason and send a HANG event to the framework.
 * Returns TRUE when a hang message was sent, FALSE otherwise.
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	/* No hang handling while the interface is not up */
	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if !defined(BCMPCIE)
	/* Negative DPC thread pid means the driver is unloading */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif

	/* Hang condition: bus timeout errors, or bus down without an
	 * intentional dongle reset in progress.
	 */
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
#ifdef BCMPCIE
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d d3acke=%d e=%d s=%d\n",
			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
#else
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
#endif /* BCMPCIE */
		/* Only classify the reason once per hang episode */
		if (dhdp->hang_reason == 0) {
			if (dhdp->dongle_trap_occured) {
				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
			} else if (dhdp->d3ackcnt_timeout) {
				dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
			} else {
				dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
			}
		}
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
5540
/*
 * Core ioctl dispatcher shared by dhd_ioctl_entry() and other entry
 * points. Handles local DHD ioctls (DHD_IOCTL_MAGIC), optionally brings
 * the bus up (delayed firmware download), serializes security-sensitive
 * commands against pending 802.1X frames, and finally forwards the
 * request to the dongle. Any error feeds into dhd_check_hang().
 * data_buf is the caller-provided (already kernel-resident) payload.
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	/* Clamp payload length to the driver's ioctl maximum */
	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
		if (allow_delay_fwdl) {
			/* Deferred firmware download: start the bus now */
			int ret = dhd_bus_start(pub);
			if (ret != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
		} else {
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	/* strncmp lengths (9, 15) include the NUL, so only exact iovar
	 * name matches ("wsec_key", "bsscfg:wsec_key") trigger the wait.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/* short cut wl ioctl calls here */
		if (strcmp("htsf", data_buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* "rpc_*" iovars are only meaningful with BCM_FD_AGGR support */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}

#ifdef DHD_DEBUG
	if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
		if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
			/* Print IOVAR Information */
			DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
				__FUNCTION__, (char *)data_buf, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				prhex(NULL, data_buf + strlen(data_buf) + 1,
					buflen - strlen(data_buf) - 1);
			}
		} else {
			/* Print IOCTL Information */
			DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
				__FUNCTION__, ioc->cmd, ioc->set));
			if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
				prhex(NULL, data_buf, buflen);
			}
		}
	}
#endif /* DHD_DEBUG */

	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
	/* Classify errors that indicate a hung dongle */
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
5687
/*
 * net_device ioctl entry point. Routes wireless-extension and ethtool
 * requests to their handlers, handles Android private commands
 * (SIOCDEVPRIVATE+1), and for SIOCDEVPRIVATE copies the wl_ioctl_t
 * header (with 32-bit compat translation when needed) plus payload from
 * userspace, runs dhd_ioctl_process(), and copies the result back.
 * The PERIM lock is dropped around user-memory copies because they may
 * sleep on a page fault.
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int ifidx;
	int ret;
	void *local_buf = NULL;
	u16 buflen = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		ret = BCME_NOTUP;
		goto exit;
	}

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		ret = BCME_DONGLE_DOWN;
		goto exit;
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		goto exit;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		goto exit;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	/* Android framework private command channel */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		goto exit;
	}

	if (cmd != SIOCDEVPRIVATE) {
		ret = -EOPNOTSUPP;
		goto exit;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
	if (in_compat_syscall())
#else
	if (is_compat_task())
#endif
	{
		/* 32-bit userspace on a 64-bit kernel: translate the
		 * compat layout field by field.
		 */
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}

		ioc.cmd = compat_ioc.cmd;
		ioc.buf = (uint64 *)compat_ioc.buf;
		ioc.len = compat_ioc.len;
		ioc.set = compat_ioc.set;
		ioc.used = compat_ioc.used;
		ioc.needed = compat_ioc.needed;
		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			ret = BCME_BADADDR;
			goto done;
		}

		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			ret = BCME_BADADDR;
			goto done;
		}
	}

	if (!capable(CAP_NET_ADMIN)) {
		ret = BCME_EPERM;
		goto done;
	}

	if (ioc.len > 0) {
		/* +1 byte so the payload can be NUL-terminated below */
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			ret = BCME_NOMEM;
			goto done;
		}

		/* Drop PERIM across the (possibly faulting) user copy */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			ret = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		*(char *)(local_buf + buflen) = '\0';
	}

	ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	/* Copy results back to the user buffer on success */
	if (!ret && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			ret = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

exit:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Translate BCME_* codes to OS error codes for the kernel */
	return OSL_ERROR(ret);
}
5839
5840
5841#ifdef FIX_CPU_MIN_CLOCK
5842static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
5843{
5844 if (dhd) {
5845#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5846 mutex_init(&dhd->cpufreq_fix);
5847#endif
5848 dhd->cpufreq_fix_status = FALSE;
5849 }
5850 return 0;
5851}
5852
5853static void dhd_fix_cpu_freq(dhd_info_t *dhd)
5854{
5855#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5856 mutex_lock(&dhd->cpufreq_fix);
5857#endif
5858 if (dhd && !dhd->cpufreq_fix_status) {
5859 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
5860#ifdef FIX_BUS_MIN_CLOCK
5861 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
5862#endif /* FIX_BUS_MIN_CLOCK */
5863 DHD_ERROR(("pm_qos_add_requests called\n"));
5864
5865 dhd->cpufreq_fix_status = TRUE;
5866 }
5867#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5868 mutex_unlock(&dhd->cpufreq_fix);
5869#endif
5870}
5871
5872static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
5873{
5874#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5875 mutex_lock(&dhd ->cpufreq_fix);
5876#endif
5877 if (dhd && dhd->cpufreq_fix_status != TRUE) {
5878#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5879 mutex_unlock(&dhd->cpufreq_fix);
5880#endif
5881 return;
5882 }
5883
5884 pm_qos_remove_request(&dhd->dhd_cpu_qos);
5885#ifdef FIX_BUS_MIN_CLOCK
5886 pm_qos_remove_request(&dhd->dhd_bus_qos);
5887#endif /* FIX_BUS_MIN_CLOCK */
5888 DHD_ERROR(("pm_qos_add_requests called\n"));
5889
5890 dhd->cpufreq_fix_status = FALSE;
5891#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
5892 mutex_unlock(&dhd->cpufreq_fix);
5893#endif
5894}
5895#endif /* FIX_CPU_MIN_CLOCK */
5896
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/*
 * Toggle firmware deepsleep. flag==1: disable packet filters and MPC,
 * then enable deepsleep. flag==0: disable deepsleep (retrying up to
 * MAX_TRY_CNT times, reading the iovar back to confirm) and re-enable
 * MPC. Ioctl errors are logged but not propagated; always returns 0.
 */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhdp = &dhd->pub;

	switch (flag) {
		case 1 :  /* Deepsleep on */
			DHD_ERROR(("dhd_deepsleep: ON\n"));
			/* give some time to sysioc_work before deepsleep */
			OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
			/* Disable MPC */
			powervar = 0;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);

			/* Enable Deepsleep */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;

		case 0: /* Deepsleep Off */
			DHD_ERROR(("dhd_deepsleep: OFF\n"));

			/* Disable Deepsleep */
			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
				/* Write deepsleep=0, then read it back to
				 * verify the firmware actually left deepsleep.
				 */
				powervar = 0;
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0);

				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0)) < 0) {
					DHD_ERROR(("the error of dhd deepsleep status"
						" ret value :%d\n", ret));
				} else {
					if (!(*(int *)iovbuf)) {
						/* Confirmed off: stop retrying */
						DHD_ERROR(("deepsleep mode is 0,"
							" count: %d\n", cnt));
						break;
					}
				}
			}

			/* Enable MPC */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;
	}

	return 0;
}
5968
/*
 * net_device .ndo_stop handler ("ifconfig wlan0 down"). Flushes station
 * state, stops the TX queue, tears down cfg80211 virtual interfaces and
 * notifiers (built-in/non-driverload case), stops the protocol layer,
 * and for ifidx 0 without driverload firmware powers the chip off.
 * Always returns 0.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	/* Already down: skip straight to the power-off/exit path */
	if (dhd->pub.up == 0) {
		goto exit;
	}

	dhd_if_flush_sta(DHD_DEV_IFP(net));

	/* Disable Runtime PM before interface down */
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(NULL);

		ifp = dhd->iflist[0];
		ASSERT(ifp && ifp->net);
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

#ifdef WL_CFG80211_P2P_DEV_IF
				wl_cfg80211_del_p2p_wdev();
#endif /* WL_CFG80211_P2P_DEV_IF */

				dhd_net_if_lock_local(dhd);
				/* Remove every virtual interface except the primary (0) */
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}

#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
#if 0
			// terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
			cancel_work_sync(dhd->dhd_deferred_wq);
#endif
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
		}

#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
		/* Disable and remove NAPI when this netdev owns it */
		if (ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB && DHD_LB_RXP */

	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	if (ifidx == 0 && !dhd_download_fw_on_driverload) {
		/* Built-in/non-driverload: power the WLAN chip off */
		wl_android_wifi_off(net, TRUE);
#ifdef WL_EXT_IAPSTA
		wl_android_ext_dettach_netdev();
#endif
	} else {
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}
	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

#ifdef BCMDBGFS
	dhd_dbg_remove();
#endif

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}
	printf("%s: Exit\n", __FUNCTION__);

	return 0;
}
6116
6117#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
6118extern bool g_first_broadcast_scan;
6119#endif
6120
#ifdef WL11U
/*
 * Enable 802.11u interworking in the firmware and, on success, set the
 * basic WNM capabilities (BSS transition + notification) for Hotspot
 * 2.0 Release 2. Returns the last ioctl result.
 */
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 enable = true;
	int ret = BCME_OK;

	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (ret < 0) {
		DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	if (ret == BCME_OK) {
		/* basic capabilities for HS20 REL2 */
		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
		if (ret < 0) {
			DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
		}
	}

	return ret;
}
#endif /* WL11u */
6147
6148static int
6149dhd_open(struct net_device *net)
6150{
6151 dhd_info_t *dhd = DHD_DEV_INFO(net);
6152#ifdef TOE
6153 uint32 toe_ol;
6154#endif
6155#ifdef BCM_FD_AGGR
6156 char iovbuf[WLC_IOCTL_SMLEN];
6157 dbus_config_t config;
6158 uint32 agglimit = 0;
6159 uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
6160#endif /* BCM_FD_AGGR */
6161 int ifidx;
6162 int32 ret = 0;
08dfb6c4
RC
6163#if defined(OOB_INTR_ONLY)
6164 uint32 bus_type = -1;
6165 uint32 bus_num = -1;
6166 uint32 slot_num = -1;
6167 wifi_adapter_info_t *adapter = NULL;
6168#endif
ef6a5fee
RC
6169
6170 if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
6171 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
6172 return -1;
6173 }
6174
6175 printf("%s: Enter %p\n", __FUNCTION__, net);
6176#if defined(MULTIPLE_SUPPLICANT)
6177#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6178 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6179 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
6180 }
6181 mutex_lock(&_dhd_sdio_mutex_lock_);
6182#endif
6183#endif /* MULTIPLE_SUPPLICANT */
6184 /* Init wakelock */
6185 if (!dhd_download_fw_on_driverload &&
6186 !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
6187 DHD_OS_WAKE_LOCK_INIT(dhd);
6188 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
6189 }
6190
6191#ifdef PREVENT_REOPEN_DURING_HANG
6192 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
6193 if (dhd->pub.hang_was_sent == 1) {
6194 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
6195 /* Force to bring down WLAN interface in case dhd_stop() is not called
6196 * from the upper layer when HANG event is triggered.
6197 */
6198 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
6199 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
6200 dhd_stop(net);
6201 } else {
6202 return -1;
6203 }
6204 }
6205#endif /* PREVENT_REOPEN_DURING_HANG */
6206
6207
6208 DHD_OS_WAKE_LOCK(&dhd->pub);
6209 DHD_PERIM_LOCK(&dhd->pub);
6210 dhd->pub.dongle_trap_occured = 0;
6211 dhd->pub.hang_was_sent = 0;
6212 dhd->pub.hang_reason = 0;
6213#ifdef DHD_LOSSLESS_ROAMING
6214 dhd->pub.dequeue_prec_map = ALLPRIO;
6215#endif
6216#if 0
6217 /*
6218 * Force start if ifconfig_up gets called before START command
6219 * We keep WEXT's wl_control_wl_start to provide backward compatibility
6220 * This should be removed in the future
6221 */
6222 ret = wl_control_wl_start(net);
6223 if (ret != 0) {
6224 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6225 ret = -1;
6226 goto exit;
6227 }
6228#endif
6229
6230 ifidx = dhd_net2idx(dhd, net);
6231 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
6232
6233 if (ifidx < 0) {
6234 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
6235 ret = -1;
6236 goto exit;
6237 }
6238
6239 if (!dhd->iflist[ifidx]) {
6240 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
6241 ret = -1;
6242 goto exit;
6243 }
6244
6245 if (ifidx == 0) {
6246 atomic_set(&dhd->pend_8021x_cnt, 0);
6247 if (!dhd_download_fw_on_driverload) {
6248 DHD_ERROR(("\n%s\n", dhd_version));
6249#if defined(USE_INITIAL_SHORT_DWELL_TIME)
6250 g_first_broadcast_scan = TRUE;
6251#endif
6252 ret = wl_android_wifi_on(net);
6253 if (ret != 0) {
6254 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
6255 __FUNCTION__, ret));
6256 ret = -1;
6257 goto exit;
6258 }
6259 }
6260#ifdef FIX_CPU_MIN_CLOCK
6261 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
6262 dhd_init_cpufreq_fix(dhd);
6263 dhd_fix_cpu_freq(dhd);
6264 }
6265#endif /* FIX_CPU_MIN_CLOCK */
08dfb6c4
RC
6266#if defined(OOB_INTR_ONLY)
6267 if (dhd->pub.conf->dpc_cpucore >= 0) {
6268 dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
6269 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
6270 if (adapter) {
6271 printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
6272 irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
6273 }
6274 }
6275#endif
ef6a5fee
RC
6276
6277 if (dhd->pub.busstate != DHD_BUS_DATA) {
6278
6279 /* try to bring up bus */
6280 DHD_PERIM_UNLOCK(&dhd->pub);
6281 ret = dhd_bus_start(&dhd->pub);
6282 DHD_PERIM_LOCK(&dhd->pub);
6283 if (ret) {
6284 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6285 ret = -1;
6286 goto exit;
6287 }
6288
6289 }
6290 if (dhd_download_fw_on_driverload) {
6291 if (dhd->pub.conf->deepsleep)
6292 dhd_deepsleep(dhd, 0);
6293 }
6294
6295#ifdef BCM_FD_AGGR
6296 config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
6297
6298
6299 memset(iovbuf, 0, sizeof(iovbuf));
6300 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
6301 iovbuf, sizeof(iovbuf));
6302
6303 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
6304 agglimit = *(uint32 *)iovbuf;
6305 config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
6306 config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
6307 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
6308 agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
6309 if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
6310 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
6311 }
6312 } else {
6313 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
6314 rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
6315 }
6316
6317 /* Set aggregation for TX */
6318 bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
6319 rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
6320
6321 /* Set aggregation for RX */
6322 memset(iovbuf, 0, sizeof(iovbuf));
6323 bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
6324 if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
6325 dhd->pub.info->fdaggr = 0;
6326 if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
6327 dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
6328 if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
6329 dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
6330 } else {
6331 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
6332 }
6333#endif /* BCM_FD_AGGR */
6334
6335 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
6336 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
6337
6338#ifdef TOE
6339 /* Get current TOE mode from dongle */
6340 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
6341 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
6342 } else {
6343 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
6344 }
6345#endif /* TOE */
6346
6347#if defined(WL_CFG80211)
6348 if (unlikely(wl_cfg80211_up(NULL))) {
6349 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
6350 ret = -1;
6351 goto exit;
6352 }
6353 if (!dhd_download_fw_on_driverload) {
6354#ifdef ARP_OFFLOAD_SUPPORT
6355 dhd->pend_ipaddr = 0;
6356 if (!dhd_inetaddr_notifier_registered) {
6357 dhd_inetaddr_notifier_registered = TRUE;
6358 register_inetaddr_notifier(&dhd_inetaddr_notifier);
6359 }
6360#endif /* ARP_OFFLOAD_SUPPORT */
6361#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6362 if (!dhd_inet6addr_notifier_registered) {
6363 dhd_inet6addr_notifier_registered = TRUE;
6364 register_inet6addr_notifier(&dhd_inet6addr_notifier);
6365 }
6366#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6367#ifdef DHD_LB
6368 DHD_LB_STATS_INIT(&dhd->pub);
6369#ifdef DHD_LB_RXP
6370 __skb_queue_head_init(&dhd->rx_pend_queue);
6371#endif /* DHD_LB_RXP */
6372#endif /* DHD_LB */
6373 }
6374
6375#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
6376#if defined(SET_RPS_CPUS)
6377 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6378#else
6379 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
6380#endif
6381#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
6382#if defined(DHD_LB) && defined(DHD_LB_RXP)
6383 if (dhd->rx_napi_netdev == NULL) {
6384 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
6385 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
6386 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
6387 dhd_napi_poll, dhd_napi_weight);
6388 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
6389 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6390 napi_enable(&dhd->rx_napi_struct);
6391 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
6392 skb_queue_head_init(&dhd->rx_napi_queue);
6393 }
6394#endif /* DHD_LB && DHD_LB_RXP */
6395#if defined(NUM_SCB_MAX_PROBE)
6396 dhd_set_scb_probe(&dhd->pub);
6397#endif /* NUM_SCB_MAX_PROBE */
6398#endif /* WL_CFG80211 */
6399 }
6400
6401 /* Allow transmit calls */
6402 netif_start_queue(net);
6403 dhd->pub.up = 1;
6404
6405 OLD_MOD_INC_USE_COUNT;
6406
6407#ifdef BCMDBGFS
6408 dhd_dbg_init(&dhd->pub);
6409#endif
6410
6411exit:
6412 if (ret) {
6413 dhd_stop(net);
6414 }
6415
6416 DHD_PERIM_UNLOCK(&dhd->pub);
6417 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6418
6419#if defined(MULTIPLE_SUPPLICANT)
6420#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
6421 mutex_unlock(&_dhd_sdio_mutex_lock_);
6422#endif
6423#endif /* MULTIPLE_SUPPLICANT */
6424
6425 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
6426 return ret;
6427}
6428
6429int dhd_do_driver_init(struct net_device *net)
6430{
6431 dhd_info_t *dhd = NULL;
6432
6433 if (!net) {
6434 DHD_ERROR(("Primary Interface not initialized \n"));
6435 return -EINVAL;
6436 }
6437
6438#ifdef MULTIPLE_SUPPLICANT
6439#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
6440 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
6441 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
6442 return 0;
6443 }
6444#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
6445#endif /* MULTIPLE_SUPPLICANT */
6446
6447 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
6448 dhd = DHD_DEV_INFO(net);
6449
6450 /* If driver is already initialized, do nothing
6451 */
6452 if (dhd->pub.busstate == DHD_BUS_DATA) {
6453 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
6454 return 0;
6455 }
6456
6457 if (dhd_open(net) < 0) {
6458 DHD_ERROR(("Driver Init Failed \n"));
6459 return -1;
6460 }
6461
6462 return 0;
6463}
6464
6465int
6466dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6467{
6468
6469#ifdef WL_CFG80211
6470 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6471 return BCME_OK;
6472#endif
6473
6474 /* handle IF event caused by wl commands, SoftAP, WEXT and
6475 * anything else. This has to be done asynchronously otherwise
6476 * DPC will be blocked (and iovars will timeout as DPC has no chance
6477 * to read the response back)
6478 */
6479 if (ifevent->ifidx > 0) {
6480 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6481 if (if_event == NULL) {
6482 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
6483 MALLOCED(dhdinfo->pub.osh)));
6484 return BCME_NOMEM;
6485 }
6486
6487 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6488 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6489 strncpy(if_event->name, name, IFNAMSIZ);
6490 if_event->name[IFNAMSIZ - 1] = '\0';
6491 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
6492 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
6493 }
6494
6495 return BCME_OK;
6496}
6497
6498int
6499dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
6500{
6501 dhd_if_event_t *if_event;
6502
6503#ifdef WL_CFG80211
6504 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
6505 return BCME_OK;
6506#endif /* WL_CFG80211 */
6507
6508 /* handle IF event caused by wl commands, SoftAP, WEXT and
6509 * anything else
6510 */
6511 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
6512 if (if_event == NULL) {
6513 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
6514 MALLOCED(dhdinfo->pub.osh)));
6515 return BCME_NOMEM;
6516 }
6517 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
6518 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
6519 strncpy(if_event->name, name, IFNAMSIZ);
6520 if_event->name[IFNAMSIZ - 1] = '\0';
6521 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
6522 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
6523
6524 return BCME_OK;
6525}
6526
6527/* unregister and free the existing net_device interface (if any) in iflist and
6528 * allocate a new one. the slot is reused. this function does NOT register the
6529 * new interface to linux kernel. dhd_register_if does the job
6530 */
6531struct net_device*
6532dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
6533 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
6534{
6535 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6536 dhd_if_t *ifp;
6537
6538 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
6539 ifp = dhdinfo->iflist[ifidx];
6540
6541 if (ifp != NULL) {
6542 if (ifp->net != NULL) {
6543 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
6544
6545 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
6546
6547 /* in unregister_netdev case, the interface gets freed by net->destructor
6548 * (which is set to free_netdev)
6549 */
6550 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6551 free_netdev(ifp->net);
6552 } else {
6553 netif_stop_queue(ifp->net);
6554 if (need_rtnl_lock)
6555 unregister_netdev(ifp->net);
6556 else
6557 unregister_netdevice(ifp->net);
6558 }
6559 ifp->net = NULL;
6560 }
6561 } else {
6562 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
6563 if (ifp == NULL) {
6564 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
6565 return NULL;
6566 }
6567 }
6568
6569 memset(ifp, 0, sizeof(dhd_if_t));
6570 ifp->info = dhdinfo;
6571 ifp->idx = ifidx;
6572 ifp->bssidx = bssidx;
6573 if (mac != NULL)
6574 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
6575
6576 /* Allocate etherdev, including space for private structure */
6577 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
6578 if (ifp->net == NULL) {
6579 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
6580 goto fail;
6581 }
6582
6583 /* Setup the dhd interface's netdevice private structure. */
6584 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
6585
6586 if (name && name[0]) {
6587 strncpy(ifp->net->name, name, IFNAMSIZ);
6588 ifp->net->name[IFNAMSIZ - 1] = '\0';
6589 }
6590
6591#ifdef WL_CFG80211
6592 if (ifidx == 0)
6593 ifp->net->destructor = free_netdev;
6594 else
6595 ifp->net->destructor = dhd_netdev_free;
6596#else
6597 ifp->net->destructor = free_netdev;
6598#endif /* WL_CFG80211 */
6599 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
6600 ifp->name[IFNAMSIZ - 1] = '\0';
6601 dhdinfo->iflist[ifidx] = ifp;
6602
6603/* initialize the dongle provided if name */
6604 if (dngl_name)
6605 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
6606 else
6607 strncpy(ifp->dngl_name, name, IFNAMSIZ);
6608
6609#ifdef PCIE_FULL_DONGLE
6610 /* Initialize STA info list */
6611 INIT_LIST_HEAD(&ifp->sta_list);
6612 DHD_IF_STA_LIST_LOCK_INIT(ifp);
6613#endif /* PCIE_FULL_DONGLE */
6614
6615#ifdef DHD_L2_FILTER
6616 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
6617 ifp->parp_allnode = TRUE;
6618#endif
6619 return ifp->net;
6620
6621fail:
6622
6623 if (ifp != NULL) {
6624 if (ifp->net != NULL) {
6625 dhd_dev_priv_clear(ifp->net);
6626 free_netdev(ifp->net);
6627 ifp->net = NULL;
6628 }
6629 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
6630 ifp = NULL;
6631 }
6632
6633 dhdinfo->iflist[ifidx] = NULL;
6634 return NULL;
6635}
6636
6637/* unregister and free the the net_device interface associated with the indexed
6638 * slot, also free the slot memory and set the slot pointer to NULL
6639 */
6640int
6641dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
6642{
6643 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
6644 dhd_if_t *ifp;
6645
6646 ifp = dhdinfo->iflist[ifidx];
6647
6648 if (ifp != NULL) {
6649 if (ifp->net != NULL) {
6650 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
6651
6652 /* in unregister_netdev case, the interface gets freed by net->destructor
6653 * (which is set to free_netdev)
6654 */
6655 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
6656 free_netdev(ifp->net);
6657 } else {
6658 netif_tx_disable(ifp->net);
6659
6660
6661
6662#if defined(SET_RPS_CPUS)
6663 custom_rps_map_clear(ifp->net->_rx);
6664#endif /* SET_RPS_CPUS */
6665#if defined(SET_RPS_CPUS)
6666#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
6667 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
6668#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
6669#endif
6670 if (need_rtnl_lock)
6671 unregister_netdev(ifp->net);
6672 else
6673 unregister_netdevice(ifp->net);
6674 }
6675 ifp->net = NULL;
6676 dhdinfo->iflist[ifidx] = NULL;
6677 }
6678#ifdef DHD_WMF
6679 dhd_wmf_cleanup(dhdpub, ifidx);
6680#endif /* DHD_WMF */
6681#ifdef DHD_L2_FILTER
6682 bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
6683 NULL, FALSE, dhdpub->tickcnt);
6684 deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
6685 ifp->phnd_arp_table = NULL;
6686#endif /* DHD_L2_FILTER */
6687
6688 dhd_if_del_sta_list(ifp);
6689
6690 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
08dfb6c4 6691 ifp = NULL;
ef6a5fee
RC
6692 }
6693
6694 return BCME_OK;
6695}
6696
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Kernel net_device callbacks for the primary interface (ifidx 0).
 * Only this table wires up ndo_open/ndo_stop, so bringing the device
 * up/down drives dhd_open()/dhd_stop().
 */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	/* kernel 3.2 renamed ndo_set_multicast_list to ndo_set_rx_mode */
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

/* Callbacks for virtual (secondary) interfaces: same data path and
 * configuration hooks as the primary, but without open/stop handlers.
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
6724
6725#ifdef DEBUGGER
6726extern void debugger_init(void *bus_handle);
6727#endif
6728
6729
#ifdef SHOW_LOGTRACE
/* Default locations of the firmware log-string and symbol-map files used to
 * decode binary event-log traces; each is overridable via the module
 * parameters declared below.
 */
static char *logstrs_path = "/root/logstrs.bin";
static char *st_str_file_path = "/root/rtecdc.bin";
static char *map_file_path = "/root/rtecdc.map";
static char *rom_st_str_file_path = "/root/roml.bin";
static char *rom_map_file_path = "/root/roml.map";

#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */
#define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */
static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */
static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";
/* Bit flags accumulated by dhd_read_map() as each address is found. */
#define RAMSTART_BIT 0x01
#define RDSTART_BIT 0x02
#define RDEND_BIT 0x04
#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);
6755
/* Load and index the firmware logstrs.bin file so binary event-log records
 * can be translated to printf-style format strings. Supports three file
 * layouts: the headered format (LOGSTRS_MAGIC trailer), the legacy ROM+RAM
 * format (4324b5 only), and the legacy RAM-only format. On success fills
 * temp->fmts / temp->raw_fmts / temp->num_fmts (raw_fmts ownership stays
 * with temp). On any failure temp->fmts is set to NULL and all local
 * allocations are released.
 */
static void
dhd_init_logstrs_array(dhd_event_log_t *temp)
{
	struct file *filep = NULL;
	struct kstat stat;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	int logstrs_size = 0;

	logstr_header_t *hdr = NULL;
	uint32 *lognums = NULL;
	char *logstrs = NULL;
	int ram_index = 0;
	char **fmts;
	int num_fmts = 0;
	uint32 i = 0;
	int error = 0;

	/* Allow vfs_read/vfs_stat to operate on kernel-space buffers. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	filep = filp_open(logstrs_path, O_RDONLY, 0);

	if (IS_ERR(filep)) {
		DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	error = vfs_stat(logstrs_path, &stat);
	if (error) {
		DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	logstrs_size = (int) stat.size;

	/* Read the whole file into one contiguous buffer. */
	raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
		goto fail;
	}
	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
		DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
		goto fail;
	}

	/* Remember header from the logstrs.bin file */
	hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_header_t));

	if (hdr->log_magic == LOGSTRS_MAGIC) {
		/*
		 * logstrs.bin start with header.
		 */
		num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
		ram_index = (hdr->ram_lognums_offset -
			hdr->rom_lognums_offset) / sizeof(uint32);
		lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
		logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
	} else {
		/*
		 * Legacy logstrs.bin format without header.
		 */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
		if (num_fmts == 0) {
			/* Legacy ROM/RAM logstrs.bin format:
			 * - ROM 'lognums' section
			 * - RAM 'lognums' section
			 * - ROM 'logstrs' section.
			 * - RAM 'logstrs' section.
			 *
			 * 'lognums' is an array of indexes for the strings in the
			 * 'logstrs' section. The first uint32 is 0 (index of first
			 * string in ROM 'logstrs' section).
			 *
			 * The 4324b5 is the only ROM that uses this legacy format. Use the
			 * fixed number of ROM fmtnums to find the start of the RAM
			 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
			 * find the ROM 'logstrs' section.
			 */
			#define NUM_4324B5_ROM_FMTS 186
			#define FIRST_4324B5_ROM_LOGSTR "Con\n"
			ram_index = NUM_4324B5_ROM_FMTS;
			lognums = (uint32 *) raw_fmts;
			num_fmts = ram_index;
			logstrs = (char *) &raw_fmts[num_fmts << 2];
			/* Walk forward until the known first ROM string is found;
			 * everything before it is lognums data.
			 */
			while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
				num_fmts++;
				logstrs = (char *) &raw_fmts[num_fmts << 2];
			}
		} else {
			/* Legacy RAM-only logstrs.bin format:
			 * - RAM 'lognums' section
			 * - RAM 'logstrs' section.
			 *
			 * 'lognums' is an array of indexes for the strings in the
			 * 'logstrs' section. The first uint32 is an index to the
			 * start of 'logstrs'. Therefore, if this index is divided
			 * by 'sizeof(uint32)' it provides the number of logstr
			 * entries.
			 */
			ram_index = 0;
			lognums = (uint32 *) raw_fmts;
			logstrs = (char *) &raw_fmts[num_fmts << 2];
		}
	}
	/* Build the pointer table: fmts[i] -> format string for log id i. */
	fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
	if (fmts == NULL) {
		DHD_ERROR(("Failed to allocate fmts memory\n"));
		goto fail;
	}

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		 * (they are 0-indexed relative to 'rom_logstrs_offset').
		 *
		 * RAM lognums are already indexed to point to the correct RAM logstrs (they
		 * are 0-indexed relative to the start of the logstrs.bin file).
		 */
		if (i == ram_index) {
			logstrs = raw_fmts;
		}
		fmts[i] = &logstrs[lognums[i]];
	}
	temp->fmts = fmts;
	temp->raw_fmts = raw_fmts;
	temp->num_fmts = num_fmts;
	filp_close(filep, NULL);
	set_fs(fs);
	return;
fail:
	if (raw_fmts) {
		kfree(raw_fmts);
		raw_fmts = NULL;
	}
	if (!IS_ERR(filep))
		filp_close(filep, NULL);
	set_fs(fs);
	temp->fmts = NULL;
	return;
}
6895
/* Scan a firmware linker map file for the "text_start", "rodata_start" and
 * "rodata_end" symbols and return their addresses via the out parameters.
 * The file is read in READ_NUM_BYTES windows; after each window the file
 * position is rewound GO_BACK_FILE_POS_NUM_BYTES so a symbol split across a
 * window boundary is still matched on the next pass.
 *
 * Returns BCME_OK when all three addresses were found, BCME_ERROR otherwise
 * (the out parameters are zeroed on entry).
 */
static int
dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end)
{
	struct file *filep = NULL;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	uint32 read_size = READ_NUM_BYTES;
	int error = 0;
	char * cptr = NULL;
	char c;
	uint8 count = 0;	/* bitmask of RAMSTART_BIT/RDSTART_BIT/RDEND_BIT */

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;

	if (fname == NULL) {
		DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Allow vfs_read with a kernel-space buffer. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	filep = filp_open(fname, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
		goto fail;
	}

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map file */

	while (count != ALL_MAP_VAL)
	{
		error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
		if (error < 0) {
			DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
				map_file_path, error));
			goto fail;
		}

		if (error < read_size) {
			/*
			 * since we reset file pos back to earlier pos by
			 * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
			 * So if ret value is less than read_size, reached EOF don't read further
			 */
			break;
		}
		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		if ((cptr = strstr(raw_fmts, ramstart_str))) {
			/* the hex address precedes the symbol name by BYTES_AHEAD_NUM */
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		if ((cptr = strstr(raw_fmts, rodata_start_str))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		if ((cptr = strstr(raw_fmts, rodata_end_str))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}
		memset(raw_fmts, 0, read_size);
		/*
		 * go back to predefined NUM of bytes so that we won't miss
		 * the string and addr even if it comes as splited in next read.
		 */
		filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
	}

	DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
		*ramstart, *rodata_start, *rodata_end));

	DHD_ERROR(("readmap over \n"));

fail:
	if (raw_fmts) {
		kfree(raw_fmts);
		raw_fmts = NULL;
	}
	if (!IS_ERR(filep))
		filp_close(filep, NULL);

	set_fs(fs);
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	DHD_ERROR(("readmap error 0X%x \n", count));
	return BCME_ERROR;
}
7004
/* Load the rodata section of a firmware image (RAM "rtecdc" or ROM "roml")
 * into memory so static event-log strings can be resolved by address.
 * Uses dhd_read_map() on map_file to find the rodata window inside str_file,
 * reads only that window, and stores the buffer plus its address bounds in
 * the matching temp->raw_sstr/... or temp->rom_raw_sstr/... fields (chosen
 * by whether str_file contains "rtecdc" or "roml"). On failure the matching
 * pointer field is set to NULL and the buffer is freed.
 */
static void
dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
{
	struct file *filep = NULL;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	uint32 logstrs_size = 0;

	int error = 0;
	uint32 ramstart = 0;
	uint32 rodata_start = 0;
	uint32 rodata_end = 0;
	uint32 logfilebase = 0;

	error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
	if (error == BCME_ERROR) {
		DHD_ERROR(("readmap Error!! \n"));
		/* don't do event log parsing in actual case */
		temp->raw_sstr = NULL;
		return;
	}
	DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
		ramstart, rodata_start, rodata_end));

	/* Allow vfs_read with a kernel-space buffer. */
	fs = get_fs();
	set_fs(KERNEL_DS);

	filep = filp_open(str_file, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
		goto fail;
	}

	/* Full file size is huge. Just read required part */
	logstrs_size = rodata_end - rodata_start;

	raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* rodata's file offset = its address minus the image load address */
	logfilebase = rodata_start - ramstart;

	error = generic_file_llseek(filep, logfilebase, SEEK_SET);
	if (error < 0) {
		DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
		goto fail;
	}

	error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
	if (error != logstrs_size) {
		DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
		goto fail;
	}

	/* Route the result to the RAM or ROM slot based on the file name. */
	if (strstr(str_file, ram_file_str) != NULL) {
		temp->raw_sstr = raw_fmts;
		temp->ramstart = ramstart;
		temp->rodata_start = rodata_start;
		temp->rodata_end = rodata_end;
	} else if (strstr(str_file, rom_file_str) != NULL) {
		temp->rom_raw_sstr = raw_fmts;
		temp->rom_ramstart = ramstart;
		temp->rom_rodata_start = rodata_start;
		temp->rom_rodata_end = rodata_end;
	}

	filp_close(filep, NULL);
	set_fs(fs);

	return;
fail:
	if (raw_fmts) {
		kfree(raw_fmts);
		raw_fmts = NULL;
	}
	if (!IS_ERR(filep))
		filp_close(filep, NULL);
	set_fs(fs);
	if (strstr(str_file, ram_file_str) != NULL) {
		temp->raw_sstr = NULL;
	} else if (strstr(str_file, rom_file_str) != NULL) {
		temp->rom_raw_sstr = NULL;
	}
	return;
}
7092
7093#endif /* SHOW_LOGTRACE */
7094
7095
7096dhd_pub_t *
7097dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
7098{
7099 dhd_info_t *dhd = NULL;
7100 struct net_device *net = NULL;
7101 char if_name[IFNAMSIZ] = {'\0'};
7102 uint32 bus_type = -1;
7103 uint32 bus_num = -1;
7104 uint32 slot_num = -1;
7105 wifi_adapter_info_t *adapter = NULL;
7106
7107 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
7108 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7109
7110#ifdef STBLINUX
7111 DHD_ERROR(("%s\n", driver_target));
7112#endif /* STBLINUX */
7113 /* will implement get_ids for DBUS later */
7114#if defined(BCMSDIO)
7115 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
7116#endif
7117 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
7118
7119 /* Allocate primary dhd_info */
7120 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
7121 if (dhd == NULL) {
7122 dhd = MALLOC(osh, sizeof(dhd_info_t));
7123 if (dhd == NULL) {
7124 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
7125 goto fail;
7126 }
7127 }
7128 memset(dhd, 0, sizeof(dhd_info_t));
7129 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
7130
7131 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
7132
7133 dhd->pub.osh = osh;
7134 dhd->adapter = adapter;
7135
7136#ifdef GET_CUSTOM_MAC_ENABLE
7137 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
7138#endif /* GET_CUSTOM_MAC_ENABLE */
7139#ifdef CUSTOM_FORCE_NODFS_FLAG
7140 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
7141 dhd->pub.force_country_change = TRUE;
7142#endif /* CUSTOM_FORCE_NODFS_FLAG */
7143#ifdef CUSTOM_COUNTRY_CODE
7144 get_customized_country_code(dhd->adapter,
7145 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
7146 dhd->pub.dhd_cflags);
7147#endif /* CUSTOM_COUNTRY_CODE */
7148 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
7149 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
7150
7151 /* Initialize thread based operation and lock */
7152 sema_init(&dhd->sdsem, 1);
7153
7154 /* Link to info module */
7155 dhd->pub.info = dhd;
7156
7157
7158 /* Link to bus module */
7159 dhd->pub.bus = bus;
7160 dhd->pub.hdrlen = bus_hdrlen;
7161
7162 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
7163 * because dhd_detech will check .info is NULL or not.
7164 */
7165 if (dhd_conf_attach(&dhd->pub) != 0) {
7166 DHD_ERROR(("dhd_conf_attach failed\n"));
7167 goto fail;
7168 }
7169 dhd_conf_reset(&dhd->pub);
7170 dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
7171 dhd_conf_preinit(&dhd->pub);
7172
7173 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
7174 * This is indeed a hack but we have to make it work properly before we have a better
7175 * solution
7176 */
7177 dhd_update_fw_nv_path(dhd);
ef6a5fee
RC
7178
7179 /* Set network interface name if it was provided as module parameter */
7180 if (iface_name[0]) {
7181 int len;
7182 char ch;
7183 strncpy(if_name, iface_name, IFNAMSIZ);
7184 if_name[IFNAMSIZ - 1] = 0;
7185 len = strlen(if_name);
7186 ch = if_name[len - 1];
7187 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
7188 strcat(if_name, "%d");
7189 }
7190
7191 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
7192 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
7193 if (net == NULL) {
7194 goto fail;
7195 }
7196
7197
7198 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
7199#ifdef DHD_L2_FILTER
7200 /* initialize the l2_filter_cnt */
7201 dhd->pub.l2_filter_cnt = 0;
7202#endif
7203#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7204 net->open = NULL;
7205#else
7206 net->netdev_ops = NULL;
7207#endif
7208
7209 mutex_init(&dhd->dhd_iovar_mutex);
7210 sema_init(&dhd->proto_sem, 1);
7211
7212#ifdef PROP_TXSTATUS
7213 spin_lock_init(&dhd->wlfc_spinlock);
7214
7215 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
7216 dhd->pub.plat_init = dhd_wlfc_plat_init;
7217 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
7218
7219#ifdef DHD_WLFC_THREAD
7220 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
7221 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
7222 if (IS_ERR(dhd->pub.wlfc_thread)) {
7223 DHD_ERROR(("create wlfc thread failed\n"));
7224 goto fail;
7225 } else {
7226 wake_up_process(dhd->pub.wlfc_thread);
7227 }
7228#endif /* DHD_WLFC_THREAD */
7229#endif /* PROP_TXSTATUS */
7230
7231 /* Initialize other structure content */
7232 init_waitqueue_head(&dhd->ioctl_resp_wait);
7233 init_waitqueue_head(&dhd->d3ack_wait);
7234 init_waitqueue_head(&dhd->ctrl_wait);
7235 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
7236 dhd->pub.dhd_bus_busy_state = 0;
7237
7238 /* Initialize the spinlocks */
7239 spin_lock_init(&dhd->sdlock);
7240 spin_lock_init(&dhd->txqlock);
7241 spin_lock_init(&dhd->dhd_lock);
7242 spin_lock_init(&dhd->rxf_lock);
7243#if defined(RXFRAME_THREAD)
7244 dhd->rxthread_enabled = TRUE;
7245#endif /* defined(RXFRAME_THREAD) */
7246
7247#ifdef DHDTCPACK_SUPPRESS
7248 spin_lock_init(&dhd->tcpack_lock);
7249#endif /* DHDTCPACK_SUPPRESS */
7250
7251 /* Initialize Wakelock stuff */
7252 spin_lock_init(&dhd->wakelock_spinlock);
7253 spin_lock_init(&dhd->wakelock_evt_spinlock);
7254 DHD_OS_WAKE_LOCK_INIT(dhd);
7255 dhd->wakelock_wd_counter = 0;
7256#ifdef CONFIG_HAS_WAKELOCK
08dfb6c4
RC
7257 // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
7258 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
ef6a5fee
RC
7259 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
7260#endif /* CONFIG_HAS_WAKELOCK */
7261
7262#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
7263 mutex_init(&dhd->dhd_net_if_mutex);
7264 mutex_init(&dhd->dhd_suspend_mutex);
7265#endif
7266 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7267
7268 /* Attach and link in the protocol */
7269 if (dhd_prot_attach(&dhd->pub) != 0) {
7270 DHD_ERROR(("dhd_prot_attach failed\n"));
7271 goto fail;
7272 }
7273 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
7274
7275#ifdef WL_CFG80211
7276 /* Attach and link in the cfg80211 */
7277 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
7278 DHD_ERROR(("wl_cfg80211_attach failed\n"));
7279 goto fail;
7280 }
7281
7282 dhd_monitor_init(&dhd->pub);
7283 dhd_state |= DHD_ATTACH_STATE_CFG80211;
7284#endif
7285#ifdef DHD_LOG_DUMP
7286 dhd_log_dump_init(&dhd->pub);
7287#endif /* DHD_LOG_DUMP */
7288#if defined(WL_WIRELESS_EXT)
7289 /* Attach and link in the iw */
7290 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
7291 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
7292 DHD_ERROR(("wl_iw_attach failed\n"));
7293 goto fail;
7294 }
7295 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
7296 }
08dfb6c4
RC
7297#ifdef WL_ESCAN
7298 wl_escan_attach(net, (void *)&dhd->pub);
7299#endif
ef6a5fee
RC
7300#endif /* defined(WL_WIRELESS_EXT) */
7301
7302#ifdef SHOW_LOGTRACE
7303 dhd_init_logstrs_array(&dhd->event_data);
7304 dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
7305 dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
7306#endif /* SHOW_LOGTRACE */
7307
7308 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
7309 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
7310 goto fail;
7311 }
7312
7313
7314
7315 /* Set up the watchdog timer */
7316 init_timer(&dhd->timer);
7317 dhd->timer.data = (ulong)dhd;
7318 dhd->timer.function = dhd_watchdog;
7319 dhd->default_wd_interval = dhd_watchdog_ms;
7320
7321 if (dhd_watchdog_prio >= 0) {
7322 /* Initialize watchdog thread */
7323 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
7324 if (dhd->thr_wdt_ctl.thr_pid < 0) {
7325 goto fail;
7326 }
7327
7328 } else {
7329 dhd->thr_wdt_ctl.thr_pid = -1;
7330 }
7331
7332#ifdef DHD_PCIE_RUNTIMEPM
7333 /* Setup up the runtime PM Idlecount timer */
7334 init_timer(&dhd->rpm_timer);
7335 dhd->rpm_timer.data = (ulong)dhd;
7336 dhd->rpm_timer.function = dhd_runtimepm;
7337 dhd->rpm_timer_valid = FALSE;
7338
7339 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
7340 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
7341 if (dhd->thr_rpm_ctl.thr_pid < 0) {
7342 goto fail;
7343 }
7344#endif /* DHD_PCIE_RUNTIMEPM */
7345
7346#ifdef DEBUGGER
7347 debugger_init((void *) bus);
7348#endif
7349
7350 /* Set up the bottom half handler */
7351 if (dhd_dpc_prio >= 0) {
7352 /* Initialize DPC thread */
7353 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
7354 if (dhd->thr_dpc_ctl.thr_pid < 0) {
7355 goto fail;
7356 }
7357 } else {
7358 /* use tasklet for dpc */
7359 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
7360 dhd->thr_dpc_ctl.thr_pid = -1;
7361 }
7362
7363 if (dhd->rxthread_enabled) {
7364 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
7365 /* Initialize RXF thread */
7366 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
7367 if (dhd->thr_rxf_ctl.thr_pid < 0) {
7368 goto fail;
7369 }
7370 }
7371
7372 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
7373
7374#if defined(CONFIG_PM_SLEEP)
7375 if (!dhd_pm_notifier_registered) {
7376 dhd_pm_notifier_registered = TRUE;
7377 dhd->pm_notifier.notifier_call = dhd_pm_callback;
7378 dhd->pm_notifier.priority = 10;
7379 register_pm_notifier(&dhd->pm_notifier);
7380 }
7381
7382#endif /* CONFIG_PM_SLEEP */
7383
7384#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7385 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
7386 dhd->early_suspend.suspend = dhd_early_suspend;
7387 dhd->early_suspend.resume = dhd_late_resume;
7388 register_early_suspend(&dhd->early_suspend);
7389 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
7390#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7391
7392#ifdef ARP_OFFLOAD_SUPPORT
7393 dhd->pend_ipaddr = 0;
7394 if (!dhd_inetaddr_notifier_registered) {
7395 dhd_inetaddr_notifier_registered = TRUE;
7396 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7397 }
7398#endif /* ARP_OFFLOAD_SUPPORT */
7399
7400#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7401 if (!dhd_inet6addr_notifier_registered) {
7402 dhd_inet6addr_notifier_registered = TRUE;
7403 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7404 }
7405#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7406 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
7407#ifdef DEBUG_CPU_FREQ
7408 dhd->new_freq = alloc_percpu(int);
7409 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
7410 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
7411#endif
7412#ifdef DHDTCPACK_SUPPRESS
7413#ifdef BCMSDIO
7414 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
7415#elif defined(BCMPCIE)
7416 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
7417#else
7418 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7419#endif /* BCMSDIO */
7420#endif /* DHDTCPACK_SUPPRESS */
7421
7422#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
7423#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
7424
7425 dhd_state |= DHD_ATTACH_STATE_DONE;
7426 dhd->dhd_state = dhd_state;
7427
7428 dhd_found++;
7429#ifdef DHD_DEBUG_PAGEALLOC
7430 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
7431#endif /* DHD_DEBUG_PAGEALLOC */
7432
7433#if defined(DHD_LB)
7434 DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
7435
7436 dhd_lb_set_default_cpus(dhd);
7437
7438 /* Initialize the CPU Masks */
7439 if (dhd_cpumasks_init(dhd) == 0) {
7440
7441 /* Now we have the current CPU maps, run through candidacy */
7442 dhd_select_cpu_candidacy(dhd);
7443
7444 /*
7445 * If we are able to initialize CPU masks, lets register to the
7446 * CPU Hotplug framework to change the CPU for each job dynamically
7447 * using candidacy algorithm.
7448 */
7449 dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
7450 register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
7451 } else {
7452 /*
7453 * We are unable to initialize CPU masks, so candidacy algorithm
7454 * won't run, but still Load Balancing will be honoured based
7455 * on the CPUs allocated for a given job statically during init
7456 */
7457 dhd->cpu_notifier.notifier_call = NULL;
7458 DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n",
7459 __FUNCTION__));
7460 }
7461
7462
7463 DHD_LB_STATS_INIT(&dhd->pub);
7464
7465 /* Initialize the Load Balancing Tasklets and Napi object */
7466#if defined(DHD_LB_TXC)
7467 tasklet_init(&dhd->tx_compl_tasklet,
7468 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
7469 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
7470 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
7471#endif /* DHD_LB_TXC */
7472
7473#if defined(DHD_LB_RXC)
7474 tasklet_init(&dhd->rx_compl_tasklet,
7475 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
7476 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
7477 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
7478#endif /* DHD_LB_RXC */
7479
7480#if defined(DHD_LB_RXP)
7481 __skb_queue_head_init(&dhd->rx_pend_queue);
7482 skb_queue_head_init(&dhd->rx_napi_queue);
7483
7484 /* Initialize the work that dispatches NAPI job to a given core */
7485 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
7486 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
7487#endif /* DHD_LB_RXP */
7488
7489#endif /* DHD_LB */
7490
7491 INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
7492
7493 (void)dhd_sysfs_init(dhd);
7494
7495 return &dhd->pub;
7496
7497fail:
7498 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
7499 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
7500 __FUNCTION__, dhd_state, &dhd->pub));
7501 dhd->dhd_state = dhd_state;
7502 dhd_detach(&dhd->pub);
7503 dhd_free(&dhd->pub);
7504 }
7505
7506 return NULL;
7507}
7508
7509#include <linux/delay.h>
7510
7511void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
7512{
7513 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7514
7515 schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
7516}
7517
7518int dhd_get_fw_mode(dhd_info_t *dhdinfo)
7519{
7520 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
7521 return DHD_FLAG_HOSTAP_MODE;
7522 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
7523 return DHD_FLAG_P2P_MODE;
7524 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
7525 return DHD_FLAG_IBSS_MODE;
7526 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
7527 return DHD_FLAG_MFG_MODE;
7528
7529 return DHD_FLAG_STA_MODE;
7530}
7531
/*
 * Resolve the firmware / nvram / clm / config file paths into dhdinfo.
 *
 * Precedence (lowest to highest): kernel-config defaults
 * (CONFIG_BCMDHD_*_PATH), platform adapter info (used only while the
 * corresponding dhdinfo path is still empty), then the module parameters
 * (firmware_path/nvram_path/clm_path/config_path), which override
 * everything whenever non-empty.
 *
 * Returns TRUE when usable paths were set, FALSE when a candidate path
 * exceeds its destination buffer or (without BCMEMBEDIMAGE) a mandatory
 * path is still missing.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
	int fw_len;
	int nv_len;
	int clm_len;
	int conf_len;
	const char *fw = NULL;
	const char *nv = NULL;
	const char *clm = NULL;
	const char *conf = NULL;
	wifi_adapter_info_t *adapter = dhdinfo->adapter;


	/* Update firmware and nvram path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
	/* NOTE(review): the dhd_download_fw_on_driverload guard is deliberately
	 * commented out here so the kernel-config defaults always apply.
	 */
//	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_FW_PATH
		fw = CONFIG_BCMDHD_FW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
		nv = CONFIG_BCMDHD_NVRAM_PATH;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
//	}

	/* check if we need to initialize the path: adapter info is consulted
	 * only while the stored path is still empty (first call).
	 */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;

	}
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	}
	if (dhdinfo->clm_path[0] == '\0') {
		if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
			clm = adapter->clm_path;
	}
	if (dhdinfo->conf_path[0] == '\0') {
		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
			conf = adapter->conf_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	if (firmware_path[0] != '\0')
		fw = firmware_path;
	if (nvram_path[0] != '\0')
		nv = nvram_path;
	if (clm_path[0] != '\0')
		clm = clm_path;
	if (config_path[0] != '\0')
		conf = config_path;

	/* Copy each winning candidate into dhdinfo, rejecting over-long paths.
	 * The length check above guarantees strncpy leaves the destination
	 * NUL-terminated; a trailing '\n' (e.g. from sysfs writes) is stripped.
	 */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->fw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
		if (dhdinfo->fw_path[fw_len-1] == '\n')
			dhdinfo->fw_path[fw_len-1] = '\0';
	}
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= sizeof(dhdinfo->nv_path)) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
		if (dhdinfo->nv_path[nv_len-1] == '\n')
			dhdinfo->nv_path[nv_len-1] = '\0';
	}
	if (clm && clm[0] != '\0') {
		clm_len = strlen(clm);
		if (clm_len >= sizeof(dhdinfo->clm_path)) {
			DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
		if (dhdinfo->clm_path[clm_len-1] == '\n')
			dhdinfo->clm_path[clm_len-1] = '\0';
	}
	if (conf && conf[0] != '\0') {
		conf_len = strlen(conf);
		if (conf_len >= sizeof(dhdinfo->conf_path)) {
			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
		if (dhdinfo->conf_path[conf_len-1] == '\n')
			dhdinfo->conf_path[conf_len-1] = '\0';
	}

#if 0
	/* clear the path in module parameter */
	if (dhd_download_fw_on_driverload) {
		firmware_path[0] = '\0';
		nvram_path[0] = '\0';
		clm_path[0] = '\0';
		config_path[0] = '\0';
	}
#endif

#ifndef BCMEMBEDIMAGE
	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
		return FALSE;
	}
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
		return FALSE;
	}
#endif /* BCMEMBEDIMAGE */

	return TRUE;
}
7662
7663#ifdef CUSTOMER_HW4_DEBUG
/*
 * Sanity-check that the chip id reported by the bus matches the chip the
 * driver was built for (exactly one BCMxxxx_CHIP define selects
 * config_chipid at compile time). Returns TRUE on a match, FALSE on a
 * mismatch or when no known chip define is configured. BCM4355 silicon is
 * accepted by a BCM4359 build as a special case.
 */
bool dhd_validate_chipid(dhd_pub_t *dhdp)
{
	uint chipid = dhd_bus_chip_id(dhdp);
	uint config_chipid;

#ifdef BCM4359_CHIP
	config_chipid = BCM4359_CHIP_ID;
#elif defined(BCM4358_CHIP)
	config_chipid = BCM4358_CHIP_ID;
#elif defined(BCM4354_CHIP)
	config_chipid = BCM4354_CHIP_ID;
#elif defined(BCM4356_CHIP)
	config_chipid = BCM4356_CHIP_ID;
#elif defined(BCM4339_CHIP)
	config_chipid = BCM4339_CHIP_ID;
#elif defined(BCM43349_CHIP)
	config_chipid = BCM43349_CHIP_ID;
#elif defined(BCM4335_CHIP)
	config_chipid = BCM4335_CHIP_ID;
#elif defined(BCM43241_CHIP)
	config_chipid = BCM4324_CHIP_ID;
#elif defined(BCM4330_CHIP)
	config_chipid = BCM4330_CHIP_ID;
#elif defined(BCM43430_CHIP)
	config_chipid = BCM43430_CHIP_ID;
#elif defined(BCM4334W_CHIP)
	config_chipid = BCM43342_CHIP_ID;
#elif defined(BCM43455_CHIP)
	config_chipid = BCM4345_CHIP_ID;
#elif defined(BCM43012_CHIP_)
	config_chipid = BCM43012_CHIP_ID;
#else
	/* No recognized chip define: fail closed. */
	DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
		" please add CONFIG_BCMXXXX into the Kernel and"
		" BCMXXXX_CHIP definition into the DHD driver\n",
		__FUNCTION__));
	config_chipid = 0;

	return FALSE;
#endif /* BCM4354_CHIP */

#if defined(BCM4359_CHIP)
	/* 4355 is accepted by the 4359 driver build */
	if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4359_CHIP */

	return config_chipid == chipid;
}
7713#endif /* CUSTOMER_HW4_DEBUG */
7714
7715int
7716dhd_bus_start(dhd_pub_t *dhdp)
7717{
7718 int ret = -1;
7719 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
7720 unsigned long flags;
7721
7722 ASSERT(dhd);
7723
7724 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
7725
7726 DHD_PERIM_LOCK(dhdp);
7727
7728 /* try to download image and nvram to the dongle */
7729 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
7730 /* Indicate FW Download has not yet done */
7731 dhd->pub.is_fw_download_done = FALSE;
7732 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
7733 __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
7734 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
08dfb6c4 7735 dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
ef6a5fee
RC
7736 if (ret < 0) {
7737 DHD_ERROR(("%s: failed to download firmware %s\n",
7738 __FUNCTION__, dhd->fw_path));
7739 DHD_PERIM_UNLOCK(dhdp);
7740 return ret;
7741 }
7742 /* Indicate FW Download has succeeded */
7743 dhd->pub.is_fw_download_done = TRUE;
7744 }
7745 if (dhd->pub.busstate != DHD_BUS_LOAD) {
7746 DHD_PERIM_UNLOCK(dhdp);
7747 return -ENETDOWN;
7748 }
7749
7750 dhd_os_sdlock(dhdp);
7751
7752 /* Start the watchdog timer */
7753 dhd->pub.tickcnt = 0;
7754 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
7755 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
7756
7757 /* Bring up the bus */
7758 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
7759
7760 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
7761 dhd_os_sdunlock(dhdp);
7762 DHD_PERIM_UNLOCK(dhdp);
7763 return ret;
7764 }
7765#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7766#if defined(BCMPCIE_OOB_HOST_WAKE)
7767 dhd_os_sdunlock(dhdp);
7768#endif /* BCMPCIE_OOB_HOST_WAKE */
7769 /* Host registration for OOB interrupt */
7770 if (dhd_bus_oob_intr_register(dhdp)) {
7771 /* deactivate timer and wait for the handler to finish */
7772#if !defined(BCMPCIE_OOB_HOST_WAKE)
7773 DHD_GENERAL_LOCK(&dhd->pub, flags);
7774 dhd->wd_timer_valid = FALSE;
7775 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7776 del_timer_sync(&dhd->timer);
7777
7778 dhd_os_sdunlock(dhdp);
7779#endif /* !BCMPCIE_OOB_HOST_WAKE */
7780 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7781 DHD_PERIM_UNLOCK(dhdp);
7782 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7783 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
7784 return -ENODEV;
7785 }
7786
7787#if defined(BCMPCIE_OOB_HOST_WAKE)
7788 dhd_os_sdlock(dhdp);
7789 dhd_bus_oob_intr_set(dhdp, TRUE);
7790#else
7791 /* Enable oob at firmware */
7792 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7793#endif /* BCMPCIE_OOB_HOST_WAKE */
7794#elif defined(FORCE_WOWLAN)
7795 /* Enable oob at firmware */
7796 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
7797#endif
7798#ifdef PCIE_FULL_DONGLE
7799 {
7800 /* max_h2d_rings includes H2D common rings */
7801 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
7802
7803 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
7804 max_h2d_rings));
7805 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
7806 dhd_os_sdunlock(dhdp);
7807 DHD_PERIM_UNLOCK(dhdp);
7808 return ret;
7809 }
7810 }
7811#endif /* PCIE_FULL_DONGLE */
7812
7813 /* Do protocol initialization necessary for IOCTL/IOVAR */
7814#ifdef PCIE_FULL_DONGLE
7815 dhd_os_sdunlock(dhdp);
7816#endif /* PCIE_FULL_DONGLE */
7817 ret = dhd_prot_init(&dhd->pub);
7818 if (unlikely(ret) != BCME_OK) {
7819 DHD_PERIM_UNLOCK(dhdp);
7820 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7821 return ret;
7822 }
7823#ifdef PCIE_FULL_DONGLE
7824 dhd_os_sdlock(dhdp);
7825#endif /* PCIE_FULL_DONGLE */
7826
7827 /* If bus is not ready, can't come up */
7828 if (dhd->pub.busstate != DHD_BUS_DATA) {
7829 DHD_GENERAL_LOCK(&dhd->pub, flags);
7830 dhd->wd_timer_valid = FALSE;
7831 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7832 del_timer_sync(&dhd->timer);
7833 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
7834 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7835 dhd_os_sdunlock(dhdp);
7836 DHD_PERIM_UNLOCK(dhdp);
7837 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7838 return -ENODEV;
7839 }
7840
7841 dhd_os_sdunlock(dhdp);
7842
7843 /* Bus is ready, query any dongle information */
7844 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
7845 DHD_GENERAL_LOCK(&dhd->pub, flags);
7846 dhd->wd_timer_valid = FALSE;
7847 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
7848 del_timer_sync(&dhd->timer);
7849 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
7850 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
7851 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
7852 DHD_PERIM_UNLOCK(dhdp);
7853 return ret;
7854 }
7855
7856#ifdef ARP_OFFLOAD_SUPPORT
7857 if (dhd->pend_ipaddr) {
7858#ifdef AOE_IP_ALIAS_SUPPORT
7859 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
7860#endif /* AOE_IP_ALIAS_SUPPORT */
7861 dhd->pend_ipaddr = 0;
7862 }
7863#endif /* ARP_OFFLOAD_SUPPORT */
7864
7865 DHD_PERIM_UNLOCK(dhdp);
7866 return 0;
7867}
7868
7869#ifdef WLTDLS
/*
 * Core TDLS configuration: toggles "tdls_enable" in firmware (skipped when
 * the cached state already matches), then always (re)programs
 * "tdls_auto_op". When auto mode is on, also pushes the idle-time and
 * RSSI high/low thresholds. 'mac' is currently unused (BCM_REFERENCE).
 * Returns 0 / negative BCME error; BCME_ERROR when firmware lacks TDLS.
 */
int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 tdls = tdls_on;
	int ret = 0;
	uint32 tdls_auto_op = 0;
	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
	BCM_REFERENCE(mac);
	if (!FW_SUPPORTED(dhd, tdls))
		return BCME_ERROR;

	/* Enable state already matches: only the auto-op settings need work */
	if (dhd->tdls_enable == tdls_on)
		goto auto_mode;
	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
		goto exit;
	}
	/* Cache the new enable state only after firmware accepted it */
	dhd->tdls_enable = tdls_on;
auto_mode:

	tdls_auto_op = auto_on;
	bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
		iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
		sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
		goto exit;
	}

	if (tdls_auto_op) {
		/* Auto mode: program idle timeout and RSSI setup/teardown thresholds */
		bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
			sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
			sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
			sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
			sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
	}

exit:
	return ret;
}
7927
7928int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
7929{
7930 dhd_info_t *dhd = DHD_DEV_INFO(dev);
7931 int ret = 0;
7932 if (dhd)
7933 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
7934 else
7935 ret = BCME_ERROR;
7936 return ret;
7937}
7938int
7939dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
7940{
7941 char iovbuf[WLC_IOCTL_SMLEN];
7942 int ret = 0;
7943 bool auto_on = false;
7944 uint32 mode = wfd_mode;
7945
7946#ifdef ENABLE_TDLS_AUTO_MODE
7947 if (wfd_mode) {
7948 auto_on = false;
7949 } else {
7950 auto_on = true;
7951 }
7952#else
7953 auto_on = false;
7954#endif /* ENABLE_TDLS_AUTO_MODE */
7955 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
7956 if (ret < 0) {
7957 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
7958 return ret;
7959 }
7960
7961
7962 bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
7963 iovbuf, sizeof(iovbuf));
7964 if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7965 sizeof(iovbuf), TRUE, 0)) < 0) &&
7966 (ret != BCME_UNSUPPORTED)) {
7967 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
7968 return ret;
7969 }
7970
7971 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
7972 if (ret < 0) {
7973 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
7974 return ret;
7975 }
7976
7977 dhd->tdls_mode = mode;
7978 return ret;
7979}
7980#ifdef PCIE_FULL_DONGLE
/*
 * Maintain the driver-side TDLS peer table (singly linked list headed at
 * dhdp->peer_tbl.node).
 *
 * connect == TRUE : insert 'da' at the list head unless already present.
 * connect == FALSE: remove 'da', deleting its per-peer flow rings first;
 *                   logs an error when the entry is absent.
 *
 * NOTE(review): 'sa' is filled from the interface MAC but never used
 * afterwards in this function — appears vestigial; confirm before removal.
 */
void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
	tdls_peer_node_t *cur = dhdp->peer_tbl.node;
	tdls_peer_node_t *new = NULL, *prev = NULL;
	dhd_if_t *dhdif;
	uint8 sa[ETHER_ADDR_LEN];
	int ifidx = dhd_net2idx(dhd, dev);

	if (ifidx == DHD_BAD_IF)
		return;

	dhdif = dhd->iflist[ifidx];
	memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);

	if (connect) {
		/* Reject duplicates before allocating a new node */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_ERROR(("%s: TDLS Peer exist already %d\n",
					__FUNCTION__, __LINE__));
				return;
			}
			cur = cur->next;
		}

		new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
		if (new == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
			return;
		}
		memcpy(new->addr, da, ETHER_ADDR_LEN);
		/* Push onto the head of the peer list */
		new->next = dhdp->peer_tbl.node;
		dhdp->peer_tbl.node = new;
		dhdp->peer_tbl.tdls_peer_count++;

	} else {
		/* Unlink and free the matching node; prev tracks the link to patch */
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
				if (prev)
					prev->next = cur->next;
				else
					dhdp->peer_tbl.node = cur->next;
				MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
				dhdp->peer_tbl.tdls_peer_count--;
				return;
			}
			prev = cur;
			cur = cur->next;
		}
		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
	}
}
8035#endif /* PCIE_FULL_DONGLE */
8036#endif
8037
8038bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
8039{
8040 if (!dhd)
8041 return FALSE;
8042
8043 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
8044 return TRUE;
8045 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
8046 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
8047 return TRUE;
8048 else
8049 return FALSE;
8050}
8051#if !defined(AP) && defined(WLP2P)
8052/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
8053 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
8054 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
8055 * would still be named as fw_bcmdhd_apsta.
8056 */
/*
 * Probe firmware for P2P/concurrency support and return the matching
 * DHD_FLAG_* bit set (single-chan always, plus multi-chan when VSDB is
 * supported, plus RSDB/MP2P when advertised). Returns 0 when the driver
 * is already pinned to HOSTAP/MFG mode, when firmware lacks P2P, when
 * the "p2p" iovar query fails or reports 0, or when neither P2P
 * interface scheme is compiled in.
 */
uint32
dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
{
	int32 ret = 0;
	char buf[WLC_IOCTL_SMLEN];
	bool mchan_supported = FALSE;
	/* if dhd->op_mode is already set for HOSTAP and Manufacturing
	 * test mode, that means we only will use the mode as it is
	 */
	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
		return 0;
	if (FW_SUPPORTED(dhd, vsdb)) {
		mchan_supported = TRUE;
	}
	if (!FW_SUPPORTED(dhd, p2p)) {
		DHD_TRACE(("Chip does not support p2p\n"));
		return 0;
	} else {
		/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
		memset(buf, 0, sizeof(buf));
		bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
			FALSE, 0)) < 0) {
			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
			return 0;
		} else {
			if (buf[0] == 1) {
				/* By default, chip supports single chan concurrency,
				 * now lets check for mchan
				 */
				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
				if (mchan_supported)
					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
				if (FW_SUPPORTED(dhd, rsdb)) {
					ret |= DHD_FLAG_RSDB_MODE;
				}
				if (FW_SUPPORTED(dhd, mp2p)) {
					ret |= DHD_FLAG_MP2P_MODE;
				}
				/* Capabilities are only honoured when a P2P
				 * interface scheme is compiled in.
				 */
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
				return ret;
#else
				return 0;
#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
			}
		}
	}
	return 0;
}
8106#endif
8107
8108#ifdef SUPPORT_AP_POWERSAVE
8109#define RXCHAIN_PWRSAVE_PPS 10
8110#define RXCHAIN_PWRSAVE_QUIET_TIME 10
8111#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
8112int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
8113{
8114 char iovbuf[128];
8115 int32 pps = RXCHAIN_PWRSAVE_PPS;
8116 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
8117 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
8118
8119 if (enable) {
8120 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8121 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8122 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8123 DHD_ERROR(("Failed to enable AP power save\n"));
8124 }
8125 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
8126 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8127 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8128 DHD_ERROR(("Failed to set pps\n"));
8129 }
8130 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
8131 4, iovbuf, sizeof(iovbuf));
8132 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8133 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8134 DHD_ERROR(("Failed to set quiet time\n"));
8135 }
8136 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
8137 4, iovbuf, sizeof(iovbuf));
8138 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8139 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8140 DHD_ERROR(("Failed to set stas assoc check\n"));
8141 }
8142 } else {
8143 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
8144 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
8145 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
8146 DHD_ERROR(("Failed to disable AP power save\n"));
8147 }
8148 }
8149
8150 return 0;
8151}
8152#endif /* SUPPORT_AP_POWERSAVE */
8153
8154
8155int
8156dhd_preinit_ioctls(dhd_pub_t *dhd)
8157{
8158 int ret = 0;
8159 char eventmask[WL_EVENTING_MASK_LEN];
8160 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
8161 uint32 buf_key_b4_m4 = 1;
8162#ifndef WL_CFG80211
8163 u32 up = 0;
8164#endif
8165 uint8 msglen;
8166 eventmsgs_ext_t *eventmask_msg = NULL;
8167 char* iov_buf = NULL;
8168 int ret2 = 0;
8169#if defined(CUSTOM_AMPDU_BA_WSIZE)
8170 uint32 ampdu_ba_wsize = 0;
8171#endif
8172#if defined(CUSTOM_AMPDU_MPDU)
8173 int32 ampdu_mpdu = 0;
8174#endif
8175#if defined(CUSTOM_AMPDU_RELEASE)
8176 int32 ampdu_release = 0;
8177#endif
8178#if defined(CUSTOM_AMSDU_AGGSF)
8179 int32 amsdu_aggsf = 0;
8180#endif
8181#ifdef SUPPORT_SENSORHUB
8182 int32 shub_enable = 0;
8183#endif /* SUPPORT_SENSORHUB */
8184#if defined(BCMSDIO)
8185#ifdef PROP_TXSTATUS
8186 int wlfc_enable = TRUE;
8187#ifndef DISABLE_11N
8188 uint32 hostreorder = 1;
8189 uint wl_down = 1;
8190#endif /* DISABLE_11N */
8191#endif /* PROP_TXSTATUS */
8192#endif
8193#ifdef PCIE_FULL_DONGLE
8194 uint32 wl_ap_isolate;
8195#endif /* PCIE_FULL_DONGLE */
8196
8197#if defined(BCMSDIO)
8198 /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
8199 uint32 frameburst = 0;
8200#else
8201 uint32 frameburst = 1;
8202#endif /* BCMSDIO */
5967f664 8203
ef6a5fee
RC
8204#ifdef DHD_ENABLE_LPC
8205 uint32 lpc = 1;
8206#endif /* DHD_ENABLE_LPC */
8207 uint power_mode = PM_FAST;
8208#if defined(BCMSDIO)
8209 uint32 dongle_align = DHD_SDALIGN;
8210 uint32 glom = CUSTOM_GLOM_SETTING;
8211#endif /* defined(BCMSDIO) */
8212#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8213 uint32 credall = 1;
8214#endif
8215 uint bcn_timeout = dhd->conf->bcn_timeout;
8216#ifdef ENABLE_BCN_LI_BCN_WAKEUP
8217 uint32 bcn_li_bcn = 1;
8218#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
8219 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
8220#if defined(ARP_OFFLOAD_SUPPORT)
8221 int arpoe = 1;
8222#endif
8223 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
8224 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
8225 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
8226 char buf[WLC_IOCTL_SMLEN];
8227 char *ptr;
8228 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
8229#ifdef ROAM_ENABLE
8230 uint roamvar = 0;
8231 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
8232 int roam_scan_period[2] = {10, WLC_BAND_ALL};
8233 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
8234#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
8235 int roam_fullscan_period = 60;
8236#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8237 int roam_fullscan_period = 120;
8238#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
8239#else
8240#ifdef DISABLE_BUILTIN_ROAM
8241 uint roamvar = 1;
8242#endif /* DISABLE_BUILTIN_ROAM */
8243#endif /* ROAM_ENABLE */
8244
8245#if defined(SOFTAP)
8246 uint dtim = 1;
8247#endif
8248#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
8249 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
8250 struct ether_addr p2p_ea;
8251#endif
8252#ifdef SOFTAP_UAPSD_OFF
8253 uint32 wme_apsd = 0;
8254#endif /* SOFTAP_UAPSD_OFF */
8255#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
8256 uint32 apsta = 1; /* Enable APSTA mode */
8257#elif defined(SOFTAP_AND_GC)
8258 uint32 apsta = 0;
8259 int ap_mode = 1;
8260#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
8261#ifdef GET_CUSTOM_MAC_ENABLE
8262 struct ether_addr ea_addr;
8263#endif /* GET_CUSTOM_MAC_ENABLE */
8264
8265#ifdef DISABLE_11N
8266 uint32 nmode = 0;
8267#endif /* DISABLE_11N */
8268
8269#ifdef USE_WL_TXBF
8270 uint32 txbf = 1;
8271#endif /* USE_WL_TXBF */
8272#if defined(PROP_TXSTATUS)
8273#ifdef USE_WFA_CERT_CONF
8274 uint32 proptx = 0;
8275#endif /* USE_WFA_CERT_CONF */
8276#endif /* PROP_TXSTATUS */
8277#ifdef CUSTOM_PSPRETEND_THR
8278 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
8279#endif
8280 uint32 rsdb_mode = 0;
8281#ifdef ENABLE_TEMP_THROTTLING
8282 wl_temp_control_t temp_control;
8283#endif /* ENABLE_TEMP_THROTTLING */
8284#ifdef DISABLE_PRUNED_SCAN
8285 uint32 scan_features = 0;
8286#endif /* DISABLE_PRUNED_SCAN */
8287#ifdef CUSTOM_EVENT_PM_WAKE
8288 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
8289#endif /* CUSTOM_EVENT_PM_WAKE */
8290#ifdef PKT_FILTER_SUPPORT
8291 dhd_pkt_filter_enable = TRUE;
8292#endif /* PKT_FILTER_SUPPORT */
8293#ifdef WLTDLS
8294 dhd->tdls_enable = FALSE;
8295 dhd_tdls_set_mode(dhd, false);
8296#endif /* WLTDLS */
8297 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
8298 DHD_TRACE(("Enter %s\n", __FUNCTION__));
8299
08dfb6c4 8300 dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", dhd->conf->band, 0, FALSE);
ef6a5fee
RC
8301#ifdef DHDTCPACK_SUPPRESS
8302 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
8303 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
8304#endif
8305
8306 dhd->op_mode = 0;
8307#ifdef CUSTOMER_HW4_DEBUG
8308 if (!dhd_validate_chipid(dhd)) {
8309 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
8310 __FUNCTION__, dhd_bus_chip_id(dhd)));
8311#ifndef SUPPORT_MULTIPLE_CHIPS
8312 ret = BCME_BADARG;
8313 goto done;
8314#endif /* !SUPPORT_MULTIPLE_CHIPS */
8315 }
8316#endif /* CUSTOMER_HW4_DEBUG */
8317 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8318 (op_mode == DHD_FLAG_MFG_MODE)) {
8319#ifdef DHD_PCIE_RUNTIMEPM
8320 /* Disable RuntimePM in mfg mode */
8321 DHD_DISABLE_RUNTIME_PM(dhd);
8322 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
8323#endif /* DHD_PCIE_RUNTIME_PM */
8324 /* Check and adjust IOCTL response timeout for Manufacturing firmware */
8325 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
8326 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
8327 __FUNCTION__));
8328 } else {
8329 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
8330 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
8331 }
8332#ifdef GET_CUSTOM_MAC_ENABLE
8333 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
8334 if (!ret) {
8335 memset(buf, 0, sizeof(buf));
8336 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
8337 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8338 if (ret < 0) {
8339 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
8340 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
ef6a5fee 8341 }
08dfb6c4 8342 }
ef6a5fee 8343#endif /* GET_CUSTOM_MAC_ENABLE */
08dfb6c4
RC
8344 /* Get the default device MAC address directly from firmware */
8345 memset(buf, 0, sizeof(buf));
8346 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
8347 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
8348 FALSE, 0)) < 0) {
8349 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
8350 ret = BCME_NOTUP;
8351 goto done;
8352 }
8353 /* Update public MAC address after reading from Firmware */
8354 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
ef6a5fee 8355
08dfb6c4
RC
8356 if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
8357 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
8358 goto done;
ef6a5fee 8359 }
ef6a5fee
RC
8360
8361 /* get capabilities from firmware */
8362 {
8363 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
8364 memset(dhd->fw_capabilities, 0, cap_buf_size);
8365 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
8366 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
8367 (cap_buf_size - 1), FALSE, 0)) < 0)
8368 {
8369 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
8370 __FUNCTION__, ret));
8371 return 0;
8372 }
8373
8374 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
8375 dhd->fw_capabilities[0] = ' ';
8376 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
8377 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
8378 }
8379
8380 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
8381 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
8382#ifdef SET_RANDOM_MAC_SOFTAP
8383 uint rand_mac;
8384#endif /* SET_RANDOM_MAC_SOFTAP */
8385 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
8386#if defined(ARP_OFFLOAD_SUPPORT)
8387 arpoe = 0;
8388#endif
8389#ifdef PKT_FILTER_SUPPORT
8390 dhd_pkt_filter_enable = FALSE;
8391#endif
8392#ifdef SET_RANDOM_MAC_SOFTAP
8393 SRANDOM32((uint)jiffies);
8394 rand_mac = RANDOM32();
8395 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
8396 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
8397 iovbuf[2] = (unsigned char)vendor_oui;
8398 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
8399 iovbuf[4] = (unsigned char)(rand_mac >> 8);
8400 iovbuf[5] = (unsigned char)(rand_mac >> 16);
8401
8402 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
8403 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
8404 if (ret < 0) {
8405 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
8406 } else
8407 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
8408#endif /* SET_RANDOM_MAC_SOFTAP */
8409#if !defined(AP) && defined(WL_CFG80211)
8410 /* Turn off MPC in AP mode */
8411 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8412 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8413 sizeof(iovbuf), TRUE, 0)) < 0) {
8414 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
8415 }
8416#endif
8417#ifdef USE_DYNAMIC_F2_BLKSIZE
8418 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8419#endif /* USE_DYNAMIC_F2_BLKSIZE */
8420#ifdef SUPPORT_AP_POWERSAVE
8421 dhd_set_ap_powersave(dhd, 0, TRUE);
8422#endif /* SUPPORT_AP_POWERSAVE */
8423#ifdef SOFTAP_UAPSD_OFF
8424 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
8425 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8426 sizeof(iovbuf), TRUE, 0)) < 0) {
8427 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
8428 __FUNCTION__, ret));
8429 }
8430#endif /* SOFTAP_UAPSD_OFF */
8431 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
8432 (op_mode == DHD_FLAG_MFG_MODE)) {
8433#if defined(ARP_OFFLOAD_SUPPORT)
8434 arpoe = 0;
8435#endif /* ARP_OFFLOAD_SUPPORT */
8436#ifdef PKT_FILTER_SUPPORT
8437 dhd_pkt_filter_enable = FALSE;
8438#endif /* PKT_FILTER_SUPPORT */
8439 dhd->op_mode = DHD_FLAG_MFG_MODE;
8440#ifdef USE_DYNAMIC_F2_BLKSIZE
8441 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
8442#endif /* USE_DYNAMIC_F2_BLKSIZE */
8443 if (FW_SUPPORTED(dhd, rsdb)) {
8444 rsdb_mode = 0;
8445 bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
8446 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8447 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8448 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
8449 __FUNCTION__, ret));
8450 }
8451 }
8452 } else {
8453 uint32 concurrent_mode = 0;
8454 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
8455 (op_mode == DHD_FLAG_P2P_MODE)) {
8456#if defined(ARP_OFFLOAD_SUPPORT)
8457 arpoe = 0;
8458#endif
8459#ifdef PKT_FILTER_SUPPORT
8460 dhd_pkt_filter_enable = FALSE;
8461#endif
8462 dhd->op_mode = DHD_FLAG_P2P_MODE;
8463 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
8464 (op_mode == DHD_FLAG_IBSS_MODE)) {
8465 dhd->op_mode = DHD_FLAG_IBSS_MODE;
8466 } else
8467 dhd->op_mode = DHD_FLAG_STA_MODE;
8468#if !defined(AP) && defined(WLP2P)
8469 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
8470 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
8471#if defined(ARP_OFFLOAD_SUPPORT)
8472 arpoe = 1;
8473#endif
8474 dhd->op_mode |= concurrent_mode;
8475 }
8476
8477 /* Check if we are enabling p2p */
8478 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8479 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8480 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8481 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8482 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
8483 }
8484
8485#if defined(SOFTAP_AND_GC)
8486 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
8487 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
8488 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
8489 }
8490#endif
8491 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
8492 ETHER_SET_LOCALADDR(&p2p_ea);
8493 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
8494 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
8495 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8496 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8497 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
8498 } else {
8499 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
8500 }
8501 }
8502#else
8503 (void)concurrent_mode;
8504#endif
8505 }
08dfb6c4
RC
8506#ifdef BCMSDIO
8507 if (dhd->conf->sd_f2_blocksize)
8508 dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
8509#endif
ef6a5fee
RC
8510
8511#ifdef RSDB_MODE_FROM_FILE
8512 (void)dhd_rsdb_mode_from_file(dhd);
8513#endif /* RSDB_MODE_FROM_FILE */
8514
8515#ifdef DISABLE_PRUNED_SCAN
8516 if (FW_SUPPORTED(dhd, rsdb)) {
8517 memset(iovbuf, 0, sizeof(iovbuf));
8518 bcm_mkiovar("scan_features", (char *)&scan_features,
8519 4, iovbuf, sizeof(iovbuf));
8520 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
8521 iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8522 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
8523 __FUNCTION__, ret));
8524 } else {
8525 memcpy(&scan_features, iovbuf, 4);
8526 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
8527 memset(iovbuf, 0, sizeof(iovbuf));
8528 bcm_mkiovar("scan_features", (char *)&scan_features,
8529 4, iovbuf, sizeof(iovbuf));
8530 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8531 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8532 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
8533 __FUNCTION__, ret));
8534 }
8535 }
8536 }
8537#endif /* DISABLE_PRUNED_SCAN */
8538
8539 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
8540 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
8541 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
8542 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
8543 dhd->info->rxthread_enabled = FALSE;
8544 else
8545 dhd->info->rxthread_enabled = TRUE;
8546 #endif
8547 /* Set Country code */
8548 if (dhd->dhd_cspec.ccode[0] != 0) {
8549 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
8550 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
8551 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
8552 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8553 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
8554 } else {
8555 dhd_conf_set_country(dhd);
8556 dhd_conf_fix_country(dhd);
8557 }
08dfb6c4 8558 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "autocountry", dhd->conf->autocountry, 0, FALSE);
ef6a5fee
RC
8559 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
8560
8561
8562 /* Set Listen Interval */
8563 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
8564 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8565 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
8566
8567#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
8568#ifdef USE_WFA_CERT_CONF
8569 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
8570 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
8571 }
8572#endif /* USE_WFA_CERT_CONF */
8573 /* Disable built-in roaming to allow the external supplicant to take care of roaming */
8574 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
8575 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8576#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
8577#if defined(ROAM_ENABLE)
8578 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
8579 sizeof(roam_trigger), TRUE, 0)) < 0)
8580 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
8581 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
8582 sizeof(roam_scan_period), TRUE, 0)) < 0)
8583 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
8584 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
8585 sizeof(roam_delta), TRUE, 0)) < 0)
8586 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
8587 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
8588 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
8589 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
8590#endif /* ROAM_ENABLE */
8591 dhd_conf_set_roam(dhd);
8592
8593#ifdef CUSTOM_EVENT_PM_WAKE
8594 bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
8595 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8596 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
8597 }
8598#endif /* CUSTOM_EVENT_PM_WAKE */
8599#ifdef WLTDLS
8600#ifdef ENABLE_TDLS_AUTO_MODE
8601 /* by default TDLS on and auto mode on */
8602 _dhd_tdls_enable(dhd, true, true, NULL);
8603#else
8604 /* by default TDLS on and auto mode off */
8605 _dhd_tdls_enable(dhd, true, false, NULL);
8606#endif /* ENABLE_TDLS_AUTO_MODE */
8607#endif /* WLTDLS */
8608
8609#ifdef DHD_ENABLE_LPC
8610 /* Set lpc 1 */
8611 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8612 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8613 sizeof(iovbuf), TRUE, 0)) < 0) {
8614 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
8615
8616 if (ret == BCME_NOTDOWN) {
8617 uint wl_down = 1;
8618 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
8619 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
8620 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
8621
8622 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
8623 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8624 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
8625 }
8626 }
8627#endif /* DHD_ENABLE_LPC */
08dfb6c4 8628 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "lpc", dhd->conf->lpc, 0, FALSE);
ef6a5fee
RC
8629
8630 /* Set PowerSave mode */
8631 if (dhd->conf->pm >= 0)
8632 power_mode = dhd->conf->pm;
8633 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
08dfb6c4 8634 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "pm2_sleep_ret", dhd->conf->pm2_sleep_ret, 0, FALSE);
ef6a5fee
RC
8635
8636#if defined(BCMSDIO)
8637 /* Match Host and Dongle rx alignment */
8638 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
8639 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8640
8641#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
8642 /* enable credall to reduce the chance of no bus credit happened. */
8643 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
8644 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8645#endif
8646
8647#ifdef USE_WFA_CERT_CONF
8648 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
8649 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
8650 }
8651#endif /* USE_WFA_CERT_CONF */
8652 if (glom != DEFAULT_GLOM_VALUE) {
8653 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
8654 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
8655 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8656 }
8657#endif /* defined(BCMSDIO) */
8658
8659 /* Setup timeout if Beacons are lost and roam is off to report link down */
8660 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
8661 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8662 /* Setup assoc_retry_max count to reconnect target AP in dongle */
8663 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
8664 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8665#if defined(AP) && !defined(WLP2P)
8666 /* Turn off MPC in AP mode */
8667 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
8668 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8669 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
8670 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8671#endif /* defined(AP) && !defined(WLP2P) */
8672 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
08dfb6c4
RC
8673 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
8674 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
8675 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "stbc_tx", dhd->conf->stbc, 0, FALSE);
8676 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "stbc_rx", dhd->conf->stbc, 0, FALSE);
8677 dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", dhd->conf->srl, 0, TRUE);
8678 dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", dhd->conf->lrl, 0, FALSE);
8679 dhd_conf_set_intiovar(dhd, WLC_SET_SPECT_MANAGMENT, "WLC_SET_SPECT_MANAGMENT", dhd->conf->spect, 0, FALSE);
8680 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
8681 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "vhtmode", dhd->conf->vhtmode, 0, TRUE);
8682#ifdef IDHCPC
8683 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", dhd->conf->dhcpc_enable, 0, FALSE);
8684#endif
ef6a5fee
RC
8685 dhd_conf_set_bw_cap(dhd);
8686
8687#ifdef MIMO_ANT_SETTING
8688 dhd_sel_ant_from_file(dhd);
8689#endif /* MIMO_ANT_SETTING */
8690
8691#if defined(SOFTAP)
8692 if (ap_fw_loaded == TRUE) {
8693 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
8694 }
8695#endif
8696
8697#if defined(KEEP_ALIVE)
8698 {
8699 /* Set Keep Alive : be sure to use FW with -keepalive */
8700 int res;
8701
8702#if defined(SOFTAP)
8703 if (ap_fw_loaded == FALSE)
8704#endif
8705 if (!(dhd->op_mode &
8706 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
8707 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
8708 DHD_ERROR(("%s set keeplive failed %d\n",
8709 __FUNCTION__, res));
8710 }
8711 }
8712#endif /* defined(KEEP_ALIVE) */
8713
8714#ifdef USE_WL_TXBF
8715 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
8716 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8717 sizeof(iovbuf), TRUE, 0)) < 0) {
8718 DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
8719 }
8720#endif /* USE_WL_TXBF */
08dfb6c4 8721 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", dhd->conf->txbf, 0, FALSE);
ef6a5fee
RC
8722
8723#ifdef USE_WFA_CERT_CONF
8724#ifdef USE_WL_FRAMEBURST
8725 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
8726 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
8727 }
8728#endif /* USE_WL_FRAMEBURST */
8729#ifdef DISABLE_FRAMEBURST_VSDB
8730 g_frameburst = frameburst;
8731#endif /* DISABLE_FRAMEBURST_VSDB */
8732#endif /* USE_WFA_CERT_CONF */
8733#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
8734 /* Disable Framebursting for SoftAP */
8735 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
8736 frameburst = 0;
8737 }
8738#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
8739 /* Set frameburst to value */
8740 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
8741 sizeof(frameburst), TRUE, 0)) < 0) {
8742 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
8743 }
08dfb6c4 8744 dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", dhd->conf->frameburst, 0, FALSE);
ef6a5fee
RC
8745#if defined(CUSTOM_AMPDU_BA_WSIZE)
8746 /* Set ampdu ba wsize to 64 or 16 */
8747#ifdef CUSTOM_AMPDU_BA_WSIZE
8748 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
8749#endif
8750 if (ampdu_ba_wsize != 0) {
8751 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
8752 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8753 sizeof(iovbuf), TRUE, 0)) < 0) {
8754 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
8755 __FUNCTION__, ampdu_ba_wsize, ret));
8756 }
8757 }
8758#endif
08dfb6c4 8759 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
ef6a5fee
RC
8760
8761 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
8762 if (iov_buf == NULL) {
8763 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
8764 ret = BCME_NOMEM;
8765 goto done;
8766 }
8767#ifdef ENABLE_TEMP_THROTTLING
8768 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
8769 memset(&temp_control, 0, sizeof(temp_control));
8770 temp_control.enable = 1;
8771 temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
8772 bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
8773 sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
8774 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
8775 if (ret < 0) {
8776 DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
8777 __FUNCTION__, ret));
8778 }
8779 }
8780#endif /* ENABLE_TEMP_THROTTLING */
8781#if defined(CUSTOM_AMPDU_MPDU)
8782 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
8783 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
8784 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
8785 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8786 sizeof(iovbuf), TRUE, 0)) < 0) {
8787 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
8788 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
8789 }
8790 }
8791#endif /* CUSTOM_AMPDU_MPDU */
8792
8793#if defined(CUSTOM_AMPDU_RELEASE)
8794 ampdu_release = CUSTOM_AMPDU_RELEASE;
8795 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
8796 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
8797 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8798 sizeof(iovbuf), TRUE, 0)) < 0) {
8799 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
8800 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
8801 }
8802 }
8803#endif /* CUSTOM_AMPDU_RELEASE */
8804
8805#if defined(CUSTOM_AMSDU_AGGSF)
8806 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
8807 if (amsdu_aggsf != 0) {
8808 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
8809 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
8810 if (ret < 0) {
8811 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
8812 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
8813 }
8814 }
8815#endif /* CUSTOM_AMSDU_AGGSF */
8816
8817#ifdef CUSTOM_PSPRETEND_THR
8818 /* Set the ps-pretend threshold (comment previously mis-copied from the MPC block) */
8819 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
8820 iovbuf, sizeof(iovbuf));
8821 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8822 sizeof(iovbuf), TRUE, 0)) < 0) {
8823 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
8824 __FUNCTION__, ret));
8825 }
8826#endif
5967f664 8827
ef6a5fee
RC
8828 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
8829 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
8830 sizeof(iovbuf), TRUE, 0)) < 0) {
8831 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
8832 }
8833
8834 /* Read event_msgs mask */
8835 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8836 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
8837 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
8838 goto done;
8839 }
8840 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
8841
8842 /* Setup event_msgs */
8843 setbit(eventmask, WLC_E_SET_SSID);
8844 setbit(eventmask, WLC_E_PRUNE);
8845 setbit(eventmask, WLC_E_AUTH);
8846 setbit(eventmask, WLC_E_AUTH_IND);
8847 setbit(eventmask, WLC_E_ASSOC);
8848 setbit(eventmask, WLC_E_REASSOC);
8849 setbit(eventmask, WLC_E_REASSOC_IND);
8850 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
8851 setbit(eventmask, WLC_E_DEAUTH);
8852 setbit(eventmask, WLC_E_DEAUTH_IND);
8853 setbit(eventmask, WLC_E_DISASSOC_IND);
8854 setbit(eventmask, WLC_E_DISASSOC);
8855 setbit(eventmask, WLC_E_JOIN);
8856 setbit(eventmask, WLC_E_START);
8857 setbit(eventmask, WLC_E_ASSOC_IND);
8858 setbit(eventmask, WLC_E_PSK_SUP);
8859 setbit(eventmask, WLC_E_LINK);
8860 setbit(eventmask, WLC_E_MIC_ERROR);
8861 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
8862 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
8863#ifndef WL_CFG80211
8864 setbit(eventmask, WLC_E_PMKID_CACHE);
8865 setbit(eventmask, WLC_E_TXFAIL);
8866#endif
8867 setbit(eventmask, WLC_E_JOIN_START);
8868// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
8869#ifdef DHD_DEBUG
8870 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
8871#endif
8872#ifdef WLMEDIA_HTSF
8873 setbit(eventmask, WLC_E_HTSFSYNC);
8874#endif /* WLMEDIA_HTSF */
8875#ifdef PNO_SUPPORT
8876 setbit(eventmask, WLC_E_PFN_NET_FOUND);
8877 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
8878 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
8879 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
8880#endif /* PNO_SUPPORT */
8881 /* enable dongle roaming event */
8882 setbit(eventmask, WLC_E_ROAM);
8883 setbit(eventmask, WLC_E_BSSID);
8884#ifdef WLTDLS
8885 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
8886#endif /* WLTDLS */
08dfb6c4
RC
8887#ifdef WL_ESCAN
8888 setbit(eventmask, WLC_E_ESCAN_RESULT);
8889#endif
ef6a5fee
RC
8890#ifdef WL_CFG80211
8891 setbit(eventmask, WLC_E_ESCAN_RESULT);
8892 setbit(eventmask, WLC_E_AP_STARTED);
8893 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
8894 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
8895 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
8896 }
8897#endif /* WL_CFG80211 */
8898
8899#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
8900 if (dhd_logtrace_from_file(dhd)) {
8901 setbit(eventmask, WLC_E_TRACE);
8902 } else {
8903 clrbit(eventmask, WLC_E_TRACE);
8904 }
8905#elif defined(SHOW_LOGTRACE)
8906 setbit(eventmask, WLC_E_TRACE);
8907#else
8908 clrbit(eventmask, WLC_E_TRACE);
8909#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
8910
8911 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
8912#ifdef DHD_LOSSLESS_ROAMING
8913 setbit(eventmask, WLC_E_ROAM_PREP);
8914#endif
8915#ifdef CUSTOM_EVENT_PM_WAKE
8916 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
8917#endif /* CUSTOM_EVENT_PM_WAKE */
8918#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
8919 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
8920#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
8921
8922 /* Write updated Event mask */
8923 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
8924 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
8925 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
8926 goto done;
8927 }
8928
8929 /* make up event mask ext message iovar for event larger than 128 */
8930 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
8931 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
8932 if (eventmask_msg == NULL) {
8933 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
8934 ret = BCME_NOMEM;
8935 goto done;
8936 }
8937 bzero(eventmask_msg, msglen);
8938 eventmask_msg->ver = EVENTMSGS_VER;
8939 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8940
8941 /* Read event_msgs_ext mask */
8942 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
8943 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
8944 if (ret2 == 0) { /* event_msgs_ext must be supported */
8945 bcopy(iov_buf, eventmask_msg, msglen);
8946#ifdef GSCAN_SUPPORT
8947 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
8948 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
8949 setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
8950#endif /* GSCAN_SUPPORT */
8951#ifdef BT_WIFI_HANDOVER
8952 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
8953#endif /* BT_WIFI_HANDOVER */
8954
8955 /* Write updated Event mask */
8956 eventmask_msg->ver = EVENTMSGS_VER;
8957 eventmask_msg->command = EVENTMSGS_SET_MASK;
8958 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
8959 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
8960 msglen, iov_buf, WLC_IOCTL_SMLEN);
8961 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
8962 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
8963 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
8964 goto done;
8965 }
8966 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
8967 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
8968 DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
8969 __FUNCTION__, ret2));
8970 } else {
8971 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
8972 ret = ret2;
8973 goto done;
8974 }
8975
8976 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
8977 sizeof(scan_assoc_time), TRUE, 0);
8978 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
8979 sizeof(scan_unassoc_time), TRUE, 0);
8980 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
8981 sizeof(scan_passive_time), TRUE, 0);
8982
8983#ifdef ARP_OFFLOAD_SUPPORT
8984 /* Set and enable ARP offload feature for STA only */
8985#if defined(SOFTAP)
8986 if (arpoe && !ap_fw_loaded)
8987#else
8988 if (arpoe)
8989#endif
8990 {
8991 dhd_arp_offload_enable(dhd, TRUE);
8992 dhd_arp_offload_set(dhd, dhd_arp_mode);
8993 } else {
8994 dhd_arp_offload_enable(dhd, FALSE);
8995 dhd_arp_offload_set(dhd, 0);
8996 }
8997 dhd_arp_enable = arpoe;
8998#endif /* ARP_OFFLOAD_SUPPORT */
8999
9000#ifdef PKT_FILTER_SUPPORT
9001 /* Setup default definitions for pktfilter, enable in suspend */
9002 if (dhd_master_mode) {
9003 dhd->pktfilter_count = 6;
9004 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
9005 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
9006 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
9007 /* apply APP pktfilter */
9008 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
9009
9010 /* Setup filter to allow only unicast */
9011 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
9012
9013 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
9014 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
9015
9016#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
9017 dhd->pktfilter_count = 4;
9018 /* Setup filter to block broadcast and NAT Keepalive packets */
9019 /* discard all broadcast packets */
9020 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
9021 /* discard NAT Keepalive packets */
9022 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
9023 /* discard NAT Keepalive packets */
9024 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
9025 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
9026#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
9027 } else
9028 dhd_conf_discard_pkt_filter(dhd);
9029 dhd_conf_add_pkt_filter(dhd);
9030
9031#if defined(SOFTAP)
9032 if (ap_fw_loaded) {
9033 dhd_enable_packet_filter(0, dhd);
9034 }
9035#endif /* defined(SOFTAP) */
9036 dhd_set_packet_filter(dhd);
9037#endif /* PKT_FILTER_SUPPORT */
9038#ifdef DISABLE_11N
9039 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
9040 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9041 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
9042#endif /* DISABLE_11N */
9043
9044#ifdef ENABLE_BCN_LI_BCN_WAKEUP
9045 bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
9046 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9047#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
9048 /* query for 'ver' to get version info from firmware */
9049 memset(buf, 0, sizeof(buf));
9050 ptr = buf;
9051 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
9052 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
9053 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9054 else {
9055 bcmstrtok(&ptr, "\n", 0);
9056 /* Print fw version info */
9057 DHD_ERROR(("Firmware version = %s\n", buf));
9058 strncpy(fw_version, buf, FW_VER_STR_LEN);
9059 dhd_set_version_info(dhd, buf);
9060#ifdef WRITE_WLANINFO
9061 sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
9062#endif /* WRITE_WLANINFO */
9063 }
08dfb6c4
RC
9064 /* query for 'clmver' to get clm version info from firmware */
9065 memset(buf, 0, sizeof(buf));
9066 bcm_mkiovar("clmver", (char *)&buf, 4, buf, sizeof(buf));
9067 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
9068 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9069 else {
9070 char *clmver_temp_buf = NULL;
9071
9072 if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
9073 DHD_ERROR(("Couldn't find \"Data:\"\n"));
9074 } else {
9075 ptr = (clmver_temp_buf + strlen("Data:"));
9076 if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
9077 DHD_ERROR(("Couldn't find New line character\n"));
9078 } else {
9079 memset(clm_version, 0, CLM_VER_STR_LEN);
9080 strncpy(clm_version, clmver_temp_buf,
9081 MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
9082 DHD_ERROR((" clm = %s\n", clm_version));
9083 }
9084 }
9085 }
ef6a5fee
RC
9086
9087#if defined(BCMSDIO)
9088 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
9089 // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
08dfb6c4 9090 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:txglom", dhd->conf->bus_txglom, 0, FALSE);
ef6a5fee
RC
9091#endif /* defined(BCMSDIO) */
9092
ef6a5fee
RC
9093#if defined(BCMSDIO)
9094#ifdef PROP_TXSTATUS
9095 if (disable_proptx ||
9096#ifdef PROP_TXSTATUS_VSDB
9097 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
9098 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
9099 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
9100#endif /* PROP_TXSTATUS_VSDB */
9101 FALSE) {
9102 wlfc_enable = FALSE;
9103 }
08dfb6c4
RC
9104 ret = dhd_conf_get_disable_proptx(dhd);
9105 if (ret == 0){
9106 disable_proptx = 0;
9107 wlfc_enable = TRUE;
9108 } else if (ret >= 1) {
9109 disable_proptx = 1;
9110 wlfc_enable = FALSE;
9111 /* terence 20161229: we should set ampdu_hostreorder=0 when disalbe_proptx=1 */
9112 hostreorder = 0;
9113 }
ef6a5fee
RC
9114
9115#ifdef USE_WFA_CERT_CONF
9116 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
9117 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
9118 wlfc_enable = proptx;
9119 }
9120#endif /* USE_WFA_CERT_CONF */
9121
9122#ifndef DISABLE_11N
9123 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
9124 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
9125 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
9126 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
9127 if (ret2 != BCME_UNSUPPORTED)
9128 ret = ret2;
9129
9130 if (ret == BCME_NOTDOWN) {
9131 uint wl_down = 1;
9132 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
9133 sizeof(wl_down), TRUE, 0);
9134 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
9135 __FUNCTION__, ret2, hostreorder));
9136
9137 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
9138 iovbuf, sizeof(iovbuf));
9139 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
9140 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
9141 if (ret2 != BCME_UNSUPPORTED)
9142 ret = ret2;
9143 }
9144 if (ret2 != BCME_OK)
9145 hostreorder = 0;
9146 }
9147#endif /* DISABLE_11N */
9148
9149
08dfb6c4 9150 if (wlfc_enable) {
ef6a5fee 9151 dhd_wlfc_init(dhd);
08dfb6c4
RC
9152 /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
9153 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
9154 }
ef6a5fee
RC
9155#ifndef DISABLE_11N
9156 else if (hostreorder)
9157 dhd_wlfc_hostreorder_init(dhd);
9158#endif /* DISABLE_11N */
08dfb6c4
RC
9159#else
9160 /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
9161 printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
9162 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
ef6a5fee 9163#endif /* PROP_TXSTATUS */
08dfb6c4 9164 dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", dhd->conf->ampdu_hostreorder, 0, TRUE);
ef6a5fee
RC
9165#endif /* BCMSDIO || BCMBUS */
9166#ifdef PCIE_FULL_DONGLE
9167 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
9168 if (FW_SUPPORTED(dhd, ap)) {
9169 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
9170 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
9171 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
9172 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
9173 }
9174#endif /* PCIE_FULL_DONGLE */
9175#ifdef PNO_SUPPORT
9176 if (!dhd->pno_state) {
9177 dhd_pno_init(dhd);
9178 }
9179#endif
9180#ifdef WL11U
9181 dhd_interworking_enable(dhd);
9182#endif /* WL11U */
9183#ifndef WL_CFG80211
9184 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
9185#endif
9186
9187#ifdef SUPPORT_SENSORHUB
9188 bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
9189 if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
9190 FALSE, 0)) < 0) {
9191 DHD_ERROR(("%s failed to get shub hub enable information %d\n",
9192 __FUNCTION__, ret));
9193 dhd->info->shub_enable = 0;
9194 } else {
9195 memcpy(&shub_enable, iovbuf, sizeof(uint32));
9196 dhd->info->shub_enable = shub_enable;
9197 DHD_ERROR(("%s: checking sensorhub enable %d\n",
9198 __FUNCTION__, dhd->info->shub_enable));
9199 }
9200#endif /* SUPPORT_SENSORHUB */
9201done:
9202
9203 if (eventmask_msg)
9204 kfree(eventmask_msg);
9205 if (iov_buf)
9206 kfree(iov_buf);
9207
9208 return ret;
9209}
9210
9211
9212int
9213dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
9214{
9215 char buf[strlen(name) + 1 + cmd_len];
9216 int len = sizeof(buf);
9217 wl_ioctl_t ioc;
9218 int ret;
9219
9220 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
9221
9222 memset(&ioc, 0, sizeof(ioc));
9223
9224 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
9225 ioc.buf = buf;
9226 ioc.len = len;
9227 ioc.set = set;
9228
9229 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
9230 if (!set && ret >= 0)
9231 memcpy(cmd_buf, buf, cmd_len);
9232
9233 return ret;
9234}
9235
9236int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
9237{
9238 struct dhd_info *dhd = dhdp->info;
9239 struct net_device *dev = NULL;
9240
9241 ASSERT(dhd && dhd->iflist[ifidx]);
9242 dev = dhd->iflist[ifidx]->net;
9243 ASSERT(dev);
9244
9245 if (netif_running(dev)) {
9246 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
9247 return BCME_NOTDOWN;
9248 }
9249
9250#define DHD_MIN_MTU 1500
9251#define DHD_MAX_MTU 1752
9252
9253 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
9254 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
9255 return BCME_BADARG;
9256 }
9257
9258 dev->mtu = new_mtu;
9259 return 0;
9260}
9261
9262#ifdef ARP_OFFLOAD_SUPPORT
9263/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
9264void
9265aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
9266{
9267 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
9268 int i;
9269 int ret;
9270
9271 bzero(ipv4_buf, sizeof(ipv4_buf));
9272
9273 /* display what we've got */
9274 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9275 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
9276#ifdef AOE_DBG
9277 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9278#endif
9279 /* now we saved hoste_ip table, clr it in the dongle AOE */
9280 dhd_aoe_hostip_clr(dhd_pub, idx);
9281
9282 if (ret) {
9283 DHD_ERROR(("%s failed\n", __FUNCTION__));
9284 return;
9285 }
9286
9287 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
9288 if (add && (ipv4_buf[i] == 0)) {
9289 ipv4_buf[i] = ipa;
9290 add = FALSE; /* added ipa to local table */
9291 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
9292 __FUNCTION__, i));
9293 } else if (ipv4_buf[i] == ipa) {
9294 ipv4_buf[i] = 0;
9295 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
9296 __FUNCTION__, ipa, i));
9297 }
9298
9299 if (ipv4_buf[i] != 0) {
9300 /* add back host_ip entries from our local cache */
9301 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
9302 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
9303 __FUNCTION__, ipv4_buf[i], i));
9304 }
9305 }
9306#ifdef AOE_DBG
9307 /* see the resulting hostip table */
9308 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
9309 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
9310 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
9311#endif
9312}
9313
9314/*
9315 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
9316 * whenever there is an event related to an IP address.
9317 * ptr : kernel provided pointer to IP address that has changed
9318 */
9319static int dhd_inetaddr_notifier_call(struct notifier_block *this,
9320 unsigned long event,
9321 void *ptr)
9322{
9323 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
9324
9325 dhd_info_t *dhd;
9326 dhd_pub_t *dhd_pub;
9327 int idx;
9328
9329 if (!dhd_arp_enable)
9330 return NOTIFY_DONE;
9331 if (!ifa || !(ifa->ifa_dev->dev))
9332 return NOTIFY_DONE;
9333
9334#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9335 /* Filter notifications meant for non Broadcom devices */
9336 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
9337 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
9338#if defined(WL_ENABLE_P2P_IF)
9339 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
9340#endif /* WL_ENABLE_P2P_IF */
9341 return NOTIFY_DONE;
9342 }
9343#endif /* LINUX_VERSION_CODE */
9344
9345 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
9346 if (!dhd)
9347 return NOTIFY_DONE;
9348
9349 dhd_pub = &dhd->pub;
9350
9351 if (dhd_pub->arp_version == 1) {
9352 idx = 0;
9353 } else {
9354 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
9355 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
9356 break;
9357 }
9358 if (idx < DHD_MAX_IFS) {
9359 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
9360 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
9361 } else {
9362 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
9363 idx = 0;
9364 }
9365 }
9366
9367 switch (event) {
9368 case NETDEV_UP:
9369 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
9370 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9371
9372 if (dhd->pub.busstate != DHD_BUS_DATA) {
9373 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
9374 if (dhd->pend_ipaddr) {
9375 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
9376 __FUNCTION__, dhd->pend_ipaddr));
9377 }
9378 dhd->pend_ipaddr = ifa->ifa_address;
9379 break;
9380 }
9381
9382#ifdef AOE_IP_ALIAS_SUPPORT
9383 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
9384 __FUNCTION__));
9385 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
9386#endif /* AOE_IP_ALIAS_SUPPORT */
9387 break;
9388
9389 case NETDEV_DOWN:
9390 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
9391 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
9392 dhd->pend_ipaddr = 0;
9393#ifdef AOE_IP_ALIAS_SUPPORT
9394 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
9395 __FUNCTION__));
9396 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
9397#else
9398 dhd_aoe_hostip_clr(&dhd->pub, idx);
9399 dhd_aoe_arp_clr(&dhd->pub, idx);
9400#endif /* AOE_IP_ALIAS_SUPPORT */
9401 break;
9402
9403 default:
9404 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
9405 __func__, ifa->ifa_label, event));
9406 break;
9407 }
9408 return NOTIFY_DONE;
9409}
9410#endif /* ARP_OFFLOAD_SUPPORT */
9411
9412#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
9413/* Neighbor Discovery Offload: defered handler */
9414static void
9415dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
9416{
9417 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
9418 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
9419 int ret;
9420
9421 if (event != DHD_WQ_WORK_IPV6_NDO) {
9422 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
9423 return;
9424 }
9425
9426 if (!ndo_work) {
9427 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
9428 return;
9429 }
9430
9431 if (!pub) {
9432 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
9433 return;
9434 }
9435
9436 if (ndo_work->if_idx) {
9437 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
9438 return;
9439 }
9440
9441 switch (ndo_work->event) {
9442 case NETDEV_UP:
9443 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
9444 ret = dhd_ndo_enable(pub, TRUE);
9445 if (ret < 0) {
9446 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
9447 }
9448
9449 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
9450 if (ret < 0) {
9451 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
9452 __FUNCTION__, ret));
9453 }
9454 break;
9455 case NETDEV_DOWN:
9456 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
9457 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
9458 if (ret < 0) {
9459 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
9460 __FUNCTION__, ret));
9461 goto done;
9462 }
9463
9464 ret = dhd_ndo_enable(pub, FALSE);
9465 if (ret < 0) {
9466 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
9467 goto done;
9468 }
9469 break;
9470 default:
9471 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
9472 break;
9473 }
9474done:
9475 /* free ndo_work. alloced while scheduling the work */
9476 kfree(ndo_work);
9477
9478 return;
9479}
9480
9481/*
9482 * Neighbor Discovery Offload: Called when an interface
9483 * is assigned with ipv6 address.
9484 * Handles only primary interface
9485 */
9486static int dhd_inet6addr_notifier_call(struct notifier_block *this,
9487 unsigned long event,
9488 void *ptr)
9489{
9490 dhd_info_t *dhd;
9491 dhd_pub_t *dhd_pub;
9492 struct inet6_ifaddr *inet6_ifa = ptr;
9493 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
9494 struct ipv6_work_info_t *ndo_info;
9495 int idx = 0; /* REVISIT */
9496
9497#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
9498 /* Filter notifications meant for non Broadcom devices */
9499 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
9500 return NOTIFY_DONE;
9501 }
9502#endif /* LINUX_VERSION_CODE */
9503
9504 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
9505 if (!dhd)
9506 return NOTIFY_DONE;
9507
9508 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
9509 return NOTIFY_DONE;
9510 dhd_pub = &dhd->pub;
9511
9512 if (!FW_SUPPORTED(dhd_pub, ndoe))
9513 return NOTIFY_DONE;
9514
9515 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
9516 if (!ndo_info) {
9517 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
9518 return NOTIFY_DONE;
9519 }
9520
9521 ndo_info->event = event;
9522 ndo_info->if_idx = idx;
9523 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
9524
9525 /* defer the work to thread as it may block kernel */
9526 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
9527 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
9528 return NOTIFY_DONE;
9529}
9530#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
9531
/*
 * Register interface @ifidx (created earlier by dhd_allocate_if) with the
 * Linux network stack.
 *
 * Wires up netdev ops / ethtool / wireless-extension handlers, picks the MAC
 * address (dongle MAC for ifidx 0, primary MAC with the locally-administered
 * bit for virtual interfaces that would otherwise collide), then calls
 * register_netdev()/register_netdevice() depending on @need_rtnl_lock.
 *
 * Returns 0 on success or the negative register_netdev* error; on failure the
 * netdev ops are cleared so the caller can retry or tear down.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp;
	struct net_device *net = NULL;
	int err = 0;
	/* Fallback MAC (Broadcom OUI 00:90:4c) used only if no real MAC is known */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	ASSERT(dhd && dhd->iflist[ifidx]);
	ifp = dhd->iflist[ifidx];
	net = ifp->net;
	ASSERT(net && (ifp->idx == ifidx));

	/* Pre-2.6.31 kernels take individual function pointers; newer ones
	 * take a net_device_ops table. Default to the virtual-interface ops.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot. This will not work in simultaneous AP/STA mode,
		 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
			__func__, net->name));
			temp_addr[0] |= 0x02;
		}
	}

	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	if (ifidx == 0)
		printf("%s\n", dhd_version);
#ifdef WL_EXT_IAPSTA
	else if (!strncmp(net->name, "wl0.", strlen("wl0."))) {
		wl_android_ext_attach_netdev(net, ifidx);
	}
#endif

	if (need_rtnl_lock)
		err = register_netdev(net);
	else
		err = register_netdevice(net);

	if (err != 0) {
		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
		goto fail;
	}



	printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
#if defined(CUSTOMER_HW4_DEBUG)
		MAC2STRDBG(dhd->pub.mac.octet));
#else
		MAC2STRDBG(net->dev_addr));
#endif /* CUSTOMER_HW4_DEBUG */

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
//		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

	/* When firmware is not downloaded at driver load, power the chip back
	 * down after the primary interface is registered; it will be powered
	 * up again on first ifconfig-up.
	 */
#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
	KERNEL_VERSION(2, 6, 27))))
	if (ifidx == 0) {
#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
		up(&dhd_registration_sem);
#endif /* BCMLXSDMMC */
		if (!dhd_download_fw_on_driverload) {
#ifdef WL_CFG80211
			wl_terminate_event_handler();
#endif /* WL_CFG80211 */
#if defined(DHD_LB) && defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB && DHD_LB_RXP */
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
			dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
			dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	}
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
	return 0;

fail:
	/* Undo the ops hookup so a later attempt starts from a clean netdev */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
9675
9676void
9677dhd_bus_detach(dhd_pub_t *dhdp)
9678{
9679 dhd_info_t *dhd;
9680
9681 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9682
9683 if (dhdp) {
9684 dhd = (dhd_info_t *)dhdp->info;
9685 if (dhd) {
9686
9687 /*
9688 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
9689 * calling stop again will cuase SD read/write errors.
9690 */
9691 if (dhd->pub.busstate != DHD_BUS_DOWN) {
9692 /* Stop the protocol module */
9693 dhd_prot_stop(&dhd->pub);
9694
9695 /* Stop the bus module */
9696 dhd_bus_stop(dhd->pub.bus, TRUE);
9697 }
9698
9699#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
9700 dhd_bus_oob_intr_unregister(dhdp);
9701#endif
9702 }
9703 }
9704}
9705
9706
/*
 * Full driver teardown for @dhdp: close the primary netdev, stop the bus
 * and protocol layers, unregister all kernel notifiers, delete every
 * interface (virtual first, then primary), stop timers/threads/tasklets,
 * and release deferred-work, wakelock and config resources.
 *
 * The ordering here is load-bearing: the netdev is closed before the bus is
 * stopped, notifiers are removed before interfaces are deleted, and threads
 * are stopped before the structures they touch are freed. Each stage is
 * gated on the corresponding DHD_ATTACH_STATE_* bit so a partially failed
 * dhd_attach() can still be unwound safely.
 */
void dhd_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	unsigned long flags;
	int timer_valid = FALSE;
	struct net_device *dev;

	if (!dhdp)
		return;

	dhd = (dhd_info_t *)dhdp->info;
	if (!dhd)
		return;

	dev = dhd->iflist[0]->net;

	if (dev) {
		rtnl_lock();
		if (dev->flags & IFF_UP) {
			/* If IFF_UP is still up, it indicates that
			 * "ifconfig wlan0 down" hasn't been called.
			 * So invoke dev_close explicitly here to
			 * bring down the interface.
			 */
			DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
			dev_close(dev);
		}
		rtnl_unlock();
	}

	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));

	dhd->pub.up = 0;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		/* Give sufficient time for threads to start running in case
		 * dhd_attach() has failed
		 */
		OSL_SLEEP(100);
	}

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

#ifdef PROP_TXSTATUS
#ifdef DHD_WLFC_THREAD
	/* Stop the wireless flow-control thread before its queues go away */
	if (dhd->pub.wlfc_thread) {
		kthread_stop(dhd->pub.wlfc_thread);
		dhdp->wlfc_thread_go = TRUE;
		wake_up_interruptible(&dhdp->wlfc_wqhead);
	}
	dhd->pub.wlfc_thread = NULL;
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {

		dhd_bus_detach(dhdp);
#ifdef BCMPCIE
		/* On system reboot, also power the adapter off explicitly */
		if (is_reboot == SYS_RESTART) {
			extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
			if (dhd_wifi_platdata && !dhdp->dongle_reset) {
				dhdpcie_bus_clock_stop(dhdp->bus);
				wifi_platform_set_power(dhd_wifi_platdata->adapters,
					FALSE, WIFI_TURNOFF_DELAY);
			}
		}
#endif /* BCMPCIE */
#ifndef PCIE_FULL_DONGLE
		if (dhdp->prot)
			dhd_prot_detach(dhdp);
#endif
	}

	/* Unregister kernel notifiers before deleting the interfaces they
	 * might reference.
	 */
#ifdef ARP_OFFLOAD_SUPPORT
	if (dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = FALSE;
		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
	if (dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = FALSE;
		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
		if (dhd->early_suspend.suspend)
			unregister_early_suspend(&dhd->early_suspend);
	}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if defined(WL_WIRELESS_EXT)
	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
		/* Detatch and unlink in the iw */
		wl_iw_detach();
	}
#ifdef WL_ESCAN
	wl_escan_detach();
#endif
#endif /* defined(WL_WIRELESS_EXT) */

	/* delete all interfaces, start with virtual  */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;
		dhd_if_t *ifp;

		/* Cleanup virtual interfaces */
		dhd_net_if_lock_local(dhd);
		for (i = 1; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i])
				dhd_remove_if(&dhd->pub, i, TRUE);
		}
		dhd_net_if_unlock_local(dhd);

		/*  delete primary interface 0 */
		ifp = dhd->iflist[0];
		ASSERT(ifp);
		ASSERT(ifp->net);
		if (ifp && ifp->net) {



			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
#ifdef SET_RPS_CPUS
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				netif_tx_disable(ifp->net);
				unregister_netdev(ifp->net);
			}
			ifp->net = NULL;
#ifdef DHD_WMF
			dhd_wmf_cleanup(dhdp, 0);
#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
			bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
				NULL, FALSE, dhdp->tickcnt);
			deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
			ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */

			dhd_if_del_sta_list(ifp);

			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
			dhd->iflist[0] = NULL;
		}
	}

	/* Clear the watchdog timer */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	timer_valid = dhd->wd_timer_valid;
	dhd->wd_timer_valid = FALSE;
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	if (timer_valid)
		del_timer_sync(&dhd->timer);
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

	/* Stop worker threads; fall back to killing the tasklet when no DPC
	 * thread was created.
	 */
	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
#ifdef DHD_PCIE_RUNTIMEPM
		if (dhd->thr_rpm_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rpm_ctl);
		}
#endif /* DHD_PCIE_RUNTIMEPM */
		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_wdt_ctl);
		}

		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rxf_ctl);
		}

		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_dpc_ctl);
		} else {
			tasklet_kill(&dhd->tasklet);
#ifdef DHD_LB_RXP
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
		}
	}

#if defined(DHD_LB)
	/* Kill the Load Balancing Tasklets */
#if defined(DHD_LB_TXC)
	tasklet_disable(&dhd->tx_compl_tasklet);
	tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_disable(&dhd->rx_compl_tasklet);
	tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */
	if (dhd->cpu_notifier.notifier_call != NULL)
		unregister_cpu_notifier(&dhd->cpu_notifier);
	dhd_cpumasks_deinit(dhd);
#endif /* DHD_LB */

#ifdef DHD_LOG_DUMP
	dhd_log_dump_deinit(&dhd->pub);
#endif /* DHD_LOG_DUMP */
#ifdef WL_CFG80211
	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
		wl_cfg80211_detach(NULL);
		dhd_monitor_uninit();
	}
#endif
	/* free deferred work queue */
	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
	dhd->dhd_deferred_wq = NULL;

#ifdef SHOW_LOGTRACE
	if (dhd->event_data.fmts)
		kfree(dhd->event_data.fmts);
	if (dhd->event_data.raw_fmts)
		kfree(dhd->event_data.raw_fmts);
	if (dhd->event_data.raw_sstr)
		kfree(dhd->event_data.raw_sstr);
#endif /* SHOW_LOGTRACE */

#ifdef PNO_SUPPORT
	if (dhdp->pno_state)
		dhd_pno_deinit(dhdp);
#endif
#if defined(CONFIG_PM_SLEEP)
	if (dhd_pm_notifier_registered) {
		unregister_pm_notifier(&dhd->pm_notifier);
		dhd_pm_notifier_registered = FALSE;
	}
#endif /* CONFIG_PM_SLEEP */

#ifdef DEBUG_CPU_FREQ
	if (dhd->new_freq)
		free_percpu(dhd->new_freq);
	dhd->new_freq = NULL;
	cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
		dhd->wakelock_wd_counter = 0;
		wake_lock_destroy(&dhd->wl_wdwake);
		// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
		wake_lock_destroy(&dhd->wl_wifi);
#endif /* CONFIG_HAS_WAKELOCK */
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
	}



#ifdef DHDTCPACK_SUPPRESS
	/* This will free all MEM allocated for TCPACK SUPPRESS */
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */

#ifdef PCIE_FULL_DONGLE
	dhd_flow_rings_deinit(dhdp);
	if (dhdp->prot)
		dhd_prot_detach(dhdp);
#endif


	dhd_sysfs_exit(dhd);
	dhd->pub.is_fw_download_done = FALSE;
	dhd_conf_detach(dhdp);
}
9976
9977
/*
 * Free all host-side memory owned by this driver instance: per-flow AMPDU
 * reorder buffers, the STA pool, any captured SoC RAM dump, cached
 * firmware/nvram images, and finally the dhd_info structure itself —
 * unless that structure came from the platform prealloc pool, in which
 * case the platform layer owns it and MFREE must be skipped.
 */
void
dhd_free(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
		/* Each reorder buffer is a reorder_info header followed by
		 * (max_idx + 1) packet-pointer slots; free the whole span.
		 */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);

		dhd = (dhd_info_t *)dhdp->info;
		if (dhdp->soc_ram) {
			/* Memdump buffer: static-prealloc builds return it to
			 * the prealloc pool, others free it outright.
			 */
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
#ifdef CACHE_FW_IMAGES
		if (dhdp->cached_fw) {
			MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
			dhdp->cached_fw = NULL;
		}

		if (dhdp->cached_nvram) {
			MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
			dhdp->cached_nvram = NULL;
		}
#endif
		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
		if (dhd &&
			dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
		dhd = NULL;
	}
}
10031
/*
 * Clear transient driver state without tearing the instance down: flush
 * TCP-ACK suppression tables, release reorder buffers, clear (not free)
 * the STA pool and drop any captured SoC RAM dump.  Unlike dhd_free(),
 * the dhd_info structure itself is left intact.
 */
void
dhd_clear(dhd_pub_t *dhdp)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
#ifdef DHDTCPACK_SUPPRESS
		/* Clean up timer/data structure for any remaining/pending packet or timer. */
		dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				/* header plus (max_idx + 1) packet slots */
				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);

		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
	}
}
10071
/*
 * Common module teardown: unregister the bus driver, exit the Android
 * glue layer and drop the platform driver, in the reverse order of
 * dhd_module_init().
 */
static void
dhd_module_cleanup(void)
{
	printf("%s: Enter\n", __FUNCTION__);

	dhd_bus_unregister();

	wl_android_exit();

	dhd_wifi_platform_unregister_drv();

#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	/* Release the Amlogic device-tree wifi resources set up at init */
	wifi_teardown_dt();
#endif
#endif
	printf("%s: Exit\n", __FUNCTION__);
}
10090
/*
 * Module unload entry point: detach the buzzz tracer, run the common
 * cleanup, then unregister the reboot notifier installed at load time.
 */
static void __exit
dhd_module_exit(void)
{
	dhd_buzzz_detach();
	dhd_module_cleanup();
	unregister_reboot_notifier(&dhd_reboot_notifier);
}
10098
/*
 * Module load entry point.  Saves the module-parameter firmware/nvram
 * paths (the platform register call may clobber them), then tries to
 * register the platform driver up to POWERUP_MAX_RETRY+1 times, restoring
 * the saved paths between attempts.  On success a reboot notifier is
 * registered; on final failure the Amlogic DT setup is undone.
 */
static int __init
dhd_module_init(void)
{
	int err;
	int retry = POWERUP_MAX_RETRY;

	printf("%s: in %s\n", __FUNCTION__, dhd_version);
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	if (wifi_setup_dt()) {
		printf("wifi_dt : fail to setup dt\n");
	}
#endif
#endif

	dhd_buzzz_attach();

	DHD_PERIM_RADIO_INIT();

	/* Back up the caller-supplied paths so each retry starts clean. */
	if (firmware_path[0] != '\0') {
		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	if (nvram_path[0] != '\0') {
		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	do {
		err = dhd_wifi_platform_register_drv();
		if (!err) {
			register_reboot_notifier(&dhd_reboot_notifier);
			break;
		}
		else {
			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
				__FUNCTION__, retry));
			/* Restore paths that the failed attempt may have modified */
			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
		}
	} while (retry--);

	if (err) {
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
		wifi_teardown_dt();
#endif
#endif
		DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
	} else {
		if (!dhd_download_fw_on_driverload) {
			dhd_driver_init_done = TRUE;
		}
	}

	printf("%s: Exit err=%d\n", __FUNCTION__, err);
	return err;
}
10161
10162static int
10163dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
10164{
10165 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
10166 if (code == SYS_RESTART) {
10167#ifdef BCMPCIE
10168 is_reboot = code;
10169#endif /* BCMPCIE */
10170 }
10171 return NOTIFY_DONE;
10172}
10173
10174
/*
 * Driver entry-point registration.  With CONFIG_DEFERRED_INITCALLS the
 * init routine is deferred (synchronously on the listed Exynos/MSM
 * boards); otherwise it runs as a late initcall, falling back to plain
 * module_init() on pre-2.6 kernels.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
	defined(CONFIG_ARCH_MSM8996)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
	* CONFIG_ARCH_MSM8996
	*/
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);
10195
10196/*
10197 * OS specific functions required to implement DHD driver in OS independent way
10198 */
10199int
10200dhd_os_proto_block(dhd_pub_t *pub)
10201{
10202 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10203
10204 if (dhd) {
10205 DHD_PERIM_UNLOCK(pub);
10206
10207 down(&dhd->proto_sem);
10208
10209 DHD_PERIM_LOCK(pub);
10210 return 1;
10211 }
10212
10213 return 0;
10214}
10215
10216int
10217dhd_os_proto_unblock(dhd_pub_t *pub)
10218{
10219 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10220
10221 if (dhd) {
10222 up(&dhd->proto_sem);
10223 return 1;
10224 }
10225
10226 return 0;
10227}
10228
10229void
10230dhd_os_dhdiovar_lock(dhd_pub_t *pub)
10231{
10232 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10233
10234 if (dhd) {
10235 mutex_lock(&dhd->dhd_iovar_mutex);
10236 }
10237}
10238
10239void
10240dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
10241{
10242 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
10243
10244 if (dhd) {
10245 mutex_unlock(&dhd->dhd_iovar_mutex);
10246 }
10247}
10248
10249unsigned int
10250dhd_os_get_ioctl_resp_timeout(void)
10251{
10252 return ((unsigned int)dhd_ioctl_timeout_msec);
10253}
10254
10255void
10256dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
10257{
10258 dhd_ioctl_timeout_msec = (int)timeout_msec;
10259}
10260
/*
 * Sleep until *condition becomes true or dhd_ioctl_timeout_msec expires.
 * The PERIM lock is released across the wait so the response path can
 * run.  Returns wait_event_timeout()'s result: 0 on timeout, otherwise
 * the remaining jiffies.
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
10282
10283int
10284dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
10285{
10286 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10287
10288 wake_up(&dhd->ioctl_resp_wait);
10289 return 0;
10290}
10291
/*
 * Sleep until the D3 ACK condition is signalled or the IOCTL timeout
 * expires (same timeout value as IOCTL responses).  PERIM lock is
 * dropped across the wait.  Returns 0 on timeout, remaining jiffies
 * otherwise.
 */
int
dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
10313
10314int
10315dhd_os_d3ack_wake(dhd_pub_t *pub)
10316{
10317 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10318
10319 wake_up(&dhd->d3ack_wait);
10320 return 0;
10321}
10322
/*
 * Wait for *condition to become zero (all bus-busy contexts drained),
 * bounded by DHD_BUS_BUSY_TIMEOUT.  Returns 0 on timeout, remaining
 * jiffies otherwise.
 */
int
dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Wait for bus usage contexts to gracefully exit within some timeout value
	 * Set time out to little higher than dhd_ioctl_timeout_msec,
	 * so that IOCTL timeout should not get affected.
	 */
	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
#else
	timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);

	return timeout;
}
10344
/*
 * Wake any waiter in dhd_os_busbusy_wait_negation().  The write barrier
 * publishes the bus-state update before the waiter re-checks it.
 */
int INLINE
dhd_os_busbusy_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	/* Call wmb() to make sure before waking up the other event value gets updated */
	OSL_SMP_WMB();
	wake_up(&dhd->dhd_bus_busy_state_wait);
	return 0;
}
10354
10355void
10356dhd_os_wd_timer_extend(void *bus, bool extend)
10357{
10358 dhd_pub_t *pub = bus;
10359 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10360
10361 if (extend)
10362 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
10363 else
10364 dhd_os_wd_timer(bus, dhd->default_wd_interval);
10365}
10366
10367
/*
 * Start, rearm or stop the driver watchdog timer.
 *   wdtick != 0: (re)arm the timer with that period in ms.
 *   wdtick == 0: stop the timer if it is running.
 * Uses the counting WD wake lock: one reference is taken on entry and
 * dropped on the normal exit; arming takes an extra reference that is
 * NOT dropped here.  NOTE(review): the extra reference appears intended
 * to hold the wake lock while the watchdog is armed — verify the counts
 * balance against the stop path before changing any of this.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_OS_WD_WAKE_LOCK(pub);
	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(pub, flags);
		/* del_timer_sync must run outside the spinlock */
		del_timer_sync(&dhd->timer);
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
	DHD_OS_WD_WAKE_UNLOCK(pub);
}
10412
#ifdef DHD_PCIE_RUNTIMEPM
/*
 * Start or stop the PCIe runtime-PM timer.
 *   tick != 0: arm the timer (only if not already running).
 *   tick == 0: stop it if running; del_timer_sync runs after the general
 *   lock is dropped, hence the goto past the final unlock.
 * Does nothing while the bus is down or going down.
 */
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN ||
			pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if its not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
		}
	} else {
		/* tick is zero, we have to stop the timer */
		/* Stop the timer only if its running, otherwise we don't have to do anything */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			/* we have already released the lock, so just go to exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;

}

#endif /* DHD_PCIE_RUNTIMEPM */
10463
10464void *
10465dhd_os_open_image(char *filename)
10466{
10467 struct file *fp;
10468 int size;
10469
10470 fp = filp_open(filename, O_RDONLY, 0);
10471 /*
10472 * 2.6.11 (FC4) supports filp_open() but later revs don't?
10473 * Alternative:
10474 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
10475 * ???
10476 */
10477 if (IS_ERR(fp)) {
10478 fp = NULL;
10479 goto err;
10480 }
10481
10482 if (!S_ISREG(file_inode(fp)->i_mode)) {
10483 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
10484 fp = NULL;
10485 goto err;
10486 }
10487
10488 size = i_size_read(file_inode(fp));
10489 if (size <= 0) {
10490 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
10491 fp = NULL;
10492 goto err;
10493 }
10494
10495 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
10496
10497err:
10498 return fp;
10499}
10500
/*
 * Read the next chunk of an image opened by dhd_os_open_image() into buf.
 * Reads at most MIN(len, file size) bytes from the current position and
 * advances f_pos by the amount read.  Returns the byte count, 0 when no
 * image is given, or -EIO when the caller requested the whole file
 * (len >= size) but the read came back short.
 * NOTE(review): MIN() uses the total file size, not the bytes remaining
 * from f_pos — assumes callers read sequentially from the start; confirm.
 */
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;
	int size;

	if (!image)
		return 0;

	size = i_size_read(file_inode(fp));
	rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));

	if (len >= size && size != rdlen) {
		return -EIO;
	}

	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}
10523
08dfb6c4
RC
/* Return the size in bytes of an opened image, or 0 when image is NULL. */
int
dhd_os_get_image_size(void *image)
{
	struct file *fp = (struct file *)image;

	if (!fp)
		return 0;

	return i_size_read(file_inode(fp));
}
10537
ef6a5fee
RC
10538void
10539dhd_os_close_image(void *image)
10540{
10541 if (image)
10542 filp_close((struct file *)image, NULL);
10543}
10544
10545void
10546dhd_os_sdlock(dhd_pub_t *pub)
10547{
10548 dhd_info_t *dhd;
10549
10550 dhd = (dhd_info_t *)(pub->info);
10551
10552 if (dhd_dpc_prio >= 0)
10553 down(&dhd->sdsem);
10554 else
10555 spin_lock_bh(&dhd->sdlock);
10556}
10557
10558void
10559dhd_os_sdunlock(dhd_pub_t *pub)
10560{
10561 dhd_info_t *dhd;
10562
10563 dhd = (dhd_info_t *)(pub->info);
10564
10565 if (dhd_dpc_prio >= 0)
10566 up(&dhd->sdsem);
10567 else
10568 spin_unlock_bh(&dhd->sdlock);
10569}
10570
10571void
10572dhd_os_sdlock_txq(dhd_pub_t *pub)
10573{
10574 dhd_info_t *dhd;
10575
10576 dhd = (dhd_info_t *)(pub->info);
10577 spin_lock_bh(&dhd->txqlock);
10578}
10579
10580void
10581dhd_os_sdunlock_txq(dhd_pub_t *pub)
10582{
10583 dhd_info_t *dhd;
10584
10585 dhd = (dhd_info_t *)(pub->info);
10586 spin_unlock_bh(&dhd->txqlock);
10587}
10588
/* Intentionally empty: no rx-queue lock is used in this build. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
10593
/* Intentionally empty: counterpart of the empty dhd_os_sdlock_rxq(). */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
10598
10599static void
10600dhd_os_rxflock(dhd_pub_t *pub)
10601{
10602 dhd_info_t *dhd;
10603
10604 dhd = (dhd_info_t *)(pub->info);
10605 spin_lock_bh(&dhd->rxf_lock);
10606
10607}
10608
10609static void
10610dhd_os_rxfunlock(dhd_pub_t *pub)
10611{
10612 dhd_info_t *dhd;
10613
10614 dhd = (dhd_info_t *)(pub->info);
10615 spin_unlock_bh(&dhd->rxf_lock);
10616}
10617
10618#ifdef DHDTCPACK_SUPPRESS
/*
 * Take the TCP-ACK suppression lock.  SDIO builds use a BH spinlock and
 * always return 0; other builds use spin_lock_irqsave and return the
 * saved irq flags, which must be passed back to dhd_os_tcpackunlock().
 */
unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_lock_bh(&dhd->tcpack_lock);
#else
		spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}

	return flags;
}
10637
/*
 * Release the TCP-ACK suppression lock.  'flags' is the value returned
 * by dhd_os_tcpacklock(); it is ignored on SDIO builds (BH lock).
 */
void
dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd;

#ifdef BCMSDIO
	BCM_REFERENCE(flags);
#endif /* BCMSDIO */

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_unlock_bh(&dhd->tcpack_lock); // terence 20160519
#else
		spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}
}
10657#endif /* DHDTCPACK_SUPPRESS */
10658
10659uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
10660{
10661 uint8* buf;
10662 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
10663
10664 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
10665 if (buf == NULL && kmalloc_if_fail)
10666 buf = kmalloc(size, flags);
10667
10668 return buf;
10669}
10670
/* Intentionally empty: prealloc-pool memory is owned by the platform
 * layer, so there is nothing to release here.
 */
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
10674
10675#if defined(WL_WIRELESS_EXT)
10676struct iw_statistics *
10677dhd_get_wireless_stats(struct net_device *dev)
10678{
10679 int res = 0;
10680 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10681
10682 if (!dhd->pub.up) {
10683 return NULL;
10684 }
10685
10686 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
10687
10688 if (res == 0)
10689 return &dhd->iw.wstats;
10690 else
10691 return NULL;
10692}
10693#endif /* defined(WL_WIRELESS_EXT) */
10694
/*
 * Decode a dongle host event and dispatch it to the configured stacks:
 * wireless-extensions (primary bsscfg only) and/or cfg80211.  Returns
 * the wl_host_event() status; dispatch is skipped on decode failure.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
	ASSERT(dhd != NULL);

#ifdef SHOW_LOGTRACE
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

		ASSERT(dhd->iflist[*ifidx] != NULL);
		ASSERT(dhd->iflist[*ifidx]->net != NULL);

		/* NULL-checked even after the ASSERT for non-debug builds */
		if (dhd->iflist[*ifidx]->net) {
			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->net)
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
10735
10736/* send up locally generated event */
/* Hook for delivering locally generated events upward.  Currently no
 * event type requires special handling, so every event falls through the
 * default case and is dropped.
 */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	switch (ntoh32(event->event_type)) {

	default:
		break;
	}
}
10746
10747#ifdef LOG_INTO_TCPDUMP
/*
 * Wrap raw log data in a BRCM-typed ethernet frame and inject it into the
 * host network stack (so it appears in tcpdump).  The frame is addressed
 * from/to our own MAC (source has the local bit toggled), delivered via
 * the "wlan0" interface if present, else interface 0.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* Build the synthetic header: us -> us, BRCM ethertype */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		/* eth_type_trans() adjusts data/len; save and restore them */
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			 ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	}
	else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
10805#endif /* LOG_INTO_TCPDUMP */
10806
/*
 * Sleep until *lockvar becomes FALSE or the IOCTL response timeout
 * expires (SDIO builds only; no-op otherwise).  The SD lock is dropped
 * across the wait so the event producer can run, then retaken.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
	return;
}
10824
/* Wake any thread blocked in dhd_wait_for_event() (SDIO builds only). */
void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;
	if (waitqueue_active(&dhdinfo->ctrl_wait))
		wake_up(&dhdinfo->ctrl_wait);
#endif
	return;
}
10834
10835#if defined(BCMSDIO) || defined(BCMPCIE)
/*
 * Reset the bus/dongle.  flag == TRUE brings the dongle down: issue a
 * best-effort "wl down" first, then tear down wlfc and PNO state.
 * flag == FALSE brings it back up: on SDIO, refresh the firmware/nvram
 * paths first so the re-download uses current values.  Returns the
 * dhd_bus_devreset() status.
 */
int
dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
	int ret;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (flag == TRUE) {
		/* Issue wl down command before resetting the chip */
		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
		}
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled)
			dhd_wlfc_deinit(&dhd->pub);
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
		if (dhd->pub.pno_state)
			dhd_pno_deinit(&dhd->pub);
#endif
	}

#ifdef BCMSDIO
	if (!flag) {
		dhd_update_fw_nv_path(dhd);
		/* update firmware and nvram path to sdio bus */
		dhd_bus_update_fw_nv_path(dhd->pub.bus,
			dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
	}
#endif /* BCMSDIO */

	ret = dhd_bus_devreset(&dhd->pub, flag);
	if (ret) {
		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
		return ret;
	}

	return ret;
}
10875
10876#ifdef BCMSDIO
10877int
10878dhd_net_bus_suspend(struct net_device *dev)
10879{
10880 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10881 return dhd_bus_suspend(&dhd->pub);
10882}
10883
10884int
10885dhd_net_bus_resume(struct net_device *dev, uint8 stage)
10886{
10887 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10888 return dhd_bus_resume(&dhd->pub, stage);
10889}
10890
10891#endif /* BCMSDIO */
10892#endif /* BCMSDIO || BCMPCIE */
10893
10894int net_os_set_suspend_disable(struct net_device *dev, int val)
10895{
10896 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10897 int ret = 0;
10898
10899 if (dhd) {
10900 ret = dhd->pub.suspend_disable_flag;
10901 dhd->pub.suspend_disable_flag = val;
10902 }
10903 return ret;
10904}
10905
/*
 * Apply suspend/resume power policy: via dhd_set_suspend() on
 * early-suspend builds, otherwise via the suspend/resume helper; then
 * let cfg80211 refresh the power mode.  Returns the helper's status.
 */
int net_os_set_suspend(struct net_device *dev, int val, int force)
{
	int ret = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
#ifdef CONFIG_MACH_UNIVERSAL7420
#endif /* CONFIG_MACH_UNIVERSAL7420 */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
		ret = dhd_set_suspend(val, &dhd->pub);
#else
		ret = dhd_suspend_resume_helper(dhd, val, force);
#endif
#ifdef WL_CFG80211
		wl_cfg80211_update_power_mode(dev);
#endif
	}
	return ret;
}
10925
10926int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
10927{
10928 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10929
10930 if (dhd)
10931 dhd->pub.suspend_bcn_li_dtim = val;
10932
10933 return 0;
10934}
10935
10936#ifdef PKT_FILTER_SUPPORT
/*
 * Install or remove one of the fixed suspend packet filters (broadcast,
 * IPv4/IPv6 multicast, mDNS) identified by 'num'.  In non-master mode
 * the add/remove sense is inverted.  The unicast slot is a no-op.
 * Returns 0, or -EINVAL for an out-of-range filter index.  Compiled out
 * (always 0) under GAN_LITE_NAT_KEEPALIVE_FILTER.
 */
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
	return 0;
#else
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	char *filterp = NULL;
	int filter_id = 0;
	int ret = 0;

	if (!dhd_master_mode)
		add_remove = !add_remove;
	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
	if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
		return ret;
	if (num >= dhd->pub.pktfilter_count)
		return -EINVAL;
	/* Filter strings: "<id> <offset> <...> <mask> <pattern>" */
	switch (num) {
		case DHD_BROADCAST_FILTER_NUM:
			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
			filter_id = 101;
			break;
		case DHD_MULTICAST4_FILTER_NUM:
			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
			filter_id = 102;
			break;
		case DHD_MULTICAST6_FILTER_NUM:
			filterp = "103 0 0 0 0xFFFF 0x3333";
			filter_id = 103;
			break;
		case DHD_MDNS_FILTER_NUM:
			filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
			filter_id = 104;
			break;
		default:
			return -EINVAL;
	}

	/* Add filter */
	if (add_remove) {
		dhd->pub.pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
	} else { /* Delete filter */
		if (dhd->pub.pktfilter[num] != NULL) {
			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
			dhd->pub.pktfilter[num] = NULL;
		}
	}
	return ret;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
}
10988
10989int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
10990
10991{
10992 int ret = 0;
10993
10994 /* Packet filtering is set only if we still in early-suspend and
10995 * we need either to turn it ON or turn it OFF
10996 * We can always turn it OFF in case of early-suspend, but we turn it
10997 * back ON only if suspend_disable_flag was not set
10998 */
10999 if (dhdp && dhdp->up) {
11000 if (dhdp->in_suspend) {
11001 if (!val || (val && !dhdp->suspend_disable_flag))
11002 dhd_enable_packet_filter(val, dhdp);
11003 }
11004 }
11005 return ret;
11006}
11007
11008/* function to enable/disable packet for Network device */
11009int net_os_enable_packet_filter(struct net_device *dev, int val)
11010{
11011 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11012
11013 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
11014 return dhd_os_enable_packet_filter(&dhd->pub, val);
11015}
11016#endif /* PKT_FILTER_SUPPORT */
11017
11018int
11019dhd_dev_init_ioctl(struct net_device *dev)
11020{
11021 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11022 int ret;
11023
11024 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
11025 goto done;
11026
11027done:
11028 return ret;
11029}
11030
/*
 * Build the WIFI_FEATURE_* bitmask advertised to the Android HAL from
 * the firmware capability flags (FW_SUPPORTED), the current operating
 * mode and compile-time options.  Returns 0 when the pub pointer is
 * unavailable.
 */
int
dhd_dev_get_feature_set(struct net_device *dev)
{
	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhd = (&ptr->pub);
	int feature_set = 0;

#ifdef DYNAMIC_SWOOB_DURATION
#ifndef CUSTOM_INTR_WIDTH
#define CUSTOM_INTR_WIDTH 100
	int intr_width = 0;
#endif /* CUSTOM_INTR_WIDTH */
#endif /* DYNAMIC_SWOOB_DURATION */
	if (!dhd)
		return feature_set;

	if (FW_SUPPORTED(dhd, sta))
		feature_set |= WIFI_FEATURE_INFRA;
	if (FW_SUPPORTED(dhd, dualband))
		feature_set |= WIFI_FEATURE_INFRA_5G;
	if (FW_SUPPORTED(dhd, p2p))
		feature_set |= WIFI_FEATURE_P2P;
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		feature_set |= WIFI_FEATURE_SOFT_AP;
	if (FW_SUPPORTED(dhd, tdls))
		feature_set |= WIFI_FEATURE_TDLS;
	if (FW_SUPPORTED(dhd, vsdb))
		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
	if (FW_SUPPORTED(dhd, nan)) {
		feature_set |= WIFI_FEATURE_NAN;
		/* NAN is essentail for d2d rtt */
		if (FW_SUPPORTED(dhd, rttd2d))
			feature_set |= WIFI_FEATURE_D2D_RTT;
	}
#ifdef RTT_SUPPORT
	feature_set |= WIFI_FEATURE_D2AP_RTT;
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */
	/* Supports STA + STA always */
	feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
#ifdef PNO_SUPPORT
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
		feature_set |= WIFI_FEATURE_BATCH_SCAN;
#ifdef GSCAN_SUPPORT
		feature_set |= WIFI_FEATURE_GSCAN;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
	return feature_set;
}
11087
11088
/*
 * Return a kmalloc'd array of MAX_FEATURE_SET_CONCURRRENT_GROUPS feature
 * masks, each the full feature set filtered down to what one concurrency
 * group supports (group 1 swaps NAN for P2P; group 2 adds TDLS).  *num
 * receives the group count, or 0 when allocation fails (NULL returned).
 * Caller owns and must kfree the array.
 */
int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
{
	int feature_set_full, mem_needed;
	int *ret;

	*num = 0;
	mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
	ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
	if (!ret) {
		DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
			mem_needed));
		return ret;
	}

	feature_set_full = dhd_dev_get_feature_set(dev);

	ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		(feature_set_full & WIFI_FEATURE_NAN) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_PNO) |
		(feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
		(feature_set_full & WIFI_FEATURE_GSCAN) |
		(feature_set_full & WIFI_FEATURE_HOTSPOT) |
		(feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
		(feature_set_full & WIFI_FEATURE_EPR);

	ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		/* Not yet verified NAN with P2P */
		/* (feature_set_full & WIFI_FEATURE_NAN) | */
		(feature_set_full & WIFI_FEATURE_P2P) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_EPR);

	ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		(feature_set_full & WIFI_FEATURE_NAN) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_TDLS) |
		(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
		(feature_set_full & WIFI_FEATURE_EPR);
	*num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;

	return ret;
}
11138#ifdef CUSTOM_FORCE_NODFS_FLAG
11139int
11140dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
11141{
11142 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11143
11144 if (nodfs)
11145 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
11146 else
11147 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
11148 dhd->pub.force_country_change = TRUE;
11149 return 0;
11150}
11151#endif /* CUSTOM_FORCE_NODFS_FLAG */
11152#ifdef PNO_SUPPORT
11153/* Linux wrapper to call common dhd_pno_stop_for_ssid */
11154int
11155dhd_dev_pno_stop_for_ssid(struct net_device *dev)
11156{
11157 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11158
11159 return (dhd_pno_stop_for_ssid(&dhd->pub));
11160}
11161/* Linux wrapper to call common dhd_pno_set_for_ssid */
11162int
11163dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
11164 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
11165{
11166 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11167
11168 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
11169 pno_repeat, pno_freq_expo_max, channel_list, nchan));
11170}
11171
11172/* Linux wrapper to call common dhd_pno_enable */
11173int
11174dhd_dev_pno_enable(struct net_device *dev, int enable)
11175{
11176 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11177
11178 return (dhd_pno_enable(&dhd->pub, enable));
11179}
11180
11181/* Linux wrapper to call common dhd_pno_set_for_hotlist */
11182int
11183dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
11184 struct dhd_pno_hotlist_params *hotlist_params)
11185{
11186 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11187 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
11188}
11189/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
11190int
11191dhd_dev_pno_stop_for_batch(struct net_device *dev)
11192{
11193 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11194 return (dhd_pno_stop_for_batch(&dhd->pub));
11195}
11196/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
11197int
11198dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
11199{
11200 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11201 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
11202}
11203/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
11204int
11205dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
11206{
11207 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11208 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
11209}
11210/* Linux wrapper to call common dhd_pno_set_mac_oui */
11211int
11212dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
11213{
11214 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11215 return (dhd_pno_set_mac_oui(&dhd->pub, oui));
11216}
11217#endif /* PNO_SUPPORT */
11218
11219#if defined(PNO_SUPPORT)
11220#ifdef GSCAN_SUPPORT
11221/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11222int
11223dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11224 void *buf, uint8 flush)
11225{
11226 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11227
11228 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
11229}
11230
11231/* Linux wrapper to call common dhd_pno_get_gscan */
11232void *
11233dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
11234 void *info, uint32 *len)
11235{
11236 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11237
11238 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
11239}
11240
11241/* Linux wrapper to call common dhd_wait_batch_results_complete */
11242void
11243dhd_dev_wait_batch_results_complete(struct net_device *dev)
11244{
11245 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11246
11247 return (dhd_wait_batch_results_complete(&dhd->pub));
11248}
11249
11250/* Linux wrapper to call common dhd_pno_lock_batch_results */
11251void
11252dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
11253{
11254 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11255
11256 return (dhd_pno_lock_batch_results(&dhd->pub));
11257}
11258/* Linux wrapper to call common dhd_pno_unlock_batch_results */
11259void
11260dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
11261{
11262 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11263
11264 return (dhd_pno_unlock_batch_results(&dhd->pub));
11265}
11266
11267/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
11268int
11269dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
11270{
11271 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11272
11273 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
11274}
11275
11276/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
11277int
11278dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
11279{
11280 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11281
11282 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
11283}
11284
11285/* Linux wrapper to call common dhd_handle_swc_evt */
11286void *
11287dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes)
11288{
11289 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11290
11291 return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
11292}
11293
11294/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
11295void *
11296dhd_dev_hotlist_scan_event(struct net_device *dev,
11297 const void *data, int *send_evt_bytes, hotlist_type_t type)
11298{
11299 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11300
11301 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
11302}
11303
11304/* Linux wrapper to call common dhd_process_full_gscan_result */
11305void *
11306dhd_dev_process_full_gscan_result(struct net_device *dev,
11307const void *data, int *send_evt_bytes)
11308{
11309 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11310
11311 return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
11312}
11313
11314void
11315dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
11316{
11317 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11318
11319 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
11320
11321 return;
11322}
11323
11324int
11325dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
11326{
11327 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11328
11329 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
11330}
11331
11332/* Linux wrapper to call common dhd_retreive_batch_scan_results */
11333int
11334dhd_dev_retrieve_batch_scan(struct net_device *dev)
11335{
11336 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11337
11338 return (dhd_retreive_batch_scan_results(&dhd->pub));
11339}
11340#endif /* GSCAN_SUPPORT */
11341#endif
11342#ifdef RTT_SUPPORT
11343/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
11344int
11345dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
11346{
11347 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11348
11349 return (dhd_rtt_set_cfg(&dhd->pub, buf));
11350}
11351int
11352dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
11353{
11354 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11355
11356 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
11357}
11358int
11359dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
11360{
11361 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11362
11363 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
11364}
11365int
11366dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
11367{
11368 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11369
11370 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
11371}
11372
11373int
11374dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
11375{
11376 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
11377
11378 return (dhd_rtt_capability(&dhd->pub, capa));
11379}
11380
11381#endif /* RTT_SUPPORT */
11382
11383#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
11384static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
11385{
11386 dhd_info_t *dhd;
11387 struct net_device *dev;
11388
11389 dhd = (dhd_info_t *)dhd_info;
11390 dev = dhd->iflist[0]->net;
11391
11392 if (dev) {
08dfb6c4
RC
11393 // terence 20161024: let wlan0 down when hang happened
11394 rtnl_lock();
11395 dev_close(dev);
11396 rtnl_unlock();
ef6a5fee
RC
11397#if defined(WL_WIRELESS_EXT)
11398 wl_iw_send_priv_event(dev, "HANG");
11399#endif
11400#if defined(WL_CFG80211)
11401 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
11402#endif
11403 }
11404}
11405
11406#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
/* Set up by the PCIe bus layer; identifies the instance to recover. */
extern dhd_pub_t *link_recovery;
/*
 * Entry point for the Exynos PCIe link-down notifier: marks the bus as
 * link-down and raises a HANG with reason HANG_REASON_PCIE_LINK_DOWN so
 * the normal hang path tears the interface down and informs userspace.
 * NOTE(review): assumes link_recovery was initialized before the PCIe
 * core can call this — confirm against the probe path.
 */
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
11416#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
11417
11418int dhd_os_send_hang_message(dhd_pub_t *dhdp)
11419{
11420 int ret = 0;
11421 if (dhdp) {
11422 if (!dhdp->hang_was_sent) {
11423 dhdp->hang_was_sent = 1;
11424 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
11425 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
11426 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
11427 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
11428 }
11429 }
11430 return ret;
11431}
11432
/*
 * Report a firmware hang upward if hang reporting is enabled; otherwise
 * quietly force the bus down so no further traffic flows.
 * NOTE(review): the #else branch below is dead code — this function is
 * compiled only under LINUX_VERSION_CODE >= 2.6.27 (see enclosing #if).
 */
int net_os_send_hang_message(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd) {
		/* Report FW problem when enabled */
		if (dhd->pub.hang_report) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
			ret = dhd_os_send_hang_message(&dhd->pub);
#else
			ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif
		} else {
			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
				__FUNCTION__));
			/* Enforce bus down to stop any future traffic */
			dhd->pub.busstate = DHD_BUS_DOWN;
		}
	}
	return ret;
}
11455
11456int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
11457{
11458 dhd_info_t *dhd = NULL;
11459 dhd_pub_t *dhdp = NULL;
11460 int reason;
11461
11462 dhd = DHD_DEV_INFO(dev);
11463 if (dhd) {
11464 dhdp = &dhd->pub;
11465 }
11466
11467 if (!dhd || !dhdp) {
11468 return 0;
11469 }
11470
11471 reason = bcm_strtoul(string_num, NULL, 0);
11472 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
11473
11474 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
11475 reason = 0;
11476 }
11477
11478 dhdp->hang_reason = reason;
11479
11480 return net_os_send_hang_message(dev);
11481}
11482#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
11483
11484
11485int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
11486{
11487 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11488 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
11489}
11490
11491bool dhd_force_country_change(struct net_device *dev)
11492{
11493 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11494
11495 if (dhd && dhd->pub.up)
11496 return dhd->pub.force_country_change;
11497 return FALSE;
11498}
/*
 * Translate an ISO country code into the platform-customized wl_country_t
 * (revision/locale overrides). With CUSTOM_COUNTRY_CODE the platform hook
 * additionally receives the driver's country flags (e.g. NODFS policy).
 */
void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef CUSTOM_COUNTRY_CODE
	get_customized_country_code(dhd->adapter, country_iso_code, cspec,
		dhd->pub.dhd_cflags);
#else
	get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
}
/*
 * Cache the active country spec in the driver instance and refresh the
 * cfg80211 wiphy band info. Only runs while the interface is up.
 * NOTE(review): this only updates host-side state; pushing the country to
 * firmware presumably happens elsewhere — confirm against callers.
 */
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, notify);
#endif
	}
}
11520
/*
 * Refresh wiphy band info after a band change. The `band` argument is not
 * used here — the firmware-side band selection is presumably done by the
 * caller before invoking this; this hook only re-syncs cfg80211 state.
 */
void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, true);
#endif
	}
}
11530
11531int dhd_net_set_fw_path(struct net_device *dev, char *fw)
11532{
11533 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11534
11535 if (!fw || fw[0] == '\0')
11536 return -EINVAL;
11537
11538 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
11539 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
11540
11541#if defined(SOFTAP)
11542 if (strstr(fw, "apsta") != NULL) {
11543 DHD_INFO(("GOT APSTA FIRMWARE\n"));
11544 ap_fw_loaded = TRUE;
11545 } else {
11546 DHD_INFO(("GOT STA FIRMWARE\n"));
11547 ap_fw_loaded = FALSE;
11548 }
11549#endif
11550 return 0;
11551}
11552
/* Public wrapper: take the per-instance net-interface mutex. */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}
11558
/* Public wrapper: release the per-instance net-interface mutex. */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
11564
/* Take the net-interface mutex; no-op on pre-2.6.25 kernels (no mutex
 * member there) or when dhd is NULL. */
static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	if (dhd)
		mutex_lock(&dhd->dhd_net_if_mutex);
#endif
}
11572
/* Release the net-interface mutex; mirrors dhd_net_if_lock_local. */
static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	if (dhd)
		mutex_unlock(&dhd->dhd_net_if_mutex);
#endif
}
11580
/* Serialize suspend/resume handling; no-op on pre-2.6.25 kernels. */
static void dhd_suspend_lock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
#endif
}
11589
/* Release the suspend/resume mutex; mirrors dhd_suspend_lock. */
static void dhd_suspend_unlock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
#endif
}
11598
11599unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
11600{
11601 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11602 unsigned long flags = 0;
11603
11604 if (dhd)
11605 spin_lock_irqsave(&dhd->dhd_lock, flags);
11606
11607 return flags;
11608}
11609
11610void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
11611{
11612 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11613
11614 if (dhd)
11615 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
11616}
11617
/* Linux specific multipurpose spinlock API */
/*
 * Allocate and initialize a standalone spinlock. Returned as void* so the
 * common (OS-agnostic) layer can carry it opaquely; NULL on OOM.
 */
void *
dhd_os_spin_lock_init(osl_t *osh)
{
	/* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
	/* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
	/* and this results in kernel asserts in internal builds */
	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}
11630void
11631dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
11632{
11633 if (lock)
11634 MFREE(osh, lock, sizeof(spinlock_t) + 4);
11635}
11636unsigned long
11637dhd_os_spin_lock(void *lock)
11638{
11639 unsigned long flags = 0;
11640
11641 if (lock)
11642 spin_lock_irqsave((spinlock_t *)lock, flags);
11643
11644 return flags;
11645}
11646void
11647dhd_os_spin_unlock(void *lock, unsigned long flags)
11648{
11649 if (lock)
11650 spin_unlock_irqrestore((spinlock_t *)lock, flags);
11651}
11652
11653static int
11654dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
11655{
11656 return (atomic_read(&dhd->pend_8021x_cnt));
11657}
11658
11659#define MAX_WAIT_FOR_8021X_TX 100
11660
11661int
11662dhd_wait_pend8021x(struct net_device *dev)
11663{
11664 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11665 int timeout = msecs_to_jiffies(10);
11666 int ntimes = MAX_WAIT_FOR_8021X_TX;
11667 int pend = dhd_get_pend_8021x_cnt(dhd);
11668
11669 while (ntimes && pend) {
11670 if (pend) {
11671 set_current_state(TASK_INTERRUPTIBLE);
11672 DHD_PERIM_UNLOCK(&dhd->pub);
11673 schedule_timeout(timeout);
11674 DHD_PERIM_LOCK(&dhd->pub);
11675 set_current_state(TASK_RUNNING);
11676 ntimes--;
11677 }
11678 pend = dhd_get_pend_8021x_cnt(dhd);
11679 }
11680 if (ntimes == 0)
11681 {
11682 atomic_set(&dhd->pend_8021x_cnt, 0);
11683 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
11684 }
11685 return pend;
11686}
11687
11688#ifdef DHD_DEBUG
11689static void
11690dhd_convert_memdump_type_to_str(uint32 type, char *buf)
11691{
11692 char *type_str = NULL;
11693
11694 switch (type) {
11695 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
11696 type_str = "resumed_on_timeout";
11697 break;
11698 case DUMP_TYPE_D3_ACK_TIMEOUT:
11699 type_str = "D3_ACK_timeout";
11700 break;
11701 case DUMP_TYPE_DONGLE_TRAP:
11702 type_str = "Dongle_Trap";
11703 break;
11704 case DUMP_TYPE_MEMORY_CORRUPTION:
11705 type_str = "Memory_Corruption";
11706 break;
11707 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
11708 type_str = "PKTID_AUDIT_Fail";
11709 break;
11710 case DUMP_TYPE_SCAN_TIMEOUT:
11711 type_str = "SCAN_timeout";
11712 break;
11713 case DUMP_TYPE_SCAN_BUSY:
11714 type_str = "SCAN_Busy";
11715 break;
11716 case DUMP_TYPE_BY_SYSDUMP:
11717 type_str = "BY_SYSDUMP";
11718 break;
11719 case DUMP_TYPE_BY_LIVELOCK:
11720 type_str = "BY_LIVELOCK";
11721 break;
11722 case DUMP_TYPE_AP_LINKUP_FAILURE:
11723 type_str = "BY_AP_LINK_FAILURE";
11724 break;
11725 default:
11726 type_str = "Unknown_type";
11727 break;
11728 }
11729
11730 strncpy(buf, type_str, strlen(type_str));
11731 buf[strlen(type_str)] = 0;
11732}
11733
11734int
11735write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
11736{
11737 int ret = 0;
11738 struct file *fp = NULL;
11739 mm_segment_t old_fs;
11740 loff_t pos = 0;
11741 char memdump_path[128];
11742 char memdump_type[32];
11743 struct timeval curtime;
11744 uint32 file_mode;
11745
11746 /* change to KERNEL_DS address limit */
11747 old_fs = get_fs();
11748 set_fs(KERNEL_DS);
11749
11750 /* Init file name */
11751 memset(memdump_path, 0, sizeof(memdump_path));
11752 memset(memdump_type, 0, sizeof(memdump_type));
11753 do_gettimeofday(&curtime);
11754 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
11755#ifdef CUSTOMER_HW4_DEBUG
11756 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11757 DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
11758 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11759 file_mode = O_CREAT | O_WRONLY | O_SYNC;
11760#elif defined(CUSTOMER_HW2)
11761 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11762 "/data/misc/wifi/mem_dump", memdump_type,
11763 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11764 file_mode = O_CREAT | O_WRONLY | O_SYNC;
11765#else
11766 snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
11767 "/installmedia/mem_dump", memdump_type,
11768 (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
11769 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
11770 * calling BUG_ON immediately after collecting the socram dump.
11771 * So the file write operation should directly write the contents into the
11772 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
11773 * instead of appending.
11774 */
11775 file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
11776#endif /* CUSTOMER_HW4_DEBUG */
11777
11778 /* print SOCRAM dump file path */
11779 DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));
11780
11781 /* open file to write */
11782 fp = filp_open(memdump_path, file_mode, 0644);
11783 if (IS_ERR(fp)) {
11784 ret = PTR_ERR(fp);
11785 printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
11786 goto exit;
11787 }
11788
11789 /* Write buf to file */
11790 fp->f_op->write(fp, buf, size, &pos);
11791
11792exit:
11793 /* close file before return */
11794 if (!ret)
11795 filp_close(fp, current->files);
11796
11797 /* restore previous address limit */
11798 set_fs(old_fs);
11799
11800 /* free buf before return */
11801#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
11802 DHD_OS_PREFREE(dhd, buf, size);
11803#else
11804 MFREE(dhd->osh, buf, size);
11805#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
11806
11807 return ret;
11808}
11809#endif /* DHD_DEBUG */
11810
/*
 * Arm the rx/ctrl timed wakelocks with whatever timeouts were requested
 * via the *_timeout_enable setters, then clear the requests. Returns the
 * larger of the two requested timeouts (ms), 0 if none were pending.
 */
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#ifdef CONFIG_HAS_WAKELOCK
		if (dhd->wakelock_rx_timeout_enable)
			wake_lock_timeout(&dhd->wl_rxwake,
				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
		if (dhd->wakelock_ctrl_timeout_enable)
			wake_lock_timeout(&dhd->wl_ctrlwake,
				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif
		/* Requests are one-shot: consumed once armed. */
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
11835
11836int net_os_wake_lock_timeout(struct net_device *dev)
11837{
11838 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11839 int ret = 0;
11840
11841 if (dhd)
11842 ret = dhd_os_wake_lock_timeout(&dhd->pub);
11843 return ret;
11844}
11845
11846int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
11847{
11848 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11849 unsigned long flags;
11850
11851 if (dhd) {
11852 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11853 if (val > dhd->wakelock_rx_timeout_enable)
11854 dhd->wakelock_rx_timeout_enable = val;
11855 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11856 }
11857 return 0;
11858}
11859
11860int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
11861{
11862 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
11863 unsigned long flags;
11864
11865 if (dhd) {
11866 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
11867 if (val > dhd->wakelock_ctrl_timeout_enable)
11868 dhd->wakelock_ctrl_timeout_enable = val;
11869 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
11870 }
11871 return 0;
11872}
11873
/*
 * Cancel any pending ctrl wakelock timeout request and, when wakelocks
 * are compiled in, drop the ctrl wakelock immediately if it is held.
 */
int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
		if (wake_lock_active(&dhd->wl_ctrlwake))
			wake_unlock(&dhd->wl_ctrlwake);
#endif
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return 0;
}
11890
11891int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
11892{
11893 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11894 int ret = 0;
11895
11896 if (dhd)
11897 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
11898 return ret;
11899}
11900
11901int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
11902{
11903 dhd_info_t *dhd = DHD_DEV_INFO(dev);
11904 int ret = 0;
11905
11906 if (dhd)
11907 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
11908 return ret;
11909}
11910
11911
11912#if defined(DHD_TRACE_WAKE_LOCK)
11913#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
11914#include <linux/hashtable.h>
11915#else
11916#include <linux/hash.h>
11917#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
11918
11919
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

/* Runtime toggle for recording wakelock call sites (1 = record). */
int trace_wklock_onoff = 1;

/* Kind of wakelock operation being recorded per call site. */
typedef enum dhd_wklock_type {
	DHD_WAKE_LOCK,
	DHD_WAKE_UNLOCK,
	DHD_WAIVE_LOCK,
	DHD_RESTORE_LOCK
} dhd_wklock_t;

/* One record per (call-site, operation); keyed by return address.
 * NOTE(review): this table is a file-scope global but is guarded by the
 * per-instance wakelock_spinlock at every use site visible here — fine
 * for a single adapter, worth confirming for multi-adapter setups. */
struct wk_trace_record {
	unsigned long addr; /* Address of the instruction */
	dhd_wklock_t lock_type; /* lock_type */
	unsigned long long counter; /* counter information */
	struct hlist_node wklock_node; /* hash node */
};
11943
11944
/*
 * Look up the trace record for a wakelock call site (keyed by its return
 * address). Both kernel variants walk only the one bucket the key hashes
 * to. Returns NULL when the site has not been seen yet.
 * Caller is expected to hold wakelock_spinlock (see use sites below).
 */
static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
	struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
	struct hlist_node *entry;
	int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
	hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		if (wklock_info->addr == addr) {
			return wklock_info;
		}
	}
	return NULL;
}
11962
/* Kernel-version shim: hash_add() exists from 3.7; emulate it on older
 * kernels by hashing into the static bucket array directly. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#define HASH_ADD(hashtable, node, key) \
	do { \
		hash_add(hashtable, node, key); \
	} while (0);
#else
#define HASH_ADD(hashtable, node, key) \
	do { \
		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
		hlist_add_head(node, &hashtable[index]); \
	} while (0);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

/* Record one wakelock operation against the caller's return address.
 * Expands at its use sites inside dhd_os_wake_lock/unlock, where a local
 * `dhd` is in scope and wakelock_spinlock is held (GFP_ATOMIC alloc).
 * WAIVE/RESTORE snapshot the current counter; LOCK/UNLOCK increment the
 * per-site counter. */
#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) {\
				printk("Can't allocate wk_trace_record \n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0);
12004
/*
 * Dump every recorded wakelock call site with its per-site counter.
 * Called with wakelock_spinlock held (see dhd_wk_lock_stats_dump).
 */
static inline void dhd_wk_lock_rec_dump(void)
{
	int bkt;
	struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	{
		switch (wklock_info->lock_type) {
			case DHD_WAKE_LOCK:
				DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
			case DHD_WAKE_UNLOCK:
				DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
			case DHD_WAIVE_LOCK:
				DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
			case DHD_RESTORE_LOCK:
				DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
					(void *)wklock_info->addr, wklock_info->counter));
				break;
		}
	}
}
12039
/* Initialize (empty) the wakelock trace hash table under the wakelock
 * spinlock. */
static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	int i;
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_init(wklock_history);
#else
	for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
		INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
12056
/* Free every wakelock trace record (safe iteration: nodes are deleted
 * while walking) under the wakelock spinlock. */
static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
	int bkt;
	struct wk_trace_record *wklock_info;
	struct hlist_node *tmp;
	unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
	struct hlist_node *entry = NULL;
	int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
	for (bkt = 0; bkt < max_index; bkt++)
		hlist_for_each_entry_safe(wklock_info, entry, tmp,
			&wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
	{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
		hash_del(&wklock_info->wklock_node);
#else
		hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
		kfree(wklock_info);
	}
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
12086
/* Public entry: dump all recorded wakelock statistics to the error log. */
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	unsigned long flags;

	DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"));
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	dhd_wk_lock_rec_dump();
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	/* Event wakelock count is read without wakelock_evt_spinlock —
	 * diagnostic only, a stale value is acceptable here. */
	DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
}
12098#else
12099#define STORE_WKLOCK_RECORD(wklock_type)
12100#endif /* ! DHD_TRACE_WAKE_LOCK */
12101
/*
 * Take the counted DHD wakelock. The underlying OS wakelock (or PM
 * stay-awake on SDIO) is acquired only on the 0 -> 1 transition and only
 * when locks are not currently waived. Returns the new count (0 if the
 * driver instance is gone).
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		/* First holder and not waived: take the real OS lock. */
		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
#ifdef DHD_TRACE_WAKE_LOCK
		if (trace_wklock_onoff) {
			STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
		}
#endif /* DHD_TRACE_WAKE_LOCK */
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}

	return ret;
}
12129
/*
 * Take the counted event-processing wakelock (wl_evtwake); the OS lock is
 * acquired only on the 0 -> 1 transition. Returns the new count.
 */
int dhd_event_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
		if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
		dhd->wakelock_event_counter++;
		ret = dhd->wakelock_event_counter;
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}

	return ret;
}
12152
12153int net_os_wake_lock(struct net_device *dev)
12154{
12155 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12156 int ret = 0;
12157
12158 if (dhd)
12159 ret = dhd_os_wake_lock(&dhd->pub);
12160 return ret;
12161}
12162
/*
 * Release one reference on the counted DHD wakelock; the OS wakelock is
 * dropped on the 1 -> 0 transition (unless waived). Also flushes any
 * pending timed-wakelock requests first. Returns the remaining count.
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
			if (trace_wklock_onoff) {
				STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
			}
#endif /* DHD_TRACE_WAKE_LOCK */
			/* Last holder and not waived: drop the real OS lock. */
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12193
/*
 * Release one reference on the event-processing wakelock; the OS lock is
 * dropped on the 1 -> 0 transition. Returns the remaining count.
 */
int dhd_event_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);

		if (dhd->wakelock_event_counter > 0) {
			dhd->wakelock_event_counter--;
			if (dhd->wakelock_event_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_event_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
	}
	return ret;
}
12218
/* Report whether the driver currently holds a wakelock that should block
 * host suspend: checks the main (wl_wifi) and watchdog (wl_wdwake) locks
 * under CONFIG_HAS_WAKELOCK, or the counter plus bus PM state for SDIO
 * builds on kernels > 2.6.36.
 * Returns 1 if suspend should be avoided, 0 otherwise (including when
 * pub is NULL or neither wakelock mechanism is compiled in).
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
12241
/* Like dhd_os_check_wakelock() but inspects every driver wakelock
 * (wifi, watchdog, rx, ctrl, OOB-interrupt, scan, event) and logs which
 * ones are held.
 * Returns 1 if any lock is active (suspend should be avoided), else 0.
 */
int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	int l1, l2, l3, l4, l7;
	int l5 = 0, l6 = 0;          /* default 0 when the optional locks are compiled out */
	int c, lock_active;
#endif /* CONFIG_HAS_WAKELOCK */
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub) {
		return 0;
	}
	dhd = (dhd_info_t *)(pub->info);
	if (!dhd) {
		return 0;
	}
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	c = dhd->wakelock_counter;
	l1 = wake_lock_active(&dhd->wl_wifi);
	l2 = wake_lock_active(&dhd->wl_wdwake);
	l3 = wake_lock_active(&dhd->wl_rxwake);
	l4 = wake_lock_active(&dhd->wl_ctrlwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
	l7 = wake_lock_active(&dhd->wl_evtwake);
	lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);

	/* Indicate to the Host to avoid going to suspend if internal locks are up */
	/* NOTE(review): the "dhd &&" below is redundant — dhd was NULL-checked above. */
	if (dhd && lock_active) {
		DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
			"ctl-%d intr-%d scan-%d evt-%d\n",
			__FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
		return 1;
	}
#endif /* CONFIG_HAS_WAKELOCK */
	return 0;
}
12292
12293int net_os_wake_unlock(struct net_device *dev)
12294{
12295 dhd_info_t *dhd = DHD_DEV_INFO(dev);
12296 int ret = 0;
12297
12298 if (dhd)
12299 ret = dhd_os_wake_unlock(&dhd->pub);
12300 return ret;
12301}
12302
/* Take one reference on the watchdog wakelock (wl_wdwake). The OS-level
 * lock is acquired only on the 0 -> 1 transition.
 * Returns the incremented counter, or 0 when dhd is NULL.
 */
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#ifdef CONFIG_HAS_WAKELOCK
		/* if wakelock_wd_counter was never used : lock it at once */
		if (!dhd->wakelock_wd_counter)
			wake_lock(&dhd->wl_wdwake);
#endif
		dhd->wakelock_wd_counter++;
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12322
/* Release the watchdog wakelock. Unlike the other unlock paths this
 * zeroes the counter outright rather than decrementing, so a single call
 * drops every outstanding watchdog reference.
 * Always returns 0 (ret is never assigned a nonzero value).
 */
int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_wd_counter) {
			dhd->wakelock_wd_counter = 0;
#ifdef CONFIG_HAS_WAKELOCK
			wake_unlock(&dhd->wl_wdwake);
#endif
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
12341
12342#ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the out-of-band host-wake interrupt wakelock (wl_intrwake) for
 * @val milliseconds. No-op unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12354
/* Release the out-of-band host-wake interrupt wakelock if it is still
 * held (e.g. cancel a pending timeout early). No-op unless
 * CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_intrwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12369#endif /* BCMPCIE_OOB_HOST_WAKE */
12370
12371#ifdef DHD_USE_SCAN_WAKELOCK
/* Hold the scan wakelock (wl_scanwake) for @val milliseconds. No-op
 * unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12383
/* Release the scan wakelock if it is still held (cancels a pending
 * timeout early). No-op unless CONFIG_HAS_WAKELOCK is set.
 */
void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#ifdef CONFIG_HAS_WAKELOCK
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		/* if wl_scanwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_scanwake)) {
			wake_unlock(&dhd->wl_scanwake);
		}
	}
#endif /* CONFIG_HAS_WAKELOCK */
}
12398#endif /* DHD_USE_SCAN_WAKELOCK */
12399
12400/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
12401 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
12402 */
12403int dhd_os_wake_lock_waive(dhd_pub_t *pub)
12404{
12405 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12406 unsigned long flags;
12407 int ret = 0;
12408
12409 if (dhd) {
12410 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12411
12412 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12413 if (dhd->waive_wakelock == FALSE) {
12414#ifdef DHD_TRACE_WAKE_LOCK
12415 if (trace_wklock_onoff) {
12416 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
12417 }
12418#endif /* DHD_TRACE_WAKE_LOCK */
12419 /* record current lock status */
12420 dhd->wakelock_before_waive = dhd->wakelock_counter;
12421 dhd->waive_wakelock = TRUE;
12422 }
12423 ret = dhd->wakelock_wd_counter;
12424 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12425 }
12426 return ret;
12427}
12428
12429int dhd_os_wake_lock_restore(dhd_pub_t *pub)
12430{
12431 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
12432 unsigned long flags;
12433 int ret = 0;
12434
12435 if (!dhd)
12436 return 0;
12437
12438 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
12439
12440 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
12441 if (!dhd->waive_wakelock)
12442 goto exit;
12443
12444 dhd->waive_wakelock = FALSE;
12445 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
12446 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
12447 * the lock in between, do the same by calling wake_unlock or pm_relax
12448 */
12449#ifdef DHD_TRACE_WAKE_LOCK
12450 if (trace_wklock_onoff) {
12451 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
12452 }
12453#endif /* DHD_TRACE_WAKE_LOCK */
12454
12455 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
12456#ifdef CONFIG_HAS_WAKELOCK
12457 wake_lock(&dhd->wl_wifi);
12458#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12459 dhd_bus_dev_pm_stay_awake(&dhd->pub);
12460#endif
12461 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
12462#ifdef CONFIG_HAS_WAKELOCK
12463 wake_unlock(&dhd->wl_wifi);
12464#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
12465 dhd_bus_dev_pm_relax(&dhd->pub);
12466#endif
12467 }
12468 dhd->wakelock_before_waive = 0;
12469exit:
12470 ret = dhd->wakelock_wd_counter;
12471 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
12472 return ret;
12473}
12474
/* Initialize all wakelock counters and OS wakelock objects owned by the
 * driver instance. Called during attach; paired with
 * dhd_os_wake_lock_destroy().
 * Note: wl_wifi (and wl_wdwake) are intentionally not initialized here —
 * see the inline comment below.
 */
void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}
12498
/* Tear down the wakelock objects created by dhd_os_wake_lock_init() and
 * reset the counters. wl_wifi/wl_wdwake are deliberately not destroyed —
 * see the inline comment below.
 */
void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
	DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#ifdef CONFIG_HAS_WAKELOCK
	dhd->wakelock_event_counter = 0;
	dhd->wakelock_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
	// terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
	wake_lock_destroy(&dhd->wl_rxwake);
	wake_lock_destroy(&dhd->wl_ctrlwake);
	wake_lock_destroy(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
	dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_HAS_WAKELOCK */
}
12522
12523bool dhd_os_check_if_up(dhd_pub_t *pub)
12524{
12525 if (!pub)
12526 return FALSE;
12527 return pub->up;
12528}
12529
12530/* function to collect firmware, chip id and chip version info */
12531void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
12532{
12533 int i;
12534
12535 i = snprintf(info_string, sizeof(info_string),
12536 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
12537 printf("%s\n", info_string);
12538
12539 if (!dhdp)
12540 return;
12541
12542 i = snprintf(&info_string[i], sizeof(info_string) - i,
12543 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
12544 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
12545}
12546
/* Issue a wl ioctl on the interface backing @net while holding the DHD
 * wakelock and perimeter lock; the result is fed to dhd_check_hang() so
 * a fatal error can trigger hang processing.
 * @cmd is unused here (the operation is fully described by @ioc).
 * Returns the ioctl status, -EINVAL for bad arguments, -ENODEV for an
 * unknown interface.
 */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
	int ifidx;
	int ret = 0;
	dhd_info_t *dhd = NULL;

	if (!net || !DEV_PRIV(net)) {
		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
		return -EINVAL;
	}

	dhd = DHD_DEV_INFO(net);
	if (!dhd)
		return -EINVAL;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
	/* let hang detection inspect the completion code */
	dhd_check_hang(net, &dhd->pub, ret);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return ret;
}
12579
/* Map @ifidx to its net_device and run hang detection on completion
 * code @ret.
 * NOTE(review): declared bool but returns -EINVAL for a bad index, which
 * converts to a truthy value — callers treating nonzero as "hang" will
 * see an invalid ifidx as a hang. Confirm this is intended before
 * changing it.
 */
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
12592
12593/* Return instance */
12594int dhd_get_instance(dhd_pub_t *dhdp)
12595{
12596 return dhdp->info->unit;
12597}
12598
12599
12600#ifdef PROP_TXSTATUS
12601
/* Platform hook called when wlfc (proprietary tx flow control) starts:
 * switch SDIO function-2 to the block size used for non-legacy chips.
 * No-op unless USE_DYNAMIC_F2_BLKSIZE is set.
 */
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
12609
/* Platform hook called when wlfc stops: restore the default SDIO
 * function-2 block size. No-op unless USE_DYNAMIC_F2_BLKSIZE is set.
 */
void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
	dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
	return;
}
12617
/* Decide whether host-side tx flow control should be skipped.
 * With SKIP_WLFC_ON_CONCURRENT: skip unless cfg80211 reports concurrent
 * (VSDB) operation — flow control is only wanted in concurrent mode.
 * Without it: never skip.
 */
bool dhd_wlfc_skip_fc(void)
{
#ifdef SKIP_WLFC_ON_CONCURRENT
#ifdef WL_CFG80211

	/* enable flow control in vsdb mode */
	return !(wl_cfg80211_is_concurrent_mode());
#else
	return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */

#else
	return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
}
12633#endif /* PROP_TXSTATUS */
12634
12635#ifdef BCMDBGFS
12636#include <linux/debugfs.h>
12637
/* State for the "dhd" debugfs register-access interface. */
typedef struct dhd_dbgfs {
	struct dentry	*debugfs_dir;	/* /sys/kernel/debug/dhd */
	struct dentry	*debugfs_mem;	/* "mem" file: raw backplane register access */
	dhd_pub_t	*dhdp;		/* driver instance the file operates on */
	uint32		size;		/* size of the exposed address window */
} dhd_dbgfs_t;

/* single global instance: only one debugfs window is supported */
dhd_dbgfs_t g_dbgfs;

/* backplane register accessors implemented by the bus layer */
extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
12649
/* debugfs open: stash the inode's private pointer for later read/write.
 * (Currently NULL is passed at create time, so private_data stays NULL.)
 */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
12656
12657static ssize_t
12658dhd_dbg_state_read(struct file *file, char __user *ubuf,
12659 size_t count, loff_t *ppos)
12660{
12661 ssize_t rval;
12662 uint32 tmp;
12663 loff_t pos = *ppos;
12664 size_t ret;
12665
12666 if (pos < 0)
12667 return -EINVAL;
12668 if (pos >= g_dbgfs.size || !count)
12669 return 0;
12670 if (count > g_dbgfs.size - pos)
12671 count = g_dbgfs.size - pos;
12672
12673 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
12674 tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
12675
12676 ret = copy_to_user(ubuf, &tmp, 4);
12677 if (ret == count)
12678 return -EFAULT;
12679
12680 count -= ret;
12681 *ppos = pos + count;
12682 rval = count;
12683
12684 return rval;
12685}
12686
12687
12688static ssize_t
12689dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
12690{
12691 loff_t pos = *ppos;
12692 size_t ret;
12693 uint32 buf;
12694
12695 if (pos < 0)
12696 return -EINVAL;
12697 if (pos >= g_dbgfs.size || !count)
12698 return 0;
12699 if (count > g_dbgfs.size - pos)
12700 count = g_dbgfs.size - pos;
12701
12702 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
12703 if (ret == count)
12704 return -EFAULT;
12705
12706 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
12707 dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
12708
12709 return count;
12710}
12711
12712
/* debugfs llseek over the register window [0, g_dbgfs.size].
 * whence: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END.
 * NOTE(review): the SEEK_END case computes size - off rather than the
 * conventional size + off (negative offsets expected); confirm callers
 * rely on this before normalizing.
 */
loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;	/* stays -1 (-> -EINVAL) on unknown whence */

	switch (whence) {
		case 0:
			pos = off;
			break;
		case 1:
			pos = file->f_pos + off;
			break;
		case 2:
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
12730
/* file_operations for the "mem" debugfs node: raw 32-bit register
 * read/write plus seeking over the exposed address window.
 */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write	= dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek	= dhd_debugfs_lseek
};
12737
12738static void dhd_dbg_create(void)
12739{
12740 if (g_dbgfs.debugfs_dir) {
12741 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
12742 NULL, &dhd_dbg_state_ops);
12743 }
12744}
12745
12746void dhd_dbg_init(dhd_pub_t *dhdp)
12747{
12748 g_dbgfs.dhdp = dhdp;
12749 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
12750
12751 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
12752 if (IS_ERR(g_dbgfs.debugfs_dir)) {
12753 g_dbgfs.debugfs_dir = NULL;
12754 return;
12755 }
12756
12757 dhd_dbg_create();
12758
12759 return;
12760}
12761
12762void dhd_dbg_remove(void)
12763{
12764 debugfs_remove(g_dbgfs.debugfs_mem);
12765 debugfs_remove(g_dbgfs.debugfs_dir);
12766
12767 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
12768}
12769#endif /* BCMDBGFS */
12770
12771#ifdef WLMEDIA_HTSF
12772
/* Stamp an outgoing packet with the host-synchronized TSF time.
 * Only packets whose (assumed UDP) destination port falls in
 * [tsport, tsport+20] are stamped; fixed byte offsets below presume a
 * specific Ethernet/IP/UDP layout (e.g. dport at offset 40) — offsets
 * are magic and unverified here.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/* timestamp packet */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
/*		memcpy(&proto, p1+26, 4);  	*/
		memcpy(&dport, p1+40, 2);
/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
		dport = ntoh16(dport);
	}

	/* timestamp only if  icmp or udb iperf with port 5555 */
/*	if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic  = HTSFMAGIC;
		ts.prio   = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10    = get_cycles();
		ts.t10    = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
12816
12817static void dhd_dump_htsfhisto(histo_t *his, char *s)
12818{
12819 int pktcnt = 0, curval = 0, i;
12820 for (i = 0; i < (NUMBIN-2); i++) {
12821 curval += 500;
12822 printf("%d ", his->bin[i]);
12823 pktcnt += his->bin[i];
12824 }
12825 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
12826 his->bin[NUMBIN-1], s);
12827}
12828
/* Accumulate @value into the histogram: 500-unit-wide bins, negatives
 * counted in bin[NUMBIN-1], running maximum kept in bin[NUMBIN-2].
 * NOTE(review): values beyond the last bin boundary fall through and are
 * lumped into bin[NUMBIN-3] — the same slot as the last regular bin;
 * presumably intentional overflow bucketing, confirm before changing.
 */
static
void sorttobin(int value, histo_t *histo)
{
	int i, binval = 0;

	if (value < 0) {
		histo->bin[NUMBIN-1]++;
		return;
	}
	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
		histo->bin[NUMBIN-2] = value;

	for (i = 0; i < (NUMBIN-2); i++) {
		binval += 500; /* 500m s bins */
		if (value <= binval) {
			histo->bin[i]++;
			return;
		}
	}
	histo->bin[NUMBIN-3]++;
}
12850
12851static
12852void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
12853{
12854 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12855 struct sk_buff *skb;
12856 char *p1;
12857 uint16 old_magic;
12858 int d1, d2, d3, end2end;
12859 htsfts_t *htsf_ts;
12860 uint32 htsf;
12861
12862 skb = PKTTONATIVE(dhdp->osh, pktbuf);
12863 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
12864
12865 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
12866 memcpy(&old_magic, p1+78, 2);
12867 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
12868 } else {
12869 return;
12870 }
12871 if (htsf_ts->magic == HTSFMAGIC) {
12872 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
12873 htsf_ts->cE0 = get_cycles();
12874 }
12875
12876 if (old_magic == 0xACAC) {
12877
12878 tspktcnt++;
12879 htsf = dhd_get_htsf(dhd, 0);
12880 memcpy(skb->data+92, &htsf, sizeof(uint32));
12881
12882 memcpy(&ts[tsidx].t1, skb->data+80, 16);
12883
12884 d1 = ts[tsidx].t2 - ts[tsidx].t1;
12885 d2 = ts[tsidx].t3 - ts[tsidx].t2;
12886 d3 = ts[tsidx].t4 - ts[tsidx].t3;
12887 end2end = ts[tsidx].t4 - ts[tsidx].t1;
12888
12889 sorttobin(d1, &vi_d1);
12890 sorttobin(d2, &vi_d2);
12891 sorttobin(d3, &vi_d3);
12892 sorttobin(end2end, &vi_d4);
12893
12894 if (end2end > 0 && end2end > maxdelay) {
12895 maxdelay = end2end;
12896 maxdelaypktno = tspktcnt;
12897 memcpy(&maxdelayts, &ts[tsidx], 16);
12898 }
12899 if (++tsidx >= TSMAX)
12900 tsidx = 0;
12901 }
12902}
12903
/* Estimate the current firmware TSF time by extrapolating from the last
 * (tsf, cycle-counter) pair using the calibrated cycles-per-tsf-tick
 * coefficient (coef + two decimal digits) maintained by htsf_update().
 * @ifidx is unused. Returns 0 if no coefficient has been calibrated yet.
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32    factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	/* handle 32-bit cycle-counter wraparound */
	if (cur_cycle >  dhd->htsf.last_cycle) {
		delta = cur_cycle -  dhd->htsf.last_cycle;
	} else {
		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
	}

	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval  = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	} else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
12934
/* Dump the recorded per-packet timestamp table (t1..t4), the per-leg
 * deltas, and the worst-case end-to-end latency sample.
 * NOTE(review): `max` is tracked in the loop but only d5 (its running
 * end-to-end value) is used for the comparison; the header line also
 * lists fewer columns than are printed.
 */
static void dhd_dump_latency(void)
{
	int i, max = 0;
	int d1, d2, d3, d4, d5;

	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
	for (i = 0; i < TSMAX; i++) {
		d1 = ts[i].t2 - ts[i].t1;
		d2 = ts[i].t3 - ts[i].t2;
		d3 = ts[i].t4 - ts[i].t3;
		d4 = ts[i].t4 - ts[i].t1;
		d5 = ts[max].t4-ts[max].t1;
		if (d4 > d5 && d4 > 0) {
			max = i;
		}
		printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
			d1, d2, d3, d4, i);
	}

	printf("current idx = %d \n", tsidx);

	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
	printf("%08X %08X %08X %08X \t%d %d %d %d\n",
	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
	maxdelayts.t2 - maxdelayts.t1,
	maxdelayts.t3 - maxdelayts.t2,
	maxdelayts.t4 - maxdelayts.t3,
	maxdelayts.t4 - maxdelayts.t1);
}
12965
12966
/* Query the firmware "tsf" iovar and print it next to the host-side
 * extrapolated value for calibration debugging.
 * Returns 0 on success, -EOPNOTSUPP if the firmware lacks the iovar,
 * or the ioctl error.
 * NOTE(review): s1 (pre-ioctl host estimate) is captured but never used;
 * only s2 is printed.
 */
static int
dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
{
	wl_ioctl_t ioc;
	char buf[32];
	int ret;
	uint32 s1, s2;

	/* firmware returns the TSF as a (low, high) 32-bit pair */
	struct tsf {
		uint32 low;
		uint32 high;
	} tsf_buf;

	memset(&ioc, 0, sizeof(ioc));
	memset(&tsf_buf, 0, sizeof(tsf_buf));

	ioc.cmd = WLC_GET_VAR;
	ioc.buf = buf;
	ioc.len = (uint)sizeof(buf);
	ioc.set = FALSE;

	strncpy(buf, "tsf", sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	s1 = dhd_get_htsf(dhd, 0);
	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
		if (ret == -EIO) {
			DHD_ERROR(("%s: tsf is not supported by device\n",
				dhd_ifname(&dhd->pub, ifidx)));
			return -EOPNOTSUPP;
		}
		return ret;
	}
	s2 = dhd_get_htsf(dhd, 0);

	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
		dhd->htsf.coefdec2, s2-tsf_buf.low);
	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
	return 0;
}
13008
13009void htsf_update(dhd_info_t *dhd, void *data)
13010{
13011 static ulong cur_cycle = 0, prev_cycle = 0;
13012 uint32 htsf, tsf_delta = 0;
13013 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
13014 ulong b, a;
13015 cycles_t t;
13016
13017 /* cycles_t in inlcude/mips/timex.h */
13018
13019 t = get_cycles();
13020
13021 prev_cycle = cur_cycle;
13022 cur_cycle = t;
13023
13024 if (cur_cycle > prev_cycle)
13025 cyc_delta = cur_cycle - prev_cycle;
13026 else {
13027 b = cur_cycle;
13028 a = prev_cycle;
13029 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
13030 }
13031
13032 if (data == NULL)
13033 printf(" tsf update ata point er is null \n");
13034
13035 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
13036 memcpy(&cur_tsf, data, sizeof(tsf_t));
13037
13038 if (cur_tsf.low == 0) {
13039 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
13040 return;
13041 }
13042
13043 if (cur_tsf.low > prev_tsf.low)
13044 tsf_delta = (cur_tsf.low - prev_tsf.low);
13045 else {
13046 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
13047 cur_tsf.low, prev_tsf.low));
13048 if (cur_tsf.high > prev_tsf.high) {
13049 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
13050 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
13051 } else {
13052 return; /* do not update */
13053 }
13054 }
13055
13056 if (tsf_delta) {
13057 hfactor = cyc_delta / tsf_delta;
13058 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
13059 dec1 = tmp/tsf_delta;
13060 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
13061 tmp = (tmp - (dec1*tsf_delta))*10;
13062 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
13063
13064 if (dec3 > 4) {
13065 if (dec2 == 9) {
13066 dec2 = 0;
13067 if (dec1 == 9) {
13068 dec1 = 0;
13069 hfactor++;
13070 } else {
13071 dec1++;
13072 }
13073 } else {
13074 dec2++;
13075 }
13076 }
13077 }
13078
13079 if (hfactor) {
13080 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
13081 dhd->htsf.coef = hfactor;
13082 dhd->htsf.last_cycle = cur_cycle;
13083 dhd->htsf.last_tsf = cur_tsf.low;
13084 dhd->htsf.coefdec1 = dec1;
13085 dhd->htsf.coefdec2 = dec2;
13086 } else {
13087 htsf = prev_tsf.low;
13088 }
13089}
13090
13091#endif /* WLMEDIA_HTSF */
13092
13093#ifdef CUSTOM_SET_CPUCORE
/* Pin (set == TRUE) or unpin the DPC and RXF kernel threads to their
 * dedicated CPU cores. Only acts when the channel is VHT80 (the high
 * throughput case this affinity tuning targets). Each set_cpus_allowed_ptr
 * call is retried up to MAX_RETRY_SET_CPUCORE times, sleeping 1 ms
 * between attempts; the shared retry_set budget spans both loops.
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				/* unpin: fall back to the primary core */
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	if (RXF_CPUCORE) {
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
#ifdef DHD_OF_SUPPORT
	interrupt_set_cpucore(set);
#endif /* DHD_OF_SUPPORT */
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
13144#endif /* CUSTOM_SET_CPUCORE */
13145
13146/* Get interface specific ap_isolate configuration */
13147int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
13148{
13149 dhd_info_t *dhd = dhdp->info;
13150 dhd_if_t *ifp;
13151
13152 ASSERT(idx < DHD_MAX_IFS);
13153
13154 ifp = dhd->iflist[idx];
13155
13156 return ifp->ap_isolate;
13157}
13158
13159/* Set interface specific ap_isolate configuration */
13160int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
13161{
13162 dhd_info_t *dhd = dhdp->info;
13163 dhd_if_t *ifp;
13164
13165 ASSERT(idx < DHD_MAX_IFS);
13166
13167 ifp = dhd->iflist[idx];
13168
13169 ifp->ap_isolate = val;
13170
13171 return 0;
13172}
13173
13174#ifdef DHD_FW_COREDUMP
13175
13176
13177#ifdef CUSTOMER_HW4_DEBUG
13178#ifdef PLATFORM_SLP
13179#define MEMDUMPINFO "/opt/etc/.memdump.info"
13180#else
13181#define MEMDUMPINFO "/data/.memdump.info"
13182#endif /* PLATFORM_SLP */
13183#elif defined(CUSTOMER_HW2)
13184#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
13185#else
13186#define MEMDUMPINFO "/installmedia/.memdump.info"
13187#endif /* CUSTOMER_HW4_DEBUG */
13188
13189void dhd_get_memdump_info(dhd_pub_t *dhd)
13190{
13191 struct file *fp = NULL;
13192 uint32 mem_val = DUMP_MEMFILE_MAX;
13193 int ret = 0;
13194 char *filepath = MEMDUMPINFO;
13195
13196 /* Read memdump info from the file */
13197 fp = filp_open(filepath, O_RDONLY, 0);
13198 if (IS_ERR(fp)) {
13199 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13200 goto done;
13201 } else {
13202 ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13203 if (ret < 0) {
13204 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13205 filp_close(fp, NULL);
13206 goto done;
13207 }
13208
13209 mem_val = bcm_atoi((char *)&mem_val);
13210
13211 DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
13212 filp_close(fp, NULL);
13213 }
13214
13215done:
13216#ifdef CUSTOMER_HW4_DEBUG
13217 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
13218#else
13219 dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
13220#endif /* CUSTOMER_HW4_DEBUG */
13221}
13222
13223
/* Queue a deferred worker (dhd_mem_dump) to write the captured SoC RAM
 * image (@buf, @size) to a file. The dhd_dump_t descriptor is freed by
 * the worker. With DUMP_MEMONLY policy the system BUG()s immediately
 * instead of scheduling. May also schedule a companion debug-log dump.
 */
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
	dhd_dump_t *dump = NULL;
	dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
	if (dump == NULL) {
		DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
		return;
	}
	dump->buf = buf;
	dump->bufsize = size;

#if defined(CONFIG_ARM64)
	DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
		(uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
	DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
		(uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	if (dhdp->memdump_enabled == DUMP_MEMONLY) {
		BUG_ON(1);
	}

#ifdef DHD_LOG_DUMP
	if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
		dhd_schedule_log_dump(dhdp);
	}
#endif /* DHD_LOG_DUMP */
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
		DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
}
/* Deferred-work handler: write the SoC RAM dump described by @event_info
 * (a dhd_dump_t from dhd_schedule_memdump) to a file, then free the
 * descriptor. With DUMP_MEMFILE_BUGON policy (and not a sysdump-typed
 * dump) the system BUG()s after writing.
 */
static void
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_dump_t *dump = event_info;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	if (!dump) {
		DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
		return;
	}

	if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
	}

	if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
		dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif
		TRUE) {
		BUG_ON(1);
	}
	MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
}
13283#endif /* DHD_FW_COREDUMP */
13284
13285#ifdef DHD_LOG_DUMP
13286static void
13287dhd_log_dump(void *handle, void *event_info, u8 event)
13288{
13289 dhd_info_t *dhd = handle;
13290
13291 if (!dhd) {
13292 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13293 return;
13294 }
13295
13296 if (do_dhd_log_dump(&dhd->pub)) {
13297 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
13298 return;
13299 }
13300}
13301
13302void dhd_schedule_log_dump(dhd_pub_t *dhdp)
13303{
13304 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
13305 (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
13306 dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
13307}
13308
/* Write a timestamped debug-dump file: a header with driver/firmware
 * versions followed by the contents of the circular dld_buf log, then
 * reset the log buffer under its spinlock.
 * Returns 0 on success, -1 on any failure.
 * Runs under set_fs(KERNEL_DS) so VFS writes accept kernel pointers
 * (pre-4.14 style file I/O via fp->f_op->write).
 */
static int
do_dhd_log_dump(dhd_pub_t *dhdp)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char dump_path[128];
	char common_info[1024];
	struct timeval curtime;
	uint32 file_mode;
	unsigned long flags = 0;

	if (!dhdp) {
		return -1;
	}

	/* Building the additional information like DHD, F/W version */
	memset(common_info, 0, sizeof(common_info));
	snprintf(common_info, sizeof(common_info),
		"---------- Common information ----------\n"
		"DHD version: %s\n"
		"F/W version: %s\n"
		"----------------------------------------\n",
		dhd_version, fw_version);

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* Init file name */
	memset(dump_path, 0, sizeof(dump_path));
	do_gettimeofday(&curtime);
	snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
		DHD_COMMON_DUMP_PATH "debug_dump",
		(unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
	file_mode = O_CREAT | O_WRONLY | O_SYNC;

	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
	fp = filp_open(dump_path, file_mode, 0644);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		DHD_ERROR(("open file error, err = %d\n", ret));
		ret = -1;
		goto exit;
	}

	fp->f_op->write(fp, common_info, strlen(common_info), &pos);
	/* wraparound set => the whole ring is valid; otherwise only the
	 * region between front and the current write position. */
	if (dhdp->dld_buf.wraparound) {
		fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
	} else {
		fp->f_op->write(fp, dhdp->dld_buf.buffer,
			(int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
	}

	/* re-init dhd_log_dump_buf structure */
	spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
	dhdp->dld_buf.wraparound = 0;
	dhdp->dld_buf.present = dhdp->dld_buf.front;
	dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
	bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
	spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
exit:
	/* ret == 0 implies the file was opened and must be closed;
	 * on open failure fp is an ERR_PTR and must not be closed. */
	if (!ret) {
		filp_close(fp, NULL);
	}
	set_fs(old_fs);

	return ret;
}
13379#endif /* DHD_LOG_DUMP */
13380
#ifdef BCMASSERT_LOG
/* Path of the file that selects the assert handling mode consumed by
 * dhd_get_assert_info(); the location differs per platform/customer build.
 */
#ifdef CUSTOMER_HW4_DEBUG
#ifdef PLATFORM_SLP
#define ASSERTINFO "/opt/etc/.assert.info"
#else
#define ASSERTINFO "/data/.assert.info"
#endif /* PLATFORM_SLP */
#elif defined(CUSTOMER_HW2)
#define ASSERTINFO "/data/misc/wifi/.assert.info"
#else
#define ASSERTINFO "/installmedia/.assert.info"
#endif /* CUSTOMER_HW4_DEBUG */
13393void dhd_get_assert_info(dhd_pub_t *dhd)
13394{
13395 struct file *fp = NULL;
13396 char *filepath = ASSERTINFO;
13397
13398 /*
13399 * Read assert info from the file
13400 * 0: Trigger Kernel crash by panic()
13401 * 1: Print out the logs and don't trigger Kernel panic. (default)
13402 * 2: Trigger Kernel crash by BUG()
13403 * File doesn't exist: Keep default value (1).
13404 */
13405 fp = filp_open(filepath, O_RDONLY, 0);
13406 if (IS_ERR(fp)) {
13407 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
13408 } else {
13409 int mem_val = 0;
13410 int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
13411 if (ret < 0) {
13412 DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
13413 } else {
13414 mem_val = bcm_atoi((char *)&mem_val);
13415 DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
13416 g_assert_type = mem_val;
13417 }
13418 filp_close(fp, NULL);
13419 }
13420}
13421#endif /* BCMASSERT_LOG */
13422
13423
13424#ifdef DHD_WMF
13425/* Returns interface specific WMF configuration */
13426dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
13427{
13428 dhd_info_t *dhd = dhdp->info;
13429 dhd_if_t *ifp;
13430
13431 ASSERT(idx < DHD_MAX_IFS);
13432
13433 ifp = dhd->iflist[idx];
13434 return &ifp->wmf;
13435}
13436#endif /* DHD_WMF */
13437
13438
13439#if defined(DHD_L2_FILTER)
13440bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
13441{
13442 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
13443}
13444#endif
13445
13446#ifdef DHD_L2_FILTER
13447arp_table_t*
13448dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
13449{
13450 dhd_info_t *dhd = dhdp->info;
13451 dhd_if_t *ifp;
13452
13453 ASSERT(bssidx < DHD_MAX_IFS);
13454
13455 ifp = dhd->iflist[bssidx];
13456 return ifp->phnd_arp_table;
13457}
13458
13459int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
13460{
13461 dhd_info_t *dhd = dhdp->info;
13462 dhd_if_t *ifp;
13463
13464 ASSERT(idx < DHD_MAX_IFS);
13465
13466 ifp = dhd->iflist[idx];
13467
13468 if (ifp)
13469 return ifp->parp_enable;
13470 else
13471 return FALSE;
13472}
13473
13474/* Set interface specific proxy arp configuration */
13475int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13476{
13477 dhd_info_t *dhd = dhdp->info;
13478 dhd_if_t *ifp;
13479 ASSERT(idx < DHD_MAX_IFS);
13480 ifp = dhd->iflist[idx];
13481
13482 if (!ifp)
13483 return BCME_ERROR;
13484
13485 /* At present all 3 variables are being
13486 * handled at once
13487 */
13488 ifp->parp_enable = val;
13489 ifp->parp_discard = val;
13490 ifp->parp_allnode = !val;
13491
13492 /* Flush ARP entries when disabled */
13493 if (val == FALSE) {
13494 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
13495 FALSE, dhdp->tickcnt);
13496 }
13497 return BCME_OK;
13498}
13499
13500bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13501{
13502 dhd_info_t *dhd = dhdp->info;
13503 dhd_if_t *ifp;
13504
13505 ASSERT(idx < DHD_MAX_IFS);
13506
13507 ifp = dhd->iflist[idx];
13508
13509 ASSERT(ifp);
13510 return ifp->parp_discard;
13511}
13512
13513bool
13514dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
13515{
13516 dhd_info_t *dhd = dhdp->info;
13517 dhd_if_t *ifp;
13518
13519 ASSERT(idx < DHD_MAX_IFS);
13520
13521 ifp = dhd->iflist[idx];
13522
13523 ASSERT(ifp);
13524
13525 return ifp->parp_allnode;
13526}
13527
13528int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
13529{
13530 dhd_info_t *dhd = dhdp->info;
13531 dhd_if_t *ifp;
13532
13533 ASSERT(idx < DHD_MAX_IFS);
13534
13535 ifp = dhd->iflist[idx];
13536
13537 ASSERT(ifp);
13538
13539 return ifp->dhcp_unicast;
13540}
13541
13542int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
13543{
13544 dhd_info_t *dhd = dhdp->info;
13545 dhd_if_t *ifp;
13546 ASSERT(idx < DHD_MAX_IFS);
13547 ifp = dhd->iflist[idx];
13548
13549 ASSERT(ifp);
13550
13551 ifp->dhcp_unicast = val;
13552 return BCME_OK;
13553}
13554
13555int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
13556{
13557 dhd_info_t *dhd = dhdp->info;
13558 dhd_if_t *ifp;
13559
13560 ASSERT(idx < DHD_MAX_IFS);
13561
13562 ifp = dhd->iflist[idx];
13563
13564 ASSERT(ifp);
13565
13566 return ifp->block_ping;
13567}
13568
13569int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
13570{
13571 dhd_info_t *dhd = dhdp->info;
13572 dhd_if_t *ifp;
13573 ASSERT(idx < DHD_MAX_IFS);
13574 ifp = dhd->iflist[idx];
13575
13576 ASSERT(ifp);
13577
13578 ifp->block_ping = val;
13579
13580 return BCME_OK;
13581}
13582
13583int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
13584{
13585 dhd_info_t *dhd = dhdp->info;
13586 dhd_if_t *ifp;
13587
13588 ASSERT(idx < DHD_MAX_IFS);
13589
13590 ifp = dhd->iflist[idx];
13591
13592 ASSERT(ifp);
13593
13594 return ifp->grat_arp;
13595}
13596
13597int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
13598{
13599 dhd_info_t *dhd = dhdp->info;
13600 dhd_if_t *ifp;
13601 ASSERT(idx < DHD_MAX_IFS);
13602 ifp = dhd->iflist[idx];
13603
13604 ASSERT(ifp);
13605
13606 ifp->grat_arp = val;
13607
13608 return BCME_OK;
13609}
13610#endif /* DHD_L2_FILTER */
13611
13612
13613#if defined(SET_RPS_CPUS)
13614int dhd_rps_cpus_enable(struct net_device *net, int enable)
13615{
13616 dhd_info_t *dhd = DHD_DEV_INFO(net);
13617 dhd_if_t *ifp;
13618 int ifidx;
13619 char * RPS_CPU_SETBUF;
13620
13621 ifidx = dhd_net2idx(dhd, net);
13622 if (ifidx == DHD_BAD_IF) {
13623 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
13624 return -ENODEV;
13625 }
13626
13627 if (ifidx == PRIMARY_INF) {
13628 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
13629 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
13630 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
13631 } else {
13632 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
13633 RPS_CPU_SETBUF = RPS_CPUS_MASK;
13634 }
13635 } else if (ifidx == VIRTUAL_INF) {
13636 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
13637 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
13638 } else {
13639 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
13640 return -EINVAL;
13641 }
13642
13643 ifp = dhd->iflist[ifidx];
13644 if (ifp) {
13645 if (enable) {
13646 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
13647 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
13648 } else {
13649 custom_rps_map_clear(ifp->net->_rx);
13650 }
13651 } else {
13652 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
13653 return -ENODEV;
13654 }
13655 return BCME_OK;
13656}
13657
/*
 * Program an RPS (Receive Packet Steering) CPU map on one RX queue.
 * 'buf' is a hex CPU bitmask string of length 'len' (same format as the
 * sysfs rps_cpus attribute).
 *
 * Returns the number of CPUs mapped on success, or a negative value on
 * failure (allocation/parse error, or an empty mask).
 */
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	/* Serializes concurrent updates of queue->rps_map */
	static DEFINE_SPINLOCK(rps_map_lock);

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* Parse the hex mask string into a cpumask */
	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	/* Allocate at least one cache line for the map */
	map = kzalloc(max_t(unsigned int,
		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* Record every CPU present in the parsed mask */
	i = 0;
	for_each_cpu(cpu, mask) {
		map->cpus[i++] = cpu;
	}

	if (i) {
		map->len = i;
	} else {
		/* Empty mask: nothing to steer to */
		kfree(map);
		map = NULL;
		free_cpumask_var(mask);
		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
		return -1;
	}

	/* Publish the new map; RCU readers pick it up after assignment */
	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
		lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	/* map is always non-NULL here (the empty-mask case returned above) */
	if (map) {
		static_key_slow_inc(&rps_needed);
	}
	if (old_map) {
		/* Retire the old map after an RCU grace period */
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);

	DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
	return map->len;
}
13721
/*
 * Remove the RPS CPU map from one RX queue, if any is installed.
 * The old map is freed after an RCU grace period.
 */
void custom_rps_map_clear(struct netdev_rx_queue *queue)
{
	struct rps_map *map;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	/* Caller context guarantees exclusion (second arg 1 skips the check) */
	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
		DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
	}
}
13735#endif
13736
13737
13738
13739#ifdef DHD_DEBUG_PAGEALLOC
13740
/*
 * Callback invoked by the page-allocator debug facility when a corrupted
 * page is detected. Logs the corrupt region and, on PCIe builds with
 * firmware coredump support, pulls the dongle memory dump to the host.
 */
void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;

	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
		__FUNCTION__, addr_corrupt, (uint32)len));

	DHD_OS_WAKE_LOCK(dhdp);
	/* Hex-dump the corrupted region and mirror driver state to dmesg */
	prhex("Page Corruption:", addr_corrupt, len);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_page_corrupt_cb);
13761#endif /* DHD_DEBUG_PAGEALLOC */
13762
13763#ifdef DHD_PKTID_AUDIT_ENABLED
/*
 * Callback invoked when the packet-ID audit detects an inconsistency.
 * Logs driver state and, on PCIe builds with coredump support, captures
 * the dongle memory dump (memdump mode set to dump-file-then-BUG_ON).
 */
void
dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
	DHD_OS_WAKE_LOCK(dhdp);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
13778#endif /* DHD_PKTID_AUDIT_ENABLED */
13779
13780/* ----------------------------------------------------------------------------
13781 * Infrastructure code for sysfs interface support for DHD
13782 *
13783 * What is sysfs interface?
13784 * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
13785 *
13786 * Why sysfs interface?
13787 * This is the Linux standard way of changing/configuring Run Time parameters
13788 * for a driver. We can use this interface to control "linux" specific driver
13789 * parameters.
13790 *
13791 * -----------------------------------------------------------------------------
13792 */
13793
13794#include <linux/sysfs.h>
13795#include <linux/kobject.h>
13796
13797#if defined(DHD_TRACE_WAKE_LOCK)
13798
13799/* Function to show the history buffer */
13800static ssize_t
13801show_wklock_trace(struct dhd_info *dev, char *buf)
13802{
13803 ssize_t ret = 0;
13804 dhd_info_t *dhd = (dhd_info_t *)dev;
13805
13806 buf[ret] = '\n';
13807 buf[ret+1] = 0;
13808
13809 dhd_wk_lock_stats_dump(&dhd->pub);
13810 return ret+1;
13811}
13812
13813/* Function to enable/disable wakelock trace */
13814static ssize_t
13815wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
13816{
13817 unsigned long onoff;
13818 unsigned long flags;
13819 dhd_info_t *dhd = (dhd_info_t *)dev;
13820
13821 onoff = bcm_strtoul(buf, NULL, 10);
13822 if (onoff != 0 && onoff != 1) {
13823 return -EINVAL;
13824 }
13825
13826 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
13827 trace_wklock_onoff = onoff;
13828 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
13829 if (trace_wklock_onoff) {
13830 printk("ENABLE WAKLOCK TRACE\n");
13831 } else {
13832 printk("DISABLE WAKELOCK TRACE\n");
13833 }
13834
13835 return (ssize_t)(onoff+1);
13836}
13837#endif /* DHD_TRACE_WAKE_LOCK */
13838
13839/*
13840 * Generic Attribute Structure for DHD.
13841 * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
13842 * to instantiate an object of type dhd_attr, populate it with
13843 * the required show/store functions (ex:- dhd_attr_cpumask_primary)
13844 * and add the object to default_attrs[] array, that gets registered
13845 * to the kobject of dhd (named bcm-dhd).
13846 */
13847
struct dhd_attr {
	struct attribute attr;	/* sysfs node name and permissions */
	ssize_t(*show)(struct dhd_info *, char *);	/* read handler */
	ssize_t(*store)(struct dhd_info *, const char *, size_t count);	/* write handler */
};
13853
#if defined(DHD_TRACE_WAKE_LOCK)
/* /sys/bcm-dhd/wklock_trace: read/write for owner and group (0660) */
static struct dhd_attr dhd_attr_wklock =
	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
#endif /* defined(DHD_TRACE_WAKE_LOCK */

/* Attribute object that gets registered with "bcm-dhd" kobject tree */
static struct attribute *default_attrs[] = {
#if defined(DHD_TRACE_WAKE_LOCK)
	&dhd_attr_wklock.attr,
#endif
	NULL	/* mandatory array terminator */
};
13866
/* Recover the enclosing dhd_info / dhd_attr from the embedded kernel object */
#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
#define to_attr(a) container_of(a, struct dhd_attr, attr)
13869
13870/*
13871 * bcm-dhd kobject show function, the "attr" attribute specifices to which
13872 * node under "bcm-dhd" the show function is called.
13873 */
13874static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
13875{
13876 dhd_info_t *dhd = to_dhd(kobj);
13877 struct dhd_attr *d_attr = to_attr(attr);
13878 int ret;
13879
13880 if (d_attr->show)
13881 ret = d_attr->show(dhd, buf);
13882 else
13883 ret = -EIO;
13884
13885 return ret;
13886}
13887
13888
13889/*
13890 * bcm-dhd kobject show function, the "attr" attribute specifices to which
13891 * node under "bcm-dhd" the store function is called.
13892 */
13893static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
13894 const char *buf, size_t count)
13895{
13896 dhd_info_t *dhd = to_dhd(kobj);
13897 struct dhd_attr *d_attr = to_attr(attr);
13898 int ret;
13899
13900 if (d_attr->store)
13901 ret = d_attr->store(dhd, buf, count);
13902 else
13903 ret = -EIO;
13904
13905 return ret;
13906
13907}
13908
/* Generic show/store dispatchers shared by every dhd sysfs attribute */
static struct sysfs_ops dhd_sysfs_ops = {
	.show = dhd_show,
	.store = dhd_store,
};

/* Kobject type for "bcm-dhd": dispatch table plus default attribute list */
static struct kobj_type dhd_ktype = {
	.sysfs_ops = &dhd_sysfs_ops,
	.default_attrs = default_attrs,
};
13918
13919/* Create a kobject and attach to sysfs interface */
13920static int dhd_sysfs_init(dhd_info_t *dhd)
13921{
13922 int ret = -1;
13923
13924 if (dhd == NULL) {
13925 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13926 return ret;
13927 }
13928
13929 /* Initialize the kobject */
13930 ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
13931 if (ret) {
13932 kobject_put(&dhd->dhd_kobj);
13933 DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
13934 return ret;
13935 }
13936
13937 /*
13938 * We are always responsible for sending the uevent that the kobject
13939 * was added to the system.
13940 */
13941 kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
13942
13943 return ret;
13944}
13945
13946/* Done with the kobject and detach the sysfs interface */
13947static void dhd_sysfs_exit(dhd_info_t *dhd)
13948{
13949 if (dhd == NULL) {
13950 DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
13951 return;
13952 }
13953
13954 /* Releae the kobject */
13955 kobject_put(&dhd->dhd_kobj);
13956}
13957
13958#ifdef DHD_LOG_DUMP
/*
 * Allocate and initialize the circular debug-log buffer (dld_buf) consumed
 * by dhd_log_dump_print() and do_dhd_log_dump(). On allocation failure the
 * function returns early and dld_enable stays 0, so logging is disabled.
 */
void
dhd_log_dump_init(dhd_pub_t *dhd)
{
	spin_lock_init(&dhd->dld_buf.lock);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	/* Prefer the preallocated static buffer when the build provides one */
	dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
		DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
#else
	dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */

	if (!dhd->dld_buf.buffer) {
		/* Fall back to a plain kmalloc() when the first attempt failed */
		dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
		DHD_ERROR(("Try to allocate memory using kmalloc().\n"));

		if (!dhd->dld_buf.buffer) {
			DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
			return;
		}
	}

	/* Empty buffer: write cursor at the front, full capacity remaining */
	dhd->dld_buf.wraparound = 0;
	dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_buf.present = dhd->dld_buf.buffer;
	dhd->dld_buf.front = dhd->dld_buf.buffer;
	dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
	dhd->dld_enable = 1;
}
13987
/*
 * Disable debug logging and release the dld_buf storage.
 * NOTE(review): if init's prealloc path failed and the kmalloc() fallback
 * was used, DHD_OS_PREFREE here would be paired with a kmalloc'd pointer —
 * verify DHD_OS_PREFREE tolerates that.
 */
void
dhd_log_dump_deinit(dhd_pub_t *dhd)
{
	/* Stop producers first, then free the buffer */
	dhd->dld_enable = 0;
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	DHD_OS_PREFREE(dhd,
		dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
#else
	kfree(dhd->dld_buf.buffer);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
}
13999
14000void
14001dhd_log_dump_print(const char *fmt, ...)
14002{
14003 int len = 0;
14004 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
14005 va_list args;
14006 dhd_pub_t *dhd = NULL;
14007 unsigned long flags = 0;
14008
14009 if (wl_get_bcm_cfg80211_ptr()) {
14010 dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
14011 }
14012
14013 if (!dhd || dhd->dld_enable != 1) {
14014 return;
14015 }
14016
14017 va_start(args, fmt);
14018
14019 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
14020 if (len < 0) {
14021 return;
14022 }
14023
14024 /* make a critical section to eliminate race conditions */
14025 spin_lock_irqsave(&dhd->dld_buf.lock, flags);
14026 if (dhd->dld_buf.remain < len) {
14027 dhd->dld_buf.wraparound = 1;
14028 dhd->dld_buf.present = dhd->dld_buf.front;
14029 dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
14030 }
14031
14032 strncpy(dhd->dld_buf.present, tmp_buf, len);
14033 dhd->dld_buf.remain -= len;
14034 dhd->dld_buf.present += len;
14035 spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
14036
14037 /* double check invalid memory operation */
14038 ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
14039 va_end(args);
14040}
14041
14042char*
14043dhd_log_dump_get_timestamp(void)
14044{
14045 static char buf[16];
14046 u64 ts_nsec;
14047 unsigned long rem_nsec;
14048
14049 ts_nsec = local_clock();
14050 rem_nsec = do_div(ts_nsec, 1000000000);
14051 snprintf(buf, sizeof(buf), "%5lu.%06lu",
14052 (unsigned long)ts_nsec, rem_nsec / 1000);
14053
14054 return buf;
14055}
14056
14057#endif /* DHD_LOG_DUMP */
14058
14059/* ---------------------------- End of sysfs implementation ------------------------------------- */
14060
14061void *dhd_get_pub(struct net_device *dev)
14062{
14063 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
14064 if (dhdinfo)
14065 return (void *)&dhdinfo->pub;
08dfb6c4
RC
14066 else {
14067 printf("%s: null dhdinfo\n", __FUNCTION__);
14068 return NULL;
14069 }
14070}
14071
14072void *dhd_get_conf(struct net_device *dev)
14073{
14074 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
14075 if (dhdinfo)
14076 return (void *)dhdinfo->pub.conf;
14077 else {
14078 printf("%s: null dhdinfo\n", __FUNCTION__);
ef6a5fee 14079 return NULL;
08dfb6c4 14080 }
ef6a5fee
RC
14081}
14082
14083bool dhd_os_wd_timer_enabled(void *bus)
14084{
14085 dhd_pub_t *pub = bus;
14086 dhd_info_t *dhd = (dhd_info_t *)pub->info;
14087
14088 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
14089 if (!dhd) {
14090 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
14091 return FALSE;
14092 }
14093 return dhd->wd_timer_valid;
14094}