This change fixes an issue that caused the ap6xxx Wi-Fi to fail to open.
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd-usb.1.201.88.27.x / dhd_linux.c
1 /*
2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
4 *
5 * Copyright (C) 1999-2015, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 * $Id: dhd_linux.c 588496 2015-09-24 08:32:09Z $
26 */
27
28 #include <typedefs.h>
29 #include <linuxver.h>
30 #include <osl.h>
31 #ifdef SHOW_LOGTRACE
32 #include <linux/syscalls.h>
33 #include <event_log.h>
34 #endif /* SHOW_LOGTRACE */
35
36
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/skbuff.h>
41 #include <linux/netdevice.h>
42 #include <linux/inetdevice.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/etherdevice.h>
45 #include <linux/random.h>
46 #include <linux/spinlock.h>
47 #include <linux/ethtool.h>
48 #include <linux/fcntl.h>
49 #include <linux/fs.h>
50 #include <linux/ip.h>
51 #include <linux/reboot.h>
52 #include <linux/notifier.h>
53 #include <net/addrconf.h>
54 #ifdef ENABLE_ADAPTIVE_SCHED
55 #include <linux/cpufreq.h>
56 #endif /* ENABLE_ADAPTIVE_SCHED */
57
58 #include <asm/uaccess.h>
59 #include <asm/unaligned.h>
60
61 #include <epivers.h>
62 #include <bcmutils.h>
63 #include <bcmendian.h>
64 #include <bcmdevs.h>
65
66 #include <proto/ethernet.h>
67 #include <proto/bcmevent.h>
68 #include <proto/vlan.h>
69 #ifdef DHD_L2_FILTER
70 #include <proto/bcmicmp.h>
71 #endif
72 #include <proto/802.3.h>
73
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
76 #include <dhd.h>
77 #include <dhd_linux.h>
78 #ifdef PCIE_FULL_DONGLE
79 #include <dhd_flowring.h>
80 #endif
81 #include <dhd_bus.h>
82 #include <dhd_proto.h>
83 #include <dhd_config.h>
84 #include <dhd_dbg.h>
85 #ifdef CONFIG_HAS_WAKELOCK
86 #include <linux/wakelock.h>
87 #endif
88 #ifdef WL_CFG80211
89 #include <wl_cfg80211.h>
90 #endif
91 #ifdef P2PONEINT
92 #include <wl_cfgp2p.h>
93 #endif
94 #ifdef PNO_SUPPORT
95 #include <dhd_pno.h>
96 #endif
97 #ifdef WLBTAMP
98 #include <proto/802.11_bta.h>
99 #include <proto/bt_amp_hci.h>
100 #include <dhd_bta.h>
101 #endif
102
103 #ifdef CONFIG_COMPAT
104 #include <linux/compat.h>
105 #endif
106
107 #ifdef DHD_WMF
108 #include <dhd_wmf_linux.h>
109 #endif /* DHD_WMF */
110
111 #ifdef AMPDU_VO_ENABLE
112 #include <proto/802.1d.h>
113 #endif /* AMPDU_VO_ENABLE */
114 #ifdef DHDTCPACK_SUPPRESS
115 #include <dhd_ip.h>
116 #endif /* DHDTCPACK_SUPPRESS */
117
118 #if defined(DHD_TCP_WINSIZE_ADJUST)
119 #include <linux/tcp.h>
120 #include <net/tcp.h>
121 #endif /* DHD_TCP_WINSIZE_ADJUST */
122
123 #ifdef WLMEDIA_HTSF
124 #include <linux/time.h>
125 #include <htsf.h>
126
127 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
128 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
129 #define TSMAX 1000 /* max no. of timing record kept */
130 #define NUMBIN 34
131
132 static uint32 tsidx = 0;
133 static uint32 htsf_seqnum = 0;
134 uint32 tsfsync;
135 struct timeval tsync;
136 static uint32 tsport = 5010;
137
138 typedef struct histo_ {
139 uint32 bin[NUMBIN];
140 } histo_t;
141
142
143 #if !ISPOWEROF2(DHD_SDALIGN)
144 #error DHD_SDALIGN is not a power of 2!
145 #endif
146
147 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
148 #endif /* WLMEDIA_HTSF */
149
150 #ifdef STBLINUX
151 #ifdef quote_str
152 #undef quote_str
153 #endif /* quote_str */
154 #ifdef to_str
155 #undef to_str
156 #endif /* quote_str */
157 #define to_str(s) #s
158 #define quote_str(s) to_str(s)
159
160 static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
161 #endif /* STBLINUX */
162
163 #if defined(DHD_TCP_WINSIZE_ADJUST)
164 #define MIN_TCP_WIN_SIZE 18000
165 #define WIN_SIZE_SCALE_FACTOR 2
166 #define MAX_TARGET_PORTS 5
167
168 static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
169 static uint dhd_use_tcp_window_size_adjust = FALSE;
170 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
171 #endif /* DHD_TCP_WINSIZE_ADJUST */
172
173
174 #if defined(OEM_ANDROID) && defined(SOFTAP)
175 extern bool ap_cfg_running;
176 extern bool ap_fw_loaded;
177 #endif
178 extern void extern_wifi_set_enable(int is_on);
179
180 #ifdef ENABLE_ADAPTIVE_SCHED
181 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
182 #ifndef CUSTOM_CPUFREQ_THRESH
183 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
184 #endif /* CUSTOM_CPUFREQ_THRESH */
185 #endif /* ENABLE_ADAPTIVE_SCHED */
186
187 /* enable HOSTIP cache update from the host side when an eth0:N is up */
188 #define AOE_IP_ALIAS_SUPPORT 1
189
190 #ifdef BCM_FD_AGGR
191 #include <bcm_rpc.h>
192 #include <bcm_rpc_tp.h>
193 #endif
194 #ifdef PROP_TXSTATUS
195 #include <wlfc_proto.h>
196 #include <dhd_wlfc.h>
197 #endif
198
199 #if defined(OEM_ANDROID)
200 #include <wl_android.h>
201 #endif
202
203
204 /* Maximum STA per radio */
205 #define DHD_MAX_STA 32
206
207
208 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
209 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
210 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
211
212 #ifdef ARP_OFFLOAD_SUPPORT
213 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
214 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
215 unsigned long event, void *ptr);
216 static struct notifier_block dhd_inetaddr_notifier = {
217 .notifier_call = dhd_inetaddr_notifier_call
218 };
219 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
220 * created in kernel notifier link list (with 'next' pointing to itself)
221 */
222 static bool dhd_inetaddr_notifier_registered = FALSE;
223 #endif /* ARP_OFFLOAD_SUPPORT */
224
225 #ifdef CONFIG_IPV6
226 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
227 unsigned long event, void *ptr);
228 static struct notifier_block dhd_inet6addr_notifier = {
229 .notifier_call = dhd_inet6addr_notifier_call
230 };
231 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
232 * created in kernel notifier link list (with 'next' pointing to itself)
233 */
234 static bool dhd_inet6addr_notifier_registered = FALSE;
235 #endif
236
237 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
238 #include <linux/suspend.h>
239 volatile bool dhd_mmc_suspend = FALSE;
240 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
241 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
242
243 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
244 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
245 #endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
246 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID))
247 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
248 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID)) */
249 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
250 MODULE_LICENSE("GPL and additional rights");
251 #endif /* LinuxVer */
252
253 #if defined(MULTIPLE_SUPPLICANT)
254 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
255 DEFINE_MUTEX(_dhd_sdio_mutex_lock_);
256 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
257 #endif
258
259 #ifdef BCMDBUS
260 #include <dbus.h>
261 extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
262 extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
263 extern void dhd_bus_unregister(void);
264
265 #else
266 #include <dhd_bus.h>
267 #endif /* BCMDBUS */
268
269 #ifdef BCM_FD_AGGR
270 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
271 #else
272 #ifndef PROP_TXSTATUS
273 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
274 #else
275 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
276 #endif
277 #endif /* BCM_FD_AGGR */
278
279 #ifdef PROP_TXSTATUS
280 extern bool dhd_wlfc_skip_fc(void);
281 extern void dhd_wlfc_plat_init(void *dhd);
282 extern void dhd_wlfc_plat_deinit(void *dhd);
283 #endif /* PROP_TXSTATUS */
284
285 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/*
 * Stub for 2.6.15 kernels that do not export print_tainted() to modules;
 * always reports an empty taint string so the driver links cleanly.
 */
const char *
print_tainted(void)
{
	static const char no_taint[] = "";

	return no_taint;
}
291 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
292
293 /* Linux wireless extension support */
294 #if defined(WL_WIRELESS_EXT)
295 #include <wl_iw.h>
296 extern wl_iw_extra_params_t g_wl_iw_params;
297 #endif /* defined(WL_WIRELESS_EXT) */
298
299 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
300 #include <linux/earlysuspend.h>
301 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
302
303 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
304
305 #ifdef PKT_FILTER_SUPPORT
306 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
307 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
308 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
309 #endif
310
311
312 #ifdef READ_MACADDR
313 extern int dhd_read_macaddr(struct dhd_info *dhd);
314 #else
315 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
316 #endif
317 #ifdef WRITE_MACADDR
318 extern int dhd_write_macaddr(struct ether_addr *mac);
319 #else
320 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
321 #endif
322
323
324 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
325 int argos_register_notifier_init(struct net_device *net);
326 int argos_register_notifier_deinit(void);
327
328 extern int sec_argos_register_notifier(struct notifier_block *n, char *label);
329 extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label);
330
331 static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
332 unsigned long speed, void *v);
333
334 static struct notifier_block argos_wifi = {
335 .notifier_call = argos_status_notifier_wifi_cb,
336 };
337
338 typedef struct {
339 struct net_device *wlan_primary_netdev;
340 int argos_rps_cpus_enabled;
341 } argos_rps_ctrl;
342
343 argos_rps_ctrl argos_rps_ctrl_data;
344 #define RPS_TPUT_THRESHOLD 300
345
346 #endif /* ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER */
347 #if defined(SOFTAP_TPUT_ENHANCE)
348 extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
349 extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
350 #endif /* SOFTAP_TPUT_ENHANCE */
351
352
353 #if defined(DHD_DEBUG)
354 static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
355 #endif /* DHD_DEBUG */
356
357 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
358 static struct notifier_block dhd_reboot_notifier = {
359 .notifier_call = dhd_reboot_callback,
360 .priority = 1,
361 };
362
363
/* Describes a dongle interface add/delete event: the firmware event payload
 * plus the requested linux interface name and MAC address. Queued on a list
 * so it can be processed outside the event-handler context.
 */
typedef struct dhd_if_event {
	struct list_head list;     /* linkage for the pending-event list */
	wl_event_data_if_t event;  /* interface event data from the firmware event */
	char name[IFNAMSIZ+1];     /* linux interface name (NUL-terminated) */
	uint8 mac[ETHER_ADDR_LEN]; /* MAC address for the interface */
} dhd_if_event_t;
370
/* Interface control information: per-interface (per-bsscfg) state kept by
 * the host driver for each virtual interface it exposes to the stack.
 */
typedef struct dhd_if {
	struct dhd_info *info;         /* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net;        /* linux net device for this interface */
	int idx;                       /* iface idx in dongle */
	uint subunit;                  /* subunit */
	uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
	bool set_macaddress;           /* flags a pending MAC address update (handled deferred) */
	bool set_multicast;            /* flags a pending multicast list update (handled deferred) */
	uint8 bssidx;                  /* bsscfg index for the interface */
	bool attached;                 /* Delayed attachment when unset */
	bool txflowcontrol;            /* Per interface flow control indicator */
	char name[IFNAMSIZ+1];         /* linux interface name */
	struct net_device_stats stats; /* interface stats reported to the network stack */
#ifdef DHD_WMF
	dhd_wmf_t wmf;                 /* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
	struct list_head sta_list;     /* sll of associated stations */
	spinlock_t sta_list_lock;      /* lock for manipulating sll */
#endif /* PCIE_FULL_DONGLE */
	uint32 ap_isolate;             /* ap-isolation settings */
} dhd_if_t;
395
396 #ifdef WLMEDIA_HTSF
397 typedef struct {
398 uint32 low;
399 uint32 high;
400 } tsf_t;
401
402 typedef struct {
403 uint32 last_cycle;
404 uint32 last_sec;
405 uint32 last_tsf;
406 uint32 coef; /* scaling factor */
407 uint32 coefdec1; /* first decimal */
408 uint32 coefdec2; /* second decimal */
409 } htsf_t;
410
411 typedef struct {
412 uint32 t1;
413 uint32 t2;
414 uint32 t3;
415 uint32 t4;
416 } tstamp_t;
417
418 static tstamp_t ts[TSMAX];
419 static tstamp_t maxdelayts;
420 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
421
422 #endif /* WLMEDIA_HTSF */
423
424 struct ipv6_work_info_t {
425 uint8 if_idx;
426 char ipv6_addr[16];
427 unsigned long event;
428 };
429
430
431 #if defined(DHD_DEBUG)
432 typedef struct dhd_dump {
433 uint8 *buf;
434 int bufsize;
435 } dhd_dump_t;
436 #endif /* DHD_DEBUG */
437
/* When Perimeter locks are deployed, any blocking calls must be preceded
439 * with a PERIM UNLOCK and followed by a PERIM LOCK.
440 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
441 * wait_event_timeout().
442 */
443
444 /* Local private structure (extension of pub) */
445 typedef struct dhd_info {
446 #if defined(WL_WIRELESS_EXT)
447 wl_iw_t iw; /* wireless extensions state (must be first) */
448 #endif /* defined(WL_WIRELESS_EXT) */
449 dhd_pub_t pub;
450 dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
451
452 void *adapter; /* adapter information, interrupt, fw path etc. */
453 char fw_path[PATH_MAX]; /* path to firmware image */
454 char nv_path[PATH_MAX]; /* path to nvram vars file */
455 char conf_path[PATH_MAX]; /* path to config vars file */
456
457 struct semaphore proto_sem;
458 #ifdef PROP_TXSTATUS
459 spinlock_t wlfc_spinlock;
460
461 #ifdef BCMDBUS
462 ulong wlfc_lock_flags;
463 ulong wlfc_pub_lock_flags;
464 #endif
465 #endif /* PROP_TXSTATUS */
466 #ifdef WLMEDIA_HTSF
467 htsf_t htsf;
468 #endif
469 wait_queue_head_t ioctl_resp_wait;
470 wait_queue_head_t d3ack_wait;
471 uint32 default_wd_interval;
472
473 struct timer_list timer;
474 bool wd_timer_valid;
475 struct tasklet_struct tasklet;
476 spinlock_t sdlock;
477 spinlock_t txqlock;
478 spinlock_t dhd_lock;
479 #ifdef BCMDBUS
480 ulong txqlock_flags;
481 #else
482
483 struct semaphore sdsem;
484 tsk_ctl_t thr_dpc_ctl;
485 tsk_ctl_t thr_wdt_ctl;
486 #endif /* BCMDBUS */
487
488 tsk_ctl_t thr_rxf_ctl;
489 spinlock_t rxf_lock;
490 bool rxthread_enabled;
491
492 /* Wakelocks */
493 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
494 struct wake_lock wl_wifi; /* Wifi wakelock */
495 struct wake_lock wl_rxwake; /* Wifi rx wakelock */
496 struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
497 struct wake_lock wl_wdwake; /* Wifi wd wakelock */
498 #ifdef BCMPCIE_OOB_HOST_WAKE
499 struct wake_lock wl_intrwake; /* Host wakeup wakelock */
500 #endif /* BCMPCIE_OOB_HOST_WAKE */
501 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
502
503 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
504 /* net_device interface lock, prevent race conditions among net_dev interface
505 * calls and wifi_on or wifi_off
506 */
507 struct mutex dhd_net_if_mutex;
508 struct mutex dhd_suspend_mutex;
509 #endif
510 spinlock_t wakelock_spinlock;
511 uint32 wakelock_counter;
512 int wakelock_wd_counter;
513 int wakelock_rx_timeout_enable;
514 int wakelock_ctrl_timeout_enable;
515 bool waive_wakelock;
516 uint32 wakelock_before_waive;
517
518 /* Thread to issue ioctl for multicast */
519 wait_queue_head_t ctrl_wait;
520 atomic_t pend_8021x_cnt;
521 dhd_attach_states_t dhd_state;
522 #ifdef SHOW_LOGTRACE
523 dhd_event_log_t event_data;
524 #endif /* SHOW_LOGTRACE */
525
526 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
527 struct early_suspend early_suspend;
528 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
529
530 #ifdef ARP_OFFLOAD_SUPPORT
531 u32 pend_ipaddr;
532 #endif /* ARP_OFFLOAD_SUPPORT */
533 #ifdef BCM_FD_AGGR
534 void *rpc_th;
535 void *rpc_osh;
536 struct timer_list rpcth_timer;
537 bool rpcth_timer_active;
538 uint8 fdaggr;
539 #endif
540 #ifdef DHDTCPACK_SUPPRESS
541 spinlock_t tcpack_lock;
542 #endif /* DHDTCPACK_SUPPRESS */
543 void *dhd_deferred_wq;
544 #ifdef DEBUG_CPU_FREQ
545 struct notifier_block freq_trans;
546 int __percpu *new_freq;
547 #endif
548 unsigned int unit;
549 struct notifier_block pm_notifier;
550 } dhd_info_t;
551
552 #define DHDIF_FWDER(dhdif) FALSE
553
554 /* Flag to indicate if we should download firmware on driver load */
555 uint dhd_download_fw_on_driverload = TRUE;
556
557 /* Definitions to provide path to the firmware and nvram
558 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
559 */
560 char firmware_path[MOD_PARAM_PATHLEN];
561 char nvram_path[MOD_PARAM_PATHLEN];
562 char config_path[MOD_PARAM_PATHLEN];
563
564 /* backup buffer for firmware and nvram path */
565 char fw_bak_path[MOD_PARAM_PATHLEN];
566 char nv_bak_path[MOD_PARAM_PATHLEN];
567
/* information string to keep firmware, chip, and chip-revision version info visible in the log */
569 char info_string[MOD_PARAM_INFOLEN];
570 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
571 int op_mode = 0;
572 int disable_proptx = 0;
573 module_param(op_mode, int, 0644);
574 #if defined(OEM_ANDROID)
575 extern int wl_control_wl_start(struct net_device *dev);
576 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
577 struct semaphore dhd_registration_sem;
578 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
579 #endif /* defined(OEM_ANDROID) */
580
581 /* deferred handlers */
582 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
583 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
584 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
585 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
586 #ifdef CONFIG_IPV6
587 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
588 #endif
589
590 #ifdef WL_CFG80211
591 extern void dhd_netdev_free(struct net_device *ndev);
592 #endif /* WL_CFG80211 */
593
594 /* Error bits */
595 module_param(dhd_msg_level, int, 0);
596 #if defined(WL_WIRELESS_EXT)
597 module_param(iw_msg_level, int, 0);
598 #endif
599 #ifdef WL_CFG80211
600 module_param(wl_dbg_level, int, 0);
601 #endif
602 module_param(android_msg_level, int, 0);
603 module_param(config_msg_level, int, 0);
604
605 #ifdef ARP_OFFLOAD_SUPPORT
606 /* ARP offload enable */
607 uint dhd_arp_enable = TRUE;
608 module_param(dhd_arp_enable, uint, 0);
609
610 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
611
612 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
613
614 module_param(dhd_arp_mode, uint, 0);
615 #endif /* ARP_OFFLOAD_SUPPORT */
616
617 #if !defined(BCMDBUS)||defined(OEM_ANDROID)
618 /* Disable Prop tx */
619 module_param(disable_proptx, int, 0644);
620 /* load firmware and/or nvram values from the filesystem */
621 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
622 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
623 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
624
625 /* Watchdog interval */
626
627 /* extend watchdog expiration to 2 seconds when DPC is running */
628 #define WATCHDOG_EXTEND_INTERVAL (2000)
629
630 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
631 module_param(dhd_watchdog_ms, uint, 0);
632
633 #if defined(DHD_DEBUG)
634 /* Console poll interval */
635 #if defined(OEM_ANDROID)
636 uint dhd_console_ms = 0;
637 #else
638 uint dhd_console_ms = 250;
639 #endif
640 module_param(dhd_console_ms, uint, 0644);
641 #endif /* defined(DHD_DEBUG) */
642
643
644 uint dhd_slpauto = TRUE;
645 module_param(dhd_slpauto, uint, 0);
646
647 #ifdef PKT_FILTER_SUPPORT
648 /* Global Pkt filter enable control */
649 uint dhd_pkt_filter_enable = TRUE;
650 module_param(dhd_pkt_filter_enable, uint, 0);
651 #endif
652
653 /* Pkt filter init setup */
654 uint dhd_pkt_filter_init = 0;
655 module_param(dhd_pkt_filter_init, uint, 0);
656
657 /* Pkt filter mode control */
658 uint dhd_master_mode = FALSE;
659 module_param(dhd_master_mode, uint, 0);
660
661 int dhd_watchdog_prio = 0;
662 module_param(dhd_watchdog_prio, int, 0);
663
664 /* DPC thread priority */
665 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
666 module_param(dhd_dpc_prio, int, 0);
667
668 /* RX frame thread priority */
669 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
670 module_param(dhd_rxf_prio, int, 0);
671
672 int passive_channel_skip = 0;
673 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
674
675 #if !defined(BCMDHDUSB)
676 extern int dhd_dongle_ramsize;
677 module_param(dhd_dongle_ramsize, int, 0);
678 #endif /* BCMDHDUSB */
679 #endif /* BCMDBUS */
680
681 /* Keep track of number of instances */
682 static int dhd_found = 0;
683 static int instance_base = 0; /* Starting instance number */
684 module_param(instance_base, int, 0644);
685
686
687
688
/* DHD Perimeter lock only used in router with bypass forwarding. */
690 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
691 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
692 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
693 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
694 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
695
696 #ifdef PCIE_FULL_DONGLE
697 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
698 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
699 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
700 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
701 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
702 #endif /* PCIE_FULL_DONGLE */
703
704 /* Control fw roaming */
705 #ifdef OEM_ANDROID
706 uint dhd_roam_disable = 0;
707 #else
708 uint dhd_roam_disable = 1;
709 #endif
710
711 /* Control radio state */
712 uint dhd_radio_up = 1;
713
/* Network interface name */
715 char iface_name[IFNAMSIZ] = {'\0'};
716 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
717
718 /* The following are specific to the SDIO dongle */
719
720 /* IOCTL response timeout */
721 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
722
723 /* Idle timeout for backplane clock */
724 int dhd_idletime = DHD_IDLETIME_TICKS;
725 module_param(dhd_idletime, int, 0);
726
727 /* Use polling */
728 uint dhd_poll = FALSE;
729 module_param(dhd_poll, uint, 0);
730
731 /* Use interrupts */
732 uint dhd_intr = TRUE;
733 module_param(dhd_intr, uint, 0);
734
735 /* SDIO Drive Strength (in milliamps) */
736 uint dhd_sdiod_drive_strength = 6;
737 module_param(dhd_sdiod_drive_strength, uint, 0);
738
739
740
741
742 #if defined(BCMSUP_4WAY_HANDSHAKE)
743 /* Use in dongle supplicant for 4-way handshake */
744 uint dhd_use_idsup = 0;
745 module_param(dhd_use_idsup, uint, 0);
746 #endif /* BCMSUP_4WAY_HANDSHAKE */
747
748 extern char dhd_version[];
749
750 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
751 static void dhd_net_if_lock_local(dhd_info_t *dhd);
752 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
753 static void dhd_suspend_lock(dhd_pub_t *dhdp);
754 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
755
756 #ifdef WLMEDIA_HTSF
757 void htsf_update(dhd_info_t *dhd, void *data);
758 tsf_t prev_tsf, cur_tsf;
759
760 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
761 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
762 static void dhd_dump_latency(void);
763 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
764 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
765 static void dhd_dump_htsfhisto(histo_t *his, char *s);
766 #endif /* WLMEDIA_HTSF */
767
768 /* Monitor interface */
769 int dhd_monitor_init(void *dhd_pub);
770 int dhd_monitor_uninit(void);
771
772
773 #if defined(WL_WIRELESS_EXT)
774 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
775 #endif /* defined(WL_WIRELESS_EXT) */
776
777 #ifndef BCMDBUS
778 static void dhd_dpc(ulong data);
779 #endif
780 /* forward decl */
781 extern int dhd_wait_pend8021x(struct net_device *dev);
782 void dhd_os_wd_timer_extend(void *bus, bool extend);
783
784 #ifdef TOE
785 #ifndef BDC
786 #error TOE requires BDC
787 #endif /* !BDC */
788 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
789 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
790 #endif /* TOE */
791 #ifdef BCMDBUS
792 int dhd_dbus_txdata(dhd_pub_t *dhdp, void *pktbuf);
793 #endif
794
795 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
796 wl_event_msg_t *event_ptr, void **data_ptr);
797 #ifdef DHD_UNICAST_DHCP
798 static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
799 static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
800 int *len_ptr, uint8 *prot_ptr);
801 static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
802 int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
803
804 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
805 #endif /* DHD_UNICAST_DHCP */
806 #ifdef DHD_L2_FILTER
807 static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
808 #endif
809 #if defined(CONFIG_PM_SLEEP)
810 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
811 {
812 int ret = NOTIFY_DONE;
813 bool suspend = FALSE;
814 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
815
816 BCM_REFERENCE(dhdinfo);
817 switch (action) {
818 case PM_HIBERNATION_PREPARE:
819 case PM_SUSPEND_PREPARE:
820 suspend = TRUE;
821 break;
822 case PM_POST_HIBERNATION:
823 case PM_POST_SUSPEND:
824 suspend = FALSE;
825 break;
826 }
827
828 #if defined(SUPPORT_P2P_GO_PS)
829 #ifdef PROP_TXSTATUS
830 if (suspend) {
831 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
832 dhd_wlfc_suspend(&dhdinfo->pub);
833 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
834 } else
835 dhd_wlfc_resume(&dhdinfo->pub);
836 #endif
837 #endif /* defined(SUPPORT_P2P_GO_PS) */
838
839 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
840 KERNEL_VERSION(2, 6, 39))
841 dhd_mmc_suspend = suspend;
842 smp_mb();
843 #endif
844
845 return ret;
846 }
847
/* PM notifier block: routes kernel suspend/resume notifications to
 * dhd_pm_callback(). Priority 10 runs it before default (priority 0) notifiers.
 */
static struct notifier_block dhd_pm_notifier = {
	.notifier_call = dhd_pm_callback,
	.priority = 10
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in kernel notifier link list (with 'next' pointing to itself)
 */
static bool dhd_pm_notifier_registered = FALSE;
856
857 extern int register_pm_notifier(struct notifier_block *nb);
858 extern int unregister_pm_notifier(struct notifier_block *nb);
859 #endif /* CONFIG_PM_SLEEP */
860
861 /* Request scheduling of the bus rx frame */
862 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
863 static void dhd_os_rxflock(dhd_pub_t *pub);
864 static void dhd_os_rxfunlock(dhd_pub_t *pub);
865
/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
	dhd_if_t   * ifp; /* cached pointer to dhd_if in netdevice priv */
	int          ifidx; /* interface index */
} dhd_dev_priv_t;

/* Size of, and field accessors for, the private area embedded in each net_device. */
#define DHD_DEV_PRIV_SIZE  (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)  ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev)  (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev)   (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
878
879 /** Clear the dhd net_device's private structure. */
880 static inline void
881 dhd_dev_priv_clear(struct net_device * dev)
882 {
883 dhd_dev_priv_t * dev_priv;
884 ASSERT(dev != (struct net_device *)NULL);
885 dev_priv = DHD_DEV_PRIV(dev);
886 dev_priv->dhd = (dhd_info_t *)NULL;
887 dev_priv->ifp = (dhd_if_t *)NULL;
888 dev_priv->ifidx = DHD_BAD_IF;
889 }
890
891 /** Setup the dhd net_device's private structure. */
892 static inline void
893 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
894 int ifidx)
895 {
896 dhd_dev_priv_t * dev_priv;
897 ASSERT(dev != (struct net_device *)NULL);
898 dev_priv = DHD_DEV_PRIV(dev);
899 dev_priv->dhd = dhd;
900 dev_priv->ifp = ifp;
901 dev_priv->ifidx = ifidx;
902 }
903
904 #ifdef PCIE_FULL_DONGLE
905
906 /** Dummy objects are defined with state representing bad|down.
907 * Performance gains from reducing branch conditionals, instruction parallelism,
908 * dual issue, reducing load shadows, avail of larger pipelines.
909 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
910 * is accessed via the dhd_sta_t.
911 */
912
913 /* Dummy dhd_info object */
/* Dummy dhd_info object: its pub member reports !up and DHD_BUS_DOWN so code
 * that reaches it through DHD_PUB_NULL observes a safe "bad/down" state.
 */
dhd_info_t dhd_info_null = {
	.pub = {
	         .info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
	         .tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
	         .up = FALSE,
	         .busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object, permanently in the unregistered state */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object, pointing back at the dummy info/netdev objects above */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

/* Sentinel meaning "no station" */
#define DHD_STA_NULL ((dhd_sta_t *)NULL)
945
946 /** Interface STA list management. */
947
948 /** Fetch the dhd_if object, given the interface index in the dhd. */
949 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
950
951 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
952 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
953 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
954
955 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
956 static void dhd_if_del_sta_list(dhd_if_t * ifp);
957 static void dhd_if_flush_sta(dhd_if_t * ifp);
958
959 /* Construct/Destruct a sta pool. */
960 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
961 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
962 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
963
964
965 /* Return interface pointer */
966 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
967 {
968 ASSERT(ifidx < DHD_MAX_IFS);
969
970 if (ifidx >= DHD_MAX_IFS)
971 return NULL;
972
973 return dhdp->info->iflist[ifidx];
974 }
975
976 /** Reset a dhd_sta object and free into the dhd pool. */
977 static void
978 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
979 {
980 int prio;
981
982 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
983
984 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
985 id16_map_free(dhdp->staid_allocator, sta->idx);
986 for (prio = 0; prio < (int)NUMPRIO; prio++)
987 sta->flowid[prio] = FLOWID_INVALID;
988 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
989 sta->ifidx = DHD_BAD_IF;
990 bzero(sta->ea.octet, ETHER_ADDR_LEN);
991 INIT_LIST_HEAD(&sta->list);
992 sta->idx = ID16_INVALID; /* implying free */
993 }
994
995 /** Allocate a dhd_sta object from the dhd pool. */
996 static dhd_sta_t *
997 dhd_sta_alloc(dhd_pub_t * dhdp)
998 {
999 uint16 idx;
1000 dhd_sta_t * sta;
1001 dhd_sta_pool_t * sta_pool;
1002
1003 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
1004
1005 idx = id16_map_alloc(dhdp->staid_allocator);
1006 if (idx == ID16_INVALID) {
1007 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
1008 return DHD_STA_NULL;
1009 }
1010
1011 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
1012 sta = &sta_pool[idx];
1013
1014 ASSERT((sta->idx == ID16_INVALID) &&
1015 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
1016 sta->idx = idx; /* implying allocated */
1017
1018 return sta;
1019 }
1020
1021 /** Delete all STAs in an interface's STA list. */
1022 static void
1023 dhd_if_del_sta_list(dhd_if_t *ifp)
1024 {
1025 dhd_sta_t *sta, *next;
1026 unsigned long flags;
1027
1028 DHD_IF_STA_LIST_LOCK(ifp, flags);
1029
1030 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1031 list_del(&sta->list);
1032 dhd_sta_free(&ifp->info->pub, sta);
1033 }
1034
1035 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1036
1037 return;
1038 }
1039
/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database.
 * Intentionally empty in this build: there is no GMAC3/WOFA forwarder here,
 * but the hook is kept so common code can call it unconditionally.
 */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
}
1045
1046 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1047 static int
1048 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1049 {
1050 int idx, sta_pool_memsz;
1051 dhd_sta_t * sta;
1052 dhd_sta_pool_t * sta_pool;
1053 void * staid_allocator;
1054
1055 ASSERT(dhdp != (dhd_pub_t *)NULL);
1056 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1057
1058 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1059 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1060 if (staid_allocator == NULL) {
1061 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1062 return BCME_ERROR;
1063 }
1064
1065 /* Pre allocate a pool of dhd_sta objects (one extra). */
1066 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1067 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1068 if (sta_pool == NULL) {
1069 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1070 id16_map_fini(dhdp->osh, staid_allocator);
1071 return BCME_ERROR;
1072 }
1073
1074 dhdp->sta_pool = sta_pool;
1075 dhdp->staid_allocator = staid_allocator;
1076
1077 /* Initialize all sta(s) for the pre-allocated free pool. */
1078 bzero((uchar *)sta_pool, sta_pool_memsz);
1079 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1080 sta = &sta_pool[idx];
1081 sta->idx = id16_map_alloc(staid_allocator);
1082 ASSERT(sta->idx <= max_sta);
1083 }
1084 /* Now place them into the pre-allocated free pool. */
1085 for (idx = 1; idx <= max_sta; idx++) {
1086 sta = &sta_pool[idx];
1087 dhd_sta_free(dhdp, sta);
1088 }
1089
1090 return BCME_OK;
1091 }
1092
1093 /** Destruct the pool of dhd_sta_t objects.
1094 * Caller must ensure that no STA objects are currently associated with an if.
1095 */
1096 static void
1097 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1098 {
1099 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1100
1101 if (sta_pool) {
1102 int idx;
1103 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1104 for (idx = 1; idx <= max_sta; idx++) {
1105 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1106 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1107 }
1108 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1109 dhdp->sta_pool = NULL;
1110 }
1111
1112 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1113 dhdp->staid_allocator = NULL;
1114 }
1115
1116 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1117 static void
1118 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1119 {
1120 int idx, sta_pool_memsz;
1121 dhd_sta_t * sta;
1122 dhd_sta_pool_t * sta_pool;
1123 void *staid_allocator;
1124
1125 if (!dhdp) {
1126 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1127 return;
1128 }
1129
1130 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1131 staid_allocator = dhdp->staid_allocator;
1132
1133 if (!sta_pool) {
1134 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1135 return;
1136 }
1137
1138 if (!staid_allocator) {
1139 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1140 return;
1141 }
1142
1143 /* clear free pool */
1144 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1145 bzero((uchar *)sta_pool, sta_pool_memsz);
1146
1147 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1148 id16_map_clear(staid_allocator, max_sta, 1);
1149
1150 /* Initialize all sta(s) for the pre-allocated free pool. */
1151 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1152 sta = &sta_pool[idx];
1153 sta->idx = id16_map_alloc(staid_allocator);
1154 ASSERT(sta->idx <= max_sta);
1155 }
1156 /* Now place them into the pre-allocated free pool. */
1157 for (idx = 1; idx <= max_sta; idx++) {
1158 sta = &sta_pool[idx];
1159 dhd_sta_free(dhdp, sta);
1160 }
1161 }
1162
1163 /** Find STA with MAC address ea in an interface's STA list. */
1164 dhd_sta_t *
1165 dhd_find_sta(void *pub, int ifidx, void *ea)
1166 {
1167 dhd_sta_t *sta, *next;
1168 dhd_if_t *ifp;
1169 unsigned long flags;
1170
1171 ASSERT(ea != NULL);
1172 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1173 if (ifp == NULL)
1174 return DHD_STA_NULL;
1175
1176 DHD_IF_STA_LIST_LOCK(ifp, flags);
1177
1178 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1179 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1180 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1181 return sta;
1182 }
1183 }
1184
1185 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1186
1187 return DHD_STA_NULL;
1188 }
1189
1190 /** Add STA into the interface's STA list. */
1191 dhd_sta_t *
1192 dhd_add_sta(void *pub, int ifidx, void *ea)
1193 {
1194 dhd_sta_t *sta;
1195 dhd_if_t *ifp;
1196 unsigned long flags;
1197
1198 ASSERT(ea != NULL);
1199 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1200 if (ifp == NULL)
1201 return DHD_STA_NULL;
1202
1203 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1204 if (sta == DHD_STA_NULL) {
1205 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1206 return DHD_STA_NULL;
1207 }
1208
1209 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1210
1211 /* link the sta and the dhd interface */
1212 sta->ifp = ifp;
1213 sta->ifidx = ifidx;
1214 INIT_LIST_HEAD(&sta->list);
1215
1216 DHD_IF_STA_LIST_LOCK(ifp, flags);
1217
1218 list_add_tail(&sta->list, &ifp->sta_list);
1219
1220
1221 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1222
1223 return sta;
1224 }
1225
1226 /** Delete STA from the interface's STA list. */
1227 void
1228 dhd_del_sta(void *pub, int ifidx, void *ea)
1229 {
1230 dhd_sta_t *sta, *next;
1231 dhd_if_t *ifp;
1232 unsigned long flags;
1233
1234 ASSERT(ea != NULL);
1235 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1236 if (ifp == NULL)
1237 return;
1238
1239 DHD_IF_STA_LIST_LOCK(ifp, flags);
1240
1241 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1242 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1243 list_del(&sta->list);
1244 dhd_sta_free(&ifp->info->pub, sta);
1245 }
1246 }
1247
1248 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1249
1250 return;
1251 }
1252
1253 /** Add STA if it doesn't exist. Not reentrant. */
1254 dhd_sta_t*
1255 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1256 {
1257 dhd_sta_t *sta;
1258
1259 sta = dhd_find_sta(pub, ifidx, ea);
1260
1261 if (!sta) {
1262 /* Add entry */
1263 sta = dhd_add_sta(pub, ifidx, ea);
1264 }
1265
1266 return sta;
1267 }
#else
/* Non PCIE_FULL_DONGLE build: per-interface STA tracking is not used, so
 * provide no-op stubs with the same signatures to keep callers unchanged. */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */
1277
1278
1279 /* Returns dhd iflist index correspondig the the bssidx provided by apps */
1280 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1281 {
1282 dhd_if_t *ifp;
1283 dhd_info_t *dhd = dhdp->info;
1284 int i;
1285
1286 ASSERT(bssidx < DHD_MAX_IFS);
1287 ASSERT(dhdp);
1288
1289 for (i = 0; i < DHD_MAX_IFS; i++) {
1290 ifp = dhd->iflist[i];
1291 if (ifp && (ifp->bssidx == bssidx)) {
1292 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1293 ifp->name, bssidx, i));
1294 break;
1295 }
1296 }
1297 return i;
1298 }
1299
/* Push one received skb onto the rx-frame ring (skbbuf[], MAXSKBPEND slots,
 * power-of-two so indices wrap with a mask). Producer side of the ring; the
 * consumer is dhd_rxf_dequeue(). Returns BCME_OK on success, BCME_BUSY
 * (RXF_DEQUEUE_ON_BUSY builds) or BCME_ERROR when the next slot is still
 * occupied, i.e. the rx-frame thread has not consumed it yet.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	/* A non-NULL slot means the ring is full: the consumer lags behind. */
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* Publish the skb, then advance the store index (mask = ring wrap). */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
1340
1341 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1342 {
1343 uint32 store_idx;
1344 uint32 sent_idx;
1345 void *skb;
1346
1347 dhd_os_rxflock(dhdp);
1348
1349 store_idx = dhdp->store_idx;
1350 sent_idx = dhdp->sent_idx;
1351 skb = dhdp->skbbuf[sent_idx];
1352
1353 if (skb == NULL) {
1354 dhd_os_rxfunlock(dhdp);
1355 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1356 store_idx, sent_idx));
1357 return NULL;
1358 }
1359
1360 dhdp->skbbuf[sent_idx] = NULL;
1361 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1362
1363 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1364 skb, sent_idx));
1365
1366 dhd_os_rxfunlock(dhdp);
1367
1368 return skb;
1369 }
1370
1371 #ifdef OEM_ANDROID
1372 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1373 {
1374 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1375
1376 if (prepost) { /* pre process */
1377 dhd_read_macaddr(dhd);
1378 } else { /* post process */
1379 dhd_write_macaddr(&dhd->pub.mac);
1380 }
1381
1382 return 0;
1383 }
1384 #endif /* OEM_ANDROID */
1385
1386 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
1387 static bool
1388 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
1389 {
1390 bool _apply = FALSE;
1391 /* In case of IBSS mode, apply arp pkt filter */
1392 if (op_mode & DHD_FLAG_IBSS_MODE) {
1393 _apply = TRUE;
1394 goto exit;
1395 }
1396 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1397 if ((dhd->arp_version == 1) &&
1398 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
1399 _apply = TRUE;
1400 goto exit;
1401 }
1402
1403 exit:
1404 return _apply;
1405 }
1406 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1407
1408 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1409 #ifdef PKT_FILTER_SUPPORT
1410 void
1411 dhd_set_packet_filter_mode(struct net_device *dev, char *command)
1412 {
1413 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1414
1415 dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
1416 }
1417
1418 int
1419 dhd_set_packet_filter_ports(struct net_device *dev, char *command)
1420 {
1421 int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
1422 uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
1423 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1424 dhd_pub_t *dhdp = &dhdi->pub;
1425 char iovbuf[WLC_IOCTL_SMLEN];
1426
1427 /* get action */
1428 action = bcm_strtoul(command, &command, 0);
1429 if (action > PKT_FILTER_PORTS_MAX)
1430 return BCME_BADARG;
1431
1432 if (action == PKT_FILTER_PORTS_LOOPBACK) {
1433 /* echo the loopback value if port filter is supported else error */
1434 bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
1435 error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
1436 if (error < 0) {
1437 DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
1438 return error;
1439 }
1440
1441 if (strstr(iovbuf, "pktfltr2"))
1442 return bcm_strtoul(command, &command, 0);
1443 else {
1444 DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
1445 return BCME_UNSUPPORTED;
1446 }
1447 }
1448
1449 if (action == PKT_FILTER_PORTS_CLEAR) {
1450 /* action 0 is clear all ports */
1451 dhdp->pkt_filter_ports_count = 0;
1452 bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
1453 }
1454 else {
1455 portnum = bcm_strtoul(command, &command, 0);
1456 if (portnum == 0) {
1457 /* no ports to add or remove */
1458 return BCME_BADARG;
1459 }
1460
1461 /* get configured ports */
1462 count = dhdp->pkt_filter_ports_count;
1463 ports = dhdp->pkt_filter_ports;
1464
1465 if (action == PKT_FILTER_PORTS_ADD) {
1466 /* action 1 is add ports */
1467
1468 /* copy new ports */
1469 while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
1470 for (i = 0; i < count; i++) {
1471 /* duplicate port */
1472 if (portnum == ports[i])
1473 break;
1474 }
1475 if (portnum != ports[i])
1476 ports[count++] = portnum;
1477 portnum = bcm_strtoul(command, &command, 0);
1478 }
1479 } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
1480 /* action 2 is remove ports */
1481 bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
1482 get_count = count;
1483
1484 while (portnum != 0) {
1485 count = 0;
1486 for (i = 0; i < get_count; i++) {
1487 if (portnum != get_ports[i])
1488 ports[count++] = get_ports[i];
1489 }
1490 get_count = count;
1491 bcopy(ports, get_ports, count * sizeof(uint16));
1492 portnum = bcm_strtoul(command, &command, 0);
1493 }
1494 }
1495 dhdp->pkt_filter_ports_count = count;
1496 }
1497 return error;
1498 }
1499
/* Enable or disable the firmware port filter.
 * Builds a wl_pkt_filter_ports_t list from dhdp->pkt_filter_ports and pushes
 * it via the "pkt_filter_ports" iovar, then updates the global
 * dhd_master_mode bits and pushes those via "pkt_filter_mode".
 * enable TRUE  -> program the configured ports (only if the device's
 *                 pkt_filter_mode has PKT_FILTER_MODE_PORTS_ONLY set);
 * enable FALSE -> clear the port list and restore FORWARD_ON_MATCH.
 */
static void
dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
{
	int error = 0;
	wl_pkt_filter_ports_t *portlist = NULL;
	/* header string + fixed header + max port array, reused for both bufs */
	const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
		+ WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
	char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
	char iovbuf[pkt_filter_ports_buf_len];

	DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
		enable, dhd->in_suspend, dhd->pkt_filter_mode,
		dhd->pkt_filter_ports_count));

	bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
	portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
	portlist->version = WL_PKT_FILTER_PORTS_VERSION;
	portlist->reserved = 0;

	if (enable) {
		/* device not configured for ports-only filtering: nothing to do */
		if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
			return;

		/* enable port filter */
		dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
		if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
			/* whitelist mode: FORWARD_ON_MATCH */
			dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
		else
			/* blacklist mode: DISCARD_ON_MATCH */
			dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;

		portlist->count = dhd->pkt_filter_ports_count;
		bcopy(dhd->pkt_filter_ports, portlist->ports,
			dhd->pkt_filter_ports_count * sizeof(uint16));
	} else {
		/* disable port filter */
		portlist->count = 0;
		dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
		dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
	}

	DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
		portlist->count));

	/* update ports */
	bcm_mkiovar("pkt_filter_ports",
		(char*)portlist,
		(WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
		iovbuf, sizeof(iovbuf));
	error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (error < 0)
		DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));

	/* update mode */
	bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
		sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
	error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (error < 0)
		DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));

	return;
}
1563 #endif /* PKT_FILTER_SUPPORT */
1564 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
1565
1566 void dhd_set_packet_filter(dhd_pub_t *dhd)
1567 {
1568 #ifdef PKT_FILTER_SUPPORT
1569 int i;
1570
1571 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1572 if (dhd_pkt_filter_enable) {
1573 for (i = 0; i < dhd->pktfilter_count; i++) {
1574 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1575 }
1576 }
1577 #endif /* PKT_FILTER_SUPPORT */
1578 }
1579
/* Enable (value=1) or disable (value=0) all configured packet filters.
 * When enabling, filters are only applied in STA mode with no DHCP exchange
 * in progress; the ARP filter slot (DHD_ARP_FILTER_NUM) is additionally
 * gated by _turn_on_arp_filter(). Disabling is unconditional.
 */
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));

#if defined(CUSTOM_PLATFORM_NV_TEGRA)
	dhd_enable_packet_filter_ports(dhd, value);
#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */

	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
			/* skip the ARP slot unless the op_mode wants it */
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
#endif /* PKT_FILTER_SUPPORT */
}
1612
/* Apply (value!=0) or remove (value==0) the extra power-save settings around
 * kernel suspend/resume: PM mode, packet filters, bcn_li_dtim (beacon/DTIM
 * skipping), and on Android also roam_off and the IPv6 RA filter.
 * Only acts while the interface is up; serialized by dhd_suspend_lock().
 * Returns 0, or -ENODEV when dhd is NULL.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
	int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
	/* wl_pkt_filter_enable_t enable_parm; */
	char iovbuf[32];
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
#ifdef OEM_ANDROID
	uint roamvar = dhd->conf->roam_off_suspend;
	uint nd_ra_filter = 0;
	int ret = 0;
#endif /* OEM_ANDROID */

	if (!dhd)
		return -ENODEV;

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
#ifndef SUPPORT_PM2_ONLY
	/* a non-negative conf->pm overrides the PM_MAX default */
	if (dhd->conf->pm >= 0)
		power_mode = dhd->conf->pm;
#endif /* SUPPORT_PM2_ONLY */
	if (dhd->up) {
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

			/* Enable packet filter, only allow unicast packet to send up */
			dhd_enable_packet_filter(1, dhd);

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_ditm on WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));
			if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
				TRUE, 0) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

#ifdef OEM_ANDROID
			/* Disable firmware roaming during suspend */
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#endif /* OEM_ANDROID */
		} else {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
			power_mode = PM_FAST;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */

			/* restore pre-suspend setting for dtim_skip
			 * (bcn_li_dtim is still 0 here = no DTIM skipping) */
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));

			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#ifdef OEM_ANDROID
			roamvar = dhd_roam_disable;
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
#endif /* OEM_ANDROID */
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
1735
1736 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
1737 {
1738 dhd_pub_t *dhdp = &dhd->pub;
1739 int ret = 0;
1740
1741 DHD_OS_WAKE_LOCK(dhdp);
1742 DHD_PERIM_LOCK(dhdp);
1743
1744 /* Set flag when early suspend was called */
1745 dhdp->in_suspend = val;
1746 if ((force || !dhdp->suspend_disable_flag) &&
1747 dhd_support_sta_mode(dhdp))
1748 {
1749 ret = dhd_set_suspend(val, dhdp);
1750 }
1751
1752 DHD_PERIM_UNLOCK(dhdp);
1753 DHD_OS_WAKE_UNLOCK(dhdp);
1754 return ret;
1755 }
1756
1757 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
1758 static void dhd_early_suspend(struct early_suspend *h)
1759 {
1760 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1761 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1762
1763 if (dhd)
1764 dhd_suspend_resume_helper(dhd, 1, 0);
1765 }
1766
1767 static void dhd_late_resume(struct early_suspend *h)
1768 {
1769 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1770 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1771
1772 if (dhd)
1773 dhd_suspend_resume_helper(dhd, 0, 0);
1774 }
1775 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1776
1777 /*
1778 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1779 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1780 *
1781 * dhd_timeout_start(&tmo, usec);
1782 * while (!dhd_timeout_expired(&tmo))
1783 * if (poll_something())
1784 * break;
1785 * if (dhd_timeout_expired(&tmo))
1786 * fatal();
1787 */
1788
1789 void
1790 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
1791 {
1792 tmo->limit = usec;
1793 tmo->increment = 0;
1794 tmo->elapsed = 0;
1795 tmo->tick = jiffies_to_usecs(1);
1796 }
1797
/* One step of the generalized timeout (see comment block above).
 * Returns 1 once the accumulated delay reaches the limit, else performs one
 * delay step and returns 0. Delays spin with exponential back-off until the
 * step reaches one jiffy (or sleeping is not allowed), then switch to a
 * one-jiffy scheduled sleep.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		/* busy-wait, doubling the step but capping it at one jiffy */
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* step >= one jiffy and sleeping allowed: yield for 1 jiffy
		 * via a private wait queue (nothing ever wakes it early) */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
1831
1832 int
1833 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1834 {
1835 int i = 0;
1836
1837 if (!dhd) {
1838 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
1839 return DHD_BAD_IF;
1840 }
1841 while (i < DHD_MAX_IFS) {
1842 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
1843 return i;
1844 i++;
1845 }
1846
1847 return DHD_BAD_IF;
1848 }
1849
1850 struct net_device * dhd_idx2net(void *pub, int ifidx)
1851 {
1852 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1853 struct dhd_info *dhd_info;
1854
1855 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1856 return NULL;
1857 dhd_info = dhd_pub->info;
1858 if (dhd_info && dhd_info->iflist[ifidx])
1859 return dhd_info->iflist[ifidx]->net;
1860 return NULL;
1861 }
1862
1863 int
1864 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1865 {
1866 int i = DHD_MAX_IFS;
1867
1868 ASSERT(dhd);
1869
1870 if (name == NULL || *name == '\0')
1871 return 0;
1872
1873 while (--i > 0)
1874 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1875 break;
1876
1877 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1878
1879 return i; /* default - the primary interface */
1880 }
1881
1882 int
1883 dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
1884 {
1885 int i = DHD_MAX_IFS;
1886
1887 ASSERT(dhd);
1888
1889 while (--i > 0)
1890 if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
1891 break;
1892
1893 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
1894
1895 return i; /* default - the primary interface */
1896 }
1897
1898 char *
1899 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1900 {
1901 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1902
1903 ASSERT(dhd);
1904
1905 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1906 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1907 return "<if_bad>";
1908 }
1909
1910 if (dhd->iflist[ifidx] == NULL) {
1911 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1912 return "<if_null>";
1913 }
1914
1915 if (dhd->iflist[ifidx]->net)
1916 return dhd->iflist[ifidx]->net->name;
1917
1918 return "<if_none>";
1919 }
1920
1921 uint8 *
1922 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1923 {
1924 int i;
1925 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1926
1927 ASSERT(dhd);
1928 for (i = 0; i < DHD_MAX_IFS; i++)
1929 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1930 return dhd->iflist[i]->mac_addr;
1931
1932 return NULL;
1933 }
1934
#ifdef BCMDBUS
/* DBUS receive / transmit queue depths (number of queued buffers). */
#define DBUS_NRXQ 50
#define DBUS_NTXQ 100
1938
/* DBUS tx-completion callback.
 * @info carries the completed packet. Updates tx counters, then either hands
 * the packet to the wlfc (PROP_TXSTATUS) completion path — which takes
 * ownership when supported — or frees it here.
 */
static void
dhd_dbus_send_complete(void *handle, void *info, int status)
{
	dhd_info_t *dhd = (dhd_info_t *)handle;
	void *pkt = info;

	if ((dhd == NULL) || (pkt == NULL))
		return;

	if (status == DBUS_OK) {
		dhd->pub.dstats.tx_packets++;
	} else {
		DHD_ERROR(("TX error=%d\n", status));
		dhd->pub.dstats.tx_errors++;
	}
#ifdef PROP_TXSTATUS
	/* wlfc takes ownership of the packet unless it is unsupported */
	if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) &&
		(dhd_wlfc_txcomplete(&dhd->pub, pkt, status == 0) != WLFC_UNSUPPORTED)) {
		return;
	}
#endif /* PROP_TXSTATUS */
	PKTFREE(dhd->pub.osh, pkt, TRUE);
}
1962
/* DBUS rx path for a native packet buffer.
 * Strips the protocol header (which also yields the interface index and any
 * firmware reorder info), applies packet reordering when present, and pushes
 * the resulting packet chain up via dhd_rx_frame(). Frees the packet and
 * bumps rx_errors on protocol-header failure.
 */
static void
dhd_dbus_recv_pkt(void *handle, void *pkt)
{
	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
	uint reorder_info_len;
	uint pkt_count;
	dhd_info_t *dhd = (dhd_info_t *)handle;
	int ifidx = 0;

	if (dhd == NULL)
		return;

	/* If the protocol uses a data header, check and remove it */
	if (dhd_prot_hdrpull(&dhd->pub, &ifidx, pkt, reorder_info_buf,
		&reorder_info_len) != 0) {
		DHD_ERROR(("rx protocol error\n"));
		PKTFREE(dhd->pub.osh, pkt, FALSE);
		dhd->pub.rx_errors++;
		return;
	}

	if (reorder_info_len) {
		/* Reordering info from the firmware; may hold the packet
		 * (pkt_count == 0) or release a chain of packets */
		dhd_process_pkt_reorder_info(&dhd->pub, reorder_info_buf, reorder_info_len,
			&pkt, &pkt_count);
		if (pkt_count == 0)
			return;
	}
	else
		pkt_count = 1;

	dhd_rx_frame(&dhd->pub, ifidx, pkt, pkt_count, 0);
}
1996
1997 static void
1998 dhd_dbus_recv_buf(void *handle, uint8 *buf, int len)
1999 {
2000 dhd_info_t *dhd = (dhd_info_t *)handle;
2001 void *pkt;
2002
2003 if (dhd == NULL)
2004 return;
2005
2006 if ((pkt = PKTGET(dhd->pub.osh, len, FALSE)) == NULL) {
2007 DHD_ERROR(("PKTGET (rx) failed=%d\n", len));
2008 return;
2009 }
2010
2011 bcopy(buf, PKTDATA(dhd->pub.osh, pkt), len);
2012 dhd_dbus_recv_pkt(dhd, pkt);
2013 }
2014
/* DBUS tx flow-control callback.
 * Prefers the wlfc (PROP_TXSTATUS) flow-control path; only when wlfc is
 * unsupported/disabled does it fall back to toggling netif flow control on
 * all interfaces.
 */
static void
dhd_dbus_txflowcontrol(void *handle, bool onoff)
{
	dhd_info_t *dhd = (dhd_info_t *)handle;
	bool wlfc_enabled = FALSE;

	if (dhd == NULL)
		return;

#ifdef PROP_TXSTATUS
	wlfc_enabled = (dhd_wlfc_flowcontrol(&dhd->pub, onoff, !onoff) != WLFC_UNSUPPORTED);
#endif

	if (!wlfc_enabled) {
		dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, onoff);
	}
}
2032
/* DBUS error callback. Intentionally a no-op in this driver; bus problems
 * are observed through dhd_dbus_state_change() instead. */
static void
dhd_dbus_errhandler(void *handle, int err)
{
}
2037
2038 static void
2039 dhd_dbus_ctl_complete(void *handle, int type, int status)
2040 {
2041 dhd_info_t *dhd = (dhd_info_t *)handle;
2042
2043 if (dhd == NULL)
2044 return;
2045
2046 if (type == DBUS_CBCTL_READ) {
2047 if (status == DBUS_OK)
2048 dhd->pub.rx_ctlpkts++;
2049 else
2050 dhd->pub.rx_ctlerrs++;
2051 } else if (type == DBUS_CBCTL_WRITE) {
2052 if (status == DBUS_OK)
2053 dhd->pub.tx_ctlpkts++;
2054 else
2055 dhd->pub.tx_ctlerrs++;
2056 }
2057
2058 dhd_prot_ctl_complete(&dhd->pub);
2059 }
2060
2061 static void
2062 dhd_dbus_state_change(void *handle, int state)
2063 {
2064 dhd_info_t *dhd = (dhd_info_t *)handle;
2065
2066 if (dhd == NULL)
2067 return;
2068
2069 if (state == DBUS_STATE_DOWN) {
2070 DHD_TRACE(("%s: DBUS is down\n", __FUNCTION__));
2071 dhd->pub.busstate = DHD_BUS_DOWN;
2072 } else if (state == DBUS_STATE_UP) {
2073 DHD_TRACE(("%s: DBUS is up\n", __FUNCTION__));
2074 dhd->pub.busstate = DHD_BUS_DATA;
2075 }
2076
2077 DHD_TRACE(("%s: DBUS current state=%d\n", __FUNCTION__, state));
2078 }
2079
2080 static void *
2081 dhd_dbus_pktget(void *handle, uint len, bool send)
2082 {
2083 dhd_info_t *dhd = (dhd_info_t *)handle;
2084 void *p = NULL;
2085
2086 if (dhd == NULL)
2087 return NULL;
2088
2089 if (send == TRUE) {
2090 dhd_os_sdlock_txq(&dhd->pub);
2091 p = PKTGET(dhd->pub.osh, len, TRUE);
2092 dhd_os_sdunlock_txq(&dhd->pub);
2093 } else {
2094 dhd_os_sdlock_rxq(&dhd->pub);
2095 p = PKTGET(dhd->pub.osh, len, FALSE);
2096 dhd_os_sdunlock_rxq(&dhd->pub);
2097 }
2098
2099 return p;
2100 }
2101
/* Free a packet obtained via dhd_dbus_pktget(), using the same tx/rx
 * queue locking as the allocation side.  Under PROP_TXSTATUS a tx packet
 * tagged for wlfc is handed to dhd_wlfc_txcomplete() instead; when that
 * layer accepts it, the packet must not be freed here.
 */
static void
dhd_dbus_pktfree(void *handle, void *p, bool send)
{
	dhd_info_t *dhd = (dhd_info_t *)handle;

	if (dhd == NULL)
		return;

	if (send == TRUE) {
#ifdef PROP_TXSTATUS
		/* wlfc takes over completion of its own packets; skip PKTFREE */
		if (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) &&
			(dhd_wlfc_txcomplete(&dhd->pub, p, FALSE) != WLFC_UNSUPPORTED)) {
			return;
		}
#endif /* PROP_TXSTATUS */

		dhd_os_sdlock_txq(&dhd->pub);
		PKTFREE(dhd->pub.osh, p, TRUE);
		dhd_os_sdunlock_txq(&dhd->pub);
	} else {
		dhd_os_sdlock_rxq(&dhd->pub);
		PKTFREE(dhd->pub.osh, p, FALSE);
		dhd_os_sdunlock_rxq(&dhd->pub);
	}
}
2127
2128 #ifdef BCM_FD_AGGR
2129
2130 static void
2131 dbus_rpcth_tx_complete(void *ctx, void *pktbuf, int status)
2132 {
2133 dhd_info_t *dhd = (dhd_info_t *)ctx;
2134 void *tmp;
2135
2136 while (pktbuf && dhd) {
2137 tmp = PKTNEXT(dhd->pub.osh, pktbuf);
2138 PKTSETNEXT(dhd->pub.osh, pktbuf, NULL);
2139 dhd_dbus_send_complete(ctx, pktbuf, status);
2140 pktbuf = tmp;
2141 }
2142 }
/* RPC transport receive hook: forward a de-aggregated packet straight
 * into the regular DBUS packet-receive path.
 */
static void
dbus_rpcth_rx_pkt(void *context, rpc_buf_t *rpc_buf)
{
	dhd_dbus_recv_pkt(context, rpc_buf);
}
2148
/* Receive one aggregated (dongle-to-host) RPC packet: let the RPC
 * transport split it into individual packets, then release the original
 * aggregate buffer.
 */
static void
dbus_rpcth_rx_aggrpkt(void *context, void *rpc_buf)
{
	dhd_info_t *dhd = (dhd_info_t *)context;

	if (dhd == NULL)
		return;

	/* all the de-aggregated packets are delivered back to function
	 * dbus_rpcth_rx_pkt() as cloned packets
	 */
	bcm_rpc_dbus_recv_aggrpkt(dhd->rpc_th, rpc_buf,
		bcm_rpc_buf_len_get(dhd->rpc_th, rpc_buf));

	/* free the original packet */
	dhd_dbus_pktfree(context, rpc_buf, FALSE);
}
2166
2167 static void
2168 dbus_rpcth_rx_aggrbuf(void *context, uint8 *buf, int len)
2169 {
2170 dhd_info_t *dhd = (dhd_info_t *)context;
2171
2172 if (dhd == NULL)
2173 return;
2174
2175 if (dhd->fdaggr & BCM_FDAGGR_D2H_ENABLED) {
2176 bcm_rpc_dbus_recv_aggrbuf(dhd->rpc_th, buf, len);
2177 }
2178 else {
2179 dhd_dbus_recv_buf(context, buf, len);
2180 }
2181
2182 }
2183
2184 static void
2185 dhd_rpcth_watchdog(ulong data)
2186 {
2187 dhd_info_t *dhd = (dhd_info_t *)data;
2188
2189 if (dhd->pub.dongle_reset) {
2190 return;
2191 }
2192
2193 dhd->rpcth_timer_active = FALSE;
2194 /* release packets in the aggregation queue */
2195 bcm_rpc_tp_watchdog(dhd->rpc_th);
2196 }
2197
/*
 * Intercept the "rpc_agg" and "rpc_host_agglimit" iovars so the host-side
 * RPC transport aggregation state stays in sync with the dongle setting;
 * every other ioctl is passed straight through to dhd_wl_ioctl().
 * Returns the ioctl result (negative on failure).
 */
static int
dhd_fdaggr_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
{
	int bcmerror = 0;
	void *rpc_th;

	rpc_th = dhd_pub->info->rpc_th;

	if (!strcmp("rpc_agg", ioc->buf)) {
		uint32 rpc_agg;
		uint32 rpc_agg_host;
		uint32 rpc_agg_dngl;

		if (ioc->set) {
			/* the value follows the NUL-terminated iovar name in ioc->buf */
			memcpy(&rpc_agg, ioc->buf + strlen("rpc_agg") + 1, sizeof(uint32));
			rpc_agg_host = rpc_agg & BCM_RPC_TP_HOST_AGG_MASK;
			if (rpc_agg_host)
				bcm_rpc_tp_agg_set(rpc_th, rpc_agg_host, TRUE);
			else
				bcm_rpc_tp_agg_set(rpc_th, BCM_RPC_TP_HOST_AGG_MASK, FALSE);
			bcmerror = dhd_wl_ioctl(dhd_pub, ifindex, ioc, buf, len);
			if (bcmerror < 0) {
				DHD_ERROR(("usb aggregation not supported\n"));
			} else {
				/* cache which directions are now enabled */
				dhd_pub->info->fdaggr = 0;
				if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
					dhd_pub->info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
				if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
					dhd_pub->info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
			}
		} else {
			/* merge the host-side state with the dongle's reply */
			rpc_agg_host = bcm_rpc_tp_agg_get(rpc_th);
			bcmerror = dhd_wl_ioctl(dhd_pub, ifindex, ioc, buf, len);
			if (!bcmerror) {
				memcpy(&rpc_agg_dngl, buf, sizeof(uint32));
				rpc_agg = (rpc_agg_host & BCM_RPC_TP_HOST_AGG_MASK) |
					(rpc_agg_dngl & BCM_RPC_TP_DNGL_AGG_MASK);
				memcpy(buf, &rpc_agg, sizeof(uint32));
			}
		}
	} else if (!strcmp("rpc_host_agglimit", ioc->buf)) {
		uint8 sf;
		uint16 bytes;
		uint32 agglimit;

		if (ioc->set) {
			memcpy(&agglimit, ioc->buf + strlen("rpc_host_agglimit") + 1,
				sizeof(uint32));
			/* high 16 bits: subframe count; low 16 bits: byte limit */
			sf = agglimit >> 16;
			bytes = agglimit & 0xFFFF;
			bcm_rpc_tp_agg_limit_set(rpc_th, sf, bytes);
		} else {
			bcm_rpc_tp_agg_limit_get(rpc_th, &sf, &bytes);
			agglimit = (uint32)((sf << 16) + bytes);
			memcpy(buf, &agglimit, sizeof(uint32));
		}

	} else {
		bcmerror = dhd_wl_ioctl(dhd_pub, ifindex, ioc, buf, len);
	}
	return bcmerror;
}
2260 #endif /* BCM_FD_AGGR */
2261
/* DBUS callback vector.  Initializers are positional — their order must
 * match the dbus_callbacks_t field layout.  When BCM_FD_AGGR is enabled
 * the first three entries are replaced by the RPC-transport shims, which
 * de-aggregate before routing into the regular handlers below them.
 */
static dbus_callbacks_t dhd_dbus_cbs = {
#ifdef BCM_FD_AGGR
	dbus_rpcth_tx_complete,
	dbus_rpcth_rx_aggrbuf,
	dbus_rpcth_rx_aggrpkt,
#else
	dhd_dbus_send_complete,
	dhd_dbus_recv_buf,
	dhd_dbus_recv_pkt,
#endif
	dhd_dbus_txflowcontrol,
	dhd_dbus_errhandler,
	dhd_dbus_ctl_complete,
	dhd_dbus_state_change,
	dhd_dbus_pktget,
	dhd_dbus_pktfree
};
2279
/* Append bus identification to a debug dump buffer; on this build the
 * only information emitted is the bus type ("Bus USB").
 */
void
dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	bcm_bprintf(strbuf, "Bus USB\n");
}
2285
/* Clear bus-layer counters: intentional no-op on the USB/DBUS bus —
 * this build maintains no bus-local counters to reset.
 */
void
dhd_bus_clearcounts(dhd_pub_t *dhdp)
{
}
2290
/* Bus DPC hook: this build performs no deferred processing here and
 * always returns FALSE (presumably "no rescheduling needed" — confirm
 * against the SDIO/PCIe bus implementations).
 */
bool
dhd_bus_dpc(struct dhd_bus *bus)
{
	return FALSE;
}
2296
/*
 * Transmit one data packet over the USB (DBUS) bus.  Returns BCME_EPERM
 * while tx flow control (dhdp->txoff) is asserted, otherwise the send
 * result.  With BCM_FD_AGGR host-to-dongle aggregation enabled, the
 * packet goes through the RPC transport and the watchdog timer is armed
 * so packets left in the aggregation queue eventually get flushed.
 */
int
dhd_dbus_txdata(dhd_pub_t *dhdp, void *pktbuf)
{

	if (dhdp->txoff)
		return BCME_EPERM;
#ifdef BCM_FD_AGGR
	if (((dhd_info_t *)(dhdp->info))->fdaggr & BCM_FDAGGR_H2D_ENABLED)

	{
		dhd_info_t *dhd;
		int ret;
		dhd = (dhd_info_t *)(dhdp->info);
		ret = bcm_rpc_tp_buf_send(dhd->rpc_th, pktbuf);
		if (dhd->rpcth_timer_active == FALSE) {
			dhd->rpcth_timer_active = TRUE;
			/* BCM_RPC_TP_HOST_TMOUT is in milliseconds; convert to jiffies */
			mod_timer(&dhd->rpcth_timer, jiffies + BCM_RPC_TP_HOST_TMOUT * HZ / 1000);
		}
		return ret;
	} else
#endif /* BCM_FD_AGGR */
	return dbus_send_txdata(dhdp->dbus, pktbuf);
}
2320
2321 #endif /* BCMDBUS */
2322
/*
 * Push the host-side multicast configuration for interface @ifidx down to
 * the dongle in three steps:
 *   1. the "mcast_list" iovar (count followed by the multicast MAC list),
 *   2. the "allmulti" iovar (forced on when the list could not be set),
 *   3. WLC_SET_PROMISC mirroring the net_device IFF_PROMISC flag.
 * With MCAST_LIST_ACCUMULATION the lists of *all* interfaces are merged
 * and sent through @ifidx instead of only that interface's own list —
 * note the #ifdef branches below open loops whose closing braces appear
 * inside later #ifdef sections.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

#ifdef MCAST_LIST_ACCUMULATION
	int i;
	uint32 cnt_iface[DHD_MAX_IFS];
	cnt = 0;
	allmulti = 0;

	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			dev = dhd->iflist[i]->net;
			if (!dev)
				continue;
#else
	ASSERT(dhd && dhd->iflist[ifidx]);
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
#endif /* MCAST_LIST_ACCUMULATION */
			/* Count addresses under the address lock so the list
			 * cannot change between counting and copying phases.
			 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
#ifdef MCAST_LIST_ACCUMULATION
			cnt_iface[i] = netdev_mc_count(dev);
			cnt += cnt_iface[i];
#else
			cnt = netdev_mc_count(dev);
#endif /* MCAST_LIST_ACCUMULATION */
#else
#ifdef MCAST_LIST_ACCUMULATION
			cnt += dev->mc_count;
#else
			cnt = dev->mc_count;
#endif /* MCAST_LIST_ACCUMULATION */
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_unlock_bh(dev);
#endif

			/* Determine initial value of allmulti flag */
#ifdef MCAST_LIST_ACCUMULATION
			allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
		}
	}
#else
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
#endif /* MCAST_LIST_ACCUMULATION */

	/* Send down the multicast list first. */


	/* iovar layout: "mcast_list\0" + little-endian count + MAC addresses */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#ifdef MCAST_LIST_ACCUMULATION
	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
			dev = dhd->iflist[i]->net;
#endif /* MCAST_LIST_ACCUMULATION */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
			netdev_for_each_mc_addr(ha, dev) {
				/* stop once the counted number of entries is
				 * copied, even if the list grew meanwhile
				 */
#ifdef MCAST_LIST_ACCUMULATION
				if (!cnt_iface[i])
#else
				if (!cnt)
#endif /* MCAST_LIST_ACCUMULATION */
					break;
				memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
				bufp += ETHER_ADDR_LEN;
#ifdef MCAST_LIST_ACCUMULATION
				DHD_TRACE(("_dhd_set_multicast_list: cnt "
					"%d " MACDBG "\n",
					cnt_iface[i], MAC2STRDBG(ha->addr)));
				cnt_iface[i]--;
#else
				cnt--;
#endif /* MCAST_LIST_ACCUMULATION */
			}
#else
#ifdef MCAST_LIST_ACCUMULATION
			for (mclist = dev->mc_list; (mclist && (cnt_iface[i] > 0));
				cnt_iface[i]--, mclist = mclist->next)
#else
			for (mclist = dev->mc_list; (mclist && (cnt > 0));
				cnt--, mclist = mclist->next)
#endif /* MCAST_LIST_ACCUMULATION */
			{
				memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
				bufp += ETHER_ADDR_LEN;
			}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
			netif_addr_unlock_bh(dev);
#endif
#ifdef MCAST_LIST_ACCUMULATION
		}
	}
#endif /* MCAST_LIST_ACCUMULATION */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* list rejected: force allmulti so no group traffic is lost */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting.  This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
			dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}


	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

#ifdef MCAST_LIST_ACCUMULATION
	allmulti = 0;
	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			dev = dhd->iflist[i]->net;
			/* allmulti is reused here as the promisc flag value */
			allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
		}
	}
#else
	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
#endif /* MCAST_LIST_ACCUMULATION */

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
2533
2534 int
2535 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
2536 {
2537 char buf[32];
2538 wl_ioctl_t ioc;
2539 int ret;
2540
2541 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
2542 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
2543 return -1;
2544 }
2545 memset(&ioc, 0, sizeof(ioc));
2546 ioc.cmd = WLC_SET_VAR;
2547 ioc.buf = buf;
2548 ioc.len = 32;
2549 ioc.set = TRUE;
2550
2551 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2552 if (ret < 0) {
2553 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2554 } else {
2555 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2556 if (ifidx == 0)
2557 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2558 }
2559
2560 return ret;
2561 }
2562
2563 #ifdef SOFTAP
2564 extern struct net_device *ap_net_dev;
2565 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
2566 #endif
2567
2568 static void
2569 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
2570 {
2571 dhd_info_t *dhd = handle;
2572 dhd_if_event_t *if_event = event_info;
2573 struct net_device *ndev;
2574 int ifidx, bssidx;
2575 int ret;
2576 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2577 struct wireless_dev *vwdev, *primary_wdev;
2578 struct net_device *primary_ndev;
2579 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2580
2581 if (event != DHD_WQ_WORK_IF_ADD) {
2582 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2583 return;
2584 }
2585
2586 if (!dhd) {
2587 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2588 return;
2589 }
2590
2591 if (!if_event) {
2592 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2593 return;
2594 }
2595
2596 dhd_net_if_lock_local(dhd);
2597 DHD_OS_WAKE_LOCK(&dhd->pub);
2598 DHD_PERIM_LOCK(&dhd->pub);
2599
2600 ifidx = if_event->event.ifidx;
2601 bssidx = if_event->event.bssidx;
2602 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
2603
2604 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2605 if_event->mac, bssidx, TRUE);
2606 if (!ndev) {
2607 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
2608 goto done;
2609 }
2610
2611 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2612 vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
2613 if (unlikely(!vwdev)) {
2614 DHD_ERROR(("%s :Could not allocate wireless device\n", __FUNCTION__));
2615 goto done;
2616 }
2617 primary_ndev = dhd->pub.info->iflist[0]->net;
2618 primary_wdev = ndev_to_wdev(primary_ndev);
2619 vwdev->wiphy = primary_wdev->wiphy;
2620 vwdev->iftype = if_event->event.role;
2621 vwdev->netdev = ndev;
2622 ndev->ieee80211_ptr = vwdev;
2623 SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
2624 DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
2625 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2626
2627 DHD_PERIM_UNLOCK(&dhd->pub);
2628 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2629 DHD_PERIM_LOCK(&dhd->pub);
2630 if (ret != BCME_OK) {
2631 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
2632 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2633 goto done;
2634 }
2635 #ifdef PCIE_FULL_DONGLE
2636 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2637 if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
2638 char iovbuf[WLC_IOCTL_SMLEN];
2639 uint32 var_int = 1;
2640
2641 memset(iovbuf, 0, sizeof(iovbuf));
2642 bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
2643 ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
2644
2645 if (ret != BCME_OK) {
2646 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
2647 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2648 }
2649 }
2650 #endif /* PCIE_FULL_DONGLE */
2651 done:
2652 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2653
2654 DHD_PERIM_UNLOCK(&dhd->pub);
2655 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2656 dhd_net_if_unlock_local(dhd);
2657 }
2658
2659 static void
2660 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2661 {
2662 dhd_info_t *dhd = handle;
2663 int ifidx;
2664 dhd_if_event_t *if_event = event_info;
2665
2666
2667 if (event != DHD_WQ_WORK_IF_DEL) {
2668 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2669 return;
2670 }
2671
2672 if (!dhd) {
2673 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2674 return;
2675 }
2676
2677 if (!if_event) {
2678 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2679 return;
2680 }
2681
2682 dhd_net_if_lock_local(dhd);
2683 DHD_OS_WAKE_LOCK(&dhd->pub);
2684 DHD_PERIM_LOCK(&dhd->pub);
2685
2686 ifidx = if_event->event.ifidx;
2687 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
2688
2689 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2690
2691 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2692
2693 DHD_PERIM_UNLOCK(&dhd->pub);
2694 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2695 dhd_net_if_unlock_local(dhd);
2696 }
2697
2698 static void
2699 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
2700 {
2701 dhd_info_t *dhd = handle;
2702 dhd_if_t *ifp = event_info;
2703
2704 if (event != DHD_WQ_WORK_SET_MAC) {
2705 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2706 }
2707
2708 if (!dhd) {
2709 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2710 return;
2711 }
2712
2713 dhd_net_if_lock_local(dhd);
2714 DHD_OS_WAKE_LOCK(&dhd->pub);
2715 DHD_PERIM_LOCK(&dhd->pub);
2716
2717 #ifdef SOFTAP
2718 {
2719 unsigned long flags;
2720 bool in_ap = FALSE;
2721 DHD_GENERAL_LOCK(&dhd->pub, flags);
2722 in_ap = (ap_net_dev != NULL);
2723 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2724
2725 if (in_ap) {
2726 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
2727 ifp->net->name));
2728 goto done;
2729 }
2730 }
2731 #endif /* SOFTAP */
2732
2733 if (ifp == NULL || !dhd->pub.up) {
2734 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2735 goto done;
2736 }
2737
2738 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
2739 ifp->set_macaddress = FALSE;
2740 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
2741 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
2742 else
2743 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
2744
2745 done:
2746 DHD_PERIM_UNLOCK(&dhd->pub);
2747 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2748 dhd_net_if_unlock_local(dhd);
2749 }
2750
/*
 * Deferred-work handler for DHD_WQ_WORK_SET_MCAST_LIST: push the pending
 * multicast configuration for ifp to the dongle through
 * _dhd_set_multicast_list().  Skipped when the interface is gone, the
 * dongle is down, or (SOFTAP) an AP interface is active.
 */
static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;
	int ifidx;

	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#ifdef SOFTAP
	{
		bool in_ap = FALSE;
		unsigned long flags;
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		in_ap = (ap_net_dev != NULL);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);

		if (in_ap) {
			/* NOTE(review): ifp is dereferenced here although the
			 * NULL check only happens below — verify callers never
			 * queue a NULL event_info.
			 */
			DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
				ifp->net->name));
			ifp->set_multicast = FALSE;
			goto done;
		}
	}
#endif /* SOFTAP */

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	ifidx = ifp->idx;

#ifdef MCAST_LIST_ACCUMULATION
	/* accumulation mode merges every interface's list through ifidx 0 */
	ifidx = 0;
#endif /* MCAST_LIST_ACCUMULATION */

	_dhd_set_multicast_list(dhd, ifidx);
	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
2808
2809 static int
2810 dhd_set_mac_address(struct net_device *dev, void *addr)
2811 {
2812 int ret = 0;
2813
2814 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2815 struct sockaddr *sa = (struct sockaddr *)addr;
2816 int ifidx;
2817 dhd_if_t *dhdif;
2818
2819 ifidx = dhd_net2idx(dhd, dev);
2820 if (ifidx == DHD_BAD_IF)
2821 return -1;
2822
2823 dhdif = dhd->iflist[ifidx];
2824
2825 dhd_net_if_lock_local(dhd);
2826 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
2827 dhdif->set_macaddress = TRUE;
2828 dhd_net_if_unlock_local(dhd);
2829 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
2830 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
2831 return ret;
2832 }
2833
2834 static void
2835 dhd_set_multicast_list(struct net_device *dev)
2836 {
2837 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2838 int ifidx;
2839
2840 ifidx = dhd_net2idx(dhd, dev);
2841 if (ifidx == DHD_BAD_IF)
2842 return;
2843
2844 dhd->iflist[ifidx]->set_multicast = TRUE;
2845 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
2846 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
2847 }
2848
2849 #ifdef PROP_TXSTATUS
/* Enter the wlfc (proptxstatus) critical section.  Always returns 1. */
int
dhd_os_wlfc_block(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);
	ASSERT(di != NULL);
#ifdef BCMDBUS
	/* NOTE(review): the saved irq flags live in the shared dhd_info_t,
	 * so this lock cannot be taken re-entrantly by contexts needing
	 * distinct flag values — verify callers never nest block/unblock.
	 */
	spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
#else
	spin_lock_bh(&di->wlfc_spinlock);
#endif
	return 1;
}
2862
/* Leave the wlfc (proptxstatus) critical section taken by
 * dhd_os_wlfc_block().  Always returns 1.
 */
int
dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);

	ASSERT(di != NULL);
#ifdef BCMDBUS
	/* restore the irq flags saved by the matching block() call */
	spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
#else
	spin_unlock_bh(&di->wlfc_spinlock);
#endif
	return 1;
}
2876
2877 #endif /* PROP_TXSTATUS */
2878
2879 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Ethertype -> printable name mapping used by the TX/RX dump debug logs. */
typedef struct {
	uint16 type;	/* ethertype value (host byte order as compared) */
	const char *str;	/* human-readable name for the log line */
} PKTTYPE_INFO;

/* Lookup table; the final { 0, "" } entry is the catch-all returned by
 * _get_packet_type_str() for unknown types.
 */
static const PKTTYPE_INFO packet_type_info[] =
{
	{ ETHER_TYPE_IP, "IP" },
	{ ETHER_TYPE_ARP, "ARP" },
	{ ETHER_TYPE_BRCM, "BRCM" },
	{ ETHER_TYPE_802_1X, "802.1X" },
	{ ETHER_TYPE_WAI, "WAPI" },
	{ 0, ""}
};
2894
2895 static const char *_get_packet_type_str(uint16 type)
2896 {
2897 int i;
2898 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
2899
2900 for (i = 0; i < n; i++) {
2901 if (packet_type_info[i].type == type)
2902 return packet_type_info[i].str;
2903 }
2904
2905 return packet_type_info[n].str;
2906 }
2907 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
2908
#if defined(DHD_TX_DUMP)
/* Debug-only (DHD_TX_DUMP): log a summary of an outgoing frame — packet
 * type name, 802.1X version/type/replay bytes when applicable, and with
 * DHD_TX_FULL_DUMP a full hex dump of the payload.
 */
void
dhd_tx_dump(osl_t *osh, void *pkt)
{
	uint8 *dump_data;
	uint16 protocol;
	struct ether_header *eh;

	dump_data = PKTDATA(osh, pkt);
	eh = (struct ether_header *) dump_data;
	protocol = ntoh16(eh->ether_type);

	DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol)));

	if (protocol == ETHER_TYPE_802_1X) {
		DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
			dump_data[14], dump_data[15], dump_data[30]));
	}

#if defined(DHD_TX_FULL_DUMP)
	{
		/* fix: index was a signed int compared against unsigned
		 * datalen (signed/unsigned comparison)
		 */
		uint i;
		uint datalen;
		datalen = PKTLEN(osh, pkt);

		for (i = 0; i < datalen; i++) {
			DHD_ERROR(("%02X ", dump_data[i]));
			if ((i & 15) == 15)
				printk("\n");
		}
		DHD_ERROR(("\n"));
	}
#endif /* DHD_TX_FULL_DUMP */
}
#endif /* DHD_TX_DUMP */
2944
/*
 * Send one ethernet frame to the dongle on behalf of interface @ifidx.
 * Takes ownership of @pktbuf: on every error path the packet is freed
 * here rather than by the caller.  Updates tx statistics, assigns a
 * packet priority, optionally tags the packet for proptxstatus, pushes
 * the protocol header and finally hands the packet to the bus layer
 * (DBUS/SDIO/PCIe depending on build flags).  Returns BCME_OK or a
 * negative BCME_/errno code.
 */
int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh = NULL;

	/* Reject if down */
	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
		/* free the packet here since the caller won't */
		PKTFREE(dhdp->osh, pktbuf, TRUE);
		return -ENODEV;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
		PKTFREE(dhdp->osh, pktbuf, TRUE);
		return -EBUSY;
	}
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_UNICAST_DHCP
	/* if dhcp_unicast is enabled, we need to convert the */
	/* broadcast DHCP ACK/REPLY packets to Unicast. */
	if (dhdp->dhcp_unicast) {
		dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
	}
#endif /* DHD_UNICAST_DHCP */
	/* Update multicast statistic */
	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
		eh = (struct ether_header *)pktdata;

		if (ETHER_ISMULTI(eh->ether_dhost))
			dhdp->tx_multicast++;
		/* 802.1X frames are counted so dhd_wait_pend8021x() can
		 * block until they complete
		 */
		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
			atomic_inc(&dhd->pend_8021x_cnt);
#ifdef DHD_DHCP_DUMP
		if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
			uint16 dump_hex;
			uint16 source_port;
			uint16 dest_port;
			uint16 udp_port_pos;
			uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;

			/* NOTE(review): UDP ports / DHCP message type are read
			 * at fixed offsets without re-checking the packet
			 * length — assumes well-formed IP/UDP frames.
			 */
			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
			source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
			dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
			if (source_port == 0x0044 || dest_port == 0x0044) {
				dump_hex = (pktdata[udp_port_pos+249] << 8) |
					pktdata[udp_port_pos+250];
				if (dump_hex == 0x0101) {
					DHD_ERROR(("DHCP - DISCOVER [TX]\n"));
				} else if (dump_hex == 0x0102) {
					DHD_ERROR(("DHCP - OFFER [TX]\n"));
				} else if (dump_hex == 0x0103) {
					DHD_ERROR(("DHCP - REQUEST [TX]\n"));
				} else if (dump_hex == 0x0105) {
					DHD_ERROR(("DHCP - ACK [TX]\n"));
				} else {
					DHD_ERROR(("DHCP - 0x%X [TX]\n", dump_hex));
				}
			} else if (source_port == 0x0043 || dest_port == 0x0043) {
				DHD_ERROR(("DHCP - BOOTP [RX]\n"));
			}
		}
#endif /* DHD_DHCP_DUMP */
	} else {
		/* runt frame, shorter than an ethernet header: drop it */
		PKTFREE(dhd->pub.osh, pktbuf, TRUE);
		return BCME_ERROR;
	}

	/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
	if (PKTPRIO(pktbuf) == 0)
#endif
#ifdef QOS_MAP_SET
		pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
#else
		pktsetprio(pktbuf, FALSE);
#endif /* QOS_MAP_SET */


#if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
	/*
	 * Lkup the per interface hash table, for a matching flowring. If one is not
	 * available, allocate a unique flowid and add a flowring entry.
	 * The found or newly created flowid is placed into the pktbuf's tag.
	 */
	ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
	if (ret != BCME_OK) {
		PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
		return ret;
	}
#endif
#if defined(DHD_TX_DUMP)
	dhd_tx_dump(dhdp->osh, pktbuf);
#endif

	/* terence 20150901: Micky add to ajust the 802.1X priority */
	/* Set the 802.1X packet with the highest priority 7 */
	if (dhdp->conf->pktprio8021x >= 0)
		pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);

#ifdef PROP_TXSTATUS
	if (dhd_wlfc_is_supported(dhdp)) {
		/* store the interface ID */
		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);

		/* store destination MAC in the tag as well */
		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);

		/* decide which FIFO this packet belongs to */
		if (ETHER_ISMULTI(eh->ether_dhost))
			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
		else
			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
	} else
#endif /* PROP_TXSTATUS */
	/* If the protocol uses a data header, apply it */
	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);

	/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
	dhd_htsf_addtxts(dhdp, pktbuf);
#endif

#ifdef BCMDBUS
#ifdef PROP_TXSTATUS
	if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_dbus_txdata,
		dhdp, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
		/* non-proptxstatus way */
		ret = dhd_dbus_txdata(dhdp, pktbuf);
	}
#else
	ret = dhd_dbus_txdata(dhdp, pktbuf);
#endif /* PROP_TXSTATUS */
	if (ret)
		PKTFREE(dhdp->osh, pktbuf, TRUE);
#else
#ifdef PROP_TXSTATUS
	{
		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
			/* non-proptxstatus way */
#ifdef BCMPCIE
			ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
		}
	}
#else
#ifdef BCMPCIE
	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */

#endif /* BCMDBUS */

	return ret;
}
3112
3113 int BCMFASTPATH
3114 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3115 {
3116 int ret;
3117 uint datalen;
3118 void *pktbuf;
3119 dhd_info_t *dhd = DHD_DEV_INFO(net);
3120 dhd_if_t *ifp = NULL;
3121 int ifidx;
3122 #ifdef WLMEDIA_HTSF
3123 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
3124 #else
3125 uint8 htsfdlystat_sz = 0;
3126 #endif
3127 #ifdef DHD_WMF
3128 struct ether_header *eh;
3129 uint8 *iph;
3130 #endif /* DHD_WMF */
3131
3132 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3133
3134 DHD_OS_WAKE_LOCK(&dhd->pub);
3135 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3136
3137 /* Reject if down */
3138 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
3139 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3140 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3141 netif_stop_queue(net);
3142 #if defined(OEM_ANDROID)
3143 /* Send Event when bus down detected during data session */
3144 if (dhd->pub.up) {
3145 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3146 net_os_send_hang_message(net);
3147 }
3148 #endif /* OEM_ANDROID */
3149 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3150 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3151 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3152 return -ENODEV;
3153 #else
3154 return NETDEV_TX_BUSY;
3155 #endif
3156 }
3157
3158 ifp = DHD_DEV_IFP(net);
3159 ifidx = DHD_DEV_IFIDX(net);
3160
3161 if (ifidx == DHD_BAD_IF) {
3162 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
3163 netif_stop_queue(net);
3164 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3165 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3166 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3167 return -ENODEV;
3168 #else
3169 return NETDEV_TX_BUSY;
3170 #endif
3171 }
3172
3173 ASSERT(ifidx == dhd_net2idx(dhd, net));
3174 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
3175
3176 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3177
3178 /* re-align socket buffer if "skb->data" is odd address */
3179 if (((unsigned long)(skb->data)) & 0x1) {
3180 unsigned char *data = skb->data;
3181 uint32 length = skb->len;
3182 PKTPUSH(dhd->pub.osh, skb, 1);
3183 memmove(skb->data, data, length);
3184 PKTSETLEN(dhd->pub.osh, skb, length);
3185 }
3186
3187 datalen = PKTLEN(dhd->pub.osh, skb);
3188
3189 /* Make sure there's enough room for any header */
3190
3191 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
3192 struct sk_buff *skb2;
3193
3194 DHD_INFO(("%s: insufficient headroom\n",
3195 dhd_ifname(&dhd->pub, ifidx)));
3196 dhd->pub.tx_realloc++;
3197
3198 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3199 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
3200
3201 dev_kfree_skb(skb);
3202 if ((skb = skb2) == NULL) {
3203 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
3204 dhd_ifname(&dhd->pub, ifidx)));
3205 ret = -ENOMEM;
3206 goto done;
3207 }
3208 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3209 }
3210
3211 /* Convert to packet */
3212 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
3213 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
3214 dhd_ifname(&dhd->pub, ifidx)));
3215 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3216 dev_kfree_skb_any(skb);
3217 ret = -ENOMEM;
3218 goto done;
3219 }
3220 #ifdef WLMEDIA_HTSF
3221 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
3222 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
3223 struct ether_header *eh = (struct ether_header *)pktdata;
3224
3225 if (!ETHER_ISMULTI(eh->ether_dhost) &&
3226 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
3227 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
3228 }
3229 }
3230 #endif
3231 #ifdef DHD_WMF
3232 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
3233 iph = (uint8 *)eh + ETHER_HDR_LEN;
3234
3235 /* WMF processing for multicast packets
3236 * Only IPv4 packets are handled
3237 */
3238 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
3239 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
3240 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
3241 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
3242 void *sdu_clone;
3243 bool ucast_convert = FALSE;
3244 #ifdef DHD_UCAST_UPNP
3245 uint32 dest_ip;
3246
3247 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
3248 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
3249 #endif /* DHD_UCAST_UPNP */
3250 #ifdef DHD_IGMP_UCQUERY
3251 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
3252 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
3253 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
3254 #endif /* DHD_IGMP_UCQUERY */
3255 if (ucast_convert) {
3256 dhd_sta_t *sta;
3257 unsigned long flags;
3258
3259 DHD_IF_STA_LIST_LOCK(ifp, flags);
3260
3261 /* Convert upnp/igmp query to unicast for each assoc STA */
3262 list_for_each_entry(sta, &ifp->sta_list, list) {
3263 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
3264 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
3265 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3266 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3267 return (WMF_NOP);
3268 }
3269 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
3270 }
3271
3272 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
3273 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3274 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3275
3276 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
3277 return NETDEV_TX_OK;
3278 } else
3279 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
3280 {
3281 /* There will be no STA info if the packet is coming from LAN host
3282 * Pass as NULL
3283 */
3284 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
3285 switch (ret) {
3286 case WMF_TAKEN:
3287 case WMF_DROP:
3288 /* Either taken by WMF or we should drop it.
3289 * Exiting send path
3290 */
3291 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3292 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3293 return NETDEV_TX_OK;
3294 default:
3295 /* Continue the transmit path */
3296 break;
3297 }
3298 }
3299 }
3300 #endif /* DHD_WMF */
3301
3302 #ifdef DHDTCPACK_SUPPRESS
3303 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
3304 /* If this packet has been hold or got freed, just return */
3305 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
3306 ret = 0;
3307 goto done;
3308 }
3309 } else {
3310 /* If this packet has replaced another packet and got freed, just return */
3311 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
3312 ret = 0;
3313 goto done;
3314 }
3315 }
3316 #endif /* DHDTCPACK_SUPPRESS */
3317
3318 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
3319
3320 done:
3321 if (ret) {
3322 ifp->stats.tx_dropped++;
3323 dhd->pub.tx_dropped++;
3324 }
3325 else {
3326
3327 #ifdef PROP_TXSTATUS
3328 /* tx_packets counter can counted only when wlfc is disabled */
3329 if (!dhd_wlfc_is_supported(&dhd->pub))
3330 #endif
3331 {
3332 dhd->pub.tx_packets++;
3333 ifp->stats.tx_packets++;
3334 ifp->stats.tx_bytes += datalen;
3335 }
3336 }
3337
3338 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
3339 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3340
3341 /* Return ok: we always eat the packet */
3342 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3343 return 0;
3344 #else
3345 return NETDEV_TX_OK;
3346 #endif
3347 }
3348
3349
3350 void
3351 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
3352 {
3353 struct net_device *net;
3354 dhd_info_t *dhd = dhdp->info;
3355 int i;
3356
3357 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3358
3359 ASSERT(dhd);
3360
3361 if (ifidx == ALL_INTERFACES) {
3362 /* Flow control on all active interfaces */
3363 dhdp->txoff = state;
3364 for (i = 0; i < DHD_MAX_IFS; i++) {
3365 if (dhd->iflist[i]) {
3366 net = dhd->iflist[i]->net;
3367 if (state == ON)
3368 netif_stop_queue(net);
3369 else
3370 netif_wake_queue(net);
3371 }
3372 }
3373 }
3374 else {
3375 if (dhd->iflist[ifidx]) {
3376 net = dhd->iflist[ifidx]->net;
3377 if (state == ON)
3378 netif_stop_queue(net);
3379 else
3380 netif_wake_queue(net);
3381 }
3382 }
3383 }
3384
3385
3386 #ifdef DHD_WMF
3387 bool
3388 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
3389 {
3390 dhd_info_t *dhd = dhdp->info;
3391
3392 return dhd->rxthread_enabled;
3393 }
3394 #endif /* DHD_WMF */
3395
3396 void
3397 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
3398 {
3399 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3400 struct sk_buff *skb;
3401 uchar *eth;
3402 uint len;
3403 void *data, *pnext = NULL;
3404 int i;
3405 dhd_if_t *ifp;
3406 wl_event_msg_t event;
3407 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3408 int tout_rx = 0;
3409 int tout_ctrl = 0;
3410 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
3411 void *skbhead = NULL;
3412 void *skbprev = NULL;
3413 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3414 char *dump_data;
3415 uint16 protocol;
3416 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3417
3418 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3419
3420 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
3421 struct ether_header *eh;
3422 #ifdef WLBTAMP
3423 struct dot11_llc_snap_header *lsh;
3424 #endif
3425
3426 pnext = PKTNEXT(dhdp->osh, pktbuf);
3427 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
3428
3429 ifp = dhd->iflist[ifidx];
3430 if (ifp == NULL) {
3431 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
3432 __FUNCTION__));
3433 PKTCFREE(dhdp->osh, pktbuf, FALSE);
3434 continue;
3435 }
3436
3437 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
3438
3439 /* Dropping only data packets before registering net device to avoid kernel panic */
3440 #ifndef PROP_TXSTATUS_VSDB
3441 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
3442 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
3443 #else
3444 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
3445 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
3446 #endif /* PROP_TXSTATUS_VSDB */
3447 {
3448 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
3449 __FUNCTION__));
3450 PKTCFREE(dhdp->osh, pktbuf, FALSE);
3451 continue;
3452 }
3453
3454 #ifdef WLBTAMP
3455 lsh = (struct dot11_llc_snap_header *)&eh[1];
3456
3457 if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
3458 (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
3459 bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
3460 lsh->type == HTON16(BTA_PROT_L2CAP)) {
3461 amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
3462 ((uint8 *)eh + RFC1042_HDR_LEN);
3463 ACL_data = NULL;
3464 }
3465 #endif /* WLBTAMP */
3466
3467 #ifdef PROP_TXSTATUS
3468 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
3469 /* WLFC may send header only packet when
3470 there is an urgent message but no packet to
3471 piggy-back on
3472 */
3473 PKTCFREE(dhdp->osh, pktbuf, FALSE);
3474 continue;
3475 }
3476 #endif
3477 #ifdef DHD_L2_FILTER
3478 /* If block_ping is enabled drop the ping packet */
3479 if (dhdp->block_ping) {
3480 if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
3481 PKTFREE(dhdp->osh, pktbuf, FALSE);
3482 continue;
3483 }
3484 }
3485 #endif
3486 #ifdef DHD_WMF
3487 /* WMF processing for multicast packets */
3488 if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
3489 dhd_sta_t *sta;
3490 int ret;
3491
3492 sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
3493 ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
3494 switch (ret) {
3495 case WMF_TAKEN:
3496 /* The packet is taken by WMF. Continue to next iteration */
3497 continue;
3498 case WMF_DROP:
3499 /* Packet DROP decision by WMF. Toss it */
3500 DHD_ERROR(("%s: WMF decides to drop packet\n",
3501 __FUNCTION__));
3502 PKTCFREE(dhdp->osh, pktbuf, FALSE);
3503 continue;
3504 default:
3505 /* Continue the transmit path */
3506 break;
3507 }
3508 }
3509 #endif /* DHD_WMF */
3510 #ifdef DHDTCPACK_SUPPRESS
3511 dhd_tcpdata_info_get(dhdp, pktbuf);
3512 #endif
3513 skb = PKTTONATIVE(dhdp->osh, pktbuf);
3514
3515 ifp = dhd->iflist[ifidx];
3516 if (ifp == NULL)
3517 ifp = dhd->iflist[0];
3518
3519 ASSERT(ifp);
3520 skb->dev = ifp->net;
3521
3522 #ifdef PCIE_FULL_DONGLE
3523 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
3524 (!ifp->ap_isolate)) {
3525 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
3526 if (ETHER_ISUCAST(eh->ether_dhost)) {
3527 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
3528 dhd_sendpkt(dhdp, ifidx, pktbuf);
3529 continue;
3530 }
3531 } else {
3532 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
3533 dhd_sendpkt(dhdp, ifidx, npktbuf);
3534 }
3535 }
3536 #endif /* PCIE_FULL_DONGLE */
3537
3538 /* Get the protocol, maintain skb around eth_type_trans()
3539 * The main reason for this hack is for the limitation of
3540 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
3541 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
3542 * coping of the packet coming from the network stack to add
3543 * BDC, Hardware header etc, during network interface registration
3544 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
3545 * for BDC, Hardware header etc. and not just the ETH_HLEN
3546 */
3547 eth = skb->data;
3548 len = skb->len;
3549
3550 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3551 dump_data = skb->data;
3552 protocol = (dump_data[12] << 8) | dump_data[13];
3553 if (protocol == ETHER_TYPE_802_1X) {
3554 DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
3555 "ver %d, type %d, replay %d\n",
3556 dump_data[14], dump_data[15],
3557 dump_data[30]));
3558 }
3559 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3560 #ifdef DHD_DHCP_DUMP
3561 if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
3562 uint16 dump_hex;
3563 uint16 source_port;
3564 uint16 dest_port;
3565 uint16 udp_port_pos;
3566 uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
3567 uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
3568
3569 udp_port_pos = ETHER_HDR_LEN + ip_header_len;
3570 source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
3571 dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
3572 if (source_port == 0x0044 || dest_port == 0x0044) {
3573 dump_hex = (dump_data[udp_port_pos+249] << 8) |
3574 dump_data[udp_port_pos+250];
3575 if (dump_hex == 0x0101) {
3576 DHD_ERROR(("DHCP - DISCOVER [RX]\n"));
3577 } else if (dump_hex == 0x0102) {
3578 DHD_ERROR(("DHCP - OFFER [RX]\n"));
3579 } else if (dump_hex == 0x0103) {
3580 DHD_ERROR(("DHCP - REQUEST [RX]\n"));
3581 } else if (dump_hex == 0x0105) {
3582 DHD_ERROR(("DHCP - ACK [RX]\n"));
3583 } else {
3584 DHD_ERROR(("DHCP - 0x%X [RX]\n", dump_hex));
3585 }
3586 } else if (source_port == 0x0043 || dest_port == 0x0043) {
3587 DHD_ERROR(("DHCP - BOOTP [RX]\n"));
3588 }
3589 }
3590 #endif /* DHD_DHCP_DUMP */
3591 #if defined(DHD_RX_DUMP)
3592 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
3593 if (protocol != ETHER_TYPE_BRCM) {
3594 if (dump_data[0] == 0xFF) {
3595 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
3596
3597 if ((dump_data[12] == 8) &&
3598 (dump_data[13] == 6)) {
3599 DHD_ERROR(("%s: ARP %d\n",
3600 __FUNCTION__, dump_data[0x15]));
3601 }
3602 } else if (dump_data[0] & 1) {
3603 DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
3604 __FUNCTION__, MAC2STRDBG(dump_data)));
3605 }
3606 #ifdef DHD_RX_FULL_DUMP
3607 {
3608 int k;
3609 for (k = 0; k < skb->len; k++) {
3610 DHD_ERROR(("%02X ", dump_data[k]));
3611 if ((k & 15) == 15)
3612 DHD_ERROR(("\n"));
3613 }
3614 DHD_ERROR(("\n"));
3615 }
3616 #endif /* DHD_RX_FULL_DUMP */
3617 }
3618 #endif /* DHD_RX_DUMP */
3619
3620 skb->protocol = eth_type_trans(skb, skb->dev);
3621
3622 if (skb->pkt_type == PACKET_MULTICAST) {
3623 dhd->pub.rx_multicast++;
3624 ifp->stats.multicast++;
3625 }
3626
3627 skb->data = eth;
3628 skb->len = len;
3629
3630 #ifdef WLMEDIA_HTSF
3631 dhd_htsf_addrxts(dhdp, pktbuf);
3632 #endif
3633 /* Strip header, count, deliver upward */
3634 skb_pull(skb, ETH_HLEN);
3635
3636 /* Process special event packets and then discard them */
3637 memset(&event, 0, sizeof(event));
3638 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
3639 dhd_wl_host_event(dhd, &ifidx,
3640 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
3641 skb_mac_header(skb),
3642 #else
3643 skb->mac.raw,
3644 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
3645 &event,
3646 &data);
3647
3648 wl_event_to_host_order(&event);
3649 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3650 if (!tout_ctrl)
3651 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
3652 #endif /* (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)) */
3653 #ifdef WLBTAMP
3654 if (event.event_type == WLC_E_BTA_HCI_EVENT) {
3655 dhd_bta_doevt(dhdp, data, event.datalen);
3656 }
3657 #endif /* WLBTAMP */
3658
3659 #if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
3660 if (event.event_type == WLC_E_PFN_NET_FOUND) {
3661 /* enforce custom wake lock to garantee that Kernel not suspended */
3662 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
3663 }
3664 #endif /* PNO_SUPPORT */
3665
3666 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
3667 PKTFREE(dhdp->osh, pktbuf, FALSE);
3668 continue;
3669 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
3670 } else {
3671 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3672 tout_rx = DHD_PACKET_TIMEOUT_MS;
3673 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
3674
3675 #ifdef PROP_TXSTATUS
3676 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
3677 #endif /* PROP_TXSTATUS */
3678 }
3679
3680 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
3681 ifp = dhd->iflist[ifidx];
3682
3683 if (ifp->net)
3684 ifp->net->last_rx = jiffies;
3685
3686 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
3687 dhdp->dstats.rx_bytes += skb->len;
3688 dhdp->rx_packets++; /* Local count */
3689 ifp->stats.rx_bytes += skb->len;
3690 ifp->stats.rx_packets++;
3691 }
3692 #if defined(DHD_TCP_WINSIZE_ADJUST)
3693 if (dhd_use_tcp_window_size_adjust) {
3694 if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
3695 dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
3696 }
3697 }
3698 #endif /* DHD_TCP_WINSIZE_ADJUST */
3699
3700 if (in_interrupt()) {
3701 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3702 __FUNCTION__, __LINE__);
3703 netif_rx(skb);
3704 } else {
3705 if (dhd->rxthread_enabled) {
3706 if (!skbhead)
3707 skbhead = skb;
3708 else
3709 PKTSETNEXT(dhdp->osh, skbprev, skb);
3710 skbprev = skb;
3711 } else {
3712
3713 /* If the receive is not processed inside an ISR,
3714 * the softirqd must be woken explicitly to service
3715 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3716 * by netif_rx_ni(), but in earlier kernels, we need
3717 * to do it manually.
3718 */
3719 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3720 __FUNCTION__, __LINE__);
3721 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3722 netif_rx_ni(skb);
3723 #else
3724 ulong flags;
3725 netif_rx(skb);
3726 local_irq_save(flags);
3727 RAISE_RX_SOFTIRQ();
3728 local_irq_restore(flags);
3729 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3730 }
3731 }
3732 }
3733
3734 if (dhd->rxthread_enabled && skbhead)
3735 dhd_sched_rxf(dhdp, skbhead);
3736
3737 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3738 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
3739 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
3740 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
3741 }
3742
/*
 * Per-OS dongle event hook.  On Linux, events are consumed on the receive
 * path (dhd_rx_frame() -> dhd_wl_host_event()), so this stub intentionally
 * does nothing.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
}
3749
3750 void
3751 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
3752 {
3753 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3754 struct ether_header *eh;
3755 uint16 type;
3756 #ifdef WLBTAMP
3757 uint len;
3758 #endif
3759
3760 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
3761
3762 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
3763 type = ntoh16(eh->ether_type);
3764
3765 if (type == ETHER_TYPE_802_1X)
3766 atomic_dec(&dhd->pend_8021x_cnt);
3767
3768 #ifdef WLBTAMP
3769 /* Crack open the packet and check to see if it is BT HCI ACL data packet.
3770 * If yes generate packet completion event.
3771 */
3772 len = PKTLEN(dhdp->osh, txp);
3773
3774 /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
3775 if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
3776 struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
3777
3778 if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
3779 ntoh16(lsh->type) == BTA_PROT_L2CAP) {
3780
3781 dhd_bta_tx_hcidata_complete(dhdp, txp, success);
3782 }
3783 }
3784 #endif /* WLBTAMP */
3785 #ifdef PROP_TXSTATUS
3786 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
3787 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
3788 uint datalen = PKTLEN(dhd->pub.osh, txp);
3789
3790 if (success) {
3791 dhd->pub.tx_packets++;
3792 ifp->stats.tx_packets++;
3793 ifp->stats.tx_bytes += datalen;
3794 } else {
3795 ifp->stats.tx_dropped++;
3796 }
3797 }
3798 #endif
3799 }
3800
3801 static struct net_device_stats *
3802 dhd_get_stats(struct net_device *net)
3803 {
3804 dhd_info_t *dhd = DHD_DEV_INFO(net);
3805 dhd_if_t *ifp;
3806 int ifidx;
3807
3808 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3809
3810 ifidx = dhd_net2idx(dhd, net);
3811 if (ifidx == DHD_BAD_IF) {
3812 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
3813
3814 memset(&net->stats, 0, sizeof(net->stats));
3815 return &net->stats;
3816 }
3817
3818 ifp = dhd->iflist[ifidx];
3819 ASSERT(dhd && ifp);
3820
3821 if (dhd->pub.up) {
3822 /* Use the protocol to get dongle stats */
3823 dhd_prot_dstats(&dhd->pub);
3824 }
3825 return &ifp->stats;
3826 }
3827
3828 #ifndef BCMDBUS
/*
 * Kernel thread servicing the DHD watchdog when dhd_watchdog_prio selects
 * threaded operation.  Each up() on tsk->sema (posted by the timer
 * callback dhd_watchdog()) triggers one dhd_bus_watchdog() pass and a
 * timer re-arm that compensates for the time the pass itself consumed.
 * Exits when tsk->terminated is set or the semaphore wait is interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range */
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1)
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			/* Pairs with the writer of tsk->terminated */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Skip the pass entirely while the dongle is held in reset */
			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));

				/* Call the bus module watchdog */
				dhd_bus_watchdog(&dhd->pub);


				DHD_GENERAL_LOCK(&dhd->pub, flags);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog, shortening the next
				 * period by the time this pass took (clamped so
				 * the expiry never lands in the past)
				 */
				if (dhd->wd_timer_valid)
					mod_timer(&dhd->timer,
					    jiffies +
					    msecs_to_jiffies(dhd_watchdog_ms) -
					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			break;
		}

	complete_and_exit(&tsk->completed, 0);
}
3881
3882 static void dhd_watchdog(ulong data)
3883 {
3884 dhd_info_t *dhd = (dhd_info_t *)data;
3885 unsigned long flags;
3886
3887 if (dhd->pub.dongle_reset) {
3888 return;
3889 }
3890
3891 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
3892 up(&dhd->thr_wdt_ctl.sema);
3893 return;
3894 }
3895
3896 /* Call the bus module watchdog */
3897 dhd_bus_watchdog(&dhd->pub);
3898
3899 DHD_GENERAL_LOCK(&dhd->pub, flags);
3900 /* Count the tick for reference */
3901 dhd->pub.tickcnt++;
3902
3903 /* Reschedule the watchdog */
3904 if (dhd->wd_timer_valid)
3905 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
3906 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3907
3908 }
3909
3910 #ifdef ENABLE_ADAPTIVE_SCHED
3911 static void
3912 dhd_sched_policy(int prio)
3913 {
3914 struct sched_param param;
3915 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
3916 param.sched_priority = 0;
3917 setScheduler(current, SCHED_NORMAL, &param);
3918 } else {
3919 if (get_scheduler_policy(current) != SCHED_FIFO) {
3920 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
3921 setScheduler(current, SCHED_FIFO, &param);
3922 }
3923 }
3924 }
3925 #endif /* ENABLE_ADAPTIVE_SCHED */
3926 #ifdef DEBUG_CPU_FREQ
3927 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
3928 {
3929 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
3930 struct cpufreq_freqs *freq = data;
3931 if (dhd) {
3932 if (!dhd->new_freq)
3933 goto exit;
3934 if (val == CPUFREQ_POSTCHANGE) {
3935 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3936 freq->new, freq->cpu));
3937 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
3938 }
3939 }
3940 exit:
3941 return 0;
3942 }
3943 #endif /* DEBUG_CPU_FREQ */
/*
 * Kernel thread variant of the DPC (deferred procedure call): woken via a
 * binary semaphore from dhd_sched_dpc(), it drains the bus by calling
 * dhd_bus_dpc() until no work remains, then releases the wake lock taken
 * by the scheduler.  When the bus is down it performs a clean stop
 * instead.  Exits on tsk->terminated or an interrupted semaphore wait.
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range */
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

#ifdef CUSTOM_DPC_CPUCORE
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#else
	/* Optional CPU-affinity pinning from the runtime config file */
	if (dhd->pub.conf->dpc_cpucore >= 0) {
		printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
		set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
	}
#endif
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (!binary_sema_down(tsk)) {
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_dpc_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
			/* Pairs with the writer of tsk->terminated */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
				/* Hold off the watchdog while draining the bus */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Drop the wake lock taken in dhd_sched_dpc() */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);

			} else {
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		}
		else
			break;
	}
	complete_and_exit(&tsk->completed, 0);
}
4002
/*
 * Kernel thread that delivers queued receive skbs to the network stack.
 * Woken via tsk->sema by dhd_sched_rxf(), it dequeues skb chains from the
 * rxf queue, unlinks each buffer and pushes it up with netif_rx_ni()
 * (netif_rx() + explicit softirq kick on pre-2.6 kernels), then drops the
 * wake lock taken by the scheduler.  Exits on tsk->terminated or an
 * interrupted semaphore wait.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		struct sched_param param;
		/* Clamp the configured priority into the valid RT range */
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */

	/*  signal: thread has started */
	complete(&tsk->completed);
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			/* Pairs with the writer of tsk->terminated */
			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Walk the chain, detaching each skb before delivery */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);
				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
					__FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Yield periodically so the enqueuer is not starved */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			DHD_OS_WAKE_UNLOCK(pub);
		}
		else
			break;
	}
	complete_and_exit(&tsk->completed, 0);
}
4083
4084 #ifdef BCMPCIE
4085 void dhd_dpc_kill(dhd_pub_t *dhdp)
4086 {
4087 dhd_info_t *dhd;
4088
4089 if (!dhdp)
4090 return;
4091
4092 dhd = dhdp->info;
4093
4094 if (!dhd)
4095 return;
4096
4097 tasklet_kill(&dhd->tasklet);
4098 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
4099 }
4100 #endif /* BCMPCIE */
4101
4102 static void
4103 dhd_dpc(ulong data)
4104 {
4105 dhd_info_t *dhd;
4106
4107 dhd = (dhd_info_t *)data;
4108
4109 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
4110 * down below , wake lock is set,
4111 * the tasklet is initialized in dhd_attach()
4112 */
4113 /* Call bus dpc unless it indicated down (then clean stop) */
4114 if (dhd->pub.busstate != DHD_BUS_DOWN) {
4115 if (dhd_bus_dpc(dhd->pub.bus))
4116 tasklet_schedule(&dhd->tasklet);
4117 else
4118 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4119 } else {
4120 dhd_bus_stop(dhd->pub.bus, TRUE);
4121 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4122 }
4123 }
4124
4125 void
4126 dhd_sched_dpc(dhd_pub_t *dhdp)
4127 {
4128 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4129
4130 DHD_OS_WAKE_LOCK(dhdp);
4131 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
4132 /* If the semaphore does not get up,
4133 * wake unlock should be done here
4134 */
4135 if (!binary_sema_up(&dhd->thr_dpc_ctl))
4136 DHD_OS_WAKE_UNLOCK(dhdp);
4137 return;
4138 } else {
4139 tasklet_schedule(&dhd->tasklet);
4140 }
4141 }
4142 #endif /* BCMDBUS */
4143
/*
 * Hand a chain of rx skbs to the rxf thread.  Takes the wake lock (the
 * rxf thread releases it after delivery), enqueues the chain on the rxf
 * queue, and kicks the thread's semaphore.  With RXF_DEQUEUE_ON_BUSY the
 * enqueue is retried a few times and finally falls back to delivering the
 * chain directly via netif_rx_ni(); otherwise the enqueue busy-waits
 * until it succeeds.
 */
static void
dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
#ifdef RXF_DEQUEUE_ON_BUSY
	int ret = BCME_OK;
	int retry = 2;
#endif /* RXF_DEQUEUE_ON_BUSY */

	DHD_OS_WAKE_LOCK(dhdp);

	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
#ifdef RXF_DEQUEUE_ON_BUSY
	do {
		ret = dhd_rxf_enqueue(dhdp, skb);
		if (ret == BCME_OK || ret == BCME_ERROR)
			break;
		else
			OSL_SLEEP(50); /* waiting for dequeueing */
	} while (retry-- > 0);

	/* Queue stayed full: bypass the rxf thread and deliver inline */
	if (retry <= 0 && ret == BCME_BUSY) {
		void *skbp = skb;

		while (skbp) {
			void *skbnext = PKTNEXT(dhdp->osh, skbp);
			PKTSETNEXT(dhdp->osh, skbp, NULL);
			netif_rx_ni(skbp);
			skbp = skbnext;
		}
		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
	}
	else {
		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
			up(&dhd->thr_rxf_ctl.sema);
		}
	}
#else /* RXF_DEQUEUE_ON_BUSY */
	/* NOTE(review): this spins until the rxf queue accepts the chain;
	 * relies on the rxf thread making progress concurrently
	 */
	do {
		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
			break;
	} while (1);
	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
		up(&dhd->thr_rxf_ctl.sema);
	}
	return;
#endif /* RXF_DEQUEUE_ON_BUSY */
}
4192
4193 #ifdef TOE
4194 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
4195 static int
4196 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
4197 {
4198 wl_ioctl_t ioc;
4199 char buf[32];
4200 int ret;
4201
4202 memset(&ioc, 0, sizeof(ioc));
4203
4204 ioc.cmd = WLC_GET_VAR;
4205 ioc.buf = buf;
4206 ioc.len = (uint)sizeof(buf);
4207 ioc.set = FALSE;
4208
4209 strncpy(buf, "toe_ol", sizeof(buf) - 1);
4210 buf[sizeof(buf) - 1] = '\0';
4211 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
4212 /* Check for older dongle image that doesn't support toe_ol */
4213 if (ret == -EIO) {
4214 DHD_ERROR(("%s: toe not supported by device\n",
4215 dhd_ifname(&dhd->pub, ifidx)));
4216 return -EOPNOTSUPP;
4217 }
4218
4219 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
4220 return ret;
4221 }
4222
4223 memcpy(toe_ol, buf, sizeof(uint32));
4224 return 0;
4225 }
4226
4227 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
4228 static int
4229 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
4230 {
4231 wl_ioctl_t ioc;
4232 char buf[32];
4233 int toe, ret;
4234
4235 memset(&ioc, 0, sizeof(ioc));
4236
4237 ioc.cmd = WLC_SET_VAR;
4238 ioc.buf = buf;
4239 ioc.len = (uint)sizeof(buf);
4240 ioc.set = TRUE;
4241
4242 /* Set toe_ol as requested */
4243
4244 strncpy(buf, "toe_ol", sizeof(buf) - 1);
4245 buf[sizeof(buf) - 1] = '\0';
4246 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
4247
4248 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
4249 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
4250 dhd_ifname(&dhd->pub, ifidx), ret));
4251 return ret;
4252 }
4253
4254 /* Enable toe globally only if any components are enabled. */
4255
4256 toe = (toe_ol != 0);
4257
4258 strcpy(buf, "toe");
4259 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
4260
4261 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
4262 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
4263 return ret;
4264 }
4265
4266 return 0;
4267 }
4268 #endif /* TOE */
4269
4270 #if defined(WL_CFG80211)
4271 void dhd_set_scb_probe(dhd_pub_t *dhd)
4272 {
4273 #define NUM_SCB_MAX_PROBE 3
4274 int ret = 0;
4275 wl_scb_probe_t scb_probe;
4276 char iovbuf[WL_EVENTING_MASK_LEN + 12];
4277
4278 memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
4279
4280 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
4281 return;
4282
4283 bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
4284
4285 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4286 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
4287
4288 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
4289
4290 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
4291
4292 bcm_mkiovar("scb_probe", (char *)&scb_probe,
4293 sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
4294 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
4295 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
4296 #undef NUM_SCB_MAX_PROBE
4297 return;
4298 }
4299 #endif /* WL_CFG80211 */
4300
4301 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
4302 static void
4303 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
4304 {
4305 dhd_info_t *dhd = DHD_DEV_INFO(net);
4306
4307 snprintf(info->driver, sizeof(info->driver), "wl");
4308 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
4309 }
4310
/* ethtool handler table hooked onto the net_device; only the driver-info
 * query is implemented here (other ethtool commands go through the
 * SIOCETHTOOL ioctl path handled by dhd_ethtool() below).
 */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
4314 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
4315
4316
4317 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/* Legacy SIOCETHTOOL handler. 'uaddr' is a userspace pointer to an ethtool
 * request whose first word is the command id. Supports ETHTOOL_GDRVINFO
 * and, when TOE is compiled in, get/set of TX/RX checksum offload which is
 * forwarded to the dongle via dhd_toe_get()/dhd_toe_set().
 * Returns 0 on success or a negative errno.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
4427 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
4428
/* Decide whether an ioctl/bus error warrants sending a HANG event up to the
 * Android framework. Returns TRUE when the HANG was sent, FALSE otherwise
 * (including on all non-Android builds, where this compiles to a no-op).
 * Triggers on -ETIMEDOUT / -EREMOTEIO, or on a bus that is DOWN without a
 * deliberate dongle reset in progress.
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
#if defined(OEM_ANDROID)
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if (!defined(BCMDBUS) && !defined(BCMPCIE))
	/* DPC thread already gone: driver is unloading, don't raise HANG */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif /* BCMDBUS */

	/* NOTE: the #ifdef below supplies two alternative openings of the same
	 * if-condition; the shared tail is the busstate/dongle_reset clause.
	 */
#ifdef CONFIG_MACH_UNIVERSAL5433
	/* old revision does not send hang message */
	if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
#else
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
#endif /* CONFIG_MACH_UNIVERSAL5433 */
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
		net_os_send_hang_message(net);
		return TRUE;
	}
#endif /* OEM_ANDROID */
	return FALSE;
}
4465
/* Common ioctl dispatcher shared by the netdev ioctl entry point and other
 * callers. Requests carrying DHD_IOCTL_MAGIC in ioc->driver are handled
 * locally by dhd_ioctl(); everything else is forwarded to the dongle via
 * dhd_wl_ioctl(). 'data_buf' may be NULL; at most DHD_IOCTL_MAXLEN bytes of
 * it are used. Returns a BCME_* code; on Android the result is also passed
 * to dhd_check_hang() so repeated failures can raise a HANG event.
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

#ifndef BCMDBUS
	/* send to dongle (must be up, and wl). */
	if (pub->busstate != DHD_BUS_DATA) {
#if !defined(OEM_ANDROID)
		/* non-Android builds try to bring the bus up on demand */
		int ret = dhd_bus_start(pub);
		if (ret != 0) {
			DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
#else
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
#endif
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}
#endif /* BCMDBUS */

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
		(ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
		strncmp("wsec_key", data_buf, 9) == 0) ||
		(ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
		strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
		ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/* short cut wl ioctl calls here */
		if (strcmp("htsf", data_buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* "rpc_*" iovars are only meaningful with full-dongle aggregation */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}
	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
#if defined(OEM_ANDROID)
	dhd_check_hang(net, pub, bcmerror);
#endif /* OEM_ANDROID */

	return bcmerror;
}
4594
/* net_device ndo ioctl entry point. Routes wireless-extension, ethtool and
 * Android private commands to their handlers; SIOCDEVPRIVATE requests are
 * copied in from userspace (with a compat path for 32-bit tasks), bounded
 * to DHD_IOCTL_MAXLEN, dispatched through dhd_ioctl_process(), and the
 * result buffer copied back. Returns 0 or a negative errno (via OSL_ERROR).
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int bcmerror = 0;
	int ifidx;
	int ret;
	void *local_buf = NULL;
	u16 buflen = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#if defined(OEM_ANDROID)
	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return BCME_NOTUP;
	}

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		/* NOTE(review): this return skips DHD_PERIM_UNLOCK unlike the
		 * other early exits — harmless if DHD_PERIM_* are no-ops on
		 * this bus, but confirm for PCIE full-dongle builds.
		 */
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return OSL_ERROR(BCME_DONGLE_DOWN);
	}
#endif /* (OEM_ANDROID) */

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -1;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

#if defined(OEM_ANDROID) || defined(P2PONEINT)
	/* Android framework private command channel (SIOCDEVPRIVATE+1) */
	if (cmd == SIOCDEVPRIVATE+1) {
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		/* NOTE(review): no DHD_PERIM_UNLOCK on this path either — verify */
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* OEM_ANDROID */

	if (cmd != SIOCDEVPRIVATE) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -EOPNOTSUPP;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
	/* 32-bit userspace on a 64-bit kernel: translate the compat layout */
	if (is_compat_task()) {
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
		ioc.cmd = compat_ioc.cmd;
		ioc.buf = compat_ptr(compat_ioc.buf);
		ioc.len = compat_ioc.len;
		ioc.set = compat_ioc.set;
		ioc.used = compat_ioc.used;
		ioc.needed = compat_ioc.needed;
		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
			sizeof(uint)) != 0)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}

		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
	}

	if (!capable(CAP_NET_ADMIN)) {
		bcmerror = BCME_EPERM;
		goto done;
	}

	if (ioc.len > 0) {
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			bcmerror = BCME_NOMEM;
			goto done;
		}

		/* drop the perim lock around the (possibly faulting) user copy */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			bcmerror = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		/* buflen+1 was allocated precisely for this terminator */
		*(char *)(local_buf + buflen) = '\0';
	}

	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	if (!bcmerror && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			bcmerror = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(bcmerror);
}
4751
4752 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
4753 int dhd_deepsleep(dhd_info_t *dhd, int flag)
4754 {
4755 char iovbuf[20];
4756 uint powervar = 0;
4757 dhd_pub_t *dhdp;
4758 int cnt = 0;
4759 int ret = 0;
4760
4761 dhdp = &dhd->pub;
4762
4763 switch (flag) {
4764 case 1 : /* Deepsleep on */
4765 DHD_ERROR(("[WiFi] Deepsleep On\n"));
4766 /* give some time to sysioc_work before deepsleep */
4767 OSL_SLEEP(200);
4768 #ifdef PKT_FILTER_SUPPORT
4769 /* disable pkt filter */
4770 dhd_enable_packet_filter(0, dhdp);
4771 #endif /* PKT_FILTER_SUPPORT */
4772 /* Disable MPC */
4773 powervar = 0;
4774 memset(iovbuf, 0, sizeof(iovbuf));
4775 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4776 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4777
4778 /* Enable Deepsleep */
4779 powervar = 1;
4780 memset(iovbuf, 0, sizeof(iovbuf));
4781 bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4782 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4783 break;
4784
4785 case 0: /* Deepsleep Off */
4786 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
4787
4788 /* Disable Deepsleep */
4789 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
4790 powervar = 0;
4791 memset(iovbuf, 0, sizeof(iovbuf));
4792 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
4793 iovbuf, sizeof(iovbuf));
4794 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
4795 sizeof(iovbuf), TRUE, 0);
4796
4797 memset(iovbuf, 0, sizeof(iovbuf));
4798 bcm_mkiovar("deepsleep", (char *)&powervar, 4,
4799 iovbuf, sizeof(iovbuf));
4800 if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
4801 sizeof(iovbuf), FALSE, 0)) < 0) {
4802 DHD_ERROR(("the error of dhd deepsleep status"
4803 " ret value :%d\n", ret));
4804 } else {
4805 if (!(*(int *)iovbuf)) {
4806 DHD_ERROR(("deepsleep mode is 0,"
4807 " count: %d\n", cnt));
4808 break;
4809 }
4810 }
4811 }
4812
4813 /* Enable MPC */
4814 powervar = 1;
4815 memset(iovbuf, 0, sizeof(iovbuf));
4816 bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
4817 dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
4818 break;
4819 }
4820
4821 return 0;
4822 }
4823
/* net_device ndo_stop handler (ifconfig down). Stops the TX queue, flushes
 * station state, brings cfg80211 down (and for non-driverload builds tears
 * down the virtual interfaces and inet/inet6 notifiers), cleans up wlfc and
 * the protocol layer, then powers WiFi off or enters deepsleep depending on
 * configuration. Always returns 0.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	/* NOTE(review): when pub.up == 0 we jump to exit with ifidx still 0,
	 * so the exit path below can still call wl_android_wifi_off() —
	 * confirm this is intended for an already-down interface.
	 */
	if (dhd->pub.up == 0) {
		goto exit;
	}

	dhd_if_flush_sta(DHD_DEV_IFP(net));


	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	if (ifidx == 0) {
		wl_cfg80211_down(NULL);

		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);
#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 */
				dhd_net_if_unlock_local(dhd);
			}
			cancel_work_sync(dhd->dhd_deferred_wq);
		}
	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
#if defined(WL_CFG80211) && defined(OEM_ANDROID)
	if (ifidx == 0 && !dhd_download_fw_on_driverload)
		wl_android_wifi_off(net);
	else {
		if (dhd->pub.conf && dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}
#endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

	printf("%s: Exit\n", __FUNCTION__);
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	return 0;
}
4914
4915 #if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
4916 defined(USE_INITIAL_SHORT_DWELL_TIME))
4917 extern bool g_first_broadcast_scan;
4918 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
4919
#ifdef WL11U
/* Enable 802.11u interworking in the dongle and, when that succeeds,
 * advertise the basic WNM capabilities (BSS transition + notification)
 * required for Hotspot 2.0 Release 2.
 * Returns BCME_OK on success or the failing iovar's error code.
 */
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 enable = true;
	int ret = BCME_OK;

	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		/* fix: corrected "enableing" typo in the log message */
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	if (ret == BCME_OK) {
		/* basic capabilities for HS20 REL2 */
		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
		}
	}

	return ret;
}
#endif /* WL11U */
4945
/* net_device ndo_open handler (ifconfig up). For the primary interface this
 * powers WiFi on (non-driverload builds), brings the bus up and syncs with
 * the dongle, optionally configures RPC aggregation (BCM_FD_AGGR), copies
 * the dongle MAC into the netdev, restores TOE feature flags, and brings
 * cfg80211 up with its address notifiers. Finally the TX queue is started.
 * Returns 0 on success, negative on failure (after dhd_stop() cleanup).
 */
static int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
#ifdef BCM_FD_AGGR
	char iovbuf[WLC_IOCTL_SMLEN];
	dbus_config_t config;
	uint32 agglimit = 0;
	uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
#endif /* BCM_FD_AGGR */
	int ifidx;
	int32 ret = 0;

	printf("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	/* fresh open: clear stale trap/hang state from a previous session */
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;

#if 0
	/*
	 * Force start if ifconfig_up gets called before START command
	 * We keep WEXT's wl_control_wl_start to provide backward compatibility
	 * This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif /* defined(OEM_ANDROID) && !defined(WL_CFG80211) */

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	/* the heavy bring-up below only happens for the primary interface */
	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		dhd_update_fw_nv_path(dhd); // terence 20140807: fix for op_mode issue
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
			ret = wl_android_wifi_on(net);
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}

		if (dhd->pub.busstate != DHD_BUS_DATA) {

#ifndef BCMDBUS
			/* try to bring up bus */
			DHD_PERIM_UNLOCK(&dhd->pub);
			ret = dhd_bus_start(&dhd->pub);
			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
#else /* BCMDBUS */
			if ((ret = dbus_up(dhd->pub.dbus)) != 0) {
				DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
				goto exit;
			} else
				dhd->pub.busstate = DHD_BUS_DATA;

			/* Bus is ready, query any dongle information */
			if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				goto exit;
			}
#endif /* BCMDBUS */

		}

#ifdef BCM_FD_AGGR
		config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;


		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
			iovbuf, sizeof(iovbuf));

		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
			agglimit = *(uint32 *)iovbuf;
			config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
			config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
			DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
				agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
			if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
				DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
			}
		} else {
			DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
			rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
		}

		/* Set aggregation for TX */
		bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
			rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);

		/* Set aggregation for RX */
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
		if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
			dhd->pub.info->fdaggr = 0;
			if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
			if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
				dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
		} else {
			DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
		}
#endif /* BCM_FD_AGGR */
		if (dhd_download_fw_on_driverload) {
			if (dhd->pub.conf->deepsleep)
				dhd_deepsleep(dhd, 0);
		}

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		else
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
#endif /* TOE */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(NULL))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		if (!dhd_download_fw_on_driverload) {
#ifdef ARP_OFFLOAD_SUPPORT
			dhd->pend_ipaddr = 0;
			if (!dhd_inetaddr_notifier_registered) {
				dhd_inetaddr_notifier_registered = TRUE;
				register_inetaddr_notifier(&dhd_inetaddr_notifier);
			}
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
			if (!dhd_inet6addr_notifier_registered) {
				dhd_inet6addr_notifier_registered = TRUE;
				register_inet6addr_notifier(&dhd_inet6addr_notifier);
			}
#endif /* CONFIG_IPV6 */
		}
		dhd_set_scb_probe(&dhd->pub);
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

#ifdef BCMDBGFS
	dhd_dbg_init(&dhd->pub);
#endif

	OLD_MOD_INC_USE_COUNT;
exit:
	/* any failure above: unwind through the normal stop path */
	if (ret)
		dhd_stop(net);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
	mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
	return ret;
}
5156
5157 int dhd_do_driver_init(struct net_device *net)
5158 {
5159 dhd_info_t *dhd = NULL;
5160
5161 if (!net) {
5162 DHD_ERROR(("Primary Interface not initialized \n"));
5163 return -EINVAL;
5164 }
5165
5166 #ifdef MULTIPLE_SUPPLICANT
5167 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
5168 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
5169 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
5170 return 0;
5171 }
5172 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
5173 #endif /* MULTIPLE_SUPPLICANT */
5174
5175 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
5176 dhd = DHD_DEV_INFO(net);
5177
5178 /* If driver is already initialized, do nothing
5179 */
5180 if (dhd->pub.busstate == DHD_BUS_DATA) {
5181 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
5182 return 0;
5183 }
5184
5185 if (dhd_open(net) < 0) {
5186 DHD_ERROR(("Driver Init Failed \n"));
5187 return -1;
5188 }
5189
5190 return 0;
5191 }
5192
5193 int
5194 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
5195 {
5196
5197 #ifdef WL_CFG80211
5198 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
5199 return BCME_OK;
5200 #endif
5201
5202 /* handle IF event caused by wl commands, SoftAP, WEXT and
5203 * anything else. This has to be done asynchronously otherwise
5204 * DPC will be blocked (and iovars will timeout as DPC has no chance
5205 * to read the response back)
5206 */
5207 if (ifevent->ifidx > 0) {
5208 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
5209
5210 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
5211 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
5212 strncpy(if_event->name, name, IFNAMSIZ);
5213 if_event->name[IFNAMSIZ - 1] = '\0';
5214 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
5215 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
5216 }
5217
5218 return BCME_OK;
5219 }
5220
5221 int
5222 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
5223 {
5224 dhd_if_event_t *if_event;
5225
5226 #if defined(WL_CFG80211) && !defined(P2PONEINT)
5227 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
5228 return BCME_OK;
5229 #endif /* WL_CFG80211 */
5230
5231 /* handle IF event caused by wl commands, SoftAP, WEXT and
5232 * anything else
5233 */
5234 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
5235 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
5236 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
5237 strncpy(if_event->name, name, IFNAMSIZ);
5238 if_event->name[IFNAMSIZ - 1] = '\0';
5239 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
5240 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
5241
5242 return BCME_OK;
5243 }
5244
/* unregister and free the existing net_device interface (if any) in iflist and
 * allocate a new one. the slot is reused. this function does NOT register the
 * new interface to linux kernel. dhd_register_if does the job
 *
 * Returns the newly allocated net_device, or NULL on allocation failure
 * (in which case the iflist slot is cleared).
 */
struct net_device*
dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
	uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;

	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
	ifp = dhdinfo->iflist[ifidx];

	if (ifp != NULL) {
		/* slot reuse: tear down any netdev still attached to it */
		if (ifp->net != NULL) {
			DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));

			dhd_dev_priv_clear(ifp->net); /* clear net_device private */

			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_stop_queue(ifp->net);
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
		}
	} else {
		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
		if (ifp == NULL) {
			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
			return NULL;
		}
	}

	memset(ifp, 0, sizeof(dhd_if_t));
	ifp->info = dhdinfo;
	ifp->idx = ifidx;
	ifp->bssidx = bssidx;
	if (mac != NULL)
		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);

	/* Allocate etherdev, including space for private structure */
	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
	if (ifp->net == NULL) {
		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
		goto fail;
	}

	/* Setup the dhd interface's netdevice private structure. */
	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);

	if (name && name[0]) {
		strncpy(ifp->net->name, name, IFNAMSIZ);
		ifp->net->name[IFNAMSIZ - 1] = '\0';
	}
#ifdef WL_CFG80211
	if (ifidx == 0)
		ifp->net->destructor = free_netdev;
	else
		ifp->net->destructor = dhd_netdev_free;
#else
	ifp->net->destructor = free_netdev;
#endif /* WL_CFG80211 */
	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
	ifp->name[IFNAMSIZ - 1] = '\0';
	dhdinfo->iflist[ifidx] = ifp;

#ifdef PCIE_FULL_DONGLE
	/* Initialize STA info list */
	INIT_LIST_HEAD(&ifp->sta_list);
	DHD_IF_STA_LIST_LOCK_INIT(ifp);
#endif /* PCIE_FULL_DONGLE */

	return ifp->net;

fail:
	/* roll back: free whatever was allocated and clear the slot */
	if (ifp != NULL) {
		if (ifp->net != NULL) {
			dhd_dev_priv_clear(ifp->net);
			free_netdev(ifp->net);
			ifp->net = NULL;
		}
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
		ifp = NULL;
	}
	dhdinfo->iflist[ifidx] = NULL;
	return NULL;
}
5341
/* unregister the net_device interface associated with the indexed slot
 */
5344 static int
5345 dhd_preremove_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
5346 {
5347 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
5348 dhd_if_t *ifp;
5349
5350 ifp = dhdinfo->iflist[ifidx];
5351 if (ifp != NULL) {
5352 if (ifp->net != NULL) {
5353 DHD_ERROR(("unregister interface '%s' idx %d\n", ifp->net->name, ifp->idx));
5354
5355 /* in unregister_netdev case, the interface gets freed by net->destructor
5356 * (which is set to free_netdev)
5357 */
5358 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
5359 free_netdev(ifp->net);
5360 } else {
5361 netif_stop_queue(ifp->net);
5362
5363
5364
5365 #if defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)
5366 if (ifidx == 0) {
5367 argos_register_notifier_deinit();
5368 }
5369 #endif
5370
5371 #ifdef SET_RPS_CPUS
5372 custom_rps_map_clear(ifp->net->_rx);
5373 #endif /* SET_RPS_CPUS */
5374 if (need_rtnl_lock)
5375 unregister_netdev(ifp->net);
5376 else
5377 unregister_netdevice(ifp->net);
5378 }
5379 ifp->net = NULL;
5380 }
5381 }
5382
5383 return BCME_OK;
5384 }
5385
/* free the net_device interface associated with the indexed
 * slot, also free the slot memory and set the slot pointer to NULL
 */
5389 int
5390 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
5391 {
5392 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
5393 dhd_if_t *ifp;
5394
5395 ifp = dhdinfo->iflist[ifidx];
5396 if (ifp != NULL) {
5397 DHD_ERROR(("deleting if idx %d\n", ifp->idx));
5398 if (ifp->net)
5399 dhd_preremove_if(dhdpub, ifidx, need_rtnl_lock);
5400 #ifdef DHD_WMF
5401 dhd_wmf_cleanup(dhdpub, ifidx);
5402 #endif /* DHD_WMF */
5403
5404 dhd_if_del_sta_list(ifp);
5405
5406 dhdinfo->iflist[ifidx] = NULL;
5407 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
5408
5409 }
5410
5411 return BCME_OK;
5412 }
5413
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface (ifidx 0).  Only the primary
 * interface wires .ndo_open/.ndo_stop to dhd_open/dhd_stop.
 */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	/* .ndo_set_multicast_list was replaced by .ndo_set_rx_mode in 3.2 */
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

/* net_device_ops for virtual (secondary) interfaces; note that no
 * open/stop handlers are installed here.
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

#ifdef P2PONEINT
/* open/stop handlers for the P2P interface, implemented in cfg80211 code */
extern int wl_cfgp2p_if_open(struct net_device *net);
extern int wl_cfgp2p_if_stop(struct net_device *net);

/* net_device_ops used for the P2P virtual interface in P2PONEINT builds */
static struct net_device_ops dhd_cfgp2p_ops_virt = {
	.ndo_open = wl_cfgp2p_if_open,
	.ndo_stop = wl_cfgp2p_if_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#endif /* P2PONEINT */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
5460
5461 #ifdef DEBUGGER
5462 extern void debugger_init(void *bus_handle);
5463 #endif
5464
5465
5466 #ifdef SHOW_LOGTRACE
/* Path of the firmware log-format-string file; overridable at module
 * load time via the read-only 'logstrs_path' module parameter.
 */
static char *logstrs_path = "/root/logstrs.bin";
module_param(logstrs_path, charp, S_IRUGO);
5469
5470 int
5471 dhd_init_logstrs_array(dhd_event_log_t *temp)
5472 {
5473 struct file *filep = NULL;
5474 struct kstat stat;
5475 mm_segment_t fs;
5476 char *raw_fmts = NULL;
5477 int logstrs_size = 0;
5478
5479 logstr_header_t *hdr = NULL;
5480 uint32 *lognums = NULL;
5481 char *logstrs = NULL;
5482 int ram_index = 0;
5483 char **fmts;
5484 int num_fmts = 0;
5485 uint32 i = 0;
5486 int error = 0;
5487 set_fs(KERNEL_DS);
5488 fs = get_fs();
5489 filep = filp_open(logstrs_path, O_RDONLY, 0);
5490 if (IS_ERR(filep)) {
5491 DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
5492 goto fail;
5493 }
5494 error = vfs_stat(logstrs_path, &stat);
5495 if (error) {
5496 DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
5497 goto fail;
5498 }
5499 logstrs_size = (int) stat.size;
5500
5501 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
5502 if (raw_fmts == NULL) {
5503 DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
5504 goto fail;
5505 }
5506 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
5507 DHD_ERROR(("Error: Log strings file read failed\n"));
5508 goto fail;
5509 }
5510
5511 /* Remember header from the logstrs.bin file */
5512 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
5513 sizeof(logstr_header_t));
5514
5515 if (hdr->log_magic == LOGSTRS_MAGIC) {
5516 /*
5517 * logstrs.bin start with header.
5518 */
5519 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
5520 ram_index = (hdr->ram_lognums_offset -
5521 hdr->rom_lognums_offset) / sizeof(uint32);
5522 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
5523 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
5524 } else {
5525 /*
5526 * Legacy logstrs.bin format without header.
5527 */
5528 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
5529 if (num_fmts == 0) {
5530 /* Legacy ROM/RAM logstrs.bin format:
5531 * - ROM 'lognums' section
5532 * - RAM 'lognums' section
5533 * - ROM 'logstrs' section.
5534 * - RAM 'logstrs' section.
5535 *
5536 * 'lognums' is an array of indexes for the strings in the
5537 * 'logstrs' section. The first uint32 is 0 (index of first
5538 * string in ROM 'logstrs' section).
5539 *
5540 * The 4324b5 is the only ROM that uses this legacy format. Use the
5541 * fixed number of ROM fmtnums to find the start of the RAM
5542 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
5543 * find the ROM 'logstrs' section.
5544 */
5545 #define NUM_4324B5_ROM_FMTS 186
5546 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
5547 ram_index = NUM_4324B5_ROM_FMTS;
5548 lognums = (uint32 *) raw_fmts;
5549 num_fmts = ram_index;
5550 logstrs = (char *) &raw_fmts[num_fmts << 2];
5551 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
5552 num_fmts++;
5553 logstrs = (char *) &raw_fmts[num_fmts << 2];
5554 }
5555 } else {
5556 /* Legacy RAM-only logstrs.bin format:
5557 * - RAM 'lognums' section
5558 * - RAM 'logstrs' section.
5559 *
5560 * 'lognums' is an array of indexes for the strings in the
5561 * 'logstrs' section. The first uint32 is an index to the
5562 * start of 'logstrs'. Therefore, if this index is divided
5563 * by 'sizeof(uint32)' it provides the number of logstr
5564 * entries.
5565 */
5566 ram_index = 0;
5567 lognums = (uint32 *) raw_fmts;
5568 logstrs = (char *) &raw_fmts[num_fmts << 2];
5569 }
5570 }
5571 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
5572 if (fmts == NULL) {
5573 DHD_ERROR(("Failed to allocate fmts memory\n"));
5574 goto fail;
5575 }
5576
5577 for (i = 0; i < num_fmts; i++) {
5578 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
5579 * (they are 0-indexed relative to 'rom_logstrs_offset').
5580 *
5581 * RAM lognums are already indexed to point to the correct RAM logstrs (they
5582 * are 0-indexed relative to the start of the logstrs.bin file).
5583 */
5584 if (i == ram_index) {
5585 logstrs = raw_fmts;
5586 }
5587 fmts[i] = &logstrs[lognums[i]];
5588 }
5589 temp->fmts = fmts;
5590 temp->raw_fmts = raw_fmts;
5591 temp->num_fmts = num_fmts;
5592 filp_close(filep, NULL);
5593 set_fs(fs);
5594 return 0;
5595 fail:
5596 if (raw_fmts) {
5597 kfree(raw_fmts);
5598 raw_fmts = NULL;
5599 }
5600 if (!IS_ERR(filep))
5601 filp_close(filep, NULL);
5602 set_fs(fs);
5603 temp->fmts = NULL;
5604 return -1;
5605 }
5606 #endif /* SHOW_LOGTRACE */
5607
5608
/* Allocate and initialize the per-adapter dhd_info structure: create the
 * primary network interface, attach the protocol layer and (when built in)
 * cfg80211 / wireless-extensions, initialize locks, wakelocks and notifier
 * registrations, and spawn the watchdog/DPC/RXF worker threads.
 *
 * On success returns a pointer to the embedded dhd_pub_t; on failure any
 * partially-built state is torn down via dhd_detach()/dhd_free() and NULL
 * is returned.  Progress is tracked in 'dhd_state' so teardown knows how
 * far attach got.
 */
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
{
	dhd_info_t *dhd = NULL;
	struct net_device *net = NULL;
	char if_name[IFNAMSIZ] = {'\0'};
	/* bus_type/bus_num/slot_num of -1 mean "any" to the platform lookup */
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
	wifi_adapter_info_t *adapter = NULL;

	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#ifdef STBLINUX
	DHD_ERROR(("%s\n", driver_target));
#endif /* STBLINUX */
	/* will implement get_ids for DBUS later */
	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);

	/* Allocate primary dhd_info: prefer platform-preallocated memory,
	 * fall back to a fresh MALLOC if none was reserved.
	 */
	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
	if (dhd == NULL) {
		dhd = MALLOC(osh, sizeof(dhd_info_t));
		if (dhd == NULL) {
			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
			goto fail;
		}
	}
	memset(dhd, 0, sizeof(dhd_info_t));
	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;

	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */

	dhd->pub.osh = osh;
	dhd->adapter = adapter;

#ifdef GET_CUSTOM_MAC_ENABLE
	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
#endif /* GET_CUSTOM_MAC_ENABLE */
#ifndef BCMDBUS
	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;

	/* Initialize thread based operation and lock */
	sema_init(&dhd->sdsem, 1);

	/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
	 * This is indeed a hack but we have to make it work properly before we have a better
	 * solution
	 */
	dhd_update_fw_nv_path(dhd);
#endif /* BCMDBUS */

	/* Link to info module */
	dhd->pub.info = dhd;


	/* Link to bus module */
	dhd->pub.bus = bus;
	dhd->pub.hdrlen = bus_hdrlen;

	/* Set network interface name if it was provided as module parameter.
	 * If the supplied name doesn't end in a digit, append "%d" so the
	 * kernel picks a free unit number.
	 */
	if (iface_name[0]) {
		int len;
		char ch;
		strncpy(if_name, iface_name, IFNAMSIZ);
		if_name[IFNAMSIZ - 1] = 0;
		len = strlen(if_name);
		ch = if_name[len - 1];
		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
			strcat(if_name, "%d");
	}
	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
	if (net == NULL)
		goto fail;
	dhd_state |= DHD_ATTACH_STATE_ADD_IF;

	/* ops are installed later; clear them for now */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif

	sema_init(&dhd->proto_sem, 1);

#ifdef PROP_TXSTATUS
	spin_lock_init(&dhd->wlfc_spinlock);

	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
	dhd->pub.plat_init = dhd_wlfc_plat_init;
	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;

#ifdef DHD_WLFC_THREAD
	init_waitqueue_head(&dhd->pub.wlfc_wqhead);
	dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
	if (IS_ERR(dhd->pub.wlfc_thread)) {
		DHD_ERROR(("create wlfc thread failed\n"));
		goto fail;
	} else {
		wake_up_process(dhd->pub.wlfc_thread);
	}
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	/* Initialize other structure content */
	init_waitqueue_head(&dhd->ioctl_resp_wait);
	init_waitqueue_head(&dhd->d3ack_wait);
	init_waitqueue_head(&dhd->ctrl_wait);

	/* Initialize the spinlocks */
	spin_lock_init(&dhd->sdlock);
	spin_lock_init(&dhd->txqlock);
	spin_lock_init(&dhd->dhd_lock);
	spin_lock_init(&dhd->rxf_lock);
#if defined(RXFRAME_THREAD)
	dhd->rxthread_enabled = TRUE;
#endif /* defined(RXFRAME_THREAD) */

#ifdef DHDTCPACK_SUPPRESS
	spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */

	/* Initialize Wakelock stuff */
	spin_lock_init(&dhd->wakelock_spinlock);
	dhd->wakelock_counter = 0;
	dhd->wakelock_wd_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
	mutex_init(&dhd->dhd_net_if_mutex);
	mutex_init(&dhd->dhd_suspend_mutex);
#endif
	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;

	/* Attach and link in the protocol */
	if (dhd_prot_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_prot_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;

#ifdef WL_CFG80211
	/* Attach and link in the cfg80211 */
	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
		DHD_ERROR(("wl_cfg80211_attach failed\n"));
		goto fail;
	}

	dhd_monitor_init(&dhd->pub);
	dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif
#if defined(WL_WIRELESS_EXT)
	/* Attach and link in the iw (only when cfg80211 did not attach) */
	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
			DHD_ERROR(("wl_iw_attach failed\n"));
			goto fail;
		}
		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef SHOW_LOGTRACE
	dhd_init_logstrs_array(&dhd->event_data);
#endif /* SHOW_LOGTRACE */

	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
		DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
		goto fail;
	}


#ifndef BCMDBUS
	/* Set up the watchdog timer */
	init_timer(&dhd->timer);
	dhd->timer.data = (ulong)dhd;
	dhd->timer.function = dhd_watchdog;
	dhd->default_wd_interval = dhd_watchdog_ms;

	if (dhd_watchdog_prio >= 0) {
		/* Initialize watchdog thread */
		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");

	} else {
		dhd->thr_wdt_ctl.thr_pid = -1;
	}

#ifdef DEBUGGER
	debugger_init((void *) bus);
#endif

	/* Set up the bottom half handler */
	if (dhd_dpc_prio >= 0) {
		/* Initialize DPC thread */
		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
	} else {
		/* use tasklet for dpc */
		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
		dhd->thr_dpc_ctl.thr_pid = -1;
	}

	if (dhd->rxthread_enabled) {
		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
		/* Initialize RXF thread */
		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
	}
#endif /* BCMDBUS */

	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;

#if defined(CONFIG_PM_SLEEP)
	if (!dhd_pm_notifier_registered) {
		dhd_pm_notifier_registered = TRUE;
		register_pm_notifier(&dhd_pm_notifier);
	}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	dhd->pend_ipaddr = 0;
	if (!dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = TRUE;
		register_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
	if (!dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = TRUE;
		register_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif
	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
#ifdef DEBUG_CPU_FREQ
	dhd->new_freq = alloc_percpu(int);
	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
#ifdef DHDTCPACK_SUPPRESS
#if defined(BCMPCIE)
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
#else
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif
#endif /* DHDTCPACK_SUPPRESS */

	dhd_state |= DHD_ATTACH_STATE_DONE;
	dhd->dhd_state = dhd_state;

	dhd_found++;
	return &dhd->pub;

fail:
	/* Unwind whatever was set up; dhd_detach consults dhd->dhd_state */
	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
			__FUNCTION__, dhd_state, &dhd->pub));
		dhd->dhd_state = dhd_state;
		dhd_detach(&dhd->pub);
		dhd_free(&dhd->pub);
	}

	return NULL;
}
5888
5889 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
5890 {
5891 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
5892 return DHD_FLAG_HOSTAP_MODE;
5893 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
5894 return DHD_FLAG_P2P_MODE;
5895 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
5896 return DHD_FLAG_IBSS_MODE;
5897 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
5898 return DHD_FLAG_MFG_MODE;
5899
5900 return DHD_FLAG_STA_MODE;
5901 }
5902
/* Resolve the firmware / nvram / config file paths into dhdinfo.
 *
 * Precedence (lowest to highest):
 *   1. Kconfig defaults (CONFIG_BCMDHD_FW_PATH / CONFIG_BCMDHD_NVRAM_PATH)
 *   2. adapter (platform) paths, used only when dhdinfo's path is empty
 *   3. firmware_path / nvram_path / config_path module parameters
 *
 * Returns TRUE on success, FALSE if any candidate path is too long for
 * its destination buffer.  A trailing '\n' (e.g. from echo into the
 * module parameter) is stripped.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
	int fw_len;
	int nv_len;
	int conf_len;
	const char *fw = NULL;
	const char *nv = NULL;
	const char *conf = NULL;
	wifi_adapter_info_t *adapter = dhdinfo->adapter;


	/* Update firmware and nvram path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
	/* NOTE(review): the dhd_download_fw_on_driverload guard is commented
	 * out, so the Kconfig defaults apply unconditionally here.
	 */
//	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_FW_PATH
		fw = CONFIG_BCMDHD_FW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
		nv = CONFIG_BCMDHD_NVRAM_PATH;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
//	}

	/* check if we need to initialize the path */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;

	}
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	}
	if (dhdinfo->conf_path[0] == '\0') {
		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
			conf = adapter->conf_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	if (firmware_path[0] != '\0')
		fw = firmware_path;
	if (nvram_path[0] != '\0')
		nv = nvram_path;
	if (config_path[0] != '\0')
		conf = config_path;

	/* copy the winning candidates; length was checked so strncpy always
	 * NUL-terminates here (fw_len < sizeof(dst))
	 */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->fw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
		if (dhdinfo->fw_path[fw_len-1] == '\n')
			dhdinfo->fw_path[fw_len-1] = '\0';
	}
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= sizeof(dhdinfo->nv_path)) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
		if (dhdinfo->nv_path[nv_len-1] == '\n')
			dhdinfo->nv_path[nv_len-1] = '\0';
	}
	if (conf && conf[0] != '\0') {
		conf_len = strlen(conf);
		if (conf_len >= sizeof(dhdinfo->conf_path)) {
			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
		if (dhdinfo->conf_path[conf_len-1] == '\n')
			dhdinfo->conf_path[conf_len-1] = '\0';
	}

#if 0
	/* clear the path in module parameter */
	firmware_path[0] = '\0';
	nvram_path[0] = '\0';
	config_path[0] = '\0';
#endif

	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
	} else
		printf("%s: fw_path=%s\n", __FUNCTION__, dhdinfo->fw_path);
#if 0
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
		return FALSE;
	}
#endif
	/* derive conf path from fw path when none was supplied */
	if (dhdinfo->conf_path[0] == '\0') {
		dhd_conf_set_conf_path_by_fw_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->fw_path);
	} else {
		dhdinfo->pub.conf_path = dhdinfo->conf_path;
		printf("%s: conf_path=%s\n", __FUNCTION__, dhdinfo->conf_path);
	}

	return TRUE;
}
6018
6019 #ifndef BCMDBUS
/* Bring the dongle bus up: download firmware/nvram if the bus is down,
 * start the watchdog timer, initialize the bus and protocol layers,
 * register OOB interrupts (when configured), and sync with the dongle.
 *
 * Returns 0 on success or a negative errno/BCME code.  Every error path
 * must release the sdlock and the perimeter lock it holds at that point —
 * keep the unlock ordering intact when modifying this function.
 */
int
dhd_bus_start(dhd_pub_t *dhdp)
{
	int ret = -1;
	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
	unsigned long flags;

	ASSERT(dhd);

	DHD_TRACE(("Enter %s:\n", __FUNCTION__));

	DHD_PERIM_LOCK(dhdp);

	/* try to download image and nvram to the dongle */
	if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
		DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
			dhd->fw_path, dhd->nv_path);
		if (ret < 0) {
			DHD_ERROR(("%s: failed to download firmware %s\n",
				__FUNCTION__, dhd->fw_path));
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
	}
	if (dhd->pub.busstate != DHD_BUS_LOAD) {
		DHD_PERIM_UNLOCK(dhdp);
		return -ENETDOWN;
	}

	dhd_os_sdlock(dhdp);

	/* Start the watchdog timer */
	dhd->pub.tickcnt = 0;
	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);

	/* Bring up the bus */
	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {

		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
#if defined(BCMPCIE_OOB_HOST_WAKE)
	dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* Host registration for OOB interrupt */
	if (dhd_bus_oob_intr_register(dhdp)) {
		/* deactivate timer and wait for the handler to finish */
#if !defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);

		dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
		return -ENODEV;
	}

#if defined(BCMPCIE_OOB_HOST_WAKE)
	dhd_os_sdlock(dhdp);
	dhd_bus_oob_intr_set(dhdp, TRUE);
#else
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
#ifdef PCIE_FULL_DONGLE
	{
		/* PCIe: size and create the H2D flow rings before protocol init */
		uint8 txpush = 0;
		uint32 num_flowrings; /* includes H2D common rings */
		num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
		DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
			num_flowrings));
		if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
			dhd_os_sdunlock(dhdp);
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
	}
#endif /* PCIE_FULL_DONGLE */

	/* Do protocol initialization necessary for IOCTL/IOVAR */
	dhd_prot_init(&dhd->pub);

	/* If bus is not ready, can't come up */
	if (dhd->pub.busstate != DHD_BUS_DATA) {
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

	dhd_os_sdunlock(dhdp);

	/* Bus is ready, query any dongle information */
	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}

#ifdef ARP_OFFLOAD_SUPPORT
	/* flush any IP address that arrived before the bus was up */
	if (dhd->pend_ipaddr) {
#ifdef AOE_IP_ALIAS_SUPPORT
		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
#endif /* AOE_IP_ALIAS_SUPPORT */
		dhd->pend_ipaddr = 0;
	}
#endif /* ARP_OFFLOAD_SUPPORT */

	DHD_PERIM_UNLOCK(dhdp);
	return 0;
}
6144 #endif /* BCMDBUS */
6145
6146 #ifdef WLTDLS
/* Enable/disable TDLS in the firmware and configure TDLS auto-operation.
 *
 * The iovars are issued in a fixed order: "tdls_enable" first (skipped if
 * already in the requested state), then "tdls_auto_op", then — only when
 * auto mode is on — the idle-time and RSSI thresholds.  'mac' is unused
 * here (BCM_REFERENCE).  Returns 0 or a negative BCME/ioctl error.
 */
int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 tdls = tdls_on;
	int ret = 0;
	uint32 tdls_auto_op = 0;
	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
	BCM_REFERENCE(mac);
	/* bail out early if the firmware lacks TDLS support */
	if (!FW_SUPPORTED(dhd, tdls))
		return BCME_ERROR;

	/* already in the requested state: only (re)program auto mode */
	if (dhd->tdls_enable == tdls_on)
		goto auto_mode;
	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
		goto exit;
	}
	dhd->tdls_enable = tdls_on;
auto_mode:

	tdls_auto_op = auto_on;
	bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
		iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
		sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
		goto exit;
	}

	if (tdls_auto_op) {
		/* auto mode: program idle timeout and RSSI setup/teardown thresholds */
		bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
			sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
			sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
			sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
			sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
	}

exit:
	return ret;
}
6204
6205 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
6206 {
6207 dhd_info_t *dhd = DHD_DEV_INFO(dev);
6208 int ret = 0;
6209 if (dhd)
6210 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
6211 else
6212 ret = BCME_ERROR;
6213 return ret;
6214 }
6215
6216 int
6217 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
6218 {
6219 char iovbuf[WLC_IOCTL_SMLEN];
6220 int ret = 0;
6221 bool auto_on = false;
6222 uint32 mode = wfd_mode;
6223
6224 auto_on = false;
6225 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
6226 if (ret < 0) {
6227 DHD_ERROR(("%s Disable tdls_auto_op failed. %d\n", __FUNCTION__, ret));
6228 return ret;
6229 }
6230
6231
6232 bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
6233 iovbuf, sizeof(iovbuf));
6234 if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6235 sizeof(iovbuf), TRUE, 0)) < 0) &&
6236 (ret != BCME_UNSUPPORTED)) {
6237 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
6238 return ret;
6239 }
6240
6241 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
6242 if (ret < 0) {
6243 DHD_ERROR(("%s enable tdls_auto_op failed. %d\n", __FUNCTION__, ret));
6244 return ret;
6245 }
6246
6247 dhd->tdls_mode = mode;
6248 return ret;
6249 }
6250
6251 #ifdef PCIE_FULL_DONGLE
6252 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
6253 {
6254 dhd_info_t *dhd = DHD_DEV_INFO(dev);
6255 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
6256 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
6257 tdls_peer_node_t *new = NULL, *prev = NULL;
6258 dhd_if_t *dhdif;
6259 uint8 sa[ETHER_ADDR_LEN];
6260 int ifidx = dhd_net2idx(dhd, dev);
6261
6262 if (ifidx == DHD_BAD_IF)
6263 return;
6264
6265 dhdif = dhd->iflist[ifidx];
6266 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
6267
6268 if (connect) {
6269 while (cur != NULL) {
6270 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
6271 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
6272 __FUNCTION__, __LINE__));
6273 return;
6274 }
6275 cur = cur->next;
6276 }
6277
6278 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
6279 if (new == NULL) {
6280 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
6281 return;
6282 }
6283 memcpy(new->addr, da, ETHER_ADDR_LEN);
6284 new->next = dhdp->peer_tbl.node;
6285 dhdp->peer_tbl.node = new;
6286 dhdp->peer_tbl.tdls_peer_count++;
6287
6288 } else {
6289 while (cur != NULL) {
6290 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
6291 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
6292 if (prev)
6293 prev->next = cur->next;
6294 else
6295 dhdp->peer_tbl.node = cur->next;
6296 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
6297 dhdp->peer_tbl.tdls_peer_count--;
6298 return;
6299 }
6300 prev = cur;
6301 cur = cur->next;
6302 }
6303 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
6304 }
6305 }
6306 #endif /* PCIE_FULL_DONGLE */
6307 #endif /* BCMDBUS */
6308
6309 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
6310 {
6311 if (!dhd)
6312 return FALSE;
6313
6314 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
6315 return TRUE;
6316 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
6317 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
6318 return TRUE;
6319 else
6320 return FALSE;
6321 }
6322 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
6323 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
6324 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
6325 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
6326 * would still be named as fw_bcmdhd_apsta.
6327 */
/* Probe the firmware for P2P/VSDB support and return the matching
 * DHD_FLAG_CONCURR_* capability bits (0 when concurrency should not be
 * enabled, e.g. HOSTAP/MFG mode or firmware without real P2P).
 */
uint32
dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
{
	int32 ret = 0;
	char buf[WLC_IOCTL_SMLEN];
	bool mchan_supported = FALSE;
	/* if dhd->op_mode is already set for HOSTAP and Manufacturing
	 * test mode, that means we only will use the mode as it is
	 */
	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
		return 0;
	/* VSDB capability implies multi-channel concurrency is possible */
	if (FW_SUPPORTED(dhd, vsdb)) {
		mchan_supported = TRUE;
	}
	if (!FW_SUPPORTED(dhd, p2p)) {
		DHD_TRACE(("Chip does not support p2p\n"));
		return 0;
	}
	else {
		/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
		memset(buf, 0, sizeof(buf));
		bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
			FALSE, 0)) < 0) {
			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
			return 0;
		}
		else {
			if (buf[0] == 1) {
				/* By default, chip supports single chan concurrency,
				 * now lets check for mchan
				 */
				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
				if (mchan_supported)
					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
				/* For customer_hw4, although ICS,
				 * we still support concurrent mode
				 */
				return ret;
#else
				return 0;
#endif
			}
		}
	}
	return 0;
}
6376 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
6377
6378 #ifdef SUPPORT_AP_POWERSAVE
6379 #define RXCHAIN_PWRSAVE_PPS 10
6380 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
6381 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
6382 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
6383 {
6384 char iovbuf[128];
6385 int32 pps = RXCHAIN_PWRSAVE_PPS;
6386 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
6387 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
6388
6389 if (enable) {
6390 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
6391 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
6392 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
6393 DHD_ERROR(("Failed to enable AP power save\n"));
6394 }
6395 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
6396 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
6397 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
6398 DHD_ERROR(("Failed to set pps\n"));
6399 }
6400 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
6401 4, iovbuf, sizeof(iovbuf));
6402 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
6403 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
6404 DHD_ERROR(("Failed to set quiet time\n"));
6405 }
6406 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
6407 4, iovbuf, sizeof(iovbuf));
6408 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
6409 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
6410 DHD_ERROR(("Failed to set stas assoc check\n"));
6411 }
6412 } else {
6413 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
6414 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
6415 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
6416 DHD_ERROR(("Failed to disable AP power save\n"));
6417 }
6418 }
6419
6420 return 0;
6421 }
6422 #endif /* SUPPORT_AP_POWERSAVE */
6423
6424
6425 #if defined(READ_CONFIG_FROM_FILE)
6426 #include <linux/fs.h>
6427 #include <linux/ctype.h>
6428
6429 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
6430 bool PM_control = TRUE;
6431
6432 static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
6433 {
6434 int var_int;
6435 wl_country_t cspec = {{0}, -1, {0}};
6436 char *revstr;
6437 char *endptr = NULL;
6438 int iolen;
6439 char smbuf[WLC_IOCTL_SMLEN*2];
6440 #ifdef ROAM_AP_ENV_DETECTION
6441 int roam_env_mode = AP_ENV_INDETERMINATE;
6442 #endif /* ROAM_AP_ENV_DETECTION */
6443
6444 if (!strcmp(name, "country")) {
6445 revstr = strchr(value, '/');
6446 if (revstr) {
6447 cspec.rev = strtoul(revstr + 1, &endptr, 10);
6448 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
6449 cspec.country_abbrev[2] = '\0';
6450 memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
6451 } else {
6452 cspec.rev = -1;
6453 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
6454 memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
6455 get_customized_country_code(dhd->info->adapter,
6456 (char *)&cspec.country_abbrev, &cspec);
6457 }
6458 memset(smbuf, 0, sizeof(smbuf));
6459 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
6460 cspec.country_abbrev, cspec.rev));
6461 iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
6462 smbuf, sizeof(smbuf));
6463 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6464 smbuf, iolen, TRUE, 0);
6465 } else if (!strcmp(name, "roam_scan_period")) {
6466 var_int = (int)simple_strtol(value, NULL, 0);
6467 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
6468 &var_int, sizeof(var_int), TRUE, 0);
6469 } else if (!strcmp(name, "roam_delta")) {
6470 struct {
6471 int val;
6472 int band;
6473 } x;
6474 x.val = (int)simple_strtol(value, NULL, 0);
6475 /* x.band = WLC_BAND_AUTO; */
6476 x.band = WLC_BAND_ALL;
6477 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
6478 } else if (!strcmp(name, "roam_trigger")) {
6479 int ret = 0;
6480
6481 roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
6482 roam_trigger[1] = WLC_BAND_ALL;
6483 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
6484 sizeof(roam_trigger), TRUE, 0);
6485
6486 #ifdef ROAM_AP_ENV_DETECTION
6487 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
6488 char iovbuf[128];
6489 bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode,
6490 4, iovbuf, sizeof(iovbuf));
6491 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6492 sizeof(iovbuf), TRUE, 0) == BCME_OK) {
6493 dhd->roam_env_detection = TRUE;
6494 } else {
6495 dhd->roam_env_detection = FALSE;
6496 }
6497 }
6498 #endif /* ROAM_AP_ENV_DETECTION */
6499 return ret;
6500 } else if (!strcmp(name, "PM")) {
6501 int ret = 0;
6502 var_int = (int)simple_strtol(value, NULL, 0);
6503
6504 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
6505 &var_int, sizeof(var_int), TRUE, 0);
6506
6507 #if defined(CONFIG_CONTROL_PM) || defined(CONFIG_PM_LOCK)
6508 if (var_int == 0) {
6509 g_pm_control = TRUE;
6510 printk("%s var_int=%d don't control PM\n", __func__, var_int);
6511 } else {
6512 g_pm_control = FALSE;
6513 printk("%s var_int=%d do control PM\n", __func__, var_int);
6514 }
6515 #endif
6516
6517 return ret;
6518 }
6519 #ifdef WLBTAMP
6520 else if (!strcmp(name, "btamp_chan")) {
6521 int btamp_chan;
6522 int iov_len = 0;
6523 char iovbuf[128];
6524 int ret;
6525
6526 btamp_chan = (int)simple_strtol(value, NULL, 0);
6527 iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
6528 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
6529 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
6530 __FUNCTION__, btamp_chan, ret));
6531 else
6532 DHD_ERROR(("%s btamp_chan %d set success\n",
6533 __FUNCTION__, btamp_chan));
6534 }
6535 #endif /* WLBTAMP */
6536 else if (!strcmp(name, "band")) {
6537 int ret;
6538 if (!strcmp(value, "auto"))
6539 var_int = WLC_BAND_AUTO;
6540 else if (!strcmp(value, "a"))
6541 var_int = WLC_BAND_5G;
6542 else if (!strcmp(value, "b"))
6543 var_int = WLC_BAND_2G;
6544 else if (!strcmp(value, "all"))
6545 var_int = WLC_BAND_ALL;
6546 else {
6547 printk(" set band value should be one of the a or b or all\n");
6548 var_int = WLC_BAND_AUTO;
6549 }
6550 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
6551 sizeof(var_int), TRUE, 0)) < 0)
6552 printk(" set band err=%d\n", ret);
6553 return ret;
6554 } else if (!strcmp(name, "cur_etheraddr")) {
6555 struct ether_addr ea;
6556 char buf[32];
6557 uint iovlen;
6558 int ret;
6559
6560 bcm_ether_atoe(value, &ea);
6561
6562 ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
6563 if (ret == 0) {
6564 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
6565 return 0;
6566 }
6567
6568 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
6569 ea.octet[0], ea.octet[1], ea.octet[2],
6570 ea.octet[3], ea.octet[4], ea.octet[5]));
6571
6572 iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
6573
6574 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
6575 if (ret < 0) {
6576 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6577 return ret;
6578 }
6579 else {
6580 memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
6581 return ret;
6582 }
6583 } else if (!strcmp(name, "lpc")) {
6584 int ret = 0;
6585 char buf[32];
6586 uint iovlen;
6587 var_int = (int)simple_strtol(value, NULL, 0);
6588 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
6589 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
6590 }
6591 iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf));
6592 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
6593 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
6594 }
6595 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
6596 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
6597 }
6598 return ret;
6599 } else if (!strcmp(name, "vht_features")) {
6600 int ret = 0;
6601 char buf[32];
6602 uint iovlen;
6603 var_int = (int)simple_strtol(value, NULL, 0);
6604
6605 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
6606 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
6607 }
6608 iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf));
6609 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
6610 DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
6611 }
6612 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
6613 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
6614 }
6615 return ret;
6616 } else {
6617 uint iovlen;
6618 char iovbuf[WLC_IOCTL_SMLEN];
6619
6620 /* wlu_iovar_setint */
6621 var_int = (int)simple_strtol(value, NULL, 0);
6622
6623 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
6624 if (!strcmp(name, "roam_off")) {
6625 /* Setup timeout if Beacons are lost to report link down */
6626 if (var_int) {
6627 uint bcn_timeout = 2;
6628 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
6629 iovbuf, sizeof(iovbuf));
6630 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6631 }
6632 }
6633 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
6634
6635 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
6636
6637 iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
6638 iovbuf, sizeof(iovbuf));
6639 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6640 iovbuf, iovlen, TRUE, 0);
6641 }
6642
6643 return 0;
6644 }
6645
6646 static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
6647 {
6648 mm_segment_t old_fs;
6649 struct kstat stat;
6650 struct file *fp = NULL;
6651 unsigned int len;
6652 char *buf = NULL, *p, *name, *value;
6653 int ret = 0;
6654 char *config_path;
6655
6656 config_path = CONFIG_BCMDHD_CONFIG_PATH;
6657
6658 if (!config_path)
6659 {
6660 printk(KERN_ERR "config_path can't read. \n");
6661 return 0;
6662 }
6663
6664 old_fs = get_fs();
6665 set_fs(get_ds());
6666 if ((ret = vfs_stat(config_path, &stat))) {
6667 set_fs(old_fs);
6668 printk(KERN_ERR "%s: Failed to get information (%d)\n",
6669 config_path, ret);
6670 return ret;
6671 }
6672 set_fs(old_fs);
6673
6674 if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
6675 printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
6676 return -ENOMEM;
6677 }
6678
6679 printk("dhd_preinit_config : config path : %s \n", config_path);
6680
6681 if (!(fp = dhd_os_open_image(config_path)) ||
6682 (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
6683 goto err;
6684
6685 buf[stat.size] = '\0';
6686 for (p = buf; *p; p++) {
6687 if (isspace(*p))
6688 continue;
6689 for (name = p++; *p && !isspace(*p); p++) {
6690 if (*p == '=') {
6691 *p = '\0';
6692 p++;
6693 for (value = p; *p && !isspace(*p); p++);
6694 *p = '\0';
6695 if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
6696 printk(KERN_ERR "%s: %s=%s\n",
6697 bcmerrorstr(ret), name, value);
6698 }
6699 break;
6700 }
6701 }
6702 }
6703 ret = 0;
6704
6705 out:
6706 if (fp)
6707 dhd_os_close_image(fp);
6708 if (buf)
6709 MFREE(dhd->osh, buf, stat.size+1);
6710 return ret;
6711
6712 err:
6713 ret = -1;
6714 goto out;
6715 }
6716 #endif /* READ_CONFIG_FROM_FILE */
6717
6718 int
6719 dhd_preinit_ioctls(dhd_pub_t *dhd)
6720 {
6721 int ret = 0;
6722 char eventmask[WL_EVENTING_MASK_LEN];
6723 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
6724 uint32 buf_key_b4_m4 = 1;
6725 #ifndef WL_CFG80211
6726 u32 up = 0;
6727 #endif
6728 uint8 msglen;
6729 eventmsgs_ext_t *eventmask_msg = NULL;
6730 char* iov_buf = NULL;
6731 int ret2 = 0;
6732 #ifdef WLAIBSS
6733 aibss_bcn_force_config_t bcn_config;
6734 uint32 aibss;
6735 #ifdef WLAIBSS_PS
6736 uint32 aibss_ps;
6737 #endif /* WLAIBSS_PS */
6738 #endif /* WLAIBSS */
6739 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
6740 uint32 sup_wpa = 0;
6741 #endif
6742 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
6743 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
6744 uint32 ampdu_ba_wsize = 0;
6745 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
6746 #if defined(CUSTOM_AMPDU_MPDU)
6747 int32 ampdu_mpdu = 0;
6748 #endif
6749 #if defined(CUSTOM_AMPDU_RELEASE)
6750 int32 ampdu_release = 0;
6751 #endif
6752 #if defined(CUSTOM_AMSDU_AGGSF)
6753 int32 amsdu_aggsf = 0;
6754 #endif
6755
6756 #if defined(BCMDBUS)
6757 #ifdef PROP_TXSTATUS
6758 int wlfc_enable = TRUE;
6759 #ifndef DISABLE_11N
6760 uint32 hostreorder = 1;
6761 uint wl_down = 1;
6762 #endif /* DISABLE_11N */
6763 #endif /* PROP_TXSTATUS */
6764 #endif
6765 #ifdef PCIE_FULL_DONGLE
6766 uint32 wl_ap_isolate;
6767 #endif /* PCIE_FULL_DONGLE */
6768
6769 #ifdef OEM_ANDROID
6770 #ifdef DHD_ENABLE_LPC
6771 uint32 lpc = 1;
6772 #endif /* DHD_ENABLE_LPC */
6773 uint power_mode = PM_FAST;
6774 uint32 dongle_align = DHD_SDALIGN;
6775 uint bcn_timeout = dhd->conf->bcn_timeout;
6776 uint retry_max = 3;
6777 #if defined(ARP_OFFLOAD_SUPPORT)
6778 int arpoe = 1;
6779 #endif
6780 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
6781 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
6782 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
6783 char buf[WLC_IOCTL_SMLEN];
6784 char *ptr;
6785 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
6786 #ifdef ROAM_ENABLE
6787 uint roamvar = 0;
6788 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
6789 int roam_scan_period[2] = {10, WLC_BAND_ALL};
6790 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
6791 #ifdef ROAM_AP_ENV_DETECTION
6792 int roam_env_mode = AP_ENV_INDETERMINATE;
6793 #endif /* ROAM_AP_ENV_DETECTION */
6794 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
6795 int roam_fullscan_period = 60;
6796 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6797 int roam_fullscan_period = 120;
6798 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6799 #else
6800 #ifdef DISABLE_BUILTIN_ROAM
6801 uint roamvar = 1;
6802 #endif /* DISABLE_BUILTIN_ROAM */
6803 #endif /* ROAM_ENABLE */
6804
6805 #if defined(SOFTAP)
6806 uint dtim = 1;
6807 #endif
6808 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
6809 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
6810 struct ether_addr p2p_ea;
6811 #endif
6812 #ifdef SOFTAP_UAPSD_OFF
6813 uint32 wme_apsd = 0;
6814 #endif /* SOFTAP_UAPSD_OFF */
6815 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
6816 uint32 apsta = 1; /* Enable APSTA mode */
6817 #elif defined(SOFTAP_AND_GC)
6818 uint32 apsta = 0;
6819 int ap_mode = 1;
6820 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
6821 #ifdef GET_CUSTOM_MAC_ENABLE
6822 struct ether_addr ea_addr;
6823 #endif /* GET_CUSTOM_MAC_ENABLE */
6824 #ifdef OKC_SUPPORT
6825 uint32 okc = 1;
6826 #endif
6827
6828 #ifdef DISABLE_11N
6829 uint32 nmode = 0;
6830 #endif /* DISABLE_11N */
6831
6832 #if defined(DISABLE_11AC)
6833 uint32 vhtmode = 0;
6834 #endif /* DISABLE_11AC */
6835 #ifdef USE_WL_TXBF
6836 uint32 txbf = 1;
6837 #endif /* USE_WL_TXBF */
6838 #ifdef AMPDU_VO_ENABLE
6839 struct ampdu_tid_control tid;
6840 #endif
6841 #ifdef USE_WL_FRAMEBURST
6842 uint32 frameburst = 1;
6843 #endif /* USE_WL_FRAMEBURST */
6844 #ifdef DHD_SET_FW_HIGHSPEED
6845 uint32 ack_ratio = 250;
6846 uint32 ack_ratio_depth = 64;
6847 #endif /* DHD_SET_FW_HIGHSPEED */
6848 #ifdef SUPPORT_2G_VHT
6849 uint32 vht_features = 0x3; /* 2G enable | rates all */
6850 #endif /* SUPPORT_2G_VHT */
6851 #ifdef CUSTOM_PSPRETEND_THR
6852 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
6853 #endif
6854 #ifdef PKT_FILTER_SUPPORT
6855 dhd_pkt_filter_enable = TRUE;
6856 #endif /* PKT_FILTER_SUPPORT */
6857 #ifdef WLTDLS
6858 dhd->tdls_enable = FALSE;
6859 dhd_tdls_set_mode(dhd, false);
6860 #endif /* WLTDLS */
6861 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
6862 DHD_TRACE(("Enter %s\n", __FUNCTION__));
6863
6864 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
6865 dhd->op_mode = 0;
6866 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6867 (op_mode == DHD_FLAG_MFG_MODE)) {
6868 /* Check and adjust IOCTL response timeout for Manufactring firmware */
6869 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
6870 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
6871 __FUNCTION__));
6872 }
6873 else {
6874 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
6875 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
6876 }
6877 #ifdef GET_CUSTOM_MAC_ENABLE
6878 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
6879 if (!ret) {
6880 memset(buf, 0, sizeof(buf));
6881 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
6882 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6883 if (ret < 0) {
6884 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
6885 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
6886 ret = BCME_NOTUP;
6887 goto done;
6888 }
6889 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
6890 } else {
6891 #endif /* GET_CUSTOM_MAC_ENABLE */
6892 /* Get the default device MAC address directly from firmware */
6893 memset(buf, 0, sizeof(buf));
6894 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
6895 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
6896 FALSE, 0)) < 0) {
6897 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
6898 ret = BCME_NOTUP;
6899 goto done;
6900 }
6901 /* Update public MAC address after reading from Firmware */
6902 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
6903
6904 #ifdef GET_CUSTOM_MAC_ENABLE
6905 }
6906 #endif /* GET_CUSTOM_MAC_ENABLE */
6907
6908 /* get a capabilities from firmware */
6909 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
6910 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
6911 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
6912 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
6913 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6914 __FUNCTION__, ret));
6915 goto done;
6916 }
6917 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
6918 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
6919 #ifdef SET_RANDOM_MAC_SOFTAP
6920 uint rand_mac;
6921 #endif
6922 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
6923 #if defined(ARP_OFFLOAD_SUPPORT)
6924 arpoe = 0;
6925 #endif
6926 #ifdef PKT_FILTER_SUPPORT
6927 dhd_pkt_filter_enable = FALSE;
6928 #endif
6929 #ifdef SET_RANDOM_MAC_SOFTAP
6930 SRANDOM32((uint)jiffies);
6931 rand_mac = RANDOM32();
6932 iovbuf[0] = 0x02; /* locally administered bit */
6933 iovbuf[1] = 0x1A;
6934 iovbuf[2] = 0x11;
6935 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
6936 iovbuf[4] = (unsigned char)(rand_mac >> 8);
6937 iovbuf[5] = (unsigned char)(rand_mac >> 16);
6938
6939 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
6940 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6941 if (ret < 0) {
6942 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6943 } else
6944 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
6945 #endif /* SET_RANDOM_MAC_SOFTAP */
6946 #if defined(OEM_ANDROID) && !defined(AP) && defined(WL_CFG80211)
6947 /* Turn off MPC in AP mode */
6948 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6949 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6950 sizeof(iovbuf), TRUE, 0)) < 0) {
6951 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
6952 }
6953 #endif
6954 #ifdef SUPPORT_AP_POWERSAVE
6955 dhd_set_ap_powersave(dhd, 0, TRUE);
6956 #endif
6957 #ifdef SOFTAP_UAPSD_OFF
6958 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
6959 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6960 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
6961 #endif /* SOFTAP_UAPSD_OFF */
6962 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6963 (op_mode == DHD_FLAG_MFG_MODE)) {
6964 #if defined(ARP_OFFLOAD_SUPPORT)
6965 arpoe = 0;
6966 #endif /* ARP_OFFLOAD_SUPPORT */
6967 #ifdef PKT_FILTER_SUPPORT
6968 dhd_pkt_filter_enable = FALSE;
6969 #endif /* PKT_FILTER_SUPPORT */
6970 dhd->op_mode = DHD_FLAG_MFG_MODE;
6971 } else {
6972 uint32 concurrent_mode = 0;
6973 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
6974 (op_mode == DHD_FLAG_P2P_MODE)) {
6975 #if defined(ARP_OFFLOAD_SUPPORT)
6976 arpoe = 0;
6977 #endif
6978 #ifdef PKT_FILTER_SUPPORT
6979 dhd_pkt_filter_enable = FALSE;
6980 #endif
6981 dhd->op_mode = DHD_FLAG_P2P_MODE;
6982 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
6983 (op_mode == DHD_FLAG_IBSS_MODE)) {
6984 dhd->op_mode = DHD_FLAG_IBSS_MODE;
6985 } else
6986 dhd->op_mode = DHD_FLAG_STA_MODE;
6987 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
6988 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
6989 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
6990 #if defined(ARP_OFFLOAD_SUPPORT)
6991 arpoe = 1;
6992 #endif
6993 dhd->op_mode |= concurrent_mode;
6994 }
6995
6996 /* Check if we are enabling p2p */
6997 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6998 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6999 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
7000 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7001 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
7002 }
7003
7004 #if defined(SOFTAP_AND_GC)
7005 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
7006 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
7007 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
7008 }
7009 #endif
7010 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
7011 ETHER_SET_LOCALADDR(&p2p_ea);
7012 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
7013 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
7014 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
7015 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7016 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
7017 } else {
7018 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
7019 }
7020 }
7021 #else
7022 (void)concurrent_mode;
7023 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
7024 }
7025
7026 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
7027 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
7028 /* Set Country code */
7029 if (dhd->dhd_cspec.ccode[0] != 0) {
7030 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
7031 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
7032 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
7033 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
7034 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
7035 } else {
7036 dhd_conf_set_country(dhd);
7037 dhd_conf_fix_country(dhd);
7038 }
7039 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
7040
7041 #if defined(DISABLE_11AC)
7042 bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
7043 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
7044 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
7045 #endif /* DISABLE_11AC */
7046
7047 /* Set Listen Interval */
7048 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
7049 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
7050 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
7051
7052 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
7053 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
7054 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
7055 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7056 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
7057 #if defined(ROAM_ENABLE)
7058 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
7059 sizeof(roam_trigger), TRUE, 0)) < 0)
7060 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
7061 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
7062 sizeof(roam_scan_period), TRUE, 0)) < 0)
7063 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
7064 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
7065 sizeof(roam_delta), TRUE, 0)) < 0)
7066 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
7067 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
7068 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
7069 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
7070 #ifdef ROAM_AP_ENV_DETECTION
7071 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
7072 bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode,
7073 4, iovbuf, sizeof(iovbuf));
7074 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) == BCME_OK)
7075 dhd->roam_env_detection = TRUE;
7076 else {
7077 dhd->roam_env_detection = FALSE;
7078 }
7079 }
7080 #endif /* ROAM_AP_ENV_DETECTION */
7081 #endif /* ROAM_ENABLE */
7082 dhd_conf_set_roam(dhd);
7083
7084 #ifdef OKC_SUPPORT
7085 bcm_mkiovar("okc_enable", (char *)&okc, 4, iovbuf, sizeof(iovbuf));
7086 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7087 #endif
7088 #ifdef WLTDLS
7089 /* by default TDLS on and auto mode off */
7090 _dhd_tdls_enable(dhd, true, false, NULL);
7091 #endif /* WLTDLS */
7092
7093 #ifdef DHD_ENABLE_LPC
7094 /* Set lpc 1 */
7095 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
7096 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7097 sizeof(iovbuf), TRUE, 0)) < 0) {
7098 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
7099 }
7100 #endif /* DHD_ENABLE_LPC */
7101 dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
7102
7103 /* Set PowerSave mode */
7104 if (dhd->conf->pm >= 0)
7105 power_mode = dhd->conf->pm;
7106 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
7107
7108 /* Match Host and Dongle rx alignment */
7109 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
7110 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7111
7112
7113
7114 /* Setup timeout if Beacons are lost and roam is off to report link down */
7115 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
7116 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7117 /* Setup assoc_retry_max count to reconnect target AP in dongle */
7118 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
7119 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7120 #if defined(AP) && !defined(WLP2P)
7121 /* Turn off MPC in AP mode */
7122 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
7123 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7124 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
7125 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7126 #endif /* defined(AP) && !defined(WLP2P) */
7127 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
7128 dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
7129 dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
7130 dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
7131 dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
7132 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
7133 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
7134 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
7135
7136 #if defined(OEM_ANDROID) && defined(SOFTAP)
7137 if (ap_fw_loaded == TRUE) {
7138 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
7139 }
7140 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
7141
7142 #if defined(KEEP_ALIVE)
7143 {
7144 /* Set Keep Alive : be sure to use FW with -keepalive */
7145 int res;
7146
7147 #if defined(OEM_ANDROID) && defined(SOFTAP)
7148 if (ap_fw_loaded == FALSE)
7149 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
7150 if (!(dhd->op_mode &
7151 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
7152 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
7153 DHD_ERROR(("%s set keeplive failed %d\n",
7154 __FUNCTION__, res));
7155 }
7156 }
7157 #endif /* defined(KEEP_ALIVE) */
7158
7159 #else
7160 /* get a capabilities from firmware */
7161 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
7162 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
7163 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
7164 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
7165 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
7166 __FUNCTION__, ret));
7167 goto done;
7168 }
7169 #endif /* OEM_ANDROID */
7170
7171 #ifdef USE_WL_TXBF
7172 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
7173 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7174 sizeof(iovbuf), TRUE, 0)) < 0) {
7175 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
7176 }
7177 #endif /* USE_WL_TXBF */
7178 dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
7179 #ifdef USE_WL_FRAMEBURST
7180 /* Set frameburst to value */
7181 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
7182 sizeof(frameburst), TRUE, 0)) < 0) {
7183 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
7184 }
7185 #endif /* USE_WL_FRAMEBURST */
7186 dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE);
7187 #ifdef DHD_SET_FW_HIGHSPEED
7188 /* Set ack_ratio */
7189 bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
7190 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7191 sizeof(iovbuf), TRUE, 0)) < 0) {
7192 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
7193 }
7194
7195 /* Set ack_ratio_depth */
7196 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
7197 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7198 sizeof(iovbuf), TRUE, 0)) < 0) {
7199 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
7200 }
7201 #endif /* DHD_SET_FW_HIGHSPEED */
7202 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
7203 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
7204 /* Set ampdu ba wsize to 64 or 16 */
7205 #ifdef CUSTOM_AMPDU_BA_WSIZE
7206 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
7207 #endif
7208 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
7209 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
7210 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
7211 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
7212 if (ampdu_ba_wsize != 0) {
7213 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
7214 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7215 sizeof(iovbuf), TRUE, 0)) < 0) {
7216 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
7217 __FUNCTION__, ampdu_ba_wsize, ret));
7218 }
7219 }
7220 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
7221 dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
7222
7223 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
7224 if (iov_buf == NULL) {
7225 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
7226 ret = BCME_NOMEM;
7227 goto done;
7228 }
7229 #ifdef WLAIBSS
7230 /* Configure custom IBSS beacon transmission */
7231 if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
7232 {
7233 aibss = 1;
7234 bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
7235 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7236 sizeof(iovbuf), TRUE, 0)) < 0) {
7237 DHD_ERROR(("%s Set aibss to %d failed %d\n",
7238 __FUNCTION__, aibss, ret));
7239 }
7240 #ifdef WLAIBSS_PS
7241 aibss_ps = 1;
7242 bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
7243 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7244 sizeof(iovbuf), TRUE, 0)) < 0) {
7245 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
7246 __FUNCTION__, aibss, ret));
7247 }
7248 #endif /* WLAIBSS_PS */
7249 }
7250 memset(&bcn_config, 0, sizeof(bcn_config));
7251 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
7252 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
7253 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
7254 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
7255 bcn_config.len = sizeof(bcn_config);
7256
7257 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
7258 sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN);
7259 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
7260 WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
7261 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
7262 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
7263 AIBSS_BCN_FLOOD_DUR, ret));
7264 }
7265 #endif /* WLAIBSS */
7266
7267 #if defined(CUSTOM_AMPDU_MPDU)
7268 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
7269 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
7270 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
7271 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7272 sizeof(iovbuf), TRUE, 0)) < 0) {
7273 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
7274 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
7275 }
7276 }
7277 #endif /* CUSTOM_AMPDU_MPDU */
7278
7279 #if defined(CUSTOM_AMPDU_RELEASE)
7280 ampdu_release = CUSTOM_AMPDU_RELEASE;
7281 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
7282 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
7283 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7284 sizeof(iovbuf), TRUE, 0)) < 0) {
7285 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
7286 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
7287 }
7288 }
7289 #endif /* CUSTOM_AMPDU_RELEASE */
7290
7291 #if defined(CUSTOM_AMSDU_AGGSF)
7292 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
7293 if (amsdu_aggsf != 0) {
7294 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
7295 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7296 sizeof(iovbuf), TRUE, 0)) < 0) {
7297 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
7298 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
7299 }
7300 }
7301 #endif /* CUSTOM_AMSDU_AGGSF */
7302
7303 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
7304 /* Read 4-way handshake requirements */
7305 if (dhd_use_idsup == 1) {
7306 bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
7307 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
7308 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
7309 * in-dongle supplicant.
7310 */
7311 if (ret >= 0 || ret == BCME_NOTREADY)
7312 dhd->fw_4way_handshake = TRUE;
7313 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
7314 }
7315 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
7316 #ifdef SUPPORT_2G_VHT
7317 bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
7318 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7319 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
7320 }
7321 #endif /* SUPPORT_2G_VHT */
7322 #ifdef CUSTOM_PSPRETEND_THR
7323 /* Turn off MPC in AP mode */
7324 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
7325 iovbuf, sizeof(iovbuf));
7326 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7327 sizeof(iovbuf), TRUE, 0)) < 0) {
7328 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
7329 __FUNCTION__, ret));
7330 }
7331 #endif
7332
7333 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
7334 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
7335 sizeof(iovbuf), TRUE, 0)) < 0) {
7336 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
7337 }
7338
7339 /* Read event_msgs mask */
7340 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
7341 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
7342 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
7343 goto done;
7344 }
7345 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
7346
7347 /* Setup event_msgs */
7348 setbit(eventmask, WLC_E_SET_SSID);
7349 setbit(eventmask, WLC_E_PRUNE);
7350 setbit(eventmask, WLC_E_AUTH);
7351 setbit(eventmask, WLC_E_AUTH_IND);
7352 setbit(eventmask, WLC_E_ASSOC);
7353 setbit(eventmask, WLC_E_REASSOC);
7354 setbit(eventmask, WLC_E_REASSOC_IND);
7355 setbit(eventmask, WLC_E_DEAUTH);
7356 setbit(eventmask, WLC_E_DEAUTH_IND);
7357 setbit(eventmask, WLC_E_DISASSOC_IND);
7358 setbit(eventmask, WLC_E_DISASSOC);
7359 setbit(eventmask, WLC_E_JOIN);
7360 setbit(eventmask, WLC_E_START);
7361 setbit(eventmask, WLC_E_ASSOC_IND);
7362 setbit(eventmask, WLC_E_PSK_SUP);
7363 setbit(eventmask, WLC_E_LINK);
7364 setbit(eventmask, WLC_E_NDIS_LINK);
7365 setbit(eventmask, WLC_E_MIC_ERROR);
7366 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
7367 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
7368 #ifndef WL_CFG80211
7369 setbit(eventmask, WLC_E_PMKID_CACHE);
7370 setbit(eventmask, WLC_E_TXFAIL);
7371 #endif
7372 setbit(eventmask, WLC_E_JOIN_START);
7373 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
7374 #ifdef WLMEDIA_HTSF
7375 setbit(eventmask, WLC_E_HTSFSYNC);
7376 #endif /* WLMEDIA_HTSF */
7377 #ifdef PNO_SUPPORT
7378 setbit(eventmask, WLC_E_PFN_NET_FOUND);
7379 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
7380 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
7381 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
7382 #endif /* PNO_SUPPORT */
7383 /* enable dongle roaming event */
7384 #if defined(OEM_ANDROID)
7385 setbit(eventmask, WLC_E_ROAM);
7386 setbit(eventmask, WLC_E_BSSID);
7387 #endif
7388 #ifdef WLTDLS
7389 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
7390 #endif /* WLTDLS */
7391 #ifdef WL_CFG80211
7392 setbit(eventmask, WLC_E_ESCAN_RESULT);
7393 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
7394 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
7395 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
7396 }
7397 #endif /* WL_CFG80211 */
7398 #ifdef WLAIBSS
7399 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
7400 #endif /* WLAIBSS */
7401 setbit(eventmask, WLC_E_TRACE);
7402 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
7403 /* Write updated Event mask */
7404 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
7405 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7406 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
7407 goto done;
7408 }
7409
7410 /* make up event mask ext message iovar for event larger than 128 */
7411 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
7412 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
7413 if (eventmask_msg == NULL) {
7414 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
7415 ret = BCME_NOMEM;
7416 goto done;
7417 }
7418 bzero(eventmask_msg, msglen);
7419 eventmask_msg->ver = EVENTMSGS_VER;
7420 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
7421
7422 /* Read event_msgs_ext mask */
7423 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
7424 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
7425 if (ret2 != BCME_UNSUPPORTED)
7426 ret = ret2;
7427 if (ret2 == 0) { /* event_msgs_ext must be supported */
7428 bcopy(iov_buf, eventmask_msg, msglen);
7429
7430 #ifdef BT_WIFI_HANDOVER
7431 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
7432 #endif /* BT_WIFI_HANDOVER */
7433
7434 /* Write updated Event mask */
7435 eventmask_msg->ver = EVENTMSGS_VER;
7436 eventmask_msg->command = EVENTMSGS_SET_MASK;
7437 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
7438 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
7439 msglen, iov_buf, WLC_IOCTL_SMLEN);
7440 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
7441 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
7442 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
7443 goto done;
7444 }
7445 } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
7446 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
7447 goto done;
7448 } /* unsupported is ok */
7449
7450 #ifdef OEM_ANDROID
7451 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
7452 sizeof(scan_assoc_time), TRUE, 0);
7453 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
7454 sizeof(scan_unassoc_time), TRUE, 0);
7455 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
7456 sizeof(scan_passive_time), TRUE, 0);
7457
7458 #ifdef ARP_OFFLOAD_SUPPORT
7459 /* Set and enable ARP offload feature for STA only */
7460 #if defined(OEM_ANDROID) && defined(SOFTAP)
7461 if (arpoe && !ap_fw_loaded)
7462 #else
7463 if (arpoe)
7464 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
7465 {
7466 dhd_arp_offload_enable(dhd, TRUE);
7467 dhd_arp_offload_set(dhd, dhd_arp_mode);
7468 } else {
7469 dhd_arp_offload_enable(dhd, FALSE);
7470 dhd_arp_offload_set(dhd, 0);
7471 }
7472 dhd_arp_enable = arpoe;
7473 #endif /* ARP_OFFLOAD_SUPPORT */
7474
7475 #ifdef PKT_FILTER_SUPPORT
7476 /* Setup default defintions for pktfilter , enable in suspend */
7477 dhd->pktfilter_count = 6;
7478 /* Setup filter to allow only unicast */
7479 if (dhd_master_mode) {
7480 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
7481 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
7482 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
7483 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
7484 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
7485 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
7486 /* apply APP pktfilter */
7487 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
7488 } else
7489 dhd_conf_discard_pkt_filter(dhd);
7490 dhd_conf_add_pkt_filter(dhd);
7491
7492 #if defined(SOFTAP)
7493 if (ap_fw_loaded) {
7494 dhd_enable_packet_filter(0, dhd);
7495 }
7496 #endif /* defined(SOFTAP) */
7497 dhd_set_packet_filter(dhd);
7498 #endif /* PKT_FILTER_SUPPORT */
7499 #ifdef DISABLE_11N
7500 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
7501 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
7502 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
7503 #endif /* DISABLE_11N */
7504
7505 #ifdef AMPDU_VO_ENABLE
7506 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
7507 tid.enable = TRUE;
7508 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
7509 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7510
7511 tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
7512 tid.enable = TRUE;
7513 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
7514 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7515 #endif
7516 #if defined(SOFTAP_TPUT_ENHANCE)
7517 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
7518 dhd_bus_setidletime(dhd, (int)100);
7519 #ifdef DHDTCPACK_SUPPRESS
7520 dhd->tcpack_sup_enabled = FALSE;
7521 #endif
7522 #if defined(DHD_TCP_WINSIZE_ADJUST)
7523 dhd_use_tcp_window_size_adjust = TRUE;
7524 #endif
7525
7526 memset(buf, 0, sizeof(buf));
7527 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
7528 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
7529 glom = 0;
7530 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
7531 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7532 }
7533 else {
7534 if (buf[0] == 0) {
7535 glom = 1;
7536 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
7537 sizeof(iovbuf));
7538 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7539 }
7540 }
7541 }
7542 #endif /* SOFTAP_TPUT_ENHANCE */
7543
7544 /* query for 'ver' to get version info from firmware */
7545 memset(buf, 0, sizeof(buf));
7546 ptr = buf;
7547 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
7548 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
7549 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
7550 else {
7551 bcmstrtok(&ptr, "\n", 0);
7552 /* Print fw version info */
7553 DHD_ERROR(("Firmware version = %s\n", buf));
7554 dhd_set_version_info(dhd, buf);
7555 }
7556 #endif /* defined(OEM_ANDROID) */
7557
7558
7559 #if defined(BCMDBUS)
7560 #ifdef PROP_TXSTATUS
7561 if (disable_proptx ||
7562 #ifdef PROP_TXSTATUS_VSDB
7563 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
7564 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
7565 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
7566 #endif /* PROP_TXSTATUS_VSDB */
7567 FALSE) {
7568 wlfc_enable = FALSE;
7569 }
7570
7571 #ifndef DISABLE_11N
7572 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
7573 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
7574 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
7575 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
7576 if (ret2 != BCME_UNSUPPORTED)
7577 ret = ret2;
7578 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
7579 if (ret == BCME_NOTDOWN) {
7580 uint wl_down = 1;
7581 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
7582 sizeof(wl_down), TRUE, 0);
7583 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
7584 __FUNCTION__, ret2, hostreorder));
7585
7586 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
7587 iovbuf, sizeof(iovbuf));
7588 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
7589 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
7590 if (ret2 != BCME_UNSUPPORTED)
7591 ret = ret2;
7592 }
7593 #endif
7594 if (ret2 != BCME_OK)
7595 hostreorder = 0;
7596 }
7597 #endif /* DISABLE_11N */
7598
7599 #ifdef READ_CONFIG_FROM_FILE
7600 dhd_preinit_config(dhd, 0);
7601 #endif /* READ_CONFIG_FROM_FILE */
7602
7603 if (wlfc_enable)
7604 dhd_wlfc_init(dhd);
7605 #ifndef DISABLE_11N
7606 else if (hostreorder)
7607 dhd_wlfc_hostreorder_init(dhd);
7608 #endif /* DISABLE_11N */
7609
7610 #endif /* PROP_TXSTATUS */
7611 #endif
7612 #ifdef PCIE_FULL_DONGLE
7613 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
7614 if (FW_SUPPORTED(dhd, ap)) {
7615 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
7616 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
7617 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
7618 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
7619 }
7620 #endif /* PCIE_FULL_DONGLE */
7621 #ifdef PNO_SUPPORT
7622 if (!dhd->pno_state) {
7623 dhd_pno_init(dhd);
7624 }
7625 #endif
7626 #ifdef WL11U
7627 dhd_interworking_enable(dhd);
7628 #endif /* WL11U */
7629 #ifndef WL_CFG80211
7630 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
7631 #endif
7632
7633 done:
7634
7635 if (eventmask_msg)
7636 kfree(eventmask_msg);
7637 if (iov_buf)
7638 kfree(iov_buf);
7639
7640 return ret;
7641 }
7642
7643 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
7644 {
7645 struct dhd_info *dhd = dhdp->info;
7646 struct net_device *dev = NULL;
7647
7648 ASSERT(dhd && dhd->iflist[ifidx]);
7649 dev = dhd->iflist[ifidx]->net;
7650 ASSERT(dev);
7651
7652 if (netif_running(dev)) {
7653 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
7654 return BCME_NOTDOWN;
7655 }
7656
7657 #define DHD_MIN_MTU 1500
7658 #define DHD_MAX_MTU 1752
7659
7660 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
7661 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
7662 return BCME_BADARG;
7663 }
7664
7665 dev->mtu = new_mtu;
7666 return 0;
7667 }
7668
7669 #ifdef ARP_OFFLOAD_SUPPORT
/* Add or remove one AOE (ARP Offload Engine) host IP for interface 'idx'.
 * The dongle keeps up to MAX_IPV4_ENTRIES (8) host IPs per interface; this
 * routine snapshots that table, clears it in the dongle, edits the local
 * copy (adding 'ipa' to the first free slot when add==TRUE, zeroing any
 * entry equal to 'ipa' otherwise), then writes the surviving entries back.
 */
void
aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
{
	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
	int i;
	int ret;

	bzero(ipv4_buf, sizeof(ipv4_buf));

	/* Snapshot the current host_ip table from the dongle */
	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
#ifdef AOE_DBG
	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif
	/* now we saved host_ip table, clr it in the dongle AOE */
	dhd_aoe_hostip_clr(dhd_pub, idx);

	/* If the read failed we cannot safely rebuild the table; the dongle
	 * copy has already been cleared above, so just bail out.
	 */
	if (ret) {
		DHD_ERROR(("%s failed\n", __FUNCTION__));
		return;
	}

	/* Edit the local copy and push the remaining entries back down */
	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
		if (add && (ipv4_buf[i] == 0)) {
			/* first free slot: record the new address once */
			ipv4_buf[i] = ipa;
			add = FALSE; /* added ipa to local table  */
			DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
				__FUNCTION__, i));
		} else if (ipv4_buf[i] == ipa) {
			/* removal case (or duplicate of a just-added ipa) */
			ipv4_buf[i]	= 0;
			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
				__FUNCTION__, ipa, i));
		}

		if (ipv4_buf[i] != 0) {
			/* add back host_ip entries from our local cache */
			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
				__FUNCTION__, ipv4_buf[i], i));
		}
	}
#ifdef AOE_DBG
	/* see the resulting hostip table */
	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif
}
7720
7721 /*
7722 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
7723 * whenever there is an event related to an IP address.
7724 * ptr : kernel provided pointer to IP address that has changed
7725 */
7726 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
7727 unsigned long event,
7728 void *ptr)
7729 {
7730 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
7731
7732 dhd_info_t *dhd;
7733 dhd_pub_t *dhd_pub;
7734 int idx;
7735
7736 if (!dhd_arp_enable)
7737 return NOTIFY_DONE;
7738 if (!ifa || !(ifa->ifa_dev->dev))
7739 return NOTIFY_DONE;
7740
7741 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7742 /* Filter notifications meant for non Broadcom devices */
7743 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
7744 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
7745 #if defined(WL_ENABLE_P2P_IF)
7746 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
7747 #endif /* WL_ENABLE_P2P_IF */
7748 return NOTIFY_DONE;
7749 }
7750 #endif /* LINUX_VERSION_CODE */
7751
7752 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
7753 if (!dhd)
7754 return NOTIFY_DONE;
7755
7756 dhd_pub = &dhd->pub;
7757
7758 if (dhd_pub->arp_version == 1) {
7759 idx = 0;
7760 }
7761 else {
7762 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
7763 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
7764 break;
7765 }
7766 if (idx < DHD_MAX_IFS)
7767 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
7768 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
7769 else {
7770 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
7771 idx = 0;
7772 }
7773 }
7774
7775 switch (event) {
7776 case NETDEV_UP:
7777 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
7778 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
7779
7780 if (dhd->pub.busstate != DHD_BUS_DATA) {
7781 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
7782 if (dhd->pend_ipaddr) {
7783 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
7784 __FUNCTION__, dhd->pend_ipaddr));
7785 }
7786 dhd->pend_ipaddr = ifa->ifa_address;
7787 break;
7788 }
7789
7790 #ifdef AOE_IP_ALIAS_SUPPORT
7791 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
7792 __FUNCTION__));
7793 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
7794 #endif /* AOE_IP_ALIAS_SUPPORT */
7795 break;
7796
7797 case NETDEV_DOWN:
7798 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
7799 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
7800 dhd->pend_ipaddr = 0;
7801 #ifdef AOE_IP_ALIAS_SUPPORT
7802 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
7803 __FUNCTION__));
7804 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
7805 #else
7806 dhd_aoe_hostip_clr(&dhd->pub, idx);
7807 dhd_aoe_arp_clr(&dhd->pub, idx);
7808 #endif /* AOE_IP_ALIAS_SUPPORT */
7809 break;
7810
7811 default:
7812 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
7813 __func__, ifa->ifa_label, event));
7814 break;
7815 }
7816 return NOTIFY_DONE;
7817 }
7818 #endif /* ARP_OFFLOAD_SUPPORT */
7819
7820 #ifdef CONFIG_IPV6
7821 /* Neighbor Discovery Offload: defered handler */
7822 static void
7823 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
7824 {
7825 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
7826 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
7827 int ret;
7828
7829 if (event != DHD_WQ_WORK_IPV6_NDO) {
7830 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7831 return;
7832 }
7833
7834 if (!ndo_work) {
7835 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
7836 return;
7837 }
7838
7839 if (!pub) {
7840 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
7841 return;
7842 }
7843
7844 if (ndo_work->if_idx) {
7845 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
7846 return;
7847 }
7848
7849 switch (ndo_work->event) {
7850 case NETDEV_UP:
7851 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__));
7852 ret = dhd_ndo_enable(pub, TRUE);
7853 if (ret < 0) {
7854 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
7855 }
7856
7857 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
7858 if (ret < 0) {
7859 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
7860 __FUNCTION__, ret));
7861 }
7862 break;
7863 case NETDEV_DOWN:
7864 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
7865 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
7866 if (ret < 0) {
7867 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
7868 __FUNCTION__, ret));
7869 goto done;
7870 }
7871
7872 ret = dhd_ndo_enable(pub, FALSE);
7873 if (ret < 0) {
7874 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
7875 goto done;
7876 }
7877 break;
7878 default:
7879 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
7880 break;
7881 }
7882 done:
7883 /* free ndo_work. alloced while scheduling the work */
7884 kfree(ndo_work);
7885
7886 return;
7887 }
7888
7889 /*
7890 * Neighbor Discovery Offload: Called when an interface
7891 * is assigned with ipv6 address.
7892 * Handles only primary interface
7893 */
7894 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
7895 unsigned long event,
7896 void *ptr)
7897 {
7898 dhd_info_t *dhd;
7899 dhd_pub_t *dhd_pub;
7900 struct inet6_ifaddr *inet6_ifa = ptr;
7901 struct in6_addr *ipv6_addr = &inet6_ifa->addr;
7902 struct ipv6_work_info_t *ndo_info;
7903 int idx = 0; /* REVISIT */
7904
7905 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7906 /* Filter notifications meant for non Broadcom devices */
7907 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
7908 return NOTIFY_DONE;
7909 }
7910 #endif /* LINUX_VERSION_CODE */
7911
7912 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
7913 if (!dhd)
7914 return NOTIFY_DONE;
7915
7916 if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
7917 return NOTIFY_DONE;
7918 dhd_pub = &dhd->pub;
7919 if (!FW_SUPPORTED(dhd_pub, ndoe))
7920 return NOTIFY_DONE;
7921
7922 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
7923 if (!ndo_info) {
7924 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
7925 return NOTIFY_DONE;
7926 }
7927
7928 ndo_info->event = event;
7929 ndo_info->if_idx = idx;
7930 memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
7931
7932 /* defer the work to thread as it may block kernel */
7933 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
7934 dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
7935 return NOTIFY_DONE;
7936 }
7937 #endif /* #ifdef CONFIG_IPV6 */
7938
/*
 * Register the net_device for interface 'ifidx' with the Linux network
 * layer: install netdev/ethtool/wireless-ext ops, pick the MAC address
 * (dongle MAC for the primary interface, per-interface MAC for virtual
 * ones), then register_netdev()/register_netdevice() depending on whether
 * the caller already holds rtnl_lock.
 * Returns 0 on success or the registration error code; on failure the
 * installed ops are reverted so the netdev is left unhooked.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp;
	struct net_device *net = NULL;
	int err = 0;
	/* placeholder MAC, overwritten below unless the dongle MAC is null */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	ASSERT(dhd && dhd->iflist[ifidx]);
	ifp = dhd->iflist[ifidx];
	net = ifp->net;
	ASSERT(net && (ifp->idx == ifidx));

	/* Default to the virtual-interface ops; the primary interface gets
	 * dhd_ops_pri further below.
	 */
#ifndef P2PONEINT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
#else
	net->netdev_ops = &dhd_cfgp2p_ops_virt;
#endif /* P2PONEINT */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
#if defined(OEM_ANDROID)
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot. This will not work in simultaneous AP/STA mode,
		 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
			__func__, net->name));
			temp_addr[0] |= 0x02;
		}
#endif /* defined(OEM_ANDROID) */
	}

	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	/* size receive buffers to this netdev's MTU/headroom */
	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	if (ifidx == 0)
		printf("%s\n", dhd_version);

	/* need_rtnl_lock == TRUE means the caller does NOT hold rtnl_lock,
	 * so use the locking variant.
	 */
	if (need_rtnl_lock)
		err = register_netdev(net);
	else
		err = register_netdevice(net);

	if (err != 0) {
		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
		goto fail;
	}


#if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
	if (ifidx == 0) {
		argos_register_notifier_init(net);
	}
#endif
	printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
		MAC2STRDBG(net->dev_addr));

#if defined(OEM_ANDROID) && defined(SOFTAP) && defined(WL_WIRELESS_EXT) && \
	!defined(WL_CFG80211)
//		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

#if defined(OEM_ANDROID) && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))))
	if (ifidx == 0) {
#ifdef BCMLXSDMMC
		up(&dhd_registration_sem);
#endif
		/* firmware is loaded on "ifconfig up": power the chip back
		 * off until userspace brings the interface up.
		 */
		if (!dhd_download_fw_on_driverload) {
			dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
			dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	}
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
	return 0;

fail:
	/* undo the ops hookup so a later retry passes the ASSERTs above */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
8075
8076 void
8077 dhd_bus_detach(dhd_pub_t *dhdp)
8078 {
8079 dhd_info_t *dhd;
8080
8081 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8082
8083 if (dhdp) {
8084 dhd = (dhd_info_t *)dhdp->info;
8085 if (dhd) {
8086
8087 /*
8088 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
8089 * calling stop again will cuase SD read/write errors.
8090 */
8091 if (dhd->pub.busstate != DHD_BUS_DOWN) {
8092 /* Stop the protocol module */
8093 dhd_prot_stop(&dhd->pub);
8094
8095 /* Stop the bus module */
8096 #ifdef BCMDBUS
8097 /* Force Dongle terminated */
8098 if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
8099 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
8100 __FUNCTION__));
8101 dbus_stop(dhd->pub.dbus);
8102 dhd->pub.busstate = DHD_BUS_DOWN;
8103 #else
8104 dhd_bus_stop(dhd->pub.bus, TRUE);
8105 #endif /* BCMDBUS */
8106 }
8107
8108 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
8109 dhd_bus_oob_intr_unregister(dhdp);
8110 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
8111 }
8112 }
8113 }
8114
8115
/* Full driver teardown, counterpart of dhd_attach()/dhd_register_if():
 * unregister net interfaces and kernel notifiers, stop bus/protocol layers,
 * kill worker threads, timers and wakelocks, then free remaining state.
 * The dhd_state bits gate each step so a partially attached instance can
 * be torn down safely.
 */
void dhd_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	unsigned long flags;
	int timer_valid = FALSE;

	if (!dhdp)
		return;

	dhd = (dhd_info_t *)dhdp->info;
	if (!dhd)
		return;


	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));

	dhd->pub.up = 0;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		/* Give sufficient time for threads to start running in case
		 * dhd_attach() has failed
		 */
		OSL_SLEEP(100);
	}

	/* unregister all interfaces, start with virtual */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;

		/* unregister virtual interfaces */
		dhd_net_if_lock_local(dhd);
		for (i = 1; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i])
				dhd_preremove_if(&dhd->pub, i, TRUE);
		}
		dhd_net_if_unlock_local(dhd);

		/* unregister primary interface 0 */
		dhd_preremove_if(&dhd->pub, 0, TRUE);
	}

#ifdef PROP_TXSTATUS
#ifdef DHD_WLFC_THREAD
	/* Stop the wlfc thread before the protocol layer goes away. */
	if (dhd->pub.wlfc_thread) {
		kthread_stop(dhd->pub.wlfc_thread);
		dhdp->wlfc_thread_go = TRUE;
		wake_up_interruptible(&dhdp->wlfc_wqhead);
	}
	dhd->pub.wlfc_thread = NULL;
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
		dhd_bus_detach(dhdp);
#ifdef PCIE_FULL_DONGLE
		dhd_flow_rings_deinit(dhdp);
#endif

		if (dhdp->prot)
			dhd_prot_detach(dhdp);
	}

#ifdef ARP_OFFLOAD_SUPPORT
	if (dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = FALSE;
		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
	if (dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = FALSE;
		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
		if (dhd->early_suspend.suspend)
			unregister_early_suspend(&dhd->early_suspend);
	}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if defined(WL_WIRELESS_EXT)
	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
		/* Detatch and unlink in the iw */
		wl_iw_detach();
	}
#endif /* defined(WL_WIRELESS_EXT) */

	/* delete all interfaces, start with virtual */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;
		dhd_if_t *ifp;

		/* Cleanup virtual interfaces */
		dhd_net_if_lock_local(dhd);
		for (i = 1; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i])
				dhd_remove_if(&dhd->pub, i, TRUE);
		}
		dhd_net_if_unlock_local(dhd);

		/* delete primary interface 0 */
		ifp = dhd->iflist[0];
		ASSERT(ifp);
		if (ifp) {
#ifdef DHD_WMF
			dhd_wmf_cleanup(dhdp, 0);
#endif /* DHD_WMF */

			dhd_if_del_sta_list(ifp);

			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
			dhd->iflist[0] = NULL;
		}
	}

	/* Clear the watchdog timer */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	timer_valid = dhd->wd_timer_valid;
	dhd->wd_timer_valid = FALSE;
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	if (timer_valid)
		del_timer_sync(&dhd->timer);

#ifndef BCMDBUS
	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_wdt_ctl);
		}

		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rxf_ctl);
		}

		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_dpc_ctl);
		} else
#endif /* BCMDBUS */
		/* No DPC thread (or BCMDBUS build): DPC ran as a tasklet. */
		tasklet_kill(&dhd->tasklet);
#ifndef BCMDBUS
	}
#endif
#ifdef WL_CFG80211
	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
		wl_cfg80211_detach(NULL);
		dhd_monitor_uninit();
	}
#endif
	/* free deferred work queue */
	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
	dhd->dhd_deferred_wq = NULL;

#ifdef BCMDBUS
	if (dhdp->dbus) {
		dbus_detach(dhdp->dbus);
		dhdp->dbus = NULL;
	}
#endif /* BCMDBUS */
#ifdef SHOW_LOGTRACE
	if (dhd->event_data.fmts)
		kfree(dhd->event_data.fmts);
	if (dhd->event_data.raw_fmts)
		kfree(dhd->event_data.raw_fmts);
#endif /* SHOW_LOGTRACE */

#ifdef PNO_SUPPORT
	if (dhdp->pno_state)
		dhd_pno_deinit(dhdp);
#endif
#if defined(CONFIG_PM_SLEEP)
	if (dhd_pm_notifier_registered) {
		unregister_pm_notifier(&dhd_pm_notifier);
		dhd_pm_notifier_registered = FALSE;
	}
#endif /* CONFIG_PM_SLEEP */
#ifdef DEBUG_CPU_FREQ
	if (dhd->new_freq)
		free_percpu(dhd->new_freq);
	dhd->new_freq = NULL;
	cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
		/* Zero the counters so any late unlock calls are harmless. */
		dhd->wakelock_counter = 0;
		dhd->wakelock_wd_counter = 0;
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		wake_lock_destroy(&dhd->wl_wifi);
		wake_lock_destroy(&dhd->wl_rxwake);
		wake_lock_destroy(&dhd->wl_ctrlwake);
		wake_lock_destroy(&dhd->wl_wdwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
		wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
	}




#ifdef DHDTCPACK_SUPPRESS
	/* This will free all MEM allocated for TCPACK SUPPRESS */
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
	dhd_conf_detach(dhdp);
}
8323
8324
8325 void
8326 dhd_free(dhd_pub_t *dhdp)
8327 {
8328 dhd_info_t *dhd;
8329 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8330
8331 if (dhdp) {
8332 int i;
8333 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
8334 if (dhdp->reorder_bufs[i]) {
8335 reorder_info_t *ptr;
8336 uint32 buf_size = sizeof(struct reorder_info);
8337
8338 ptr = dhdp->reorder_bufs[i];
8339
8340 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
8341 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
8342 i, ptr->max_idx, buf_size));
8343
8344 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
8345 dhdp->reorder_bufs[i] = NULL;
8346 }
8347 }
8348
8349 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
8350
8351 dhd = (dhd_info_t *)dhdp->info;
8352 if (dhdp->soc_ram) {
8353 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
8354 dhdp->soc_ram = NULL;
8355 }
8356
8357 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
8358 if (dhd &&
8359 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
8360 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
8361 dhd = NULL;
8362 }
8363 }
8364
8365 void
8366 dhd_clear(dhd_pub_t *dhdp)
8367 {
8368 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8369
8370 if (dhdp) {
8371 int i;
8372 #ifdef DHDTCPACK_SUPPRESS
8373 /* Clean up timer/data structure for any remaining/pending packet or timer. */
8374 dhd_tcpack_info_tbl_clean(dhdp);
8375 #endif /* DHDTCPACK_SUPPRESS */
8376 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
8377 if (dhdp->reorder_bufs[i]) {
8378 reorder_info_t *ptr;
8379 uint32 buf_size = sizeof(struct reorder_info);
8380
8381 ptr = dhdp->reorder_bufs[i];
8382
8383 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
8384 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
8385 i, ptr->max_idx, buf_size));
8386
8387 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
8388 dhdp->reorder_bufs[i] = NULL;
8389 }
8390 }
8391
8392 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
8393
8394 if (dhdp->soc_ram) {
8395 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
8396 dhdp->soc_ram = NULL;
8397 }
8398 }
8399 }
8400
/* Undo everything dhd_module_init() registered: the bus (or dbus) driver,
 * the Android wl control interface, and the platform-device glue.
 */
static void
dhd_module_cleanup(void)
{
	printf("%s: Enter\n", __FUNCTION__);

#ifdef BCMDBUS
	dbus_deregister();
#else
	dhd_bus_unregister();
#endif /* BCMDBUS */

#if defined(OEM_ANDROID)
	wl_android_exit();
#endif /* OEM_ANDROID */

	dhd_wifi_platform_unregister_drv();
	printf("%s: Exit\n", __FUNCTION__);
}
8419
/* Module unload hook: unwind the driver, drop the reboot notifier, then
 * cut platform power to the WiFi device via extern_wifi_set_enable(0).
 */
static void __exit
dhd_module_exit(void)
{
	dhd_module_cleanup();
	unregister_reboot_notifier(&dhd_reboot_notifier);
	extern_wifi_set_enable(0);
}
8427
8428 static int __init
8429 dhd_module_init(void)
8430 {
8431 int err;
8432 int retry = POWERUP_MAX_RETRY;
8433
8434 printf("%s: in\n", __FUNCTION__);
8435 extern_wifi_set_enable(1);
8436 DHD_PERIM_RADIO_INIT();
8437
8438 if (firmware_path[0] != '\0') {
8439 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
8440 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
8441 }
8442
8443 if (nvram_path[0] != '\0') {
8444 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
8445 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
8446 }
8447
8448 do {
8449 err = dhd_wifi_platform_register_drv();
8450 if (!err) {
8451 register_reboot_notifier(&dhd_reboot_notifier);
8452 break;
8453 }
8454 else {
8455 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
8456 __FUNCTION__, retry));
8457 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
8458 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
8459 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
8460 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
8461 }
8462 } while (retry--);
8463
8464 if (err)
8465 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
8466
8467 printf("%s: Exit err=%d\n", __FUNCTION__, err);
8468 return err;
8469 }
8470
8471 static int
8472 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
8473 {
8474 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
8475 if (code == SYS_RESTART) {
8476 #ifndef OEM_ANDROID
8477 dhd_module_cleanup();
8478 #endif
8479 }
8480
8481 return NOTIFY_DONE;
8482 }
8483
8484 #ifdef BCMDBUS
8485
8486 /*
8487 * hdrlen is space to reserve in pkt headroom for DBUS
8488 */
/* DBUS probe callback: allocate an OSL handle, attach DHD, register the
 * primary net interface, hook up the dongle bus and the conf layer.
 * Returns the dhd_info pointer (later handed to dhd_dbus_disconnect_cb)
 * or NULL on failure; the fail path unwinds in reverse order.
 */
void *
dhd_dbus_probe_cb(void *arg, const char *desc, uint32 bustype, uint32 hdrlen)
{
	osl_t *osh;
	int ret = 0;
	dbus_attrib_t attrib;
	dhd_pub_t *pub = NULL;

	printf("%s: Enter\n", __FUNCTION__);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* Serialize probe against other users of the shared SDIO mutex. */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
		DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
	}
	else {
		DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif

	/* Ask the OS interface part for an OSL handle */
	if (!(osh = osl_attach(NULL, bustype, TRUE))) {
		DHD_ERROR(("%s: OSL attach failed\n", __FUNCTION__));
		ret = -ENOMEM;
		goto fail;
	}

	/* Attach to the dhd/OS interface */
	if (!(pub = dhd_attach(osh, NULL /* bus */, hdrlen))) {
		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
		ret = -ENXIO;
		goto fail;
	}

	/* Ok, finish the attach to the OS network interface */
	if (dhd_register_if(pub, 0, TRUE) != 0) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		ret = -ENXIO;
		goto fail;
	}

	pub->dbus = dbus_attach(osh, pub->rxsz, DBUS_NRXQ, DBUS_NTXQ,
		pub->info, &dhd_dbus_cbs, NULL, NULL);
	if (pub->dbus) {
		dbus_get_attrib(pub->dbus, &attrib);
		DHD_ERROR(("DBUS: vid=0x%x pid=0x%x devid=0x%x bustype=0x%x mtu=%d rev=%d\n",
			attrib.vid, attrib.pid, attrib.devid, attrib.bustype, attrib.mtu, attrib.chiprev));
	} else {
		ret = -ENXIO;
		goto fail;
	}

	/* dhd_conf must be attached after linking dhd to dhd->dbus,
	 * because dhd_detach will check .info is NULL or not.
	 */
	if (dhd_conf_attach(pub) != 0) {
		DHD_ERROR(("dhd_conf_attach failed\n"));
		goto fail;
	}

	/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
	 * This is indeed a hack but we have to make it work properly before we have a better
	 * solution
	 */
	dhd_update_fw_nv_path(pub->info);

#ifdef BCM_FD_AGGR
	pub->info->rpc_th = bcm_rpc_tp_attach(osh, (void *)pub->dbus);
	if (!pub->info->rpc_th) {
		DHD_ERROR(("%s: bcm_rpc_tp_attach failed\n", __FUNCTION__));
		ret = -ENXIO;
		goto fail;
	}

	pub->info->rpc_osh = rpc_osl_attach(osh);
	if (!pub->info->rpc_osh) {
		DHD_ERROR(("%s: rpc_osl_attach failed\n", __FUNCTION__));
		bcm_rpc_tp_detach(pub->info->rpc_th);
		pub->info->rpc_th = NULL;
		ret = -ENXIO;
		goto fail;
	}
	/* Set up the aggregation release timer */
	init_timer(&pub->info->rpcth_timer);
	pub->info->rpcth_timer.data = (ulong)pub->info;
	pub->info->rpcth_timer.function = dhd_rpcth_watchdog;
	pub->info->rpcth_timer_active = FALSE;

	bcm_rpc_tp_register_cb(pub->info->rpc_th, NULL, pub->info,
		dbus_rpcth_rx_pkt, pub->info, pub->info->rpc_osh);
#endif /* BCM_FD_AGGR */
#ifdef BCMDBGFS
	dhd_dbg_remove();
#endif

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	mutex_unlock(&_dhd_sdio_mutex_lock_);
	DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif

	printf("%s: Exit\n", __FUNCTION__);
	/* This is passed to dhd_dbus_disconnect_cb */
	return pub->info;
fail:
	/* Release resources in reverse order */
	if (osh) {
		if (pub) {
			dhd_detach(pub);
			dhd_free(pub);
		}
		osl_detach(osh);
	}
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	mutex_unlock(&_dhd_sdio_mutex_lock_);
	DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif

	/* ret carries the error cause for debugging only; NULL signals failure. */
	BCM_REFERENCE(ret);
	printf("%s: Exit\n", __FUNCTION__);
	return NULL;
}
8616
/* DBUS disconnect callback: tear down the DHD instance created by
 * dhd_dbus_probe_cb() and release its OSL handle.
 * arg is the dhd_info pointer returned by the probe callback.
 */
void
dhd_dbus_disconnect_cb(void *arg)
{
	dhd_info_t *dhd = (dhd_info_t *)arg;
	dhd_pub_t *pub;
	osl_t *osh;

	printf("%s: Enter\n", __FUNCTION__);
	dump_stack();
	if (dhd == NULL)
		return;

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* Serialize disconnect against other users of the shared SDIO mutex. */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) == 0) {
		DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__));
	}
	else {
		DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif

	pub = &dhd->pub;
	osh = pub->osh;
#ifdef BCM_FD_AGGR
	del_timer_sync(&dhd->rpcth_timer);
	bcm_rpc_tp_deregister_cb(dhd->rpc_th);
	rpc_osl_detach(dhd->rpc_osh);
	bcm_rpc_tp_detach(dhd->rpc_th);
#endif
	dhd_detach(pub);
	dhd_free(pub);

	/* Anything still allocated on the OSL at this point is a leak. */
	if (MALLOCED(osh)) {
		DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
	}
	osl_detach(osh);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	mutex_unlock(&_dhd_sdio_mutex_lock_);
	DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif /* MULTIPLE_SUPPLICANT */
	printf("%s: Exit\n", __FUNCTION__);
}
8665 #endif /* BCMDBUS */
8666
/* Module entry-point selection: on 2.6+ kernels the driver is initialized
 * late (deferred/late_initcall variants) so platform facilities it depends
 * on are up first; older kernels fall back to a plain module_init.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS)
deferred_module_init(dhd_module_init);
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);
8680
8681 /*
8682 * OS specific functions required to implement DHD driver in OS independent way
8683 */
8684 int
8685 dhd_os_proto_block(dhd_pub_t *pub)
8686 {
8687 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
8688
8689 if (dhd) {
8690 DHD_PERIM_UNLOCK(pub);
8691
8692 down(&dhd->proto_sem);
8693
8694 DHD_PERIM_LOCK(pub);
8695 return 1;
8696 }
8697
8698 return 0;
8699 }
8700
8701 int
8702 dhd_os_proto_unblock(dhd_pub_t *pub)
8703 {
8704 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
8705
8706 if (dhd) {
8707 up(&dhd->proto_sem);
8708 return 1;
8709 }
8710
8711 return 0;
8712 }
8713
8714 unsigned int
8715 dhd_os_get_ioctl_resp_timeout(void)
8716 {
8717 return ((unsigned int)dhd_ioctl_timeout_msec);
8718 }
8719
8720 void
8721 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
8722 {
8723 dhd_ioctl_timeout_msec = (int)timeout_msec;
8724 }
8725
8726 int
8727 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
8728 {
8729 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
8730 int timeout;
8731
8732 /* Convert timeout in millsecond to jiffies */
8733 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8734 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
8735 #else
8736 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
8737 #endif
8738
8739 DHD_PERIM_UNLOCK(pub);
8740
8741 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
8742
8743 DHD_PERIM_LOCK(pub);
8744
8745 return timeout;
8746 }
8747
8748 int
8749 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
8750 {
8751 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8752
8753 wake_up(&dhd->ioctl_resp_wait);
8754 return 0;
8755 }
8756
8757 int
8758 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition, bool *pending)
8759 {
8760 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
8761 int timeout;
8762
8763 /* Convert timeout in millsecond to jiffies */
8764 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8765 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
8766 #else
8767 timeout = dhd_ioctl_timeout_msec * HZ / 1000;
8768 #endif
8769
8770 DHD_PERIM_UNLOCK(pub);
8771
8772 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
8773
8774 DHD_PERIM_LOCK(pub);
8775
8776 return timeout;
8777 }
8778
8779 int
8780 dhd_os_d3ack_wake(dhd_pub_t *pub)
8781 {
8782 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8783
8784 wake_up(&dhd->d3ack_wait);
8785 return 0;
8786 }
8787
8788 void
8789 dhd_os_wd_timer_extend(void *bus, bool extend)
8790 {
8791 #ifndef BCMDBUS
8792 dhd_pub_t *pub = bus;
8793 dhd_info_t *dhd = (dhd_info_t *)pub->info;
8794
8795 if (extend)
8796 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
8797 else
8798 dhd_os_wd_timer(bus, dhd->default_wd_interval);
8799 #endif /* !BCMDBUS */
8800 }
8801
8802
/* Arm, re-arm or stop the dongle watchdog timer.
 * wdtick == 0 stops the timer and drops the watchdog wakelock; a non-zero
 * tick (ms) re-arms it and takes the wakelock. No-op on BCMDBUS builds.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
#ifndef BCMDBUS
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		/* Lock dropped before del_timer_sync(); the timer callback
		 * presumably takes the same general lock -- TODO confirm.
		 */
		DHD_GENERAL_UNLOCK(pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
#endif /* BCMDBUS */
}
8847
8848 void *
8849 dhd_os_open_image(char *filename)
8850 {
8851 struct file *fp;
8852
8853 fp = filp_open(filename, O_RDONLY, 0);
8854 /*
8855 * 2.6.11 (FC4) supports filp_open() but later revs don't?
8856 * Alternative:
8857 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
8858 * ???
8859 */
8860 if (IS_ERR(fp))
8861 fp = NULL;
8862
8863 return fp;
8864 }
8865
8866 int
8867 dhd_os_get_image_block(char *buf, int len, void *image)
8868 {
8869 struct file *fp = (struct file *)image;
8870 int rdlen;
8871
8872 if (!image)
8873 return 0;
8874
8875 rdlen = kernel_read(fp, fp->f_pos, buf, len);
8876 if (rdlen > 0)
8877 fp->f_pos += rdlen;
8878
8879 return rdlen;
8880 }
8881
8882 void
8883 dhd_os_close_image(void *image)
8884 {
8885 if (image)
8886 filp_close((struct file *)image, NULL);
8887 }
8888
8889 void
8890 dhd_os_sdlock(dhd_pub_t *pub)
8891 {
8892 dhd_info_t *dhd;
8893
8894 dhd = (dhd_info_t *)(pub->info);
8895
8896 #ifndef BCMDBUS
8897 if (dhd_dpc_prio >= 0)
8898 down(&dhd->sdsem);
8899 else
8900 spin_lock_bh(&dhd->sdlock);
8901 #else
8902 spin_lock_bh(&dhd->sdlock);
8903 #endif /* BCMDBUS */
8904 }
8905
8906 void
8907 dhd_os_sdunlock(dhd_pub_t *pub)
8908 {
8909 dhd_info_t *dhd;
8910
8911 dhd = (dhd_info_t *)(pub->info);
8912
8913 #ifndef BCMDBUS
8914 if (dhd_dpc_prio >= 0)
8915 up(&dhd->sdsem);
8916 else
8917 spin_unlock_bh(&dhd->sdlock);
8918 #else
8919 spin_unlock_bh(&dhd->sdlock);
8920 #endif /* BCMDBUS */
8921 }
8922
8923 void
8924 dhd_os_sdlock_txq(dhd_pub_t *pub)
8925 {
8926 dhd_info_t *dhd;
8927
8928 dhd = (dhd_info_t *)(pub->info);
8929 #ifdef BCMDBUS
8930 spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
8931 #else
8932 spin_lock_bh(&dhd->txqlock);
8933 #endif
8934 }
8935
8936 void
8937 dhd_os_sdunlock_txq(dhd_pub_t *pub)
8938 {
8939 dhd_info_t *dhd;
8940
8941 dhd = (dhd_info_t *)(pub->info);
8942 #ifdef BCMDBUS
8943 spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
8944 #else
8945 spin_unlock_bh(&dhd->txqlock);
8946 #endif
8947 }
8948
/* Intentionally empty: the rx queue takes no lock on this bus variant. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
8953
/* Intentionally empty: counterpart of the no-op dhd_os_sdlock_rxq(). */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
8958
8959 static void
8960 dhd_os_rxflock(dhd_pub_t *pub)
8961 {
8962 dhd_info_t *dhd;
8963
8964 dhd = (dhd_info_t *)(pub->info);
8965 spin_lock_bh(&dhd->rxf_lock);
8966
8967 }
8968
8969 static void
8970 dhd_os_rxfunlock(dhd_pub_t *pub)
8971 {
8972 dhd_info_t *dhd;
8973
8974 dhd = (dhd_info_t *)(pub->info);
8975 spin_unlock_bh(&dhd->rxf_lock);
8976 }
8977
8978 #ifdef DHDTCPACK_SUPPRESS
8979 unsigned long
8980 dhd_os_tcpacklock(dhd_pub_t *pub)
8981 {
8982 dhd_info_t *dhd;
8983 unsigned long flags = 0;
8984
8985 dhd = (dhd_info_t *)(pub->info);
8986
8987 if (dhd) {
8988 spin_lock_irqsave(&dhd->tcpack_lock, flags);
8989 }
8990
8991 return flags;
8992 }
8993
8994 void
8995 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
8996 {
8997 dhd_info_t *dhd;
8998
8999
9000 dhd = (dhd_info_t *)(pub->info);
9001
9002 if (dhd) {
9003 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
9004 }
9005 }
9006 #endif /* DHDTCPACK_SUPPRESS */
9007
9008 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
9009 {
9010 uint8* buf;
9011 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
9012
9013 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
9014 if (buf == NULL) {
9015 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
9016 " size: %dbytes\n", __FUNCTION__, section, size));
9017 if (kmalloc_if_fail)
9018 buf = kmalloc(size, flags);
9019 }
9020
9021 return buf;
9022 }
9023
/* Counterpart of dhd_os_prealloc(): intentionally empty, since buffers
 * from the platform prealloc pool are never returned to the kernel here.
 */
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
9027
9028 #if defined(WL_WIRELESS_EXT)
9029 struct iw_statistics *
9030 dhd_get_wireless_stats(struct net_device *dev)
9031 {
9032 int res = 0;
9033 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9034
9035 if (!dhd->pub.up) {
9036 return NULL;
9037 }
9038
9039 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
9040
9041 if (res == 0)
9042 return &dhd->iw.wstats;
9043 else
9044 return NULL;
9045 }
9046 #endif /* defined(WL_WIRELESS_EXT) */
9047
/* Decode a raw dongle event packet via wl_host_event() and dispatch it to
 * the WEXT and/or cfg80211 listeners of the resolved interface.
 * Returns BCME_OK or the wl_host_event() error code.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;

	ASSERT(dhd != NULL);


#ifdef SHOW_LOGTRACE
	/* event_data lets wl_host_event decode firmware log-trace records. */
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

		ASSERT(dhd->iflist[*ifidx] != NULL);
		ASSERT(dhd->iflist[*ifidx]->net != NULL);

		if (dhd->iflist[*ifidx]->net) {
			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->net)
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
9090
/* Send up a locally generated event to the host network stack.
 * Only AMP HCI events (WLBTAMP builds) are handled: they are wrapped into
 * a bcm_event_t Ethernet frame and injected with netif_rx/netif_rx_ni.
 * All other event types are silently ignored.
 */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	switch (ntoh32(event->event_type)) {
#ifdef WLBTAMP
	/* Send up locally generated AMP HCI Events */
	case WLC_E_BTA_HCI_EVENT: {
		struct sk_buff *p, *skb;
		bcm_event_t *msg;
		wl_event_msg_t *p_bcm_event;
		char *ptr;
		uint32 len;
		uint32 pktlen;
		dhd_if_t *ifp;
		dhd_info_t *dhd;
		uchar *eth;
		int ifidx;

		/* Frame layout: bcm_event_t header + payload + 2 terminator bytes. */
		len = ntoh32(event->datalen);
		pktlen = sizeof(bcm_event_t) + len + 2;
		dhd = dhdp->info;
		ifidx = dhd_ifname2idx(dhd, event->ifname);

		if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
			ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

			msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);

			/* Address the frame to ourselves, with the source bit flipped. */
			bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
			bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
			ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);

			msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);

			/* BCM Vendor specific header... */
			msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
			msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
			bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);

			/* vendor spec header length + pvt data length (private indication
			 * hdr + actual message itself)
			 */
			msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
				BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
			msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);

			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));

			/* copy wl_event_msg_t into sk_buf */

			/* pointer to wl_event_msg_t in sk_buf */
			p_bcm_event = &msg->event;
			bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));

			/* copy hci event into sk_buf */
			bcopy(data, (p_bcm_event + 1), len);

			msg->bcm_hdr.length  = hton16(sizeof(wl_event_msg_t) +
				ntoh16(msg->bcm_hdr.length));
			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));

			ptr = (char *)(msg + 1);
			/* Last 2 bytes of the message are 0x00 0x00 to signal that there
			 * are no ethertypes which are following this
			 */
			ptr[len+0] = 0x00;
			ptr[len+1] = 0x00;

			skb = PKTTONATIVE(dhdp->osh, p);
			eth = skb->data;
			len = skb->len;

			/* Fall back to the primary interface if the name didn't match. */
			ifp = dhd->iflist[ifidx];
			if (ifp == NULL)
			     ifp = dhd->iflist[0];

			ASSERT(ifp);
			skb->dev = ifp->net;
			skb->protocol = eth_type_trans(skb, skb->dev);

			/* eth_type_trans() moved skb->data; restore it before pull. */
			skb->data = eth;
			skb->len = len;

			/* Strip header, count, deliver upward */
			skb_pull(skb, ETH_HLEN);

			/* Send the packet */
			bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
				__FUNCTION__, __LINE__);
			if (in_interrupt()) {
				netif_rx(skb);
			} else {
				netif_rx_ni(skb);
			}
		}
		else {
			/* Could not allocate a sk_buf */
			DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
		}
		break;
	} /* case WLC_E_BTA_HCI_EVENT */
#endif /* WLBTAMP */

	default:
		break;
	}
}
9199
9200 #ifdef LOG_INTO_TCPDUMP
/* Wrap an arbitrary log blob into a BRCM-ethertype frame and inject it on
 * wlan0 (falling back to interface 0) so it shows up in tcpdump captures.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* Address the frame to ourselves, with the source bit flipped. */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		skb->protocol = eth_type_trans(skb, skb->dev);
		/* eth_type_trans() moved skb->data; restore it before pull. */
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	}
	else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
9256 #endif /* LOG_INTO_TCPDUMP */
9257
/* Block until *lockvar clears or the ioctl timeout expires.
 * NOTE(review): the whole body is compiled out via "#if 0", so this is
 * currently a deliberate no-op stub -- confirm before relying on it.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if 0 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif
	return;
}
9275
/* Wake anyone parked in dhd_wait_for_event().
 * NOTE(review): compiled out via "#if 0" -- currently a no-op stub.
 */
void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
#if 0 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo =  dhd->info;
	if (waitqueue_active(&dhdinfo->ctrl_wait))
		wake_up(&dhdinfo->ctrl_wait);
#endif
	return;
}
9285
9286 #if defined(BCMPCIE)
9287 int
9288 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
9289 {
9290 int ret = 0;
9291 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9292
9293 if (flag == TRUE) {
9294 /* Issue wl down command before resetting the chip */
9295 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
9296 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
9297 }
9298 #ifdef PROP_TXSTATUS
9299 if (dhd->pub.wlfc_enabled)
9300 dhd_wlfc_deinit(&dhd->pub);
9301 #endif /* PROP_TXSTATUS */
9302 #ifdef PNO_SUPPORT
9303 if (dhd->pub.pno_state)
9304 dhd_pno_deinit(&dhd->pub);
9305 #endif
9306 }
9307
9308
9309 ret = dhd_bus_devreset(&dhd->pub, flag);
9310 if (ret) {
9311 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
9312 return ret;
9313 }
9314
9315 return ret;
9316 }
9317
9318 #endif
9319
9320 int net_os_set_suspend_disable(struct net_device *dev, int val)
9321 {
9322 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9323 int ret = 0;
9324
9325 if (dhd) {
9326 ret = dhd->pub.suspend_disable_flag;
9327 dhd->pub.suspend_disable_flag = val;
9328 }
9329 return ret;
9330 }
9331
9332 int net_os_set_suspend(struct net_device *dev, int val, int force)
9333 {
9334 int ret = 0;
9335 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9336
9337 if (dhd) {
9338 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9339 ret = dhd_set_suspend(val, &dhd->pub);
9340 #else
9341 ret = dhd_suspend_resume_helper(dhd, val, force);
9342 #endif
9343 #ifdef WL_CFG80211
9344 wl_cfg80211_update_power_mode(dev);
9345 #endif
9346 }
9347 return ret;
9348 }
9349
9350 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
9351 {
9352 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9353
9354 if (dhd)
9355 dhd->pub.suspend_bcn_li_dtim = val;
9356
9357 return 0;
9358 }
9359
9360 #ifdef PKT_FILTER_SUPPORT
9361 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
9362 {
9363 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9364 char *filterp = NULL;
9365 int filter_id = 0;
9366 int ret = 0;
9367
9368 if (!dhd_master_mode)
9369 add_remove = !add_remove;
9370
9371 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
9372 (num == DHD_MDNS_FILTER_NUM))
9373 return ret;
9374 if (num >= dhd->pub.pktfilter_count)
9375 return -EINVAL;
9376 switch (num) {
9377 case DHD_BROADCAST_FILTER_NUM:
9378 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
9379 filter_id = 101;
9380 break;
9381 case DHD_MULTICAST4_FILTER_NUM:
9382 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
9383 filter_id = 102;
9384 break;
9385 case DHD_MULTICAST6_FILTER_NUM:
9386 filterp = "103 0 0 0 0xFFFF 0x3333";
9387 filter_id = 103;
9388 break;
9389 default:
9390 return -EINVAL;
9391 }
9392
9393 /* Add filter */
9394 if (add_remove) {
9395 dhd->pub.pktfilter[num] = filterp;
9396 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
9397 } else { /* Delete filter */
9398 if (dhd->pub.pktfilter[num] != NULL) {
9399 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
9400 dhd->pub.pktfilter[num] = NULL;
9401 }
9402 }
9403 return ret;
9404 }
9405
9406 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
9407
9408 {
9409 int ret = 0;
9410
9411 /* Packet filtering is set only if we still in early-suspend and
9412 * we need either to turn it ON or turn it OFF
9413 * We can always turn it OFF in case of early-suspend, but we turn it
9414 * back ON only if suspend_disable_flag was not set
9415 */
9416 if (dhdp && dhdp->up) {
9417 if (dhdp->in_suspend) {
9418 if (!val || (val && !dhdp->suspend_disable_flag))
9419 dhd_enable_packet_filter(val, dhdp);
9420 }
9421 }
9422 return ret;
9423 }
9424
9425 /* function to enable/disable packet for Network device */
9426 int net_os_enable_packet_filter(struct net_device *dev, int val)
9427 {
9428 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9429
9430 return dhd_os_enable_packet_filter(&dhd->pub, val);
9431 }
9432 #endif /* PKT_FILTER_SUPPORT */
9433
9434 int
9435 dhd_dev_init_ioctl(struct net_device *dev)
9436 {
9437 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9438 int ret;
9439
9440 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
9441 goto done;
9442
9443 done:
9444 return ret;
9445 }
9446
9447 #ifdef PNO_SUPPORT
9448 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
9449 int
9450 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
9451 {
9452 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9453
9454 return (dhd_pno_stop_for_ssid(&dhd->pub));
9455 }
9456 /* Linux wrapper to call common dhd_pno_set_for_ssid */
9457 int
9458 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
9459 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
9460 {
9461 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9462
9463 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
9464 pno_repeat, pno_freq_expo_max, channel_list, nchan));
9465 }
9466
9467 /* Linux wrapper to call common dhd_pno_enable */
9468 int
9469 dhd_dev_pno_enable(struct net_device *dev, int enable)
9470 {
9471 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9472
9473 return (dhd_pno_enable(&dhd->pub, enable));
9474 }
9475
9476 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
9477 int
9478 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
9479 struct dhd_pno_hotlist_params *hotlist_params)
9480 {
9481 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9482 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
9483 }
9484 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
9485 int
9486 dhd_dev_pno_stop_for_batch(struct net_device *dev)
9487 {
9488 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9489 return (dhd_pno_stop_for_batch(&dhd->pub));
9490 }
9491 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
9492 int
9493 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
9494 {
9495 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9496 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
9497 }
9498 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
9499 int
9500 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
9501 {
9502 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9503 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
9504 }
9505 #endif /* PNO_SUPPORT */
9506
9507 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID))
9508 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
9509 {
9510 dhd_info_t *dhd;
9511 struct net_device *dev;
9512
9513 dhd = (dhd_info_t *)dhd_info;
9514 dev = dhd->iflist[0]->net;
9515
9516 if (dev) {
9517 rtnl_lock();
9518 dev_close(dev);
9519 rtnl_unlock();
9520 #if defined(WL_WIRELESS_EXT)
9521 wl_iw_send_priv_event(dev, "HANG");
9522 #endif
9523 #if defined(WL_CFG80211)
9524 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
9525 #endif
9526 }
9527 }
9528
9529
9530 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
9531 {
9532 int ret = 0;
9533 if (dhdp) {
9534 if (!dhdp->hang_was_sent) {
9535 dhdp->hang_was_sent = 1;
9536 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
9537 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
9538 }
9539 }
9540 return ret;
9541 }
9542
9543 int net_os_send_hang_message(struct net_device *dev)
9544 {
9545 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9546 int ret = 0;
9547
9548 if (dhd) {
9549 /* Report FW problem when enabled */
9550 if (dhd->pub.hang_report) {
9551 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9552 ret = dhd_os_send_hang_message(&dhd->pub);
9553 #else
9554 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
9555 #endif
9556 } else {
9557 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
9558 __FUNCTION__));
9559 /* Enforce bus down to stop any future traffic */
9560 dhd->pub.busstate = DHD_BUS_DOWN;
9561 }
9562 }
9563 return ret;
9564 }
9565 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
9566
9567
/* Toggle the WLAN power rail through the platform adapter; the platform
 * layer applies delay_msec after switching.  Returns its result code.
 */
int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
}
9573
9574 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
9575 wl_country_t *cspec)
9576 {
9577 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9578 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
9579
9580 #ifdef KEEP_JP_REGREV
9581 if (strncmp(country_iso_code, "JP", 3) == 0 && strncmp(dhd->pub.vars_ccode, "JP", 3) == 0) {
9582 cspec->rev = dhd->pub.vars_regrev;
9583 }
9584 #endif /* KEEP_JP_REGREV */
9585 }
9586 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
9587 {
9588 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9589 if (dhd && dhd->pub.up) {
9590 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
9591 #ifdef WL_CFG80211
9592 wl_update_wiphybands(NULL, notify);
9593 #endif
9594 }
9595 }
9596
9597 void dhd_bus_band_set(struct net_device *dev, uint band)
9598 {
9599 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9600 if (dhd && dhd->pub.up) {
9601 #ifdef WL_CFG80211
9602 wl_update_wiphybands(NULL, true);
9603 #endif
9604 }
9605 }
9606
9607 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
9608 {
9609 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9610
9611 if (!fw || fw[0] == '\0')
9612 return -EINVAL;
9613
9614 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
9615 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
9616
9617 #if defined(OEM_ANDROID) && defined(SOFTAP)
9618 if (strstr(fw, "apsta") != NULL) {
9619 DHD_INFO(("GOT APSTA FIRMWARE\n"));
9620 ap_fw_loaded = TRUE;
9621 } else {
9622 DHD_INFO(("GOT STA FIRMWARE\n"));
9623 ap_fw_loaded = FALSE;
9624 }
9625 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
9626 return 0;
9627 }
9628
/* Take the per-driver network-interface mutex for 'dev'. */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_lock_local(dhd);
}
9634
/* Release the per-driver network-interface mutex for 'dev'. */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_unlock_local(dhd);
}
9640
9641 static void dhd_net_if_lock_local(dhd_info_t *dhd)
9642 {
9643 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9644 if (dhd)
9645 mutex_lock(&dhd->dhd_net_if_mutex);
9646 #endif
9647 }
9648
9649 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
9650 {
9651 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9652 if (dhd)
9653 mutex_unlock(&dhd->dhd_net_if_mutex);
9654 #endif
9655 }
9656
9657 static void dhd_suspend_lock(dhd_pub_t *pub)
9658 {
9659 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9660 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9661 if (dhd)
9662 mutex_lock(&dhd->dhd_suspend_mutex);
9663 #endif
9664 }
9665
9666 static void dhd_suspend_unlock(dhd_pub_t *pub)
9667 {
9668 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9669 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9670 if (dhd)
9671 mutex_unlock(&dhd->dhd_suspend_mutex);
9672 #endif
9673 }
9674
9675 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
9676 {
9677 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9678 unsigned long flags = 0;
9679
9680 if (dhd)
9681 spin_lock_irqsave(&dhd->dhd_lock, flags);
9682
9683 return flags;
9684 }
9685
9686 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
9687 {
9688 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9689
9690 if (dhd)
9691 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
9692 }
9693
9694 /* Linux specific multipurpose spinlock API */
9695 void *
9696 dhd_os_spin_lock_init(osl_t *osh)
9697 {
9698 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
9699 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
9700 /* and this results in kernel asserts in internal builds */
9701 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
9702 if (lock)
9703 spin_lock_init(lock);
9704 return ((void *)lock);
9705 }
/* Free a spinlock created by dhd_os_spin_lock_init(); the +4 mirrors the
 * over-allocation done there.
 */
void
dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
{
	MFREE(osh, lock, sizeof(spinlock_t) + 4);
}
9711 unsigned long
9712 dhd_os_spin_lock(void *lock)
9713 {
9714 unsigned long flags = 0;
9715
9716 if (lock)
9717 spin_lock_irqsave((spinlock_t *)lock, flags);
9718
9719 return flags;
9720 }
9721 void
9722 dhd_os_spin_unlock(void *lock, unsigned long flags)
9723 {
9724 if (lock)
9725 spin_unlock_irqrestore((spinlock_t *)lock, flags);
9726 }
9727
/* Snapshot the number of 802.1x (EAPOL) frames still pending transmit. */
static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
	return (atomic_read(&dhd->pend_8021x_cnt));
}
9733
9734 #define MAX_WAIT_FOR_8021X_TX 100
9735
9736 int
9737 dhd_wait_pend8021x(struct net_device *dev)
9738 {
9739 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9740 int timeout = msecs_to_jiffies(10);
9741 int ntimes = MAX_WAIT_FOR_8021X_TX;
9742 int pend = dhd_get_pend_8021x_cnt(dhd);
9743
9744 while (ntimes && pend) {
9745 if (pend) {
9746 set_current_state(TASK_INTERRUPTIBLE);
9747 DHD_PERIM_UNLOCK(&dhd->pub);
9748 schedule_timeout(timeout);
9749 DHD_PERIM_LOCK(&dhd->pub);
9750 set_current_state(TASK_RUNNING);
9751 ntimes--;
9752 }
9753 pend = dhd_get_pend_8021x_cnt(dhd);
9754 }
9755 if (ntimes == 0)
9756 {
9757 atomic_set(&dhd->pend_8021x_cnt, 0);
9758 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
9759 }
9760 return pend;
9761 }
9762
9763 #ifdef DHD_DEBUG
/* Dump 'size' bytes from 'buf' to /tmp/mem_dump and fsync it.  The buffer
 * is ALWAYS freed before returning (ownership transfers to this function),
 * even on open failure.  Returns 0 on success, -1 if the file could not be
 * opened.
 *
 * NOTE(review): calls fp->f_op->write / fp->f_op->fsync directly; on newer
 * kernels these ops may be NULL (write superseded by write_iter) — confirm
 * against the target kernel version.  The write return value is unchecked.
 */
int
write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp;
	mm_segment_t old_fs;
	loff_t pos = 0;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* open file to write */
	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);

	if (IS_ERR(fp)) {
		fp = NULL;
		printf("%s: open file error\n", __FUNCTION__);
		ret = -1;
		goto exit;
	}

	/* Write buf to file */
	fp->f_op->write(fp, buf, size, &pos);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
	fp->f_op->fsync(fp, 0, size-1, 1);
#else
	fp->f_op->fsync(fp, 1);
#endif /* KERNEL_VERSION(3, 1, 0) */

exit:
	/* free buf before return */
	if (buf) {
		MFREE(dhd->osh, buf, size);
	}
	/* close file before return */
	if (fp)
		filp_close(fp, current->files);
	/* restore previous address limit */
	set_fs(old_fs);

	return ret;
}
9807 #endif /* DHD_DEBUG */
9808
9809 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
9810 {
9811 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9812 unsigned long flags;
9813 int ret = 0;
9814
9815 if (dhd) {
9816 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9817 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
9818 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
9819 #ifdef CONFIG_HAS_WAKELOCK
9820 if (dhd->wakelock_rx_timeout_enable)
9821 wake_lock_timeout(&dhd->wl_rxwake,
9822 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
9823 if (dhd->wakelock_ctrl_timeout_enable)
9824 wake_lock_timeout(&dhd->wl_ctrlwake,
9825 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
9826 #endif
9827 dhd->wakelock_rx_timeout_enable = 0;
9828 dhd->wakelock_ctrl_timeout_enable = 0;
9829 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9830 }
9831 return ret;
9832 }
9833
9834 int net_os_wake_lock_timeout(struct net_device *dev)
9835 {
9836 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9837 int ret = 0;
9838
9839 if (dhd)
9840 ret = dhd_os_wake_lock_timeout(&dhd->pub);
9841 return ret;
9842 }
9843
9844 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
9845 {
9846 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9847 unsigned long flags;
9848
9849 if (dhd) {
9850 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9851 if (val > dhd->wakelock_rx_timeout_enable)
9852 dhd->wakelock_rx_timeout_enable = val;
9853 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9854 }
9855 return 0;
9856 }
9857
9858 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
9859 {
9860 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9861 unsigned long flags;
9862
9863 if (dhd) {
9864 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9865 if (val > dhd->wakelock_ctrl_timeout_enable)
9866 dhd->wakelock_ctrl_timeout_enable = val;
9867 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9868 }
9869 return 0;
9870 }
9871
9872 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
9873 {
9874 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9875 unsigned long flags;
9876
9877 if (dhd) {
9878 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9879 dhd->wakelock_ctrl_timeout_enable = 0;
9880 #ifdef CONFIG_HAS_WAKELOCK
9881 if (wake_lock_active(&dhd->wl_ctrlwake))
9882 wake_unlock(&dhd->wl_ctrlwake);
9883 #endif
9884 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9885 }
9886 return 0;
9887 }
9888
9889 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
9890 {
9891 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9892 int ret = 0;
9893
9894 if (dhd)
9895 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
9896 return ret;
9897 }
9898
9899 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
9900 {
9901 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9902 int ret = 0;
9903
9904 if (dhd)
9905 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
9906 return ret;
9907 }
9908
9909 int dhd_os_wake_lock(dhd_pub_t *pub)
9910 {
9911 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9912 unsigned long flags;
9913 int ret = 0;
9914
9915 if (dhd) {
9916 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9917
9918 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
9919 #ifdef CONFIG_HAS_WAKELOCK
9920 wake_lock(&dhd->wl_wifi);
9921 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9922 dhd_bus_dev_pm_stay_awake(pub);
9923 #endif
9924 }
9925 dhd->wakelock_counter++;
9926 ret = dhd->wakelock_counter;
9927 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9928 }
9929 return ret;
9930 }
9931
9932 int net_os_wake_lock(struct net_device *dev)
9933 {
9934 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9935 int ret = 0;
9936
9937 if (dhd)
9938 ret = dhd_os_wake_lock(&dhd->pub);
9939 return ret;
9940 }
9941
9942 int dhd_os_wake_unlock(dhd_pub_t *pub)
9943 {
9944 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9945 unsigned long flags;
9946 int ret = 0;
9947
9948 dhd_os_wake_lock_timeout(pub);
9949 if (dhd) {
9950 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9951 if (dhd->wakelock_counter > 0) {
9952 dhd->wakelock_counter--;
9953 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
9954 #ifdef CONFIG_HAS_WAKELOCK
9955 wake_unlock(&dhd->wl_wifi);
9956 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9957 dhd_bus_dev_pm_relax(pub);
9958 #endif
9959 }
9960 ret = dhd->wakelock_counter;
9961 }
9962 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9963 }
9964 return ret;
9965 }
9966
/* Return 1 when an internal wifi/watchdog wakelock is held (the host
 * should not suspend), otherwise 0.  With neither wakelock support nor the
 * (disabled) runtime-PM path compiled in this always returns 0.
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, \
	36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
9989
/* Like dhd_os_check_wakelock() but also considers the rx and ctrl timed
 * wakelocks.  Returns 1 when any of the four locks is active, else 0.
 */
int dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, \
	36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		wake_lock_active(&dhd->wl_wdwake) ||
		wake_lock_active(&dhd->wl_rxwake) ||
		wake_lock_active(&dhd->wl_ctrlwake))) {
		return 1;
	}
#elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
10015
10016 int net_os_wake_unlock(struct net_device *dev)
10017 {
10018 dhd_info_t *dhd = DHD_DEV_INFO(dev);
10019 int ret = 0;
10020
10021 if (dhd)
10022 ret = dhd_os_wake_unlock(&dhd->pub);
10023 return ret;
10024 }
10025
10026 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
10027 {
10028 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10029 unsigned long flags;
10030 int ret = 0;
10031
10032 if (dhd) {
10033 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
10034 #ifdef CONFIG_HAS_WAKELOCK
10035 /* if wakelock_wd_counter was never used : lock it at once */
10036 if (!dhd->wakelock_wd_counter)
10037 wake_lock(&dhd->wl_wdwake);
10038 #endif
10039 dhd->wakelock_wd_counter++;
10040 ret = dhd->wakelock_wd_counter;
10041 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
10042 }
10043 return ret;
10044 }
10045
10046 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
10047 {
10048 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10049 unsigned long flags;
10050 int ret = 0;
10051
10052 if (dhd) {
10053 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
10054 if (dhd->wakelock_wd_counter) {
10055 dhd->wakelock_wd_counter = 0;
10056 #ifdef CONFIG_HAS_WAKELOCK
10057 wake_unlock(&dhd->wl_wdwake);
10058 #endif
10059 }
10060 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
10061 }
10062 return ret;
10063 }
10064
10065 #ifdef BCMPCIE_OOB_HOST_WAKE
10066 int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
10067 {
10068 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10069 int ret = 0;
10070
10071 if (dhd) {
10072 #ifdef CONFIG_HAS_WAKELOCK
10073 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
10074 #endif
10075 }
10076 return ret;
10077 }
10078
10079 int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
10080 {
10081 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
10082 int ret = 0;
10083
10084 if (dhd) {
10085 #ifdef CONFIG_HAS_WAKELOCK
10086 /* if wl_intrwake is active, unlock it */
10087 if (wake_lock_active(&dhd->wl_intrwake)) {
10088 wake_unlock(&dhd->wl_intrwake);
10089 }
10090 #endif
10091 }
10092 return ret;
10093 }
10094 #endif /* BCMPCIE_OOB_HOST_WAKE */
10095
10096 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
10097 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
10098 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
	/* Begin a waived-wakelock section: while waived, dhd_os_wake_lock()
	 * bumps the counter without taking the OS lock.  Must be paired with
	 * dhd_os_wake_lock_restore().
	 */
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
		if (dhd->waive_wakelock == FALSE) {
			/* record current lock status */
			dhd->wakelock_before_waive = dhd->wakelock_counter;
			dhd->waive_wakelock = TRUE;
		}
		/* NOTE(review): returns the WATCHDOG counter, not
		 * wakelock_counter; restore() does the same — confirm this
		 * asymmetry is intentional before relying on the value.
		 */
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
10118
int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
	/* End a waived-wakelock section started by dhd_os_wake_lock_waive()
	 * and reconcile the OS wakelock with any lock/unlock calls that
	 * happened while waived.  Returns the watchdog wakelock counter.
	 */
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (!dhd)
		return 0;

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
	if (!dhd->waive_wakelock)
		goto exit;

	dhd->waive_wakelock = FALSE;
	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
	 * the lock in between, do the same by calling wake_unlock or pm_relax
	 */
	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_wifi);
#elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif
	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_wifi);
#elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(&dhd->pub);
#endif
	}
	dhd->wakelock_before_waive = 0;
exit:
	/* NOTE(review): returns wakelock_wd_counter (watchdog), matching
	 * dhd_os_wake_lock_waive() — confirm intentional.
	 */
	ret = dhd->wakelock_wd_counter;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	return ret;
}
10157
10158 bool dhd_os_check_if_up(dhd_pub_t *pub)
10159 {
10160 if (!pub)
10161 return FALSE;
10162 return pub->up;
10163 }
10164
10165 /* function to collect firmware, chip id and chip version info */
10166 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
10167 {
10168 int i;
10169
10170 i = snprintf(info_string, sizeof(info_string),
10171 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
10172 printf("%s\n", info_string);
10173
10174 if (!dhdp)
10175 return;
10176
10177 i = snprintf(&info_string[i], sizeof(info_string) - i,
10178 "\n Rev %x", dhd_conf_get_chiprev(dhdp));
10179 }
10180
10181 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
10182 {
10183 int ifidx;
10184 int ret = 0;
10185 dhd_info_t *dhd = NULL;
10186
10187 if (!net || !DEV_PRIV(net)) {
10188 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
10189 return -EINVAL;
10190 }
10191
10192 dhd = DHD_DEV_INFO(net);
10193 if (!dhd)
10194 return -EINVAL;
10195
10196 ifidx = dhd_net2idx(dhd, net);
10197 if (ifidx == DHD_BAD_IF) {
10198 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
10199 return -ENODEV;
10200 }
10201
10202 DHD_OS_WAKE_LOCK(&dhd->pub);
10203 DHD_PERIM_LOCK(&dhd->pub);
10204
10205 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
10206 dhd_check_hang(net, &dhd->pub, ret);
10207
10208 DHD_PERIM_UNLOCK(&dhd->pub);
10209 DHD_OS_WAKE_UNLOCK(&dhd->pub);
10210
10211 return ret;
10212 }
10213
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	/* Resolve the net_device for ifidx and run the common hang check on
	 * the ioctl result 'ret'.
	 */
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		/* NOTE(review): -EINVAL converted to bool evaluates as TRUE
		 * ("hang detected") for an invalid index — confirm callers
		 * expect that rather than FALSE.
		 */
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
10226
/* Return instance */
/* Returns the unit number of this DHD instance (index among adapters). */
int dhd_get_instance(dhd_pub_t *dhdp)
{
	return dhdp->info->unit;
}
10232
10233
10234 #ifdef PROP_TXSTATUS
10235
/* Platform hook called at wlfc init; no platform-specific work needed. */
void dhd_wlfc_plat_init(void *dhd)
{
	return;
}
10240
/* Platform hook called at wlfc deinit; no platform-specific work needed. */
void dhd_wlfc_plat_deinit(void *dhd)
{
	return;
}
10245
/* Whether to bypass wlfc flow control on this platform; never skipped here. */
bool dhd_wlfc_skip_fc(void)
{
	return FALSE;
}
10250 #endif /* PROP_TXSTATUS */
10251
10252 #ifdef BCMDBGFS
10253
10254 #include <linux/debugfs.h>
10255
10256 extern uint32 dhd_readregl(void *bp, uint32 addr);
10257 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
10258
/* Bookkeeping for the debugfs register-access interface. */
typedef struct dhd_dbgfs {
	struct dentry *debugfs_dir;	/* /sys/kernel/debug/dhd directory */
	struct dentry *debugfs_mem;	/* "mem" node for register reads/writes */
	dhd_pub_t *dhdp;		/* back-pointer to the DHD instance */
	uint32 size;			/* addressable window size in bytes */
} dhd_dbgfs_t;
10265
10266 dhd_dbgfs_t g_dbgfs;
10267
/* debugfs open: stash the inode's private data on the file handle. */
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
10274
/* debugfs read handler: reads a 32-bit register at the (4-byte aligned)
 * file position and copies it to userspace.
 */
static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	ssize_t rval;
	uint32 tmp;
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	/* NOTE(review): reads via file->f_pos rather than the clamped 'pos',
	 * and always copies 4 bytes regardless of 'count' — presumably the
	 * interface expects aligned 4-byte reads only; confirm before use.
	 */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

	ret = copy_to_user(ubuf, &tmp, 4);
	if (ret == count)
		return -EFAULT;

	count -= ret;
	*ppos = pos + count;
	rval = count;

	return rval;
}
10304
10305
/* debugfs write handler: copies a 32-bit value from userspace and writes
 * it to the register at the (4-byte aligned) file position.
 */
static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	size_t ret;
	uint32 buf;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* NOTE(review): always copies sizeof(uint32) regardless of 'count'
	 * — presumably the interface expects 4-byte writes only; confirm.
	 */
	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
	if (ret == count)
		return -EFAULT;

	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

	return count;
}
10329
10330
/* debugfs llseek: SEEK_SET / SEEK_CUR semantics are standard; whence==2
 * computes size - off (NOTE(review): standard SEEK_END would be size + off
 * with a typically negative off — presumably equivalent for the intended
 * callers, confirm).  Returns the new position or -EINVAL out of range.
 */
loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case 0:
			pos = off;
			break;
		case 1:
			pos = file->f_pos + off;
			break;
		case 2:
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
10348
/* file_operations for the debugfs "mem" register-access node. */
static const struct file_operations dhd_dbg_state_ops = {
	.read   = dhd_dbg_state_read,
	.write	= dhd_debugfs_write,
	.open   = dhd_dbg_state_open,
	.llseek	= dhd_debugfs_lseek
};
10355
10356 static void dhd_dbg_create(void)
10357 {
10358 if (g_dbgfs.debugfs_dir) {
10359 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
10360 NULL, &dhd_dbg_state_ops);
10361 }
10362 }
10363
10364 void dhd_dbg_init(dhd_pub_t *dhdp)
10365 {
10366 int err;
10367
10368 g_dbgfs.dhdp = dhdp;
10369 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
10370
10371 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
10372 if (IS_ERR(g_dbgfs.debugfs_dir)) {
10373 err = PTR_ERR(g_dbgfs.debugfs_dir);
10374 g_dbgfs.debugfs_dir = NULL;
10375 return;
10376 }
10377
10378 dhd_dbg_create();
10379
10380 return;
10381 }
10382
10383 void dhd_dbg_remove(void)
10384 {
10385 debugfs_remove(g_dbgfs.debugfs_mem);
10386 debugfs_remove(g_dbgfs.debugfs_dir);
10387
10388 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
10389
10390 }
10391 #endif /* ifdef BCMDBGFS */
10392
10393 #ifdef WLMEDIA_HTSF
10394
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	/* Stamp an outgoing packet with the HTSF timestamp record when its
	 * destination port falls in [tsport, tsport+20].  The byte offsets
	 * below (40 = UDP dest port, 44 = checksum, 82/84 = payload slots)
	 * assume a plain Ethernet+IPv4+UDP frame — TODO confirm; no header
	 * parsing is done here.
	 */
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/* timestamp packet */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
		/* memcpy(&proto, p1+26, 4); */
		memcpy(&dport, p1+40, 2);
		/* proto = ((ntoh32(proto))>> 16) & 0xFF; */
		dport = ntoh16(dport);
	}

	/* timestamp only if icmp or udb iperf with port 5555 */
	/* if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		/* Build the full timestamp record appended at HTSF_HOSTOFFSET */
		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic = HTSFMAGIC;
		ts.prio = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10 = get_cycles();
		ts.t10 = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
10438
10439 static void dhd_dump_htsfhisto(histo_t *his, char *s)
10440 {
10441 int pktcnt = 0, curval = 0, i;
10442 for (i = 0; i < (NUMBIN-2); i++) {
10443 curval += 500;
10444 printf("%d ", his->bin[i]);
10445 pktcnt += his->bin[i];
10446 }
10447 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
10448 his->bin[NUMBIN-1], s);
10449 }
10450
static
void sorttobin(int value, histo_t *histo)
{
	/* Bucket a latency delta into 500-unit-wide histogram bins.
	 * Layout: bin[NUMBIN-1] counts negative deltas, bin[NUMBIN-2] keeps
	 * the maximum value seen, bins 0..NUMBIN-3 are the regular buckets.
	 */
	int i, binval = 0;

	if (value < 0) {
		histo->bin[NUMBIN-1]++;
		return;
	}
	if (value > histo->bin[NUMBIN-2]) /* store the max value */
		histo->bin[NUMBIN-2] = value;

	for (i = 0; i < (NUMBIN-2); i++) {
		binval += 500; /* 500m s bins */
		if (value <= binval) {
			histo->bin[i]++;
			return;
		}
	}
	/* NOTE(review): values exceeding all buckets fall through to
	 * bin[NUMBIN-3], the same bin as the last loop bucket — presumably
	 * an intentional catch-all; confirm.
	 */
	histo->bin[NUMBIN-3]++;
}
10472
10473 static
10474 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
10475 {
10476 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
10477 struct sk_buff *skb;
10478 char *p1;
10479 uint16 old_magic;
10480 int d1, d2, d3, end2end;
10481 htsfts_t *htsf_ts;
10482 uint32 htsf;
10483
10484 skb = PKTTONATIVE(dhdp->osh, pktbuf);
10485 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
10486
10487 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
10488 memcpy(&old_magic, p1+78, 2);
10489 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
10490 }
10491 else
10492 return;
10493
10494 if (htsf_ts->magic == HTSFMAGIC) {
10495 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
10496 htsf_ts->cE0 = get_cycles();
10497 }
10498
10499 if (old_magic == 0xACAC) {
10500
10501 tspktcnt++;
10502 htsf = dhd_get_htsf(dhd, 0);
10503 memcpy(skb->data+92, &htsf, sizeof(uint32));
10504
10505 memcpy(&ts[tsidx].t1, skb->data+80, 16);
10506
10507 d1 = ts[tsidx].t2 - ts[tsidx].t1;
10508 d2 = ts[tsidx].t3 - ts[tsidx].t2;
10509 d3 = ts[tsidx].t4 - ts[tsidx].t3;
10510 end2end = ts[tsidx].t4 - ts[tsidx].t1;
10511
10512 sorttobin(d1, &vi_d1);
10513 sorttobin(d2, &vi_d2);
10514 sorttobin(d3, &vi_d3);
10515 sorttobin(end2end, &vi_d4);
10516
10517 if (end2end > 0 && end2end > maxdelay) {
10518 maxdelay = end2end;
10519 maxdelaypktno = tspktcnt;
10520 memcpy(&maxdelayts, &ts[tsidx], 16);
10521 }
10522 if (++tsidx >= TSMAX)
10523 tsidx = 0;
10524 }
10525 }
10526
10527 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
10528 {
10529 uint32 htsf = 0, cur_cycle, delta, delta_us;
10530 uint32 factor, baseval, baseval2;
10531 cycles_t t;
10532
10533 t = get_cycles();
10534 cur_cycle = t;
10535
10536 if (cur_cycle > dhd->htsf.last_cycle)
10537 delta = cur_cycle - dhd->htsf.last_cycle;
10538 else {
10539 delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
10540 }
10541
10542 delta = delta >> 4;
10543
10544 if (dhd->htsf.coef) {
10545 /* times ten to get the first digit */
10546 factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
10547 baseval = (delta*10)/factor;
10548 baseval2 = (delta*10)/(factor+1);
10549 delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
10550 htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
10551 }
10552 else {
10553 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
10554 }
10555
10556 return htsf;
10557 }
10558
10559 static void dhd_dump_latency(void)
10560 {
10561 int i, max = 0;
10562 int d1, d2, d3, d4, d5;
10563
10564 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
10565 for (i = 0; i < TSMAX; i++) {
10566 d1 = ts[i].t2 - ts[i].t1;
10567 d2 = ts[i].t3 - ts[i].t2;
10568 d3 = ts[i].t4 - ts[i].t3;
10569 d4 = ts[i].t4 - ts[i].t1;
10570 d5 = ts[max].t4-ts[max].t1;
10571 if (d4 > d5 && d4 > 0) {
10572 max = i;
10573 }
10574 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
10575 ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
10576 d1, d2, d3, d4, i);
10577 }
10578
10579 printf("current idx = %d \n", tsidx);
10580
10581 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
10582 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
10583 maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
10584 maxdelayts.t2 - maxdelayts.t1,
10585 maxdelayts.t3 - maxdelayts.t2,
10586 maxdelayts.t4 - maxdelayts.t3,
10587 maxdelayts.t4 - maxdelayts.t1);
10588 }
10589
10590
10591 static int
10592 dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
10593 {
10594 wl_ioctl_t ioc;
10595 char buf[32];
10596 int ret;
10597 uint32 s1, s2;
10598
10599 struct tsf {
10600 uint32 low;
10601 uint32 high;
10602 } tsf_buf;
10603
10604 memset(&ioc, 0, sizeof(ioc));
10605 memset(&tsf_buf, 0, sizeof(tsf_buf));
10606
10607 ioc.cmd = WLC_GET_VAR;
10608 ioc.buf = buf;
10609 ioc.len = (uint)sizeof(buf);
10610 ioc.set = FALSE;
10611
10612 strncpy(buf, "tsf", sizeof(buf) - 1);
10613 buf[sizeof(buf) - 1] = '\0';
10614 s1 = dhd_get_htsf(dhd, 0);
10615 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
10616 if (ret == -EIO) {
10617 DHD_ERROR(("%s: tsf is not supported by device\n",
10618 dhd_ifname(&dhd->pub, ifidx)));
10619 return -EOPNOTSUPP;
10620 }
10621 return ret;
10622 }
10623 s2 = dhd_get_htsf(dhd, 0);
10624
10625 memcpy(&tsf_buf, buf, sizeof(tsf_buf));
10626 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
10627 tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
10628 dhd->htsf.coefdec2, s2-tsf_buf.low);
10629 printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
10630 return 0;
10631 }
10632
10633 void htsf_update(dhd_info_t *dhd, void *data)
10634 {
10635 static ulong cur_cycle = 0, prev_cycle = 0;
10636 uint32 htsf, tsf_delta = 0;
10637 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
10638 ulong b, a;
10639 cycles_t t;
10640
10641 /* cycles_t in inlcude/mips/timex.h */
10642
10643 t = get_cycles();
10644
10645 prev_cycle = cur_cycle;
10646 cur_cycle = t;
10647
10648 if (cur_cycle > prev_cycle)
10649 cyc_delta = cur_cycle - prev_cycle;
10650 else {
10651 b = cur_cycle;
10652 a = prev_cycle;
10653 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
10654 }
10655
10656 if (data == NULL)
10657 printf(" tsf update ata point er is null \n");
10658
10659 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
10660 memcpy(&cur_tsf, data, sizeof(tsf_t));
10661
10662 if (cur_tsf.low == 0) {
10663 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
10664 return;
10665 }
10666
10667 if (cur_tsf.low > prev_tsf.low)
10668 tsf_delta = (cur_tsf.low - prev_tsf.low);
10669 else {
10670 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
10671 cur_tsf.low, prev_tsf.low));
10672 if (cur_tsf.high > prev_tsf.high) {
10673 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
10674 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
10675 }
10676 else
10677 return; /* do not update */
10678 }
10679
10680 if (tsf_delta) {
10681 hfactor = cyc_delta / tsf_delta;
10682 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
10683 dec1 = tmp/tsf_delta;
10684 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
10685 tmp = (tmp - (dec1*tsf_delta))*10;
10686 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
10687
10688 if (dec3 > 4) {
10689 if (dec2 == 9) {
10690 dec2 = 0;
10691 if (dec1 == 9) {
10692 dec1 = 0;
10693 hfactor++;
10694 }
10695 else {
10696 dec1++;
10697 }
10698 }
10699 else
10700 dec2++;
10701 }
10702 }
10703
10704 if (hfactor) {
10705 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
10706 dhd->htsf.coef = hfactor;
10707 dhd->htsf.last_cycle = cur_cycle;
10708 dhd->htsf.last_tsf = cur_tsf.low;
10709 dhd->htsf.coefdec1 = dec1;
10710 dhd->htsf.coefdec2 = dec2;
10711 }
10712 else {
10713 htsf = prev_tsf.low;
10714 }
10715 }
10716
10717 #endif /* WLMEDIA_HTSF */
10718
10719 #ifdef CUSTOM_SET_CPUCORE
10720 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
10721 {
10722 int e_dpc = 0, e_rxf = 0, retry_set = 0;
10723
10724 if (!(dhd->chan_isvht80)) {
10725 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
10726 return;
10727 }
10728
10729 if (DPC_CPUCORE) {
10730 do {
10731 if (set == TRUE) {
10732 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
10733 cpumask_of(DPC_CPUCORE));
10734 } else {
10735 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
10736 cpumask_of(PRIMARY_CPUCORE));
10737 }
10738 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
10739 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
10740 return;
10741 }
10742 if (e_dpc < 0)
10743 OSL_SLEEP(1);
10744 } while (e_dpc < 0);
10745 }
10746 if (RXF_CPUCORE) {
10747 do {
10748 if (set == TRUE) {
10749 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
10750 cpumask_of(RXF_CPUCORE));
10751 } else {
10752 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
10753 cpumask_of(PRIMARY_CPUCORE));
10754 }
10755 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
10756 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
10757 return;
10758 }
10759 if (e_rxf < 0)
10760 OSL_SLEEP(1);
10761 } while (e_rxf < 0);
10762 }
10763 #ifdef DHD_OF_SUPPORT
10764 interrupt_set_cpucore(set);
10765 #endif /* DHD_OF_SUPPORT */
10766 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
10767
10768 return;
10769 }
10770 #endif /* CUSTOM_SET_CPUCORE */
10771 #if defined(DHD_TCP_WINSIZE_ADJUST)
10772 static int dhd_port_list_match(int port)
10773 {
10774 int i;
10775 for (i = 0; i < MAX_TARGET_PORTS; i++) {
10776 if (target_ports[i] == port)
10777 return 1;
10778 }
10779 return 0;
10780 }
/* Enlarge small advertised TCP receive windows on frames handled in
 * HostAP mode: windows below MIN_TCP_WIN_SIZE destined for a port in
 * target_ports[] are scaled by WIN_SIZE_SCALE_FACTOR, and the TCP
 * checksum is patched incrementally to match.
 * NOTE(review): assumes skb->data points at the IP header (L2 header
 * already stripped) -- confirm against the caller.
 */
static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
{
	struct iphdr *ipheader;
	struct tcphdr *tcpheader;
	uint16 win_size;
	int32 incremental_checksum;

	/* Only adjust when the dongle operates as an AP. */
	if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
		return;
	if (skb == NULL || skb->data == NULL)
		return;

	ipheader = (struct iphdr*)(skb->data);

	if (ipheader->protocol == IPPROTO_TCP) {
		/* Advance past the (possibly option-bearing) IP header. */
		tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
		if (tcpheader) {
			win_size = ntoh16(tcpheader->window);
			if (win_size < MIN_TCP_WIN_SIZE &&
				dhd_port_list_match(ntoh16(tcpheader->dest))) {
				/* Fold the window-field delta into the existing
				 * checksum (incremental update). */
				incremental_checksum = ntoh16(tcpheader->check);
				incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
				if (incremental_checksum < 0)
					--incremental_checksum;
				tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
				tcpheader->check = hton16((unsigned short)incremental_checksum);
			}
		}
		/* Restore skb->data to the IP header for later consumers. */
		skb_push(skb, (ipheader->ihl)<<2);
	}
}
10812 #endif /* DHD_TCP_WINSIZE_ADJUST */
10813
10814 /* Get interface specific ap_isolate configuration */
10815 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
10816 {
10817 dhd_info_t *dhd = dhdp->info;
10818 dhd_if_t *ifp;
10819
10820 ASSERT(idx < DHD_MAX_IFS);
10821
10822 ifp = dhd->iflist[idx];
10823
10824 return ifp->ap_isolate;
10825 }
10826
10827 /* Set interface specific ap_isolate configuration */
10828 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
10829 {
10830 dhd_info_t *dhd = dhdp->info;
10831 dhd_if_t *ifp;
10832
10833 ASSERT(idx < DHD_MAX_IFS);
10834
10835 ifp = dhd->iflist[idx];
10836
10837 ifp->ap_isolate = val;
10838
10839 return 0;
10840 }
10841
10842 #if defined(DHD_DEBUG)
10843 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
10844 {
10845 dhd_dump_t *dump = NULL;
10846 dump = MALLOC(dhdp->osh, sizeof(dhd_dump_t));
10847 dump->buf = buf;
10848 dump->bufsize = size;
10849 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
10850 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
10851 }
10852
10853 static void
10854 dhd_mem_dump(void *handle, void *event_info, u8 event)
10855 {
10856 dhd_info_t *dhd = handle;
10857 dhd_dump_t *dump = event_info;
10858
10859 if (!dhd || !dump)
10860 return;
10861
10862 if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
10863 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
10864 }
10865 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
10866 }
10867 #endif /* DHD_DEBUG */
10868
10869 #ifdef DHD_WMF
10870 /* Returns interface specific WMF configuration */
10871 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
10872 {
10873 dhd_info_t *dhd = dhdp->info;
10874 dhd_if_t *ifp;
10875
10876 ASSERT(idx < DHD_MAX_IFS);
10877
10878 ifp = dhd->iflist[idx];
10879 return &ifp->wmf;
10880 }
10881 #endif /* DHD_WMF */
10882
10883
10884 #ifdef DHD_UNICAST_DHCP
/* Locate the EtherType of a packet, handling Ethernet II frames,
 * SNAP-encapsulated 802.3 frames, and an optional single 802.1Q VLAN tag.
 *
 * On BCME_OK the out-parameters are filled in:
 *   data_ptr - first byte of the L3 payload
 *   len_ptr  - remaining length after the L2 (and VLAN) headers
 *   et_ptr   - host-order EtherType
 *   snap_ptr - TRUE if the frame was SNAP encapsulated
 * Returns BCME_ERROR for short, truncated, or non-SNAP 802.3 frames.
 */
static int
dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
	uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
{
	uint8 *frame = PKTDATA(pub->osh, pktbuf);
	int length = PKTLEN(pub->osh, pktbuf);
	uint8 *pt; /* Pointer to type field */
	uint16 ethertype;
	bool snap = FALSE;
	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
	if (length < ETHER_HDR_LEN) {
		DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
			__FUNCTION__, length));
		return BCME_ERROR;
	} else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
		/* Frame is Ethernet II */
		pt = frame + ETHER_TYPE_OFFSET;
	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
		!bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
		/* 802.3 + LLC/SNAP: the type field follows the SNAP header. */
		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
		snap = TRUE;
	} else {
		DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
			__FUNCTION__));
		return BCME_ERROR;
	}

	ethertype = ntoh16_ua(pt);

	/* Skip VLAN tag, if any */
	if (ethertype == ETHER_TYPE_8021Q) {
		pt += VLAN_TAG_LEN;

		/* The inner type field must still lie within the frame. */
		if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
			DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
				__FUNCTION__, length));
			return BCME_ERROR;
		}

		ethertype = ntoh16_ua(pt);
	}

	*data_ptr = pt + ETHER_TYPE_LEN;
	*len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
	*et_ptr = ethertype;
	*snap_ptr = snap;
	return BCME_OK;
}
10933
/* Parse the IPv4 header of a packet and return a pointer to its payload.
 *
 * On BCME_OK the out-parameters are filled in:
 *   data_ptr - first byte past the IPv4 header (options included)
 *   len_ptr  - IP payload length
 *   prot_ptr - IPv4 protocol number (e.g. IP_PROT_UDP)
 * Returns BCME_ERROR for non-IPv4, truncated, or fragmented packets.
 */
static int
dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
{
	struct ipv4_hdr *iph; /* IP frame pointer */
	int iplen; /* IP frame length */
	uint16 ethertype, iphdrlen, ippktlen;
	uint16 iph_frag;
	uint8 prot;
	bool snap;

	if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
		&iplen, &ethertype, &snap) != 0)
		return BCME_ERROR;

	if (ethertype != ETHER_TYPE_IP) {
		return BCME_ERROR;
	}

	/* We support IPv4 only */
	if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
		return BCME_ERROR;
	}

	/* Header length sanity */
	iphdrlen = IPV4_HLEN(iph);

	/*
	 * Packet length sanity; sometimes we receive eth-frame size bigger
	 * than the IP content, which results in a bad tcp chksum
	 */
	ippktlen = ntoh16(iph->tot_len);
	if (ippktlen < iplen) {
		/* Ethernet padding beyond the IP total length: trust the
		 * IP header and trim the extra bytes. */
		DHD_INFO(("%s: extra frame length ignored\n",
			__FUNCTION__));
		iplen = ippktlen;
	} else if (ippktlen > iplen) {
		DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
			__FUNCTION__, ippktlen - iplen));
		return BCME_ERROR;
	}

	if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
		DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
			__FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
		return BCME_ERROR;
	}

	/*
	 * We don't handle fragmented IP packets. A first frag is indicated by the MF
	 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
	 */
	iph_frag = ntoh16(iph->frag);

	if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
		DHD_INFO(("DHD:%s: IP fragment not handled\n",
			__FUNCTION__));
		return BCME_ERROR;
	}

	prot = IPV4_PROT(iph);

	*data_ptr = (((uint8 *)iph) + iphdrlen);
	*len_ptr = iplen - iphdrlen;
	*prot_ptr = prot;
	return BCME_OK;
}
11002
11003 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
/** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
/* Rewrites the Ethernet destination of a broadcast/multicast DHCP reply
 * (offer/ack) to the client's hardware address taken from the DHCP
 * 'chaddr' field, provided that client is a known associated station.
 * Returns BCME_OK when the frame was converted, BCME_ERROR otherwise
 * (non-multicast, non-UDP, wrong port, short frame, or unknown station).
 */
static
int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
{
	dhd_sta_t* stainfo;
	uint8 *eh = PKTDATA(pub->osh, pktbuf);
	uint8 *udph;
	uint8 *dhcp;
	uint8 *chaddr;
	int udpl;
	int dhcpl;
	uint16 port;
	uint8 prot;

	/* Only broadcast/multicast frames need conversion. */
	if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
		return BCME_ERROR;
	if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
		return BCME_ERROR;
	if (prot != IP_PROT_UDP)
		return BCME_ERROR;
	/* check frame length, at least UDP_HDR_LEN */
	if (udpl < UDP_HDR_LEN) {
		DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
	/* only process DHCP packets from server to client */
	if (port != DHCP_PORT_CLIENT)
		return BCME_ERROR;

	dhcp = udph + UDP_HDR_LEN;
	dhcpl = udpl - UDP_HDR_LEN;

	/* The payload must at least reach the end of the chaddr field. */
	if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
		DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	/* only process DHCP reply(offer/ack) packets */
	if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
		return BCME_ERROR;
	chaddr = dhcp + DHCP_CHADDR_OFFSET;
	/* Convert only if the client is a known associated station. */
	stainfo = dhd_find_sta(pub, ifidx, chaddr);
	if (stainfo) {
		bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
		return BCME_OK;
	}
	return BCME_ERROR;
}
#endif /* DHD_UNICAST_DHCP */
11054 #ifdef DHD_L2_FILTER
11055 /* Check if packet type is ICMP ECHO */
11056 static
11057 int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
11058 {
11059 struct bcmicmp_hdr *icmph;
11060 int udpl;
11061 uint8 prot;
11062
11063 if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
11064 return BCME_ERROR;
11065 if (prot == IP_PROT_ICMP) {
11066 if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
11067 return BCME_OK;
11068 }
11069 return BCME_ERROR;
11070 }
11071 #endif /* DHD_L2_FILTER */
11072
11073 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
11074 int dhd_rps_cpus_enable(struct net_device *net, int enable)
11075 {
11076 dhd_info_t *dhd = DHD_DEV_INFO(net);
11077 dhd_if_t *ifp;
11078 int ifidx;
11079 char * RPS_CPU_SETBUF;
11080
11081 ifidx = dhd_net2idx(dhd, net);
11082 if (ifidx == DHD_BAD_IF) {
11083 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
11084 return -ENODEV;
11085 }
11086
11087 if (ifidx == PRIMARY_INF) {
11088 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
11089 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
11090 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
11091 } else {
11092 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
11093 RPS_CPU_SETBUF = RPS_CPUS_MASK;
11094 }
11095 } else if (ifidx == VIRTUAL_INF) {
11096 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
11097 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
11098 } else {
11099 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
11100 return -EINVAL;
11101 }
11102
11103 ifp = dhd->iflist[ifidx];
11104 if (ifp) {
11105 if (enable) {
11106 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
11107 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
11108 } else {
11109 custom_rps_map_clear(ifp->net->_rx);
11110 }
11111 } else {
11112 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
11113 return -ENODEV;
11114 }
11115 return BCME_OK;
11116 }
11117
/* Parse a cpu bitmap string (same format as sysfs rps_cpus) and install it
 * as the RPS map for the given rx queue, replacing any previous map under
 * a local spinlock with RCU-deferred freeing of the old one.
 * Returns the number of mapped cpus on success, a negative errno on
 * allocation/parse failure, or -1 when the mask selects no cpu.
 */
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	/* Serializes concurrent map replacement on this queue. */
	static DEFINE_SPINLOCK(rps_map_lock);

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	/* At least L1_CACHE_BYTES so the map sits on its own cache line. */
	map = kzalloc(max_t(unsigned int,
		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	/* Flatten the cpumask into the map's cpu array. */
	i = 0;
	for_each_cpu(cpu, mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
		free_cpumask_var(mask);
		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
		return -1;
	}

	/* Publish the new map: readers use RCU, writers this spinlock. */
	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
		lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		/* The old map may still be in use by RCU readers: defer the
		 * free and rebalance the rps_needed static key. */
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);

	DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
	return map->len;
}
11179
11180 void custom_rps_map_clear(struct netdev_rx_queue *queue)
11181 {
11182 struct rps_map *map;
11183
11184 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
11185
11186 map = rcu_dereference_protected(queue->rps_map, 1);
11187 if (map) {
11188 RCU_INIT_POINTER(queue->rps_map, NULL);
11189 kfree_rcu(map, rcu);
11190 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
11191 }
11192 }
11193 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
11194
11195 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
11196 int
11197 argos_register_notifier_init(struct net_device *net)
11198 {
11199 int ret = 0;
11200
11201 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
11202 argos_rps_ctrl_data.wlan_primary_netdev = net;
11203 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
11204
11205 ret = sec_argos_register_notifier(&argos_wifi, "WIFI");
11206 if (ret < 0) {
11207 DHD_ERROR(("DHD:Failed to register WIFI notifier , ret=%d\n", ret));
11208 }
11209
11210 return ret;
11211 }
11212
11213 int
11214 argos_register_notifier_deinit(void)
11215 {
11216 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
11217
11218 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
11219 DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__));
11220 return -1;
11221 }
11222 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
11223
11224 sec_argos_unregister_notifier(&argos_wifi, "WIFI");
11225 argos_rps_ctrl_data.wlan_primary_netdev = NULL;
11226 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
11227
11228 return 0;
11229 }
11230
11231 int
11232 argos_status_notifier_wifi_cb(struct notifier_block *notifier,
11233 unsigned long speed, void *v)
11234 {
11235 int err = 0;
11236 DHD_INFO(("DHD: %s: , speed=%ld\n", __FUNCTION__, speed));
11237 if (speed > RPS_TPUT_THRESHOLD && argos_rps_ctrl_data.wlan_primary_netdev != NULL &&
11238 argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
11239 if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
11240 err = custom_rps_map_set(argos_rps_ctrl_data.wlan_primary_netdev->_rx,
11241 RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
11242 if (err < 0)
11243 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. speed=%ld, error=%d\n",
11244 __FUNCTION__, speed, err));
11245 else {
11246 argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
11247 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
11248 __FUNCTION__, speed));
11249 }
11250 } else {
11251 DHD_ERROR(("DHD: %s: RPS_Set fail, Core=%d Offline\n", __FUNCTION__,
11252 RPS_CPUS_WLAN_CORE_ID));
11253 }
11254 } else if (speed <= RPS_TPUT_THRESHOLD && argos_rps_ctrl_data.wlan_primary_netdev != NULL) {
11255 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
11256 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
11257 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
11258 OSL_SLEEP(300);
11259 }
11260 return NOTIFY_OK;
11261 }
11262 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
11263
11264 void *dhd_get_pub(struct net_device *dev)
11265 {
11266 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
11267 if (dhdinfo)
11268 return (void *)&dhdinfo->pub;
11269 else
11270 return NULL;
11271 }