dhd: import wifi and bluetooth firmware
[GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd_1_201_59_x / dhd_linux.c
1 /*
2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
4 *
5 * $Copyright Open Broadcom Corporation$
6 *
7 * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $
8 */
9
10 #include <typedefs.h>
11 #include <linuxver.h>
12 #include <osl.h>
13 #ifdef SHOW_LOGTRACE
14 #include <linux/syscalls.h>
15 #include <event_log.h>
16 #endif /* SHOW_LOGTRACE */
17
18
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/random.h>
28 #include <linux/spinlock.h>
29 #include <linux/ethtool.h>
30 #include <linux/fcntl.h>
31 #include <linux/fs.h>
32 #include <linux/ip.h>
33 #include <linux/reboot.h>
34 #include <linux/notifier.h>
35 #include <net/addrconf.h>
36 #ifdef ENABLE_ADAPTIVE_SCHED
37 #include <linux/cpufreq.h>
38 #endif /* ENABLE_ADAPTIVE_SCHED */
39
40 #include <asm/uaccess.h>
41 #include <asm/unaligned.h>
42
43 #include <epivers.h>
44 #include <bcmutils.h>
45 #include <bcmendian.h>
46 #include <bcmdevs.h>
47
48 #include <proto/ethernet.h>
49 #include <proto/bcmevent.h>
50 #include <proto/vlan.h>
51 #ifdef DHD_L2_FILTER
52 #include <proto/bcmicmp.h>
53 #endif
54 #include <proto/802.3.h>
55
56 #include <dngl_stats.h>
57 #include <dhd_linux_wq.h>
58 #include <dhd.h>
59 #include <dhd_linux.h>
60 #ifdef PCIE_FULL_DONGLE
61 #include <dhd_flowring.h>
62 #endif
63 #include <dhd_bus.h>
64 #include <dhd_proto.h>
65 #include <dhd_config.h>
66 #include <dhd_dbg.h>
67 #ifdef CONFIG_HAS_WAKELOCK
68 #include <linux/wakelock.h>
69 #endif
70 #ifdef WL_CFG80211
71 #include <wl_cfg80211.h>
72 #endif
73 #ifdef P2PONEINT
74 #include <wl_cfgp2p.h>
75 #endif
76 #ifdef PNO_SUPPORT
77 #include <dhd_pno.h>
78 #endif
79 #ifdef WLBTAMP
80 #include <proto/802.11_bta.h>
81 #include <proto/bt_amp_hci.h>
82 #include <dhd_bta.h>
83 #endif
84
85 #ifdef CONFIG_COMPAT
86 #include <linux/compat.h>
87 #endif
88
89 #ifdef DHD_WMF
90 #include <dhd_wmf_linux.h>
91 #endif /* DHD_WMF */
92
93 #ifdef AMPDU_VO_ENABLE
94 #include <proto/802.1d.h>
95 #endif /* AMPDU_VO_ENABLE */
96 #ifdef DHDTCPACK_SUPPRESS
97 #include <dhd_ip.h>
98 #endif /* DHDTCPACK_SUPPRESS */
99
100 #if defined(DHD_TCP_WINSIZE_ADJUST)
101 #include <linux/tcp.h>
102 #include <net/tcp.h>
103 #endif /* DHD_TCP_WINSIZE_ADJUST */
104
105 #ifdef WLMEDIA_HTSF
106 #include <linux/time.h>
107 #include <htsf.h>
108
109 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
110 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
111 #define TSMAX 1000 /* max no. of timing record kept */
112 #define NUMBIN 34
113
114 static uint32 tsidx = 0;
115 static uint32 htsf_seqnum = 0;
116 uint32 tsfsync;
117 struct timeval tsync;
118 static uint32 tsport = 5010;
119
120 typedef struct histo_ {
121 uint32 bin[NUMBIN];
122 } histo_t;
123
124 #if !ISPOWEROF2(DHD_SDALIGN)
125 #error DHD_SDALIGN is not a power of 2!
126 #endif
127
128 static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
129 #endif /* WLMEDIA_HTSF */
130
131 #if defined(DHD_TCP_WINSIZE_ADJUST)
132 #define MIN_TCP_WIN_SIZE 18000
133 #define WIN_SIZE_SCALE_FACTOR 2
134 #define MAX_TARGET_PORTS 5
135
136 static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
137 static uint dhd_use_tcp_window_size_adjust = FALSE;
138 static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
139 #endif /* DHD_TCP_WINSIZE_ADJUST */
140
141
142 #if defined(SOFTAP)
143 extern bool ap_cfg_running;
144 extern bool ap_fw_loaded;
145 #endif
146
147
148 #ifdef ENABLE_ADAPTIVE_SCHED
149 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
150 #ifndef CUSTOM_CPUFREQ_THRESH
151 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
152 #endif /* CUSTOM_CPUFREQ_THRESH */
153 #endif /* ENABLE_ADAPTIVE_SCHED */
154
155 /* enable HOSTIP cache update from the host side when an eth0:N is up */
156 #define AOE_IP_ALIAS_SUPPORT 1
157
158 #ifdef BCM_FD_AGGR
159 #include <bcm_rpc.h>
160 #include <bcm_rpc_tp.h>
161 #endif
162 #ifdef PROP_TXSTATUS
163 #include <wlfc_proto.h>
164 #include <dhd_wlfc.h>
165 #endif
166
167 #include <wl_android.h>
168
169 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
170 #include <sdaudio.h>
171 #endif /* CUSTOMER_HW20 && WLANAUDIO */
172
173 #ifdef CUSTOMER_HW_AMLOGIC
174 #include <linux/amlogic/wifi_dt.h>
175 #endif
176
177 /* Maximum STA per radio */
178 #define DHD_MAX_STA 32
179
180
181 const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
182 const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
183 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
184
185 #ifdef ARP_OFFLOAD_SUPPORT
186 void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
187 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
188 unsigned long event, void *ptr);
189 static struct notifier_block dhd_inetaddr_notifier = {
190 .notifier_call = dhd_inetaddr_notifier_call
191 };
192 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
193 * created in kernel notifier link list (with 'next' pointing to itself)
194 */
195 static bool dhd_inetaddr_notifier_registered = FALSE;
196 #endif /* ARP_OFFLOAD_SUPPORT */
197
198 #ifdef CONFIG_IPV6
199 static int dhd_inet6addr_notifier_call(struct notifier_block *this,
200 unsigned long event, void *ptr);
201 static struct notifier_block dhd_inet6addr_notifier = {
202 .notifier_call = dhd_inet6addr_notifier_call
203 };
204 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
205 * created in kernel notifier link list (with 'next' pointing to itself)
206 */
207 static bool dhd_inet6addr_notifier_registered = FALSE;
208 #endif
209
210 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
211 #include <linux/suspend.h>
212 volatile bool dhd_mmc_suspend = FALSE;
213 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
214 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
215
216 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
217 extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
218 #endif
219 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
220 static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
221 #endif
222 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
223 MODULE_LICENSE("GPL v2");
224 #endif /* LinuxVer */
225
226 #include <dhd_bus.h>
227
228 #ifdef BCM_FD_AGGR
229 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
230 #else
231 #ifndef PROP_TXSTATUS
232 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
233 #else
234 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
235 #endif
236 #endif /* BCM_FD_AGGR */
237
238 #ifdef PROP_TXSTATUS
239 extern bool dhd_wlfc_skip_fc(void);
240 extern void dhd_wlfc_plat_init(void *dhd);
241 extern void dhd_wlfc_plat_deinit(void *dhd);
242 #endif /* PROP_TXSTATUS */
243
244 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
/* Stub for kernels (2.6.15) that do not export print_tainted(); shared
 * logging code can then reference it unconditionally.
 * Fix: declare as (void) — an empty C parameter list means "unspecified
 * arguments", not "no arguments".
 */
const char *
print_tainted(void)
{
	return "";
}
250 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
251
252 /* Linux wireless extension support */
253 #if defined(WL_WIRELESS_EXT)
254 #include <wl_iw.h>
255 extern wl_iw_extra_params_t g_wl_iw_params;
256 #endif /* defined(WL_WIRELESS_EXT) */
257
258 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
259 #include <linux/earlysuspend.h>
260 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
261
262 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
263
264 #ifdef PKT_FILTER_SUPPORT
265 extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
266 extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
267 extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
268 #endif
269
270
271 #ifdef READ_MACADDR
272 extern int dhd_read_macaddr(struct dhd_info *dhd);
273 #else
274 static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
275 #endif
276 #ifdef WRITE_MACADDR
277 extern int dhd_write_macaddr(struct ether_addr *mac);
278 #else
279 static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
280 #endif
281
282
283 #if defined(SOFTAP_TPUT_ENHANCE)
284 extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
285 extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
286 #endif /* SOFTAP_TPUT_ENHANCE */
287
288
289 #ifdef SET_RPS_CPUS
290 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
291 void custom_rps_map_clear(struct netdev_rx_queue *queue);
292 #ifdef CONFIG_MACH_UNIVERSAL5433
293 #define RPS_CPUS_MASK "10"
294 #else
295 #define RPS_CPUS_MASK "6"
296 #endif /* CONFIG_MACH_UNIVERSAL5433 */
297 #endif /* SET_RPS_CPUS */
298
299 static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
300 static struct notifier_block dhd_reboot_notifier = {
301 .notifier_call = dhd_reboot_callback,
302 .priority = 1,
303 };
304
305
/* Queued payload describing a firmware interface add/del event; presumably
 * consumed by the deferred if-add/del handlers declared below — confirm.
 */
typedef struct dhd_if_event {
	struct list_head list;		/* linkage for the pending-event queue */
	wl_event_data_if_t event;	/* raw interface event data from firmware */
	char name[IFNAMSIZ+1];		/* linux interface name (NUL-terminated) */
	uint8 mac[ETHER_ADDR_LEN];	/* MAC address carried with the event */
} dhd_if_event_t;
312
/* Interface control information: per-virtual-interface state, one entry per
 * slot of dhd_info::iflist.
 */
typedef struct dhd_if {
	struct dhd_info *info;		/* back pointer to dhd_info */
	/* OS/stack specifics */
	struct net_device *net;		/* linux net device bound to this interface */
	int idx;			/* iface idx in dongle */
	uint subunit;			/* subunit */
	uint8 mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
	bool set_macaddress;		/* deferred MAC change pending (see dhd_set_mac_addr_handler) */
	bool set_multicast;		/* deferred mcast-list update pending (see dhd_set_mcast_list_handler) */
	uint8 bssidx;			/* bsscfg index for the interface */
	bool attached;			/* Delayed attachment when unset */
	bool txflowcontrol;		/* Per interface flow control indicator */
	char name[IFNAMSIZ+1];		/* linux interface name */
	struct net_device_stats stats;	/* per-interface packet/byte counters */
#ifdef DHD_WMF
	dhd_wmf_t wmf;			/* per bsscfg wmf setting */
#endif /* DHD_WMF */
#ifdef PCIE_FULL_DONGLE
	struct list_head sta_list;	/* sll of associated stations */
#if !defined(BCM_GMAC3)
	spinlock_t sta_list_lock;	/* lock for manipulating sll */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
	uint32 ap_isolate;		/* ap-isolation settings */
} dhd_if_t;
339
340 #ifdef WLMEDIA_HTSF
/* Split TSF value — presumably low/high halves of a 64-bit TSF; confirm. */
typedef struct {
	uint32 low;
	uint32 high;
} tsf_t;

/* Host/dongle TSF synchronization state (WLMEDIA_HTSF). */
typedef struct {
	uint32 last_cycle;
	uint32 last_sec;
	uint32 last_tsf;
	uint32 coef;		/* scaling factor */
	uint32 coefdec1;	/* first decimal */
	uint32 coefdec2;	/* second decimal */
} htsf_t;

/* Four-point timestamp record for one packet's latency measurement. */
typedef struct {
	uint32 t1;
	uint32 t2;
	uint32 t3;
	uint32 t4;
} tstamp_t;
361
362 static tstamp_t ts[TSMAX];
363 static tstamp_t maxdelayts;
364 static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
365
366 #endif /* WLMEDIA_HTSF */
367
/* Work item carrying an IPv6 address notification into the deferred
 * handler (dhd_inet6_work_handler).
 */
struct ipv6_work_info_t {
	uint8 if_idx;		/* dongle interface index */
	char ipv6_addr[16];	/* raw 128-bit IPv6 address */
	unsigned long event;	/* notifier event code */
};
373
374 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
375 #define MAX_WLANAUDIO_BLACKLIST 4
376
/* Per-station blacklist entry (CUSTOMER_HW20 && WLANAUDIO).
 * NOTE(review): field semantics inferred from names — confirm against the
 * code that populates dhd_info::wlanaudio_blist.
 */
struct wlanaudio_blacklist {
	bool is_blacklist;		/* entry currently blacklisted */
	uint32 cnt;			/* presumably tx-failure count — confirm */
	ulong txfail_jiffies;		/* jiffies of last recorded tx failure */
	struct ether_addr blacklist_addr; /* tracked station MAC address */
};
383 #endif /* CUSTOMER_HW20 && WLANAUDIO */
384
385 /* When Perimeter locks are deployed, any blocking calls must be preceded
386 * with a PERIM UNLOCK and followed by a PERIM LOCK.
387 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
388 * wait_event_timeout().
389 */
390
391 /* Local private structure (extension of pub) */
/* Local private structure (extension of pub): the Linux-specific half of the
 * driver state. One instance per adapter; dhd_pub_t (OS-independent state)
 * is embedded as 'pub'.
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;		/* OS-independent DHD state shared with common layer */
	dhd_if_t *iflist[DHD_MAX_IFS];	/* for supporting multiple interfaces */

	void *adapter;			/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t wlfc_spinlock;	/* protects proptxstatus (wlfc) state */

#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
	htsf_t htsf;
#endif
	wait_queue_head_t ioctl_resp_wait;	/* woken when a dongle ioctl response arrives */
	uint32 default_wd_interval;		/* watchdog interval restored after extension */

	struct timer_list timer;	/* watchdog timer */
	bool wd_timer_valid;		/* TRUE while the watchdog timer is armed */
	struct tasklet_struct tasklet;	/* DPC tasklet */
	spinlock_t sdlock;
	spinlock_t txqlock;
	spinlock_t dhd_lock;

	struct semaphore sdsem;
	tsk_ctl_t thr_dpc_ctl;		/* DPC thread control */
	tsk_ctl_t thr_wdt_ctl;		/* watchdog thread control */

	tsk_ctl_t thr_rxf_ctl;		/* rx-frame thread control */
	spinlock_t rxf_lock;
	bool rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	struct wake_lock wl_wifi;	/* Wifi wakelock */
	struct wake_lock wl_rxwake;	/* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake;	/* Wifi wd wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#endif
	spinlock_t wakelock_spinlock;	/* guards the wakelock counters below */
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;	/* outstanding 802.1x frames in flight */
	dhd_attach_states_t dhd_state;	/* attach progress, used for partial teardown */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;	/* host IP address held pending — see aoe_update_host_ipv4_table() */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef BCM_FD_AGGR
	void *rpc_th;
	void *rpc_osh;
	struct timer_list rpcth_timer;
	bool rpcth_timer_active;
	bool fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t tcpack_lock;		/* protects TCP-ACK suppression state */
#endif /* DHDTCPACK_SUPPRESS */
	void *dhd_deferred_wq;		/* workqueue for deferred (sleepable) event handlers */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif
	unsigned int unit;		/* instance number of this adapter */
	struct notifier_block pm_notifier;	/* dhd_pm_callback() recovers this struct via container_of */
#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
	struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST];
	bool is_wlanaudio_blist;
#endif /* CUSTOMER_HW20 && WLANAUDIO */
} dhd_info_t;
493
494 #define DHDIF_FWDER(dhdif) FALSE
495
496 /* Flag to indicate if we should download firmware on driver load */
497 uint dhd_download_fw_on_driverload = TRUE;
498
499 /* Definitions to provide path to the firmware and nvram
500 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
501 */
502 char firmware_path[MOD_PARAM_PATHLEN];
503 char nvram_path[MOD_PARAM_PATHLEN];
504 char config_path[MOD_PARAM_PATHLEN];
505
506 /* backup buffer for firmware and nvram path */
507 char fw_bak_path[MOD_PARAM_PATHLEN];
508 char nv_bak_path[MOD_PARAM_PATHLEN];
509
510 /* information string to keep firmware, chip, and chip-revision version info visible in the log */
511 char info_string[MOD_PARAM_INFOLEN];
512 module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
513 int op_mode = 0;
514 int disable_proptx = 0;
515 module_param(op_mode, int, 0644);
516 extern int wl_control_wl_start(struct net_device *dev);
517 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
518 struct semaphore dhd_registration_sem;
519 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
520
521 /* deferred handlers */
522 static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
523 static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
524 static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
525 static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
526 #ifdef CONFIG_IPV6
527 static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
528 #endif
529
530 #ifdef WL_CFG80211
531 extern void dhd_netdev_free(struct net_device *ndev);
532 #endif /* WL_CFG80211 */
533
534 /* Error bits */
535 module_param(dhd_msg_level, int, 0);
536 #if defined(WL_WIRELESS_EXT)
537 module_param(iw_msg_level, int, 0);
538 #endif
539 #ifdef WL_CFG80211
540 module_param(wl_dbg_level, int, 0);
541 #endif
542 module_param(android_msg_level, int, 0);
543 module_param(config_msg_level, int, 0);
544
545 #ifdef ARP_OFFLOAD_SUPPORT
546 /* ARP offload enable */
547 uint dhd_arp_enable = TRUE;
548 module_param(dhd_arp_enable, uint, 0);
549
550 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
551
552 uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
553
554 module_param(dhd_arp_mode, uint, 0);
555 #endif /* ARP_OFFLOAD_SUPPORT */
556
557 /* Disable Prop tx */
558 module_param(disable_proptx, int, 0644);
559 /* load firmware and/or nvram values from the filesystem */
560 module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
561 module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
562 module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
563
564 /* Watchdog interval */
565
566 /* extend watchdog expiration to 2 seconds when DPC is running */
567 #define WATCHDOG_EXTEND_INTERVAL (2000)
568
569 uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
570 module_param(dhd_watchdog_ms, uint, 0);
571
572 #if defined(DHD_DEBUG)
573 /* Console poll interval */
574 uint dhd_console_ms = 0;
575 module_param(dhd_console_ms, uint, 0644);
576 #endif /* defined(DHD_DEBUG) */
577
578
579 uint dhd_slpauto = TRUE;
580 module_param(dhd_slpauto, uint, 0);
581
582 #ifdef PKT_FILTER_SUPPORT
583 /* Global Pkt filter enable control */
584 uint dhd_pkt_filter_enable = TRUE;
585 module_param(dhd_pkt_filter_enable, uint, 0);
586 #endif
587
588 /* Pkt filter init setup */
589 uint dhd_pkt_filter_init = 0;
590 module_param(dhd_pkt_filter_init, uint, 0);
591
592 /* Pkt filter mode control */
593 uint dhd_master_mode = FALSE;
594 module_param(dhd_master_mode, uint, 0);
595
596 int dhd_watchdog_prio = 0;
597 module_param(dhd_watchdog_prio, int, 0);
598
599 /* DPC thread priority */
600 int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
601 module_param(dhd_dpc_prio, int, 0);
602
603 /* RX frame thread priority */
604 int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
605 module_param(dhd_rxf_prio, int, 0);
606
607 int passive_channel_skip = 0;
608 module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
609
610 #if !defined(BCMDHDUSB)
611 extern int dhd_dongle_ramsize;
612 module_param(dhd_dongle_ramsize, int, 0);
613 #endif /* BCMDHDUSB */
614
615 /* Keep track of number of instances */
616 static int dhd_found = 0;
617 static int instance_base = 0; /* Starting instance number */
618 module_param(instance_base, int, 0644);
619
620 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
621 dhd_info_t *dhd_global = NULL;
622 #endif /* CUSTOMER_HW20 && WLANAUDIO */
623
624
625
626 /* DHD Perimiter lock only used in router with bypass forwarding. */
627 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
628 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
629 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
630 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
631 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
632
633 #ifdef PCIE_FULL_DONGLE
634 #if defined(BCM_GMAC3)
635 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
636 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
637 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
638 #else /* ! BCM_GMAC3 */
639 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
640 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
641 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
642 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
643 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
644 #endif /* ! BCM_GMAC3 */
645 #endif /* PCIE_FULL_DONGLE */
646
647 /* Control fw roaming */
648 #ifdef BCMCCX
649 uint dhd_roam_disable = 0;
650 #else
651 uint dhd_roam_disable = 0;
652 #endif /* BCMCCX */
653
654 /* Control radio state */
655 uint dhd_radio_up = 1;
656
657 /* Network interface name */
658 char iface_name[IFNAMSIZ] = {'\0'};
659 module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
660
661 /* The following are specific to the SDIO dongle */
662
663 /* IOCTL response timeout */
664 int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
665
666 /* Idle timeout for backplane clock */
667 int dhd_idletime = DHD_IDLETIME_TICKS;
668 module_param(dhd_idletime, int, 0);
669
670 /* Use polling */
671 uint dhd_poll = FALSE;
672 module_param(dhd_poll, uint, 0);
673
674 /* Use interrupts */
675 uint dhd_intr = TRUE;
676 module_param(dhd_intr, uint, 0);
677
678 /* SDIO Drive Strength (in milliamps) */
679 uint dhd_sdiod_drive_strength = 6;
680 module_param(dhd_sdiod_drive_strength, uint, 0);
681
682 #ifdef BCMSDIO
683 /* Tx/Rx bounds */
684 extern uint dhd_txbound;
685 extern uint dhd_rxbound;
686 module_param(dhd_txbound, uint, 0);
687 module_param(dhd_rxbound, uint, 0);
688
689 /* Deferred transmits */
690 extern uint dhd_deferred_tx;
691 module_param(dhd_deferred_tx, uint, 0);
692
693 #ifdef BCMDBGFS
694 extern void dhd_dbg_init(dhd_pub_t *dhdp);
695 extern void dhd_dbg_remove(void);
696 #endif /* BCMDBGFS */
697
698 #endif /* BCMSDIO */
699
700
701 #ifdef SDTEST
702 /* Echo packet generator (pkts/s) */
703 uint dhd_pktgen = 0;
704 module_param(dhd_pktgen, uint, 0);
705
706 /* Echo packet len (0 => sawtooth, max 2040) */
707 uint dhd_pktgen_len = 0;
708 module_param(dhd_pktgen_len, uint, 0);
709 #endif /* SDTEST */
710
711 #if defined(BCMSUP_4WAY_HANDSHAKE)
712 /* Use in dongle supplicant for 4-way handshake */
713 uint dhd_use_idsup = 0;
714 module_param(dhd_use_idsup, uint, 0);
715 #endif /* BCMSUP_4WAY_HANDSHAKE */
716
717 extern char dhd_version[];
718
719 int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
720 static void dhd_net_if_lock_local(dhd_info_t *dhd);
721 static void dhd_net_if_unlock_local(dhd_info_t *dhd);
722 static void dhd_suspend_lock(dhd_pub_t *dhdp);
723 static void dhd_suspend_unlock(dhd_pub_t *dhdp);
724
725 #ifdef WLMEDIA_HTSF
726 void htsf_update(dhd_info_t *dhd, void *data);
727 tsf_t prev_tsf, cur_tsf;
728
729 uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
730 static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
731 static void dhd_dump_latency(void);
732 static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
733 static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
734 static void dhd_dump_htsfhisto(histo_t *his, char *s);
735 #endif /* WLMEDIA_HTSF */
736
737 /* Monitor interface */
738 int dhd_monitor_init(void *dhd_pub);
739 int dhd_monitor_uninit(void);
740
741
742 #if defined(WL_WIRELESS_EXT)
743 struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
744 #endif /* defined(WL_WIRELESS_EXT) */
745
746 static void dhd_dpc(ulong data);
747 /* forward decl */
748 extern int dhd_wait_pend8021x(struct net_device *dev);
749 void dhd_os_wd_timer_extend(void *bus, bool extend);
750
751 #ifdef TOE
752 #ifndef BDC
753 #error TOE requires BDC
754 #endif /* !BDC */
755 static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
756 static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
757 #endif /* TOE */
758
759 static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
760 wl_event_msg_t *event_ptr, void **data_ptr);
761 #ifdef DHD_UNICAST_DHCP
762 static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
763 static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
764 int *len_ptr, uint8 *prot_ptr);
765 static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
766 int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
767
768 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
769 #endif /* DHD_UNICAST_DHCP */
770 #ifdef DHD_L2_FILTER
771 static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
772 #endif
773 #if defined(CONFIG_PM_SLEEP)
774 static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
775 {
776 int ret = NOTIFY_DONE;
777 bool suspend = FALSE;
778 dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
779
780 BCM_REFERENCE(dhdinfo);
781 switch (action) {
782 case PM_HIBERNATION_PREPARE:
783 case PM_SUSPEND_PREPARE:
784 suspend = TRUE;
785 break;
786 case PM_POST_HIBERNATION:
787 case PM_POST_SUSPEND:
788 suspend = FALSE;
789 break;
790 }
791
792 #if defined(SUPPORT_P2P_GO_PS)
793 #ifdef PROP_TXSTATUS
794 if (suspend) {
795 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
796 dhd_wlfc_suspend(&dhdinfo->pub);
797 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
798 } else
799 dhd_wlfc_resume(&dhdinfo->pub);
800 #endif
801 #endif /* defined(SUPPORT_P2P_GO_PS) */
802
803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
804 KERNEL_VERSION(2, 6, 39))
805 dhd_mmc_suspend = suspend;
806 smp_mb();
807 #endif
808
809 return ret;
810 }
811
812 static struct notifier_block dhd_pm_notifier = {
813 .notifier_call = dhd_pm_callback,
814 .priority = 10
815 };
816 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
817 * created in kernel notifier link list (with 'next' pointing to itself)
818 */
819 static bool dhd_pm_notifier_registered = FALSE;
820
821 extern int register_pm_notifier(struct notifier_block *nb);
822 extern int unregister_pm_notifier(struct notifier_block *nb);
823 #endif /* CONFIG_PM_SLEEP */
824
825 /* Request scheduling of the bus rx frame */
826 static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
827 static void dhd_os_rxflock(dhd_pub_t *pub);
828 static void dhd_os_rxfunlock(dhd_pub_t *pub);
829
/** priv_link is the link between netdev and the dhdif and dhd_info structs.
 * Stored in the net_device private area (accessed via the DHD_DEV_* macros
 * below) so all three values are reachable from a netdev without a lookup.
 */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd;	/* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp;		/* cached pointer to dhd_if in netdevice priv */
	int ifidx;		/* interface index */
} dhd_dev_priv_t;
836
837 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
838 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
839 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
840 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
841 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
842
843 /** Clear the dhd net_device's private structure. */
844 static inline void
845 dhd_dev_priv_clear(struct net_device * dev)
846 {
847 dhd_dev_priv_t * dev_priv;
848 ASSERT(dev != (struct net_device *)NULL);
849 dev_priv = DHD_DEV_PRIV(dev);
850 dev_priv->dhd = (dhd_info_t *)NULL;
851 dev_priv->ifp = (dhd_if_t *)NULL;
852 dev_priv->ifidx = DHD_BAD_IF;
853 }
854
855 /** Setup the dhd net_device's private structure. */
856 static inline void
857 dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
858 int ifidx)
859 {
860 dhd_dev_priv_t * dev_priv;
861 ASSERT(dev != (struct net_device *)NULL);
862 dev_priv = DHD_DEV_PRIV(dev);
863 dev_priv->dhd = dhd;
864 dev_priv->ifp = ifp;
865 dev_priv->ifidx = ifidx;
866 }
867
868 #ifdef PCIE_FULL_DONGLE
869
870 /** Dummy objects are defined with state representing bad|down.
871 * Performance gains from reducing branch conditionals, instruction parallelism,
872 * dual issue, reducing load shadows, avail of larger pipelines.
873 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
874 * is accessed via the dhd_sta_t.
875 */
876
/* Dummy dhd_info object: sentinel whose state reads as "bad|down" (see the
 * note above PCIE_FULL_DONGLE), used instead of NULL pointers.
 */
dhd_info_t dhd_info_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
	.pub = {
		.info = &dhd_info_null,	/* self-referential back pointer */
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE, .busstate = DHD_BUS_DOWN	/* reads as not-up, bus down */
	}
};
890 #define DHD_INFO_NULL (&dhd_info_null)
891 #define DHD_PUB_NULL (&dhd_info_null.pub)
892
/* Dummy netdevice object: permanently "unregistered" sentinel. */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
897 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
898
/* Dummy dhd_if object: points at the dummy info/netdev sentinels and an
 * invalid interface index; dhd_sta_free() parks freed stations here.
 */
dhd_if_t dhd_if_null = {
#if defined(BCM_GMAC3)
	.fwdh = FWDER_NULL,
#endif
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
911 #define DHD_IF_NULL (&dhd_if_null)
912
913 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
914
915 /** Interface STA list management. */
916
917 /** Fetch the dhd_if object, given the interface index in the dhd. */
918 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
919
920 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
921 static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
922 static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
923
924 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
925 static void dhd_if_del_sta_list(dhd_if_t * ifp);
926 static void dhd_if_flush_sta(dhd_if_t * ifp);
927
928 /* Construct/Destruct a sta pool. */
929 static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
930 static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
931 static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
932
933
934 /* Return interface pointer */
935 static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
936 {
937 ASSERT(ifidx < DHD_MAX_IFS);
938
939 if (ifidx >= DHD_MAX_IFS)
940 return NULL;
941
942 return dhdp->info->iflist[ifidx];
943 }
944
945 /** Reset a dhd_sta object and free into the dhd pool. */
946 static void
947 dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
948 {
949 int prio;
950
951 ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
952
953 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
954 id16_map_free(dhdp->staid_allocator, sta->idx);
955 for (prio = 0; prio < (int)NUMPRIO; prio++)
956 sta->flowid[prio] = FLOWID_INVALID;
957 sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
958 sta->ifidx = DHD_BAD_IF;
959 bzero(sta->ea.octet, ETHER_ADDR_LEN);
960 INIT_LIST_HEAD(&sta->list);
961 sta->idx = ID16_INVALID; /* implying free */
962 }
963
964 /** Allocate a dhd_sta object from the dhd pool. */
965 static dhd_sta_t *
966 dhd_sta_alloc(dhd_pub_t * dhdp)
967 {
968 uint16 idx;
969 dhd_sta_t * sta;
970 dhd_sta_pool_t * sta_pool;
971
972 ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
973
974 idx = id16_map_alloc(dhdp->staid_allocator);
975 if (idx == ID16_INVALID) {
976 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
977 return DHD_STA_NULL;
978 }
979
980 sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
981 sta = &sta_pool[idx];
982
983 ASSERT((sta->idx == ID16_INVALID) &&
984 (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
985 sta->idx = idx; /* implying allocated */
986
987 return sta;
988 }
989
/** Delete all STAs in an interface's STA list: unlink each entry and return
 * it to the per-radio pool, all under the interface's sta_list lock.
 */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	/* _safe variant: each sta is unlinked (and freed) while iterating. */
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
#if defined(BCM_GMAC3)
		if (ifp->fwdh) {
			/* Remove sta from WOFA forwarder. */
			fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
		}
#endif /* BCM_GMAC3 */
		list_del(&sta->list);
		/* Return the sta object to the pool (see dhd_sta_free). */
		dhd_sta_free(&ifp->info->pub, sta);
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}
1014
1015 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1016 static void
1017 dhd_if_flush_sta(dhd_if_t * ifp)
1018 {
1019 #if defined(BCM_GMAC3)
1020
1021 if (ifp && (ifp->fwdh != FWDER_NULL)) {
1022 dhd_sta_t *sta, *next;
1023 unsigned long flags;
1024
1025 DHD_IF_STA_LIST_LOCK(ifp, flags);
1026
1027 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1028 /* Remove any sta entry from WOFA forwarder. */
1029 fwder_flush(ifp->fwdh, (wofa_t)sta);
1030 }
1031
1032 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1033 }
1034 #endif /* BCM_GMAC3 */
1035 }
1036
1037 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1038 static int
1039 dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
1040 {
1041 int idx, sta_pool_memsz;
1042 dhd_sta_t * sta;
1043 dhd_sta_pool_t * sta_pool;
1044 void * staid_allocator;
1045
1046 ASSERT(dhdp != (dhd_pub_t *)NULL);
1047 ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
1048
1049 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1050 staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
1051 if (staid_allocator == NULL) {
1052 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
1053 return BCME_ERROR;
1054 }
1055
1056 /* Pre allocate a pool of dhd_sta objects (one extra). */
1057 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
1058 sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
1059 if (sta_pool == NULL) {
1060 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
1061 id16_map_fini(dhdp->osh, staid_allocator);
1062 return BCME_ERROR;
1063 }
1064
1065 dhdp->sta_pool = sta_pool;
1066 dhdp->staid_allocator = staid_allocator;
1067
1068 /* Initialize all sta(s) for the pre-allocated free pool. */
1069 bzero((uchar *)sta_pool, sta_pool_memsz);
1070 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1071 sta = &sta_pool[idx];
1072 sta->idx = id16_map_alloc(staid_allocator);
1073 ASSERT(sta->idx <= max_sta);
1074 }
1075 /* Now place them into the pre-allocated free pool. */
1076 for (idx = 1; idx <= max_sta; idx++) {
1077 sta = &sta_pool[idx];
1078 dhd_sta_free(dhdp, sta);
1079 }
1080
1081 return BCME_OK;
1082 }
1083
1084 /** Destruct the pool of dhd_sta_t objects.
1085 * Caller must ensure that no STA objects are currently associated with an if.
1086 */
1087 static void
1088 dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
1089 {
1090 dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1091
1092 if (sta_pool) {
1093 int idx;
1094 int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1095 for (idx = 1; idx <= max_sta; idx++) {
1096 ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
1097 ASSERT(sta_pool[idx].idx == ID16_INVALID);
1098 }
1099 MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
1100 dhdp->sta_pool = NULL;
1101 }
1102
1103 id16_map_fini(dhdp->osh, dhdp->staid_allocator);
1104 dhdp->staid_allocator = NULL;
1105 }
1106
1107 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1108 static void
1109 dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
1110 {
1111 int idx, sta_pool_memsz;
1112 dhd_sta_t * sta;
1113 dhd_sta_pool_t * sta_pool;
1114 void *staid_allocator;
1115
1116 if (!dhdp) {
1117 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
1118 return;
1119 }
1120
1121 sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
1122 staid_allocator = dhdp->staid_allocator;
1123
1124 if (!sta_pool) {
1125 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
1126 return;
1127 }
1128
1129 if (!staid_allocator) {
1130 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
1131 return;
1132 }
1133
1134 /* clear free pool */
1135 sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
1136 bzero((uchar *)sta_pool, sta_pool_memsz);
1137
1138 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1139 id16_map_clear(staid_allocator, max_sta, 1);
1140
1141 /* Initialize all sta(s) for the pre-allocated free pool. */
1142 for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
1143 sta = &sta_pool[idx];
1144 sta->idx = id16_map_alloc(staid_allocator);
1145 ASSERT(sta->idx <= max_sta);
1146 }
1147 /* Now place them into the pre-allocated free pool. */
1148 for (idx = 1; idx <= max_sta; idx++) {
1149 sta = &sta_pool[idx];
1150 dhd_sta_free(dhdp, sta);
1151 }
1152 }
1153
1154 /** Find STA with MAC address ea in an interface's STA list. */
1155 dhd_sta_t *
1156 dhd_find_sta(void *pub, int ifidx, void *ea)
1157 {
1158 dhd_sta_t *sta;
1159 dhd_if_t *ifp;
1160 unsigned long flags;
1161
1162 ASSERT(ea != NULL);
1163 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1164 if (ifp == NULL)
1165 return DHD_STA_NULL;
1166
1167 DHD_IF_STA_LIST_LOCK(ifp, flags);
1168
1169 list_for_each_entry(sta, &ifp->sta_list, list) {
1170 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1171 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1172 return sta;
1173 }
1174 }
1175
1176 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1177
1178 return DHD_STA_NULL;
1179 }
1180
1181 /** Add STA into the interface's STA list. */
1182 dhd_sta_t *
1183 dhd_add_sta(void *pub, int ifidx, void *ea)
1184 {
1185 dhd_sta_t *sta;
1186 dhd_if_t *ifp;
1187 unsigned long flags;
1188
1189 ASSERT(ea != NULL);
1190 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1191 if (ifp == NULL)
1192 return DHD_STA_NULL;
1193
1194 sta = dhd_sta_alloc((dhd_pub_t *)pub);
1195 if (sta == DHD_STA_NULL) {
1196 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
1197 return DHD_STA_NULL;
1198 }
1199
1200 memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
1201
1202 /* link the sta and the dhd interface */
1203 sta->ifp = ifp;
1204 sta->ifidx = ifidx;
1205 INIT_LIST_HEAD(&sta->list);
1206
1207 DHD_IF_STA_LIST_LOCK(ifp, flags);
1208
1209 list_add_tail(&sta->list, &ifp->sta_list);
1210
1211 #if defined(BCM_GMAC3)
1212 if (ifp->fwdh) {
1213 ASSERT(ISALIGNED(ea, 2));
1214 /* Add sta to WOFA forwarder. */
1215 fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1216 }
1217 #endif /* BCM_GMAC3 */
1218
1219 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1220
1221 return sta;
1222 }
1223
1224 /** Delete STA from the interface's STA list. */
1225 void
1226 dhd_del_sta(void *pub, int ifidx, void *ea)
1227 {
1228 dhd_sta_t *sta, *next;
1229 dhd_if_t *ifp;
1230 unsigned long flags;
1231
1232 ASSERT(ea != NULL);
1233 ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
1234 if (ifp == NULL)
1235 return;
1236
1237 DHD_IF_STA_LIST_LOCK(ifp, flags);
1238
1239 list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
1240 if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
1241 #if defined(BCM_GMAC3)
1242 if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
1243 ASSERT(ISALIGNED(ea, 2));
1244 fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
1245 }
1246 #endif /* BCM_GMAC3 */
1247 list_del(&sta->list);
1248 dhd_sta_free(&ifp->info->pub, sta);
1249 }
1250 }
1251
1252 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
1253
1254 return;
1255 }
1256
1257 /** Add STA if it doesn't exist. Not reentrant. */
1258 dhd_sta_t*
1259 dhd_findadd_sta(void *pub, int ifidx, void *ea)
1260 {
1261 dhd_sta_t *sta;
1262
1263 sta = dhd_find_sta(pub, ifidx, ea);
1264
1265 if (!sta) {
1266 /* Add entry */
1267 sta = dhd_add_sta(pub, ifidx, ea);
1268 }
1269
1270 return sta;
1271 }
1272 #else
/* Non-PCIE_FULL_DONGLE builds: flow-ring STA tracking is unused, so the
 * STA management API collapses to no-op stubs with identical signatures.
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
1280 #endif /* PCIE_FULL_DONGLE */
1281
1282
1283 /* Returns dhd iflist index correspondig the the bssidx provided by apps */
1284 int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
1285 {
1286 dhd_if_t *ifp;
1287 dhd_info_t *dhd = dhdp->info;
1288 int i;
1289
1290 ASSERT(bssidx < DHD_MAX_IFS);
1291 ASSERT(dhdp);
1292
1293 for (i = 0; i < DHD_MAX_IFS; i++) {
1294 ifp = dhd->iflist[i];
1295 if (ifp && (ifp->bssidx == bssidx)) {
1296 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1297 ifp->name, bssidx, i));
1298 break;
1299 }
1300 }
1301 return i;
1302 }
1303
/**
 * Push one received skb into the rx-frame ring consumed by the rxf thread.
 * The ring (dhdp->skbbuf) holds MAXSKBPEND slots; store_idx/sent_idx wrap
 * with the (MAXSKBPEND - 1) mask, so MAXSKBPEND must be a power of two.
 * Returns BCME_OK on success, BCME_BUSY (RXF_DEQUEUE_ON_BUSY builds) or
 * BCME_ERROR when the next store slot is still occupied.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	/* A non-NULL slot means the consumer has not caught up: ring full. */
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	/* Publish the skb, then advance the producer index (under the lock). */
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
1344
/**
 * Pop the next skb from the rx-frame ring (counterpart of dhd_rxf_enqueue).
 * Returns the skb, or NULL when the ring is empty. The consumed slot is
 * cleared under the rxf lock so the producer can reuse it.
 */
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;

	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	/* Empty slot at the consumer index: nothing pending. */
	if (skb == NULL) {
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}

	/* Release the slot and advance the consumer index (masked wrap). */
	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));

	dhd_os_rxfunlock(dhdp);

	return skb;
}
1374
/* Pre/post MAC address processing hook.
 * prepost TRUE ("pre process"): read the MAC via dhd_read_macaddr();
 * prepost FALSE ("post process"): write dhd->pub.mac back via
 * dhd_write_macaddr(). Always returns 0.
 */
int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
#ifndef CUSTOMER_HW10
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
#endif /* !CUSTOMER_HW10 */
	/* NOTE(review): when CUSTOMER_HW10 is defined, 'dhd' has no visible
	 * declaration in this file chunk yet is still used below — presumably
	 * that configuration supplies it elsewhere; confirm before enabling.
	 */

	if (prepost) { /* pre process */
		dhd_read_macaddr(dhd);
	} else { /* post process */
		dhd_write_macaddr(&dhd->pub.mac);
	}

	return 0;
}
1389
1390 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
1391 static bool
1392 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
1393 {
1394 bool _apply = FALSE;
1395 /* In case of IBSS mode, apply arp pkt filter */
1396 if (op_mode & DHD_FLAG_IBSS_MODE) {
1397 _apply = TRUE;
1398 goto exit;
1399 }
1400 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1401 if ((dhd->arp_version == 1) &&
1402 (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
1403 _apply = TRUE;
1404 goto exit;
1405 }
1406
1407 exit:
1408 return _apply;
1409 }
1410 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1411
1412 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1413 #ifdef PKT_FILTER_SUPPORT
1414 void
1415 dhd_set_packet_filter_mode(struct net_device *dev, char *command)
1416 {
1417 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1418
1419 dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
1420 }
1421
1422 int
1423 dhd_set_packet_filter_ports(struct net_device *dev, char *command)
1424 {
1425 int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
1426 uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
1427 dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
1428 dhd_pub_t *dhdp = &dhdi->pub;
1429 char iovbuf[WLC_IOCTL_SMLEN];
1430
1431 /* get action */
1432 action = bcm_strtoul(command, &command, 0);
1433 if (action > PKT_FILTER_PORTS_MAX)
1434 return BCME_BADARG;
1435
1436 if (action == PKT_FILTER_PORTS_LOOPBACK) {
1437 /* echo the loopback value if port filter is supported else error */
1438 bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
1439 error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
1440 if (error < 0) {
1441 DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
1442 return error;
1443 }
1444
1445 if (strstr(iovbuf, "pktfltr2"))
1446 return bcm_strtoul(command, &command, 0);
1447 else {
1448 DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
1449 return BCME_UNSUPPORTED;
1450 }
1451 }
1452
1453 if (action == PKT_FILTER_PORTS_CLEAR) {
1454 /* action 0 is clear all ports */
1455 dhdp->pkt_filter_ports_count = 0;
1456 bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
1457 }
1458 else {
1459 portnum = bcm_strtoul(command, &command, 0);
1460 if (portnum == 0) {
1461 /* no ports to add or remove */
1462 return BCME_BADARG;
1463 }
1464
1465 /* get configured ports */
1466 count = dhdp->pkt_filter_ports_count;
1467 ports = dhdp->pkt_filter_ports;
1468
1469 if (action == PKT_FILTER_PORTS_ADD) {
1470 /* action 1 is add ports */
1471
1472 /* copy new ports */
1473 while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
1474 for (i = 0; i < count; i++) {
1475 /* duplicate port */
1476 if (portnum == ports[i])
1477 break;
1478 }
1479 if (portnum != ports[i])
1480 ports[count++] = portnum;
1481 portnum = bcm_strtoul(command, &command, 0);
1482 }
1483 } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
1484 /* action 2 is remove ports */
1485 bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
1486 get_count = count;
1487
1488 while (portnum != 0) {
1489 count = 0;
1490 for (i = 0; i < get_count; i++) {
1491 if (portnum != get_ports[i])
1492 ports[count++] = get_ports[i];
1493 }
1494 get_count = count;
1495 bcopy(ports, get_ports, count * sizeof(uint16));
1496 portnum = bcm_strtoul(command, &command, 0);
1497 }
1498 }
1499 dhdp->pkt_filter_ports_count = count;
1500 }
1501 return error;
1502 }
1503
/**
 * Push the configured port filter list and filter mode down to the firmware.
 * enable TRUE: program the stored port list (only when PORTS_ONLY mode is
 * set) and select whitelist/blacklist behavior from pkt_filter_mode;
 * enable FALSE: clear the port list and restore forward-on-match.
 * Updates the global dhd_master_mode, then sets the "pkt_filter_ports"
 * and "pkt_filter_mode" iovars.
 */
static void
dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
{
	int error = 0;
	wl_pkt_filter_ports_t *portlist = NULL;
	/* Buffer sized for the iovar name + fixed header + max port array. */
	const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
		+ WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
	char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
	char iovbuf[pkt_filter_ports_buf_len];

	DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
		enable, dhd->in_suspend, dhd->pkt_filter_mode,
		dhd->pkt_filter_ports_count));

	bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
	portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
	portlist->version = WL_PKT_FILTER_PORTS_VERSION;
	portlist->reserved = 0;

	if (enable) {
		/* Nothing to do unless port-only filtering was requested. */
		if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
			return;

		/* enable port filter */
		dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
		if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
			/* whitelist mode: FORWARD_ON_MATCH */
			dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
		else
			/* blacklist mode: DISCARD_ON_MATCH */
			dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;

		portlist->count = dhd->pkt_filter_ports_count;
		bcopy(dhd->pkt_filter_ports, portlist->ports,
			dhd->pkt_filter_ports_count * sizeof(uint16));
	} else {
		/* disable port filter */
		portlist->count = 0;
		dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
		dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
	}

	DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
		portlist->count));

	/* update ports */
	bcm_mkiovar("pkt_filter_ports",
		(char*)portlist,
		(WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
		iovbuf, sizeof(iovbuf));
	error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (error < 0)
		DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));

	/* update mode */
	bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
		sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
	error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (error < 0)
		DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));

	return;
}
1567 #endif /* PKT_FILTER_SUPPORT */
1568 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
1569
1570 void dhd_set_packet_filter(dhd_pub_t *dhd)
1571 {
1572 #ifdef PKT_FILTER_SUPPORT
1573 int i;
1574
1575 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1576 if (dhd_pkt_filter_enable) {
1577 for (i = 0; i < dhd->pktfilter_count; i++) {
1578 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1579 }
1580 }
1581 #endif /* PKT_FILTER_SUPPORT */
1582 }
1583
/**
 * Enable or disable all configured packet filters.
 * value 1: enable (suspend path, only unicast passed up to the host);
 * value 0: disable (resume path).
 * Filters are only enabled when in STA mode with no DHCP handshake running.
 */
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));

#if defined(CUSTOM_PLATFORM_NV_TEGRA)
	dhd_enable_packet_filter_ports(dhd, value);
#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */

	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	    {
		for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
			/* Skip the ARP whitelist filter unless the current
			 * op_mode needs it (see _turn_on_arp_filter).
			 */
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
#endif /* PKT_FILTER_SUPPORT */
}
1616
1617 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1618 {
1619 #ifndef SUPPORT_PM2_ONLY
1620 int power_mode = PM_MAX;
1621 #endif /* SUPPORT_PM2_ONLY */
1622 /* wl_pkt_filter_enable_t enable_parm; */
1623 char iovbuf[32];
1624 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1625 uint roamvar = dhd->conf->roam_off_suspend;
1626 uint nd_ra_filter = 0;
1627 int ret = 0;
1628
1629 if (!dhd)
1630 return -ENODEV;
1631
1632 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1633 __FUNCTION__, value, dhd->in_suspend));
1634
1635 dhd_suspend_lock(dhd);
1636
1637 #ifdef CUSTOM_SET_CPUCORE
1638 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1639 /* set specific cpucore */
1640 dhd_set_cpucore(dhd, TRUE);
1641 #endif /* CUSTOM_SET_CPUCORE */
1642 #ifndef SUPPORT_PM2_ONLY
1643 if (dhd->conf->pm >= 0)
1644 power_mode = dhd->conf->pm;
1645 #endif /* SUPPORT_PM2_ONLY */
1646 if (dhd->up) {
1647 if (value && dhd->in_suspend) {
1648 #ifdef PKT_FILTER_SUPPORT
1649 dhd->early_suspended = 1;
1650 #endif
1651 /* Kernel suspended */
1652 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
1653
1654 #ifndef SUPPORT_PM2_ONLY
1655 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1656 sizeof(power_mode), TRUE, 0);
1657 #endif /* SUPPORT_PM2_ONLY */
1658
1659 /* Enable packet filter, only allow unicast packet to send up */
1660 dhd_enable_packet_filter(1, dhd);
1661
1662 /* If DTIM skip is set up as default, force it to wake
1663 * each third DTIM for better power savings. Note that
1664 * one side effect is a chance to miss BC/MC packet.
1665 */
1666 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
1667 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1668 4, iovbuf, sizeof(iovbuf));
1669 if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
1670 TRUE, 0) < 0)
1671 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
1672
1673 /* Disable firmware roaming during suspend */
1674 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1675 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1676 if (FW_SUPPORTED(dhd, ndoe)) {
1677 /* enable IPv6 RA filter in firmware during suspend */
1678 nd_ra_filter = 1;
1679 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1680 iovbuf, sizeof(iovbuf));
1681 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1682 sizeof(iovbuf), TRUE, 0)) < 0)
1683 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1684 ret));
1685 }
1686 } else {
1687 #ifdef PKT_FILTER_SUPPORT
1688 dhd->early_suspended = 0;
1689 #endif
1690 /* Kernel resumed */
1691 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
1692
1693 #ifndef SUPPORT_PM2_ONLY
1694 power_mode = PM_FAST;
1695 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1696 sizeof(power_mode), TRUE, 0);
1697 #endif /* SUPPORT_PM2_ONLY */
1698 #ifdef PKT_FILTER_SUPPORT
1699 /* disable pkt filter */
1700 dhd_enable_packet_filter(0, dhd);
1701 #endif /* PKT_FILTER_SUPPORT */
1702
1703 /* restore pre-suspend setting for dtim_skip */
1704 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
1705 4, iovbuf, sizeof(iovbuf));
1706
1707 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1708 roamvar = dhd_roam_disable;
1709 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
1710 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
1711 if (FW_SUPPORTED(dhd, ndoe)) {
1712 /* disable IPv6 RA filter in firmware during suspend */
1713 nd_ra_filter = 0;
1714 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
1715 iovbuf, sizeof(iovbuf));
1716 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
1717 sizeof(iovbuf), TRUE, 0)) < 0)
1718 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1719 ret));
1720 }
1721 }
1722 }
1723 dhd_suspend_unlock(dhd);
1724
1725 return 0;
1726 }
1727
1728 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
1729 {
1730 dhd_pub_t *dhdp = &dhd->pub;
1731 int ret = 0;
1732
1733 DHD_OS_WAKE_LOCK(dhdp);
1734 DHD_PERIM_LOCK(dhdp);
1735
1736 /* Set flag when early suspend was called */
1737 dhdp->in_suspend = val;
1738 if ((force || !dhdp->suspend_disable_flag) &&
1739 dhd_support_sta_mode(dhdp))
1740 {
1741 ret = dhd_set_suspend(val, dhdp);
1742 }
1743
1744 DHD_PERIM_UNLOCK(dhdp);
1745 DHD_OS_WAKE_UNLOCK(dhdp);
1746 return ret;
1747 }
1748
1749 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
1750 static void dhd_early_suspend(struct early_suspend *h)
1751 {
1752 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1753 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1754
1755 if (dhd)
1756 dhd_suspend_resume_helper(dhd, 1, 0);
1757 }
1758
1759 static void dhd_late_resume(struct early_suspend *h)
1760 {
1761 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
1762 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
1763
1764 if (dhd)
1765 dhd_suspend_resume_helper(dhd, 0, 0);
1766 }
1767 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1768
1769 /*
1770 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1771 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1772 *
1773 * dhd_timeout_start(&tmo, usec);
1774 * while (!dhd_timeout_expired(&tmo))
1775 * if (poll_something())
1776 * break;
1777 * if (dhd_timeout_expired(&tmo))
1778 * fatal();
1779 */
1780
1781 void
1782 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
1783 {
1784 tmo->limit = usec;
1785 tmo->increment = 0;
1786 tmo->elapsed = 0;
1787 tmo->tick = jiffies_to_usecs(1);
1788 }
1789
/**
 * One step of the generalized timeout: returns 1 once the accumulated delay
 * reaches the limit, otherwise sleeps/spins for the current back-off
 * increment and returns 0. See the usage sketch in the comment above
 * dhd_timeout_start().
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	/* Busy-wait with exponential back-off while sleeping is not allowed
	 * or the delay is still below one jiffy; otherwise yield the CPU.
	 */
	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick; /* cap back-off at one jiffy */
	} else {
		/* Sleep for one jiffy on a private wait queue. */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
1823
1824 int
1825 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
1826 {
1827 int i = 0;
1828
1829 if (!dhd) {
1830 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
1831 return DHD_BAD_IF;
1832 }
1833 while (i < DHD_MAX_IFS) {
1834 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
1835 return i;
1836 i++;
1837 }
1838
1839 return DHD_BAD_IF;
1840 }
1841
1842 struct net_device * dhd_idx2net(void *pub, int ifidx)
1843 {
1844 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
1845 struct dhd_info *dhd_info;
1846
1847 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
1848 return NULL;
1849 dhd_info = dhd_pub->info;
1850 if (dhd_info && dhd_info->iflist[ifidx])
1851 return dhd_info->iflist[ifidx]->net;
1852 return NULL;
1853 }
1854
1855 int
1856 dhd_ifname2idx(dhd_info_t *dhd, char *name)
1857 {
1858 int i = DHD_MAX_IFS;
1859
1860 ASSERT(dhd);
1861
1862 if (name == NULL || *name == '\0')
1863 return 0;
1864
1865 while (--i > 0)
1866 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
1867 break;
1868
1869 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
1870
1871 return i; /* default - the primary interface */
1872 }
1873
1874 int
1875 dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
1876 {
1877 int i = DHD_MAX_IFS;
1878
1879 ASSERT(dhd);
1880
1881 while (--i > 0)
1882 if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
1883 break;
1884
1885 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
1886
1887 return i; /* default - the primary interface */
1888 }
1889
1890 char *
1891 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
1892 {
1893 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
1894
1895 ASSERT(dhd);
1896
1897 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
1898 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
1899 return "<if_bad>";
1900 }
1901
1902 if (dhd->iflist[ifidx] == NULL) {
1903 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
1904 return "<if_null>";
1905 }
1906
1907 if (dhd->iflist[ifidx]->net)
1908 return dhd->iflist[ifidx]->net->name;
1909
1910 return "<if_none>";
1911 }
1912
1913 uint8 *
1914 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
1915 {
1916 int i;
1917 dhd_info_t *dhd = (dhd_info_t *)dhdp;
1918
1919 ASSERT(dhd);
1920 for (i = 0; i < DHD_MAX_IFS; i++)
1921 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
1922 return dhd->iflist[i]->mac_addr;
1923
1924 return NULL;
1925 }
1926
1927
/**
 * Sync the interface's multicast configuration to the dongle in three steps:
 *  1. build and send the "mcast_list" iovar (count + MAC array);
 *  2. send the "allmulti" iovar (forced on if step 1 was rejected);
 *  3. mirror IFF_PROMISC via the WLC_SET_PROMISC ioctl.
 * The mc list is snapshotted under netif_addr_lock_bh on kernels >= 2.6.27.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

	ASSERT(dhd && dhd->iflist[ifidx]);
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	/* Take the address lock just long enough to read the mc count. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	cnt = netdev_mc_count(dev);
#else
	cnt = dev->mc_count;
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

	/* Send down the multicast list first. */


	/* Iovar layout: "mcast_list\0" + uint32 count + cnt MAC addresses. */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);


	/* Copy the addresses; the list may have shrunk since the count was
	 * read, so both iterations are bounded by cnt as well.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#else
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
		cnt--, mclist = mclist->next) {
		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
	}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* Dongle rejected the list: fall back to allmulti if there
		 * were addresses to program.
		 */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting.  This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */

	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
			dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}


	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */

	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
2073
2074 int
2075 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
2076 {
2077 char buf[32];
2078 wl_ioctl_t ioc;
2079 int ret;
2080
2081 if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
2082 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
2083 return -1;
2084 }
2085 memset(&ioc, 0, sizeof(ioc));
2086 ioc.cmd = WLC_SET_VAR;
2087 ioc.buf = buf;
2088 ioc.len = 32;
2089 ioc.set = TRUE;
2090
2091 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2092 if (ret < 0) {
2093 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2094 } else {
2095 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2096 if (ifidx == 0)
2097 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2098 }
2099
2100 return ret;
2101 }
2102
2103 #ifdef SOFTAP
2104 extern struct net_device *ap_net_dev;
2105 extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
2106 #endif
2107
/* Deferred-work handler for DHD_WQ_WORK_IF_ADD: allocates, wires up and
 * registers the virtual interface described by the queued dhd_if_event_t.
 * Always frees the event payload before returning.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	struct net_device *ndev;
	int ifidx, bssidx;
	int ret;
#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	struct wireless_dev *vwdev, *primary_wdev;
	struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	/* Guard clauses: wrong work item, missing driver context, missing payload. */
	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	/* Lock order: net-if lock -> wake lock -> PERIM lock (mirrored on exit). */
	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	/* Attach a cfg80211 wireless_dev that shares the primary interface's wiphy. */
	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
	if (unlikely(!vwdev)) {
		/* NOTE(review): the ndev allocated above is not released on this
		 * path - confirm whether dhd_remove_if() should be called here. */
		WL_ERR(("Could not allocate wireless device\n"));
		goto done;
	}
	primary_ndev = dhd->pub.info->iflist[0]->net;
	primary_wdev = ndev_to_wdev(primary_ndev);
	vwdev->wiphy = primary_wdev->wiphy;
	vwdev->iftype = if_event->event.role;
	vwdev->netdev = ndev;
	ndev->ieee80211_ptr = vwdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	/* PERIM lock is dropped across dhd_register_if() - presumably because
	 * registration can block; confirm before changing this ordering. */
	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#ifdef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
		char iovbuf[WLC_IOCTL_SMLEN];
		uint32 var_int = 1;

		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);

		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	/* This handler owns the queued event payload and always frees it. */
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
2198
2199 static void
2200 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2201 {
2202 dhd_info_t *dhd = handle;
2203 int ifidx;
2204 dhd_if_event_t *if_event = event_info;
2205
2206
2207 if (event != DHD_WQ_WORK_IF_DEL) {
2208 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2209 return;
2210 }
2211
2212 if (!dhd) {
2213 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2214 return;
2215 }
2216
2217 if (!if_event) {
2218 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2219 return;
2220 }
2221
2222 dhd_net_if_lock_local(dhd);
2223 DHD_OS_WAKE_LOCK(&dhd->pub);
2224 DHD_PERIM_LOCK(&dhd->pub);
2225
2226 ifidx = if_event->event.ifidx;
2227 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
2228
2229 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2230
2231 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2232
2233 DHD_PERIM_UNLOCK(&dhd->pub);
2234 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2235 dhd_net_if_unlock_local(dhd);
2236 }
2237
2238 static void
2239 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
2240 {
2241 dhd_info_t *dhd = handle;
2242 dhd_if_t *ifp = event_info;
2243
2244 if (event != DHD_WQ_WORK_SET_MAC) {
2245 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2246 }
2247
2248 if (!dhd) {
2249 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2250 return;
2251 }
2252
2253 dhd_net_if_lock_local(dhd);
2254 DHD_OS_WAKE_LOCK(&dhd->pub);
2255 DHD_PERIM_LOCK(&dhd->pub);
2256
2257 #ifdef SOFTAP
2258 {
2259 unsigned long flags;
2260 bool in_ap = FALSE;
2261 DHD_GENERAL_LOCK(&dhd->pub, flags);
2262 in_ap = (ap_net_dev != NULL);
2263 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
2264
2265 if (in_ap) {
2266 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
2267 ifp->net->name));
2268 goto done;
2269 }
2270 }
2271 #endif /* SOFTAP */
2272
2273 if (ifp == NULL || !dhd->pub.up) {
2274 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
2275 goto done;
2276 }
2277
2278 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
2279 ifp->set_macaddress = FALSE;
2280 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
2281 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
2282 else
2283 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
2284
2285 done:
2286 DHD_PERIM_UNLOCK(&dhd->pub);
2287 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2288 dhd_net_if_unlock_local(dhd);
2289 }
2290
/* Deferred-work handler for DHD_WQ_WORK_SET_MCAST_LIST: pushes the current
 * multicast configuration of the interface (event_info is a dhd_if_t) to the
 * dongle via _dhd_set_multicast_list().
 */
static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;
	int ifidx;

	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	/* Lock order: net-if lock -> wake lock -> PERIM lock. */
	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#ifdef SOFTAP
	{
		bool in_ap = FALSE;
		unsigned long flags;
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		in_ap = (ap_net_dev != NULL);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);

		if (in_ap) {
			/* NOTE(review): ifp is dereferenced here before the NULL
			 * check below - confirm ifp can never be NULL when SOFTAP
			 * is active. */
			DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
				ifp->net->name));
			ifp->set_multicast = FALSE;
			goto done;
		}
	}
#endif /* SOFTAP */

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	ifidx = ifp->idx;

	_dhd_set_multicast_list(dhd, ifidx);
	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
2345
2346 static int
2347 dhd_set_mac_address(struct net_device *dev, void *addr)
2348 {
2349 int ret = 0;
2350
2351 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2352 struct sockaddr *sa = (struct sockaddr *)addr;
2353 int ifidx;
2354 dhd_if_t *dhdif;
2355
2356 ifidx = dhd_net2idx(dhd, dev);
2357 if (ifidx == DHD_BAD_IF)
2358 return -1;
2359
2360 dhdif = dhd->iflist[ifidx];
2361
2362 dhd_net_if_lock_local(dhd);
2363 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
2364 dhdif->set_macaddress = TRUE;
2365 dhd_net_if_unlock_local(dhd);
2366 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
2367 dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
2368 return ret;
2369 }
2370
2371 static void
2372 dhd_set_multicast_list(struct net_device *dev)
2373 {
2374 dhd_info_t *dhd = DHD_DEV_INFO(dev);
2375 int ifidx;
2376
2377 ifidx = dhd_net2idx(dhd, dev);
2378 if (ifidx == DHD_BAD_IF)
2379 return;
2380
2381 dhd->iflist[ifidx]->set_multicast = TRUE;
2382 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
2383 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
2384 }
2385
2386 #ifdef PROP_TXSTATUS
2387 int
2388 dhd_os_wlfc_block(dhd_pub_t *pub)
2389 {
2390 dhd_info_t *di = (dhd_info_t *)(pub->info);
2391 ASSERT(di != NULL);
2392 spin_lock_bh(&di->wlfc_spinlock);
2393 return 1;
2394 }
2395
2396 int
2397 dhd_os_wlfc_unblock(dhd_pub_t *pub)
2398 {
2399 dhd_info_t *di = (dhd_info_t *)(pub->info);
2400
2401 ASSERT(di != NULL);
2402 spin_unlock_bh(&di->wlfc_spinlock);
2403 return 1;
2404 }
2405
2406 #endif /* PROP_TXSTATUS */
2407
2408 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Maps an Ethernet type (host byte order) to a printable label for the
 * TX/RX dump logging below.
 */
typedef struct {
	uint16 type;		/* Ethernet type, host byte order */
	const char *str;	/* printable label */
} PKTTYPE_INFO;

/* Lookup table; the trailing { 0, "" } entry terminates the list and doubles
 * as the fallback string for unknown types (see _get_packet_type_str()).
 */
static const PKTTYPE_INFO packet_type_info[] =
{
	{ ETHER_TYPE_IP, "IP" },
	{ ETHER_TYPE_ARP, "ARP" },
	{ ETHER_TYPE_BRCM, "BRCM" },
	{ ETHER_TYPE_802_1X, "802.1X" },
	{ ETHER_TYPE_WAI, "WAPI" },
	{ 0, ""}
};
2423
2424 static const char *_get_packet_type_str(uint16 type)
2425 {
2426 int i;
2427 int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
2428
2429 for (i = 0; i < n; i++) {
2430 if (packet_type_info[i].type == type)
2431 return packet_type_info[i].str;
2432 }
2433
2434 return packet_type_info[n].str;
2435 }
2436 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
2437
2438 #if defined(DHD_TX_DUMP)
2439 void
2440 dhd_tx_dump(osl_t *osh, void *pkt)
2441 {
2442 uint8 *dump_data;
2443 uint16 protocol;
2444 struct ether_header *eh;
2445
2446 dump_data = PKTDATA(osh, pkt);
2447 eh = (struct ether_header *) dump_data;
2448 protocol = ntoh16(eh->ether_type);
2449
2450 DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol)));
2451
2452 if (protocol == ETHER_TYPE_802_1X) {
2453 DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
2454 dump_data[14], dump_data[15], dump_data[30]));
2455 }
2456
2457 #if defined(DHD_TX_FULL_DUMP)
2458 {
2459 int i;
2460 uint datalen;
2461 datalen = PKTLEN(osh, pkt);
2462
2463 for (i = 0; i < datalen; i++) {
2464 DHD_ERROR(("%02X ", dump_data[i]));
2465 if ((i & 15) == 15)
2466 printk("\n");
2467 }
2468 DHD_ERROR(("\n"));
2469 }
2470 #endif /* DHD_TX_FULL_DUMP */
2471 }
2472 #endif /* DHD_TX_DUMP */
2473
2474 int BCMFASTPATH
2475 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
2476 {
2477 int ret = BCME_OK;
2478 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
2479 struct ether_header *eh = NULL;
2480
2481 /* Reject if down */
2482 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
2483 /* free the packet here since the caller won't */
2484 PKTFREE(dhdp->osh, pktbuf, TRUE);
2485 return -ENODEV;
2486 }
2487
2488 #ifdef PCIE_FULL_DONGLE
2489 if (dhdp->busstate == DHD_BUS_SUSPEND) {
2490 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
2491 PKTFREE(dhdp->osh, pktbuf, TRUE);
2492 return -EBUSY;
2493 }
2494 #endif /* PCIE_FULL_DONGLE */
2495
2496 #ifdef DHD_UNICAST_DHCP
2497 /* if dhcp_unicast is enabled, we need to convert the */
2498 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2499 if (dhdp->dhcp_unicast) {
2500 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
2501 }
2502 #endif /* DHD_UNICAST_DHCP */
2503 /* Update multicast statistic */
2504 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
2505 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
2506 eh = (struct ether_header *)pktdata;
2507
2508 if (ETHER_ISMULTI(eh->ether_dhost))
2509 dhdp->tx_multicast++;
2510 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
2511 atomic_inc(&dhd->pend_8021x_cnt);
2512 } else {
2513 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2514 return BCME_ERROR;
2515 }
2516
2517 /* Look into the packet and update the packet priority */
2518 #ifndef PKTPRIO_OVERRIDE
2519 if (PKTPRIO(pktbuf) == 0)
2520 #endif
2521 pktsetprio(pktbuf, FALSE);
2522
2523
2524 #if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
2525 /*
2526 * Lkup the per interface hash table, for a matching flowring. If one is not
2527 * available, allocate a unique flowid and add a flowring entry.
2528 * The found or newly created flowid is placed into the pktbuf's tag.
2529 */
2530 ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
2531 if (ret != BCME_OK) {
2532 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
2533 return ret;
2534 }
2535 #endif
2536 #if defined(DHD_TX_DUMP)
2537 dhd_tx_dump(dhdp->osh, pktbuf);
2538 #endif
2539
2540 /* terence 20150901: Micky add to ajust the 802.1X priority */
2541 /* Set the 802.1X packet with the highest priority 7 */
2542 if (dhdp->conf->pktprio8021x >= 0)
2543 pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
2544
2545 #ifdef PROP_TXSTATUS
2546 if (dhd_wlfc_is_supported(dhdp)) {
2547 /* store the interface ID */
2548 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
2549
2550 /* store destination MAC in the tag as well */
2551 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
2552
2553 /* decide which FIFO this packet belongs to */
2554 if (ETHER_ISMULTI(eh->ether_dhost))
2555 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
2556 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
2557 else
2558 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
2559 } else
2560 #endif /* PROP_TXSTATUS */
2561 /* If the protocol uses a data header, apply it */
2562 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
2563
2564 /* Use bus module to send data frame */
2565 #ifdef WLMEDIA_HTSF
2566 dhd_htsf_addtxts(dhdp, pktbuf);
2567 #endif
2568
2569 #ifdef PROP_TXSTATUS
2570 {
2571 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
2572 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
2573 /* non-proptxstatus way */
2574 #ifdef BCMPCIE
2575 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2576 #else
2577 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2578 #endif /* BCMPCIE */
2579 }
2580 }
2581 #else
2582 #ifdef BCMPCIE
2583 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
2584 #else
2585 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
2586 #endif /* BCMPCIE */
2587 #endif /* PROP_TXSTATUS */
2588
2589 return ret;
2590 }
2591
2592 int BCMFASTPATH
2593 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
2594 {
2595 int ret;
2596 uint datalen;
2597 void *pktbuf;
2598 dhd_info_t *dhd = DHD_DEV_INFO(net);
2599 dhd_if_t *ifp = NULL;
2600 int ifidx;
2601 #ifdef WLMEDIA_HTSF
2602 uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
2603 #else
2604 uint8 htsfdlystat_sz = 0;
2605 #endif
2606 #ifdef DHD_WMF
2607 struct ether_header *eh;
2608 uint8 *iph;
2609 #endif /* DHD_WMF */
2610
2611 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2612
2613 DHD_OS_WAKE_LOCK(&dhd->pub);
2614 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2615
2616 /* Reject if down */
2617 if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
2618 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
2619 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
2620 netif_stop_queue(net);
2621 /* Send Event when bus down detected during data session */
2622 if (dhd->pub.up) {
2623 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
2624 net_os_send_hang_message(net);
2625 }
2626 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2627 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2628 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2629 return -ENODEV;
2630 #else
2631 return NETDEV_TX_BUSY;
2632 #endif
2633 }
2634
2635 ifp = DHD_DEV_IFP(net);
2636 ifidx = DHD_DEV_IFIDX(net);
2637
2638 ASSERT(ifidx == dhd_net2idx(dhd, net));
2639 ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
2640
2641 if (ifidx == DHD_BAD_IF) {
2642 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
2643 netif_stop_queue(net);
2644 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2645 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2646 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2647 return -ENODEV;
2648 #else
2649 return NETDEV_TX_BUSY;
2650 #endif
2651 }
2652
2653 /* re-align socket buffer if "skb->data" is odd address */
2654 if (((unsigned long)(skb->data)) & 0x1) {
2655 unsigned char *data = skb->data;
2656 uint32 length = skb->len;
2657 PKTPUSH(dhd->pub.osh, skb, 1);
2658 memmove(skb->data, data, length);
2659 PKTSETLEN(dhd->pub.osh, skb, length);
2660 }
2661
2662 datalen = PKTLEN(dhd->pub.osh, skb);
2663
2664 /* Make sure there's enough room for any header */
2665
2666 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
2667 struct sk_buff *skb2;
2668
2669 DHD_INFO(("%s: insufficient headroom\n",
2670 dhd_ifname(&dhd->pub, ifidx)));
2671 dhd->pub.tx_realloc++;
2672
2673 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
2674
2675 dev_kfree_skb(skb);
2676 if ((skb = skb2) == NULL) {
2677 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
2678 dhd_ifname(&dhd->pub, ifidx)));
2679 ret = -ENOMEM;
2680 goto done;
2681 }
2682 }
2683
2684 /* Convert to packet */
2685 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
2686 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
2687 dhd_ifname(&dhd->pub, ifidx)));
2688 dev_kfree_skb_any(skb);
2689 ret = -ENOMEM;
2690 goto done;
2691 }
2692 #ifdef WLMEDIA_HTSF
2693 if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
2694 uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
2695 struct ether_header *eh = (struct ether_header *)pktdata;
2696
2697 if (!ETHER_ISMULTI(eh->ether_dhost) &&
2698 (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
2699 eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
2700 }
2701 }
2702 #endif
2703 #ifdef DHD_WMF
2704 eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
2705 iph = (uint8 *)eh + ETHER_HDR_LEN;
2706
2707 /* WMF processing for multicast packets
2708 * Only IPv4 packets are handled
2709 */
2710 if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
2711 (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
2712 ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
2713 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2714 void *sdu_clone;
2715 bool ucast_convert = FALSE;
2716 #ifdef DHD_UCAST_UPNP
2717 uint32 dest_ip;
2718
2719 dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
2720 ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
2721 #endif /* DHD_UCAST_UPNP */
2722 #ifdef DHD_IGMP_UCQUERY
2723 ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
2724 (IPV4_PROT(iph) == IP_PROT_IGMP) &&
2725 (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
2726 #endif /* DHD_IGMP_UCQUERY */
2727 if (ucast_convert) {
2728 dhd_sta_t *sta;
2729 unsigned long flags;
2730
2731 DHD_IF_STA_LIST_LOCK(ifp, flags);
2732
2733 /* Convert upnp/igmp query to unicast for each assoc STA */
2734 list_for_each_entry(sta, &ifp->sta_list, list) {
2735 if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
2736 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2737 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2738 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2739 return (WMF_NOP);
2740 }
2741 dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
2742 }
2743
2744 DHD_IF_STA_LIST_UNLOCK(ifp, flags);
2745 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2746 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2747
2748 PKTFREE(dhd->pub.osh, pktbuf, TRUE);
2749 return NETDEV_TX_OK;
2750 } else
2751 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
2752 {
2753 /* There will be no STA info if the packet is coming from LAN host
2754 * Pass as NULL
2755 */
2756 ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
2757 switch (ret) {
2758 case WMF_TAKEN:
2759 case WMF_DROP:
2760 /* Either taken by WMF or we should drop it.
2761 * Exiting send path
2762 */
2763 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2764 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2765 return NETDEV_TX_OK;
2766 default:
2767 /* Continue the transmit path */
2768 break;
2769 }
2770 }
2771 }
2772 #endif /* DHD_WMF */
2773
2774 #ifdef DHDTCPACK_SUPPRESS
2775 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
2776 /* If this packet has been hold or got freed, just return */
2777 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx))
2778 return 0;
2779 } else {
2780 /* If this packet has replaced another packet and got freed, just return */
2781 if (dhd_tcpack_suppress(&dhd->pub, pktbuf))
2782 return 0;
2783 }
2784 #endif /* DHDTCPACK_SUPPRESS */
2785
2786 ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
2787
2788 done:
2789 if (ret) {
2790 ifp->stats.tx_dropped++;
2791 dhd->pub.tx_dropped++;
2792 }
2793 else {
2794
2795 #ifdef PROP_TXSTATUS
2796 /* tx_packets counter can counted only when wlfc is disabled */
2797 if (!dhd_wlfc_is_supported(&dhd->pub))
2798 #endif
2799 {
2800 dhd->pub.tx_packets++;
2801 ifp->stats.tx_packets++;
2802 ifp->stats.tx_bytes += datalen;
2803 }
2804 }
2805
2806 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
2807 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2808
2809 /* Return ok: we always eat the packet */
2810 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2811 return 0;
2812 #else
2813 return NETDEV_TX_OK;
2814 #endif
2815 }
2816
2817
2818 void
2819 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
2820 {
2821 struct net_device *net;
2822 dhd_info_t *dhd = dhdp->info;
2823 int i;
2824
2825 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2826
2827 ASSERT(dhd);
2828
2829 if (ifidx == ALL_INTERFACES) {
2830 /* Flow control on all active interfaces */
2831 dhdp->txoff = state;
2832 for (i = 0; i < DHD_MAX_IFS; i++) {
2833 if (dhd->iflist[i]) {
2834 net = dhd->iflist[i]->net;
2835 if (state == ON)
2836 netif_stop_queue(net);
2837 else
2838 netif_wake_queue(net);
2839 }
2840 }
2841 }
2842 else {
2843 if (dhd->iflist[ifidx]) {
2844 net = dhd->iflist[ifidx]->net;
2845 if (state == ON)
2846 netif_stop_queue(net);
2847 else
2848 netif_wake_queue(net);
2849 }
2850 }
2851 }
2852
2853
2854 #ifdef DHD_WMF
2855 bool
2856 dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
2857 {
2858 dhd_info_t *dhd = dhdp->info;
2859
2860 return dhd->rxthread_enabled;
2861 }
2862 #endif /* DHD_WMF */
2863
/* Receive path: walks the chain of up to 'numpkt' packets starting at
 * 'pktbuf', applies per-packet filtering (wlfc, L2 filter, WMF), converts
 * each to a native skb and delivers it to the network stack (directly via
 * netif_rx/netif_rx_ni, or batched to the rx thread). Broadcom event
 * packets are decoded and, depending on config, not forwarded as data.
 */
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	struct sk_buff *skb;
	uchar *eth;
	uint len;
	void *data, *pnext = NULL;
	int i;
	dhd_if_t *ifp;
	wl_event_msg_t event;
	int tout_rx = 0;	/* passed to DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE at exit */
	int tout_ctrl = 0;	/* passed to DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE at exit */
	void *skbhead = NULL;	/* head/tail of the batch handed to the rx thread */
	void *skbprev = NULL;
#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
	char *dump_data;
	uint16 protocol;
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
		struct ether_header *eh;
#ifdef WLBTAMP
		struct dot11_llc_snap_header *lsh;
#endif

		/* Detach this packet from the chain before handing it on. */
		pnext = PKTNEXT(dhdp->osh, pktbuf);
		PKTSETNEXT(dhdp->osh, pktbuf, NULL);

		ifp = dhd->iflist[ifidx];
		if (ifp == NULL) {
			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
				__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}

		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);

		/* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#else
		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#endif /* PROP_TXSTATUS_VSDB */
		{
			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
				__FUNCTION__));
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}

#ifdef WLBTAMP
		lsh = (struct dot11_llc_snap_header *)&eh[1];

		/* Match BT-AMP (L2CAP over 802.3/SNAP) frames; the body is
		 * currently a no-op (ACL_data is computed then discarded). */
		if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
			(PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
			bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
			lsh->type == HTON16(BTA_PROT_L2CAP)) {
			amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
				((uint8 *)eh + RFC1042_HDR_LEN);
			ACL_data = NULL;
		}
#endif /* WLBTAMP */

#ifdef PROP_TXSTATUS
		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
			/* WLFC may send header only packet when
			there is an urgent message but no packet to
			piggy-back on
			*/
			PKTCFREE(dhdp->osh, pktbuf, FALSE);
			continue;
		}
#endif
#ifdef DHD_L2_FILTER
		/* If block_ping is enabled drop the ping packet */
		if (dhdp->block_ping) {
			if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
				PKTFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			}
		}
#endif
#ifdef DHD_WMF
		/* WMF processing for multicast packets */
		if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
			dhd_sta_t *sta;
			int ret;

			sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
			ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
			switch (ret) {
			case WMF_TAKEN:
				/* The packet is taken by WMF. Continue to next iteration */
				continue;
			case WMF_DROP:
				/* Packet DROP decision by WMF. Toss it */
				DHD_ERROR(("%s: WMF decides to drop packet\n",
					__FUNCTION__));
				PKTCFREE(dhdp->osh, pktbuf, FALSE);
				continue;
			default:
				/* Continue the transmit path */
				break;
			}
		}
#endif /* DHD_WMF */
#ifdef DHDTCPACK_SUPPRESS
		dhd_tcpdata_info_get(dhdp, pktbuf);
#endif
		skb = PKTTONATIVE(dhdp->osh, pktbuf);

		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;

#ifdef PCIE_FULL_DONGLE
		/* Intra-BSS forwarding for AP/P2P-GO roles when ap_isolate is
		 * off: unicast to a known STA is sent back out this interface
		 * (and not passed up); multicast is forwarded on a duplicate
		 * and still delivered to the stack. */
		if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
			(!ifp->ap_isolate)) {
			eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
			if (ETHER_ISUCAST(eh->ether_dhost)) {
				if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
					dhd_sendpkt(dhdp, ifidx, pktbuf);
					continue;
				}
			} else {
				/* NOTE(review): PKTDUP() result is not checked
				 * for NULL before dhd_sendpkt() - confirm the
				 * send path tolerates a NULL packet. */
				void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
				dhd_sendpkt(dhdp, ifidx, npktbuf);
			}
		}
#endif /* PCIE_FULL_DONGLE */

		/* Get the protocol, maintain skb around eth_type_trans()
		 * The main reason for this hack is for the limitation of
		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
		 * coping of the packet coming from the network stack to add
		 * BDC, Hardware header etc, during network interface registration
		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
		 * for BDC, Hardware header etc. and not just the ETH_HLEN
		 */
		eth = skb->data;
		len = skb->len;

#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
		dump_data = skb->data;
		protocol = (dump_data[12] << 8) | dump_data[13];

		if (protocol == ETHER_TYPE_802_1X) {
			DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
				"ver %d, type %d, replay %d\n",
				dump_data[14], dump_data[15],
				dump_data[30]));
		}
#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
#if defined(DHD_RX_DUMP)
		DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
		if (protocol != ETHER_TYPE_BRCM) {
			if (dump_data[0] == 0xFF) {
				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));

				if ((dump_data[12] == 8) &&
					(dump_data[13] == 6)) {
					DHD_ERROR(("%s: ARP %d\n",
						__FUNCTION__, dump_data[0x15]));
				}
			} else if (dump_data[0] & 1) {
				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
					__FUNCTION__, MAC2STRDBG(dump_data)));
			}
#ifdef DHD_RX_FULL_DUMP
			{
				int k;
				for (k = 0; k < skb->len; k++) {
					DHD_ERROR(("%02X ", dump_data[k]));
					if ((k & 15) == 15)
						DHD_ERROR(("\n"));
				}
				DHD_ERROR(("\n"));
			}
#endif /* DHD_RX_FULL_DUMP */
		}
#endif /* DHD_RX_DUMP */

		skb->protocol = eth_type_trans(skb, skb->dev);

		if (skb->pkt_type == PACKET_MULTICAST) {
			dhd->pub.rx_multicast++;
			ifp->stats.multicast++;
		}

		/* Restore data/len changed by eth_type_trans() (see comment above). */
		skb->data = eth;
		skb->len = len;

#ifdef WLMEDIA_HTSF
		dhd_htsf_addrxts(dhdp, pktbuf);
#endif
		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Process special event packets and then discard them */
		memset(&event, 0, sizeof(event));
		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
			/* dhd_wl_host_event() receives &ifidx and may update it. */
			dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
			skb_mac_header(skb),
#else
			skb->mac.raw,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
			&event,
			&data);

			wl_event_to_host_order(&event);
			if (!tout_ctrl)
				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
#ifdef WLBTAMP
			if (event.event_type == WLC_E_BTA_HCI_EVENT) {
				dhd_bta_doevt(dhdp, data, event.datalen);
			}
#endif /* WLBTAMP */

#if defined(PNO_SUPPORT)
			if (event.event_type == WLC_E_PFN_NET_FOUND) {
				/* enforce custom wake lock to garantee that Kernel not suspended */
				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
			}
#endif /* PNO_SUPPORT */

#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
			PKTFREE(dhdp->osh, pktbuf, FALSE);
			continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
		} else {
			tout_rx = DHD_PACKET_TIMEOUT_MS;

#ifdef PROP_TXSTATUS
			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
#endif /* PROP_TXSTATUS */
		}

		/* Re-fetch ifp: ifidx may have changed via dhd_wl_host_event() above. */
		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
		ifp = dhd->iflist[ifidx];

		if (ifp->net)
			ifp->net->last_rx = jiffies;

		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
			dhdp->dstats.rx_bytes += skb->len;
			dhdp->rx_packets++; /* Local count */
			ifp->stats.rx_bytes += skb->len;
			ifp->stats.rx_packets++;
		}
#if defined(DHD_TCP_WINSIZE_ADJUST)
		if (dhd_use_tcp_window_size_adjust) {
			if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
				dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
			}
		}
#endif /* DHD_TCP_WINSIZE_ADJUST */

		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			if (dhd->rxthread_enabled) {
				/* Chain for the rx thread; flushed after the loop. */
				if (!skbhead)
					skbhead = skb;
				else
					PKTSETNEXT(dhdp->osh, skbprev, skb);
				skbprev = skb;
			} else {

				/* If the receive is not processed inside an ISR,
				 * the softirqd must be woken explicitly to service
				 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
				 * by netif_rx_ni(), but in earlier kernels, we need
				 * to do it manually.
				 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				ulong flags;
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
			}
		}
	}

	if (dhd->rxthread_enabled && skbhead)
		dhd_sched_rxf(dhdp, skbhead);

	/* Keep the host awake long enough for the stack to drain what we queued. */
	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
}
3168
/* Per-port event hook invoked from common DHD code. The Linux port
 * dispatches dongle events elsewhere, so this is an intentional no-op
 * kept only for interface parity with other OS ports.
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Nothing to do on Linux. */
}
3175
/* Transmit-completion handler: called by the bus layer when a packet
 * given to the dongle has been consumed (successfully or not).
 *
 * dhdp    - public DHD context
 * txp     - completed packet (protocol header still attached on entry)
 * success - TRUE if the dongle accepted/sent the packet
 *
 * Decrements the pending-802.1X counter (used to serialize EAPOL with
 * key-set ioctls), optionally emits a BT-AMP HCI completion event, and
 * updates per-interface tx counters when proptxstatus accounting is on.
 */
void
dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh;
	uint16 type;
#ifdef WLBTAMP
	uint len;
#endif

	/* Strip the protocol layer's tx header so eh points at the
	 * original Ethernet header.
	 */
	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);

	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
	type = ntoh16(eh->ether_type);

	/* An EAPOL frame has completed; dhd_wait_pend8021x() waits on
	 * this counter reaching zero.
	 */
	if (type == ETHER_TYPE_802_1X)
		atomic_dec(&dhd->pend_8021x_cnt);

#ifdef WLBTAMP
	/* Crack open the packet and check to see if it is BT HCI ACL data packet.
	 * If yes generate packet completion event.
	 */
	len = PKTLEN(dhdp->osh, txp);

	/* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
	if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
		struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];

		if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
			ntoh16(lsh->type) == BTA_PROT_L2CAP) {

			dhd_bta_tx_hcidata_complete(dhdp, txp, success);
		}
	}
#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
	/* With wlfc active, tx stats are counted at completion time rather
	 * than at submit time.
	 */
	if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
		dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
		uint datalen = PKTLEN(dhd->pub.osh, txp);

		if (success) {
			dhd->pub.tx_packets++;
			ifp->stats.tx_packets++;
			ifp->stats.tx_bytes += datalen;
		} else {
			ifp->stats.tx_dropped++;
		}
	}
#endif
}
3226
3227 static struct net_device_stats *
3228 dhd_get_stats(struct net_device *net)
3229 {
3230 dhd_info_t *dhd = DHD_DEV_INFO(net);
3231 dhd_if_t *ifp;
3232 int ifidx;
3233
3234 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3235
3236 ifidx = dhd_net2idx(dhd, net);
3237 if (ifidx == DHD_BAD_IF) {
3238 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
3239
3240 memset(&net->stats, 0, sizeof(net->stats));
3241 return &net->stats;
3242 }
3243
3244 ifp = dhd->iflist[ifidx];
3245 ASSERT(dhd && ifp);
3246
3247 if (dhd->pub.up) {
3248 /* Use the protocol to get dongle stats */
3249 dhd_prot_dstats(&dhd->pub);
3250 }
3251 return &ifp->stats;
3252 }
3253
/* Dedicated watchdog kthread (used when dhd_watchdog_prio >= 0).
 * Sleeps on tsk->sema; each wakeup (from the dhd_watchdog timer) runs
 * one bus-watchdog pass and re-arms the timer, compensating the next
 * period by however long this pass took. Exits when tsk->terminated is
 * set or the semaphore wait is interrupted.
 */
static int
dhd_watchdog_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_watchdog_prio > 0) {
		struct sched_param param;
		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
			dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	while (1)
		if (down_interruptible (&tsk->sema) == 0) {
			unsigned long flags;
			unsigned long jiffies_at_start = jiffies;
			unsigned long time_lapse;

			/* Pairs with the writer of tsk->terminated. */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Skip the pass entirely while the dongle is in reset. */
			if (dhd->pub.dongle_reset == FALSE) {
				DHD_TIMER(("%s:\n", __FUNCTION__));

				/* Call the bus module watchdog */
				dhd_bus_watchdog(&dhd->pub);


				DHD_GENERAL_LOCK(&dhd->pub, flags);
				/* Count the tick for reference */
				dhd->pub.tickcnt++;
				time_lapse = jiffies - jiffies_at_start;

				/* Reschedule the watchdog: subtract the time this
				 * pass consumed (clamped to one full period) so the
				 * cadence stays close to dhd_watchdog_ms.
				 */
				if (dhd->wd_timer_valid)
					mod_timer(&dhd->timer,
					    jiffies +
					    msecs_to_jiffies(dhd_watchdog_ms) -
					    min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
			}
		} else {
			/* Interrupted wait: shut the thread down. */
			break;
		}

	complete_and_exit(&tsk->completed, 0);
}
3306
3307 static void dhd_watchdog(ulong data)
3308 {
3309 dhd_info_t *dhd = (dhd_info_t *)data;
3310 unsigned long flags;
3311
3312 if (dhd->pub.dongle_reset) {
3313 return;
3314 }
3315
3316 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
3317 up(&dhd->thr_wdt_ctl.sema);
3318 return;
3319 }
3320
3321 /* Call the bus module watchdog */
3322 dhd_bus_watchdog(&dhd->pub);
3323
3324 DHD_GENERAL_LOCK(&dhd->pub, flags);
3325 /* Count the tick for reference */
3326 dhd->pub.tickcnt++;
3327
3328 /* Reschedule the watchdog */
3329 if (dhd->wd_timer_valid)
3330 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
3331 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3332
3333 }
3334
3335 #ifdef ENABLE_ADAPTIVE_SCHED
3336 static void
3337 dhd_sched_policy(int prio)
3338 {
3339 struct sched_param param;
3340 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
3341 param.sched_priority = 0;
3342 setScheduler(current, SCHED_NORMAL, &param);
3343 } else {
3344 if (get_scheduler_policy(current) != SCHED_FIFO) {
3345 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
3346 setScheduler(current, SCHED_FIFO, &param);
3347 }
3348 }
3349 }
3350 #endif /* ENABLE_ADAPTIVE_SCHED */
3351 #ifdef DEBUG_CPU_FREQ
3352 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
3353 {
3354 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
3355 struct cpufreq_freqs *freq = data;
3356 if (dhd) {
3357 if (!dhd->new_freq)
3358 goto exit;
3359 if (val == CPUFREQ_POSTCHANGE) {
3360 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3361 freq->new, freq->cpu));
3362 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
3363 }
3364 }
3365 exit:
3366 return 0;
3367 }
3368 #endif /* DEBUG_CPU_FREQ */
/* Dedicated DPC kthread (used when dhd_dpc_prio >= 0 instead of the
 * tasklet). Sleeps on the binary semaphore raised by dhd_sched_dpc();
 * each wakeup drains the bus DPC until no work remains, then drops the
 * wake lock taken by the scheduler. Exits on tsk->terminated or an
 * interrupted wait.
 */
static int
dhd_dpc_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_dpc_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	/* Pin the DPC to a core: compile-time choice wins, otherwise honor
	 * the runtime config.txt setting when present.
	 */
#ifdef CUSTOM_DPC_CPUCORE
	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
#else
	if (dhd->pub.conf->dpc_cpucore >= 0) {
		printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
		set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
	}
#endif
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_dpc = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (!binary_sema_down(tsk)) {
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_dpc_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */
			SMP_RD_BARRIER_DEPENDS();
			if (tsk->terminated) {
				break;
			}

			/* Call bus dpc unless it indicated down (then clean stop) */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
				/* Hold off the watchdog while actively draining. */
				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
				while (dhd_bus_dpc(dhd->pub.bus)) {
					/* process all data */
				}
				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
				/* Balances the lock taken in dhd_sched_dpc(). */
				DHD_OS_WAKE_UNLOCK(&dhd->pub);

			} else {
				if (dhd->pub.up)
					dhd_bus_stop(dhd->pub.bus, TRUE);
				DHD_OS_WAKE_UNLOCK(&dhd->pub);
			}
		}
		else
			break;
	}
	complete_and_exit(&tsk->completed, 0);
}
3427
/* RX-forwarding kthread: drains the queue filled by dhd_sched_rxf() and
 * hands each unlinked skb to the network stack via netif_rx_ni() (or
 * netif_rx + explicit softirq on pre-2.6 kernels). Drops the wake lock
 * taken by the scheduler after each batch. Exits on tsk->terminated or
 * an interrupted semaphore wait.
 */
static int
dhd_rxf_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
#if defined(WAIT_DEQUEUE)
#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
#endif
	dhd_pub_t *pub = &dhd->pub;

	/* This thread doesn't need any user-level access,
	 * so get rid of all our resources
	 */
	if (dhd_rxf_prio > 0)
	{
		struct sched_param param;
		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
	}

	DAEMONIZE("dhd_rxf");
	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */

	/* signal: thread has started */
	complete(&tsk->completed);
#ifdef CUSTOM_SET_CPUCORE
	dhd->pub.current_rxf = current;
#endif /* CUSTOM_SET_CPUCORE */
	/* Run until signal received */
	while (1) {
		if (down_interruptible(&tsk->sema) == 0) {
			void *skb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
			ulong flags;
#endif
#ifdef ENABLE_ADAPTIVE_SCHED
			dhd_sched_policy(dhd_rxf_prio);
#endif /* ENABLE_ADAPTIVE_SCHED */

			SMP_RD_BARRIER_DEPENDS();

			if (tsk->terminated) {
				break;
			}
			skb = dhd_rxf_dequeue(pub);

			if (skb == NULL) {
				continue;
			}
			/* Walk the chained skbs, unlinking each before it is
			 * handed to the stack (the stack owns it afterwards).
			 */
			while (skb) {
				void *skbnext = PKTNEXT(pub->osh, skb);
				PKTSETNEXT(pub->osh, skb, NULL);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
				netif_rx_ni(skb);
#else
				netif_rx(skb);
				local_irq_save(flags);
				RAISE_RX_SOFTIRQ();
				local_irq_restore(flags);

#endif
				skb = skbnext;
			}
#if defined(WAIT_DEQUEUE)
			/* Periodically yield so producers can refill the queue. */
			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
				OSL_SLEEP(1);
				watchdogTime = OSL_SYSUPTIME();
			}
#endif

			/* Balances the lock taken in dhd_sched_rxf(). */
			DHD_OS_WAKE_UNLOCK(pub);
		}
		else
			break;
	}
	complete_and_exit(&tsk->completed, 0);
}
3507
3508 #ifdef BCMPCIE
3509 void dhd_dpc_kill(dhd_pub_t *dhdp)
3510 {
3511 dhd_info_t *dhd;
3512
3513 if (!dhdp)
3514 return;
3515
3516 dhd = dhdp->info;
3517
3518 if (!dhd)
3519 return;
3520
3521 tasklet_kill(&dhd->tasklet);
3522 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
3523 }
3524 #endif /* BCMPCIE */
3525
3526 static void
3527 dhd_dpc(ulong data)
3528 {
3529 dhd_info_t *dhd;
3530
3531 dhd = (dhd_info_t *)data;
3532
3533 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
3534 * down below , wake lock is set,
3535 * the tasklet is initialized in dhd_attach()
3536 */
3537 /* Call bus dpc unless it indicated down (then clean stop) */
3538 if (dhd->pub.busstate != DHD_BUS_DOWN) {
3539 if (dhd_bus_dpc(dhd->pub.bus))
3540 tasklet_schedule(&dhd->tasklet);
3541 else
3542 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3543 } else {
3544 dhd_bus_stop(dhd->pub.bus, TRUE);
3545 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3546 }
3547 }
3548
3549 void
3550 dhd_sched_dpc(dhd_pub_t *dhdp)
3551 {
3552 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3553
3554 DHD_OS_WAKE_LOCK(dhdp);
3555 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
3556 /* If the semaphore does not get up,
3557 * wake unlock should be done here
3558 */
3559 if (!binary_sema_up(&dhd->thr_dpc_ctl))
3560 DHD_OS_WAKE_UNLOCK(dhdp);
3561 return;
3562 } else {
3563 tasklet_schedule(&dhd->tasklet);
3564 }
3565 }
3566
/* Hand an skb chain to the RX-forwarding thread: takes the wake lock
 * (released by dhd_rxf_thread after delivery), enqueues the chain, and
 * wakes the thread. With RXF_DEQUEUE_ON_BUSY, a persistently-busy queue
 * falls back to delivering directly via netif_rx_ni(); otherwise the
 * enqueue is retried until it succeeds.
 */
static void
dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
#ifdef RXF_DEQUEUE_ON_BUSY
	int ret = BCME_OK;
	int retry = 2;
#endif /* RXF_DEQUEUE_ON_BUSY */

	DHD_OS_WAKE_LOCK(dhdp);

	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
#ifdef RXF_DEQUEUE_ON_BUSY
	/* Bounded retry: give the rxf thread a chance to drain the queue. */
	do {
		ret = dhd_rxf_enqueue(dhdp, skb);
		if (ret == BCME_OK || ret == BCME_ERROR)
			break;
		else
			OSL_SLEEP(50); /* waiting for dequeueing */
	} while (retry-- > 0);

	if (retry <= 0 && ret == BCME_BUSY) {
		/* Queue still busy: bypass the thread and deliver the chain
		 * to the stack directly, unlinking each skb first.
		 */
		void *skbp = skb;

		while (skbp) {
			void *skbnext = PKTNEXT(dhdp->osh, skbp);
			PKTSETNEXT(dhdp->osh, skbp, NULL);
			netif_rx_ni(skbp);
			skbp = skbnext;
		}
		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
	}
	else {
		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
			up(&dhd->thr_rxf_ctl.sema);
		}
	}
#else /* RXF_DEQUEUE_ON_BUSY */
	/* Spin until the enqueue succeeds, then wake the rxf thread. */
	do {
		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
			break;
	} while (1);
	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
		up(&dhd->thr_rxf_ctl.sema);
	}
	return;
#endif /* RXF_DEQUEUE_ON_BUSY */
}
3615
3616 #ifdef TOE
3617 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
3618 static int
3619 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
3620 {
3621 wl_ioctl_t ioc;
3622 char buf[32];
3623 int ret;
3624
3625 memset(&ioc, 0, sizeof(ioc));
3626
3627 ioc.cmd = WLC_GET_VAR;
3628 ioc.buf = buf;
3629 ioc.len = (uint)sizeof(buf);
3630 ioc.set = FALSE;
3631
3632 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3633 buf[sizeof(buf) - 1] = '\0';
3634 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3635 /* Check for older dongle image that doesn't support toe_ol */
3636 if (ret == -EIO) {
3637 DHD_ERROR(("%s: toe not supported by device\n",
3638 dhd_ifname(&dhd->pub, ifidx)));
3639 return -EOPNOTSUPP;
3640 }
3641
3642 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3643 return ret;
3644 }
3645
3646 memcpy(toe_ol, buf, sizeof(uint32));
3647 return 0;
3648 }
3649
3650 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
3651 static int
3652 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
3653 {
3654 wl_ioctl_t ioc;
3655 char buf[32];
3656 int toe, ret;
3657
3658 memset(&ioc, 0, sizeof(ioc));
3659
3660 ioc.cmd = WLC_SET_VAR;
3661 ioc.buf = buf;
3662 ioc.len = (uint)sizeof(buf);
3663 ioc.set = TRUE;
3664
3665 /* Set toe_ol as requested */
3666
3667 strncpy(buf, "toe_ol", sizeof(buf) - 1);
3668 buf[sizeof(buf) - 1] = '\0';
3669 memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
3670
3671 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3672 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
3673 dhd_ifname(&dhd->pub, ifidx), ret));
3674 return ret;
3675 }
3676
3677 /* Enable toe globally only if any components are enabled. */
3678
3679 toe = (toe_ol != 0);
3680
3681 strcpy(buf, "toe");
3682 memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
3683
3684 if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
3685 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
3686 return ret;
3687 }
3688
3689 return 0;
3690 }
3691 #endif /* TOE */
3692
3693 #if defined(WL_CFG80211)
/* Raise the per-STA probe count ("scb_probe" iovar) to NUM_SCB_MAX_PROBE
 * for non-AP modes: read the current structure, patch scb_max_probe, and
 * write it back. Failures are logged but not propagated.
 */
void dhd_set_scb_probe(dhd_pub_t *dhd)
{
#define NUM_SCB_MAX_PROBE 3
	int ret = 0;
	wl_scb_probe_t scb_probe;
	char iovbuf[WL_EVENTING_MASK_LEN + 12];

	memset(&scb_probe, 0, sizeof(wl_scb_probe_t));

	/* Not applicable in hostap mode. */
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		return;

	bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));

	/* NOTE(review): WLC_GET_VAR issued with the set flag TRUE — looks
	 * inconsistent with other getters in this file; confirm intent.
	 */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));

	/* The GET answer is returned in place in iovbuf. */
	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));

	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;

	bcm_mkiovar("scb_probe", (char *)&scb_probe,
		sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
#undef NUM_SCB_MAX_PROBE
	return;
}
3722 #endif /* WL_CFG80211 */
3723
3724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
3725 static void
3726 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
3727 {
3728 dhd_info_t *dhd = DHD_DEV_INFO(net);
3729
3730 snprintf(info->driver, sizeof(info->driver), "wl");
3731 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
3732 }
3733
/* Minimal ethtool vtable: only drvinfo is handled here; other ethtool
 * commands (e.g. checksum offload under TOE) go through dhd_ethtool()
 * via the SIOCETHTOOL ioctl path instead.
 */
struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};
3737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
3738
3739
3740 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/* Hand-rolled SIOCETHTOOL handler (pre-dating full ethtool_ops use):
 * services ETHTOOL_GDRVINFO plus, under TOE, the RX/TX checksum-offload
 * get/set commands. uaddr is the userspace ifr_data pointer; all data
 * crosses via copy_from_user/copy_to_user.
 * Returns 0 on success or a negative errno.
 */
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		/* User-supplied name may be unterminated; bound and
		 * terminate the local copy before strcmp below.
		 */
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}

		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
3850 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
3851
/* Decide whether an ioctl error indicates a hung dongle and, if so,
 * push a HANG event up to the framework (which will reload the driver).
 * Triggers on bus timeouts / remote-I/O errors, or on a bus that went
 * down without an explicit dongle reset. Returns TRUE iff HANG was sent.
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	/* A device that was never brought up cannot be "hung". */
	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if !defined(BCMPCIE)
	/* A negative DPC pid means the driver is unloading; do not raise
	 * HANG in the middle of teardown.
	 */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif

#ifdef CONFIG_MACH_UNIVERSAL5433
	/* old revision does not send hang message */
	if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
#else
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
#endif /* CONFIG_MACH_UNIVERSAL5433 */
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
3886
/* Core ioctl dispatcher shared by the char-device and netdev paths.
 * Routes DHD_IOCTL_MAGIC requests to the local dhd_ioctl() handler and
 * everything else to the dongle via dhd_wl_ioctl(), after enforcing the
 * bus-up/wl preconditions and the EAPOL/key-set serialization rules.
 * data_buf, when non-NULL, holds ioc->len bytes (capped at
 * DHD_IOCTL_MAXLEN) already copied in from the caller.
 * Returns a BCME_* code; also feeds the result to dhd_check_hang().
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate != DHD_BUS_DATA) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	/* Shortcut pseudo-iovars for the HTSF latency instrumentation;
	 * these never reach the dongle.
	 * NOTE(review): `dhd` is not declared in this function — this
	 * branch looks like it no longer compiles when WLMEDIA_HTSF is
	 * defined; verify before enabling that option.
	 */
	if (data_buf) {
		/* short cut wl ioctl calls here */
		if (strcmp("htsf", data_buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				maxdelay = 0;
				tspktcnt = 0;
				maxdelaypktno = 0;
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			} else {
				dhd_dump_latency();
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			htsf_seqnum = 0;
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set) {
				memcpy(&tsport, data_buf + 7, 4);
			} else {
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			}
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	/* "rpc_*" iovars belong to the BT-over-FD aggregation layer. */
	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}
	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
	/* Let the hang detector inspect the outcome. */
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
4002
4003 static int
4004 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
4005 {
4006 dhd_info_t *dhd = DHD_DEV_INFO(net);
4007 dhd_ioctl_t ioc;
4008 int bcmerror = 0;
4009 int ifidx;
4010 int ret;
4011 void *local_buf = NULL;
4012 u16 buflen = 0;
4013
4014 DHD_OS_WAKE_LOCK(&dhd->pub);
4015 DHD_PERIM_LOCK(&dhd->pub);
4016
4017 /* Interface up check for built-in type */
4018 if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
4019 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
4020 DHD_PERIM_UNLOCK(&dhd->pub);
4021 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4022 return BCME_NOTUP;
4023 }
4024
4025 /* send to dongle only if we are not waiting for reload already */
4026 if (dhd->pub.hang_was_sent) {
4027 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
4028 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
4029 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4030 return OSL_ERROR(BCME_DONGLE_DOWN);
4031 }
4032
4033 ifidx = dhd_net2idx(dhd, net);
4034 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
4035
4036 if (ifidx == DHD_BAD_IF) {
4037 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
4038 DHD_PERIM_UNLOCK(&dhd->pub);
4039 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4040 return -1;
4041 }
4042
4043 #if defined(WL_WIRELESS_EXT)
4044 /* linux wireless extensions */
4045 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
4046 /* may recurse, do NOT lock */
4047 ret = wl_iw_ioctl(net, ifr, cmd);
4048 DHD_PERIM_UNLOCK(&dhd->pub);
4049 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4050 return ret;
4051 }
4052 #endif /* defined(WL_WIRELESS_EXT) */
4053
4054 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
4055 if (cmd == SIOCETHTOOL) {
4056 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
4057 DHD_PERIM_UNLOCK(&dhd->pub);
4058 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4059 return ret;
4060 }
4061 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
4062
4063 if (cmd == SIOCDEVPRIVATE+1) {
4064 ret = wl_android_priv_cmd(net, ifr, cmd);
4065 dhd_check_hang(net, &dhd->pub, ret);
4066 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4067 return ret;
4068 }
4069
4070 if (cmd != SIOCDEVPRIVATE) {
4071 DHD_PERIM_UNLOCK(&dhd->pub);
4072 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4073 return -EOPNOTSUPP;
4074 }
4075
4076 memset(&ioc, 0, sizeof(ioc));
4077
4078 #ifdef CONFIG_COMPAT
4079 if (is_compat_task()) {
4080 compat_wl_ioctl_t compat_ioc;
4081 if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
4082 bcmerror = BCME_BADADDR;
4083 goto done;
4084 }
4085 ioc.cmd = compat_ioc.cmd;
4086 ioc.buf = compat_ptr(compat_ioc.buf);
4087 ioc.len = compat_ioc.len;
4088 ioc.set = compat_ioc.set;
4089 ioc.used = compat_ioc.used;
4090 ioc.needed = compat_ioc.needed;
4091 /* To differentiate between wl and dhd read 4 more byes */
4092 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
4093 sizeof(uint)) != 0)) {
4094 bcmerror = BCME_BADADDR;
4095 goto done;
4096 }
4097 } else
4098 #endif /* CONFIG_COMPAT */
4099 {
4100 /* Copy the ioc control structure part of ioctl request */
4101 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
4102 bcmerror = BCME_BADADDR;
4103 goto done;
4104 }
4105
4106 /* To differentiate between wl and dhd read 4 more byes */
4107 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
4108 sizeof(uint)) != 0)) {
4109 bcmerror = BCME_BADADDR;
4110 goto done;
4111 }
4112 }
4113
4114 if (!capable(CAP_NET_ADMIN)) {
4115 bcmerror = BCME_EPERM;
4116 goto done;
4117 }
4118
4119 if (ioc.len > 0) {
4120 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
4121 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
4122 bcmerror = BCME_NOMEM;
4123 goto done;
4124 }
4125
4126 DHD_PERIM_UNLOCK(&dhd->pub);
4127 if (copy_from_user(local_buf, ioc.buf, buflen)) {
4128 DHD_PERIM_LOCK(&dhd->pub);
4129 bcmerror = BCME_BADADDR;
4130 goto done;
4131 }
4132 DHD_PERIM_LOCK(&dhd->pub);
4133
4134 *(char *)(local_buf + buflen) = '\0';
4135 }
4136
4137 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
4138
4139 if (!bcmerror && buflen && local_buf && ioc.buf) {
4140 DHD_PERIM_UNLOCK(&dhd->pub);
4141 if (copy_to_user(ioc.buf, local_buf, buflen))
4142 bcmerror = -EFAULT;
4143 DHD_PERIM_LOCK(&dhd->pub);
4144 }
4145
4146 done:
4147 if (local_buf)
4148 MFREE(dhd->pub.osh, local_buf, buflen+1);
4149
4150 DHD_PERIM_UNLOCK(&dhd->pub);
4151 DHD_OS_WAKE_UNLOCK(&dhd->pub);
4152
4153 return OSL_ERROR(bcmerror);
4154 }
4155
4156 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/* Toggle dongle deepsleep.
 * flag == 1: disable packet filtering and MPC, then assert the
 *            "deepsleep" iovar.
 * flag == 0: clear "deepsleep" (retrying up to MAX_TRY_CNT times,
 *            reading it back to confirm), then re-enable MPC.
 * Always returns 0; iovar failures are only logged.
 */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhdp = &dhd->pub;

	switch (flag) {
		case 1 :  /* Deepsleep on */
			DHD_ERROR(("dhd_deepsleep: ON\n"));
			/* give some time to sysioc_work before deepsleep */
			OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
			/* Disable MPC */
			powervar = 0;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);

			/* Enable Deepsleep */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;

		case 0: /* Deepsleep Off */
			DHD_ERROR(("dhd_deepsleep: OFF\n"));

			/* Disable Deepsleep */
			for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
				/* Write deepsleep = 0 ... */
				powervar = 0;
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0);

				/* ... then read it back and stop retrying once the
				 * dongle confirms it really left deepsleep.
				 */
				memset(iovbuf, 0, sizeof(iovbuf));
				bcm_mkiovar("deepsleep", (char *)&powervar, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0)) < 0) {
					DHD_ERROR(("the error of dhd deepsleep status"
						" ret value :%d\n", ret));
				} else {
					if (!(*(int *)iovbuf)) {
						DHD_ERROR(("deepsleep mode is 0,"
							" count: %d\n", cnt));
						break;
					}
				}
			}

			/* Enable MPC */
			powervar = 1;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			break;
	}

	return 0;
}
4227
/* net_device .ndo_stop handler ("ifconfig down"): stop the tx queue,
 * tear down cfg80211 state and any leftover virtual interfaces, flush
 * wlfc and the protocol layer, then power the chip off (or enter
 * deepsleep) depending on the firmware-download mode. Always returns 0.
 */
static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	/* Already down: skip straight to the power-off/bookkeeping tail.
	 * NOTE(review): ifidx is still 0 here, so the exit path may call
	 * wl_android_wifi_off() for a device that was never up — confirm
	 * this is intended.
	 */
	if (dhd->pub.up == 0) {
		goto exit;
	}

	/* Drop all stations associated through this interface. */
	dhd_if_flush_sta(DHD_DEV_IFP(net));


	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	if (ifidx == 0) {
		wl_cfg80211_down(NULL);

		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);
				dhd_net_if_unlock_local(dhd);
			}
		}
	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	/* Built-in drivers keep firmware loaded; others power the chip off
	 * on primary-interface down, or use deepsleep when configured.
	 */
	if (ifidx == 0 && !dhd_download_fw_on_driverload)
		wl_android_wifi_off(net);
	else {
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

	/* Allow a future HANG to be reported after the next open. */
	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

	printf("%s: Exit\n", __FUNCTION__);
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	return 0;
}
4303
4304 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
4305 extern bool g_first_broadcast_scan;
4306 #endif
4307
#ifdef WL11U
/* Enable 802.11u interworking in the firmware and, on success, advertise
 * the basic WNM capabilities (BSS transition + notification) needed for
 * Hotspot 2.0 Release 2.
 * Returns BCME_OK or the (negative) ioctl error code.
 */
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 enable = true;
	int ret = BCME_OK;

	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		/* fixed typo: was "enableing" */
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	if (ret == BCME_OK) {
		/* basic capabilities for HS20 REL2 */
		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
		}
	}

	return ret;
}
#endif /* WL11U */
4333
/* ndo_open handler for a DHD network interface.
 * For the primary interface (ifidx 0) this powers the chip on and
 * downloads firmware when not loaded at driver load time, brings up the
 * bus, applies the dongle MAC address, and brings up cfg80211.
 * On any failure the partially-opened state is unwound via dhd_stop().
 * Returns 0 on success, -1 on failure.
 */
static int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
	int ifidx;
	int32 ret = 0;

	printf("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	/* serialize open against the SDIO probe path */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	/* reset stale error state from a previous session */
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;

#if 0
	/*
	 * Force start if ifconfig_up gets called before START command
	 * We keep WEXT's wl_control_wl_start to provide backward compatibility
	 * This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif
			/* power the chip on and download firmware now */
			ret = wl_android_wifi_on(net);
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}

		if (dhd->pub.busstate != DHD_BUS_DATA) {

			/* try to bring up bus */
			/* PERIM lock is dropped because dhd_bus_start takes it itself */
			DHD_PERIM_UNLOCK(&dhd->pub);
			ret = dhd_bus_start(&dhd->pub);
			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}

		}
		if (dhd_download_fw_on_driverload) {
			if (dhd->pub.conf->deepsleep)
				dhd_deepsleep(dhd, 0);
		}

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		else
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
#endif /* TOE */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(NULL))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		dhd_set_scb_probe(&dhd->pub);
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

#ifdef BCMDBGFS
	dhd_dbg_init(&dhd->pub);
#endif

	OLD_MOD_INC_USE_COUNT;
exit:
	/* on failure unwind whatever was brought up; dhd_stop re-acquires
	 * the wake/perim locks itself
	 */
	if (ret)
		dhd_stop(net);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
	return ret;
}
4468
4469 int dhd_do_driver_init(struct net_device *net)
4470 {
4471 dhd_info_t *dhd = NULL;
4472
4473 if (!net) {
4474 DHD_ERROR(("Primary Interface not initialized \n"));
4475 return -EINVAL;
4476 }
4477
4478 #ifdef MULTIPLE_SUPPLICANT
4479 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
4480 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
4481 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
4482 return 0;
4483 }
4484 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
4485 #endif /* MULTIPLE_SUPPLICANT */
4486
4487 /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
4488 dhd = DHD_DEV_INFO(net);
4489
4490 /* If driver is already initialized, do nothing
4491 */
4492 if (dhd->pub.busstate == DHD_BUS_DATA) {
4493 DHD_TRACE(("Driver already Inititalized. Nothing to do"));
4494 return 0;
4495 }
4496
4497 if (dhd_open(net) < 0) {
4498 DHD_ERROR(("Driver Init Failed \n"));
4499 return -1;
4500 }
4501
4502 return 0;
4503 }
4504
4505 int
4506 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4507 {
4508
4509 #ifdef WL_CFG80211
4510 if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4511 return BCME_OK;
4512 #endif
4513
4514 /* handle IF event caused by wl commands, SoftAP, WEXT and
4515 * anything else. This has to be done asynchronously otherwise
4516 * DPC will be blocked (and iovars will timeout as DPC has no chance
4517 * to read the response back)
4518 */
4519 if (ifevent->ifidx > 0) {
4520 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4521
4522 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4523 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4524 strncpy(if_event->name, name, IFNAMSIZ);
4525 if_event->name[IFNAMSIZ - 1] = '\0';
4526 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
4527 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
4528 }
4529
4530 return BCME_OK;
4531 }
4532
4533 int
4534 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
4535 {
4536 dhd_if_event_t *if_event;
4537
4538 #if defined(WL_CFG80211) && !defined(P2PONEINT)
4539 if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
4540 return BCME_OK;
4541 #endif /* WL_CFG80211 */
4542
4543 /* handle IF event caused by wl commands, SoftAP, WEXT and
4544 * anything else
4545 */
4546 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
4547 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
4548 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
4549 strncpy(if_event->name, name, IFNAMSIZ);
4550 if_event->name[IFNAMSIZ - 1] = '\0';
4551 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
4552 dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
4553
4554 return BCME_OK;
4555 }
4556
4557 /* unregister and free the existing net_device interface (if any) in iflist and
4558 * allocate a new one. the slot is reused. this function does NOT register the
4559 * new interface to linux kernel. dhd_register_if does the job
4560 */
4561 struct net_device*
4562 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
4563 uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
4564 {
4565 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
4566 dhd_if_t *ifp;
4567
4568 ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
4569 ifp = dhdinfo->iflist[ifidx];
4570
4571 if (ifp != NULL) {
4572 if (ifp->net != NULL) {
4573 DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
4574
4575 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
4576
4577 /* in unregister_netdev case, the interface gets freed by net->destructor
4578 * (which is set to free_netdev)
4579 */
4580 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
4581 free_netdev(ifp->net);
4582 } else {
4583 netif_stop_queue(ifp->net);
4584 if (need_rtnl_lock)
4585 unregister_netdev(ifp->net);
4586 else
4587 unregister_netdevice(ifp->net);
4588 }
4589 ifp->net = NULL;
4590 }
4591 } else {
4592 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
4593 if (ifp == NULL) {
4594 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
4595 return NULL;
4596 }
4597 }
4598
4599 memset(ifp, 0, sizeof(dhd_if_t));
4600 ifp->info = dhdinfo;
4601 ifp->idx = ifidx;
4602 ifp->bssidx = bssidx;
4603 if (mac != NULL)
4604 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
4605
4606 /* Allocate etherdev, including space for private structure */
4607 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
4608 if (ifp->net == NULL) {
4609 DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
4610 goto fail;
4611 }
4612
4613 /* Setup the dhd interface's netdevice private structure. */
4614 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
4615
4616 if (name && name[0]) {
4617 strncpy(ifp->net->name, name, IFNAMSIZ);
4618 ifp->net->name[IFNAMSIZ - 1] = '\0';
4619 }
4620 #ifdef WL_CFG80211
4621 if (ifidx == 0)
4622 ifp->net->destructor = free_netdev;
4623 else
4624 ifp->net->destructor = dhd_netdev_free;
4625 #else
4626 ifp->net->destructor = free_netdev;
4627 #endif /* WL_CFG80211 */
4628 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
4629 ifp->name[IFNAMSIZ - 1] = '\0';
4630 dhdinfo->iflist[ifidx] = ifp;
4631
4632 #ifdef PCIE_FULL_DONGLE
4633 /* Initialize STA info list */
4634 INIT_LIST_HEAD(&ifp->sta_list);
4635 DHD_IF_STA_LIST_LOCK_INIT(ifp);
4636 #endif /* PCIE_FULL_DONGLE */
4637
4638 return ifp->net;
4639
4640 fail:
4641 if (ifp != NULL) {
4642 if (ifp->net != NULL) {
4643 dhd_dev_priv_clear(ifp->net);
4644 free_netdev(ifp->net);
4645 ifp->net = NULL;
4646 }
4647 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
4648 ifp = NULL;
4649 }
4650 dhdinfo->iflist[ifidx] = NULL;
4651 return NULL;
4652 }
4653
/* unregister and free the net_device interface associated with the indexed
 * slot, also free the slot memory and set the slot pointer to NULL.
 * need_rtnl_lock: TRUE when the caller does NOT already hold rtnl_lock
 * (use unregister_netdev); FALSE when it does (use unregister_netdevice).
 * Returns BCME_OK (also when the slot was already empty).
 */
int
dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;

	ifp = dhdinfo->iflist[ifidx];
	if (ifp != NULL) {
		if (ifp->net != NULL) {
			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));

			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_stop_queue(ifp->net);



#ifdef SET_RPS_CPUS
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
		}
#ifdef DHD_WMF
		dhd_wmf_cleanup(dhdpub, ifidx);
#endif /* DHD_WMF */

		/* release any associated STA records before freeing the slot */
		dhd_if_del_sta_list(ifp);

		dhdinfo->iflist[ifidx] = NULL;
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));

	}

	return BCME_OK;
}
4701
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface (has open/stop handlers) */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	/* ndo_set_multicast_list was replaced by ndo_set_rx_mode in 3.2 */
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

/* net_device_ops for virtual interfaces; open/stop intentionally absent */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

#ifdef P2PONEINT
extern int wl_cfgp2p_if_open(struct net_device *net);
extern int wl_cfgp2p_if_stop(struct net_device *net);

/* net_device_ops for the dedicated P2P virtual interface (P2PONEINT builds);
 * open/stop are routed to the cfgp2p layer
 */
static struct net_device_ops dhd_cfgp2p_ops_virt = {
	.ndo_open = wl_cfgp2p_if_open,
	.ndo_stop = wl_cfgp2p_if_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#endif /* P2PONEINT */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
4748
4749 #ifdef DEBUGGER
4750 extern void debugger_init(void *bus_handle);
4751 #endif
4752
4753
4754 #ifdef SHOW_LOGTRACE
4755 static char *logstrs_path = "/root/logstrs.bin";
4756 module_param(logstrs_path, charp, S_IRUGO);
4757
4758 int
4759 dhd_init_logstrs_array(dhd_event_log_t *temp)
4760 {
4761 struct file *filep = NULL;
4762 struct kstat stat;
4763 mm_segment_t fs;
4764 char *raw_fmts = NULL;
4765 int logstrs_size = 0;
4766
4767 logstr_header_t *hdr = NULL;
4768 uint32 *lognums = NULL;
4769 char *logstrs = NULL;
4770 int ram_index = 0;
4771 char **fmts;
4772 int num_fmts = 0;
4773 uint32 i = 0;
4774 int error = 0;
4775 set_fs(KERNEL_DS);
4776 fs = get_fs();
4777 filep = filp_open(logstrs_path, O_RDONLY, 0);
4778 if (IS_ERR(filep)) {
4779 DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
4780 goto fail;
4781 }
4782 error = vfs_stat(logstrs_path, &stat);
4783 if (error) {
4784 DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
4785 goto fail;
4786 }
4787 logstrs_size = (int) stat.size;
4788
4789 raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
4790 if (raw_fmts == NULL) {
4791 DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
4792 goto fail;
4793 }
4794 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
4795 DHD_ERROR(("Error: Log strings file read failed\n"));
4796 goto fail;
4797 }
4798
4799 /* Remember header from the logstrs.bin file */
4800 hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
4801 sizeof(logstr_header_t));
4802
4803 if (hdr->log_magic == LOGSTRS_MAGIC) {
4804 /*
4805 * logstrs.bin start with header.
4806 */
4807 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
4808 ram_index = (hdr->ram_lognums_offset -
4809 hdr->rom_lognums_offset) / sizeof(uint32);
4810 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
4811 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
4812 } else {
4813 /*
4814 * Legacy logstrs.bin format without header.
4815 */
4816 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
4817 if (num_fmts == 0) {
4818 /* Legacy ROM/RAM logstrs.bin format:
4819 * - ROM 'lognums' section
4820 * - RAM 'lognums' section
4821 * - ROM 'logstrs' section.
4822 * - RAM 'logstrs' section.
4823 *
4824 * 'lognums' is an array of indexes for the strings in the
4825 * 'logstrs' section. The first uint32 is 0 (index of first
4826 * string in ROM 'logstrs' section).
4827 *
4828 * The 4324b5 is the only ROM that uses this legacy format. Use the
4829 * fixed number of ROM fmtnums to find the start of the RAM
4830 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
4831 * find the ROM 'logstrs' section.
4832 */
4833 #define NUM_4324B5_ROM_FMTS 186
4834 #define FIRST_4324B5_ROM_LOGSTR "Con\n"
4835 ram_index = NUM_4324B5_ROM_FMTS;
4836 lognums = (uint32 *) raw_fmts;
4837 num_fmts = ram_index;
4838 logstrs = (char *) &raw_fmts[num_fmts << 2];
4839 while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
4840 num_fmts++;
4841 logstrs = (char *) &raw_fmts[num_fmts << 2];
4842 }
4843 } else {
4844 /* Legacy RAM-only logstrs.bin format:
4845 * - RAM 'lognums' section
4846 * - RAM 'logstrs' section.
4847 *
4848 * 'lognums' is an array of indexes for the strings in the
4849 * 'logstrs' section. The first uint32 is an index to the
4850 * start of 'logstrs'. Therefore, if this index is divided
4851 * by 'sizeof(uint32)' it provides the number of logstr
4852 * entries.
4853 */
4854 ram_index = 0;
4855 lognums = (uint32 *) raw_fmts;
4856 logstrs = (char *) &raw_fmts[num_fmts << 2];
4857 }
4858 }
4859 fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
4860 if (fmts == NULL) {
4861 DHD_ERROR(("Failed to allocate fmts memory\n"));
4862 goto fail;
4863 }
4864
4865 for (i = 0; i < num_fmts; i++) {
4866 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
4867 * (they are 0-indexed relative to 'rom_logstrs_offset').
4868 *
4869 * RAM lognums are already indexed to point to the correct RAM logstrs (they
4870 * are 0-indexed relative to the start of the logstrs.bin file).
4871 */
4872 if (i == ram_index) {
4873 logstrs = raw_fmts;
4874 }
4875 fmts[i] = &logstrs[lognums[i]];
4876 }
4877 temp->fmts = fmts;
4878 temp->raw_fmts = raw_fmts;
4879 temp->num_fmts = num_fmts;
4880 filp_close(filep, NULL);
4881 set_fs(fs);
4882 return 0;
4883 fail:
4884 if (raw_fmts) {
4885 kfree(raw_fmts);
4886 raw_fmts = NULL;
4887 }
4888 if (!IS_ERR(filep))
4889 filp_close(filep, NULL);
4890 set_fs(fs);
4891 temp->fmts = NULL;
4892 return -1;
4893 }
4894 #endif /* SHOW_LOGTRACE */
4895
4896
/* Allocate and initialize the per-device dhd_info structure: locks,
 * wakelocks, the primary net_device, the protocol layer, cfg80211/WEXT,
 * watchdog/DPC/RXF threads and the PM/inetaddr notifiers.
 * Returns the embedded dhd_pub_t on success; on failure everything
 * attached so far is torn down via dhd_detach/dhd_free and NULL is
 * returned. dhd_state tracks progress so the fail path knows what to undo.
 */
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
{
	dhd_info_t *dhd = NULL;
	struct net_device *net = NULL;
	char if_name[IFNAMSIZ] = {'\0'};
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
	wifi_adapter_info_t *adapter = NULL;

	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* will implement get_ids for DBUS later */
#if defined(BCMSDIO)
	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
#endif
	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);

	/* Allocate primary dhd_info (preallocated platform memory preferred,
	 * heap as fallback)
	 */
	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
	if (dhd == NULL) {
		dhd = MALLOC(osh, sizeof(dhd_info_t));
		if (dhd == NULL) {
			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
			goto fail;
		}
	}
	memset(dhd, 0, sizeof(dhd_info_t));
	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;

	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */

	dhd->pub.osh = osh;
	dhd->adapter = adapter;

#ifdef GET_CUSTOM_MAC_ENABLE
	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
#endif /* GET_CUSTOM_MAC_ENABLE */
	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;

	/* Initialize thread based operation and lock */
	sema_init(&dhd->sdsem, 1);

	/* Link to info module */
	dhd->pub.info = dhd;


	/* Link to bus module */
	dhd->pub.bus = bus;
	dhd->pub.hdrlen = bus_hdrlen;

	/* dhd_conf must be attached after linking dhd to dhd->pub.info,
	 * because dhd_detech will check .info is NULL or not.
	 */
	if (dhd_conf_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_conf_attach failed\n"));
		goto fail;
	}
	dhd_conf_reset(&dhd->pub);
	dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
	dhd_conf_preinit(&dhd->pub);

	/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
	 * This is indeed a hack but we have to make it work properly before we have a better
	 * solution
	 */
	dhd_update_fw_nv_path(dhd);
#ifndef BUILD_IN_KERNEL
	dhd_conf_read_config(&dhd->pub, dhd->conf_path);
#endif

	/* Set network interface name if it was provided as module parameter */
	if (iface_name[0]) {
		int len;
		char ch;
		strncpy(if_name, iface_name, IFNAMSIZ);
		if_name[IFNAMSIZ - 1] = 0;
		len = strlen(if_name);
		ch = if_name[len - 1];
		/* append "%d" so the kernel numbers the interface, unless the
		 * given name already ends in a digit or there is no room
		 */
		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
			strcat(if_name, "%d");
	}
	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
	if (net == NULL)
		goto fail;
	dhd_state |= DHD_ATTACH_STATE_ADD_IF;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif

	sema_init(&dhd->proto_sem, 1);

#ifdef PROP_TXSTATUS
	spin_lock_init(&dhd->wlfc_spinlock);

	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
	dhd->pub.plat_init = dhd_wlfc_plat_init;
	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
#endif /* PROP_TXSTATUS */

	/* Initialize other structure content */
	init_waitqueue_head(&dhd->ioctl_resp_wait);
	init_waitqueue_head(&dhd->ctrl_wait);

	/* Initialize the spinlocks */
	spin_lock_init(&dhd->sdlock);
	spin_lock_init(&dhd->txqlock);
	spin_lock_init(&dhd->dhd_lock);
	spin_lock_init(&dhd->rxf_lock);
#if defined(RXFRAME_THREAD)
	dhd->rxthread_enabled = TRUE;
#endif /* defined(RXFRAME_THREAD) */

#ifdef DHDTCPACK_SUPPRESS
	spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */

	/* Initialize Wakelock stuff */
	spin_lock_init(&dhd->wakelock_spinlock);
	dhd->wakelock_counter = 0;
	dhd->wakelock_wd_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	mutex_init(&dhd->dhd_net_if_mutex);
	mutex_init(&dhd->dhd_suspend_mutex);
#endif
	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;

	/* Attach and link in the protocol */
	if (dhd_prot_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_prot_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;

#ifdef WL_CFG80211
	/* Attach and link in the cfg80211 */
	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
		DHD_ERROR(("wl_cfg80211_attach failed\n"));
		goto fail;
	}

	dhd_monitor_init(&dhd->pub);
	dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif
#if defined(WL_WIRELESS_EXT)
	/* Attach and link in the iw (only when cfg80211 did not attach) */
	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
			DHD_ERROR(("wl_iw_attach failed\n"));
			goto fail;
		}
		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef SHOW_LOGTRACE
	dhd_init_logstrs_array(&dhd->event_data);
#endif /* SHOW_LOGTRACE */

	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
		DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
		goto fail;
	}


	/* Set up the watchdog timer */
	init_timer(&dhd->timer);
	dhd->timer.data = (ulong)dhd;
	dhd->timer.function = dhd_watchdog;
	dhd->default_wd_interval = dhd_watchdog_ms;

	if (dhd_watchdog_prio >= 0) {
		/* Initialize watchdog thread */
		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");

	} else {
		dhd->thr_wdt_ctl.thr_pid = -1;
	}

#ifdef DEBUGGER
	debugger_init((void *) bus);
#endif

	/* Set up the bottom half handler */
	if (dhd_dpc_prio >= 0) {
		/* Initialize DPC thread */
		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
	} else {
		/* use tasklet for dpc */
		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
		dhd->thr_dpc_ctl.thr_pid = -1;
	}

	if (dhd->rxthread_enabled) {
		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
		/* Initialize RXF thread */
		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
	}

	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;

#if defined(CONFIG_PM_SLEEP)
	/* register kernel notifiers only once across attach cycles */
	if (!dhd_pm_notifier_registered) {
		dhd_pm_notifier_registered = TRUE;
		register_pm_notifier(&dhd_pm_notifier);
	}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	dhd->pend_ipaddr = 0;
	if (!dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = TRUE;
		register_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
	if (!dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = TRUE;
		register_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif
	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
#ifdef DEBUG_CPU_FREQ
	dhd->new_freq = alloc_percpu(int);
	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
#ifdef DHDTCPACK_SUPPRESS
#ifdef BCMSDIO
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
#elif defined(BCMPCIE)
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
#else
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMSDIO */
#endif /* DHDTCPACK_SUPPRESS */

	dhd_state |= DHD_ATTACH_STATE_DONE;
	dhd->dhd_state = dhd_state;

	dhd_found++;
#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
	dhd_global = dhd;
#endif /* CUSTOMER_HW20 && WLANAUDIO */
	return &dhd->pub;

fail:
	/* unwind: dhd_detach consults dhd_state to free only what exists */
	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
			__FUNCTION__, dhd_state, &dhd->pub));
		dhd->dhd_state = dhd_state;
		dhd_detach(&dhd->pub);
		dhd_free(&dhd->pub);
	}

	return NULL;
}
5179
5180 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
5181 {
5182 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
5183 return DHD_FLAG_HOSTAP_MODE;
5184 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
5185 return DHD_FLAG_P2P_MODE;
5186 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
5187 return DHD_FLAG_IBSS_MODE;
5188 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
5189 return DHD_FLAG_MFG_MODE;
5190
5191 return DHD_FLAG_STA_MODE;
5192 }
5193
/* Resolve the firmware, nvram and config file paths into
 * dhdinfo->fw_path / nv_path / conf_path.
 * Priority (lowest to highest): compile-time CONFIG_BCMDHD_* defaults,
 * adapter info (initialization only), then the firmware_path/nvram_path/
 * config_path module parameters. A trailing newline (e.g. from
 * "echo path > /sys/module/...") is stripped from each path.
 * Returns TRUE on success, FALSE when a path is too long or (for
 * non-BCMEMBEDIMAGE builds) fw/nv paths remain unset.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
	int fw_len;
	int nv_len;
	int conf_len;
	const char *fw = NULL;
	const char *nv = NULL;
	const char *conf = NULL;
	wifi_adapter_info_t *adapter = dhdinfo->adapter;


	/* Update firmware and nvram path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
//	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_FW_PATH
		fw = CONFIG_BCMDHD_FW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
		nv = CONFIG_BCMDHD_NVRAM_PATH;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
//	}

	/* check if we need to initialize the path */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;

	}
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	}
	if (dhdinfo->conf_path[0] == '\0') {
		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
			conf = adapter->conf_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	if (firmware_path[0] != '\0')
		fw = firmware_path;
	if (nvram_path[0] != '\0')
		nv = nvram_path;
	if (config_path[0] != '\0')
		conf = config_path;

	/* the length checks below guarantee strncpy NUL-terminates:
	 * the source is strictly shorter than the destination buffer
	 */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->fw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
		if (dhdinfo->fw_path[fw_len-1] == '\n')
			dhdinfo->fw_path[fw_len-1] = '\0';
	}
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= sizeof(dhdinfo->nv_path)) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
		if (dhdinfo->nv_path[nv_len-1] == '\n')
			dhdinfo->nv_path[nv_len-1] = '\0';
	}
	if (conf && conf[0] != '\0') {
		conf_len = strlen(conf);
		if (conf_len >= sizeof(dhdinfo->conf_path)) {
			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
		if (dhdinfo->conf_path[conf_len-1] == '\n')
			dhdinfo->conf_path[conf_len-1] = '\0';
	}

#if 0
	/* clear the path in module parameter */
	firmware_path[0] = '\0';
	nvram_path[0] = '\0';
	config_path[0] = '\0';
#endif

#ifndef BCMEMBEDIMAGE
	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
		return FALSE;
	}
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
		return FALSE;
	}
	if (dhdinfo->conf_path[0] == '\0') {
		/* derive a default config path from the nvram path */
		dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
	}
#ifdef CONFIG_PATH_AUTO_SELECT
	dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path);
#endif
#endif /* BCMEMBEDIMAGE */

	return TRUE;
}
5309
5310
/*
 * dhd_bus_start() - bring the dongle bus up into the DATA state.
 *
 * Downloads firmware/nvram/config when the bus is still DOWN, starts the
 * watchdog, initializes the bus and (build-dependent) OOB interrupt path,
 * sets up PCIe flow rings, runs protocol init, and finally syncs state with
 * the dongle.  Runs entirely under the per-instance DHD_PERIM lock.
 *
 * @dhdp: public DHD context (its ->info backpointer must be valid).
 *
 * Returns 0 on success; a negative bus/ioctl error, -ENETDOWN when the bus
 * never reached the LOAD state, or -ENODEV when OOB registration or the
 * final bus-state check fails.
 */
int
dhd_bus_start(dhd_pub_t *dhdp)
{
	int ret = -1;
	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
	unsigned long flags;

	ASSERT(dhd);

	DHD_TRACE(("Enter %s:\n", __FUNCTION__));

	DHD_PERIM_LOCK(dhdp);

	/* try to download image and nvram to the dongle */
	if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
		DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
			__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
			dhd->fw_path, dhd->nv_path, dhd->conf_path);
		if (ret < 0) {
			DHD_ERROR(("%s: failed to download firmware %s\n",
				__FUNCTION__, dhd->fw_path));
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
	}
	/* A successful download moves busstate to LOAD; anything else is fatal */
	if (dhd->pub.busstate != DHD_BUS_LOAD) {
		DHD_PERIM_UNLOCK(dhdp);
		return -ENETDOWN;
	}

	dhd_os_sdlock(dhdp);

	/* Start the watchdog timer */
	dhd->pub.tickcnt = 0;
	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);

	/* Bring up the bus */
	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {

		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}
#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
#if defined(BCMPCIE_OOB_HOST_WAKE)
	/* PCIe host-wake path registers the OOB IRQ without holding the sdlock */
	dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* Host registration for OOB interrupt */
	if (dhd_bus_oob_intr_register(dhdp)) {
		/* deactivate timer and wait for the handler to finish */
#if !defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);

		dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
		return -ENODEV;
	}

#if defined(BCMPCIE_OOB_HOST_WAKE)
	/* re-acquire the sdlock dropped above before touching the bus again */
	dhd_os_sdlock(dhdp);
	dhd_bus_oob_intr_set(dhdp, TRUE);
#else
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#elif defined(FORCE_WOWLAN)
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif
#ifdef PCIE_FULL_DONGLE
	{
		uint8 txpush = 0;
		uint32 num_flowrings; /* includes H2D common rings */
		num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
		DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
			num_flowrings));
		if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
			dhd_os_sdunlock(dhdp);
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
	}
#endif /* PCIE_FULL_DONGLE */

	/* Do protocol initialization necessary for IOCTL/IOVAR */
	dhd_prot_init(&dhd->pub);

	/* If bus is not ready, can't come up */
	if (dhd->pub.busstate != DHD_BUS_DATA) {
		/* stop the watchdog before tearing down */
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

	dhd_os_sdunlock(dhdp);

	/* Bus is ready, query any dongle information */
	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}

#ifdef ARP_OFFLOAD_SUPPORT
	/* Replay an IP address that arrived while the bus was still down */
	if (dhd->pend_ipaddr) {
#ifdef AOE_IP_ALIAS_SUPPORT
		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
#endif /* AOE_IP_ALIAS_SUPPORT */
		dhd->pend_ipaddr = 0;
	}
#endif /* ARP_OFFLOAD_SUPPORT */

	DHD_PERIM_UNLOCK(dhdp);
	return 0;
}
5439
5440 #ifdef WLTDLS
5441 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
5442 {
5443 char iovbuf[WLC_IOCTL_SMLEN];
5444 uint32 tdls = tdls_on;
5445 int ret = 0;
5446 uint32 tdls_auto_op = 0;
5447 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
5448 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
5449 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
5450 BCM_REFERENCE(mac);
5451 if (!FW_SUPPORTED(dhd, tdls))
5452 return BCME_ERROR;
5453
5454 if (dhd->tdls_enable == tdls_on)
5455 goto auto_mode;
5456 bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
5457 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
5458 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
5459 goto exit;
5460 }
5461 dhd->tdls_enable = tdls_on;
5462 auto_mode:
5463
5464 tdls_auto_op = auto_on;
5465 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
5466 iovbuf, sizeof(iovbuf));
5467 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5468 sizeof(iovbuf), TRUE, 0)) < 0) {
5469 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
5470 goto exit;
5471 }
5472
5473 if (tdls_auto_op) {
5474 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
5475 sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
5476 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5477 sizeof(iovbuf), TRUE, 0)) < 0) {
5478 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
5479 goto exit;
5480 }
5481 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
5482 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5483 sizeof(iovbuf), TRUE, 0)) < 0) {
5484 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
5485 goto exit;
5486 }
5487 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
5488 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
5489 sizeof(iovbuf), TRUE, 0)) < 0) {
5490 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
5491 goto exit;
5492 }
5493 }
5494
5495 exit:
5496 return ret;
5497 }
5498
5499 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
5500 {
5501 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5502 int ret = 0;
5503 if (dhd)
5504 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
5505 else
5506 ret = BCME_ERROR;
5507 return ret;
5508 }
5509 #ifdef PCIE_FULL_DONGLE
5510 void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
5511 {
5512 dhd_info_t *dhd = DHD_DEV_INFO(dev);
5513 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
5514 tdls_peer_node_t *cur = dhdp->peer_tbl.node;
5515 tdls_peer_node_t *new = NULL, *prev = NULL;
5516 dhd_if_t *dhdif;
5517 uint8 sa[ETHER_ADDR_LEN];
5518 int ifidx = dhd_net2idx(dhd, dev);
5519
5520 if (ifidx == DHD_BAD_IF)
5521 return;
5522
5523 dhdif = dhd->iflist[ifidx];
5524 memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
5525
5526 if (connect) {
5527 while (cur != NULL) {
5528 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5529 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
5530 __FUNCTION__, __LINE__));
5531 return;
5532 }
5533 cur = cur->next;
5534 }
5535
5536 new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
5537 if (new == NULL) {
5538 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
5539 return;
5540 }
5541 memcpy(new->addr, da, ETHER_ADDR_LEN);
5542 new->next = dhdp->peer_tbl.node;
5543 dhdp->peer_tbl.node = new;
5544 dhdp->peer_tbl.tdls_peer_count++;
5545
5546 } else {
5547 while (cur != NULL) {
5548 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
5549 dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
5550 if (prev)
5551 prev->next = cur->next;
5552 else
5553 dhdp->peer_tbl.node = cur->next;
5554 MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
5555 dhdp->peer_tbl.tdls_peer_count--;
5556 return;
5557 }
5558 prev = cur;
5559 cur = cur->next;
5560 }
5561 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
5562 }
5563 }
5564 #endif /* PCIE_FULL_DONGLE */
5565 #endif
5566
5567 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
5568 {
5569 if (!dhd)
5570 return FALSE;
5571
5572 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
5573 return TRUE;
5574 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
5575 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
5576 return TRUE;
5577 else
5578 return FALSE;
5579 }
5580 #if !defined(AP) && defined(WLP2P)
/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the
 * firmware is named fw_bcmdhd.bin. We therefore probe whether P2P is enabled in the STA
 * firmware and, if so, enable concurrent mode (apply the P2P settings). The SoftAP
 * firmware is still named fw_bcmdhd_apsta.
 */
5586 uint32
5587 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
5588 {
5589 int32 ret = 0;
5590 char buf[WLC_IOCTL_SMLEN];
5591 bool mchan_supported = FALSE;
5592 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
5593 * test mode, that means we only will use the mode as it is
5594 */
5595 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
5596 return 0;
5597 if (FW_SUPPORTED(dhd, vsdb)) {
5598 mchan_supported = TRUE;
5599 }
5600 if (!FW_SUPPORTED(dhd, p2p)) {
5601 DHD_TRACE(("Chip does not support p2p\n"));
5602 return 0;
5603 }
5604 else {
5605 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
5606 memset(buf, 0, sizeof(buf));
5607 bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
5608 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
5609 FALSE, 0)) < 0) {
5610 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
5611 return 0;
5612 }
5613 else {
5614 if (buf[0] == 1) {
5615 /* By default, chip supports single chan concurrency,
5616 * now lets check for mchan
5617 */
5618 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
5619 if (mchan_supported)
5620 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
5621 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
5622 /* For customer_hw4, although ICS,
5623 * we still support concurrent mode
5624 */
5625 return ret;
5626 #else
5627 return 0;
5628 #endif
5629 }
5630 }
5631 }
5632 return 0;
5633 }
5634 #endif
5635
5636 #ifdef SUPPORT_AP_POWERSAVE
5637 #define RXCHAIN_PWRSAVE_PPS 10
5638 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
5639 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
5640 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
5641 {
5642 char iovbuf[128];
5643 int32 pps = RXCHAIN_PWRSAVE_PPS;
5644 int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
5645 int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
5646
5647 if (enable) {
5648 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5649 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5650 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5651 DHD_ERROR(("Failed to enable AP power save\n"));
5652 }
5653 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf));
5654 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5655 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5656 DHD_ERROR(("Failed to set pps\n"));
5657 }
5658 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time,
5659 4, iovbuf, sizeof(iovbuf));
5660 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5661 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5662 DHD_ERROR(("Failed to set quiet time\n"));
5663 }
5664 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check,
5665 4, iovbuf, sizeof(iovbuf));
5666 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5667 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5668 DHD_ERROR(("Failed to set stas assoc check\n"));
5669 }
5670 } else {
5671 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf));
5672 if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR,
5673 iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) {
5674 DHD_ERROR(("Failed to disable AP power save\n"));
5675 }
5676 }
5677
5678 return 0;
5679 }
5680 #endif /* SUPPORT_AP_POWERSAVE */
5681
5682
5683 #if defined(READ_CONFIG_FROM_FILE)
5684 #include <linux/fs.h>
5685 #include <linux/ctype.h>
5686
5687 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
5688 bool PM_control = TRUE;
5689
5690 static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
5691 {
5692 int var_int;
5693 wl_country_t cspec = {{0}, -1, {0}};
5694 char *revstr;
5695 char *endptr = NULL;
5696 int iolen;
5697 char smbuf[WLC_IOCTL_SMLEN*2];
5698
5699 if (!strcmp(name, "country")) {
5700 revstr = strchr(value, '/');
5701 if (revstr) {
5702 cspec.rev = strtoul(revstr + 1, &endptr, 10);
5703 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5704 cspec.country_abbrev[2] = '\0';
5705 memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
5706 } else {
5707 cspec.rev = -1;
5708 memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
5709 memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
5710 get_customized_country_code(dhd->info->adapter,
5711 (char *)&cspec.country_abbrev, &cspec);
5712 }
5713 memset(smbuf, 0, sizeof(smbuf));
5714 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
5715 cspec.country_abbrev, cspec.rev));
5716 iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
5717 smbuf, sizeof(smbuf));
5718 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5719 smbuf, iolen, TRUE, 0);
5720 } else if (!strcmp(name, "roam_scan_period")) {
5721 var_int = (int)simple_strtol(value, NULL, 0);
5722 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
5723 &var_int, sizeof(var_int), TRUE, 0);
5724 } else if (!strcmp(name, "roam_delta")) {
5725 struct {
5726 int val;
5727 int band;
5728 } x;
5729 x.val = (int)simple_strtol(value, NULL, 0);
5730 /* x.band = WLC_BAND_AUTO; */
5731 x.band = WLC_BAND_ALL;
5732 return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
5733 } else if (!strcmp(name, "roam_trigger")) {
5734 int ret = 0;
5735
5736 roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
5737 roam_trigger[1] = WLC_BAND_ALL;
5738 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
5739 sizeof(roam_trigger), TRUE, 0);
5740
5741 return ret;
5742 } else if (!strcmp(name, "PM")) {
5743 int ret = 0;
5744 var_int = (int)simple_strtol(value, NULL, 0);
5745
5746 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
5747 &var_int, sizeof(var_int), TRUE, 0);
5748
5749 #if defined(CONFIG_PM_LOCK)
5750 if (var_int == 0) {
5751 g_pm_control = TRUE;
5752 printk("%s var_int=%d don't control PM\n", __func__, var_int);
5753 } else {
5754 g_pm_control = FALSE;
5755 printk("%s var_int=%d do control PM\n", __func__, var_int);
5756 }
5757 #endif
5758
5759 return ret;
5760 }
5761 #ifdef WLBTAMP
5762 else if (!strcmp(name, "btamp_chan")) {
5763 int btamp_chan;
5764 int iov_len = 0;
5765 char iovbuf[128];
5766 int ret;
5767
5768 btamp_chan = (int)simple_strtol(value, NULL, 0);
5769 iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
5770 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
5771 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
5772 __FUNCTION__, btamp_chan, ret));
5773 else
5774 DHD_ERROR(("%s btamp_chan %d set success\n",
5775 __FUNCTION__, btamp_chan));
5776 }
5777 #endif /* WLBTAMP */
5778 else if (!strcmp(name, "band")) {
5779 int ret;
5780 if (!strcmp(value, "auto"))
5781 var_int = WLC_BAND_AUTO;
5782 else if (!strcmp(value, "a"))
5783 var_int = WLC_BAND_5G;
5784 else if (!strcmp(value, "b"))
5785 var_int = WLC_BAND_2G;
5786 else if (!strcmp(value, "all"))
5787 var_int = WLC_BAND_ALL;
5788 else {
5789 printk(" set band value should be one of the a or b or all\n");
5790 var_int = WLC_BAND_AUTO;
5791 }
5792 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
5793 sizeof(var_int), TRUE, 0)) < 0)
5794 printk(" set band err=%d\n", ret);
5795 return ret;
5796 } else if (!strcmp(name, "cur_etheraddr")) {
5797 struct ether_addr ea;
5798 char buf[32];
5799 uint iovlen;
5800 int ret;
5801
5802 bcm_ether_atoe(value, &ea);
5803
5804 ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
5805 if (ret == 0) {
5806 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
5807 return 0;
5808 }
5809
5810 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
5811 ea.octet[0], ea.octet[1], ea.octet[2],
5812 ea.octet[3], ea.octet[4], ea.octet[5]));
5813
5814 iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
5815
5816 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
5817 if (ret < 0) {
5818 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
5819 return ret;
5820 }
5821 else {
5822 memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
5823 return ret;
5824 }
5825 } else if (!strcmp(name, "lpc")) {
5826 int ret = 0;
5827 char buf[32];
5828 uint iovlen;
5829 var_int = (int)simple_strtol(value, NULL, 0);
5830 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5831 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
5832 }
5833 iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf));
5834 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
5835 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
5836 }
5837 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
5838 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
5839 }
5840 return ret;
5841 } else if (!strcmp(name, "vht_features")) {
5842 int ret = 0;
5843 char buf[32];
5844 uint iovlen;
5845 var_int = (int)simple_strtol(value, NULL, 0);
5846
5847 if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
5848 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
5849 }
5850 iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf));
5851 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
5852 DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
5853 }
5854 if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
5855 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
5856 }
5857 return ret;
5858 } else {
5859 uint iovlen;
5860 char iovbuf[WLC_IOCTL_SMLEN];
5861
5862 /* wlu_iovar_setint */
5863 var_int = (int)simple_strtol(value, NULL, 0);
5864
5865 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
5866 if (!strcmp(name, "roam_off")) {
5867 /* Setup timeout if Beacons are lost to report link down */
5868 if (var_int) {
5869 uint bcn_timeout = 2;
5870 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
5871 iovbuf, sizeof(iovbuf));
5872 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
5873 }
5874 }
5875 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
5876
5877 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
5878
5879 iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
5880 iovbuf, sizeof(iovbuf));
5881 return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
5882 iovbuf, iovlen, TRUE, 0);
5883 }
5884
5885 return 0;
5886 }
5887
5888 static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
5889 {
5890 mm_segment_t old_fs;
5891 struct kstat stat;
5892 struct file *fp = NULL;
5893 unsigned int len;
5894 char *buf = NULL, *p, *name, *value;
5895 int ret = 0;
5896 char *config_path;
5897
5898 config_path = CONFIG_BCMDHD_CONFIG_PATH;
5899
5900 if (!config_path)
5901 {
5902 printk(KERN_ERR "config_path can't read. \n");
5903 return 0;
5904 }
5905
5906 old_fs = get_fs();
5907 set_fs(get_ds());
5908 if ((ret = vfs_stat(config_path, &stat))) {
5909 set_fs(old_fs);
5910 printk(KERN_ERR "%s: Failed to get information (%d)\n",
5911 config_path, ret);
5912 return ret;
5913 }
5914 set_fs(old_fs);
5915
5916 if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
5917 printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
5918 return -ENOMEM;
5919 }
5920
5921 printk("dhd_preinit_config : config path : %s \n", config_path);
5922
5923 if (!(fp = dhd_os_open_image(config_path)) ||
5924 (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
5925 goto err;
5926
5927 buf[stat.size] = '\0';
5928 for (p = buf; *p; p++) {
5929 if (isspace(*p))
5930 continue;
5931 for (name = p++; *p && !isspace(*p); p++) {
5932 if (*p == '=') {
5933 *p = '\0';
5934 p++;
5935 for (value = p; *p && !isspace(*p); p++);
5936 *p = '\0';
5937 if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
5938 printk(KERN_ERR "%s: %s=%s\n",
5939 bcmerrorstr(ret), name, value);
5940 }
5941 break;
5942 }
5943 }
5944 }
5945 ret = 0;
5946
5947 out:
5948 if (fp)
5949 dhd_os_close_image(fp);
5950 if (buf)
5951 MFREE(dhd->osh, buf, stat.size+1);
5952 return ret;
5953
5954 err:
5955 ret = -1;
5956 goto out;
5957 }
5958 #endif /* READ_CONFIG_FROM_FILE */
5959
5960 int
5961 dhd_preinit_ioctls(dhd_pub_t *dhd)
5962 {
5963 int ret = 0;
5964 char eventmask[WL_EVENTING_MASK_LEN];
5965 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
5966 uint32 buf_key_b4_m4 = 1;
5967 #ifndef WL_CFG80211
5968 u32 up = 0;
5969 #endif
5970 uint8 msglen;
5971 eventmsgs_ext_t *eventmask_msg = NULL;
5972 char* iov_buf = NULL;
5973 int ret2 = 0;
5974 #ifdef WLAIBSS
5975 aibss_bcn_force_config_t bcn_config;
5976 uint32 aibss;
5977 #ifdef WLAIBSS_PS
5978 uint32 aibss_ps;
5979 #endif /* WLAIBSS_PS */
5980 #endif /* WLAIBSS */
5981 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
5982 uint32 sup_wpa = 0;
5983 #endif
5984 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
5985 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
5986 uint32 ampdu_ba_wsize = 0;
5987 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
5988 #if defined(CUSTOM_AMPDU_MPDU)
5989 int32 ampdu_mpdu = 0;
5990 #endif
5991 #if defined(CUSTOM_AMPDU_RELEASE)
5992 int32 ampdu_release = 0;
5993 #endif
5994 #if defined(CUSTOM_AMSDU_AGGSF)
5995 int32 amsdu_aggsf = 0;
5996 #endif
5997
5998 #if defined(BCMSDIO)
5999 #ifdef PROP_TXSTATUS
6000 int wlfc_enable = TRUE;
6001 #ifndef DISABLE_11N
6002 uint32 hostreorder = 1;
6003 uint wl_down = 1;
6004 #endif /* DISABLE_11N */
6005 #endif /* PROP_TXSTATUS */
6006 #endif
6007 #ifdef PCIE_FULL_DONGLE
6008 uint32 wl_ap_isolate;
6009 #endif /* PCIE_FULL_DONGLE */
6010
6011 #ifdef DHD_ENABLE_LPC
6012 uint32 lpc = 1;
6013 #endif /* DHD_ENABLE_LPC */
6014 uint power_mode = PM_FAST;
6015 uint32 dongle_align = DHD_SDALIGN;
6016 #if defined(BCMSDIO)
6017 uint32 glom = CUSTOM_GLOM_SETTING;
6018 #endif /* defined(BCMSDIO) */
6019 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6020 uint32 credall = 1;
6021 #endif
6022 uint bcn_timeout = dhd->conf->bcn_timeout;
6023 uint retry_max = 3;
6024 #if defined(ARP_OFFLOAD_SUPPORT)
6025 int arpoe = 1;
6026 #endif
6027 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
6028 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
6029 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
6030 char buf[WLC_IOCTL_SMLEN];
6031 char *ptr;
6032 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
6033 #ifdef ROAM_ENABLE
6034 uint roamvar = 0;
6035 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
6036 int roam_scan_period[2] = {10, WLC_BAND_ALL};
6037 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
6038 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
6039 int roam_fullscan_period = 60;
6040 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6041 int roam_fullscan_period = 120;
6042 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6043 #else
6044 #ifdef DISABLE_BUILTIN_ROAM
6045 uint roamvar = 1;
6046 #endif /* DISABLE_BUILTIN_ROAM */
6047 #endif /* ROAM_ENABLE */
6048
6049 #if defined(SOFTAP)
6050 uint dtim = 1;
6051 #endif
6052 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
6053 uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
6054 struct ether_addr p2p_ea;
6055 #endif
6056 #ifdef BCMCCX
6057 uint32 ccx = 1;
6058 #endif
6059 #ifdef SOFTAP_UAPSD_OFF
6060 uint32 wme_apsd = 0;
6061 #endif /* SOFTAP_UAPSD_OFF */
6062 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
6063 uint32 apsta = 1; /* Enable APSTA mode */
6064 #elif defined(SOFTAP_AND_GC)
6065 uint32 apsta = 0;
6066 int ap_mode = 1;
6067 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
6068 #ifdef GET_CUSTOM_MAC_ENABLE
6069 struct ether_addr ea_addr;
6070 #endif /* GET_CUSTOM_MAC_ENABLE */
6071
6072 #ifdef DISABLE_11N
6073 uint32 nmode = 0;
6074 #endif /* DISABLE_11N */
6075
6076 #if defined(DISABLE_11AC)
6077 uint32 vhtmode = 0;
6078 #endif /* DISABLE_11AC */
6079 #ifdef USE_WL_TXBF
6080 uint32 txbf = 1;
6081 #endif /* USE_WL_TXBF */
6082 #ifdef AMPDU_VO_ENABLE
6083 struct ampdu_tid_control tid;
6084 #endif
6085 #ifdef USE_WL_FRAMEBURST
6086 uint32 frameburst = 1;
6087 #endif /* USE_WL_FRAMEBURST */
6088 #ifdef DHD_SET_FW_HIGHSPEED
6089 uint32 ack_ratio = 250;
6090 uint32 ack_ratio_depth = 64;
6091 #endif /* DHD_SET_FW_HIGHSPEED */
6092 #ifdef SUPPORT_2G_VHT
6093 uint32 vht_features = 0x3; /* 2G enable | rates all */
6094 #endif /* SUPPORT_2G_VHT */
6095 #ifdef CUSTOM_PSPRETEND_THR
6096 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
6097 #endif
6098 #ifdef PKT_FILTER_SUPPORT
6099 dhd_pkt_filter_enable = TRUE;
6100 #endif /* PKT_FILTER_SUPPORT */
6101 #ifdef WLTDLS
6102 dhd->tdls_enable = FALSE;
6103 #endif /* WLTDLS */
6104 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
6105 DHD_TRACE(("Enter %s\n", __FUNCTION__));
6106
6107 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
6108 #ifdef DHDTCPACK_SUPPRESS
6109 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
6110 dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
6111 #endif
6112
6113 dhd->op_mode = 0;
6114 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6115 (op_mode == DHD_FLAG_MFG_MODE)) {
6116 /* Check and adjust IOCTL response timeout for Manufactring firmware */
6117 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
6118 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
6119 __FUNCTION__));
6120 }
6121 else {
6122 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
6123 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
6124 }
6125 #ifdef GET_CUSTOM_MAC_ENABLE
6126 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
6127 if (!ret) {
6128 memset(buf, 0, sizeof(buf));
6129 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
6130 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6131 if (ret < 0) {
6132 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
6133 __FUNCTION__, MAC2STRDBG(ea_addr.octet), ret));
6134 ret = BCME_NOTUP;
6135 goto done;
6136 }
6137 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
6138 } else {
6139 #endif /* GET_CUSTOM_MAC_ENABLE */
6140 /* Get the default device MAC address directly from firmware */
6141 memset(buf, 0, sizeof(buf));
6142 bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
6143 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
6144 FALSE, 0)) < 0) {
6145 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
6146 ret = BCME_NOTUP;
6147 goto done;
6148 }
6149 /* Update public MAC address after reading from Firmware */
6150 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
6151
6152 #ifdef GET_CUSTOM_MAC_ENABLE
6153 }
6154 #endif /* GET_CUSTOM_MAC_ENABLE */
6155
6156 /* get a capabilities from firmware */
6157 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
6158 bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
6159 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
6160 sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
6161 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6162 __FUNCTION__, ret));
6163 goto done;
6164 }
6165 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
6166 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
6167 #ifdef SET_RANDOM_MAC_SOFTAP
6168 uint rand_mac;
6169 #endif
6170 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
6171 #if defined(ARP_OFFLOAD_SUPPORT)
6172 arpoe = 0;
6173 #endif
6174 #ifdef PKT_FILTER_SUPPORT
6175 dhd_pkt_filter_enable = FALSE;
6176 #endif
6177 #ifdef SET_RANDOM_MAC_SOFTAP
6178 SRANDOM32((uint)jiffies);
6179 rand_mac = RANDOM32();
6180 iovbuf[0] = 0x02; /* locally administered bit */
6181 iovbuf[1] = 0x1A;
6182 iovbuf[2] = 0x11;
6183 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
6184 iovbuf[4] = (unsigned char)(rand_mac >> 8);
6185 iovbuf[5] = (unsigned char)(rand_mac >> 16);
6186
6187 bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
6188 ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
6189 if (ret < 0) {
6190 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
6191 } else
6192 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
6193 #endif /* SET_RANDOM_MAC_SOFTAP */
6194 #if !defined(AP) && defined(WL_CFG80211)
6195 /* Turn off MPC in AP mode */
6196 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6197 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6198 sizeof(iovbuf), TRUE, 0)) < 0) {
6199 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
6200 }
6201 #endif
6202 #ifdef SUPPORT_AP_POWERSAVE
6203 dhd_set_ap_powersave(dhd, 0, TRUE);
6204 #endif
6205 #ifdef SOFTAP_UAPSD_OFF
6206 bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
6207 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6208 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
6209 #endif /* SOFTAP_UAPSD_OFF */
6210 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
6211 (op_mode == DHD_FLAG_MFG_MODE)) {
6212 #if defined(ARP_OFFLOAD_SUPPORT)
6213 arpoe = 0;
6214 #endif /* ARP_OFFLOAD_SUPPORT */
6215 #ifdef PKT_FILTER_SUPPORT
6216 dhd_pkt_filter_enable = FALSE;
6217 #endif /* PKT_FILTER_SUPPORT */
6218 dhd->op_mode = DHD_FLAG_MFG_MODE;
6219 } else {
6220 uint32 concurrent_mode = 0;
6221 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
6222 (op_mode == DHD_FLAG_P2P_MODE)) {
6223 #if defined(ARP_OFFLOAD_SUPPORT)
6224 arpoe = 0;
6225 #endif
6226 #ifdef PKT_FILTER_SUPPORT
6227 dhd_pkt_filter_enable = FALSE;
6228 #endif
6229 dhd->op_mode = DHD_FLAG_P2P_MODE;
6230 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
6231 (op_mode == DHD_FLAG_IBSS_MODE)) {
6232 dhd->op_mode = DHD_FLAG_IBSS_MODE;
6233 } else
6234 dhd->op_mode = DHD_FLAG_STA_MODE;
6235 #if !defined(AP) && defined(WLP2P)
6236 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
6237 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
6238 #if defined(ARP_OFFLOAD_SUPPORT)
6239 arpoe = 1;
6240 #endif
6241 dhd->op_mode |= concurrent_mode;
6242 }
6243
6244 /* Check if we are enabling p2p */
6245 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6246 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6247 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6248 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6249 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
6250 }
6251
6252 #if defined(SOFTAP_AND_GC)
6253 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
6254 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
6255 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
6256 }
6257 #endif
6258 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
6259 ETHER_SET_LOCALADDR(&p2p_ea);
6260 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
6261 ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
6262 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6263 iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6264 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
6265 } else {
6266 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
6267 }
6268 }
6269 #else
6270 (void)concurrent_mode;
6271 #endif
6272 }
6273
6274 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
6275 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
6276 /* Set Country code */
6277 if (dhd->dhd_cspec.ccode[0] != 0) {
6278 printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
6279 bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
6280 sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
6281 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6282 printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
6283 } else {
6284 dhd_conf_set_country(dhd);
6285 dhd_conf_fix_country(dhd);
6286 }
6287 dhd_conf_get_country(dhd, &dhd->dhd_cspec);
6288
6289 #if defined(DISABLE_11AC)
6290 bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
6291 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6292 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
6293 #endif /* DISABLE_11AC */
6294 dhd_conf_set_fw_string_cmd(dhd, "vhtmode", dhd->conf->vhtmode, 0, TRUE);
6295
6296 /* Set Listen Interval */
6297 bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
6298 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6299 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
6300
6301 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
6302 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
6303 bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
6304 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6305 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
6306 #if defined(ROAM_ENABLE)
6307 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
6308 sizeof(roam_trigger), TRUE, 0)) < 0)
6309 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
6310 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
6311 sizeof(roam_scan_period), TRUE, 0)) < 0)
6312 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
6313 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
6314 sizeof(roam_delta), TRUE, 0)) < 0)
6315 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
6316 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
6317 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6318 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
6319 #endif /* ROAM_ENABLE */
6320 dhd_conf_set_roam(dhd);
6321
6322 #ifdef BCMCCX
6323 bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
6324 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6325 #endif /* BCMCCX */
6326 #ifdef WLTDLS
6327 /* by default TDLS on and auto mode off */
6328 _dhd_tdls_enable(dhd, true, false, NULL);
6329 #endif /* WLTDLS */
6330
6331 #ifdef DHD_ENABLE_LPC
6332 /* Set lpc 1 */
6333 bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
6334 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6335 sizeof(iovbuf), TRUE, 0)) < 0) {
6336 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
6337 }
6338 #endif /* DHD_ENABLE_LPC */
6339 dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
6340
6341 /* Set PowerSave mode */
6342 if (dhd->conf->pm >= 0)
6343 power_mode = dhd->conf->pm;
6344 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
6345
6346 /* Match Host and Dongle rx alignment */
6347 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
6348 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6349
6350 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6351 /* enable credall to reduce the chance of no bus credit happened. */
6352 bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
6353 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6354 #endif
6355
6356 #if defined(BCMSDIO)
6357 if (glom != DEFAULT_GLOM_VALUE) {
6358 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
6359 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6360 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6361 }
6362 #endif /* defined(BCMSDIO) */
6363
6364 /* Setup timeout if Beacons are lost and roam is off to report link down */
6365 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
6366 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6367 /* Setup assoc_retry_max count to reconnect target AP in dongle */
6368 bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
6369 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6370 #if defined(AP) && !defined(WLP2P)
6371 /* Turn off MPC in AP mode */
6372 bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
6373 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6374 bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
6375 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6376 #endif /* defined(AP) && !defined(WLP2P) */
6377 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
6378 dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
6379 dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
6380 dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
6381 dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
6382 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
6383 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
6384 dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
6385 dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
6386
6387 #if defined(SOFTAP)
6388 if (ap_fw_loaded == TRUE) {
6389 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
6390 }
6391 #endif
6392
6393 #if defined(KEEP_ALIVE)
6394 {
6395 /* Set Keep Alive : be sure to use FW with -keepalive */
6396 int res;
6397
6398 #if defined(SOFTAP)
6399 if (ap_fw_loaded == FALSE)
6400 #endif
6401 if (!(dhd->op_mode &
6402 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
6403 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
6404 DHD_ERROR(("%s set keeplive failed %d\n",
6405 __FUNCTION__, res));
6406 }
6407 }
6408 #endif /* defined(KEEP_ALIVE) */
6409
6410 #ifdef USE_WL_TXBF
6411 bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
6412 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6413 sizeof(iovbuf), TRUE, 0)) < 0) {
6414 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
6415 }
6416 #endif /* USE_WL_TXBF */
6417 dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
6418 #ifdef USE_WL_FRAMEBURST
6419 /* Set frameburst to value */
6420 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
6421 sizeof(frameburst), TRUE, 0)) < 0) {
6422 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
6423 }
6424 #endif /* USE_WL_FRAMEBURST */
6425 dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE);
6426 #ifdef DHD_SET_FW_HIGHSPEED
6427 /* Set ack_ratio */
6428 bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
6429 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6430 sizeof(iovbuf), TRUE, 0)) < 0) {
6431 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
6432 }
6433
6434 /* Set ack_ratio_depth */
6435 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
6436 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6437 sizeof(iovbuf), TRUE, 0)) < 0) {
6438 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
6439 }
6440 #endif /* DHD_SET_FW_HIGHSPEED */
6441 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
6442 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
6443 /* Set ampdu ba wsize to 64 or 16 */
6444 #ifdef CUSTOM_AMPDU_BA_WSIZE
6445 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
6446 #endif
6447 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
6448 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
6449 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
6450 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
6451 if (ampdu_ba_wsize != 0) {
6452 bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
6453 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6454 sizeof(iovbuf), TRUE, 0)) < 0) {
6455 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
6456 __FUNCTION__, ampdu_ba_wsize, ret));
6457 }
6458 }
6459 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
6460 dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
6461
6462 iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
6463 if (iov_buf == NULL) {
6464 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
6465 ret = BCME_NOMEM;
6466 goto done;
6467 }
6468 #ifdef WLAIBSS
6469 /* Configure custom IBSS beacon transmission */
6470 if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
6471 {
6472 aibss = 1;
6473 bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
6474 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6475 sizeof(iovbuf), TRUE, 0)) < 0) {
6476 DHD_ERROR(("%s Set aibss to %d failed %d\n",
6477 __FUNCTION__, aibss, ret));
6478 }
6479 #ifdef WLAIBSS_PS
6480 aibss_ps = 1;
6481 bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
6482 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6483 sizeof(iovbuf), TRUE, 0)) < 0) {
6484 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
6485 __FUNCTION__, aibss, ret));
6486 }
6487 #endif /* WLAIBSS_PS */
6488 }
6489 memset(&bcn_config, 0, sizeof(bcn_config));
6490 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
6491 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
6492 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
6493 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
6494 bcn_config.len = sizeof(bcn_config);
6495
6496 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
6497 sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN);
6498 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
6499 WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6500 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
6501 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
6502 AIBSS_BCN_FLOOD_DUR, ret));
6503 }
6504 #endif /* WLAIBSS */
6505
6506 #if defined(CUSTOM_AMPDU_MPDU)
6507 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
6508 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
6509 bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
6510 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6511 sizeof(iovbuf), TRUE, 0)) < 0) {
6512 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
6513 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
6514 }
6515 }
6516 #endif /* CUSTOM_AMPDU_MPDU */
6517
6518 #if defined(CUSTOM_AMPDU_RELEASE)
6519 ampdu_release = CUSTOM_AMPDU_RELEASE;
6520 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
6521 bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf));
6522 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6523 sizeof(iovbuf), TRUE, 0)) < 0) {
6524 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
6525 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
6526 }
6527 }
6528 #endif /* CUSTOM_AMPDU_RELEASE */
6529
6530 #if defined(CUSTOM_AMSDU_AGGSF)
6531 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
6532 if (amsdu_aggsf != 0) {
6533 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
6534 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6535 sizeof(iovbuf), TRUE, 0)) < 0) {
6536 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
6537 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
6538 }
6539 }
6540 #endif /* CUSTOM_AMSDU_AGGSF */
6541
6542 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
6543 /* Read 4-way handshake requirements */
6544 if (dhd_use_idsup == 1) {
6545 bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
6546 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
6547 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
6548 * in-dongle supplicant.
6549 */
6550 if (ret >= 0 || ret == BCME_NOTREADY)
6551 dhd->fw_4way_handshake = TRUE;
6552 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
6553 }
6554 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
6555 #ifdef SUPPORT_2G_VHT
6556 bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
6557 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6558 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
6559 }
6560 #endif /* SUPPORT_2G_VHT */
6561 #ifdef CUSTOM_PSPRETEND_THR
6562 /* Turn off MPC in AP mode */
6563 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
6564 iovbuf, sizeof(iovbuf));
6565 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6566 sizeof(iovbuf), TRUE, 0)) < 0) {
6567 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
6568 __FUNCTION__, ret));
6569 }
6570 #endif
6571
6572 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
6573 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
6574 sizeof(iovbuf), TRUE, 0)) < 0) {
6575 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
6576 }
6577
6578 /* Read event_msgs mask */
6579 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6580 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
6581 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
6582 goto done;
6583 }
6584 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
6585
6586 /* Setup event_msgs */
6587 setbit(eventmask, WLC_E_SET_SSID);
6588 setbit(eventmask, WLC_E_PRUNE);
6589 setbit(eventmask, WLC_E_AUTH);
6590 setbit(eventmask, WLC_E_AUTH_IND);
6591 setbit(eventmask, WLC_E_ASSOC);
6592 setbit(eventmask, WLC_E_REASSOC);
6593 setbit(eventmask, WLC_E_REASSOC_IND);
6594 setbit(eventmask, WLC_E_DEAUTH);
6595 setbit(eventmask, WLC_E_DEAUTH_IND);
6596 setbit(eventmask, WLC_E_DISASSOC_IND);
6597 setbit(eventmask, WLC_E_DISASSOC);
6598 setbit(eventmask, WLC_E_JOIN);
6599 setbit(eventmask, WLC_E_START);
6600 setbit(eventmask, WLC_E_ASSOC_IND);
6601 setbit(eventmask, WLC_E_PSK_SUP);
6602 setbit(eventmask, WLC_E_LINK);
6603 setbit(eventmask, WLC_E_NDIS_LINK);
6604 setbit(eventmask, WLC_E_MIC_ERROR);
6605 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
6606 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
6607 #ifndef WL_CFG80211
6608 setbit(eventmask, WLC_E_PMKID_CACHE);
6609 setbit(eventmask, WLC_E_TXFAIL);
6610 #endif
6611 setbit(eventmask, WLC_E_JOIN_START);
6612 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
6613 #ifdef WLMEDIA_HTSF
6614 setbit(eventmask, WLC_E_HTSFSYNC);
6615 #endif /* WLMEDIA_HTSF */
6616 #ifdef PNO_SUPPORT
6617 setbit(eventmask, WLC_E_PFN_NET_FOUND);
6618 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
6619 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
6620 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
6621 #endif /* PNO_SUPPORT */
6622 /* enable dongle roaming event */
6623 setbit(eventmask, WLC_E_ROAM);
6624 setbit(eventmask, WLC_E_BSSID);
6625 #ifdef BCMCCX
6626 setbit(eventmask, WLC_E_ADDTS_IND);
6627 setbit(eventmask, WLC_E_DELTS_IND);
6628 #endif /* BCMCCX */
6629 #ifdef WLTDLS
6630 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
6631 #endif /* WLTDLS */
6632 #ifdef WL_CFG80211
6633 setbit(eventmask, WLC_E_ESCAN_RESULT);
6634 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
6635 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
6636 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
6637 }
6638 #endif /* WL_CFG80211 */
6639 #ifdef WLAIBSS
6640 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
6641 #endif /* WLAIBSS */
6642 #ifdef CUSTOMER_HW10
6643 clrbit(eventmask, WLC_E_TRACE);
6644 #else
6645 setbit(eventmask, WLC_E_TRACE);
6646 #endif
6647 /* Write updated Event mask */
6648 bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
6649 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6650 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
6651 goto done;
6652 }
6653
6654 /* make up event mask ext message iovar for event larger than 128 */
6655 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
6656 eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
6657 if (eventmask_msg == NULL) {
6658 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
6659 ret = BCME_NOMEM;
6660 goto done;
6661 }
6662 bzero(eventmask_msg, msglen);
6663 eventmask_msg->ver = EVENTMSGS_VER;
6664 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6665
6666 /* Read event_msgs_ext mask */
6667 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
6668 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
6669 if (ret2 != BCME_UNSUPPORTED)
6670 ret = ret2;
6671 if (ret2 == 0) { /* event_msgs_ext must be supported */
6672 bcopy(iov_buf, eventmask_msg, msglen);
6673
6674 #ifdef BT_WIFI_HANDOVER
6675 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
6676 #endif /* BT_WIFI_HANDOVER */
6677
6678 /* Write updated Event mask */
6679 eventmask_msg->ver = EVENTMSGS_VER;
6680 eventmask_msg->command = EVENTMSGS_SET_MASK;
6681 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
6682 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
6683 msglen, iov_buf, WLC_IOCTL_SMLEN);
6684 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
6685 iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
6686 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
6687 goto done;
6688 }
6689 } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
6690 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
6691 goto done;
6692 } /* unsupported is ok */
6693
6694 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
6695 sizeof(scan_assoc_time), TRUE, 0);
6696 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
6697 sizeof(scan_unassoc_time), TRUE, 0);
6698 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
6699 sizeof(scan_passive_time), TRUE, 0);
6700
6701 #ifdef ARP_OFFLOAD_SUPPORT
6702 /* Set and enable ARP offload feature for STA only */
6703 #if defined(SOFTAP)
6704 if (arpoe && !ap_fw_loaded)
6705 #else
6706 if (arpoe)
6707 #endif
6708 {
6709 dhd_arp_offload_enable(dhd, TRUE);
6710 dhd_arp_offload_set(dhd, dhd_arp_mode);
6711 } else {
6712 dhd_arp_offload_enable(dhd, FALSE);
6713 dhd_arp_offload_set(dhd, 0);
6714 }
6715 dhd_arp_enable = arpoe;
6716 #endif /* ARP_OFFLOAD_SUPPORT */
6717
6718 #ifdef PKT_FILTER_SUPPORT
6719 /* Setup default defintions for pktfilter , enable in suspend */
6720 dhd->pktfilter_count = 6;
6721 /* Setup filter to allow only unicast */
6722 if (dhd_master_mode) {
6723 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
6724 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
6725 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
6726 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
6727 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
6728 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
6729 /* apply APP pktfilter */
6730 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
6731 } else
6732 dhd_conf_discard_pkt_filter(dhd);
6733 dhd_conf_add_pkt_filter(dhd);
6734
6735 #if defined(SOFTAP)
6736 if (ap_fw_loaded) {
6737 dhd_enable_packet_filter(0, dhd);
6738 }
6739 #endif /* defined(SOFTAP) */
6740 dhd_set_packet_filter(dhd);
6741 #endif /* PKT_FILTER_SUPPORT */
6742 #ifdef DISABLE_11N
6743 bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
6744 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6745 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
6746 #endif /* DISABLE_11N */
6747
6748 #ifdef AMPDU_VO_ENABLE
6749 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
6750 tid.enable = TRUE;
6751 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6752 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6753
6754 tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
6755 tid.enable = TRUE;
6756 bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
6757 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6758 #endif
6759 #if defined(SOFTAP_TPUT_ENHANCE)
6760 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6761 dhd_bus_setidletime(dhd, (int)100);
6762 #ifdef DHDTCPACK_SUPPRESS
6763 dhd->tcpack_sup_enabled = FALSE;
6764 #endif
6765 #if defined(DHD_TCP_WINSIZE_ADJUST)
6766 dhd_use_tcp_window_size_adjust = TRUE;
6767 #endif
6768
6769 memset(buf, 0, sizeof(buf));
6770 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
6771 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
6772 glom = 0;
6773 bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
6774 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6775 }
6776 else {
6777 if (buf[0] == 0) {
6778 glom = 1;
6779 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
6780 sizeof(iovbuf));
6781 dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
6782 }
6783 }
6784 }
6785 #endif /* SOFTAP_TPUT_ENHANCE */
6786
6787 /* query for 'ver' to get version info from firmware */
6788 memset(buf, 0, sizeof(buf));
6789 ptr = buf;
6790 bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
6791 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
6792 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6793 else {
6794 bcmstrtok(&ptr, "\n", 0);
6795 /* Print fw version info */
6796 DHD_ERROR(("Firmware version = %s\n", buf));
6797 dhd_set_version_info(dhd, buf);
6798 }
6799
6800 #if defined(BCMSDIO)
6801 dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
6802 // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
6803 dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 1, FALSE);
6804 #endif /* defined(BCMSDIO) */
6805
6806 dhd_conf_set_disable_proptx(dhd);
6807 #if defined(BCMSDIO)
6808 #ifdef PROP_TXSTATUS
6809 if (disable_proptx ||
6810 #ifdef PROP_TXSTATUS_VSDB
6811 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
6812 (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
6813 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
6814 #endif /* PROP_TXSTATUS_VSDB */
6815 FALSE) {
6816 wlfc_enable = FALSE;
6817 }
6818
6819 #ifndef DISABLE_11N
6820 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
6821 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
6822 if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
6823 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
6824 if (ret2 != BCME_UNSUPPORTED)
6825 ret = ret2;
6826 if (ret2 != BCME_OK)
6827 hostreorder = 0;
6828 }
6829 #endif /* DISABLE_11N */
6830
6831 #ifdef READ_CONFIG_FROM_FILE
6832 dhd_preinit_config(dhd, 0);
6833 #endif /* READ_CONFIG_FROM_FILE */
6834
6835 if (wlfc_enable)
6836 dhd_wlfc_init(dhd);
6837 #ifndef DISABLE_11N
6838 else if (hostreorder)
6839 dhd_wlfc_hostreorder_init(dhd);
6840 #endif /* DISABLE_11N */
6841
6842 #endif /* PROP_TXSTATUS */
6843 #endif /* BCMSDIO || BCMBUS */
6844 #ifdef PCIE_FULL_DONGLE
6845 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
6846 if (FW_SUPPORTED(dhd, ap)) {
6847 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
6848 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
6849 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
6850 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
6851 }
6852 #endif /* PCIE_FULL_DONGLE */
6853 #ifdef PNO_SUPPORT
6854 if (!dhd->pno_state) {
6855 dhd_pno_init(dhd);
6856 }
6857 #endif
6858 #ifdef WL11U
6859 dhd_interworking_enable(dhd);
6860 #endif /* WL11U */
6861 #ifndef WL_CFG80211
6862 dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
6863 #endif
6864
6865 done:
6866
6867 if (eventmask_msg)
6868 kfree(eventmask_msg);
6869 if (iov_buf)
6870 kfree(iov_buf);
6871
6872 return ret;
6873 }
6874
6875
6876 int
6877 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
6878 {
6879 char buf[strlen(name) + 1 + cmd_len];
6880 int len = sizeof(buf);
6881 wl_ioctl_t ioc;
6882 int ret;
6883
6884 len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
6885
6886 memset(&ioc, 0, sizeof(ioc));
6887
6888 ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
6889 ioc.buf = buf;
6890 ioc.len = len;
6891 ioc.set = set;
6892
6893 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
6894 if (!set && ret >= 0)
6895 memcpy(cmd_buf, buf, cmd_len);
6896
6897 return ret;
6898 }
6899
6900 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
6901 {
6902 struct dhd_info *dhd = dhdp->info;
6903 struct net_device *dev = NULL;
6904
6905 ASSERT(dhd && dhd->iflist[ifidx]);
6906 dev = dhd->iflist[ifidx]->net;
6907 ASSERT(dev);
6908
6909 if (netif_running(dev)) {
6910 DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
6911 return BCME_NOTDOWN;
6912 }
6913
6914 #define DHD_MIN_MTU 1500
6915 #define DHD_MAX_MTU 1752
6916
6917 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
6918 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
6919 return BCME_BADARG;
6920 }
6921
6922 dev->mtu = new_mtu;
6923 return 0;
6924 }
6925
6926 #ifdef ARP_OFFLOAD_SUPPORT
/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/*
 * Refresh the dongle's ARP-offload host-IP table for interface idx:
 * read the current table, wipe it in the dongle, then write back each
 * surviving entry -- inserting ipa into the first free slot when add is
 * TRUE, or dropping any slot that matches ipa when add is FALSE.
 */
void
aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
{
	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
	int i;
	int ret;

	bzero(ipv4_buf, sizeof(ipv4_buf));

	/* display what we've got */
	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
#ifdef AOE_DBG
	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif
	/* now we saved hoste_ip table, clr it in the dongle AOE */
	/* NOTE(review): the dongle table is cleared before ret is checked,
	 * so a failed read still wipes the dongle's entries -- confirm this
	 * is intended.
	 */
	dhd_aoe_hostip_clr(dhd_pub, idx);

	if (ret) {
		DHD_ERROR(("%s failed\n", __FUNCTION__));
		return;
	}

	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
		/* add mode: the first free slot takes the new address */
		if (add && (ipv4_buf[i] == 0)) {
			ipv4_buf[i] = ipa;
			add = FALSE; /* added ipa to local table */
			DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
				__FUNCTION__, i));
		} else if (ipv4_buf[i] == ipa) {
			/* remove mode: zero the matching slot */
			ipv4_buf[i] = 0;
			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
				__FUNCTION__, ipa, i));
		}

		if (ipv4_buf[i] != 0) {
			/* add back host_ip entries from our local cache */
			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
				__FUNCTION__, ipv4_buf[i], i));
		}
	}
#ifdef AOE_DBG
	/* see the resulting hostip table */
	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif
}
6977
6978 /*
6979 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
6980 * whenever there is an event related to an IP address.
6981 * ptr : kernel provided pointer to IP address that has changed
6982 */
6983 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
6984 unsigned long event,
6985 void *ptr)
6986 {
6987 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
6988
6989 dhd_info_t *dhd;
6990 dhd_pub_t *dhd_pub;
6991 int idx;
6992
6993 if (!dhd_arp_enable)
6994 return NOTIFY_DONE;
6995 if (!ifa || !(ifa->ifa_dev->dev))
6996 return NOTIFY_DONE;
6997
6998 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6999 /* Filter notifications meant for non Broadcom devices */
7000 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
7001 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
7002 #if defined(WL_ENABLE_P2P_IF)
7003 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
7004 #endif /* WL_ENABLE_P2P_IF */
7005 return NOTIFY_DONE;
7006 }
7007 #endif /* LINUX_VERSION_CODE */
7008
7009 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
7010 if (!dhd)
7011 return NOTIFY_DONE;
7012
7013 dhd_pub = &dhd->pub;
7014
7015 if (dhd_pub->arp_version == 1) {
7016 idx = 0;
7017 }
7018 else {
7019 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
7020 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
7021 break;
7022 }
7023 if (idx < DHD_MAX_IFS)
7024 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
7025 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
7026 else {
7027 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
7028 idx = 0;
7029 }
7030 }
7031
7032 switch (event) {
7033 case NETDEV_UP:
7034 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
7035 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
7036
7037 if (dhd->pub.busstate != DHD_BUS_DATA) {
7038 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
7039 if (dhd->pend_ipaddr) {
7040 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
7041 __FUNCTION__, dhd->pend_ipaddr));
7042 }
7043 dhd->pend_ipaddr = ifa->ifa_address;
7044 break;
7045 }
7046
7047 #ifdef AOE_IP_ALIAS_SUPPORT
7048 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
7049 __FUNCTION__));
7050 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
7051 #endif /* AOE_IP_ALIAS_SUPPORT */
7052 break;
7053
7054 case NETDEV_DOWN:
7055 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
7056 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
7057 dhd->pend_ipaddr = 0;
7058 #ifdef AOE_IP_ALIAS_SUPPORT
7059 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
7060 __FUNCTION__));
7061 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
7062 #else
7063 dhd_aoe_hostip_clr(&dhd->pub, idx);
7064 dhd_aoe_arp_clr(&dhd->pub, idx);
7065 #endif /* AOE_IP_ALIAS_SUPPORT */
7066 break;
7067
7068 default:
7069 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
7070 __func__, ifa->ifa_label, event));
7071 break;
7072 }
7073 return NOTIFY_DONE;
7074 }
7075 #endif /* ARP_OFFLOAD_SUPPORT */
7076
7077 #ifdef CONFIG_IPV6
7078 /* Neighbor Discovery Offload: defered handler */
7079 static void
7080 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
7081 {
7082 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
7083 dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
7084 int ret;
7085
7086 if (event != DHD_WQ_WORK_IPV6_NDO) {
7087 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7088 return;
7089 }
7090
7091 if (!ndo_work) {
7092 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
7093 return;
7094 }
7095
7096 if (!pub) {
7097 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
7098 return;
7099 }
7100
7101 if (ndo_work->if_idx) {
7102 DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
7103 return;
7104 }
7105
7106 switch (ndo_work->event) {
7107 case NETDEV_UP:
7108 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__));
7109 ret = dhd_ndo_enable(pub, TRUE);
7110 if (ret < 0) {
7111 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
7112 }
7113
7114 ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
7115 if (ret < 0) {
7116 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
7117 __FUNCTION__, ret));
7118 }
7119 break;
7120 case NETDEV_DOWN:
7121 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
7122 ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
7123 if (ret < 0) {
7124 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
7125 __FUNCTION__, ret));
7126 goto done;
7127 }
7128
7129 ret = dhd_ndo_enable(pub, FALSE);
7130 if (ret < 0) {
7131 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
7132 goto done;
7133 }
7134 break;
7135 default:
7136 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
7137 break;
7138 }
7139 done:
7140 /* free ndo_work. alloced while scheduling the work */
7141 kfree(ndo_work);
7142
7143 return;
7144 }
7145
/*
 * Neighbor Discovery Offload: called when an interface
 * is assigned an IPv6 address.
 * Handles only the primary interface.
 */
static int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	dhd_info_t *dhd;
	dhd_pub_t *dhd_pub;
	struct inet6_ifaddr *inet6_ifa = ptr;
	struct in6_addr *ipv6_addr = &inet6_ifa->addr;
	struct ipv6_work_info_t *ndo_info;
	int idx = 0; /* REVISIT */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	/* Filter notifications meant for non Broadcom devices */
	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
		return NOTIFY_DONE;
	}
#endif /* LINUX_VERSION_CODE */

	dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
	if (!dhd)
		return NOTIFY_DONE;

	/* only react to addresses on the primary interface (idx 0) */
	if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
		return NOTIFY_DONE;
	dhd_pub = &dhd->pub;
	/* skip if the firmware does not advertise NDO support */
	if (!FW_SUPPORTED(dhd_pub, ndoe))
		return NOTIFY_DONE;

	/* GFP_ATOMIC: notifier may run in a context that cannot sleep.
	 * Ownership of ndo_info passes to dhd_inet6_work_handler, which frees it.
	 */
	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
	if (!ndo_info) {
		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
		return NOTIFY_DONE;
	}

	ndo_info->event = event;
	ndo_info->if_idx = idx;
	memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);

	/* defer the work to thread as it may block kernel */
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
		dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
	return NOTIFY_DONE;
}
7194 #endif /* #ifdef CONFIG_IPV6 */
7195
/*
 * Register the net_device for interface 'ifidx' with the network stack.
 * Installs netdev/ethtool/wireless-ext ops, picks a MAC address (primary
 * MAC for ifidx 0, locally-administered variant for virtual interfaces
 * that clash with the primary MAC), then calls register_netdev(ice).
 * need_rtnl_lock: TRUE -> register_netdev (takes rtnl), FALSE ->
 * register_netdevice (caller already holds rtnl).
 * Returns 0 on success, negative errno on failure.
 */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp;
	struct net_device *net = NULL;
	int err = 0;
	/* fallback MAC if the dongle MAC is not yet known */
	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	ASSERT(dhd && dhd->iflist[ifidx]);
	ifp = dhd->iflist[ifidx];
	net = ifp->net;
	ASSERT(net && (ifp->idx == ifidx));

#ifndef P2PONEINT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	ASSERT(!net->open);
	net->get_stats = dhd_get_stats;
	net->do_ioctl = dhd_ioctl_entry;
	net->hard_start_xmit = dhd_start_xmit;
	net->set_mac_address = dhd_set_mac_address;
	net->set_multicast_list = dhd_set_multicast_list;
	net->open = net->stop = NULL;
#else
	ASSERT(!net->netdev_ops);
	net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
#else
	net->netdev_ops = &dhd_cfgp2p_ops_virt;
#endif /* P2PONEINT */

	/* Ok, link into the network layer... */
	if (ifidx == 0) {
		/*
		 * device functions for the primary interface only
		 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
		net->open = dhd_open;
		net->stop = dhd_stop;
#else
		net->netdev_ops = &dhd_ops_pri;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
	} else {
		/*
		 * We have to use the primary MAC for virtual interfaces
		 */
		memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
		/*
		 * Android sets the locally administered bit to indicate that this is a
		 * portable hotspot. This will not work in simultaneous AP/STA mode,
		 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
		 */
		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
			ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
				__func__, net->name));
			temp_addr[0] |= 0x02;
		}
	}

	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
	net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

	if (ifidx == 0)
		printf("%s\n", dhd_version);

	if (need_rtnl_lock)
		err = register_netdev(net);
	else
		err = register_netdevice(net);

	if (err != 0) {
		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
		goto fail;
	}

#ifdef SET_RPS_CPUS
	err = custom_rps_map_set(net->_rx, RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
	if (err < 0)
		DHD_ERROR(("%s : custom_rps_map_set done. error : %d\n", __FUNCTION__, err));
#endif /* SET_RPS_CPUS */

	printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
		MAC2STRDBG(net->dev_addr));

#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
//	wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif

#if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
	KERNEL_VERSION(2, 6, 27))))
	if (ifidx == 0) {
#ifdef BCMLXSDMMC
		up(&dhd_registration_sem);
#endif
		/* if firmware is not loaded at insmod time, power the chip back
		 * down until the interface is brought up
		 */
		if (!dhd_download_fw_on_driverload) {
			dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
			dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
			wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	}
#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
	return 0;

fail:
	/* undo the ops assignment so a retry starts from a clean state */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	net->open = NULL;
#else
	net->netdev_ops = NULL;
#endif
	return err;
}
7331
7332 void
7333 dhd_bus_detach(dhd_pub_t *dhdp)
7334 {
7335 dhd_info_t *dhd;
7336
7337 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7338
7339 if (dhdp) {
7340 dhd = (dhd_info_t *)dhdp->info;
7341 if (dhd) {
7342
7343 /*
7344 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
7345 * calling stop again will cuase SD read/write errors.
7346 */
7347 if (dhd->pub.busstate != DHD_BUS_DOWN) {
7348 /* Stop the protocol module */
7349 dhd_prot_stop(&dhd->pub);
7350
7351 /* Stop the bus module */
7352 dhd_bus_stop(dhd->pub.bus, TRUE);
7353 }
7354
7355 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7356 dhd_bus_oob_intr_unregister(dhdp);
7357 #endif
7358 }
7359 }
7360 }
7361
7362
/*
 * Tear down everything dhd_attach() set up, gated on dhd_state bits so a
 * partially-completed attach can still be unwound safely: bus/proto,
 * notifiers, interfaces, watchdog timer, threads, cfg80211, deferred work
 * queue, wakelocks. Order matters; do not reorder these stages.
 */
void dhd_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	unsigned long flags;
	int timer_valid = FALSE;

	if (!dhdp)
		return;

	dhd = (dhd_info_t *)dhdp->info;
	if (!dhd)
		return;

#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
	dhd_global = NULL;
#endif /* CUSTOMER_HW20 && WLANAUDIO */

	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));

	dhd->pub.up = 0;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		/* Give sufficient time for threads to start running in case
		 * dhd_attach() has failed
		 */
		OSL_SLEEP(100);
	}

	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
		dhd_bus_detach(dhdp);
#ifdef PCIE_FULL_DONGLE
		dhd_flow_rings_deinit(dhdp);
#endif

		if (dhdp->prot)
			dhd_prot_detach(dhdp);
	}

	/* unhook the inet/inet6 address notifiers registered at attach */
#ifdef ARP_OFFLOAD_SUPPORT
	if (dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = FALSE;
		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef CONFIG_IPV6
	if (dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = FALSE;
		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
		if (dhd->early_suspend.suspend)
			unregister_early_suspend(&dhd->early_suspend);
	}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if defined(WL_WIRELESS_EXT)
	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
		/* Detach and unlink in the iw */
		wl_iw_detach();
	}
#endif /* defined(WL_WIRELESS_EXT) */

	/* delete all interfaces, start with virtual */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;
		dhd_if_t *ifp;

		/* Cleanup virtual interfaces */
		dhd_net_if_lock_local(dhd);
		for (i = 1; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i])
				dhd_remove_if(&dhd->pub, i, TRUE);
		}
		dhd_net_if_unlock_local(dhd);

		/* delete primary interface 0 */
		ifp = dhd->iflist[0];
		ASSERT(ifp);
		ASSERT(ifp->net);
		if (ifp && ifp->net) {
			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED)
				free_netdev(ifp->net);
			else {
#ifdef SET_RPS_CPUS
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				unregister_netdev(ifp->net);
			}
			ifp->net = NULL;
#ifdef DHD_WMF
			dhd_wmf_cleanup(dhdp, 0);
#endif /* DHD_WMF */

			dhd_if_del_sta_list(ifp);

			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
			dhd->iflist[0] = NULL;
		}
	}

	/* Clear the watchdog timer: snapshot the flag under the lock, but
	 * call del_timer_sync() outside it (it may sleep)
	 */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	timer_valid = dhd->wd_timer_valid;
	dhd->wd_timer_valid = FALSE;
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	if (timer_valid)
		del_timer_sync(&dhd->timer);

	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_wdt_ctl);
		}

		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rxf_ctl);
		}

		/* a negative dpc thread pid means the tasklet was used instead */
		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_dpc_ctl);
		} else
			tasklet_kill(&dhd->tasklet);
	}
#ifdef WL_CFG80211
	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
		wl_cfg80211_detach(NULL);
		dhd_monitor_uninit();
	}
#endif
	/* free deferred work queue */
	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
	dhd->dhd_deferred_wq = NULL;

#ifdef SHOW_LOGTRACE
	if (dhd->event_data.fmts)
		kfree(dhd->event_data.fmts);
	if (dhd->event_data.raw_fmts)
		kfree(dhd->event_data.raw_fmts);
#endif /* SHOW_LOGTRACE */

#ifdef PNO_SUPPORT
	if (dhdp->pno_state)
		dhd_pno_deinit(dhdp);
#endif
#if defined(CONFIG_PM_SLEEP)
	if (dhd_pm_notifier_registered) {
		unregister_pm_notifier(&dhd_pm_notifier);
		dhd_pm_notifier_registered = FALSE;
	}
#endif /* CONFIG_PM_SLEEP */
#ifdef DEBUG_CPU_FREQ
	if (dhd->new_freq)
		free_percpu(dhd->new_freq);
	dhd->new_freq = NULL;
	cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif
	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
		/* reset counters so destroy is not tripped by held references */
		dhd->wakelock_counter = 0;
		dhd->wakelock_wd_counter = 0;
		dhd->wakelock_rx_timeout_enable = 0;
		dhd->wakelock_ctrl_timeout_enable = 0;
		wake_lock_destroy(&dhd->wl_wifi);
		wake_lock_destroy(&dhd->wl_rxwake);
		wake_lock_destroy(&dhd->wl_ctrlwake);
		wake_lock_destroy(&dhd->wl_wdwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
		wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
	}

#ifdef DHDTCPACK_SUPPRESS
	/* This will free all MEM allocated for TCPACK SUPPRESS */
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
	dhd_conf_detach(dhdp);
}
7552
7553
7554 void
7555 dhd_free(dhd_pub_t *dhdp)
7556 {
7557 dhd_info_t *dhd;
7558 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7559
7560 if (dhdp) {
7561 int i;
7562 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7563 if (dhdp->reorder_bufs[i]) {
7564 reorder_info_t *ptr;
7565 uint32 buf_size = sizeof(struct reorder_info);
7566
7567 ptr = dhdp->reorder_bufs[i];
7568
7569 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7570 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7571 i, ptr->max_idx, buf_size));
7572
7573 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7574 dhdp->reorder_bufs[i] = NULL;
7575 }
7576 }
7577
7578 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
7579
7580 dhd = (dhd_info_t *)dhdp->info;
7581 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
7582 if (dhd &&
7583 dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
7584 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
7585 dhd = NULL;
7586 }
7587 }
7588
7589 void
7590 dhd_clear(dhd_pub_t *dhdp)
7591 {
7592 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7593
7594 if (dhdp) {
7595 int i;
7596 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
7597 if (dhdp->reorder_bufs[i]) {
7598 reorder_info_t *ptr;
7599 uint32 buf_size = sizeof(struct reorder_info);
7600
7601 ptr = dhdp->reorder_bufs[i];
7602
7603 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
7604 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7605 i, ptr->max_idx, buf_size));
7606
7607 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
7608 dhdp->reorder_bufs[i] = NULL;
7609 }
7610 }
7611
7612 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
7613 }
7614 }
7615
/* Module-level teardown: unregister bus and platform drivers and undo
 * Android/Amlogic glue. Shared by dhd_module_exit() and init error paths.
 */
static void
dhd_module_cleanup(void)
{
	printf("%s: Enter\n", __FUNCTION__);

	dhd_bus_unregister();

	wl_android_exit();

	dhd_wifi_platform_unregister_drv();
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	/* release the device-tree resources claimed in dhd_module_init() */
	wifi_teardown_dt();
#endif
#endif
	printf("%s: Exit\n", __FUNCTION__);
}
7633
/* Module exit hook: full cleanup plus removal of the reboot notifier that
 * dhd_module_init() registered on successful load.
 */
static void __exit
dhd_module_exit(void)
{
	dhd_module_cleanup();
	unregister_reboot_notifier(&dhd_reboot_notifier);
}
7640
/*
 * Module init hook. Saves the firmware/nvram path module parameters,
 * then tries to register the platform driver up to POWERUP_MAX_RETRY+1
 * times, restoring the saved paths before each retry. Returns 0 on
 * success or the last registration error.
 */
static int __init
dhd_module_init(void)
{
	int err;
	int retry = POWERUP_MAX_RETRY;

	printf("%s: in\n", __FUNCTION__);
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
	if (wifi_setup_dt()) {
		printf("wifi_dt : fail to setup dt\n");
	}
#endif
#endif

	DHD_PERIM_RADIO_INIT();

	/* back up the module-parameter paths; registration may clobber them */
	if (firmware_path[0] != '\0') {
		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	if (nvram_path[0] != '\0') {
		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	do {
		err = dhd_wifi_platform_register_drv();
		if (!err) {
			register_reboot_notifier(&dhd_reboot_notifier);
			break;
		}
		else {
			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
				__FUNCTION__, retry));
			/* restore paths that a failed attempt may have changed */
			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
		}
	} while (retry--);

	if (err) {
#ifdef CUSTOMER_HW_AMLOGIC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
		wifi_teardown_dt();
#endif
#endif
		DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
	}

	printf("%s: Exit err=%d\n", __FUNCTION__, err);
	return err;
}
7696
/* Reboot-notifier callback. The SYS_RESTART branch is intentionally empty
 * in this version (placeholder for restart-specific handling).
 */
static int
dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
{
	DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
	if (code == SYS_RESTART) {
	}

	return NOTIFY_DONE;
}
7706
7707
/* Select how module init is scheduled: deferred/late initcalls delay
 * driver load until later in boot (storage for firmware must be ready);
 * plain module_init is used on pre-2.6 kernels.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if defined(CONFIG_DEFERRED_INITCALLS)
deferred_module_init(dhd_module_init);
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */
#else
module_init(dhd_module_init);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */

module_exit(dhd_module_exit);
7721
7722 /*
7723 * OS specific functions required to implement DHD driver in OS independent way
7724 */
7725 int
7726 dhd_os_proto_block(dhd_pub_t *pub)
7727 {
7728 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7729
7730 if (dhd) {
7731 DHD_PERIM_UNLOCK(pub);
7732
7733 down(&dhd->proto_sem);
7734
7735 DHD_PERIM_LOCK(pub);
7736 return 1;
7737 }
7738
7739 return 0;
7740 }
7741
7742 int
7743 dhd_os_proto_unblock(dhd_pub_t *pub)
7744 {
7745 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
7746
7747 if (dhd) {
7748 up(&dhd->proto_sem);
7749 return 1;
7750 }
7751
7752 return 0;
7753 }
7754
/* Accessor for the file-scope ioctl response timeout (milliseconds). */
unsigned int
dhd_os_get_ioctl_resp_timeout(void)
{
	return ((unsigned int)dhd_ioctl_timeout_msec);
}
7760
/* Set the ioctl response timeout (milliseconds) used by
 * dhd_os_ioctl_resp_wait().
 */
void
dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
{
	dhd_ioctl_timeout_msec = (int)timeout_msec;
}
7766
/*
 * Block until *condition becomes true or the ioctl timeout expires,
 * releasing the perimeter lock while asleep. Returns the remaining
 * jiffies (0 on timeout), as wait_event_timeout does. The 'pending'
 * parameter is unused here.
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
#else
	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}
7788
7789 int
7790 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
7791 {
7792 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
7793
7794 wake_up(&dhd->ioctl_resp_wait);
7795 return 0;
7796 }
7797
7798 void
7799 dhd_os_wd_timer_extend(void *bus, bool extend)
7800 {
7801 dhd_pub_t *pub = bus;
7802 dhd_info_t *dhd = (dhd_info_t *)pub->info;
7803
7804 if (extend)
7805 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
7806 else
7807 dhd_os_wd_timer(bus, dhd->default_wd_interval);
7808 }
7809
7810
/*
 * Arm (wdtick != 0) or stop (wdtick == 0) the DHD watchdog timer.
 * Arming takes a watchdog wakelock; stopping releases it. The general
 * lock is dropped before del_timer_sync() because that call can sleep.
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
		/* still balance the wakelock when asked to stop */
		if (!wdtick)
			DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_OS_WD_WAKE_UNLOCK(pub);
		return;
	}

	if (wdtick) {
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
}
7853
7854 void *
7855 dhd_os_open_image(char *filename)
7856 {
7857 struct file *fp;
7858
7859 fp = filp_open(filename, O_RDONLY, 0);
7860 /*
7861 * 2.6.11 (FC4) supports filp_open() but later revs don't?
7862 * Alternative:
7863 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
7864 * ???
7865 */
7866 if (IS_ERR(fp))
7867 fp = NULL;
7868
7869 return fp;
7870 }
7871
7872 int
7873 dhd_os_get_image_block(char *buf, int len, void *image)
7874 {
7875 struct file *fp = (struct file *)image;
7876 int rdlen;
7877
7878 if (!image)
7879 return 0;
7880
7881 rdlen = kernel_read(fp, fp->f_pos, buf, len);
7882 if (rdlen > 0)
7883 fp->f_pos += rdlen;
7884
7885 return rdlen;
7886 }
7887
7888 void
7889 dhd_os_close_image(void *image)
7890 {
7891 if (image)
7892 filp_close((struct file *)image, NULL);
7893 }
7894
7895 void
7896 dhd_os_sdlock(dhd_pub_t *pub)
7897 {
7898 dhd_info_t *dhd;
7899
7900 dhd = (dhd_info_t *)(pub->info);
7901
7902 if (dhd_dpc_prio >= 0)
7903 down(&dhd->sdsem);
7904 else
7905 spin_lock_bh(&dhd->sdlock);
7906 }
7907
7908 void
7909 dhd_os_sdunlock(dhd_pub_t *pub)
7910 {
7911 dhd_info_t *dhd;
7912
7913 dhd = (dhd_info_t *)(pub->info);
7914
7915 if (dhd_dpc_prio >= 0)
7916 up(&dhd->sdsem);
7917 else
7918 spin_unlock_bh(&dhd->sdlock);
7919 }
7920
7921 void
7922 dhd_os_sdlock_txq(dhd_pub_t *pub)
7923 {
7924 dhd_info_t *dhd;
7925
7926 dhd = (dhd_info_t *)(pub->info);
7927 spin_lock_bh(&dhd->txqlock);
7928 }
7929
7930 void
7931 dhd_os_sdunlock_txq(dhd_pub_t *pub)
7932 {
7933 dhd_info_t *dhd;
7934
7935 dhd = (dhd_info_t *)(pub->info);
7936 spin_unlock_bh(&dhd->txqlock);
7937 }
7938
/* Receive queue lock: intentionally a no-op in this build. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
7943
/* Receive queue unlock: intentionally a no-op in this build. */
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
7948
7949 static void
7950 dhd_os_rxflock(dhd_pub_t *pub)
7951 {
7952 dhd_info_t *dhd;
7953
7954 dhd = (dhd_info_t *)(pub->info);
7955 spin_lock_bh(&dhd->rxf_lock);
7956
7957 }
7958
7959 static void
7960 dhd_os_rxfunlock(dhd_pub_t *pub)
7961 {
7962 dhd_info_t *dhd;
7963
7964 dhd = (dhd_info_t *)(pub->info);
7965 spin_unlock_bh(&dhd->rxf_lock);
7966 }
7967
7968 #ifdef DHDTCPACK_SUPPRESS
7969 void
7970 dhd_os_tcpacklock(dhd_pub_t *pub)
7971 {
7972 dhd_info_t *dhd;
7973
7974 dhd = (dhd_info_t *)(pub->info);
7975 spin_lock_bh(&dhd->tcpack_lock);
7976
7977 }
7978
7979 void
7980 dhd_os_tcpackunlock(dhd_pub_t *pub)
7981 {
7982 dhd_info_t *dhd;
7983
7984 dhd = (dhd_info_t *)(pub->info);
7985 spin_unlock_bh(&dhd->tcpack_lock);
7986 }
7987 #endif /* DHDTCPACK_SUPPRESS */
7988
7989 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
7990 {
7991 uint8* buf;
7992 gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
7993
7994 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
7995 if (buf == NULL) {
7996 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
7997 " size: %dbytes\n", __FUNCTION__, section, size));
7998 if (kmalloc_if_fail)
7999 buf = kmalloc(size, flags);
8000 }
8001
8002 return buf;
8003 }
8004
/* Counterpart of dhd_os_prealloc: intentionally empty because pool
 * buffers are owned by the platform layer and are never freed here.
 */
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
8008
8009 #if defined(WL_WIRELESS_EXT)
8010 struct iw_statistics *
8011 dhd_get_wireless_stats(struct net_device *dev)
8012 {
8013 int res = 0;
8014 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8015
8016 if (!dhd->pub.up) {
8017 return NULL;
8018 }
8019
8020 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
8021
8022 if (res == 0)
8023 return &dhd->iw.wstats;
8024 else
8025 return NULL;
8026 }
8027 #endif /* defined(WL_WIRELESS_EXT) */
8028
8029 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
/*
 * WLANAUDIO event filter: maintains a small blacklist of peer MACs based
 * on repeated TX failures, and clears entries again on (de)auth/(re)assoc
 * events. Returns BCME_OK, or BCME_ERROR when the event has no address.
 * NOTE(review): the "< 15" / "> 10" counters and 100-jiffy windows are
 * tuning constants inherited from the vendor code — semantics assumed,
 * not verified here.
 */
static int
dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int cnt;
	char eabuf[ETHER_ADDR_STR_LEN];
	struct ether_addr *addr = &event->addr;
	uint32 type = ntoh32_ua((void *)&event->event_type);

	switch (type) {
	case WLC_E_TXFAIL:
		if (addr != NULL)
			bcm_ether_ntoa(addr, eabuf);
		else
			return (BCME_ERROR);

		for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
			if (dhd->wlanaudio_blist[cnt].is_blacklist)
				break;

			if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
				addr, ETHER_ADDR_LEN)) {
				/* Mac address is Same */
				dhd->wlanaudio_blist[cnt].cnt++;

				if (dhd->wlanaudio_blist[cnt].cnt < 15) {
					/* black list is false */
					/* >10 failures within 100 jiffies -> blacklist */
					if ((dhd->wlanaudio_blist[cnt].cnt > 10) &&
						(jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
						< 100)) {
						dhd->wlanaudio_blist[cnt].is_blacklist = true;
						dhd->is_wlanaudio_blist = true;
					}
				} else {
					/* stale, never-blacklisted entry: reset it */
					if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
						(jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
						> 100)) {

						bzero(&dhd->wlanaudio_blist[cnt],
						sizeof(struct wlanaudio_blacklist));
					}
				}
				break;
			} else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
				(!dhd->wlanaudio_blist[cnt].cnt)) {
				/* free slot: start tracking this peer */
				bcopy(addr,
				(char*)&dhd->wlanaudio_blist[cnt].blacklist_addr,
				ETHER_ADDR_LEN);
				dhd->wlanaudio_blist[cnt].cnt++;
				dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies;

				bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf);
				break;
			}
		}
		break;
	case WLC_E_AUTH :
	case WLC_E_AUTH_IND :
	case WLC_E_DEAUTH :
	case WLC_E_DEAUTH_IND :
	case WLC_E_ASSOC:
	case WLC_E_ASSOC_IND:
	case WLC_E_REASSOC:
	case WLC_E_REASSOC_IND:
	case WLC_E_DISASSOC:
	case WLC_E_DISASSOC_IND:
	{
		int bl_cnt = 0;

		if (addr != NULL)
			bcm_ether_ntoa(addr, eabuf);
		else
			return (BCME_ERROR);

		/* any association-state change clears this peer's entry */
		for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
			if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
				addr, ETHER_ADDR_LEN)) {
				/* Mac address is Same */
				if (dhd->wlanaudio_blist[cnt].is_blacklist) {
					/* black list is true */
					bzero(&dhd->wlanaudio_blist[cnt],
					sizeof(struct wlanaudio_blacklist));
				}
			}
		}

		/* recompute the global "any peer blacklisted" flag */
		for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
			if (dhd->wlanaudio_blist[cnt].is_blacklist)
				bl_cnt++;
		}

		if (!bl_cnt)
		{
			dhd->is_wlanaudio_blist = false;
		}

		break;
	}
	}
	return BCME_OK;
}
8131 #endif /* CUSTOMER_HW20 && WLANAUDIO */
/*
 * Central dispatch for a firmware event: optional WLANAUDIO filtering,
 * common parsing via wl_host_event() (which may rewrite *ifidx and *data),
 * then delivery to wireless-extensions and/or cfg80211. Returns BCME_OK
 * or the first error encountered.
 */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;

	ASSERT(dhd != NULL);

#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
	bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data);

	if (bcmerror != BCME_OK)
		return (bcmerror);
#endif /* CUSTOMER_HW20 && WLANAUDIO */

#ifdef SHOW_LOGTRACE
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */

	if (bcmerror != BCME_OK)
		return (bcmerror);

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */

		ASSERT(dhd->iflist[*ifidx] != NULL);
		ASSERT(dhd->iflist[*ifidx]->net != NULL);

		if (dhd->iflist[*ifidx]->net) {
			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef WL_CFG80211
	ASSERT(dhd->iflist[*ifidx] != NULL);
	ASSERT(dhd->iflist[*ifidx]->net != NULL);
	if (dhd->iflist[*ifidx]->net)
		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
8180
8181 /* send up locally generated event */
8182 void
8183 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
8184 {
8185 switch (ntoh32(event->event_type)) {
8186 #ifdef WLBTAMP
8187 /* Send up locally generated AMP HCI Events */
8188 case WLC_E_BTA_HCI_EVENT: {
8189 struct sk_buff *p, *skb;
8190 bcm_event_t *msg;
8191 wl_event_msg_t *p_bcm_event;
8192 char *ptr;
8193 uint32 len;
8194 uint32 pktlen;
8195 dhd_if_t *ifp;
8196 dhd_info_t *dhd;
8197 uchar *eth;
8198 int ifidx;
8199
8200 len = ntoh32(event->datalen);
8201 pktlen = sizeof(bcm_event_t) + len + 2;
8202 dhd = dhdp->info;
8203 ifidx = dhd_ifname2idx(dhd, event->ifname);
8204
8205 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
8206 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
8207
8208 msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
8209
8210 bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
8211 bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
8212 ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
8213
8214 msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
8215
8216 /* BCM Vendor specific header... */
8217 msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
8218 msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
8219 bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
8220
8221 /* vendor spec header length + pvt data length (private indication
8222 * hdr + actual message itself)
8223 */
8224 msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
8225 BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
8226 msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
8227
8228 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
8229
8230 /* copy wl_event_msg_t into sk_buf */
8231
8232 /* pointer to wl_event_msg_t in sk_buf */
8233 p_bcm_event = &msg->event;
8234 bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
8235
8236 /* copy hci event into sk_buf */
8237 bcopy(data, (p_bcm_event + 1), len);
8238
8239 msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
8240 ntoh16(msg->bcm_hdr.length));
8241 PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
8242
8243 ptr = (char *)(msg + 1);
8244 /* Last 2 bytes of the message are 0x00 0x00 to signal that there
8245 * are no ethertypes which are following this
8246 */
8247 ptr[len+0] = 0x00;
8248 ptr[len+1] = 0x00;
8249
8250 skb = PKTTONATIVE(dhdp->osh, p);
8251 eth = skb->data;
8252 len = skb->len;
8253
8254 ifp = dhd->iflist[ifidx];
8255 if (ifp == NULL)
8256 ifp = dhd->iflist[0];
8257
8258 ASSERT(ifp);
8259 skb->dev = ifp->net;
8260 skb->protocol = eth_type_trans(skb, skb->dev);
8261
8262 skb->data = eth;
8263 skb->len = len;
8264
8265 /* Strip header, count, deliver upward */
8266 skb_pull(skb, ETH_HLEN);
8267
8268 /* Send the packet */
8269 if (in_interrupt()) {
8270 netif_rx(skb);
8271 } else {
8272 netif_rx_ni(skb);
8273 }
8274 }
8275 else {
8276 /* Could not allocate a sk_buf */
8277 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
8278 }
8279 break;
8280 } /* case WLC_E_BTA_HCI_EVENT */
8281 #endif /* WLBTAMP */
8282
8283 default:
8284 break;
8285 }
8286 }
8287
8288 #ifdef LOG_INTO_TCPDUMP
/* Wrap an arbitrary log blob in a BRCM-ethertype Ethernet frame and inject
 * it on wlan0 (or interface 0) so it shows up in tcpdump captures.
 */
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		/* self-addressed frame; toggle local-admin bit on the source */
		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		/* eth_type_trans pulls the header; restore data/len after */
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	}
	else {
		/* Could not allocate a sk_buf */
		DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
	}
}
8344 #endif /* LOG_INTO_TCPDUMP */
8345
/* Sleep until *lockvar becomes FALSE or the IOCTL response timeout expires.
 * The SDIO bus lock is dropped for the duration of the wait so the event
 * producer can make progress. No-op on non-SDIO / pre-2.6 builds.
 */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
	struct dhd_info *dhdinfo = dhd->info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
	return;
}
8363
8364 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
8365 {
8366 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8367 struct dhd_info *dhdinfo = dhd->info;
8368 if (waitqueue_active(&dhdinfo->ctrl_wait))
8369 wake_up(&dhdinfo->ctrl_wait);
8370 #endif
8371 return;
8372 }
8373
8374 #if defined(BCMSDIO) || defined(BCMPCIE)
8375 int
8376 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
8377 {
8378 int ret = 0;
8379 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8380
8381 if (flag == TRUE) {
8382 /* Issue wl down command before resetting the chip */
8383 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
8384 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
8385 }
8386 #ifdef PROP_TXSTATUS
8387 if (dhd->pub.wlfc_enabled)
8388 dhd_wlfc_deinit(&dhd->pub);
8389 #endif /* PROP_TXSTATUS */
8390 #ifdef PNO_SUPPORT
8391 if (dhd->pub.pno_state)
8392 dhd_pno_deinit(&dhd->pub);
8393 #endif
8394 }
8395
8396 #ifdef BCMSDIO
8397 if (!flag) {
8398 dhd_update_fw_nv_path(dhd);
8399 /* update firmware and nvram path to sdio bus */
8400 dhd_bus_update_fw_nv_path(dhd->pub.bus,
8401 dhd->fw_path, dhd->nv_path, dhd->conf_path);
8402 }
8403 #endif /* BCMSDIO */
8404
8405 ret = dhd_bus_devreset(&dhd->pub, flag);
8406 if (ret) {
8407 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
8408 return ret;
8409 }
8410
8411 return ret;
8412 }
8413
8414 #ifdef BCMSDIO
8415 int
8416 dhd_net_bus_suspend(struct net_device *dev)
8417 {
8418 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8419 return dhd_bus_suspend(&dhd->pub);
8420 }
8421
8422 int
8423 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
8424 {
8425 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8426 return dhd_bus_resume(&dhd->pub, stage);
8427 }
8428
8429 #endif /* BCMSDIO */
8430 #endif /* BCMSDIO || BCMPCIE */
8431
8432 int net_os_set_suspend_disable(struct net_device *dev, int val)
8433 {
8434 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8435 int ret = 0;
8436
8437 if (dhd) {
8438 ret = dhd->pub.suspend_disable_flag;
8439 dhd->pub.suspend_disable_flag = val;
8440 }
8441 return ret;
8442 }
8443
8444 int net_os_set_suspend(struct net_device *dev, int val, int force)
8445 {
8446 int ret = 0;
8447 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8448
8449 if (dhd) {
8450 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8451 ret = dhd_set_suspend(val, &dhd->pub);
8452 #else
8453 ret = dhd_suspend_resume_helper(dhd, val, force);
8454 #endif
8455 #ifdef WL_CFG80211
8456 wl_cfg80211_update_power_mode(dev);
8457 #endif
8458 }
8459 return ret;
8460 }
8461
8462 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
8463 {
8464 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8465
8466 if (dhd)
8467 dhd->pub.suspend_bcn_li_dtim = val;
8468
8469 return 0;
8470 }
8471
8472 #ifdef PKT_FILTER_SUPPORT
8473 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
8474 {
8475 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8476 char *filterp = NULL;
8477 int filter_id = 0;
8478 int ret = 0;
8479
8480 if (!dhd_master_mode)
8481 add_remove = !add_remove;
8482
8483 if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
8484 (num == DHD_MDNS_FILTER_NUM))
8485 return ret;
8486 if (num >= dhd->pub.pktfilter_count)
8487 return -EINVAL;
8488 switch (num) {
8489 case DHD_BROADCAST_FILTER_NUM:
8490 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
8491 filter_id = 101;
8492 break;
8493 case DHD_MULTICAST4_FILTER_NUM:
8494 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
8495 filter_id = 102;
8496 break;
8497 case DHD_MULTICAST6_FILTER_NUM:
8498 filterp = "103 0 0 0 0xFFFF 0x3333";
8499 filter_id = 103;
8500 break;
8501 default:
8502 return -EINVAL;
8503 }
8504
8505 /* Add filter */
8506 if (add_remove) {
8507 dhd->pub.pktfilter[num] = filterp;
8508 dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
8509 } else { /* Delete filter */
8510 if (dhd->pub.pktfilter[num] != NULL) {
8511 dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
8512 dhd->pub.pktfilter[num] = NULL;
8513 }
8514 }
8515 return ret;
8516 }
8517
8518 int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
8519
8520 {
8521 int ret = 0;
8522
8523 /* Packet filtering is set only if we still in early-suspend and
8524 * we need either to turn it ON or turn it OFF
8525 * We can always turn it OFF in case of early-suspend, but we turn it
8526 * back ON only if suspend_disable_flag was not set
8527 */
8528 if (dhdp && dhdp->up) {
8529 if (dhdp->in_suspend) {
8530 if (!val || (val && !dhdp->suspend_disable_flag))
8531 dhd_enable_packet_filter(val, dhdp);
8532 }
8533 }
8534 return ret;
8535 }
8536
8537 /* function to enable/disable packet for Network device */
8538 int net_os_enable_packet_filter(struct net_device *dev, int val)
8539 {
8540 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8541
8542 return dhd_os_enable_packet_filter(&dhd->pub, val);
8543 }
8544 #endif /* PKT_FILTER_SUPPORT */
8545
8546 int
8547 dhd_dev_init_ioctl(struct net_device *dev)
8548 {
8549 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8550 int ret;
8551
8552 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
8553 goto done;
8554
8555 done:
8556 return ret;
8557 }
8558
8559 #ifdef PNO_SUPPORT
8560 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
8561 int
8562 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
8563 {
8564 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8565
8566 return (dhd_pno_stop_for_ssid(&dhd->pub));
8567 }
8568 /* Linux wrapper to call common dhd_pno_set_for_ssid */
8569 int
8570 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
8571 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
8572 {
8573 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8574
8575 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
8576 pno_repeat, pno_freq_expo_max, channel_list, nchan));
8577 }
8578
8579 /* Linux wrapper to call common dhd_pno_enable */
8580 int
8581 dhd_dev_pno_enable(struct net_device *dev, int enable)
8582 {
8583 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8584
8585 return (dhd_pno_enable(&dhd->pub, enable));
8586 }
8587
8588 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
8589 int
8590 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
8591 struct dhd_pno_hotlist_params *hotlist_params)
8592 {
8593 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8594 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
8595 }
8596 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
8597 int
8598 dhd_dev_pno_stop_for_batch(struct net_device *dev)
8599 {
8600 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8601 return (dhd_pno_stop_for_batch(&dhd->pub));
8602 }
8603 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
8604 int
8605 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
8606 {
8607 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8608 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
8609 }
8610 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
8611 int
8612 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
8613 {
8614 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8615 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
8616 }
8617 #endif /* PNO_SUPPORT */
8618
8619 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
8620 static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
8621 {
8622 dhd_info_t *dhd;
8623 struct net_device *dev;
8624
8625 dhd = (dhd_info_t *)dhd_info;
8626 dev = dhd->iflist[0]->net;
8627
8628 if (dev) {
8629 rtnl_lock();
8630 dev_close(dev);
8631 rtnl_unlock();
8632 #if defined(WL_WIRELESS_EXT)
8633 wl_iw_send_priv_event(dev, "HANG");
8634 #endif
8635 #if defined(WL_CFG80211)
8636 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8637 #endif
8638 }
8639 }
8640
8641
8642 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
8643 {
8644 int ret = 0;
8645 if (dhdp) {
8646 if (!dhdp->hang_was_sent) {
8647 dhdp->hang_was_sent = 1;
8648 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
8649 DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
8650 }
8651 }
8652 return ret;
8653 }
8654
8655 int net_os_send_hang_message(struct net_device *dev)
8656 {
8657 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8658 int ret = 0;
8659
8660 if (dhd) {
8661 /* Report FW problem when enabled */
8662 if (dhd->pub.hang_report) {
8663 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8664 ret = dhd_os_send_hang_message(&dhd->pub);
8665 #else
8666 ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
8667 #endif
8668 } else {
8669 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
8670 __FUNCTION__));
8671 /* Enforce bus down to stop any future traffic */
8672 dhd->pub.busstate = DHD_BUS_DOWN;
8673 }
8674 }
8675 return ret;
8676 }
8677 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
8678
8679
8680 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
8681 {
8682 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8683 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
8684 }
8685
8686 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
8687 wl_country_t *cspec)
8688 {
8689 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8690 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
8691 }
8692 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
8693 {
8694 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8695 if (dhd && dhd->pub.up) {
8696 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
8697 #ifdef WL_CFG80211
8698 wl_update_wiphybands(NULL, notify);
8699 #endif
8700 }
8701 }
8702
/* Notify cfg80211 that the radio band changed so wiphy band info can be
 * rebuilt. The 'band' argument is not consumed here; presumably
 * wl_update_wiphybands() reads the new setting back from the dongle -
 * TODO confirm.
 */
void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(NULL, true);
#endif
	}
}
8712
8713 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
8714 {
8715 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8716
8717 if (!fw || fw[0] == '\0')
8718 return -EINVAL;
8719
8720 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
8721 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
8722
8723 #if defined(SOFTAP)
8724 if (strstr(fw, "apsta") != NULL) {
8725 DHD_INFO(("GOT APSTA FIRMWARE\n"));
8726 ap_fw_loaded = TRUE;
8727 } else {
8728 DHD_INFO(("GOT STA FIRMWARE\n"));
8729 ap_fw_loaded = FALSE;
8730 }
8731 #endif
8732 return 0;
8733 }
8734
/* Public wrapper: take the net-interface mutex for 'dev'. */
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_net_if_lock_local(DHD_DEV_INFO(dev));
}
8740
/* Public wrapper: release the net-interface mutex for 'dev'. */
void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_net_if_unlock_local(DHD_DEV_INFO(dev));
}
8746
/* Serialize interface add/remove against other net_if users (2.6.25+ only). */
static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	if (dhd)
		mutex_lock(&dhd->dhd_net_if_mutex);
#endif
}
8754
/* Release the mutex taken by dhd_net_if_lock_local() (2.6.25+ only). */
static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	if (dhd)
		mutex_unlock(&dhd->dhd_net_if_mutex);
#endif
}
8762
/* Serialize suspend/resume processing (2.6.25+ only). */
static void dhd_suspend_lock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
#endif
}
8771
/* Release the mutex taken by dhd_suspend_lock() (2.6.25+ only). */
static void dhd_suspend_unlock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
#endif
}
8780
8781 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
8782 {
8783 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8784 unsigned long flags = 0;
8785
8786 if (dhd)
8787 spin_lock_irqsave(&dhd->dhd_lock, flags);
8788
8789 return flags;
8790 }
8791
8792 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
8793 {
8794 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8795
8796 if (dhd)
8797 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
8798 }
8799
8800 /* Linux specific multipurpose spinlock API */
8801 void *
8802 dhd_os_spin_lock_init(osl_t *osh)
8803 {
8804 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
8805 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
8806 /* and this results in kernel asserts in internal builds */
8807 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
8808 if (lock)
8809 spin_lock_init(lock);
8810 return ((void *)lock);
8811 }
8812 void
8813 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
8814 {
8815 MFREE(osh, lock, sizeof(spinlock_t) + 4);
8816 }
8817 unsigned long
8818 dhd_os_spin_lock(void *lock)
8819 {
8820 unsigned long flags = 0;
8821
8822 if (lock)
8823 spin_lock_irqsave((spinlock_t *)lock, flags);
8824
8825 return flags;
8826 }
8827 void
8828 dhd_os_spin_unlock(void *lock, unsigned long flags)
8829 {
8830 if (lock)
8831 spin_unlock_irqrestore((spinlock_t *)lock, flags);
8832 }
8833
8834 static int
8835 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
8836 {
8837 return (atomic_read(&dhd->pend_8021x_cnt));
8838 }
8839
8840 #define MAX_WAIT_FOR_8021X_TX 100
8841
8842 int
8843 dhd_wait_pend8021x(struct net_device *dev)
8844 {
8845 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8846 int timeout = msecs_to_jiffies(10);
8847 int ntimes = MAX_WAIT_FOR_8021X_TX;
8848 int pend = dhd_get_pend_8021x_cnt(dhd);
8849
8850 while (ntimes && pend) {
8851 if (pend) {
8852 set_current_state(TASK_INTERRUPTIBLE);
8853 DHD_PERIM_UNLOCK(&dhd->pub);
8854 schedule_timeout(timeout);
8855 DHD_PERIM_LOCK(&dhd->pub);
8856 set_current_state(TASK_RUNNING);
8857 ntimes--;
8858 }
8859 pend = dhd_get_pend_8021x_cnt(dhd);
8860 }
8861 if (ntimes == 0)
8862 {
8863 atomic_set(&dhd->pend_8021x_cnt, 0);
8864 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
8865 }
8866 return pend;
8867 }
8868
8869 #ifdef DHD_DEBUG
8870 int
8871 write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
8872 {
8873 int ret = 0;
8874 struct file *fp;
8875 mm_segment_t old_fs;
8876 loff_t pos = 0;
8877
8878 /* change to KERNEL_DS address limit */
8879 old_fs = get_fs();
8880 set_fs(KERNEL_DS);
8881
8882 /* open file to write */
8883 fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
8884 if (!fp) {
8885 printf("%s: open file error\n", __FUNCTION__);
8886 ret = -1;
8887 goto exit;
8888 }
8889
8890 /* Write buf to file */
8891 fp->f_op->write(fp, buf, size, &pos);
8892
8893 exit:
8894 /* free buf before return */
8895 MFREE(dhd->osh, buf, size);
8896 /* close file before return */
8897 if (fp)
8898 filp_close(fp, current->files);
8899 /* restore previous address limit */
8900 set_fs(old_fs);
8901
8902 return ret;
8903 }
8904 #endif /* DHD_DEBUG */
8905
8906 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
8907 {
8908 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8909 unsigned long flags;
8910 int ret = 0;
8911
8912 if (dhd) {
8913 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8914 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
8915 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
8916 #ifdef CONFIG_HAS_WAKELOCK
8917 if (dhd->wakelock_rx_timeout_enable)
8918 wake_lock_timeout(&dhd->wl_rxwake,
8919 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
8920 if (dhd->wakelock_ctrl_timeout_enable)
8921 wake_lock_timeout(&dhd->wl_ctrlwake,
8922 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
8923 #endif
8924 dhd->wakelock_rx_timeout_enable = 0;
8925 dhd->wakelock_ctrl_timeout_enable = 0;
8926 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8927 }
8928 return ret;
8929 }
8930
8931 int net_os_wake_lock_timeout(struct net_device *dev)
8932 {
8933 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8934 int ret = 0;
8935
8936 if (dhd)
8937 ret = dhd_os_wake_lock_timeout(&dhd->pub);
8938 return ret;
8939 }
8940
8941 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
8942 {
8943 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8944 unsigned long flags;
8945
8946 if (dhd) {
8947 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8948 if (val > dhd->wakelock_rx_timeout_enable)
8949 dhd->wakelock_rx_timeout_enable = val;
8950 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8951 }
8952 return 0;
8953 }
8954
8955 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
8956 {
8957 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8958 unsigned long flags;
8959
8960 if (dhd) {
8961 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8962 if (val > dhd->wakelock_ctrl_timeout_enable)
8963 dhd->wakelock_ctrl_timeout_enable = val;
8964 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8965 }
8966 return 0;
8967 }
8968
8969 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
8970 {
8971 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
8972 unsigned long flags;
8973
8974 if (dhd) {
8975 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
8976 dhd->wakelock_ctrl_timeout_enable = 0;
8977 #ifdef CONFIG_HAS_WAKELOCK
8978 if (wake_lock_active(&dhd->wl_ctrlwake))
8979 wake_unlock(&dhd->wl_ctrlwake);
8980 #endif
8981 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
8982 }
8983 return 0;
8984 }
8985
8986 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
8987 {
8988 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8989 int ret = 0;
8990
8991 if (dhd)
8992 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
8993 return ret;
8994 }
8995
8996 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
8997 {
8998 dhd_info_t *dhd = DHD_DEV_INFO(dev);
8999 int ret = 0;
9000
9001 if (dhd)
9002 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
9003 return ret;
9004 }
9005
/* Take the driver wake lock. Only the first holder (counter 0 -> 1)
 * acquires the underlying OS wake lock, and not while locks are waived.
 * Returns the new nesting count (0 if no driver instance).
 */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
			wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
			/* no Android wakelock support: pin runtime PM instead */
			dhd_bus_dev_pm_stay_awake(pub);
#endif
		}
		dhd->wakelock_counter++;
		ret = dhd->wakelock_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
9028
9029 int net_os_wake_lock(struct net_device *dev)
9030 {
9031 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9032 int ret = 0;
9033
9034 if (dhd)
9035 ret = dhd_os_wake_lock(&dhd->pub);
9036 return ret;
9037 }
9038
/* Drop one reference on the driver wake lock; the last holder releases the
 * underlying OS lock (unless waived). Any pending wake-lock timeouts are
 * flushed first. Returns the remaining nesting count.
 */
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	dhd_os_wake_lock_timeout(pub);
	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		if (dhd->wakelock_counter > 0) {
			dhd->wakelock_counter--;
			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
				wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
				/* counterpart of dhd_bus_dev_pm_stay_awake() */
				dhd_bus_dev_pm_relax(pub);
#endif
			}
			ret = dhd->wakelock_counter;
		}
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
9063
/* Report whether the driver currently holds a wake lock that should block
 * host suspend (main wifi lock or watchdog lock). Returns 1 when suspend
 * must be vetoed, otherwise 0.
 */
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		(wake_lock_active(&dhd->wl_wdwake))))
		return 1;
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
9086
/* Like dhd_os_check_wakelock() but additionally considers the rx and ctrl
 * timeout locks. Returns 1 when any driver wake lock is held.
 */
int dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
	KERNEL_VERSION(2, 6, 36)))
	dhd_info_t *dhd;

	if (!pub)
		return 0;
	dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */

#ifdef CONFIG_HAS_WAKELOCK
	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
		wake_lock_active(&dhd->wl_wdwake) ||
		wake_lock_active(&dhd->wl_rxwake) ||
		wake_lock_active(&dhd->wl_ctrlwake))) {
		return 1;
	}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
		return 1;
#endif
	return 0;
}
9112
9113 int net_os_wake_unlock(struct net_device *dev)
9114 {
9115 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9116 int ret = 0;
9117
9118 if (dhd)
9119 ret = dhd_os_wake_unlock(&dhd->pub);
9120 return ret;
9121 }
9122
9123 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
9124 {
9125 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9126 unsigned long flags;
9127 int ret = 0;
9128
9129 if (dhd) {
9130 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9131 #ifdef CONFIG_HAS_WAKELOCK
9132 /* if wakelock_wd_counter was never used : lock it at once */
9133 if (!dhd->wakelock_wd_counter)
9134 wake_lock(&dhd->wl_wdwake);
9135 #endif
9136 dhd->wakelock_wd_counter++;
9137 ret = dhd->wakelock_wd_counter;
9138 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9139 }
9140 return ret;
9141 }
9142
9143 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
9144 {
9145 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
9146 unsigned long flags;
9147 int ret = 0;
9148
9149 if (dhd) {
9150 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
9151 if (dhd->wakelock_wd_counter) {
9152 dhd->wakelock_wd_counter = 0;
9153 #ifdef CONFIG_HAS_WAKELOCK
9154 wake_unlock(&dhd->wl_wdwake);
9155 #endif
9156 }
9157 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
9158 }
9159 return ret;
9160 }
9161
9162 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Hold the PCIe OOB host-wake interrupt wake lock for 'val' ms (no-op
 * without CONFIG_HAS_WAKELOCK). Always returns 0.
 */
int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int ret = 0;

	if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
#endif
	}
	return ret;
}
9175
/* Drop the PCIe OOB host-wake interrupt wake lock if it is currently held
 * (no-op without CONFIG_HAS_WAKELOCK). Always returns 0.
 */
int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int ret = 0;

	if (dhd) {
#ifdef CONFIG_HAS_WAKELOCK
		/* if wl_intrwake is active, unlock it */
		if (wake_lock_active(&dhd->wl_intrwake)) {
			wake_unlock(&dhd->wl_intrwake);
		}
#endif
	}
	return ret;
}
9191 #endif /* BCMPCIE_OOB_HOST_WAKE */
9192
/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
 * by a paired function call to dhd_wakelock_restore.
 * NOTE(review): despite the original comment promising the "current
 * wakelock counter", this returns wakelock_wd_counter (the watchdog lock
 * count), as does dhd_os_wake_lock_restore() - confirm intent with callers
 * before changing.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (dhd) {
		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
		/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
		if (dhd->waive_wakelock == FALSE) {
			/* record current lock status */
			dhd->wakelock_before_waive = dhd->wakelock_counter;
			dhd->waive_wakelock = TRUE;
		}
		ret = dhd->wakelock_wd_counter;
		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	}
	return ret;
}
9215
/* Undo dhd_os_wake_lock_waive(): reconcile the OS wake lock with any
 * wakelock_counter transitions that happened while waived.
 * NOTE(review): returns wakelock_wd_counter, mirroring
 * dhd_os_wake_lock_waive() - confirm intent before changing.
 */
int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags;
	int ret = 0;

	if (!dhd)
		return 0;

	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
	if (!dhd->waive_wakelock)
		goto exit;

	dhd->waive_wakelock = FALSE;
	/* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
	 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
	 * the lock in between, do the same by calling wake_unlock or pm_relax
	 */
	if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif
	} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
		wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
		dhd_bus_dev_pm_relax(&dhd->pub);
#endif
	}
	dhd->wakelock_before_waive = 0;
exit:
	ret = dhd->wakelock_wd_counter;
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
	return ret;
}
9254
9255 bool dhd_os_check_if_up(dhd_pub_t *pub)
9256 {
9257 if (!pub)
9258 return FALSE;
9259 return pub->up;
9260 }
9261
9262 /* function to collect firmware, chip id and chip version info */
9263 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
9264 {
9265 int i;
9266
9267 i = snprintf(info_string, sizeof(info_string),
9268 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
9269 printf("%s\n", info_string);
9270
9271 if (!dhdp)
9272 return;
9273
9274 i = snprintf(&info_string[i], sizeof(info_string) - i,
9275 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
9276 dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
9277 }
9278
9279 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
9280 {
9281 int ifidx;
9282 int ret = 0;
9283 dhd_info_t *dhd = NULL;
9284
9285 if (!net || !DEV_PRIV(net)) {
9286 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
9287 return -EINVAL;
9288 }
9289
9290 dhd = DHD_DEV_INFO(net);
9291 if (!dhd)
9292 return -EINVAL;
9293
9294 ifidx = dhd_net2idx(dhd, net);
9295 if (ifidx == DHD_BAD_IF) {
9296 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
9297 return -ENODEV;
9298 }
9299
9300 DHD_OS_WAKE_LOCK(&dhd->pub);
9301 DHD_PERIM_LOCK(&dhd->pub);
9302
9303 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
9304 dhd_check_hang(net, &dhd->pub, ret);
9305
9306 DHD_PERIM_UNLOCK(&dhd->pub);
9307 DHD_OS_WAKE_UNLOCK(&dhd->pub);
9308
9309 return ret;
9310 }
9311
/* Map (ifidx, ret) onto a hang check for the corresponding net_device.
 * NOTE(review): declared bool but returns -EINVAL on a bad index, which a
 * bool-typed caller will observe as "true" (hang indicated) - confirm the
 * intended semantics before changing.
 */
bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
	struct net_device *net;

	net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	return dhd_check_hang(net, dhdp, ret);
}
9324
9325 /* Return instance */
9326 int dhd_get_instance(dhd_pub_t *dhdp)
9327 {
9328 return dhdp->info->unit;
9329 }
9330
9331
9332 #ifdef PROP_TXSTATUS
9333
/* Platform hook for wlfc init - nothing to do on this platform. */
void dhd_wlfc_plat_init(void *dhd)
{
}
9338
/* Platform hook for wlfc teardown - nothing to do on this platform. */
void dhd_wlfc_plat_deinit(void *dhd)
{
}
9343
9344 bool dhd_wlfc_skip_fc(void)
9345 {
9346 return FALSE;
9347 }
9348 #endif /* PROP_TXSTATUS */
9349
9350 #ifdef BCMDBGFS
9351
9352 #include <linux/debugfs.h>
9353
9354 extern uint32 dhd_readregl(void *bp, uint32 addr);
9355 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
9356
/* State for the DHD debugfs register-access interface. */
typedef struct dhd_dbgfs {
	struct dentry *debugfs_dir;	/* top-level "dhd" directory */
	struct dentry *debugfs_mem;	/* "mem" file: raw register window */
	dhd_pub_t *dhdp;		/* driver instance backing the window */
	uint32 size;			/* size of the accessible region */
} dhd_dbgfs_t;

/* single global instance; the debugfs handlers carry no per-file context */
dhd_dbgfs_t g_dbgfs;
9365
9366 static int
9367 dhd_dbg_state_open(struct inode *inode, struct file *file)
9368 {
9369 file->private_data = inode->i_private;
9370 return 0;
9371 }
9372
/* debugfs read handler for the register window.
 * NOTE(review): reads a single 32-bit register at file->f_pos (not the
 * 'pos' snapshot) and copies only 4 bytes to user space, yet advances
 * *ppos by up to 'count' - effectively only single-word reads behave as
 * expected. Confirm before relying on multi-word reads.
 */
static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	ssize_t rval;
	uint32 tmp;
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

	ret = copy_to_user(ubuf, &tmp, 4);
	if (ret == count)
		return -EFAULT;

	count -= ret;
	*ppos = pos + count;
	rval = count;

	return rval;
}
9402
9403
/* debugfs write handler for the register window.
 * NOTE(review): always copies sizeof(uint32) from user space and writes a
 * single register at file->f_pos (not the 'pos' snapshot) regardless of
 * 'count' - only single-word writes behave as expected.
 */
static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	size_t ret;
	uint32 buf;

	if (pos < 0)
		return -EINVAL;
	if (pos >= g_dbgfs.size || !count)
		return 0;
	if (count > g_dbgfs.size - pos)
		count = g_dbgfs.size - pos;

	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
	if (ret == count)
		return -EFAULT;

	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

	return count;
}
9427
9428
/* debugfs llseek over the register window.
 * NOTE(review): SEEK_END (whence == 2) computes size - off rather than the
 * conventional size + off, and case 2 deliberately falls out of the switch
 * without a break - confirm intent before changing.
 */
loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
	loff_t pos = -1;

	switch (whence) {
		case 0: /* SEEK_SET */
			pos = off;
			break;
		case 1: /* SEEK_CUR */
			pos = file->f_pos + off;
			break;
		case 2: /* SEEK_END */
			pos = g_dbgfs.size - off;
	}
	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}
9446
/* debugfs file_operations for the "mem" register-window node */
static const struct file_operations dhd_dbg_state_ops = {
	.read = dhd_dbg_state_read,
	.write = dhd_debugfs_write,
	.open = dhd_dbg_state_open,
	.llseek = dhd_debugfs_lseek
};
9453
9454 static void dhd_dbg_create(void)
9455 {
9456 if (g_dbgfs.debugfs_dir) {
9457 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
9458 NULL, &dhd_dbg_state_ops);
9459 }
9460 }
9461
9462 void dhd_dbg_init(dhd_pub_t *dhdp)
9463 {
9464 int err;
9465
9466 g_dbgfs.dhdp = dhdp;
9467 g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
9468
9469 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
9470 if (IS_ERR(g_dbgfs.debugfs_dir)) {
9471 err = PTR_ERR(g_dbgfs.debugfs_dir);
9472 g_dbgfs.debugfs_dir = NULL;
9473 return;
9474 }
9475
9476 dhd_dbg_create();
9477
9478 return;
9479 }
9480
9481 void dhd_dbg_remove(void)
9482 {
9483 debugfs_remove(g_dbgfs.debugfs_mem);
9484 debugfs_remove(g_dbgfs.debugfs_dir);
9485
9486 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
9487
9488 }
9489 #endif /* ifdef BCMDBGFS */
9490
9491 #ifdef WLMEDIA_HTSF
9492
/* Stamp an outgoing packet with the host-extrapolated TSF for latency
 * measurement.
 *
 * Packets whose destination UDP port falls in [tsport, tsport+20] get a
 * magic marker and the current HTSF written into fixed payload offsets,
 * and a full htsfts_t record placed at HTSF_HOSTOFFSET.
 *
 * NOTE(review): the byte offsets (40, 44, 82, 84) assume a fixed
 * Ethernet+IPv4+UDP layout with no VLAN tag or IP options — confirm
 * against the iperf test setup this instruments.
 */
static
void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct sk_buff *skb;
	uint32 htsf = 0;
	uint16 dport = 0, oldmagic = 0xACAC;
	char *p1;
	htsfts_t ts;

	/* timestamp packet */

	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);

	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
		/* memcpy(&proto, p1+26, 4); */
		/* offset 40: UDP destination port (assumed layout) */
		memcpy(&dport, p1+40, 2);
		/* proto = ((ntoh32(proto))>> 16) & 0xFF; */
		dport = ntoh16(dport);
	}

	/* timestamp only if icmp or udb iperf with port 5555 */
	/* if (proto == 17 && dport == tsport) { */
	if (dport >= tsport && dport <= tsport + 20) {

		skb = (struct sk_buff *) pktbuf;

		htsf = dhd_get_htsf(dhd, 0);
		memset(skb->data + 44, 0, 2); /* clear checksum */
		/* mark the payload so the receive side recognizes it */
		memcpy(skb->data+82, &oldmagic, 2);
		memcpy(skb->data+84, &htsf, 4);

		/* full timestamp record appended for host-side analysis */
		memset(&ts, 0, sizeof(htsfts_t));
		ts.magic = HTSFMAGIC;
		ts.prio = PKTPRIO(pktbuf);
		ts.seqnum = htsf_seqnum++;
		ts.c10 = get_cycles();
		ts.t10 = htsf;
		ts.endmagic = HTSFENDMAGIC;

		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
	}
}
9536
9537 static void dhd_dump_htsfhisto(histo_t *his, char *s)
9538 {
9539 int pktcnt = 0, curval = 0, i;
9540 for (i = 0; i < (NUMBIN-2); i++) {
9541 curval += 500;
9542 printf("%d ", his->bin[i]);
9543 pktcnt += his->bin[i];
9544 }
9545 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
9546 his->bin[NUMBIN-1], s);
9547 }
9548
9549 static
9550 void sorttobin(int value, histo_t *histo)
9551 {
9552 int i, binval = 0;
9553
9554 if (value < 0) {
9555 histo->bin[NUMBIN-1]++;
9556 return;
9557 }
9558 if (value > histo->bin[NUMBIN-2]) /* store the max value */
9559 histo->bin[NUMBIN-2] = value;
9560
9561 for (i = 0; i < (NUMBIN-2); i++) {
9562 binval += 500; /* 500m s bins */
9563 if (value <= binval) {
9564 histo->bin[i]++;
9565 return;
9566 }
9567 }
9568 histo->bin[NUMBIN-3]++;
9569 }
9570
9571 static
9572 void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
9573 {
9574 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
9575 struct sk_buff *skb;
9576 char *p1;
9577 uint16 old_magic;
9578 int d1, d2, d3, end2end;
9579 htsfts_t *htsf_ts;
9580 uint32 htsf;
9581
9582 skb = PKTTONATIVE(dhdp->osh, pktbuf);
9583 p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
9584
9585 if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
9586 memcpy(&old_magic, p1+78, 2);
9587 htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
9588 }
9589 else
9590 return;
9591
9592 if (htsf_ts->magic == HTSFMAGIC) {
9593 htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
9594 htsf_ts->cE0 = get_cycles();
9595 }
9596
9597 if (old_magic == 0xACAC) {
9598
9599 tspktcnt++;
9600 htsf = dhd_get_htsf(dhd, 0);
9601 memcpy(skb->data+92, &htsf, sizeof(uint32));
9602
9603 memcpy(&ts[tsidx].t1, skb->data+80, 16);
9604
9605 d1 = ts[tsidx].t2 - ts[tsidx].t1;
9606 d2 = ts[tsidx].t3 - ts[tsidx].t2;
9607 d3 = ts[tsidx].t4 - ts[tsidx].t3;
9608 end2end = ts[tsidx].t4 - ts[tsidx].t1;
9609
9610 sorttobin(d1, &vi_d1);
9611 sorttobin(d2, &vi_d2);
9612 sorttobin(d3, &vi_d3);
9613 sorttobin(end2end, &vi_d4);
9614
9615 if (end2end > 0 && end2end > maxdelay) {
9616 maxdelay = end2end;
9617 maxdelaypktno = tspktcnt;
9618 memcpy(&maxdelayts, &ts[tsidx], 16);
9619 }
9620 if (++tsidx >= TSMAX)
9621 tsidx = 0;
9622 }
9623 }
9624
/* Extrapolate the current dongle TSF value on the host.
 *
 * Scales the host cycle count elapsed since the last sync point
 * (dhd->htsf.last_cycle / last_tsf, maintained by htsf_update()) by the
 * calibrated coefficient (coef plus two decimal digits) and adds a
 * fixed bus-delay offset. Returns 0 when no calibration exists yet
 * (coef == 0).
 *
 * NOTE(review): 'ifidx' is accepted but unused here.
 */
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
	uint32 htsf = 0, cur_cycle, delta, delta_us;
	uint32 factor, baseval, baseval2;
	cycles_t t;

	t = get_cycles();
	cur_cycle = t;

	if (cur_cycle > dhd->htsf.last_cycle)
		delta = cur_cycle - dhd->htsf.last_cycle;
	else {
		/* 32-bit cycle counter wrapped since the last sync */
		delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
	}

	/* scale down by 16 for the divides below; restored by << 4 */
	delta = delta >> 4;

	if (dhd->htsf.coef) {
		/* times ten to get the first digit */
		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
		baseval = (delta*10)/factor;
		baseval2 = (delta*10)/(factor+1);
		/* interpolate using the second decimal digit */
		delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
		htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
	}
	else {
		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
	}

	return htsf;
}
9656
/* Dump the entire ts[] timestamp ring with per-leg deltas, then report
 * the worst end-to-end latency recorded so far (maxdelay/maxdelayts,
 * maintained by dhd_htsf_addrxts()).
 */
static void dhd_dump_latency(void)
{
	int i, max = 0;
	int d1, d2, d3, d4, d5;

	printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
	for (i = 0; i < TSMAX; i++) {
		d1 = ts[i].t2 - ts[i].t1;
		d2 = ts[i].t3 - ts[i].t2;
		d3 = ts[i].t4 - ts[i].t3;
		d4 = ts[i].t4 - ts[i].t1;	/* end-to-end */
		/* current best end-to-end, recomputed each pass */
		d5 = ts[max].t4-ts[max].t1;
		if (d4 > d5 && d4 > 0) {
			max = i;
		}
		printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
			d1, d2, d3, d4, i);
	}

	printf("current idx = %d \n", tsidx);

	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
	printf("%08X %08X %08X %08X \t%d %d %d %d\n",
		maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
		maxdelayts.t2 - maxdelayts.t1,
		maxdelayts.t3 - maxdelayts.t2,
		maxdelayts.t4 - maxdelayts.t3,
		maxdelayts.t4 - maxdelayts.t1);
}
9687
9688
/* Query the dongle's real TSF via the "tsf" iovar and print it next to
 * the host-extrapolated value, bracketing the ioctl with two
 * dhd_get_htsf() samples to expose extrapolation drift.
 *
 * Returns 0 on success, -EOPNOTSUPP when the firmware lacks the iovar,
 * or the ioctl error code.
 */
static int
dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
{
	wl_ioctl_t ioc;
	char buf[32];
	int ret;
	uint32 s1, s2;

	struct tsf {
		uint32 low;
		uint32 high;
	} tsf_buf;

	memset(&ioc, 0, sizeof(ioc));
	memset(&tsf_buf, 0, sizeof(tsf_buf));

	ioc.cmd = WLC_GET_VAR;
	ioc.buf = buf;
	ioc.len = (uint)sizeof(buf);
	ioc.set = FALSE;

	strncpy(buf, "tsf", sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	/* host estimate just before the ioctl */
	s1 = dhd_get_htsf(dhd, 0);
	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
		if (ret == -EIO) {
			DHD_ERROR(("%s: tsf is not supported by device\n",
				dhd_ifname(&dhd->pub, ifidx)));
			return -EOPNOTSUPP;
		}
		return ret;
	}
	/* host estimate just after, for comparison with the dongle value */
	s2 = dhd_get_htsf(dhd, 0);

	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
		dhd->htsf.coefdec2, s2-tsf_buf.low);
	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
	return 0;
}
9730
9731 void htsf_update(dhd_info_t *dhd, void *data)
9732 {
9733 static ulong cur_cycle = 0, prev_cycle = 0;
9734 uint32 htsf, tsf_delta = 0;
9735 uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
9736 ulong b, a;
9737 cycles_t t;
9738
9739 /* cycles_t in inlcude/mips/timex.h */
9740
9741 t = get_cycles();
9742
9743 prev_cycle = cur_cycle;
9744 cur_cycle = t;
9745
9746 if (cur_cycle > prev_cycle)
9747 cyc_delta = cur_cycle - prev_cycle;
9748 else {
9749 b = cur_cycle;
9750 a = prev_cycle;
9751 cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
9752 }
9753
9754 if (data == NULL)
9755 printf(" tsf update ata point er is null \n");
9756
9757 memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
9758 memcpy(&cur_tsf, data, sizeof(tsf_t));
9759
9760 if (cur_tsf.low == 0) {
9761 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
9762 return;
9763 }
9764
9765 if (cur_tsf.low > prev_tsf.low)
9766 tsf_delta = (cur_tsf.low - prev_tsf.low);
9767 else {
9768 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
9769 cur_tsf.low, prev_tsf.low));
9770 if (cur_tsf.high > prev_tsf.high) {
9771 tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
9772 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
9773 }
9774 else
9775 return; /* do not update */
9776 }
9777
9778 if (tsf_delta) {
9779 hfactor = cyc_delta / tsf_delta;
9780 tmp = (cyc_delta - (hfactor * tsf_delta))*10;
9781 dec1 = tmp/tsf_delta;
9782 dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
9783 tmp = (tmp - (dec1*tsf_delta))*10;
9784 dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
9785
9786 if (dec3 > 4) {
9787 if (dec2 == 9) {
9788 dec2 = 0;
9789 if (dec1 == 9) {
9790 dec1 = 0;
9791 hfactor++;
9792 }
9793 else {
9794 dec1++;
9795 }
9796 }
9797 else
9798 dec2++;
9799 }
9800 }
9801
9802 if (hfactor) {
9803 htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
9804 dhd->htsf.coef = hfactor;
9805 dhd->htsf.last_cycle = cur_cycle;
9806 dhd->htsf.last_tsf = cur_tsf.low;
9807 dhd->htsf.coefdec1 = dec1;
9808 dhd->htsf.coefdec2 = dec2;
9809 }
9810 else {
9811 htsf = prev_tsf.low;
9812 }
9813 }
9814
9815 #endif /* WLMEDIA_HTSF */
9816
9817 #ifdef CUSTOM_SET_CPUCORE
/* Pin (set == TRUE) or unpin the DPC and RXF kernel threads to the
 * dedicated DPC_CPUCORE / RXF_CPUCORE cpus, falling back to
 * PRIMARY_CPUCORE when unpinning. Only applied while operating on a
 * VHT80 channel. Retries briefly if the target cpu is not yet online.
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	/* retry_set is a single budget shared by BOTH loops below */
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			/* cpu may be transiently offline; back off and retry */
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	if (RXF_CPUCORE) {
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
#ifdef DHD_OF_SUPPORT
	/* also steer the wlan interrupt to the same core set */
	interrupt_set_cpucore(set);
#endif /* DHD_OF_SUPPORT */
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
9868 #endif /* CUSTOM_SET_CPUCORE */
9869 #if defined(DHD_TCP_WINSIZE_ADJUST)
9870 static int dhd_port_list_match(int port)
9871 {
9872 int i;
9873 for (i = 0; i < MAX_TARGET_PORTS; i++) {
9874 if (target_ports[i] == port)
9875 return 1;
9876 }
9877 return 0;
9878 }
/* In HOSTAP mode, inflate small TCP window advertisements (by
 * WIN_SIZE_SCALE_FACTOR) for flows whose destination port is in
 * target_ports[], patching the TCP checksum incrementally.
 *
 * NOTE(review): the incremental checksum update here is not the usual
 * RFC 1624 one's-complement fold (no carry folding, and the negative
 * adjustment uses a plain decrement) — verify against traffic captures
 * before relying on checksum correctness.
 * NOTE(review): assumes skb->data points at the IP header on entry.
 */
static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
{
	struct iphdr *ipheader;
	struct tcphdr *tcpheader;
	uint16 win_size;
	int32 incremental_checksum;

	if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
		return;
	if (skb == NULL || skb->data == NULL)
		return;

	ipheader = (struct iphdr*)(skb->data);

	if (ipheader->protocol == IPPROTO_TCP) {
		/* advance past the (variable-length) IP header */
		tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
		if (tcpheader) {
			win_size = ntoh16(tcpheader->window);
			if (win_size < MIN_TCP_WIN_SIZE &&
				dhd_port_list_match(ntoh16(tcpheader->dest))) {
				incremental_checksum = ntoh16(tcpheader->check);
				incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
				if (incremental_checksum < 0)
					--incremental_checksum;
				tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
				tcpheader->check = hton16((unsigned short)incremental_checksum);
			}
		}
		/* restore skb->data to the IP header */
		skb_push(skb, (ipheader->ihl)<<2);
	}
}
9910 #endif /* DHD_TCP_WINSIZE_ADJUST */
9911
9912 /* Get interface specific ap_isolate configuration */
9913 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
9914 {
9915 dhd_info_t *dhd = dhdp->info;
9916 dhd_if_t *ifp;
9917
9918 ASSERT(idx < DHD_MAX_IFS);
9919
9920 ifp = dhd->iflist[idx];
9921
9922 return ifp->ap_isolate;
9923 }
9924
9925 /* Set interface specific ap_isolate configuration */
9926 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
9927 {
9928 dhd_info_t *dhd = dhdp->info;
9929 dhd_if_t *ifp;
9930
9931 ASSERT(idx < DHD_MAX_IFS);
9932
9933 ifp = dhd->iflist[idx];
9934
9935 ifp->ap_isolate = val;
9936
9937 return 0;
9938 }
9939
9940 #ifdef DHD_WMF
9941 /* Returns interface specific WMF configuration */
9942 dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
9943 {
9944 dhd_info_t *dhd = dhdp->info;
9945 dhd_if_t *ifp;
9946
9947 ASSERT(idx < DHD_MAX_IFS);
9948
9949 ifp = dhd->iflist[idx];
9950 return &ifp->wmf;
9951 }
9952 #endif /* DHD_WMF */
9953
9954
9955 #ifdef DHD_UNICAST_DHCP
/* Locate the ethertype and payload of an Ethernet II or SNAP-802.3
 * frame, skipping a single 802.1Q VLAN tag if present.
 *
 * On success writes the payload pointer, payload length, host-order
 * ethertype, and SNAP flag through the out parameters and returns
 * BCME_OK; returns BCME_ERROR on short or unrecognized frames.
 */
static int
dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
	uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
{
	uint8 *frame = PKTDATA(pub->osh, pktbuf);
	int length = PKTLEN(pub->osh, pktbuf);
	uint8 *pt; /* Pointer to type field */
	uint16 ethertype;
	bool snap = FALSE;
	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
	if (length < ETHER_HDR_LEN) {
		DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
			__FUNCTION__, length));
		return BCME_ERROR;
	} else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
		/* Frame is Ethernet II */
		pt = frame + ETHER_TYPE_OFFSET;
	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
		!bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
		/* LLC/SNAP: real ethertype follows the SNAP header */
		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
		snap = TRUE;
	} else {
		DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
			__FUNCTION__));
		return BCME_ERROR;
	}

	ethertype = ntoh16_ua(pt);

	/* Skip VLAN tag, if any */
	if (ethertype == ETHER_TYPE_8021Q) {
		pt += VLAN_TAG_LEN;

		if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
			DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
				__FUNCTION__, length));
			return BCME_ERROR;
		}

		ethertype = ntoh16_ua(pt);
	}

	*data_ptr = pt + ETHER_TYPE_LEN;
	*len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
	*et_ptr = ethertype;
	*snap_ptr = snap;
	return BCME_OK;
}
10004
/* Validate an IPv4 header inside the packet and return its payload.
 *
 * On success writes the transport payload pointer, payload length and
 * protocol number through the out parameters and returns BCME_OK.
 * Returns BCME_ERROR for non-IP, non-IPv4, truncated, malformed or
 * fragmented packets.
 */
static int
dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
	uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
{
	struct ipv4_hdr *iph;		/* IP frame pointer */
	int iplen;			/* IP frame length */
	uint16 ethertype, iphdrlen, ippktlen;
	uint16 iph_frag;
	uint8 prot;
	bool snap;

	if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
	    &iplen, &ethertype, &snap) != 0)
		return BCME_ERROR;

	if (ethertype != ETHER_TYPE_IP) {
		return BCME_ERROR;
	}

	/* We support IPv4 only */
	if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
		return BCME_ERROR;
	}

	/* Header length sanity */
	iphdrlen = IPV4_HLEN(iph);

	/*
	 * Packet length sanity; sometimes we receive eth-frame size bigger
	 * than the IP content, which results in a bad tcp chksum
	 */
	ippktlen = ntoh16(iph->tot_len);
	if (ippktlen < iplen) {
		/* trailing ethernet padding: trust the IP total length */
		DHD_INFO(("%s: extra frame length ignored\n",
			__FUNCTION__));
		iplen = ippktlen;
	} else if (ippktlen > iplen) {
		DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
			__FUNCTION__, ippktlen - iplen));
		return BCME_ERROR;
	}

	if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
		DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
			__FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
		return BCME_ERROR;
	}

	/*
	 * We don't handle fragmented IP packets. A first frag is indicated by the MF
	 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
	 */
	iph_frag = ntoh16(iph->frag);

	if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
		DHD_INFO(("DHD:%s: IP fragment not handled\n",
			__FUNCTION__));
		return BCME_ERROR;
	}

	prot = IPV4_PROT(iph);

	*data_ptr = (((uint8 *)iph) + iphdrlen);
	*len_ptr = iplen - iphdrlen;
	*prot_ptr = prot;
	return BCME_OK;
}
10073
/** Check the packet type; if it is a broadcast/multicast DHCP reply
 * (offer/ack) addressed to a known associated station, rewrite the
 * Ethernet destination to that station's MAC so the frame goes out
 * unicast.
 *
 * Returns BCME_OK when the destination was rewritten, BCME_ERROR for
 * any packet that is not an eligible DHCP reply (or whose client is
 * unknown).
 */
static
int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
{
	dhd_sta_t* stainfo;
	uint8 *eh = PKTDATA(pub->osh, pktbuf);
	uint8 *udph;
	uint8 *dhcp;
	uint8 *chaddr;		/* client hardware address from the DHCP payload */
	int udpl;
	int dhcpl;
	uint16 port;
	uint8 prot;

	/* only broadcast/multicast frames need conversion */
	if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
		return BCME_ERROR;
	if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
		return BCME_ERROR;
	if (prot != IP_PROT_UDP)
		return BCME_ERROR;
	/* check frame length, at least UDP_HDR_LEN */
	if (udpl < UDP_HDR_LEN) {
		DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
	/* only process DHCP packets from server to client */
	if (port != DHCP_PORT_CLIENT)
		return BCME_ERROR;

	dhcp = udph + UDP_HDR_LEN;
	dhcpl = udpl - UDP_HDR_LEN;

	/* must be long enough to carry the chaddr field */
	if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
		DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	/* only process DHCP reply(offer/ack) packets */
	if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
		return BCME_ERROR;
	chaddr = dhcp + DHCP_CHADDR_OFFSET;
	/* rewrite only if the client is a known associated station */
	stainfo = dhd_find_sta(pub, ifidx, chaddr);
	if (stainfo) {
		bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
		return BCME_OK;
	}
	return BCME_ERROR;
}
#endif /* DHD_UNICAST_DHCP */
10125 #ifdef DHD_L2_FILTER
10126 /* Check if packet type is ICMP ECHO */
10127 static
10128 int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
10129 {
10130 struct bcmicmp_hdr *icmph;
10131 int udpl;
10132 uint8 prot;
10133
10134 if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
10135 return BCME_ERROR;
10136 if (prot == IP_PROT_ICMP) {
10137 if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
10138 return BCME_OK;
10139 }
10140 return BCME_ERROR;
10141 }
10142 #endif /* DHD_L2_FILTER */
10143
10144 #ifdef SET_RPS_CPUS
10145 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
10146 {
10147 struct rps_map *old_map, *map;
10148 cpumask_var_t mask;
10149 int err, cpu, i;
10150 static DEFINE_SPINLOCK(rps_map_lock);
10151
10152 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
10153
10154 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
10155 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
10156 return -ENOMEM;
10157 }
10158
10159 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
10160 if (err) {
10161 free_cpumask_var(mask);
10162 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
10163 return err;
10164 }
10165
10166 map = kzalloc(max_t(unsigned int,
10167 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
10168 GFP_KERNEL);
10169 if (!map) {
10170 free_cpumask_var(mask);
10171 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
10172 return -ENOMEM;
10173 }
10174
10175 i = 0;
10176 for_each_cpu(cpu, mask)
10177 map->cpus[i++] = cpu;
10178
10179 if (i)
10180 map->len = i;
10181 else {
10182 kfree(map);
10183 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
10184 map = NULL;
10185 }
10186
10187 spin_lock(&rps_map_lock);
10188 old_map = rcu_dereference_protected(queue->rps_map,
10189 lockdep_is_held(&rps_map_lock));
10190 rcu_assign_pointer(queue->rps_map, map);
10191 spin_unlock(&rps_map_lock);
10192
10193 if (map)
10194 static_key_slow_inc(&rps_needed);
10195 if (old_map) {
10196 kfree_rcu(old_map, rcu);
10197 static_key_slow_dec(&rps_needed);
10198 }
10199 free_cpumask_var(mask);
10200
10201 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
10202 return map->len;
10203 }
10204
/* Remove and free the RPS cpu map (if any) installed on 'queue'.
 *
 * NOTE(review): unlike custom_rps_map_set(), this does not
 * static_key_slow_dec(&rps_needed) for the map it removes, so the key
 * may stay elevated — verify whether that is intentional.
 */
void custom_rps_map_clear(struct netdev_rx_queue *queue)
{
	struct rps_map *map;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	/* '1' asserts we are the sole updater here, no lock held */
	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
		DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
	}
}
10218 #endif /* SET_RPS_CPUS */
10219
10220 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
10221 void
10222 SDA_setSharedMemory4Send(unsigned int buffer_id,
10223 unsigned char *buffer, unsigned int buffer_size,
10224 unsigned int packet_size, unsigned int headroom_size)
10225 {
10226 dhd_info_t *dhd = dhd_global;
10227
10228 sda_packet_length = packet_size;
10229
10230 ASSERT(dhd);
10231 if (dhd == NULL)
10232 return;
10233 }
10234
10235 void
10236 SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb)
10237 {
10238 dhd_info_t *dhd = dhd_global;
10239
10240 ASSERT(dhd);
10241 if (dhd == NULL)
10242 return;
10243 }
10244
10245
/* Return the 64-bit per-BSS TSF for a WLANAUDIO virtual interface
 * (vif_id 0 -> "wlan0", 1 -> "p2p0") via the "tsf_bss" iovar.
 * Returns 0 on ioctl failure.
 */
unsigned long long
SDA_getTsf(unsigned char vif_id)
{
	dhd_info_t *dhd = dhd_global;
	uint64 tsf_val;
	char buf[WLC_IOCTL_SMLEN];
	int ifidx = 0;

	struct tsf {
		uint32 low;
		uint32 high;
	} tsf_buf;

	memset(buf, 0, sizeof(buf));

	if (vif_id == 0) /* wlan0 tsf */
		ifidx = dhd_ifname2idx(dhd, "wlan0");
	else if (vif_id == 1) /* p2p0 tsf */
		ifidx = dhd_ifname2idx(dhd, "p2p0");

	/* build the iovar request (no payload) in 'buf' */
	bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf));

	if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) {
		DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__));
		return 0;
	}

	/* firmware returns {low, high} in the same buffer */
	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
	tsf_val = (uint64)tsf_buf.high;
	DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
		__FUNCTION__, tsf_buf.high, tsf_buf.low));

	return ((tsf_val << 32) | tsf_buf.low);
}
10280 EXPORT_SYMBOL(SDA_getTsf);
10281
10282 unsigned int
10283 SDA_syncTsf(void)
10284 {
10285 dhd_info_t *dhd = dhd_global;
10286 int tsf_sync = 1;
10287 char iovbuf[WLC_IOCTL_SMLEN];
10288
10289 bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf));
10290 dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
10291
10292 DHD_TRACE(("%s\n", __FUNCTION__));
10293 return 0;
10294 }
10295
10296 extern struct net_device *wl0dot1_dev;
10297
10298 void
10299 BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size)
10300 {
10301 struct sk_buff *skb;
10302 sda_packet_t *shm_packet = packet;
10303 dhd_info_t *dhd = dhd_global;
10304 int cnt;
10305
10306 static unsigned int cnt_t = 1;
10307
10308 ASSERT(dhd);
10309 if (dhd == NULL)
10310 return;
10311
10312 if (dhd->is_wlanaudio_blist) {
10313 for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
10314 if (dhd->wlanaudio_blist[cnt].is_blacklist == true) {
10315 if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet,
10316 shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN))
10317 return;
10318 }
10319 }
10320 }
10321
10322 if ((cnt_t % 10000) == 0)
10323 cnt_t = 0;
10324
10325 cnt_t++;
10326
10327 /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
10328 #define TXOFF 26
10329 skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC);
10330
10331 skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE);
10332 skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE);
10333 skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */
10334
10335 /* p2p_net */
10336 skb->dev = wl0dot1_dev;
10337 shm_packet->txTsf = 0x0;
10338 shm_packet->rxTsf = 0x0;
10339 memcpy(skb->data, &shm_packet->headroom,
10340 sda_packet_length - OFFSETOF(sda_packet_t, headroom));
10341 shm_packet->desc.ready_to_copy = 0;
10342
10343 dhd_start_xmit(skb, skb->dev);
10344 }
10345
10346 void
10347 SDA_registerCallback4Recv(unsigned char *pBufferTotal,
10348 unsigned int BufferTotalSize)
10349 {
10350 dhd_info_t *dhd = dhd_global;
10351
10352 ASSERT(dhd);
10353 if (dhd == NULL)
10354 return;
10355 }
10356
10357
10358 void
10359 SDA_setSharedMemory4Recv(unsigned char *pBufferTotal,
10360 unsigned int BufferTotalSize,
10361 unsigned int BufferUnitSize,
10362 unsigned int Headroomsize)
10363 {
10364 dhd_info_t *dhd = dhd_global;
10365
10366 ASSERT(dhd);
10367 if (dhd == NULL)
10368 return;
10369 }
10370
10371
10372 void
10373 SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize)
10374 {
10375 dhd_info_t *dhd = dhd_global;
10376
10377 ASSERT(dhd);
10378 if (dhd == NULL)
10379 return;
10380 }
10381
10382 EXPORT_SYMBOL(SDA_setSharedMemory4Send);
10383 EXPORT_SYMBOL(SDA_registerCallback4SendDone);
10384 EXPORT_SYMBOL(SDA_syncTsf);
10385 EXPORT_SYMBOL(SDA_function4Send);
10386 EXPORT_SYMBOL(SDA_registerCallback4Recv);
10387 EXPORT_SYMBOL(SDA_setSharedMemory4Recv);
10388 EXPORT_SYMBOL(SDA_function4RecvDone);
10389
10390 #endif /* CUSTOMER_HW20 && WLANAUDIO */
10391
10392 void *dhd_get_pub(struct net_device *dev)
10393 {
10394 dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
10395 if (dhdinfo)
10396 return (void *)&dhdinfo->pub;
10397 else
10398 return NULL;
10399 }
10400
10401 bool dhd_os_wd_timer_enabled(void *bus)
10402 {
10403 dhd_pub_t *pub = bus;
10404 dhd_info_t *dhd = (dhd_info_t *)pub->info;
10405
10406 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10407 if (!dhd) {
10408 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
10409 return FALSE;
10410 }
10411 return dhd->wd_timer_valid;
10412 }