2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * Copyright (C) 1999-2015, Broadcom Corporation
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
25 * $Id: dhd_linux.c 588496 2015-09-24 08:32:09Z $
32 #include <linux/syscalls.h>
33 #include <event_log.h>
34 #endif /* SHOW_LOGTRACE */
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/skbuff.h>
41 #include <linux/netdevice.h>
42 #include <linux/inetdevice.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/etherdevice.h>
45 #include <linux/random.h>
46 #include <linux/spinlock.h>
47 #include <linux/ethtool.h>
48 #include <linux/fcntl.h>
51 #include <linux/reboot.h>
52 #include <linux/notifier.h>
53 #include <net/addrconf.h>
54 #ifdef ENABLE_ADAPTIVE_SCHED
55 #include <linux/cpufreq.h>
56 #endif /* ENABLE_ADAPTIVE_SCHED */
58 #include <asm/uaccess.h>
59 #include <asm/unaligned.h>
63 #include <bcmendian.h>
66 #include <proto/ethernet.h>
67 #include <proto/bcmevent.h>
68 #include <proto/vlan.h>
70 #include <proto/bcmicmp.h>
72 #include <proto/802.3.h>
74 #include <dngl_stats.h>
75 #include <dhd_linux_wq.h>
77 #include <dhd_linux.h>
78 #ifdef PCIE_FULL_DONGLE
79 #include <dhd_flowring.h>
82 #include <dhd_proto.h>
83 #include <dhd_config.h>
85 #ifdef CONFIG_HAS_WAKELOCK
86 #include <linux/wakelock.h>
89 #include <wl_cfg80211.h>
92 #include <wl_cfgp2p.h>
98 #include <proto/802.11_bta.h>
99 #include <proto/bt_amp_hci.h>
104 #include <linux/compat.h>
108 #include <dhd_wmf_linux.h>
111 #ifdef AMPDU_VO_ENABLE
112 #include <proto/802.1d.h>
113 #endif /* AMPDU_VO_ENABLE */
114 #ifdef DHDTCPACK_SUPPRESS
116 #endif /* DHDTCPACK_SUPPRESS */
118 #if defined(DHD_TCP_WINSIZE_ADJUST)
119 #include <linux/tcp.h>
121 #endif /* DHD_TCP_WINSIZE_ADJUST */
124 #include <linux/time.h>
127 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
128 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
129 #define TSMAX 1000 /* max no. of timing record kept */
132 static uint32 tsidx
= 0;
133 static uint32 htsf_seqnum
= 0;
135 struct timeval tsync
;
136 static uint32 tsport
= 5010;
138 typedef struct histo_
{
143 #if !ISPOWEROF2(DHD_SDALIGN)
144 #error DHD_SDALIGN is not a power of 2!
147 static histo_t vi_d1
, vi_d2
, vi_d3
, vi_d4
;
148 #endif /* WLMEDIA_HTSF */
153 #endif /* quote_str */
156 #endif /* quote_str */
158 #define quote_str(s) to_str(s)
160 static char *driver_target
= "driver_target: "quote_str(BRCM_DRIVER_TARGET
);
161 #endif /* STBLINUX */
163 #if defined(DHD_TCP_WINSIZE_ADJUST)
164 #define MIN_TCP_WIN_SIZE 18000
165 #define WIN_SIZE_SCALE_FACTOR 2
166 #define MAX_TARGET_PORTS 5
168 static uint target_ports
[MAX_TARGET_PORTS
] = {20, 0, 0, 0, 0};
169 static uint dhd_use_tcp_window_size_adjust
= FALSE
;
170 static void dhd_adjust_tcp_winsize(int op_mode
, struct sk_buff
*skb
);
171 #endif /* DHD_TCP_WINSIZE_ADJUST */
174 #if defined(OEM_ANDROID) && defined(SOFTAP)
175 extern bool ap_cfg_running
;
176 extern bool ap_fw_loaded
;
178 extern void extern_wifi_set_enable(int is_on
);
180 #ifdef ENABLE_ADAPTIVE_SCHED
181 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
182 #ifndef CUSTOM_CPUFREQ_THRESH
183 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
184 #endif /* CUSTOM_CPUFREQ_THRESH */
185 #endif /* ENABLE_ADAPTIVE_SCHED */
187 /* enable HOSTIP cache update from the host side when an eth0:N is up */
188 #define AOE_IP_ALIAS_SUPPORT 1
192 #include <bcm_rpc_tp.h>
195 #include <wlfc_proto.h>
196 #include <dhd_wlfc.h>
199 #if defined(OEM_ANDROID)
200 #include <wl_android.h>
204 /* Maximum STA per radio */
205 #define DHD_MAX_STA 32
208 const uint8 wme_fifo2ac
[] = { 0, 1, 2, 3, 1, 1 };
209 const uint8 prio2fifo
[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
210 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
212 #ifdef ARP_OFFLOAD_SUPPORT
213 void aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
);
214 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
215 unsigned long event
, void *ptr
);
216 static struct notifier_block dhd_inetaddr_notifier
= {
217 .notifier_call
= dhd_inetaddr_notifier_call
219 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
220 * created in kernel notifier link list (with 'next' pointing to itself)
222 static bool dhd_inetaddr_notifier_registered
= FALSE
;
223 #endif /* ARP_OFFLOAD_SUPPORT */
226 static int dhd_inet6addr_notifier_call(struct notifier_block
*this,
227 unsigned long event
, void *ptr
);
228 static struct notifier_block dhd_inet6addr_notifier
= {
229 .notifier_call
= dhd_inet6addr_notifier_call
231 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
232 * created in kernel notifier link list (with 'next' pointing to itself)
234 static bool dhd_inet6addr_notifier_registered
= FALSE
;
237 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
238 #include <linux/suspend.h>
239 volatile bool dhd_mmc_suspend
= FALSE
;
240 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait
);
241 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
243 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
244 extern void dhd_enable_oob_intr(struct dhd_bus
*bus
, bool enable
);
245 #endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
246 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID))
247 static void dhd_hang_process(void *dhd_info
, void *event_data
, u8 event
);
248 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID)) */
249 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
250 MODULE_LICENSE("GPL and additional rights");
251 #endif /* LinuxVer */
253 #if defined(MULTIPLE_SUPPLICANT)
254 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
255 DEFINE_MUTEX(_dhd_sdio_mutex_lock_
);
256 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
261 extern int dhd_bus_init(dhd_pub_t
*dhdp
, bool enforce_mutex
);
262 extern void dhd_bus_stop(struct dhd_bus
*bus
, bool enforce_mutex
);
263 extern void dhd_bus_unregister(void);
270 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
272 #ifndef PROP_TXSTATUS
273 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
275 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
277 #endif /* BCM_FD_AGGR */
280 extern bool dhd_wlfc_skip_fc(void);
281 extern void dhd_wlfc_plat_init(void *dhd
);
282 extern void dhd_wlfc_plat_deinit(void *dhd
);
283 #endif /* PROP_TXSTATUS */
285 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
291 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
293 /* Linux wireless extension support */
294 #if defined(WL_WIRELESS_EXT)
296 extern wl_iw_extra_params_t g_wl_iw_params
;
297 #endif /* defined(WL_WIRELESS_EXT) */
299 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
300 #include <linux/earlysuspend.h>
301 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
303 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
);
305 #ifdef PKT_FILTER_SUPPORT
306 extern void dhd_pktfilter_offload_set(dhd_pub_t
* dhd
, char *arg
);
307 extern void dhd_pktfilter_offload_enable(dhd_pub_t
* dhd
, char *arg
, int enable
, int master_mode
);
308 extern void dhd_pktfilter_offload_delete(dhd_pub_t
*dhd
, int id
);
313 extern int dhd_read_macaddr(struct dhd_info
*dhd
);
315 static inline int dhd_read_macaddr(struct dhd_info
*dhd
) { return 0; }
318 extern int dhd_write_macaddr(struct ether_addr
*mac
);
320 static inline int dhd_write_macaddr(struct ether_addr
*mac
) { return 0; }
324 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
325 int argos_register_notifier_init(struct net_device
*net
);
326 int argos_register_notifier_deinit(void);
328 extern int sec_argos_register_notifier(struct notifier_block
*n
, char *label
);
329 extern int sec_argos_unregister_notifier(struct notifier_block
*n
, char *label
);
331 static int argos_status_notifier_wifi_cb(struct notifier_block
*notifier
,
332 unsigned long speed
, void *v
);
334 static struct notifier_block argos_wifi
= {
335 .notifier_call
= argos_status_notifier_wifi_cb
,
339 struct net_device
*wlan_primary_netdev
;
340 int argos_rps_cpus_enabled
;
343 argos_rps_ctrl argos_rps_ctrl_data
;
344 #define RPS_TPUT_THRESHOLD 300
346 #endif /* ARGOS_RPS_CPU_CTL && ARGOS_CPU_SCHEDULER */
347 #if defined(SOFTAP_TPUT_ENHANCE)
348 extern void dhd_bus_setidletime(dhd_pub_t
*dhdp
, int idle_time
);
349 extern void dhd_bus_getidletime(dhd_pub_t
*dhdp
, int* idle_time
);
350 #endif /* SOFTAP_TPUT_ENHANCE */
353 #if defined(DHD_DEBUG)
354 static void dhd_mem_dump(void *dhd_info
, void *event_info
, u8 event
);
355 #endif /* DHD_DEBUG */
357 static int dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
);
358 static struct notifier_block dhd_reboot_notifier
= {
359 .notifier_call
= dhd_reboot_callback
,
364 typedef struct dhd_if_event
{
365 struct list_head list
;
366 wl_event_data_if_t event
;
367 char name
[IFNAMSIZ
+1];
368 uint8 mac
[ETHER_ADDR_LEN
];
371 /* Interface control information */
372 typedef struct dhd_if
{
373 struct dhd_info
*info
; /* back pointer to dhd_info */
374 /* OS/stack specifics */
375 struct net_device
*net
;
376 int idx
; /* iface idx in dongle */
377 uint subunit
; /* subunit */
378 uint8 mac_addr
[ETHER_ADDR_LEN
]; /* assigned MAC address */
381 uint8 bssidx
; /* bsscfg index for the interface */
382 bool attached
; /* Delayed attachment when unset */
383 bool txflowcontrol
; /* Per interface flow control indicator */
384 char name
[IFNAMSIZ
+1]; /* linux interface name */
385 struct net_device_stats stats
;
387 dhd_wmf_t wmf
; /* per bsscfg wmf setting */
389 #ifdef PCIE_FULL_DONGLE
390 struct list_head sta_list
; /* sll of associated stations */
391 spinlock_t sta_list_lock
; /* lock for manipulating sll */
392 #endif /* PCIE_FULL_DONGLE */
393 uint32 ap_isolate
; /* ap-isolation settings */
406 uint32 coef
; /* scaling factor */
407 uint32 coefdec1
; /* first decimal */
408 uint32 coefdec2
; /* second decimal */
418 static tstamp_t ts
[TSMAX
];
419 static tstamp_t maxdelayts
;
420 static uint32 maxdelay
= 0, tspktcnt
= 0, maxdelaypktno
= 0;
422 #endif /* WLMEDIA_HTSF */
424 struct ipv6_work_info_t
{
431 #if defined(DHD_DEBUG)
432 typedef struct dhd_dump
{
436 #endif /* DHD_DEBUG */
438 /* When Perimeter locks are deployed, any blocking calls must be preceded
439 * with a PERIM UNLOCK and followed by a PERIM LOCK.
440 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
441 * wait_event_timeout().
444 /* Local private structure (extension of pub) */
445 typedef struct dhd_info
{
446 #if defined(WL_WIRELESS_EXT)
447 wl_iw_t iw
; /* wireless extensions state (must be first) */
448 #endif /* defined(WL_WIRELESS_EXT) */
450 dhd_if_t
*iflist
[DHD_MAX_IFS
]; /* for supporting multiple interfaces */
452 void *adapter
; /* adapter information, interrupt, fw path etc. */
453 char fw_path
[PATH_MAX
]; /* path to firmware image */
454 char nv_path
[PATH_MAX
]; /* path to nvram vars file */
455 char conf_path
[PATH_MAX
]; /* path to config vars file */
457 struct semaphore proto_sem
;
459 spinlock_t wlfc_spinlock
;
462 ulong wlfc_lock_flags
;
463 ulong wlfc_pub_lock_flags
;
465 #endif /* PROP_TXSTATUS */
469 wait_queue_head_t ioctl_resp_wait
;
470 wait_queue_head_t d3ack_wait
;
471 uint32 default_wd_interval
;
473 struct timer_list timer
;
475 struct tasklet_struct tasklet
;
483 struct semaphore sdsem
;
484 tsk_ctl_t thr_dpc_ctl
;
485 tsk_ctl_t thr_wdt_ctl
;
488 tsk_ctl_t thr_rxf_ctl
;
490 bool rxthread_enabled
;
493 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
494 struct wake_lock wl_wifi
; /* Wifi wakelock */
495 struct wake_lock wl_rxwake
; /* Wifi rx wakelock */
496 struct wake_lock wl_ctrlwake
; /* Wifi ctrl wakelock */
497 struct wake_lock wl_wdwake
; /* Wifi wd wakelock */
498 #ifdef BCMPCIE_OOB_HOST_WAKE
499 struct wake_lock wl_intrwake
; /* Host wakeup wakelock */
500 #endif /* BCMPCIE_OOB_HOST_WAKE */
501 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
503 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
504 /* net_device interface lock, prevent race conditions among net_dev interface
505 * calls and wifi_on or wifi_off
507 struct mutex dhd_net_if_mutex
;
508 struct mutex dhd_suspend_mutex
;
510 spinlock_t wakelock_spinlock
;
511 uint32 wakelock_counter
;
512 int wakelock_wd_counter
;
513 int wakelock_rx_timeout_enable
;
514 int wakelock_ctrl_timeout_enable
;
516 uint32 wakelock_before_waive
;
518 /* Thread to issue ioctl for multicast */
519 wait_queue_head_t ctrl_wait
;
520 atomic_t pend_8021x_cnt
;
521 dhd_attach_states_t dhd_state
;
523 dhd_event_log_t event_data
;
524 #endif /* SHOW_LOGTRACE */
526 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
527 struct early_suspend early_suspend
;
528 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
530 #ifdef ARP_OFFLOAD_SUPPORT
532 #endif /* ARP_OFFLOAD_SUPPORT */
536 struct timer_list rpcth_timer
;
537 bool rpcth_timer_active
;
540 #ifdef DHDTCPACK_SUPPRESS
541 spinlock_t tcpack_lock
;
542 #endif /* DHDTCPACK_SUPPRESS */
543 void *dhd_deferred_wq
;
544 #ifdef DEBUG_CPU_FREQ
545 struct notifier_block freq_trans
;
546 int __percpu
*new_freq
;
549 struct notifier_block pm_notifier
;
552 #define DHDIF_FWDER(dhdif) FALSE
554 /* Flag to indicate if we should download firmware on driver load */
555 uint dhd_download_fw_on_driverload
= TRUE
;
557 /* Definitions to provide path to the firmware and nvram
558 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
560 char firmware_path
[MOD_PARAM_PATHLEN
];
561 char nvram_path
[MOD_PARAM_PATHLEN
];
562 char config_path
[MOD_PARAM_PATHLEN
];
564 /* backup buffer for firmware and nvram path */
565 char fw_bak_path
[MOD_PARAM_PATHLEN
];
566 char nv_bak_path
[MOD_PARAM_PATHLEN
];
568 /* information string to keep firmware, chip, and chip revision version info visible from log */
569 char info_string
[MOD_PARAM_INFOLEN
];
570 module_param_string(info_string
, info_string
, MOD_PARAM_INFOLEN
, 0444);
572 int disable_proptx
= 0;
573 module_param(op_mode
, int, 0644);
574 #if defined(OEM_ANDROID)
575 extern int wl_control_wl_start(struct net_device
*dev
);
576 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
577 struct semaphore dhd_registration_sem
;
578 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
579 #endif /* defined(OEM_ANDROID) */
581 /* deferred handlers */
582 static void dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
);
583 static void dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
);
584 static void dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
);
585 static void dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
);
587 static void dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
);
591 extern void dhd_netdev_free(struct net_device
*ndev
);
592 #endif /* WL_CFG80211 */
595 module_param(dhd_msg_level
, int, 0);
596 #if defined(WL_WIRELESS_EXT)
597 module_param(iw_msg_level
, int, 0);
600 module_param(wl_dbg_level
, int, 0);
602 module_param(android_msg_level
, int, 0);
603 module_param(config_msg_level
, int, 0);
605 #ifdef ARP_OFFLOAD_SUPPORT
606 /* ARP offload enable */
607 uint dhd_arp_enable
= TRUE
;
608 module_param(dhd_arp_enable
, uint
, 0);
610 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
612 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
;
614 module_param(dhd_arp_mode
, uint
, 0);
615 #endif /* ARP_OFFLOAD_SUPPORT */
617 #if !defined(BCMDBUS)||defined(OEM_ANDROID)
618 /* Disable Prop tx */
619 module_param(disable_proptx
, int, 0644);
620 /* load firmware and/or nvram values from the filesystem */
621 module_param_string(firmware_path
, firmware_path
, MOD_PARAM_PATHLEN
, 0660);
622 module_param_string(nvram_path
, nvram_path
, MOD_PARAM_PATHLEN
, 0660);
623 module_param_string(config_path
, config_path
, MOD_PARAM_PATHLEN
, 0);
625 /* Watchdog interval */
627 /* extend watchdog expiration to 2 seconds when DPC is running */
628 #define WATCHDOG_EXTEND_INTERVAL (2000)
630 uint dhd_watchdog_ms
= CUSTOM_DHD_WATCHDOG_MS
;
631 module_param(dhd_watchdog_ms
, uint
, 0);
633 #if defined(DHD_DEBUG)
634 /* Console poll interval */
635 #if defined(OEM_ANDROID)
636 uint dhd_console_ms
= 0;
638 uint dhd_console_ms
= 250;
640 module_param(dhd_console_ms
, uint
, 0644);
641 #endif /* defined(DHD_DEBUG) */
644 uint dhd_slpauto
= TRUE
;
645 module_param(dhd_slpauto
, uint
, 0);
647 #ifdef PKT_FILTER_SUPPORT
648 /* Global Pkt filter enable control */
649 uint dhd_pkt_filter_enable
= TRUE
;
650 module_param(dhd_pkt_filter_enable
, uint
, 0);
653 /* Pkt filter init setup */
654 uint dhd_pkt_filter_init
= 0;
655 module_param(dhd_pkt_filter_init
, uint
, 0);
657 /* Pkt filter mode control */
658 uint dhd_master_mode
= FALSE
;
659 module_param(dhd_master_mode
, uint
, 0);
661 int dhd_watchdog_prio
= 0;
662 module_param(dhd_watchdog_prio
, int, 0);
664 /* DPC thread priority */
665 int dhd_dpc_prio
= CUSTOM_DPC_PRIO_SETTING
;
666 module_param(dhd_dpc_prio
, int, 0);
668 /* RX frame thread priority */
669 int dhd_rxf_prio
= CUSTOM_RXF_PRIO_SETTING
;
670 module_param(dhd_rxf_prio
, int, 0);
672 int passive_channel_skip
= 0;
673 module_param(passive_channel_skip
, int, (S_IRUSR
|S_IWUSR
));
675 #if !defined(BCMDHDUSB)
676 extern int dhd_dongle_ramsize
;
677 module_param(dhd_dongle_ramsize
, int, 0);
678 #endif /* BCMDHDUSB */
681 /* Keep track of number of instances */
682 static int dhd_found
= 0;
683 static int instance_base
= 0; /* Starting instance number */
684 module_param(instance_base
, int, 0644);
689 /* DHD Perimeter lock only used in router with bypass forwarding. */
690 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
691 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
692 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
693 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
694 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
696 #ifdef PCIE_FULL_DONGLE
697 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
698 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
699 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
700 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
701 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
702 #endif /* PCIE_FULL_DONGLE */
704 /* Control fw roaming */
706 uint dhd_roam_disable
= 0;
708 uint dhd_roam_disable
= 1;
711 /* Control radio state */
712 uint dhd_radio_up
= 1;
714 /* Network interface name */
715 char iface_name
[IFNAMSIZ
] = {'\0'};
716 module_param_string(iface_name
, iface_name
, IFNAMSIZ
, 0);
718 /* The following are specific to the SDIO dongle */
720 /* IOCTL response timeout */
721 int dhd_ioctl_timeout_msec
= IOCTL_RESP_TIMEOUT
;
723 /* Idle timeout for backplane clock */
724 int dhd_idletime
= DHD_IDLETIME_TICKS
;
725 module_param(dhd_idletime
, int, 0);
728 uint dhd_poll
= FALSE
;
729 module_param(dhd_poll
, uint
, 0);
732 uint dhd_intr
= TRUE
;
733 module_param(dhd_intr
, uint
, 0);
735 /* SDIO Drive Strength (in milliamps) */
736 uint dhd_sdiod_drive_strength
= 6;
737 module_param(dhd_sdiod_drive_strength
, uint
, 0);
742 #if defined(BCMSUP_4WAY_HANDSHAKE)
743 /* Use in dongle supplicant for 4-way handshake */
744 uint dhd_use_idsup
= 0;
745 module_param(dhd_use_idsup
, uint
, 0);
746 #endif /* BCMSUP_4WAY_HANDSHAKE */
748 extern char dhd_version
[];
750 int dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
);
751 static void dhd_net_if_lock_local(dhd_info_t
*dhd
);
752 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
);
753 static void dhd_suspend_lock(dhd_pub_t
*dhdp
);
754 static void dhd_suspend_unlock(dhd_pub_t
*dhdp
);
757 void htsf_update(dhd_info_t
*dhd
, void *data
);
758 tsf_t prev_tsf
, cur_tsf
;
760 uint32
dhd_get_htsf(dhd_info_t
*dhd
, int ifidx
);
761 static int dhd_ioctl_htsf_get(dhd_info_t
*dhd
, int ifidx
);
762 static void dhd_dump_latency(void);
763 static void dhd_htsf_addtxts(dhd_pub_t
*dhdp
, void *pktbuf
);
764 static void dhd_htsf_addrxts(dhd_pub_t
*dhdp
, void *pktbuf
);
765 static void dhd_dump_htsfhisto(histo_t
*his
, char *s
);
766 #endif /* WLMEDIA_HTSF */
768 /* Monitor interface */
769 int dhd_monitor_init(void *dhd_pub
);
770 int dhd_monitor_uninit(void);
773 #if defined(WL_WIRELESS_EXT)
774 struct iw_statistics
*dhd_get_wireless_stats(struct net_device
*dev
);
775 #endif /* defined(WL_WIRELESS_EXT) */
778 static void dhd_dpc(ulong data
);
781 extern int dhd_wait_pend8021x(struct net_device
*dev
);
782 void dhd_os_wd_timer_extend(void *bus
, bool extend
);
786 #error TOE requires BDC
788 static int dhd_toe_get(dhd_info_t
*dhd
, int idx
, uint32
*toe_ol
);
789 static int dhd_toe_set(dhd_info_t
*dhd
, int idx
, uint32 toe_ol
);
792 int dhd_dbus_txdata(dhd_pub_t
*dhdp
, void *pktbuf
);
795 static int dhd_wl_host_event(dhd_info_t
*dhd
, int *ifidx
, void *pktdata
,
796 wl_event_msg_t
*event_ptr
, void **data_ptr
);
797 #ifdef DHD_UNICAST_DHCP
798 static const uint8 llc_snap_hdr
[SNAP_HDR_LEN
] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
799 static int dhd_get_pkt_ip_type(dhd_pub_t
*dhd
, void *skb
, uint8
**data_ptr
,
800 int *len_ptr
, uint8
*prot_ptr
);
801 static int dhd_get_pkt_ether_type(dhd_pub_t
*dhd
, void *skb
, uint8
**data_ptr
,
802 int *len_ptr
, uint16
*et_ptr
, bool *snap_ptr
);
804 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
);
805 #endif /* DHD_UNICAST_DHCP */
807 static int dhd_l2_filter_block_ping(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
);
809 #if defined(CONFIG_PM_SLEEP)
810 static int dhd_pm_callback(struct notifier_block
*nfb
, unsigned long action
, void *ignored
)
812 int ret
= NOTIFY_DONE
;
813 bool suspend
= FALSE
;
814 dhd_info_t
*dhdinfo
= (dhd_info_t
*)container_of(nfb
, struct dhd_info
, pm_notifier
);
816 BCM_REFERENCE(dhdinfo
);
818 case PM_HIBERNATION_PREPARE
:
819 case PM_SUSPEND_PREPARE
:
822 case PM_POST_HIBERNATION
:
823 case PM_POST_SUSPEND
:
828 #if defined(SUPPORT_P2P_GO_PS)
831 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo
->pub
);
832 dhd_wlfc_suspend(&dhdinfo
->pub
);
833 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo
->pub
);
835 dhd_wlfc_resume(&dhdinfo
->pub
);
837 #endif /* defined(SUPPORT_P2P_GO_PS) */
839 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
840 KERNEL_VERSION(2, 6, 39))
841 dhd_mmc_suspend
= suspend
;
848 static struct notifier_block dhd_pm_notifier
= {
849 .notifier_call
= dhd_pm_callback
,
852 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
853 * created in kernel notifier link list (with 'next' pointing to itself)
855 static bool dhd_pm_notifier_registered
= FALSE
;
857 extern int register_pm_notifier(struct notifier_block
*nb
);
858 extern int unregister_pm_notifier(struct notifier_block
*nb
);
859 #endif /* CONFIG_PM_SLEEP */
861 /* Request scheduling of the bus rx frame */
862 static void dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
);
863 static void dhd_os_rxflock(dhd_pub_t
*pub
);
864 static void dhd_os_rxfunlock(dhd_pub_t
*pub
);
866 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
867 typedef struct dhd_dev_priv
{
868 dhd_info_t
* dhd
; /* cached pointer to dhd_info in netdevice priv */
869 dhd_if_t
* ifp
; /* cached pointer to dhd_if in netdevice priv */
870 int ifidx
; /* interface index */
873 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
874 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
875 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
876 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
877 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
879 /** Clear the dhd net_device's private structure. */
881 dhd_dev_priv_clear(struct net_device
* dev
)
883 dhd_dev_priv_t
* dev_priv
;
884 ASSERT(dev
!= (struct net_device
*)NULL
);
885 dev_priv
= DHD_DEV_PRIV(dev
);
886 dev_priv
->dhd
= (dhd_info_t
*)NULL
;
887 dev_priv
->ifp
= (dhd_if_t
*)NULL
;
888 dev_priv
->ifidx
= DHD_BAD_IF
;
891 /** Setup the dhd net_device's private structure. */
893 dhd_dev_priv_save(struct net_device
* dev
, dhd_info_t
* dhd
, dhd_if_t
* ifp
,
896 dhd_dev_priv_t
* dev_priv
;
897 ASSERT(dev
!= (struct net_device
*)NULL
);
898 dev_priv
= DHD_DEV_PRIV(dev
);
901 dev_priv
->ifidx
= ifidx
;
904 #ifdef PCIE_FULL_DONGLE
906 /** Dummy objects are defined with state representing bad|down.
907 * Performance gains from reducing branch conditionals, instruction parallelism,
908 * dual issue, reducing load shadows, avail of larger pipelines.
909 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
910 * is accessed via the dhd_sta_t.
913 /* Dummy dhd_info object */
914 dhd_info_t dhd_info_null
= {
916 .info
= &dhd_info_null
,
917 #ifdef DHDTCPACK_SUPPRESS
918 .tcpack_sup_mode
= TCPACK_SUP_REPLACE
,
919 #endif /* DHDTCPACK_SUPPRESS */
921 .busstate
= DHD_BUS_DOWN
924 #define DHD_INFO_NULL (&dhd_info_null)
925 #define DHD_PUB_NULL (&dhd_info_null.pub)
927 /* Dummy netdevice object */
928 struct net_device dhd_net_dev_null
= {
929 .reg_state
= NETREG_UNREGISTERED
931 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
933 /* Dummy dhd_if object */
934 dhd_if_t dhd_if_null
= {
936 .wmf
= { .wmf_enable
= TRUE
},
938 .info
= DHD_INFO_NULL
,
939 .net
= DHD_NET_DEV_NULL
,
942 #define DHD_IF_NULL (&dhd_if_null)
944 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
946 /** Interface STA list management. */
948 /** Fetch the dhd_if object, given the interface index in the dhd. */
949 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
);
951 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
952 static void dhd_sta_free(dhd_pub_t
*pub
, dhd_sta_t
*sta
);
953 static dhd_sta_t
* dhd_sta_alloc(dhd_pub_t
* dhdp
);
955 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
956 static void dhd_if_del_sta_list(dhd_if_t
* ifp
);
957 static void dhd_if_flush_sta(dhd_if_t
* ifp
);
959 /* Construct/Destruct a sta pool. */
960 static int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
);
961 static void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
);
962 static void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
);
965 /* Return interface pointer */
966 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
)
968 ASSERT(ifidx
< DHD_MAX_IFS
);
970 if (ifidx
>= DHD_MAX_IFS
)
973 return dhdp
->info
->iflist
[ifidx
];
976 /** Reset a dhd_sta object and free into the dhd pool. */
978 dhd_sta_free(dhd_pub_t
* dhdp
, dhd_sta_t
* sta
)
982 ASSERT((sta
!= DHD_STA_NULL
) && (sta
->idx
!= ID16_INVALID
));
984 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
985 id16_map_free(dhdp
->staid_allocator
, sta
->idx
);
986 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++)
987 sta
->flowid
[prio
] = FLOWID_INVALID
;
988 sta
->ifp
= DHD_IF_NULL
; /* dummy dhd_if object */
989 sta
->ifidx
= DHD_BAD_IF
;
990 bzero(sta
->ea
.octet
, ETHER_ADDR_LEN
);
991 INIT_LIST_HEAD(&sta
->list
);
992 sta
->idx
= ID16_INVALID
; /* implying free */
995 /** Allocate a dhd_sta object from the dhd pool. */
997 dhd_sta_alloc(dhd_pub_t
* dhdp
)
1001 dhd_sta_pool_t
* sta_pool
;
1003 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
1005 idx
= id16_map_alloc(dhdp
->staid_allocator
);
1006 if (idx
== ID16_INVALID
) {
1007 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__
));
1008 return DHD_STA_NULL
;
1011 sta_pool
= (dhd_sta_pool_t
*)(dhdp
->sta_pool
);
1012 sta
= &sta_pool
[idx
];
1014 ASSERT((sta
->idx
== ID16_INVALID
) &&
1015 (sta
->ifp
== DHD_IF_NULL
) && (sta
->ifidx
== DHD_BAD_IF
));
1016 sta
->idx
= idx
; /* implying allocated */
1021 /** Delete all STAs in an interface's STA list. */
1023 dhd_if_del_sta_list(dhd_if_t
*ifp
)
1025 dhd_sta_t
*sta
, *next
;
1026 unsigned long flags
;
1028 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1030 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1031 list_del(&sta
->list
);
1032 dhd_sta_free(&ifp
->info
->pub
, sta
);
1035 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1040 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1042 dhd_if_flush_sta(dhd_if_t
* ifp
)
1046 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1048 dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
)
1050 int idx
, sta_pool_memsz
;
1052 dhd_sta_pool_t
* sta_pool
;
1053 void * staid_allocator
;
1055 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
1056 ASSERT((dhdp
->staid_allocator
== NULL
) && (dhdp
->sta_pool
== NULL
));
1058 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1059 staid_allocator
= id16_map_init(dhdp
->osh
, max_sta
, 1);
1060 if (staid_allocator
== NULL
) {
1061 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__
));
1065 /* Pre allocate a pool of dhd_sta objects (one extra). */
1066 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
)); /* skip idx 0 */
1067 sta_pool
= (dhd_sta_pool_t
*)MALLOC(dhdp
->osh
, sta_pool_memsz
);
1068 if (sta_pool
== NULL
) {
1069 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__
));
1070 id16_map_fini(dhdp
->osh
, staid_allocator
);
1074 dhdp
->sta_pool
= sta_pool
;
1075 dhdp
->staid_allocator
= staid_allocator
;
1077 /* Initialize all sta(s) for the pre-allocated free pool. */
1078 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1079 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1080 sta
= &sta_pool
[idx
];
1081 sta
->idx
= id16_map_alloc(staid_allocator
);
1082 ASSERT(sta
->idx
<= max_sta
);
1084 /* Now place them into the pre-allocated free pool. */
1085 for (idx
= 1; idx
<= max_sta
; idx
++) {
1086 sta
= &sta_pool
[idx
];
1087 dhd_sta_free(dhdp
, sta
);
1093 /** Destruct the pool of dhd_sta_t objects.
1094 * Caller must ensure that no STA objects are currently associated with an if.
1097 dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
)
1099 dhd_sta_pool_t
* sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1103 int sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1104 for (idx
= 1; idx
<= max_sta
; idx
++) {
1105 ASSERT(sta_pool
[idx
].ifp
== DHD_IF_NULL
);
1106 ASSERT(sta_pool
[idx
].idx
== ID16_INVALID
);
1108 MFREE(dhdp
->osh
, dhdp
->sta_pool
, sta_pool_memsz
);
1109 dhdp
->sta_pool
= NULL
;
1112 id16_map_fini(dhdp
->osh
, dhdp
->staid_allocator
);
1113 dhdp
->staid_allocator
= NULL
;
1116 /* Clear the pool of dhd_sta_t objects for built-in type driver */
1118 dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
)
1120 int idx
, sta_pool_memsz
;
1122 dhd_sta_pool_t
* sta_pool
;
1123 void *staid_allocator
;
1126 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
1130 sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1131 staid_allocator
= dhdp
->staid_allocator
;
1134 DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__
));
1138 if (!staid_allocator
) {
1139 DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__
));
1143 /* clear free pool */
1144 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1145 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1147 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1148 id16_map_clear(staid_allocator
, max_sta
, 1);
1150 /* Initialize all sta(s) for the pre-allocated free pool. */
1151 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1152 sta
= &sta_pool
[idx
];
1153 sta
->idx
= id16_map_alloc(staid_allocator
);
1154 ASSERT(sta
->idx
<= max_sta
);
1156 /* Now place them into the pre-allocated free pool. */
1157 for (idx
= 1; idx
<= max_sta
; idx
++) {
1158 sta
= &sta_pool
[idx
];
1159 dhd_sta_free(dhdp
, sta
);
1163 /** Find STA with MAC address ea in an interface's STA list. */
1165 dhd_find_sta(void *pub
, int ifidx
, void *ea
)
1167 dhd_sta_t
*sta
, *next
;
1169 unsigned long flags
;
1172 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1174 return DHD_STA_NULL
;
1176 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1178 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1179 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
1180 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1185 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1187 return DHD_STA_NULL
;
1190 /** Add STA into the interface's STA list. */
1192 dhd_add_sta(void *pub
, int ifidx
, void *ea
)
1196 unsigned long flags
;
1199 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1201 return DHD_STA_NULL
;
1203 sta
= dhd_sta_alloc((dhd_pub_t
*)pub
);
1204 if (sta
== DHD_STA_NULL
) {
1205 DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__
));
1206 return DHD_STA_NULL
;
1209 memcpy(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
);
1211 /* link the sta and the dhd interface */
1214 INIT_LIST_HEAD(&sta
->list
);
1216 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1218 list_add_tail(&sta
->list
, &ifp
->sta_list
);
1221 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1226 /** Delete STA from the interface's STA list. */
1228 dhd_del_sta(void *pub
, int ifidx
, void *ea
)
1230 dhd_sta_t
*sta
, *next
;
1232 unsigned long flags
;
1235 ifp
= dhd_get_ifp((dhd_pub_t
*)pub
, ifidx
);
1239 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1241 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1242 if (!memcmp(sta
->ea
.octet
, ea
, ETHER_ADDR_LEN
)) {
1243 list_del(&sta
->list
);
1244 dhd_sta_free(&ifp
->info
->pub
, sta
);
1248 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1253 /** Add STA if it doesn't exist. Not reentrant. */
1255 dhd_findadd_sta(void *pub
, int ifidx
, void *ea
)
1259 sta
= dhd_find_sta(pub
, ifidx
, ea
);
1263 sta
= dhd_add_sta(pub
, ifidx
, ea
);
1269 static inline void dhd_if_flush_sta(dhd_if_t
* ifp
) { }
1270 static inline void dhd_if_del_sta_list(dhd_if_t
*ifp
) {}
1271 static inline int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
) { return BCME_OK
; }
1272 static inline void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
) {}
1273 static inline void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
) {}
1274 dhd_sta_t
*dhd_findadd_sta(void *pub
, int ifidx
, void *ea
) { return NULL
; }
1275 void dhd_del_sta(void *pub
, int ifidx
, void *ea
) {}
1276 #endif /* PCIE_FULL_DONGLE */
1279 /* Returns dhd iflist index correspondig the the bssidx provided by apps */
1280 int dhd_bssidx2idx(dhd_pub_t
*dhdp
, uint32 bssidx
)
1283 dhd_info_t
*dhd
= dhdp
->info
;
1286 ASSERT(bssidx
< DHD_MAX_IFS
);
1289 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
1290 ifp
= dhd
->iflist
[i
];
1291 if (ifp
&& (ifp
->bssidx
== bssidx
)) {
1292 DHD_TRACE(("Index manipulated for %s from %d to %d\n",
1293 ifp
->name
, bssidx
, i
));
1300 static inline int dhd_rxf_enqueue(dhd_pub_t
*dhdp
, void* skb
)
1306 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1310 dhd_os_rxflock(dhdp
);
1311 store_idx
= dhdp
->store_idx
;
1312 sent_idx
= dhdp
->sent_idx
;
1313 if (dhdp
->skbbuf
[store_idx
] != NULL
) {
1314 /* Make sure the previous packets are processed */
1315 dhd_os_rxfunlock(dhdp
);
1316 #ifdef RXF_DEQUEUE_ON_BUSY
1317 DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1318 skb
, store_idx
, sent_idx
));
1320 #else /* RXF_DEQUEUE_ON_BUSY */
1321 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1322 skb
, store_idx
, sent_idx
));
1323 /* removed msleep here, should use wait_event_timeout if we
1324 * want to give rx frame thread a chance to run
1326 #if defined(WAIT_DEQUEUE)
1330 #endif /* RXF_DEQUEUE_ON_BUSY */
1332 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1333 skb
, store_idx
, (store_idx
+ 1) & (MAXSKBPEND
- 1)));
1334 dhdp
->skbbuf
[store_idx
] = skb
;
1335 dhdp
->store_idx
= (store_idx
+ 1) & (MAXSKBPEND
- 1);
1336 dhd_os_rxfunlock(dhdp
);
1341 static inline void* dhd_rxf_dequeue(dhd_pub_t
*dhdp
)
1347 dhd_os_rxflock(dhdp
);
1349 store_idx
= dhdp
->store_idx
;
1350 sent_idx
= dhdp
->sent_idx
;
1351 skb
= dhdp
->skbbuf
[sent_idx
];
1354 dhd_os_rxfunlock(dhdp
);
1355 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1356 store_idx
, sent_idx
));
1360 dhdp
->skbbuf
[sent_idx
] = NULL
;
1361 dhdp
->sent_idx
= (sent_idx
+ 1) & (MAXSKBPEND
- 1);
1363 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1366 dhd_os_rxfunlock(dhdp
);
1372 int dhd_process_cid_mac(dhd_pub_t
*dhdp
, bool prepost
)
1374 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
1376 if (prepost
) { /* pre process */
1377 dhd_read_macaddr(dhd
);
1378 } else { /* post process */
1379 dhd_write_macaddr(&dhd
->pub
.mac
);
1384 #endif /* OEM_ANDROID */
1386 #if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
1388 _turn_on_arp_filter(dhd_pub_t
*dhd
, int op_mode
)
1390 bool _apply
= FALSE
;
1391 /* In case of IBSS mode, apply arp pkt filter */
1392 if (op_mode
& DHD_FLAG_IBSS_MODE
) {
1396 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1397 if ((dhd
->arp_version
== 1) &&
1398 (op_mode
& (DHD_FLAG_P2P_GC_MODE
| DHD_FLAG_P2P_GO_MODE
))) {
1406 #endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
1408 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1409 #ifdef PKT_FILTER_SUPPORT
1411 dhd_set_packet_filter_mode(struct net_device
*dev
, char *command
)
1413 dhd_info_t
*dhdi
= *(dhd_info_t
**)netdev_priv(dev
);
1415 dhdi
->pub
.pkt_filter_mode
= bcm_strtoul(command
, &command
, 0);
1419 dhd_set_packet_filter_ports(struct net_device
*dev
, char *command
)
1421 int i
= 0, error
= BCME_OK
, count
= 0, get_count
= 0, action
= 0;
1422 uint16 portnum
= 0, *ports
= NULL
, get_ports
[WL_PKT_FILTER_PORTS_MAX
];
1423 dhd_info_t
*dhdi
= *(dhd_info_t
**)netdev_priv(dev
);
1424 dhd_pub_t
*dhdp
= &dhdi
->pub
;
1425 char iovbuf
[WLC_IOCTL_SMLEN
];
1428 action
= bcm_strtoul(command
, &command
, 0);
1429 if (action
> PKT_FILTER_PORTS_MAX
)
1432 if (action
== PKT_FILTER_PORTS_LOOPBACK
) {
1433 /* echo the loopback value if port filter is supported else error */
1434 bcm_mkiovar("cap", NULL
, 0, iovbuf
, sizeof(iovbuf
));
1435 error
= dhd_wl_ioctl_cmd(dhdp
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
1437 DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__
, error
));
1441 if (strstr(iovbuf
, "pktfltr2"))
1442 return bcm_strtoul(command
, &command
, 0);
1444 DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__
));
1445 return BCME_UNSUPPORTED
;
1449 if (action
== PKT_FILTER_PORTS_CLEAR
) {
1450 /* action 0 is clear all ports */
1451 dhdp
->pkt_filter_ports_count
= 0;
1452 bzero(dhdp
->pkt_filter_ports
, sizeof(dhdp
->pkt_filter_ports
));
1455 portnum
= bcm_strtoul(command
, &command
, 0);
1457 /* no ports to add or remove */
1461 /* get configured ports */
1462 count
= dhdp
->pkt_filter_ports_count
;
1463 ports
= dhdp
->pkt_filter_ports
;
1465 if (action
== PKT_FILTER_PORTS_ADD
) {
1466 /* action 1 is add ports */
1468 /* copy new ports */
1469 while ((portnum
!= 0) && (count
< WL_PKT_FILTER_PORTS_MAX
)) {
1470 for (i
= 0; i
< count
; i
++) {
1471 /* duplicate port */
1472 if (portnum
== ports
[i
])
1475 if (portnum
!= ports
[i
])
1476 ports
[count
++] = portnum
;
1477 portnum
= bcm_strtoul(command
, &command
, 0);
1479 } else if ((action
== PKT_FILTER_PORTS_DEL
) && (count
> 0)) {
1480 /* action 2 is remove ports */
1481 bcopy(dhdp
->pkt_filter_ports
, get_ports
, count
* sizeof(uint16
));
1484 while (portnum
!= 0) {
1486 for (i
= 0; i
< get_count
; i
++) {
1487 if (portnum
!= get_ports
[i
])
1488 ports
[count
++] = get_ports
[i
];
1491 bcopy(ports
, get_ports
, count
* sizeof(uint16
));
1492 portnum
= bcm_strtoul(command
, &command
, 0);
1495 dhdp
->pkt_filter_ports_count
= count
;
1501 dhd_enable_packet_filter_ports(dhd_pub_t
*dhd
, bool enable
)
1504 wl_pkt_filter_ports_t
*portlist
= NULL
;
1505 const uint pkt_filter_ports_buf_len
= sizeof("pkt_filter_ports")
1506 + WL_PKT_FILTER_PORTS_FIXED_LEN
+ (WL_PKT_FILTER_PORTS_MAX
* sizeof(uint16
));
1507 char pkt_filter_ports_buf
[pkt_filter_ports_buf_len
];
1508 char iovbuf
[pkt_filter_ports_buf_len
];
1510 DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__
,
1511 enable
, dhd
->in_suspend
, dhd
->pkt_filter_mode
,
1512 dhd
->pkt_filter_ports_count
));
1514 bzero(pkt_filter_ports_buf
, sizeof(pkt_filter_ports_buf
));
1515 portlist
= (wl_pkt_filter_ports_t
*)pkt_filter_ports_buf
;
1516 portlist
->version
= WL_PKT_FILTER_PORTS_VERSION
;
1517 portlist
->reserved
= 0;
1520 if (!(dhd
->pkt_filter_mode
& PKT_FILTER_MODE_PORTS_ONLY
))
1523 /* enable port filter */
1524 dhd_master_mode
|= PKT_FILTER_MODE_PORTS_ONLY
;
1525 if (dhd
->pkt_filter_mode
& PKT_FILTER_MODE_FORWARD_ON_MATCH
)
1526 /* whitelist mode: FORWARD_ON_MATCH */
1527 dhd_master_mode
|= PKT_FILTER_MODE_FORWARD_ON_MATCH
;
1529 /* blacklist mode: DISCARD_ON_MATCH */
1530 dhd_master_mode
&= ~PKT_FILTER_MODE_FORWARD_ON_MATCH
;
1532 portlist
->count
= dhd
->pkt_filter_ports_count
;
1533 bcopy(dhd
->pkt_filter_ports
, portlist
->ports
,
1534 dhd
->pkt_filter_ports_count
* sizeof(uint16
));
1536 /* disable port filter */
1537 portlist
->count
= 0;
1538 dhd_master_mode
&= ~PKT_FILTER_MODE_PORTS_ONLY
;
1539 dhd_master_mode
|= PKT_FILTER_MODE_FORWARD_ON_MATCH
;
1542 DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__
, dhd_master_mode
,
1546 bcm_mkiovar("pkt_filter_ports",
1548 (WL_PKT_FILTER_PORTS_FIXED_LEN
+ (portlist
->count
* sizeof(uint16
))),
1549 iovbuf
, sizeof(iovbuf
));
1550 error
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
1552 DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__
, error
));
1555 bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode
,
1556 sizeof(dhd_master_mode
), iovbuf
, sizeof(iovbuf
));
1557 error
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
1559 DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__
, error
));
1563 #endif /* PKT_FILTER_SUPPORT */
1564 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
1566 void dhd_set_packet_filter(dhd_pub_t
*dhd
)
1568 #ifdef PKT_FILTER_SUPPORT
1571 DHD_TRACE(("%s: enter\n", __FUNCTION__
));
1572 if (dhd_pkt_filter_enable
) {
1573 for (i
= 0; i
< dhd
->pktfilter_count
; i
++) {
1574 dhd_pktfilter_offload_set(dhd
, dhd
->pktfilter
[i
]);
1577 #endif /* PKT_FILTER_SUPPORT */
1580 void dhd_enable_packet_filter(int value
, dhd_pub_t
*dhd
)
1582 #ifdef PKT_FILTER_SUPPORT
1585 DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__
, value
));
1587 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
1588 dhd_enable_packet_filter_ports(dhd
, value
);
1589 #endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
1591 /* 1 - Enable packet filter, only allow unicast packet to send up */
1592 /* 0 - Disable packet filter */
1593 if (dhd_pkt_filter_enable
&& (!value
||
1594 (dhd_support_sta_mode(dhd
) && !dhd
->dhcp_in_progress
)))
1596 for (i
= 0; i
< dhd
->pktfilter_count
; i
++) {
1597 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
1598 if (value
&& (i
== DHD_ARP_FILTER_NUM
) &&
1599 !_turn_on_arp_filter(dhd
, dhd
->op_mode
)) {
1600 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1601 "val %d, cnt %d, op_mode 0x%x\n",
1602 value
, i
, dhd
->op_mode
));
1605 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1606 dhd_pktfilter_offload_enable(dhd
, dhd
->pktfilter
[i
],
1607 value
, dhd_master_mode
);
1610 #endif /* PKT_FILTER_SUPPORT */
1613 static int dhd_set_suspend(int value
, dhd_pub_t
*dhd
)
1615 #ifndef SUPPORT_PM2_ONLY
1616 int power_mode
= PM_MAX
;
1617 #endif /* SUPPORT_PM2_ONLY */
1618 /* wl_pkt_filter_enable_t enable_parm; */
1620 int bcn_li_dtim
= 0; /* Default bcn_li_dtim in resume mode is 0 */
1622 uint roamvar
= dhd
->conf
->roam_off_suspend
;
1623 uint nd_ra_filter
= 0;
1625 #endif /* OEM_ANDROID */
1630 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1631 __FUNCTION__
, value
, dhd
->in_suspend
));
1633 dhd_suspend_lock(dhd
);
1635 #ifdef CUSTOM_SET_CPUCORE
1636 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__
, value
));
1637 /* set specific cpucore */
1638 dhd_set_cpucore(dhd
, TRUE
);
1639 #endif /* CUSTOM_SET_CPUCORE */
1640 #ifndef SUPPORT_PM2_ONLY
1641 if (dhd
->conf
->pm
>= 0)
1642 power_mode
= dhd
->conf
->pm
;
1643 #endif /* SUPPORT_PM2_ONLY */
1645 if (value
&& dhd
->in_suspend
) {
1646 #ifdef PKT_FILTER_SUPPORT
1647 dhd
->early_suspended
= 1;
1649 /* Kernel suspended */
1650 DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__
));
1652 #ifndef SUPPORT_PM2_ONLY
1653 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
,
1654 sizeof(power_mode
), TRUE
, 0);
1655 #endif /* SUPPORT_PM2_ONLY */
1657 /* Enable packet filter, only allow unicast packet to send up */
1658 dhd_enable_packet_filter(1, dhd
);
1660 /* If DTIM skip is set up as default, force it to wake
1661 * each third DTIM for better power savings. Note that
1662 * one side effect is a chance to miss BC/MC packet.
1665 /* Do not set bcn_li_ditm on WFD mode */
1666 if (dhd
->tdls_mode
) {
1670 bcn_li_dtim
= dhd_get_suspend_bcn_li_dtim(dhd
);
1671 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim
,
1672 4, iovbuf
, sizeof(iovbuf
));
1673 if (dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
),
1675 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__
));
1678 /* Disable firmware roaming during suspend */
1679 bcm_mkiovar("roam_off", (char *)&roamvar
, 4, iovbuf
, sizeof(iovbuf
));
1680 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
1681 if (FW_SUPPORTED(dhd
, ndoe
)) {
1682 /* enable IPv6 RA filter in firmware during suspend */
1684 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter
, 4,
1685 iovbuf
, sizeof(iovbuf
));
1686 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
1687 sizeof(iovbuf
), TRUE
, 0)) < 0)
1688 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1691 #endif /* OEM_ANDROID */
1693 #ifdef PKT_FILTER_SUPPORT
1694 dhd
->early_suspended
= 0;
1696 /* Kernel resumed */
1697 DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__
));
1699 #ifndef SUPPORT_PM2_ONLY
1700 power_mode
= PM_FAST
;
1701 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
,
1702 sizeof(power_mode
), TRUE
, 0);
1703 #endif /* SUPPORT_PM2_ONLY */
1704 #ifdef PKT_FILTER_SUPPORT
1705 /* disable pkt filter */
1706 dhd_enable_packet_filter(0, dhd
);
1707 #endif /* PKT_FILTER_SUPPORT */
1709 /* restore pre-suspend setting for dtim_skip */
1710 bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim
,
1711 4, iovbuf
, sizeof(iovbuf
));
1713 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
1715 roamvar
= dhd_roam_disable
;
1716 bcm_mkiovar("roam_off", (char *)&roamvar
, 4, iovbuf
, sizeof(iovbuf
));
1717 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
1718 if (FW_SUPPORTED(dhd
, ndoe
)) {
1719 /* disable IPv6 RA filter in firmware during suspend */
1721 bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter
, 4,
1722 iovbuf
, sizeof(iovbuf
));
1723 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
1724 sizeof(iovbuf
), TRUE
, 0)) < 0)
1725 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
1728 #endif /* OEM_ANDROID */
1731 dhd_suspend_unlock(dhd
);
1736 static int dhd_suspend_resume_helper(struct dhd_info
*dhd
, int val
, int force
)
1738 dhd_pub_t
*dhdp
= &dhd
->pub
;
1741 DHD_OS_WAKE_LOCK(dhdp
);
1742 DHD_PERIM_LOCK(dhdp
);
1744 /* Set flag when early suspend was called */
1745 dhdp
->in_suspend
= val
;
1746 if ((force
|| !dhdp
->suspend_disable_flag
) &&
1747 dhd_support_sta_mode(dhdp
))
1749 ret
= dhd_set_suspend(val
, dhdp
);
1752 DHD_PERIM_UNLOCK(dhdp
);
1753 DHD_OS_WAKE_UNLOCK(dhdp
);
1757 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
1758 static void dhd_early_suspend(struct early_suspend
*h
)
1760 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
1761 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
1764 dhd_suspend_resume_helper(dhd
, 1, 0);
1767 static void dhd_late_resume(struct early_suspend
*h
)
1769 struct dhd_info
*dhd
= container_of(h
, struct dhd_info
, early_suspend
);
1770 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__
));
1773 dhd_suspend_resume_helper(dhd
, 0, 0);
1775 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
1778 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
1779 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
1781 * dhd_timeout_start(&tmo, usec);
1782 * while (!dhd_timeout_expired(&tmo))
1783 * if (poll_something())
1785 * if (dhd_timeout_expired(&tmo))
1790 dhd_timeout_start(dhd_timeout_t
*tmo
, uint usec
)
1795 tmo
->tick
= jiffies_to_usecs(1);
1799 dhd_timeout_expired(dhd_timeout_t
*tmo
)
1801 /* Does nothing the first call */
1802 if (tmo
->increment
== 0) {
1807 if (tmo
->elapsed
>= tmo
->limit
)
1810 /* Add the delay that's about to take place */
1811 tmo
->elapsed
+= tmo
->increment
;
1813 if ((!CAN_SLEEP()) || tmo
->increment
< tmo
->tick
) {
1814 OSL_DELAY(tmo
->increment
);
1815 tmo
->increment
*= 2;
1816 if (tmo
->increment
> tmo
->tick
)
1817 tmo
->increment
= tmo
->tick
;
1819 wait_queue_head_t delay_wait
;
1820 DECLARE_WAITQUEUE(wait
, current
);
1821 init_waitqueue_head(&delay_wait
);
1822 add_wait_queue(&delay_wait
, &wait
);
1823 set_current_state(TASK_INTERRUPTIBLE
);
1824 (void)schedule_timeout(1);
1825 remove_wait_queue(&delay_wait
, &wait
);
1826 set_current_state(TASK_RUNNING
);
1833 dhd_net2idx(dhd_info_t
*dhd
, struct net_device
*net
)
1838 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__
));
1841 while (i
< DHD_MAX_IFS
) {
1842 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->net
&& (dhd
->iflist
[i
]->net
== net
))
1850 struct net_device
* dhd_idx2net(void *pub
, int ifidx
)
1852 struct dhd_pub
*dhd_pub
= (struct dhd_pub
*)pub
;
1853 struct dhd_info
*dhd_info
;
1855 if (!dhd_pub
|| ifidx
< 0 || ifidx
>= DHD_MAX_IFS
)
1857 dhd_info
= dhd_pub
->info
;
1858 if (dhd_info
&& dhd_info
->iflist
[ifidx
])
1859 return dhd_info
->iflist
[ifidx
]->net
;
1864 dhd_ifname2idx(dhd_info_t
*dhd
, char *name
)
1866 int i
= DHD_MAX_IFS
;
1870 if (name
== NULL
|| *name
== '\0')
1874 if (dhd
->iflist
[i
] && !strncmp(dhd
->iflist
[i
]->name
, name
, IFNAMSIZ
))
1877 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__
, i
, name
));
1879 return i
; /* default - the primary interface */
1883 dhd_ifidx2hostidx(dhd_info_t
*dhd
, int ifidx
)
1885 int i
= DHD_MAX_IFS
;
1890 if (dhd
->iflist
[i
] && (dhd
->iflist
[i
]->idx
== ifidx
))
1893 DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__
, i
, ifidx
));
1895 return i
; /* default - the primary interface */
1899 dhd_ifname(dhd_pub_t
*dhdp
, int ifidx
)
1901 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
1905 if (ifidx
< 0 || ifidx
>= DHD_MAX_IFS
) {
1906 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__
, ifidx
));
1910 if (dhd
->iflist
[ifidx
] == NULL
) {
1911 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__
, ifidx
));
1915 if (dhd
->iflist
[ifidx
]->net
)
1916 return dhd
->iflist
[ifidx
]->net
->name
;
1922 dhd_bssidx2bssid(dhd_pub_t
*dhdp
, int idx
)
1925 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
;
1928 for (i
= 0; i
< DHD_MAX_IFS
; i
++)
1929 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->bssidx
== idx
)
1930 return dhd
->iflist
[i
]->mac_addr
;
1936 #define DBUS_NRXQ 50
1937 #define DBUS_NTXQ 100
1940 dhd_dbus_send_complete(void *handle
, void *info
, int status
)
1942 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
1945 if ((dhd
== NULL
) || (pkt
== NULL
))
1948 if (status
== DBUS_OK
) {
1949 dhd
->pub
.dstats
.tx_packets
++;
1951 DHD_ERROR(("TX error=%d\n", status
));
1952 dhd
->pub
.dstats
.tx_errors
++;
1954 #ifdef PROP_TXSTATUS
1955 if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt
)) &&
1956 (dhd_wlfc_txcomplete(&dhd
->pub
, pkt
, status
== 0) != WLFC_UNSUPPORTED
)) {
1959 #endif /* PROP_TXSTATUS */
1960 PKTFREE(dhd
->pub
.osh
, pkt
, TRUE
);
1964 dhd_dbus_recv_pkt(void *handle
, void *pkt
)
1966 uchar reorder_info_buf
[WLHOST_REORDERDATA_TOTLEN
];
1967 uint reorder_info_len
;
1969 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
1975 /* If the protocol uses a data header, check and remove it */
1976 if (dhd_prot_hdrpull(&dhd
->pub
, &ifidx
, pkt
, reorder_info_buf
,
1977 &reorder_info_len
) != 0) {
1978 DHD_ERROR(("rx protocol error\n"));
1979 PKTFREE(dhd
->pub
.osh
, pkt
, FALSE
);
1980 dhd
->pub
.rx_errors
++;
1984 if (reorder_info_len
) {
1985 /* Reordering info from the firmware */
1986 dhd_process_pkt_reorder_info(&dhd
->pub
, reorder_info_buf
, reorder_info_len
,
1994 dhd_rx_frame(&dhd
->pub
, ifidx
, pkt
, pkt_count
, 0);
1998 dhd_dbus_recv_buf(void *handle
, uint8
*buf
, int len
)
2000 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
2006 if ((pkt
= PKTGET(dhd
->pub
.osh
, len
, FALSE
)) == NULL
) {
2007 DHD_ERROR(("PKTGET (rx) failed=%d\n", len
));
2011 bcopy(buf
, PKTDATA(dhd
->pub
.osh
, pkt
), len
);
2012 dhd_dbus_recv_pkt(dhd
, pkt
);
2016 dhd_dbus_txflowcontrol(void *handle
, bool onoff
)
2018 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
2019 bool wlfc_enabled
= FALSE
;
2024 #ifdef PROP_TXSTATUS
2025 wlfc_enabled
= (dhd_wlfc_flowcontrol(&dhd
->pub
, onoff
, !onoff
) != WLFC_UNSUPPORTED
);
2028 if (!wlfc_enabled
) {
2029 dhd_txflowcontrol(&dhd
->pub
, ALL_INTERFACES
, onoff
);
2034 dhd_dbus_errhandler(void *handle
, int err
)
2039 dhd_dbus_ctl_complete(void *handle
, int type
, int status
)
2041 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
2046 if (type
== DBUS_CBCTL_READ
) {
2047 if (status
== DBUS_OK
)
2048 dhd
->pub
.rx_ctlpkts
++;
2050 dhd
->pub
.rx_ctlerrs
++;
2051 } else if (type
== DBUS_CBCTL_WRITE
) {
2052 if (status
== DBUS_OK
)
2053 dhd
->pub
.tx_ctlpkts
++;
2055 dhd
->pub
.tx_ctlerrs
++;
2058 dhd_prot_ctl_complete(&dhd
->pub
);
2062 dhd_dbus_state_change(void *handle
, int state
)
2064 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
2069 if (state
== DBUS_STATE_DOWN
) {
2070 DHD_TRACE(("%s: DBUS is down\n", __FUNCTION__
));
2071 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
2072 } else if (state
== DBUS_STATE_UP
) {
2073 DHD_TRACE(("%s: DBUS is up\n", __FUNCTION__
));
2074 dhd
->pub
.busstate
= DHD_BUS_DATA
;
2077 DHD_TRACE(("%s: DBUS current state=%d\n", __FUNCTION__
, state
));
2081 dhd_dbus_pktget(void *handle
, uint len
, bool send
)
2083 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
2090 dhd_os_sdlock_txq(&dhd
->pub
);
2091 p
= PKTGET(dhd
->pub
.osh
, len
, TRUE
);
2092 dhd_os_sdunlock_txq(&dhd
->pub
);
2094 dhd_os_sdlock_rxq(&dhd
->pub
);
2095 p
= PKTGET(dhd
->pub
.osh
, len
, FALSE
);
2096 dhd_os_sdunlock_rxq(&dhd
->pub
);
2103 dhd_dbus_pktfree(void *handle
, void *p
, bool send
)
2105 dhd_info_t
*dhd
= (dhd_info_t
*)handle
;
2111 #ifdef PROP_TXSTATUS
2112 if (DHD_PKTTAG_WLFCPKT(PKTTAG(p
)) &&
2113 (dhd_wlfc_txcomplete(&dhd
->pub
, p
, FALSE
) != WLFC_UNSUPPORTED
)) {
2116 #endif /* PROP_TXSTATUS */
2118 dhd_os_sdlock_txq(&dhd
->pub
);
2119 PKTFREE(dhd
->pub
.osh
, p
, TRUE
);
2120 dhd_os_sdunlock_txq(&dhd
->pub
);
2122 dhd_os_sdlock_rxq(&dhd
->pub
);
2123 PKTFREE(dhd
->pub
.osh
, p
, FALSE
);
2124 dhd_os_sdunlock_rxq(&dhd
->pub
);
2131 dbus_rpcth_tx_complete(void *ctx
, void *pktbuf
, int status
)
2133 dhd_info_t
*dhd
= (dhd_info_t
*)ctx
;
2136 while (pktbuf
&& dhd
) {
2137 tmp
= PKTNEXT(dhd
->pub
.osh
, pktbuf
);
2138 PKTSETNEXT(dhd
->pub
.osh
, pktbuf
, NULL
);
2139 dhd_dbus_send_complete(ctx
, pktbuf
, status
);
2144 dbus_rpcth_rx_pkt(void *context
, rpc_buf_t
*rpc_buf
)
2146 dhd_dbus_recv_pkt(context
, rpc_buf
);
2150 dbus_rpcth_rx_aggrpkt(void *context
, void *rpc_buf
)
2152 dhd_info_t
*dhd
= (dhd_info_t
*)context
;
2157 /* all the de-aggregated packets are delivered back to function dbus_rpcth_rx_pkt()
2160 bcm_rpc_dbus_recv_aggrpkt(dhd
->rpc_th
, rpc_buf
,
2161 bcm_rpc_buf_len_get(dhd
->rpc_th
, rpc_buf
));
2163 /* free the original packet */
2164 dhd_dbus_pktfree(context
, rpc_buf
, FALSE
);
2168 dbus_rpcth_rx_aggrbuf(void *context
, uint8
*buf
, int len
)
2170 dhd_info_t
*dhd
= (dhd_info_t
*)context
;
2175 if (dhd
->fdaggr
& BCM_FDAGGR_D2H_ENABLED
) {
2176 bcm_rpc_dbus_recv_aggrbuf(dhd
->rpc_th
, buf
, len
);
2179 dhd_dbus_recv_buf(context
, buf
, len
);
2185 dhd_rpcth_watchdog(ulong data
)
2187 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
2189 if (dhd
->pub
.dongle_reset
) {
2193 dhd
->rpcth_timer_active
= FALSE
;
2194 /* release packets in the aggregation queue */
2195 bcm_rpc_tp_watchdog(dhd
->rpc_th
);
2199 dhd_fdaggr_ioctl(dhd_pub_t
*dhd_pub
, int ifindex
, wl_ioctl_t
*ioc
, void *buf
, int len
)
2204 rpc_th
= dhd_pub
->info
->rpc_th
;
2206 if (!strcmp("rpc_agg", ioc
->buf
)) {
2208 uint32 rpc_agg_host
;
2209 uint32 rpc_agg_dngl
;
2212 memcpy(&rpc_agg
, ioc
->buf
+ strlen("rpc_agg") + 1, sizeof(uint32
));
2213 rpc_agg_host
= rpc_agg
& BCM_RPC_TP_HOST_AGG_MASK
;
2215 bcm_rpc_tp_agg_set(rpc_th
, rpc_agg_host
, TRUE
);
2217 bcm_rpc_tp_agg_set(rpc_th
, BCM_RPC_TP_HOST_AGG_MASK
, FALSE
);
2218 bcmerror
= dhd_wl_ioctl(dhd_pub
, ifindex
, ioc
, buf
, len
);
2220 DHD_ERROR(("usb aggregation not supported\n"));
2222 dhd_pub
->info
->fdaggr
= 0;
2223 if (rpc_agg
& BCM_RPC_TP_HOST_AGG_MASK
)
2224 dhd_pub
->info
->fdaggr
|= BCM_FDAGGR_H2D_ENABLED
;
2225 if (rpc_agg
& BCM_RPC_TP_DNGL_AGG_MASK
)
2226 dhd_pub
->info
->fdaggr
|= BCM_FDAGGR_D2H_ENABLED
;
2229 rpc_agg_host
= bcm_rpc_tp_agg_get(rpc_th
);
2230 bcmerror
= dhd_wl_ioctl(dhd_pub
, ifindex
, ioc
, buf
, len
);
2232 memcpy(&rpc_agg_dngl
, buf
, sizeof(uint32
));
2233 rpc_agg
= (rpc_agg_host
& BCM_RPC_TP_HOST_AGG_MASK
) |
2234 (rpc_agg_dngl
& BCM_RPC_TP_DNGL_AGG_MASK
);
2235 memcpy(buf
, &rpc_agg
, sizeof(uint32
));
2238 } else if (!strcmp("rpc_host_agglimit", ioc
->buf
)) {
2244 memcpy(&agglimit
, ioc
->buf
+ strlen("rpc_host_agglimit") + 1,
2246 sf
= agglimit
>> 16;
2247 bytes
= agglimit
& 0xFFFF;
2248 bcm_rpc_tp_agg_limit_set(rpc_th
, sf
, bytes
);
2250 bcm_rpc_tp_agg_limit_get(rpc_th
, &sf
, &bytes
);
2251 agglimit
= (uint32
)((sf
<< 16) + bytes
);
2252 memcpy(buf
, &agglimit
, sizeof(uint32
));
2256 bcmerror
= dhd_wl_ioctl(dhd_pub
, ifindex
, ioc
, buf
, len
);
2260 #endif /* BCM_FD_AGGR */
2262 static dbus_callbacks_t dhd_dbus_cbs
= {
2264 dbus_rpcth_tx_complete
,
2265 dbus_rpcth_rx_aggrbuf
,
2266 dbus_rpcth_rx_aggrpkt
,
2268 dhd_dbus_send_complete
,
2272 dhd_dbus_txflowcontrol
,
2273 dhd_dbus_errhandler
,
2274 dhd_dbus_ctl_complete
,
2275 dhd_dbus_state_change
,
2281 dhd_bus_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
2283 bcm_bprintf(strbuf
, "Bus USB\n");
2287 dhd_bus_clearcounts(dhd_pub_t
*dhdp
)
2292 dhd_bus_dpc(struct dhd_bus
*bus
)
2298 dhd_dbus_txdata(dhd_pub_t
*dhdp
, void *pktbuf
)
2304 if (((dhd_info_t
*)(dhdp
->info
))->fdaggr
& BCM_FDAGGR_H2D_ENABLED
)
2309 dhd
= (dhd_info_t
*)(dhdp
->info
);
2310 ret
= bcm_rpc_tp_buf_send(dhd
->rpc_th
, pktbuf
);
2311 if (dhd
->rpcth_timer_active
== FALSE
) {
2312 dhd
->rpcth_timer_active
= TRUE
;
2313 mod_timer(&dhd
->rpcth_timer
, jiffies
+ BCM_RPC_TP_HOST_TMOUT
* HZ
/ 1000);
2317 #endif /* BCM_FD_AGGR */
2318 return dbus_send_txdata(dhdp
->dbus
, pktbuf
);
2321 #endif /* BCMDBUS */
2324 _dhd_set_multicast_list(dhd_info_t
*dhd
, int ifidx
)
2326 struct net_device
*dev
;
2327 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
2328 struct netdev_hw_addr
*ha
;
2330 struct dev_mc_list
*mclist
;
2332 uint32 allmulti
, cnt
;
2339 #ifdef MCAST_LIST_ACCUMULATION
2341 uint32 cnt_iface
[DHD_MAX_IFS
];
2345 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
2346 if (dhd
->iflist
[i
]) {
2347 dev
= dhd
->iflist
[i
]->net
;
2351 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
2352 dev
= dhd
->iflist
[ifidx
]->net
;
2355 #endif /* MCAST_LIST_ACCUMULATION */
2356 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2357 netif_addr_lock_bh(dev
);
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
2360 #ifdef MCAST_LIST_ACCUMULATION
2361 cnt_iface
[i
] = netdev_mc_count(dev
);
2362 cnt
+= cnt_iface
[i
];
2364 cnt
= netdev_mc_count(dev
);
2365 #endif /* MCAST_LIST_ACCUMULATION */
2367 #ifdef MCAST_LIST_ACCUMULATION
2368 cnt
+= dev
->mc_count
;
2370 cnt
= dev
->mc_count
;
2371 #endif /* MCAST_LIST_ACCUMULATION */
2372 #endif /* LINUX_VERSION_CODE */
2374 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2375 netif_addr_unlock_bh(dev
);
2378 /* Determine initial value of allmulti flag */
2379 #ifdef MCAST_LIST_ACCUMULATION
2380 allmulti
|= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
2384 allmulti
= (dev
->flags
& IFF_ALLMULTI
) ? TRUE
: FALSE
;
2385 #endif /* MCAST_LIST_ACCUMULATION */
2387 /* Send down the multicast list first. */
2390 buflen
= sizeof("mcast_list") + sizeof(cnt
) + (cnt
* ETHER_ADDR_LEN
);
2391 if (!(bufp
= buf
= MALLOC(dhd
->pub
.osh
, buflen
))) {
2392 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
2393 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
2397 strncpy(bufp
, "mcast_list", buflen
- 1);
2398 bufp
[buflen
- 1] = '\0';
2399 bufp
+= strlen("mcast_list") + 1;
2402 memcpy(bufp
, &cnt
, sizeof(cnt
));
2403 bufp
+= sizeof(cnt
);
2405 #ifdef MCAST_LIST_ACCUMULATION
2406 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
2407 if (dhd
->iflist
[i
]) {
2408 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i
));
2409 dev
= dhd
->iflist
[i
]->net
;
2410 #endif /* MCAST_LIST_ACCUMULATION */
2412 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2413 netif_addr_lock_bh(dev
);
2415 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
2416 netdev_for_each_mc_addr(ha
, dev
) {
2417 #ifdef MCAST_LIST_ACCUMULATION
2421 #endif /* MCAST_LIST_ACCUMULATION */
2423 memcpy(bufp
, ha
->addr
, ETHER_ADDR_LEN
);
2424 bufp
+= ETHER_ADDR_LEN
;
2425 #ifdef MCAST_LIST_ACCUMULATION
2426 DHD_TRACE(("_dhd_set_multicast_list: cnt "
2428 cnt_iface
[i
], MAC2STRDBG(ha
->addr
)));
2432 #endif /* MCAST_LIST_ACCUMULATION */
2435 #ifdef MCAST_LIST_ACCUMULATION
2436 for (mclist
= dev
->mc_list
; (mclist
&& (cnt_iface
[i
] > 0));
2437 cnt_iface
[i
]--, mclist
= mclist
->next
)
2439 for (mclist
= dev
->mc_list
; (mclist
&& (cnt
> 0));
2440 cnt
--, mclist
= mclist
->next
)
2441 #endif /* MCAST_LIST_ACCUMULATION */
2443 memcpy(bufp
, (void *)mclist
->dmi_addr
, ETHER_ADDR_LEN
);
2444 bufp
+= ETHER_ADDR_LEN
;
2446 #endif /* LINUX_VERSION_CODE */
2448 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
2449 netif_addr_unlock_bh(dev
);
2451 #ifdef MCAST_LIST_ACCUMULATION
2454 #endif /* MCAST_LIST_ACCUMULATION */
2456 memset(&ioc
, 0, sizeof(ioc
));
2457 ioc
.cmd
= WLC_SET_VAR
;
2462 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
2464 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2465 dhd_ifname(&dhd
->pub
, ifidx
), cnt
));
2466 allmulti
= cnt
? TRUE
: allmulti
;
2469 MFREE(dhd
->pub
.osh
, buf
, buflen
);
2471 /* Now send the allmulti setting. This is based on the setting in the
2472 * net_device flags, but might be modified above to be turned on if we
2473 * were trying to set some addresses and dongle rejected it...
2476 buflen
= sizeof("allmulti") + sizeof(allmulti
);
2477 if (!(buf
= MALLOC(dhd
->pub
.osh
, buflen
))) {
2478 DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd
->pub
, ifidx
)));
2481 allmulti
= htol32(allmulti
);
2483 if (!bcm_mkiovar("allmulti", (void*)&allmulti
, sizeof(allmulti
), buf
, buflen
)) {
2484 DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
2485 dhd_ifname(&dhd
->pub
, ifidx
), (int)sizeof(allmulti
), buflen
));
2486 MFREE(dhd
->pub
.osh
, buf
, buflen
);
2491 memset(&ioc
, 0, sizeof(ioc
));
2492 ioc
.cmd
= WLC_SET_VAR
;
2497 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
2499 DHD_ERROR(("%s: set allmulti %d failed\n",
2500 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
2503 MFREE(dhd
->pub
.osh
, buf
, buflen
);
2505 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
2507 #ifdef MCAST_LIST_ACCUMULATION
2509 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
2510 if (dhd
->iflist
[i
]) {
2511 dev
= dhd
->iflist
[i
]->net
;
2512 allmulti
|= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
2516 allmulti
= (dev
->flags
& IFF_PROMISC
) ? TRUE
: FALSE
;
2517 #endif /* MCAST_LIST_ACCUMULATION */
2519 allmulti
= htol32(allmulti
);
2521 memset(&ioc
, 0, sizeof(ioc
));
2522 ioc
.cmd
= WLC_SET_PROMISC
;
2523 ioc
.buf
= &allmulti
;
2524 ioc
.len
= sizeof(allmulti
);
2527 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
2529 DHD_ERROR(("%s: set promisc %d failed\n",
2530 dhd_ifname(&dhd
->pub
, ifidx
), ltoh32(allmulti
)));
2535 _dhd_set_mac_address(dhd_info_t
*dhd
, int ifidx
, uint8
*addr
)
2541 if (!bcm_mkiovar("cur_etheraddr", (char*)addr
, ETHER_ADDR_LEN
, buf
, 32)) {
2542 DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd
->pub
, ifidx
)));
2545 memset(&ioc
, 0, sizeof(ioc
));
2546 ioc
.cmd
= WLC_SET_VAR
;
2551 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
2553 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd
->pub
, ifidx
)));
2555 memcpy(dhd
->iflist
[ifidx
]->net
->dev_addr
, addr
, ETHER_ADDR_LEN
);
2557 memcpy(dhd
->pub
.mac
.octet
, addr
, ETHER_ADDR_LEN
);
2564 extern struct net_device
*ap_net_dev
;
2565 extern tsk_ctl_t ap_eth_ctl
; /* ap netdev heper thread ctl */
2569 dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
)
2571 dhd_info_t
*dhd
= handle
;
2572 dhd_if_event_t
*if_event
= event_info
;
2573 struct net_device
*ndev
;
2576 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2577 struct wireless_dev
*vwdev
, *primary_wdev
;
2578 struct net_device
*primary_ndev
;
2579 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2581 if (event
!= DHD_WQ_WORK_IF_ADD
) {
2582 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
2587 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
2592 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
2596 dhd_net_if_lock_local(dhd
);
2597 DHD_OS_WAKE_LOCK(&dhd
->pub
);
2598 DHD_PERIM_LOCK(&dhd
->pub
);
2600 ifidx
= if_event
->event
.ifidx
;
2601 bssidx
= if_event
->event
.bssidx
;
2602 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__
, ifidx
));
2604 ndev
= dhd_allocate_if(&dhd
->pub
, ifidx
, if_event
->name
,
2605 if_event
->mac
, bssidx
, TRUE
);
2607 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__
));
2611 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
2612 vwdev
= kzalloc(sizeof(*vwdev
), GFP_KERNEL
);
2613 if (unlikely(!vwdev
)) {
2614 DHD_ERROR(("%s :Could not allocate wireless device\n", __FUNCTION__
));
2617 primary_ndev
= dhd
->pub
.info
->iflist
[0]->net
;
2618 primary_wdev
= ndev_to_wdev(primary_ndev
);
2619 vwdev
->wiphy
= primary_wdev
->wiphy
;
2620 vwdev
->iftype
= if_event
->event
.role
;
2621 vwdev
->netdev
= ndev
;
2622 ndev
->ieee80211_ptr
= vwdev
;
2623 SET_NETDEV_DEV(ndev
, wiphy_dev(vwdev
->wiphy
));
2624 DHD_ERROR(("virtual interface(%s) is created\n", if_event
->name
));
2625 #endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
2627 DHD_PERIM_UNLOCK(&dhd
->pub
);
2628 ret
= dhd_register_if(&dhd
->pub
, ifidx
, TRUE
);
2629 DHD_PERIM_LOCK(&dhd
->pub
);
2630 if (ret
!= BCME_OK
) {
2631 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__
));
2632 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
2635 #ifdef PCIE_FULL_DONGLE
2636 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2637 if (FW_SUPPORTED((&dhd
->pub
), ap
) && !(DHD_IF_ROLE_STA(if_event
->event
.role
))) {
2638 char iovbuf
[WLC_IOCTL_SMLEN
];
2641 memset(iovbuf
, 0, sizeof(iovbuf
));
2642 bcm_mkiovar("ap_isolate", (char *)&var_int
, 4, iovbuf
, sizeof(iovbuf
));
2643 ret
= dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, ifidx
);
2645 if (ret
!= BCME_OK
) {
2646 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__
));
2647 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
2650 #endif /* PCIE_FULL_DONGLE */
2652 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
2654 DHD_PERIM_UNLOCK(&dhd
->pub
);
2655 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2656 dhd_net_if_unlock_local(dhd
);
2660 dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
)
2662 dhd_info_t
*dhd
= handle
;
2664 dhd_if_event_t
*if_event
= event_info
;
2667 if (event
!= DHD_WQ_WORK_IF_DEL
) {
2668 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
2673 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
2678 DHD_ERROR(("%s: event data is null \n", __FUNCTION__
));
2682 dhd_net_if_lock_local(dhd
);
2683 DHD_OS_WAKE_LOCK(&dhd
->pub
);
2684 DHD_PERIM_LOCK(&dhd
->pub
);
2686 ifidx
= if_event
->event
.ifidx
;
2687 DHD_TRACE(("Removing interface with idx %d\n", ifidx
));
2689 dhd_remove_if(&dhd
->pub
, ifidx
, TRUE
);
2691 MFREE(dhd
->pub
.osh
, if_event
, sizeof(dhd_if_event_t
));
2693 DHD_PERIM_UNLOCK(&dhd
->pub
);
2694 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2695 dhd_net_if_unlock_local(dhd
);
2699 dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
)
2701 dhd_info_t
*dhd
= handle
;
2702 dhd_if_t
*ifp
= event_info
;
2704 if (event
!= DHD_WQ_WORK_SET_MAC
) {
2705 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
2709 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
2713 dhd_net_if_lock_local(dhd
);
2714 DHD_OS_WAKE_LOCK(&dhd
->pub
);
2715 DHD_PERIM_LOCK(&dhd
->pub
);
2719 unsigned long flags
;
2721 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
2722 in_ap
= (ap_net_dev
!= NULL
);
2723 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
2726 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
2733 if (ifp
== NULL
|| !dhd
->pub
.up
) {
2734 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
2738 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__
));
2739 ifp
->set_macaddress
= FALSE
;
2740 if (_dhd_set_mac_address(dhd
, ifp
->idx
, ifp
->mac_addr
) == 0)
2741 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__
));
2743 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__
));
2746 DHD_PERIM_UNLOCK(&dhd
->pub
);
2747 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2748 dhd_net_if_unlock_local(dhd
);
2752 dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
)
2754 dhd_info_t
*dhd
= handle
;
2755 dhd_if_t
*ifp
= event_info
;
2758 if (event
!= DHD_WQ_WORK_SET_MCAST_LIST
) {
2759 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
2764 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__
));
2768 dhd_net_if_lock_local(dhd
);
2769 DHD_OS_WAKE_LOCK(&dhd
->pub
);
2770 DHD_PERIM_LOCK(&dhd
->pub
);
2775 unsigned long flags
;
2776 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
2777 in_ap
= (ap_net_dev
!= NULL
);
2778 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
2781 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
2783 ifp
->set_multicast
= FALSE
;
2789 if (ifp
== NULL
|| !dhd
->pub
.up
) {
2790 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__
));
2796 #ifdef MCAST_LIST_ACCUMULATION
2798 #endif /* MCAST_LIST_ACCUMULATION */
2800 _dhd_set_multicast_list(dhd
, ifidx
);
2801 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__
, ifidx
));
2804 DHD_PERIM_UNLOCK(&dhd
->pub
);
2805 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2806 dhd_net_if_unlock_local(dhd
);
2810 dhd_set_mac_address(struct net_device
*dev
, void *addr
)
2814 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
2815 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
2819 ifidx
= dhd_net2idx(dhd
, dev
);
2820 if (ifidx
== DHD_BAD_IF
)
2823 dhdif
= dhd
->iflist
[ifidx
];
2825 dhd_net_if_lock_local(dhd
);
2826 memcpy(dhdif
->mac_addr
, sa
->sa_data
, ETHER_ADDR_LEN
);
2827 dhdif
->set_macaddress
= TRUE
;
2828 dhd_net_if_unlock_local(dhd
);
2829 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)dhdif
, DHD_WQ_WORK_SET_MAC
,
2830 dhd_set_mac_addr_handler
, DHD_WORK_PRIORITY_LOW
);
2835 dhd_set_multicast_list(struct net_device
*dev
)
2837 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
2840 ifidx
= dhd_net2idx(dhd
, dev
);
2841 if (ifidx
== DHD_BAD_IF
)
2844 dhd
->iflist
[ifidx
]->set_multicast
= TRUE
;
2845 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)dhd
->iflist
[ifidx
],
2846 DHD_WQ_WORK_SET_MCAST_LIST
, dhd_set_mcast_list_handler
, DHD_WORK_PRIORITY_LOW
);
2849 #ifdef PROP_TXSTATUS
2851 dhd_os_wlfc_block(dhd_pub_t
*pub
)
2853 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
2856 spin_lock_irqsave(&di
->wlfc_spinlock
, di
->wlfc_lock_flags
);
2858 spin_lock_bh(&di
->wlfc_spinlock
);
2864 dhd_os_wlfc_unblock(dhd_pub_t
*pub
)
2866 dhd_info_t
*di
= (dhd_info_t
*)(pub
->info
);
2870 spin_unlock_irqrestore(&di
->wlfc_spinlock
, di
->wlfc_lock_flags
);
2872 spin_unlock_bh(&di
->wlfc_spinlock
);
2877 #endif /* PROP_TXSTATUS */
2879 #if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
2885 static const PKTTYPE_INFO packet_type_info
[] =
2887 { ETHER_TYPE_IP
, "IP" },
2888 { ETHER_TYPE_ARP
, "ARP" },
2889 { ETHER_TYPE_BRCM
, "BRCM" },
2890 { ETHER_TYPE_802_1X
, "802.1X" },
2891 { ETHER_TYPE_WAI
, "WAPI" },
2895 static const char *_get_packet_type_str(uint16 type
)
2898 int n
= sizeof(packet_type_info
)/sizeof(packet_type_info
[1]) - 1;
2900 for (i
= 0; i
< n
; i
++) {
2901 if (packet_type_info
[i
].type
== type
)
2902 return packet_type_info
[i
].str
;
2905 return packet_type_info
[n
].str
;
2907 #endif /* DHD_RX_DUMP || DHD_TX_DUMP */
2909 #if defined(DHD_TX_DUMP)
2911 dhd_tx_dump(osl_t
*osh
, void *pkt
)
2915 struct ether_header
*eh
;
2917 dump_data
= PKTDATA(osh
, pkt
);
2918 eh
= (struct ether_header
*) dump_data
;
2919 protocol
= ntoh16(eh
->ether_type
);
2921 DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol
)));
2923 if (protocol
== ETHER_TYPE_802_1X
) {
2924 DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
2925 dump_data
[14], dump_data
[15], dump_data
[30]));
2928 #if defined(DHD_TX_FULL_DUMP)
2932 datalen
= PKTLEN(osh
, pkt
);
2934 for (i
= 0; i
< datalen
; i
++) {
2935 DHD_ERROR(("%02X ", dump_data
[i
]));
2941 #endif /* DHD_TX_FULL_DUMP */
2943 #endif /* DHD_TX_DUMP */
2946 dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
2949 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
2950 struct ether_header
*eh
= NULL
;
2952 /* Reject if down */
2953 if (!dhdp
->up
|| (dhdp
->busstate
== DHD_BUS_DOWN
)) {
2954 /* free the packet here since the caller won't */
2955 PKTFREE(dhdp
->osh
, pktbuf
, TRUE
);
2959 #ifdef PCIE_FULL_DONGLE
2960 if (dhdp
->busstate
== DHD_BUS_SUSPEND
) {
2961 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
2962 PKTFREE(dhdp
->osh
, pktbuf
, TRUE
);
2965 #endif /* PCIE_FULL_DONGLE */
2967 #ifdef DHD_UNICAST_DHCP
2968 /* if dhcp_unicast is enabled, we need to convert the */
2969 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2970 if (dhdp
->dhcp_unicast
) {
2971 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp
, pktbuf
, ifidx
);
2973 #endif /* DHD_UNICAST_DHCP */
2974 /* Update multicast statistic */
2975 if (PKTLEN(dhdp
->osh
, pktbuf
) >= ETHER_HDR_LEN
) {
2976 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
2977 eh
= (struct ether_header
*)pktdata
;
2979 if (ETHER_ISMULTI(eh
->ether_dhost
))
2980 dhdp
->tx_multicast
++;
2981 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_802_1X
)
2982 atomic_inc(&dhd
->pend_8021x_cnt
);
2983 #ifdef DHD_DHCP_DUMP
2984 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) {
2988 uint16 udp_port_pos
;
2989 uint8
*ptr8
= (uint8
*)&pktdata
[ETHER_HDR_LEN
];
2990 uint8 ip_header_len
= (*ptr8
& 0x0f)<<2;
2992 udp_port_pos
= ETHER_HDR_LEN
+ ip_header_len
;
2993 source_port
= (pktdata
[udp_port_pos
] << 8) | pktdata
[udp_port_pos
+1];
2994 dest_port
= (pktdata
[udp_port_pos
+2] << 8) | pktdata
[udp_port_pos
+3];
2995 if (source_port
== 0x0044 || dest_port
== 0x0044) {
2996 dump_hex
= (pktdata
[udp_port_pos
+249] << 8) |
2997 pktdata
[udp_port_pos
+250];
2998 if (dump_hex
== 0x0101) {
2999 DHD_ERROR(("DHCP - DISCOVER [TX]\n"));
3000 } else if (dump_hex
== 0x0102) {
3001 DHD_ERROR(("DHCP - OFFER [TX]\n"));
3002 } else if (dump_hex
== 0x0103) {
3003 DHD_ERROR(("DHCP - REQUEST [TX]\n"));
3004 } else if (dump_hex
== 0x0105) {
3005 DHD_ERROR(("DHCP - ACK [TX]\n"));
3007 DHD_ERROR(("DHCP - 0x%X [TX]\n", dump_hex
));
3009 } else if (source_port
== 0x0043 || dest_port
== 0x0043) {
3010 DHD_ERROR(("DHCP - BOOTP [RX]\n"));
3013 #endif /* DHD_DHCP_DUMP */
3015 PKTFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
3019 /* Look into the packet and update the packet priority */
3020 #ifndef PKTPRIO_OVERRIDE
3021 if (PKTPRIO(pktbuf
) == 0)
3024 pktsetprio_qms(pktbuf
, wl_get_up_table(), FALSE
);
3026 pktsetprio(pktbuf
, FALSE
);
3027 #endif /* QOS_MAP_SET */
3030 #if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
3032 * Lkup the per interface hash table, for a matching flowring. If one is not
3033 * available, allocate a unique flowid and add a flowring entry.
3034 * The found or newly created flowid is placed into the pktbuf's tag.
3036 ret
= dhd_flowid_update(dhdp
, ifidx
, dhdp
->flow_prio_map
[(PKTPRIO(pktbuf
))], pktbuf
);
3037 if (ret
!= BCME_OK
) {
3038 PKTCFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
3042 #if defined(DHD_TX_DUMP)
3043 dhd_tx_dump(dhdp
->osh
, pktbuf
);
3046 /* terence 20150901: Micky add to ajust the 802.1X priority */
3047 /* Set the 802.1X packet with the highest priority 7 */
3048 if (dhdp
->conf
->pktprio8021x
>= 0)
3049 pktset8021xprio(pktbuf
, dhdp
->conf
->pktprio8021x
);
3051 #ifdef PROP_TXSTATUS
3052 if (dhd_wlfc_is_supported(dhdp
)) {
3053 /* store the interface ID */
3054 DHD_PKTTAG_SETIF(PKTTAG(pktbuf
), ifidx
);
3056 /* store destination MAC in the tag as well */
3057 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf
), eh
->ether_dhost
);
3059 /* decide which FIFO this packet belongs to */
3060 if (ETHER_ISMULTI(eh
->ether_dhost
))
3061 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
3062 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), AC_COUNT
);
3064 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), WME_PRIO2AC(PKTPRIO(pktbuf
)));
3066 #endif /* PROP_TXSTATUS */
3067 /* If the protocol uses a data header, apply it */
3068 dhd_prot_hdrpush(dhdp
, ifidx
, pktbuf
);
3070 /* Use bus module to send data frame */
3072 dhd_htsf_addtxts(dhdp
, pktbuf
);
3076 #ifdef PROP_TXSTATUS
3077 if (dhd_wlfc_commit_packets(dhdp
, (f_commitpkt_t
)dhd_dbus_txdata
,
3078 dhdp
, pktbuf
, TRUE
) == WLFC_UNSUPPORTED
) {
3079 /* non-proptxstatus way */
3080 ret
= dhd_dbus_txdata(dhdp
, pktbuf
);
3083 ret
= dhd_dbus_txdata(dhdp
, pktbuf
);
3084 #endif /* PROP_TXSTATUS */
3086 PKTFREE(dhdp
->osh
, pktbuf
, TRUE
);
3088 #ifdef PROP_TXSTATUS
3090 if (dhd_wlfc_commit_packets(dhdp
, (f_commitpkt_t
)dhd_bus_txdata
,
3091 dhdp
->bus
, pktbuf
, TRUE
) == WLFC_UNSUPPORTED
) {
3092 /* non-proptxstatus way */
3094 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
3096 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
3097 #endif /* BCMPCIE */
3102 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
3104 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
3105 #endif /* BCMPCIE */
3106 #endif /* PROP_TXSTATUS */
3108 #endif /* BCMDBUS */
3114 dhd_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
3119 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
3120 dhd_if_t
*ifp
= NULL
;
3123 uint8 htsfdlystat_sz
= dhd
->pub
.htsfdlystat_sz
;
3125 uint8 htsfdlystat_sz
= 0;
3128 struct ether_header
*eh
;
3130 #endif /* DHD_WMF */
3132 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3134 DHD_OS_WAKE_LOCK(&dhd
->pub
);
3135 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3137 /* Reject if down */
3138 if (dhd
->pub
.busstate
== DHD_BUS_DOWN
|| dhd
->pub
.hang_was_sent
) {
3139 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3140 __FUNCTION__
, dhd
->pub
.up
, dhd
->pub
.busstate
));
3141 netif_stop_queue(net
);
3142 #if defined(OEM_ANDROID)
3143 /* Send Event when bus down detected during data session */
3145 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__
));
3146 net_os_send_hang_message(net
);
3148 #endif /* OEM_ANDROID */
3149 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3150 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3151 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3154 return NETDEV_TX_BUSY
;
3158 ifp
= DHD_DEV_IFP(net
);
3159 ifidx
= DHD_DEV_IFIDX(net
);
3161 if (ifidx
== DHD_BAD_IF
) {
3162 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__
, ifidx
));
3163 netif_stop_queue(net
);
3164 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3165 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3166 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3169 return NETDEV_TX_BUSY
;
3173 ASSERT(ifidx
== dhd_net2idx(dhd
, net
));
3174 ASSERT((ifp
!= NULL
) && ((ifidx
< DHD_MAX_IFS
) && (ifp
== dhd
->iflist
[ifidx
])));
3176 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
3178 /* re-align socket buffer if "skb->data" is odd address */
3179 if (((unsigned long)(skb
->data
)) & 0x1) {
3180 unsigned char *data
= skb
->data
;
3181 uint32 length
= skb
->len
;
3182 PKTPUSH(dhd
->pub
.osh
, skb
, 1);
3183 memmove(skb
->data
, data
, length
);
3184 PKTSETLEN(dhd
->pub
.osh
, skb
, length
);
3187 datalen
= PKTLEN(dhd
->pub
.osh
, skb
);
3189 /* Make sure there's enough room for any header */
3191 if (skb_headroom(skb
) < dhd
->pub
.hdrlen
+ htsfdlystat_sz
) {
3192 struct sk_buff
*skb2
;
3194 DHD_INFO(("%s: insufficient headroom\n",
3195 dhd_ifname(&dhd
->pub
, ifidx
)));
3196 dhd
->pub
.tx_realloc
++;
3198 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
3199 skb2
= skb_realloc_headroom(skb
, dhd
->pub
.hdrlen
+ htsfdlystat_sz
);
3202 if ((skb
= skb2
) == NULL
) {
3203 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
3204 dhd_ifname(&dhd
->pub
, ifidx
)));
3208 bcm_object_trace_opr(skb
, BCM_OBJDBG_ADD_PKT
, __FUNCTION__
, __LINE__
);
3211 /* Convert to packet */
3212 if (!(pktbuf
= PKTFRMNATIVE(dhd
->pub
.osh
, skb
))) {
3213 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
3214 dhd_ifname(&dhd
->pub
, ifidx
)));
3215 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
, __FUNCTION__
, __LINE__
);
3216 dev_kfree_skb_any(skb
);
3221 if (htsfdlystat_sz
&& PKTLEN(dhd
->pub
.osh
, pktbuf
) >= ETHER_ADDR_LEN
) {
3222 uint8
*pktdata
= (uint8
*)PKTDATA(dhd
->pub
.osh
, pktbuf
);
3223 struct ether_header
*eh
= (struct ether_header
*)pktdata
;
3225 if (!ETHER_ISMULTI(eh
->ether_dhost
) &&
3226 (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
)) {
3227 eh
->ether_type
= hton16(ETHER_TYPE_BRCM_PKTDLYSTATS
);
3232 eh
= (struct ether_header
*)PKTDATA(dhd
->pub
.osh
, pktbuf
);
3233 iph
= (uint8
*)eh
+ ETHER_HDR_LEN
;
3235 /* WMF processing for multicast packets
3236 * Only IPv4 packets are handled
3238 if (ifp
->wmf
.wmf_enable
&& (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) &&
3239 (IP_VER(iph
) == IP_VER_4
) && (ETHER_ISMULTI(eh
->ether_dhost
) ||
3240 ((IPV4_PROT(iph
) == IP_PROT_IGMP
) && dhd
->pub
.wmf_ucast_igmp
))) {
3241 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
3243 bool ucast_convert
= FALSE
;
3244 #ifdef DHD_UCAST_UPNP
3247 dest_ip
= ntoh32(*((uint32
*)(iph
+ IPV4_DEST_IP_OFFSET
)));
3248 ucast_convert
= dhd
->pub
.wmf_ucast_upnp
&& MCAST_ADDR_UPNP_SSDP(dest_ip
);
3249 #endif /* DHD_UCAST_UPNP */
3250 #ifdef DHD_IGMP_UCQUERY
3251 ucast_convert
|= dhd
->pub
.wmf_ucast_igmp_query
&&
3252 (IPV4_PROT(iph
) == IP_PROT_IGMP
) &&
3253 (*(iph
+ IPV4_HLEN(iph
)) == IGMPV2_HOST_MEMBERSHIP_QUERY
);
3254 #endif /* DHD_IGMP_UCQUERY */
3255 if (ucast_convert
) {
3257 unsigned long flags
;
3259 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
3261 /* Convert upnp/igmp query to unicast for each assoc STA */
3262 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
3263 if ((sdu_clone
= PKTDUP(dhd
->pub
.osh
, pktbuf
)) == NULL
) {
3264 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
3265 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3266 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3269 dhd_wmf_forward(ifp
->wmf
.wmfh
, sdu_clone
, 0, sta
, 1);
3272 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
3273 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3274 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3276 PKTFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
3277 return NETDEV_TX_OK
;
3279 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
3281 /* There will be no STA info if the packet is coming from LAN host
3284 ret
= dhd_wmf_packets_handle(&dhd
->pub
, pktbuf
, NULL
, ifidx
, 0);
3288 /* Either taken by WMF or we should drop it.
3291 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3292 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3293 return NETDEV_TX_OK
;
3295 /* Continue the transmit path */
3300 #endif /* DHD_WMF */
3302 #ifdef DHDTCPACK_SUPPRESS
3303 if (dhd
->pub
.tcpack_sup_mode
== TCPACK_SUP_HOLD
) {
3304 /* If this packet has been hold or got freed, just return */
3305 if (dhd_tcpack_hold(&dhd
->pub
, pktbuf
, ifidx
)) {
3310 /* If this packet has replaced another packet and got freed, just return */
3311 if (dhd_tcpack_suppress(&dhd
->pub
, pktbuf
)) {
3316 #endif /* DHDTCPACK_SUPPRESS */
3318 ret
= dhd_sendpkt(&dhd
->pub
, ifidx
, pktbuf
);
3322 ifp
->stats
.tx_dropped
++;
3323 dhd
->pub
.tx_dropped
++;
3327 #ifdef PROP_TXSTATUS
3328 /* tx_packets counter can counted only when wlfc is disabled */
3329 if (!dhd_wlfc_is_supported(&dhd
->pub
))
3332 dhd
->pub
.tx_packets
++;
3333 ifp
->stats
.tx_packets
++;
3334 ifp
->stats
.tx_bytes
+= datalen
;
3338 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
3339 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3341 /* Return ok: we always eat the packet */
3342 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
3345 return NETDEV_TX_OK
;
3351 dhd_txflowcontrol(dhd_pub_t
*dhdp
, int ifidx
, bool state
)
3353 struct net_device
*net
;
3354 dhd_info_t
*dhd
= dhdp
->info
;
3357 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3361 if (ifidx
== ALL_INTERFACES
) {
3362 /* Flow control on all active interfaces */
3363 dhdp
->txoff
= state
;
3364 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
3365 if (dhd
->iflist
[i
]) {
3366 net
= dhd
->iflist
[i
]->net
;
3368 netif_stop_queue(net
);
3370 netif_wake_queue(net
);
3375 if (dhd
->iflist
[ifidx
]) {
3376 net
= dhd
->iflist
[ifidx
]->net
;
3378 netif_stop_queue(net
);
3380 netif_wake_queue(net
);
3388 dhd_is_rxthread_enabled(dhd_pub_t
*dhdp
)
3390 dhd_info_t
*dhd
= dhdp
->info
;
3392 return dhd
->rxthread_enabled
;
3394 #endif /* DHD_WMF */
3397 dhd_rx_frame(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
, int numpkt
, uint8 chan
)
3399 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
3400 struct sk_buff
*skb
;
3403 void *data
, *pnext
= NULL
;
3406 wl_event_msg_t event
;
3407 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3410 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
3411 void *skbhead
= NULL
;
3412 void *skbprev
= NULL
;
3413 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3416 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3418 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3420 for (i
= 0; pktbuf
&& i
< numpkt
; i
++, pktbuf
= pnext
) {
3421 struct ether_header
*eh
;
3423 struct dot11_llc_snap_header
*lsh
;
3426 pnext
= PKTNEXT(dhdp
->osh
, pktbuf
);
3427 PKTSETNEXT(dhdp
->osh
, pktbuf
, NULL
);
3429 ifp
= dhd
->iflist
[ifidx
];
3431 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
3433 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
3437 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
3439 /* Dropping only data packets before registering net device to avoid kernel panic */
3440 #ifndef PROP_TXSTATUS_VSDB
3441 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
) &&
3442 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
3444 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
|| !dhd
->pub
.up
) &&
3445 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
3446 #endif /* PROP_TXSTATUS_VSDB */
3448 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
3450 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
3455 lsh
= (struct dot11_llc_snap_header
*)&eh
[1];
3457 if ((ntoh16(eh
->ether_type
) < ETHER_TYPE_MIN
) &&
3458 (PKTLEN(dhdp
->osh
, pktbuf
) >= RFC1042_HDR_LEN
) &&
3459 bcmp(lsh
, BT_SIG_SNAP_MPROT
, DOT11_LLC_SNAP_HDR_LEN
- 2) == 0 &&
3460 lsh
->type
== HTON16(BTA_PROT_L2CAP
)) {
3461 amp_hci_ACL_data_t
*ACL_data
= (amp_hci_ACL_data_t
*)
3462 ((uint8
*)eh
+ RFC1042_HDR_LEN
);
3465 #endif /* WLBTAMP */
3467 #ifdef PROP_TXSTATUS
3468 if (dhd_wlfc_is_header_only_pkt(dhdp
, pktbuf
)) {
3469 /* WLFC may send header only packet when
3470 there is an urgent message but no packet to
3473 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
3477 #ifdef DHD_L2_FILTER
3478 /* If block_ping is enabled drop the ping packet */
3479 if (dhdp
->block_ping
) {
3480 if (dhd_l2_filter_block_ping(dhdp
, pktbuf
, ifidx
) == BCME_OK
) {
3481 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
3487 /* WMF processing for multicast packets */
3488 if (ifp
->wmf
.wmf_enable
&& (ETHER_ISMULTI(eh
->ether_dhost
))) {
3492 sta
= dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_shost
);
3493 ret
= dhd_wmf_packets_handle(dhdp
, pktbuf
, sta
, ifidx
, 1);
3496 /* The packet is taken by WMF. Continue to next iteration */
3499 /* Packet DROP decision by WMF. Toss it */
3500 DHD_ERROR(("%s: WMF decides to drop packet\n",
3502 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
3505 /* Continue the transmit path */
3509 #endif /* DHD_WMF */
3510 #ifdef DHDTCPACK_SUPPRESS
3511 dhd_tcpdata_info_get(dhdp
, pktbuf
);
3513 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
3515 ifp
= dhd
->iflist
[ifidx
];
3517 ifp
= dhd
->iflist
[0];
3520 skb
->dev
= ifp
->net
;
3522 #ifdef PCIE_FULL_DONGLE
3523 if ((DHD_IF_ROLE_AP(dhdp
, ifidx
) || DHD_IF_ROLE_P2PGO(dhdp
, ifidx
)) &&
3524 (!ifp
->ap_isolate
)) {
3525 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
3526 if (ETHER_ISUCAST(eh
->ether_dhost
)) {
3527 if (dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_dhost
)) {
3528 dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
3532 void *npktbuf
= PKTDUP(dhdp
->osh
, pktbuf
);
3533 dhd_sendpkt(dhdp
, ifidx
, npktbuf
);
3536 #endif /* PCIE_FULL_DONGLE */
3538 /* Get the protocol, maintain skb around eth_type_trans()
3539 * The main reason for this hack is for the limitation of
3540 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
3541 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
3542 * coping of the packet coming from the network stack to add
3543 * BDC, Hardware header etc, during network interface registration
3544 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
3545 * for BDC, Hardware header etc. and not just the ETH_HLEN
3550 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3551 dump_data
= skb
->data
;
3552 protocol
= (dump_data
[12] << 8) | dump_data
[13];
3553 if (protocol
== ETHER_TYPE_802_1X
) {
3554 DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
3555 "ver %d, type %d, replay %d\n",
3556 dump_data
[14], dump_data
[15],
3559 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3560 #ifdef DHD_DHCP_DUMP
3561 if (protocol
!= ETHER_TYPE_BRCM
&& protocol
== ETHER_TYPE_IP
) {
3565 uint16 udp_port_pos
;
3566 uint8
*ptr8
= (uint8
*)&dump_data
[ETHER_HDR_LEN
];
3567 uint8 ip_header_len
= (*ptr8
& 0x0f)<<2;
3569 udp_port_pos
= ETHER_HDR_LEN
+ ip_header_len
;
3570 source_port
= (dump_data
[udp_port_pos
] << 8) | dump_data
[udp_port_pos
+1];
3571 dest_port
= (dump_data
[udp_port_pos
+2] << 8) | dump_data
[udp_port_pos
+3];
3572 if (source_port
== 0x0044 || dest_port
== 0x0044) {
3573 dump_hex
= (dump_data
[udp_port_pos
+249] << 8) |
3574 dump_data
[udp_port_pos
+250];
3575 if (dump_hex
== 0x0101) {
3576 DHD_ERROR(("DHCP - DISCOVER [RX]\n"));
3577 } else if (dump_hex
== 0x0102) {
3578 DHD_ERROR(("DHCP - OFFER [RX]\n"));
3579 } else if (dump_hex
== 0x0103) {
3580 DHD_ERROR(("DHCP - REQUEST [RX]\n"));
3581 } else if (dump_hex
== 0x0105) {
3582 DHD_ERROR(("DHCP - ACK [RX]\n"));
3584 DHD_ERROR(("DHCP - 0x%X [RX]\n", dump_hex
));
3586 } else if (source_port
== 0x0043 || dest_port
== 0x0043) {
3587 DHD_ERROR(("DHCP - BOOTP [RX]\n"));
3590 #endif /* DHD_DHCP_DUMP */
3591 #if defined(DHD_RX_DUMP)
3592 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol
)));
3593 if (protocol
!= ETHER_TYPE_BRCM
) {
3594 if (dump_data
[0] == 0xFF) {
3595 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__
));
3597 if ((dump_data
[12] == 8) &&
3598 (dump_data
[13] == 6)) {
3599 DHD_ERROR(("%s: ARP %d\n",
3600 __FUNCTION__
, dump_data
[0x15]));
3602 } else if (dump_data
[0] & 1) {
3603 DHD_ERROR(("%s: MULTICAST: " MACDBG
"\n",
3604 __FUNCTION__
, MAC2STRDBG(dump_data
)));
3606 #ifdef DHD_RX_FULL_DUMP
3609 for (k
= 0; k
< skb
->len
; k
++) {
3610 DHD_ERROR(("%02X ", dump_data
[k
]));
3616 #endif /* DHD_RX_FULL_DUMP */
3618 #endif /* DHD_RX_DUMP */
3620 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
3622 if (skb
->pkt_type
== PACKET_MULTICAST
) {
3623 dhd
->pub
.rx_multicast
++;
3624 ifp
->stats
.multicast
++;
3631 dhd_htsf_addrxts(dhdp
, pktbuf
);
3633 /* Strip header, count, deliver upward */
3634 skb_pull(skb
, ETH_HLEN
);
3636 /* Process special event packets and then discard them */
3637 memset(&event
, 0, sizeof(event
));
3638 if (ntoh16(skb
->protocol
) == ETHER_TYPE_BRCM
) {
3639 dhd_wl_host_event(dhd
, &ifidx
,
3640 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
3641 skb_mac_header(skb
),
3644 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
3648 wl_event_to_host_order(&event
);
3649 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3651 tout_ctrl
= DHD_PACKET_TIMEOUT_MS
;
3652 #endif /* (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)) */
3654 if (event
.event_type
== WLC_E_BTA_HCI_EVENT
) {
3655 dhd_bta_doevt(dhdp
, data
, event
.datalen
);
3657 #endif /* WLBTAMP */
3659 #if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
3660 if (event
.event_type
== WLC_E_PFN_NET_FOUND
) {
3661 /* enforce custom wake lock to garantee that Kernel not suspended */
3662 tout_ctrl
= CUSTOM_PNO_EVENT_LOCK_xTIME
* DHD_PACKET_TIMEOUT_MS
;
3664 #endif /* PNO_SUPPORT */
3666 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
3667 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
3669 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
3671 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3672 tout_rx
= DHD_PACKET_TIMEOUT_MS
;
3673 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
3675 #ifdef PROP_TXSTATUS
3676 dhd_wlfc_save_rxpath_ac_time(dhdp
, (uint8
)PKTPRIO(skb
));
3677 #endif /* PROP_TXSTATUS */
3680 ASSERT(ifidx
< DHD_MAX_IFS
&& dhd
->iflist
[ifidx
]);
3681 ifp
= dhd
->iflist
[ifidx
];
3684 ifp
->net
->last_rx
= jiffies
;
3686 if (ntoh16(skb
->protocol
) != ETHER_TYPE_BRCM
) {
3687 dhdp
->dstats
.rx_bytes
+= skb
->len
;
3688 dhdp
->rx_packets
++; /* Local count */
3689 ifp
->stats
.rx_bytes
+= skb
->len
;
3690 ifp
->stats
.rx_packets
++;
3692 #if defined(DHD_TCP_WINSIZE_ADJUST)
3693 if (dhd_use_tcp_window_size_adjust
) {
3694 if (ifidx
== 0 && ntoh16(skb
->protocol
) == ETHER_TYPE_IP
) {
3695 dhd_adjust_tcp_winsize(dhdp
->op_mode
, skb
);
3698 #endif /* DHD_TCP_WINSIZE_ADJUST */
3700 if (in_interrupt()) {
3701 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
3702 __FUNCTION__
, __LINE__
);
3705 if (dhd
->rxthread_enabled
) {
3709 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
3713 /* If the receive is not processed inside an ISR,
3714 * the softirqd must be woken explicitly to service
3715 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3716 * by netif_rx_ni(), but in earlier kernels, we need
3717 * to do it manually.
3719 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
3720 __FUNCTION__
, __LINE__
);
3721 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3726 local_irq_save(flags
);
3728 local_irq_restore(flags
);
3729 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3734 if (dhd
->rxthread_enabled
&& skbhead
)
3735 dhd_sched_rxf(dhdp
, skbhead
);
3737 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
3738 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp
, tout_rx
);
3739 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp
, tout_ctrl
);
3740 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
/*
 * dhd_event() - per-OS hook invoked when a dongle event packet arrives.
 * On Linux this is an intentional no-op (see the comment below); the real
 * event dispatch happens elsewhere in the Linux path.
 * NOTE(review): this chunk is whitespace-mangled and brace/return lines
 * appear to have been dropped by extraction — verify against original file.
 */
3744 dhd_event(struct dhd_info
*dhd
, char *evpkt
, int evlen
, int ifidx
)
3746 /* Linux version has nothing to do */
/*
 * dhd_txcomplete() - called when a transmitted packet (txp) completes on the
 * bus. Pulls the protocol header, decrements the pending-802.1X counter for
 * EAPOL frames, generates a local BT-AMP HCI tx-completion event for
 * LLC/SNAP BT frames, and (under PROP_TXSTATUS) updates per-interface
 * tx_packets/tx_bytes or tx_dropped stats based on @success.
 * NOTE(review): mangled extraction — brace-only lines and the "success"
 * branch structure around 3789-3795 are not fully visible; verify.
 */
3751 dhd_txcomplete(dhd_pub_t
*dhdp
, void *txp
, bool success
)
3753 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
3754 struct ether_header
*eh
;
/* Strip the protocol header so eh points at the ethernet header. */
3760 dhd_prot_hdrpull(dhdp
, NULL
, txp
, NULL
, NULL
);
3762 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, txp
);
3763 type
= ntoh16(eh
->ether_type
);
/* 802.1X (EAPOL) completion: one fewer pending 4-way-handshake frame. */
3765 if (type
== ETHER_TYPE_802_1X
)
3766 atomic_dec(&dhd
->pend_8021x_cnt
);
3769 /* Crack open the packet and check to see if it is BT HCI ACL data packet.
3770 * If yes generate packet completion event.
3772 len
= PKTLEN(dhdp
->osh
, txp
);
3774 /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
3775 if ((type
< ETHER_TYPE_MIN
) && (len
>= RFC1042_HDR_LEN
)) {
3776 struct dot11_llc_snap_header
*lsh
= (struct dot11_llc_snap_header
*)&eh
[1];
3778 if (bcmp(lsh
, BT_SIG_SNAP_MPROT
, DOT11_LLC_SNAP_HDR_LEN
- 2) == 0 &&
3779 ntoh16(lsh
->type
) == BTA_PROT_L2CAP
) {
3781 dhd_bta_tx_hcidata_complete(dhdp
, txp
, success
);
3784 #endif /* WLBTAMP */
3785 #ifdef PROP_TXSTATUS
/* With wl flow control active, account tx stats here (host-side). */
3786 if (dhdp
->wlfc_state
&& (dhdp
->proptxstatus_mode
!= WLFC_FCMODE_NONE
)) {
3787 dhd_if_t
*ifp
= dhd
->iflist
[DHD_PKTTAG_IF(PKTTAG(txp
))];
3788 uint datalen
= PKTLEN(dhd
->pub
.osh
, txp
);
3791 dhd
->pub
.tx_packets
++;
3792 ifp
->stats
.tx_packets
++;
3793 ifp
->stats
.tx_bytes
+= datalen
;
/* presumably the !success branch — TODO confirm against original source */
3795 ifp
->stats
.tx_dropped
++;
/*
 * dhd_get_stats() - net_device_ops .ndo_get_stats callback.
 * Maps @net to its DHD interface index; on DHD_BAD_IF zeroes net->stats,
 * otherwise refreshes dongle statistics via dhd_prot_dstats().
 * NOTE(review): the return statements are not visible in this mangled
 * extraction; presumably returns &net->stats or &ifp->stats — confirm.
 */
3801 static struct net_device_stats
*
3802 dhd_get_stats(struct net_device
*net
)
3804 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
3808 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3810 ifidx
= dhd_net2idx(dhd
, net
);
3811 if (ifidx
== DHD_BAD_IF
) {
3812 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__
));
/* Unknown interface: report all-zero stats rather than stale data. */
3814 memset(&net
->stats
, 0, sizeof(net
->stats
));
3818 ifp
= dhd
->iflist
[ifidx
];
3822 /* Use the protocol to get dongle stats */
3823 dhd_prot_dstats(&dhd
->pub
);
/*
 * dhd_watchdog_thread() - dedicated kernel thread form of the driver
 * watchdog. Optionally raises itself to SCHED_FIFO (dhd_watchdog_prio),
 * then loops waiting on tsk->sema; each wakeup runs dhd_bus_watchdog()
 * (unless the dongle is in reset) and re-arms dhd->timer compensating for
 * the time already spent (jiffies - jiffies_at_start). Exits through
 * complete_and_exit() when tsk->terminated is set.
 * NOTE(review): loop/brace lines are missing from this mangled extraction,
 * and "¶m" below is mojibake for "&param" — verify against the original.
 */
3830 dhd_watchdog_thread(void *data
)
3832 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
3833 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
3834 /* This thread doesn't need any user-level access,
3835 * so get rid of all our resources
3837 if (dhd_watchdog_prio
> 0) {
3838 struct sched_param param
;
3839 param
.sched_priority
= (dhd_watchdog_prio
< MAX_RT_PRIO
)?
3840 dhd_watchdog_prio
:(MAX_RT_PRIO
-1);
3841 setScheduler(current
, SCHED_FIFO
, ¶m
);
/* Block until dhd_watchdog() (timer callback) ups the semaphore. */
3845 if (down_interruptible (&tsk
->sema
) == 0) {
3846 unsigned long flags
;
3847 unsigned long jiffies_at_start
= jiffies
;
3848 unsigned long time_lapse
;
3850 SMP_RD_BARRIER_DEPENDS();
3851 if (tsk
->terminated
) {
3855 if (dhd
->pub
.dongle_reset
== FALSE
) {
3856 DHD_TIMER(("%s:\n", __FUNCTION__
));
3858 /* Call the bus module watchdog */
3859 dhd_bus_watchdog(&dhd
->pub
);
3862 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
3863 /* Count the tick for reference */
3865 time_lapse
= jiffies
- jiffies_at_start
;
3867 /* Reschedule the watchdog */
3868 if (dhd
->wd_timer_valid
)
3869 mod_timer(&dhd
->timer
,
/* Subtract time already consumed so the period stays ~dhd_watchdog_ms. */
3871 msecs_to_jiffies(dhd_watchdog_ms
) -
3872 min(msecs_to_jiffies(dhd_watchdog_ms
), time_lapse
));
3873 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
3879 complete_and_exit(&tsk
->completed
, 0);
/*
 * dhd_watchdog() - timer callback for dhd->timer. If a watchdog thread
 * exists (thr_wdt_ctl.thr_pid >= 0) it just ups the thread's semaphore and
 * lets the thread do the work; otherwise it runs dhd_bus_watchdog() inline
 * and re-arms the timer for jiffies + dhd_watchdog_ms. Does nothing while
 * the dongle is in reset.
 * NOTE(review): return/brace lines are missing from this mangled extraction.
 */
3882 static void dhd_watchdog(ulong data
)
3884 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
3885 unsigned long flags
;
3887 if (dhd
->pub
.dongle_reset
) {
/* Delegate to the dedicated watchdog thread when one is running. */
3891 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
3892 up(&dhd
->thr_wdt_ctl
.sema
);
3896 /* Call the bus module watchdog */
3897 dhd_bus_watchdog(&dhd
->pub
);
3899 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
3900 /* Count the tick for reference */
3903 /* Reschedule the watchdog */
3904 if (dhd
->wd_timer_valid
)
3905 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
3906 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
3910 #ifdef ENABLE_ADAPTIVE_SCHED
/*
 * dhd_sched_policy() - adaptive scheduling: when CPU0's frequency is at or
 * below CUSTOM_CPUFREQ_THRESH, drop the current task to SCHED_NORMAL;
 * otherwise ensure it runs SCHED_FIFO at min(prio, MAX_RT_PRIO-1).
 * NOTE(review): "¶m" is mojibake for "&param" in this extraction.
 */
3912 dhd_sched_policy(int prio
)
3914 struct sched_param param
;
3915 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH
) {
3916 param
.sched_priority
= 0;
3917 setScheduler(current
, SCHED_NORMAL
, ¶m
);
3919 if (get_scheduler_policy(current
) != SCHED_FIFO
) {
3920 param
.sched_priority
= (prio
< MAX_RT_PRIO
)? prio
: (MAX_RT_PRIO
-1);
3921 setScheduler(current
, SCHED_FIFO
, ¶m
);
3925 #endif /* ENABLE_ADAPTIVE_SCHED */
3926 #ifdef DEBUG_CPU_FREQ
/*
 * dhd_cpufreq_notifier() - cpufreq transition notifier (debug build only).
 * On CPUFREQ_POSTCHANGE, logs the new frequency and records it in the
 * per-CPU dhd->new_freq slot for the affected CPU.
 */
3927 static int dhd_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
, void *data
)
3929 dhd_info_t
*dhd
= container_of(nb
, struct dhd_info
, freq_trans
);
3930 struct cpufreq_freqs
*freq
= data
;
3934 if (val
== CPUFREQ_POSTCHANGE
) {
3935 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3936 freq
->new, freq
->cpu
));
3937 *per_cpu_ptr(dhd
->new_freq
, freq
->cpu
) = freq
->new;
3943 #endif /* DEBUG_CPU_FREQ */
/*
 * dhd_dpc_thread() - kernel-thread form of the DPC (deferred procedure
 * call) bottom half. Optionally sets SCHED_FIFO priority and pins itself
 * to a configured CPU core, then loops on binary_sema_down(tsk): while the
 * bus is up it extends the watchdog timer and drains dhd_bus_dpc() until
 * no more work; if the bus is down it performs a clean dhd_bus_stop().
 * Each pass releases the wake lock taken by dhd_sched_dpc(). Exits via
 * complete_and_exit() when tsk->terminated is set.
 * NOTE(review): loop/brace lines dropped by extraction; "¶m" is
 * mojibake for "&param" — verify against the original source.
 */
3945 dhd_dpc_thread(void *data
)
3947 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
3948 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
3950 /* This thread doesn't need any user-level access,
3951 * so get rid of all our resources
3953 if (dhd_dpc_prio
> 0)
3955 struct sched_param param
;
3956 param
.sched_priority
= (dhd_dpc_prio
< MAX_RT_PRIO
)?dhd_dpc_prio
:(MAX_RT_PRIO
-1);
3957 setScheduler(current
, SCHED_FIFO
, ¶m
);
3960 #ifdef CUSTOM_DPC_CPUCORE
3961 set_cpus_allowed_ptr(current
, cpumask_of(CUSTOM_DPC_CPUCORE
));
/* Alternative CPU pinning taken from the module config file. */
3963 if (dhd
->pub
.conf
->dpc_cpucore
>= 0) {
3964 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__
, dhd
->pub
.conf
->dpc_cpucore
);
3965 set_cpus_allowed_ptr(current
, cpumask_of(dhd
->pub
.conf
->dpc_cpucore
));
3968 #ifdef CUSTOM_SET_CPUCORE
3969 dhd
->pub
.current_dpc
= current
;
3970 #endif /* CUSTOM_SET_CPUCORE */
3971 /* Run until signal received */
3973 if (!binary_sema_down(tsk
)) {
3974 #ifdef ENABLE_ADAPTIVE_SCHED
3975 dhd_sched_policy(dhd_dpc_prio
);
3976 #endif /* ENABLE_ADAPTIVE_SCHED */
3977 SMP_RD_BARRIER_DEPENDS();
3978 if (tsk
->terminated
) {
3982 /* Call bus dpc unless it indicated down (then clean stop) */
3983 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
3984 dhd_os_wd_timer_extend(&dhd
->pub
, TRUE
);
3985 while (dhd_bus_dpc(dhd
->pub
.bus
)) {
3986 /* process all data */
3988 dhd_os_wd_timer_extend(&dhd
->pub
, FALSE
);
3989 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
/* Bus reported down: stop it cleanly and drop the wake lock. */
3993 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
3994 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4000 complete_and_exit(&tsk
->completed
, 0);
/*
 * dhd_rxf_thread() - kernel thread that drains the rx-frame queue filled by
 * dhd_sched_rxf(). Optionally runs SCHED_FIFO (dhd_rxf_prio) and records
 * itself as pub.current_rxf. Each wakeup (tsk->sema) dequeues skbs via
 * dhd_rxf_dequeue(), unlinks each skb from its chain, and hands it to the
 * network stack (netif_rx path not visible in this extraction), then drops
 * the wake lock. Exits via complete_and_exit() on tsk->terminated.
 * NOTE(review): the netif_rx/netif_rx_ni call lines and loop braces were
 * dropped by extraction; "¶m" is mojibake for "&param" — verify.
 */
4004 dhd_rxf_thread(void *data
)
4006 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
4007 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
4008 #if defined(WAIT_DEQUEUE)
4009 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
4010 ulong watchdogTime
= OSL_SYSUPTIME(); /* msec */
4012 dhd_pub_t
*pub
= &dhd
->pub
;
4014 /* This thread doesn't need any user-level access,
4015 * so get rid of all our resources
4017 if (dhd_rxf_prio
> 0)
4019 struct sched_param param
;
4020 param
.sched_priority
= (dhd_rxf_prio
< MAX_RT_PRIO
)?dhd_rxf_prio
:(MAX_RT_PRIO
-1);
4021 setScheduler(current
, SCHED_FIFO
, ¶m
);
4024 DAEMONIZE("dhd_rxf");
4025 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
4027 /* signal: thread has started */
4028 complete(&tsk
->completed
);
4029 #ifdef CUSTOM_SET_CPUCORE
4030 dhd
->pub
.current_rxf
= current
;
4031 #endif /* CUSTOM_SET_CPUCORE */
4032 /* Run until signal received */
4034 if (down_interruptible(&tsk
->sema
) == 0) {
4036 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
4039 #ifdef ENABLE_ADAPTIVE_SCHED
4040 dhd_sched_policy(dhd_rxf_prio
);
4041 #endif /* ENABLE_ADAPTIVE_SCHED */
4043 SMP_RD_BARRIER_DEPENDS();
4045 if (tsk
->terminated
) {
4048 skb
= dhd_rxf_dequeue(pub
);
/* Detach skb from its chain before handing it to the stack. */
4054 void *skbnext
= PKTNEXT(pub
->osh
, skb
);
4055 PKTSETNEXT(pub
->osh
, skb
, NULL
);
4056 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
4057 __FUNCTION__
, __LINE__
);
4058 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
4062 local_irq_save(flags
);
4064 local_irq_restore(flags
);
4069 #if defined(WAIT_DEQUEUE)
4070 if (OSL_SYSUPTIME() - watchdogTime
> RXF_WATCHDOG_TIME
) {
4072 watchdogTime
= OSL_SYSUPTIME();
4076 DHD_OS_WAKE_UNLOCK(pub
);
4081 complete_and_exit(&tsk
->completed
, 0);
/*
 * dhd_dpc_kill() - (PCIe build) kill the DPC tasklet so no further bottom-
 * half processing runs; logs once disabled.
 * NOTE(review): lines 4086-4096 (dhd lookup / early-return) were dropped by
 * the extraction — verify against the original source.
 */
4085 void dhd_dpc_kill(dhd_pub_t
*dhdp
)
4097 tasklet_kill(&dhd
->tasklet
);
4098 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__
));
4100 #endif /* BCMPCIE */
/*
 * Tasklet form of the DPC bottom half (the function signature line,
 * presumably "static void dhd_dpc(ulong data)", was dropped by the
 * extraction — TODO confirm). While the bus is up, runs one dhd_bus_dpc()
 * pass and reschedules itself if more work remains, otherwise releases the
 * wake lock; if the bus is down, stops it cleanly and releases the lock.
 */
4107 dhd
= (dhd_info_t
*)data
;
4109 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
4110 * down below , wake lock is set,
4111 * the tasklet is initialized in dhd_attach()
4113 /* Call bus dpc unless it indicated down (then clean stop) */
4114 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
4115 if (dhd_bus_dpc(dhd
->pub
.bus
))
4116 tasklet_schedule(&dhd
->tasklet
);
4118 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4120 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
4121 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
/*
 * dhd_sched_dpc() - schedule the DPC bottom half. Takes the OS wake lock,
 * then either wakes the DPC thread via binary_sema_up() (dropping the wake
 * lock again if the semaphore was already up, so locks stay balanced) or,
 * when no DPC thread exists, schedules the DPC tasklet.
 */
4126 dhd_sched_dpc(dhd_pub_t
*dhdp
)
4128 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
4130 DHD_OS_WAKE_LOCK(dhdp
);
4131 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
4132 /* If the semaphore does not get up,
4133 * wake unlock should be done here
4135 if (!binary_sema_up(&dhd
->thr_dpc_ctl
))
4136 DHD_OS_WAKE_UNLOCK(dhdp
);
4139 tasklet_schedule(&dhd
->tasklet
);
4142 #endif /* BCMDBUS */
/*
 * dhd_sched_rxf() - hand a received skb to the rx-frame thread. Takes the
 * OS wake lock and enqueues via dhd_rxf_enqueue(), then ups the rxf
 * thread's semaphore. With RXF_DEQUEUE_ON_BUSY, retries while the queue is
 * busy (sleeping 50 ms between attempts) and falls back to pushing the skb
 * chain to the kernel backlog when retries are exhausted.
 * NOTE(review): loop braces and the fallback netif path were dropped by
 * extraction — verify against the original source.
 */
4145 dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
)
4147 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
4148 #ifdef RXF_DEQUEUE_ON_BUSY
4151 #endif /* RXF_DEQUEUE_ON_BUSY */
4153 DHD_OS_WAKE_LOCK(dhdp
);
4155 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
4156 #ifdef RXF_DEQUEUE_ON_BUSY
4158 ret
= dhd_rxf_enqueue(dhdp
, skb
);
4159 if (ret
== BCME_OK
|| ret
== BCME_ERROR
)
4162 OSL_SLEEP(50); /* waiting for dequeueing */
4163 } while (retry
-- > 0);
/* Queue still busy after all retries: bypass the rxf thread entirely. */
4165 if (retry
<= 0 && ret
== BCME_BUSY
) {
4169 void *skbnext
= PKTNEXT(dhdp
->osh
, skbp
);
4170 PKTSETNEXT(dhdp
->osh
, skbp
, NULL
);
4174 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
4177 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
4178 up(&dhd
->thr_rxf_ctl
.sema
);
4181 #else /* RXF_DEQUEUE_ON_BUSY */
4183 if (dhd_rxf_enqueue(dhdp
, skb
) == BCME_OK
)
4186 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
4187 up(&dhd
->thr_rxf_ctl
.sema
);
4190 #endif /* RXF_DEQUEUE_ON_BUSY */
4194 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
/*
 * dhd_toe_get() - issue the "toe_ol" GET iovar on interface @ifidx and copy
 * the resulting 32-bit enable bitmap into *toe_ol. A failing ioctl is
 * treated as "TOE unsupported" (older dongle images) vs. a generic error.
 * NOTE(review): declarations of ioc/buf/ret and the return paths were
 * dropped by the extraction — verify against the original source.
 */
4196 dhd_toe_get(dhd_info_t
*dhd
, int ifidx
, uint32
*toe_ol
)
4202 memset(&ioc
, 0, sizeof(ioc
));
4204 ioc
.cmd
= WLC_GET_VAR
;
4206 ioc
.len
= (uint
)sizeof(buf
);
/* iovar name goes at the start of the buffer; result overwrites it. */
4209 strncpy(buf
, "toe_ol", sizeof(buf
) - 1);
4210 buf
[sizeof(buf
) - 1] = '\0';
4211 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
4212 /* Check for older dongle image that doesn't support toe_ol */
4214 DHD_ERROR(("%s: toe not supported by device\n",
4215 dhd_ifname(&dhd
->pub
, ifidx
)));
4219 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
4223 memcpy(toe_ol
, buf
, sizeof(uint32
));
4227 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
/*
 * dhd_toe_set() - write @toe_ol to the "toe_ol" iovar on interface @ifidx,
 * then set the global "toe" enable iovar to 1 iff any component bit is set.
 * NOTE(review): declarations (ioc/buf/ret/toe), the second strncpy of the
 * "toe" name, and return paths were dropped by the extraction — verify.
 */
4229 dhd_toe_set(dhd_info_t
*dhd
, int ifidx
, uint32 toe_ol
)
4235 memset(&ioc
, 0, sizeof(ioc
));
4237 ioc
.cmd
= WLC_SET_VAR
;
4239 ioc
.len
= (uint
)sizeof(buf
);
4242 /* Set toe_ol as requested */
4244 strncpy(buf
, "toe_ol", sizeof(buf
) - 1);
4245 buf
[sizeof(buf
) - 1] = '\0';
/* iovar payload follows the NUL-terminated name in the buffer. */
4246 memcpy(&buf
[sizeof("toe_ol")], &toe_ol
, sizeof(uint32
));
4248 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
4249 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
4250 dhd_ifname(&dhd
->pub
, ifidx
), ret
));
4254 /* Enable toe globally only if any components are enabled. */
4256 toe
= (toe_ol
!= 0);
4259 memcpy(&buf
[sizeof("toe")], &toe
, sizeof(uint32
));
4261 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
4262 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
4270 #if defined(WL_CFG80211)
/*
 * dhd_set_scb_probe() - in non-HostAP modes, read the dongle's "scb_probe"
 * iovar, force scb_max_probe to NUM_SCB_MAX_PROBE (3), and write it back.
 * GET/SET failures are logged but not fatal.
 */
4271 void dhd_set_scb_probe(dhd_pub_t
*dhd
)
4273 #define NUM_SCB_MAX_PROBE 3
4275 wl_scb_probe_t scb_probe
;
4276 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12];
4278 memset(&scb_probe
, 0, sizeof(wl_scb_probe_t
));
/* presumably an early return for HostAP mode — body line not visible */
4280 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
)
4283 bcm_mkiovar("scb_probe", NULL
, 0, iovbuf
, sizeof(iovbuf
));
4285 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
4286 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__
));
4288 memcpy(&scb_probe
, iovbuf
, sizeof(wl_scb_probe_t
));
4290 scb_probe
.scb_max_probe
= NUM_SCB_MAX_PROBE
;
4292 bcm_mkiovar("scb_probe", (char *)&scb_probe
,
4293 sizeof(wl_scb_probe_t
), iovbuf
, sizeof(iovbuf
));
4294 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
4295 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__
));
4296 #undef NUM_SCB_MAX_PROBE
4299 #endif /* WL_CFG80211 */
4301 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/*
 * dhd_ethtool_get_drvinfo() - ethtool_ops .get_drvinfo callback: reports
 * driver name "wl" and the numeric driver version. Registered below in
 * dhd_ethtool_ops.
 */
4303 dhd_ethtool_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*info
)
4305 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4307 snprintf(info
->driver
, sizeof(info
->driver
), "wl");
4308 snprintf(info
->version
, sizeof(info
->version
), "%lu", dhd
->pub
.drv_version
);
/* ethtool_ops table exposing only get_drvinfo. */
4311 struct ethtool_ops dhd_ethtool_ops
= {
4312 .get_drvinfo
= dhd_ethtool_get_drvinfo
4314 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
4317 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * dhd_ethtool() - legacy SIOCETHTOOL handler. Reads the ethtool command
 * word from user space (@uaddr) and dispatches:
 *  - ETHTOOL_GDRVINFO: report driver name ("dhd" when queried with "?dhd",
 *    else "wl"/"xx" depending on dongle type) and version, copied back to
 *    user space.
 *  - ETHTOOL_GRXCSUM/GTXCSUM: read TOE bitmap via dhd_toe_get() and report
 *    whether the requested checksum-offload direction is enabled.
 *  - ETHTOOL_SRXCSUM/STXCSUM: read-modify-write the TOE bitmap via
 *    dhd_toe_get()/dhd_toe_set(); for TX also toggles NETIF_F_IP_CSUM on
 *    the primary net_device.
 * NOTE(review): switch/return/brace lines were dropped by the extraction
 * (e.g. the enclosing switch(cmd) and -EFAULT returns) — verify.
 */
4319 dhd_ethtool(dhd_info_t
*dhd
, void *uaddr
)
4321 struct ethtool_drvinfo info
;
4322 char drvname
[sizeof(info
.driver
)];
4325 struct ethtool_value edata
;
4326 uint32 toe_cmpnt
, csum_dir
;
4330 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4332 /* all ethtool calls start with a cmd word */
4333 if (copy_from_user(&cmd
, uaddr
, sizeof (uint32
)))
4337 case ETHTOOL_GDRVINFO
:
4338 /* Copy out any request driver name */
4339 if (copy_from_user(&info
, uaddr
, sizeof(info
)))
4341 strncpy(drvname
, info
.driver
, sizeof(info
.driver
));
4342 drvname
[sizeof(info
.driver
)-1] = '\0';
4344 /* clear struct for return */
4345 memset(&info
, 0, sizeof(info
));
4348 /* if dhd requested, identify ourselves */
4349 if (strcmp(drvname
, "?dhd") == 0) {
4350 snprintf(info
.driver
, sizeof(info
.driver
), "dhd");
4351 strncpy(info
.version
, EPI_VERSION_STR
, sizeof(info
.version
) - 1);
4352 info
.version
[sizeof(info
.version
) - 1] = '\0';
4355 /* otherwise, require dongle to be up */
4356 else if (!dhd
->pub
.up
) {
4357 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__
));
4361 /* finally, report dongle driver type */
4362 else if (dhd
->pub
.iswl
)
4363 snprintf(info
.driver
, sizeof(info
.driver
), "wl");
4365 snprintf(info
.driver
, sizeof(info
.driver
), "xx");
4367 snprintf(info
.version
, sizeof(info
.version
), "%lu", dhd
->pub
.drv_version
);
4368 if (copy_to_user(uaddr
, &info
, sizeof(info
)))
4370 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__
,
4371 (int)sizeof(drvname
), drvname
, info
.driver
));
4375 /* Get toe offload components from dongle */
4376 case ETHTOOL_GRXCSUM
:
4377 case ETHTOOL_GTXCSUM
:
4378 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
4381 csum_dir
= (cmd
== ETHTOOL_GTXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
4384 edata
.data
= (toe_cmpnt
& csum_dir
) ? 1 : 0;
4386 if (copy_to_user(uaddr
, &edata
, sizeof(edata
)))
4390 /* Set toe offload components in dongle */
4391 case ETHTOOL_SRXCSUM
:
4392 case ETHTOOL_STXCSUM
:
4393 if (copy_from_user(&edata
, uaddr
, sizeof(edata
)))
4396 /* Read the current settings, update and write back */
4397 if ((ret
= dhd_toe_get(dhd
, 0, &toe_cmpnt
)) < 0)
4400 csum_dir
= (cmd
== ETHTOOL_STXCSUM
) ? TOE_TX_CSUM_OL
: TOE_RX_CSUM_OL
;
4402 if (edata
.data
!= 0)
4403 toe_cmpnt
|= csum_dir
;
4405 toe_cmpnt
&= ~csum_dir
;
4407 if ((ret
= dhd_toe_set(dhd
, 0, toe_cmpnt
)) < 0)
4410 /* If setting TX checksum mode, tell Linux the new mode */
4411 if (cmd
== ETHTOOL_STXCSUM
) {
4413 dhd
->iflist
[0]->net
->features
|= NETIF_F_IP_CSUM
;
4415 dhd
->iflist
[0]->net
->features
&= ~NETIF_F_IP_CSUM
;
4427 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/*
 * dhd_check_hang() - (Android builds) decide whether an ioctl error means
 * the dongle is hung. On -ETIMEDOUT / -EREMOTEIO (with a board-specific
 * revision check on UNIVERSAL5433), or when the bus is down without a
 * deliberate dongle_reset, sends a HANG event up via
 * net_os_send_hang_message(). Skipped while the DPC thread is gone
 * (driver unloading). Non-Android builds compile this out.
 * NOTE(review): the rxcnt/txcnt timeout clause visible in the log message
 * and the return statements were dropped by the extraction — verify.
 */
4429 static bool dhd_check_hang(struct net_device
*net
, dhd_pub_t
*dhdp
, int error
)
4431 #if defined(OEM_ANDROID)
4435 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
4442 dhd
= (dhd_info_t
*)dhdp
->info
;
4443 #if (!defined(BCMDBUS) && !defined(BCMPCIE))
4444 if (dhd
->thr_dpc_ctl
.thr_pid
< 0) {
4445 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__
));
4448 #endif /* BCMDBUS */
4450 #ifdef CONFIG_MACH_UNIVERSAL5433
4451 /* old revision does not send hang message */
4452 if ((check_rev() && (error
== -ETIMEDOUT
)) || (error
== -EREMOTEIO
) ||
4454 if ((error
== -ETIMEDOUT
) || (error
== -EREMOTEIO
) ||
4455 #endif /* CONFIG_MACH_UNIVERSAL5433 */
4456 ((dhdp
->busstate
== DHD_BUS_DOWN
) && (!dhdp
->dongle_reset
))) {
4457 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__
,
4458 dhdp
->rxcnt_timeout
, dhdp
->txcnt_timeout
, error
, dhdp
->busstate
));
4459 net_os_send_hang_message(net
);
4462 #endif /* OEM_ANDROID */
/*
 * dhd_ioctl_process() - central ioctl dispatcher shared by the char-dev and
 * net-dev entry points. Handles, in order:
 *  1. DHD-local ioctls (ioc->driver == DHD_IOCTL_MAGIC) via dhd_ioctl().
 *  2. Bus-state gating: if the bus is not in DHD_BUS_DATA, either tries
 *     dhd_bus_start() (non-Android) or fails with BCME_DONGLE_DOWN.
 *  3. Serialization: drains pending 802.1X frames before key-setting and
 *     disassoc commands (see the comment block at 4512-4516).
 *  4. WLMEDIA_HTSF debug shortcuts ("htsf", "htsflate", "htsfclear",
 *     "htsfhis", "tsport") that manipulate the timestamp histograms.
 *  5. BCM_FD_AGGR "rpc_*" iovars via dhd_fdaggr_ioctl(), else the normal
 *     dongle path via dhd_wl_ioctl(); on Android the result is fed to
 *     dhd_check_hang().
 * Returns a BCME_* error code (also latched into pub->bcmerror).
 * NOTE(review): early-return/brace lines were dropped by the extraction
 * (e.g. the NULL-net check around 4472-4474) — verify against original.
 */
4466 int dhd_ioctl_process(dhd_pub_t
*pub
, int ifidx
, dhd_ioctl_t
*ioc
, void *data_buf
)
4468 int bcmerror
= BCME_OK
;
4470 struct net_device
*net
;
4472 net
= dhd_idx2net(pub
, ifidx
);
4474 bcmerror
= BCME_BADARG
;
4479 buflen
= MIN(ioc
->len
, DHD_IOCTL_MAXLEN
);
4481 /* check for local dhd ioctl and handle it */
4482 if (ioc
->driver
== DHD_IOCTL_MAGIC
) {
4483 bcmerror
= dhd_ioctl((void *)pub
, ioc
, data_buf
, buflen
);
4485 pub
->bcmerror
= bcmerror
;
4490 /* send to dongle (must be up, and wl). */
4491 if (pub
->busstate
!= DHD_BUS_DATA
) {
4492 #if !defined(OEM_ANDROID)
4493 int ret
= dhd_bus_start(pub
);
4495 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
4496 bcmerror
= BCME_DONGLE_DOWN
;
4500 bcmerror
= BCME_DONGLE_DOWN
;
4506 bcmerror
= BCME_DONGLE_DOWN
;
4509 #endif /* BCMDBUS */
4512 * Flush the TX queue if required for proper message serialization:
4513 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
4514 * prevent M4 encryption and
4515 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
4516 * prevent disassoc frame being sent before WPS-DONE frame.
4518 if (ioc
->cmd
== WLC_SET_KEY
||
4519 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
4520 strncmp("wsec_key", data_buf
, 9) == 0) ||
4521 (ioc
->cmd
== WLC_SET_VAR
&& data_buf
!= NULL
&&
4522 strncmp("bsscfg:wsec_key", data_buf
, 15) == 0) ||
4523 ioc
->cmd
== WLC_DISASSOC
)
4524 dhd_wait_pend8021x(net
);
4528 /* short cut wl ioctl calls here */
4529 if (strcmp("htsf", data_buf
) == 0) {
4530 dhd_ioctl_htsf_get(dhd
, 0);
4534 if (strcmp("htsflate", data_buf
) == 0) {
4536 memset(ts
, 0, sizeof(tstamp_t
)*TSMAX
);
4537 memset(&maxdelayts
, 0, sizeof(tstamp_t
));
4541 memset(&vi_d1
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4542 memset(&vi_d2
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4543 memset(&vi_d3
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4544 memset(&vi_d4
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4550 if (strcmp("htsfclear", data_buf
) == 0) {
4551 memset(&vi_d1
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4552 memset(&vi_d2
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4553 memset(&vi_d3
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4554 memset(&vi_d4
.bin
, 0, sizeof(uint32
)*NUMBIN
);
4558 if (strcmp("htsfhis", data_buf
) == 0) {
4559 dhd_dump_htsfhisto(&vi_d1
, "H to D");
4560 dhd_dump_htsfhisto(&vi_d2
, "D to D");
4561 dhd_dump_htsfhisto(&vi_d3
, "D to H");
4562 dhd_dump_htsfhisto(&vi_d4
, "H to H");
4565 if (strcmp("tsport", data_buf
) == 0) {
4567 memcpy(&tsport
, data_buf
+ 7, 4);
4569 DHD_ERROR(("current timestamp port: %d \n", tsport
));
4574 #endif /* WLMEDIA_HTSF */
4576 if ((ioc
->cmd
== WLC_SET_VAR
|| ioc
->cmd
== WLC_GET_VAR
) &&
4577 data_buf
!= NULL
&& strncmp("rpc_", data_buf
, 4) == 0) {
4579 bcmerror
= dhd_fdaggr_ioctl(pub
, ifidx
, (wl_ioctl_t
*)ioc
, data_buf
, buflen
);
4581 bcmerror
= BCME_UNSUPPORTED
;
4585 bcmerror
= dhd_wl_ioctl(pub
, ifidx
, (wl_ioctl_t
*)ioc
, data_buf
, buflen
);
4588 #if defined(OEM_ANDROID)
4589 dhd_check_hang(net
, pub
, bcmerror
);
4590 #endif /* OEM_ANDROID */
/*
 * dhd_ioctl_entry() - net_device_ops .ndo_do_ioctl handler. Under the OS
 * wake lock and PERIM lock it:
 *  - (Android) rejects calls while the interface is down or after a HANG
 *    was already sent up (returns BCME_DONGLE_DOWN);
 *  - resolves the interface index and bails out on DHD_BAD_IF;
 *  - routes wireless-extensions ioctls to wl_iw_ioctl(), SIOCETHTOOL to
 *    dhd_ethtool(), and SIOCDEVPRIVATE+1 to wl_android_priv_cmd();
 *  - for SIOCDEVPRIVATE, copies the wl_ioctl_t control structure from user
 *    space (with a 32-bit compat translation under CONFIG_COMPAT), reads 4
 *    extra bytes to distinguish wl vs dhd ioctls, enforces CAP_NET_ADMIN,
 *    bounces the payload through a MALLOC'd NUL-terminated local buffer,
 *    dispatches via dhd_ioctl_process(), and copies the result back.
 * The PERIM lock is dropped around copy_from_user/copy_to_user since those
 * may sleep/fault. Returns OSL_ERROR(bcmerror).
 * NOTE(review): several brace/goto/return lines were dropped by the
 * extraction (e.g. the "done:" label and -EFAULT paths) — verify.
 */
4596 dhd_ioctl_entry(struct net_device
*net
, struct ifreq
*ifr
, int cmd
)
4598 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4603 void *local_buf
= NULL
;
4606 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4607 DHD_PERIM_LOCK(&dhd
->pub
);
4609 #if defined(OEM_ANDROID)
4610 /* Interface up check for built-in type */
4611 if (!dhd_download_fw_on_driverload
&& dhd
->pub
.up
== 0) {
4612 DHD_ERROR(("%s: Interface is down \n", __FUNCTION__
));
4613 DHD_PERIM_UNLOCK(&dhd
->pub
);
4614 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4618 /* send to dongle only if we are not waiting for reload already */
4619 if (dhd
->pub
.hang_was_sent
) {
4620 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__
));
4621 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd
->pub
, DHD_EVENT_TIMEOUT_MS
);
4622 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4623 return OSL_ERROR(BCME_DONGLE_DOWN
);
4625 #endif /* (OEM_ANDROID) */
4627 ifidx
= dhd_net2idx(dhd
, net
);
4628 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__
, ifidx
, cmd
));
4630 if (ifidx
== DHD_BAD_IF
) {
4631 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__
));
4632 DHD_PERIM_UNLOCK(&dhd
->pub
);
4633 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4637 #if defined(WL_WIRELESS_EXT)
4638 /* linux wireless extensions */
4639 if ((cmd
>= SIOCIWFIRST
) && (cmd
<= SIOCIWLAST
)) {
4640 /* may recurse, do NOT lock */
4641 ret
= wl_iw_ioctl(net
, ifr
, cmd
);
4642 DHD_PERIM_UNLOCK(&dhd
->pub
);
4643 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4646 #endif /* defined(WL_WIRELESS_EXT) */
4648 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
4649 if (cmd
== SIOCETHTOOL
) {
4650 ret
= dhd_ethtool(dhd
, (void*)ifr
->ifr_data
);
4651 DHD_PERIM_UNLOCK(&dhd
->pub
);
4652 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4655 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
4657 #if defined(OEM_ANDROID) || defined(P2PONEINT)
4658 if (cmd
== SIOCDEVPRIVATE
+1) {
4659 ret
= wl_android_priv_cmd(net
, ifr
, cmd
);
4660 dhd_check_hang(net
, &dhd
->pub
, ret
);
4661 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4664 #endif /* OEM_ANDROID */
4666 if (cmd
!= SIOCDEVPRIVATE
) {
4667 DHD_PERIM_UNLOCK(&dhd
->pub
);
4668 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4672 memset(&ioc
, 0, sizeof(ioc
));
4674 #ifdef CONFIG_COMPAT
4675 if (is_compat_task()) {
4676 compat_wl_ioctl_t compat_ioc
;
4677 if (copy_from_user(&compat_ioc
, ifr
->ifr_data
, sizeof(compat_wl_ioctl_t
))) {
4678 bcmerror
= BCME_BADADDR
;
/* Widen the 32-bit user layout field-by-field into the native ioc. */
4681 ioc
.cmd
= compat_ioc
.cmd
;
4682 ioc
.buf
= compat_ptr(compat_ioc
.buf
);
4683 ioc
.len
= compat_ioc
.len
;
4684 ioc
.set
= compat_ioc
.set
;
4685 ioc
.used
= compat_ioc
.used
;
4686 ioc
.needed
= compat_ioc
.needed
;
4687 /* To differentiate between wl and dhd read 4 more byes */
4688 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(compat_wl_ioctl_t
),
4689 sizeof(uint
)) != 0)) {
4690 bcmerror
= BCME_BADADDR
;
4694 #endif /* CONFIG_COMPAT */
4696 /* Copy the ioc control structure part of ioctl request */
4697 if (copy_from_user(&ioc
, ifr
->ifr_data
, sizeof(wl_ioctl_t
))) {
4698 bcmerror
= BCME_BADADDR
;
4702 /* To differentiate between wl and dhd read 4 more byes */
4703 if ((copy_from_user(&ioc
.driver
, (char *)ifr
->ifr_data
+ sizeof(wl_ioctl_t
),
4704 sizeof(uint
)) != 0)) {
4705 bcmerror
= BCME_BADADDR
;
4710 if (!capable(CAP_NET_ADMIN
)) {
4711 bcmerror
= BCME_EPERM
;
/* +1 so the payload can always be NUL-terminated (see 4730). */
4716 buflen
= MIN(ioc
.len
, DHD_IOCTL_MAXLEN
);
4717 if (!(local_buf
= MALLOC(dhd
->pub
.osh
, buflen
+1))) {
4718 bcmerror
= BCME_NOMEM
;
4722 DHD_PERIM_UNLOCK(&dhd
->pub
);
4723 if (copy_from_user(local_buf
, ioc
.buf
, buflen
)) {
4724 DHD_PERIM_LOCK(&dhd
->pub
);
4725 bcmerror
= BCME_BADADDR
;
4728 DHD_PERIM_LOCK(&dhd
->pub
);
4730 *(char *)(local_buf
+ buflen
) = '\0';
4733 bcmerror
= dhd_ioctl_process(&dhd
->pub
, ifidx
, &ioc
, local_buf
);
4735 if (!bcmerror
&& buflen
&& local_buf
&& ioc
.buf
) {
4736 DHD_PERIM_UNLOCK(&dhd
->pub
);
4737 if (copy_to_user(ioc
.buf
, local_buf
, buflen
))
4739 DHD_PERIM_LOCK(&dhd
->pub
);
4744 MFREE(dhd
->pub
.osh
, local_buf
, buflen
+1);
4746 DHD_PERIM_UNLOCK(&dhd
->pub
);
4747 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4749 return OSL_ERROR(bcmerror
);
4752 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/*
 * dhd_deepsleep() - switch the dongle's deepsleep power state.
 * @flag == 1: disable MPC and enable the "deepsleep" iovar (after first
 * disabling packet filters when PKT_FILTER_SUPPORT is set).
 * @flag == 0: repeatedly (up to MAX_TRY_CNT) clear "deepsleep" and read it
 * back until it reports 0, then re-enable MPC.
 * NOTE(review): the switch(flag) header, powervar assignments (0/1), and
 * loop braces were dropped by the extraction — verify against the
 * original; "powervar" is presumably toggled between the two cases.
 */
4753 int dhd_deepsleep(dhd_info_t
*dhd
, int flag
)
4764 case 1 : /* Deepsleep on */
4765 DHD_ERROR(("[WiFi] Deepsleep On\n"));
4766 /* give some time to sysioc_work before deepsleep */
4768 #ifdef PKT_FILTER_SUPPORT
4769 /* disable pkt filter */
4770 dhd_enable_packet_filter(0, dhdp
);
4771 #endif /* PKT_FILTER_SUPPORT */
4774 memset(iovbuf
, 0, sizeof(iovbuf
));
4775 bcm_mkiovar("mpc", (char *)&powervar
, 4, iovbuf
, sizeof(iovbuf
));
4776 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
4778 /* Enable Deepsleep */
4780 memset(iovbuf
, 0, sizeof(iovbuf
));
4781 bcm_mkiovar("deepsleep", (char *)&powervar
, 4, iovbuf
, sizeof(iovbuf
));
4782 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
4785 case 0: /* Deepsleep Off */
4786 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
4788 /* Disable Deepsleep */
4789 for (cnt
= 0; cnt
< MAX_TRY_CNT
; cnt
++) {
4791 memset(iovbuf
, 0, sizeof(iovbuf
));
4792 bcm_mkiovar("deepsleep", (char *)&powervar
, 4,
4793 iovbuf
, sizeof(iovbuf
));
4794 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
,
4795 sizeof(iovbuf
), TRUE
, 0);
/* Read back to confirm the dongle actually left deepsleep. */
4797 memset(iovbuf
, 0, sizeof(iovbuf
));
4798 bcm_mkiovar("deepsleep", (char *)&powervar
, 4,
4799 iovbuf
, sizeof(iovbuf
));
4800 if ((ret
= dhd_wl_ioctl_cmd(dhdp
, WLC_GET_VAR
, iovbuf
,
4801 sizeof(iovbuf
), FALSE
, 0)) < 0) {
4802 DHD_ERROR(("the error of dhd deepsleep status"
4803 " ret value :%d\n", ret
));
4805 if (!(*(int *)iovbuf
)) {
4806 DHD_ERROR(("deepsleep mode is 0,"
4807 " count: %d\n", cnt
));
/* Restore MPC now that deepsleep is confirmed off. */
4815 memset(iovbuf
, 0, sizeof(iovbuf
));
4816 bcm_mkiovar("mpc", (char *)&powervar
, 4, iovbuf
, sizeof(iovbuf
));
4817 dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
/*
 * dhd_stop() - net_device_ops .ndo_stop (ifconfig down) handler for a DHD
 * interface. Under wake/PERIM locks it: flushes the STA list, stops the TX
 * queue, brings cfg80211 down, and — for the dynamic-load (not
 * download-on-driverload) case — removes all virtual interfaces and
 * unregisters the inet/inet6 address notifiers, then cancels deferred
 * work. It cleans up proptxstatus state, stops the protocol module, powers
 * WiFi off on Android (wl_android_wifi_off) optionally entering deepsleep,
 * resets the rx/tx timeout counters and hang flag, and clears the cached
 * country spec. Returns after releasing both locks.
 * NOTE(review): "exit:" style labels, braces and the final return were
 * dropped by the mangled extraction — verify against the original source.
 */
4825 dhd_stop(struct net_device
*net
)
4828 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4829 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4830 DHD_PERIM_LOCK(&dhd
->pub
);
4831 printf("%s: Enter %p\n", __FUNCTION__
, net
);
4832 if (dhd
->pub
.up
== 0) {
4836 dhd_if_flush_sta(DHD_DEV_IFP(net
));
4839 ifidx
= dhd_net2idx(dhd
, net
);
4840 BCM_REFERENCE(ifidx
);
4842 /* Set state and stop OS transmissions */
4843 netif_stop_queue(net
);
4848 wl_cfg80211_down(NULL
);
4851 * For CFG80211: Clean up all the left over virtual interfaces
4852 * when the primary Interface is brought down. [ifconfig wlan0 down]
4854 if (!dhd_download_fw_on_driverload
) {
4855 if ((dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) &&
4856 (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
4859 dhd_net_if_lock_local(dhd
);
/* Tear down every secondary interface; index 0 is the primary. */
4860 for (i
= 1; i
< DHD_MAX_IFS
; i
++)
4861 dhd_remove_if(&dhd
->pub
, i
, FALSE
);
4862 #ifdef ARP_OFFLOAD_SUPPORT
4863 if (dhd_inetaddr_notifier_registered
) {
4864 dhd_inetaddr_notifier_registered
= FALSE
;
4865 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
4867 #endif /* ARP_OFFLOAD_SUPPORT */
4869 if (dhd_inet6addr_notifier_registered
) {
4870 dhd_inet6addr_notifier_registered
= FALSE
;
4871 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
4873 #endif /* CONFIG_IPV6 */
4874 dhd_net_if_unlock_local(dhd
);
4876 cancel_work_sync(dhd
->dhd_deferred_wq
);
4879 #endif /* WL_CFG80211 */
4881 #ifdef PROP_TXSTATUS
4882 dhd_wlfc_cleanup(&dhd
->pub
, NULL
, 0);
4884 /* Stop the protocol module */
4885 dhd_prot_stop(&dhd
->pub
);
4887 OLD_MOD_DEC_USE_COUNT
;
4889 #if defined(WL_CFG80211) && defined(OEM_ANDROID)
4890 if (ifidx
== 0 && !dhd_download_fw_on_driverload
)
4891 wl_android_wifi_off(net
);
4893 if (dhd
->pub
.conf
&& dhd
->pub
.conf
->deepsleep
)
4894 dhd_deepsleep(dhd
, 1);
4896 #endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */
4897 dhd
->pub
.rxcnt_timeout
= 0;
4898 dhd
->pub
.txcnt_timeout
= 0;
4900 dhd
->pub
.hang_was_sent
= 0;
4902 /* Clear country spec for for built-in type driver */
4903 if (!dhd_download_fw_on_driverload
) {
4904 dhd
->pub
.dhd_cspec
.country_abbrev
[0] = 0x00;
4905 dhd
->pub
.dhd_cspec
.rev
= 0;
4906 dhd
->pub
.dhd_cspec
.ccode
[0] = 0x00;
4909 printf("%s: Exit\n", __FUNCTION__
);
4910 DHD_PERIM_UNLOCK(&dhd
->pub
);
4911 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
4915 #if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
4916 defined(USE_INITIAL_SHORT_DWELL_TIME))
4917 extern bool g_first_broadcast_scan
;
4918 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
/*
 * dhd_interworking_enable() - enable 802.11u interworking on the dongle
 * via the "interworking" iovar; on success also programs the "wnm" iovar
 * with the basic Hotspot 2.0 Release 2 capabilities
 * (WL_WNM_BSSTRANS | WL_WNM_NOTIF). Returns the last ioctl result (ret).
 * NOTE(review): the declaration of ret and the final return were dropped
 * by the mangled extraction — verify against the original source.
 */
4921 static int dhd_interworking_enable(dhd_pub_t
*dhd
)
4923 char iovbuf
[WLC_IOCTL_SMLEN
];
4924 uint32 enable
= true;
4927 bcm_mkiovar("interworking", (char *)&enable
, sizeof(enable
), iovbuf
, sizeof(iovbuf
));
4928 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
4929 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__
, ret
));
4932 if (ret
== BCME_OK
) {
4933 /* basic capabilities for HS20 REL2 */
4934 uint32 cap
= WL_WNM_BSSTRANS
| WL_WNM_NOTIF
;
4935 bcm_mkiovar("wnm", (char *)&cap
, sizeof(cap
), iovbuf
, sizeof(iovbuf
));
4936 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
4937 iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
4938 DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__
, ret
));
4947 dhd_open(struct net_device
*net
)
4949 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
4954 char iovbuf
[WLC_IOCTL_SMLEN
];
4955 dbus_config_t config
;
4956 uint32 agglimit
= 0;
4957 uint32 rpc_agg
= BCM_RPC_TP_DNGL_AGG_DPC
; /* host aggr not enabled yet */
4958 #endif /* BCM_FD_AGGR */
4962 printf("%s: Enter %p\n", __FUNCTION__
, net
);
4963 #if defined(MULTIPLE_SUPPLICANT)
4964 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
4965 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) != 0) {
4966 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__
));
4968 mutex_lock(&_dhd_sdio_mutex_lock_
);
4970 #endif /* MULTIPLE_SUPPLICANT */
4972 DHD_OS_WAKE_LOCK(&dhd
->pub
);
4973 DHD_PERIM_LOCK(&dhd
->pub
);
4974 dhd
->pub
.dongle_trap_occured
= 0;
4975 dhd
->pub
.hang_was_sent
= 0;
4979 * Force start if ifconfig_up gets called before START command
4980 * We keep WEXT's wl_control_wl_start to provide backward compatibility
4981 * This should be removed in the future
4983 ret
= wl_control_wl_start(net
);
4985 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
4989 #endif /* defined(OEM_ANDROID) && !defined(WL_CFG80211) */
4991 ifidx
= dhd_net2idx(dhd
, net
);
4992 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
4995 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__
));
5000 if (!dhd
->iflist
[ifidx
]) {
5001 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__
));
5007 atomic_set(&dhd
->pend_8021x_cnt
, 0);
5008 dhd_update_fw_nv_path(dhd
); // terence 20140807: fix for op_mode issue
5009 if (!dhd_download_fw_on_driverload
) {
5010 DHD_ERROR(("\n%s\n", dhd_version
));
5011 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
5012 g_first_broadcast_scan
= TRUE
;
5013 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
5014 ret
= wl_android_wifi_on(net
);
5016 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
5017 __FUNCTION__
, ret
));
5023 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
5026 /* try to bring up bus */
5027 DHD_PERIM_UNLOCK(&dhd
->pub
);
5028 ret
= dhd_bus_start(&dhd
->pub
);
5029 DHD_PERIM_LOCK(&dhd
->pub
);
5031 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
5036 if ((ret
= dbus_up(dhd
->pub
.dbus
)) != 0) {
5037 DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__
, ret
));
5040 dhd
->pub
.busstate
= DHD_BUS_DATA
;
5042 /* Bus is ready, query any dongle information */
5043 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0) {
5044 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__
, ret
));
5047 #endif /* BCMDBUS */
5052 config
.config_id
= DBUS_CONFIG_ID_AGGR_LIMIT
;
5055 memset(iovbuf
, 0, sizeof(iovbuf
));
5056 bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit
, 4,
5057 iovbuf
, sizeof(iovbuf
));
5059 if (!dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0)) {
5060 agglimit
= *(uint32
*)iovbuf
;
5061 config
.aggr_param
.maxrxsf
= agglimit
>> BCM_RPC_TP_AGG_SF_SHIFT
;
5062 config
.aggr_param
.maxrxsize
= agglimit
& BCM_RPC_TP_AGG_BYTES_MASK
;
5063 DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
5064 agglimit
, config
.aggr_param
.maxrxsf
, config
.aggr_param
.maxrxsize
));
5065 if (bcm_rpc_tp_set_config(dhd
->pub
.info
->rpc_th
, &config
)) {
5066 DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
5069 DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
5070 rpc_agg
&= ~BCM_RPC_TP_DNGL_AGG_DPC
;
5073 /* Set aggregation for TX */
5074 bcm_rpc_tp_agg_set(dhd
->pub
.info
->rpc_th
, BCM_RPC_TP_HOST_AGG_MASK
,
5075 rpc_agg
& BCM_RPC_TP_HOST_AGG_MASK
);
5077 /* Set aggregation for RX */
5078 memset(iovbuf
, 0, sizeof(iovbuf
));
5079 bcm_mkiovar("rpc_agg", (char *)&rpc_agg
, sizeof(rpc_agg
), iovbuf
, sizeof(iovbuf
));
5080 if (!dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) {
5081 dhd
->pub
.info
->fdaggr
= 0;
5082 if (rpc_agg
& BCM_RPC_TP_HOST_AGG_MASK
)
5083 dhd
->pub
.info
->fdaggr
|= BCM_FDAGGR_H2D_ENABLED
;
5084 if (rpc_agg
& BCM_RPC_TP_DNGL_AGG_MASK
)
5085 dhd
->pub
.info
->fdaggr
|= BCM_FDAGGR_D2H_ENABLED
;
5087 DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__
, ret
));
5089 #endif /* BCM_FD_AGGR */
5090 if (dhd_download_fw_on_driverload
) {
5091 if (dhd
->pub
.conf
->deepsleep
)
5092 dhd_deepsleep(dhd
, 0);
5095 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
5096 memcpy(net
->dev_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
5099 /* Get current TOE mode from dongle */
5100 if (dhd_toe_get(dhd
, ifidx
, &toe_ol
) >= 0 && (toe_ol
& TOE_TX_CSUM_OL
) != 0)
5101 dhd
->iflist
[ifidx
]->net
->features
|= NETIF_F_IP_CSUM
;
5103 dhd
->iflist
[ifidx
]->net
->features
&= ~NETIF_F_IP_CSUM
;
5106 #if defined(WL_CFG80211)
5107 if (unlikely(wl_cfg80211_up(NULL
))) {
5108 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__
));
5112 if (!dhd_download_fw_on_driverload
) {
5113 #ifdef ARP_OFFLOAD_SUPPORT
5114 dhd
->pend_ipaddr
= 0;
5115 if (!dhd_inetaddr_notifier_registered
) {
5116 dhd_inetaddr_notifier_registered
= TRUE
;
5117 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
5119 #endif /* ARP_OFFLOAD_SUPPORT */
5121 if (!dhd_inet6addr_notifier_registered
) {
5122 dhd_inet6addr_notifier_registered
= TRUE
;
5123 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
5125 #endif /* CONFIG_IPV6 */
5127 dhd_set_scb_probe(&dhd
->pub
);
5128 #endif /* WL_CFG80211 */
5131 /* Allow transmit calls */
5132 netif_start_queue(net
);
5136 dhd_dbg_init(&dhd
->pub
);
5139 OLD_MOD_INC_USE_COUNT
;
5144 DHD_PERIM_UNLOCK(&dhd
->pub
);
5145 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
5147 #if defined(MULTIPLE_SUPPLICANT)
5148 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
5149 mutex_unlock(&_dhd_sdio_mutex_lock_
);
5151 #endif /* MULTIPLE_SUPPLICANT */
5153 printf("%s: Exit ret=%d\n", __FUNCTION__
, ret
);
/*
 * dhd_do_driver_init() - initialize the driver on demand for a net_device.
 * Delegates the heavy lifting to dhd_open(); returns early when the bus is
 * already in DHD_BUS_DATA state.
 * NOTE(review): several physical lines (opening brace, guard-clause bodies,
 * returns, mutex handling) were lost in extraction — restore from upstream
 * dhd_linux.c before building.
 */
int dhd_do_driver_init(struct net_device *net)
	dhd_info_t *dhd = NULL;

	/* Caller must pass the already-created primary interface. */
	DHD_ERROR(("Primary Interface not initialized \n"));

#ifdef MULTIPLE_SUPPLICANT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
	/* Do not race a dhdsdio_probe that is still in progress. */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif /* MULTIPLE_SUPPLICANT */
	/* && defined(OEM_ANDROID) && defined(BCMSDIO) */

	dhd = DHD_DEV_INFO(net);

	/* If driver is already initialized, do nothing. */
	if (dhd->pub.busstate == DHD_BUS_DATA) {
		DHD_TRACE(("Driver already Inititalized. Nothing to do"));

	if (dhd_open(net) < 0) {
		DHD_ERROR(("Driver Init Failed \n"));
5194 dhd_event_ifadd(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
5198 if (wl_cfg80211_notify_ifadd(ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
5202 /* handle IF event caused by wl commands, SoftAP, WEXT and
5203 * anything else. This has to be done asynchronously otherwise
5204 * DPC will be blocked (and iovars will timeout as DPC has no chance
5205 * to read the response back)
5207 if (ifevent
->ifidx
> 0) {
5208 dhd_if_event_t
*if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
5210 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
5211 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
5212 strncpy(if_event
->name
, name
, IFNAMSIZ
);
5213 if_event
->name
[IFNAMSIZ
- 1] = '\0';
5214 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
,
5215 DHD_WQ_WORK_IF_ADD
, dhd_ifadd_event_handler
, DHD_WORK_PRIORITY_LOW
);
5222 dhd_event_ifdel(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
5224 dhd_if_event_t
*if_event
;
5226 #if defined(WL_CFG80211) && !defined(P2PONEINT)
5227 if (wl_cfg80211_notify_ifdel(ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
5229 #endif /* WL_CFG80211 */
5231 /* handle IF event caused by wl commands, SoftAP, WEXT and
5234 if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
5235 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
5236 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
5237 strncpy(if_event
->name
, name
, IFNAMSIZ
);
5238 if_event
->name
[IFNAMSIZ
- 1] = '\0';
5239 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
, DHD_WQ_WORK_IF_DEL
,
5240 dhd_ifdel_event_handler
, DHD_WORK_PRIORITY_LOW
);
5245 /* unregister and free the existing net_device interface (if any) in iflist and
5246 * allocate a new one. the slot is reused. this function does NOT register the
5247 * new interface to linux kernel. dhd_register_if does the job
/*
 * dhd_allocate_if() - unregister/free any existing net_device in iflist slot
 * ifidx, then allocate a fresh dhd_if_t plus etherdev for it.  Does NOT
 * register the new net_device with the kernel; dhd_register_if() does that.
 * need_rtnl_lock selects unregister_netdev() vs unregister_netdevice().
 * NOTE(review): the return-type line, guard conditions, error labels and
 * several #if/#else lines were lost in extraction — the trailing statements
 * below are the failure/cleanup path.  Restore from upstream dhd_linux.c.
 */
dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
	uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;

	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));

	ifp = dhdinfo->iflist[ifidx];
	/* Slot reuse: tear down whatever currently occupies it. */
	if (ifp->net != NULL) {
		DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
		dhd_dev_priv_clear(ifp->net); /* clear net_device private */

		/* in unregister_netdev case, the interface gets freed by net->destructor
		 * (which is set to free_netdev)
		 */
		if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
			free_netdev(ifp->net);
			/* NOTE(review): statements below belong to the registered-device
			 * branch; its condition lines were lost in extraction.
			 */
			netif_stop_queue(ifp->net);
			unregister_netdev(ifp->net);
			unregister_netdevice(ifp->net);

	/* Allocate a fresh interface descriptor for the slot. */
	ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
		DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));

	memset(ifp, 0, sizeof(dhd_if_t));
	ifp->info = dhdinfo;
	ifp->bssidx = bssidx;
	/* NOTE(review): callers (e.g. dhd_attach) may pass mac == NULL; the
	 * original guards this memcpy — confirm the guard line was lost.
	 */
	memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);

	/* Allocate etherdev, including space for private structure */
	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
	if (ifp->net == NULL) {
		/* NOTE(review): sizeof(dhdinfo) is the size of a pointer; the
		 * intended operand here is probably DHD_DEV_PRIV_SIZE — confirm.
		 */
		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));

	/* Setup the dhd interface's netdevice private structure. */
	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);

	/* Honor a caller-supplied interface name; bounded, NUL-terminated. */
	if (name && name[0]) {
		strncpy(ifp->net->name, name, IFNAMSIZ);
		ifp->net->name[IFNAMSIZ - 1] = '\0';

	/* Destructor choice depends on WL_CFG80211 / P2P configuration;
	 * the surrounding #if/#elif lines were lost in extraction.
	 */
	ifp->net->destructor = free_netdev;
	ifp->net->destructor = dhd_netdev_free;
	ifp->net->destructor = free_netdev;
#endif /* WL_CFG80211 */
	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
	ifp->name[IFNAMSIZ - 1] = '\0';
	dhdinfo->iflist[ifidx] = ifp;

#ifdef PCIE_FULL_DONGLE
	/* Initialize STA info list */
	INIT_LIST_HEAD(&ifp->sta_list);
	DHD_IF_STA_LIST_LOCK_INIT(ifp);
#endif /* PCIE_FULL_DONGLE */

	/* Failure path: undo partial allocation and empty the slot. */
	if (ifp->net != NULL) {
		dhd_dev_priv_clear(ifp->net);
		free_netdev(ifp->net);
	MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
	dhdinfo->iflist[ifidx] = NULL;
5342 /* unregister the net_device interface associated with the indexed slot
/*
 * dhd_preremove_if() - detach the net_device in iflist slot ifidx from the
 * kernel (stop queue + unregister) without freeing the dhd_if_t itself.
 * need_rtnl_lock selects unregister_netdev() vs unregister_netdevice().
 * NOTE(review): the return-type line, branch conditions and #ifdef lines
 * around the ARGOS/RPS cleanup were lost in extraction.
 */
dhd_preremove_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;

	ifp = dhdinfo->iflist[ifidx];

	if (ifp->net != NULL) {
		DHD_ERROR(("unregister interface '%s' idx %d\n", ifp->net->name, ifp->idx));

		/* in unregister_netdev case, the interface gets freed by net->destructor
		 * (which is set to free_netdev)
		 */
		if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
			free_netdev(ifp->net);
			/* NOTE(review): registered-device branch below; condition lost. */
			netif_stop_queue(ifp->net);

#if defined(ARGOS_RPS_CPU_CTL) && defined(ARGOS_CPU_SCHEDULER)
			/* Samsung ARGOS scheduler hook teardown. */
			argos_register_notifier_deinit();
			custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
			/* need_rtnl_lock selects which unregister variant is used. */
			unregister_netdev(ifp->net);
			unregister_netdevice(ifp->net);
5386 /* free the net_device interface associated with the indexed
5387 * slot, also free the slot memory and set the slot pointer to NULL
/*
 * dhd_remove_if() - fully tear down iflist slot ifidx: unregister the
 * net_device (via dhd_preremove_if), drop per-interface state and free the
 * dhd_if_t, leaving the slot NULL.
 * NOTE(review): return type, braces and the surrounding conditionals were
 * lost in extraction.
 */
dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;

	ifp = dhdinfo->iflist[ifidx];
		DHD_ERROR(("deleting if idx %d\n", ifp->idx));

		/* Kernel-side unregistration first. */
		dhd_preremove_if(dhdpub, ifidx, need_rtnl_lock);
		dhd_wmf_cleanup(dhdpub, ifidx);
#endif /* DHD_WMF */
		/* Drop associated STAs, then empty and free the slot. */
		dhd_if_del_sta_list(ifp);
		dhdinfo->iflist[ifidx] = NULL;
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* net_device_ops for the primary interface: full lifecycle (open/stop)
 * plus data path and address management callbacks.
 */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
	/* pre-3.2 kernels use the older multicast callback name (the #else
	 * line was lost in extraction)
	 */
	.ndo_set_multicast_list = dhd_set_multicast_list,
/* net_device_ops for virtual (secondary) interfaces: no open/stop — their
 * lifecycle is driven through the primary interface.
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
	/* pre-3.2 fallback (the #else line was lost in extraction) */
	.ndo_set_multicast_list = dhd_set_multicast_list,
/* P2P virtual-interface ops (P2PONEINT build): open/stop are routed through
 * cfg80211 P2P handlers implemented elsewhere.
 */
extern int wl_cfgp2p_if_open(struct net_device *net);
extern int wl_cfgp2p_if_stop(struct net_device *net);

static struct net_device_ops dhd_cfgp2p_ops_virt = {
	.ndo_open = wl_cfgp2p_if_open,
	.ndo_stop = wl_cfgp2p_if_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
	/* pre-3.2 fallback (the #else line was lost in extraction) */
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif /* P2PONEINT */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
/* On-target debugger hook, implemented elsewhere. */
extern void debugger_init(void *bus_handle);

#ifdef SHOW_LOGTRACE
/* Path to the firmware log-string dictionary (logstrs.bin); read-only
 * module parameter, overridable at insmod time.
 */
static char *logstrs_path = "/root/logstrs.bin";
module_param(logstrs_path, charp, S_IRUGO);
/*
 * dhd_init_logstrs_array() - load logstrs.bin and build the fmts[] lookup
 * table (format string per log number) used to decode firmware log traces.
 * Results are stored into temp->raw_fmts / temp->num_fmts (raw_fmts owns the
 * file image; fmts entries point into it).
 * Supports three file layouts: header-tagged (LOGSTRS_MAGIC trailer), legacy
 * ROM+RAM (4324b5 only) and legacy RAM-only.
 * NOTE(review): error-path lines (gotos/returns after each failed check) and
 * several declarations were lost in extraction; as visible, kmalloc'd
 * raw_fmts/fmts are not freed on the failure paths — confirm against
 * upstream before drawing conclusions.
 */
dhd_init_logstrs_array(dhd_event_log_t *temp)
	struct file *filep = NULL;
	char *raw_fmts = NULL;
	int logstrs_size = 0;
	logstr_header_t *hdr = NULL;
	uint32 *lognums = NULL;
	char *logstrs = NULL;

	filep = filp_open(logstrs_path, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
	error = vfs_stat(logstrs_path, &stat);
		DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
	logstrs_size = (int) stat.size;

	/* Slurp the whole file into one buffer; table entries index into it. */
	raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
	if (raw_fmts == NULL) {
		DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
		DHD_ERROR(("Error: Log strings file read failed\n"));

	/* Remember header from the logstrs.bin file */
	hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_header_t));

	if (hdr->log_magic == LOGSTRS_MAGIC) {
		/* Modern layout: logstrs.bin ends with a header describing the
		 * ROM/RAM lognums and logstrs section offsets.
		 */
		num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
		ram_index = (hdr->ram_lognums_offset -
			hdr->rom_lognums_offset) / sizeof(uint32);
		lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
		logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
		/* Legacy logstrs.bin format without header. */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
		if (num_fmts == 0) {
			/* Legacy ROM/RAM logstrs.bin format:
			 * - ROM 'lognums' section
			 * - RAM 'lognums' section
			 * - ROM 'logstrs' section.
			 * - RAM 'logstrs' section.
			 *
			 * 'lognums' is an array of indexes for the strings in the
			 * 'logstrs' section. The first uint32 is 0 (index of first
			 * string in ROM 'logstrs' section).
			 *
			 * The 4324b5 is the only ROM that uses this legacy format. Use the
			 * fixed number of ROM fmtnums to find the start of the RAM
			 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
			 * find the ROM 'logstrs' section.
			 */
#define NUM_4324B5_ROM_FMTS 186
#define FIRST_4324B5_ROM_LOGSTR "Con\n"
			ram_index = NUM_4324B5_ROM_FMTS;
			lognums = (uint32 *) raw_fmts;
			num_fmts = ram_index;
			logstrs = (char *) &raw_fmts[num_fmts << 2];
			/* Advance until the known first ROM string is found. */
			while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
				logstrs = (char *) &raw_fmts[num_fmts << 2];
			/* Legacy RAM-only logstrs.bin format:
			 * - RAM 'lognums' section
			 * - RAM 'logstrs' section.
			 *
			 * 'lognums' is an array of indexes for the strings in the
			 * 'logstrs' section. The first uint32 is an index to the
			 * start of 'logstrs'. Therefore, if this index is divided
			 * by 'sizeof(uint32)' it provides the number of logstr
			 * entries.
			 */
			lognums = (uint32 *) raw_fmts;
			logstrs = (char *) &raw_fmts[num_fmts << 2];

	/* Pointer-per-format lookup table. */
	fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
		DHD_ERROR(("Failed to allocate fmts memory\n"));

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		 * (they are 0-indexed relative to 'rom_logstrs_offset').
		 *
		 * RAM lognums are already indexed to point to the correct RAM logstrs (they
		 * are 0-indexed relative to the start of the logstrs.bin file).
		 */
		if (i == ram_index) {
		fmts[i] = &logstrs[lognums[i]];

	temp->raw_fmts = raw_fmts;
	temp->num_fmts = num_fmts;
	filp_close(filep, NULL);
	/* NOTE(review): second filp_close below belongs to the error path. */
	filp_close(filep, NULL);
/*
 * dhd_attach() - allocate and initialize the per-device dhd_info structure:
 * primary interface, locks, wakelocks, protocol layer, cfg80211/WEXT attach,
 * watchdog/DPC/RXF threads, PM/early-suspend/inetaddr notifiers and the
 * deferred work queue.  On any failure, jumps to the cleanup tail which runs
 * dhd_detach()/dhd_free().
 * NOTE(review): the return-type line, 'fail:' style labels, several branch
 * conditions and #if/#else lines were lost in extraction — restore from
 * upstream dhd_linux.c before building.
 */
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
	dhd_info_t *dhd = NULL;
	struct net_device *net = NULL;
	char if_name[IFNAMSIZ] = {'\0'};
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
	wifi_adapter_info_t *adapter = NULL;
	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	DHD_ERROR(("%s\n", driver_target));
#endif /* STBLINUX */
	/* will implement get_ids for DBUS later */
	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);

	/* Allocate primary dhd_info */
	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
	/* Fallback when no platform preallocation is available. */
	dhd = MALLOC(osh, sizeof(dhd_info_t));
		DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
	memset(dhd, 0, sizeof(dhd_info_t));
	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;

	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */

	dhd->adapter = adapter;
#ifdef GET_CUSTOM_MAC_ENABLE
	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
#endif /* GET_CUSTOM_MAC_ENABLE */
	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;

	/* Initialize thread based operation and lock */
	sema_init(&dhd->sdsem, 1);

	/* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
	 * This is indeed a hack but we have to make it work properly before we have a better
	 * solution.
	 */
	dhd_update_fw_nv_path(dhd);
#endif /* BCMDBUS */

	/* Link to info module */
	dhd->pub.info = dhd;

	/* Link to bus module */
	dhd->pub.hdrlen = bus_hdrlen;

	/* Set network interface name if it was provided as module parameter */
	if (iface_name[0]) {
		strncpy(if_name, iface_name, IFNAMSIZ);
		if_name[IFNAMSIZ - 1] = 0;
		len = strlen(if_name);
		ch = if_name[len - 1];
		/* Append "%d" unless the user already ended the name in a digit. */
		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
			strcat(if_name, "%d");
	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
	net->netdev_ops = NULL;

	sema_init(&dhd->proto_sem, 1);

#ifdef PROP_TXSTATUS
	spin_lock_init(&dhd->wlfc_spinlock);

	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
	dhd->pub.plat_init = dhd_wlfc_plat_init;
	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;

#ifdef DHD_WLFC_THREAD
	init_waitqueue_head(&dhd->pub.wlfc_wqhead);
	dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
	if (IS_ERR(dhd->pub.wlfc_thread)) {
		DHD_ERROR(("create wlfc thread failed\n"));
	wake_up_process(dhd->pub.wlfc_thread);
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	/* Initialize other structure content */
	init_waitqueue_head(&dhd->ioctl_resp_wait);
	init_waitqueue_head(&dhd->d3ack_wait);
	init_waitqueue_head(&dhd->ctrl_wait);

	/* Initialize the spinlocks */
	spin_lock_init(&dhd->sdlock);
	spin_lock_init(&dhd->txqlock);
	spin_lock_init(&dhd->dhd_lock);
	spin_lock_init(&dhd->rxf_lock);
#if defined(RXFRAME_THREAD)
	dhd->rxthread_enabled = TRUE;
#endif /* defined(RXFRAME_THREAD) */

#ifdef DHDTCPACK_SUPPRESS
	spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */

	/* Initialize Wakelock stuff */
	spin_lock_init(&dhd->wakelock_spinlock);
	dhd->wakelock_counter = 0;
	dhd->wakelock_wd_counter = 0;
	dhd->wakelock_rx_timeout_enable = 0;
	dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
	wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
	mutex_init(&dhd->dhd_net_if_mutex);
	mutex_init(&dhd->dhd_suspend_mutex);
	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;

	/* Attach and link in the protocol */
	if (dhd_prot_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_prot_attach failed\n"));
	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;

	/* Attach and link in the cfg80211 */
	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
		DHD_ERROR(("wl_cfg80211_attach failed\n"));
	dhd_monitor_init(&dhd->pub);
	dhd_state |= DHD_ATTACH_STATE_CFG80211;
#if defined(WL_WIRELESS_EXT)
	/* Attach and link in the iw */
	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
			DHD_ERROR(("wl_iw_attach failed\n"));
		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef SHOW_LOGTRACE
	dhd_init_logstrs_array(&dhd->event_data);
#endif /* SHOW_LOGTRACE */

	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
		DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));

	/* Set up the watchdog timer */
	init_timer(&dhd->timer);
	dhd->timer.data = (ulong)dhd;
	dhd->timer.function = dhd_watchdog;
	dhd->default_wd_interval = dhd_watchdog_ms;

	if (dhd_watchdog_prio >= 0) {
		/* Initialize watchdog thread */
		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
		/* NOTE(review): else branch — no watchdog thread, timer only. */
		dhd->thr_wdt_ctl.thr_pid = -1;

	debugger_init((void *) bus);

	/* Set up the bottom half handler */
	if (dhd_dpc_prio >= 0) {
		/* Initialize DPC thread */
		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
		/* use tasklet for dpc */
		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
		dhd->thr_dpc_ctl.thr_pid = -1;

	if (dhd->rxthread_enabled) {
		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
		/* Initialize RXF thread */
		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
#endif /* BCMDBUS */

	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;

#if defined(CONFIG_PM_SLEEP)
	if (!dhd_pm_notifier_registered) {
		dhd_pm_notifier_registered = TRUE;
		register_pm_notifier(&dhd_pm_notifier);
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	dhd->pend_ipaddr = 0;
	if (!dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = TRUE;
		register_inetaddr_notifier(&dhd_inetaddr_notifier);
#endif /* ARP_OFFLOAD_SUPPORT */
	if (!dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = TRUE;
		register_inet6addr_notifier(&dhd_inet6addr_notifier);

	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
#ifdef DEBUG_CPU_FREQ
	dhd->new_freq = alloc_percpu(int);
	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);

#ifdef DHDTCPACK_SUPPRESS
#if defined(BCMPCIE)
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
	/* non-PCIe default (the #else line was lost in extraction) */
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */

	dhd_state |= DHD_ATTACH_STATE_DONE;
	dhd->dhd_state = dhd_state;

	/* Failure path: undo whatever partial attach state was reached. */
	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
			__FUNCTION__, dhd_state, &dhd->pub));
		dhd->dhd_state = dhd_state;
		dhd_detach(&dhd->pub);
		dhd_free(&dhd->pub);
5889 int dhd_get_fw_mode(dhd_info_t
*dhdinfo
)
5891 if (strstr(dhdinfo
->fw_path
, "_apsta") != NULL
)
5892 return DHD_FLAG_HOSTAP_MODE
;
5893 if (strstr(dhdinfo
->fw_path
, "_p2p") != NULL
)
5894 return DHD_FLAG_P2P_MODE
;
5895 if (strstr(dhdinfo
->fw_path
, "_ibss") != NULL
)
5896 return DHD_FLAG_IBSS_MODE
;
5897 if (strstr(dhdinfo
->fw_path
, "_mfg") != NULL
)
5898 return DHD_FLAG_MFG_MODE
;
5900 return DHD_FLAG_STA_MODE
;
/*
 * dhd_update_fw_nv_path() - resolve the firmware / nvram / config file paths
 * into dhdinfo->{fw,nv,conf}_path.  Priority: module parameters
 * (firmware_path/nvram_path/config_path, consumed and cleared), then adapter
 * platform info, then Kconfig defaults.  Returns TRUE when a usable firmware
 * path is set (exact returns lost in extraction — confirm upstream).
 * NOTE(review): braces, local declarations (fw_len/nv_len/conf_len) and the
 * assignments under the module-parameter checks were lost in extraction.
 */
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
	const char *fw = NULL;
	const char *nv = NULL;
	const char *conf = NULL;
	wifi_adapter_info_t *adapter = dhdinfo->adapter;

	/* Update firmware and nvram path. The path may be from adapter info or module parameter
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
	// if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_FW_PATH
	fw = CONFIG_BCMDHD_FW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
	nv = CONFIG_BCMDHD_NVRAM_PATH;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */

	/* check if we need to initialize the path */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	if (dhdinfo->conf_path[0] == '\0') {
		if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
			conf = adapter->conf_path;

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	/* NOTE(review): the fw=/nv=/conf= assignments under these checks were
	 * lost in extraction. */
	if (firmware_path[0] != '\0')
	if (nvram_path[0] != '\0')
	if (config_path[0] != '\0')

	/* Bounded copy of each resolved path; trailing newline stripped. */
	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->fw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
		if (dhdinfo->fw_path[fw_len-1] == '\n')
			dhdinfo->fw_path[fw_len-1] = '\0';
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= sizeof(dhdinfo->nv_path)) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
		if (dhdinfo->nv_path[nv_len-1] == '\n')
			dhdinfo->nv_path[nv_len-1] = '\0';
	if (conf && conf[0] != '\0') {
		conf_len = strlen(conf);
		if (conf_len >= sizeof(dhdinfo->conf_path)) {
			DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
		strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
		if (dhdinfo->conf_path[conf_len-1] == '\n')
			dhdinfo->conf_path[conf_len-1] = '\0';

	/* clear the path in module parameter */
	firmware_path[0] = '\0';
	nvram_path[0] = '\0';
	config_path[0] = '\0';

	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
	printf("%s: fw_path=%s\n", __FUNCTION__, dhdinfo->fw_path);
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
	/* Derive a config path from the firmware path when none was given. */
	if (dhdinfo->conf_path[0] == '\0') {
		dhd_conf_set_conf_path_by_fw_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->fw_path);
	dhdinfo->pub.conf_path = dhdinfo->conf_path;
	printf("%s: conf_path=%s\n", __FUNCTION__, dhdinfo->conf_path);
/*
 * dhd_bus_start() - download firmware/nvram, bring the bus to DHD_BUS_DATA,
 * register OOB interrupts, initialize flowrings (PCIe full dongle) and the
 * protocol layer, then sync with the dongle.  Locking order throughout:
 * DHD_PERIM_LOCK -> dhd_os_sdlock; both are released on every exit path.
 * NOTE(review): the return-type line, several branch conditions, 'return ret'
 * statements and the #ifdef BCMDBUS structure were lost in extraction.
 */
dhd_bus_start(dhd_pub_t *dhdp)
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	unsigned long flags;

	DHD_TRACE(("Enter %s:\n", __FUNCTION__));

	DHD_PERIM_LOCK(dhdp);

	/* try to download image and nvram to the dongle */
	if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
		DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
			dhd->fw_path, dhd->nv_path);
		/* NOTE(review): failure-check condition lost in extraction. */
			DHD_ERROR(("%s: failed to download firmware %s\n",
				__FUNCTION__, dhd->fw_path));
			DHD_PERIM_UNLOCK(dhdp);
	if (dhd->pub.busstate != DHD_BUS_LOAD) {
		DHD_PERIM_UNLOCK(dhdp);

	dhd_os_sdlock(dhdp);

	/* Start the watchdog timer */
	dhd->pub.tickcnt = 0;
	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);

	/* Bring up the bus */
	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
#if defined(BCMPCIE_OOB_HOST_WAKE)
	dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* Host registration for OOB interrupt */
	if (dhd_bus_oob_intr_register(dhdp)) {
		/* deactivate timer and wait for the handler to finish */
#if !defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		dhd_os_sdunlock(dhdp);
#endif /* BCMPCIE_OOB_HOST_WAKE */
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
#if defined(BCMPCIE_OOB_HOST_WAKE)
	dhd_os_sdlock(dhdp);
	dhd_bus_oob_intr_set(dhdp, TRUE);
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
#ifdef PCIE_FULL_DONGLE
	uint32 num_flowrings; /* includes H2D common rings */
	num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
	DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
	if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
#endif /* PCIE_FULL_DONGLE */

	/* Do protocol initialization necessary for IOCTL/IOVAR */
	dhd_prot_init(&dhd->pub);

	/* If bus is not ready, can't come up */
	if (dhd->pub.busstate != DHD_BUS_DATA) {
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
		dhd_os_sdunlock(dhdp);
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);

	dhd_os_sdunlock(dhdp);

	/* Bus is ready, query any dongle information */
	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
		DHD_PERIM_UNLOCK(dhdp);

#ifdef ARP_OFFLOAD_SUPPORT
	/* Flush any IP address that arrived before the bus was up. */
	if (dhd->pend_ipaddr) {
#ifdef AOE_IP_ALIAS_SUPPORT
		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
#endif /* AOE_IP_ALIAS_SUPPORT */
		dhd->pend_ipaddr = 0;
#endif /* ARP_OFFLOAD_SUPPORT */

	DHD_PERIM_UNLOCK(dhdp);
#endif /* BCMDBUS */
/*
 * _dhd_tdls_enable() - enable/disable TDLS in the dongle and, when auto_on,
 * program auto-operation plus idle-time and RSSI thresholds via iovars.
 * Returns a BCME_* code (0 on success).  No-ops when the firmware lacks TDLS
 * support or when the requested state already matches dhd->tdls_enable.
 * NOTE(review): opening brace, 'int ret' declaration, early returns/gotos and
 * the exit label were lost in extraction.
 */
int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 tdls = tdls_on;
	uint32 tdls_auto_op = 0;
	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;

	/* Firmware capability and change-detection guards. */
	if (!FW_SUPPORTED(dhd, tdls))
	if (dhd->tdls_enable == tdls_on)

	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
	dhd->tdls_enable = tdls_on;

	/* Auto-operation parameters (only reached when auto_on is requested;
	 * the enclosing condition was lost in extraction).
	 */
	tdls_auto_op = auto_on;
	bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
		iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
		sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));

	bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
		sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
		sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));

	bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
		sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));

	bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
		sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
/*
 * dhd_tdls_enable() - net_device-facing wrapper: resolves the dhd_info_t from
 * the netdev and forwards to _dhd_tdls_enable() on &dhd->pub.
 * NOTE(review): garbled extraction -- the function body's braces, the 'ret'
 * declaration, the NULL-dhd guard and the final 'return ret;' are elided.
 */
6205 int dhd_tdls_enable(struct net_device
*dev
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
6207 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
6210 ret
= _dhd_tdls_enable(&dhd
->pub
, tdls_on
, auto_on
, mac
);
/*
 * dhd_tdls_set_mode() - switch the dongle's TDLS WFD (Wi-Fi Display) mode:
 * disable TDLS, set the "tdls_wfd_mode" iovar, re-enable TDLS, then cache the
 * mode in dhd->tdls_mode. BCME_UNSUPPORTED from the iovar is tolerated.
 * NOTE(review): garbled extraction -- return type, braces, 'ret' declaration
 * and the error/exit paths between the surviving lines are elided.
 */
6217 dhd_tdls_set_mode(dhd_pub_t
*dhd
, bool wfd_mode
)
6219 char iovbuf
[WLC_IOCTL_SMLEN
];
6221 bool auto_on
= false;
6222 uint32 mode
= wfd_mode
;
/* Step 1: turn TDLS off before changing the WFD mode. */
6225 ret
= _dhd_tdls_enable(dhd
, false, auto_on
, NULL
);
6227 DHD_ERROR(("%s Disable tdls_auto_op failed. %d\n", __FUNCTION__
, ret
));
/* Step 2: program the new WFD mode; BCME_UNSUPPORTED is not treated as fatal. */
6232 bcm_mkiovar("tdls_wfd_mode", (char *)&mode
, sizeof(mode
),
6233 iovbuf
, sizeof(iovbuf
));
6234 if (((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6235 sizeof(iovbuf
), TRUE
, 0)) < 0) &&
6236 (ret
!= BCME_UNSUPPORTED
)) {
6237 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__
, ret
));
/* Step 3: turn TDLS back on. */
6241 ret
= _dhd_tdls_enable(dhd
, true, auto_on
, NULL
);
6243 DHD_ERROR(("%s enable tdls_auto_op failed. %d\n", __FUNCTION__
, ret
));
/* Remember the active WFD mode on success. */
6247 dhd
->tdls_mode
= mode
;
6251 #ifdef PCIE_FULL_DONGLE
/*
 * dhd_tdls_update_peer_info() - maintain the per-dongle TDLS peer table
 * (dhdp->peer_tbl): on connect, add a node for MAC address 'da' unless it
 * already exists; on disconnect, delete the matching node, tearing down its
 * flow rings first, and adjust tdls_peer_count.
 * NOTE(review): garbled extraction -- the connect/disconnect branch structure,
 * list-walk advance statements (prev = cur; cur = cur->next;), 'return's and
 * closing braces are elided; surviving text is verbatim.
 */
6252 void dhd_tdls_update_peer_info(struct net_device
*dev
, bool connect
, uint8
*da
)
6254 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
6255 dhd_pub_t
*dhdp
= (dhd_pub_t
*)&dhd
->pub
;
6256 tdls_peer_node_t
*cur
= dhdp
->peer_tbl
.node
;
6257 tdls_peer_node_t
*new = NULL
, *prev
= NULL
;
6259 uint8 sa
[ETHER_ADDR_LEN
];
6260 int ifidx
= dhd_net2idx(dhd
, dev
);
/* Reject calls for interfaces not tracked by this dhd instance. */
6262 if (ifidx
== DHD_BAD_IF
)
6265 dhdif
= dhd
->iflist
[ifidx
];
6266 memcpy(sa
, dhdif
->mac_addr
, ETHER_ADDR_LEN
);
/* Connect path: scan the peer list for a duplicate entry first. */
6269 while (cur
!= NULL
) {
6270 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
6271 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
6272 __FUNCTION__
, __LINE__
));
/* Not found: allocate a node and push it at the head of the list. */
6278 new = MALLOC(dhdp
->osh
, sizeof(tdls_peer_node_t
));
6280 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__
));
6283 memcpy(new->addr
, da
, ETHER_ADDR_LEN
);
6284 new->next
= dhdp
->peer_tbl
.node
;
6285 dhdp
->peer_tbl
.node
= new;
6286 dhdp
->peer_tbl
.tdls_peer_count
++;
/* Disconnect path: find the node, drop its flow rings, unlink and free it. */
6289 while (cur
!= NULL
) {
6290 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
6291 dhd_flow_rings_delete_for_peer(dhdp
, ifidx
, da
);
/* Unlink: mid-list via prev, or head via peer_tbl.node. */
6293 prev
->next
= cur
->next
;
6295 dhdp
->peer_tbl
.node
= cur
->next
;
6296 MFREE(dhdp
->osh
, cur
, sizeof(tdls_peer_node_t
));
6297 dhdp
->peer_tbl
.tdls_peer_count
--;
6303 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__
));
6306 #endif /* PCIE_FULL_DONGLE */
6307 #endif /* BCMDBUS */
/*
 * dhd_is_concurrent_mode() - report whether the driver is operating in any
 * concurrent (multi- or single-channel) mode, based on dhd->op_mode flags.
 * NOTE(review): garbled extraction -- the braces and the TRUE/FALSE return
 * statements for each branch are elided.
 */
6309 bool dhd_is_concurrent_mode(dhd_pub_t
*dhd
)
6314 if (dhd
->op_mode
& DHD_FLAG_CONCURR_MULTI_CHAN_MODE
)
6316 else if ((dhd
->op_mode
& DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
) ==
6317 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
)
6322 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
6323 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
6324 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
6325 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
6326 * would still be named as fw_bcmdhd_apsta.
/*
 * dhd_get_concurrent_capabilites() - probe firmware for p2p/vsdb support and
 * return the DHD_FLAG_CONCURR_* capability bits (0 when concurrency does not
 * apply). Return type elided by the extraction -- presumably int32; confirm
 * against the upstream file.
 * NOTE(review): garbled extraction -- early returns, braces and parts of the
 * WLC_GET_VAR condition are elided; surviving text is verbatim.
 */
6329 dhd_get_concurrent_capabilites(dhd_pub_t
*dhd
)
6332 char buf
[WLC_IOCTL_SMLEN
];
6333 bool mchan_supported
= FALSE
;
6334 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
6335 * test mode, that means we only will use the mode as it is
6337 if (dhd
->op_mode
& (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))
/* VSDB capability implies multi-channel concurrency is possible. */
6339 if (FW_SUPPORTED(dhd
, vsdb
)) {
6340 mchan_supported
= TRUE
;
6342 if (!FW_SUPPORTED(dhd
, p2p
)) {
6343 DHD_TRACE(("Chip does not support p2p\n"));
6347 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
6348 memset(buf
, 0, sizeof(buf
));
6349 bcm_mkiovar("p2p", 0, 0, buf
, sizeof(buf
));
6350 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
),
6352 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__
, ret
));
6357 /* By default, chip supports single chan concurrency,
6358 * now lets check for mchan
6360 ret
= DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
;
6361 if (mchan_supported
)
6362 ret
|= DHD_FLAG_CONCURR_MULTI_CHAN_MODE
;
6363 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
6364 /* For customer_hw4, although ICS,
6365 * we still support concurrent mode
6376 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
6378 #ifdef SUPPORT_AP_POWERSAVE
/* RX-chain power-save tunables used when enabling AP power save below. */
6379 #define RXCHAIN_PWRSAVE_PPS 10
6380 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
6381 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
/*
 * dhd_set_ap_powersave() - enable or disable the dongle's rxchain power-save
 * feature for SoftAP mode via the rxchain_pwrsave_* iovars; when enabling it
 * also programs pps, quiet time and the STA-association check.
 * NOTE(review): garbled extraction -- the 'if (enable)'/'else' structure, the
 * 'char iovbuf[...]' declaration, error returns and braces are elided.
 * 'ifidx' appears unused in the surviving text -- confirm against upstream.
 */
6382 int dhd_set_ap_powersave(dhd_pub_t
*dhdp
, int ifidx
, int enable
)
6385 int32 pps
= RXCHAIN_PWRSAVE_PPS
;
6386 int32 quiet_time
= RXCHAIN_PWRSAVE_QUIET_TIME
;
6387 int32 stas_assoc_check
= RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK
;
/* Enable path: switch the feature on, then push the three tunables. */
6390 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable
, 4, iovbuf
, sizeof(iovbuf
));
6391 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
6392 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
6393 DHD_ERROR(("Failed to enable AP power save\n"));
6395 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps
, 4, iovbuf
, sizeof(iovbuf
));
6396 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
6397 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
6398 DHD_ERROR(("Failed to set pps\n"));
6400 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time
,
6401 4, iovbuf
, sizeof(iovbuf
));
6402 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
6403 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
6404 DHD_ERROR(("Failed to set quiet time\n"));
6406 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check
,
6407 4, iovbuf
, sizeof(iovbuf
));
6408 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
6409 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
6410 DHD_ERROR(("Failed to set stas assoc check\n"));
/* Disable path: same enable iovar, with the caller's 'enable' (0) value. */
6413 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable
, 4, iovbuf
, sizeof(iovbuf
));
6414 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
6415 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
6416 DHD_ERROR(("Failed to disable AP power save\n"));
6422 #endif /* SUPPORT_AP_POWERSAVE */
6425 #if defined(READ_CONFIG_FROM_FILE)
6426 #include <linux/fs.h>
6427 #include <linux/ctype.h>
/* Map userspace-style strtoul onto the Broadcom utility implementation. */
6429 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
/* Global flag: presumably whether the config file is allowed to drive PM --
 * TODO confirm against callers elsewhere in the file. */
6430 bool PM_control
= TRUE
;
/*
 * dhd_preinit_proc() - apply one "name=value" pair parsed from the config
 * file: known names (country, roam_*, PM, btamp_chan, band, cur_etheraddr,
 * lpc, vht_features, roam_off) get dedicated handling; anything else is sent
 * as a generic integer iovar via bcm_mkiovar()/WLC_SET_VAR.
 * Returns the dhd_wl_ioctl_cmd() result for the applied setting.
 * NOTE(review): garbled extraction -- declarations (ret, var_int, iovlen,
 * iovbuf, revstr, roam_trigger, x, ...), braces, else branches and several
 * returns are elided; the surviving text is preserved verbatim.
 */
6432 static int dhd_preinit_proc(dhd_pub_t
*dhd
, int ifidx
, char *name
, char *value
)
6435 wl_country_t cspec
= {{0}, -1, {0}};
6437 char *endptr
= NULL
;
6439 char smbuf
[WLC_IOCTL_SMLEN
*2];
6440 #ifdef ROAM_AP_ENV_DETECTION
6441 int roam_env_mode
= AP_ENV_INDETERMINATE
;
6442 #endif /* ROAM_AP_ENV_DETECTION */
/* "country": value is "CC" or "CC/rev"; build a wl_country_t and set it. */
6444 if (!strcmp(name
, "country")) {
6445 revstr
= strchr(value
, '/');
6447 cspec
.rev
= strtoul(revstr
+ 1, &endptr
, 10);
6448 memcpy(cspec
.country_abbrev
, value
, WLC_CNTRY_BUF_SZ
);
6449 cspec
.country_abbrev
[2] = '\0';
6450 memcpy(cspec
.ccode
, cspec
.country_abbrev
, WLC_CNTRY_BUF_SZ
);
/* No-revision branch: take the value as both abbrev and ccode, then let the
 * platform adapter remap it. */
6453 memcpy(cspec
.country_abbrev
, value
, WLC_CNTRY_BUF_SZ
);
6454 memcpy(cspec
.ccode
, value
, WLC_CNTRY_BUF_SZ
);
6455 get_customized_country_code(dhd
->info
->adapter
,
6456 (char *)&cspec
.country_abbrev
, &cspec
);
6458 memset(smbuf
, 0, sizeof(smbuf
));
6459 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
6460 cspec
.country_abbrev
, cspec
.rev
));
6461 iolen
= bcm_mkiovar("country", (char*)&cspec
, sizeof(cspec
),
6462 smbuf
, sizeof(smbuf
));
6463 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
6464 smbuf
, iolen
, TRUE
, 0);
6465 } else if (!strcmp(name
, "roam_scan_period")) {
6466 var_int
= (int)simple_strtol(value
, NULL
, 0);
6467 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
,
6468 &var_int
, sizeof(var_int
), TRUE
, 0);
6469 } else if (!strcmp(name
, "roam_delta")) {
6474 x
.val
= (int)simple_strtol(value
, NULL
, 0);
6475 /* x.band = WLC_BAND_AUTO; */
6476 x
.band
= WLC_BAND_ALL
;
6477 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, &x
, sizeof(x
), TRUE
, 0);
6478 } else if (!strcmp(name
, "roam_trigger")) {
6481 roam_trigger
[0] = (int)simple_strtol(value
, NULL
, 0);
6482 roam_trigger
[1] = WLC_BAND_ALL
;
6483 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, &roam_trigger
,
6484 sizeof(roam_trigger
), TRUE
, 0);
6486 #ifdef ROAM_AP_ENV_DETECTION
/* Auto roam trigger additionally enables firmware AP-environment detection;
 * the cached flag mirrors whether the iovar took effect. */
6487 if (roam_trigger
[0] == WL_AUTO_ROAM_TRIGGER
) {
6489 bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode
,
6490 4, iovbuf
, sizeof(iovbuf
));
6491 if (dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6492 sizeof(iovbuf
), TRUE
, 0) == BCME_OK
) {
6493 dhd
->roam_env_detection
= TRUE
;
6495 dhd
->roam_env_detection
= FALSE
;
6498 #endif /* ROAM_AP_ENV_DETECTION */
6500 } else if (!strcmp(name
, "PM")) {
6502 var_int
= (int)simple_strtol(value
, NULL
, 0);
6504 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
,
6505 &var_int
, sizeof(var_int
), TRUE
, 0);
6507 #if defined(CONFIG_CONTROL_PM) || defined(CONFIG_PM_LOCK)
/* g_pm_control tracks whether the host should stop managing PM itself. */
6509 g_pm_control
= TRUE
;
6510 printk("%s var_int=%d don't control PM\n", __func__
, var_int
);
6512 g_pm_control
= FALSE
;
6513 printk("%s var_int=%d do control PM\n", __func__
, var_int
);
6520 else if (!strcmp(name
, "btamp_chan")) {
6526 btamp_chan
= (int)simple_strtol(value
, NULL
, 0);
6527 iov_len
= bcm_mkiovar("btamp_chan", (char *)&btamp_chan
, 4, iovbuf
, sizeof(iovbuf
));
/* NOTE(review): the '< 0' comparison is inside the inner parens here --
 * 'ret' is assigned the boolean result, a likely upstream bug worth checking. */
6528 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, 0) < 0))
6529 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
6530 __FUNCTION__
, btamp_chan
, ret
));
6532 DHD_ERROR(("%s btamp_chan %d set success\n",
6533 __FUNCTION__
, btamp_chan
));
6535 #endif /* WLBTAMP */
6536 else if (!strcmp(name
, "band")) {
6538 if (!strcmp(value
, "auto"))
6539 var_int
= WLC_BAND_AUTO
;
6540 else if (!strcmp(value
, "a"))
6541 var_int
= WLC_BAND_5G
;
6542 else if (!strcmp(value
, "b"))
6543 var_int
= WLC_BAND_2G
;
6544 else if (!strcmp(value
, "all"))
6545 var_int
= WLC_BAND_ALL
;
6547 printk(" set band value should be one of the a or b or all\n");
6548 var_int
= WLC_BAND_AUTO
;
6550 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_BAND
, &var_int
,
6551 sizeof(var_int
), TRUE
, 0)) < 0)
6552 printk(" set band err=%d\n", ret
);
6554 } else if (!strcmp(name
, "cur_etheraddr")) {
6555 struct ether_addr ea
;
6560 bcm_ether_atoe(value
, &ea
);
/* Skip the ioctl when the configured MAC equals the current one. */
6562 ret
= memcmp(&ea
.octet
, dhd
->mac
.octet
, ETHER_ADDR_LEN
);
6564 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__
));
6568 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__
,
6569 ea
.octet
[0], ea
.octet
[1], ea
.octet
[2],
6570 ea
.octet
[3], ea
.octet
[4], ea
.octet
[5]));
6572 iovlen
= bcm_mkiovar("cur_etheraddr", (char*)&ea
, ETHER_ADDR_LEN
, buf
, 32);
6574 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, iovlen
, TRUE
, 0);
6576 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
/* Mirror the new MAC into the host-side copy on success. */
6580 memcpy(dhd
->mac
.octet
, (void *)&ea
, ETHER_ADDR_LEN
);
6583 } else if (!strcmp(name
, "lpc")) {
6587 var_int
= (int)simple_strtol(value
, NULL
, 0);
/* lpc requires the interface down around the iovar: wl down -> set -> wl up. */
6588 if (dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
6589 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__
));
6591 iovlen
= bcm_mkiovar("lpc", (char *)&var_int
, 4, buf
, sizeof(buf
));
6592 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, iovlen
, TRUE
, 0)) < 0) {
6593 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
6595 if (dhd_wl_ioctl_cmd(dhd
, WLC_UP
, NULL
, 0, TRUE
, 0) < 0) {
6596 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__
));
6599 } else if (!strcmp(name
, "vht_features")) {
6603 var_int
= (int)simple_strtol(value
, NULL
, 0);
/* Same down/set/up sequence as lpc above. */
6605 if (dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
6606 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__
));
6608 iovlen
= bcm_mkiovar("vht_features", (char *)&var_int
, 4, buf
, sizeof(buf
));
6609 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, iovlen
, TRUE
, 0)) < 0) {
6610 DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__
, ret
));
6612 if (dhd_wl_ioctl_cmd(dhd
, WLC_UP
, NULL
, 0, TRUE
, 0) < 0) {
6613 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__
));
/* Fallback: treat any other name as a plain integer iovar. */
6618 char iovbuf
[WLC_IOCTL_SMLEN
];
6620 /* wlu_iovar_setint */
6621 var_int
= (int)simple_strtol(value
, NULL
, 0);
6623 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
6624 if (!strcmp(name
, "roam_off")) {
6625 /* Setup timeout if Beacons are lost to report link down */
6627 uint bcn_timeout
= 2;
6628 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout
, 4,
6629 iovbuf
, sizeof(iovbuf
));
6630 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6633 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
6635 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__
, name
, var_int
));
6637 iovlen
= bcm_mkiovar(name
, (char *)&var_int
, sizeof(var_int
),
6638 iovbuf
, sizeof(iovbuf
));
6639 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
6640 iovbuf
, iovlen
, TRUE
, 0);
/*
 * dhd_preinit_config() - read the config file at CONFIG_BCMDHD_CONFIG_PATH,
 * split it into whitespace-separated "name=value" tokens, and apply each one
 * through dhd_preinit_proc(). The file is read into a MALLOC'd buffer sized
 * from vfs_stat(); image handle and buffer are released at the end.
 * NOTE(review): garbled extraction -- declarations (ret, len, stat,
 * config_path), the NUL-split between name and value, set_fs()/get_fs()
 * handling around vfs_stat, error gotos and closing braces are elided;
 * surviving text is verbatim.
 */
6646 static int dhd_preinit_config(dhd_pub_t
*dhd
, int ifidx
)
6648 mm_segment_t old_fs
;
6650 struct file
*fp
= NULL
;
6652 char *buf
= NULL
, *p
, *name
, *value
;
6656 config_path
= CONFIG_BCMDHD_CONFIG_PATH
;
6660 printk(KERN_ERR
"config_path can't read. \n");
/* Stat the file first to size the read buffer. */
6666 if ((ret
= vfs_stat(config_path
, &stat
))) {
6668 printk(KERN_ERR
"%s: Failed to get information (%d)\n",
6674 if (!(buf
= MALLOC(dhd
->osh
, stat
.size
+ 1))) {
6675 printk(KERN_ERR
"Failed to allocate memory %llu bytes\n", stat
.size
);
6679 printk("dhd_preinit_config : config path : %s \n", config_path
);
/* Open the image and pull the whole file into buf in one block read. */
6681 if (!(fp
= dhd_os_open_image(config_path
)) ||
6682 (len
= dhd_os_get_image_block(buf
, stat
.size
, fp
)) < 0)
6685 buf
[stat
.size
] = '\0';
/* Tokenize: outer loop walks the buffer, inner loops delimit name and value. */
6686 for (p
= buf
; *p
; p
++) {
6689 for (name
= p
++; *p
&& !isspace(*p
); p
++) {
6693 for (value
= p
; *p
&& !isspace(*p
); p
++);
6695 if ((ret
= dhd_preinit_proc(dhd
, ifidx
, name
, value
)) < 0) {
6696 printk(KERN_ERR
"%s: %s=%s\n",
6697 bcmerrorstr(ret
), name
, value
);
/* Cleanup: close the image handle and free the stat.size+1 byte buffer. */
6707 dhd_os_close_image(fp
);
6709 MFREE(dhd
->osh
, buf
, stat
.size
+1);
6716 #endif /* READ_CONFIG_FROM_FILE */
6719 dhd_preinit_ioctls(dhd_pub_t
*dhd
)
6722 char eventmask
[WL_EVENTING_MASK_LEN
];
6723 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12]; /* Room for "event_msgs" + '\0' + bitvec */
6724 uint32 buf_key_b4_m4
= 1;
6729 eventmsgs_ext_t
*eventmask_msg
= NULL
;
6730 char* iov_buf
= NULL
;
6733 aibss_bcn_force_config_t bcn_config
;
6737 #endif /* WLAIBSS_PS */
6738 #endif /* WLAIBSS */
6739 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
6742 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
6743 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
6744 uint32 ampdu_ba_wsize
= 0;
6745 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
6746 #if defined(CUSTOM_AMPDU_MPDU)
6747 int32 ampdu_mpdu
= 0;
6749 #if defined(CUSTOM_AMPDU_RELEASE)
6750 int32 ampdu_release
= 0;
6752 #if defined(CUSTOM_AMSDU_AGGSF)
6753 int32 amsdu_aggsf
= 0;
6756 #if defined(BCMDBUS)
6757 #ifdef PROP_TXSTATUS
6758 int wlfc_enable
= TRUE
;
6760 uint32 hostreorder
= 1;
6762 #endif /* DISABLE_11N */
6763 #endif /* PROP_TXSTATUS */
6765 #ifdef PCIE_FULL_DONGLE
6766 uint32 wl_ap_isolate
;
6767 #endif /* PCIE_FULL_DONGLE */
6770 #ifdef DHD_ENABLE_LPC
6772 #endif /* DHD_ENABLE_LPC */
6773 uint power_mode
= PM_FAST
;
6774 uint32 dongle_align
= DHD_SDALIGN
;
6775 uint bcn_timeout
= dhd
->conf
->bcn_timeout
;
6777 #if defined(ARP_OFFLOAD_SUPPORT)
6780 int scan_assoc_time
= DHD_SCAN_ASSOC_ACTIVE_TIME
;
6781 int scan_unassoc_time
= DHD_SCAN_UNASSOC_ACTIVE_TIME
;
6782 int scan_passive_time
= DHD_SCAN_PASSIVE_TIME
;
6783 char buf
[WLC_IOCTL_SMLEN
];
6785 uint32 listen_interval
= CUSTOM_LISTEN_INTERVAL
; /* Default Listen Interval in Beacons */
6788 int roam_trigger
[2] = {CUSTOM_ROAM_TRIGGER_SETTING
, WLC_BAND_ALL
};
6789 int roam_scan_period
[2] = {10, WLC_BAND_ALL
};
6790 int roam_delta
[2] = {CUSTOM_ROAM_DELTA_SETTING
, WLC_BAND_ALL
};
6791 #ifdef ROAM_AP_ENV_DETECTION
6792 int roam_env_mode
= AP_ENV_INDETERMINATE
;
6793 #endif /* ROAM_AP_ENV_DETECTION */
6794 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
6795 int roam_fullscan_period
= 60;
6796 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6797 int roam_fullscan_period
= 120;
6798 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6800 #ifdef DISABLE_BUILTIN_ROAM
6802 #endif /* DISABLE_BUILTIN_ROAM */
6803 #endif /* ROAM_ENABLE */
6808 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
6809 uint32 mpc
= 0; /* Turn MPC off for AP/APSTA mode */
6810 struct ether_addr p2p_ea
;
6812 #ifdef SOFTAP_UAPSD_OFF
6813 uint32 wme_apsd
= 0;
6814 #endif /* SOFTAP_UAPSD_OFF */
6815 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
6816 uint32 apsta
= 1; /* Enable APSTA mode */
6817 #elif defined(SOFTAP_AND_GC)
6820 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
6821 #ifdef GET_CUSTOM_MAC_ENABLE
6822 struct ether_addr ea_addr
;
6823 #endif /* GET_CUSTOM_MAC_ENABLE */
6830 #endif /* DISABLE_11N */
6832 #if defined(DISABLE_11AC)
6834 #endif /* DISABLE_11AC */
6837 #endif /* USE_WL_TXBF */
6838 #ifdef AMPDU_VO_ENABLE
6839 struct ampdu_tid_control tid
;
6841 #ifdef USE_WL_FRAMEBURST
6842 uint32 frameburst
= 1;
6843 #endif /* USE_WL_FRAMEBURST */
6844 #ifdef DHD_SET_FW_HIGHSPEED
6845 uint32 ack_ratio
= 250;
6846 uint32 ack_ratio_depth
= 64;
6847 #endif /* DHD_SET_FW_HIGHSPEED */
6848 #ifdef SUPPORT_2G_VHT
6849 uint32 vht_features
= 0x3; /* 2G enable | rates all */
6850 #endif /* SUPPORT_2G_VHT */
6851 #ifdef CUSTOM_PSPRETEND_THR
6852 uint32 pspretend_thr
= CUSTOM_PSPRETEND_THR
;
6854 #ifdef PKT_FILTER_SUPPORT
6855 dhd_pkt_filter_enable
= TRUE
;
6856 #endif /* PKT_FILTER_SUPPORT */
6858 dhd
->tdls_enable
= FALSE
;
6859 dhd_tdls_set_mode(dhd
, false);
6861 dhd
->suspend_bcn_li_dtim
= CUSTOM_SUSPEND_BCN_LI_DTIM
;
6862 DHD_TRACE(("Enter %s\n", __FUNCTION__
));
6864 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_BAND", WLC_SET_BAND
, dhd
->conf
->band
, 0, FALSE
);
6866 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
6867 (op_mode
== DHD_FLAG_MFG_MODE
)) {
6868 /* Check and adjust IOCTL response timeout for Manufactring firmware */
6869 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT
);
6870 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
6874 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT
);
6875 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__
));
6877 #ifdef GET_CUSTOM_MAC_ENABLE
6878 ret
= wifi_platform_get_mac_addr(dhd
->info
->adapter
, ea_addr
.octet
);
6880 memset(buf
, 0, sizeof(buf
));
6881 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr
, ETHER_ADDR_LEN
, buf
, sizeof(buf
));
6882 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
6884 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG
", error=%d\n",
6885 __FUNCTION__
, MAC2STRDBG(ea_addr
.octet
), ret
));
6889 memcpy(dhd
->mac
.octet
, ea_addr
.octet
, ETHER_ADDR_LEN
);
6891 #endif /* GET_CUSTOM_MAC_ENABLE */
6892 /* Get the default device MAC address directly from firmware */
6893 memset(buf
, 0, sizeof(buf
));
6894 bcm_mkiovar("cur_etheraddr", 0, 0, buf
, sizeof(buf
));
6895 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
),
6897 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__
, ret
));
6901 /* Update public MAC address after reading from Firmware */
6902 memcpy(dhd
->mac
.octet
, buf
, ETHER_ADDR_LEN
);
6904 #ifdef GET_CUSTOM_MAC_ENABLE
6906 #endif /* GET_CUSTOM_MAC_ENABLE */
6908 /* get a capabilities from firmware */
6909 memset(dhd
->fw_capabilities
, 0, sizeof(dhd
->fw_capabilities
));
6910 bcm_mkiovar("cap", 0, 0, dhd
->fw_capabilities
, sizeof(dhd
->fw_capabilities
));
6911 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, dhd
->fw_capabilities
,
6912 sizeof(dhd
->fw_capabilities
), FALSE
, 0)) < 0) {
6913 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6914 __FUNCTION__
, ret
));
6917 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_HOSTAP_MODE
) ||
6918 (op_mode
== DHD_FLAG_HOSTAP_MODE
)) {
6919 #ifdef SET_RANDOM_MAC_SOFTAP
6922 dhd
->op_mode
= DHD_FLAG_HOSTAP_MODE
;
6923 #if defined(ARP_OFFLOAD_SUPPORT)
6926 #ifdef PKT_FILTER_SUPPORT
6927 dhd_pkt_filter_enable
= FALSE
;
6929 #ifdef SET_RANDOM_MAC_SOFTAP
6930 SRANDOM32((uint
)jiffies
);
6931 rand_mac
= RANDOM32();
6932 iovbuf
[0] = 0x02; /* locally administered bit */
6935 iovbuf
[3] = (unsigned char)(rand_mac
& 0x0F) | 0xF0;
6936 iovbuf
[4] = (unsigned char)(rand_mac
>> 8);
6937 iovbuf
[5] = (unsigned char)(rand_mac
>> 16);
6939 bcm_mkiovar("cur_etheraddr", (void *)iovbuf
, ETHER_ADDR_LEN
, buf
, sizeof(buf
));
6940 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
6942 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
6944 memcpy(dhd
->mac
.octet
, iovbuf
, ETHER_ADDR_LEN
);
6945 #endif /* SET_RANDOM_MAC_SOFTAP */
6946 #if defined(OEM_ANDROID) && !defined(AP) && defined(WL_CFG80211)
6947 /* Turn off MPC in AP mode */
6948 bcm_mkiovar("mpc", (char *)&mpc
, 4, iovbuf
, sizeof(iovbuf
));
6949 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6950 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6951 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__
, ret
));
6954 #ifdef SUPPORT_AP_POWERSAVE
6955 dhd_set_ap_powersave(dhd
, 0, TRUE
);
6957 #ifdef SOFTAP_UAPSD_OFF
6958 bcm_mkiovar("wme_apsd", (char *)&wme_apsd
, 4, iovbuf
, sizeof(iovbuf
));
6959 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6960 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__
, ret
));
6961 #endif /* SOFTAP_UAPSD_OFF */
6962 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
6963 (op_mode
== DHD_FLAG_MFG_MODE
)) {
6964 #if defined(ARP_OFFLOAD_SUPPORT)
6966 #endif /* ARP_OFFLOAD_SUPPORT */
6967 #ifdef PKT_FILTER_SUPPORT
6968 dhd_pkt_filter_enable
= FALSE
;
6969 #endif /* PKT_FILTER_SUPPORT */
6970 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
6972 uint32 concurrent_mode
= 0;
6973 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_P2P_MODE
) ||
6974 (op_mode
== DHD_FLAG_P2P_MODE
)) {
6975 #if defined(ARP_OFFLOAD_SUPPORT)
6978 #ifdef PKT_FILTER_SUPPORT
6979 dhd_pkt_filter_enable
= FALSE
;
6981 dhd
->op_mode
= DHD_FLAG_P2P_MODE
;
6982 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_IBSS_MODE
) ||
6983 (op_mode
== DHD_FLAG_IBSS_MODE
)) {
6984 dhd
->op_mode
= DHD_FLAG_IBSS_MODE
;
6986 dhd
->op_mode
= DHD_FLAG_STA_MODE
;
6987 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
6988 if (dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
&&
6989 (concurrent_mode
= dhd_get_concurrent_capabilites(dhd
))) {
6990 #if defined(ARP_OFFLOAD_SUPPORT)
6993 dhd
->op_mode
|= concurrent_mode
;
6996 /* Check if we are enabling p2p */
6997 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
6998 bcm_mkiovar("apsta", (char *)&apsta
, 4, iovbuf
, sizeof(iovbuf
));
6999 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
7000 iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
7001 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__
, ret
));
7004 #if defined(SOFTAP_AND_GC)
7005 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_AP
,
7006 (char *)&ap_mode
, sizeof(ap_mode
), TRUE
, 0)) < 0) {
7007 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__
, ret
));
7010 memcpy(&p2p_ea
, &dhd
->mac
, ETHER_ADDR_LEN
);
7011 ETHER_SET_LOCALADDR(&p2p_ea
);
7012 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea
,
7013 ETHER_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
7014 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
7015 iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
7016 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__
, ret
));
7018 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
7022 (void)concurrent_mode
;
7023 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
7026 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG
"\n",
7027 dhd
->op_mode
, MAC2STRDBG(dhd
->mac
.octet
)));
7028 /* Set Country code */
7029 if (dhd
->dhd_cspec
.ccode
[0] != 0) {
7030 printf("Set country %s, revision %d\n", dhd
->dhd_cspec
.ccode
, dhd
->dhd_cspec
.rev
);
7031 bcm_mkiovar("country", (char *)&dhd
->dhd_cspec
,
7032 sizeof(wl_country_t
), iovbuf
, sizeof(iovbuf
));
7033 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
7034 printf("%s: country code setting failed %d\n", __FUNCTION__
, ret
);
7036 dhd_conf_set_country(dhd
);
7037 dhd_conf_fix_country(dhd
);
7039 dhd_conf_get_country(dhd
, &dhd
->dhd_cspec
);
7041 #if defined(DISABLE_11AC)
7042 bcm_mkiovar("vhtmode", (char *)&vhtmode
, 4, iovbuf
, sizeof(iovbuf
));
7043 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
7044 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__
, ret
));
7045 #endif /* DISABLE_11AC */
7047 /* Set Listen Interval */
7048 bcm_mkiovar("assoc_listen", (char *)&listen_interval
, 4, iovbuf
, sizeof(iovbuf
));
7049 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
7050 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__
, ret
));
7052 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
7053 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
7054 bcm_mkiovar("roam_off", (char *)&roamvar
, 4, iovbuf
, sizeof(iovbuf
));
7055 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7056 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
7057 #if defined(ROAM_ENABLE)
7058 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, roam_trigger
,
7059 sizeof(roam_trigger
), TRUE
, 0)) < 0)
7060 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__
, ret
));
7061 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
, roam_scan_period
,
7062 sizeof(roam_scan_period
), TRUE
, 0)) < 0)
7063 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__
, ret
));
7064 if ((dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, roam_delta
,
7065 sizeof(roam_delta
), TRUE
, 0)) < 0)
7066 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__
, ret
));
7067 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period
, 4, iovbuf
, sizeof(iovbuf
));
7068 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
7069 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__
, ret
));
7070 #ifdef ROAM_AP_ENV_DETECTION
7071 if (roam_trigger
[0] == WL_AUTO_ROAM_TRIGGER
) {
7072 bcm_mkiovar("roam_env_detection", (char *)&roam_env_mode
,
7073 4, iovbuf
, sizeof(iovbuf
));
7074 if (dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0) == BCME_OK
)
7075 dhd
->roam_env_detection
= TRUE
;
7077 dhd
->roam_env_detection
= FALSE
;
7080 #endif /* ROAM_AP_ENV_DETECTION */
7081 #endif /* ROAM_ENABLE */
7082 dhd_conf_set_roam(dhd
);
7085 bcm_mkiovar("okc_enable", (char *)&okc
, 4, iovbuf
, sizeof(iovbuf
));
7086 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7089 /* by default TDLS on and auto mode off */
7090 _dhd_tdls_enable(dhd
, true, false, NULL
);
7093 #ifdef DHD_ENABLE_LPC
7095 bcm_mkiovar("lpc", (char *)&lpc
, 4, iovbuf
, sizeof(iovbuf
));
7096 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7097 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7098 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
7100 #endif /* DHD_ENABLE_LPC */
7101 dhd_conf_set_fw_string_cmd(dhd
, "lpc", dhd
->conf
->lpc
, 0, FALSE
);
7103 /* Set PowerSave mode */
7104 if (dhd
->conf
->pm
>= 0)
7105 power_mode
= dhd
->conf
->pm
;
7106 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
, sizeof(power_mode
), TRUE
, 0);
7108 /* Match Host and Dongle rx alignment */
7109 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align
, 4, iovbuf
, sizeof(iovbuf
));
7110 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7114 /* Setup timeout if Beacons are lost and roam is off to report link down */
7115 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout
, 4, iovbuf
, sizeof(iovbuf
));
7116 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7117 /* Setup assoc_retry_max count to reconnect target AP in dongle */
7118 bcm_mkiovar("assoc_retry_max", (char *)&retry_max
, 4, iovbuf
, sizeof(iovbuf
));
7119 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7120 #if defined(AP) && !defined(WLP2P)
7121 /* Turn off MPC in AP mode */
7122 bcm_mkiovar("mpc", (char *)&mpc
, 4, iovbuf
, sizeof(iovbuf
));
7123 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7124 bcm_mkiovar("apsta", (char *)&apsta
, 4, iovbuf
, sizeof(iovbuf
));
7125 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7126 #endif /* defined(AP) && !defined(WLP2P) */
7127 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
7128 dhd_conf_set_fw_string_cmd(dhd
, "mimo_bw_cap", dhd
->conf
->mimo_bw_cap
, 1, TRUE
);
7129 dhd_conf_set_fw_string_cmd(dhd
, "force_wme_ac", dhd
->conf
->force_wme_ac
, 1, FALSE
);
7130 dhd_conf_set_fw_string_cmd(dhd
, "stbc_tx", dhd
->conf
->stbc
, 0, FALSE
);
7131 dhd_conf_set_fw_string_cmd(dhd
, "stbc_rx", dhd
->conf
->stbc
, 0, FALSE
);
7132 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_SRL", WLC_SET_SRL
, dhd
->conf
->srl
, 0, TRUE
);
7133 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_LRL", WLC_SET_LRL
, dhd
->conf
->lrl
, 0, FALSE
);
7134 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT
, dhd
->conf
->spect
, 0, FALSE
);
7136 #if defined(OEM_ANDROID) && defined(SOFTAP)
7137 if (ap_fw_loaded
== TRUE
) {
7138 dhd_wl_ioctl_cmd(dhd
, WLC_SET_DTIMPRD
, (char *)&dtim
, sizeof(dtim
), TRUE
, 0);
7140 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
7142 #if defined(KEEP_ALIVE)
7144 /* Set Keep Alive : be sure to use FW with -keepalive */
7147 #if defined(OEM_ANDROID) && defined(SOFTAP)
7148 if (ap_fw_loaded
== FALSE
)
7149 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
7150 if (!(dhd
->op_mode
&
7151 (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))) {
7152 if ((res
= dhd_keep_alive_onoff(dhd
)) < 0)
7153 DHD_ERROR(("%s set keeplive failed %d\n",
7154 __FUNCTION__
, res
));
7157 #endif /* defined(KEEP_ALIVE) */
7160 /* get a capabilities from firmware */
7161 memset(dhd
->fw_capabilities
, 0, sizeof(dhd
->fw_capabilities
));
7162 bcm_mkiovar("cap", 0, 0, dhd
->fw_capabilities
, sizeof(dhd
->fw_capabilities
));
7163 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, dhd
->fw_capabilities
,
7164 sizeof(dhd
->fw_capabilities
), FALSE
, 0)) < 0) {
7165 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
7166 __FUNCTION__
, ret
));
7169 #endif /* OEM_ANDROID */
7172 bcm_mkiovar("txbf", (char *)&txbf
, 4, iovbuf
, sizeof(iovbuf
));
7173 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7174 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7175 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__
, ret
));
7177 #endif /* USE_WL_TXBF */
7178 dhd_conf_set_fw_string_cmd(dhd
, "txbf", dhd
->conf
->txbf
, 0, FALSE
);
7179 #ifdef USE_WL_FRAMEBURST
7180 /* Set frameburst to value */
7181 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_FAKEFRAG
, (char *)&frameburst
,
7182 sizeof(frameburst
), TRUE
, 0)) < 0) {
7183 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__
, ret
));
7185 #endif /* USE_WL_FRAMEBURST */
7186 dhd_conf_set_fw_string_cmd(dhd
, "frameburst", dhd
->conf
->frameburst
, 0, FALSE
);
7187 #ifdef DHD_SET_FW_HIGHSPEED
7189 bcm_mkiovar("ack_ratio", (char *)&ack_ratio
, 4, iovbuf
, sizeof(iovbuf
));
7190 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7191 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7192 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__
, ret
));
7195 /* Set ack_ratio_depth */
7196 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth
, 4, iovbuf
, sizeof(iovbuf
));
7197 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7198 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7199 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__
, ret
));
7201 #endif /* DHD_SET_FW_HIGHSPEED */
7202 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
7203 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
7204 /* Set ampdu ba wsize to 64 or 16 */
7205 #ifdef CUSTOM_AMPDU_BA_WSIZE
7206 ampdu_ba_wsize
= CUSTOM_AMPDU_BA_WSIZE
;
7208 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
7209 if (dhd
->op_mode
== DHD_FLAG_IBSS_MODE
)
7210 ampdu_ba_wsize
= CUSTOM_IBSS_AMPDU_BA_WSIZE
;
7211 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
7212 if (ampdu_ba_wsize
!= 0) {
7213 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize
, 4, iovbuf
, sizeof(iovbuf
));
7214 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7215 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7216 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
7217 __FUNCTION__
, ampdu_ba_wsize
, ret
));
7220 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
7221 dhd_conf_set_fw_string_cmd(dhd
, "ampdu_ba_wsize", dhd
->conf
->ampdu_ba_wsize
, 1, FALSE
);
7223 iov_buf
= (char*)kmalloc(WLC_IOCTL_SMLEN
, GFP_KERNEL
);
7224 if (iov_buf
== NULL
) {
7225 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN
));
7230 /* Configure custom IBSS beacon transmission */
7231 if (dhd
->op_mode
& DHD_FLAG_IBSS_MODE
)
7234 bcm_mkiovar("aibss", (char *)&aibss
, 4, iovbuf
, sizeof(iovbuf
));
7235 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7236 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7237 DHD_ERROR(("%s Set aibss to %d failed %d\n",
7238 __FUNCTION__
, aibss
, ret
));
7242 bcm_mkiovar("aibss_ps", (char *)&aibss_ps
, 4, iovbuf
, sizeof(iovbuf
));
7243 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7244 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7245 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
7246 __FUNCTION__
, aibss
, ret
));
7248 #endif /* WLAIBSS_PS */
7250 memset(&bcn_config
, 0, sizeof(bcn_config
));
7251 bcn_config
.initial_min_bcn_dur
= AIBSS_INITIAL_MIN_BCN_DUR
;
7252 bcn_config
.min_bcn_dur
= AIBSS_MIN_BCN_DUR
;
7253 bcn_config
.bcn_flood_dur
= AIBSS_BCN_FLOOD_DUR
;
7254 bcn_config
.version
= AIBSS_BCN_FORCE_CONFIG_VER_0
;
7255 bcn_config
.len
= sizeof(bcn_config
);
7257 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config
,
7258 sizeof(aibss_bcn_force_config_t
), iov_buf
, WLC_IOCTL_SMLEN
);
7259 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iov_buf
,
7260 WLC_IOCTL_SMLEN
, TRUE
, 0)) < 0) {
7261 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
7262 __FUNCTION__
, AIBSS_INITIAL_MIN_BCN_DUR
, AIBSS_MIN_BCN_DUR
,
7263 AIBSS_BCN_FLOOD_DUR
, ret
));
7265 #endif /* WLAIBSS */
7267 #if defined(CUSTOM_AMPDU_MPDU)
7268 ampdu_mpdu
= CUSTOM_AMPDU_MPDU
;
7269 if (ampdu_mpdu
!= 0 && (ampdu_mpdu
<= ampdu_ba_wsize
)) {
7270 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu
, 4, iovbuf
, sizeof(iovbuf
));
7271 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7272 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7273 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
7274 __FUNCTION__
, CUSTOM_AMPDU_MPDU
, ret
));
7277 #endif /* CUSTOM_AMPDU_MPDU */
7279 #if defined(CUSTOM_AMPDU_RELEASE)
7280 ampdu_release
= CUSTOM_AMPDU_RELEASE
;
7281 if (ampdu_release
!= 0 && (ampdu_release
<= ampdu_ba_wsize
)) {
7282 bcm_mkiovar("ampdu_release", (char *)&du_release
, 4, iovbuf
, sizeof(iovbuf
));
7283 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7284 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7285 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
7286 __FUNCTION__
, CUSTOM_AMPDU_RELEASE
, ret
));
7289 #endif /* CUSTOM_AMPDU_RELEASE */
7291 #if defined(CUSTOM_AMSDU_AGGSF)
7292 amsdu_aggsf
= CUSTOM_AMSDU_AGGSF
;
7293 if (amsdu_aggsf
!= 0) {
7294 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf
, 4, iovbuf
, sizeof(iovbuf
));
7295 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7296 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7297 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
7298 __FUNCTION__
, CUSTOM_AMSDU_AGGSF
, ret
));
7301 #endif /* CUSTOM_AMSDU_AGGSF */
7303 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
7304 /* Read 4-way handshake requirements */
7305 if (dhd_use_idsup
== 1) {
7306 bcm_mkiovar("sup_wpa", (char *)&sup_wpa
, 4, iovbuf
, sizeof(iovbuf
));
7307 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
7308 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
7309 * in-dongle supplicant.
7311 if (ret
>= 0 || ret
== BCME_NOTREADY
)
7312 dhd
->fw_4way_handshake
= TRUE
;
7313 DHD_TRACE(("4-way handshake mode is: %d\n", dhd
->fw_4way_handshake
));
7315 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
7316 #ifdef SUPPORT_2G_VHT
7317 bcm_mkiovar("vht_features", (char *)&vht_features
, 4, iovbuf
, sizeof(iovbuf
));
7318 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
7319 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__
, ret
));
7321 #endif /* SUPPORT_2G_VHT */
7322 #ifdef CUSTOM_PSPRETEND_THR
7323 /* Turn off MPC in AP mode */
7324 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr
, 4,
7325 iovbuf
, sizeof(iovbuf
));
7326 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7327 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7328 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
7329 __FUNCTION__
, ret
));
7333 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4
, 4, iovbuf
, sizeof(iovbuf
));
7334 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
7335 sizeof(iovbuf
), TRUE
, 0)) < 0) {
7336 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__
, ret
));
7339 /* Read event_msgs mask */
7340 bcm_mkiovar("event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
, sizeof(iovbuf
));
7341 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0)) < 0) {
7342 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__
, ret
));
7345 bcopy(iovbuf
, eventmask
, WL_EVENTING_MASK_LEN
);
7347 /* Setup event_msgs */
7348 setbit(eventmask
, WLC_E_SET_SSID
);
7349 setbit(eventmask
, WLC_E_PRUNE
);
7350 setbit(eventmask
, WLC_E_AUTH
);
7351 setbit(eventmask
, WLC_E_AUTH_IND
);
7352 setbit(eventmask
, WLC_E_ASSOC
);
7353 setbit(eventmask
, WLC_E_REASSOC
);
7354 setbit(eventmask
, WLC_E_REASSOC_IND
);
7355 setbit(eventmask
, WLC_E_DEAUTH
);
7356 setbit(eventmask
, WLC_E_DEAUTH_IND
);
7357 setbit(eventmask
, WLC_E_DISASSOC_IND
);
7358 setbit(eventmask
, WLC_E_DISASSOC
);
7359 setbit(eventmask
, WLC_E_JOIN
);
7360 setbit(eventmask
, WLC_E_START
);
7361 setbit(eventmask
, WLC_E_ASSOC_IND
);
7362 setbit(eventmask
, WLC_E_PSK_SUP
);
7363 setbit(eventmask
, WLC_E_LINK
);
7364 setbit(eventmask
, WLC_E_NDIS_LINK
);
7365 setbit(eventmask
, WLC_E_MIC_ERROR
);
7366 setbit(eventmask
, WLC_E_ASSOC_REQ_IE
);
7367 setbit(eventmask
, WLC_E_ASSOC_RESP_IE
);
7369 setbit(eventmask
, WLC_E_PMKID_CACHE
);
7370 setbit(eventmask
, WLC_E_TXFAIL
);
7372 setbit(eventmask
, WLC_E_JOIN_START
);
7373 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
7375 setbit(eventmask
, WLC_E_HTSFSYNC
);
7376 #endif /* WLMEDIA_HTSF */
7378 setbit(eventmask
, WLC_E_PFN_NET_FOUND
);
7379 setbit(eventmask
, WLC_E_PFN_BEST_BATCHING
);
7380 setbit(eventmask
, WLC_E_PFN_BSSID_NET_FOUND
);
7381 setbit(eventmask
, WLC_E_PFN_BSSID_NET_LOST
);
7382 #endif /* PNO_SUPPORT */
7383 /* enable dongle roaming event */
7384 #if defined(OEM_ANDROID)
7385 setbit(eventmask
, WLC_E_ROAM
);
7386 setbit(eventmask
, WLC_E_BSSID
);
7389 setbit(eventmask
, WLC_E_TDLS_PEER_EVENT
);
7392 setbit(eventmask
, WLC_E_ESCAN_RESULT
);
7393 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
7394 setbit(eventmask
, WLC_E_ACTION_FRAME_RX
);
7395 setbit(eventmask
, WLC_E_P2P_DISC_LISTEN_COMPLETE
);
7397 #endif /* WL_CFG80211 */
7399 setbit(eventmask
, WLC_E_AIBSS_TXFAIL
);
7400 #endif /* WLAIBSS */
7401 setbit(eventmask
, WLC_E_TRACE
);
7402 setbit(eventmask
, WLC_E_CSA_COMPLETE_IND
);
7403 /* Write updated Event mask */
7404 bcm_mkiovar("event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
, sizeof(iovbuf
));
7405 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
7406 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__
, ret
));
7410 /* make up event mask ext message iovar for event larger than 128 */
7411 msglen
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
+ EVENTMSGS_EXT_STRUCT_SIZE
;
7412 eventmask_msg
= (eventmsgs_ext_t
*)kmalloc(msglen
, GFP_KERNEL
);
7413 if (eventmask_msg
== NULL
) {
7414 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen
));
7418 bzero(eventmask_msg
, msglen
);
7419 eventmask_msg
->ver
= EVENTMSGS_VER
;
7420 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
7422 /* Read event_msgs_ext mask */
7423 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg
, msglen
, iov_buf
, WLC_IOCTL_SMLEN
);
7424 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iov_buf
, WLC_IOCTL_SMLEN
, FALSE
, 0);
7425 if (ret2
!= BCME_UNSUPPORTED
)
7427 if (ret2
== 0) { /* event_msgs_ext must be supported */
7428 bcopy(iov_buf
, eventmask_msg
, msglen
);
7430 #ifdef BT_WIFI_HANDOVER
7431 setbit(eventmask_msg
->mask
, WLC_E_BT_WIFI_HANDOVER_REQ
);
7432 #endif /* BT_WIFI_HANDOVER */
7434 /* Write updated Event mask */
7435 eventmask_msg
->ver
= EVENTMSGS_VER
;
7436 eventmask_msg
->command
= EVENTMSGS_SET_MASK
;
7437 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
7438 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg
,
7439 msglen
, iov_buf
, WLC_IOCTL_SMLEN
);
7440 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
7441 iov_buf
, WLC_IOCTL_SMLEN
, TRUE
, 0)) < 0) {
7442 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__
, ret
));
7445 } else if (ret2
< 0 && ret2
!= BCME_UNSUPPORTED
) {
7446 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__
, ret2
));
7448 } /* unsupported is ok */
7451 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_CHANNEL_TIME
, (char *)&scan_assoc_time
,
7452 sizeof(scan_assoc_time
), TRUE
, 0);
7453 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_UNASSOC_TIME
, (char *)&scan_unassoc_time
,
7454 sizeof(scan_unassoc_time
), TRUE
, 0);
7455 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_PASSIVE_TIME
, (char *)&scan_passive_time
,
7456 sizeof(scan_passive_time
), TRUE
, 0);
7458 #ifdef ARP_OFFLOAD_SUPPORT
7459 /* Set and enable ARP offload feature for STA only */
7460 #if defined(OEM_ANDROID) && defined(SOFTAP)
7461 if (arpoe
&& !ap_fw_loaded
)
7464 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
7466 dhd_arp_offload_enable(dhd
, TRUE
);
7467 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
7469 dhd_arp_offload_enable(dhd
, FALSE
);
7470 dhd_arp_offload_set(dhd
, 0);
7472 dhd_arp_enable
= arpoe
;
7473 #endif /* ARP_OFFLOAD_SUPPORT */
7475 #ifdef PKT_FILTER_SUPPORT
7476 /* Setup default defintions for pktfilter , enable in suspend */
7477 dhd
->pktfilter_count
= 6;
7478 /* Setup filter to allow only unicast */
7479 if (dhd_master_mode
) {
7480 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0x01 0x00";
7481 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = NULL
;
7482 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = NULL
;
7483 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
7484 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
7485 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
7486 /* apply APP pktfilter */
7487 dhd
->pktfilter
[DHD_ARP_FILTER_NUM
] = "105 0 0 12 0xFFFF 0x0806";
7489 dhd_conf_discard_pkt_filter(dhd
);
7490 dhd_conf_add_pkt_filter(dhd
);
7494 dhd_enable_packet_filter(0, dhd
);
7496 #endif /* defined(SOFTAP) */
7497 dhd_set_packet_filter(dhd
);
7498 #endif /* PKT_FILTER_SUPPORT */
7500 bcm_mkiovar("nmode", (char *)&nmode
, 4, iovbuf
, sizeof(iovbuf
));
7501 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
7502 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__
, ret
));
7503 #endif /* DISABLE_11N */
7505 #ifdef AMPDU_VO_ENABLE
7506 tid
.tid
= PRIO_8021D_VO
; /* Enable TID(6) for voice */
7508 bcm_mkiovar("ampdu_tid", (char *)&tid
, sizeof(tid
), iovbuf
, sizeof(iovbuf
));
7509 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7511 tid
.tid
= PRIO_8021D_NC
; /* Enable TID(7) for voice */
7513 bcm_mkiovar("ampdu_tid", (char *)&tid
, sizeof(tid
), iovbuf
, sizeof(iovbuf
));
7514 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7516 #if defined(SOFTAP_TPUT_ENHANCE)
7517 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
7518 dhd_bus_setidletime(dhd
, (int)100);
7519 #ifdef DHDTCPACK_SUPPRESS
7520 dhd
->tcpack_sup_enabled
= FALSE
;
7522 #if defined(DHD_TCP_WINSIZE_ADJUST)
7523 dhd_use_tcp_window_size_adjust
= TRUE
;
7526 memset(buf
, 0, sizeof(buf
));
7527 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf
, sizeof(buf
));
7528 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
), FALSE
, 0)) < 0) {
7530 bcm_mkiovar("bus:txglom", (char *)&glom
, 4, iovbuf
, sizeof(iovbuf
));
7531 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7536 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom
, 4, iovbuf
,
7538 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7542 #endif /* SOFTAP_TPUT_ENHANCE */
7544 /* query for 'ver' to get version info from firmware */
7545 memset(buf
, 0, sizeof(buf
));
7547 bcm_mkiovar("ver", (char *)&buf
, 4, buf
, sizeof(buf
));
7548 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
), FALSE
, 0)) < 0)
7549 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
7551 bcmstrtok(&ptr
, "\n", 0);
7552 /* Print fw version info */
7553 DHD_ERROR(("Firmware version = %s\n", buf
));
7554 dhd_set_version_info(dhd
, buf
);
7556 #endif /* defined(OEM_ANDROID) */
7559 #if defined(BCMDBUS)
7560 #ifdef PROP_TXSTATUS
7561 if (disable_proptx
||
7562 #ifdef PROP_TXSTATUS_VSDB
7563 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
7564 (dhd
->op_mode
!= DHD_FLAG_HOSTAP_MODE
&&
7565 dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
) ||
7566 #endif /* PROP_TXSTATUS_VSDB */
7568 wlfc_enable
= FALSE
;
7572 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
7573 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder
, 4, iovbuf
, sizeof(iovbuf
));
7574 if ((ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
7575 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__
, ret2
));
7576 if (ret2
!= BCME_UNSUPPORTED
)
7578 #if defined(CUSTOM_PLATFORM_NV_TEGRA)
7579 if (ret
== BCME_NOTDOWN
) {
7581 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
,
7582 sizeof(wl_down
), TRUE
, 0);
7583 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
7584 __FUNCTION__
, ret2
, hostreorder
));
7586 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder
, 4,
7587 iovbuf
, sizeof(iovbuf
));
7588 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
7589 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__
, ret2
));
7590 if (ret2
!= BCME_UNSUPPORTED
)
7594 if (ret2
!= BCME_OK
)
7597 #endif /* DISABLE_11N */
7599 #ifdef READ_CONFIG_FROM_FILE
7600 dhd_preinit_config(dhd
, 0);
7601 #endif /* READ_CONFIG_FROM_FILE */
7606 else if (hostreorder
)
7607 dhd_wlfc_hostreorder_init(dhd
);
7608 #endif /* DISABLE_11N */
7610 #endif /* PROP_TXSTATUS */
7612 #ifdef PCIE_FULL_DONGLE
7613 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
7614 if (FW_SUPPORTED(dhd
, ap
)) {
7615 wl_ap_isolate
= AP_ISOLATE_SENDUP_ALL
;
7616 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate
, 4, iovbuf
, sizeof(iovbuf
));
7617 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
7618 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
7620 #endif /* PCIE_FULL_DONGLE */
7622 if (!dhd
->pno_state
) {
7627 dhd_interworking_enable(dhd
);
7630 dhd_wl_ioctl_cmd(dhd
, WLC_UP
, (char *)&up
, sizeof(up
), TRUE
, 0);
7636 kfree(eventmask_msg
);
7643 int dhd_change_mtu(dhd_pub_t
*dhdp
, int new_mtu
, int ifidx
)
7645 struct dhd_info
*dhd
= dhdp
->info
;
7646 struct net_device
*dev
= NULL
;
7648 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
7649 dev
= dhd
->iflist
[ifidx
]->net
;
7652 if (netif_running(dev
)) {
7653 DHD_ERROR(("%s: Must be down to change its MTU\n", dev
->name
));
7654 return BCME_NOTDOWN
;
7657 #define DHD_MIN_MTU 1500
7658 #define DHD_MAX_MTU 1752
7660 if ((new_mtu
< DHD_MIN_MTU
) || (new_mtu
> DHD_MAX_MTU
)) {
7661 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__
, new_mtu
));
7669 #ifdef ARP_OFFLOAD_SUPPORT
7670 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
7672 aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
)
7674 u32 ipv4_buf
[MAX_IPV4_ENTRIES
]; /* temp save for AOE host_ip table */
7678 bzero(ipv4_buf
, sizeof(ipv4_buf
));
7680 /* display what we've got */
7681 ret
= dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
7682 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__
));
7684 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
7686 /* now we saved hoste_ip table, clr it in the dongle AOE */
7687 dhd_aoe_hostip_clr(dhd_pub
, idx
);
7690 DHD_ERROR(("%s failed\n", __FUNCTION__
));
7694 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
7695 if (add
&& (ipv4_buf
[i
] == 0)) {
7697 add
= FALSE
; /* added ipa to local table */
7698 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
7700 } else if (ipv4_buf
[i
] == ipa
) {
7702 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
7703 __FUNCTION__
, ipa
, i
));
7706 if (ipv4_buf
[i
] != 0) {
7707 /* add back host_ip entries from our local cache */
7708 dhd_arp_offload_add_ip(dhd_pub
, ipv4_buf
[i
], idx
);
7709 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
7710 __FUNCTION__
, ipv4_buf
[i
], i
));
7714 /* see the resulting hostip table */
7715 dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
7716 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__
));
7717 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
7722 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
7723 * whenever there is an event related to an IP address.
7724 * ptr : kernel provided pointer to IP address that has changed
7726 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
7727 unsigned long event
,
7730 struct in_ifaddr
*ifa
= (struct in_ifaddr
*)ptr
;
7736 if (!dhd_arp_enable
)
7738 if (!ifa
|| !(ifa
->ifa_dev
->dev
))
7741 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7742 /* Filter notifications meant for non Broadcom devices */
7743 if ((ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_pri
) &&
7744 (ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_virt
)) {
7745 #if defined(WL_ENABLE_P2P_IF)
7746 if (!wl_cfgp2p_is_ifops(ifa
->ifa_dev
->dev
->netdev_ops
))
7747 #endif /* WL_ENABLE_P2P_IF */
7750 #endif /* LINUX_VERSION_CODE */
7752 dhd
= DHD_DEV_INFO(ifa
->ifa_dev
->dev
);
7756 dhd_pub
= &dhd
->pub
;
7758 if (dhd_pub
->arp_version
== 1) {
7762 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
7763 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
== ifa
->ifa_dev
->dev
)
7766 if (idx
< DHD_MAX_IFS
)
7767 DHD_TRACE(("ifidx : %p %s %d\n", dhd
->iflist
[idx
]->net
,
7768 dhd
->iflist
[idx
]->name
, dhd
->iflist
[idx
]->idx
));
7770 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa
->ifa_label
));
7777 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
7778 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
7780 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
7781 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__
));
7782 if (dhd
->pend_ipaddr
) {
7783 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
7784 __FUNCTION__
, dhd
->pend_ipaddr
));
7786 dhd
->pend_ipaddr
= ifa
->ifa_address
;
7790 #ifdef AOE_IP_ALIAS_SUPPORT
7791 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
7793 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, TRUE
, idx
);
7794 #endif /* AOE_IP_ALIAS_SUPPORT */
7798 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
7799 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
7800 dhd
->pend_ipaddr
= 0;
7801 #ifdef AOE_IP_ALIAS_SUPPORT
7802 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
7804 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, FALSE
, idx
);
7806 dhd_aoe_hostip_clr(&dhd
->pub
, idx
);
7807 dhd_aoe_arp_clr(&dhd
->pub
, idx
);
7808 #endif /* AOE_IP_ALIAS_SUPPORT */
7812 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
7813 __func__
, ifa
->ifa_label
, event
));
7818 #endif /* ARP_OFFLOAD_SUPPORT */
7821 /* Neighbor Discovery Offload: defered handler */
7823 dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
)
7825 struct ipv6_work_info_t
*ndo_work
= (struct ipv6_work_info_t
*)event_data
;
7826 dhd_pub_t
*pub
= &((dhd_info_t
*)dhd_info
)->pub
;
7829 if (event
!= DHD_WQ_WORK_IPV6_NDO
) {
7830 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
7835 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__
));
7840 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__
));
7844 if (ndo_work
->if_idx
) {
7845 DHD_ERROR(("%s: idx %d \n", __FUNCTION__
, ndo_work
->if_idx
));
7849 switch (ndo_work
->event
) {
7851 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__
));
7852 ret
= dhd_ndo_enable(pub
, TRUE
);
7854 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__
, ret
));
7857 ret
= dhd_ndo_add_ip(pub
, &ndo_work
->ipv6_addr
[0], ndo_work
->if_idx
);
7859 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
7860 __FUNCTION__
, ret
));
7864 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__
));
7865 ret
= dhd_ndo_remove_ip(pub
, ndo_work
->if_idx
);
7867 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
7868 __FUNCTION__
, ret
));
7872 ret
= dhd_ndo_enable(pub
, FALSE
);
7874 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__
, ret
));
7879 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__
));
7883 /* free ndo_work. alloced while scheduling the work */
7890 * Neighbor Discovery Offload: Called when an interface
7891 * is assigned with ipv6 address.
7892 * Handles only primary interface
7894 static int dhd_inet6addr_notifier_call(struct notifier_block
*this,
7895 unsigned long event
,
7900 struct inet6_ifaddr
*inet6_ifa
= ptr
;
7901 struct in6_addr
*ipv6_addr
= &inet6_ifa
->addr
;
7902 struct ipv6_work_info_t
*ndo_info
;
7903 int idx
= 0; /* REVISIT */
7905 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7906 /* Filter notifications meant for non Broadcom devices */
7907 if (inet6_ifa
->idev
->dev
->netdev_ops
!= &dhd_ops_pri
) {
7910 #endif /* LINUX_VERSION_CODE */
7912 dhd
= DHD_DEV_INFO(inet6_ifa
->idev
->dev
);
7916 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
!= inet6_ifa
->idev
->dev
)
7918 dhd_pub
= &dhd
->pub
;
7919 if (!FW_SUPPORTED(dhd_pub
, ndoe
))
7922 ndo_info
= (struct ipv6_work_info_t
*)kzalloc(sizeof(struct ipv6_work_info_t
), GFP_ATOMIC
);
7924 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__
));
7928 ndo_info
->event
= event
;
7929 ndo_info
->if_idx
= idx
;
7930 memcpy(&ndo_info
->ipv6_addr
[0], ipv6_addr
, IPV6_ADDR_LEN
);
7932 /* defer the work to thread as it may block kernel */
7933 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)ndo_info
, DHD_WQ_WORK_IPV6_NDO
,
7934 dhd_inet6_work_handler
, DHD_WORK_PRIORITY_LOW
);
7937 #endif /* #ifdef CONFIG_IPV6 */
7940 dhd_register_if(dhd_pub_t
*dhdp
, int ifidx
, bool need_rtnl_lock
)
7942 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7944 struct net_device
*net
= NULL
;
7946 uint8 temp_addr
[ETHER_ADDR_LEN
] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
7948 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
7950 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
7951 ifp
= dhd
->iflist
[ifidx
];
7953 ASSERT(net
&& (ifp
->idx
== ifidx
));
7956 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7958 net
->get_stats
= dhd_get_stats
;
7959 net
->do_ioctl
= dhd_ioctl_entry
;
7960 net
->hard_start_xmit
= dhd_start_xmit
;
7961 net
->set_mac_address
= dhd_set_mac_address
;
7962 net
->set_multicast_list
= dhd_set_multicast_list
;
7963 net
->open
= net
->stop
= NULL
;
7965 ASSERT(!net
->netdev_ops
);
7966 net
->netdev_ops
= &dhd_ops_virt
;
7967 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7969 net
->netdev_ops
= &dhd_cfgp2p_ops_virt
;
7970 #endif /* P2PONEINT */
7972 /* Ok, link into the network layer... */
7975 * device functions for the primary interface only
7977 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7978 net
->open
= dhd_open
;
7979 net
->stop
= dhd_stop
;
7981 net
->netdev_ops
= &dhd_ops_pri
;
7982 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7983 if (!ETHER_ISNULLADDR(dhd
->pub
.mac
.octet
))
7984 memcpy(temp_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
7987 * We have to use the primary MAC for virtual interfaces
7989 memcpy(temp_addr
, ifp
->mac_addr
, ETHER_ADDR_LEN
);
7990 #if defined(OEM_ANDROID)
7992 * Android sets the locally administered bit to indicate that this is a
7993 * portable hotspot. This will not work in simultaneous AP/STA mode,
7994 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
7996 if (!memcmp(temp_addr
, dhd
->iflist
[0]->mac_addr
,
7998 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
7999 __func__
, net
->name
));
8000 temp_addr
[0] |= 0x02;
8002 #endif /* defined(OEM_ANDROID) */
8005 net
->hard_header_len
= ETH_HLEN
+ dhd
->pub
.hdrlen
;
8006 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
8007 net
->ethtool_ops
= &dhd_ethtool_ops
;
8008 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
8010 #if defined(WL_WIRELESS_EXT)
8011 #if WIRELESS_EXT < 19
8012 net
->get_wireless_stats
= dhd_get_wireless_stats
;
8013 #endif /* WIRELESS_EXT < 19 */
8014 #if WIRELESS_EXT > 12
8015 net
->wireless_handlers
= (struct iw_handler_def
*)&wl_iw_handler_def
;
8016 #endif /* WIRELESS_EXT > 12 */
8017 #endif /* defined(WL_WIRELESS_EXT) */
8019 dhd
->pub
.rxsz
= DBUS_RX_BUFFER_SIZE_DHD(net
);
8021 memcpy(net
->dev_addr
, temp_addr
, ETHER_ADDR_LEN
);
8024 printf("%s\n", dhd_version
);
8027 err
= register_netdev(net
);
8029 err
= register_netdevice(net
);
8032 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net
->name
, err
));
8037 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
8039 argos_register_notifier_init(net
);
8042 printf("Register interface [%s] MAC: "MACDBG
"\n\n", net
->name
,
8043 MAC2STRDBG(net
->dev_addr
));
8045 #if defined(OEM_ANDROID) && defined(SOFTAP) && defined(WL_WIRELESS_EXT) && \
8046 !defined(WL_CFG80211)
8047 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
8050 #if defined(OEM_ANDROID) && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && \
8051 (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))))
8054 up(&dhd_registration_sem
);
8056 if (!dhd_download_fw_on_driverload
) {
8057 dhd_net_bus_devreset(net
, TRUE
);
8059 dhd_net_bus_suspend(net
);
8060 #endif /* BCMLXSDMMC */
8061 wifi_platform_set_power(dhdp
->info
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
8064 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
8068 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
8071 net
->netdev_ops
= NULL
;
8077 dhd_bus_detach(dhd_pub_t
*dhdp
)
8081 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
8084 dhd
= (dhd_info_t
*)dhdp
->info
;
8088 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
8089 * calling stop again will cuase SD read/write errors.
8091 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
8092 /* Stop the protocol module */
8093 dhd_prot_stop(&dhd
->pub
);
8095 /* Stop the bus module */
8097 /* Force Dongle terminated */
8098 if (dhd_wl_ioctl_cmd(dhdp
, WLC_TERMINATED
, NULL
, 0, TRUE
, 0) < 0)
8099 DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
8101 dbus_stop(dhd
->pub
.dbus
);
8102 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
8104 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
8105 #endif /* BCMDBUS */
8108 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
8109 dhd_bus_oob_intr_unregister(dhdp
);
8110 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
8116 void dhd_detach(dhd_pub_t
*dhdp
)
8119 unsigned long flags
;
8120 int timer_valid
= FALSE
;
8125 dhd
= (dhd_info_t
*)dhdp
->info
;
8130 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__
, dhd
->dhd_state
));
8133 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
8134 /* Give sufficient time for threads to start running in case
8135 * dhd_attach() has failed
8140 /* unregister all interfaces, start with virtual */
8141 if (dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) {
8144 /* unregister virtual interfaces */
8145 dhd_net_if_lock_local(dhd
);
8146 for (i
= 1; i
< DHD_MAX_IFS
; i
++) {
8148 dhd_preremove_if(&dhd
->pub
, i
, TRUE
);
8150 dhd_net_if_unlock_local(dhd
);
8152 /* unregister primary interface 0 */
8153 dhd_preremove_if(&dhd
->pub
, 0, TRUE
);
8156 #ifdef PROP_TXSTATUS
8157 #ifdef DHD_WLFC_THREAD
8158 if (dhd
->pub
.wlfc_thread
) {
8159 kthread_stop(dhd
->pub
.wlfc_thread
);
8160 dhdp
->wlfc_thread_go
= TRUE
;
8161 wake_up_interruptible(&dhdp
->wlfc_wqhead
);
8163 dhd
->pub
.wlfc_thread
= NULL
;
8164 #endif /* DHD_WLFC_THREAD */
8165 #endif /* PROP_TXSTATUS */
8167 if (dhd
->dhd_state
& DHD_ATTACH_STATE_PROT_ATTACH
) {
8168 dhd_bus_detach(dhdp
);
8169 #ifdef PCIE_FULL_DONGLE
8170 dhd_flow_rings_deinit(dhdp
);
8174 dhd_prot_detach(dhdp
);
8177 #ifdef ARP_OFFLOAD_SUPPORT
8178 if (dhd_inetaddr_notifier_registered
) {
8179 dhd_inetaddr_notifier_registered
= FALSE
;
8180 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
8182 #endif /* ARP_OFFLOAD_SUPPORT */
8184 if (dhd_inet6addr_notifier_registered
) {
8185 dhd_inet6addr_notifier_registered
= FALSE
;
8186 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
8190 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8191 if (dhd
->dhd_state
& DHD_ATTACH_STATE_EARLYSUSPEND_DONE
) {
8192 if (dhd
->early_suspend
.suspend
)
8193 unregister_early_suspend(&dhd
->early_suspend
);
8195 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
8197 #if defined(WL_WIRELESS_EXT)
8198 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WL_ATTACH
) {
8199 /* Detatch and unlink in the iw */
8202 #endif /* defined(WL_WIRELESS_EXT) */
8204 /* delete all interfaces, start with virtual */
8205 if (dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) {
8209 /* Cleanup virtual interfaces */
8210 dhd_net_if_lock_local(dhd
);
8211 for (i
= 1; i
< DHD_MAX_IFS
; i
++) {
8213 dhd_remove_if(&dhd
->pub
, i
, TRUE
);
8215 dhd_net_if_unlock_local(dhd
);
8217 /* delete primary interface 0 */
8218 ifp
= dhd
->iflist
[0];
8222 dhd_wmf_cleanup(dhdp
, 0);
8223 #endif /* DHD_WMF */
8225 dhd_if_del_sta_list(ifp
);
8227 MFREE(dhd
->pub
.osh
, ifp
, sizeof(*ifp
));
8228 dhd
->iflist
[0] = NULL
;
8232 /* Clear the watchdog timer */
8233 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
8234 timer_valid
= dhd
->wd_timer_valid
;
8235 dhd
->wd_timer_valid
= FALSE
;
8236 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
8238 del_timer_sync(&dhd
->timer
);
8241 if (dhd
->dhd_state
& DHD_ATTACH_STATE_THREADS_CREATED
) {
8242 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
8243 PROC_STOP(&dhd
->thr_wdt_ctl
);
8246 if (dhd
->rxthread_enabled
&& dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
8247 PROC_STOP(&dhd
->thr_rxf_ctl
);
8250 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
8251 PROC_STOP(&dhd
->thr_dpc_ctl
);
8253 #endif /* BCMDBUS */
8254 tasklet_kill(&dhd
->tasklet
);
8259 if (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
) {
8260 wl_cfg80211_detach(NULL
);
8261 dhd_monitor_uninit();
8264 /* free deferred work queue */
8265 dhd_deferred_work_deinit(dhd
->dhd_deferred_wq
);
8266 dhd
->dhd_deferred_wq
= NULL
;
8270 dbus_detach(dhdp
->dbus
);
8273 #endif /* BCMDBUS */
8274 #ifdef SHOW_LOGTRACE
8275 if (dhd
->event_data
.fmts
)
8276 kfree(dhd
->event_data
.fmts
);
8277 if (dhd
->event_data
.raw_fmts
)
8278 kfree(dhd
->event_data
.raw_fmts
);
8279 #endif /* SHOW_LOGTRACE */
8282 if (dhdp
->pno_state
)
8283 dhd_pno_deinit(dhdp
);
8285 #if defined(CONFIG_PM_SLEEP)
8286 if (dhd_pm_notifier_registered
) {
8287 unregister_pm_notifier(&dhd_pm_notifier
);
8288 dhd_pm_notifier_registered
= FALSE
;
8290 #endif /* CONFIG_PM_SLEEP */
8291 #ifdef DEBUG_CPU_FREQ
8293 free_percpu(dhd
->new_freq
);
8294 dhd
->new_freq
= NULL
;
8295 cpufreq_unregister_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
8297 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) {
8298 DHD_TRACE(("wd wakelock count:%d\n", dhd
->wakelock_wd_counter
));
8299 #ifdef CONFIG_HAS_WAKELOCK
8300 dhd
->wakelock_counter
= 0;
8301 dhd
->wakelock_wd_counter
= 0;
8302 dhd
->wakelock_rx_timeout_enable
= 0;
8303 dhd
->wakelock_ctrl_timeout_enable
= 0;
8304 wake_lock_destroy(&dhd
->wl_wifi
);
8305 wake_lock_destroy(&dhd
->wl_rxwake
);
8306 wake_lock_destroy(&dhd
->wl_ctrlwake
);
8307 wake_lock_destroy(&dhd
->wl_wdwake
);
8308 #ifdef BCMPCIE_OOB_HOST_WAKE
8309 wake_lock_destroy(&dhd
->wl_intrwake
);
8310 #endif /* BCMPCIE_OOB_HOST_WAKE */
8311 #endif /* CONFIG_HAS_WAKELOCK */
8317 #ifdef DHDTCPACK_SUPPRESS
8318 /* This will free all MEM allocated for TCPACK SUPPRESS */
8319 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
8320 #endif /* DHDTCPACK_SUPPRESS */
8321 dhd_conf_detach(dhdp
);
8326 dhd_free(dhd_pub_t
*dhdp
)
8329 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
8333 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
8334 if (dhdp
->reorder_bufs
[i
]) {
8335 reorder_info_t
*ptr
;
8336 uint32 buf_size
= sizeof(struct reorder_info
);
8338 ptr
= dhdp
->reorder_bufs
[i
];
8340 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
8341 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
8342 i
, ptr
->max_idx
, buf_size
));
8344 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
8345 dhdp
->reorder_bufs
[i
] = NULL
;
8349 dhd_sta_pool_fini(dhdp
, DHD_MAX_STA
);
8351 dhd
= (dhd_info_t
*)dhdp
->info
;
8352 if (dhdp
->soc_ram
) {
8353 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
8354 dhdp
->soc_ram
= NULL
;
8357 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
8359 dhd
!= (dhd_info_t
*)dhd_os_prealloc(dhdp
, DHD_PREALLOC_DHD_INFO
, 0, FALSE
))
8360 MFREE(dhd
->pub
.osh
, dhd
, sizeof(*dhd
));
8366 dhd_clear(dhd_pub_t
*dhdp
)
8368 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
8372 #ifdef DHDTCPACK_SUPPRESS
8373 /* Clean up timer/data structure for any remaining/pending packet or timer. */
8374 dhd_tcpack_info_tbl_clean(dhdp
);
8375 #endif /* DHDTCPACK_SUPPRESS */
8376 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
8377 if (dhdp
->reorder_bufs
[i
]) {
8378 reorder_info_t
*ptr
;
8379 uint32 buf_size
= sizeof(struct reorder_info
);
8381 ptr
= dhdp
->reorder_bufs
[i
];
8383 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
8384 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
8385 i
, ptr
->max_idx
, buf_size
));
8387 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
8388 dhdp
->reorder_bufs
[i
] = NULL
;
8392 dhd_sta_pool_clear(dhdp
, DHD_MAX_STA
);
8394 if (dhdp
->soc_ram
) {
8395 MFREE(dhdp
->osh
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
8396 dhdp
->soc_ram
= NULL
;
8402 dhd_module_cleanup(void)
8404 printf("%s: Enter\n", __FUNCTION__
);
8409 dhd_bus_unregister();
8410 #endif /* BCMDBUS */
8412 #if defined(OEM_ANDROID)
8414 #endif /* OEM_ANDROID */
8416 dhd_wifi_platform_unregister_drv();
8417 printf("%s: Exit\n", __FUNCTION__
);
8421 dhd_module_exit(void)
8423 dhd_module_cleanup();
8424 unregister_reboot_notifier(&dhd_reboot_notifier
);
8425 extern_wifi_set_enable(0);
8429 dhd_module_init(void)
8432 int retry
= POWERUP_MAX_RETRY
;
8434 printf("%s: in\n", __FUNCTION__
);
8435 extern_wifi_set_enable(1);
8436 DHD_PERIM_RADIO_INIT();
8438 if (firmware_path
[0] != '\0') {
8439 strncpy(fw_bak_path
, firmware_path
, MOD_PARAM_PATHLEN
);
8440 fw_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
8443 if (nvram_path
[0] != '\0') {
8444 strncpy(nv_bak_path
, nvram_path
, MOD_PARAM_PATHLEN
);
8445 nv_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
8449 err
= dhd_wifi_platform_register_drv();
8451 register_reboot_notifier(&dhd_reboot_notifier
);
8455 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
8456 __FUNCTION__
, retry
));
8457 strncpy(firmware_path
, fw_bak_path
, MOD_PARAM_PATHLEN
);
8458 firmware_path
[MOD_PARAM_PATHLEN
-1] = '\0';
8459 strncpy(nvram_path
, nv_bak_path
, MOD_PARAM_PATHLEN
);
8460 nvram_path
[MOD_PARAM_PATHLEN
-1] = '\0';
8465 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__
));
8467 printf("%s: Exit err=%d\n", __FUNCTION__
, err
);
8472 dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
)
8474 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__
, code
));
8475 if (code
== SYS_RESTART
) {
8477 dhd_module_cleanup();
8487 * hdrlen is space to reserve in pkt headroom for DBUS
8490 dhd_dbus_probe_cb(void *arg
, const char *desc
, uint32 bustype
, uint32 hdrlen
)
8494 dbus_attrib_t attrib
;
8495 dhd_pub_t
*pub
= NULL
;
8497 printf("%s: Enter\n", __FUNCTION__
);
8499 #if defined(MULTIPLE_SUPPLICANT)
8500 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8501 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) == 0) {
8502 DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__
));
8505 DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__
));
8507 mutex_lock(&_dhd_sdio_mutex_lock_
);
8508 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
8511 /* Ask the OS interface part for an OSL handle */
8512 if (!(osh
= osl_attach(NULL
, bustype
, TRUE
))) {
8513 DHD_ERROR(("%s: OSL attach failed\n", __FUNCTION__
));
8518 /* Attach to the dhd/OS interface */
8519 if (!(pub
= dhd_attach(osh
, NULL
/* bus */, hdrlen
))) {
8520 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__
));
8525 /* Ok, finish the attach to the OS network interface */
8526 if (dhd_register_if(pub
, 0, TRUE
) != 0) {
8527 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__
));
8532 pub
->dbus
= dbus_attach(osh
, pub
->rxsz
, DBUS_NRXQ
, DBUS_NTXQ
,
8533 pub
->info
, &dhd_dbus_cbs
, NULL
, NULL
);
8535 dbus_get_attrib(pub
->dbus
, &attrib
);
8536 DHD_ERROR(("DBUS: vid=0x%x pid=0x%x devid=0x%x bustype=0x%x mtu=%d rev=%d\n",
8537 attrib
.vid
, attrib
.pid
, attrib
.devid
, attrib
.bustype
, attrib
.mtu
, attrib
.chiprev
));
8543 /* dhd_conf must be attached after linking dhd to dhd->dbus,
8544 * because dhd_detech will check .info is NULL or not.
8546 if (dhd_conf_attach(pub
) != 0) {
8547 DHD_ERROR(("dhd_conf_attach failed\n"));
8551 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
8552 * This is indeed a hack but we have to make it work properly before we have a better
8555 dhd_update_fw_nv_path(pub
->info
);
8558 pub
->info
->rpc_th
= bcm_rpc_tp_attach(osh
, (void *)pub
->dbus
);
8559 if (!pub
->info
->rpc_th
) {
8560 DHD_ERROR(("%s: bcm_rpc_tp_attach failed\n", __FUNCTION__
));
8565 pub
->info
->rpc_osh
= rpc_osl_attach(osh
);
8566 if (!pub
->info
->rpc_osh
) {
8567 DHD_ERROR(("%s: rpc_osl_attach failed\n", __FUNCTION__
));
8568 bcm_rpc_tp_detach(pub
->info
->rpc_th
);
8569 pub
->info
->rpc_th
= NULL
;
8573 /* Set up the aggregation release timer */
8574 init_timer(&pub
->info
->rpcth_timer
);
8575 pub
->info
->rpcth_timer
.data
= (ulong
)pub
->info
;
8576 pub
->info
->rpcth_timer
.function
= dhd_rpcth_watchdog
;
8577 pub
->info
->rpcth_timer_active
= FALSE
;
8579 bcm_rpc_tp_register_cb(pub
->info
->rpc_th
, NULL
, pub
->info
,
8580 dbus_rpcth_rx_pkt
, pub
->info
, pub
->info
->rpc_osh
);
8581 #endif /* BCM_FD_AGGR */
8586 #if defined(MULTIPLE_SUPPLICANT)
8587 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8588 mutex_unlock(&_dhd_sdio_mutex_lock_
);
8589 DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__
));
8590 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
8593 printf("%s: Exit\n", __FUNCTION__
);
8594 /* This is passed to dhd_dbus_disconnect_cb */
8597 /* Release resources in reverse order */
8605 #if defined(MULTIPLE_SUPPLICANT)
8606 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8607 mutex_unlock(&_dhd_sdio_mutex_lock_
);
8608 DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__
));
8609 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
8613 printf("%s: Exit\n", __FUNCTION__
);
8618 dhd_dbus_disconnect_cb(void *arg
)
8620 dhd_info_t
*dhd
= (dhd_info_t
*)arg
;
8624 printf("%s: Enter\n", __FUNCTION__
);
8629 #if defined(MULTIPLE_SUPPLICANT)
8630 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8631 if (mutex_is_locked(&_dhd_sdio_mutex_lock_
) == 0) {
8632 DHD_ERROR(("%s : no mutex held. set lock\n", __FUNCTION__
));
8635 DHD_ERROR(("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__
));
8637 mutex_lock(&_dhd_sdio_mutex_lock_
);
8638 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
8644 del_timer_sync(&dhd
->rpcth_timer
);
8645 bcm_rpc_tp_deregister_cb(dhd
->rpc_th
);
8646 rpc_osl_detach(dhd
->rpc_osh
);
8647 bcm_rpc_tp_detach(dhd
->rpc_th
);
8652 if (MALLOCED(osh
)) {
8653 DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__
, MALLOCED(osh
)));
8657 #if defined(MULTIPLE_SUPPLICANT)
8658 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
8659 mutex_unlock(&_dhd_sdio_mutex_lock_
);
8660 DHD_ERROR(("%s : the lock is released.\n", __FUNCTION__
));
8661 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
8663 printf("%s: Exit\n", __FUNCTION__
);
8665 #endif /* BCMDBUS */
8667 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
8668 #if defined(CONFIG_DEFERRED_INITCALLS)
8669 deferred_module_init(dhd_module_init
);
8670 #elif defined(USE_LATE_INITCALL_SYNC)
8671 late_initcall_sync(dhd_module_init
);
8673 late_initcall(dhd_module_init
);
8674 #endif /* USE_LATE_INITCALL_SYNC */
8676 module_init(dhd_module_init
);
8677 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
8679 module_exit(dhd_module_exit
);
8682 * OS specific functions required to implement DHD driver in OS independent way
8685 dhd_os_proto_block(dhd_pub_t
*pub
)
8687 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
8690 DHD_PERIM_UNLOCK(pub
);
8692 down(&dhd
->proto_sem
);
8694 DHD_PERIM_LOCK(pub
);
8702 dhd_os_proto_unblock(dhd_pub_t
*pub
)
8704 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
8707 up(&dhd
->proto_sem
);
8715 dhd_os_get_ioctl_resp_timeout(void)
8717 return ((unsigned int)dhd_ioctl_timeout_msec
);
8721 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec
)
8723 dhd_ioctl_timeout_msec
= (int)timeout_msec
;
8727 dhd_os_ioctl_resp_wait(dhd_pub_t
*pub
, uint
*condition
, bool *pending
)
8729 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
8732 /* Convert timeout in millsecond to jiffies */
8733 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8734 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
8736 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
8739 DHD_PERIM_UNLOCK(pub
);
8741 timeout
= wait_event_timeout(dhd
->ioctl_resp_wait
, (*condition
), timeout
);
8743 DHD_PERIM_LOCK(pub
);
8749 dhd_os_ioctl_resp_wake(dhd_pub_t
*pub
)
8751 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8753 wake_up(&dhd
->ioctl_resp_wait
);
8758 dhd_os_d3ack_wait(dhd_pub_t
*pub
, uint
*condition
, bool *pending
)
8760 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
8763 /* Convert timeout in millsecond to jiffies */
8764 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8765 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
8767 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
8770 DHD_PERIM_UNLOCK(pub
);
8772 timeout
= wait_event_timeout(dhd
->d3ack_wait
, (*condition
), timeout
);
8774 DHD_PERIM_LOCK(pub
);
8780 dhd_os_d3ack_wake(dhd_pub_t
*pub
)
8782 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8784 wake_up(&dhd
->d3ack_wait
);
8789 dhd_os_wd_timer_extend(void *bus
, bool extend
)
8792 dhd_pub_t
*pub
= bus
;
8793 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
8796 dhd_os_wd_timer(bus
, WATCHDOG_EXTEND_INTERVAL
);
8798 dhd_os_wd_timer(bus
, dhd
->default_wd_interval
);
8799 #endif /* !BCMDBUS */
8804 dhd_os_wd_timer(void *bus
, uint wdtick
)
8807 dhd_pub_t
*pub
= bus
;
8808 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
8809 unsigned long flags
;
8811 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
8814 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
8818 DHD_GENERAL_LOCK(pub
, flags
);
8820 /* don't start the wd until fw is loaded */
8821 if (pub
->busstate
== DHD_BUS_DOWN
) {
8822 DHD_GENERAL_UNLOCK(pub
, flags
);
8824 DHD_OS_WD_WAKE_UNLOCK(pub
);
8828 /* Totally stop the timer */
8829 if (!wdtick
&& dhd
->wd_timer_valid
== TRUE
) {
8830 dhd
->wd_timer_valid
= FALSE
;
8831 DHD_GENERAL_UNLOCK(pub
, flags
);
8832 del_timer_sync(&dhd
->timer
);
8833 DHD_OS_WD_WAKE_UNLOCK(pub
);
8838 DHD_OS_WD_WAKE_LOCK(pub
);
8839 dhd_watchdog_ms
= (uint
)wdtick
;
8840 /* Re arm the timer, at last watchdog period */
8841 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
8842 dhd
->wd_timer_valid
= TRUE
;
8844 DHD_GENERAL_UNLOCK(pub
, flags
);
8845 #endif /* BCMDBUS */
8849 dhd_os_open_image(char *filename
)
8853 fp
= filp_open(filename
, O_RDONLY
, 0);
8855 * 2.6.11 (FC4) supports filp_open() but later revs don't?
8857 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
8867 dhd_os_get_image_block(char *buf
, int len
, void *image
)
8869 struct file
*fp
= (struct file
*)image
;
8875 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, len
);
8883 dhd_os_close_image(void *image
)
8886 filp_close((struct file
*)image
, NULL
);
8890 dhd_os_sdlock(dhd_pub_t
*pub
)
8894 dhd
= (dhd_info_t
*)(pub
->info
);
8897 if (dhd_dpc_prio
>= 0)
8900 spin_lock_bh(&dhd
->sdlock
);
8902 spin_lock_bh(&dhd
->sdlock
);
8903 #endif /* BCMDBUS */
8907 dhd_os_sdunlock(dhd_pub_t
*pub
)
8911 dhd
= (dhd_info_t
*)(pub
->info
);
8914 if (dhd_dpc_prio
>= 0)
8917 spin_unlock_bh(&dhd
->sdlock
);
8919 spin_unlock_bh(&dhd
->sdlock
);
8920 #endif /* BCMDBUS */
8924 dhd_os_sdlock_txq(dhd_pub_t
*pub
)
8928 dhd
= (dhd_info_t
*)(pub
->info
);
8930 spin_lock_irqsave(&dhd
->txqlock
, dhd
->txqlock_flags
);
8932 spin_lock_bh(&dhd
->txqlock
);
8937 dhd_os_sdunlock_txq(dhd_pub_t
*pub
)
8941 dhd
= (dhd_info_t
*)(pub
->info
);
8943 spin_unlock_irqrestore(&dhd
->txqlock
, dhd
->txqlock_flags
);
8945 spin_unlock_bh(&dhd
->txqlock
);
8950 dhd_os_sdlock_rxq(dhd_pub_t
*pub
)
8955 dhd_os_sdunlock_rxq(dhd_pub_t
*pub
)
8960 dhd_os_rxflock(dhd_pub_t
*pub
)
8964 dhd
= (dhd_info_t
*)(pub
->info
);
8965 spin_lock_bh(&dhd
->rxf_lock
);
8970 dhd_os_rxfunlock(dhd_pub_t
*pub
)
8974 dhd
= (dhd_info_t
*)(pub
->info
);
8975 spin_unlock_bh(&dhd
->rxf_lock
);
8978 #ifdef DHDTCPACK_SUPPRESS
8980 dhd_os_tcpacklock(dhd_pub_t
*pub
)
8983 unsigned long flags
= 0;
8985 dhd
= (dhd_info_t
*)(pub
->info
);
8988 spin_lock_irqsave(&dhd
->tcpack_lock
, flags
);
8995 dhd_os_tcpackunlock(dhd_pub_t
*pub
, unsigned long flags
)
9000 dhd
= (dhd_info_t
*)(pub
->info
);
9003 spin_unlock_irqrestore(&dhd
->tcpack_lock
, flags
);
9006 #endif /* DHDTCPACK_SUPPRESS */
9008 uint8
* dhd_os_prealloc(dhd_pub_t
*dhdpub
, int section
, uint size
, bool kmalloc_if_fail
)
9011 gfp_t flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
9013 buf
= (uint8
*)wifi_platform_prealloc(dhdpub
->info
->adapter
, section
, size
);
9015 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
9016 " size: %dbytes\n", __FUNCTION__
, section
, size
));
9017 if (kmalloc_if_fail
)
9018 buf
= kmalloc(size
, flags
);
9024 void dhd_os_prefree(dhd_pub_t
*dhdpub
, void *addr
, uint size
)
9028 #if defined(WL_WIRELESS_EXT)
9029 struct iw_statistics
*
9030 dhd_get_wireless_stats(struct net_device
*dev
)
9033 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9039 res
= wl_iw_get_wireless_stats(dev
, &dhd
->iw
.wstats
);
9042 return &dhd
->iw
.wstats
;
9046 #endif /* defined(WL_WIRELESS_EXT) */
9049 dhd_wl_host_event(dhd_info_t
*dhd
, int *ifidx
, void *pktdata
,
9050 wl_event_msg_t
*event
, void **data
)
9054 ASSERT(dhd
!= NULL
);
9057 #ifdef SHOW_LOGTRACE
9058 bcmerror
= wl_host_event(&dhd
->pub
, ifidx
, pktdata
, event
, data
, &dhd
->event_data
);
9060 bcmerror
= wl_host_event(&dhd
->pub
, ifidx
, pktdata
, event
, data
, NULL
);
9061 #endif /* SHOW_LOGTRACE */
9063 if (bcmerror
!= BCME_OK
)
9066 #if defined(WL_WIRELESS_EXT)
9067 if (event
->bsscfgidx
== 0) {
9069 * Wireless ext is on primary interface only
9072 ASSERT(dhd
->iflist
[*ifidx
] != NULL
);
9073 ASSERT(dhd
->iflist
[*ifidx
]->net
!= NULL
);
9075 if (dhd
->iflist
[*ifidx
]->net
) {
9076 wl_iw_event(dhd
->iflist
[*ifidx
]->net
, event
, *data
);
9079 #endif /* defined(WL_WIRELESS_EXT) */
9082 ASSERT(dhd
->iflist
[*ifidx
] != NULL
);
9083 ASSERT(dhd
->iflist
[*ifidx
]->net
!= NULL
);
9084 if (dhd
->iflist
[*ifidx
]->net
)
9085 wl_cfg80211_event(dhd
->iflist
[*ifidx
]->net
, event
, *data
);
9086 #endif /* defined(WL_CFG80211) */
9091 /* send up locally generated event */
9093 dhd_sendup_event(dhd_pub_t
*dhdp
, wl_event_msg_t
*event
, void *data
)
9095 switch (ntoh32(event
->event_type
)) {
9097 /* Send up locally generated AMP HCI Events */
9098 case WLC_E_BTA_HCI_EVENT
: {
9099 struct sk_buff
*p
, *skb
;
9101 wl_event_msg_t
*p_bcm_event
;
9110 len
= ntoh32(event
->datalen
);
9111 pktlen
= sizeof(bcm_event_t
) + len
+ 2;
9113 ifidx
= dhd_ifname2idx(dhd
, event
->ifname
);
9115 if ((p
= PKTGET(dhdp
->osh
, pktlen
, FALSE
))) {
9116 ASSERT(ISALIGNED((uintptr
)PKTDATA(dhdp
->osh
, p
), sizeof(uint32
)));
9118 msg
= (bcm_event_t
*) PKTDATA(dhdp
->osh
, p
);
9120 bcopy(&dhdp
->mac
, &msg
->eth
.ether_dhost
, ETHER_ADDR_LEN
);
9121 bcopy(&dhdp
->mac
, &msg
->eth
.ether_shost
, ETHER_ADDR_LEN
);
9122 ETHER_TOGGLE_LOCALADDR(&msg
->eth
.ether_shost
);
9124 msg
->eth
.ether_type
= hton16(ETHER_TYPE_BRCM
);
9126 /* BCM Vendor specific header... */
9127 msg
->bcm_hdr
.subtype
= hton16(BCMILCP_SUBTYPE_VENDOR_LONG
);
9128 msg
->bcm_hdr
.version
= BCMILCP_BCM_SUBTYPEHDR_VERSION
;
9129 bcopy(BRCM_OUI
, &msg
->bcm_hdr
.oui
[0], DOT11_OUI_LEN
);
9131 /* vendor spec header length + pvt data length (private indication
9132 * hdr + actual message itself)
9134 msg
->bcm_hdr
.length
= hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH
+
9135 BCM_MSG_LEN
+ sizeof(wl_event_msg_t
) + (uint16
)len
);
9136 msg
->bcm_hdr
.usr_subtype
= hton16(BCMILCP_BCM_SUBTYPE_EVENT
);
9138 PKTSETLEN(dhdp
->osh
, p
, (sizeof(bcm_event_t
) + len
+ 2));
9140 /* copy wl_event_msg_t into sk_buf */
9142 /* pointer to wl_event_msg_t in sk_buf */
9143 p_bcm_event
= &msg
->event
;
9144 bcopy(event
, p_bcm_event
, sizeof(wl_event_msg_t
));
9146 /* copy hci event into sk_buf */
9147 bcopy(data
, (p_bcm_event
+ 1), len
);
9149 msg
->bcm_hdr
.length
= hton16(sizeof(wl_event_msg_t
) +
9150 ntoh16(msg
->bcm_hdr
.length
));
9151 PKTSETLEN(dhdp
->osh
, p
, (sizeof(bcm_event_t
) + len
+ 2));
9153 ptr
= (char *)(msg
+ 1);
9154 /* Last 2 bytes of the message are 0x00 0x00 to signal that there
9155 * are no ethertypes which are following this
9160 skb
= PKTTONATIVE(dhdp
->osh
, p
);
9164 ifp
= dhd
->iflist
[ifidx
];
9166 ifp
= dhd
->iflist
[0];
9169 skb
->dev
= ifp
->net
;
9170 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
9175 /* Strip header, count, deliver upward */
9176 skb_pull(skb
, ETH_HLEN
);
9178 /* Send the packet */
9179 bcm_object_trace_opr(skb
, BCM_OBJDBG_REMOVE
,
9180 __FUNCTION__
, __LINE__
);
9181 if (in_interrupt()) {
9188 /* Could not allocate a sk_buf */
9189 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__
));
9192 } /* case WLC_E_BTA_HCI_EVENT */
9193 #endif /* WLBTAMP */
9200 #ifdef LOG_INTO_TCPDUMP
9202 dhd_sendup_log(dhd_pub_t
*dhdp
, void *data
, int data_len
)
9204 struct sk_buff
*p
, *skb
;
9211 struct ether_header eth
;
9213 pktlen
= sizeof(eth
) + data_len
;
9216 if ((p
= PKTGET(dhdp
->osh
, pktlen
, FALSE
))) {
9217 ASSERT(ISALIGNED((uintptr
)PKTDATA(dhdp
->osh
, p
), sizeof(uint32
)));
9219 bcopy(&dhdp
->mac
, ð
.ether_dhost
, ETHER_ADDR_LEN
);
9220 bcopy(&dhdp
->mac
, ð
.ether_shost
, ETHER_ADDR_LEN
);
9221 ETHER_TOGGLE_LOCALADDR(ð
.ether_shost
);
9222 eth
.ether_type
= hton16(ETHER_TYPE_BRCM
);
9224 bcopy((void *)ð
, PKTDATA(dhdp
->osh
, p
), sizeof(eth
));
9225 bcopy(data
, PKTDATA(dhdp
->osh
, p
) + sizeof(eth
), data_len
);
9226 skb
= PKTTONATIVE(dhdp
->osh
, p
);
9227 skb_data
= skb
->data
;
9230 ifidx
= dhd_ifname2idx(dhd
, "wlan0");
9231 ifp
= dhd
->iflist
[ifidx
];
9233 ifp
= dhd
->iflist
[0];
9236 skb
->dev
= ifp
->net
;
9237 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
9238 skb
->data
= skb_data
;
9241 /* Strip header, count, deliver upward */
9242 skb_pull(skb
, ETH_HLEN
);
9244 /* Send the packet */
9245 if (in_interrupt()) {
9252 /* Could not allocate a sk_buf */
9253 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__
));
9256 #endif /* LOG_INTO_TCPDUMP */
9258 void dhd_wait_for_event(dhd_pub_t
*dhd
, bool *lockvar
)
9260 #if 0 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
9261 struct dhd_info
*dhdinfo
= dhd
->info
;
9263 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9264 int timeout
= msecs_to_jiffies(IOCTL_RESP_TIMEOUT
);
9266 int timeout
= (IOCTL_RESP_TIMEOUT
/ 1000) * HZ
;
9267 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
9269 dhd_os_sdunlock(dhd
);
9270 wait_event_timeout(dhdinfo
->ctrl_wait
, (*lockvar
== FALSE
), timeout
);
9276 void dhd_wait_event_wakeup(dhd_pub_t
*dhd
)
9278 #if 0 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
9279 struct dhd_info
*dhdinfo
= dhd
->info
;
9280 if (waitqueue_active(&dhdinfo
->ctrl_wait
))
9281 wake_up(&dhdinfo
->ctrl_wait
);
9286 #if defined(BCMPCIE)
9288 dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
)
9291 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9294 /* Issue wl down command before resetting the chip */
9295 if (dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
9296 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__
));
9298 #ifdef PROP_TXSTATUS
9299 if (dhd
->pub
.wlfc_enabled
)
9300 dhd_wlfc_deinit(&dhd
->pub
);
9301 #endif /* PROP_TXSTATUS */
9303 if (dhd
->pub
.pno_state
)
9304 dhd_pno_deinit(&dhd
->pub
);
9309 ret
= dhd_bus_devreset(&dhd
->pub
, flag
);
9311 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__
, ret
));
9320 int net_os_set_suspend_disable(struct net_device
*dev
, int val
)
9322 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9326 ret
= dhd
->pub
.suspend_disable_flag
;
9327 dhd
->pub
.suspend_disable_flag
= val
;
9332 int net_os_set_suspend(struct net_device
*dev
, int val
, int force
)
9335 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9338 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
9339 ret
= dhd_set_suspend(val
, &dhd
->pub
);
9341 ret
= dhd_suspend_resume_helper(dhd
, val
, force
);
9344 wl_cfg80211_update_power_mode(dev
);
9350 int net_os_set_suspend_bcn_li_dtim(struct net_device
*dev
, int val
)
9352 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9355 dhd
->pub
.suspend_bcn_li_dtim
= val
;
9360 #ifdef PKT_FILTER_SUPPORT
9361 int net_os_rxfilter_add_remove(struct net_device
*dev
, int add_remove
, int num
)
9363 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9364 char *filterp
= NULL
;
9368 if (!dhd_master_mode
)
9369 add_remove
= !add_remove
;
9371 if (!dhd
|| (num
== DHD_UNICAST_FILTER_NUM
) ||
9372 (num
== DHD_MDNS_FILTER_NUM
))
9374 if (num
>= dhd
->pub
.pktfilter_count
)
9377 case DHD_BROADCAST_FILTER_NUM
:
9378 filterp
= "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
9381 case DHD_MULTICAST4_FILTER_NUM
:
9382 filterp
= "102 0 0 0 0xFFFFFF 0x01005E";
9385 case DHD_MULTICAST6_FILTER_NUM
:
9386 filterp
= "103 0 0 0 0xFFFF 0x3333";
9395 dhd
->pub
.pktfilter
[num
] = filterp
;
9396 dhd_pktfilter_offload_set(&dhd
->pub
, dhd
->pub
.pktfilter
[num
]);
9397 } else { /* Delete filter */
9398 if (dhd
->pub
.pktfilter
[num
] != NULL
) {
9399 dhd_pktfilter_offload_delete(&dhd
->pub
, filter_id
);
9400 dhd
->pub
.pktfilter
[num
] = NULL
;
9406 int dhd_os_enable_packet_filter(dhd_pub_t
*dhdp
, int val
)
9411 /* Packet filtering is set only if we still in early-suspend and
9412 * we need either to turn it ON or turn it OFF
9413 * We can always turn it OFF in case of early-suspend, but we turn it
9414 * back ON only if suspend_disable_flag was not set
9416 if (dhdp
&& dhdp
->up
) {
9417 if (dhdp
->in_suspend
) {
9418 if (!val
|| (val
&& !dhdp
->suspend_disable_flag
))
9419 dhd_enable_packet_filter(val
, dhdp
);
9425 /* function to enable/disable packet for Network device */
9426 int net_os_enable_packet_filter(struct net_device
*dev
, int val
)
9428 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9430 return dhd_os_enable_packet_filter(&dhd
->pub
, val
);
9432 #endif /* PKT_FILTER_SUPPORT */
9435 dhd_dev_init_ioctl(struct net_device
*dev
)
9437 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9440 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0)
9448 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
9450 dhd_dev_pno_stop_for_ssid(struct net_device
*dev
)
9452 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9454 return (dhd_pno_stop_for_ssid(&dhd
->pub
));
9456 /* Linux wrapper to call common dhd_pno_set_for_ssid */
9458 dhd_dev_pno_set_for_ssid(struct net_device
*dev
, wlc_ssid_t
* ssids_local
, int nssid
,
9459 uint16 scan_fr
, int pno_repeat
, int pno_freq_expo_max
, uint16
*channel_list
, int nchan
)
9461 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9463 return (dhd_pno_set_for_ssid(&dhd
->pub
, ssids_local
, nssid
, scan_fr
,
9464 pno_repeat
, pno_freq_expo_max
, channel_list
, nchan
));
9467 /* Linux wrapper to call common dhd_pno_enable */
9469 dhd_dev_pno_enable(struct net_device
*dev
, int enable
)
9471 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9473 return (dhd_pno_enable(&dhd
->pub
, enable
));
9476 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
9478 dhd_dev_pno_set_for_hotlist(struct net_device
*dev
, wl_pfn_bssid_t
*p_pfn_bssid
,
9479 struct dhd_pno_hotlist_params
*hotlist_params
)
9481 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9482 return (dhd_pno_set_for_hotlist(&dhd
->pub
, p_pfn_bssid
, hotlist_params
));
9484 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
9486 dhd_dev_pno_stop_for_batch(struct net_device
*dev
)
9488 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9489 return (dhd_pno_stop_for_batch(&dhd
->pub
));
9491 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
9493 dhd_dev_pno_set_for_batch(struct net_device
*dev
, struct dhd_pno_batch_params
*batch_params
)
9495 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9496 return (dhd_pno_set_for_batch(&dhd
->pub
, batch_params
));
9498 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
9500 dhd_dev_pno_get_for_batch(struct net_device
*dev
, char *buf
, int bufsize
)
9502 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9503 return (dhd_pno_get_for_batch(&dhd
->pub
, buf
, bufsize
, PNO_STATUS_NORMAL
));
9505 #endif /* PNO_SUPPORT */
9507 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(OEM_ANDROID))
9508 static void dhd_hang_process(void *dhd_info
, void *event_info
, u8 event
)
9511 struct net_device
*dev
;
9513 dhd
= (dhd_info_t
*)dhd_info
;
9514 dev
= dhd
->iflist
[0]->net
;
9520 #if defined(WL_WIRELESS_EXT)
9521 wl_iw_send_priv_event(dev
, "HANG");
9523 #if defined(WL_CFG80211)
9524 wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
9530 int dhd_os_send_hang_message(dhd_pub_t
*dhdp
)
9534 if (!dhdp
->hang_was_sent
) {
9535 dhdp
->hang_was_sent
= 1;
9536 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dhdp
,
9537 DHD_WQ_WORK_HANG_MSG
, dhd_hang_process
, DHD_WORK_PRIORITY_HIGH
);
9543 int net_os_send_hang_message(struct net_device
*dev
)
9545 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9549 /* Report FW problem when enabled */
9550 if (dhd
->pub
.hang_report
) {
9551 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
9552 ret
= dhd_os_send_hang_message(&dhd
->pub
);
9554 ret
= wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
9557 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
9559 /* Enforce bus down to stop any future traffic */
9560 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
9565 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
9568 int dhd_net_wifi_platform_set_power(struct net_device
*dev
, bool on
, unsigned long delay_msec
)
9570 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9571 return wifi_platform_set_power(dhd
->adapter
, on
, delay_msec
);
9574 void dhd_get_customized_country_code(struct net_device
*dev
, char *country_iso_code
,
9575 wl_country_t
*cspec
)
9577 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9578 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
);
9580 #ifdef KEEP_JP_REGREV
9581 if (strncmp(country_iso_code
, "JP", 3) == 0 && strncmp(dhd
->pub
.vars_ccode
, "JP", 3) == 0) {
9582 cspec
->rev
= dhd
->pub
.vars_regrev
;
9584 #endif /* KEEP_JP_REGREV */
9586 void dhd_bus_country_set(struct net_device
*dev
, wl_country_t
*cspec
, bool notify
)
9588 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9589 if (dhd
&& dhd
->pub
.up
) {
9590 memcpy(&dhd
->pub
.dhd_cspec
, cspec
, sizeof(wl_country_t
));
9592 wl_update_wiphybands(NULL
, notify
);
9597 void dhd_bus_band_set(struct net_device
*dev
, uint band
)
9599 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9600 if (dhd
&& dhd
->pub
.up
) {
9602 wl_update_wiphybands(NULL
, true);
9607 int dhd_net_set_fw_path(struct net_device
*dev
, char *fw
)
9609 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9611 if (!fw
|| fw
[0] == '\0')
9614 strncpy(dhd
->fw_path
, fw
, sizeof(dhd
->fw_path
) - 1);
9615 dhd
->fw_path
[sizeof(dhd
->fw_path
)-1] = '\0';
9617 #if defined(OEM_ANDROID) && defined(SOFTAP)
9618 if (strstr(fw
, "apsta") != NULL
) {
9619 DHD_INFO(("GOT APSTA FIRMWARE\n"));
9620 ap_fw_loaded
= TRUE
;
9622 DHD_INFO(("GOT STA FIRMWARE\n"));
9623 ap_fw_loaded
= FALSE
;
9625 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
9629 void dhd_net_if_lock(struct net_device
*dev
)
9631 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9632 dhd_net_if_lock_local(dhd
);
9635 void dhd_net_if_unlock(struct net_device
*dev
)
9637 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9638 dhd_net_if_unlock_local(dhd
);
9641 static void dhd_net_if_lock_local(dhd_info_t
*dhd
)
9643 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9645 mutex_lock(&dhd
->dhd_net_if_mutex
);
9649 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
)
9651 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9653 mutex_unlock(&dhd
->dhd_net_if_mutex
);
9657 static void dhd_suspend_lock(dhd_pub_t
*pub
)
9659 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9660 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9662 mutex_lock(&dhd
->dhd_suspend_mutex
);
9666 static void dhd_suspend_unlock(dhd_pub_t
*pub
)
9668 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(OEM_ANDROID)
9669 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9671 mutex_unlock(&dhd
->dhd_suspend_mutex
);
9675 unsigned long dhd_os_general_spin_lock(dhd_pub_t
*pub
)
9677 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9678 unsigned long flags
= 0;
9681 spin_lock_irqsave(&dhd
->dhd_lock
, flags
);
9686 void dhd_os_general_spin_unlock(dhd_pub_t
*pub
, unsigned long flags
)
9688 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9691 spin_unlock_irqrestore(&dhd
->dhd_lock
, flags
);
9694 /* Linux specific multipurpose spinlock API */
9696 dhd_os_spin_lock_init(osl_t
*osh
)
9698 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
9699 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
9700 /* and this results in kernel asserts in internal builds */
9701 spinlock_t
* lock
= MALLOC(osh
, sizeof(spinlock_t
) + 4);
9703 spin_lock_init(lock
);
9704 return ((void *)lock
);
9707 dhd_os_spin_lock_deinit(osl_t
*osh
, void *lock
)
9709 MFREE(osh
, lock
, sizeof(spinlock_t
) + 4);
9712 dhd_os_spin_lock(void *lock
)
9714 unsigned long flags
= 0;
9717 spin_lock_irqsave((spinlock_t
*)lock
, flags
);
9722 dhd_os_spin_unlock(void *lock
, unsigned long flags
)
9725 spin_unlock_irqrestore((spinlock_t
*)lock
, flags
);
9729 dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
)
9731 return (atomic_read(&dhd
->pend_8021x_cnt
));
9734 #define MAX_WAIT_FOR_8021X_TX 100
9737 dhd_wait_pend8021x(struct net_device
*dev
)
9739 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9740 int timeout
= msecs_to_jiffies(10);
9741 int ntimes
= MAX_WAIT_FOR_8021X_TX
;
9742 int pend
= dhd_get_pend_8021x_cnt(dhd
);
9744 while (ntimes
&& pend
) {
9746 set_current_state(TASK_INTERRUPTIBLE
);
9747 DHD_PERIM_UNLOCK(&dhd
->pub
);
9748 schedule_timeout(timeout
);
9749 DHD_PERIM_LOCK(&dhd
->pub
);
9750 set_current_state(TASK_RUNNING
);
9753 pend
= dhd_get_pend_8021x_cnt(dhd
);
9757 atomic_set(&dhd
->pend_8021x_cnt
, 0);
9758 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__
));
9765 write_to_file(dhd_pub_t
*dhd
, uint8
*buf
, int size
)
9769 mm_segment_t old_fs
;
9772 /* change to KERNEL_DS address limit */
9776 /* open file to write */
9777 fp
= filp_open("/tmp/mem_dump", O_WRONLY
|O_CREAT
, 0640);
9781 printf("%s: open file error\n", __FUNCTION__
);
9786 /* Write buf to file */
9787 fp
->f_op
->write(fp
, buf
, size
, &pos
);
9788 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
9789 fp
->f_op
->fsync(fp
, 0, size
-1, 1);
9791 fp
->f_op
->fsync(fp
, 1);
9792 #endif /* KERNEL_VERSION(3, 1, 0) */
9795 /* free buf before return */
9797 MFREE(dhd
->osh
, buf
, size
);
9799 /* close file before return */
9801 filp_close(fp
, current
->files
);
9802 /* restore previous address limit */
9807 #endif /* DHD_DEBUG */
9809 int dhd_os_wake_lock_timeout(dhd_pub_t
*pub
)
9811 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9812 unsigned long flags
;
9816 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9817 ret
= dhd
->wakelock_rx_timeout_enable
> dhd
->wakelock_ctrl_timeout_enable
?
9818 dhd
->wakelock_rx_timeout_enable
: dhd
->wakelock_ctrl_timeout_enable
;
9819 #ifdef CONFIG_HAS_WAKELOCK
9820 if (dhd
->wakelock_rx_timeout_enable
)
9821 wake_lock_timeout(&dhd
->wl_rxwake
,
9822 msecs_to_jiffies(dhd
->wakelock_rx_timeout_enable
));
9823 if (dhd
->wakelock_ctrl_timeout_enable
)
9824 wake_lock_timeout(&dhd
->wl_ctrlwake
,
9825 msecs_to_jiffies(dhd
->wakelock_ctrl_timeout_enable
));
9827 dhd
->wakelock_rx_timeout_enable
= 0;
9828 dhd
->wakelock_ctrl_timeout_enable
= 0;
9829 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9834 int net_os_wake_lock_timeout(struct net_device
*dev
)
9836 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9840 ret
= dhd_os_wake_lock_timeout(&dhd
->pub
);
9844 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t
*pub
, int val
)
9846 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9847 unsigned long flags
;
9850 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9851 if (val
> dhd
->wakelock_rx_timeout_enable
)
9852 dhd
->wakelock_rx_timeout_enable
= val
;
9853 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9858 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t
*pub
, int val
)
9860 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9861 unsigned long flags
;
9864 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9865 if (val
> dhd
->wakelock_ctrl_timeout_enable
)
9866 dhd
->wakelock_ctrl_timeout_enable
= val
;
9867 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9872 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t
*pub
)
9874 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9875 unsigned long flags
;
9878 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9879 dhd
->wakelock_ctrl_timeout_enable
= 0;
9880 #ifdef CONFIG_HAS_WAKELOCK
9881 if (wake_lock_active(&dhd
->wl_ctrlwake
))
9882 wake_unlock(&dhd
->wl_ctrlwake
);
9884 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9889 int net_os_wake_lock_rx_timeout_enable(struct net_device
*dev
, int val
)
9891 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9895 ret
= dhd_os_wake_lock_rx_timeout_enable(&dhd
->pub
, val
);
9899 int net_os_wake_lock_ctrl_timeout_enable(struct net_device
*dev
, int val
)
9901 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9905 ret
= dhd_os_wake_lock_ctrl_timeout_enable(&dhd
->pub
, val
);
9909 int dhd_os_wake_lock(dhd_pub_t
*pub
)
9911 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9912 unsigned long flags
;
9916 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9918 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
9919 #ifdef CONFIG_HAS_WAKELOCK
9920 wake_lock(&dhd
->wl_wifi
);
9921 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9922 dhd_bus_dev_pm_stay_awake(pub
);
9925 dhd
->wakelock_counter
++;
9926 ret
= dhd
->wakelock_counter
;
9927 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9932 int net_os_wake_lock(struct net_device
*dev
)
9934 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9938 ret
= dhd_os_wake_lock(&dhd
->pub
);
9942 int dhd_os_wake_unlock(dhd_pub_t
*pub
)
9944 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9945 unsigned long flags
;
9948 dhd_os_wake_lock_timeout(pub
);
9950 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9951 if (dhd
->wakelock_counter
> 0) {
9952 dhd
->wakelock_counter
--;
9953 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
9954 #ifdef CONFIG_HAS_WAKELOCK
9955 wake_unlock(&dhd
->wl_wifi
);
9956 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9957 dhd_bus_dev_pm_relax(pub
);
9960 ret
= dhd
->wakelock_counter
;
9962 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9967 int dhd_os_check_wakelock(dhd_pub_t
*pub
)
9969 #if defined(CONFIG_HAS_WAKELOCK) || (0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, \
9975 dhd
= (dhd_info_t
*)(pub
->info
);
9978 #ifdef CONFIG_HAS_WAKELOCK
9979 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9980 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
9981 (wake_lock_active(&dhd
->wl_wdwake
))))
9983 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9984 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
9990 int dhd_os_check_wakelock_all(dhd_pub_t
*pub
)
9992 #if defined(CONFIG_HAS_WAKELOCK) || (0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, \
9998 dhd
= (dhd_info_t
*)(pub
->info
);
10001 #ifdef CONFIG_HAS_WAKELOCK
10002 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
10003 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
10004 wake_lock_active(&dhd
->wl_wdwake
) ||
10005 wake_lock_active(&dhd
->wl_rxwake
) ||
10006 wake_lock_active(&dhd
->wl_ctrlwake
))) {
10009 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
10010 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
10016 int net_os_wake_unlock(struct net_device
*dev
)
10018 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
10022 ret
= dhd_os_wake_unlock(&dhd
->pub
);
10026 int dhd_os_wd_wake_lock(dhd_pub_t
*pub
)
10028 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
10029 unsigned long flags
;
10033 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
10034 #ifdef CONFIG_HAS_WAKELOCK
10035 /* if wakelock_wd_counter was never used : lock it at once */
10036 if (!dhd
->wakelock_wd_counter
)
10037 wake_lock(&dhd
->wl_wdwake
);
10039 dhd
->wakelock_wd_counter
++;
10040 ret
= dhd
->wakelock_wd_counter
;
10041 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
10046 int dhd_os_wd_wake_unlock(dhd_pub_t
*pub
)
10048 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
10049 unsigned long flags
;
10053 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
10054 if (dhd
->wakelock_wd_counter
) {
10055 dhd
->wakelock_wd_counter
= 0;
10056 #ifdef CONFIG_HAS_WAKELOCK
10057 wake_unlock(&dhd
->wl_wdwake
);
10060 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
10065 #ifdef BCMPCIE_OOB_HOST_WAKE
10066 int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
10068 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
10072 #ifdef CONFIG_HAS_WAKELOCK
10073 wake_lock_timeout(&dhd
->wl_intrwake
, msecs_to_jiffies(val
));
10079 int dhd_os_oob_irq_wake_unlock(dhd_pub_t
*pub
)
10081 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
10085 #ifdef CONFIG_HAS_WAKELOCK
10086 /* if wl_intrwake is active, unlock it */
10087 if (wake_lock_active(&dhd
->wl_intrwake
)) {
10088 wake_unlock(&dhd
->wl_intrwake
);
10094 #endif /* BCMPCIE_OOB_HOST_WAKE */
10096 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
10097 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
10099 int dhd_os_wake_lock_waive(dhd_pub_t
*pub
)
10101 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
10102 unsigned long flags
;
10106 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
10107 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
10108 if (dhd
->waive_wakelock
== FALSE
) {
10109 /* record current lock status */
10110 dhd
->wakelock_before_waive
= dhd
->wakelock_counter
;
10111 dhd
->waive_wakelock
= TRUE
;
10113 ret
= dhd
->wakelock_wd_counter
;
10114 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
10119 int dhd_os_wake_lock_restore(dhd_pub_t
*pub
)
10121 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
10122 unsigned long flags
;
10128 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
10129 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
10130 if (!dhd
->waive_wakelock
)
10133 dhd
->waive_wakelock
= FALSE
;
10134 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
10135 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
10136 * the lock in between, do the same by calling wake_unlock or pm_relax
10138 if (dhd
->wakelock_before_waive
== 0 && dhd
->wakelock_counter
> 0) {
10139 #ifdef CONFIG_HAS_WAKELOCK
10140 wake_lock(&dhd
->wl_wifi
);
10141 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
10142 dhd_bus_dev_pm_stay_awake(&dhd
->pub
);
10144 } else if (dhd
->wakelock_before_waive
> 0 && dhd
->wakelock_counter
== 0) {
10145 #ifdef CONFIG_HAS_WAKELOCK
10146 wake_unlock(&dhd
->wl_wifi
);
10147 #elif 0 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
10148 dhd_bus_dev_pm_relax(&dhd
->pub
);
10151 dhd
->wakelock_before_waive
= 0;
10153 ret
= dhd
->wakelock_wd_counter
;
10154 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
10158 bool dhd_os_check_if_up(dhd_pub_t
*pub
)
10165 /* function to collect firmware, chip id and chip version info */
10166 void dhd_set_version_info(dhd_pub_t
*dhdp
, char *fw
)
10170 i
= snprintf(info_string
, sizeof(info_string
),
10171 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR
, fw
);
10172 printf("%s\n", info_string
);
10177 i
= snprintf(&info_string
[i
], sizeof(info_string
) - i
,
10178 "\n Rev %x", dhd_conf_get_chiprev(dhdp
));
10181 int dhd_ioctl_entry_local(struct net_device
*net
, wl_ioctl_t
*ioc
, int cmd
)
10185 dhd_info_t
*dhd
= NULL
;
10187 if (!net
|| !DEV_PRIV(net
)) {
10188 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__
));
10192 dhd
= DHD_DEV_INFO(net
);
10196 ifidx
= dhd_net2idx(dhd
, net
);
10197 if (ifidx
== DHD_BAD_IF
) {
10198 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
10202 DHD_OS_WAKE_LOCK(&dhd
->pub
);
10203 DHD_PERIM_LOCK(&dhd
->pub
);
10205 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, ioc
, ioc
->buf
, ioc
->len
);
10206 dhd_check_hang(net
, &dhd
->pub
, ret
);
10208 DHD_PERIM_UNLOCK(&dhd
->pub
);
10209 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
10214 bool dhd_os_check_hang(dhd_pub_t
*dhdp
, int ifidx
, int ret
)
10216 struct net_device
*net
;
10218 net
= dhd_idx2net(dhdp
, ifidx
);
10220 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__
, ifidx
));
10224 return dhd_check_hang(net
, dhdp
, ret
);
10227 /* Return instance */
10228 int dhd_get_instance(dhd_pub_t
*dhdp
)
10230 return dhdp
->info
->unit
;
10234 #ifdef PROP_TXSTATUS
10236 void dhd_wlfc_plat_init(void *dhd
)
10241 void dhd_wlfc_plat_deinit(void *dhd
)
10246 bool dhd_wlfc_skip_fc(void)
10250 #endif /* PROP_TXSTATUS */
10254 #include <linux/debugfs.h>
10256 extern uint32
dhd_readregl(void *bp
, uint32 addr
);
10257 extern uint32
dhd_writeregl(void *bp
, uint32 addr
, uint32 data
);
10259 typedef struct dhd_dbgfs
{
10260 struct dentry
*debugfs_dir
;
10261 struct dentry
*debugfs_mem
;
10266 dhd_dbgfs_t g_dbgfs
;
10269 dhd_dbg_state_open(struct inode
*inode
, struct file
*file
)
10271 file
->private_data
= inode
->i_private
;
10276 dhd_dbg_state_read(struct file
*file
, char __user
*ubuf
,
10277 size_t count
, loff_t
*ppos
)
10281 loff_t pos
= *ppos
;
10286 if (pos
>= g_dbgfs
.size
|| !count
)
10288 if (count
> g_dbgfs
.size
- pos
)
10289 count
= g_dbgfs
.size
- pos
;
10291 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
10292 tmp
= dhd_readregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3));
10294 ret
= copy_to_user(ubuf
, &tmp
, 4);
10299 *ppos
= pos
+ count
;
10307 dhd_debugfs_write(struct file
*file
, const char __user
*ubuf
, size_t count
, loff_t
*ppos
)
10309 loff_t pos
= *ppos
;
10315 if (pos
>= g_dbgfs
.size
|| !count
)
10317 if (count
> g_dbgfs
.size
- pos
)
10318 count
= g_dbgfs
.size
- pos
;
10320 ret
= copy_from_user(&buf
, ubuf
, sizeof(uint32
));
10324 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
10325 dhd_writeregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3), buf
);
10332 dhd_debugfs_lseek(struct file
*file
, loff_t off
, int whence
)
10341 pos
= file
->f_pos
+ off
;
10344 pos
= g_dbgfs
.size
- off
;
10346 return (pos
< 0 || pos
> g_dbgfs
.size
) ? -EINVAL
: (file
->f_pos
= pos
);
10349 static const struct file_operations dhd_dbg_state_ops
= {
10350 .read
= dhd_dbg_state_read
,
10351 .write
= dhd_debugfs_write
,
10352 .open
= dhd_dbg_state_open
,
10353 .llseek
= dhd_debugfs_lseek
10356 static void dhd_dbg_create(void)
10358 if (g_dbgfs
.debugfs_dir
) {
10359 g_dbgfs
.debugfs_mem
= debugfs_create_file("mem", 0644, g_dbgfs
.debugfs_dir
,
10360 NULL
, &dhd_dbg_state_ops
);
10364 void dhd_dbg_init(dhd_pub_t
*dhdp
)
10368 g_dbgfs
.dhdp
= dhdp
;
10369 g_dbgfs
.size
= 0x20000000; /* Allow access to various cores regs */
10371 g_dbgfs
.debugfs_dir
= debugfs_create_dir("dhd", 0);
10372 if (IS_ERR(g_dbgfs
.debugfs_dir
)) {
10373 err
= PTR_ERR(g_dbgfs
.debugfs_dir
);
10374 g_dbgfs
.debugfs_dir
= NULL
;
10383 void dhd_dbg_remove(void)
10385 debugfs_remove(g_dbgfs
.debugfs_mem
);
10386 debugfs_remove(g_dbgfs
.debugfs_dir
);
10388 bzero((unsigned char *) &g_dbgfs
, sizeof(g_dbgfs
));
10391 #endif /* ifdef BCMDBGFS */
10393 #ifdef WLMEDIA_HTSF
10396 void dhd_htsf_addtxts(dhd_pub_t
*dhdp
, void *pktbuf
)
10398 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
10399 struct sk_buff
*skb
;
10401 uint16 dport
= 0, oldmagic
= 0xACAC;
10405 /* timestamp packet */
10407 p1
= (char*) PKTDATA(dhdp
->osh
, pktbuf
);
10409 if (PKTLEN(dhdp
->osh
, pktbuf
) > HTSF_MINLEN
) {
10410 /* memcpy(&proto, p1+26, 4); */
10411 memcpy(&dport
, p1
+40, 2);
10412 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
10413 dport
= ntoh16(dport
);
10416 /* timestamp only if icmp or udb iperf with port 5555 */
10417 /* if (proto == 17 && dport == tsport) { */
10418 if (dport
>= tsport
&& dport
<= tsport
+ 20) {
10420 skb
= (struct sk_buff
*) pktbuf
;
10422 htsf
= dhd_get_htsf(dhd
, 0);
10423 memset(skb
->data
+ 44, 0, 2); /* clear checksum */
10424 memcpy(skb
->data
+82, &oldmagic
, 2);
10425 memcpy(skb
->data
+84, &htsf
, 4);
10427 memset(&ts
, 0, sizeof(htsfts_t
));
10428 ts
.magic
= HTSFMAGIC
;
10429 ts
.prio
= PKTPRIO(pktbuf
);
10430 ts
.seqnum
= htsf_seqnum
++;
10431 ts
.c10
= get_cycles();
10433 ts
.endmagic
= HTSFENDMAGIC
;
10435 memcpy(skb
->data
+ HTSF_HOSTOFFSET
, &ts
, sizeof(ts
));
10439 static void dhd_dump_htsfhisto(histo_t
*his
, char *s
)
10441 int pktcnt
= 0, curval
= 0, i
;
10442 for (i
= 0; i
< (NUMBIN
-2); i
++) {
10444 printf("%d ", his
->bin
[i
]);
10445 pktcnt
+= his
->bin
[i
];
10447 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his
->bin
[NUMBIN
-2], pktcnt
,
10448 his
->bin
[NUMBIN
-1], s
);
10452 void sorttobin(int value
, histo_t
*histo
)
10457 histo
->bin
[NUMBIN
-1]++;
10460 if (value
> histo
->bin
[NUMBIN
-2]) /* store the max value */
10461 histo
->bin
[NUMBIN
-2] = value
;
10463 for (i
= 0; i
< (NUMBIN
-2); i
++) {
10464 binval
+= 500; /* 500m s bins */
10465 if (value
<= binval
) {
10470 histo
->bin
[NUMBIN
-3]++;
10474 void dhd_htsf_addrxts(dhd_pub_t
*dhdp
, void *pktbuf
)
10476 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
10477 struct sk_buff
*skb
;
10480 int d1
, d2
, d3
, end2end
;
10484 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
10485 p1
= (char*)PKTDATA(dhdp
->osh
, pktbuf
);
10487 if (PKTLEN(osh
, pktbuf
) > HTSF_MINLEN
) {
10488 memcpy(&old_magic
, p1
+78, 2);
10489 htsf_ts
= (htsfts_t
*) (p1
+ HTSF_HOSTOFFSET
- 4);
10494 if (htsf_ts
->magic
== HTSFMAGIC
) {
10495 htsf_ts
->tE0
= dhd_get_htsf(dhd
, 0);
10496 htsf_ts
->cE0
= get_cycles();
10499 if (old_magic
== 0xACAC) {
10502 htsf
= dhd_get_htsf(dhd
, 0);
10503 memcpy(skb
->data
+92, &htsf
, sizeof(uint32
));
10505 memcpy(&ts
[tsidx
].t1
, skb
->data
+80, 16);
10507 d1
= ts
[tsidx
].t2
- ts
[tsidx
].t1
;
10508 d2
= ts
[tsidx
].t3
- ts
[tsidx
].t2
;
10509 d3
= ts
[tsidx
].t4
- ts
[tsidx
].t3
;
10510 end2end
= ts
[tsidx
].t4
- ts
[tsidx
].t1
;
10512 sorttobin(d1
, &vi_d1
);
10513 sorttobin(d2
, &vi_d2
);
10514 sorttobin(d3
, &vi_d3
);
10515 sorttobin(end2end
, &vi_d4
);
10517 if (end2end
> 0 && end2end
> maxdelay
) {
10518 maxdelay
= end2end
;
10519 maxdelaypktno
= tspktcnt
;
10520 memcpy(&maxdelayts
, &ts
[tsidx
], 16);
10522 if (++tsidx
>= TSMAX
)
10527 uint32
dhd_get_htsf(dhd_info_t
*dhd
, int ifidx
)
10529 uint32 htsf
= 0, cur_cycle
, delta
, delta_us
;
10530 uint32 factor
, baseval
, baseval2
;
10536 if (cur_cycle
> dhd
->htsf
.last_cycle
)
10537 delta
= cur_cycle
- dhd
->htsf
.last_cycle
;
10539 delta
= cur_cycle
+ (0xFFFFFFFF - dhd
->htsf
.last_cycle
);
10542 delta
= delta
>> 4;
10544 if (dhd
->htsf
.coef
) {
10545 /* times ten to get the first digit */
10546 factor
= (dhd
->htsf
.coef
*10 + dhd
->htsf
.coefdec1
);
10547 baseval
= (delta
*10)/factor
;
10548 baseval2
= (delta
*10)/(factor
+1);
10549 delta_us
= (baseval
- (((baseval
- baseval2
) * dhd
->htsf
.coefdec2
)) / 10);
10550 htsf
= (delta_us
<< 4) + dhd
->htsf
.last_tsf
+ HTSF_BUS_DELAY
;
10553 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
10559 static void dhd_dump_latency(void)
10562 int d1
, d2
, d3
, d4
, d5
;
10564 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
10565 for (i
= 0; i
< TSMAX
; i
++) {
10566 d1
= ts
[i
].t2
- ts
[i
].t1
;
10567 d2
= ts
[i
].t3
- ts
[i
].t2
;
10568 d3
= ts
[i
].t4
- ts
[i
].t3
;
10569 d4
= ts
[i
].t4
- ts
[i
].t1
;
10570 d5
= ts
[max
].t4
-ts
[max
].t1
;
10571 if (d4
> d5
&& d4
> 0) {
10574 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
10575 ts
[i
].t1
, ts
[i
].t2
, ts
[i
].t3
, ts
[i
].t4
,
10576 d1
, d2
, d3
, d4
, i
);
10579 printf("current idx = %d \n", tsidx
);
10581 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay
, maxdelaypktno
, tspktcnt
);
10582 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
10583 maxdelayts
.t1
, maxdelayts
.t2
, maxdelayts
.t3
, maxdelayts
.t4
,
10584 maxdelayts
.t2
- maxdelayts
.t1
,
10585 maxdelayts
.t3
- maxdelayts
.t2
,
10586 maxdelayts
.t4
- maxdelayts
.t3
,
10587 maxdelayts
.t4
- maxdelayts
.t1
);
10592 dhd_ioctl_htsf_get(dhd_info_t
*dhd
, int ifidx
)
10604 memset(&ioc
, 0, sizeof(ioc
));
10605 memset(&tsf_buf
, 0, sizeof(tsf_buf
));
10607 ioc
.cmd
= WLC_GET_VAR
;
10609 ioc
.len
= (uint
)sizeof(buf
);
10612 strncpy(buf
, "tsf", sizeof(buf
) - 1);
10613 buf
[sizeof(buf
) - 1] = '\0';
10614 s1
= dhd_get_htsf(dhd
, 0);
10615 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
10617 DHD_ERROR(("%s: tsf is not supported by device\n",
10618 dhd_ifname(&dhd
->pub
, ifidx
)));
10619 return -EOPNOTSUPP
;
10623 s2
= dhd_get_htsf(dhd
, 0);
10625 memcpy(&tsf_buf
, buf
, sizeof(tsf_buf
));
10626 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
10627 tsf_buf
.high
, tsf_buf
.low
, s2
, dhd
->htsf
.coef
, dhd
->htsf
.coefdec1
,
10628 dhd
->htsf
.coefdec2
, s2
-tsf_buf
.low
);
10629 printf("lasttsf=%08X lastcycle=%08X\n", dhd
->htsf
.last_tsf
, dhd
->htsf
.last_cycle
);
10633 void htsf_update(dhd_info_t
*dhd
, void *data
)
10635 static ulong cur_cycle
= 0, prev_cycle
= 0;
10636 uint32 htsf
, tsf_delta
= 0;
10637 uint32 hfactor
= 0, cyc_delta
, dec1
= 0, dec2
, dec3
, tmp
;
10641 /* cycles_t in inlcude/mips/timex.h */
10645 prev_cycle
= cur_cycle
;
10648 if (cur_cycle
> prev_cycle
)
10649 cyc_delta
= cur_cycle
- prev_cycle
;
10653 cyc_delta
= cur_cycle
+ (0xFFFFFFFF - prev_cycle
);
10657 printf(" tsf update ata point er is null \n");
10659 memcpy(&prev_tsf
, &cur_tsf
, sizeof(tsf_t
));
10660 memcpy(&cur_tsf
, data
, sizeof(tsf_t
));
10662 if (cur_tsf
.low
== 0) {
10663 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
10667 if (cur_tsf
.low
> prev_tsf
.low
)
10668 tsf_delta
= (cur_tsf
.low
- prev_tsf
.low
);
10670 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
10671 cur_tsf
.low
, prev_tsf
.low
));
10672 if (cur_tsf
.high
> prev_tsf
.high
) {
10673 tsf_delta
= cur_tsf
.low
+ (0xFFFFFFFF - prev_tsf
.low
);
10674 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta
));
10677 return; /* do not update */
10681 hfactor
= cyc_delta
/ tsf_delta
;
10682 tmp
= (cyc_delta
- (hfactor
* tsf_delta
))*10;
10683 dec1
= tmp
/tsf_delta
;
10684 dec2
= ((tmp
- dec1
*tsf_delta
)*10) / tsf_delta
;
10685 tmp
= (tmp
- (dec1
*tsf_delta
))*10;
10686 dec3
= ((tmp
- dec2
*tsf_delta
)*10) / tsf_delta
;
10705 htsf
= ((cyc_delta
* 10) / (hfactor
*10+dec1
)) + prev_tsf
.low
;
10706 dhd
->htsf
.coef
= hfactor
;
10707 dhd
->htsf
.last_cycle
= cur_cycle
;
10708 dhd
->htsf
.last_tsf
= cur_tsf
.low
;
10709 dhd
->htsf
.coefdec1
= dec1
;
10710 dhd
->htsf
.coefdec2
= dec2
;
10713 htsf
= prev_tsf
.low
;
10717 #endif /* WLMEDIA_HTSF */
10719 #ifdef CUSTOM_SET_CPUCORE
10720 void dhd_set_cpucore(dhd_pub_t
*dhd
, int set
)
10722 int e_dpc
= 0, e_rxf
= 0, retry_set
= 0;
10724 if (!(dhd
->chan_isvht80
)) {
10725 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__
, dhd
->chan_isvht80
));
10732 e_dpc
= set_cpus_allowed_ptr(dhd
->current_dpc
,
10733 cpumask_of(DPC_CPUCORE
));
10735 e_dpc
= set_cpus_allowed_ptr(dhd
->current_dpc
,
10736 cpumask_of(PRIMARY_CPUCORE
));
10738 if (retry_set
++ > MAX_RETRY_SET_CPUCORE
) {
10739 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__
, e_dpc
));
10744 } while (e_dpc
< 0);
10749 e_rxf
= set_cpus_allowed_ptr(dhd
->current_rxf
,
10750 cpumask_of(RXF_CPUCORE
));
10752 e_rxf
= set_cpus_allowed_ptr(dhd
->current_rxf
,
10753 cpumask_of(PRIMARY_CPUCORE
));
10755 if (retry_set
++ > MAX_RETRY_SET_CPUCORE
) {
10756 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__
, e_rxf
));
10761 } while (e_rxf
< 0);
10763 #ifdef DHD_OF_SUPPORT
10764 interrupt_set_cpucore(set
);
10765 #endif /* DHD_OF_SUPPORT */
10766 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__
, set
));
10770 #endif /* CUSTOM_SET_CPUCORE */
10771 #if defined(DHD_TCP_WINSIZE_ADJUST)
10772 static int dhd_port_list_match(int port
)
10775 for (i
= 0; i
< MAX_TARGET_PORTS
; i
++) {
10776 if (target_ports
[i
] == port
)
10781 static void dhd_adjust_tcp_winsize(int op_mode
, struct sk_buff
*skb
)
10783 struct iphdr
*ipheader
;
10784 struct tcphdr
*tcpheader
;
10786 int32 incremental_checksum
;
10788 if (!(op_mode
& DHD_FLAG_HOSTAP_MODE
))
10790 if (skb
== NULL
|| skb
->data
== NULL
)
10793 ipheader
= (struct iphdr
*)(skb
->data
);
10795 if (ipheader
->protocol
== IPPROTO_TCP
) {
10796 tcpheader
= (struct tcphdr
*) skb_pull(skb
, (ipheader
->ihl
)<<2);
10798 win_size
= ntoh16(tcpheader
->window
);
10799 if (win_size
< MIN_TCP_WIN_SIZE
&&
10800 dhd_port_list_match(ntoh16(tcpheader
->dest
))) {
10801 incremental_checksum
= ntoh16(tcpheader
->check
);
10802 incremental_checksum
+= win_size
- win_size
*WIN_SIZE_SCALE_FACTOR
;
10803 if (incremental_checksum
< 0)
10804 --incremental_checksum
;
10805 tcpheader
->window
= hton16(win_size
*WIN_SIZE_SCALE_FACTOR
);
10806 tcpheader
->check
= hton16((unsigned short)incremental_checksum
);
10809 skb_push(skb
, (ipheader
->ihl
)<<2);
10812 #endif /* DHD_TCP_WINSIZE_ADJUST */
10814 /* Get interface specific ap_isolate configuration */
10815 int dhd_get_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
)
10817 dhd_info_t
*dhd
= dhdp
->info
;
10820 ASSERT(idx
< DHD_MAX_IFS
);
10822 ifp
= dhd
->iflist
[idx
];
10824 return ifp
->ap_isolate
;
10827 /* Set interface specific ap_isolate configuration */
10828 int dhd_set_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
10830 dhd_info_t
*dhd
= dhdp
->info
;
10833 ASSERT(idx
< DHD_MAX_IFS
);
10835 ifp
= dhd
->iflist
[idx
];
10837 ifp
->ap_isolate
= val
;
10842 #if defined(DHD_DEBUG)
10843 void dhd_schedule_memdump(dhd_pub_t
*dhdp
, uint8
*buf
, uint32 size
)
10845 dhd_dump_t
*dump
= NULL
;
10846 dump
= MALLOC(dhdp
->osh
, sizeof(dhd_dump_t
));
10848 dump
->bufsize
= size
;
10849 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dump
,
10850 DHD_WQ_WORK_SOC_RAM_DUMP
, dhd_mem_dump
, DHD_WORK_PRIORITY_HIGH
);
10854 dhd_mem_dump(void *handle
, void *event_info
, u8 event
)
10856 dhd_info_t
*dhd
= handle
;
10857 dhd_dump_t
*dump
= event_info
;
10862 if (write_to_file(&dhd
->pub
, dump
->buf
, dump
->bufsize
)) {
10863 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__
));
10865 MFREE(dhd
->pub
.osh
, dump
, sizeof(dhd_dump_t
));
10867 #endif /* DHD_DEBUG */
10870 /* Returns interface specific WMF configuration */
10871 dhd_wmf_t
* dhd_wmf_conf(dhd_pub_t
*dhdp
, uint32 idx
)
10873 dhd_info_t
*dhd
= dhdp
->info
;
10876 ASSERT(idx
< DHD_MAX_IFS
);
10878 ifp
= dhd
->iflist
[idx
];
10881 #endif /* DHD_WMF */
10884 #ifdef DHD_UNICAST_DHCP
10886 dhd_get_pkt_ether_type(dhd_pub_t
*pub
, void *pktbuf
,
10887 uint8
**data_ptr
, int *len_ptr
, uint16
*et_ptr
, bool *snap_ptr
)
10889 uint8
*frame
= PKTDATA(pub
->osh
, pktbuf
);
10890 int length
= PKTLEN(pub
->osh
, pktbuf
);
10891 uint8
*pt
; /* Pointer to type field */
10894 /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
10895 if (length
< ETHER_HDR_LEN
) {
10896 DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
10897 __FUNCTION__
, length
));
10899 } else if (ntoh16_ua(frame
+ ETHER_TYPE_OFFSET
) >= ETHER_TYPE_MIN
) {
10900 /* Frame is Ethernet II */
10901 pt
= frame
+ ETHER_TYPE_OFFSET
;
10902 } else if (length
>= ETHER_HDR_LEN
+ SNAP_HDR_LEN
+ ETHER_TYPE_LEN
&&
10903 !bcmp(llc_snap_hdr
, frame
+ ETHER_HDR_LEN
, SNAP_HDR_LEN
)) {
10904 pt
= frame
+ ETHER_HDR_LEN
+ SNAP_HDR_LEN
;
10907 DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
10912 ethertype
= ntoh16_ua(pt
);
10914 /* Skip VLAN tag, if any */
10915 if (ethertype
== ETHER_TYPE_8021Q
) {
10916 pt
+= VLAN_TAG_LEN
;
10918 if ((pt
+ ETHER_TYPE_LEN
) > (frame
+ length
)) {
10919 DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
10920 __FUNCTION__
, length
));
10924 ethertype
= ntoh16_ua(pt
);
10927 *data_ptr
= pt
+ ETHER_TYPE_LEN
;
10928 *len_ptr
= length
- (pt
+ ETHER_TYPE_LEN
- frame
);
10929 *et_ptr
= ethertype
;
10935 dhd_get_pkt_ip_type(dhd_pub_t
*pub
, void *pktbuf
,
10936 uint8
**data_ptr
, int *len_ptr
, uint8
*prot_ptr
)
10938 struct ipv4_hdr
*iph
; /* IP frame pointer */
10939 int iplen
; /* IP frame length */
10940 uint16 ethertype
, iphdrlen
, ippktlen
;
10945 if (dhd_get_pkt_ether_type(pub
, pktbuf
, (uint8
**)&iph
,
10946 &iplen
, ðertype
, &snap
) != 0)
10949 if (ethertype
!= ETHER_TYPE_IP
) {
10953 /* We support IPv4 only */
10954 if (iplen
< IPV4_OPTIONS_OFFSET
|| (IP_VER(iph
) != IP_VER_4
)) {
10958 /* Header length sanity */
10959 iphdrlen
= IPV4_HLEN(iph
);
10962 * Packet length sanity; sometimes we receive eth-frame size bigger
10963 * than the IP content, which results in a bad tcp chksum
10965 ippktlen
= ntoh16(iph
->tot_len
);
10966 if (ippktlen
< iplen
) {
10968 DHD_INFO(("%s: extra frame length ignored\n",
10971 } else if (ippktlen
> iplen
) {
10972 DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
10973 __FUNCTION__
, ippktlen
- iplen
));
10977 if (iphdrlen
< IPV4_OPTIONS_OFFSET
|| iphdrlen
> iplen
) {
10978 DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
10979 __FUNCTION__
, iphdrlen
, IPV4_OPTIONS_OFFSET
, iplen
));
10984 * We don't handle fragmented IP packets. A first frag is indicated by the MF
10985 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
10987 iph_frag
= ntoh16(iph
->frag
);
10989 if ((iph_frag
& IPV4_FRAG_MORE
) || (iph_frag
& IPV4_FRAG_OFFSET_MASK
) != 0) {
10990 DHD_INFO(("DHD:%s: IP fragment not handled\n",
10995 prot
= IPV4_PROT(iph
);
10997 *data_ptr
= (((uint8
*)iph
) + iphdrlen
);
10998 *len_ptr
= iplen
- iphdrlen
;
11003 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
11005 int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
)
11007 dhd_sta_t
* stainfo
;
11008 uint8
*eh
= PKTDATA(pub
->osh
, pktbuf
);
11017 if (!ETHER_ISMULTI(eh
+ ETHER_DEST_OFFSET
))
11019 if (dhd_get_pkt_ip_type(pub
, pktbuf
, &udph
, &udpl
, &prot
) != 0)
11021 if (prot
!= IP_PROT_UDP
)
11023 /* check frame length, at least UDP_HDR_LEN */
11024 if (udpl
< UDP_HDR_LEN
) {
11025 DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
11029 port
= ntoh16_ua(udph
+ UDP_DEST_PORT_OFFSET
);
11030 /* only process DHCP packets from server to client */
11031 if (port
!= DHCP_PORT_CLIENT
)
11034 dhcp
= udph
+ UDP_HDR_LEN
;
11035 dhcpl
= udpl
- UDP_HDR_LEN
;
11037 if (dhcpl
< DHCP_CHADDR_OFFSET
+ ETHER_ADDR_LEN
) {
11038 DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
11042 /* only process DHCP reply(offer/ack) packets */
11043 if (*(dhcp
+ DHCP_TYPE_OFFSET
) != DHCP_TYPE_REPLY
)
11045 chaddr
= dhcp
+ DHCP_CHADDR_OFFSET
;
11046 stainfo
= dhd_find_sta(pub
, ifidx
, chaddr
);
11048 bcopy(chaddr
, eh
+ ETHER_DEST_OFFSET
, ETHER_ADDR_LEN
);
11053 #endif /* DHD_UNICAST_DHD */
11054 #ifdef DHD_L2_FILTER
11055 /* Check if packet type is ICMP ECHO */
11057 int dhd_l2_filter_block_ping(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
)
11059 struct bcmicmp_hdr
*icmph
;
11063 if (dhd_get_pkt_ip_type(pub
, pktbuf
, (uint8
**)&icmph
, &udpl
, &prot
) != 0)
11065 if (prot
== IP_PROT_ICMP
) {
11066 if (icmph
->type
== ICMP_TYPE_ECHO_REQUEST
)
11071 #endif /* DHD_L2_FILTER */
11073 #if defined(SET_RPS_CPUS) || defined(ARGOS_RPS_CPU_CTL)
11074 int dhd_rps_cpus_enable(struct net_device
*net
, int enable
)
11076 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
11079 char * RPS_CPU_SETBUF
;
11081 ifidx
= dhd_net2idx(dhd
, net
);
11082 if (ifidx
== DHD_BAD_IF
) {
11083 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
11087 if (ifidx
== PRIMARY_INF
) {
11088 if (dhd
->pub
.op_mode
== DHD_FLAG_IBSS_MODE
) {
11089 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__
));
11090 RPS_CPU_SETBUF
= RPS_CPUS_MASK_IBSS
;
11092 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__
));
11093 RPS_CPU_SETBUF
= RPS_CPUS_MASK
;
11095 } else if (ifidx
== VIRTUAL_INF
) {
11096 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__
));
11097 RPS_CPU_SETBUF
= RPS_CPUS_MASK_P2P
;
11099 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__
, ifidx
));
11103 ifp
= dhd
->iflist
[ifidx
];
11106 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__
, RPS_CPU_SETBUF
));
11107 custom_rps_map_set(ifp
->net
->_rx
, RPS_CPU_SETBUF
, strlen(RPS_CPU_SETBUF
));
11109 custom_rps_map_clear(ifp
->net
->_rx
);
11112 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__
));
/*
 * custom_rps_map_set - parse a hex CPU-mask string and install it as the
 * RPS map of an RX queue (mirrors the kernel's sysfs store_rps_map path).
 *
 * Steps visible below:
 *   1. Parse 'buf' into a cpumask with bitmap_parse().
 *   2. Allocate an rps_map sized for the mask's weight (kzalloc).
 *   3. Copy each online CPU from the mask into map->cpus[].
 *   4. Publish the new map with rcu_assign_pointer() under a local
 *      spinlock, retiring the old map via kfree_rcu().
 *   5. Bump/drop the rps_needed static key depending on whether a map
 *      was installed or removed.
 *
 * NOTE(review): extraction dropped the error-return lines after each
 * failure log, the `map->len` assignment loop guard, and the branch
 * that decides between static_key_slow_inc and _dec — the comments
 * here describe only the fragments that remain visible.
 */
11118 int custom_rps_map_set(struct netdev_rx_queue
*queue
, char *buf
, size_t len
)
11120 struct rps_map
*old_map
, *map
;
11121 cpumask_var_t mask
;
/* Serializes concurrent writers of queue->rps_map. */
11123 static DEFINE_SPINLOCK(rps_map_lock
);
11125 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
11127 if (!alloc_cpumask_var(&mask
, GFP_KERNEL
)) {
11128 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__
));
/* Parse the hex mask string into the cpumask. */
11132 err
= bitmap_parse(buf
, len
, cpumask_bits(mask
), nr_cpumask_bits
);
11134 free_cpumask_var(mask
);
11135 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__
));
/* rps_map has a flexible CPU array; size it from the mask weight,
 * rounded up to at least one cache line. */
11139 map
= kzalloc(max_t(unsigned int,
11140 RPS_MAP_SIZE(cpumask_weight(mask
)), L1_CACHE_BYTES
),
11143 free_cpumask_var(mask
);
11144 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__
));
/* Record every CPU from the parsed mask into the map. */
11149 for_each_cpu(cpu
, mask
)
11150 map
->cpus
[i
++] = cpu
;
11157 free_cpumask_var(mask
);
11158 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__
));
/* Publish the new map; readers see it via RCU, writers are
 * serialized by rps_map_lock. */
11162 spin_lock(&rps_map_lock
);
11163 old_map
= rcu_dereference_protected(queue
->rps_map
,
11164 lockdep_is_held(&rps_map_lock
));
11165 rcu_assign_pointer(queue
->rps_map
, map
);
11166 spin_unlock(&rps_map_lock
);
11169 static_key_slow_inc(&rps_needed
);
/* The old map may still be in use by RCU readers; defer the free. */
11171 kfree_rcu(old_map
, rcu
);
11172 static_key_slow_dec(&rps_needed
);
11174 free_cpumask_var(mask
);
11176 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__
, map
->len
));
/*
 * custom_rps_map_clear - remove the RPS map from an RX queue.
 *
 * Detaches queue->rps_map (RCU_INIT_POINTER to NULL) and frees the old
 * map after a grace period via kfree_rcu(), so concurrent RCU readers
 * of the map remain safe.
 *
 * NOTE(review): extraction dropped the braces and the guard around the
 * free (presumably `if (map)`); kfree_rcu on the fragment below is
 * shown unconditionally only because the guard line is not visible.
 */
11180 void custom_rps_map_clear(struct netdev_rx_queue
*queue
)
11182 struct rps_map
*map
;
11184 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
/* Take the current map; '1' asserts the caller holds exclusion. */
11186 map
= rcu_dereference_protected(queue
->rps_map
, 1);
11188 RCU_INIT_POINTER(queue
->rps_map
, NULL
);
/* Deferred free: RCU readers may still hold references. */
11189 kfree_rcu(map
, rcu
);
11190 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__
));
11193 #endif /* SET_RPS_CPUS || ARGOS_RPS_CPU_CTL */
11195 #if defined(ARGOS_CPU_SCHEDULER) && defined(ARGOS_RPS_CPU_CTL)
/*
 * argos_register_notifier_init - register this driver with the Samsung
 * ARGOS throughput-notifier framework for the "WIFI" label.
 *
 * Records 'net' as the primary WLAN netdev and clears the
 * rps-cpus-enabled flag before registering the argos_wifi notifier
 * block; a registration failure is only logged.
 *
 * NOTE(review): the return type/value and braces are missing from this
 * garbled view — presumably returns the registration status.
 */
11197 argos_register_notifier_init(struct net_device
*net
)
11201 DHD_INFO(("DHD: %s: \n", __FUNCTION__
));
/* Remember the primary netdev so the notifier callback can reach
 * its RX queues later. */
11202 argos_rps_ctrl_data
.wlan_primary_netdev
= net
;
11203 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
11205 ret
= sec_argos_register_notifier(&argos_wifi
, "WIFI");
11207 DHD_ERROR(("DHD:Failed to register WIFI notifier , ret=%d\n", ret
));
/*
 * argos_register_notifier_deinit - undo argos_register_notifier_init.
 *
 * Clears the RPS map that the notifier callback may have installed on
 * the primary netdev's first RX queue, unregisters the "WIFI" notifier,
 * and resets the recorded netdev and enabled flag.
 *
 * If no primary netdev was recorded, only an error is logged (the
 * early-exit line itself is missing from this garbled view).
 */
11214 argos_register_notifier_deinit(void)
11216 DHD_INFO(("DHD: %s: \n", __FUNCTION__
));
11218 if (argos_rps_ctrl_data
.wlan_primary_netdev
== NULL
) {
11219 DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__
));
/* Drop any RPS map installed by the throughput callback. */
11222 custom_rps_map_clear(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
);
11224 sec_argos_unregister_notifier(&argos_wifi
, "WIFI");
/* Reset state so a later init starts from a clean slate. */
11225 argos_rps_ctrl_data
.wlan_primary_netdev
= NULL
;
11226 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
/*
 * argos_status_notifier_wifi_cb - ARGOS throughput notifier callback.
 *
 * 'speed' is the throughput level reported by the ARGOS framework.
 * Behavior visible below:
 *   - speed above RPS_TPUT_THRESHOLD and RPS not yet enabled:
 *     install RPS_CPUS_MASK on the primary netdev's first RX queue,
 *     provided CPU RPS_CPUS_WLAN_CORE_ID is online; mark enabled.
 *   - speed at or below the threshold: clear the RPS map and mark
 *     disabled.
 * Both paths require a recorded primary netdev.
 *
 * NOTE(review): braces, the enabled-flag guard on the clear path, and
 * the return statement are missing from this garbled view; the notifier
 * presumably returns NOTIFY_OK, but that cannot be confirmed here.
 */
11232 argos_status_notifier_wifi_cb(struct notifier_block
*notifier
,
11233 unsigned long speed
, void *v
)
11236 DHD_INFO(("DHD: %s: , speed=%ld\n", __FUNCTION__
, speed
));
/* High throughput: steer RX processing onto the dedicated core(s). */
11237 if (speed
> RPS_TPUT_THRESHOLD
&& argos_rps_ctrl_data
.wlan_primary_netdev
!= NULL
&&
11238 argos_rps_ctrl_data
.argos_rps_cpus_enabled
== 0) {
/* Only install the map if the target core is actually online. */
11239 if (cpu_online(RPS_CPUS_WLAN_CORE_ID
)) {
11240 err
= custom_rps_map_set(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
,
11241 RPS_CPUS_MASK
, strlen(RPS_CPUS_MASK
));
11243 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. speed=%ld, error=%d\n",
11244 __FUNCTION__
, speed
, err
));
11246 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 1;
11247 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
11248 __FUNCTION__
, speed
));
11251 DHD_ERROR(("DHD: %s: RPS_Set fail, Core=%d Offline\n", __FUNCTION__
,
11252 RPS_CPUS_WLAN_CORE_ID
));
/* Throughput dropped: tear the RPS map back down. */
11254 } else if (speed
<= RPS_TPUT_THRESHOLD
&& argos_rps_ctrl_data
.wlan_primary_netdev
!= NULL
) {
11255 custom_rps_map_clear(argos_rps_ctrl_data
.wlan_primary_netdev
->_rx
);
11256 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__
, speed
));
11257 argos_rps_ctrl_data
.argos_rps_cpus_enabled
= 0;
11262 #endif /* ARGOS_CPU_SCHEDULER && ARGOS_RPS_CPU_CTL */
11264 void *dhd_get_pub(struct net_device
*dev
)
11266 dhd_info_t
*dhdinfo
= *(dhd_info_t
**)netdev_priv(dev
);
11268 return (void *)&dhdinfo
->pub
;