2 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
3 * Basically selected code segments from usb-cdc.c and usb-rndis.c
5 * $Copyright Open Broadcom Corporation$
7 * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $
14 #include <linux/syscalls.h>
15 #include <event_log.h>
16 #endif /* SHOW_LOGTRACE */
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/random.h>
28 #include <linux/spinlock.h>
29 #include <linux/ethtool.h>
30 #include <linux/fcntl.h>
33 #include <linux/reboot.h>
34 #include <linux/notifier.h>
35 #include <net/addrconf.h>
36 #ifdef ENABLE_ADAPTIVE_SCHED
37 #include <linux/cpufreq.h>
38 #endif /* ENABLE_ADAPTIVE_SCHED */
40 #include <asm/uaccess.h>
41 #include <asm/unaligned.h>
45 #include <bcmendian.h>
48 #include <proto/ethernet.h>
49 #include <proto/bcmevent.h>
50 #include <proto/vlan.h>
52 #include <proto/bcmicmp.h>
54 #include <proto/802.3.h>
56 #include <dngl_stats.h>
57 #include <dhd_linux_wq.h>
59 #include <dhd_linux.h>
60 #ifdef PCIE_FULL_DONGLE
61 #include <dhd_flowring.h>
64 #include <dhd_proto.h>
65 #include <dhd_config.h>
67 #ifdef CONFIG_HAS_WAKELOCK
68 #include <linux/wakelock.h>
71 #include <wl_cfg80211.h>
74 #include <wl_cfgp2p.h>
80 #include <proto/802.11_bta.h>
81 #include <proto/bt_amp_hci.h>
86 #include <linux/compat.h>
90 #include <dhd_wmf_linux.h>
93 #ifdef AMPDU_VO_ENABLE
94 #include <proto/802.1d.h>
95 #endif /* AMPDU_VO_ENABLE */
96 #ifdef DHDTCPACK_SUPPRESS
98 #endif /* DHDTCPACK_SUPPRESS */
100 #if defined(DHD_TCP_WINSIZE_ADJUST)
101 #include <linux/tcp.h>
103 #endif /* DHD_TCP_WINSIZE_ADJUST */
106 #include <linux/time.h>
109 #define HTSF_MINLEN 200 /* min. packet length to timestamp */
110 #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
111 #define TSMAX 1000 /* max no. of timing record kept */
114 static uint32 tsidx
= 0;
115 static uint32 htsf_seqnum
= 0;
117 struct timeval tsync
;
118 static uint32 tsport
= 5010;
120 typedef struct histo_
{
124 #if !ISPOWEROF2(DHD_SDALIGN)
125 #error DHD_SDALIGN is not a power of 2!
128 static histo_t vi_d1
, vi_d2
, vi_d3
, vi_d4
;
129 #endif /* WLMEDIA_HTSF */
131 #if defined(DHD_TCP_WINSIZE_ADJUST)
132 #define MIN_TCP_WIN_SIZE 18000
133 #define WIN_SIZE_SCALE_FACTOR 2
134 #define MAX_TARGET_PORTS 5
136 static uint target_ports
[MAX_TARGET_PORTS
] = {20, 0, 0, 0, 0};
137 static uint dhd_use_tcp_window_size_adjust
= FALSE
;
138 static void dhd_adjust_tcp_winsize(int op_mode
, struct sk_buff
*skb
);
139 #endif /* DHD_TCP_WINSIZE_ADJUST */
143 extern bool ap_cfg_running
;
144 extern bool ap_fw_loaded
;
148 #ifdef ENABLE_ADAPTIVE_SCHED
149 #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
150 #ifndef CUSTOM_CPUFREQ_THRESH
151 #define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
152 #endif /* CUSTOM_CPUFREQ_THRESH */
153 #endif /* ENABLE_ADAPTIVE_SCHED */
155 /* enable HOSTIP cache update from the host side when an eth0:N is up */
156 #define AOE_IP_ALIAS_SUPPORT 1
160 #include <bcm_rpc_tp.h>
163 #include <wlfc_proto.h>
164 #include <dhd_wlfc.h>
167 #include <wl_android.h>
169 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
171 #endif /* CUSTOMER_HW20 && WLANAUDIO */
173 #ifdef CUSTOMER_HW_AMLOGIC
174 #include <linux/amlogic/wifi_dt.h>
177 /* Maximum STA per radio */
178 #define DHD_MAX_STA 32
181 const uint8 wme_fifo2ac
[] = { 0, 1, 2, 3, 1, 1 };
182 const uint8 prio2fifo
[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
183 #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
185 #ifdef ARP_OFFLOAD_SUPPORT
186 void aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
);
187 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
188 unsigned long event
, void *ptr
);
189 static struct notifier_block dhd_inetaddr_notifier
= {
190 .notifier_call
= dhd_inetaddr_notifier_call
192 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
193 * created in kernel notifier link list (with 'next' pointing to itself)
195 static bool dhd_inetaddr_notifier_registered
= FALSE
;
196 #endif /* ARP_OFFLOAD_SUPPORT */
199 static int dhd_inet6addr_notifier_call(struct notifier_block
*this,
200 unsigned long event
, void *ptr
);
201 static struct notifier_block dhd_inet6addr_notifier
= {
202 .notifier_call
= dhd_inet6addr_notifier_call
204 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
205 * created in kernel notifier link list (with 'next' pointing to itself)
207 static bool dhd_inet6addr_notifier_registered
= FALSE
;
210 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
211 #include <linux/suspend.h>
212 volatile bool dhd_mmc_suspend
= FALSE
;
213 DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait
);
214 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
216 #if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
217 extern void dhd_enable_oob_intr(struct dhd_bus
*bus
, bool enable
);
219 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
220 static void dhd_hang_process(void *dhd_info
, void *event_data
, u8 event
);
222 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
223 MODULE_LICENSE("GPL v2");
224 #endif /* LinuxVer */
229 #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
231 #ifndef PROP_TXSTATUS
232 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
234 #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
236 #endif /* BCM_FD_AGGR */
239 extern bool dhd_wlfc_skip_fc(void);
240 extern void dhd_wlfc_plat_init(void *dhd
);
241 extern void dhd_wlfc_plat_deinit(void *dhd
);
242 #endif /* PROP_TXSTATUS */
244 #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
250 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
252 /* Linux wireless extension support */
253 #if defined(WL_WIRELESS_EXT)
255 extern wl_iw_extra_params_t g_wl_iw_params
;
256 #endif /* defined(WL_WIRELESS_EXT) */
258 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
259 #include <linux/earlysuspend.h>
260 #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
262 extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t
*dhd
);
264 #ifdef PKT_FILTER_SUPPORT
265 extern void dhd_pktfilter_offload_set(dhd_pub_t
* dhd
, char *arg
);
266 extern void dhd_pktfilter_offload_enable(dhd_pub_t
* dhd
, char *arg
, int enable
, int master_mode
);
267 extern void dhd_pktfilter_offload_delete(dhd_pub_t
*dhd
, int id
);
272 extern int dhd_read_macaddr(struct dhd_info
*dhd
);
274 static inline int dhd_read_macaddr(struct dhd_info
*dhd
) { return 0; }
277 extern int dhd_write_macaddr(struct ether_addr
*mac
);
279 static inline int dhd_write_macaddr(struct ether_addr
*mac
) { return 0; }
283 #if defined(SOFTAP_TPUT_ENHANCE)
284 extern void dhd_bus_setidletime(dhd_pub_t
*dhdp
, int idle_time
);
285 extern void dhd_bus_getidletime(dhd_pub_t
*dhdp
, int* idle_time
);
286 #endif /* SOFTAP_TPUT_ENHANCE */
290 int custom_rps_map_set(struct netdev_rx_queue
*queue
, char *buf
, size_t len
);
291 void custom_rps_map_clear(struct netdev_rx_queue
*queue
);
292 #ifdef CONFIG_MACH_UNIVERSAL5433
293 #define RPS_CPUS_MASK "10"
295 #define RPS_CPUS_MASK "6"
296 #endif /* CONFIG_MACH_UNIVERSAL5433 */
297 #endif /* SET_RPS_CPUS */
299 static int dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
);
300 static struct notifier_block dhd_reboot_notifier
= {
301 .notifier_call
= dhd_reboot_callback
,
306 typedef struct dhd_if_event
{
307 struct list_head list
;
308 wl_event_data_if_t event
;
309 char name
[IFNAMSIZ
+1];
310 uint8 mac
[ETHER_ADDR_LEN
];
313 /* Interface control information */
314 typedef struct dhd_if
{
315 struct dhd_info
*info
; /* back pointer to dhd_info */
316 /* OS/stack specifics */
317 struct net_device
*net
;
318 int idx
; /* iface idx in dongle */
319 uint subunit
; /* subunit */
320 uint8 mac_addr
[ETHER_ADDR_LEN
]; /* assigned MAC address */
323 uint8 bssidx
; /* bsscfg index for the interface */
324 bool attached
; /* Delayed attachment when unset */
325 bool txflowcontrol
; /* Per interface flow control indicator */
326 char name
[IFNAMSIZ
+1]; /* linux interface name */
327 struct net_device_stats stats
;
329 dhd_wmf_t wmf
; /* per bsscfg wmf setting */
331 #ifdef PCIE_FULL_DONGLE
332 struct list_head sta_list
; /* sll of associated stations */
333 #if !defined(BCM_GMAC3)
334 spinlock_t sta_list_lock
; /* lock for manipulating sll */
335 #endif /* ! BCM_GMAC3 */
336 #endif /* PCIE_FULL_DONGLE */
337 uint32 ap_isolate
; /* ap-isolation settings */
350 uint32 coef
; /* scaling factor */
351 uint32 coefdec1
; /* first decimal */
352 uint32 coefdec2
; /* second decimal */
362 static tstamp_t ts
[TSMAX
];
363 static tstamp_t maxdelayts
;
364 static uint32 maxdelay
= 0, tspktcnt
= 0, maxdelaypktno
= 0;
366 #endif /* WLMEDIA_HTSF */
368 struct ipv6_work_info_t
{
374 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
375 #define MAX_WLANAUDIO_BLACKLIST 4
377 struct wlanaudio_blacklist
{
380 ulong txfail_jiffies
;
381 struct ether_addr blacklist_addr
;
383 #endif /* CUSTOMER_HW20 && WLANAUDIO */
385 /* When Perimeter locks are deployed, any blocking calls must be preceded
386 * with a PERIM UNLOCK and followed by a PERIM LOCK.
387 * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
388 * wait_event_timeout().
391 /* Local private structure (extension of pub) */
392 typedef struct dhd_info
{
393 #if defined(WL_WIRELESS_EXT)
394 wl_iw_t iw
; /* wireless extensions state (must be first) */
395 #endif /* defined(WL_WIRELESS_EXT) */
397 dhd_if_t
*iflist
[DHD_MAX_IFS
]; /* for supporting multiple interfaces */
399 void *adapter
; /* adapter information, interrupt, fw path etc. */
400 char fw_path
[PATH_MAX
]; /* path to firmware image */
401 char nv_path
[PATH_MAX
]; /* path to nvram vars file */
402 char conf_path
[PATH_MAX
]; /* path to config vars file */
404 struct semaphore proto_sem
;
406 spinlock_t wlfc_spinlock
;
408 #endif /* PROP_TXSTATUS */
412 wait_queue_head_t ioctl_resp_wait
;
413 uint32 default_wd_interval
;
415 struct timer_list timer
;
417 struct tasklet_struct tasklet
;
422 struct semaphore sdsem
;
423 tsk_ctl_t thr_dpc_ctl
;
424 tsk_ctl_t thr_wdt_ctl
;
426 tsk_ctl_t thr_rxf_ctl
;
428 bool rxthread_enabled
;
431 #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
432 struct wake_lock wl_wifi
; /* Wifi wakelock */
433 struct wake_lock wl_rxwake
; /* Wifi rx wakelock */
434 struct wake_lock wl_ctrlwake
; /* Wifi ctrl wakelock */
435 struct wake_lock wl_wdwake
; /* Wifi wd wakelock */
436 #ifdef BCMPCIE_OOB_HOST_WAKE
437 struct wake_lock wl_intrwake
; /* Host wakeup wakelock */
438 #endif /* BCMPCIE_OOB_HOST_WAKE */
439 #endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
441 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
442 /* net_device interface lock, prevent race conditions among net_dev interface
443 * calls and wifi_on or wifi_off
445 struct mutex dhd_net_if_mutex
;
446 struct mutex dhd_suspend_mutex
;
448 spinlock_t wakelock_spinlock
;
449 uint32 wakelock_counter
;
450 int wakelock_wd_counter
;
451 int wakelock_rx_timeout_enable
;
452 int wakelock_ctrl_timeout_enable
;
454 uint32 wakelock_before_waive
;
456 /* Thread to issue ioctl for multicast */
457 wait_queue_head_t ctrl_wait
;
458 atomic_t pend_8021x_cnt
;
459 dhd_attach_states_t dhd_state
;
461 dhd_event_log_t event_data
;
462 #endif /* SHOW_LOGTRACE */
464 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
465 struct early_suspend early_suspend
;
466 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
468 #ifdef ARP_OFFLOAD_SUPPORT
470 #endif /* ARP_OFFLOAD_SUPPORT */
474 struct timer_list rpcth_timer
;
475 bool rpcth_timer_active
;
478 #ifdef DHDTCPACK_SUPPRESS
479 spinlock_t tcpack_lock
;
480 #endif /* DHDTCPACK_SUPPRESS */
481 void *dhd_deferred_wq
;
482 #ifdef DEBUG_CPU_FREQ
483 struct notifier_block freq_trans
;
484 int __percpu
*new_freq
;
487 struct notifier_block pm_notifier
;
488 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
489 struct wlanaudio_blacklist wlanaudio_blist
[MAX_WLANAUDIO_BLACKLIST
];
490 bool is_wlanaudio_blist
;
491 #endif /* CUSTOMER_HW20 && WLANAUDIO */
494 #define DHDIF_FWDER(dhdif) FALSE
496 /* Flag to indicate if we should download firmware on driver load */
497 uint dhd_download_fw_on_driverload
= TRUE
;
499 /* Definitions to provide path to the firmware and nvram
500 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
502 char firmware_path
[MOD_PARAM_PATHLEN
];
503 char nvram_path
[MOD_PARAM_PATHLEN
];
504 char config_path
[MOD_PARAM_PATHLEN
];
506 /* backup buffer for firmware and nvram path */
507 char fw_bak_path
[MOD_PARAM_PATHLEN
];
508 char nv_bak_path
[MOD_PARAM_PATHLEN
];
510 /* information string to keep firmware, chip, chip-rev version info visible from log */
511 char info_string
[MOD_PARAM_INFOLEN
];
512 module_param_string(info_string
, info_string
, MOD_PARAM_INFOLEN
, 0444);
514 int disable_proptx
= 0;
515 module_param(op_mode
, int, 0644);
516 extern int wl_control_wl_start(struct net_device
*dev
);
517 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
518 struct semaphore dhd_registration_sem
;
519 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
521 /* deferred handlers */
522 static void dhd_ifadd_event_handler(void *handle
, void *event_info
, u8 event
);
523 static void dhd_ifdel_event_handler(void *handle
, void *event_info
, u8 event
);
524 static void dhd_set_mac_addr_handler(void *handle
, void *event_info
, u8 event
);
525 static void dhd_set_mcast_list_handler(void *handle
, void *event_info
, u8 event
);
527 static void dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
);
531 extern void dhd_netdev_free(struct net_device
*ndev
);
532 #endif /* WL_CFG80211 */
535 module_param(dhd_msg_level
, int, 0);
536 #if defined(WL_WIRELESS_EXT)
537 module_param(iw_msg_level
, int, 0);
540 module_param(wl_dbg_level
, int, 0);
542 module_param(android_msg_level
, int, 0);
543 module_param(config_msg_level
, int, 0);
545 #ifdef ARP_OFFLOAD_SUPPORT
546 /* ARP offload enable */
547 uint dhd_arp_enable
= TRUE
;
548 module_param(dhd_arp_enable
, uint
, 0);
550 /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
552 uint dhd_arp_mode
= ARP_OL_AGENT
| ARP_OL_PEER_AUTO_REPLY
;
554 module_param(dhd_arp_mode
, uint
, 0);
555 #endif /* ARP_OFFLOAD_SUPPORT */
557 /* Disable Prop tx */
558 module_param(disable_proptx
, int, 0644);
559 /* load firmware and/or nvram values from the filesystem */
560 module_param_string(firmware_path
, firmware_path
, MOD_PARAM_PATHLEN
, 0660);
561 module_param_string(nvram_path
, nvram_path
, MOD_PARAM_PATHLEN
, 0660);
562 module_param_string(config_path
, config_path
, MOD_PARAM_PATHLEN
, 0);
564 /* Watchdog interval */
566 /* extend watchdog expiration to 2 seconds when DPC is running */
567 #define WATCHDOG_EXTEND_INTERVAL (2000)
569 uint dhd_watchdog_ms
= CUSTOM_DHD_WATCHDOG_MS
;
570 module_param(dhd_watchdog_ms
, uint
, 0);
572 #if defined(DHD_DEBUG)
573 /* Console poll interval */
574 uint dhd_console_ms
= 0;
575 module_param(dhd_console_ms
, uint
, 0644);
576 #endif /* defined(DHD_DEBUG) */
579 uint dhd_slpauto
= TRUE
;
580 module_param(dhd_slpauto
, uint
, 0);
582 #ifdef PKT_FILTER_SUPPORT
583 /* Global Pkt filter enable control */
584 uint dhd_pkt_filter_enable
= TRUE
;
585 module_param(dhd_pkt_filter_enable
, uint
, 0);
588 /* Pkt filter init setup */
589 uint dhd_pkt_filter_init
= 0;
590 module_param(dhd_pkt_filter_init
, uint
, 0);
592 /* Pkt filter mode control */
593 uint dhd_master_mode
= FALSE
;
594 module_param(dhd_master_mode
, uint
, 0);
596 int dhd_watchdog_prio
= 0;
597 module_param(dhd_watchdog_prio
, int, 0);
599 /* DPC thread priority */
600 int dhd_dpc_prio
= CUSTOM_DPC_PRIO_SETTING
;
601 module_param(dhd_dpc_prio
, int, 0);
603 /* RX frame thread priority */
604 int dhd_rxf_prio
= CUSTOM_RXF_PRIO_SETTING
;
605 module_param(dhd_rxf_prio
, int, 0);
607 int passive_channel_skip
= 0;
608 module_param(passive_channel_skip
, int, (S_IRUSR
|S_IWUSR
));
610 #if !defined(BCMDHDUSB)
611 extern int dhd_dongle_ramsize
;
612 module_param(dhd_dongle_ramsize
, int, 0);
613 #endif /* BCMDHDUSB */
615 /* Keep track of number of instances */
616 static int dhd_found
= 0;
617 static int instance_base
= 0; /* Starting instance number */
618 module_param(instance_base
, int, 0644);
620 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
621 dhd_info_t
*dhd_global
= NULL
;
622 #endif /* CUSTOMER_HW20 && WLANAUDIO */
626 /* DHD Perimiter lock only used in router with bypass forwarding. */
627 #define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
628 #define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
629 #define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
630 #define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
631 #define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
633 #ifdef PCIE_FULL_DONGLE
634 #if defined(BCM_GMAC3)
635 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
636 #define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
637 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
638 #else /* ! BCM_GMAC3 */
639 #define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
640 #define DHD_IF_STA_LIST_LOCK(ifp, flags) \
641 spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
642 #define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
643 spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
644 #endif /* ! BCM_GMAC3 */
645 #endif /* PCIE_FULL_DONGLE */
647 /* Control fw roaming */
649 uint dhd_roam_disable
= 0;
651 uint dhd_roam_disable
= 0;
654 /* Control radio state */
655 uint dhd_radio_up
= 1;
657 /* Network inteface name */
658 char iface_name
[IFNAMSIZ
] = {'\0'};
659 module_param_string(iface_name
, iface_name
, IFNAMSIZ
, 0);
661 /* The following are specific to the SDIO dongle */
663 /* IOCTL response timeout */
664 int dhd_ioctl_timeout_msec
= IOCTL_RESP_TIMEOUT
;
666 /* Idle timeout for backplane clock */
667 int dhd_idletime
= DHD_IDLETIME_TICKS
;
668 module_param(dhd_idletime
, int, 0);
671 uint dhd_poll
= FALSE
;
672 module_param(dhd_poll
, uint
, 0);
675 uint dhd_intr
= TRUE
;
676 module_param(dhd_intr
, uint
, 0);
678 /* SDIO Drive Strength (in milliamps) */
679 uint dhd_sdiod_drive_strength
= 6;
680 module_param(dhd_sdiod_drive_strength
, uint
, 0);
684 extern uint dhd_txbound
;
685 extern uint dhd_rxbound
;
686 module_param(dhd_txbound
, uint
, 0);
687 module_param(dhd_rxbound
, uint
, 0);
689 /* Deferred transmits */
690 extern uint dhd_deferred_tx
;
691 module_param(dhd_deferred_tx
, uint
, 0);
694 extern void dhd_dbg_init(dhd_pub_t
*dhdp
);
695 extern void dhd_dbg_remove(void);
696 #endif /* BCMDBGFS */
702 /* Echo packet generator (pkts/s) */
704 module_param(dhd_pktgen
, uint
, 0);
706 /* Echo packet len (0 => sawtooth, max 2040) */
707 uint dhd_pktgen_len
= 0;
708 module_param(dhd_pktgen_len
, uint
, 0);
711 #if defined(BCMSUP_4WAY_HANDSHAKE)
712 /* Use in dongle supplicant for 4-way handshake */
713 uint dhd_use_idsup
= 0;
714 module_param(dhd_use_idsup
, uint
, 0);
715 #endif /* BCMSUP_4WAY_HANDSHAKE */
717 extern char dhd_version
[];
719 int dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
);
720 static void dhd_net_if_lock_local(dhd_info_t
*dhd
);
721 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
);
722 static void dhd_suspend_lock(dhd_pub_t
*dhdp
);
723 static void dhd_suspend_unlock(dhd_pub_t
*dhdp
);
726 void htsf_update(dhd_info_t
*dhd
, void *data
);
727 tsf_t prev_tsf
, cur_tsf
;
729 uint32
dhd_get_htsf(dhd_info_t
*dhd
, int ifidx
);
730 static int dhd_ioctl_htsf_get(dhd_info_t
*dhd
, int ifidx
);
731 static void dhd_dump_latency(void);
732 static void dhd_htsf_addtxts(dhd_pub_t
*dhdp
, void *pktbuf
);
733 static void dhd_htsf_addrxts(dhd_pub_t
*dhdp
, void *pktbuf
);
734 static void dhd_dump_htsfhisto(histo_t
*his
, char *s
);
735 #endif /* WLMEDIA_HTSF */
737 /* Monitor interface */
738 int dhd_monitor_init(void *dhd_pub
);
739 int dhd_monitor_uninit(void);
742 #if defined(WL_WIRELESS_EXT)
743 struct iw_statistics
*dhd_get_wireless_stats(struct net_device
*dev
);
744 #endif /* defined(WL_WIRELESS_EXT) */
746 static void dhd_dpc(ulong data
);
748 extern int dhd_wait_pend8021x(struct net_device
*dev
);
749 void dhd_os_wd_timer_extend(void *bus
, bool extend
);
753 #error TOE requires BDC
755 static int dhd_toe_get(dhd_info_t
*dhd
, int idx
, uint32
*toe_ol
);
756 static int dhd_toe_set(dhd_info_t
*dhd
, int idx
, uint32 toe_ol
);
759 static int dhd_wl_host_event(dhd_info_t
*dhd
, int *ifidx
, void *pktdata
,
760 wl_event_msg_t
*event_ptr
, void **data_ptr
);
761 #ifdef DHD_UNICAST_DHCP
762 static const uint8 llc_snap_hdr
[SNAP_HDR_LEN
] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
763 static int dhd_get_pkt_ip_type(dhd_pub_t
*dhd
, void *skb
, uint8
**data_ptr
,
764 int *len_ptr
, uint8
*prot_ptr
);
765 static int dhd_get_pkt_ether_type(dhd_pub_t
*dhd
, void *skb
, uint8
**data_ptr
,
766 int *len_ptr
, uint16
*et_ptr
, bool *snap_ptr
);
768 static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
);
769 #endif /* DHD_UNICAST_DHCP */
771 static int dhd_l2_filter_block_ping(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
);
773 #if defined(CONFIG_PM_SLEEP)
774 static int dhd_pm_callback(struct notifier_block
*nfb
, unsigned long action
, void *ignored
)
776 int ret
= NOTIFY_DONE
;
777 bool suspend
= FALSE
;
778 dhd_info_t
*dhdinfo
= (dhd_info_t
*)container_of(nfb
, struct dhd_info
, pm_notifier
);
780 BCM_REFERENCE(dhdinfo
);
782 case PM_HIBERNATION_PREPARE
:
783 case PM_SUSPEND_PREPARE
:
786 case PM_POST_HIBERNATION
:
787 case PM_POST_SUSPEND
:
792 #if defined(SUPPORT_P2P_GO_PS)
795 DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo
->pub
);
796 dhd_wlfc_suspend(&dhdinfo
->pub
);
797 DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo
->pub
);
799 dhd_wlfc_resume(&dhdinfo
->pub
);
801 #endif /* defined(SUPPORT_P2P_GO_PS) */
803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
804 KERNEL_VERSION(2, 6, 39))
805 dhd_mmc_suspend
= suspend
;
812 static struct notifier_block dhd_pm_notifier
= {
813 .notifier_call
= dhd_pm_callback
,
816 /* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
817 * created in kernel notifier link list (with 'next' pointing to itself)
819 static bool dhd_pm_notifier_registered
= FALSE
;
821 extern int register_pm_notifier(struct notifier_block
*nb
);
822 extern int unregister_pm_notifier(struct notifier_block
*nb
);
823 #endif /* CONFIG_PM_SLEEP */
825 /* Request scheduling of the bus rx frame */
826 static void dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
);
827 static void dhd_os_rxflock(dhd_pub_t
*pub
);
828 static void dhd_os_rxfunlock(dhd_pub_t
*pub
);
830 /** priv_link is the link between netdev and the dhdif and dhd_info structs. */
831 typedef struct dhd_dev_priv
{
832 dhd_info_t
* dhd
; /* cached pointer to dhd_info in netdevice priv */
833 dhd_if_t
* ifp
; /* cached pointer to dhd_if in netdevice priv */
834 int ifidx
; /* interface index */
837 #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
838 #define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
839 #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
840 #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
841 #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
843 /** Clear the dhd net_device's private structure. */
845 dhd_dev_priv_clear(struct net_device
* dev
)
847 dhd_dev_priv_t
* dev_priv
;
848 ASSERT(dev
!= (struct net_device
*)NULL
);
849 dev_priv
= DHD_DEV_PRIV(dev
);
850 dev_priv
->dhd
= (dhd_info_t
*)NULL
;
851 dev_priv
->ifp
= (dhd_if_t
*)NULL
;
852 dev_priv
->ifidx
= DHD_BAD_IF
;
855 /** Setup the dhd net_device's private structure. */
857 dhd_dev_priv_save(struct net_device
* dev
, dhd_info_t
* dhd
, dhd_if_t
* ifp
,
860 dhd_dev_priv_t
* dev_priv
;
861 ASSERT(dev
!= (struct net_device
*)NULL
);
862 dev_priv
= DHD_DEV_PRIV(dev
);
865 dev_priv
->ifidx
= ifidx
;
868 #ifdef PCIE_FULL_DONGLE
870 /** Dummy objects are defined with state representing bad|down.
871 * Performance gains from reducing branch conditionals, instruction parallelism,
872 * dual issue, reducing load shadows, avail of larger pipelines.
873 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
874 * is accessed via the dhd_sta_t.
877 /* Dummy dhd_info object */
878 dhd_info_t dhd_info_null
= {
879 #if defined(BCM_GMAC3)
883 .info
= &dhd_info_null
,
884 #ifdef DHDTCPACK_SUPPRESS
885 .tcpack_sup_mode
= TCPACK_SUP_REPLACE
,
886 #endif /* DHDTCPACK_SUPPRESS */
887 .up
= FALSE
, .busstate
= DHD_BUS_DOWN
890 #define DHD_INFO_NULL (&dhd_info_null)
891 #define DHD_PUB_NULL (&dhd_info_null.pub)
893 /* Dummy netdevice object */
894 struct net_device dhd_net_dev_null
= {
895 .reg_state
= NETREG_UNREGISTERED
897 #define DHD_NET_DEV_NULL (&dhd_net_dev_null)
899 /* Dummy dhd_if object */
900 dhd_if_t dhd_if_null
= {
901 #if defined(BCM_GMAC3)
905 .wmf
= { .wmf_enable
= TRUE
},
907 .info
= DHD_INFO_NULL
,
908 .net
= DHD_NET_DEV_NULL
,
911 #define DHD_IF_NULL (&dhd_if_null)
913 #define DHD_STA_NULL ((dhd_sta_t *)NULL)
915 /** Interface STA list management. */
917 /** Fetch the dhd_if object, given the interface index in the dhd. */
918 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
);
920 /** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
921 static void dhd_sta_free(dhd_pub_t
*pub
, dhd_sta_t
*sta
);
922 static dhd_sta_t
* dhd_sta_alloc(dhd_pub_t
* dhdp
);
924 /* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
925 static void dhd_if_del_sta_list(dhd_if_t
* ifp
);
926 static void dhd_if_flush_sta(dhd_if_t
* ifp
);
928 /* Construct/Destruct a sta pool. */
929 static int dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
);
930 static void dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
);
931 static void dhd_sta_pool_clear(dhd_pub_t
*dhdp
, int max_sta
);
934 /* Return interface pointer */
935 static inline dhd_if_t
*dhd_get_ifp(dhd_pub_t
*dhdp
, uint32 ifidx
)
937 ASSERT(ifidx
< DHD_MAX_IFS
);
939 if (ifidx
>= DHD_MAX_IFS
)
942 return dhdp
->info
->iflist
[ifidx
];
945 /** Reset a dhd_sta object and free into the dhd pool. */
947 dhd_sta_free(dhd_pub_t
* dhdp
, dhd_sta_t
* sta
)
951 ASSERT((sta
!= DHD_STA_NULL
) && (sta
->idx
!= ID16_INVALID
));
953 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
954 id16_map_free(dhdp
->staid_allocator
, sta
->idx
);
955 for (prio
= 0; prio
< (int)NUMPRIO
; prio
++)
956 sta
->flowid
[prio
] = FLOWID_INVALID
;
957 sta
->ifp
= DHD_IF_NULL
; /* dummy dhd_if object */
958 sta
->ifidx
= DHD_BAD_IF
;
959 bzero(sta
->ea
.octet
, ETHER_ADDR_LEN
);
960 INIT_LIST_HEAD(&sta
->list
);
961 sta
->idx
= ID16_INVALID
; /* implying free */
964 /** Allocate a dhd_sta object from the dhd pool. */
966 dhd_sta_alloc(dhd_pub_t
* dhdp
)
970 dhd_sta_pool_t
* sta_pool
;
972 ASSERT((dhdp
->staid_allocator
!= NULL
) && (dhdp
->sta_pool
!= NULL
));
974 idx
= id16_map_alloc(dhdp
->staid_allocator
);
975 if (idx
== ID16_INVALID
) {
976 DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__
));
980 sta_pool
= (dhd_sta_pool_t
*)(dhdp
->sta_pool
);
981 sta
= &sta_pool
[idx
];
983 ASSERT((sta
->idx
== ID16_INVALID
) &&
984 (sta
->ifp
== DHD_IF_NULL
) && (sta
->ifidx
== DHD_BAD_IF
));
985 sta
->idx
= idx
; /* implying allocated */
990 /** Delete all STAs in an interface's STA list. */
992 dhd_if_del_sta_list(dhd_if_t
*ifp
)
994 dhd_sta_t
*sta
, *next
;
997 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
999 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1000 #if defined(BCM_GMAC3)
1002 /* Remove sta from WOFA forwarder. */
1003 fwder_deassoc(ifp
->fwdh
, (uint16
*)(sta
->ea
.octet
), (wofa_t
)sta
);
1005 #endif /* BCM_GMAC3 */
1006 list_del(&sta
->list
);
1007 dhd_sta_free(&ifp
->info
->pub
, sta
);
1010 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1015 /** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
1017 dhd_if_flush_sta(dhd_if_t
* ifp
)
1019 #if defined(BCM_GMAC3)
1021 if (ifp
&& (ifp
->fwdh
!= FWDER_NULL
)) {
1022 dhd_sta_t
*sta
, *next
;
1023 unsigned long flags
;
1025 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
1027 list_for_each_entry_safe(sta
, next
, &ifp
->sta_list
, list
) {
1028 /* Remove any sta entry from WOFA forwarder. */
1029 fwder_flush(ifp
->fwdh
, (wofa_t
)sta
);
1032 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
1034 #endif /* BCM_GMAC3 */
1037 /** Construct a pool of dhd_sta_t objects to be used by interfaces. */
1039 dhd_sta_pool_init(dhd_pub_t
*dhdp
, int max_sta
)
1041 int idx
, sta_pool_memsz
;
1043 dhd_sta_pool_t
* sta_pool
;
1044 void * staid_allocator
;
1046 ASSERT(dhdp
!= (dhd_pub_t
*)NULL
);
1047 ASSERT((dhdp
->staid_allocator
== NULL
) && (dhdp
->sta_pool
== NULL
));
1049 /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
1050 staid_allocator
= id16_map_init(dhdp
->osh
, max_sta
, 1);
1051 if (staid_allocator
== NULL
) {
1052 DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__
));
1056 /* Pre allocate a pool of dhd_sta objects (one extra). */
1057 sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
)); /* skip idx 0 */
1058 sta_pool
= (dhd_sta_pool_t
*)MALLOC(dhdp
->osh
, sta_pool_memsz
);
1059 if (sta_pool
== NULL
) {
1060 DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__
));
1061 id16_map_fini(dhdp
->osh
, staid_allocator
);
1065 dhdp
->sta_pool
= sta_pool
;
1066 dhdp
->staid_allocator
= staid_allocator
;
1068 /* Initialize all sta(s) for the pre-allocated free pool. */
1069 bzero((uchar
*)sta_pool
, sta_pool_memsz
);
1070 for (idx
= max_sta
; idx
>= 1; idx
--) { /* skip sta_pool[0] */
1071 sta
= &sta_pool
[idx
];
1072 sta
->idx
= id16_map_alloc(staid_allocator
);
1073 ASSERT(sta
->idx
<= max_sta
);
1075 /* Now place them into the pre-allocated free pool. */
1076 for (idx
= 1; idx
<= max_sta
; idx
++) {
1077 sta
= &sta_pool
[idx
];
1078 dhd_sta_free(dhdp
, sta
);
1084 /** Destruct the pool of dhd_sta_t objects.
1085 * Caller must ensure that no STA objects are currently associated with an if.
1088 dhd_sta_pool_fini(dhd_pub_t
*dhdp
, int max_sta
)
1090 dhd_sta_pool_t
* sta_pool
= (dhd_sta_pool_t
*)dhdp
->sta_pool
;
1094 int sta_pool_memsz
= ((max_sta
+ 1) * sizeof(dhd_sta_t
));
1095 for (idx
= 1; idx
<= max_sta
; idx
++) {
1096 ASSERT(sta_pool
[idx
].ifp
== DHD_IF_NULL
);
1097 ASSERT(sta_pool
[idx
].idx
== ID16_INVALID
);
1099 MFREE(dhdp
->osh
, dhdp
->sta_pool
, sta_pool_memsz
);
1100 dhdp
->sta_pool
= NULL
;
1103 id16_map_fini(dhdp
->osh
, dhdp
->staid_allocator
);
1104 dhdp
->staid_allocator
= NULL
;
/* Clear the pool of dhd_sta_t objects for built-in type driver.
 * Re-initializes (rather than frees) the pool: zeroes all entries, resets
 * the station-id allocator, hands each entry a fresh id, then returns all
 * entries to the free list via dhd_sta_free().
 * NOTE(review): guard-clause returns and braces restored from upstream --
 * verify against the original tree.
 */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, sta_pool_memsz;
	dhd_sta_t *sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
		dhd_sta_free(dhdp, sta);
	}
}
/** Find STA with MAC address ea in an interface's STA list.
 * Returns the matching dhd_sta_t or DHD_STA_NULL. The per-interface list
 * lock is held only for the duration of the scan; the returned pointer is
 * not reference-counted (callers rely on higher-level serialization).
 */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry(sta, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			/* Found: drop the lock before returning the entry. */
			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
			return sta;
		}
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return DHD_STA_NULL;
}
/** Add STA into the interface's STA list.
 * Allocates a dhd_sta_t from the pool, copies the MAC address, links it to
 * the interface, and appends it to the interface STA list under the list
 * lock. Under BCM_GMAC3 the STA is also registered with the WOFA forwarder.
 * Returns the new entry or DHD_STA_NULL on failure.
 * NOTE(review): the sta->ifp / sta->ifidx link assignments fell in an
 * extraction gap and were restored from upstream -- verify.
 */
dhd_sta_t *
dhd_add_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	sta = dhd_sta_alloc((dhd_pub_t *)pub);
	if (sta == DHD_STA_NULL) {
		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);

	/* link the sta and the dhd interface */
	sta->ifp = ifp;
	sta->ifidx = ifidx;
	INIT_LIST_HEAD(&sta->list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_add_tail(&sta->list, &ifp->sta_list);

#if defined(BCM_GMAC3)
	if (ifp->fwdh) {
		ASSERT(ISALIGNED(ea, 2));
		/* Add sta to WOFA forwarder. */
		fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
	}
#endif /* BCM_GMAC3 */

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return sta;
}
/** Delete STA from the interface's STA list.
 * Walks the list with the _safe iterator (entries are removed while
 * iterating), unlinks every entry whose MAC matches ea, deregisters it from
 * the WOFA forwarder under BCM_GMAC3, and returns it to the STA pool.
 */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
#if defined(BCM_GMAC3)
			if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
				ASSERT(ISALIGNED(ea, 2));
				fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
			}
#endif /* BCM_GMAC3 */
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}
/** Add STA if it doesn't exist. Not reentrant.
 * Lookup-then-insert without a lock spanning both steps; callers must
 * serialize (hence "not reentrant").
 */
dhd_sta_t*
dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;

	sta = dhd_find_sta(pub, ifidx, ea);

	if (!sta) {
		/* Add entry */
		sta = dhd_add_sta(pub, ifidx, ea);
	}

	return sta;
}
/* Non-PCIE_FULL_DONGLE builds: STA tracking is not needed, so the whole
 * STA-pool API collapses to no-op stubs with the same signatures.
 */
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
/* External (non-inline) stubs: these two are called from other files. */
dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
void dhd_del_sta(void *pub, int ifidx, void *ea) {}
#endif /* PCIE_FULL_DONGLE */
/* Returns the dhd iflist index corresponding to the bssidx provided by apps.
 * Scans the interface list for a matching bssidx; on a miss the loop runs to
 * completion and DHD_MAX_IFS is returned (callers must treat that as "not
 * found").
 */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_if_t *ifp;
	dhd_info_t *dhd = dhdp->info;
	int i;

	ASSERT(bssidx < DHD_MAX_IFS);
	ASSERT(dhdp);

	for (i = 0; i < DHD_MAX_IFS; i++) {
		ifp = dhd->iflist[i];
		if (ifp && (ifp->bssidx == bssidx)) {
			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
				ifp->name, bssidx, i));
			break;
		}
	}
	return i;
}
/* Enqueue an skb onto the rx-frame ring buffer (dhdp->skbbuf) for the rxf
 * thread. The ring has MAXSKBPEND slots (power of two -- indices wrap with
 * "& (MAXSKBPEND - 1)"). If the store slot is still occupied the consumer
 * has fallen behind: with RXF_DEQUEUE_ON_BUSY the caller is told to retry
 * (BCME_BUSY), otherwise the packet is rejected with an error.
 * NOTE(review): return statements fell in extraction gaps and were restored
 * from upstream (BCME_ERROR / BCME_BUSY / BCME_OK) -- verify.
 */
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;

	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}

	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
#ifdef RXF_DEQUEUE_ON_BUSY
		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		return BCME_BUSY;
#else /* RXF_DEQUEUE_ON_BUSY */
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* removed msleep here, should use wait_event_timeout if we
		 * want to give rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif
		return BCME_ERROR;
#endif /* RXF_DEQUEUE_ON_BUSY */
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);

	return BCME_OK;
}
/* Dequeue the next skb from the rx-frame ring buffer, or NULL if the ring
 * is empty. Counterpart of dhd_rxf_enqueue(); called from the rxf thread.
 */
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;

	dhd_os_rxflock(dhdp);

	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];

	if (skb == NULL) {
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}

	/* Free the slot and advance the consumer index (ring wrap via mask). */
	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);

	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));

	dhd_os_rxfunlock(dhdp);

	return skb;
}
/* Pre/post-processing hook around firmware download: reads the platform MAC
 * before download (prepost == TRUE) and writes it back afterwards.
 * NOTE(review): when CUSTOMER_HW10 is defined, the local 'dhd' is not
 * declared here yet 'dhd' is still referenced below -- presumably that
 * configuration provides 'dhd' some other way; confirm before enabling it.
 */
int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
#ifndef CUSTOMER_HW10
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
#endif /* !CUSTOMER_HW10 */

	if (prepost) { /* pre process */
		dhd_read_macaddr(dhd);
	} else { /* post process */
		dhd_write_macaddr(&dhd->pub.mac);
	}

	return 0;
}
#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
/* Decide whether the ARP whitelist packet filter should be enabled for the
 * current operating mode: TRUE for IBSS, and for P2P GC/GO when the
 * firmware's ARP offload version is 1.
 */
static bool
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
{
	bool _apply = FALSE;
	/* In case of IBSS mode, apply arp pkt filter */
	if (op_mode & DHD_FLAG_IBSS_MODE) {
		_apply = TRUE;
		goto exit;
	}
	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
	if ((dhd->arp_version == 1) &&
		(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
		_apply = TRUE;
		goto exit;
	}

exit:
	return _apply;
}
#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
#if defined(CUSTOM_PLATFORM_NV_TEGRA)
#ifdef PKT_FILTER_SUPPORT
/* Parse the numeric filter mode from 'command' and store it; applied later
 * by dhd_enable_packet_filter_ports().
 */
void
dhd_set_packet_filter_mode(struct net_device *dev, char *command)
{
	dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);

	/* bcm_strtoul advances 'command' past the parsed token. */
	dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
}
/* Parse and apply a port-filter command string of the form
 *   "<action> <port> <port> ..."
 * Actions: PKT_FILTER_PORTS_CLEAR (drop all ports),
 * PKT_FILTER_PORTS_ADD, PKT_FILTER_PORTS_DEL, and
 * PKT_FILTER_PORTS_LOOPBACK (echo the value back if the firmware advertises
 * "pktfltr2" in its capability string). Updates dhdp->pkt_filter_ports[] and
 * the count; returns BCME_OK or an error code.
 * NOTE(review): several guard lines (bad-arg returns, 'else' joins,
 * get_count bookkeeping) fell in extraction gaps and were restored from
 * upstream -- verify.
 */
int
dhd_set_packet_filter_ports(struct net_device *dev, char *command)
{
	int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
	uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
	dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhdi->pub;
	char iovbuf[WLC_IOCTL_SMLEN];

	/* get action */
	action = bcm_strtoul(command, &command, 0);
	if (action > PKT_FILTER_PORTS_MAX)
		return BCME_BADARG;

	if (action == PKT_FILTER_PORTS_LOOPBACK) {
		/* echo the loopback value if port filter is supported else error */
		bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
		error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
		if (error < 0) {
			DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
			return error;
		}

		if (strstr(iovbuf, "pktfltr2"))
			return bcm_strtoul(command, &command, 0);
		else {
			DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
			return BCME_UNSUPPORTED;
		}
	}

	if (action == PKT_FILTER_PORTS_CLEAR) {
		/* action 0 is clear all ports */
		dhdp->pkt_filter_ports_count = 0;
		bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
	}
	else {
		portnum = bcm_strtoul(command, &command, 0);
		if (portnum == 0) {
			/* no ports to add or remove */
			return BCME_BADARG;
		}

		/* get configured ports */
		count = dhdp->pkt_filter_ports_count;
		ports = dhdp->pkt_filter_ports;

		if (action == PKT_FILTER_PORTS_ADD) {
			/* action 1 is add ports */

			/* copy new ports */
			while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
				for (i = 0; i < count; i++) {
					/* duplicate port */
					if (portnum == ports[i])
						break;
				}
				/* append only if the scan did not find a duplicate */
				if (portnum != ports[i])
					ports[count++] = portnum;
				portnum = bcm_strtoul(command, &command, 0);
			}
		} else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
			/* action 2 is remove ports */
			bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
			get_count = count;

			/* for each port to delete, rebuild the list without it */
			while (portnum != 0) {
				count = 0;
				for (i = 0; i < get_count; i++) {
					if (portnum != get_ports[i])
						ports[count++] = get_ports[i];
				}
				get_count = count;
				bcopy(ports, get_ports, count * sizeof(uint16));
				portnum = bcm_strtoul(command, &command, 0);
			}
		}
		dhdp->pkt_filter_ports_count = count;
	}
	return error;
}
/* Push the configured port list and filter mode to the firmware.
 * enable == TRUE: only acts if PKT_FILTER_MODE_PORTS_ONLY is configured;
 * mirrors the configured FORWARD_ON_MATCH (whitelist) vs DISCARD_ON_MATCH
 * (blacklist) choice into the global dhd_master_mode and sends the full
 * port list. enable == FALSE: sends an empty list and restores the default
 * forward-on-match mode. Both the "pkt_filter_ports" and "pkt_filter_mode"
 * iovars are set; failures are logged but not propagated.
 * NOTE(review): 'else' branch joins and early return restored from
 * upstream -- verify.
 */
static void
dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
{
	int error = 0;
	wl_pkt_filter_ports_t *portlist = NULL;
	const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
		+ WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
	char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
	char iovbuf[pkt_filter_ports_buf_len];

	DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
		enable, dhd->in_suspend, dhd->pkt_filter_mode,
		dhd->pkt_filter_ports_count));

	bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
	portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
	portlist->version = WL_PKT_FILTER_PORTS_VERSION;
	portlist->reserved = 0;

	if (enable) {
		if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
			return;

		/* enable port filter */
		dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
		if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
			/* whitelist mode: FORWARD_ON_MATCH */
			dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
		else
			/* blacklist mode: DISCARD_ON_MATCH */
			dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;

		portlist->count = dhd->pkt_filter_ports_count;
		bcopy(dhd->pkt_filter_ports, portlist->ports,
			dhd->pkt_filter_ports_count * sizeof(uint16));
	} else {
		/* disable port filter */
		portlist->count = 0;
		dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
		dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
	}

	DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
		portlist->count));

	/* update ports in the firmware */
	bcm_mkiovar("pkt_filter_ports",
		(char*)portlist,
		(WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
		iovbuf, sizeof(iovbuf));
	error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (error < 0)
		DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));

	/* update the global filter mode in the firmware */
	bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
		sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
	error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (error < 0)
		DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));

	return;
}
#endif /* PKT_FILTER_SUPPORT */
#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
/* Install (offload) every configured packet filter program to the firmware.
 * No-op unless PKT_FILTER_SUPPORT is built in and the global
 * dhd_pkt_filter_enable switch is set.
 */
void dhd_set_packet_filter(dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_TRACE(("%s: enter\n", __FUNCTION__));
	if (dhd_pkt_filter_enable) {
		for (i = 0; i < dhd->pktfilter_count; i++) {
			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
		}
	}
#endif /* PKT_FILTER_SUPPORT */
}
/* Enable (value == 1) or disable (value == 0) all offloaded packet filters.
 * Filters are only enabled in STA mode when no DHCP exchange is in progress;
 * disabling is always allowed. The ARP whitelist filter is skipped when
 * _turn_on_arp_filter() says the current op_mode doesn't need it.
 */
void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
	int i;

	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));

#if defined(CUSTOM_PLATFORM_NV_TEGRA)
	dhd_enable_packet_filter_ports(dhd, value);
#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */

	/* 1 - Enable packet filter, only allow unicast packet to send up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
#endif /* PKT_FILTER_SUPPORT */
}
/* Apply (value != 0) or remove (value == 0) the low-power "suspended"
 * configuration in the firmware: PM mode, packet filters, bcn_li_dtim
 * (DTIM-skip), roam_off and the IPv6 RA filter. Serialized by the suspend
 * lock. Returns 0.
 * NOTE(review): the iovbuf/ret locals, the dhd->up gate and the else-branch
 * joins fell in extraction gaps and were restored from upstream -- verify.
 */
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
#ifndef SUPPORT_PM2_ONLY
	int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
	/* wl_pkt_filter_enable_t	enable_parm; */
	char iovbuf[32];
	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
	uint roamvar = dhd->conf->roam_off_suspend;
	uint nd_ra_filter = 0;
	int ret = 0;

	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
		__FUNCTION__, value, dhd->in_suspend));

	dhd_suspend_lock(dhd);

#ifdef CUSTOM_SET_CPUCORE
	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
	/* set specific cpucore */
	dhd_set_cpucore(dhd, TRUE);
#endif /* CUSTOM_SET_CPUCORE */
#ifndef SUPPORT_PM2_ONLY
	/* A platform-configured PM value overrides the PM_MAX default. */
	if (dhd->conf->pm >= 0)
		power_mode = dhd->conf->pm;
#endif /* SUPPORT_PM2_ONLY */
	if (dhd->up) {
		if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 1;
#endif
			/* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

			/* Enable packet filter, only allow unicast packet to send up */
			dhd_enable_packet_filter(1, dhd);

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packet.
			 */
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));
			if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
				TRUE, 0) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));

			/* Disable firmware roaming during suspend */
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
		} else {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
			power_mode = PM_FAST;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */

			/* restore pre-suspend setting for dtim_skip (0 = no skip) */
			bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
				4, iovbuf, sizeof(iovbuf));

			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			roamvar = dhd_roam_disable;
			bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
			if (FW_SUPPORTED(dhd, ndoe)) {
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
					iovbuf, sizeof(iovbuf));
				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
					sizeof(iovbuf), TRUE, 0)) < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}
/* Common body for early-suspend / late-resume: records the suspend state
 * and, unless suppressed by suspend_disable_flag (force overrides), applies
 * it via dhd_set_suspend(). Runs under wake lock and perimeter lock.
 */
static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
{
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_OS_WAKE_LOCK(dhdp);
	DHD_PERIM_LOCK(dhdp);

	/* Set flag when early suspend was called */
	dhdp->in_suspend = val;
	if ((force || !dhdp->suspend_disable_flag) &&
		dhd_support_sta_mode(dhdp))
	{
		ret = dhd_set_suspend(val, dhdp);
	}

	DHD_PERIM_UNLOCK(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);
	return ret;
}
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
/* Android early-suspend callback: enter the suspended configuration. */
static void dhd_early_suspend(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd)
		dhd_suspend_resume_helper(dhd, 1, 0);
}
/* Android late-resume callback: restore the normal (resumed) configuration. */
static void dhd_late_resume(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd)
		dhd_suspend_resume_helper(dhd, 0, 0);
}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
/*
 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
 *
 *      dhd_timeout_start(&tmo, usec);
 *      while (!dhd_timeout_expired(&tmo))
 *              if (poll_something())
 *                      break;
 *      if (dhd_timeout_expired(&tmo))
 *              fatal();
 */

/* Arm a dhd_timeout_t for 'usec' microseconds.
 * NOTE(review): the limit/increment/elapsed initializers fell in an
 * extraction gap and were restored from upstream -- verify.
 */
void
dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
{
	tmo->limit = usec;
	tmo->increment = 0;
	tmo->elapsed = 0;
	/* Microseconds per scheduler tick: the back-off ceiling. */
	tmo->tick = jiffies_to_usecs(1);
}
/* Poll helper paired with dhd_timeout_start(): returns 1 once the armed
 * timeout has elapsed, otherwise sleeps a little and returns 0.
 * Delay strategy: busy-wait (OSL_DELAY) with exponentially growing steps
 * while sleeping is not allowed or the step is below one tick; after that,
 * yield to the scheduler for one jiffy via a private wait queue.
 */
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;

	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		/* exponential back-off, capped at one tick */
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/* Sleep for one jiffy on a throwaway wait queue. */
		wait_queue_head_t delay_wait;
		DECLARE_WAITQUEUE(wait, current);
		init_waitqueue_head(&delay_wait);
		add_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		(void)schedule_timeout(1);
		remove_wait_queue(&delay_wait, &wait);
		set_current_state(TASK_RUNNING);
	}

	return 0;
}
/* Map a net_device back to its dhd interface index, or DHD_BAD_IF if the
 * device is not one of ours (or dhd is NULL).
 */
int
dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
{
	int i = 0;

	if (!dhd) {
		DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
		return DHD_BAD_IF;
	}
	while (i < DHD_MAX_IFS) {
		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
			return i;
		i++;
	}

	return DHD_BAD_IF;
}
/* Map an interface index to its net_device; NULL on bad args or when the
 * slot has no interface registered.
 */
struct net_device * dhd_idx2net(void *pub, int ifidx)
{
	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
	struct dhd_info *dhd_info;

	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
		return NULL;
	dhd_info = dhd_pub->info;
	if (dhd_info && dhd_info->iflist[ifidx])
		return dhd_info->iflist[ifidx]->net;
	return NULL;
}
/* Map an interface name to its index. Scans downward; index 0 (the primary
 * interface) doubles as the "not found" / empty-name default.
 * NOTE(review): the downward-scan loop header fell in an extraction gap and
 * was restored from upstream -- verify.
 */
int
dhd_ifname2idx(dhd_info_t *dhd, char *name)
{
	int i = DHD_MAX_IFS;

	ASSERT(dhd);

	if (name == NULL || *name == '\0')
		return 0;

	while (--i > 0)
		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
			break;

	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));

	return i;	/* default - the primary interface */
}
/* Map a firmware interface index (iflist[i]->idx) to the host-side iflist
 * slot. Same downward-scan / default-to-0 convention as dhd_ifname2idx().
 */
int
dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
{
	int i = DHD_MAX_IFS;

	ASSERT(dhd);

	while (--i > 0)
		if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
			break;

	DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));

	return i;	/* default - the primary interface */
}
/* Return the netdev name for an interface index, or a diagnostic placeholder
 * string ("<if_bad>", "<if_null>", "<if_none>") when the index/slot/netdev
 * is invalid -- safe to pass straight into printf-style logging.
 * NOTE(review): the placeholder return strings fell in extraction gaps and
 * were restored from upstream -- verify.
 */
char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	ASSERT(dhd);

	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
		return "<if_bad>";
	}

	if (dhd->iflist[ifidx] == NULL) {
		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
		return "<if_null>";
	}

	if (dhd->iflist[ifidx]->net)
		return dhd->iflist[ifidx]->net->name;

	return "<if_none>";
}
1914 dhd_bssidx2bssid(dhd_pub_t
*dhdp
, int idx
)
1917 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
;
1920 for (i
= 0; i
< DHD_MAX_IFS
; i
++)
1921 if (dhd
->iflist
[i
] && dhd
->iflist
[i
]->bssidx
== idx
)
1922 return dhd
->iflist
[i
]->mac_addr
;
/* Push the kernel's multicast configuration for interface 'ifidx' down to
 * the dongle in three steps: (1) the "mcast_list" iovar with the current
 * multicast address list, (2) the "allmulti" iovar (forced on if the list
 * download was rejected), and (3) WLC_SET_PROMISC mirroring IFF_PROMISC.
 * The netdev address list is snapshotted under netif_addr_lock_bh on
 * kernels that have it.
 * NOTE(review): 'ioc'/'buf'/'ret' declarations, ioc.buf/len/set fields,
 * cnt re-check inside the copy loop and htol32(cnt) fell in extraction gaps
 * and were restored from upstream -- verify.
 */
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *mclist;
#endif
	uint32 allmulti, cnt;

	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

	ASSERT(dhd && dhd->iflist[ifidx]);
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	cnt = netdev_mc_count(dev);
#else
	cnt = dev->mc_count;
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;

	/* Send down the multicast list first. */
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
		           dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	/* iovar layout: "mcast_list\0" + uint32 count + addresses */
	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#else
	for (mclist = dev->mc_list; (mclist && (cnt > 0));
		cnt--, mclist = mclist->next) {
		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
	}
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	netif_addr_unlock_bh(dev);
#endif

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		/* If the list was rejected, fall back to allmulti. */
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting.  This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	buflen = sizeof("allmulti") + sizeof(allmulti);
	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
		return;
	}
	allmulti = htol32(allmulti);

	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
		MFREE(dhd->pub.osh, buf, buflen);
		return;
	}

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}
/* Program a new MAC address into the dongle via the "cur_etheraddr" iovar
 * and, on success, mirror it into the netdev and (for the primary interface)
 * into dhd->pub.mac. Returns the ioctl result, or -1 on mkiovar failure.
 * NOTE(review): the ioc field assignments and the success-path else branch
 * fell in extraction gaps and were restored from upstream -- verify.
 */
static int
_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
{
	char buf[32];
	wl_ioctl_t ioc;
	int ret;

	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
		return -1;
	}
	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = 32;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
	} else {
		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
		if (ifidx == 0)
			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
	}

	return ret;
}
2104 extern struct net_device
*ap_net_dev
;
2105 extern tsk_ctl_t ap_eth_ctl
; /* ap netdev heper thread ctl */
/* Deferred-work handler for a firmware IF_ADD event: allocates the new
 * dhd interface + net_device, wires up a cfg80211 wireless_dev on 3.11+
 * kernels (wiphy borrowed from the primary interface), registers the
 * netdev, and for AP-role interfaces on PCIE_FULL_DONGLE enables firmware
 * ap_isolate. Runs under net_if lock, wake lock and perimeter lock; always
 * frees the if_event it was handed.
 * NOTE(review): local declarations (ifidx/bssidx/ret/var_int), NULL checks
 * and the 'done:' label/goto structure fell in extraction gaps and were
 * restored from upstream -- verify.
 */
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	struct net_device *ndev;
	int ifidx, bssidx;
	int ret;
#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	struct wireless_dev *vwdev, *primary_wdev;
	struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
	if (unlikely(!vwdev)) {
		WL_ERR(("Could not allocate wireless device\n"));
		goto done;
	}
	/* The virtual interface shares the primary interface's wiphy. */
	primary_ndev = dhd->pub.info->iflist[0]->net;
	primary_wdev = ndev_to_wdev(primary_ndev);
	vwdev->wiphy = primary_wdev->wiphy;
	vwdev->iftype = if_event->event.role;
	vwdev->netdev = ndev;
	ndev->ieee80211_ptr = vwdev;
	SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
	DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

	/* register_netdev can sleep; drop the perimeter lock around it. */
	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#ifdef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
		char iovbuf[WLC_IOCTL_SMLEN];
		uint32 var_int = 1;

		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
		ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);

		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
/* Deferred-work handler for a firmware IF_DEL event: removes the interface
 * for the event's ifidx and frees the event. Runs under net_if lock, wake
 * lock and perimeter lock, mirroring dhd_ifadd_event_handler().
 */
static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));

	dhd_remove_if(&dhd->pub, ifidx, TRUE);

	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
/* Deferred-work handler for DHD_WQ_WORK_SET_MAC: applies the MAC address
 * staged in ifp->mac_addr by dhd_set_mac_address(). Blocked while SoftAP is
 * active. Clears ifp->set_macaddress before attempting the change.
 * NOTE(review): the SOFTAP block structure and 'done:' label fell in
 * extraction gaps and were restored from upstream -- verify.
 */
static void
dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;

	if (event != DHD_WQ_WORK_SET_MAC) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#ifdef SOFTAP
	{
		unsigned long flags;
		bool in_ap = FALSE;
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		in_ap = (ap_net_dev != NULL);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);

		if (in_ap) {
			DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
				ifp->net->name));
			goto done;
		}
	}
#endif /* SOFTAP */

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
	ifp->set_macaddress = FALSE;
	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
		DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
	else
		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
/* Deferred-work handler for DHD_WQ_WORK_SET_MCAST_LIST: pushes the staged
 * multicast list for the interface via _dhd_set_multicast_list(). Blocked
 * while SoftAP is active (the pending flag is cleared in that case).
 * NOTE(review): the SOFTAP block structure, ifidx assignment and 'done:'
 * label fell in extraction gaps and were restored from upstream -- verify.
 */
static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;
	int ifidx;

	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#ifdef SOFTAP
	{
		bool in_ap = FALSE;
		unsigned long flags;
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		in_ap = (ap_net_dev != NULL);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);

		if (in_ap) {
			DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
				ifp->net->name));
			ifp->set_multicast = FALSE;
			goto done;
		}
	}
#endif /* SOFTAP */

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	ifidx = ifp->idx;

	_dhd_set_multicast_list(dhd, ifidx);
	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}
/* net_device_ops .ndo_set_mac_address: stage the new MAC on the interface
 * and defer the actual firmware update to dhd_set_mac_addr_handler() via
 * the deferred work queue (this callback may run in a context where the
 * blocking ioctl path is not appropriate).
 */
static int
dhd_set_mac_address(struct net_device *dev, void *addr)
{
	int ret = 0;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	int ifidx;
	dhd_if_t *dhdif;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return -1;

	dhdif = dhd->iflist[ifidx];

	dhd_net_if_lock_local(dhd);
	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
	dhdif->set_macaddress = TRUE;
	dhd_net_if_unlock_local(dhd);
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
		dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
	return ret;
}
/* net_device_ops .ndo_set_rx_mode: mark the interface as needing a
 * multicast-list refresh and defer it to dhd_set_mcast_list_handler()
 * (this callback runs in atomic context; the ioctl path sleeps).
 */
static void
dhd_set_multicast_list(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ifidx;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return;

	dhd->iflist[ifidx]->set_multicast = TRUE;
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
}
#ifdef PROP_TXSTATUS
/* Acquire the wlfc (wireless flow control) spinlock; BH-disabling variant.
 * Returns 1 (lock taken), paired with dhd_os_wlfc_unblock().
 */
int
dhd_os_wlfc_block(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);

	ASSERT(di != NULL);
	spin_lock_bh(&di->wlfc_spinlock);
	return 1;
}
/* Release the wlfc spinlock taken by dhd_os_wlfc_block(); returns 1. */
int
dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
	dhd_info_t *di = (dhd_info_t *)(pub->info);

	ASSERT(di != NULL);
	spin_unlock_bh(&di->wlfc_spinlock);
	return 1;
}

#endif /* PROP_TXSTATUS */
#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
/* Ethertype -> printable name lookup used by the TX/RX dump helpers. */
typedef struct {
	uint16 type;
	const char *str;
} PKTTYPE_INFO;

static const PKTTYPE_INFO packet_type_info[] =
{
	{ ETHER_TYPE_IP, "IP" },
	{ ETHER_TYPE_ARP, "ARP" },
	{ ETHER_TYPE_BRCM, "BRCM" },
	{ ETHER_TYPE_802_1X, "802.1X" },
	{ ETHER_TYPE_WAI, "WAPI" },
	{ 0, "UNKNOWN" }	/* sentinel: must stay last (default return) */
};

static const char *_get_packet_type_str(uint16 type)
{
	int i;
	/* n = table size minus the trailing "UNKNOWN" sentinel.
	 * (sizeof(packet_type_info[1]) is the element size -- same as [0].)
	 */
	int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;

	for (i = 0; i < n; i++) {
		if (packet_type_info[i].type == type)
			return packet_type_info[i].str;
	}

	/* unmatched ethertype: return the sentinel's "UNKNOWN" string */
	return packet_type_info[n].str;
}
#endif /* DHD_RX_DUMP || DHD_TX_DUMP */
2438 #if defined(DHD_TX_DUMP)
2440 dhd_tx_dump(osl_t
*osh
, void *pkt
)
2444 struct ether_header
*eh
;
2446 dump_data
= PKTDATA(osh
, pkt
);
2447 eh
= (struct ether_header
*) dump_data
;
2448 protocol
= ntoh16(eh
->ether_type
);
2450 DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol
)));
2452 if (protocol
== ETHER_TYPE_802_1X
) {
2453 DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
2454 dump_data
[14], dump_data
[15], dump_data
[30]));
2457 #if defined(DHD_TX_FULL_DUMP)
2461 datalen
= PKTLEN(osh
, pkt
);
2463 for (i
= 0; i
< datalen
; i
++) {
2464 DHD_ERROR(("%02X ", dump_data
[i
]));
2470 #endif /* DHD_TX_FULL_DUMP */
2472 #endif /* DHD_TX_DUMP */
2475 dhd_sendpkt(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
)
2478 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
2479 struct ether_header
*eh
= NULL
;
2481 /* Reject if down */
2482 if (!dhdp
->up
|| (dhdp
->busstate
== DHD_BUS_DOWN
)) {
2483 /* free the packet here since the caller won't */
2484 PKTFREE(dhdp
->osh
, pktbuf
, TRUE
);
2488 #ifdef PCIE_FULL_DONGLE
2489 if (dhdp
->busstate
== DHD_BUS_SUSPEND
) {
2490 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__
));
2491 PKTFREE(dhdp
->osh
, pktbuf
, TRUE
);
2494 #endif /* PCIE_FULL_DONGLE */
2496 #ifdef DHD_UNICAST_DHCP
2497 /* if dhcp_unicast is enabled, we need to convert the */
2498 /* broadcast DHCP ACK/REPLY packets to Unicast. */
2499 if (dhdp
->dhcp_unicast
) {
2500 dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp
, pktbuf
, ifidx
);
2502 #endif /* DHD_UNICAST_DHCP */
2503 /* Update multicast statistic */
2504 if (PKTLEN(dhdp
->osh
, pktbuf
) >= ETHER_HDR_LEN
) {
2505 uint8
*pktdata
= (uint8
*)PKTDATA(dhdp
->osh
, pktbuf
);
2506 eh
= (struct ether_header
*)pktdata
;
2508 if (ETHER_ISMULTI(eh
->ether_dhost
))
2509 dhdp
->tx_multicast
++;
2510 if (ntoh16(eh
->ether_type
) == ETHER_TYPE_802_1X
)
2511 atomic_inc(&dhd
->pend_8021x_cnt
);
2513 PKTFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
2517 /* Look into the packet and update the packet priority */
2518 #ifndef PKTPRIO_OVERRIDE
2519 if (PKTPRIO(pktbuf
) == 0)
2521 pktsetprio(pktbuf
, FALSE
);
2524 #if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
2526 * Lkup the per interface hash table, for a matching flowring. If one is not
2527 * available, allocate a unique flowid and add a flowring entry.
2528 * The found or newly created flowid is placed into the pktbuf's tag.
2530 ret
= dhd_flowid_update(dhdp
, ifidx
, dhdp
->flow_prio_map
[(PKTPRIO(pktbuf
))], pktbuf
);
2531 if (ret
!= BCME_OK
) {
2532 PKTCFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
2536 #if defined(DHD_TX_DUMP)
2537 dhd_tx_dump(dhdp
->osh
, pktbuf
);
2540 /* terence 20150901: Micky add to ajust the 802.1X priority */
2541 /* Set the 802.1X packet with the highest priority 7 */
2542 if (dhdp
->conf
->pktprio8021x
>= 0)
2543 pktset8021xprio(pktbuf
, dhdp
->conf
->pktprio8021x
);
2545 #ifdef PROP_TXSTATUS
2546 if (dhd_wlfc_is_supported(dhdp
)) {
2547 /* store the interface ID */
2548 DHD_PKTTAG_SETIF(PKTTAG(pktbuf
), ifidx
);
2550 /* store destination MAC in the tag as well */
2551 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf
), eh
->ether_dhost
);
2553 /* decide which FIFO this packet belongs to */
2554 if (ETHER_ISMULTI(eh
->ether_dhost
))
2555 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
2556 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), AC_COUNT
);
2558 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf
), WME_PRIO2AC(PKTPRIO(pktbuf
)));
2560 #endif /* PROP_TXSTATUS */
2561 /* If the protocol uses a data header, apply it */
2562 dhd_prot_hdrpush(dhdp
, ifidx
, pktbuf
);
2564 /* Use bus module to send data frame */
2566 dhd_htsf_addtxts(dhdp
, pktbuf
);
2569 #ifdef PROP_TXSTATUS
2571 if (dhd_wlfc_commit_packets(dhdp
, (f_commitpkt_t
)dhd_bus_txdata
,
2572 dhdp
->bus
, pktbuf
, TRUE
) == WLFC_UNSUPPORTED
) {
2573 /* non-proptxstatus way */
2575 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
2577 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
2578 #endif /* BCMPCIE */
2583 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
, (uint8
)ifidx
);
2585 ret
= dhd_bus_txdata(dhdp
->bus
, pktbuf
);
2586 #endif /* BCMPCIE */
2587 #endif /* PROP_TXSTATUS */
2593 dhd_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
2598 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
2599 dhd_if_t
*ifp
= NULL
;
2602 uint8 htsfdlystat_sz
= dhd
->pub
.htsfdlystat_sz
;
2604 uint8 htsfdlystat_sz
= 0;
2607 struct ether_header
*eh
;
2609 #endif /* DHD_WMF */
2611 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2613 DHD_OS_WAKE_LOCK(&dhd
->pub
);
2614 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2616 /* Reject if down */
2617 if (dhd
->pub
.busstate
== DHD_BUS_DOWN
|| dhd
->pub
.hang_was_sent
) {
2618 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
2619 __FUNCTION__
, dhd
->pub
.up
, dhd
->pub
.busstate
));
2620 netif_stop_queue(net
);
2621 /* Send Event when bus down detected during data session */
2623 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__
));
2624 net_os_send_hang_message(net
);
2626 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2627 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2628 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2631 return NETDEV_TX_BUSY
;
2635 ifp
= DHD_DEV_IFP(net
);
2636 ifidx
= DHD_DEV_IFIDX(net
);
2638 ASSERT(ifidx
== dhd_net2idx(dhd
, net
));
2639 ASSERT((ifp
!= NULL
) && (ifp
== dhd
->iflist
[ifidx
]));
2641 if (ifidx
== DHD_BAD_IF
) {
2642 DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__
, ifidx
));
2643 netif_stop_queue(net
);
2644 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2645 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2646 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2649 return NETDEV_TX_BUSY
;
2653 /* re-align socket buffer if "skb->data" is odd address */
2654 if (((unsigned long)(skb
->data
)) & 0x1) {
2655 unsigned char *data
= skb
->data
;
2656 uint32 length
= skb
->len
;
2657 PKTPUSH(dhd
->pub
.osh
, skb
, 1);
2658 memmove(skb
->data
, data
, length
);
2659 PKTSETLEN(dhd
->pub
.osh
, skb
, length
);
2662 datalen
= PKTLEN(dhd
->pub
.osh
, skb
);
2664 /* Make sure there's enough room for any header */
2666 if (skb_headroom(skb
) < dhd
->pub
.hdrlen
+ htsfdlystat_sz
) {
2667 struct sk_buff
*skb2
;
2669 DHD_INFO(("%s: insufficient headroom\n",
2670 dhd_ifname(&dhd
->pub
, ifidx
)));
2671 dhd
->pub
.tx_realloc
++;
2673 skb2
= skb_realloc_headroom(skb
, dhd
->pub
.hdrlen
+ htsfdlystat_sz
);
2676 if ((skb
= skb2
) == NULL
) {
2677 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
2678 dhd_ifname(&dhd
->pub
, ifidx
)));
2684 /* Convert to packet */
2685 if (!(pktbuf
= PKTFRMNATIVE(dhd
->pub
.osh
, skb
))) {
2686 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
2687 dhd_ifname(&dhd
->pub
, ifidx
)));
2688 dev_kfree_skb_any(skb
);
2693 if (htsfdlystat_sz
&& PKTLEN(dhd
->pub
.osh
, pktbuf
) >= ETHER_ADDR_LEN
) {
2694 uint8
*pktdata
= (uint8
*)PKTDATA(dhd
->pub
.osh
, pktbuf
);
2695 struct ether_header
*eh
= (struct ether_header
*)pktdata
;
2697 if (!ETHER_ISMULTI(eh
->ether_dhost
) &&
2698 (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
)) {
2699 eh
->ether_type
= hton16(ETHER_TYPE_BRCM_PKTDLYSTATS
);
2704 eh
= (struct ether_header
*)PKTDATA(dhd
->pub
.osh
, pktbuf
);
2705 iph
= (uint8
*)eh
+ ETHER_HDR_LEN
;
2707 /* WMF processing for multicast packets
2708 * Only IPv4 packets are handled
2710 if (ifp
->wmf
.wmf_enable
&& (ntoh16(eh
->ether_type
) == ETHER_TYPE_IP
) &&
2711 (IP_VER(iph
) == IP_VER_4
) && (ETHER_ISMULTI(eh
->ether_dhost
) ||
2712 ((IPV4_PROT(iph
) == IP_PROT_IGMP
) && dhd
->pub
.wmf_ucast_igmp
))) {
2713 #if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
2715 bool ucast_convert
= FALSE
;
2716 #ifdef DHD_UCAST_UPNP
2719 dest_ip
= ntoh32(*((uint32
*)(iph
+ IPV4_DEST_IP_OFFSET
)));
2720 ucast_convert
= dhd
->pub
.wmf_ucast_upnp
&& MCAST_ADDR_UPNP_SSDP(dest_ip
);
2721 #endif /* DHD_UCAST_UPNP */
2722 #ifdef DHD_IGMP_UCQUERY
2723 ucast_convert
|= dhd
->pub
.wmf_ucast_igmp_query
&&
2724 (IPV4_PROT(iph
) == IP_PROT_IGMP
) &&
2725 (*(iph
+ IPV4_HLEN(iph
)) == IGMPV2_HOST_MEMBERSHIP_QUERY
);
2726 #endif /* DHD_IGMP_UCQUERY */
2727 if (ucast_convert
) {
2729 unsigned long flags
;
2731 DHD_IF_STA_LIST_LOCK(ifp
, flags
);
2733 /* Convert upnp/igmp query to unicast for each assoc STA */
2734 list_for_each_entry(sta
, &ifp
->sta_list
, list
) {
2735 if ((sdu_clone
= PKTDUP(dhd
->pub
.osh
, pktbuf
)) == NULL
) {
2736 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2737 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2738 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2741 dhd_wmf_forward(ifp
->wmf
.wmfh
, sdu_clone
, 0, sta
, 1);
2744 DHD_IF_STA_LIST_UNLOCK(ifp
, flags
);
2745 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2746 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2748 PKTFREE(dhd
->pub
.osh
, pktbuf
, TRUE
);
2749 return NETDEV_TX_OK
;
2751 #endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
2753 /* There will be no STA info if the packet is coming from LAN host
2756 ret
= dhd_wmf_packets_handle(&dhd
->pub
, pktbuf
, NULL
, ifidx
, 0);
2760 /* Either taken by WMF or we should drop it.
2763 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2764 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2765 return NETDEV_TX_OK
;
2767 /* Continue the transmit path */
2772 #endif /* DHD_WMF */
2774 #ifdef DHDTCPACK_SUPPRESS
2775 if (dhd
->pub
.tcpack_sup_mode
== TCPACK_SUP_HOLD
) {
2776 /* If this packet has been hold or got freed, just return */
2777 if (dhd_tcpack_hold(&dhd
->pub
, pktbuf
, ifidx
))
2780 /* If this packet has replaced another packet and got freed, just return */
2781 if (dhd_tcpack_suppress(&dhd
->pub
, pktbuf
))
2784 #endif /* DHDTCPACK_SUPPRESS */
2786 ret
= dhd_sendpkt(&dhd
->pub
, ifidx
, pktbuf
);
2790 ifp
->stats
.tx_dropped
++;
2791 dhd
->pub
.tx_dropped
++;
2795 #ifdef PROP_TXSTATUS
2796 /* tx_packets counter can counted only when wlfc is disabled */
2797 if (!dhd_wlfc_is_supported(&dhd
->pub
))
2800 dhd
->pub
.tx_packets
++;
2801 ifp
->stats
.tx_packets
++;
2802 ifp
->stats
.tx_bytes
+= datalen
;
2806 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd
), TRUE
);
2807 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
2809 /* Return ok: we always eat the packet */
2810 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
2813 return NETDEV_TX_OK
;
2819 dhd_txflowcontrol(dhd_pub_t
*dhdp
, int ifidx
, bool state
)
2821 struct net_device
*net
;
2822 dhd_info_t
*dhd
= dhdp
->info
;
2825 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2829 if (ifidx
== ALL_INTERFACES
) {
2830 /* Flow control on all active interfaces */
2831 dhdp
->txoff
= state
;
2832 for (i
= 0; i
< DHD_MAX_IFS
; i
++) {
2833 if (dhd
->iflist
[i
]) {
2834 net
= dhd
->iflist
[i
]->net
;
2836 netif_stop_queue(net
);
2838 netif_wake_queue(net
);
2843 if (dhd
->iflist
[ifidx
]) {
2844 net
= dhd
->iflist
[ifidx
]->net
;
2846 netif_stop_queue(net
);
2848 netif_wake_queue(net
);
2856 dhd_is_rxthread_enabled(dhd_pub_t
*dhdp
)
2858 dhd_info_t
*dhd
= dhdp
->info
;
2860 return dhd
->rxthread_enabled
;
2862 #endif /* DHD_WMF */
2865 dhd_rx_frame(dhd_pub_t
*dhdp
, int ifidx
, void *pktbuf
, int numpkt
, uint8 chan
)
2867 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
2868 struct sk_buff
*skb
;
2871 void *data
, *pnext
= NULL
;
2874 wl_event_msg_t event
;
2877 void *skbhead
= NULL
;
2878 void *skbprev
= NULL
;
2879 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
2882 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
2884 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2886 for (i
= 0; pktbuf
&& i
< numpkt
; i
++, pktbuf
= pnext
) {
2887 struct ether_header
*eh
;
2889 struct dot11_llc_snap_header
*lsh
;
2892 pnext
= PKTNEXT(dhdp
->osh
, pktbuf
);
2893 PKTSETNEXT(dhdp
->osh
, pktbuf
, NULL
);
2895 ifp
= dhd
->iflist
[ifidx
];
2897 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
2899 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
2903 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
2905 /* Dropping only data packets before registering net device to avoid kernel panic */
2906 #ifndef PROP_TXSTATUS_VSDB
2907 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
) &&
2908 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
2910 if ((!ifp
->net
|| ifp
->net
->reg_state
!= NETREG_REGISTERED
|| !dhd
->pub
.up
) &&
2911 (ntoh16(eh
->ether_type
) != ETHER_TYPE_BRCM
))
2912 #endif /* PROP_TXSTATUS_VSDB */
2914 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
2916 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
2921 lsh
= (struct dot11_llc_snap_header
*)&eh
[1];
2923 if ((ntoh16(eh
->ether_type
) < ETHER_TYPE_MIN
) &&
2924 (PKTLEN(dhdp
->osh
, pktbuf
) >= RFC1042_HDR_LEN
) &&
2925 bcmp(lsh
, BT_SIG_SNAP_MPROT
, DOT11_LLC_SNAP_HDR_LEN
- 2) == 0 &&
2926 lsh
->type
== HTON16(BTA_PROT_L2CAP
)) {
2927 amp_hci_ACL_data_t
*ACL_data
= (amp_hci_ACL_data_t
*)
2928 ((uint8
*)eh
+ RFC1042_HDR_LEN
);
2931 #endif /* WLBTAMP */
2933 #ifdef PROP_TXSTATUS
2934 if (dhd_wlfc_is_header_only_pkt(dhdp
, pktbuf
)) {
2935 /* WLFC may send header only packet when
2936 there is an urgent message but no packet to
2939 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
2943 #ifdef DHD_L2_FILTER
2944 /* If block_ping is enabled drop the ping packet */
2945 if (dhdp
->block_ping
) {
2946 if (dhd_l2_filter_block_ping(dhdp
, pktbuf
, ifidx
) == BCME_OK
) {
2947 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
2953 /* WMF processing for multicast packets */
2954 if (ifp
->wmf
.wmf_enable
&& (ETHER_ISMULTI(eh
->ether_dhost
))) {
2958 sta
= dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_shost
);
2959 ret
= dhd_wmf_packets_handle(dhdp
, pktbuf
, sta
, ifidx
, 1);
2962 /* The packet is taken by WMF. Continue to next iteration */
2965 /* Packet DROP decision by WMF. Toss it */
2966 DHD_ERROR(("%s: WMF decides to drop packet\n",
2968 PKTCFREE(dhdp
->osh
, pktbuf
, FALSE
);
2971 /* Continue the transmit path */
2975 #endif /* DHD_WMF */
2976 #ifdef DHDTCPACK_SUPPRESS
2977 dhd_tcpdata_info_get(dhdp
, pktbuf
);
2979 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
2981 ifp
= dhd
->iflist
[ifidx
];
2983 ifp
= dhd
->iflist
[0];
2986 skb
->dev
= ifp
->net
;
2988 #ifdef PCIE_FULL_DONGLE
2989 if ((DHD_IF_ROLE_AP(dhdp
, ifidx
) || DHD_IF_ROLE_P2PGO(dhdp
, ifidx
)) &&
2990 (!ifp
->ap_isolate
)) {
2991 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, pktbuf
);
2992 if (ETHER_ISUCAST(eh
->ether_dhost
)) {
2993 if (dhd_find_sta(dhdp
, ifidx
, (void *)eh
->ether_dhost
)) {
2994 dhd_sendpkt(dhdp
, ifidx
, pktbuf
);
2998 void *npktbuf
= PKTDUP(dhdp
->osh
, pktbuf
);
2999 dhd_sendpkt(dhdp
, ifidx
, npktbuf
);
3002 #endif /* PCIE_FULL_DONGLE */
3004 /* Get the protocol, maintain skb around eth_type_trans()
3005 * The main reason for this hack is for the limitation of
3006 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
3007 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
3008 * coping of the packet coming from the network stack to add
3009 * BDC, Hardware header etc, during network interface registration
3010 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
3011 * for BDC, Hardware header etc. and not just the ETH_HLEN
3016 #if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
3017 dump_data
= skb
->data
;
3018 protocol
= (dump_data
[12] << 8) | dump_data
[13];
3020 if (protocol
== ETHER_TYPE_802_1X
) {
3021 DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
3022 "ver %d, type %d, replay %d\n",
3023 dump_data
[14], dump_data
[15],
3026 #endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
3027 #if defined(DHD_RX_DUMP)
3028 DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol
)));
3029 if (protocol
!= ETHER_TYPE_BRCM
) {
3030 if (dump_data
[0] == 0xFF) {
3031 DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__
));
3033 if ((dump_data
[12] == 8) &&
3034 (dump_data
[13] == 6)) {
3035 DHD_ERROR(("%s: ARP %d\n",
3036 __FUNCTION__
, dump_data
[0x15]));
3038 } else if (dump_data
[0] & 1) {
3039 DHD_ERROR(("%s: MULTICAST: " MACDBG
"\n",
3040 __FUNCTION__
, MAC2STRDBG(dump_data
)));
3042 #ifdef DHD_RX_FULL_DUMP
3045 for (k
= 0; k
< skb
->len
; k
++) {
3046 DHD_ERROR(("%02X ", dump_data
[k
]));
3052 #endif /* DHD_RX_FULL_DUMP */
3054 #endif /* DHD_RX_DUMP */
3056 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
3058 if (skb
->pkt_type
== PACKET_MULTICAST
) {
3059 dhd
->pub
.rx_multicast
++;
3060 ifp
->stats
.multicast
++;
3067 dhd_htsf_addrxts(dhdp
, pktbuf
);
3069 /* Strip header, count, deliver upward */
3070 skb_pull(skb
, ETH_HLEN
);
3072 /* Process special event packets and then discard them */
3073 memset(&event
, 0, sizeof(event
));
3074 if (ntoh16(skb
->protocol
) == ETHER_TYPE_BRCM
) {
3075 dhd_wl_host_event(dhd
, &ifidx
,
3076 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
3077 skb_mac_header(skb
),
3080 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
3084 wl_event_to_host_order(&event
);
3086 tout_ctrl
= DHD_PACKET_TIMEOUT_MS
;
3088 if (event
.event_type
== WLC_E_BTA_HCI_EVENT
) {
3089 dhd_bta_doevt(dhdp
, data
, event
.datalen
);
3091 #endif /* WLBTAMP */
3093 #if defined(PNO_SUPPORT)
3094 if (event
.event_type
== WLC_E_PFN_NET_FOUND
) {
3095 /* enforce custom wake lock to garantee that Kernel not suspended */
3096 tout_ctrl
= CUSTOM_PNO_EVENT_LOCK_xTIME
* DHD_PACKET_TIMEOUT_MS
;
3098 #endif /* PNO_SUPPORT */
3100 #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
3101 PKTFREE(dhdp
->osh
, pktbuf
, FALSE
);
3103 #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
3105 tout_rx
= DHD_PACKET_TIMEOUT_MS
;
3107 #ifdef PROP_TXSTATUS
3108 dhd_wlfc_save_rxpath_ac_time(dhdp
, (uint8
)PKTPRIO(skb
));
3109 #endif /* PROP_TXSTATUS */
3112 ASSERT(ifidx
< DHD_MAX_IFS
&& dhd
->iflist
[ifidx
]);
3113 ifp
= dhd
->iflist
[ifidx
];
3116 ifp
->net
->last_rx
= jiffies
;
3118 if (ntoh16(skb
->protocol
) != ETHER_TYPE_BRCM
) {
3119 dhdp
->dstats
.rx_bytes
+= skb
->len
;
3120 dhdp
->rx_packets
++; /* Local count */
3121 ifp
->stats
.rx_bytes
+= skb
->len
;
3122 ifp
->stats
.rx_packets
++;
3124 #if defined(DHD_TCP_WINSIZE_ADJUST)
3125 if (dhd_use_tcp_window_size_adjust
) {
3126 if (ifidx
== 0 && ntoh16(skb
->protocol
) == ETHER_TYPE_IP
) {
3127 dhd_adjust_tcp_winsize(dhdp
->op_mode
, skb
);
3130 #endif /* DHD_TCP_WINSIZE_ADJUST */
3132 if (in_interrupt()) {
3135 if (dhd
->rxthread_enabled
) {
3139 PKTSETNEXT(dhdp
->osh
, skbprev
, skb
);
3143 /* If the receive is not processed inside an ISR,
3144 * the softirqd must be woken explicitly to service
3145 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3146 * by netif_rx_ni(), but in earlier kernels, we need
3147 * to do it manually.
3149 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3154 local_irq_save(flags
);
3156 local_irq_restore(flags
);
3157 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
3162 if (dhd
->rxthread_enabled
&& skbhead
)
3163 dhd_sched_rxf(dhdp
, skbhead
);
3165 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp
, tout_rx
);
3166 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp
, tout_ctrl
);
/* OS-abstraction event hook: intentionally a no-op on Linux (events are
 * dispatched elsewhere in this port).
 */
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
	/* Linux version has nothing to do */
	return;
}
3177 dhd_txcomplete(dhd_pub_t
*dhdp
, void *txp
, bool success
)
3179 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
3180 struct ether_header
*eh
;
3186 dhd_prot_hdrpull(dhdp
, NULL
, txp
, NULL
, NULL
);
3188 eh
= (struct ether_header
*)PKTDATA(dhdp
->osh
, txp
);
3189 type
= ntoh16(eh
->ether_type
);
3191 if (type
== ETHER_TYPE_802_1X
)
3192 atomic_dec(&dhd
->pend_8021x_cnt
);
3195 /* Crack open the packet and check to see if it is BT HCI ACL data packet.
3196 * If yes generate packet completion event.
3198 len
= PKTLEN(dhdp
->osh
, txp
);
3200 /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
3201 if ((type
< ETHER_TYPE_MIN
) && (len
>= RFC1042_HDR_LEN
)) {
3202 struct dot11_llc_snap_header
*lsh
= (struct dot11_llc_snap_header
*)&eh
[1];
3204 if (bcmp(lsh
, BT_SIG_SNAP_MPROT
, DOT11_LLC_SNAP_HDR_LEN
- 2) == 0 &&
3205 ntoh16(lsh
->type
) == BTA_PROT_L2CAP
) {
3207 dhd_bta_tx_hcidata_complete(dhdp
, txp
, success
);
3210 #endif /* WLBTAMP */
3211 #ifdef PROP_TXSTATUS
3212 if (dhdp
->wlfc_state
&& (dhdp
->proptxstatus_mode
!= WLFC_FCMODE_NONE
)) {
3213 dhd_if_t
*ifp
= dhd
->iflist
[DHD_PKTTAG_IF(PKTTAG(txp
))];
3214 uint datalen
= PKTLEN(dhd
->pub
.osh
, txp
);
3217 dhd
->pub
.tx_packets
++;
3218 ifp
->stats
.tx_packets
++;
3219 ifp
->stats
.tx_bytes
+= datalen
;
3221 ifp
->stats
.tx_dropped
++;
3227 static struct net_device_stats
*
3228 dhd_get_stats(struct net_device
*net
)
3230 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
3234 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3236 ifidx
= dhd_net2idx(dhd
, net
);
3237 if (ifidx
== DHD_BAD_IF
) {
3238 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__
));
3240 memset(&net
->stats
, 0, sizeof(net
->stats
));
3244 ifp
= dhd
->iflist
[ifidx
];
3248 /* Use the protocol to get dongle stats */
3249 dhd_prot_dstats(&dhd
->pub
);
3255 dhd_watchdog_thread(void *data
)
3257 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
3258 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
3259 /* This thread doesn't need any user-level access,
3260 * so get rid of all our resources
3262 if (dhd_watchdog_prio
> 0) {
3263 struct sched_param param
;
3264 param
.sched_priority
= (dhd_watchdog_prio
< MAX_RT_PRIO
)?
3265 dhd_watchdog_prio
:(MAX_RT_PRIO
-1);
3266 setScheduler(current
, SCHED_FIFO
, ¶m
);
3270 if (down_interruptible (&tsk
->sema
) == 0) {
3271 unsigned long flags
;
3272 unsigned long jiffies_at_start
= jiffies
;
3273 unsigned long time_lapse
;
3275 SMP_RD_BARRIER_DEPENDS();
3276 if (tsk
->terminated
) {
3280 if (dhd
->pub
.dongle_reset
== FALSE
) {
3281 DHD_TIMER(("%s:\n", __FUNCTION__
));
3283 /* Call the bus module watchdog */
3284 dhd_bus_watchdog(&dhd
->pub
);
3287 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
3288 /* Count the tick for reference */
3290 time_lapse
= jiffies
- jiffies_at_start
;
3292 /* Reschedule the watchdog */
3293 if (dhd
->wd_timer_valid
)
3294 mod_timer(&dhd
->timer
,
3296 msecs_to_jiffies(dhd_watchdog_ms
) -
3297 min(msecs_to_jiffies(dhd_watchdog_ms
), time_lapse
));
3298 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
3304 complete_and_exit(&tsk
->completed
, 0);
3307 static void dhd_watchdog(ulong data
)
3309 dhd_info_t
*dhd
= (dhd_info_t
*)data
;
3310 unsigned long flags
;
3312 if (dhd
->pub
.dongle_reset
) {
3316 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
3317 up(&dhd
->thr_wdt_ctl
.sema
);
3321 /* Call the bus module watchdog */
3322 dhd_bus_watchdog(&dhd
->pub
);
3324 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
3325 /* Count the tick for reference */
3328 /* Reschedule the watchdog */
3329 if (dhd
->wd_timer_valid
)
3330 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
3331 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
3335 #ifdef ENABLE_ADAPTIVE_SCHED
3337 dhd_sched_policy(int prio
)
3339 struct sched_param param
;
3340 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH
) {
3341 param
.sched_priority
= 0;
3342 setScheduler(current
, SCHED_NORMAL
, ¶m
);
3344 if (get_scheduler_policy(current
) != SCHED_FIFO
) {
3345 param
.sched_priority
= (prio
< MAX_RT_PRIO
)? prio
: (MAX_RT_PRIO
-1);
3346 setScheduler(current
, SCHED_FIFO
, ¶m
);
3350 #endif /* ENABLE_ADAPTIVE_SCHED */
3351 #ifdef DEBUG_CPU_FREQ
3352 static int dhd_cpufreq_notifier(struct notifier_block
*nb
, unsigned long val
, void *data
)
3354 dhd_info_t
*dhd
= container_of(nb
, struct dhd_info
, freq_trans
);
3355 struct cpufreq_freqs
*freq
= data
;
3359 if (val
== CPUFREQ_POSTCHANGE
) {
3360 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
3361 freq
->new, freq
->cpu
));
3362 *per_cpu_ptr(dhd
->new_freq
, freq
->cpu
) = freq
->new;
3368 #endif /* DEBUG_CPU_FREQ */
3370 dhd_dpc_thread(void *data
)
3372 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
3373 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
3375 /* This thread doesn't need any user-level access,
3376 * so get rid of all our resources
3378 if (dhd_dpc_prio
> 0)
3380 struct sched_param param
;
3381 param
.sched_priority
= (dhd_dpc_prio
< MAX_RT_PRIO
)?dhd_dpc_prio
:(MAX_RT_PRIO
-1);
3382 setScheduler(current
, SCHED_FIFO
, ¶m
);
3385 #ifdef CUSTOM_DPC_CPUCORE
3386 set_cpus_allowed_ptr(current
, cpumask_of(CUSTOM_DPC_CPUCORE
));
3388 if (dhd
->pub
.conf
->dpc_cpucore
>= 0) {
3389 printf("%s: set dpc_cpucore %d from config.txt\n", __FUNCTION__
, dhd
->pub
.conf
->dpc_cpucore
);
3390 set_cpus_allowed_ptr(current
, cpumask_of(dhd
->pub
.conf
->dpc_cpucore
));
3393 #ifdef CUSTOM_SET_CPUCORE
3394 dhd
->pub
.current_dpc
= current
;
3395 #endif /* CUSTOM_SET_CPUCORE */
3396 /* Run until signal received */
3398 if (!binary_sema_down(tsk
)) {
3399 #ifdef ENABLE_ADAPTIVE_SCHED
3400 dhd_sched_policy(dhd_dpc_prio
);
3401 #endif /* ENABLE_ADAPTIVE_SCHED */
3402 SMP_RD_BARRIER_DEPENDS();
3403 if (tsk
->terminated
) {
3407 /* Call bus dpc unless it indicated down (then clean stop) */
3408 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
3409 dhd_os_wd_timer_extend(&dhd
->pub
, TRUE
);
3410 while (dhd_bus_dpc(dhd
->pub
.bus
)) {
3411 /* process all data */
3413 dhd_os_wd_timer_extend(&dhd
->pub
, FALSE
);
3414 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3418 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
3419 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3425 complete_and_exit(&tsk
->completed
, 0);
3429 dhd_rxf_thread(void *data
)
3431 tsk_ctl_t
*tsk
= (tsk_ctl_t
*)data
;
3432 dhd_info_t
*dhd
= (dhd_info_t
*)tsk
->parent
;
3433 #if defined(WAIT_DEQUEUE)
3434 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
3435 ulong watchdogTime
= OSL_SYSUPTIME(); /* msec */
3437 dhd_pub_t
*pub
= &dhd
->pub
;
3439 /* This thread doesn't need any user-level access,
3440 * so get rid of all our resources
3442 if (dhd_rxf_prio
> 0)
3444 struct sched_param param
;
3445 param
.sched_priority
= (dhd_rxf_prio
< MAX_RT_PRIO
)?dhd_rxf_prio
:(MAX_RT_PRIO
-1);
3446 setScheduler(current
, SCHED_FIFO
, ¶m
);
3449 DAEMONIZE("dhd_rxf");
3450 /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
3452 /* signal: thread has started */
3453 complete(&tsk
->completed
);
3454 #ifdef CUSTOM_SET_CPUCORE
3455 dhd
->pub
.current_rxf
= current
;
3456 #endif /* CUSTOM_SET_CPUCORE */
3457 /* Run until signal received */
3459 if (down_interruptible(&tsk
->sema
) == 0) {
3461 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
3464 #ifdef ENABLE_ADAPTIVE_SCHED
3465 dhd_sched_policy(dhd_rxf_prio
);
3466 #endif /* ENABLE_ADAPTIVE_SCHED */
3468 SMP_RD_BARRIER_DEPENDS();
3470 if (tsk
->terminated
) {
3473 skb
= dhd_rxf_dequeue(pub
);
3479 void *skbnext
= PKTNEXT(pub
->osh
, skb
);
3480 PKTSETNEXT(pub
->osh
, skb
, NULL
);
3482 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
3486 local_irq_save(flags
);
3488 local_irq_restore(flags
);
3493 #if defined(WAIT_DEQUEUE)
3494 if (OSL_SYSUPTIME() - watchdogTime
> RXF_WATCHDOG_TIME
) {
3496 watchdogTime
= OSL_SYSUPTIME();
3500 DHD_OS_WAKE_UNLOCK(pub
);
3505 complete_and_exit(&tsk
->completed
, 0);
3509 void dhd_dpc_kill(dhd_pub_t
*dhdp
)
3521 tasklet_kill(&dhd
->tasklet
);
3522 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__
));
3524 #endif /* BCMPCIE */
3531 dhd
= (dhd_info_t
*)data
;
3533 /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
3534 * down below , wake lock is set,
3535 * the tasklet is initialized in dhd_attach()
3537 /* Call bus dpc unless it indicated down (then clean stop) */
3538 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
3539 if (dhd_bus_dpc(dhd
->pub
.bus
))
3540 tasklet_schedule(&dhd
->tasklet
);
3542 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3544 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
3545 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
3550 dhd_sched_dpc(dhd_pub_t
*dhdp
)
3552 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
3554 DHD_OS_WAKE_LOCK(dhdp
);
3555 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
3556 /* If the semaphore does not get up,
3557 * wake unlock should be done here
3559 if (!binary_sema_up(&dhd
->thr_dpc_ctl
))
3560 DHD_OS_WAKE_UNLOCK(dhdp
);
3563 tasklet_schedule(&dhd
->tasklet
);
3568 dhd_sched_rxf(dhd_pub_t
*dhdp
, void *skb
)
3570 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
3571 #ifdef RXF_DEQUEUE_ON_BUSY
3574 #endif /* RXF_DEQUEUE_ON_BUSY */
3576 DHD_OS_WAKE_LOCK(dhdp
);
3578 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
3579 #ifdef RXF_DEQUEUE_ON_BUSY
3581 ret
= dhd_rxf_enqueue(dhdp
, skb
);
3582 if (ret
== BCME_OK
|| ret
== BCME_ERROR
)
3585 OSL_SLEEP(50); /* waiting for dequeueing */
3586 } while (retry
-- > 0);
3588 if (retry
<= 0 && ret
== BCME_BUSY
) {
3592 void *skbnext
= PKTNEXT(dhdp
->osh
, skbp
);
3593 PKTSETNEXT(dhdp
->osh
, skbp
, NULL
);
3597 DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
3600 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
3601 up(&dhd
->thr_rxf_ctl
.sema
);
3604 #else /* RXF_DEQUEUE_ON_BUSY */
3606 if (dhd_rxf_enqueue(dhdp
, skb
) == BCME_OK
)
3609 if (dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
3610 up(&dhd
->thr_rxf_ctl
.sema
);
3613 #endif /* RXF_DEQUEUE_ON_BUSY */
3617 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
3619 dhd_toe_get(dhd_info_t
*dhd
, int ifidx
, uint32
*toe_ol
)
3625 memset(&ioc
, 0, sizeof(ioc
));
3627 ioc
.cmd
= WLC_GET_VAR
;
3629 ioc
.len
= (uint
)sizeof(buf
);
3632 strncpy(buf
, "toe_ol", sizeof(buf
) - 1);
3633 buf
[sizeof(buf
) - 1] = '\0';
3634 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
3635 /* Check for older dongle image that doesn't support toe_ol */
3637 DHD_ERROR(("%s: toe not supported by device\n",
3638 dhd_ifname(&dhd
->pub
, ifidx
)));
3642 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
3646 memcpy(toe_ol
, buf
, sizeof(uint32
));
3650 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
3652 dhd_toe_set(dhd_info_t
*dhd
, int ifidx
, uint32 toe_ol
)
3658 memset(&ioc
, 0, sizeof(ioc
));
3660 ioc
.cmd
= WLC_SET_VAR
;
3662 ioc
.len
= (uint
)sizeof(buf
);
3665 /* Set toe_ol as requested */
3667 strncpy(buf
, "toe_ol", sizeof(buf
) - 1);
3668 buf
[sizeof(buf
) - 1] = '\0';
3669 memcpy(&buf
[sizeof("toe_ol")], &toe_ol
, sizeof(uint32
));
3671 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
3672 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
3673 dhd_ifname(&dhd
->pub
, ifidx
), ret
));
3677 /* Enable toe globally only if any components are enabled. */
3679 toe
= (toe_ol
!= 0);
3682 memcpy(&buf
[sizeof("toe")], &toe
, sizeof(uint32
));
3684 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
3685 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd
->pub
, ifidx
), ret
));
3693 #if defined(WL_CFG80211)
3694 void dhd_set_scb_probe(dhd_pub_t
*dhd
)
3696 #define NUM_SCB_MAX_PROBE 3
3698 wl_scb_probe_t scb_probe
;
3699 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12];
3701 memset(&scb_probe
, 0, sizeof(wl_scb_probe_t
));
3703 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
)
3706 bcm_mkiovar("scb_probe", NULL
, 0, iovbuf
, sizeof(iovbuf
));
3708 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
3709 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__
));
3711 memcpy(&scb_probe
, iovbuf
, sizeof(wl_scb_probe_t
));
3713 scb_probe
.scb_max_probe
= NUM_SCB_MAX_PROBE
;
3715 bcm_mkiovar("scb_probe", (char *)&scb_probe
,
3716 sizeof(wl_scb_probe_t
), iovbuf
, sizeof(iovbuf
));
3717 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
3718 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__
));
3719 #undef NUM_SCB_MAX_PROBE
3722 #endif /* WL_CFG80211 */
3724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
3726 dhd_ethtool_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*info
)
3728 dhd_info_t
*dhd
= DHD_DEV_INFO(net
);
3730 snprintf(info
->driver
, sizeof(info
->driver
), "wl");
3731 snprintf(info
->version
, sizeof(info
->version
), "%lu", dhd
->pub
.drv_version
);
3734 struct ethtool_ops dhd_ethtool_ops
= {
3735 .get_drvinfo
= dhd_ethtool_get_drvinfo
3737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
/*
 * Handle SIOCETHTOOL ioctls copied from userspace (uaddr points at the
 * userspace ethtool request).  Supports driver-info queries and, when TOE
 * is compiled in, get/set of TX/RX checksum offload in the dongle.
 *
 * NOTE(review): several truncated lines (returns, switch scaffolding,
 * info.cmd/edata.cmd assignments) were reconstructed from context —
 * verify against upstream dhd_linux.c.
 */
static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver)-1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}

		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}

		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
		         (int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}
		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
/*
 * Decide whether an ioctl error indicates a hung/dead dongle and, if so,
 * send the HANG event to userspace (triggers the Android recovery path).
 * Returns TRUE when a hang message was sent.
 *
 * NOTE(review): early-return guards were reconstructed from truncated
 * lines — verify against upstream dhd_linux.c.
 */
static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
	dhd_info_t *dhd;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	if (!dhdp->up)
		return FALSE;

	dhd = (dhd_info_t *)dhdp->info;
#if !defined(BCMPCIE)
	/* DPC thread already gone: driver is unloading, do not escalate */
	if (dhd->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif

#ifdef CONFIG_MACH_UNIVERSAL5433
	/* old revision does not send hang message */
	if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
#else
	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
#endif /* CONFIG_MACH_UNIVERSAL5433 */
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
		net_os_send_hang_message(net);
		return TRUE;
	}
	return FALSE;
}
/*
 * Core ioctl dispatcher shared by the char-dev and netdev entry points.
 * Routes DHD_IOCTL_MAGIC requests to the local dhd_ioctl() handler and
 * everything else to the dongle via dhd_wl_ioctl(); serializes key/disassoc
 * requests against pending 802.1x frames; on exit feeds the result to
 * dhd_check_hang() so bus failures escalate to a HANG event.
 *
 * NOTE(review): the done label, !net / !iswl guards and the WLMEDIA_HTSF
 * shortcut scaffolding were reconstructed from truncated lines — verify
 * against upstream dhd_linux.c.
 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;
#ifdef WLMEDIA_HTSF
	dhd_info_t *dhd = (dhd_info_t *)pub->info;  /* used by htsf debug shortcuts */
#endif

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		goto done;
	}

	if (data_buf)
		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* send to dongle (must be up, and wl). */
	if (pub->busstate != DHD_BUS_DATA) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
	 * prevent M4 encryption and
	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
	 * prevent disassoc frame being sent before WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

#ifdef WLMEDIA_HTSF
	if (data_buf) {
		/* short cut wl ioctl calls here for htsf debug commands */
		if (strcmp("htsf", data_buf) == 0) {
			dhd_ioctl_htsf_get(dhd, 0);
			return BCME_OK;
		}

		if (strcmp("htsflate", data_buf) == 0) {
			if (ioc->set) {
				/* reset all latency bookkeeping */
				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
				memset(&maxdelayts, 0, sizeof(tstamp_t));
				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			}
			return BCME_OK;
		}
		if (strcmp("htsfclear", data_buf) == 0) {
			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
			return BCME_OK;
		}
		if (strcmp("htsfhis", data_buf) == 0) {
			dhd_dump_htsfhisto(&vi_d1, "H to D");
			dhd_dump_htsfhisto(&vi_d2, "D to D");
			dhd_dump_htsfhisto(&vi_d3, "D to H");
			dhd_dump_htsfhisto(&vi_d4, "H to H");
			return BCME_OK;
		}
		if (strcmp("tsport", data_buf) == 0) {
			if (ioc->set)
				memcpy(&tsport, data_buf + 7, 4);
			else
				DHD_ERROR(("current timestamp port: %d \n", tsport));
			return BCME_OK;
		}
	}
#endif /* WLMEDIA_HTSF */

	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
	    data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
		bcmerror = BCME_UNSUPPORTED;
#endif
		goto done;
	}
	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

done:
	/* escalate bus failures into a HANG notification */
	dhd_check_hang(net, pub, bcmerror);

	return bcmerror;
}
/*
 * Netdevice .ndo_do_ioctl handler.  Validates driver state, dispatches
 * wireless-extension / ethtool / Android private commands, then for
 * SIOCDEVPRIVATE copies the wl_ioctl_t header (with a 32-bit compat
 * translation when issued from a 32-bit task), bounces the payload through
 * a kernel buffer, and hands off to dhd_ioctl_process().  Wake-lock and
 * PERIM lock bracket the whole operation; PERIM is dropped around
 * copy_{from,to}_user since those may sleep/fault.
 *
 * NOTE(review): local declarations, the done label and the else-branch
 * around the CONFIG_COMPAT copy were reconstructed from truncated lines —
 * verify against upstream dhd_linux.c.
 */
static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int bcmerror = 0;
	int ifidx;
	int ret;
	void *local_buf = NULL;
	u16 buflen = 0;

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
		DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return OSL_ERROR(BCME_NOTUP);
	}

	/* send to dongle only if we are not waiting for reload already */
	if (dhd->pub.hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return OSL_ERROR(BCME_DONGLE_DOWN);
	}

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -1;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* defined(WL_WIRELESS_EXT) */

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */

	if (cmd == SIOCDEVPRIVATE+1) {
		/* Android private command channel */
		ret = wl_android_priv_cmd(net, ifr, cmd);
		dhd_check_hang(net, &dhd->pub, ret);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}

	if (cmd != SIOCDEVPRIVATE) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -EOPNOTSUPP;
	}

	memset(&ioc, 0, sizeof(ioc));

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		/* 32-bit userspace on 64-bit kernel: widen the ioctl header */
		compat_wl_ioctl_t compat_ioc;
		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
		ioc.cmd = compat_ioc.cmd;
		ioc.buf = compat_ptr(compat_ioc.buf);
		ioc.len = compat_ioc.len;
		ioc.set = compat_ioc.set;
		ioc.used = compat_ioc.used;
		ioc.needed = compat_ioc.needed;
		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
			sizeof(uint)) != 0)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
	} else
#endif /* CONFIG_COMPAT */
	{
		/* Copy the ioc control structure part of ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}

		/* To differentiate between wl and dhd read 4 more byes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
	}

	if (!capable(CAP_NET_ADMIN)) {
		bcmerror = BCME_EPERM;
		goto done;
	}

	if (ioc.len > 0) {
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
			bcmerror = BCME_NOMEM;
			goto done;
		}

		/* copy_from_user may fault/sleep: drop PERIM around it */
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			bcmerror = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		/* NUL-terminate so iovar-name strcmp/strncmp are bounded */
		*(char *)(local_buf + buflen) = '\0';
	}

	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	if (!bcmerror && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			bcmerror = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen+1);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(bcmerror);
}
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
/*
 * Toggle the dongle's deepsleep power mode.
 *   flag == 1: disable MPC, then enable deepsleep ("ON").
 *   flag == 0: repeatedly request deepsleep off (up to MAX_TRY_CNT,
 *              polling the iovar until it reads 0), then re-enable MPC.
 * Firmware ioctl failures are logged but not returned to the caller.
 *
 * NOTE(review): local declarations, powervar values and loop/break
 * structure were reconstructed from truncated lines — verify against
 * upstream dhd_linux.c.
 */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
{
	char iovbuf[20];
	uint powervar = 0;
	dhd_pub_t *dhdp;
	int cnt = 0;
	int ret = 0;

	dhdp = &dhd->pub;

	switch (flag) {
	case 1 : /* Deepsleep on */
		DHD_ERROR(("dhd_deepsleep: ON\n"));
		/* give some time to sysioc_work before deepsleep */
		OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
		/* disable pkt filter */
		dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
		/* Disable MPC so firmware honors the explicit deepsleep request */
		powervar = 0;
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
		dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);

		/* Enable Deepsleep */
		powervar = 1;
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
		dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
		break;

	case 0: /* Deepsleep Off */
		DHD_ERROR(("dhd_deepsleep: OFF\n"));

		/* Disable Deepsleep */
		for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
			powervar = 0;
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4,
				iovbuf, sizeof(iovbuf));
			dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
				sizeof(iovbuf), TRUE, 0);

			/* read back to confirm the dongle actually left deepsleep */
			memset(iovbuf, 0, sizeof(iovbuf));
			bcm_mkiovar("deepsleep", (char *)&powervar, 4,
				iovbuf, sizeof(iovbuf));
			if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
				sizeof(iovbuf), FALSE, 0)) < 0) {
				DHD_ERROR(("the error of dhd deepsleep status"
					" ret value :%d\n", ret));
			} else {
				if (!(*(int *)iovbuf)) {
					DHD_ERROR(("deepsleep mode is 0,"
						" count: %d\n", cnt));
					break;
				}
			}
		}

		/* Restore MPC power saving */
		powervar = 1;
		memset(iovbuf, 0, sizeof(iovbuf));
		bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
		dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
		break;
	}

	return 0;
}
/*
 * Netdevice .ndo_stop handler: stops the TX queue, tears down cfg80211
 * state and any leftover virtual interfaces (for the primary interface),
 * flushes wlfc, stops the protocol layer, powers the chip off for
 * dynamically-loaded builds, and clears hang/timeout/country state.
 *
 * NOTE(review): the exit label, ifidx==0 nesting and else-branch around
 * the deepsleep call were reconstructed from truncated lines — verify
 * against upstream dhd_linux.c.
 */
static int dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	printf("%s: Enter %p\n", __FUNCTION__, net);
	if (dhd->pub.up == 0) {
		goto exit;
	}

	dhd_if_flush_sta(DHD_DEV_IFP(net));

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	/* Set state and stop OS transmissions */
	netif_stop_queue(net);
	dhd->pub.up = 0;

#ifdef WL_CFG80211
	if (ifidx == 0) {
		wl_cfg80211_down(NULL);

		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);
				dhd_net_if_unlock_local(dhd);
			}
		}
	}
#endif /* WL_CFG80211 */

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif
	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	if (ifidx == 0 && !dhd_download_fw_on_driverload)
		wl_android_wifi_off(net);
	else {
		/* built-in driver: drop the dongle into deepsleep if configured */
		if (dhd->pub.conf->deepsleep)
			dhd_deepsleep(dhd, 1);
	}

	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

	dhd->pub.hang_was_sent = 0;

	/* Clear country spec for for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
		dhd->pub.dhd_cspec.rev = 0;
		dhd->pub.dhd_cspec.ccode[0] = 0x00;
	}

	printf("%s: Exit\n", __FUNCTION__);
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	return 0;
}
4304 #if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
4305 extern bool g_first_broadcast_scan
;
/*
 * Enable 802.11u interworking in the firmware and, on success, set the
 * basic WNM capabilities needed for Hotspot 2.0 Release 2 (BSS transition
 * management + WNM notification).  Returns the last ioctl status.
 */
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	uint32 enable = true;
	int ret = BCME_OK;

	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
		DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	if (ret == BCME_OK) {
		/* basic capabilities for HS20 REL2 */
		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
			DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
		}
	}

	return ret;
}
/*
 * Netdevice .ndo_open handler: powers the chip on (for dynamically-loaded
 * builds), brings the bus up to DATA state, syncs the MAC address into the
 * net_device, restores TOE/csum offload features, brings cfg80211 up and
 * finally enables the TX queue.  On any failure the interface is rolled
 * back via dhd_stop().
 *
 * NOTE(review): local declarations, the exit label, ret error paths and
 * the ifidx==0 nesting were reconstructed from truncated lines — verify
 * against upstream dhd_linux.c.
 */
static int dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif
	int ifidx;
	int32 ret = 0;

	printf("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;

#if !defined(WL_CFG80211)
	/*
	 * Force start if ifconfig_up gets called before START command
	 * We keep WEXT's wl_control_wl_start to provide backward compatibility
	 * This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif
			ret = wl_android_wifi_on(net);
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}

		if (dhd->pub.busstate != DHD_BUS_DATA) {

			/* try to bring up bus */
			DHD_PERIM_UNLOCK(&dhd->pub);
			ret = dhd_bus_start(&dhd->pub);
			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}
		if (dhd_download_fw_on_driverload) {
			if (dhd->pub.conf->deepsleep)
				dhd_deepsleep(dhd, 0);
		}

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		else
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
#endif /* TOE */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(NULL))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		dhd_set_scb_probe(&dhd->pub);
#endif /* WL_CFG80211 */
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	dhd->pub.up = 1;

#ifdef BCMDBGFS
	dhd_dbg_init(&dhd->pub);
#endif

	OLD_MOD_INC_USE_COUNT;
exit:
	if (ret)
		dhd_stop(net);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
	mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */

	printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
	return ret;
}
/*
 * Bring the driver up on demand (called from outside the normal
 * ifconfig-up path).  No-op when the bus is already in DATA state;
 * otherwise delegates to dhd_open().
 *
 * NOTE(review): return values on the guard paths were reconstructed from
 * truncated lines — verify against upstream dhd_linux.c.
 */
int dhd_do_driver_init(struct net_device *net)
{
	dhd_info_t *dhd = NULL;

	if (!net) {
		DHD_ERROR(("Primary Interface not initialized \n"));
		return -EINVAL;
	}

#ifdef MULTIPLE_SUPPLICANT
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
	/* probe already in progress: nothing to do */
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
		return 0;
	}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif /* MULTIPLE_SUPPLICANT */

	/*  && defined(OEM_ANDROID) && defined(BCMSDIO) */
	dhd = DHD_DEV_INFO(net);

	/* If driver is already initialized, do nothing
	 */
	if (dhd->pub.busstate == DHD_BUS_DATA) {
		DHD_TRACE(("Driver already Inititalized. Nothing to do"));
		return 0;
	}

	if (dhd_open(net) < 0) {
		DHD_ERROR(("Driver Init Failed \n"));
		return -1;
	}

	return 0;
}
4506 dhd_event_ifadd(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
4510 if (wl_cfg80211_notify_ifadd(ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
4514 /* handle IF event caused by wl commands, SoftAP, WEXT and
4515 * anything else. This has to be done asynchronously otherwise
4516 * DPC will be blocked (and iovars will timeout as DPC has no chance
4517 * to read the response back)
4519 if (ifevent
->ifidx
> 0) {
4520 dhd_if_event_t
*if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
4522 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
4523 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
4524 strncpy(if_event
->name
, name
, IFNAMSIZ
);
4525 if_event
->name
[IFNAMSIZ
- 1] = '\0';
4526 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
,
4527 DHD_WQ_WORK_IF_ADD
, dhd_ifadd_event_handler
, DHD_WORK_PRIORITY_LOW
);
4534 dhd_event_ifdel(dhd_info_t
*dhdinfo
, wl_event_data_if_t
*ifevent
, char *name
, uint8
*mac
)
4536 dhd_if_event_t
*if_event
;
4538 #if defined(WL_CFG80211) && !defined(P2PONEINT)
4539 if (wl_cfg80211_notify_ifdel(ifevent
->ifidx
, name
, mac
, ifevent
->bssidx
) == BCME_OK
)
4541 #endif /* WL_CFG80211 */
4543 /* handle IF event caused by wl commands, SoftAP, WEXT and
4546 if_event
= MALLOC(dhdinfo
->pub
.osh
, sizeof(dhd_if_event_t
));
4547 memcpy(&if_event
->event
, ifevent
, sizeof(if_event
->event
));
4548 memcpy(if_event
->mac
, mac
, ETHER_ADDR_LEN
);
4549 strncpy(if_event
->name
, name
, IFNAMSIZ
);
4550 if_event
->name
[IFNAMSIZ
- 1] = '\0';
4551 dhd_deferred_schedule_work(dhdinfo
->dhd_deferred_wq
, (void *)if_event
, DHD_WQ_WORK_IF_DEL
,
4552 dhd_ifdel_event_handler
, DHD_WORK_PRIORITY_LOW
);
/* unregister and free the existing net_device interface (if any) in iflist and
 * allocate a new one. the slot is reused. this function does NOT register the
 * new interface to linux kernel. dhd_register_if does the job
 */
/*
 * Returns the newly allocated net_device on success, NULL on OOM.
 * need_rtnl_lock selects unregister_netdev (takes rtnl) vs
 * unregister_netdevice (caller already holds rtnl) for a reused slot.
 *
 * NOTE(review): if/else scaffolding, the fail label and the ifp->idx /
 * mac-NULL guards were reconstructed from truncated lines — verify
 * against upstream dhd_linux.c.
 */
struct net_device *
dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
	uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;

	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
	ifp = dhdinfo->iflist[ifidx];

	if (ifp != NULL) {
		/* slot reuse: tear down the previous netdevice first */
		if (ifp->net != NULL) {
			DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));

			dhd_dev_priv_clear(ifp->net); /* clear net_device private */

			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_stop_queue(ifp->net);
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
		}
	} else {
		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
		if (ifp == NULL) {
			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
			return NULL;
		}
	}

	memset(ifp, 0, sizeof(dhd_if_t));
	ifp->info = dhdinfo;
	ifp->idx = ifidx;
	ifp->bssidx = bssidx;
	if (mac != NULL)
		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);

	/* Allocate etherdev, including space for private structure */
	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
	if (ifp->net == NULL) {
		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
		goto fail;
	}

	/* Setup the dhd interface's netdevice private structure. */
	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);

	if (name && name[0]) {
		strncpy(ifp->net->name, name, IFNAMSIZ);
		ifp->net->name[IFNAMSIZ - 1] = '\0';
	}
#ifdef WL_CFG80211
	/* primary interface is freed directly; virtuals go through the
	 * cfg80211-aware destructor
	 */
	if (ifidx == 0)
		ifp->net->destructor = free_netdev;
	else
		ifp->net->destructor = dhd_netdev_free;
#else
	ifp->net->destructor = free_netdev;
#endif /* WL_CFG80211 */
	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
	ifp->name[IFNAMSIZ - 1] = '\0';
	dhdinfo->iflist[ifidx] = ifp;

#ifdef PCIE_FULL_DONGLE
	/* Initialize STA info list */
	INIT_LIST_HEAD(&ifp->sta_list);
	DHD_IF_STA_LIST_LOCK_INIT(ifp);
#endif /* PCIE_FULL_DONGLE */

	return ifp->net;

fail:
	if (ifp != NULL) {
		if (ifp->net != NULL) {
			dhd_dev_priv_clear(ifp->net);
			free_netdev(ifp->net);
			ifp->net = NULL;
		}
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
		ifp = NULL;
	}
	dhdinfo->iflist[ifidx] = NULL;
	return NULL;
}
/* unregister and free the the net_device interface associated with the indexed
 * slot, also free the slot memory and set the slot pointer to NULL
 */
/*
 * need_rtnl_lock selects unregister_netdev (takes rtnl) vs
 * unregister_netdevice (caller already holds rtnl).
 *
 * NOTE(review): if/else scaffolding and return value reconstructed from
 * truncated lines — verify against upstream dhd_linux.c.
 */
int
dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;

	ifp = dhdinfo->iflist[ifidx];
	if (ifp != NULL) {
		if (ifp->net != NULL) {
			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));

			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_stop_queue(ifp->net);

#ifdef SET_RPS_CPUS
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
		}
#ifdef DHD_WMF
		dhd_wmf_cleanup(dhdpub, ifidx);
#endif /* DHD_WMF */

		/* release any stations still associated to this interface */
		dhd_if_del_sta_list(ifp);

		dhdinfo->iflist[ifidx] = NULL;
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
	}

	return BCME_OK;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Netdevice callbacks for the primary (wlan0) interface. */
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_open,
	.ndo_stop = dhd_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	/* callback was renamed to ndo_set_rx_mode in 3.2 */
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};

/* Netdevice callbacks for virtual interfaces (no open/stop here:
 * lifetime is managed through the primary interface).
 */
static struct net_device_ops dhd_ops_virt = {
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#ifdef P2PONEINT
extern int wl_cfgp2p_if_open(struct net_device *net);
extern int wl_cfgp2p_if_stop(struct net_device *net);

/* Netdevice callbacks for the dedicated P2P virtual interface:
 * open/stop are routed through cfgp2p instead of dhd_open/dhd_stop.
 */
static struct net_device_ops dhd_cfgp2p_ops_virt = {
	.ndo_open = wl_cfgp2p_if_open,
	.ndo_stop = wl_cfgp2p_if_stop,
	.ndo_get_stats = dhd_get_stats,
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
#endif /* P2PONEINT */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
4750 extern void debugger_init(void *bus_handle
);
#ifdef SHOW_LOGTRACE
/* Filesystem path to the firmware's logstrs.bin (event-log format strings).
 * Read-only module parameter; overridable at insmod time.
 */
static char *logstrs_path = "/root/logstrs.bin";
module_param(logstrs_path, charp, S_IRUGO);
/*
 * Load and parse logstrs.bin (firmware event-log format strings) from
 * logstrs_path into 'temp'.  Supports three on-disk layouts: the current
 * header-bearing format (LOGSTRS_MAGIC trailer), the legacy ROM+RAM format
 * (4324b5 only, detected via a fixed first ROM string), and the legacy
 * RAM-only format.  On success temp->fmts is an array of num_fmts pointers
 * into the raw_fmts buffer (which temp also owns).
 *
 * NOTE(review): error-path cleanup (fail label), set_fs/get_fs bracketing
 * and the local declarations were reconstructed from truncated lines —
 * verify against upstream dhd_linux.c.
 */
static void
dhd_init_logstrs_array(dhd_event_log_t *temp)
{
	struct file *filep = NULL;
	struct kstat stat;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	int logstrs_size = 0;

	logstr_header_t *hdr = NULL;
	uint32 *lognums = NULL;
	char *logstrs = NULL;
	int ram_index = 0;
	char **fmts;
	int num_fmts = 0;
	uint32 i = 0;
	int error = 0;

	/* vfs_read below operates on a kernel buffer */
	fs = get_fs();
	set_fs(KERNEL_DS);

	filep = filp_open(logstrs_path, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
		goto fail;
	}
	error = vfs_stat(logstrs_path, &stat);
	if (error) {
		DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
		goto fail;
	}
	logstrs_size = (int) stat.size;

	raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
	if (raw_fmts == NULL) {
		DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
		goto fail;
	}
	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
		DHD_ERROR(("Error: Log strings file read failed\n"));
		goto fail;
	}

	/* Remember header from the logstrs.bin file */
	hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_header_t));

	if (hdr->log_magic == LOGSTRS_MAGIC) {
		/*
		 * logstrs.bin start with header.
		 */
		num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
		ram_index = (hdr->ram_lognums_offset -
			hdr->rom_lognums_offset) / sizeof(uint32);
		lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
		logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
	} else {
		/*
		 * Legacy logstrs.bin format without header.
		 */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
		if (num_fmts == 0) {
			/* Legacy ROM/RAM logstrs.bin format:
			 *   - ROM 'lognums' section
			 *   - RAM 'lognums' section
			 *   - ROM 'logstrs' section.
			 *   - RAM 'logstrs' section.
			 *
			 * 'lognums' is an array of indexes for the strings in the
			 * 'logstrs' section. The first uint32 is 0 (index of first
			 * string in ROM 'logstrs' section).
			 *
			 * The 4324b5 is the only ROM that uses this legacy format. Use the
			 * fixed number of ROM fmtnums to find the start of the RAM
			 * 'lognums' section. Use the fixed first ROM string ("Con\n") to
			 * find the ROM 'logstrs' section.
			 */
#define NUM_4324B5_ROM_FMTS	186
#define FIRST_4324B5_ROM_LOGSTR "Con\n"
			ram_index = NUM_4324B5_ROM_FMTS;
			lognums = (uint32 *) raw_fmts;
			num_fmts = ram_index;
			logstrs = (char *) &raw_fmts[num_fmts << 2];
			while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
				num_fmts++;
				logstrs = (char *) &raw_fmts[num_fmts << 2];
			}
		} else {
			/* Legacy RAM-only logstrs.bin format:
			 *   - RAM 'lognums' section
			 *   - RAM 'logstrs' section.
			 *
			 * 'lognums' is an array of indexes for the strings in the
			 * 'logstrs' section. The first uint32 is an index to the
			 * start of 'logstrs'. Therefore, if this index is divided
			 * by 'sizeof(uint32)' it provides the number of logstr
			 * entries.
			 */
			lognums = (uint32 *) raw_fmts;
			logstrs = (char *) &raw_fmts[num_fmts << 2];
		}
	}
	fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
	if (fmts == NULL) {
		DHD_ERROR(("Failed to allocate fmts memory\n"));
		goto fail;
	}

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		 * (they are 0-indexed relative to 'rom_logstrs_offset').
		 *
		 * RAM lognums are already indexed to point to the correct RAM logstrs (they
		 * are 0-indexed relative to the start of the logstrs.bin file).
		 */
		if (i == ram_index) {
			logstrs = raw_fmts;
		}
		fmts[i] = &logstrs[lognums[i]];
	}
	temp->fmts = fmts;
	temp->raw_fmts = raw_fmts;
	temp->num_fmts = num_fmts;
	filp_close(filep, NULL);
	set_fs(fs);
	return;
fail:
	if (raw_fmts) {
		kfree(raw_fmts);
		raw_fmts = NULL;
	}
	if (!IS_ERR(filep))
		filp_close(filep, NULL);
	set_fs(fs);
	temp->fmts = NULL;
	return;
}
4894 #endif /* SHOW_LOGTRACE */
4898 dhd_attach(osl_t
*osh
, struct dhd_bus
*bus
, uint bus_hdrlen
)
4900 dhd_info_t
*dhd
= NULL
;
4901 struct net_device
*net
= NULL
;
4902 char if_name
[IFNAMSIZ
] = {'\0'};
4903 uint32 bus_type
= -1;
4904 uint32 bus_num
= -1;
4905 uint32 slot_num
= -1;
4906 wifi_adapter_info_t
*adapter
= NULL
;
4908 dhd_attach_states_t dhd_state
= DHD_ATTACH_STATE_INIT
;
4909 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4911 /* will implement get_ids for DBUS later */
4912 #if defined(BCMSDIO)
4913 dhd_bus_get_ids(bus
, &bus_type
, &bus_num
, &slot_num
);
4915 adapter
= dhd_wifi_platform_get_adapter(bus_type
, bus_num
, slot_num
);
4917 /* Allocate primary dhd_info */
4918 dhd
= wifi_platform_prealloc(adapter
, DHD_PREALLOC_DHD_INFO
, sizeof(dhd_info_t
));
4920 dhd
= MALLOC(osh
, sizeof(dhd_info_t
));
4922 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__
));
4926 memset(dhd
, 0, sizeof(dhd_info_t
));
4927 dhd_state
|= DHD_ATTACH_STATE_DHD_ALLOC
;
4929 dhd
->unit
= dhd_found
+ instance_base
; /* do not increment dhd_found, yet */
4932 dhd
->adapter
= adapter
;
4934 #ifdef GET_CUSTOM_MAC_ENABLE
4935 wifi_platform_get_mac_addr(dhd
->adapter
, dhd
->pub
.mac
.octet
);
4936 #endif /* GET_CUSTOM_MAC_ENABLE */
4937 dhd
->thr_dpc_ctl
.thr_pid
= DHD_PID_KT_TL_INVALID
;
4938 dhd
->thr_wdt_ctl
.thr_pid
= DHD_PID_KT_INVALID
;
4940 /* Initialize thread based operation and lock */
4941 sema_init(&dhd
->sdsem
, 1);
4943 /* Link to info module */
4944 dhd
->pub
.info
= dhd
;
4947 /* Link to bus module */
4949 dhd
->pub
.hdrlen
= bus_hdrlen
;
4951 /* dhd_conf must be attached after linking dhd to dhd->pub.info,
4952 * because dhd_detech will check .info is NULL or not.
4954 if (dhd_conf_attach(&dhd
->pub
) != 0) {
4955 DHD_ERROR(("dhd_conf_attach failed\n"));
4958 dhd_conf_reset(&dhd
->pub
);
4959 dhd_conf_set_chiprev(&dhd
->pub
, dhd_bus_chip(bus
), dhd_bus_chiprev(bus
));
4960 dhd_conf_preinit(&dhd
->pub
);
4962 /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
4963 * This is indeed a hack but we have to make it work properly before we have a better
4966 dhd_update_fw_nv_path(dhd
);
4967 #ifndef BUILD_IN_KERNEL
4968 dhd_conf_read_config(&dhd
->pub
, dhd
->conf_path
);
4971 /* Set network interface name if it was provided as module parameter */
4972 if (iface_name
[0]) {
4975 strncpy(if_name
, iface_name
, IFNAMSIZ
);
4976 if_name
[IFNAMSIZ
- 1] = 0;
4977 len
= strlen(if_name
);
4978 ch
= if_name
[len
- 1];
4979 if ((ch
> '9' || ch
< '0') && (len
< IFNAMSIZ
- 2))
4980 strcat(if_name
, "%d");
4982 net
= dhd_allocate_if(&dhd
->pub
, 0, if_name
, NULL
, 0, TRUE
);
4985 dhd_state
|= DHD_ATTACH_STATE_ADD_IF
;
4987 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
4990 net
->netdev_ops
= NULL
;
4993 sema_init(&dhd
->proto_sem
, 1);
4995 #ifdef PROP_TXSTATUS
4996 spin_lock_init(&dhd
->wlfc_spinlock
);
4998 dhd
->pub
.skip_fc
= dhd_wlfc_skip_fc
;
4999 dhd
->pub
.plat_init
= dhd_wlfc_plat_init
;
5000 dhd
->pub
.plat_deinit
= dhd_wlfc_plat_deinit
;
5001 #endif /* PROP_TXSTATUS */
5003 /* Initialize other structure content */
5004 init_waitqueue_head(&dhd
->ioctl_resp_wait
);
5005 init_waitqueue_head(&dhd
->ctrl_wait
);
5007 /* Initialize the spinlocks */
5008 spin_lock_init(&dhd
->sdlock
);
5009 spin_lock_init(&dhd
->txqlock
);
5010 spin_lock_init(&dhd
->dhd_lock
);
5011 spin_lock_init(&dhd
->rxf_lock
);
5012 #if defined(RXFRAME_THREAD)
5013 dhd
->rxthread_enabled
= TRUE
;
5014 #endif /* defined(RXFRAME_THREAD) */
5016 #ifdef DHDTCPACK_SUPPRESS
5017 spin_lock_init(&dhd
->tcpack_lock
);
5018 #endif /* DHDTCPACK_SUPPRESS */
5020 /* Initialize Wakelock stuff */
5021 spin_lock_init(&dhd
->wakelock_spinlock
);
5022 dhd
->wakelock_counter
= 0;
5023 dhd
->wakelock_wd_counter
= 0;
5024 dhd
->wakelock_rx_timeout_enable
= 0;
5025 dhd
->wakelock_ctrl_timeout_enable
= 0;
5026 #ifdef CONFIG_HAS_WAKELOCK
5027 wake_lock_init(&dhd
->wl_wifi
, WAKE_LOCK_SUSPEND
, "wlan_wake");
5028 wake_lock_init(&dhd
->wl_rxwake
, WAKE_LOCK_SUSPEND
, "wlan_rx_wake");
5029 wake_lock_init(&dhd
->wl_ctrlwake
, WAKE_LOCK_SUSPEND
, "wlan_ctrl_wake");
5030 wake_lock_init(&dhd
->wl_wdwake
, WAKE_LOCK_SUSPEND
, "wlan_wd_wake");
5031 #ifdef BCMPCIE_OOB_HOST_WAKE
5032 wake_lock_init(&dhd
->wl_intrwake
, WAKE_LOCK_SUSPEND
, "wlan_oob_irq_wake");
5033 #endif /* BCMPCIE_OOB_HOST_WAKE */
5034 #endif /* CONFIG_HAS_WAKELOCK */
5035 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
5036 mutex_init(&dhd
->dhd_net_if_mutex
);
5037 mutex_init(&dhd
->dhd_suspend_mutex
);
5039 dhd_state
|= DHD_ATTACH_STATE_WAKELOCKS_INIT
;
5041 /* Attach and link in the protocol */
5042 if (dhd_prot_attach(&dhd
->pub
) != 0) {
5043 DHD_ERROR(("dhd_prot_attach failed\n"));
5046 dhd_state
|= DHD_ATTACH_STATE_PROT_ATTACH
;
5049 /* Attach and link in the cfg80211 */
5050 if (unlikely(wl_cfg80211_attach(net
, &dhd
->pub
))) {
5051 DHD_ERROR(("wl_cfg80211_attach failed\n"));
5055 dhd_monitor_init(&dhd
->pub
);
5056 dhd_state
|= DHD_ATTACH_STATE_CFG80211
;
5058 #if defined(WL_WIRELESS_EXT)
5059 /* Attach and link in the iw */
5060 if (!(dhd_state
& DHD_ATTACH_STATE_CFG80211
)) {
5061 if (wl_iw_attach(net
, (void *)&dhd
->pub
) != 0) {
5062 DHD_ERROR(("wl_iw_attach failed\n"));
5065 dhd_state
|= DHD_ATTACH_STATE_WL_ATTACH
;
5067 #endif /* defined(WL_WIRELESS_EXT) */
5069 #ifdef SHOW_LOGTRACE
5070 dhd_init_logstrs_array(&dhd
->event_data
);
5071 #endif /* SHOW_LOGTRACE */
5073 if (dhd_sta_pool_init(&dhd
->pub
, DHD_MAX_STA
) != BCME_OK
) {
5074 DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__
, DHD_MAX_STA
));
5079 /* Set up the watchdog timer */
5080 init_timer(&dhd
->timer
);
5081 dhd
->timer
.data
= (ulong
)dhd
;
5082 dhd
->timer
.function
= dhd_watchdog
;
5083 dhd
->default_wd_interval
= dhd_watchdog_ms
;
5085 if (dhd_watchdog_prio
>= 0) {
5086 /* Initialize watchdog thread */
5087 PROC_START(dhd_watchdog_thread
, dhd
, &dhd
->thr_wdt_ctl
, 0, "dhd_watchdog_thread");
5090 dhd
->thr_wdt_ctl
.thr_pid
= -1;
5094 debugger_init((void *) bus
);
5097 /* Set up the bottom half handler */
5098 if (dhd_dpc_prio
>= 0) {
5099 /* Initialize DPC thread */
5100 PROC_START(dhd_dpc_thread
, dhd
, &dhd
->thr_dpc_ctl
, 0, "dhd_dpc");
5102 /* use tasklet for dpc */
5103 tasklet_init(&dhd
->tasklet
, dhd_dpc
, (ulong
)dhd
);
5104 dhd
->thr_dpc_ctl
.thr_pid
= -1;
5107 if (dhd
->rxthread_enabled
) {
5108 bzero(&dhd
->pub
.skbbuf
[0], sizeof(void *) * MAXSKBPEND
);
5109 /* Initialize RXF thread */
5110 PROC_START(dhd_rxf_thread
, dhd
, &dhd
->thr_rxf_ctl
, 0, "dhd_rxf");
5113 dhd_state
|= DHD_ATTACH_STATE_THREADS_CREATED
;
5115 #if defined(CONFIG_PM_SLEEP)
5116 if (!dhd_pm_notifier_registered
) {
5117 dhd_pm_notifier_registered
= TRUE
;
5118 register_pm_notifier(&dhd_pm_notifier
);
5120 #endif /* CONFIG_PM_SLEEP */
5122 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
5123 dhd
->early_suspend
.level
= EARLY_SUSPEND_LEVEL_BLANK_SCREEN
+ 20;
5124 dhd
->early_suspend
.suspend
= dhd_early_suspend
;
5125 dhd
->early_suspend
.resume
= dhd_late_resume
;
5126 register_early_suspend(&dhd
->early_suspend
);
5127 dhd_state
|= DHD_ATTACH_STATE_EARLYSUSPEND_DONE
;
5128 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
5130 #ifdef ARP_OFFLOAD_SUPPORT
5131 dhd
->pend_ipaddr
= 0;
5132 if (!dhd_inetaddr_notifier_registered
) {
5133 dhd_inetaddr_notifier_registered
= TRUE
;
5134 register_inetaddr_notifier(&dhd_inetaddr_notifier
);
5136 #endif /* ARP_OFFLOAD_SUPPORT */
5138 if (!dhd_inet6addr_notifier_registered
) {
5139 dhd_inet6addr_notifier_registered
= TRUE
;
5140 register_inet6addr_notifier(&dhd_inet6addr_notifier
);
5143 dhd
->dhd_deferred_wq
= dhd_deferred_work_init((void *)dhd
);
5144 #ifdef DEBUG_CPU_FREQ
5145 dhd
->new_freq
= alloc_percpu(int);
5146 dhd
->freq_trans
.notifier_call
= dhd_cpufreq_notifier
;
5147 cpufreq_register_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
5149 #ifdef DHDTCPACK_SUPPRESS
5151 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_DELAYTX
);
5152 #elif defined(BCMPCIE)
5153 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_HOLD
);
5155 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
5156 #endif /* BCMSDIO */
5157 #endif /* DHDTCPACK_SUPPRESS */
5159 dhd_state
|= DHD_ATTACH_STATE_DONE
;
5160 dhd
->dhd_state
= dhd_state
;
5163 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
5165 #endif /* CUSTOMER_HW20 && WLANAUDIO */
5169 if (dhd_state
>= DHD_ATTACH_STATE_DHD_ALLOC
) {
5170 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
5171 __FUNCTION__
, dhd_state
, &dhd
->pub
));
5172 dhd
->dhd_state
= dhd_state
;
5173 dhd_detach(&dhd
->pub
);
5174 dhd_free(&dhd
->pub
);
5180 int dhd_get_fw_mode(dhd_info_t
*dhdinfo
)
5182 if (strstr(dhdinfo
->fw_path
, "_apsta") != NULL
)
5183 return DHD_FLAG_HOSTAP_MODE
;
5184 if (strstr(dhdinfo
->fw_path
, "_p2p") != NULL
)
5185 return DHD_FLAG_P2P_MODE
;
5186 if (strstr(dhdinfo
->fw_path
, "_ibss") != NULL
)
5187 return DHD_FLAG_IBSS_MODE
;
5188 if (strstr(dhdinfo
->fw_path
, "_mfg") != NULL
)
5189 return DHD_FLAG_MFG_MODE
;
5191 return DHD_FLAG_STA_MODE
;
/*
 * Resolve the firmware, nvram and config file paths for this adapter.
 *
 * Sources, in increasing priority: Kconfig defaults (CONFIG_BCMDHD_*_PATH),
 * the adapter platform info (used only when the dhdinfo path is still
 * empty), and the firmware_path/nvram_path/config_path module parameters
 * (consumed and then cleared so a later change can be detected).
 *
 * NOTE(review): this text is a mangled extraction — several interior lines
 * (error returns, closing braces) between the numbered statements were
 * dropped; verify against the original dhd_linux.c before relying on the
 * exact control flow.
 */
5194 bool dhd_update_fw_nv_path(dhd_info_t
*dhdinfo
)
5199 const char *fw
= NULL
;
5200 const char *nv
= NULL
;
5201 const char *conf
= NULL
;
5202 wifi_adapter_info_t
*adapter
= dhdinfo
->adapter
;
5205 /* Update firmware and nvram path. The path may be from adapter info or module parameter
5206 * The path from adapter info is used for initialization only (as it won't change).
5208 * The firmware_path/nvram_path module parameter may be changed by the system at run
5209 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
5210 * command may change dhdinfo->fw_path. As such we need to clear the path info in
5211 * module parameter after it is copied. We won't update the path until the module parameter
5212 * is changed again (first character is not '\0')
5215 /* set default firmware and nvram path for built-in type driver */
5216 // if (!dhd_download_fw_on_driverload) {
5217 #ifdef CONFIG_BCMDHD_FW_PATH
5218 fw
= CONFIG_BCMDHD_FW_PATH
;
5219 #endif /* CONFIG_BCMDHD_FW_PATH */
5220 #ifdef CONFIG_BCMDHD_NVRAM_PATH
5221 nv
= CONFIG_BCMDHD_NVRAM_PATH
;
5222 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
/* Adapter (platform) paths are only picked up while the dhdinfo copy
 * is still uninitialized — they never override a runtime change.
 */
5225 /* check if we need to initialize the path */
5226 if (dhdinfo
->fw_path
[0] == '\0') {
5227 if (adapter
&& adapter
->fw_path
&& adapter
->fw_path
[0] != '\0')
5228 fw
= adapter
->fw_path
;
5231 if (dhdinfo
->nv_path
[0] == '\0') {
5232 if (adapter
&& adapter
->nv_path
&& adapter
->nv_path
[0] != '\0')
5233 nv
= adapter
->nv_path
;
5235 if (dhdinfo
->conf_path
[0] == '\0') {
5236 if (adapter
&& adapter
->conf_path
&& adapter
->conf_path
[0] != '\0')
5237 conf
= adapter
->conf_path
;
/* Module parameters take priority over everything above.
 * NOTE(review): the assignments under these three ifs (presumably
 * fw/nv/conf = module param) were lost in extraction.
 */
5240 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
5242 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
5244 if (firmware_path
[0] != '\0')
5246 if (nvram_path
[0] != '\0')
5248 if (config_path
[0] != '\0')
/* Copy each selected path into the fixed-size dhdinfo buffers,
 * rejecting oversized paths and stripping a trailing newline
 * (paths may come from sysfs/module-param writes that include one).
 */
5251 if (fw
&& fw
[0] != '\0') {
5252 fw_len
= strlen(fw
);
5253 if (fw_len
>= sizeof(dhdinfo
->fw_path
)) {
5254 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
5257 strncpy(dhdinfo
->fw_path
, fw
, sizeof(dhdinfo
->fw_path
));
5258 if (dhdinfo
->fw_path
[fw_len
-1] == '\n')
5259 dhdinfo
->fw_path
[fw_len
-1] = '\0';
5261 if (nv
&& nv
[0] != '\0') {
5262 nv_len
= strlen(nv
);
5263 if (nv_len
>= sizeof(dhdinfo
->nv_path
)) {
5264 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
5267 strncpy(dhdinfo
->nv_path
, nv
, sizeof(dhdinfo
->nv_path
));
5268 if (dhdinfo
->nv_path
[nv_len
-1] == '\n')
5269 dhdinfo
->nv_path
[nv_len
-1] = '\0';
5271 if (conf
&& conf
[0] != '\0') {
5272 conf_len
= strlen(conf
);
5273 if (conf_len
>= sizeof(dhdinfo
->conf_path
)) {
5274 DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
5277 strncpy(dhdinfo
->conf_path
, conf
, sizeof(dhdinfo
->conf_path
));
5278 if (dhdinfo
->conf_path
[conf_len
-1] == '\n')
5279 dhdinfo
->conf_path
[conf_len
-1] = '\0';
/* Consume the module parameters so a future write is seen as "changed". */
5283 /* clear the path in module parameter */
5284 firmware_path
[0] = '\0';
5285 nvram_path
[0] = '\0';
5286 config_path
[0] = '\0';
5289 #ifndef BCMEMBEDIMAGE
5290 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
5291 if (dhdinfo
->fw_path
[0] == '\0') {
5292 DHD_ERROR(("firmware path not found\n"));
5295 if (dhdinfo
->nv_path
[0] == '\0') {
5296 DHD_ERROR(("nvram path not found\n"));
/* No explicit config path: derive it from the nvram path (and, when
 * CONFIG_PATH_AUTO_SELECT is set, refine it by detected chip).
 */
5299 if (dhdinfo
->conf_path
[0] == '\0') {
5300 dhd_conf_set_conf_path_by_nv_path(&dhdinfo
->pub
, dhdinfo
->conf_path
, dhdinfo
->nv_path
);
5302 #ifdef CONFIG_PATH_AUTO_SELECT
5303 dhd_conf_set_conf_name_by_chip(&dhdinfo
->pub
, dhdinfo
->conf_path
);
5305 #endif /* BCMEMBEDIMAGE */
/*
 * Bring up the dongle bus: download firmware/nvram, start the watchdog,
 * initialize the bus and protocol layers, register the OOB interrupt
 * where configured, and sync with the dongle.
 *
 * NOTE(review): mangled extraction — the `ret` declaration and the
 * error-path return statements between the numbered lines were dropped;
 * verify control flow against the original dhd_linux.c.
 */
5312 dhd_bus_start(dhd_pub_t
*dhdp
)
5315 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
5316 unsigned long flags
;
5320 DHD_TRACE(("Enter %s:\n", __FUNCTION__
));
5322 DHD_PERIM_LOCK(dhdp
);
/* Only (re)download the image when the bus is fully down and the
 * firmware/nvram/config paths resolve successfully.
 */
5324 /* try to download image and nvram to the dongle */
5325 if (dhd
->pub
.busstate
== DHD_BUS_DOWN
&& dhd_update_fw_nv_path(dhd
)) {
5326 DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
5327 __FUNCTION__
, dhd
->fw_path
, dhd
->nv_path
, dhd
->conf_path
));
5328 ret
= dhd_bus_download_firmware(dhd
->pub
.bus
, dhd
->pub
.osh
,
5329 dhd
->fw_path
, dhd
->nv_path
, dhd
->conf_path
);
5331 DHD_ERROR(("%s: failed to download firmware %s\n",
5332 __FUNCTION__
, dhd
->fw_path
));
5333 DHD_PERIM_UNLOCK(dhdp
);
5337 if (dhd
->pub
.busstate
!= DHD_BUS_LOAD
) {
5338 DHD_PERIM_UNLOCK(dhdp
);
5342 dhd_os_sdlock(dhdp
);
5344 /* Start the watchdog timer */
5345 dhd
->pub
.tickcnt
= 0;
5346 dhd_os_wd_timer(&dhd
->pub
, dhd_watchdog_ms
);
5348 /* Bring up the bus */
5349 if ((ret
= dhd_bus_init(&dhd
->pub
, FALSE
)) != 0) {
5351 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__
, ret
));
5352 dhd_os_sdunlock(dhdp
);
5353 DHD_PERIM_UNLOCK(dhdp
);
/* Out-of-band interrupt setup. On failure the watchdog timer is torn
 * down (non-PCIe path) before unwinding locks and wakelocks.
 */
5356 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
5357 #if defined(BCMPCIE_OOB_HOST_WAKE)
5358 dhd_os_sdunlock(dhdp
);
5359 #endif /* BCMPCIE_OOB_HOST_WAKE */
5360 /* Host registration for OOB interrupt */
5361 if (dhd_bus_oob_intr_register(dhdp
)) {
5362 /* deactivate timer and wait for the handler to finish */
5363 #if !defined(BCMPCIE_OOB_HOST_WAKE)
5364 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5365 dhd
->wd_timer_valid
= FALSE
;
5366 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5367 del_timer_sync(&dhd
->timer
);
5369 dhd_os_sdunlock(dhdp
);
5370 #endif /* BCMPCIE_OOB_HOST_WAKE */
5371 DHD_PERIM_UNLOCK(dhdp
);
5372 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
5373 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__
));
5377 #if defined(BCMPCIE_OOB_HOST_WAKE)
5378 dhd_os_sdlock(dhdp
);
5379 dhd_bus_oob_intr_set(dhdp
, TRUE
);
5381 /* Enable oob at firmware */
5382 dhd_enable_oob_intr(dhd
->pub
.bus
, TRUE
);
5383 #endif /* BCMPCIE_OOB_HOST_WAKE */
5384 #elif defined(FORCE_WOWLAN)
5385 /* Enable oob at firmware */
5386 dhd_enable_oob_intr(dhd
->pub
.bus
, TRUE
);
/* PCIe full-dongle: size and allocate the H2D flow rings before the
 * protocol layer is initialized.
 */
5388 #ifdef PCIE_FULL_DONGLE
5391 uint32 num_flowrings
; /* includes H2D common rings */
5392 num_flowrings
= dhd_bus_max_h2d_queues(dhd
->pub
.bus
, &txpush
);
5393 DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__
,
5395 if ((ret
= dhd_flow_rings_init(&dhd
->pub
, num_flowrings
)) != BCME_OK
) {
5396 dhd_os_sdunlock(dhdp
);
5397 DHD_PERIM_UNLOCK(dhdp
);
5401 #endif /* PCIE_FULL_DONGLE */
5403 /* Do protocol initialization necessary for IOCTL/IOVAR */
5404 dhd_prot_init(&dhd
->pub
);
5406 /* If bus is not ready, can't come up */
5407 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
5408 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
5409 dhd
->wd_timer_valid
= FALSE
;
5410 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
5411 del_timer_sync(&dhd
->timer
);
5412 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__
));
5413 dhd_os_sdunlock(dhdp
);
5414 DHD_PERIM_UNLOCK(dhdp
);
5415 DHD_OS_WD_WAKE_UNLOCK(&dhd
->pub
);
5419 dhd_os_sdunlock(dhdp
);
5421 /* Bus is ready, query any dongle information */
5422 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0) {
5423 DHD_PERIM_UNLOCK(dhdp
);
/* Flush any IPv4 address that arrived before the bus was up into the
 * dongle's ARP-offload host table.
 */
5427 #ifdef ARP_OFFLOAD_SUPPORT
5428 if (dhd
->pend_ipaddr
) {
5429 #ifdef AOE_IP_ALIAS_SUPPORT
5430 aoe_update_host_ipv4_table(&dhd
->pub
, dhd
->pend_ipaddr
, TRUE
, 0);
5431 #endif /* AOE_IP_ALIAS_SUPPORT */
5432 dhd
->pend_ipaddr
= 0;
5434 #endif /* ARP_OFFLOAD_SUPPORT */
5436 DHD_PERIM_UNLOCK(dhdp
);
/*
 * Push the TDLS enable state and tuning parameters to the firmware.
 *
 * Sets "tdls_enable", then (presumably when auto_on is requested — the
 * guarding lines were lost in extraction) "tdls_auto_op",
 * "tdls_idle_time", "tdls_rssi_high" and "tdls_rssi_low" via iovars.
 * Bails out early when the firmware lacks TDLS support or the state is
 * already as requested.
 *
 * NOTE(review): mangled extraction — the early-return statements and
 * exit labels between the numbered lines were dropped.
 */
5441 int _dhd_tdls_enable(dhd_pub_t
*dhd
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
5443 char iovbuf
[WLC_IOCTL_SMLEN
];
5444 uint32 tdls
= tdls_on
;
5446 uint32 tdls_auto_op
= 0;
5447 uint32 tdls_idle_time
= CUSTOM_TDLS_IDLE_MODE_SETTING
;
5448 int32 tdls_rssi_high
= CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
;
5449 int32 tdls_rssi_low
= CUSTOM_TDLS_RSSI_THRESHOLD_LOW
;
/* Firmware capability and no-op checks. */
5451 if (!FW_SUPPORTED(dhd
, tdls
))
5454 if (dhd
->tdls_enable
== tdls_on
)
/* Main switch: enable/disable TDLS in the dongle. */
5456 bcm_mkiovar("tdls_enable", (char *)&tdls
, sizeof(tdls
), iovbuf
, sizeof(iovbuf
));
5457 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
5458 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__
, tdls
, ret
));
/* Cache the new state so repeated calls with the same value short-circuit. */
5461 dhd
->tdls_enable
= tdls_on
;
5464 tdls_auto_op
= auto_on
;
5465 bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op
, sizeof(tdls_auto_op
),
5466 iovbuf
, sizeof(iovbuf
));
5467 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
5468 sizeof(iovbuf
), TRUE
, 0)) < 0) {
5469 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__
, ret
));
/* Auto-mode tuning: idle timeout plus RSSI setup/teardown thresholds. */
5474 bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time
,
5475 sizeof(tdls_idle_time
), iovbuf
, sizeof(iovbuf
));
5476 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
5477 sizeof(iovbuf
), TRUE
, 0)) < 0) {
5478 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__
, ret
));
5481 bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high
, 4, iovbuf
, sizeof(iovbuf
));
5482 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
5483 sizeof(iovbuf
), TRUE
, 0)) < 0) {
5484 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__
, ret
));
5487 bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low
, 4, iovbuf
, sizeof(iovbuf
));
5488 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
5489 sizeof(iovbuf
), TRUE
, 0)) < 0) {
5490 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__
, ret
));
/*
 * Public TDLS enable entry point: map the net_device to its dhd_info
 * and delegate to _dhd_tdls_enable() on the public dhd context.
 *
 * NOTE(review): the null-check / error-return lines between the visible
 * statements were lost in extraction — presumably returns an error when
 * DHD_DEV_INFO() yields NULL; confirm against the original source.
 */
5499 int dhd_tdls_enable(struct net_device
*dev
, bool tdls_on
, bool auto_on
, struct ether_addr
*mac
)
5501 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
5504 ret
= _dhd_tdls_enable(&dhd
->pub
, tdls_on
, auto_on
, mac
);
5509 #ifdef PCIE_FULL_DONGLE
/*
 * Maintain the per-dongle TDLS peer list (PCIE_FULL_DONGLE only).
 *
 * connect==TRUE: add the peer address `da` to dhdp->peer_tbl unless it
 * is already present. connect==FALSE (second while loop): find the peer,
 * delete its flow rings, unlink and free the node. Logs an error when a
 * disconnect finds no matching entry.
 *
 * NOTE(review): mangled extraction — the connect/disconnect branch
 * headers, returns and list-walk increments between the numbered lines
 * were dropped. `new` is a plain C identifier here (C++ keyword — this
 * file is C only).
 */
5510 void dhd_tdls_update_peer_info(struct net_device
*dev
, bool connect
, uint8
*da
)
5512 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
5513 dhd_pub_t
*dhdp
= (dhd_pub_t
*)&dhd
->pub
;
5514 tdls_peer_node_t
*cur
= dhdp
->peer_tbl
.node
;
5515 tdls_peer_node_t
*new = NULL
, *prev
= NULL
;
5517 uint8 sa
[ETHER_ADDR_LEN
];
5518 int ifidx
= dhd_net2idx(dhd
, dev
);
5520 if (ifidx
== DHD_BAD_IF
)
/* Capture the local interface MAC (sa is not used further in the
 * visible lines — possibly consumed by dropped code).
 */
5523 dhdif
= dhd
->iflist
[ifidx
];
5524 memcpy(sa
, dhdif
->mac_addr
, ETHER_ADDR_LEN
);
/* Connect path: reject duplicates before inserting at the list head. */
5527 while (cur
!= NULL
) {
5528 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
5529 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
5530 __FUNCTION__
, __LINE__
));
5536 new = MALLOC(dhdp
->osh
, sizeof(tdls_peer_node_t
));
5538 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__
));
5541 memcpy(new->addr
, da
, ETHER_ADDR_LEN
);
5542 new->next
= dhdp
->peer_tbl
.node
;
5543 dhdp
->peer_tbl
.node
= new;
5544 dhdp
->peer_tbl
.tdls_peer_count
++;
/* Disconnect path: tear down the peer's flow rings, then unlink
 * (head vs. interior node) and free it.
 */
5547 while (cur
!= NULL
) {
5548 if (!memcmp(da
, cur
->addr
, ETHER_ADDR_LEN
)) {
5549 dhd_flow_rings_delete_for_peer(dhdp
, ifidx
, da
);
5551 prev
->next
= cur
->next
;
5553 dhdp
->peer_tbl
.node
= cur
->next
;
5554 MFREE(dhdp
->osh
, cur
, sizeof(tdls_peer_node_t
));
5555 dhdp
->peer_tbl
.tdls_peer_count
--;
5561 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__
));
5564 #endif /* PCIE_FULL_DONGLE */
/*
 * Report whether the driver is operating in any concurrent mode, i.e.
 * op_mode carries DHD_FLAG_CONCURR_MULTI_CHAN_MODE or the full
 * DHD_FLAG_CONCURR_SINGLE_CHAN_MODE bit set.
 *
 * NOTE(review): the return statements for each branch were lost in
 * extraction — presumably TRUE for either match, FALSE otherwise.
 */
5567 bool dhd_is_concurrent_mode(dhd_pub_t
*dhd
)
5572 if (dhd
->op_mode
& DHD_FLAG_CONCURR_MULTI_CHAN_MODE
)
/* Single-chan concurrency is a multi-bit flag: require ALL its bits. */
5574 else if ((dhd
->op_mode
& DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
) ==
5575 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
)
5580 #if !defined(AP) && defined(WLP2P)
5581 /* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
5582 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
5583 * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
5584 * would still be named as fw_bcmdhd_apsta.
/*
 * Probe firmware capabilities to decide which concurrency flags to
 * enable: checks the "vsdb" capability for multi-channel support and
 * the "p2p" capability (plus a "p2p" iovar read) for P2P availability.
 *
 * NOTE(review): mangled extraction — early returns and the function's
 * tail (after the WL_ENABLE_P2P_IF comment) are missing from this view.
 */
5587 dhd_get_concurrent_capabilites(dhd_pub_t
*dhd
)
5590 char buf
[WLC_IOCTL_SMLEN
];
5591 bool mchan_supported
= FALSE
;
5592 /* if dhd->op_mode is already set for HOSTAP and Manufacturing
5593 * test mode, that means we only will use the mode as it is
5595 if (dhd
->op_mode
& (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))
/* VSDB capability => firmware can do multi-channel concurrency. */
5597 if (FW_SUPPORTED(dhd
, vsdb
)) {
5598 mchan_supported
= TRUE
;
5600 if (!FW_SUPPORTED(dhd
, p2p
)) {
5601 DHD_TRACE(("Chip does not support p2p\n"));
5605 /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
5606 memset(buf
, 0, sizeof(buf
));
5607 bcm_mkiovar("p2p", 0, 0, buf
, sizeof(buf
));
5608 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
),
5610 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__
, ret
));
5615 /* By default, chip supports single chan concurrency,
5616 * now lets check for mchan
5618 ret
= DHD_FLAG_CONCURR_SINGLE_CHAN_MODE
;
5619 if (mchan_supported
)
5620 ret
|= DHD_FLAG_CONCURR_MULTI_CHAN_MODE
;
5621 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
5622 /* For customer_hw4, although ICS,
5623 * we still support concurrent mode
5636 #ifdef SUPPORT_AP_POWERSAVE
5637 #define RXCHAIN_PWRSAVE_PPS 10
5638 #define RXCHAIN_PWRSAVE_QUIET_TIME 10
5639 #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
/*
 * Enable or disable AP (rxchain) power save in the firmware.
 *
 * When enabling (presumably `if (enable)` — the branch header was lost
 * in extraction), first turns the feature on and then programs the
 * packets-per-second threshold, quiet time, and the "STAs associated"
 * check; when disabling, just clears rxchain_pwrsave_enable.
 *
 * NOTE(review): mangled extraction — the iovbuf declaration, branch
 * headers, error returns and closing braces were dropped.
 */
5640 int dhd_set_ap_powersave(dhd_pub_t
*dhdp
, int ifidx
, int enable
)
5643 int32 pps
= RXCHAIN_PWRSAVE_PPS
;
5644 int32 quiet_time
= RXCHAIN_PWRSAVE_QUIET_TIME
;
5645 int32 stas_assoc_check
= RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK
;
/* Enable path: switch the feature on, then push each tuning knob. */
5648 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable
, 4, iovbuf
, sizeof(iovbuf
));
5649 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
5650 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
5651 DHD_ERROR(("Failed to enable AP power save\n"));
5653 bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps
, 4, iovbuf
, sizeof(iovbuf
));
5654 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
5655 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
5656 DHD_ERROR(("Failed to set pps\n"));
5658 bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time
,
5659 4, iovbuf
, sizeof(iovbuf
));
5660 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
5661 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
5662 DHD_ERROR(("Failed to set quiet time\n"));
5664 bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check
,
5665 4, iovbuf
, sizeof(iovbuf
));
5666 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
5667 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
5668 DHD_ERROR(("Failed to set stas assoc check\n"));
/* Disable path: only the enable flag is written. */
5671 bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable
, 4, iovbuf
, sizeof(iovbuf
));
5672 if (dhd_wl_ioctl_cmd(dhdp
, WLC_SET_VAR
,
5673 iovbuf
, sizeof(iovbuf
), TRUE
, 0) != BCME_OK
) {
5674 DHD_ERROR(("Failed to disable AP power save\n"));
5680 #endif /* SUPPORT_AP_POWERSAVE */
5683 #if defined(READ_CONFIG_FROM_FILE)
5684 #include <linux/fs.h>
5685 #include <linux/ctype.h>
5687 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
5688 bool PM_control
= TRUE
;
/*
 * Apply one "name=value" pair read from the config file to the dongle
 * (READ_CONFIG_FROM_FILE only).
 *
 * Recognized names get dedicated handling — "country", the roam_*
 * parameters, "PM", "btamp_chan", "band", "cur_etheraddr", "lpc",
 * "vht_features" — anything else is sent generically as an integer
 * iovar (with a side effect: "roam_off" also programs bcn_timeout=2).
 *
 * NOTE(review): mangled extraction — many branch headers, declarations
 * (var_int, ret, iovlen, buf, …), returns and braces between the
 * numbered lines were dropped; treat the control flow here as
 * indicative only and verify against the original dhd_linux.c.
 */
5690 static int dhd_preinit_proc(dhd_pub_t
*dhd
, int ifidx
, char *name
, char *value
)
5693 wl_country_t cspec
= {{0}, -1, {0}};
5695 char *endptr
= NULL
;
5697 char smbuf
[WLC_IOCTL_SMLEN
*2];
/* "country": value may be "CC" or "CC/rev"; with a '/' the revision is
 * parsed explicitly, otherwise the platform hook fills in the ccode.
 */
5699 if (!strcmp(name
, "country")) {
5700 revstr
= strchr(value
, '/');
5702 cspec
.rev
= strtoul(revstr
+ 1, &endptr
, 10);
5703 memcpy(cspec
.country_abbrev
, value
, WLC_CNTRY_BUF_SZ
);
5704 cspec
.country_abbrev
[2] = '\0';
5705 memcpy(cspec
.ccode
, cspec
.country_abbrev
, WLC_CNTRY_BUF_SZ
);
5708 memcpy(cspec
.country_abbrev
, value
, WLC_CNTRY_BUF_SZ
);
5709 memcpy(cspec
.ccode
, value
, WLC_CNTRY_BUF_SZ
);
5710 get_customized_country_code(dhd
->info
->adapter
,
5711 (char *)&cspec
.country_abbrev
, &cspec
);
5713 memset(smbuf
, 0, sizeof(smbuf
));
5714 DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
5715 cspec
.country_abbrev
, cspec
.rev
));
5716 iolen
= bcm_mkiovar("country", (char*)&cspec
, sizeof(cspec
),
5717 smbuf
, sizeof(smbuf
));
5718 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
5719 smbuf
, iolen
, TRUE
, 0);
/* Roaming parameters map onto dedicated WLC_SET_ROAM_* ioctls. */
5720 } else if (!strcmp(name
, "roam_scan_period")) {
5721 var_int
= (int)simple_strtol(value
, NULL
, 0);
5722 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
,
5723 &var_int
, sizeof(var_int
), TRUE
, 0);
5724 } else if (!strcmp(name
, "roam_delta")) {
5729 x
.val
= (int)simple_strtol(value
, NULL
, 0);
5730 /* x.band = WLC_BAND_AUTO; */
5731 x
.band
= WLC_BAND_ALL
;
5732 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, &x
, sizeof(x
), TRUE
, 0);
5733 } else if (!strcmp(name
, "roam_trigger")) {
5736 roam_trigger
[0] = (int)simple_strtol(value
, NULL
, 0);
5737 roam_trigger
[1] = WLC_BAND_ALL
;
5738 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, &roam_trigger
,
5739 sizeof(roam_trigger
), TRUE
, 0);
/* "PM": set the power-management mode, and (with CONFIG_PM_LOCK)
 * record whether later PM changes should be suppressed.
 */
5742 } else if (!strcmp(name
, "PM")) {
5744 var_int
= (int)simple_strtol(value
, NULL
, 0);
5746 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
,
5747 &var_int
, sizeof(var_int
), TRUE
, 0);
5749 #if defined(CONFIG_PM_LOCK)
5751 g_pm_control
= TRUE
;
5752 printk("%s var_int=%d don't control PM\n", __func__
, var_int
);
5754 g_pm_control
= FALSE
;
5755 printk("%s var_int=%d do control PM\n", __func__
, var_int
);
/* "btamp_chan" (WLBTAMP builds): pick the BT-AMP channel via iovar. */
5762 else if (!strcmp(name
, "btamp_chan")) {
5768 btamp_chan
= (int)simple_strtol(value
, NULL
, 0);
5769 iov_len
= bcm_mkiovar("btamp_chan", (char *)&btamp_chan
, 4, iovbuf
, sizeof(iovbuf
));
5770 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, iov_len
, TRUE
, 0) < 0))
5771 DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
5772 __FUNCTION__
, btamp_chan
, ret
));
5774 DHD_ERROR(("%s btamp_chan %d set success\n",
5775 __FUNCTION__
, btamp_chan
));
5777 #endif /* WLBTAMP */
/* "band": translate the symbolic value into a WLC_BAND_* constant;
 * unrecognized input falls back to auto.
 */
5778 else if (!strcmp(name
, "band")) {
5780 if (!strcmp(value
, "auto"))
5781 var_int
= WLC_BAND_AUTO
;
5782 else if (!strcmp(value
, "a"))
5783 var_int
= WLC_BAND_5G
;
5784 else if (!strcmp(value
, "b"))
5785 var_int
= WLC_BAND_2G
;
5786 else if (!strcmp(value
, "all"))
5787 var_int
= WLC_BAND_ALL
;
5789 printk(" set band value should be one of the a or b or all\n");
5790 var_int
= WLC_BAND_AUTO
;
5792 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_BAND
, &var_int
,
5793 sizeof(var_int
), TRUE
, 0)) < 0)
5794 printk(" set band err=%d\n", ret
);
/* "cur_etheraddr": parse the MAC, skip when unchanged, otherwise push
 * it to the dongle and mirror it into dhd->mac on success.
 */
5796 } else if (!strcmp(name
, "cur_etheraddr")) {
5797 struct ether_addr ea
;
5802 bcm_ether_atoe(value
, &ea
);
5804 ret
= memcmp(&ea
.octet
, dhd
->mac
.octet
, ETHER_ADDR_LEN
);
5806 DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__
));
5810 DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__
,
5811 ea
.octet
[0], ea
.octet
[1], ea
.octet
[2],
5812 ea
.octet
[3], ea
.octet
[4], ea
.octet
[5]));
5814 iovlen
= bcm_mkiovar("cur_etheraddr", (char*)&ea
, ETHER_ADDR_LEN
, buf
, 32);
5816 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, iovlen
, TRUE
, 0);
5818 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
5822 memcpy(dhd
->mac
.octet
, (void *)&ea
, ETHER_ADDR_LEN
);
/* "lpc" and "vht_features" require the interface to be brought down,
 * the iovar applied, then brought back up.
 */
5825 } else if (!strcmp(name
, "lpc")) {
5829 var_int
= (int)simple_strtol(value
, NULL
, 0);
5830 if (dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
5831 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__
));
5833 iovlen
= bcm_mkiovar("lpc", (char *)&var_int
, 4, buf
, sizeof(buf
));
5834 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, iovlen
, TRUE
, 0)) < 0) {
5835 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
5837 if (dhd_wl_ioctl_cmd(dhd
, WLC_UP
, NULL
, 0, TRUE
, 0) < 0) {
5838 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__
));
5841 } else if (!strcmp(name
, "vht_features")) {
5845 var_int
= (int)simple_strtol(value
, NULL
, 0);
5847 if (dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
5848 DHD_ERROR(("%s: wl down failed\n", __FUNCTION__
));
5850 iovlen
= bcm_mkiovar("vht_features", (char *)&var_int
, 4, buf
, sizeof(buf
));
5851 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, iovlen
, TRUE
, 0)) < 0) {
5852 DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__
, ret
));
5854 if (dhd_wl_ioctl_cmd(dhd
, WLC_UP
, NULL
, 0, TRUE
, 0) < 0) {
5855 DHD_ERROR(("%s: wl up failed\n", __FUNCTION__
));
/* Fallback: treat any other name as an integer iovar. "roam_off" also
 * programs a short bcn_timeout so a lost link is reported quickly.
 */
5860 char iovbuf
[WLC_IOCTL_SMLEN
];
5862 /* wlu_iovar_setint */
5863 var_int
= (int)simple_strtol(value
, NULL
, 0);
5865 /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
5866 if (!strcmp(name
, "roam_off")) {
5867 /* Setup timeout if Beacons are lost to report link down */
5869 uint bcn_timeout
= 2;
5870 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout
, 4,
5871 iovbuf
, sizeof(iovbuf
));
5872 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
5875 /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
5877 DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__
, name
, var_int
));
5879 iovlen
= bcm_mkiovar(name
, (char *)&var_int
, sizeof(var_int
),
5880 iovbuf
, sizeof(iovbuf
));
5881 return dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
5882 iovbuf
, iovlen
, TRUE
, 0);
/*
 * Read the on-disk config file (CONFIG_BCMDHD_CONFIG_PATH) and apply
 * each "name value" pair to the dongle through dhd_preinit_proc()
 * (READ_CONFIG_FROM_FILE only).
 *
 * Flow: stat the file, allocate a buffer for its whole content, read it
 * in, NUL-terminate, then tokenize on whitespace (name/value pairs) and
 * dispatch each pair. The file handle and buffer are released at the
 * end.
 *
 * NOTE(review): mangled extraction — the set_fs/get_fs calls implied by
 * old_fs, the stat declaration, error labels and returns between the
 * numbered lines were dropped; verify cleanup paths (fp close / buf
 * free on early error) against the original dhd_linux.c.
 */
5888 static int dhd_preinit_config(dhd_pub_t
*dhd
, int ifidx
)
5890 mm_segment_t old_fs
;
5892 struct file
*fp
= NULL
;
5894 char *buf
= NULL
, *p
, *name
, *value
;
5898 config_path
= CONFIG_BCMDHD_CONFIG_PATH
;
5902 printk(KERN_ERR
"config_path can't read. \n");
/* Need the file size up front so the whole config fits one buffer. */
5908 if ((ret
= vfs_stat(config_path
, &stat
))) {
5910 printk(KERN_ERR
"%s: Failed to get information (%d)\n",
5916 if (!(buf
= MALLOC(dhd
->osh
, stat
.size
+ 1))) {
5917 printk(KERN_ERR
"Failed to allocate memory %llu bytes\n", stat
.size
);
5921 printk("dhd_preinit_config : config path : %s \n", config_path
);
5923 if (!(fp
= dhd_os_open_image(config_path
)) ||
5924 (len
= dhd_os_get_image_block(buf
, stat
.size
, fp
)) < 0)
/* NUL-terminate so the tokenizer below can run off plain C strings. */
5927 buf
[stat
.size
] = '\0';
/* Whitespace-delimited "name value" scanner; each pair is handed to
 * dhd_preinit_proc(), failures are logged with the offending pair.
 */
5928 for (p
= buf
; *p
; p
++) {
5931 for (name
= p
++; *p
&& !isspace(*p
); p
++) {
5935 for (value
= p
; *p
&& !isspace(*p
); p
++);
5937 if ((ret
= dhd_preinit_proc(dhd
, ifidx
, name
, value
)) < 0) {
5938 printk(KERN_ERR
"%s: %s=%s\n",
5939 bcmerrorstr(ret
), name
, value
);
/* Release the image handle and the file buffer. */
5949 dhd_os_close_image(fp
);
5951 MFREE(dhd
->osh
, buf
, stat
.size
+1);
5958 #endif /* READ_CONFIG_FROM_FILE */
5961 dhd_preinit_ioctls(dhd_pub_t
*dhd
)
5964 char eventmask
[WL_EVENTING_MASK_LEN
];
5965 char iovbuf
[WL_EVENTING_MASK_LEN
+ 12]; /* Room for "event_msgs" + '\0' + bitvec */
5966 uint32 buf_key_b4_m4
= 1;
5971 eventmsgs_ext_t
*eventmask_msg
= NULL
;
5972 char* iov_buf
= NULL
;
5975 aibss_bcn_force_config_t bcn_config
;
5979 #endif /* WLAIBSS_PS */
5980 #endif /* WLAIBSS */
5981 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
5984 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
5985 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
5986 uint32 ampdu_ba_wsize
= 0;
5987 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
5988 #if defined(CUSTOM_AMPDU_MPDU)
5989 int32 ampdu_mpdu
= 0;
5991 #if defined(CUSTOM_AMPDU_RELEASE)
5992 int32 ampdu_release
= 0;
5994 #if defined(CUSTOM_AMSDU_AGGSF)
5995 int32 amsdu_aggsf
= 0;
5998 #if defined(BCMSDIO)
5999 #ifdef PROP_TXSTATUS
6000 int wlfc_enable
= TRUE
;
6002 uint32 hostreorder
= 1;
6004 #endif /* DISABLE_11N */
6005 #endif /* PROP_TXSTATUS */
6007 #ifdef PCIE_FULL_DONGLE
6008 uint32 wl_ap_isolate
;
6009 #endif /* PCIE_FULL_DONGLE */
6011 #ifdef DHD_ENABLE_LPC
6013 #endif /* DHD_ENABLE_LPC */
6014 uint power_mode
= PM_FAST
;
6015 uint32 dongle_align
= DHD_SDALIGN
;
6016 #if defined(BCMSDIO)
6017 uint32 glom
= CUSTOM_GLOM_SETTING
;
6018 #endif /* defined(BCMSDIO) */
6019 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6022 uint bcn_timeout
= dhd
->conf
->bcn_timeout
;
6024 #if defined(ARP_OFFLOAD_SUPPORT)
6027 int scan_assoc_time
= DHD_SCAN_ASSOC_ACTIVE_TIME
;
6028 int scan_unassoc_time
= DHD_SCAN_UNASSOC_ACTIVE_TIME
;
6029 int scan_passive_time
= DHD_SCAN_PASSIVE_TIME
;
6030 char buf
[WLC_IOCTL_SMLEN
];
6032 uint32 listen_interval
= CUSTOM_LISTEN_INTERVAL
; /* Default Listen Interval in Beacons */
6035 int roam_trigger
[2] = {CUSTOM_ROAM_TRIGGER_SETTING
, WLC_BAND_ALL
};
6036 int roam_scan_period
[2] = {10, WLC_BAND_ALL
};
6037 int roam_delta
[2] = {CUSTOM_ROAM_DELTA_SETTING
, WLC_BAND_ALL
};
6038 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
6039 int roam_fullscan_period
= 60;
6040 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6041 int roam_fullscan_period
= 120;
6042 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
6044 #ifdef DISABLE_BUILTIN_ROAM
6046 #endif /* DISABLE_BUILTIN_ROAM */
6047 #endif /* ROAM_ENABLE */
6052 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
6053 uint32 mpc
= 0; /* Turn MPC off for AP/APSTA mode */
6054 struct ether_addr p2p_ea
;
6059 #ifdef SOFTAP_UAPSD_OFF
6060 uint32 wme_apsd
= 0;
6061 #endif /* SOFTAP_UAPSD_OFF */
6062 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
6063 uint32 apsta
= 1; /* Enable APSTA mode */
6064 #elif defined(SOFTAP_AND_GC)
6067 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
6068 #ifdef GET_CUSTOM_MAC_ENABLE
6069 struct ether_addr ea_addr
;
6070 #endif /* GET_CUSTOM_MAC_ENABLE */
6074 #endif /* DISABLE_11N */
6076 #if defined(DISABLE_11AC)
6078 #endif /* DISABLE_11AC */
6081 #endif /* USE_WL_TXBF */
6082 #ifdef AMPDU_VO_ENABLE
6083 struct ampdu_tid_control tid
;
6085 #ifdef USE_WL_FRAMEBURST
6086 uint32 frameburst
= 1;
6087 #endif /* USE_WL_FRAMEBURST */
6088 #ifdef DHD_SET_FW_HIGHSPEED
6089 uint32 ack_ratio
= 250;
6090 uint32 ack_ratio_depth
= 64;
6091 #endif /* DHD_SET_FW_HIGHSPEED */
6092 #ifdef SUPPORT_2G_VHT
6093 uint32 vht_features
= 0x3; /* 2G enable | rates all */
6094 #endif /* SUPPORT_2G_VHT */
6095 #ifdef CUSTOM_PSPRETEND_THR
6096 uint32 pspretend_thr
= CUSTOM_PSPRETEND_THR
;
6098 #ifdef PKT_FILTER_SUPPORT
6099 dhd_pkt_filter_enable
= TRUE
;
6100 #endif /* PKT_FILTER_SUPPORT */
6102 dhd
->tdls_enable
= FALSE
;
6104 dhd
->suspend_bcn_li_dtim
= CUSTOM_SUSPEND_BCN_LI_DTIM
;
6105 DHD_TRACE(("Enter %s\n", __FUNCTION__
));
6107 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_BAND", WLC_SET_BAND
, dhd
->conf
->band
, 0, FALSE
);
6108 #ifdef DHDTCPACK_SUPPRESS
6109 printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__
, dhd
->conf
->tcpack_sup_mode
);
6110 dhd_tcpack_suppress_set(dhd
, dhd
->conf
->tcpack_sup_mode
);
6114 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
6115 (op_mode
== DHD_FLAG_MFG_MODE
)) {
6116 /* Check and adjust IOCTL response timeout for Manufactring firmware */
6117 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT
);
6118 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
6122 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT
);
6123 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__
));
6125 #ifdef GET_CUSTOM_MAC_ENABLE
6126 ret
= wifi_platform_get_mac_addr(dhd
->info
->adapter
, ea_addr
.octet
);
6128 memset(buf
, 0, sizeof(buf
));
6129 bcm_mkiovar("cur_etheraddr", (void *)&ea_addr
, ETHER_ADDR_LEN
, buf
, sizeof(buf
));
6130 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
6132 DHD_ERROR(("%s: can't set MAC address MAC="MACDBG
", error=%d\n",
6133 __FUNCTION__
, MAC2STRDBG(ea_addr
.octet
), ret
));
6137 memcpy(dhd
->mac
.octet
, ea_addr
.octet
, ETHER_ADDR_LEN
);
6139 #endif /* GET_CUSTOM_MAC_ENABLE */
6140 /* Get the default device MAC address directly from firmware */
6141 memset(buf
, 0, sizeof(buf
));
6142 bcm_mkiovar("cur_etheraddr", 0, 0, buf
, sizeof(buf
));
6143 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
),
6145 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__
, ret
));
6149 /* Update public MAC address after reading from Firmware */
6150 memcpy(dhd
->mac
.octet
, buf
, ETHER_ADDR_LEN
);
6152 #ifdef GET_CUSTOM_MAC_ENABLE
6154 #endif /* GET_CUSTOM_MAC_ENABLE */
6156 /* get a capabilities from firmware */
6157 memset(dhd
->fw_capabilities
, 0, sizeof(dhd
->fw_capabilities
));
6158 bcm_mkiovar("cap", 0, 0, dhd
->fw_capabilities
, sizeof(dhd
->fw_capabilities
));
6159 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, dhd
->fw_capabilities
,
6160 sizeof(dhd
->fw_capabilities
), FALSE
, 0)) < 0) {
6161 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
6162 __FUNCTION__
, ret
));
6165 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_HOSTAP_MODE
) ||
6166 (op_mode
== DHD_FLAG_HOSTAP_MODE
)) {
6167 #ifdef SET_RANDOM_MAC_SOFTAP
6170 dhd
->op_mode
= DHD_FLAG_HOSTAP_MODE
;
6171 #if defined(ARP_OFFLOAD_SUPPORT)
6174 #ifdef PKT_FILTER_SUPPORT
6175 dhd_pkt_filter_enable
= FALSE
;
6177 #ifdef SET_RANDOM_MAC_SOFTAP
6178 SRANDOM32((uint
)jiffies
);
6179 rand_mac
= RANDOM32();
6180 iovbuf
[0] = 0x02; /* locally administered bit */
6183 iovbuf
[3] = (unsigned char)(rand_mac
& 0x0F) | 0xF0;
6184 iovbuf
[4] = (unsigned char)(rand_mac
>> 8);
6185 iovbuf
[5] = (unsigned char)(rand_mac
>> 16);
6187 bcm_mkiovar("cur_etheraddr", (void *)iovbuf
, ETHER_ADDR_LEN
, buf
, sizeof(buf
));
6188 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, buf
, sizeof(buf
), TRUE
, 0);
6190 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__
, ret
));
6192 memcpy(dhd
->mac
.octet
, iovbuf
, ETHER_ADDR_LEN
);
6193 #endif /* SET_RANDOM_MAC_SOFTAP */
6194 #if !defined(AP) && defined(WL_CFG80211)
6195 /* Turn off MPC in AP mode */
6196 bcm_mkiovar("mpc", (char *)&mpc
, 4, iovbuf
, sizeof(iovbuf
));
6197 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6198 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6199 DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__
, ret
));
6202 #ifdef SUPPORT_AP_POWERSAVE
6203 dhd_set_ap_powersave(dhd
, 0, TRUE
);
6205 #ifdef SOFTAP_UAPSD_OFF
6206 bcm_mkiovar("wme_apsd", (char *)&wme_apsd
, 4, iovbuf
, sizeof(iovbuf
));
6207 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6208 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__
, ret
));
6209 #endif /* SOFTAP_UAPSD_OFF */
6210 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_MFG_MODE
) ||
6211 (op_mode
== DHD_FLAG_MFG_MODE
)) {
6212 #if defined(ARP_OFFLOAD_SUPPORT)
6214 #endif /* ARP_OFFLOAD_SUPPORT */
6215 #ifdef PKT_FILTER_SUPPORT
6216 dhd_pkt_filter_enable
= FALSE
;
6217 #endif /* PKT_FILTER_SUPPORT */
6218 dhd
->op_mode
= DHD_FLAG_MFG_MODE
;
6220 uint32 concurrent_mode
= 0;
6221 if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_P2P_MODE
) ||
6222 (op_mode
== DHD_FLAG_P2P_MODE
)) {
6223 #if defined(ARP_OFFLOAD_SUPPORT)
6226 #ifdef PKT_FILTER_SUPPORT
6227 dhd_pkt_filter_enable
= FALSE
;
6229 dhd
->op_mode
= DHD_FLAG_P2P_MODE
;
6230 } else if ((!op_mode
&& dhd_get_fw_mode(dhd
->info
) == DHD_FLAG_IBSS_MODE
) ||
6231 (op_mode
== DHD_FLAG_IBSS_MODE
)) {
6232 dhd
->op_mode
= DHD_FLAG_IBSS_MODE
;
6234 dhd
->op_mode
= DHD_FLAG_STA_MODE
;
6235 #if !defined(AP) && defined(WLP2P)
6236 if (dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
&&
6237 (concurrent_mode
= dhd_get_concurrent_capabilites(dhd
))) {
6238 #if defined(ARP_OFFLOAD_SUPPORT)
6241 dhd
->op_mode
|= concurrent_mode
;
6244 /* Check if we are enabling p2p */
6245 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
6246 bcm_mkiovar("apsta", (char *)&apsta
, 4, iovbuf
, sizeof(iovbuf
));
6247 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
6248 iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
6249 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__
, ret
));
6252 #if defined(SOFTAP_AND_GC)
6253 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_AP
,
6254 (char *)&ap_mode
, sizeof(ap_mode
), TRUE
, 0)) < 0) {
6255 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__
, ret
));
6258 memcpy(&p2p_ea
, &dhd
->mac
, ETHER_ADDR_LEN
);
6259 ETHER_SET_LOCALADDR(&p2p_ea
);
6260 bcm_mkiovar("p2p_da_override", (char *)&p2p_ea
,
6261 ETHER_ADDR_LEN
, iovbuf
, sizeof(iovbuf
));
6262 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
6263 iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
6264 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__
, ret
));
6266 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
6270 (void)concurrent_mode
;
6274 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG
"\n",
6275 dhd
->op_mode
, MAC2STRDBG(dhd
->mac
.octet
)));
6276 /* Set Country code */
6277 if (dhd
->dhd_cspec
.ccode
[0] != 0) {
6278 printf("Set country %s, revision %d\n", dhd
->dhd_cspec
.ccode
, dhd
->dhd_cspec
.rev
);
6279 bcm_mkiovar("country", (char *)&dhd
->dhd_cspec
,
6280 sizeof(wl_country_t
), iovbuf
, sizeof(iovbuf
));
6281 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6282 printf("%s: country code setting failed %d\n", __FUNCTION__
, ret
);
6284 dhd_conf_set_country(dhd
);
6285 dhd_conf_fix_country(dhd
);
6287 dhd_conf_get_country(dhd
, &dhd
->dhd_cspec
);
6289 #if defined(DISABLE_11AC)
6290 bcm_mkiovar("vhtmode", (char *)&vhtmode
, 4, iovbuf
, sizeof(iovbuf
));
6291 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6292 DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__
, ret
));
6293 #endif /* DISABLE_11AC */
6294 dhd_conf_set_fw_string_cmd(dhd
, "vhtmode", dhd
->conf
->vhtmode
, 0, TRUE
);
6296 /* Set Listen Interval */
6297 bcm_mkiovar("assoc_listen", (char *)&listen_interval
, 4, iovbuf
, sizeof(iovbuf
));
6298 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6299 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__
, ret
));
6301 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
6302 /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
6303 bcm_mkiovar("roam_off", (char *)&roamvar
, 4, iovbuf
, sizeof(iovbuf
));
6304 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6305 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
6306 #if defined(ROAM_ENABLE)
6307 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_TRIGGER
, roam_trigger
,
6308 sizeof(roam_trigger
), TRUE
, 0)) < 0)
6309 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__
, ret
));
6310 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_SCAN_PERIOD
, roam_scan_period
,
6311 sizeof(roam_scan_period
), TRUE
, 0)) < 0)
6312 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__
, ret
));
6313 if ((dhd_wl_ioctl_cmd(dhd
, WLC_SET_ROAM_DELTA
, roam_delta
,
6314 sizeof(roam_delta
), TRUE
, 0)) < 0)
6315 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__
, ret
));
6316 bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period
, 4, iovbuf
, sizeof(iovbuf
));
6317 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6318 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__
, ret
));
6319 #endif /* ROAM_ENABLE */
6320 dhd_conf_set_roam(dhd
);
6323 bcm_mkiovar("ccx_enable", (char *)&ccx
, 4, iovbuf
, sizeof(iovbuf
));
6324 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6327 /* by default TDLS on and auto mode off */
6328 _dhd_tdls_enable(dhd
, true, false, NULL
);
6331 #ifdef DHD_ENABLE_LPC
6333 bcm_mkiovar("lpc", (char *)&lpc
, 4, iovbuf
, sizeof(iovbuf
));
6334 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6335 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6336 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__
, ret
));
6338 #endif /* DHD_ENABLE_LPC */
6339 dhd_conf_set_fw_string_cmd(dhd
, "lpc", dhd
->conf
->lpc
, 0, FALSE
);
6341 /* Set PowerSave mode */
6342 if (dhd
->conf
->pm
>= 0)
6343 power_mode
= dhd
->conf
->pm
;
6344 dhd_wl_ioctl_cmd(dhd
, WLC_SET_PM
, (char *)&power_mode
, sizeof(power_mode
), TRUE
, 0);
6346 /* Match Host and Dongle rx alignment */
6347 bcm_mkiovar("bus:txglomalign", (char *)&dongle_align
, 4, iovbuf
, sizeof(iovbuf
));
6348 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6350 #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
6351 /* enable credall to reduce the chance of no bus credit happened. */
6352 bcm_mkiovar("bus:credall", (char *)&credall
, 4, iovbuf
, sizeof(iovbuf
));
6353 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6356 #if defined(BCMSDIO)
6357 if (glom
!= DEFAULT_GLOM_VALUE
) {
6358 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__
, glom
));
6359 bcm_mkiovar("bus:txglom", (char *)&glom
, 4, iovbuf
, sizeof(iovbuf
));
6360 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6362 #endif /* defined(BCMSDIO) */
6364 /* Setup timeout if Beacons are lost and roam is off to report link down */
6365 bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout
, 4, iovbuf
, sizeof(iovbuf
));
6366 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6367 /* Setup assoc_retry_max count to reconnect target AP in dongle */
6368 bcm_mkiovar("assoc_retry_max", (char *)&retry_max
, 4, iovbuf
, sizeof(iovbuf
));
6369 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6370 #if defined(AP) && !defined(WLP2P)
6371 /* Turn off MPC in AP mode */
6372 bcm_mkiovar("mpc", (char *)&mpc
, 4, iovbuf
, sizeof(iovbuf
));
6373 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6374 bcm_mkiovar("apsta", (char *)&apsta
, 4, iovbuf
, sizeof(iovbuf
));
6375 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6376 #endif /* defined(AP) && !defined(WLP2P) */
6377 /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
6378 dhd_conf_set_fw_string_cmd(dhd
, "mimo_bw_cap", dhd
->conf
->mimo_bw_cap
, 1, TRUE
);
6379 dhd_conf_set_fw_string_cmd(dhd
, "force_wme_ac", dhd
->conf
->force_wme_ac
, 1, FALSE
);
6380 dhd_conf_set_fw_string_cmd(dhd
, "stbc_tx", dhd
->conf
->stbc
, 0, FALSE
);
6381 dhd_conf_set_fw_string_cmd(dhd
, "stbc_rx", dhd
->conf
->stbc
, 0, FALSE
);
6382 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_SRL", WLC_SET_SRL
, dhd
->conf
->srl
, 0, TRUE
);
6383 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_LRL", WLC_SET_LRL
, dhd
->conf
->lrl
, 0, FALSE
);
6384 dhd_conf_set_fw_int_cmd(dhd
, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT
, dhd
->conf
->spect
, 0, FALSE
);
6385 dhd_conf_set_fw_string_cmd(dhd
, "rsdb_mode", dhd
->conf
->rsdb_mode
, -1, TRUE
);
6388 if (ap_fw_loaded
== TRUE
) {
6389 dhd_wl_ioctl_cmd(dhd
, WLC_SET_DTIMPRD
, (char *)&dtim
, sizeof(dtim
), TRUE
, 0);
6393 #if defined(KEEP_ALIVE)
6395 /* Set Keep Alive : be sure to use FW with -keepalive */
6399 if (ap_fw_loaded
== FALSE
)
6401 if (!(dhd
->op_mode
&
6402 (DHD_FLAG_HOSTAP_MODE
| DHD_FLAG_MFG_MODE
))) {
6403 if ((res
= dhd_keep_alive_onoff(dhd
)) < 0)
6404 DHD_ERROR(("%s set keeplive failed %d\n",
6405 __FUNCTION__
, res
));
6408 #endif /* defined(KEEP_ALIVE) */
6411 bcm_mkiovar("txbf", (char *)&txbf
, 4, iovbuf
, sizeof(iovbuf
));
6412 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6413 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6414 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__
, ret
));
6416 #endif /* USE_WL_TXBF */
6417 dhd_conf_set_fw_string_cmd(dhd
, "txbf", dhd
->conf
->txbf
, 0, FALSE
);
6418 #ifdef USE_WL_FRAMEBURST
6419 /* Set frameburst to value */
6420 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_FAKEFRAG
, (char *)&frameburst
,
6421 sizeof(frameburst
), TRUE
, 0)) < 0) {
6422 DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__
, ret
));
6424 #endif /* USE_WL_FRAMEBURST */
6425 dhd_conf_set_fw_string_cmd(dhd
, "frameburst", dhd
->conf
->frameburst
, 0, FALSE
);
6426 #ifdef DHD_SET_FW_HIGHSPEED
6428 bcm_mkiovar("ack_ratio", (char *)&ack_ratio
, 4, iovbuf
, sizeof(iovbuf
));
6429 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6430 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6431 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__
, ret
));
6434 /* Set ack_ratio_depth */
6435 bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth
, 4, iovbuf
, sizeof(iovbuf
));
6436 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6437 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6438 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__
, ret
));
6440 #endif /* DHD_SET_FW_HIGHSPEED */
6441 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
6442 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
6443 /* Set ampdu ba wsize to 64 or 16 */
6444 #ifdef CUSTOM_AMPDU_BA_WSIZE
6445 ampdu_ba_wsize
= CUSTOM_AMPDU_BA_WSIZE
;
6447 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
6448 if (dhd
->op_mode
== DHD_FLAG_IBSS_MODE
)
6449 ampdu_ba_wsize
= CUSTOM_IBSS_AMPDU_BA_WSIZE
;
6450 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
6451 if (ampdu_ba_wsize
!= 0) {
6452 bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize
, 4, iovbuf
, sizeof(iovbuf
));
6453 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6454 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6455 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
6456 __FUNCTION__
, ampdu_ba_wsize
, ret
));
6459 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
6460 dhd_conf_set_fw_string_cmd(dhd
, "ampdu_ba_wsize", dhd
->conf
->ampdu_ba_wsize
, 1, FALSE
);
6462 iov_buf
= (char*)kmalloc(WLC_IOCTL_SMLEN
, GFP_KERNEL
);
6463 if (iov_buf
== NULL
) {
6464 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN
));
6469 /* Configure custom IBSS beacon transmission */
6470 if (dhd
->op_mode
& DHD_FLAG_IBSS_MODE
)
6473 bcm_mkiovar("aibss", (char *)&aibss
, 4, iovbuf
, sizeof(iovbuf
));
6474 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6475 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6476 DHD_ERROR(("%s Set aibss to %d failed %d\n",
6477 __FUNCTION__
, aibss
, ret
));
6481 bcm_mkiovar("aibss_ps", (char *)&aibss_ps
, 4, iovbuf
, sizeof(iovbuf
));
6482 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6483 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6484 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
6485 __FUNCTION__
, aibss
, ret
));
6487 #endif /* WLAIBSS_PS */
6489 memset(&bcn_config
, 0, sizeof(bcn_config
));
6490 bcn_config
.initial_min_bcn_dur
= AIBSS_INITIAL_MIN_BCN_DUR
;
6491 bcn_config
.min_bcn_dur
= AIBSS_MIN_BCN_DUR
;
6492 bcn_config
.bcn_flood_dur
= AIBSS_BCN_FLOOD_DUR
;
6493 bcn_config
.version
= AIBSS_BCN_FORCE_CONFIG_VER_0
;
6494 bcn_config
.len
= sizeof(bcn_config
);
6496 bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config
,
6497 sizeof(aibss_bcn_force_config_t
), iov_buf
, WLC_IOCTL_SMLEN
);
6498 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iov_buf
,
6499 WLC_IOCTL_SMLEN
, TRUE
, 0)) < 0) {
6500 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
6501 __FUNCTION__
, AIBSS_INITIAL_MIN_BCN_DUR
, AIBSS_MIN_BCN_DUR
,
6502 AIBSS_BCN_FLOOD_DUR
, ret
));
6504 #endif /* WLAIBSS */
6506 #if defined(CUSTOM_AMPDU_MPDU)
6507 ampdu_mpdu
= CUSTOM_AMPDU_MPDU
;
6508 if (ampdu_mpdu
!= 0 && (ampdu_mpdu
<= ampdu_ba_wsize
)) {
6509 bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu
, 4, iovbuf
, sizeof(iovbuf
));
6510 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6511 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6512 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
6513 __FUNCTION__
, CUSTOM_AMPDU_MPDU
, ret
));
6516 #endif /* CUSTOM_AMPDU_MPDU */
6518 #if defined(CUSTOM_AMPDU_RELEASE)
6519 ampdu_release
= CUSTOM_AMPDU_RELEASE
;
6520 if (ampdu_release
!= 0 && (ampdu_release
<= ampdu_ba_wsize
)) {
6521 bcm_mkiovar("ampdu_release", (char *)&du_release
, 4, iovbuf
, sizeof(iovbuf
));
6522 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6523 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6524 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
6525 __FUNCTION__
, CUSTOM_AMPDU_RELEASE
, ret
));
6528 #endif /* CUSTOM_AMPDU_RELEASE */
6530 #if defined(CUSTOM_AMSDU_AGGSF)
6531 amsdu_aggsf
= CUSTOM_AMSDU_AGGSF
;
6532 if (amsdu_aggsf
!= 0) {
6533 bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf
, 4, iovbuf
, sizeof(iovbuf
));
6534 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6535 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6536 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
6537 __FUNCTION__
, CUSTOM_AMSDU_AGGSF
, ret
));
6540 #endif /* CUSTOM_AMSDU_AGGSF */
6542 #if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
6543 /* Read 4-way handshake requirements */
6544 if (dhd_use_idsup
== 1) {
6545 bcm_mkiovar("sup_wpa", (char *)&sup_wpa
, 4, iovbuf
, sizeof(iovbuf
));
6546 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0);
6547 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
6548 * in-dongle supplicant.
6550 if (ret
>= 0 || ret
== BCME_NOTREADY
)
6551 dhd
->fw_4way_handshake
= TRUE
;
6552 DHD_TRACE(("4-way handshake mode is: %d\n", dhd
->fw_4way_handshake
));
6554 #endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
6555 #ifdef SUPPORT_2G_VHT
6556 bcm_mkiovar("vht_features", (char *)&vht_features
, 4, iovbuf
, sizeof(iovbuf
));
6557 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
6558 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__
, ret
));
6560 #endif /* SUPPORT_2G_VHT */
6561 #ifdef CUSTOM_PSPRETEND_THR
6562 /* Turn off MPC in AP mode */
6563 bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr
, 4,
6564 iovbuf
, sizeof(iovbuf
));
6565 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6566 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6567 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
6568 __FUNCTION__
, ret
));
6572 bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4
, 4, iovbuf
, sizeof(iovbuf
));
6573 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
,
6574 sizeof(iovbuf
), TRUE
, 0)) < 0) {
6575 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__
, ret
));
6578 /* Read event_msgs mask */
6579 bcm_mkiovar("event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
, sizeof(iovbuf
));
6580 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iovbuf
, sizeof(iovbuf
), FALSE
, 0)) < 0) {
6581 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__
, ret
));
6584 bcopy(iovbuf
, eventmask
, WL_EVENTING_MASK_LEN
);
6586 /* Setup event_msgs */
6587 setbit(eventmask
, WLC_E_SET_SSID
);
6588 setbit(eventmask
, WLC_E_PRUNE
);
6589 setbit(eventmask
, WLC_E_AUTH
);
6590 setbit(eventmask
, WLC_E_AUTH_IND
);
6591 setbit(eventmask
, WLC_E_ASSOC
);
6592 setbit(eventmask
, WLC_E_REASSOC
);
6593 setbit(eventmask
, WLC_E_REASSOC_IND
);
6594 setbit(eventmask
, WLC_E_DEAUTH
);
6595 setbit(eventmask
, WLC_E_DEAUTH_IND
);
6596 setbit(eventmask
, WLC_E_DISASSOC_IND
);
6597 setbit(eventmask
, WLC_E_DISASSOC
);
6598 setbit(eventmask
, WLC_E_JOIN
);
6599 setbit(eventmask
, WLC_E_START
);
6600 setbit(eventmask
, WLC_E_ASSOC_IND
);
6601 setbit(eventmask
, WLC_E_PSK_SUP
);
6602 setbit(eventmask
, WLC_E_LINK
);
6603 setbit(eventmask
, WLC_E_NDIS_LINK
);
6604 setbit(eventmask
, WLC_E_MIC_ERROR
);
6605 setbit(eventmask
, WLC_E_ASSOC_REQ_IE
);
6606 setbit(eventmask
, WLC_E_ASSOC_RESP_IE
);
6608 setbit(eventmask
, WLC_E_PMKID_CACHE
);
6609 setbit(eventmask
, WLC_E_TXFAIL
);
6611 setbit(eventmask
, WLC_E_JOIN_START
);
6612 // setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
6614 setbit(eventmask
, WLC_E_HTSFSYNC
);
6615 #endif /* WLMEDIA_HTSF */
6617 setbit(eventmask
, WLC_E_PFN_NET_FOUND
);
6618 setbit(eventmask
, WLC_E_PFN_BEST_BATCHING
);
6619 setbit(eventmask
, WLC_E_PFN_BSSID_NET_FOUND
);
6620 setbit(eventmask
, WLC_E_PFN_BSSID_NET_LOST
);
6621 #endif /* PNO_SUPPORT */
6622 /* enable dongle roaming event */
6623 setbit(eventmask
, WLC_E_ROAM
);
6624 setbit(eventmask
, WLC_E_BSSID
);
6626 setbit(eventmask
, WLC_E_ADDTS_IND
);
6627 setbit(eventmask
, WLC_E_DELTS_IND
);
6630 setbit(eventmask
, WLC_E_TDLS_PEER_EVENT
);
6633 setbit(eventmask
, WLC_E_ESCAN_RESULT
);
6634 if (dhd
->op_mode
& DHD_FLAG_P2P_MODE
) {
6635 setbit(eventmask
, WLC_E_ACTION_FRAME_RX
);
6636 setbit(eventmask
, WLC_E_P2P_DISC_LISTEN_COMPLETE
);
6638 #endif /* WL_CFG80211 */
6640 setbit(eventmask
, WLC_E_AIBSS_TXFAIL
);
6641 #endif /* WLAIBSS */
6642 #ifdef CUSTOMER_HW10
6643 clrbit(eventmask
, WLC_E_TRACE
);
6645 setbit(eventmask
, WLC_E_TRACE
);
6647 /* Write updated Event mask */
6648 bcm_mkiovar("event_msgs", eventmask
, WL_EVENTING_MASK_LEN
, iovbuf
, sizeof(iovbuf
));
6649 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
6650 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__
, ret
));
6654 /* make up event mask ext message iovar for event larger than 128 */
6655 msglen
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
+ EVENTMSGS_EXT_STRUCT_SIZE
;
6656 eventmask_msg
= (eventmsgs_ext_t
*)kmalloc(msglen
, GFP_KERNEL
);
6657 if (eventmask_msg
== NULL
) {
6658 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen
));
6662 bzero(eventmask_msg
, msglen
);
6663 eventmask_msg
->ver
= EVENTMSGS_VER
;
6664 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
6666 /* Read event_msgs_ext mask */
6667 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg
, msglen
, iov_buf
, WLC_IOCTL_SMLEN
);
6668 ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, iov_buf
, WLC_IOCTL_SMLEN
, FALSE
, 0);
6669 if (ret2
!= BCME_UNSUPPORTED
)
6671 if (ret2
== 0) { /* event_msgs_ext must be supported */
6672 bcopy(iov_buf
, eventmask_msg
, msglen
);
6674 #ifdef BT_WIFI_HANDOVER
6675 setbit(eventmask_msg
->mask
, WLC_E_BT_WIFI_HANDOVER_REQ
);
6676 #endif /* BT_WIFI_HANDOVER */
6678 /* Write updated Event mask */
6679 eventmask_msg
->ver
= EVENTMSGS_VER
;
6680 eventmask_msg
->command
= EVENTMSGS_SET_MASK
;
6681 eventmask_msg
->len
= ROUNDUP(WLC_E_LAST
, NBBY
)/NBBY
;
6682 bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg
,
6683 msglen
, iov_buf
, WLC_IOCTL_SMLEN
);
6684 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
,
6685 iov_buf
, WLC_IOCTL_SMLEN
, TRUE
, 0)) < 0) {
6686 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__
, ret
));
6689 } else if (ret2
< 0 && ret2
!= BCME_UNSUPPORTED
) {
6690 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__
, ret2
));
6692 } /* unsupported is ok */
6694 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_CHANNEL_TIME
, (char *)&scan_assoc_time
,
6695 sizeof(scan_assoc_time
), TRUE
, 0);
6696 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_UNASSOC_TIME
, (char *)&scan_unassoc_time
,
6697 sizeof(scan_unassoc_time
), TRUE
, 0);
6698 dhd_wl_ioctl_cmd(dhd
, WLC_SET_SCAN_PASSIVE_TIME
, (char *)&scan_passive_time
,
6699 sizeof(scan_passive_time
), TRUE
, 0);
6701 #ifdef ARP_OFFLOAD_SUPPORT
6702 /* Set and enable ARP offload feature for STA only */
6704 if (arpoe
&& !ap_fw_loaded
)
6709 dhd_arp_offload_enable(dhd
, TRUE
);
6710 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
6712 dhd_arp_offload_enable(dhd
, FALSE
);
6713 dhd_arp_offload_set(dhd
, 0);
6715 dhd_arp_enable
= arpoe
;
6716 #endif /* ARP_OFFLOAD_SUPPORT */
6718 #ifdef PKT_FILTER_SUPPORT
6719 /* Setup default defintions for pktfilter , enable in suspend */
6720 dhd
->pktfilter_count
= 6;
6721 /* Setup filter to allow only unicast */
6722 if (dhd_master_mode
) {
6723 dhd
->pktfilter
[DHD_UNICAST_FILTER_NUM
] = "100 0 0 0 0x01 0x00";
6724 dhd
->pktfilter
[DHD_BROADCAST_FILTER_NUM
] = NULL
;
6725 dhd
->pktfilter
[DHD_MULTICAST4_FILTER_NUM
] = NULL
;
6726 dhd
->pktfilter
[DHD_MULTICAST6_FILTER_NUM
] = NULL
;
6727 /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
6728 dhd
->pktfilter
[DHD_MDNS_FILTER_NUM
] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
6729 /* apply APP pktfilter */
6730 dhd
->pktfilter
[DHD_ARP_FILTER_NUM
] = "105 0 0 12 0xFFFF 0x0806";
6732 dhd_conf_discard_pkt_filter(dhd
);
6733 dhd_conf_add_pkt_filter(dhd
);
6737 dhd_enable_packet_filter(0, dhd
);
6739 #endif /* defined(SOFTAP) */
6740 dhd_set_packet_filter(dhd
);
6741 #endif /* PKT_FILTER_SUPPORT */
6743 bcm_mkiovar("nmode", (char *)&nmode
, 4, iovbuf
, sizeof(iovbuf
));
6744 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6745 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__
, ret
));
6746 #endif /* DISABLE_11N */
6748 #ifdef AMPDU_VO_ENABLE
6749 tid
.tid
= PRIO_8021D_VO
; /* Enable TID(6) for voice */
6751 bcm_mkiovar("ampdu_tid", (char *)&tid
, sizeof(tid
), iovbuf
, sizeof(iovbuf
));
6752 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6754 tid
.tid
= PRIO_8021D_NC
; /* Enable TID(7) for voice */
6756 bcm_mkiovar("ampdu_tid", (char *)&tid
, sizeof(tid
), iovbuf
, sizeof(iovbuf
));
6757 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6759 #if defined(SOFTAP_TPUT_ENHANCE)
6760 if (dhd
->op_mode
& DHD_FLAG_HOSTAP_MODE
) {
6761 dhd_bus_setidletime(dhd
, (int)100);
6762 #ifdef DHDTCPACK_SUPPRESS
6763 dhd
->tcpack_sup_enabled
= FALSE
;
6765 #if defined(DHD_TCP_WINSIZE_ADJUST)
6766 dhd_use_tcp_window_size_adjust
= TRUE
;
6769 memset(buf
, 0, sizeof(buf
));
6770 bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf
, sizeof(buf
));
6771 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
), FALSE
, 0)) < 0) {
6773 bcm_mkiovar("bus:txglom", (char *)&glom
, 4, iovbuf
, sizeof(iovbuf
));
6774 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6779 bcm_mkiovar("bus:txglom_auto_control", (char *)&glom
, 4, iovbuf
,
6781 dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
6785 #endif /* SOFTAP_TPUT_ENHANCE */
6787 /* query for 'ver' to get version info from firmware */
6788 memset(buf
, 0, sizeof(buf
));
6790 bcm_mkiovar("ver", (char *)&buf
, 4, buf
, sizeof(buf
));
6791 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_GET_VAR
, buf
, sizeof(buf
), FALSE
, 0)) < 0)
6792 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
6794 bcmstrtok(&ptr
, "\n", 0);
6795 /* Print fw version info */
6796 DHD_ERROR(("Firmware version = %s\n", buf
));
6797 dhd_set_version_info(dhd
, buf
);
6800 #if defined(BCMSDIO)
6801 dhd_txglom_enable(dhd
, dhd
->conf
->bus_rxglom
);
6802 // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params
6803 dhd_conf_set_fw_string_cmd(dhd
, "bus:txglom", dhd
->conf
->bus_txglom
, 1, FALSE
);
6804 #endif /* defined(BCMSDIO) */
6806 dhd_conf_set_disable_proptx(dhd
);
6807 #if defined(BCMSDIO)
6808 #ifdef PROP_TXSTATUS
6809 if (disable_proptx
||
6810 #ifdef PROP_TXSTATUS_VSDB
6811 /* enable WLFC only if the firmware is VSDB when it is in STA mode */
6812 (dhd
->op_mode
!= DHD_FLAG_HOSTAP_MODE
&&
6813 dhd
->op_mode
!= DHD_FLAG_IBSS_MODE
) ||
6814 #endif /* PROP_TXSTATUS_VSDB */
6816 wlfc_enable
= FALSE
;
6820 ret
= dhd_wl_ioctl_cmd(dhd
, WLC_DOWN
, (char *)&wl_down
, sizeof(wl_down
), TRUE
, 0);
6821 bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder
, 4, iovbuf
, sizeof(iovbuf
));
6822 if ((ret2
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0) {
6823 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__
, ret2
));
6824 if (ret2
!= BCME_UNSUPPORTED
)
6826 if (ret2
!= BCME_OK
)
6829 #endif /* DISABLE_11N */
6831 #ifdef READ_CONFIG_FROM_FILE
6832 dhd_preinit_config(dhd
, 0);
6833 #endif /* READ_CONFIG_FROM_FILE */
6838 else if (hostreorder
)
6839 dhd_wlfc_hostreorder_init(dhd
);
6840 #endif /* DISABLE_11N */
6842 #endif /* PROP_TXSTATUS */
6843 #endif /* BCMSDIO || BCMBUS */
6844 #ifdef PCIE_FULL_DONGLE
6845 /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
6846 if (FW_SUPPORTED(dhd
, ap
)) {
6847 wl_ap_isolate
= AP_ISOLATE_SENDUP_ALL
;
6848 bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate
, 4, iovbuf
, sizeof(iovbuf
));
6849 if ((ret
= dhd_wl_ioctl_cmd(dhd
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0)) < 0)
6850 DHD_ERROR(("%s failed %d\n", __FUNCTION__
, ret
));
6852 #endif /* PCIE_FULL_DONGLE */
6854 if (!dhd
->pno_state
) {
6859 dhd_interworking_enable(dhd
);
6862 dhd_wl_ioctl_cmd(dhd
, WLC_UP
, (char *)&up
, sizeof(up
), TRUE
, 0);
6868 kfree(eventmask_msg
);
/*
 * dhd_iovar: generic firmware iovar accessor.
 * Packs "name\0<payload>" via bcm_mkiovar into a stack buffer, issues it
 * with dhd_wl_ioctl as WLC_SET_VAR (set != 0) or WLC_GET_VAR (set == 0),
 * and for a successful GET copies the response back into cmd_buf.
 * NOTE(review): this extraction is fragmentary — the return-type line and
 * the ioc.buf/ioc.len assignments are not visible here; confirm upstream.
 */
6877 dhd_iovar(dhd_pub_t
*pub
, int ifidx
, char *name
, char *cmd_buf
, uint cmd_len
, int set
)
/* VLA sized to the iovar name, its NUL terminator, and the payload.
 * NOTE(review): VLA size depends on caller-supplied cmd_len — confirm
 * callers keep this small enough for the (kernel) stack. */
6879 char buf
[strlen(name
) + 1 + cmd_len
];
6880 int len
= sizeof(buf
);
/* Serialize the iovar request; len becomes the encoded length. */
6884 len
= bcm_mkiovar(name
, cmd_buf
, cmd_len
, buf
, len
);
6886 memset(&ioc
, 0, sizeof(ioc
));
/* Select SET vs GET ioctl variant based on the 'set' flag. */
6888 ioc
.cmd
= set
? WLC_SET_VAR
: WLC_GET_VAR
;
6893 ret
= dhd_wl_ioctl(pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
);
/* On a successful GET, hand the returned value back to the caller. */
6894 if (!set
&& ret
>= 0)
6895 memcpy(cmd_buf
, buf
, cmd_len
);
/*
 * dhd_change_mtu: validate and apply an MTU change for interface ifidx.
 * Rejects the request while the net_device is running (must be down) and
 * bounds-checks new_mtu against [DHD_MIN_MTU, DHD_MAX_MTU].
 * NOTE(review): fragmentary extraction — the success path (actually
 * storing the MTU) and final return are not visible here.
 */
6900 int dhd_change_mtu(dhd_pub_t
*dhdp
, int new_mtu
, int ifidx
)
6902 struct dhd_info
*dhd
= dhdp
->info
;
6903 struct net_device
*dev
= NULL
;
/* The interface slot for ifidx must already exist. */
6905 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
6906 dev
= dhd
->iflist
[ifidx
]->net
;
/* MTU may only be changed while the interface is down. */
6909 if (netif_running(dev
)) {
6910 DHD_ERROR(("%s: Must be down to change its MTU\n", dev
->name
));
6911 return BCME_NOTDOWN
;
/* Acceptable MTU range for this driver. */
6914 #define DHD_MIN_MTU 1500
6915 #define DHD_MAX_MTU 1752
6917 if ((new_mtu
< DHD_MIN_MTU
) || (new_mtu
> DHD_MAX_MTU
)) {
6918 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__
, new_mtu
));
6926 #ifdef ARP_OFFLOAD_SUPPORT
6927 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
/*
 * aoe_update_host_ipv4_table: add or remove one IPv4 address (ipa) in the
 * dongle's ARP-offload host-IP table for interface idx.
 * Strategy: read the current table into a local cache, clear the dongle
 * table, edit the cache (insert ipa into a free slot on add, zero the
 * matching slot on remove), then write the surviving entries back.
 * NOTE(review): fragmentary extraction — the insertion assignment and
 * some loop/brace lines are not visible here.
 */
6929 aoe_update_host_ipv4_table(dhd_pub_t
*dhd_pub
, u32 ipa
, bool add
, int idx
)
6931 u32 ipv4_buf
[MAX_IPV4_ENTRIES
]; /* temp save for AOE host_ip table */
6935 bzero(ipv4_buf
, sizeof(ipv4_buf
));
6937 /* display what we've got */
6938 ret
= dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
6939 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__
));
6941 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
6943 /* now we saved hoste_ip table, clr it in the dongle AOE */
6944 dhd_aoe_hostip_clr(dhd_pub
, idx
);
6947 DHD_ERROR(("%s failed\n", __FUNCTION__
));
/* Walk the cached table: on add, claim the first empty (zero) slot;
 * on remove, drop the entry equal to ipa. */
6951 for (i
= 0; i
< MAX_IPV4_ENTRIES
; i
++) {
6952 if (add
&& (ipv4_buf
[i
] == 0)) {
6954 add
= FALSE
; /* added ipa to local table */
6955 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
6957 } else if (ipv4_buf
[i
] == ipa
) {
6959 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
6960 __FUNCTION__
, ipa
, i
));
/* Re-program every non-zero cached entry into the dongle. */
6963 if (ipv4_buf
[i
] != 0) {
6964 /* add back host_ip entries from our local cache */
6965 dhd_arp_offload_add_ip(dhd_pub
, ipv4_buf
[i
], idx
);
6966 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
6967 __FUNCTION__
, ipv4_buf
[i
], i
));
6971 /* see the resulting hostip table */
6972 dhd_arp_get_arp_hostip_table(dhd_pub
, ipv4_buf
, sizeof(ipv4_buf
), idx
);
6973 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__
));
6974 dhd_print_buf(ipv4_buf
, 32, 4); /* max 8 IPs 4b each */
6979 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
6980 * whenever there is an event related to an IP address.
6981 * ptr : kernel provided pointer to IP address that has changed
/*
 * dhd_inetaddr_notifier_call: Linux inetaddr notifier callback, invoked by
 * the kernel when an IPv4 address on some net_device changes.  Filters out
 * non-DHD devices, resolves the DHD interface index for the device, and on
 * address-up/down events updates the dongle's ARP-offload host-IP table
 * (deferring to dhd->pend_ipaddr when the bus is not yet in DATA state).
 * NOTE(review): fragmentary extraction — the 'ptr' parameter line, switch
 * on 'event', several braces and returns are not visible here.
 */
6983 static int dhd_inetaddr_notifier_call(struct notifier_block
*this,
6984 unsigned long event
,
6987 struct in_ifaddr
*ifa
= (struct in_ifaddr
*)ptr
;
/* Nothing to do when ARP offload is globally disabled. */
6993 if (!dhd_arp_enable
)
/* Ignore notifications without a usable device. */
6995 if (!ifa
|| !(ifa
->ifa_dev
->dev
))
6998 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
6999 /* Filter notifications meant for non Broadcom devices */
7000 if ((ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_pri
) &&
7001 (ifa
->ifa_dev
->dev
->netdev_ops
!= &dhd_ops_virt
)) {
7002 #if defined(WL_ENABLE_P2P_IF)
7003 if (!wl_cfgp2p_is_ifops(ifa
->ifa_dev
->dev
->netdev_ops
))
7004 #endif /* WL_ENABLE_P2P_IF */
7007 #endif /* LINUX_VERSION_CODE */
/* Recover the driver context from the net_device. */
7009 dhd
= DHD_DEV_INFO(ifa
->ifa_dev
->dev
);
7013 dhd_pub
= &dhd
->pub
;
7015 if (dhd_pub
->arp_version
== 1) {
/* Map the notifying net_device to its DHD interface index. */
7019 for (idx
= 0; idx
< DHD_MAX_IFS
; idx
++) {
7020 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
== ifa
->ifa_dev
->dev
)
7023 if (idx
< DHD_MAX_IFS
)
7024 DHD_TRACE(("ifidx : %p %s %d\n", dhd
->iflist
[idx
]->net
,
7025 dhd
->iflist
[idx
]->name
, dhd
->iflist
[idx
]->idx
));
7027 DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa
->ifa_label
));
/* Address-up path: program the new IP into the dongle, or queue it
 * as pending if the bus is not ready for data yet. */
7034 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
7035 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
7037 if (dhd
->pub
.busstate
!= DHD_BUS_DATA
) {
7038 DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__
));
7039 if (dhd
->pend_ipaddr
) {
7040 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
7041 __FUNCTION__
, dhd
->pend_ipaddr
));
7043 dhd
->pend_ipaddr
= ifa
->ifa_address
;
7047 #ifdef AOE_IP_ALIAS_SUPPORT
7048 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
7050 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, TRUE
, idx
);
7051 #endif /* AOE_IP_ALIAS_SUPPORT */
/* Address-down path: drop pending state and clear the dongle's
 * host-IP/ARP entries for this interface. */
7055 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
7056 __FUNCTION__
, ifa
->ifa_label
, ifa
->ifa_address
));
7057 dhd
->pend_ipaddr
= 0;
7058 #ifdef AOE_IP_ALIAS_SUPPORT
7059 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
7061 aoe_update_host_ipv4_table(dhd_pub
, ifa
->ifa_address
, FALSE
, idx
);
7063 dhd_aoe_hostip_clr(&dhd
->pub
, idx
);
7064 dhd_aoe_arp_clr(&dhd
->pub
, idx
);
7065 #endif /* AOE_IP_ALIAS_SUPPORT */
/* Any other event: ignore. */
7069 DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
7070 __func__
, ifa
->ifa_label
, event
));
7075 #endif /* ARP_OFFLOAD_SUPPORT */
7078 /* Neighbor Discovery Offload: defered handler */
7080 dhd_inet6_work_handler(void *dhd_info
, void *event_data
, u8 event
)
7082 struct ipv6_work_info_t
*ndo_work
= (struct ipv6_work_info_t
*)event_data
;
7083 dhd_pub_t
*pub
= &((dhd_info_t
*)dhd_info
)->pub
;
7086 if (event
!= DHD_WQ_WORK_IPV6_NDO
) {
7087 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__
));
7092 DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__
));
7097 DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__
));
7101 if (ndo_work
->if_idx
) {
7102 DHD_ERROR(("%s: idx %d \n", __FUNCTION__
, ndo_work
->if_idx
));
7106 switch (ndo_work
->event
) {
7108 DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__
));
7109 ret
= dhd_ndo_enable(pub
, TRUE
);
7111 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__
, ret
));
7114 ret
= dhd_ndo_add_ip(pub
, &ndo_work
->ipv6_addr
[0], ndo_work
->if_idx
);
7116 DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
7117 __FUNCTION__
, ret
));
7121 DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__
));
7122 ret
= dhd_ndo_remove_ip(pub
, ndo_work
->if_idx
);
7124 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
7125 __FUNCTION__
, ret
));
7129 ret
= dhd_ndo_enable(pub
, FALSE
);
7131 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__
, ret
));
7136 DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__
));
7140 /* free ndo_work. alloced while scheduling the work */
7147 * Neighbor Discovery Offload: Called when an interface
7148 * is assigned with ipv6 address.
7149 * Handles only primary interface
7151 static int dhd_inet6addr_notifier_call(struct notifier_block
*this,
7152 unsigned long event
,
7157 struct inet6_ifaddr
*inet6_ifa
= ptr
;
7158 struct in6_addr
*ipv6_addr
= &inet6_ifa
->addr
;
7159 struct ipv6_work_info_t
*ndo_info
;
7160 int idx
= 0; /* REVISIT */
7162 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
7163 /* Filter notifications meant for non Broadcom devices */
7164 if (inet6_ifa
->idev
->dev
->netdev_ops
!= &dhd_ops_pri
) {
7167 #endif /* LINUX_VERSION_CODE */
7169 dhd
= DHD_DEV_INFO(inet6_ifa
->idev
->dev
);
7173 if (dhd
->iflist
[idx
] && dhd
->iflist
[idx
]->net
!= inet6_ifa
->idev
->dev
)
7175 dhd_pub
= &dhd
->pub
;
7176 if (!FW_SUPPORTED(dhd_pub
, ndoe
))
7179 ndo_info
= (struct ipv6_work_info_t
*)kzalloc(sizeof(struct ipv6_work_info_t
), GFP_ATOMIC
);
7181 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__
));
7185 ndo_info
->event
= event
;
7186 ndo_info
->if_idx
= idx
;
7187 memcpy(&ndo_info
->ipv6_addr
[0], ipv6_addr
, IPV6_ADDR_LEN
);
7189 /* defer the work to thread as it may block kernel */
7190 dhd_deferred_schedule_work(dhd
->dhd_deferred_wq
, (void *)ndo_info
, DHD_WQ_WORK_IPV6_NDO
,
7191 dhd_inet6_work_handler
, DHD_WORK_PRIORITY_LOW
);
7194 #endif /* #ifdef CONFIG_IPV6 */
7197 dhd_register_if(dhd_pub_t
*dhdp
, int ifidx
, bool need_rtnl_lock
)
7199 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
7201 struct net_device
*net
= NULL
;
7203 uint8 temp_addr
[ETHER_ADDR_LEN
] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
7205 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__
, ifidx
));
7207 ASSERT(dhd
&& dhd
->iflist
[ifidx
]);
7208 ifp
= dhd
->iflist
[ifidx
];
7210 ASSERT(net
&& (ifp
->idx
== ifidx
));
7213 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7215 net
->get_stats
= dhd_get_stats
;
7216 net
->do_ioctl
= dhd_ioctl_entry
;
7217 net
->hard_start_xmit
= dhd_start_xmit
;
7218 net
->set_mac_address
= dhd_set_mac_address
;
7219 net
->set_multicast_list
= dhd_set_multicast_list
;
7220 net
->open
= net
->stop
= NULL
;
7222 ASSERT(!net
->netdev_ops
);
7223 net
->netdev_ops
= &dhd_ops_virt
;
7224 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7226 net
->netdev_ops
= &dhd_cfgp2p_ops_virt
;
7227 #endif /* P2PONEINT */
7229 /* Ok, link into the network layer... */
7232 * device functions for the primary interface only
7234 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
7235 net
->open
= dhd_open
;
7236 net
->stop
= dhd_stop
;
7238 net
->netdev_ops
= &dhd_ops_pri
;
7239 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
7240 if (!ETHER_ISNULLADDR(dhd
->pub
.mac
.octet
))
7241 memcpy(temp_addr
, dhd
->pub
.mac
.octet
, ETHER_ADDR_LEN
);
7244 * We have to use the primary MAC for virtual interfaces
7246 memcpy(temp_addr
, ifp
->mac_addr
, ETHER_ADDR_LEN
);
7248 * Android sets the locally administered bit to indicate that this is a
7249 * portable hotspot. This will not work in simultaneous AP/STA mode,
7250 * nor with P2P. Need to set the Donlge's MAC address, and then use that.
7252 if (!memcmp(temp_addr
, dhd
->iflist
[0]->mac_addr
,
7254 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
7255 __func__
, net
->name
));
7256 temp_addr
[0] |= 0x02;
7260 net
->hard_header_len
= ETH_HLEN
+ dhd
->pub
.hdrlen
;
7261 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
7262 net
->ethtool_ops
= &dhd_ethtool_ops
;
7263 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
7265 #if defined(WL_WIRELESS_EXT)
7266 #if WIRELESS_EXT < 19
7267 net
->get_wireless_stats
= dhd_get_wireless_stats
;
7268 #endif /* WIRELESS_EXT < 19 */
7269 #if WIRELESS_EXT > 12
7270 net
->wireless_handlers
= (struct iw_handler_def
*)&wl_iw_handler_def
;
7271 #endif /* WIRELESS_EXT > 12 */
7272 #endif /* defined(WL_WIRELESS_EXT) */
7274 dhd
->pub
.rxsz
= DBUS_RX_BUFFER_SIZE_DHD(net
);
7276 memcpy(net
->dev_addr
, temp_addr
, ETHER_ADDR_LEN
);
7279 printf("%s\n", dhd_version
);
7282 err
= register_netdev(net
);
7284 err
= register_netdevice(net
);
7287 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net
->name
, err
));
7292 err
= custom_rps_map_set(net
->_rx
, RPS_CPUS_MASK
, strlen(RPS_CPUS_MASK
));
7294 DHD_ERROR(("%s : custom_rps_map_set done. error : %d\n", __FUNCTION__
, err
));
7295 #endif /* SET_RPS_CPUS */
7299 printf("Register interface [%s] MAC: "MACDBG
"\n\n", net
->name
,
7300 MAC2STRDBG(net
->dev_addr
));
7302 #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
7303 // wl_iw_iscan_set_scan_broadcast_prep(net, 1);
7306 #if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
7307 KERNEL_VERSION(2, 6, 27))))
7310 up(&dhd_registration_sem
);
7312 if (!dhd_download_fw_on_driverload
) {
7313 dhd_net_bus_devreset(net
, TRUE
);
7315 dhd_net_bus_suspend(net
);
7316 #endif /* BCMLXSDMMC */
7317 wifi_platform_set_power(dhdp
->info
->adapter
, FALSE
, WIFI_TURNOFF_DELAY
);
7320 #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
7324 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
7327 net
->netdev_ops
= NULL
;
7333 dhd_bus_detach(dhd_pub_t
*dhdp
)
7337 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
7340 dhd
= (dhd_info_t
*)dhdp
->info
;
7344 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
7345 * calling stop again will cuase SD read/write errors.
7347 if (dhd
->pub
.busstate
!= DHD_BUS_DOWN
) {
7348 /* Stop the protocol module */
7349 dhd_prot_stop(&dhd
->pub
);
7351 /* Stop the bus module */
7352 dhd_bus_stop(dhd
->pub
.bus
, TRUE
);
7355 #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
7356 dhd_bus_oob_intr_unregister(dhdp
);
7363 void dhd_detach(dhd_pub_t
*dhdp
)
7366 unsigned long flags
;
7367 int timer_valid
= FALSE
;
7372 dhd
= (dhd_info_t
*)dhdp
->info
;
7376 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
7378 #endif /* CUSTOMER_HW20 && WLANAUDIO */
7380 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__
, dhd
->dhd_state
));
7383 if (!(dhd
->dhd_state
& DHD_ATTACH_STATE_DONE
)) {
7384 /* Give sufficient time for threads to start running in case
7385 * dhd_attach() has failed
7390 if (dhd
->dhd_state
& DHD_ATTACH_STATE_PROT_ATTACH
) {
7391 dhd_bus_detach(dhdp
);
7392 #ifdef PCIE_FULL_DONGLE
7393 dhd_flow_rings_deinit(dhdp
);
7397 dhd_prot_detach(dhdp
);
7400 #ifdef ARP_OFFLOAD_SUPPORT
7401 if (dhd_inetaddr_notifier_registered
) {
7402 dhd_inetaddr_notifier_registered
= FALSE
;
7403 unregister_inetaddr_notifier(&dhd_inetaddr_notifier
);
7405 #endif /* ARP_OFFLOAD_SUPPORT */
7407 if (dhd_inet6addr_notifier_registered
) {
7408 dhd_inet6addr_notifier_registered
= FALSE
;
7409 unregister_inet6addr_notifier(&dhd_inet6addr_notifier
);
7413 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
7414 if (dhd
->dhd_state
& DHD_ATTACH_STATE_EARLYSUSPEND_DONE
) {
7415 if (dhd
->early_suspend
.suspend
)
7416 unregister_early_suspend(&dhd
->early_suspend
);
7418 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
7420 #if defined(WL_WIRELESS_EXT)
7421 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WL_ATTACH
) {
7422 /* Detatch and unlink in the iw */
7425 #endif /* defined(WL_WIRELESS_EXT) */
7427 /* delete all interfaces, start with virtual */
7428 if (dhd
->dhd_state
& DHD_ATTACH_STATE_ADD_IF
) {
7432 /* Cleanup virtual interfaces */
7433 dhd_net_if_lock_local(dhd
);
7434 for (i
= 1; i
< DHD_MAX_IFS
; i
++) {
7436 dhd_remove_if(&dhd
->pub
, i
, TRUE
);
7438 dhd_net_if_unlock_local(dhd
);
7440 /* delete primary interface 0 */
7441 ifp
= dhd
->iflist
[0];
7444 if (ifp
&& ifp
->net
) {
7448 /* in unregister_netdev case, the interface gets freed by net->destructor
7449 * (which is set to free_netdev)
7451 if (ifp
->net
->reg_state
== NETREG_UNINITIALIZED
)
7452 free_netdev(ifp
->net
);
7455 custom_rps_map_clear(ifp
->net
->_rx
);
7456 #endif /* SET_RPS_CPUS */
7457 unregister_netdev(ifp
->net
);
7461 dhd_wmf_cleanup(dhdp
, 0);
7462 #endif /* DHD_WMF */
7464 dhd_if_del_sta_list(ifp
);
7466 MFREE(dhd
->pub
.osh
, ifp
, sizeof(*ifp
));
7467 dhd
->iflist
[0] = NULL
;
7471 /* Clear the watchdog timer */
7472 DHD_GENERAL_LOCK(&dhd
->pub
, flags
);
7473 timer_valid
= dhd
->wd_timer_valid
;
7474 dhd
->wd_timer_valid
= FALSE
;
7475 DHD_GENERAL_UNLOCK(&dhd
->pub
, flags
);
7477 del_timer_sync(&dhd
->timer
);
7479 if (dhd
->dhd_state
& DHD_ATTACH_STATE_THREADS_CREATED
) {
7480 if (dhd
->thr_wdt_ctl
.thr_pid
>= 0) {
7481 PROC_STOP(&dhd
->thr_wdt_ctl
);
7484 if (dhd
->rxthread_enabled
&& dhd
->thr_rxf_ctl
.thr_pid
>= 0) {
7485 PROC_STOP(&dhd
->thr_rxf_ctl
);
7488 if (dhd
->thr_dpc_ctl
.thr_pid
>= 0) {
7489 PROC_STOP(&dhd
->thr_dpc_ctl
);
7491 tasklet_kill(&dhd
->tasklet
);
7494 if (dhd
->dhd_state
& DHD_ATTACH_STATE_CFG80211
) {
7495 wl_cfg80211_detach(NULL
);
7496 dhd_monitor_uninit();
7499 /* free deferred work queue */
7500 dhd_deferred_work_deinit(dhd
->dhd_deferred_wq
);
7501 dhd
->dhd_deferred_wq
= NULL
;
7503 #ifdef SHOW_LOGTRACE
7504 if (dhd
->event_data
.fmts
)
7505 kfree(dhd
->event_data
.fmts
);
7506 if (dhd
->event_data
.raw_fmts
)
7507 kfree(dhd
->event_data
.raw_fmts
);
7508 #endif /* SHOW_LOGTRACE */
7511 if (dhdp
->pno_state
)
7512 dhd_pno_deinit(dhdp
);
7514 #if defined(CONFIG_PM_SLEEP)
7515 if (dhd_pm_notifier_registered
) {
7516 unregister_pm_notifier(&dhd_pm_notifier
);
7517 dhd_pm_notifier_registered
= FALSE
;
7519 #endif /* CONFIG_PM_SLEEP */
7520 #ifdef DEBUG_CPU_FREQ
7522 free_percpu(dhd
->new_freq
);
7523 dhd
->new_freq
= NULL
;
7524 cpufreq_unregister_notifier(&dhd
->freq_trans
, CPUFREQ_TRANSITION_NOTIFIER
);
7526 if (dhd
->dhd_state
& DHD_ATTACH_STATE_WAKELOCKS_INIT
) {
7527 DHD_TRACE(("wd wakelock count:%d\n", dhd
->wakelock_wd_counter
));
7528 #ifdef CONFIG_HAS_WAKELOCK
7529 dhd
->wakelock_counter
= 0;
7530 dhd
->wakelock_wd_counter
= 0;
7531 dhd
->wakelock_rx_timeout_enable
= 0;
7532 dhd
->wakelock_ctrl_timeout_enable
= 0;
7533 wake_lock_destroy(&dhd
->wl_wifi
);
7534 wake_lock_destroy(&dhd
->wl_rxwake
);
7535 wake_lock_destroy(&dhd
->wl_ctrlwake
);
7536 wake_lock_destroy(&dhd
->wl_wdwake
);
7537 #ifdef BCMPCIE_OOB_HOST_WAKE
7538 wake_lock_destroy(&dhd
->wl_intrwake
);
7539 #endif /* BCMPCIE_OOB_HOST_WAKE */
7540 #endif /* CONFIG_HAS_WAKELOCK */
7546 #ifdef DHDTCPACK_SUPPRESS
7547 /* This will free all MEM allocated for TCPACK SUPPRESS */
7548 dhd_tcpack_suppress_set(&dhd
->pub
, TCPACK_SUP_OFF
);
7549 #endif /* DHDTCPACK_SUPPRESS */
7550 dhd_conf_detach(dhdp
);
7555 dhd_free(dhd_pub_t
*dhdp
)
7558 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
7562 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
7563 if (dhdp
->reorder_bufs
[i
]) {
7564 reorder_info_t
*ptr
;
7565 uint32 buf_size
= sizeof(struct reorder_info
);
7567 ptr
= dhdp
->reorder_bufs
[i
];
7569 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
7570 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7571 i
, ptr
->max_idx
, buf_size
));
7573 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
7574 dhdp
->reorder_bufs
[i
] = NULL
;
7578 dhd_sta_pool_fini(dhdp
, DHD_MAX_STA
);
7580 dhd
= (dhd_info_t
*)dhdp
->info
;
7581 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
7583 dhd
!= (dhd_info_t
*)dhd_os_prealloc(dhdp
, DHD_PREALLOC_DHD_INFO
, 0, FALSE
))
7584 MFREE(dhd
->pub
.osh
, dhd
, sizeof(*dhd
));
7590 dhd_clear(dhd_pub_t
*dhdp
)
7592 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
7596 for (i
= 0; i
< ARRAYSIZE(dhdp
->reorder_bufs
); i
++) {
7597 if (dhdp
->reorder_bufs
[i
]) {
7598 reorder_info_t
*ptr
;
7599 uint32 buf_size
= sizeof(struct reorder_info
);
7601 ptr
= dhdp
->reorder_bufs
[i
];
7603 buf_size
+= ((ptr
->max_idx
+ 1) * sizeof(void*));
7604 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
7605 i
, ptr
->max_idx
, buf_size
));
7607 MFREE(dhdp
->osh
, dhdp
->reorder_bufs
[i
], buf_size
);
7608 dhdp
->reorder_bufs
[i
] = NULL
;
7612 dhd_sta_pool_clear(dhdp
, DHD_MAX_STA
);
7617 dhd_module_cleanup(void)
7619 printf("%s: Enter\n", __FUNCTION__
);
7621 dhd_bus_unregister();
7625 dhd_wifi_platform_unregister_drv();
7626 #ifdef CUSTOMER_HW_AMLOGIC
7627 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
7631 printf("%s: Exit\n", __FUNCTION__
);
7635 dhd_module_exit(void)
7637 dhd_module_cleanup();
7638 unregister_reboot_notifier(&dhd_reboot_notifier
);
7642 dhd_module_init(void)
7645 int retry
= POWERUP_MAX_RETRY
;
7647 printf("%s: in\n", __FUNCTION__
);
7648 #ifdef CUSTOMER_HW_AMLOGIC
7649 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
7650 if (wifi_setup_dt()) {
7651 printf("wifi_dt : fail to setup dt\n");
7656 DHD_PERIM_RADIO_INIT();
7658 if (firmware_path
[0] != '\0') {
7659 strncpy(fw_bak_path
, firmware_path
, MOD_PARAM_PATHLEN
);
7660 fw_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
7663 if (nvram_path
[0] != '\0') {
7664 strncpy(nv_bak_path
, nvram_path
, MOD_PARAM_PATHLEN
);
7665 nv_bak_path
[MOD_PARAM_PATHLEN
-1] = '\0';
7669 err
= dhd_wifi_platform_register_drv();
7671 register_reboot_notifier(&dhd_reboot_notifier
);
7675 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
7676 __FUNCTION__
, retry
));
7677 strncpy(firmware_path
, fw_bak_path
, MOD_PARAM_PATHLEN
);
7678 firmware_path
[MOD_PARAM_PATHLEN
-1] = '\0';
7679 strncpy(nvram_path
, nv_bak_path
, MOD_PARAM_PATHLEN
);
7680 nvram_path
[MOD_PARAM_PATHLEN
-1] = '\0';
7685 #ifdef CUSTOMER_HW_AMLOGIC
7686 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
7690 DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__
));
7693 printf("%s: Exit err=%d\n", __FUNCTION__
, err
);
7698 dhd_reboot_callback(struct notifier_block
*this, unsigned long code
, void *unused
)
7700 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__
, code
));
7701 if (code
== SYS_RESTART
) {
7708 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
7709 #if defined(CONFIG_DEFERRED_INITCALLS)
7710 deferred_module_init(dhd_module_init
);
7711 #elif defined(USE_LATE_INITCALL_SYNC)
7712 late_initcall_sync(dhd_module_init
);
7714 late_initcall(dhd_module_init
);
7715 #endif /* USE_LATE_INITCALL_SYNC */
7717 module_init(dhd_module_init
);
7718 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
7720 module_exit(dhd_module_exit
);
7723 * OS specific functions required to implement DHD driver in OS independent way
7726 dhd_os_proto_block(dhd_pub_t
*pub
)
7728 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
7731 DHD_PERIM_UNLOCK(pub
);
7733 down(&dhd
->proto_sem
);
7735 DHD_PERIM_LOCK(pub
);
7743 dhd_os_proto_unblock(dhd_pub_t
*pub
)
7745 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
7748 up(&dhd
->proto_sem
);
7756 dhd_os_get_ioctl_resp_timeout(void)
7758 return ((unsigned int)dhd_ioctl_timeout_msec
);
7762 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec
)
7764 dhd_ioctl_timeout_msec
= (int)timeout_msec
;
7768 dhd_os_ioctl_resp_wait(dhd_pub_t
*pub
, uint
*condition
, bool *pending
)
7770 dhd_info_t
* dhd
= (dhd_info_t
*)(pub
->info
);
7773 /* Convert timeout in millsecond to jiffies */
7774 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
7775 timeout
= msecs_to_jiffies(dhd_ioctl_timeout_msec
);
7777 timeout
= dhd_ioctl_timeout_msec
* HZ
/ 1000;
7780 DHD_PERIM_UNLOCK(pub
);
7782 timeout
= wait_event_timeout(dhd
->ioctl_resp_wait
, (*condition
), timeout
);
7784 DHD_PERIM_LOCK(pub
);
7790 dhd_os_ioctl_resp_wake(dhd_pub_t
*pub
)
7792 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
7794 wake_up(&dhd
->ioctl_resp_wait
);
7799 dhd_os_wd_timer_extend(void *bus
, bool extend
)
7801 dhd_pub_t
*pub
= bus
;
7802 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
7805 dhd_os_wd_timer(bus
, WATCHDOG_EXTEND_INTERVAL
);
7807 dhd_os_wd_timer(bus
, dhd
->default_wd_interval
);
7812 dhd_os_wd_timer(void *bus
, uint wdtick
)
7814 dhd_pub_t
*pub
= bus
;
7815 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
7816 unsigned long flags
;
7818 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
7821 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
7825 DHD_GENERAL_LOCK(pub
, flags
);
7827 /* don't start the wd until fw is loaded */
7828 if (pub
->busstate
== DHD_BUS_DOWN
) {
7829 DHD_GENERAL_UNLOCK(pub
, flags
);
7831 DHD_OS_WD_WAKE_UNLOCK(pub
);
7835 /* Totally stop the timer */
7836 if (!wdtick
&& dhd
->wd_timer_valid
== TRUE
) {
7837 dhd
->wd_timer_valid
= FALSE
;
7838 DHD_GENERAL_UNLOCK(pub
, flags
);
7839 del_timer_sync(&dhd
->timer
);
7840 DHD_OS_WD_WAKE_UNLOCK(pub
);
7845 DHD_OS_WD_WAKE_LOCK(pub
);
7846 dhd_watchdog_ms
= (uint
)wdtick
;
7847 /* Re arm the timer, at last watchdog period */
7848 mod_timer(&dhd
->timer
, jiffies
+ msecs_to_jiffies(dhd_watchdog_ms
));
7849 dhd
->wd_timer_valid
= TRUE
;
7851 DHD_GENERAL_UNLOCK(pub
, flags
);
7855 dhd_os_open_image(char *filename
)
7859 fp
= filp_open(filename
, O_RDONLY
, 0);
7861 * 2.6.11 (FC4) supports filp_open() but later revs don't?
7863 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
7873 dhd_os_get_image_block(char *buf
, int len
, void *image
)
7875 struct file
*fp
= (struct file
*)image
;
7881 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, len
);
7889 dhd_os_close_image(void *image
)
7892 filp_close((struct file
*)image
, NULL
);
7896 dhd_os_sdlock(dhd_pub_t
*pub
)
7900 dhd
= (dhd_info_t
*)(pub
->info
);
7902 if (dhd_dpc_prio
>= 0)
7905 spin_lock_bh(&dhd
->sdlock
);
7909 dhd_os_sdunlock(dhd_pub_t
*pub
)
7913 dhd
= (dhd_info_t
*)(pub
->info
);
7915 if (dhd_dpc_prio
>= 0)
7918 spin_unlock_bh(&dhd
->sdlock
);
7922 dhd_os_sdlock_txq(dhd_pub_t
*pub
)
7926 dhd
= (dhd_info_t
*)(pub
->info
);
7927 spin_lock_bh(&dhd
->txqlock
);
7931 dhd_os_sdunlock_txq(dhd_pub_t
*pub
)
7935 dhd
= (dhd_info_t
*)(pub
->info
);
7936 spin_unlock_bh(&dhd
->txqlock
);
7940 dhd_os_sdlock_rxq(dhd_pub_t
*pub
)
7945 dhd_os_sdunlock_rxq(dhd_pub_t
*pub
)
7950 dhd_os_rxflock(dhd_pub_t
*pub
)
7954 dhd
= (dhd_info_t
*)(pub
->info
);
7955 spin_lock_bh(&dhd
->rxf_lock
);
7960 dhd_os_rxfunlock(dhd_pub_t
*pub
)
7964 dhd
= (dhd_info_t
*)(pub
->info
);
7965 spin_unlock_bh(&dhd
->rxf_lock
);
7968 #ifdef DHDTCPACK_SUPPRESS
7970 dhd_os_tcpacklock(dhd_pub_t
*pub
)
7974 dhd
= (dhd_info_t
*)(pub
->info
);
7975 spin_lock_bh(&dhd
->tcpack_lock
);
7980 dhd_os_tcpackunlock(dhd_pub_t
*pub
)
7984 dhd
= (dhd_info_t
*)(pub
->info
);
7985 spin_unlock_bh(&dhd
->tcpack_lock
);
7987 #endif /* DHDTCPACK_SUPPRESS */
7989 uint8
* dhd_os_prealloc(dhd_pub_t
*dhdpub
, int section
, uint size
, bool kmalloc_if_fail
)
7992 gfp_t flags
= CAN_SLEEP() ? GFP_KERNEL
: GFP_ATOMIC
;
7994 buf
= (uint8
*)wifi_platform_prealloc(dhdpub
->info
->adapter
, section
, size
);
7996 DHD_ERROR(("%s: failed to alloc memory, section: %d,"
7997 " size: %dbytes\n", __FUNCTION__
, section
, size
));
7998 if (kmalloc_if_fail
)
7999 buf
= kmalloc(size
, flags
);
8005 void dhd_os_prefree(dhd_pub_t
*dhdpub
, void *addr
, uint size
)
8009 #if defined(WL_WIRELESS_EXT)
8010 struct iw_statistics
*
8011 dhd_get_wireless_stats(struct net_device
*dev
)
8014 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8020 res
= wl_iw_get_wireless_stats(dev
, &dhd
->iw
.wstats
);
8023 return &dhd
->iw
.wstats
;
8027 #endif /* defined(WL_WIRELESS_EXT) */
8029 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
8031 dhd_wlanaudio_event(dhd_info_t
*dhd
, int *ifidx
, void *pktdata
,
8032 wl_event_msg_t
*event
, void **data
)
8035 char eabuf
[ETHER_ADDR_STR_LEN
];
8036 struct ether_addr
*addr
= &event
->addr
;
8037 uint32 type
= ntoh32_ua((void *)&event
->event_type
);
8042 bcm_ether_ntoa(addr
, eabuf
);
8044 return (BCME_ERROR
);
8046 for (cnt
= 0; cnt
< MAX_WLANAUDIO_BLACKLIST
; cnt
++) {
8047 if (dhd
->wlanaudio_blist
[cnt
].is_blacklist
)
8050 if (!bcmp(&dhd
->wlanaudio_blist
[cnt
].blacklist_addr
,
8051 addr
, ETHER_ADDR_LEN
)) {
8052 /* Mac address is Same */
8053 dhd
->wlanaudio_blist
[cnt
].cnt
++;
8055 if (dhd
->wlanaudio_blist
[cnt
].cnt
< 15) {
8056 /* black list is false */
8057 if ((dhd
->wlanaudio_blist
[cnt
].cnt
> 10) &&
8058 (jiffies
- dhd
->wlanaudio_blist
[cnt
].txfail_jiffies
8060 dhd
->wlanaudio_blist
[cnt
].is_blacklist
= true;
8061 dhd
->is_wlanaudio_blist
= true;
8064 if ((!dhd
->wlanaudio_blist
[cnt
].is_blacklist
) &&
8065 (jiffies
- dhd
->wlanaudio_blist
[cnt
].txfail_jiffies
8068 bzero(&dhd
->wlanaudio_blist
[cnt
],
8069 sizeof(struct wlanaudio_blacklist
));
8073 } else if ((!dhd
->wlanaudio_blist
[cnt
].is_blacklist
) &&
8074 (!dhd
->wlanaudio_blist
[cnt
].cnt
)) {
8076 (char*)&dhd
->wlanaudio_blist
[cnt
].blacklist_addr
,
8078 dhd
->wlanaudio_blist
[cnt
].cnt
++;
8079 dhd
->wlanaudio_blist
[cnt
].txfail_jiffies
= jiffies
;
8081 bcm_ether_ntoa(&dhd
->wlanaudio_blist
[cnt
].blacklist_addr
, eabuf
);
8087 case WLC_E_AUTH_IND
:
8089 case WLC_E_DEAUTH_IND
:
8091 case WLC_E_ASSOC_IND
:
8093 case WLC_E_REASSOC_IND
:
8094 case WLC_E_DISASSOC
:
8095 case WLC_E_DISASSOC_IND
:
8100 bcm_ether_ntoa(addr
, eabuf
);
8102 return (BCME_ERROR
);
8104 for (cnt
= 0; cnt
< MAX_WLANAUDIO_BLACKLIST
; cnt
++) {
8105 if (!bcmp(&dhd
->wlanaudio_blist
[cnt
].blacklist_addr
,
8106 addr
, ETHER_ADDR_LEN
)) {
8107 /* Mac address is Same */
8108 if (dhd
->wlanaudio_blist
[cnt
].is_blacklist
) {
8109 /* black list is true */
8110 bzero(&dhd
->wlanaudio_blist
[cnt
],
8111 sizeof(struct wlanaudio_blacklist
));
8116 for (cnt
= 0; cnt
< MAX_WLANAUDIO_BLACKLIST
; cnt
++) {
8117 if (dhd
->wlanaudio_blist
[cnt
].is_blacklist
)
8123 dhd
->is_wlanaudio_blist
= false;
8131 #endif /* CUSTOMER_HW20 && WLANAUDIO */
8133 dhd_wl_host_event(dhd_info_t
*dhd
, int *ifidx
, void *pktdata
,
8134 wl_event_msg_t
*event
, void **data
)
8138 ASSERT(dhd
!= NULL
);
8140 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
8141 bcmerror
= dhd_wlanaudio_event(dhd
, ifidx
, pktdata
, event
, data
);
8143 if (bcmerror
!= BCME_OK
)
8145 #endif /* CUSTOMER_HW20 && WLANAUDIO */
8147 #ifdef SHOW_LOGTRACE
8148 bcmerror
= wl_host_event(&dhd
->pub
, ifidx
, pktdata
, event
, data
, &dhd
->event_data
);
8150 bcmerror
= wl_host_event(&dhd
->pub
, ifidx
, pktdata
, event
, data
, NULL
);
8151 #endif /* SHOW_LOGTRACE */
8153 if (bcmerror
!= BCME_OK
)
8156 #if defined(WL_WIRELESS_EXT)
8157 if (event
->bsscfgidx
== 0) {
8159 * Wireless ext is on primary interface only
8162 ASSERT(dhd
->iflist
[*ifidx
] != NULL
);
8163 ASSERT(dhd
->iflist
[*ifidx
]->net
!= NULL
);
8165 if (dhd
->iflist
[*ifidx
]->net
) {
8166 wl_iw_event(dhd
->iflist
[*ifidx
]->net
, event
, *data
);
8169 #endif /* defined(WL_WIRELESS_EXT) */
8172 ASSERT(dhd
->iflist
[*ifidx
] != NULL
);
8173 ASSERT(dhd
->iflist
[*ifidx
]->net
!= NULL
);
8174 if (dhd
->iflist
[*ifidx
]->net
)
8175 wl_cfg80211_event(dhd
->iflist
[*ifidx
]->net
, event
, *data
);
8176 #endif /* defined(WL_CFG80211) */
8181 /* send up locally generated event */
8183 dhd_sendup_event(dhd_pub_t
*dhdp
, wl_event_msg_t
*event
, void *data
)
8185 switch (ntoh32(event
->event_type
)) {
8187 /* Send up locally generated AMP HCI Events */
8188 case WLC_E_BTA_HCI_EVENT
: {
8189 struct sk_buff
*p
, *skb
;
8191 wl_event_msg_t
*p_bcm_event
;
8200 len
= ntoh32(event
->datalen
);
8201 pktlen
= sizeof(bcm_event_t
) + len
+ 2;
8203 ifidx
= dhd_ifname2idx(dhd
, event
->ifname
);
8205 if ((p
= PKTGET(dhdp
->osh
, pktlen
, FALSE
))) {
8206 ASSERT(ISALIGNED((uintptr
)PKTDATA(dhdp
->osh
, p
), sizeof(uint32
)));
8208 msg
= (bcm_event_t
*) PKTDATA(dhdp
->osh
, p
);
8210 bcopy(&dhdp
->mac
, &msg
->eth
.ether_dhost
, ETHER_ADDR_LEN
);
8211 bcopy(&dhdp
->mac
, &msg
->eth
.ether_shost
, ETHER_ADDR_LEN
);
8212 ETHER_TOGGLE_LOCALADDR(&msg
->eth
.ether_shost
);
8214 msg
->eth
.ether_type
= hton16(ETHER_TYPE_BRCM
);
8216 /* BCM Vendor specific header... */
8217 msg
->bcm_hdr
.subtype
= hton16(BCMILCP_SUBTYPE_VENDOR_LONG
);
8218 msg
->bcm_hdr
.version
= BCMILCP_BCM_SUBTYPEHDR_VERSION
;
8219 bcopy(BRCM_OUI
, &msg
->bcm_hdr
.oui
[0], DOT11_OUI_LEN
);
8221 /* vendor spec header length + pvt data length (private indication
8222 * hdr + actual message itself)
8224 msg
->bcm_hdr
.length
= hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH
+
8225 BCM_MSG_LEN
+ sizeof(wl_event_msg_t
) + (uint16
)len
);
8226 msg
->bcm_hdr
.usr_subtype
= hton16(BCMILCP_BCM_SUBTYPE_EVENT
);
8228 PKTSETLEN(dhdp
->osh
, p
, (sizeof(bcm_event_t
) + len
+ 2));
8230 /* copy wl_event_msg_t into sk_buf */
8232 /* pointer to wl_event_msg_t in sk_buf */
8233 p_bcm_event
= &msg
->event
;
8234 bcopy(event
, p_bcm_event
, sizeof(wl_event_msg_t
));
8236 /* copy hci event into sk_buf */
8237 bcopy(data
, (p_bcm_event
+ 1), len
);
8239 msg
->bcm_hdr
.length
= hton16(sizeof(wl_event_msg_t
) +
8240 ntoh16(msg
->bcm_hdr
.length
));
8241 PKTSETLEN(dhdp
->osh
, p
, (sizeof(bcm_event_t
) + len
+ 2));
8243 ptr
= (char *)(msg
+ 1);
8244 /* Last 2 bytes of the message are 0x00 0x00 to signal that there
8245 * are no ethertypes which are following this
8250 skb
= PKTTONATIVE(dhdp
->osh
, p
);
8254 ifp
= dhd
->iflist
[ifidx
];
8256 ifp
= dhd
->iflist
[0];
8259 skb
->dev
= ifp
->net
;
8260 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
8265 /* Strip header, count, deliver upward */
8266 skb_pull(skb
, ETH_HLEN
);
8268 /* Send the packet */
8269 if (in_interrupt()) {
8276 /* Could not allocate a sk_buf */
8277 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__
));
8280 } /* case WLC_E_BTA_HCI_EVENT */
8281 #endif /* WLBTAMP */
8288 #ifdef LOG_INTO_TCPDUMP
8290 dhd_sendup_log(dhd_pub_t
*dhdp
, void *data
, int data_len
)
8292 struct sk_buff
*p
, *skb
;
8299 struct ether_header eth
;
8301 pktlen
= sizeof(eth
) + data_len
;
8304 if ((p
= PKTGET(dhdp
->osh
, pktlen
, FALSE
))) {
8305 ASSERT(ISALIGNED((uintptr
)PKTDATA(dhdp
->osh
, p
), sizeof(uint32
)));
8307 bcopy(&dhdp
->mac
, ð
.ether_dhost
, ETHER_ADDR_LEN
);
8308 bcopy(&dhdp
->mac
, ð
.ether_shost
, ETHER_ADDR_LEN
);
8309 ETHER_TOGGLE_LOCALADDR(ð
.ether_shost
);
8310 eth
.ether_type
= hton16(ETHER_TYPE_BRCM
);
8312 bcopy((void *)ð
, PKTDATA(dhdp
->osh
, p
), sizeof(eth
));
8313 bcopy(data
, PKTDATA(dhdp
->osh
, p
) + sizeof(eth
), data_len
);
8314 skb
= PKTTONATIVE(dhdp
->osh
, p
);
8315 skb_data
= skb
->data
;
8318 ifidx
= dhd_ifname2idx(dhd
, "wlan0");
8319 ifp
= dhd
->iflist
[ifidx
];
8321 ifp
= dhd
->iflist
[0];
8324 skb
->dev
= ifp
->net
;
8325 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
8326 skb
->data
= skb_data
;
8329 /* Strip header, count, deliver upward */
8330 skb_pull(skb
, ETH_HLEN
);
8332 /* Send the packet */
8333 if (in_interrupt()) {
8340 /* Could not allocate a sk_buf */
8341 DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__
));
8344 #endif /* LOG_INTO_TCPDUMP */
8346 void dhd_wait_for_event(dhd_pub_t
*dhd
, bool *lockvar
)
8348 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8349 struct dhd_info
*dhdinfo
= dhd
->info
;
8351 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8352 int timeout
= msecs_to_jiffies(IOCTL_RESP_TIMEOUT
);
8354 int timeout
= (IOCTL_RESP_TIMEOUT
/ 1000) * HZ
;
8355 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
8357 dhd_os_sdunlock(dhd
);
8358 wait_event_timeout(dhdinfo
->ctrl_wait
, (*lockvar
== FALSE
), timeout
);
8360 #endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
8364 void dhd_wait_event_wakeup(dhd_pub_t
*dhd
)
8366 #if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
8367 struct dhd_info
*dhdinfo
= dhd
->info
;
8368 if (waitqueue_active(&dhdinfo
->ctrl_wait
))
8369 wake_up(&dhdinfo
->ctrl_wait
);
8374 #if defined(BCMSDIO) || defined(BCMPCIE)
8376 dhd_net_bus_devreset(struct net_device
*dev
, uint8 flag
)
8379 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8382 /* Issue wl down command before resetting the chip */
8383 if (dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_DOWN
, NULL
, 0, TRUE
, 0) < 0) {
8384 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__
));
8386 #ifdef PROP_TXSTATUS
8387 if (dhd
->pub
.wlfc_enabled
)
8388 dhd_wlfc_deinit(&dhd
->pub
);
8389 #endif /* PROP_TXSTATUS */
8391 if (dhd
->pub
.pno_state
)
8392 dhd_pno_deinit(&dhd
->pub
);
8398 dhd_update_fw_nv_path(dhd
);
8399 /* update firmware and nvram path to sdio bus */
8400 dhd_bus_update_fw_nv_path(dhd
->pub
.bus
,
8401 dhd
->fw_path
, dhd
->nv_path
, dhd
->conf_path
);
8403 #endif /* BCMSDIO */
8405 ret
= dhd_bus_devreset(&dhd
->pub
, flag
);
8407 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__
, ret
));
8416 dhd_net_bus_suspend(struct net_device
*dev
)
8418 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8419 return dhd_bus_suspend(&dhd
->pub
);
8423 dhd_net_bus_resume(struct net_device
*dev
, uint8 stage
)
8425 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8426 return dhd_bus_resume(&dhd
->pub
, stage
);
8429 #endif /* BCMSDIO */
8430 #endif /* BCMSDIO || BCMPCIE */
8432 int net_os_set_suspend_disable(struct net_device
*dev
, int val
)
8434 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8438 ret
= dhd
->pub
.suspend_disable_flag
;
8439 dhd
->pub
.suspend_disable_flag
= val
;
8444 int net_os_set_suspend(struct net_device
*dev
, int val
, int force
)
8447 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8450 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8451 ret
= dhd_set_suspend(val
, &dhd
->pub
);
8453 ret
= dhd_suspend_resume_helper(dhd
, val
, force
);
8456 wl_cfg80211_update_power_mode(dev
);
8462 int net_os_set_suspend_bcn_li_dtim(struct net_device
*dev
, int val
)
8464 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8467 dhd
->pub
.suspend_bcn_li_dtim
= val
;
8472 #ifdef PKT_FILTER_SUPPORT
8473 int net_os_rxfilter_add_remove(struct net_device
*dev
, int add_remove
, int num
)
8475 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8476 char *filterp
= NULL
;
8480 if (!dhd_master_mode
)
8481 add_remove
= !add_remove
;
8483 if (!dhd
|| (num
== DHD_UNICAST_FILTER_NUM
) ||
8484 (num
== DHD_MDNS_FILTER_NUM
))
8486 if (num
>= dhd
->pub
.pktfilter_count
)
8489 case DHD_BROADCAST_FILTER_NUM
:
8490 filterp
= "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
8493 case DHD_MULTICAST4_FILTER_NUM
:
8494 filterp
= "102 0 0 0 0xFFFFFF 0x01005E";
8497 case DHD_MULTICAST6_FILTER_NUM
:
8498 filterp
= "103 0 0 0 0xFFFF 0x3333";
8507 dhd
->pub
.pktfilter
[num
] = filterp
;
8508 dhd_pktfilter_offload_set(&dhd
->pub
, dhd
->pub
.pktfilter
[num
]);
8509 } else { /* Delete filter */
8510 if (dhd
->pub
.pktfilter
[num
] != NULL
) {
8511 dhd_pktfilter_offload_delete(&dhd
->pub
, filter_id
);
8512 dhd
->pub
.pktfilter
[num
] = NULL
;
8518 int dhd_os_enable_packet_filter(dhd_pub_t
*dhdp
, int val
)
8523 /* Packet filtering is set only if we still in early-suspend and
8524 * we need either to turn it ON or turn it OFF
8525 * We can always turn it OFF in case of early-suspend, but we turn it
8526 * back ON only if suspend_disable_flag was not set
8528 if (dhdp
&& dhdp
->up
) {
8529 if (dhdp
->in_suspend
) {
8530 if (!val
|| (val
&& !dhdp
->suspend_disable_flag
))
8531 dhd_enable_packet_filter(val
, dhdp
);
8537 /* function to enable/disable packet for Network device */
8538 int net_os_enable_packet_filter(struct net_device
*dev
, int val
)
8540 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8542 return dhd_os_enable_packet_filter(&dhd
->pub
, val
);
8544 #endif /* PKT_FILTER_SUPPORT */
8547 dhd_dev_init_ioctl(struct net_device
*dev
)
8549 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8552 if ((ret
= dhd_sync_with_dongle(&dhd
->pub
)) < 0)
8560 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
8562 dhd_dev_pno_stop_for_ssid(struct net_device
*dev
)
8564 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8566 return (dhd_pno_stop_for_ssid(&dhd
->pub
));
8568 /* Linux wrapper to call common dhd_pno_set_for_ssid */
8570 dhd_dev_pno_set_for_ssid(struct net_device
*dev
, wlc_ssid_t
* ssids_local
, int nssid
,
8571 uint16 scan_fr
, int pno_repeat
, int pno_freq_expo_max
, uint16
*channel_list
, int nchan
)
8573 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8575 return (dhd_pno_set_for_ssid(&dhd
->pub
, ssids_local
, nssid
, scan_fr
,
8576 pno_repeat
, pno_freq_expo_max
, channel_list
, nchan
));
8579 /* Linux wrapper to call common dhd_pno_enable */
8581 dhd_dev_pno_enable(struct net_device
*dev
, int enable
)
8583 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8585 return (dhd_pno_enable(&dhd
->pub
, enable
));
8588 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
8590 dhd_dev_pno_set_for_hotlist(struct net_device
*dev
, wl_pfn_bssid_t
*p_pfn_bssid
,
8591 struct dhd_pno_hotlist_params
*hotlist_params
)
8593 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8594 return (dhd_pno_set_for_hotlist(&dhd
->pub
, p_pfn_bssid
, hotlist_params
));
8596 /* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
8598 dhd_dev_pno_stop_for_batch(struct net_device
*dev
)
8600 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8601 return (dhd_pno_stop_for_batch(&dhd
->pub
));
8603 /* Linux wrapper to call common dhd_dev_pno_set_for_batch */
8605 dhd_dev_pno_set_for_batch(struct net_device
*dev
, struct dhd_pno_batch_params
*batch_params
)
8607 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8608 return (dhd_pno_set_for_batch(&dhd
->pub
, batch_params
));
8610 /* Linux wrapper to call common dhd_dev_pno_get_for_batch */
8612 dhd_dev_pno_get_for_batch(struct net_device
*dev
, char *buf
, int bufsize
)
8614 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8615 return (dhd_pno_get_for_batch(&dhd
->pub
, buf
, bufsize
, PNO_STATUS_NORMAL
));
8617 #endif /* PNO_SUPPORT */
8619 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
8620 static void dhd_hang_process(void *dhd_info
, void *event_info
, u8 event
)
8623 struct net_device
*dev
;
8625 dhd
= (dhd_info_t
*)dhd_info
;
8626 dev
= dhd
->iflist
[0]->net
;
8632 #if defined(WL_WIRELESS_EXT)
8633 wl_iw_send_priv_event(dev
, "HANG");
8635 #if defined(WL_CFG80211)
8636 wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
8642 int dhd_os_send_hang_message(dhd_pub_t
*dhdp
)
8646 if (!dhdp
->hang_was_sent
) {
8647 dhdp
->hang_was_sent
= 1;
8648 dhd_deferred_schedule_work(dhdp
->info
->dhd_deferred_wq
, (void *)dhdp
,
8649 DHD_WQ_WORK_HANG_MSG
, dhd_hang_process
, DHD_WORK_PRIORITY_HIGH
);
8655 int net_os_send_hang_message(struct net_device
*dev
)
8657 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8661 /* Report FW problem when enabled */
8662 if (dhd
->pub
.hang_report
) {
8663 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
8664 ret
= dhd_os_send_hang_message(&dhd
->pub
);
8666 ret
= wl_cfg80211_hang(dev
, WLAN_REASON_UNSPECIFIED
);
8669 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
8671 /* Enforce bus down to stop any future traffic */
8672 dhd
->pub
.busstate
= DHD_BUS_DOWN
;
8677 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
8680 int dhd_net_wifi_platform_set_power(struct net_device
*dev
, bool on
, unsigned long delay_msec
)
8682 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8683 return wifi_platform_set_power(dhd
->adapter
, on
, delay_msec
);
8686 void dhd_get_customized_country_code(struct net_device
*dev
, char *country_iso_code
,
8687 wl_country_t
*cspec
)
8689 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8690 get_customized_country_code(dhd
->adapter
, country_iso_code
, cspec
);
8692 void dhd_bus_country_set(struct net_device
*dev
, wl_country_t
*cspec
, bool notify
)
8694 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8695 if (dhd
&& dhd
->pub
.up
) {
8696 memcpy(&dhd
->pub
.dhd_cspec
, cspec
, sizeof(wl_country_t
));
8698 wl_update_wiphybands(NULL
, notify
);
8703 void dhd_bus_band_set(struct net_device
*dev
, uint band
)
8705 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8706 if (dhd
&& dhd
->pub
.up
) {
8708 wl_update_wiphybands(NULL
, true);
8713 int dhd_net_set_fw_path(struct net_device
*dev
, char *fw
)
8715 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8717 if (!fw
|| fw
[0] == '\0')
8720 strncpy(dhd
->fw_path
, fw
, sizeof(dhd
->fw_path
) - 1);
8721 dhd
->fw_path
[sizeof(dhd
->fw_path
)-1] = '\0';
8724 if (strstr(fw
, "apsta") != NULL
) {
8725 DHD_INFO(("GOT APSTA FIRMWARE\n"));
8726 ap_fw_loaded
= TRUE
;
8728 DHD_INFO(("GOT STA FIRMWARE\n"));
8729 ap_fw_loaded
= FALSE
;
8735 void dhd_net_if_lock(struct net_device
*dev
)
8737 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8738 dhd_net_if_lock_local(dhd
);
8741 void dhd_net_if_unlock(struct net_device
*dev
)
8743 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8744 dhd_net_if_unlock_local(dhd
);
8747 static void dhd_net_if_lock_local(dhd_info_t
*dhd
)
8749 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8751 mutex_lock(&dhd
->dhd_net_if_mutex
);
8755 static void dhd_net_if_unlock_local(dhd_info_t
*dhd
)
8757 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8759 mutex_unlock(&dhd
->dhd_net_if_mutex
);
8763 static void dhd_suspend_lock(dhd_pub_t
*pub
)
8765 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8766 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8768 mutex_lock(&dhd
->dhd_suspend_mutex
);
8772 static void dhd_suspend_unlock(dhd_pub_t
*pub
)
8774 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
8775 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8777 mutex_unlock(&dhd
->dhd_suspend_mutex
);
8781 unsigned long dhd_os_general_spin_lock(dhd_pub_t
*pub
)
8783 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8784 unsigned long flags
= 0;
8787 spin_lock_irqsave(&dhd
->dhd_lock
, flags
);
8792 void dhd_os_general_spin_unlock(dhd_pub_t
*pub
, unsigned long flags
)
8794 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8797 spin_unlock_irqrestore(&dhd
->dhd_lock
, flags
);
8800 /* Linux specific multipurpose spinlock API */
8802 dhd_os_spin_lock_init(osl_t
*osh
)
8804 /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
8805 /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
8806 /* and this results in kernel asserts in internal builds */
8807 spinlock_t
* lock
= MALLOC(osh
, sizeof(spinlock_t
) + 4);
8809 spin_lock_init(lock
);
8810 return ((void *)lock
);
8813 dhd_os_spin_lock_deinit(osl_t
*osh
, void *lock
)
8815 MFREE(osh
, lock
, sizeof(spinlock_t
) + 4);
8818 dhd_os_spin_lock(void *lock
)
8820 unsigned long flags
= 0;
8823 spin_lock_irqsave((spinlock_t
*)lock
, flags
);
8828 dhd_os_spin_unlock(void *lock
, unsigned long flags
)
8831 spin_unlock_irqrestore((spinlock_t
*)lock
, flags
);
8835 dhd_get_pend_8021x_cnt(dhd_info_t
*dhd
)
8837 return (atomic_read(&dhd
->pend_8021x_cnt
));
8840 #define MAX_WAIT_FOR_8021X_TX 100
8843 dhd_wait_pend8021x(struct net_device
*dev
)
8845 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8846 int timeout
= msecs_to_jiffies(10);
8847 int ntimes
= MAX_WAIT_FOR_8021X_TX
;
8848 int pend
= dhd_get_pend_8021x_cnt(dhd
);
8850 while (ntimes
&& pend
) {
8852 set_current_state(TASK_INTERRUPTIBLE
);
8853 DHD_PERIM_UNLOCK(&dhd
->pub
);
8854 schedule_timeout(timeout
);
8855 DHD_PERIM_LOCK(&dhd
->pub
);
8856 set_current_state(TASK_RUNNING
);
8859 pend
= dhd_get_pend_8021x_cnt(dhd
);
8863 atomic_set(&dhd
->pend_8021x_cnt
, 0);
8864 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__
));
8871 write_to_file(dhd_pub_t
*dhd
, uint8
*buf
, int size
)
8875 mm_segment_t old_fs
;
8878 /* change to KERNEL_DS address limit */
8882 /* open file to write */
8883 fp
= filp_open("/tmp/mem_dump", O_WRONLY
|O_CREAT
, 0640);
8885 printf("%s: open file error\n", __FUNCTION__
);
8890 /* Write buf to file */
8891 fp
->f_op
->write(fp
, buf
, size
, &pos
);
8894 /* free buf before return */
8895 MFREE(dhd
->osh
, buf
, size
);
8896 /* close file before return */
8898 filp_close(fp
, current
->files
);
8899 /* restore previous address limit */
8904 #endif /* DHD_DEBUG */
8906 int dhd_os_wake_lock_timeout(dhd_pub_t
*pub
)
8908 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8909 unsigned long flags
;
8913 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
8914 ret
= dhd
->wakelock_rx_timeout_enable
> dhd
->wakelock_ctrl_timeout_enable
?
8915 dhd
->wakelock_rx_timeout_enable
: dhd
->wakelock_ctrl_timeout_enable
;
8916 #ifdef CONFIG_HAS_WAKELOCK
8917 if (dhd
->wakelock_rx_timeout_enable
)
8918 wake_lock_timeout(&dhd
->wl_rxwake
,
8919 msecs_to_jiffies(dhd
->wakelock_rx_timeout_enable
));
8920 if (dhd
->wakelock_ctrl_timeout_enable
)
8921 wake_lock_timeout(&dhd
->wl_ctrlwake
,
8922 msecs_to_jiffies(dhd
->wakelock_ctrl_timeout_enable
));
8924 dhd
->wakelock_rx_timeout_enable
= 0;
8925 dhd
->wakelock_ctrl_timeout_enable
= 0;
8926 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
8931 int net_os_wake_lock_timeout(struct net_device
*dev
)
8933 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8937 ret
= dhd_os_wake_lock_timeout(&dhd
->pub
);
8941 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t
*pub
, int val
)
8943 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8944 unsigned long flags
;
8947 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
8948 if (val
> dhd
->wakelock_rx_timeout_enable
)
8949 dhd
->wakelock_rx_timeout_enable
= val
;
8950 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
8955 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t
*pub
, int val
)
8957 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8958 unsigned long flags
;
8961 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
8962 if (val
> dhd
->wakelock_ctrl_timeout_enable
)
8963 dhd
->wakelock_ctrl_timeout_enable
= val
;
8964 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
8969 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t
*pub
)
8971 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
8972 unsigned long flags
;
8975 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
8976 dhd
->wakelock_ctrl_timeout_enable
= 0;
8977 #ifdef CONFIG_HAS_WAKELOCK
8978 if (wake_lock_active(&dhd
->wl_ctrlwake
))
8979 wake_unlock(&dhd
->wl_ctrlwake
);
8981 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
8986 int net_os_wake_lock_rx_timeout_enable(struct net_device
*dev
, int val
)
8988 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
8992 ret
= dhd_os_wake_lock_rx_timeout_enable(&dhd
->pub
, val
);
8996 int net_os_wake_lock_ctrl_timeout_enable(struct net_device
*dev
, int val
)
8998 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9002 ret
= dhd_os_wake_lock_ctrl_timeout_enable(&dhd
->pub
, val
);
9006 int dhd_os_wake_lock(dhd_pub_t
*pub
)
9008 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9009 unsigned long flags
;
9013 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9015 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
9016 #ifdef CONFIG_HAS_WAKELOCK
9017 wake_lock(&dhd
->wl_wifi
);
9018 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9019 dhd_bus_dev_pm_stay_awake(pub
);
9022 dhd
->wakelock_counter
++;
9023 ret
= dhd
->wakelock_counter
;
9024 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9029 int net_os_wake_lock(struct net_device
*dev
)
9031 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9035 ret
= dhd_os_wake_lock(&dhd
->pub
);
9039 int dhd_os_wake_unlock(dhd_pub_t
*pub
)
9041 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9042 unsigned long flags
;
9045 dhd_os_wake_lock_timeout(pub
);
9047 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9048 if (dhd
->wakelock_counter
> 0) {
9049 dhd
->wakelock_counter
--;
9050 if (dhd
->wakelock_counter
== 0 && !dhd
->waive_wakelock
) {
9051 #ifdef CONFIG_HAS_WAKELOCK
9052 wake_unlock(&dhd
->wl_wifi
);
9053 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9054 dhd_bus_dev_pm_relax(pub
);
9057 ret
= dhd
->wakelock_counter
;
9059 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9064 int dhd_os_check_wakelock(dhd_pub_t
*pub
)
9066 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
9067 KERNEL_VERSION(2, 6, 36)))
9072 dhd
= (dhd_info_t
*)(pub
->info
);
9073 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
9075 #ifdef CONFIG_HAS_WAKELOCK
9076 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9077 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
9078 (wake_lock_active(&dhd
->wl_wdwake
))))
9080 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9081 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
9087 int dhd_os_check_wakelock_all(dhd_pub_t
*pub
)
9089 #if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
9090 KERNEL_VERSION(2, 6, 36)))
9095 dhd
= (dhd_info_t
*)(pub
->info
);
9096 #endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
9098 #ifdef CONFIG_HAS_WAKELOCK
9099 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
9100 if (dhd
&& (wake_lock_active(&dhd
->wl_wifi
) ||
9101 wake_lock_active(&dhd
->wl_wdwake
) ||
9102 wake_lock_active(&dhd
->wl_rxwake
) ||
9103 wake_lock_active(&dhd
->wl_ctrlwake
))) {
9106 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9107 if (dhd
&& (dhd
->wakelock_counter
> 0) && dhd_bus_dev_pm_enabled(pub
))
9113 int net_os_wake_unlock(struct net_device
*dev
)
9115 dhd_info_t
*dhd
= DHD_DEV_INFO(dev
);
9119 ret
= dhd_os_wake_unlock(&dhd
->pub
);
9123 int dhd_os_wd_wake_lock(dhd_pub_t
*pub
)
9125 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9126 unsigned long flags
;
9130 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9131 #ifdef CONFIG_HAS_WAKELOCK
9132 /* if wakelock_wd_counter was never used : lock it at once */
9133 if (!dhd
->wakelock_wd_counter
)
9134 wake_lock(&dhd
->wl_wdwake
);
9136 dhd
->wakelock_wd_counter
++;
9137 ret
= dhd
->wakelock_wd_counter
;
9138 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9143 int dhd_os_wd_wake_unlock(dhd_pub_t
*pub
)
9145 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9146 unsigned long flags
;
9150 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9151 if (dhd
->wakelock_wd_counter
) {
9152 dhd
->wakelock_wd_counter
= 0;
9153 #ifdef CONFIG_HAS_WAKELOCK
9154 wake_unlock(&dhd
->wl_wdwake
);
9157 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9162 #ifdef BCMPCIE_OOB_HOST_WAKE
9163 int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t
*pub
, int val
)
9165 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9169 #ifdef CONFIG_HAS_WAKELOCK
9170 wake_lock_timeout(&dhd
->wl_intrwake
, msecs_to_jiffies(val
));
9176 int dhd_os_oob_irq_wake_unlock(dhd_pub_t
*pub
)
9178 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9182 #ifdef CONFIG_HAS_WAKELOCK
9183 /* if wl_intrwake is active, unlock it */
9184 if (wake_lock_active(&dhd
->wl_intrwake
)) {
9185 wake_unlock(&dhd
->wl_intrwake
);
9191 #endif /* BCMPCIE_OOB_HOST_WAKE */
9193 /* waive wakelocks for operations such as IOVARs in suspend function, must be closed
9194 * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
9196 int dhd_os_wake_lock_waive(dhd_pub_t
*pub
)
9198 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9199 unsigned long flags
;
9203 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9204 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
9205 if (dhd
->waive_wakelock
== FALSE
) {
9206 /* record current lock status */
9207 dhd
->wakelock_before_waive
= dhd
->wakelock_counter
;
9208 dhd
->waive_wakelock
= TRUE
;
9210 ret
= dhd
->wakelock_wd_counter
;
9211 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9216 int dhd_os_wake_lock_restore(dhd_pub_t
*pub
)
9218 dhd_info_t
*dhd
= (dhd_info_t
*)(pub
->info
);
9219 unsigned long flags
;
9225 spin_lock_irqsave(&dhd
->wakelock_spinlock
, flags
);
9226 /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
9227 if (!dhd
->waive_wakelock
)
9230 dhd
->waive_wakelock
= FALSE
;
9231 /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
9232 * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
9233 * the lock in between, do the same by calling wake_unlock or pm_relax
9235 if (dhd
->wakelock_before_waive
== 0 && dhd
->wakelock_counter
> 0) {
9236 #ifdef CONFIG_HAS_WAKELOCK
9237 wake_lock(&dhd
->wl_wifi
);
9238 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9239 dhd_bus_dev_pm_stay_awake(&dhd
->pub
);
9241 } else if (dhd
->wakelock_before_waive
> 0 && dhd
->wakelock_counter
== 0) {
9242 #ifdef CONFIG_HAS_WAKELOCK
9243 wake_unlock(&dhd
->wl_wifi
);
9244 #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
9245 dhd_bus_dev_pm_relax(&dhd
->pub
);
9248 dhd
->wakelock_before_waive
= 0;
9250 ret
= dhd
->wakelock_wd_counter
;
9251 spin_unlock_irqrestore(&dhd
->wakelock_spinlock
, flags
);
9255 bool dhd_os_check_if_up(dhd_pub_t
*pub
)
9262 /* function to collect firmware, chip id and chip version info */
9263 void dhd_set_version_info(dhd_pub_t
*dhdp
, char *fw
)
9267 i
= snprintf(info_string
, sizeof(info_string
),
9268 " Driver: %s\n Firmware: %s ", EPI_VERSION_STR
, fw
);
9269 printf("%s\n", info_string
);
9274 i
= snprintf(&info_string
[i
], sizeof(info_string
) - i
,
9275 "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp
),
9276 dhd_bus_chiprev_id(dhdp
), dhd_bus_chippkg_id(dhdp
));
9279 int dhd_ioctl_entry_local(struct net_device
*net
, wl_ioctl_t
*ioc
, int cmd
)
9283 dhd_info_t
*dhd
= NULL
;
9285 if (!net
|| !DEV_PRIV(net
)) {
9286 DHD_ERROR(("%s invalid parameter\n", __FUNCTION__
));
9290 dhd
= DHD_DEV_INFO(net
);
9294 ifidx
= dhd_net2idx(dhd
, net
);
9295 if (ifidx
== DHD_BAD_IF
) {
9296 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__
));
9300 DHD_OS_WAKE_LOCK(&dhd
->pub
);
9301 DHD_PERIM_LOCK(&dhd
->pub
);
9303 ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, ioc
, ioc
->buf
, ioc
->len
);
9304 dhd_check_hang(net
, &dhd
->pub
, ret
);
9306 DHD_PERIM_UNLOCK(&dhd
->pub
);
9307 DHD_OS_WAKE_UNLOCK(&dhd
->pub
);
9312 bool dhd_os_check_hang(dhd_pub_t
*dhdp
, int ifidx
, int ret
)
9314 struct net_device
*net
;
9316 net
= dhd_idx2net(dhdp
, ifidx
);
9318 DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__
, ifidx
));
9322 return dhd_check_hang(net
, dhdp
, ret
);
9325 /* Return instance */
9326 int dhd_get_instance(dhd_pub_t
*dhdp
)
9328 return dhdp
->info
->unit
;
#ifdef PROP_TXSTATUS
/* Platform hooks for wireless flow control — no-ops on this platform. */
void dhd_wlfc_plat_init(void *dhd)
{
	return;
}

void dhd_wlfc_plat_deinit(void *dhd)
{
	return;
}

bool dhd_wlfc_skip_fc(void)
{
	return FALSE;
}
#endif /* PROP_TXSTATUS */
9352 #include <linux/debugfs.h>
9354 extern uint32
dhd_readregl(void *bp
, uint32 addr
);
9355 extern uint32
dhd_writeregl(void *bp
, uint32 addr
, uint32 data
);
9357 typedef struct dhd_dbgfs
{
9358 struct dentry
*debugfs_dir
;
9359 struct dentry
*debugfs_mem
;
9364 dhd_dbgfs_t g_dbgfs
;
9367 dhd_dbg_state_open(struct inode
*inode
, struct file
*file
)
9369 file
->private_data
= inode
->i_private
;
9374 dhd_dbg_state_read(struct file
*file
, char __user
*ubuf
,
9375 size_t count
, loff_t
*ppos
)
9384 if (pos
>= g_dbgfs
.size
|| !count
)
9386 if (count
> g_dbgfs
.size
- pos
)
9387 count
= g_dbgfs
.size
- pos
;
9389 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
9390 tmp
= dhd_readregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3));
9392 ret
= copy_to_user(ubuf
, &tmp
, 4);
9397 *ppos
= pos
+ count
;
9405 dhd_debugfs_write(struct file
*file
, const char __user
*ubuf
, size_t count
, loff_t
*ppos
)
9413 if (pos
>= g_dbgfs
.size
|| !count
)
9415 if (count
> g_dbgfs
.size
- pos
)
9416 count
= g_dbgfs
.size
- pos
;
9418 ret
= copy_from_user(&buf
, ubuf
, sizeof(uint32
));
9422 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
9423 dhd_writeregl(g_dbgfs
.dhdp
->bus
, file
->f_pos
& (~3), buf
);
9430 dhd_debugfs_lseek(struct file
*file
, loff_t off
, int whence
)
9439 pos
= file
->f_pos
+ off
;
9442 pos
= g_dbgfs
.size
- off
;
9444 return (pos
< 0 || pos
> g_dbgfs
.size
) ? -EINVAL
: (file
->f_pos
= pos
);
9447 static const struct file_operations dhd_dbg_state_ops
= {
9448 .read
= dhd_dbg_state_read
,
9449 .write
= dhd_debugfs_write
,
9450 .open
= dhd_dbg_state_open
,
9451 .llseek
= dhd_debugfs_lseek
9454 static void dhd_dbg_create(void)
9456 if (g_dbgfs
.debugfs_dir
) {
9457 g_dbgfs
.debugfs_mem
= debugfs_create_file("mem", 0644, g_dbgfs
.debugfs_dir
,
9458 NULL
, &dhd_dbg_state_ops
);
9462 void dhd_dbg_init(dhd_pub_t
*dhdp
)
9466 g_dbgfs
.dhdp
= dhdp
;
9467 g_dbgfs
.size
= 0x20000000; /* Allow access to various cores regs */
9469 g_dbgfs
.debugfs_dir
= debugfs_create_dir("dhd", 0);
9470 if (IS_ERR(g_dbgfs
.debugfs_dir
)) {
9471 err
= PTR_ERR(g_dbgfs
.debugfs_dir
);
9472 g_dbgfs
.debugfs_dir
= NULL
;
9481 void dhd_dbg_remove(void)
9483 debugfs_remove(g_dbgfs
.debugfs_mem
);
9484 debugfs_remove(g_dbgfs
.debugfs_dir
);
9486 bzero((unsigned char *) &g_dbgfs
, sizeof(g_dbgfs
));
9489 #endif /* ifdef BCMDBGFS */
9494 void dhd_htsf_addtxts(dhd_pub_t
*dhdp
, void *pktbuf
)
9496 dhd_info_t
*dhd
= (dhd_info_t
*)(dhdp
->info
);
9497 struct sk_buff
*skb
;
9499 uint16 dport
= 0, oldmagic
= 0xACAC;
9503 /* timestamp packet */
9505 p1
= (char*) PKTDATA(dhdp
->osh
, pktbuf
);
9507 if (PKTLEN(dhdp
->osh
, pktbuf
) > HTSF_MINLEN
) {
9508 /* memcpy(&proto, p1+26, 4); */
9509 memcpy(&dport
, p1
+40, 2);
9510 /* proto = ((ntoh32(proto))>> 16) & 0xFF; */
9511 dport
= ntoh16(dport
);
9514 /* timestamp only if icmp or udb iperf with port 5555 */
9515 /* if (proto == 17 && dport == tsport) { */
9516 if (dport
>= tsport
&& dport
<= tsport
+ 20) {
9518 skb
= (struct sk_buff
*) pktbuf
;
9520 htsf
= dhd_get_htsf(dhd
, 0);
9521 memset(skb
->data
+ 44, 0, 2); /* clear checksum */
9522 memcpy(skb
->data
+82, &oldmagic
, 2);
9523 memcpy(skb
->data
+84, &htsf
, 4);
9525 memset(&ts
, 0, sizeof(htsfts_t
));
9526 ts
.magic
= HTSFMAGIC
;
9527 ts
.prio
= PKTPRIO(pktbuf
);
9528 ts
.seqnum
= htsf_seqnum
++;
9529 ts
.c10
= get_cycles();
9531 ts
.endmagic
= HTSFENDMAGIC
;
9533 memcpy(skb
->data
+ HTSF_HOSTOFFSET
, &ts
, sizeof(ts
));
9537 static void dhd_dump_htsfhisto(histo_t
*his
, char *s
)
9539 int pktcnt
= 0, curval
= 0, i
;
9540 for (i
= 0; i
< (NUMBIN
-2); i
++) {
9542 printf("%d ", his
->bin
[i
]);
9543 pktcnt
+= his
->bin
[i
];
9545 printf(" max: %d TotPkt: %d neg: %d [%s]\n", his
->bin
[NUMBIN
-2], pktcnt
,
9546 his
->bin
[NUMBIN
-1], s
);
9550 void sorttobin(int value
, histo_t
*histo
)
9555 histo
->bin
[NUMBIN
-1]++;
9558 if (value
> histo
->bin
[NUMBIN
-2]) /* store the max value */
9559 histo
->bin
[NUMBIN
-2] = value
;
9561 for (i
= 0; i
< (NUMBIN
-2); i
++) {
9562 binval
+= 500; /* 500m s bins */
9563 if (value
<= binval
) {
9568 histo
->bin
[NUMBIN
-3]++;
9572 void dhd_htsf_addrxts(dhd_pub_t
*dhdp
, void *pktbuf
)
9574 dhd_info_t
*dhd
= (dhd_info_t
*)dhdp
->info
;
9575 struct sk_buff
*skb
;
9578 int d1
, d2
, d3
, end2end
;
9582 skb
= PKTTONATIVE(dhdp
->osh
, pktbuf
);
9583 p1
= (char*)PKTDATA(dhdp
->osh
, pktbuf
);
9585 if (PKTLEN(osh
, pktbuf
) > HTSF_MINLEN
) {
9586 memcpy(&old_magic
, p1
+78, 2);
9587 htsf_ts
= (htsfts_t
*) (p1
+ HTSF_HOSTOFFSET
- 4);
9592 if (htsf_ts
->magic
== HTSFMAGIC
) {
9593 htsf_ts
->tE0
= dhd_get_htsf(dhd
, 0);
9594 htsf_ts
->cE0
= get_cycles();
9597 if (old_magic
== 0xACAC) {
9600 htsf
= dhd_get_htsf(dhd
, 0);
9601 memcpy(skb
->data
+92, &htsf
, sizeof(uint32
));
9603 memcpy(&ts
[tsidx
].t1
, skb
->data
+80, 16);
9605 d1
= ts
[tsidx
].t2
- ts
[tsidx
].t1
;
9606 d2
= ts
[tsidx
].t3
- ts
[tsidx
].t2
;
9607 d3
= ts
[tsidx
].t4
- ts
[tsidx
].t3
;
9608 end2end
= ts
[tsidx
].t4
- ts
[tsidx
].t1
;
9610 sorttobin(d1
, &vi_d1
);
9611 sorttobin(d2
, &vi_d2
);
9612 sorttobin(d3
, &vi_d3
);
9613 sorttobin(end2end
, &vi_d4
);
9615 if (end2end
> 0 && end2end
> maxdelay
) {
9617 maxdelaypktno
= tspktcnt
;
9618 memcpy(&maxdelayts
, &ts
[tsidx
], 16);
9620 if (++tsidx
>= TSMAX
)
9625 uint32
dhd_get_htsf(dhd_info_t
*dhd
, int ifidx
)
9627 uint32 htsf
= 0, cur_cycle
, delta
, delta_us
;
9628 uint32 factor
, baseval
, baseval2
;
9634 if (cur_cycle
> dhd
->htsf
.last_cycle
)
9635 delta
= cur_cycle
- dhd
->htsf
.last_cycle
;
9637 delta
= cur_cycle
+ (0xFFFFFFFF - dhd
->htsf
.last_cycle
);
9642 if (dhd
->htsf
.coef
) {
9643 /* times ten to get the first digit */
9644 factor
= (dhd
->htsf
.coef
*10 + dhd
->htsf
.coefdec1
);
9645 baseval
= (delta
*10)/factor
;
9646 baseval2
= (delta
*10)/(factor
+1);
9647 delta_us
= (baseval
- (((baseval
- baseval2
) * dhd
->htsf
.coefdec2
)) / 10);
9648 htsf
= (delta_us
<< 4) + dhd
->htsf
.last_tsf
+ HTSF_BUS_DELAY
;
9651 DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
9657 static void dhd_dump_latency(void)
9660 int d1
, d2
, d3
, d4
, d5
;
9662 printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
9663 for (i
= 0; i
< TSMAX
; i
++) {
9664 d1
= ts
[i
].t2
- ts
[i
].t1
;
9665 d2
= ts
[i
].t3
- ts
[i
].t2
;
9666 d3
= ts
[i
].t4
- ts
[i
].t3
;
9667 d4
= ts
[i
].t4
- ts
[i
].t1
;
9668 d5
= ts
[max
].t4
-ts
[max
].t1
;
9669 if (d4
> d5
&& d4
> 0) {
9672 printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
9673 ts
[i
].t1
, ts
[i
].t2
, ts
[i
].t3
, ts
[i
].t4
,
9677 printf("current idx = %d \n", tsidx
);
9679 printf("Highest latency %d pkt no.%d total=%d\n", maxdelay
, maxdelaypktno
, tspktcnt
);
9680 printf("%08X %08X %08X %08X \t%d %d %d %d\n",
9681 maxdelayts
.t1
, maxdelayts
.t2
, maxdelayts
.t3
, maxdelayts
.t4
,
9682 maxdelayts
.t2
- maxdelayts
.t1
,
9683 maxdelayts
.t3
- maxdelayts
.t2
,
9684 maxdelayts
.t4
- maxdelayts
.t3
,
9685 maxdelayts
.t4
- maxdelayts
.t1
);
9690 dhd_ioctl_htsf_get(dhd_info_t
*dhd
, int ifidx
)
9702 memset(&ioc
, 0, sizeof(ioc
));
9703 memset(&tsf_buf
, 0, sizeof(tsf_buf
));
9705 ioc
.cmd
= WLC_GET_VAR
;
9707 ioc
.len
= (uint
)sizeof(buf
);
9710 strncpy(buf
, "tsf", sizeof(buf
) - 1);
9711 buf
[sizeof(buf
) - 1] = '\0';
9712 s1
= dhd_get_htsf(dhd
, 0);
9713 if ((ret
= dhd_wl_ioctl(&dhd
->pub
, ifidx
, &ioc
, ioc
.buf
, ioc
.len
)) < 0) {
9715 DHD_ERROR(("%s: tsf is not supported by device\n",
9716 dhd_ifname(&dhd
->pub
, ifidx
)));
9721 s2
= dhd_get_htsf(dhd
, 0);
9723 memcpy(&tsf_buf
, buf
, sizeof(tsf_buf
));
9724 printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
9725 tsf_buf
.high
, tsf_buf
.low
, s2
, dhd
->htsf
.coef
, dhd
->htsf
.coefdec1
,
9726 dhd
->htsf
.coefdec2
, s2
-tsf_buf
.low
);
9727 printf("lasttsf=%08X lastcycle=%08X\n", dhd
->htsf
.last_tsf
, dhd
->htsf
.last_cycle
);
9731 void htsf_update(dhd_info_t
*dhd
, void *data
)
9733 static ulong cur_cycle
= 0, prev_cycle
= 0;
9734 uint32 htsf
, tsf_delta
= 0;
9735 uint32 hfactor
= 0, cyc_delta
, dec1
= 0, dec2
, dec3
, tmp
;
9739 /* cycles_t in inlcude/mips/timex.h */
9743 prev_cycle
= cur_cycle
;
9746 if (cur_cycle
> prev_cycle
)
9747 cyc_delta
= cur_cycle
- prev_cycle
;
9751 cyc_delta
= cur_cycle
+ (0xFFFFFFFF - prev_cycle
);
9755 printf(" tsf update ata point er is null \n");
9757 memcpy(&prev_tsf
, &cur_tsf
, sizeof(tsf_t
));
9758 memcpy(&cur_tsf
, data
, sizeof(tsf_t
));
9760 if (cur_tsf
.low
== 0) {
9761 DHD_INFO((" ---- 0 TSF, do not update, return\n"));
9765 if (cur_tsf
.low
> prev_tsf
.low
)
9766 tsf_delta
= (cur_tsf
.low
- prev_tsf
.low
);
9768 DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
9769 cur_tsf
.low
, prev_tsf
.low
));
9770 if (cur_tsf
.high
> prev_tsf
.high
) {
9771 tsf_delta
= cur_tsf
.low
+ (0xFFFFFFFF - prev_tsf
.low
);
9772 DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta
));
9775 return; /* do not update */
9779 hfactor
= cyc_delta
/ tsf_delta
;
9780 tmp
= (cyc_delta
- (hfactor
* tsf_delta
))*10;
9781 dec1
= tmp
/tsf_delta
;
9782 dec2
= ((tmp
- dec1
*tsf_delta
)*10) / tsf_delta
;
9783 tmp
= (tmp
- (dec1
*tsf_delta
))*10;
9784 dec3
= ((tmp
- dec2
*tsf_delta
)*10) / tsf_delta
;
9803 htsf
= ((cyc_delta
* 10) / (hfactor
*10+dec1
)) + prev_tsf
.low
;
9804 dhd
->htsf
.coef
= hfactor
;
9805 dhd
->htsf
.last_cycle
= cur_cycle
;
9806 dhd
->htsf
.last_tsf
= cur_tsf
.low
;
9807 dhd
->htsf
.coefdec1
= dec1
;
9808 dhd
->htsf
.coefdec2
= dec2
;
9811 htsf
= prev_tsf
.low
;
9815 #endif /* WLMEDIA_HTSF */
#ifdef CUSTOM_SET_CPUCORE
/* Pin (set=TRUE) or unpin the DPC and RXF kthreads to their dedicated CPU
 * cores while a VHT80 channel is active, retrying transient failures.
 * NOTE(review): retry structure reconstructed from the surviving fragments —
 * confirm against the original source.
 */
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
	int e_dpc = 0, e_rxf = 0, retry_set = 0;

	if (!(dhd->chan_isvht80)) {
		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
		return;
	}

	if (DPC_CPUCORE) {
		do {
			if (set == TRUE) {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(DPC_CPUCORE));
			} else {
				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
				return;
			}
			if (e_dpc < 0)
				OSL_SLEEP(1);
		} while (e_dpc < 0);
	}
	if (RXF_CPUCORE) {
		retry_set = 0;
		do {
			if (set == TRUE) {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(RXF_CPUCORE));
			} else {
				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
					cpumask_of(PRIMARY_CPUCORE));
			}
			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
				return;
			}
			if (e_rxf < 0)
				OSL_SLEEP(1);
		} while (e_rxf < 0);
	}
#ifdef DHD_OF_SUPPORT
	interrupt_set_cpucore(set);
#endif /* DHD_OF_SUPPORT */
	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

	return;
}
#endif /* CUSTOM_SET_CPUCORE */
9869 #if defined(DHD_TCP_WINSIZE_ADJUST)
9870 static int dhd_port_list_match(int port
)
9873 for (i
= 0; i
< MAX_TARGET_PORTS
; i
++) {
9874 if (target_ports
[i
] == port
)
9879 static void dhd_adjust_tcp_winsize(int op_mode
, struct sk_buff
*skb
)
9881 struct iphdr
*ipheader
;
9882 struct tcphdr
*tcpheader
;
9884 int32 incremental_checksum
;
9886 if (!(op_mode
& DHD_FLAG_HOSTAP_MODE
))
9888 if (skb
== NULL
|| skb
->data
== NULL
)
9891 ipheader
= (struct iphdr
*)(skb
->data
);
9893 if (ipheader
->protocol
== IPPROTO_TCP
) {
9894 tcpheader
= (struct tcphdr
*) skb_pull(skb
, (ipheader
->ihl
)<<2);
9896 win_size
= ntoh16(tcpheader
->window
);
9897 if (win_size
< MIN_TCP_WIN_SIZE
&&
9898 dhd_port_list_match(ntoh16(tcpheader
->dest
))) {
9899 incremental_checksum
= ntoh16(tcpheader
->check
);
9900 incremental_checksum
+= win_size
- win_size
*WIN_SIZE_SCALE_FACTOR
;
9901 if (incremental_checksum
< 0)
9902 --incremental_checksum
;
9903 tcpheader
->window
= hton16(win_size
*WIN_SIZE_SCALE_FACTOR
);
9904 tcpheader
->check
= hton16((unsigned short)incremental_checksum
);
9907 skb_push(skb
, (ipheader
->ihl
)<<2);
9910 #endif /* DHD_TCP_WINSIZE_ADJUST */
9912 /* Get interface specific ap_isolate configuration */
9913 int dhd_get_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
)
9915 dhd_info_t
*dhd
= dhdp
->info
;
9918 ASSERT(idx
< DHD_MAX_IFS
);
9920 ifp
= dhd
->iflist
[idx
];
9922 return ifp
->ap_isolate
;
9925 /* Set interface specific ap_isolate configuration */
9926 int dhd_set_ap_isolate(dhd_pub_t
*dhdp
, uint32 idx
, int val
)
9928 dhd_info_t
*dhd
= dhdp
->info
;
9931 ASSERT(idx
< DHD_MAX_IFS
);
9933 ifp
= dhd
->iflist
[idx
];
9935 ifp
->ap_isolate
= val
;
9941 /* Returns interface specific WMF configuration */
9942 dhd_wmf_t
* dhd_wmf_conf(dhd_pub_t
*dhdp
, uint32 idx
)
9944 dhd_info_t
*dhd
= dhdp
->info
;
9947 ASSERT(idx
< DHD_MAX_IFS
);
9949 ifp
= dhd
->iflist
[idx
];
9952 #endif /* DHD_WMF */
9955 #ifdef DHD_UNICAST_DHCP
9957 dhd_get_pkt_ether_type(dhd_pub_t
*pub
, void *pktbuf
,
9958 uint8
**data_ptr
, int *len_ptr
, uint16
*et_ptr
, bool *snap_ptr
)
9960 uint8
*frame
= PKTDATA(pub
->osh
, pktbuf
);
9961 int length
= PKTLEN(pub
->osh
, pktbuf
);
9962 uint8
*pt
; /* Pointer to type field */
9965 /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
9966 if (length
< ETHER_HDR_LEN
) {
9967 DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
9968 __FUNCTION__
, length
));
9970 } else if (ntoh16_ua(frame
+ ETHER_TYPE_OFFSET
) >= ETHER_TYPE_MIN
) {
9971 /* Frame is Ethernet II */
9972 pt
= frame
+ ETHER_TYPE_OFFSET
;
9973 } else if (length
>= ETHER_HDR_LEN
+ SNAP_HDR_LEN
+ ETHER_TYPE_LEN
&&
9974 !bcmp(llc_snap_hdr
, frame
+ ETHER_HDR_LEN
, SNAP_HDR_LEN
)) {
9975 pt
= frame
+ ETHER_HDR_LEN
+ SNAP_HDR_LEN
;
9978 DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
9983 ethertype
= ntoh16_ua(pt
);
9985 /* Skip VLAN tag, if any */
9986 if (ethertype
== ETHER_TYPE_8021Q
) {
9989 if ((pt
+ ETHER_TYPE_LEN
) > (frame
+ length
)) {
9990 DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
9991 __FUNCTION__
, length
));
9995 ethertype
= ntoh16_ua(pt
);
9998 *data_ptr
= pt
+ ETHER_TYPE_LEN
;
9999 *len_ptr
= length
- (pt
+ ETHER_TYPE_LEN
- frame
);
10000 *et_ptr
= ethertype
;
10006 dhd_get_pkt_ip_type(dhd_pub_t
*pub
, void *pktbuf
,
10007 uint8
**data_ptr
, int *len_ptr
, uint8
*prot_ptr
)
10009 struct ipv4_hdr
*iph
; /* IP frame pointer */
10010 int iplen
; /* IP frame length */
10011 uint16 ethertype
, iphdrlen
, ippktlen
;
10016 if (dhd_get_pkt_ether_type(pub
, pktbuf
, (uint8
**)&iph
,
10017 &iplen
, ðertype
, &snap
) != 0)
10020 if (ethertype
!= ETHER_TYPE_IP
) {
10024 /* We support IPv4 only */
10025 if (iplen
< IPV4_OPTIONS_OFFSET
|| (IP_VER(iph
) != IP_VER_4
)) {
10029 /* Header length sanity */
10030 iphdrlen
= IPV4_HLEN(iph
);
10033 * Packet length sanity; sometimes we receive eth-frame size bigger
10034 * than the IP content, which results in a bad tcp chksum
10036 ippktlen
= ntoh16(iph
->tot_len
);
10037 if (ippktlen
< iplen
) {
10039 DHD_INFO(("%s: extra frame length ignored\n",
10042 } else if (ippktlen
> iplen
) {
10043 DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
10044 __FUNCTION__
, ippktlen
- iplen
));
10048 if (iphdrlen
< IPV4_OPTIONS_OFFSET
|| iphdrlen
> iplen
) {
10049 DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
10050 __FUNCTION__
, iphdrlen
, IPV4_OPTIONS_OFFSET
, iplen
));
10055 * We don't handle fragmented IP packets. A first frag is indicated by the MF
10056 * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
10058 iph_frag
= ntoh16(iph
->frag
);
10060 if ((iph_frag
& IPV4_FRAG_MORE
) || (iph_frag
& IPV4_FRAG_OFFSET_MASK
) != 0) {
10061 DHD_INFO(("DHD:%s: IP fragment not handled\n",
10066 prot
= IPV4_PROT(iph
);
10068 *data_ptr
= (((uint8
*)iph
) + iphdrlen
);
10069 *len_ptr
= iplen
- iphdrlen
;
10074 /** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
10076 int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
)
10078 dhd_sta_t
* stainfo
;
10079 uint8
*eh
= PKTDATA(pub
->osh
, pktbuf
);
10088 if (!ETHER_ISMULTI(eh
+ ETHER_DEST_OFFSET
))
10090 if (dhd_get_pkt_ip_type(pub
, pktbuf
, &udph
, &udpl
, &prot
) != 0)
10092 if (prot
!= IP_PROT_UDP
)
10094 /* check frame length, at least UDP_HDR_LEN */
10095 if (udpl
< UDP_HDR_LEN
) {
10096 DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
10100 port
= ntoh16_ua(udph
+ UDP_DEST_PORT_OFFSET
);
10101 /* only process DHCP packets from server to client */
10102 if (port
!= DHCP_PORT_CLIENT
)
10105 dhcp
= udph
+ UDP_HDR_LEN
;
10106 dhcpl
= udpl
- UDP_HDR_LEN
;
10108 if (dhcpl
< DHCP_CHADDR_OFFSET
+ ETHER_ADDR_LEN
) {
10109 DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
10113 /* only process DHCP reply(offer/ack) packets */
10114 if (*(dhcp
+ DHCP_TYPE_OFFSET
) != DHCP_TYPE_REPLY
)
10116 chaddr
= dhcp
+ DHCP_CHADDR_OFFSET
;
10117 stainfo
= dhd_find_sta(pub
, ifidx
, chaddr
);
10119 bcopy(chaddr
, eh
+ ETHER_DEST_OFFSET
, ETHER_ADDR_LEN
);
10124 #endif /* DHD_UNICAST_DHD */
10125 #ifdef DHD_L2_FILTER
10126 /* Check if packet type is ICMP ECHO */
10128 int dhd_l2_filter_block_ping(dhd_pub_t
*pub
, void *pktbuf
, int ifidx
)
10130 struct bcmicmp_hdr
*icmph
;
10134 if (dhd_get_pkt_ip_type(pub
, pktbuf
, (uint8
**)&icmph
, &udpl
, &prot
) != 0)
10136 if (prot
== IP_PROT_ICMP
) {
10137 if (icmph
->type
== ICMP_TYPE_ECHO_REQUEST
)
10142 #endif /* DHD_L2_FILTER */
10144 #ifdef SET_RPS_CPUS
10145 int custom_rps_map_set(struct netdev_rx_queue
*queue
, char *buf
, size_t len
)
10147 struct rps_map
*old_map
, *map
;
10148 cpumask_var_t mask
;
10150 static DEFINE_SPINLOCK(rps_map_lock
);
10152 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
10154 if (!alloc_cpumask_var(&mask
, GFP_KERNEL
)) {
10155 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__
));
10159 err
= bitmap_parse(buf
, len
, cpumask_bits(mask
), nr_cpumask_bits
);
10161 free_cpumask_var(mask
);
10162 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__
));
10166 map
= kzalloc(max_t(unsigned int,
10167 RPS_MAP_SIZE(cpumask_weight(mask
)), L1_CACHE_BYTES
),
10170 free_cpumask_var(mask
);
10171 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__
));
10176 for_each_cpu(cpu
, mask
)
10177 map
->cpus
[i
++] = cpu
;
10183 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__
));
10187 spin_lock(&rps_map_lock
);
10188 old_map
= rcu_dereference_protected(queue
->rps_map
,
10189 lockdep_is_held(&rps_map_lock
));
10190 rcu_assign_pointer(queue
->rps_map
, map
);
10191 spin_unlock(&rps_map_lock
);
10194 static_key_slow_inc(&rps_needed
);
10196 kfree_rcu(old_map
, rcu
);
10197 static_key_slow_dec(&rps_needed
);
10199 free_cpumask_var(mask
);
10201 DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__
, map
->len
));
10205 void custom_rps_map_clear(struct netdev_rx_queue
*queue
)
10207 struct rps_map
*map
;
10209 DHD_INFO(("%s : Entered.\n", __FUNCTION__
));
10211 map
= rcu_dereference_protected(queue
->rps_map
, 1);
10213 RCU_INIT_POINTER(queue
->rps_map
, NULL
);
10214 kfree_rcu(map
, rcu
);
10215 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__
));
10218 #endif /* SET_RPS_CPUS */
10220 #if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
/*
 * NOTE(review): extraction-garbled span — statements are split across lines
 * and several original lines (return type, braces, return statements) are
 * missing.  Visible behavior only: caches the shared-memory packet size in
 * the file-scope 'sda_packet_length' (later read by SDA_function4Send).
 * The other parameters are not used in the visible lines — presumably
 * consumed by the missing code; confirm against the full source.
 */
10222 SDA_setSharedMemory4Send(unsigned int buffer_id
,
10223 unsigned char *buffer
, unsigned int buffer_size
,
10224 unsigned int packet_size
, unsigned int headroom_size
)
10226 dhd_info_t
*dhd
= dhd_global
;
10228 sda_packet_length
= packet_size
;
/*
 * NOTE(review): garbled span with the body missing — only the parameter list
 * and the 'dhd = dhd_global' local are visible.  What is done with
 * 'packet_cb' (presumably stored for send-done notification) cannot be
 * determined from here; verify against the full source.
 */
10236 SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb
)
10238 dhd_info_t
*dhd
= dhd_global
;
10247 SDA_getTsf(unsigned char vif_id
)
10249 dhd_info_t
*dhd
= dhd_global
;
10251 char buf
[WLC_IOCTL_SMLEN
];
10259 memset(buf
, 0, sizeof(buf
));
10261 if (vif_id
== 0) /* wlan0 tsf */
10262 ifidx
= dhd_ifname2idx(dhd
, "wlan0");
10263 else if (vif_id
== 1) /* p2p0 tsf */
10264 ifidx
= dhd_ifname2idx(dhd
, "p2p0");
10266 bcm_mkiovar("tsf_bss", 0, 0, buf
, sizeof(buf
));
10268 if (dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_GET_VAR
, buf
, sizeof(buf
), FALSE
, ifidx
) < 0) {
10269 DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__
));
10273 memcpy(&tsf_buf
, buf
, sizeof(tsf_buf
));
10274 tsf_val
= (uint64
)tsf_buf
.high
;
10275 DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
10276 __FUNCTION__
, tsf_buf
.high
, tsf_buf
.low
));
10278 return ((tsf_val
<< 32) | tsf_buf
.low
);
10280 EXPORT_SYMBOL(SDA_getTsf
);
/*
 * NOTE(review): the enclosing function's header line was dropped by the
 * extraction — judging by the EXPORT_SYMBOL(SDA_syncTsf) below, this is the
 * body of SDA_syncTsf.  Visible behavior: issues the "wa_tsf_sync" iovar
 * (4-byte 'tsf_sync' value, declared in the missing header or a missing
 * local line) via WLC_SET_VAR on interface 0, ignoring the ioctl result.
 * Confirm the signature against the full source.
 */
10285 dhd_info_t
*dhd
= dhd_global
;
10287 char iovbuf
[WLC_IOCTL_SMLEN
];
10289 bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync
, 4, iovbuf
, sizeof(iovbuf
));
10290 dhd_wl_ioctl_cmd(&dhd
->pub
, WLC_SET_VAR
, iovbuf
, sizeof(iovbuf
), TRUE
, 0);
10292 DHD_TRACE(("%s\n", __FUNCTION__
));
10296 extern struct net_device
*wl0dot1_dev
;
10299 BCMFASTPATH
SDA_function4Send(uint buffer_id
, void *packet
, uint packet_size
)
10301 struct sk_buff
*skb
;
10302 sda_packet_t
*shm_packet
= packet
;
10303 dhd_info_t
*dhd
= dhd_global
;
10306 static unsigned int cnt_t
= 1;
10312 if (dhd
->is_wlanaudio_blist
) {
10313 for (cnt
= 0; cnt
< MAX_WLANAUDIO_BLACKLIST
; cnt
++) {
10314 if (dhd
->wlanaudio_blist
[cnt
].is_blacklist
== true) {
10315 if (!bcmp(dhd
->wlanaudio_blist
[cnt
].blacklist_addr
.octet
,
10316 shm_packet
->headroom
.ether_dhost
, ETHER_ADDR_LEN
))
10322 if ((cnt_t
% 10000) == 0)
10327 /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
10329 skb
= __dev_alloc_skb(TXOFF
+ sda_packet_length
- SDA_PKT_HEADER_SIZE
, GFP_ATOMIC
);
10331 skb_reserve(skb
, TXOFF
- SDA_HEADROOM_SIZE
);
10332 skb_put(skb
, sda_packet_length
- SDA_PKT_HEADER_SIZE
+ SDA_HEADROOM_SIZE
);
10333 skb
->priority
= PRIO_8021D_VO
; /* PRIO_8021D_VO or PRIO_8021D_VI */
10336 skb
->dev
= wl0dot1_dev
;
10337 shm_packet
->txTsf
= 0x0;
10338 shm_packet
->rxTsf
= 0x0;
10339 memcpy(skb
->data
, &shm_packet
->headroom
,
10340 sda_packet_length
- OFFSETOF(sda_packet_t
, headroom
));
10341 shm_packet
->desc
.ready_to_copy
= 0;
10343 dhd_start_xmit(skb
, skb
->dev
);
/*
 * NOTE(review): garbled span with the body missing — only the parameter list
 * and the 'dhd = dhd_global' local are visible.  Presumably registers the
 * receive buffer for WLAN-audio; verify against the full source.
 */
10347 SDA_registerCallback4Recv(unsigned char *pBufferTotal
,
10348 unsigned int BufferTotalSize
)
10350 dhd_info_t
*dhd
= dhd_global
;
/*
 * NOTE(review): garbled span with the body missing — only the parameter list
 * and the 'dhd = dhd_global' local are visible.  None of the buffer
 * parameters are used in the visible lines; verify against the full source.
 */
10359 SDA_setSharedMemory4Recv(unsigned char *pBufferTotal
,
10360 unsigned int BufferTotalSize
,
10361 unsigned int BufferUnitSize
,
10362 unsigned int Headroomsize
)
10364 dhd_info_t
*dhd
= dhd_global
;
/*
 * NOTE(review): garbled span with the body missing — only the parameter list
 * and the 'dhd = dhd_global' local are visible.  Verify the actual
 * receive-done handling against the full source.
 */
10373 SDA_function4RecvDone(unsigned char * pBuffer
, unsigned int BufferSize
)
10375 dhd_info_t
*dhd
= dhd_global
;
10382 EXPORT_SYMBOL(SDA_setSharedMemory4Send
);
10383 EXPORT_SYMBOL(SDA_registerCallback4SendDone
);
10384 EXPORT_SYMBOL(SDA_syncTsf
);
10385 EXPORT_SYMBOL(SDA_function4Send
);
10386 EXPORT_SYMBOL(SDA_registerCallback4Recv
);
10387 EXPORT_SYMBOL(SDA_setSharedMemory4Recv
);
10388 EXPORT_SYMBOL(SDA_function4RecvDone
);
10390 #endif /* CUSTOMER_HW20 && WLANAUDIO */
10392 void *dhd_get_pub(struct net_device
*dev
)
10394 dhd_info_t
*dhdinfo
= *(dhd_info_t
**)netdev_priv(dev
);
10396 return (void *)&dhdinfo
->pub
;
10401 bool dhd_os_wd_timer_enabled(void *bus
)
10403 dhd_pub_t
*pub
= bus
;
10404 dhd_info_t
*dhd
= (dhd_info_t
*)pub
->info
;
10406 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
10408 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__
));
10411 return dhd
->wd_timer_valid
;